hexsha
stringlengths 40
40
| size
int64 1
1.03M
| ext
stringclasses 10
values | lang
stringclasses 1
value | max_stars_repo_path
stringlengths 3
239
| max_stars_repo_name
stringlengths 5
130
| max_stars_repo_head_hexsha
stringlengths 40
78
| max_stars_repo_licenses
listlengths 1
10
| max_stars_count
int64 1
191k
⌀ | max_stars_repo_stars_event_min_datetime
stringlengths 24
24
⌀ | max_stars_repo_stars_event_max_datetime
stringlengths 24
24
⌀ | max_issues_repo_path
stringlengths 3
239
| max_issues_repo_name
stringlengths 5
130
| max_issues_repo_head_hexsha
stringlengths 40
78
| max_issues_repo_licenses
listlengths 1
10
| max_issues_count
int64 1
67k
⌀ | max_issues_repo_issues_event_min_datetime
stringlengths 24
24
⌀ | max_issues_repo_issues_event_max_datetime
stringlengths 24
24
⌀ | max_forks_repo_path
stringlengths 3
239
| max_forks_repo_name
stringlengths 5
130
| max_forks_repo_head_hexsha
stringlengths 40
78
| max_forks_repo_licenses
listlengths 1
10
| max_forks_count
int64 1
105k
⌀ | max_forks_repo_forks_event_min_datetime
stringlengths 24
24
⌀ | max_forks_repo_forks_event_max_datetime
stringlengths 24
24
⌀ | content
stringlengths 1
1.03M
| avg_line_length
float64 1
958k
| max_line_length
int64 1
1.03M
| alphanum_fraction
float64 0
1
|
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
4a10aa61d182fa8b7ff2a8ca41ba9f91223510ef
| 11,862
|
py
|
Python
|
spyne/protocol/json.py
|
lemanyk/spyne
|
12bea0be357ceebec1cf877270ce240424357c7b
|
[
"BSD-3-Clause"
] | 1
|
2021-06-07T16:19:41.000Z
|
2021-06-07T16:19:41.000Z
|
spyne/protocol/json.py
|
lemanyk/spyne
|
12bea0be357ceebec1cf877270ce240424357c7b
|
[
"BSD-3-Clause"
] | null | null | null |
spyne/protocol/json.py
|
lemanyk/spyne
|
12bea0be357ceebec1cf877270ce240424357c7b
|
[
"BSD-3-Clause"
] | null | null | null |
#
# spyne - Copyright (C) Spyne contributors.
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301
#
"""The ``spyne.protocol.json`` package contains the Json-related protocols.
Currently, only :class:`spyne.protocol.json.JsonDocument` is supported.
Initially released in 2.8.0-rc.
This module is EXPERIMENTAL. You may not recognize the code here next time you
look at it.
Missing Types
=============
The JSON standard does not define every type that Spyne supports. These include
Date/Time types as well as arbitrary-length integers and arbitrary-precision
decimals. Integers are parsed to ``int``\s or ``long``\s seamlessly but
``Decimal``\s are only parsed correctly when they come off as strings.
While it's possible to e.g. (de)serialize floats to ``Decimal``\s by adding
hooks to ``parse_float`` [#]_ (and convert later as necessary), such
customizations apply to the whole incoming document which pretty much messes up
``AnyDict`` serialization and deserialization.
It also wasn't possible to work with ``object_pairs_hook`` as Spyne's parsing
is always "from outside to inside" whereas ``object_pairs_hook`` is passed
``dict``\s basically in any order "from inside to outside".
.. [#] http://docs.python.org/2/library/json.html#json.loads
"""
from __future__ import absolute_import
import logging
logger = logging.getLogger(__name__)
from itertools import chain
try:
import simplejson as json
from simplejson.decoder import JSONDecodeError
except ImportError:
import json
JSONDecodeError = ValueError
from spyne.error import ValidationError
from spyne.error import ResourceNotFoundError
from spyne.model.binary import BINARY_ENCODING_BASE64
from spyne.model.primitive import Date
from spyne.model.primitive import Time
from spyne.model.primitive import DateTime
from spyne.model.primitive import Double
from spyne.model.primitive import Integer
from spyne.model.primitive import Boolean
from spyne.model.fault import Fault
from spyne.protocol.dictdoc import HierDictDocument
class JsonEncoder(json.JSONEncoder):
    """JSON encoder that falls back to ``list()`` for objects the stock
    encoder cannot serialize (typically generators produced by Spyne's
    serializers)."""

    def default(self, o):
        """Return a serializable form of *o*.

        Tries the stock encoder first; on TypeError assumes *o* is an
        iterable (most likely a generator) and materializes it as a list.
        """
        try:
            return super(JsonEncoder, self).default(o)

        # Fixed: ``except TypeError, e`` is Python-2-only syntax; the
        # ``as`` form works on both Python 2.6+ and Python 3.
        except TypeError as e:
            # if json can't serialize it, it's possibly a generator. If not,
            # additional hacks are welcome :)
            logger.exception(e)
            return list(o)
class JsonDocument(HierDictDocument):
    """An implementation of the json protocol that uses simplejson package when
    available, json package otherwise.

    :param ignore_wrappers: Does not serialize wrapper objects.
    :param complex_as: One of (list, dict). When list, the complex objects are
        serialized to a list of values instead of a dict of key/value pairs.
    """

    mime_type = 'application/json'

    # advertised protocol type tags: the parent's set plus 'json'
    type = set(HierDictDocument.type)
    type.add('json')

    default_binary_encoding = BINARY_ENCODING_BASE64

    # flags used just for tests
    _decimal_as_string = True

    def __init__(self, app=None, validator=None, mime_type=None,
                 ignore_uncap=False,
                 # DictDocument specific
                 ignore_wrappers=True, complex_as=dict, ordered=False):
        HierDictDocument.__init__(self, app, validator, mime_type, ignore_uncap,
                                  ignore_wrappers, complex_as, ordered)

        # JSON represents these types natively, so (de)serialization is a
        # pass-through instead of the default string round-trip.
        self._from_string_handlers[Double] = lambda cls, val: val
        self._from_string_handlers[Boolean] = lambda cls, val: val
        self._from_string_handlers[Integer] = lambda cls, val: val

        self._to_string_handlers[Double] = lambda cls, val: val
        self._to_string_handlers[Boolean] = lambda cls, val: val
        self._to_string_handlers[Integer] = lambda cls, val: val

    def validate(self, key, cls, val):
        """Validate *val* for type *cls*; date/time values must arrive as
        strings that pass the type's own string validation.

        :raises ValidationError: when the value fails validation.
        """
        super(JsonDocument, self).validate(key, cls, val)

        # NOTE: ``basestring`` keeps this method Python-2-only as written.
        if issubclass(cls, (DateTime, Date, Time)) and not (
                    isinstance(val, basestring) and
                                             cls.validate_string(cls, val)):
            raise ValidationError(key, val)

    def create_in_document(self, ctx, in_string_encoding=None):
        """Sets ``ctx.in_document`` using ``ctx.in_string``."""
        if in_string_encoding is None:
            in_string_encoding = 'UTF-8'

        try:
            ctx.in_document = json.loads(
                    ''.join(ctx.in_string).decode(in_string_encoding),
                )
        # Fixed: ``except E, e`` is Python-2-only syntax; the ``as`` form
        # works on both Python 2.6+ and Python 3.
        except JSONDecodeError as e:
            raise Fault('Client.JsonDecodeError', repr(e))

    def create_out_string(self, ctx, out_string_encoding='utf8'):
        """Sets ``ctx.out_string`` using ``ctx.out_document``."""
        # lazy: one JSON string per output document
        ctx.out_string = (json.dumps(o, cls=JsonEncoder)
                                                  for o in ctx.out_document)
class JsonP(JsonDocument):
    """Wraps every response document in a designated JavaScript function
    call (JSONP). Input handling is identical to ``JsonDocument``.

    :param callback_name: Name of the JavaScript function that will wrap
        all response documents.

    For the remaining arguments, see
    :class:`spyne.protocol.json.JsonDocument`.
    """

    type = set(HierDictDocument.type)
    type.add('jsonp')

    def __init__(self, callback_name, *args, **kwargs):
        super(JsonP, self).__init__(*args, **kwargs)
        self.callback_name = callback_name

    def create_out_string(self, ctx):
        super(JsonP, self).create_out_string(ctx)

        # sandwich the serialized document(s) between "callback(" and ");"
        prefix = [self.callback_name, '(']
        suffix = [');']
        ctx.out_string = chain(prefix, ctx.out_string, suffix)
class _SpyneJsonRpc1(JsonDocument):
    """Experimental Spyne-flavoured Json-Rpc protocol, envelope version 1.

    The envelope is a dict carrying a version tag plus either a single-key
    body (``{method_name: params}``) or a fault, and an optional header.
    """

    version = 1

    # envelope keys
    VERSION = 'ver'
    BODY = 'body'
    HEAD = 'head'
    FAULT = 'fault'

    def decompose_incoming_envelope(self, ctx, message=JsonDocument.REQUEST):
        """Split ``ctx.in_document`` into header/body documents and extract
        the requested method name.

        :raises ValidationError: when the envelope is malformed.
        """
        indoc = ctx.in_document
        if not isinstance(indoc, dict):
            raise ValidationError("Invalid Request")

        ver = indoc.get(self.VERSION)
        if ver is None:
            raise ValidationError("Missing Version")

        body = indoc.get(self.BODY)
        err = indoc.get(self.FAULT)
        if body is None and err is None:
            raise ValidationError("Missing request")

        ctx.protocol.error = False
        if err is not None:
            ctx.in_body_doc = err
            ctx.protocol.error = True
        else:
            if not isinstance(body, dict):
                raise ValidationError("Missing request body")
            if not len(body) == 1:
                raise ValidationError("Need len(body) == 1")

            ctx.in_header_doc = indoc.get(self.HEAD)
            if not isinstance(ctx.in_header_doc, list):
                ctx.in_header_doc = [ctx.in_header_doc]

            # the body's single key is the method name, its value the args
            (ctx.method_request_string, ctx.in_body_doc), = body.items()

    def deserialize(self, ctx, message):
        """Deserialize header and body documents into ``ctx.in_header`` /
        ``ctx.in_object`` (or ``ctx.in_error`` for faults)."""
        assert message in (self.REQUEST, self.RESPONSE)

        self.event_manager.fire_event('before_deserialize', ctx)

        if ctx.descriptor is None:
            raise ResourceNotFoundError(ctx.method_request_string)

        if ctx.protocol.error:
            ctx.in_object = None
            ctx.in_error = self._doc_to_object(Fault, ctx.in_body_doc)
        else:
            if message is self.REQUEST:
                header_class = ctx.descriptor.in_header
                body_class = ctx.descriptor.in_message
            elif message is self.RESPONSE:
                header_class = ctx.descriptor.out_header
                body_class = ctx.descriptor.out_message

            # decode header objects
            if (ctx.in_header_doc is not None and header_class is not None):
                headers = [None] * len(header_class)
                for i, (header_doc, head_class) in enumerate(
                                      zip(ctx.in_header_doc, header_class)):
                    if header_doc is not None and i < len(header_doc):
                        headers[i] = self._doc_to_object(head_class, header_doc)

                if len(headers) == 1:
                    ctx.in_header = headers[0]
                else:
                    ctx.in_header = headers

            # decode method arguments
            if ctx.in_body_doc is None:
                ctx.in_object = [None] * len(body_class._type_info)
            else:
                ctx.in_object = self._doc_to_object(body_class, ctx.in_body_doc)

        self.event_manager.fire_event('after_deserialize', ctx)

    def serialize(self, ctx, message):
        """Build the outgoing envelope into ``ctx.out_document``."""
        assert message in (self.REQUEST, self.RESPONSE)

        self.event_manager.fire_event('before_serialize', ctx)

        # (removed an unused ``nsmap`` local left over from the SOAP
        # serializer this code was adapted from)
        ctx.out_document = {
            "ver": self.version,
        }
        if ctx.out_error is not None:
            ctx.out_document[self.FAULT] = Fault.to_dict(Fault, ctx.out_error)

        else:
            if message is self.REQUEST:
                header_message_class = ctx.descriptor.in_header
                body_message_class = ctx.descriptor.in_message

            elif message is self.RESPONSE:
                header_message_class = ctx.descriptor.out_header
                body_message_class = ctx.descriptor.out_message

            # assign raw result to its wrapper, result_message; missing
            # trailing values are padded with None
            out_type_info = body_message_class._type_info
            out_object = body_message_class()

            keys = iter(out_type_info)
            values = iter(ctx.out_object)
            while True:
                try:
                    # Fixed: ``it.next()`` is Python-2-only; the ``next()``
                    # builtin (2.6+) works on both major versions.
                    k = next(keys)
                except StopIteration:
                    break
                try:
                    v = next(values)
                except StopIteration:
                    v = None

                setattr(out_object, k, v)

            ctx.out_document[self.BODY] = ctx.out_body_doc = \
                        self._object_to_doc(body_message_class, out_object)

            # header
            if ctx.out_header is not None and header_message_class is not None:
                if isinstance(ctx.out_header, (list, tuple)):
                    out_headers = ctx.out_header
                else:
                    out_headers = (ctx.out_header,)

                ctx.out_header_doc = out_header_doc = []

                for header_class, out_header in zip(header_message_class,
                                                                  out_headers):
                    out_header_doc.append(self._object_to_doc(header_class,
                                                                   out_header))

                if len(out_header_doc) > 1:
                    ctx.out_document[self.HEAD] = out_header_doc
                else:
                    ctx.out_document[self.HEAD] = out_header_doc[0]

        self.event_manager.fire_event('after_serialize', ctx)
# Registry of supported JsonRpc dialects, keyed by flavour name.
_json_rpc_flavours = {
    'spyne': _SpyneJsonRpc1
}
def JsonRpc(flavour, *args, **kwargs):
    """Factory returning a JsonRpc protocol instance for *flavour*.

    Remaining arguments are forwarded to the flavour's constructor.
    """
    assert flavour in _json_rpc_flavours, "Unknown JsonRpc flavour"
    protocol_cls = _json_rpc_flavours[flavour]
    return protocol_cls(*args, **kwargs)
| 35.836858
| 80
| 0.625695
|
4a10aadfa445e3668041d60b8476b9edf02f69ea
| 1,292
|
py
|
Python
|
venv/lib/python3.8/site-packages/vsts/git/v4_1/models/git_user_date.py
|
amcclead7336/Enterprise_Data_Science_Final
|
ccdc0aa08d4726bf82d71c11a1cc0c63eb301a28
|
[
"Unlicense",
"MIT"
] | null | null | null |
venv/lib/python3.8/site-packages/vsts/git/v4_1/models/git_user_date.py
|
amcclead7336/Enterprise_Data_Science_Final
|
ccdc0aa08d4726bf82d71c11a1cc0c63eb301a28
|
[
"Unlicense",
"MIT"
] | null | null | null |
venv/lib/python3.8/site-packages/vsts/git/v4_1/models/git_user_date.py
|
amcclead7336/Enterprise_Data_Science_Final
|
ccdc0aa08d4726bf82d71c11a1cc0c63eb301a28
|
[
"Unlicense",
"MIT"
] | 2
|
2021-05-23T16:46:31.000Z
|
2021-05-26T23:51:09.000Z
|
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
# Generated file, DO NOT EDIT
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------------------------
from msrest.serialization import Model
class GitUserDate(Model):
    """User/date pair attached to a Git operation.

    :param date: Date of the Git operation.
    :type date: datetime
    :param email: Email address of the user performing the Git operation.
    :type email: str
    :param name: Name of the user performing the Git operation.
    :type name: str
    """

    # msrest (de)serialization map: attribute -> wire key and wire type
    _attribute_map = {
        'date': {'key': 'date', 'type': 'iso-8601'},
        'email': {'key': 'email', 'type': 'str'},
        'name': {'key': 'name', 'type': 'str'}
    }

    def __init__(self, date=None, email=None, name=None):
        super(GitUserDate, self).__init__()
        self.name = name
        self.email = email
        self.date = date
| 38
| 95
| 0.487616
|
4a10ab7e3ec05ac3c50d0a5eae98410fcd8c8911
| 9,258
|
py
|
Python
|
eb-archive/eb6.py
|
SBC-Collaboration/SBC-EventBuilder
|
0ee90774b5b8f1e15ccc1847677254641aab7676
|
[
"MIT"
] | null | null | null |
eb-archive/eb6.py
|
SBC-Collaboration/SBC-EventBuilder
|
0ee90774b5b8f1e15ccc1847677254641aab7676
|
[
"MIT"
] | null | null | null |
eb-archive/eb6.py
|
SBC-Collaboration/SBC-EventBuilder
|
0ee90774b5b8f1e15ccc1847677254641aab7676
|
[
"MIT"
] | null | null | null |
from tkinter.constants import DISABLED
from tkinter import ttk
import RPi.GPIO as GPIO
import tkinter as tk
from datetime import datetime
from PIL import ImageTk, Image
from pathlib import Path
import time
import os
# GPIO Setup
SimOut = [11, 13, 15]    # activate lines to the camera RPis
InPins = [18, 36, 40]    # ready/ack inputs from the camera RPis
OutPins = [16, 32, 38]   # trigger outputs to the arduino
# NOTE(review): Error_LED (38) is also listed in OutPins — confirm the
# pin double-assignment is intended.
Error_LED = 38
Trig_0_LED = 37

GPIO.setmode(GPIO.BOARD)  # use Physical GPIO Numbering
GPIO.setup(InPins, GPIO.IN)
GPIO.setup(OutPins, GPIO.OUT)
GPIO.output(OutPins, GPIO.LOW)
GPIO.setup(SimOut, GPIO.OUT)
GPIO.output(SimOut, GPIO.LOW)
GPIO.setup(Error_LED, GPIO.OUT)
GPIO.output(Error_LED, GPIO.LOW)
GPIO.setup(Trig_0_LED, GPIO.OUT)
GPIO.output(Trig_0_LED, GPIO.LOW)

# Tkinter setup
tk_root = tk.Tk()
tk_root.title('Event Builder')
tabControl = ttk.Notebook(tk_root)
tab1 = ttk.Frame(tabControl)
# Fixed: ``tab2`` was created and added to the notebook twice, so the
# 'Save Directory' tab was silently replaced by 'Image Viewer'. Give
# 'Save Directory' its own frame; ``tab2`` stays the image-viewer frame
# that the rest of this file references.
tab_save = ttk.Frame(tabControl)
tab2 = ttk.Frame(tabControl)
tabControl.add(tab1, text='Event run')
tabControl.add(tab_save, text='Save Directory')
tabControl.add(tab2, text='Image Viewer')
tabControl.pack(expand=1, fill='both')

##############################################
######## Tab 1: event start/end etc. #########
##############################################
class EB:
    """Event-run tab controller.

    Raises the activate lines to the camera RPis, confirms they respond,
    asserts the trigger outputs to the arduino, and keeps a Tk timer
    readout updated until either ``max_time`` elapses or the operator
    presses Man_Trigger.
    """

    def __init__(self, master, max_time):
        # master: parent widget for this tab; max_time: event cap (seconds)
        self.master = master
        self.max_time = max_time
        # elapsed-time value displayed by the label below
        self.show_time = tk.DoubleVar()
        self.show_time.set(0)
        self.display_time = ttk.Label(self.master, text='Event time: ', textvariable=self.show_time)
        self.display_time.grid(row=0, column=0)
        self.buttonstart = ttk.Button(self.master, text='Start Event', command=self.run_event)
        self.buttonstart.grid(row=1, column=0)
        self.buttonmantrig = ttk.Button(self.master, text='Man_Trigger', command=self.set_trig)
        self.buttonmantrig.grid(row=2, column=0)
        self.buttonquit = ttk.Button(self.master, text='Quit', command=self.leave)
        self.buttonquit.grid(row=3, column=0)
        self.trig_0_state = False  # True once a manual trigger is requested
        self.trig_reset = True     # True until event_start() has run

    # Sets trig_reset to false and sends activate signal to all camera RPis
    def event_start(self):
        for i in range(len(SimOut)):
            GPIO.output(SimOut[i], GPIO.HIGH)
        make_folder()
        return False

    # Helper for fifo_signal
    def check_in_pins(self):
        # True only when every RPi input line reads HIGH (all ready)
        for i in range(len(InPins)):
            if GPIO.input(InPins[i]) != GPIO.HIGH:
                return False
        return True

    # Checks if all RPis respond and are ready. Sets trig_0_state false if all RPis
    # are ready (event can keep running)
    def fifo_signal(self):
        if (self.check_in_pins() and not self.trig_reset): # input signals from RPis
            # send output signal to arduino once all RPi_State == Active
            for i in range(len(OutPins)):
                GPIO.output(OutPins[i], GPIO.HIGH)
            # swap the Start button for a disabled copy while running
            self.buttonstart.grid_forget()
            self.buttonstart = ttk.Button(self.master, text='Start Event', state=DISABLED)
            self.buttonstart.grid(row=1, column=0)
            return False
        else: # Error indicator
            # transient (3 s) on-screen error message
            event_error_label = ttk.Label(self.master, text='Error: All pins are not active')
            event_error_label.grid(row=4, column=0)
            event_error_label.after(3000, event_error_label.destroy)
            return True

    # Recursively calls itself (keeps the event running) until timer exceeds max time
    # or Man_Trigger button is pressed.
    def iterate(self):
        timer = time.perf_counter() - self.tic
        if (timer > self.max_time or self.trig_0_state):
            # event over: drop the trigger outputs
            for i in range(len(OutPins)):
                GPIO.output(OutPins[i], GPIO.LOW)
            if self.trig_0_state:
                # Make sure to remove this LED indicator section later!
                GPIO.output(Trig_0_LED, GPIO.HIGH)
                time.sleep(0.1)
                GPIO.output(Trig_0_LED, GPIO.LOW)
            # re-enable the Start button for the next event
            self.buttonstart.grid_forget()
            self.buttonstart = ttk.Button(self.master, text='Start Event', command=self.run_event)
            self.buttonstart.grid(row=1, column=0)
        else:
            self.show_time.set(round(timer, 4))
            # re-schedule in 50 ms without blocking the Tk event loop
            self.master.after(50, self.iterate)

    # If Man_Trigger button is pressed, sets trig_0_state to true and ends the event
    # before timer passes max time.
    def set_trig(self):
        self.trig_0_state = True

    def run_event(self):
        # start-of-event handshake, then begin the polling loop
        self.trig_reset = self.event_start()
        self.trig_0_state = self.fifo_signal()
        self.tic = time.perf_counter()
        for i in range(len(SimOut)):
            GPIO.output(SimOut[i], GPIO.LOW)
        self.iterate()

    def leave(self):
        # release GPIO before tearing the GUI down
        GPIO.cleanup()
        tk_root.quit()
        exit()
# Makes new folder for current event and sets softlink to the created folder
curr_directory = 'temp'

def make_folder():
    """Create ``./<dd_mm_yyyy>/<index>`` for the current event and repoint
    ``/home/pi/Images`` at it by renaming a freshly created symlink."""
    global curr_directory
    now = datetime.now()
    today = now.strftime('%d') + '_' + now.strftime('%m') + '_' + now.strftime('%Y')
    try:
        os.mkdir('./'+ today)
    except FileExistsError:
        print('Directory for today already exists')
    index = 0
    # NOTE(review): os.walk counts directories recursively, so nested dirs
    # would inflate the index — confirm event dirs are always flat here.
    for root, dirs, files in os.walk('./' + today):
        for d in dirs:
            index += 1
    try:
        os.mkdir(today + '/' + str(index))
    except Exception as e:
        print(e)
    # symlink under the Desktop path, then move it over /home/pi/Images
    # (rename of a symlink replaces the link, not the target)
    curr_directory = './Desktop/'+today+'/'+str(index)
    os.symlink(curr_directory, 'temp')
    os.rename('temp', '/home/pi/Images')
    print('Made directory: ', curr_directory)
##############################################
########### Tab 2: Image viewer ##############
##############################################
baseheight = 500  # target display height (pixels) for every image

# returns a list of all the images (png, jpg) in a directory resized to height = 500 pixels
def image_walk(directory, camera):
    """Return PhotoImages for camera *camera* found under *directory*,
    each resized to ``baseheight`` pixels tall.

    Only .png/.jpg files whose stem ends with the camera number are used.
    """
    image_list = []
    for root, dirs, files in os.walk(directory):
        for file in files:
            filename, extension = os.path.splitext(file)
            # Fixed: ``extension == '.png' or '.jpg'`` was always true —
            # a non-empty string literal is truthy on its own.
            if extension in ('.png', '.jpg'):
                if filename[-1] == str(camera):
                    # Fixed: open the file where os.walk found it instead
                    # of the hard-coded 'images0/' directory.
                    img = Image.open(os.path.join(root, file))
                    # rescaling images to height of 500 pixels
                    hpercent = (baseheight / float(img.size[1]))
                    wsize = int((float(img.size[0]) * float(hpercent)))
                    img = img.resize((wsize, baseheight), Image.ANTIALIAS)
                    image = ImageTk.PhotoImage(img)
                    image_list.append(image)
    return image_list
curr_directory_path = Path(curr_directory)
image_list0 = image_walk(curr_directory_path, 0)  # camera-0 images
image_list1 = image_walk(curr_directory_path, 1)  # camera-1 images

def check():
    """Show the first image pair when both cameras produced images.

    Returns True when images are present, False otherwise.
    NOTE(review): left_img/right_img are locals here, but image_buttons()
    later declares them global — confirm they are published before use.
    """
    if len(image_list0) > 0 and len(image_list1) > 0:
        left_img = ttk.Label(tab2, image=image_list0[0])
        left_img.grid(row=0, column=0, columnspan=3)
        right_img = ttk.Label(tab2, image=image_list1[0])
        right_img.grid(row=0, column=4, columnspan=3)
        return True
    else: return False

images_present = check()
image_number = 0  # index of the image pair currently displayed
# updates forward and backward buttons depending on which image number is being displayed
# (the code is the same for forward and backward, just use diff parts!)
def image_buttons():
    """Redraw both image labels at the current image_number and rebuild
    the navigation buttons."""
    global left_img
    global right_img
    global button_forward
    global button_back
    global image_number
    if images_present:
        # NOTE(review): check() assigns left_img/right_img as locals;
        # confirm they are made global before this first runs.
        left_img.grid_forget()
        left_img = ttk.Label(tab2, image=image_list0[image_number])
        right_img.grid_forget()
        right_img = ttk.Label(tab2, image=image_list1[image_number])
        # Fixed: back/forward take an ``event`` argument (they double as
        # key bindings), so a bare ``command=forward`` raised TypeError on
        # click. Wrap them so button presses supply a dummy event.
        button_forward = ttk.Button(tab2, text='>>', width=25,
                                    command=lambda: forward(None))
        button_back = ttk.Button(tab2, text='<<', width=25,
                                 command=lambda: back(None))
        left_img.grid(row=0, column=0, columnspan=3)
        right_img.grid(row=0, column=4, columnspan=3)
        button_back.grid(row=1, column=0)
        button_forward.grid(row=1, column=2)
def back(event):
    """Step to the previous image pair; show a disabled back button at 0.

    *event* is unused — present so this can also serve as a <Left> binding.
    """
    global image_number
    if image_number != 0:
        image_number -= 1
        image_buttons()
    if image_number == 0:
        # replace the live back button with a disabled one
        button_back = ttk.Button(tab2, width=25, text='<<', state=DISABLED)
        button_back.grid(row=1, column=0)
def forward(event):
    """Step to the next image pair; show a disabled forward button at the
    end of either camera's list.

    *event* is unused — present so this can also serve as a <Right> binding.
    """
    global image_number
    # Fixed: comparing against len(...) let image_number reach
    # len(image_list0), which raised IndexError inside image_buttons().
    if (image_number < len(image_list0) - 1
            and image_number < len(image_list1) - 1):
        image_number += 1
        image_buttons()
    else:
        button_forward = ttk.Button(tab2, width=25, text='>>', state=DISABLED)
        button_forward.grid(row=1, column=2)
def leave():
    """Release the GPIO pins, then shut the GUI and the process down."""
    GPIO.cleanup()  # must run before exit so pins are left in a safe state
    tk_root.quit()
    exit()
# Initialize buttons
def initialize_buttons():
    """Create the initial navigation and quit buttons on the viewer tab."""
    global images_present
    # Fixed: publish the nav buttons as globals so image_buttons() can
    # replace them, and wrap back/forward in lambdas — ``command=`` calls
    # with zero arguments, but both functions take an event parameter.
    global button_back, button_forward
    button_back = ttk.Button(tab2, text='<<', width=25,
                             command=lambda: back(None), state=DISABLED)
    button_quit = ttk.Button(tab2, text='Quit', command=leave)
    if images_present:
        button_forward = ttk.Button(tab2, text='>>', width=25,
                                    command=lambda: forward(None))
    else:
        button_forward = ttk.Button(tab2, text='>>', width=25, state=DISABLED)
    button_back.grid(row=1, column=0)
    button_quit.grid(row=1, column=1)
    button_forward.grid(row=1, column=2)

initialize_buttons()
# arrow keys page through the images; handlers receive the Tk event
tab2.bind('<Left>', back)
tab2.bind('<Right>', forward)
#####################
# Build the event-runner UI on tab 1 with a 2-second max event time,
# then hand control to the Tk main loop.
event = EB(tab1, 2)
tk_root.mainloop()
| 33.543478
| 100
| 0.626485
|
4a10abb8c632263e60fe9a15ba965c9beb0fb718
| 2,462
|
py
|
Python
|
gnes/encoder/text/bert.py
|
awesome-archive/gnes
|
21311f05747303d0acdc303f2ade830ef971f47d
|
[
"Apache-2.0"
] | null | null | null |
gnes/encoder/text/bert.py
|
awesome-archive/gnes
|
21311f05747303d0acdc303f2ade830ef971f47d
|
[
"Apache-2.0"
] | null | null | null |
gnes/encoder/text/bert.py
|
awesome-archive/gnes
|
21311f05747303d0acdc303f2ade830ef971f47d
|
[
"Apache-2.0"
] | null | null | null |
# Tencent is pleased to support the open source community by making GNES available.
#
# Copyright (C) 2019 THL A29 Limited, a Tencent company. All rights reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# pylint: disable=low-comment-ratio
from typing import List
import numpy as np
from ..base import CompositionalEncoder, BaseTextEncoder
from ...helper import batching
class BertEncoder(BaseTextEncoder):
    """Text encoder that delegates to a running bert-as-service server via
    ``bert_serving.client.BertClient``."""

    # keep raw *args/**kwargs so they can be replayed in post_init
    store_args_kwargs = True

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.is_trained = True
        # saved so the BertClient can be (re)built after deserialization
        self._bc_encoder_args = args
        self._bc_encoder_kwargs = kwargs

    def post_init(self):
        # imported lazily so bert-serving is only required at run time
        from bert_serving.client import BertClient
        self.bc_encoder = BertClient(*self._bc_encoder_args, **self._bc_encoder_kwargs)

    @batching
    def encode(self, text: List[str], *args, **kwargs) -> np.ndarray:
        """Encode a batch of sentences into embedding vectors."""
        return self.bc_encoder.encode(text, *args, **kwargs)  # type: np.ndarray

    def close(self):
        # release the client connection to the bert server
        self.bc_encoder.close()
class BertEncoderWithServer(CompositionalEncoder):
    """Composite encoder that forwards encoding to its 'bert_client'
    component."""

    def encode(self, text: List[str], *args, **kwargs) -> np.ndarray:
        client = self.component['bert_client']
        return client.encode(text, *args, **kwargs)
class BertEncoderServer(BaseTextEncoder):
    """Encoder-side wrapper that launches a bert-as-service server, using
    the constructor arguments as its command line."""

    store_args_kwargs = True

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # translate ctor args into CLI flags for bert-serving's parser:
        # positionals become bare '-<value>' flags, kwargs '-<key> <value>'
        bert_args = ['-%s' % v for v in args]
        for k, v in kwargs.items():
            bert_args.append('-%s' % k)
            bert_args.append(str(v))
        self._bert_args = bert_args
        self.is_trained = True

    def post_init(self):
        # imported lazily so bert-serving is only required at run time
        from bert_serving.server import BertServer
        from bert_serving.server import get_args_parser
        self.bert_server = BertServer(get_args_parser().parse_args(self._bert_args))
        self.bert_server.start()
        # block until the server reports it is ready to serve
        self.bert_server.is_ready.wait()

    def close(self):
        self.bert_server.close()
| 32.394737
| 87
| 0.692526
|
4a10ad912b303fd2a335fbfb64e3b2dc5e6acf98
| 4,882
|
py
|
Python
|
src/dewloosh/math/interval.py
|
dewloosh/dewloosh-math
|
41e2e4a9a92b567a7ef93f7414a1e439bf7dd9f4
|
[
"MIT"
] | 2
|
2021-12-11T17:25:53.000Z
|
2022-01-06T15:36:22.000Z
|
src/dewloosh/math/interval.py
|
dewloosh/dewloosh-math
|
41e2e4a9a92b567a7ef93f7414a1e439bf7dd9f4
|
[
"MIT"
] | null | null | null |
src/dewloosh/math/interval.py
|
dewloosh/dewloosh-math
|
41e2e4a9a92b567a7ef93f7414a1e439bf7dd9f4
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
from numba.extending import unbox, NativeValue
from numba.extending import box
from numba.core import cgutils
from numba.extending import lower_builtin
from numba.extending import overload_attribute, overload
from numba.extending import make_attribute_wrapper
from numba.extending import models, register_model
from numba.extending import type_callable
from numba.extending import typeof_impl
from numba import types
import numpy as np
import operator
class Interval(object):
    """
    A half-open interval on the real number line, backed by a data array.
    """

    def __init__(self, lo, hi, arr):
        self.lo = lo
        self.hi = hi
        self._arr = arr

    def __repr__(self):
        return 'Interval(%f, %f)' % (self.lo, self.hi)

    @property
    def width(self):
        # length of the interval
        span = self.hi - self.lo
        return span

    def mean(self):
        # arithmetic mean of the backing array
        return np.mean(self._arr)

    def __getitem__(self, key):
        # index straight into the backing array
        return self._arr[key]
class IntervalType(types.Type):
    """Numba type object corresponding to :class:`Interval`."""
    def __init__(self):
        # the backing array is typed as a 1-D C-contiguous float64 array
        self.data = types.Array(types.float64, 1, 'C')
        super(IntervalType, self).__init__(name='Interval')

# expose the native struct fields as attributes inside jitted code
make_attribute_wrapper(IntervalType, 'lo', 'lo')
make_attribute_wrapper(IntervalType, 'hi', 'hi')
make_attribute_wrapper(IntervalType, 'data', 'data')

@typeof_impl.register(Interval)
def typeof_index(val, c):
    # every Interval instance maps to the same IntervalType
    return IntervalType()

@type_callable(Interval)
def type_interval(context):
    # type the Interval(lo, hi, data) constructor call in nopython mode;
    # returning None from typer rejects non-float lo/hi
    def typer(lo, hi, data):
        if isinstance(lo, types.Float) and isinstance(hi, types.Float):
            return IntervalType()
    return typer
@register_model(IntervalType)
class StructModel(models.StructModel):
    """Native data model: (lo, hi, data) laid out as a struct."""
    def __init__(self, dmm, fe_type):
        members = [
            ('lo', types.float64),
            ('hi', types.float64),
            ('data', fe_type.data),
        ]
        models.StructModel.__init__(self, dmm, fe_type, members)
@overload_attribute(IntervalType, "width")
def get_width(interval):
    # implements ``interval.width`` for jitted code
    def getter(interval):
        return interval.hi - interval.lo
    return getter

@overload_attribute(IntervalType, "mean")
def get_mean(interval):
    # implements ``interval.mean`` for jitted code
    # NOTE(review): exposed as an attribute here but as a method on the
    # Python class — confirm the asymmetry is intentional.
    def getter(interval):
        return np.mean(interval.data)
    return getter

@overload(operator.getitem)
def overload_dummy_getitem(obj, idx):
    # implements ``interval[idx]`` by indexing the backing array;
    # returning None leaves other types to their own getitem overloads
    if isinstance(obj, IntervalType):
        def dummy_getitem_impl(obj, idx):
            return obj.data[idx]
        return dummy_getitem_impl
@lower_builtin(Interval, types.Float, types.Float, types.Array(types.float64, 1, 'C'))
def impl_interval(context, builder, sig, args):
    """Lower the Interval(lo, hi, data) constructor call to native code."""
    typ = sig.return_type
    lo, hi, data = args
    # allocate the native struct and populate its three fields
    interval = cgutils.create_struct_proxy(typ)(context, builder)
    interval.lo = lo
    interval.hi = hi
    interval.data = data
    return interval._getvalue()
@unbox(IntervalType)
def unbox_interval(typ, obj, c):
    """
    Convert a Interval object to a native interval structure.
    """
    # read the Python attributes off the object
    lo_obj = c.pyapi.object_getattr_string(obj, "lo")
    hi_obj = c.pyapi.object_getattr_string(obj, "hi")
    data_obj = c.pyapi.object_getattr_string(obj, "_arr")
    interval = cgutils.create_struct_proxy(typ)(c.context, c.builder)
    interval.lo = c.pyapi.float_as_double(lo_obj)
    interval.hi = c.pyapi.float_as_double(hi_obj)
    # recursively unbox the backing array into the struct's data slot
    interval.data = c.unbox(typ.data, data_obj).value
    # drop the temporary references taken above
    c.pyapi.decref(lo_obj)
    c.pyapi.decref(hi_obj)
    c.pyapi.decref(data_obj)
    # propagate any Python error raised during attribute access/conversion
    is_error = cgutils.is_not_null(c.builder, c.pyapi.err_occurred())
    return NativeValue(interval._getvalue(), is_error=is_error)
@box(IntervalType)
def box_interval(typ, val, c):
    """
    Convert a native interval structure to an Interval object.
    """
    interval = cgutils.create_struct_proxy(
        typ)(c.context, c.builder, value=val)
    # box each field back into a Python object
    lo_obj = c.pyapi.float_from_double(interval.lo)
    hi_obj = c.pyapi.float_from_double(interval.hi)
    data_obj = c.box(typ.data, interval.data)
    # recover the Interval class object, then call Interval(lo, hi, data)
    class_obj = c.pyapi.unserialize(c.pyapi.serialize_object(Interval))
    res = c.pyapi.call_function_objargs(class_obj, (lo_obj, hi_obj, data_obj))
    # release the temporaries now owned by the new instance
    c.pyapi.decref(lo_obj)
    c.pyapi.decref(hi_obj)
    c.pyapi.decref(data_obj)
    c.pyapi.decref(class_obj)
    return res
if __name__ == '__main__':
    # smoke tests: exercise the extension type inside nopython-jitted code
    from numba import jit

    @jit(nopython=True)
    def inside_interval(interval, x):
        # half-open membership test using the wrapped lo/hi attributes
        return interval.lo <= x < interval.hi

    @jit(nopython=True)
    def interval_width(interval):
        # exercises the overloaded 'width' attribute
        return interval.width

    @jit(nopython=True)
    def interval_data(interval):
        # exercises the wrapped 'data' attribute
        return interval.data

    @jit(nopython=True)
    def interval_getitem(interval, i):
        # exercises the getitem overload
        return interval[i]

    @jit(nopython=True)
    def new_interval(lo, hi, data):
        # constructs an Interval in nopython mode; the result is boxed
        # back into a Python Interval on return
        return Interval(lo, hi, data)

    lo = 1.0
    hi = 3.0
    data = np.array([1.1, 3.1, 2.1])
    print(new_interval(lo, hi, data)._arr)
    print(interval_data(new_interval(lo, hi, data)))
    print(interval_getitem(new_interval(lo, hi, data), 0))
| 27.581921
| 86
| 0.683736
|
4a10ae0991118915cad50e8d1b6ca582b7b49588
| 43,308
|
py
|
Python
|
pcdet/models/detectors/detector3d_template.py
|
SunshengGu/XC_eval_pcdet
|
616f3baf05ba81bdf6461c6b814079a81b55ae68
|
[
"Apache-2.0"
] | 1
|
2022-01-17T20:06:27.000Z
|
2022-01-17T20:06:27.000Z
|
pcdet/models/detectors/detector3d_template.py
|
SunshengGu/XC_eval_pcdet
|
616f3baf05ba81bdf6461c6b814079a81b55ae68
|
[
"Apache-2.0"
] | null | null | null |
pcdet/models/detectors/detector3d_template.py
|
SunshengGu/XC_eval_pcdet
|
616f3baf05ba81bdf6461c6b814079a81b55ae68
|
[
"Apache-2.0"
] | null | null | null |
import os
import torch
import torch.nn as nn
import copy
from ...ops.iou3d_nms import iou3d_nms_utils
from .. import backbones_2d, backbones_3d, dense_heads, roi_heads
from ..backbones_2d import map_to_bev
from ..backbones_3d import pfe, vfe
from ..model_utils import model_nms_utils
class Detector3DTemplate(nn.Module):
    def __init__(self, model_cfg, num_class, dataset):
        """Base template for 3D detectors.

        :param model_cfg: config node describing which sub-modules to build
        :param num_class: number of detection classes
        :param dataset: dataset object supplying feature/grid metadata
        """
        super().__init__()
        # self.explain = explain # additional argument indicating if we are in explain mode
        self.model_cfg = model_cfg
        self.num_class = num_class
        self.dataset = dataset
        self.class_names = dataset.class_names
        # persisted (non-parameter) training-step counter, saved in checkpoints
        self.register_buffer('global_step', torch.LongTensor(1).zero_())

        # build order matters: later modules consume features that earlier
        # ones record in model_info_dict (see build_networks)
        self.module_topology = [
            'vfe', 'backbone_3d', 'map_to_bev_module', 'pfe',
            'backbone_2d', 'dense_head',  'point_head', 'roi_head'
        ]
    @property
    def mode(self):
        # nn.Module.training is toggled by train()/eval()
        return 'TRAIN' if self.training else 'TEST'

    def update_global_step(self):
        # advance the persisted step counter by one
        self.global_step += 1
def build_networks(self):
# # ********** debug message **************
# print("\n keys in self.state_dict before model is built:")
# for key in self.state_dict():
# print(key)
# # ********** debug message **************
model_info_dict = {
'module_list': [],
'num_rawpoint_features': self.dataset.point_feature_encoder.num_point_features,
'num_point_features': self.dataset.point_feature_encoder.num_point_features,
'grid_size': self.dataset.grid_size,
'point_cloud_range': self.dataset.point_cloud_range,
'voxel_size': self.dataset.voxel_size
}
# # ********** debug message **************
# print("showing module names in self.module_topology")
# for mod_name in self.module_topology:
# print(mod_name)
# # ********** debug message **************
for module_name in self.module_topology:
module, model_info_dict = getattr(self, 'build_%s' % module_name)(
model_info_dict=model_info_dict
)
self.add_module(module_name, module)
# # ********** debug message **************
# print("\n keys in self.state_dict after model is built:")
# for key in self.state_dict():
# print(key)
# # ********** debug message **************
return model_info_dict['module_list']
def build_vfe(self, model_info_dict):
if self.model_cfg.get('VFE', None) is None:
# ********** debug message **************
# print('\n no VFE')
# ********** debug message **************
return None, model_info_dict
vfe_module = vfe.__all__[self.model_cfg.VFE.NAME](
model_cfg=self.model_cfg.VFE,
num_point_features=model_info_dict['num_rawpoint_features'],
point_cloud_range=model_info_dict['point_cloud_range'],
voxel_size=model_info_dict['voxel_size']
)
model_info_dict['num_point_features'] = vfe_module.get_output_feature_dim()
model_info_dict['module_list'].append(vfe_module)
return vfe_module, model_info_dict
def build_backbone_3d(self, model_info_dict):
if self.model_cfg.get('BACKBONE_3D', None) is None:
# ********** debug message **************
# print('\n no 3D backbone')
# ********** debug message **************
return None, model_info_dict
backbone_3d_module = backbones_3d.__all__[self.model_cfg.BACKBONE_3D.NAME](
model_cfg=self.model_cfg.BACKBONE_3D,
input_channels=model_info_dict['num_point_features'],
grid_size=model_info_dict['grid_size'],
voxel_size=model_info_dict['voxel_size'],
point_cloud_range=model_info_dict['point_cloud_range']
)
model_info_dict['module_list'].append(backbone_3d_module)
model_info_dict['num_point_features'] = backbone_3d_module.num_point_features
return backbone_3d_module, model_info_dict
def build_map_to_bev_module(self, model_info_dict):
if self.model_cfg.get('MAP_TO_BEV', None) is None:
# ********** debug message **************
# print('\n no map_to_bev_module')
# ********** debug message **************
return None, model_info_dict
map_to_bev_module = map_to_bev.__all__[self.model_cfg.MAP_TO_BEV.NAME](
model_cfg=self.model_cfg.MAP_TO_BEV,
grid_size=model_info_dict['grid_size']
)
model_info_dict['module_list'].append(map_to_bev_module)
model_info_dict['num_bev_features'] = map_to_bev_module.num_bev_features
return map_to_bev_module, model_info_dict
def build_backbone_2d(self, model_info_dict):
if self.model_cfg.get('BACKBONE_2D', None) is None:
# ********** debug message **************
# print('\n no 2D backbone')
# ********** debug message **************
return None, model_info_dict
if 'num_bev_features' not in model_info_dict:
model_info_dict['num_bev_features'] = 64
backbone_2d_module = backbones_2d.__all__[self.model_cfg.BACKBONE_2D.NAME](
model_cfg=self.model_cfg.BACKBONE_2D,
input_channels=model_info_dict['num_bev_features']
# # TODO: hard code just for the sake of building a simpler pointpillar, need to change back later
# input_channels=64
)
model_info_dict['module_list'].append(backbone_2d_module)
model_info_dict['num_bev_features'] = backbone_2d_module.num_bev_features
return backbone_2d_module, model_info_dict
def build_pfe(self, model_info_dict):
if self.model_cfg.get('PFE', None) is None:
# ********** debug message **************
# print('\n no pfe')
# ********** debug message **************
return None, model_info_dict
pfe_module = pfe.__all__[self.model_cfg.PFE.NAME](
model_cfg=self.model_cfg.PFE,
voxel_size=model_info_dict['voxel_size'],
point_cloud_range=model_info_dict['point_cloud_range'],
num_bev_features=model_info_dict['num_bev_features'],
num_rawpoint_features=model_info_dict['num_rawpoint_features']
)
model_info_dict['module_list'].append(pfe_module)
model_info_dict['num_point_features'] = pfe_module.num_point_features
model_info_dict['num_point_features_before_fusion'] = pfe_module.num_point_features_before_fusion
return pfe_module, model_info_dict
def build_dense_head(self, model_info_dict):
if self.model_cfg.get('DENSE_HEAD', None) is None:
return None, model_info_dict
dense_head_module = dense_heads.__all__[self.model_cfg.DENSE_HEAD.NAME](
model_cfg=self.model_cfg.DENSE_HEAD,
input_channels=model_info_dict['num_bev_features'],
num_class=self.num_class if not self.model_cfg.DENSE_HEAD.CLASS_AGNOSTIC else 1,
class_names=self.class_names,
grid_size=model_info_dict['grid_size'],
point_cloud_range=model_info_dict['point_cloud_range'],
predict_boxes_when_training=self.model_cfg.get('ROI_HEAD', False)
)
model_info_dict['module_list'].append(dense_head_module)
return dense_head_module, model_info_dict
def build_point_head(self, model_info_dict):
if self.model_cfg.get('POINT_HEAD', None) is None:
return None, model_info_dict
if self.model_cfg.POINT_HEAD.get('USE_POINT_FEATURES_BEFORE_FUSION', False):
num_point_features = model_info_dict['num_point_features_before_fusion']
else:
num_point_features = model_info_dict['num_point_features']
point_head_module = dense_heads.__all__[self.model_cfg.POINT_HEAD.NAME](
model_cfg=self.model_cfg.POINT_HEAD,
input_channels=num_point_features,
num_class=self.num_class if not self.model_cfg.POINT_HEAD.CLASS_AGNOSTIC else 1,
predict_boxes_when_training=self.model_cfg.get('ROI_HEAD', False)
)
model_info_dict['module_list'].append(point_head_module)
return point_head_module, model_info_dict
def build_roi_head(self, model_info_dict):
if self.model_cfg.get('ROI_HEAD', None) is None:
return None, model_info_dict
point_head_module = roi_heads.__all__[self.model_cfg.ROI_HEAD.NAME](
model_cfg=self.model_cfg.ROI_HEAD,
input_channels=model_info_dict['num_point_features'],
num_class=self.num_class if not self.model_cfg.ROI_HEAD.CLASS_AGNOSTIC else 1,
)
model_info_dict['module_list'].append(point_head_module)
return point_head_module, model_info_dict
    def forward(self, **kwargs):
        """Abstract hook: concrete detectors implement the per-model forward pass."""
        raise NotImplementedError
def post_processing(self, batch_dict):
"""
Args:
batch_dict:
batch_size:
batch_cls_preds: (B, num_boxes, num_classes | 1) or (N1+N2+..., num_classes | 1)
or [(B, num_boxes, num_class1), (B, num_boxes, num_class2) ...]
multihead_label_mapping: [(num_class1), (num_class2), ...]
batch_box_preds: (B, num_boxes, 7+C) or (N1+N2+..., 7+C)
cls_preds_normalized: indicate whether batch_cls_preds is normalized
batch_index: optional (N1+N2+...)
has_class_labels: True/False
roi_labels: (B, num_rois) 1 .. num_classes
batch_pred_labels: (B, num_boxes, 1)
Returns:
"""
post_process_cfg = self.model_cfg.POST_PROCESSING
batch_size = batch_dict['batch_size']
recall_dict = {}
pred_dicts = []
for index in range(batch_size):
if batch_dict.get('batch_index', None) is not None:
assert batch_dict['batch_box_preds'].shape.__len__() == 2
batch_mask = (batch_dict['batch_index'] == index)
else:
assert batch_dict['batch_box_preds'].shape.__len__() == 3
batch_mask = index
box_preds = batch_dict['batch_box_preds'][batch_mask]
src_box_preds = box_preds
if not isinstance(batch_dict['batch_cls_preds'], list):
cls_preds = batch_dict['batch_cls_preds'][batch_mask]
src_cls_preds = cls_preds
assert cls_preds.shape[1] in [1, self.num_class]
if not batch_dict['cls_preds_normalized']:
cls_preds = torch.sigmoid(cls_preds)
else:
cls_preds = [x[batch_mask] for x in batch_dict['batch_cls_preds']]
src_cls_preds = cls_preds
if not batch_dict['cls_preds_normalized']:
cls_preds = [torch.sigmoid(x) for x in cls_preds]
if post_process_cfg.NMS_CONFIG.MULTI_CLASSES_NMS:
if not isinstance(cls_preds, list):
cls_preds = [cls_preds]
multihead_label_mapping = [torch.arange(1, self.num_class, device=cls_preds[0].device)]
else:
multihead_label_mapping = batch_dict['multihead_label_mapping']
cur_start_idx = 0
pred_scores, pred_labels, pred_boxes = [], [], []
for cur_cls_preds, cur_label_mapping in zip(cls_preds, multihead_label_mapping):
assert cur_cls_preds.shape[1] == len(cur_label_mapping)
cur_box_preds = box_preds[cur_start_idx: cur_start_idx + cur_cls_preds.shape[0]]
cur_pred_scores, cur_pred_labels, cur_pred_boxes = model_nms_utils.multi_classes_nms(
cls_scores=cur_cls_preds, box_preds=cur_box_preds,
nms_config=post_process_cfg.NMS_CONFIG,
score_thresh=post_process_cfg.SCORE_THRESH
)
cur_pred_labels = cur_label_mapping[cur_pred_labels]
pred_scores.append(cur_pred_scores)
pred_labels.append(cur_pred_labels)
pred_boxes.append(cur_pred_boxes)
cur_start_idx += cur_cls_preds.shape[0]
final_scores = torch.cat(pred_scores, dim=0)
final_labels = torch.cat(pred_labels, dim=0)
final_boxes = torch.cat(pred_boxes, dim=0)
else:
cls_preds, label_preds = torch.max(cls_preds, dim=-1)
if batch_dict.get('has_class_labels', False):
label_key = 'roi_labels' if 'roi_labels' in batch_dict else 'batch_pred_labels'
label_preds = batch_dict[label_key][index]
else:
label_preds = label_preds + 1
selected, selected_scores = model_nms_utils.class_agnostic_nms(
box_scores=cls_preds, box_preds=box_preds,
nms_config=post_process_cfg.NMS_CONFIG,
score_thresh=post_process_cfg.SCORE_THRESH
)
if post_process_cfg.OUTPUT_RAW_SCORE:
max_cls_preds, _ = torch.max(src_cls_preds, dim=-1)
selected_scores = max_cls_preds[selected]
final_scores = selected_scores
final_labels = label_preds[selected]
final_boxes = box_preds[selected]
recall_dict = self.generate_recall_record(
box_preds=final_boxes if 'rois' not in batch_dict else src_box_preds,
recall_dict=recall_dict, batch_index=index, data_dict=batch_dict,
thresh_list=post_process_cfg.RECALL_THRESH_LIST
)
record_dict = {
'pred_boxes': final_boxes,
'pred_scores': final_scores,
'pred_labels': final_labels
}
pred_dicts.append(record_dict)
return pred_dicts, recall_dict
def post_processing_xai(self, tensor_values, batch_dict, box_limit=30):
"""
Args:
batch_dict:
batch_size:
batch_cls_preds: (B, num_boxes, num_classes | 1) or (N1+N2+..., num_classes | 1)
batch_box_preds: (B, num_boxes, 7+C) or (N1+N2+..., 7+C)
cls_preds_normalized: indicate whether batch_cls_preds is normalized
batch_index: optional (N1+N2+...)
roi_labels: (B, num_rois) 1 .. num_classes
Returns:
:param box_limit:
:param batch_dict:
:param tensor_values:
"""
# print('\n starting the post_processing() function')
# tensor_values is just for compatibility with Captum, only useful when in explain mode
post_process_cfg = self.model_cfg.POST_PROCESSING
batch_size = batch_dict['batch_size']
recall_dict = {}
pred_dicts = []
boxes_with_cls_scores = []
# all_anchor_boxes = []
# boxes_params = []
anchor_selections = []
batch_dict['box_count'] = {} # store the number of boxes for each image in the sample
batch_dict['sigmoid_anchor_scores'] = []
output_anchor = post_process_cfg.OUTPUT_ANCHOR_BOXES # indicates if we output anchor boxes
anchor_scores = [] # store class scores for individual anchor boxes
anchor_boxes = []
anchor_labels = []
# max_box_ind = 0 # index of the input in the batch with most number of boxes
max_num_boxes = box_limit
for index in range(batch_size):
# the 'None' here just means return None if key not found
if batch_dict.get('batch_index', None) is not None:
# print('\n batch_dict has the \'bactch_index\' entry!')
# print('\n shape of batch_dict[\'batch_cls_preds\']' + str(batch_dict['batch_cls_preds'].shape))
assert batch_dict['batch_cls_preds'].shape.__len__() == 2
batch_mask = (batch_dict['batch_index'] == index)
else:
# print('\n batch_dict does NOT have the \'bactch_index\' entry!')
# print('\n shape of batch_dict[\'batch_cls_preds\']' + str(batch_dict['batch_cls_preds'].shape))
assert batch_dict['batch_cls_preds'].shape.__len__() == 3
batch_mask = index
# inside the for loop, we only care about one particular sample, not the entire mini-batch
box_preds = batch_dict['batch_box_preds'][batch_mask]
cls_preds = batch_dict['batch_cls_preds'][batch_mask]
if str(type(tensor_values)) == 'torch.Tensor':
cls_preds = tensor_values[batch_mask]
src_cls_preds = cls_preds
src_box_preds = box_preds
# print("src_box_preds.shape: {}".format(src_box_preds.shape))
anchor_scores.append(src_cls_preds)
anchor_boxes.append(src_box_preds)
# print('src_box_preds.shape before nms: {}'.format(src_box_preds.shape))
# print('src_cls_preds.shape before nms: {}'.format(src_cls_preds.shape))
# the second dimension of cls_preds should be the same as the number of classes
assert cls_preds.shape[1] in [1, self.num_class]
if not batch_dict['cls_preds_normalized']:
cls_preds = torch.sigmoid(cls_preds)
batch_dict['sigmoid_anchor_scores'].append(torch.sigmoid(src_cls_preds))
else:
batch_dict['sigmoid_anchor_scores'].append(src_cls_preds)
if post_process_cfg.NMS_CONFIG.MULTI_CLASSES_NMS:
raise NotImplementedError
else:
# in python, -1 means the last dimension
# torch.max(input, dim, keepdim=False, out=None) returns a tuple:
# 1. the maximum values in the indicated dimension
# 2. the indices of the maximum values in the indicated dimension
# now, for each box, we have a class prediction
cls_preds, label_preds = torch.max(cls_preds, dim=-1)
# orig_label_preds = label_preds + 1
# orig_cls_preds = cls_preds
anchor_labels.append(label_preds)
label_preds = batch_dict['roi_labels'][index] if batch_dict.get('has_class_labels',
False) else label_preds + 1
if batch_dict.get('has_class_labels', False):
print('\n no key named \'has_class_labels\' in batch_dict')
# print('\n shape of label_preds after: ' + str(label_preds.shape))
selected, selected_scores = model_nms_utils.class_agnostic_nms(
box_scores=cls_preds, box_preds=box_preds,
nms_config=post_process_cfg.NMS_CONFIG,
score_thresh=post_process_cfg.SCORE_THRESH
)
anchor_selections.append(selected)
# print("\nlen(selected): {}\n".format(len(selected)))
if post_process_cfg.OUTPUT_RAW_SCORE: # no need to worry about this, false by default
max_cls_preds, _ = torch.max(src_cls_preds, dim=-1)
selected_scores = max_cls_preds[selected]
final_scores = selected_scores # this is the original code
final_labels = label_preds[selected]
final_boxes = box_preds[selected]
# # the following modifications did nothing
# final_scores = orig_cls_preds[selected]
# final_labels = orig_label_preds[selected]
# final_boxes = src_box_preds[selected]
# for label in final_labels:
# print('label is {}'.format(label))
batch_dict['box_count'][index] = final_scores.shape[0]
# if final_scores.shape[0] > max_num_boxes:
# max_box_ind = index
# max_num_boxes = final_scores.shape[0]
recall_dict = self.generate_recall_record(
box_preds=final_boxes if 'rois' not in batch_dict else src_box_preds,
recall_dict=recall_dict, batch_index=index, data_dict=batch_dict,
thresh_list=post_process_cfg.RECALL_THRESH_LIST
)
record_dict = {
'pred_boxes': final_boxes,
'pred_scores': final_scores,
'pred_labels': final_labels
}
pred_dicts.append(record_dict)
# print('src_cls_pred[selected] data type: ' + str(type(src_cls_preds[selected])))
# print('src_cls_pred[selected] shape: ' + str(src_cls_preds[selected].shape))
boxes_with_cls_scores.append(src_cls_preds[selected])
# boxes_params.append(src_box_preds[selected])
batch_dict['pred_dicts'] = pred_dicts
batch_dict['recall_dict'] = recall_dict
batch_dict['anchor_selections'] = anchor_selections
# # note: torch.stack only works if every dimension except for dimension 0 matches
# boxes_with_cls_scores = torch.stack(boxes_with_cls_scores)
if output_anchor:
anchor_scores = torch.stack(anchor_scores)
batch_dict['anchor_scores'] = anchor_scores
batch_dict['anchor_boxes'] = anchor_boxes
batch_dict['anchor_labels'] = anchor_labels
return anchor_scores
# pad each output in the batch to match dimensions with the maximum length output
# then stack the individual outputs together to get a tensor as the batch outout
for i in range(len(boxes_with_cls_scores)):
if boxes_with_cls_scores[i].shape[0] > max_num_boxes:
# more than max_num_boxes boxes detected
boxes_with_cls_scores[i] = boxes_with_cls_scores[i][:max_num_boxes]
elif boxes_with_cls_scores[i].shape[0] < max_num_boxes:
# less than max_num_boxes boxes detected
padding_size = max_num_boxes - boxes_with_cls_scores[i].shape[0]
padding = torch.zeros(padding_size, 3)
padding = padding.float().cuda() # load `padding` to GPU
boxes_with_cls_scores[i] = torch.cat((boxes_with_cls_scores[i], padding), 0)
else:
continue
boxes_with_cls_scores = torch.stack(boxes_with_cls_scores)
# boxes_params = torch.stack(boxes_params)
# print('\n finishing the post_processing() function')
return boxes_with_cls_scores
def post_processing_tensor(self, tensor_values, batch_dict, box_limit=30):
"""
Args:
batch_dict:
batch_size:
batch_cls_preds: (B, num_boxes, num_classes | 1) or (N1+N2+..., num_classes | 1)
batch_box_preds: (B, num_boxes, 7+C) or (N1+N2+..., 7+C)
cls_preds_normalized: indicate whether batch_cls_preds is normalized
batch_index: optional (N1+N2+...)
roi_labels: (B, num_rois) 1 .. num_classes
Returns:
:param box_limit:
:param batch_dict:
:param tensor_values:
"""
# print('\n starting the post_processing() function')
# tensor_values is just for compatibility with Captum, only useful when in explain mode
post_process_cfg = self.model_cfg.POST_PROCESSING
batch_size = batch_dict['batch_size']
recall_dict = {}
pred_dicts = []
boxes_with_cls_scores = []
# all_anchor_boxes = []
# boxes_params = []
anchor_selections = []
batch_dict['box_count'] = {} # store the number of boxes for each image in the sample
batch_dict['sigmoid_anchor_scores'] = []
output_anchor = post_process_cfg.OUTPUT_ANCHOR_BOXES # indicates if we output anchor boxes
anchor_scores = [] # store class scores for individual anchor boxes
anchor_boxes = []
anchor_labels = []
# max_box_ind = 0 # index of the input in the batch with most number of boxes
max_num_boxes = box_limit
for index in range(batch_size):
# the 'None' here just means return None if key not found
if batch_dict.get('batch_index', None) is not None:
# print('\n batch_dict has the \'bactch_index\' entry!')
# print('\n shape of batch_dict[\'batch_cls_preds\']' + str(batch_dict['batch_cls_preds'].shape))
assert batch_dict['batch_cls_preds'].shape.__len__() == 2
batch_mask = (batch_dict['batch_index'] == index)
else:
# print('\n batch_dict does NOT have the \'bactch_index\' entry!')
# print('\n shape of batch_dict[\'batch_cls_preds\']' + str(batch_dict['batch_cls_preds'].shape))
assert batch_dict['batch_cls_preds'].shape.__len__() == 3
batch_mask = index
# inside the for loop, we only care about one particular sample, not the entire mini-batch
box_preds = batch_dict['batch_box_preds'][batch_mask]
cls_preds = batch_dict['batch_cls_preds'][batch_mask]
if str(type(tensor_values)) == 'torch.Tensor':
cls_preds = tensor_values[batch_mask]
src_cls_preds = cls_preds
src_box_preds = box_preds
# print("src_box_preds.shape: {}".format(src_box_preds.shape))
anchor_scores.append(src_cls_preds)
anchor_boxes.append(src_box_preds)
# print('src_box_preds.shape before nms: {}'.format(src_box_preds.shape))
# print('src_cls_preds.shape before nms: {}'.format(src_cls_preds.shape))
# the second dimension of cls_preds should be the same as the number of classes
assert cls_preds.shape[1] in [1, self.num_class]
if not batch_dict['cls_preds_normalized']:
cls_preds = torch.sigmoid(cls_preds)
batch_dict['sigmoid_anchor_scores'].append(torch.sigmoid(src_cls_preds))
else:
batch_dict['sigmoid_anchor_scores'].append(src_cls_preds)
if post_process_cfg.NMS_CONFIG.MULTI_CLASSES_NMS:
raise NotImplementedError
else:
# in python, -1 means the last dimension
# torch.max(input, dim, keepdim=False, out=None) returns a tuple:
# 1. the maximum values in the indicated dimension
# 2. the indices of the maximum values in the indicated dimension
# now, for each box, we have a class prediction
cls_preds, label_preds = torch.max(cls_preds, dim=-1)
# orig_label_preds = label_preds + 1
# orig_cls_preds = cls_preds
anchor_labels.append(label_preds)
label_preds = batch_dict['roi_labels'][index] if batch_dict.get('has_class_labels',
False) else label_preds + 1
if batch_dict.get('has_class_labels', False):
print('\n no key named \'has_class_labels\' in batch_dict')
# print('\n shape of label_preds after: ' + str(label_preds.shape))
selected, selected_scores = model_nms_utils.class_agnostic_nms(
box_scores=cls_preds, box_preds=box_preds,
nms_config=post_process_cfg.NMS_CONFIG,
score_thresh=post_process_cfg.SCORE_THRESH
)
anchor_selections.append(selected)
if post_process_cfg.OUTPUT_RAW_SCORE: # no need to worry about this, false by default
max_cls_preds, _ = torch.max(src_cls_preds, dim=-1)
selected_scores = max_cls_preds[selected]
# print("len(selected): {}".format(len(selected)))
final_scores = selected_scores # this is the original code
final_labels = label_preds[selected]
final_boxes = box_preds[selected]
# print("len(final_labels): {}".format(len(final_labels)))
# print("len(final_scores): {}".format(len(final_scores)))
batch_dict['box_count'][index] = final_scores.shape[0]
recall_dict = self.generate_recall_record(
box_preds=final_boxes if 'rois' not in batch_dict else src_box_preds,
recall_dict=recall_dict, batch_index=index, data_dict=batch_dict,
thresh_list=post_process_cfg.RECALL_THRESH_LIST
)
record_dict = {
'pred_boxes': final_boxes,
'pred_scores': final_scores,
'pred_labels': final_labels
}
pred_dicts.append(record_dict)
# print('src_cls_pred[selected] data type: ' + str(type(src_cls_preds[selected])))
# print('src_cls_pred[selected] shape: ' + str(src_cls_preds[selected].shape))
boxes_with_cls_scores.append(src_cls_preds[selected])
# boxes_params.append(src_box_preds[selected])
batch_dict['pred_dicts'] = pred_dicts
batch_dict['recall_dict'] = recall_dict
batch_dict['anchor_selections'] = anchor_selections
# # note: torch.stack only works if every dimension except for dimension 0 matches
# boxes_with_cls_scores = torch.stack(boxes_with_cls_scores)
anchor_scores = torch.stack(anchor_scores)
batch_dict['anchor_scores'] = anchor_scores
batch_dict['anchor_boxes'] = anchor_boxes
batch_dict['anchor_labels'] = anchor_labels
return pred_dicts, recall_dict
def post_processing_v2(self, batch_dict, box_limit=30):
"""
Args:
batch_dict:
batch_size:
batch_cls_preds: (B, num_boxes, num_classes | 1) or (N1+N2+..., num_classes | 1)
batch_box_preds: (B, num_boxes, 7+C) or (N1+N2+..., 7+C)
cls_preds_normalized: indicate whether batch_cls_preds is normalized
batch_index: optional (N1+N2+...)
roi_labels: (B, num_rois) 1 .. num_classes
Returns:
:param box_limit:
:param batch_dict:
:param tensor_values:
"""
# print('\n starting the post_processing() function')
# tensor_values is just for compatibility with Captum, only useful when in explain mode
post_process_cfg = self.model_cfg.POST_PROCESSING
batch_size = batch_dict['batch_size']
recall_dict = {}
pred_dicts = []
boxes_with_cls_scores = []
# all_anchor_boxes = []
# boxes_params = []
anchor_selections = []
batch_dict['box_count'] = {} # store the number of boxes for each image in the sample
batch_dict['sigmoid_anchor_scores'] = []
output_anchor = post_process_cfg.OUTPUT_ANCHOR_BOXES # indicates if we output anchor boxes
anchor_scores = [] # store class scores for individual anchor boxes
anchor_boxes = []
anchor_labels = []
# max_box_ind = 0 # index of the input in the batch with most number of boxes
max_num_boxes = box_limit
for index in range(batch_size):
# the 'None' here just means return None if key not found
if batch_dict.get('batch_index', None) is not None:
# print('\n batch_dict has the \'bactch_index\' entry!')
# print('\n shape of batch_dict[\'batch_cls_preds\']' + str(batch_dict['batch_cls_preds'].shape))
assert batch_dict['batch_cls_preds'].shape.__len__() == 2
batch_mask = (batch_dict['batch_index'] == index)
else:
# print('\n batch_dict does NOT have the \'bactch_index\' entry!')
# print('\n shape of batch_dict[\'batch_cls_preds\']' + str(batch_dict['batch_cls_preds'].shape))
assert batch_dict['batch_cls_preds'].shape.__len__() == 3
batch_mask = index
# inside the for loop, we only care about one particular sample, not the entire mini-batch
box_preds = batch_dict['batch_box_preds'][batch_mask]
cls_preds = batch_dict['batch_cls_preds'][batch_mask]
src_cls_preds = cls_preds
src_box_preds = box_preds
# print("src_box_preds.shape: {}".format(src_box_preds.shape))
anchor_scores.append(src_cls_preds)
anchor_boxes.append(src_box_preds)
# print('src_box_preds.shape before nms: {}'.format(src_box_preds.shape))
# print('src_cls_preds.shape before nms: {}'.format(src_cls_preds.shape))
# the second dimension of cls_preds should be the same as the number of classes
assert cls_preds.shape[1] in [1, self.num_class]
if not batch_dict['cls_preds_normalized']:
cls_preds = torch.sigmoid(cls_preds)
batch_dict['sigmoid_anchor_scores'].append(torch.sigmoid(src_cls_preds))
else:
batch_dict['sigmoid_anchor_scores'].append(src_cls_preds)
if post_process_cfg.NMS_CONFIG.MULTI_CLASSES_NMS:
raise NotImplementedError
else:
# in python, -1 means the last dimension
# torch.max(input, dim, keepdim=False, out=None) returns a tuple:
# 1. the maximum values in the indicated dimension
# 2. the indices of the maximum values in the indicated dimension
# now, for each box, we have a class prediction
cls_preds, label_preds = torch.max(cls_preds, dim=-1)
# orig_label_preds = label_preds + 1
# orig_cls_preds = cls_preds
anchor_labels.append(label_preds)
label_preds = batch_dict['roi_labels'][index] if batch_dict.get('has_class_labels',
False) else label_preds + 1
if batch_dict.get('has_class_labels', False):
print('\n no key named \'has_class_labels\' in batch_dict')
# print('\n shape of label_preds after: ' + str(label_preds.shape))
selected, selected_scores = model_nms_utils.class_agnostic_nms(
box_scores=cls_preds, box_preds=box_preds,
nms_config=post_process_cfg.NMS_CONFIG,
score_thresh=post_process_cfg.SCORE_THRESH
)
# print("\nlen(selected): {}\n".format(len(selected)))
anchor_selections.append(selected)
if post_process_cfg.OUTPUT_RAW_SCORE: # no need to worry about this, false by default
max_cls_preds, _ = torch.max(src_cls_preds, dim=-1)
selected_scores = max_cls_preds[selected]
final_scores = selected_scores # this is the original code
final_labels = label_preds[selected]
final_boxes = box_preds[selected]
batch_dict['box_count'][index] = final_scores.shape[0]
recall_dict = self.generate_recall_record(
box_preds=final_boxes if 'rois' not in batch_dict else src_box_preds,
recall_dict=recall_dict, batch_index=index, data_dict=batch_dict,
thresh_list=post_process_cfg.RECALL_THRESH_LIST
)
record_dict = {
'pred_boxes': final_boxes,
'pred_scores': final_scores,
'pred_labels': final_labels
}
pred_dicts.append(record_dict)
# print('src_cls_pred[selected] data type: ' + str(type(src_cls_preds[selected])))
# print('src_cls_pred[selected] shape: ' + str(src_cls_preds[selected].shape))
boxes_with_cls_scores.append(src_cls_preds[selected])
# boxes_params.append(src_box_preds[selected])
batch_dict['pred_dicts'] = pred_dicts
batch_dict['recall_dict'] = recall_dict
batch_dict['anchor_selections'] = anchor_selections
# # note: torch.stack only works if every dimension except for dimension 0 matches
# boxes_with_cls_scores = torch.stack(boxes_with_cls_scores)
if output_anchor:
anchor_scores = torch.stack(anchor_scores)
batch_dict['anchor_scores'] = anchor_scores
batch_dict['anchor_boxes'] = anchor_boxes
batch_dict['anchor_labels'] = anchor_labels
return anchor_scores
# pad each output in the batch to match dimensions with the maximum length output
# then stack the individual outputs together to get a tensor as the batch outout
for i in range(len(boxes_with_cls_scores)):
if boxes_with_cls_scores[i].shape[0] > max_num_boxes:
# more than max_num_boxes boxes detected
boxes_with_cls_scores[i] = boxes_with_cls_scores[i][:max_num_boxes]
elif boxes_with_cls_scores[i].shape[0] < max_num_boxes:
# less than max_num_boxes boxes detected
padding_size = max_num_boxes - boxes_with_cls_scores[i].shape[0]
padding = torch.zeros(padding_size, 3)
padding = padding.float().cuda() # load `padding` to GPU
boxes_with_cls_scores[i] = torch.cat((boxes_with_cls_scores[i], padding), 0)
else:
continue
boxes_with_cls_scores = torch.stack(boxes_with_cls_scores)
# boxes_params = torch.stack(boxes_params)
# print('\n finishing the post_processing() function')
return boxes_with_cls_scores
@staticmethod
def generate_recall_record(box_preds, recall_dict, batch_index, data_dict=None, thresh_list=None):
if 'gt_boxes' not in data_dict:
return recall_dict
rois = data_dict['rois'][batch_index] if 'rois' in data_dict else None
gt_boxes = data_dict['gt_boxes'][batch_index]
if recall_dict.__len__() == 0:
recall_dict = {'gt': 0}
for cur_thresh in thresh_list:
recall_dict['roi_%s' % (str(cur_thresh))] = 0
recall_dict['rcnn_%s' % (str(cur_thresh))] = 0
cur_gt = gt_boxes
k = cur_gt.__len__() - 1
while k > 0 and cur_gt[k].sum() == 0:
k -= 1
cur_gt = cur_gt[:k + 1]
if cur_gt.shape[0] > 0:
if box_preds.shape[0] > 0:
iou3d_rcnn = iou3d_nms_utils.boxes_iou3d_gpu(box_preds[:, 0:7], cur_gt[:, 0:7])
else:
iou3d_rcnn = torch.zeros((0, cur_gt.shape[0]))
if rois is not None:
iou3d_roi = iou3d_nms_utils.boxes_iou3d_gpu(rois[:, 0:7], cur_gt[:, 0:7])
for cur_thresh in thresh_list:
if iou3d_rcnn.shape[0] == 0:
recall_dict['rcnn_%s' % str(cur_thresh)] += 0
else:
rcnn_recalled = (iou3d_rcnn.max(dim=0)[0] > cur_thresh).sum().item()
recall_dict['rcnn_%s' % str(cur_thresh)] += rcnn_recalled
if rois is not None:
roi_recalled = (iou3d_roi.max(dim=0)[0] > cur_thresh).sum().item()
recall_dict['roi_%s' % str(cur_thresh)] += roi_recalled
recall_dict['gt'] += cur_gt.shape[0]
else:
gt_iou = box_preds.new_zeros(box_preds.shape[0])
return recall_dict
def load_params_from_file(self, filename, logger, to_cpu=False):
# file name is a checkpoint file
if not os.path.isfile(filename):
raise FileNotFoundError
logger.info('==> Loading parameters from checkpoint %s to %s' % (filename, 'CPU' if to_cpu else 'GPU'))
loc_type = torch.device('cpu') if to_cpu else None
checkpoint = torch.load(filename, map_location=loc_type)
model_state_disk = checkpoint['model_state']
if 'version' in checkpoint:
logger.info('==> Checkpoint trained from version: %s' % checkpoint['version'])
# # ********** debug message **************
# print("keys in self.state_dict before processing ckpt data")
# for key in self.state_dict():
# print(key)
# # ********** debug message **************
update_model_state = {}
for key, val in model_state_disk.items():
if key in self.state_dict() and self.state_dict()[key].shape == model_state_disk[key].shape:
update_model_state[key] = val
# logger.info('Update weight %s: %s' % (key, str(val.shape)))
state_dict = self.state_dict()
state_dict.update(update_model_state)
self.load_state_dict(state_dict)
for key in state_dict:
if key not in update_model_state:
logger.info('Not updated weight %s: %s' % (key, str(state_dict[key].shape)))
logger.info('==> Done (loaded %d/%d)' % (len(update_model_state), len(self.state_dict())))
def load_params_with_optimizer(self, filename, to_cpu=False, optimizer=None, logger=None):
if not os.path.isfile(filename):
raise FileNotFoundError
logger.info('==> Loading parameters from checkpoint %s to %s' % (filename, 'CPU' if to_cpu else 'GPU'))
loc_type = torch.device('cpu') if to_cpu else None
checkpoint = torch.load(filename, map_location=loc_type)
epoch = checkpoint.get('epoch', -1)
it = checkpoint.get('it', 0.0)
self.load_state_dict(checkpoint['model_state'])
if optimizer is not None:
print("optimizer argument present!")
if 'optimizer_state' in checkpoint and checkpoint['optimizer_state'] is not None:
logger.info('==> Loading optimizer parameters from checkpoint %s to %s'
% (filename, 'CPU' if to_cpu else 'GPU'))
optimizer.load_state_dict(checkpoint['optimizer_state'])
else:
print("optimizer_state NOT in checkpoint!")
assert filename[-4] == '.', filename
src_file, ext = filename[:-4], filename[-3:]
optimizer_filename = '%s_optim.%s' % (src_file, ext)
if os.path.exists(optimizer_filename):
print("optimizer_filename {} exists!".format(optimizer_filename))
optimizer_ckpt = torch.load(optimizer_filename, map_location=loc_type)
optimizer.load_state_dict(optimizer_ckpt['optimizer_state'])
else:
print("optimizer_filename {} does not exist!".format(optimizer_filename))
if 'version' in checkpoint:
print('==> Checkpoint trained from version: %s' % checkpoint['version'])
logger.info('==> Done')
return it, epoch
| 49.102041
| 113
| 0.604484
|
4a10af0f8ef8cb76c9d82d62fcc1d2e32da883b5
| 3,942
|
py
|
Python
|
tests/functional/desktop/test_rewrites.py
|
navgurukul-shivani18/kitsune
|
a7cf49ab1bfcf4e770938116968824b2b0fa5bb1
|
[
"BSD-3-Clause"
] | 4
|
2021-05-17T11:38:08.000Z
|
2021-08-19T06:42:39.000Z
|
tests/functional/desktop/test_rewrites.py
|
navgurukul-shivani18/kitsune
|
a7cf49ab1bfcf4e770938116968824b2b0fa5bb1
|
[
"BSD-3-Clause"
] | 32
|
2021-04-15T22:35:58.000Z
|
2022-01-04T21:30:05.000Z
|
tests/functional/desktop/test_rewrites.py
|
navgurukul-shivani18/kitsune
|
a7cf49ab1bfcf4e770938116968824b2b0fa5bb1
|
[
"BSD-3-Clause"
] | 3
|
2020-06-14T06:59:46.000Z
|
2020-06-15T14:45:56.000Z
|
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
import pytest
import requests
import urllib
@pytest.mark.nondestructive
class TestRedirects:
    """Checks that legacy in-product help URLs redirect to the right SUMO pages.

    NOTE(review): ``urllib.unquote`` is Python 2 only; under Python 3 this
    would be ``urllib.parse.unquote`` -- confirm which interpreter runs
    these tests.
    """

    # Desktop Firefox user-agent string sent with every redirect request.
    _user_agent_firefox = 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10.7; rv:13.0) ' \
        'Gecko/20100101 Firefox/13.0.1'

    def _check_redirect(self, base_url, start_url, user_agent=_user_agent_firefox, locale='en-US'):
        # Follow the redirect chain for base_url + start_url and return the
        # final ``requests`` response.
        if 'support.mozilla.org' not in base_url:
            # These rewrite rules exist only on production.
            pytest.skip("Skipped per dev instructions on continuous deployment. "
                        "To be run only on Prod")
        start_url = base_url + start_url
        headers = {'user-agent': user_agent,
                   'accept-language': locale}
        return requests.get(start_url, headers=headers)

    @pytest.mark.parametrize(('input', 'expected'), [
        ('/1/firefox/4.0/WINNT/en-US/firefox-help/',
         '/en-US/products/firefox?as=u&utm_source=inproduct'),
        ('/1/firefox/4.0/WINNT/en-US/firefox-f1/',
         '/en-US/products/firefox?as=u&utm_source=inproduct'),
        ('/1/firefox/4.0/WINNT/en-US/firefox-osxkey/',
         '/en-US/products/firefox?as=u&utm_source=inproduct'),
        ('/1/firefox/4.0/Darwin/en-US/firefox-help/',
         '/en-US/products/firefox?as=u&utm_source=inproduct'),
        ('/1/firefox/4.0/Darwin/en-US/firefox-f1/',
         '/en-US/products/firefox?as=u&utm_source=inproduct'),
        ('/1/firefox/4.0/Darwin/en-US/firefox-osxkey/',
         '/en-US/products/firefox?as=u&utm_source=inproduct'),
        ('/1/firefox/4.0/Linux/en-US/firefox-help/',
         '/en-US/products/firefox?as=u&utm_source=inproduct'),
        ('/1/firefox/4.0/Linux/en-US/firefox-f1/',
         '/en-US/products/firefox?as=u&utm_source=inproduct'),
        ('/1/firefox/4.0/Linux/en-US/firefox-osxkey/',
         '/en-US/products/firefox?as=u&utm_source=inproduct'),
    ])
    def test_browser_redirect_to_sumo(self, base_url, input, expected):
        # Old desktop in-product help URLs must land on the product page.
        expected_url = base_url + expected
        r = self._check_redirect(base_url, input)
        assert expected_url == urllib.unquote(r.url)
        assert requests.codes.ok == r.status_code

    @pytest.mark.parametrize(('input'), [
        ('/1/firefox/4.0/WINNT/en-US/prefs-main/'),
        ('/1/firefox/4.0/Darwin/en-US/prefs-main/'),
        ('/1/firefox/4.0/Linux/en-US/prefs-main/'),
        ('/1/firefox/4.0/WINNT/en-US/prefs-clear-private-data/'),
        ('/1/firefox/4.0/Darwin/en-US/prefs-clear-private-data/'),
        ('/1/firefox/4.0/Linux/en-US/prefs-clear-private-data/'),
        ('/1/firefox/4.0/WINNT/en-US/prefs-fonts-and-colors/')])
    def test_kb_redirects_status_ok(self, base_url, input):
        # Preference-page redirects only need to resolve successfully;
        # the target article varies, so just assert a 200.
        r = self._check_redirect(base_url, input)
        assert requests.codes.ok == r.status_code

    @pytest.mark.parametrize(('input', 'expected'), [
        ('/1/mobile/4.0/android/en-US/firefox-help',
         '/en-US/products/mobile/popular-articles-android?as=u&utm_source=inproduct'),
        ('/1/mobile/4.0/iphone/en-US/firefox-help',
         '/en-US/products/mobile/popular-articles-android?as=u&utm_source=inproduct'),
        ('/1/mobile/4.0/nokia/en-US/firefox-help',
         '/en-US/products/mobile/popular-articles-android?as=u&utm_source=inproduct')])
    def test_old_mobile_redirects(self, base_url, input, expected):
        # Legacy mobile help URLs all funnel into the Android articles page.
        expected_url = base_url + expected
        r = self._check_redirect(base_url, input)
        assert expected_url == urllib.unquote(r.url)
        assert requests.codes.ok == r.status_code

    @pytest.mark.parametrize(('input'), [
        ('/1/firefox-home/4.0/iPhone/en-US'),
        ('/1/firefox-home/4.0/iPhone/en-US/log-in')])
    def test_iphone_kb_redirects_status_ok(self, base_url, input):
        r = self._check_redirect(base_url, input)
        assert requests.codes.ok == r.status_code
| 46.928571
| 99
| 0.643582
|
4a10af1f74245a1d6e19961d51179b0d95c79e44
| 18,095
|
py
|
Python
|
gammapy/modeling/models/temporal.py
|
AtreyeeS/gammapy
|
a3b47c3da08900a833f0360e0374203e054cadfc
|
[
"BSD-3-Clause"
] | null | null | null |
gammapy/modeling/models/temporal.py
|
AtreyeeS/gammapy
|
a3b47c3da08900a833f0360e0374203e054cadfc
|
[
"BSD-3-Clause"
] | null | null | null |
gammapy/modeling/models/temporal.py
|
AtreyeeS/gammapy
|
a3b47c3da08900a833f0360e0374203e054cadfc
|
[
"BSD-3-Clause"
] | null | null | null |
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""Time-dependent models."""
import numpy as np
import scipy.interpolate
import scipy.special

from astropy import units as u
from astropy.table import Table
from astropy.time import Time
from astropy.utils import lazyproperty

from gammapy.modeling import Parameter
from gammapy.utils.random import InverseCDFSampler, get_random_state
from gammapy.utils.scripts import make_path
from gammapy.utils.time import time_ref_from_dict

from .core import Model
# TODO: make this a small ABC to define a uniform interface.
class TemporalModel(Model):
    """Temporal model base class.

    evaluates on astropy.time.Time objects"""

    # Discriminator shared by all temporal models.
    _type = "temporal"

    def __call__(self, time):
        """Evaluate model

        Parameters
        ----------
        time : `~astropy.time.Time`
            Time object
        """
        # Collect the current parameter values and evaluate at the given
        # times, converted to MJD day quantities.
        kwargs = {par.name: par.quantity for par in self.parameters}
        time = u.Quantity(time.mjd, "day")
        return self.evaluate(time, **kwargs)

    @property
    def type(self):
        # Read-only public accessor for the model type string.
        return self._type

    @staticmethod
    def time_sum(t_min, t_max):
        """
        Total time between t_min and t_max

        Parameters
        ----------
        t_min, t_max: `~astropy.time.Time`
            Lower and upper bound of integration range

        Returns
        -------
        time_sum : `~astropy.time.TimeDelta`
            Summed time in the intervals.
        """
        diff = t_max - t_min
        # TODO: this is a work-around for https://github.com/astropy/astropy/issues/10501
        return u.Quantity(np.sum(diff.to_value("day")), "day")

    def plot(self, time_range, ax=None):
        """
        Plot Temporal Model.

        Parameters
        ----------
        time_range : `~astropy.time.Time`
            times to plot the model
        ax : `~matplotlib.axes.Axes`, optional
            axis

        Returns
        -------
        ax : `~matplotlib.axes.Axes`, optional
            axis
        """
        # Imported lazily so matplotlib is only required when plotting.
        import matplotlib.pyplot as plt

        ax = plt.gca() if ax is None else ax
        t_min, t_max = time_range
        # Sample the model on a fixed 100-point grid between the bounds.
        n_value = 100
        delta = t_max - t_min
        times = t_min + delta * np.linspace(0, 1, n_value)
        val = self(times)
        ax.plot(times.mjd, val)
        return ax

    def sample_time(self, n_events, t_min, t_max, t_delta="1 s", random_state=0):
        """Sample arrival times of events.

        Parameters
        ----------
        n_events : int
            Number of events to sample.
        t_min : `~astropy.time.Time`
            Start time of the sampling.
        t_max : `~astropy.time.Time`
            Stop time of the sampling.
        t_delta : `~astropy.units.Quantity`
            Time step used for sampling of the temporal model.
        random_state : {int, 'random-seed', 'global-rng', `~numpy.random.RandomState`}
            Defines random number generator initialisation.
            Passed to `~gammapy.utils.random.get_random_state`.

        Returns
        -------
        time : `~astropy.units.Quantity`
            Array with times of the sampled events.
        """
        t_min = Time(t_min)
        t_max = Time(t_max)
        t_delta = u.Quantity(t_delta)
        random_state = get_random_state(random_state)
        ontime = u.Quantity((t_max - t_min).sec, "s")
        # Table-backed (template) models define the working time unit via the
        # table's TIMEUNIT metadata; analytic models use the ontime's unit.
        time_unit = u.Unit(self.table.meta["TIMEUNIT"]) if hasattr(self, 'table') else ontime.unit
        t_stop = ontime.to_value(time_unit)
        # TODO: the separate time unit handling is unfortunate, but the quantity support for np.arange and np.interp
        # is still incomplete, refactor once we change to recent numpy and astropy versions
        t_step = t_delta.to_value(time_unit)
        if hasattr(self, 'table'):
            # Template model: evaluate the pdf on an elapsed-time grid, then
            # map sampled pixel coordinates back onto that grid.
            t = np.arange(0, t_stop, t_step)
            pdf = self.evaluate(t)
            sampler = InverseCDFSampler(pdf=pdf, random_state=random_state)
            time_pix = sampler.sample(n_events)[0]
            time = np.interp(time_pix, np.arange(len(t)), t) * time_unit
        else:
            # Analytic model: evaluate on absolute MJD times and shift the
            # sampled values to elapsed time relative to the grid start.
            t_step = (t_step * u.s).to("d")
            t = Time(np.arange(t_min.mjd, t_max.mjd, t_step.value), format="mjd")
            pdf = self(t)
            sampler = InverseCDFSampler(pdf=pdf, random_state=random_state)
            time_pix = sampler.sample(n_events)[0]
            time = (np.interp(time_pix, np.arange(len(t)), t.value - min(t.value)) * t_step.unit).to(time_unit)
        return t_min + time
class ConstantTemporalModel(TemporalModel):
    """Temporal model with a constant (time-independent) norm."""

    tag = ["ConstantTemporalModel", "const"]

    @staticmethod
    def evaluate(time):
        """Return a norm of one for every requested time."""
        return np.full(time.shape, 1.0)

    def integral(self, t_min, t_max):
        """Evaluate the integrated flux within the given time intervals

        Parameters
        ----------
        t_min: `~astropy.time.Time`
            Start times of observation
        t_max: `~astropy.time.Time`
            Stop times of observation

        Returns
        -------
        norm : `~astropy.units.Quantity`
            Integrated flux norm on the given time intervals
        """
        # For a flat profile the integral is simply the interval length,
        # normalised by the summed observation time.
        duration = t_max - t_min
        return duration / self.time_sum(t_min, t_max)
class LinearTemporalModel(TemporalModel):
    """Temporal model with a linear variation.

    For more information see :ref:`linear-temporal-model`.

    Parameters
    ----------
    alpha : float
        Constant term of the baseline flux
    beta : `~astropy.units.Quantity`
        Time variation coefficient of the flux
    t_ref: `~astropy.units.Quantity`
        The reference time in mjd. Frozen per default, at 2000-01-01.
    """

    tag = ["LinearTemporalModel", "linear"]
    alpha = Parameter("alpha", 1., frozen=False)
    beta = Parameter("beta", 0., unit="d-1", frozen=False)
    _t_ref_default = Time("2000-01-01")
    t_ref = Parameter("t_ref", _t_ref_default.mjd, unit="day", frozen=True)

    @staticmethod
    def evaluate(time, alpha, beta, t_ref):
        """Evaluate at given times"""
        return alpha + beta * (time - t_ref)

    def integral(self, t_min, t_max):
        """Evaluate the integrated flux within the given time intervals

        Parameters
        ----------
        t_min: `~astropy.time.Time`
            Start times of observation
        t_max: `~astropy.time.Time`
            Stop times of observation

        Returns
        -------
        norm : float
            Integrated flux norm on the given time intervals
        """
        pars = self.parameters
        # Fix: use the parameter quantity in the arithmetic below rather than
        # the raw Parameter object, consistent with how `beta` and `t_ref`
        # are handled here and in the sibling temporal models.
        alpha = pars["alpha"].quantity
        beta = pars["beta"].quantity
        t_ref = Time(pars["t_ref"].quantity, format="mjd")
        # Analytic integral of alpha + beta * (t - t_ref) over [t_min, t_max].
        value = alpha * (t_max - t_min) + beta / 2. * (
            (t_max - t_ref) * (t_max - t_ref) - (t_min - t_ref) * (t_min - t_ref)
        )
        return value / self.time_sum(t_min, t_max)
class ExpDecayTemporalModel(TemporalModel):
    r"""Temporal model with an exponential decay.

    .. math::
            F(t) = exp(-(t - t_ref)/t0)

    Parameters
    ----------
    t0 : `~astropy.units.Quantity`
        Decay time scale
    t_ref: `~astropy.units.Quantity`
        The reference time in mjd. Frozen per default, at 2000-01-01 .
    """

    tag = ["ExpDecayTemporalModel", "exp-decay"]
    t0 = Parameter("t0", "1 d", frozen=False)
    _t_ref_default = Time("2000-01-01")
    t_ref = Parameter("t_ref", _t_ref_default.mjd, unit="day", frozen=True)

    @staticmethod
    def evaluate(time, t0, t_ref):
        """Evaluate the exponential decay at the given times."""
        # exp(-(t - t_ref)/t0), written with the sign folded into the numerator.
        return np.exp((t_ref - time) / t0)

    def integral(self, t_min, t_max):
        """Evaluate the integrated flux within the given time intervals

        Parameters
        ----------
        t_min: `~astropy.time.Time`
            Start times of observation
        t_max: `~astropy.time.Time`
            Stop times of observation

        Returns
        -------
        norm : float
            Integrated flux norm on the given time intervals
        """
        pars = self.parameters
        t0 = pars["t0"].quantity
        t_ref = Time(pars["t_ref"].quantity, format="mjd")
        # Antiderivative of exp(-(t - t_ref)/t0) is -t0 * exp(-(t - t_ref)/t0).
        upper = self.evaluate(t_max, t0, t_ref)
        lower = self.evaluate(t_min, t0, t_ref)
        return -t0 * (upper - lower) / self.time_sum(t_min, t_max)
class GaussianTemporalModel(TemporalModel):
    r"""A Gaussian temporal profile

    ..math::
            F(t) = exp( -0.5 * \frac{ (t - t_{ref})^2 } { \sigma^2 })

    Parameters
    ----------
    t_ref: `~astropy.units.Quantity`
        The reference time in mjd at the peak.
    sigma : `~astropy.units.Quantity`
        Width of the gaussian profile.
    """

    tag = ["GaussianTemporalModel", "gauss"]
    _t_ref_default = Time("2000-01-01")
    t_ref = Parameter("t_ref", _t_ref_default.mjd, unit="day", frozen=False)
    sigma = Parameter("sigma", "1 d", frozen=False)

    @staticmethod
    def evaluate(time, t_ref, sigma):
        """Evaluate the Gaussian profile at the given times."""
        return np.exp(-((time - t_ref) ** 2) / (2 * sigma ** 2))

    def integral(self, t_min, t_max, **kwargs):
        """Evaluate the integrated flux within the given time intervals

        Parameters
        ----------
        t_min: `~astropy.time.Time`
            Start times of observation
        t_max: `~astropy.time.Time`
            Stop times of observation

        Returns
        -------
        norm : float
            Integrated flux norm on the given time intervals
        """
        pars = self.parameters
        sigma = pars["sigma"].quantity
        t_ref = Time(pars["t_ref"].quantity, format="mjd")
        # Closed-form Gaussian integral via the error function:
        # int exp(-u**2) du = sqrt(pi)/2 * erf(u).
        # Fix: requires an explicit `import scipy.special` at module level;
        # previously only `scipy.interpolate` was imported, so the
        # `scipy.special` attribute was available only by accident of
        # scipy's internal imports.
        norm = np.sqrt(np.pi / 2) * sigma
        u_min = (t_min - t_ref) / (np.sqrt(2) * sigma)
        u_max = (t_max - t_ref) / (np.sqrt(2) * sigma)
        integral = norm * (scipy.special.erf(u_max) - scipy.special.erf(u_min))
        return integral / self.time_sum(t_min, t_max)
class LightCurveTemplateTemporalModel(TemporalModel):
    """Temporal light curve model.

    The lightcurve is given as a table with columns ``time`` and ``norm``.

    The ``norm`` is supposed to be a unit-less multiplicative factor in the model,
    to be multiplied with a spectral model.

    The model does linear interpolation for times between the given ``(time, norm)`` values.

    The implementation currently uses `scipy.interpolate. InterpolatedUnivariateSpline`,
    using degree ``k=1`` to get linear interpolation.
    This class also contains an ``integral`` method, making the computation of
    mean fluxes for a given time interval a one-liner.

    Parameters
    ----------
    table : `~astropy.table.Table`
        A table with 'TIME' vs 'NORM'

    Examples
    --------
    Read an example light curve object:

    >>> from gammapy.modeling.models import LightCurveTemplateTemporalModel
    >>> path = '$GAMMAPY_DATA/tests/models/light_curve/lightcrv_PKSB1222+216.fits'
    >>> light_curve = LightCurveTemplateTemporalModel.read(path)

    Show basic information about the lightcurve:

    >>> print(light_curve)
    LightCurveTemplateTemporalModel model summary:
    Start time: 59000.5 MJD
    End time: 61862.5 MJD
    Norm min: 0.01551196351647377
    Norm max: 1.0
    <BLANKLINE>

    Compute ``norm`` at a given time:

    >>> light_curve.evaluate(60000)
    array(0.01551196)

    Compute mean ``norm`` in a given time interval:

    >>> from astropy.time import Time
    >>> times = Time([60000, 61000], format='mjd')
    >>> light_curve.integral(times[0], times[1])
    <Quantity 0.01721725>
    """

    tag = ["LightCurveTemplateTemporalModel", "template"]

    def __init__(self, table, filename=None):
        # `table` must contain TIME and NORM columns plus time reference
        # metadata (see `_time_ref`) and a TIMEUNIT entry.
        self.table = table
        if filename is not None:
            filename = str(make_path(filename))
        # Remembered so the model can be re-serialised via to_dict()/write().
        self.filename = filename
        super().__init__()

    def __str__(self):
        norm = self.table["NORM"]
        return (
            f"{self.__class__.__name__} model summary:\n"
            f"Start time: {self._time[0].mjd} MJD\n"
            f"End time: {self._time[-1].mjd} MJD\n"
            f"Norm min: {norm.min()}\n"
            f"Norm max: {norm.max()}\n"
        )

    @classmethod
    def read(cls, path):
        """Read lightcurve model table from FITS file.

        TODO: This doesn't read the XML part of the model yet.
        """
        filename = str(make_path(path))
        return cls(Table.read(filename), filename=filename)

    def write(self, path=None, overwrite=False):
        # Write the table back to `path`, defaulting to the file the model
        # was read from; a path is mandatory one way or the other.
        if path is None:
            path = self.filename
        if path is None:
            raise ValueError(f"filename is required for {self.tag}")
        else:
            self.filename = str(make_path(path))
            self.table.write(self.filename, overwrite=overwrite)

    @lazyproperty
    def _interpolator(self, ext=0):
        # Linear (k=1) spline over TIME vs NORM; cached after first use, so
        # mutating `self.table` afterwards will NOT refresh the interpolation.
        x = self._time.value
        y = self.table["NORM"].data
        return scipy.interpolate.InterpolatedUnivariateSpline(x, y, k=1, ext=ext)

    @lazyproperty
    def _time_ref(self):
        # Reference epoch taken from the table metadata.
        return time_ref_from_dict(self.table.meta)

    @lazyproperty
    def _time(self):
        # Absolute times: reference epoch plus TIME column in TIMEUNIT units.
        return self._time_ref + self.table["TIME"].data * getattr(
            u, self.table.meta["TIMEUNIT"]
        )

    def evaluate(self, time, ext=0):
        """Evaluate for a given time.

        Parameters
        ----------
        time : array_like
            Time since the ``reference`` time.
        ext : int or str, optional, default: 0
            Parameter passed to ~scipy.interpolate.InterpolatedUnivariateSpline
            Controls the extrapolation mode for GTIs outside the range
            0 or "extrapolate", return the extrapolated value.
            1 or "zeros", return 0
            2 or "raise", raise a ValueError
            3 or "const", return the boundary value.

        Returns
        -------
        norm : array_like
            Norm at the given times.
        """
        return self._interpolator(time, ext=ext)

    def integral(self, t_min, t_max):
        """Evaluate the integrated flux within the given time intervals

        Parameters
        ----------
        t_min: `~astropy.time.Time`
            Start times of observation
        t_max: `~astropy.time.Time`
            Stop times of observation

        Returns
        -------
        norm: The model integrated flux
        """
        # Difference of the spline antiderivative at the interval ends,
        # normalised by the summed observation time.
        n1 = self._interpolator.antiderivative()(t_max.mjd)
        n2 = self._interpolator.antiderivative()(t_min.mjd)
        return u.Quantity(n1 - n2, "day") / self.time_sum(t_min, t_max)

    @classmethod
    def from_dict(cls, data):
        # Deserialise from the YAML dict produced by to_dict().
        return cls.read(data["filename"])

    def to_dict(self, full_output=False):
        """Create dict for YAML serialisation"""
        return {"type": self.tag[0], "filename": self.filename}
class PowerLawTemporalModel(TemporalModel):
    """Temporal model with a Power Law decay.

    For more information see :ref:`powerlaw-temporal-model`.

    Parameters
    ----------
    alpha : float
        Decay time power
    t_ref: `~astropy.units.Quantity`
        The reference time in mjd. Frozen by default, at 2000-01-01.
    t0: `~astropy.units.Quantity`
        The scaling time in mjd. Fixed by default, at 1 day.
    """

    tag = ["PowerLawTemporalModel", "powerlaw"]
    alpha = Parameter("alpha", 1., frozen=False)
    _t_ref_default = Time("2000-01-01")
    t_ref = Parameter("t_ref", _t_ref_default.mjd, unit="day", frozen=True)
    t0 = Parameter("t0", "1 d", frozen=True)

    @staticmethod
    def evaluate(time, alpha, t_ref, t0=1*u.day):
        """Evaluate the power-law profile at the given times."""
        scaled = (time - t_ref) / t0
        return np.power(scaled, alpha)

    def integral(self, t_min, t_max):
        """Evaluate the integrated flux within the given time intervals

        Parameters
        ----------
        t_min: `~astropy.time.Time`
            Start times of observation
        t_max: `~astropy.time.Time`
            Stop times of observation

        Returns
        -------
        norm : float
            Integrated flux norm on the given time intervals
        """
        pars = self.parameters
        alpha = pars["alpha"].quantity
        t0 = pars["t0"].quantity
        t_ref = Time(pars["t_ref"].quantity, format="mjd")
        if alpha == -1:
            # Special case: the antiderivative of 1/x is log(x).
            integ = np.log((t_max - t_ref) / (t_min - t_ref))
            return t0 * integ / self.time_sum(t_min, t_max)
        # Generic case: antiderivative is t0/(alpha+1) * ((t - t_ref)/t0)**(alpha+1),
        # expressed here by evaluating the profile at power alpha+1.
        upper = self.evaluate(t_max, alpha + 1., t_ref, t0)
        lower = self.evaluate(t_min, alpha + 1., t_ref, t0)
        return t0 / (alpha + 1.) * (upper - lower) / self.time_sum(t_min, t_max)
class SineTemporalModel(TemporalModel):
    """Temporal model with a sinusoidal modulation.

    For more information see :ref:`sine-temporal-model`.

    Parameters
    ----------
    amp : float
        Amplitude of the sinusoidal function
    t_ref: `~astropy.units.Quantity`
        The reference time in mjd.
    omega: `~astropy.units.Quantity`
        Pulsation of the signal.
    """

    tag = ["SineTemporalModel", "sinus"]
    amp = Parameter("amp", 1., frozen=False)
    omega = Parameter("omega", "1. rad/day", frozen=False)
    _t_ref_default = Time("2000-01-01")
    t_ref = Parameter("t_ref", _t_ref_default.mjd, unit="day", frozen=False)

    @staticmethod
    def evaluate(time, amp, omega, t_ref):
        """Evaluate the sinusoidal modulation at the given times."""
        phase = omega * (time - t_ref)
        return amp * np.sin(phase) + 1.

    def integral(self, t_min, t_max):
        """Evaluate the integrated flux within the given time intervals

        Parameters
        ----------
        t_min: `~astropy.time.Time`
            Start times of observation
        t_max: `~astropy.time.Time`
            Stop times of observation

        Returns
        -------
        norm : float
            Integrated flux norm on the given time intervals
        """
        pars = self.parameters
        omega = pars["omega"].quantity.to_value('rad/day')
        amp = pars["amp"].value
        t_ref = Time(pars["t_ref"].quantity, format="mjd")
        # Antiderivative of 1 + A*sin(w*(t - t_ref)) evaluated at both ends:
        # t - (A/w)*cos(...) -> difference reduces to the expression below.
        phase_max = omega * (t_max - t_ref).to_value('day')
        phase_min = omega * (t_min - t_ref).to_value('day')
        value = (t_max - t_min) - amp / omega * (np.sin(phase_max) - np.sin(phase_min))
        return value / self.time_sum(t_min, t_max)
| 31.252159
| 116
| 0.596795
|
4a10af957a1bdf1d3d44d6ce183d3e93f9043d12
| 1,468
|
py
|
Python
|
scripts/tests/chiptest/__init__.py
|
hakanjansson/connectedhomeip
|
980e1770075e5209d719693bb2e70d1966e4949f
|
[
"Apache-2.0"
] | 1
|
2022-02-21T08:33:37.000Z
|
2022-02-21T08:33:37.000Z
|
scripts/tests/chiptest/__init__.py
|
hakanjansson/connectedhomeip
|
980e1770075e5209d719693bb2e70d1966e4949f
|
[
"Apache-2.0"
] | null | null | null |
scripts/tests/chiptest/__init__.py
|
hakanjansson/connectedhomeip
|
980e1770075e5209d719693bb2e70d1966e4949f
|
[
"Apache-2.0"
] | null | null | null |
#
# Copyright (c) 2021 Project CHIP Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from pathlib import Path
import os
import logging
import subprocess
import re
import chiptest.linux
import chiptest.runner
from .test_definition import TestTarget, TestDefinition, ApplicationPaths
def AllTests(chip_tool: str):
    """Yield a TestDefinition for every test the `chip_tool` binary reports.

    Runs ``chip_tool tests list`` and maps each listed name to a target
    application based on its prefix.
    """
    completed = subprocess.run([chip_tool, 'tests', 'list'], capture_output=True)
    for test_name in completed.stdout.decode('utf8').split('\n'):
        if not test_name:
            # Skip the empty entries produced by trailing/blank lines.
            continue
        if test_name.startswith('TV_'):
            target_app = TestTarget.TV
        elif test_name.startswith('DL_'):
            target_app = TestTarget.DOOR_LOCK
        else:
            target_app = TestTarget.ALL_CLUSTERS
        yield TestDefinition(run_name=test_name, name=test_name, target=target_app)
__all__ = ['TestTarget', 'TestDefinition', 'AllTests', 'ApplicationPaths']
| 29.36
| 78
| 0.694823
|
4a10afc4e66c8e49bc1f1709cf665ad65fb185dc
| 4,754
|
py
|
Python
|
migrate.py
|
afit/rimworld-save-migrator
|
a142dcef807e58a471b8bd840e4c4416ced73173
|
[
"MIT"
] | 3
|
2018-07-02T23:36:52.000Z
|
2019-01-21T17:16:35.000Z
|
migrate.py
|
afit/rimworld-save-migrator
|
a142dcef807e58a471b8bd840e4c4416ced73173
|
[
"MIT"
] | null | null | null |
migrate.py
|
afit/rimworld-save-migrator
|
a142dcef807e58a471b8bd840e4c4416ced73173
|
[
"MIT"
] | null | null | null |
#!/usr/bin/python
import sys
from os.path import exists, join, dirname
from importlib import import_module
from util.exceptions import MisformedSaveError
from util.filesystem import get_saves, get_save_path, get_path_from_name
from util.saves import Save
def migrate():
    """Command-line entry point for migrating RimWorld save files (Python 2).

    With no argument: lists the saves found in the save directory and exits.
    With a path or save name: looks up the matching save, determines its
    version, and runs the matching migration module from the ``versions``
    package, writing the migrated save next to the original. An optional
    second argument ``debug`` allows overwriting an existing output file.
    """
    # First CLI argument (if any) is a save path or a save name.
    if len(sys.argv) > 1:
        save_name = sys.argv[1]
    else: save_name = None
    # "debug" as second argument skips the output-already-exists check below.
    debug = len(sys.argv) > 2 and sys.argv[2] == 'debug'
    if not save_name:
        # Listing mode: print a summary line per save and exit.
        print 'No argument provided; listing saves in %s...\n' % get_save_path()
        saves = get_saves()
        if not saves:
            print 'However, there aren\'t any saves there.'
            exit(0)
        # lxml is needed to parse save metadata; fail early with a hint.
        try:
            from lxml import etree
        except:
            print 'Couldn\'t load lxml; you can install it by running: `pip install lxml`'
            exit(-1)
        for path in saves:
            try:
                save = Save( path )
            except MisformedSaveError, e:
                # Skip unreadable saves but keep listing the rest.
                print '\tSave "%s" misformed; skipping it...' % path[:-4]
                print '\tError: %s' % e.message
                continue
            # Print that info.
            print ' * "%s" (version: %s, seed: %s %s, playtime: %s, mods: %s)' % (
                save.name, save.version,
                save.seed, save.size,
                save.playtime, ', '.join( save.mods.keys() ),
            )
        print '\nRun this script again with a path or save name to migrate it.'
        exit(0)
    # Let's figure out what to do with the save we've been told about.
    if exists( save_name ): # Hey, it's a path
        save = Save( save_name )
    else: # It must be a name
        try:
            save = Save( get_path_from_name( save_name ) )
        except IOError:
            print 'Couldn\'t find path or save called "%s".' % save_name
            exit(-1)
    print 'Examining save; it is version %s.' % save.version
    if save.mods and save.mods.keys() != ['Core']:
        print 'This save includes mods, which complicate migration. If migration does not complete, try re-saving your original game without mods.'
    # Lets look at the save we've been told about.
    if save.versions[0] == 1: # Version 1. That's the latest, for now.
        print 'Further migrations not supported; check for an updated script.'
        exit(-1)
    elif save.versions[1] < 15:
        print 'Follow the guide at https://github.com/afit/rimworld-save-migrator to migrate saves from below A15.'
        exit(-1)
    elif save.versions[1] == 17:
        print 'This save can be migrated to B18 by loading and saving it in RimWorld B18.'
        exit(-1)
    # This leaves us with A15, A16 and B18. We need a seed to be able to migrate these.
    # Let's use a matrix.
    # Maps "major.minor" version strings to migration metadata:
    #   seed_needed   -- version of an extra "seed" save required, or None
    #   seed_readable -- human-readable name of the migration target
    #   migration     -- dotted module path implementing migrate()
    matrix = {
        '0.18': {
            'seed_needed': None,
            'seed_readable': '1.0',
            'migration': 'versions.u1migration',
        },
        '0.16': {
            'seed_needed': [0, 17],
            'seed_readable': 'A17',
            'migration': 'versions.a17migration',
        },
        '0.15': {
            'seed_needed': [0, 16],
            'seed_readable': 'A16',
            'migration': 'versions.a16migration',
        },
    }
    mi = matrix[ '.'.join( str(x) for x in save.versions[0:2] ) ]
    # If one wasn't passed, let's use one of the sample saves.
    if mi['seed_needed']:
        print 'In order to migrate this save, data is needed from a new %s save; I\'ll use your most recently modified save of this version.' % mi['seed_readable']
        found = False
        for s in get_saves():
            seed = Save( s )
            if seed.versions[0:2] == mi['seed_needed']:
                found = True
                break
        if not found:
            print 'Couldn\'t find a save of this version to use as a seed: please create one.'
            exit(-1)
        print 'Using "%s" as seed...' % seed.name
    # Output goes next to the source save, tagged with the target version.
    migration_name = '%s.%smigration.rws' % ( save.name, mi['seed_readable'] )
    migration_path = join( dirname( save.path ), migration_name )
    print 'Migrating to new save "%s"...' % migration_name
    if not debug and exists( migration_path ):
        print 'Can\'t migrate as "%s" already exists: move it away first?' % migration_name
        exit(-1)
    migration = import_module( mi['migration'] )
    # Seed-based migrations take (source, seed, destination); the rest
    # take only (source, destination).
    if mi['seed_needed']:
        migration.migrate( save.path, seed.path, migration_path )
    else:
        migration.migrate( save.path, migration_path )
    print 'Migrated successfully to "%s", you should load and save this before migrating further. Good luck!' % migration_name
    print 'Report issues etc. to https://github.com/afit/rimworld-save-migrator.'
    exit(0)
| 34.449275
| 163
| 0.57846
|
4a10b18ff44a90fb0309537e2f4e5bc34eed3cbd
| 3,916
|
py
|
Python
|
main_test.py
|
msaperst/photobooth
|
120147f50b802df84b59caf7729976202b90191e
|
[
"Apache-2.0"
] | null | null | null |
main_test.py
|
msaperst/photobooth
|
120147f50b802df84b59caf7729976202b90191e
|
[
"Apache-2.0"
] | null | null | null |
main_test.py
|
msaperst/photobooth
|
120147f50b802df84b59caf7729976202b90191e
|
[
"Apache-2.0"
] | null | null | null |
import unittest
from main import *
def clean_up():
    """Remove every working image directory that currently exists."""
    for folder in (imageQueue, imageStore, imageBackup):
        if os.path.exists(folder):
            shutil.rmtree(folder)
class MainTest(unittest.TestCase):
    """Tests for photo checking, user checking, folder creation and the
    ready-to-process queue threshold in ``main``.

    Improvement over the original: the repeated ``open(...); close()``
    touch pattern and the repeated create/remove/assert sequences are
    factored into private helpers, so each test states only its intent.
    """

    @staticmethod
    def _touch(path):
        # Create an empty placeholder file at ``path``.
        open(path, 'w').close()

    def _touch_queue(self, count):
        # Create ``count`` empty photos in the image queue; return their paths.
        paths = [os.path.join(imageQueue, 'photo%d.jpg' % i)
                 for i in range(1, count + 1)]
        for path in paths:
            self._touch(path)
        return paths

    @staticmethod
    def _remove_all(paths):
        for path in paths:
            os.remove(path)

    def _assert_folders_writable(self):
        # All working folders (and the prints subfolder) must be usable.
        for folder in (imageQueue, imageStore, imageBackup,
                       os.path.join(imageStore, 'prints')):
            self.assertTrue(os.access(folder, os.X_OK | os.W_OK))

    def setUp(self):
        # Start from a clean slate so leftover folders can't skew a test.
        clean_up()

    def tearDown(self):
        clean_up()

    def test_valid_default_alignment(self):
        self._touch('logo.jpg')
        check_photos()
        self.assertTrue(True)
        os.remove('logo.jpg')

    def test_valid_alignment(self):
        self._touch('somePhoto.jpg')
        check_photos(3, 3, 8, 'somePhoto.jpg')
        self.assertTrue(True)
        os.remove('somePhoto.jpg')

    def test_valid_alignment_bad_logo(self):
        # Logo file does not exist, so this layout must be rejected.
        self.assertRaises(ValueError, check_photos, 3, 3, 8, 'somePhoto.jpg')

    def test_valid_alignment_no_logo(self):
        check_photos(3, 3, 9, None)
        self.assertTrue(True)

    def test_invalid_alignment(self):
        # 9 photos plus a logo cannot fit in a 3x3 layout.
        self.assertRaises(ValueError, check_photos, 3, 3, 9, 'somePhoto.jpg')

    def test_valid_user(self):
        check_user('root')
        self.assertTrue(True)

    def test_invalid_user(self):
        self.assertRaises(UserWarning, check_user, 'max')

    def test_folders_do_not_exist(self):
        create_folders()
        self._assert_folders_writable()

    def test_folders_do_exist(self):
        # Calling create_folders twice must be idempotent.
        create_folders()
        create_folders()
        self._assert_folders_writable()

    def test_not_enough_to_go_none(self):
        create_folders()
        self.assertFalse(ready_to_process())

    def test_not_enough_to_go_two(self):
        create_folders()
        paths = self._touch_queue(2)
        self.assertFalse(ready_to_process())
        self._remove_all(paths)

    def test_not_enough_to_go_three(self):
        create_folders()
        paths = self._touch_queue(3)
        self.assertTrue(ready_to_process())
        self._remove_all(paths)

    def test_not_enough_to_go_five(self):
        create_folders()
        paths = self._touch_queue(5)
        self.assertTrue(ready_to_process())
        self._remove_all(paths)
# Allow running this module directly to execute the whole suite.
if __name__ == '__main__':
    unittest.main()
| 33.186441
| 89
| 0.621808
|
4a10b24b84675989de5888c4c9dcead1440562b9
| 11,853
|
py
|
Python
|
virtual/lib/python3.6/site-packages/PIL/IcnsImagePlugin.py
|
kenmutuma001/galleria
|
1bbb9fbd3ca8bf7a030dbcbcbd1674d392055d72
|
[
"Unlicense"
] | 445
|
2019-01-26T13:50:26.000Z
|
2022-03-18T05:17:38.000Z
|
venv/lib/python3.7/site-packages/PIL/IcnsImagePlugin.py
|
haideraltahan/CropMe
|
75a111b9d3b2c50c6f2a9a36d21432053f02284d
|
[
"MIT"
] | 242
|
2019-01-29T15:48:27.000Z
|
2022-03-31T22:09:21.000Z
|
virtual/lib/python3.6/site-packages/PIL/IcnsImagePlugin.py
|
ngishjonathan/gallery
|
dd67f28887316d6277927c667f6641d26317b0b8
|
[
"MIT"
] | 64
|
2018-04-25T08:51:57.000Z
|
2022-01-29T14:13:57.000Z
|
#
# The Python Imaging Library.
# $Id$
#
# macOS icns file decoder, based on icns.py by Bob Ippolito.
#
# history:
# 2004-10-09 fl Turned into a PIL plugin; removed 2.3 dependencies.
#
# Copyright (c) 2004 by Bob Ippolito.
# Copyright (c) 2004 by Secret Labs.
# Copyright (c) 2004 by Fredrik Lundh.
# Copyright (c) 2014 by Alastair Houghton.
#
# See the README file for information on usage and redistribution.
#
from PIL import Image, ImageFile, PngImagePlugin
from PIL._binary import i8
import io
import os
import shutil
import struct
import sys
import tempfile
enable_jpeg2k = hasattr(Image.core, 'jp2klib_version')
if enable_jpeg2k:
from PIL import Jpeg2KImagePlugin
# Every icns block starts with an 8-byte header: 4-byte type + 4-byte size.
HEADERSIZE = 8


def nextheader(fobj):
    """Read the next (type, size) icns block header from *fobj*."""
    raw = fobj.read(HEADERSIZE)
    # Big-endian: 4-byte signature followed by an unsigned 32-bit length.
    return struct.unpack('>4sI', raw)
def read_32t(fobj, start_length, size):
    """Read a 32-bit icon resource that carries a 4-byte zero prefix.

    The 128x128 icon seems to have an extra header for some reason.
    """
    start, length = start_length
    fobj.seek(start)
    if fobj.read(4) != b'\x00\x00\x00\x00':
        raise SyntaxError('Unknown signature, expecting 0x00000000')
    # Skip the 4 prefix bytes and hand off to the plain 32-bit reader.
    return read_32(fobj, (start + 4, length - 4), size)
def read_32(fobj, start_length, size):
    """
    Read a 32bit RGB icon resource. Seems to be either uncompressed or
    an RLE packbits-like scheme.
    """
    (start, length) = start_length
    fobj.seek(start)
    # Physical pixel size = logical size * scale factor (retina variants).
    pixel_size = (size[0] * size[2], size[1] * size[2])
    sizesq = pixel_size[0] * pixel_size[1]
    if length == sizesq * 3:
        # uncompressed ("RGBRGBGB")
        indata = fobj.read(length)
        im = Image.frombuffer("RGB", pixel_size, indata, "raw", "RGB", 0, 1)
    else:
        # decode image
        im = Image.new("RGB", pixel_size, None)
        # Channels are stored planar: one full R plane, then G, then B.
        for band_ix in range(3):
            data = []
            bytesleft = sizesq
            while bytesleft > 0:
                byte = fobj.read(1)
                if not byte:
                    # Ran out of input mid-channel; the check below reports it.
                    break
                byte = i8(byte)
                if byte & 0x80:
                    # High bit set: a run of one repeated byte.
                    # Run length is (byte & 0x7f) + 3, i.e. byte - 125.
                    blocksize = byte - 125
                    byte = fobj.read(1)
                    for i in range(blocksize):
                        data.append(byte)
                else:
                    # High bit clear: a literal run of byte + 1 raw bytes.
                    blocksize = byte + 1
                    data.append(fobj.read(blocksize))
                bytesleft -= blocksize
                if bytesleft <= 0:
                    break
            if bytesleft != 0:
                raise SyntaxError(
                    "Error reading channel [%r left]" % bytesleft
                )
            band = Image.frombuffer(
                "L", pixel_size, b"".join(data), "raw", "L", 0, 1
            )
            im.im.putband(band.im, band_ix)
    return {"RGB": im}
def read_mk(fobj, start_length, size):
    """Read an uncompressed 8-bit alpha mask as a single "A" band."""
    # Alpha masks seem to be uncompressed
    offset = start_length[0]
    fobj.seek(offset)
    pixel_size = (size[0] * size[2], size[1] * size[2])
    npixels = pixel_size[0] * pixel_size[1]
    mask = Image.frombuffer(
        "L", pixel_size, fobj.read(npixels), "raw", "L", 0, 1
    )
    return {"A": mask}
def read_png_or_jpeg2000(fobj, start_length, size):
    """Read a PNG or JPEG-2000 encoded icon subimage as an RGBA image."""
    (start, length) = start_length
    fobj.seek(start)
    sig = fobj.read(12)
    if sig[:8] == b'\x89PNG\x0d\x0a\x1a\x0a':
        # PNG payload: rewind and let the PNG plugin parse it.
        fobj.seek(start)
        return {"RGBA": PngImagePlugin.PngImageFile(fobj)}
    jp2k_prefixes = (b'\xff\x4f\xff\x51', b'\x0d\x0a\x87\x0a')
    if sig[:4] in jp2k_prefixes or sig == b'\x00\x00\x00\x0cjP \x0d\x0a\x87\x0a':
        if not enable_jpeg2k:
            raise ValueError('Unsupported icon subimage format (rebuild PIL '
                             'with JPEG 2000 support to fix this)')
        # j2k, jpc or j2c
        fobj.seek(start)
        stream = io.BytesIO(fobj.read(length))
        im = Jpeg2KImagePlugin.Jpeg2KImageFile(stream)
        if im.mode != 'RGBA':
            im = im.convert('RGBA')
        return {"RGBA": im}
    raise ValueError('Unsupported icon subimage format')
class IcnsFile(object):
    """Parser for the icns container format.

    Maps (width, height, scale) sizes to the resource codes that can
    supply that size, paired with the reader function that decodes them.
    """

    # (w, h, scale) -> list of (4-byte resource code, reader) candidates,
    # tried in order by dataforsize().
    SIZES = {
        (512, 512, 2): [
            (b'ic10', read_png_or_jpeg2000),
        ],
        (512, 512, 1): [
            (b'ic09', read_png_or_jpeg2000),
        ],
        (256, 256, 2): [
            (b'ic14', read_png_or_jpeg2000),
        ],
        (256, 256, 1): [
            (b'ic08', read_png_or_jpeg2000),
        ],
        (128, 128, 2): [
            (b'ic13', read_png_or_jpeg2000),
        ],
        (128, 128, 1): [
            (b'ic07', read_png_or_jpeg2000),
            (b'it32', read_32t),
            (b't8mk', read_mk),
        ],
        (64, 64, 1): [
            (b'icp6', read_png_or_jpeg2000),
        ],
        (32, 32, 2): [
            (b'ic12', read_png_or_jpeg2000),
        ],
        (48, 48, 1): [
            (b'ih32', read_32),
            (b'h8mk', read_mk),
        ],
        (32, 32, 1): [
            (b'icp5', read_png_or_jpeg2000),
            (b'il32', read_32),
            (b'l8mk', read_mk),
        ],
        (16, 16, 2): [
            (b'ic11', read_png_or_jpeg2000),
        ],
        (16, 16, 1): [
            (b'icp4', read_png_or_jpeg2000),
            (b'is32', read_32),
            (b's8mk', read_mk),
        ],
    }

    def __init__(self, fobj):
        """
        fobj is a file-like object as an icns resource
        """
        # signature : (start, length)
        self.dct = dct = {}
        self.fobj = fobj
        sig, filesize = nextheader(fobj)
        if sig != b'icns':
            raise SyntaxError('not an icns file')
        i = HEADERSIZE
        # Walk the top-level blocks, recording each resource's payload
        # offset and length (block size includes its own header).
        while i < filesize:
            sig, blocksize = nextheader(fobj)
            if blocksize <= 0:
                raise SyntaxError('invalid block header')
            i += HEADERSIZE
            blocksize -= HEADERSIZE
            dct[sig] = (i, blocksize)
            fobj.seek(blocksize, io.SEEK_CUR)
            i += blocksize

    def itersizes(self):
        """Return the list of (w, h, scale) sizes present in the file."""
        sizes = []
        for size, fmts in self.SIZES.items():
            for (fmt, reader) in fmts:
                if fmt in self.dct:
                    sizes.append(size)
                    break
        return sizes

    def bestsize(self):
        """Return the largest available size; raise if none was found."""
        sizes = self.itersizes()
        if not sizes:
            raise SyntaxError("No 32bit icon resources found")
        return max(sizes)

    def dataforsize(self, size):
        """
        Get an icon resource as {channel: array}. Note that
        the arrays are bottom-up like windows bitmaps and will likely
        need to be flipped or transposed in some way.
        """
        dct = {}
        for code, reader in self.SIZES[size]:
            desc = self.dct.get(code)
            if desc is not None:
                dct.update(reader(self.fobj, desc, size))
        return dct

    def getimage(self, size=None):
        """Return a PIL image for *size* (defaults to the best size).

        Prefers a ready-made RGBA subimage; otherwise composes RGB plus
        an optional alpha mask.
        """
        if size is None:
            size = self.bestsize()
        if len(size) == 2:
            size = (size[0], size[1], 1)
        channels = self.dataforsize(size)
        im = channels.get('RGBA', None)
        if im:
            return im
        im = channels.get("RGB").copy()
        try:
            im.putalpha(channels["A"])
        except KeyError:
            pass
        return im
##
# Image plugin for Mac OS icons.
class IcnsImageFile(ImageFile.ImageFile):
    """
    PIL image support for Mac OS .icns files.
    Chooses the best resolution, but will possibly load
    a different size image if you mutate the size attribute
    before calling 'load'.

    The info dictionary has a key 'sizes' that is a list
    of sizes that the icns file has.
    """

    format = "ICNS"
    format_description = "Mac OS icns resource"

    def _open(self):
        # Parse the container and default to the largest available icon.
        self.icns = IcnsFile(self.fp)
        self.mode = 'RGBA'
        self.info['sizes'] = self.icns.itersizes()
        self.best_size = self.icns.bestsize()
        self.size = (self.best_size[0] * self.best_size[2],
                     self.best_size[1] * self.best_size[2])
        # Just use this to see if it's loaded or not yet.
        self.tile = ('',)

    @property
    def size(self):
        return self._size

    @size.setter
    def size(self, value):
        # Accept (w, h) or (w, h, scale); normalise and validate against
        # the sizes actually present in this file.
        info_size = value
        if info_size not in self.info['sizes'] and len(info_size) == 2:
            info_size = (info_size[0], info_size[1], 1)
        if info_size not in self.info['sizes'] and len(info_size) == 3 and \
           info_size[2] == 1:
            # A (w, h, 1) request may match a retina entry whose pixel
            # dimensions (w*scale, h*scale) are the same.
            simple_sizes = [(size[0] * size[2], size[1] * size[2])
                            for size in self.info['sizes']]
            if value in simple_sizes:
                info_size = self.info['sizes'][simple_sizes.index(value)]
        if info_size not in self.info['sizes']:
            raise ValueError(
                "This is not one of the allowed sizes of this image")
        self._size = value

    def load(self):
        if len(self.size) == 3:
            self.best_size = self.size
            self.size = (self.best_size[0] * self.best_size[2],
                         self.best_size[1] * self.best_size[2])
        Image.Image.load(self)
        # Empty tile means the pixel data was already loaded.
        if not self.tile:
            return
        self.load_prepare()
        # This is likely NOT the best way to do it, but whatever.
        im = self.icns.getimage(self.best_size)
        # If this is a PNG or JPEG 2000, it won't be loaded yet
        im.load()
        self.im = im.im
        self.mode = im.mode
        self.size = im.size
        if self._exclusive_fp:
            self.fp.close()
        self.fp = None
        self.icns = None
        self.tile = ()
        self.load_end()
def _save(im, fp, filename):
    """
    Saves the image as a series of PNG files,
    that are then converted to a .icns file
    using the macOS command line utility 'iconutil'.

    macOS only.

    :param im: image to save; sizes supplied via the ``append_images``
        encoder option are used as-is, others are produced by resizing.
    :param fp: destination file object (only flushed here — iconutil
        writes directly to ``filename``).
    :param filename: destination path handed to iconutil.
    :raises CalledProcessError: if iconutil exits with a non-zero status.
    """
    if hasattr(fp, "flush"):
        fp.flush()

    from subprocess import CalledProcessError, call

    provided_images = {im.width: im
                       for im in im.encoderinfo.get("append_images", [])}

    # Create the temporary set of pngs.  Clean the directory up even if
    # one of the intermediate saves or the conversion raises.
    iconset = tempfile.mkdtemp('.iconset')
    try:
        last_w = None
        second_path = None
        for w in [16, 32, 128, 256, 512]:
            prefix = 'icon_{}x{}'.format(w, w)

            first_path = os.path.join(iconset, prefix + '.png')
            if last_w == w:
                # The previous @2x image has exactly this size; reuse it.
                shutil.copyfile(second_path, first_path)
            else:
                im_w = provided_images.get(w, im.resize((w, w), Image.LANCZOS))
                im_w.save(first_path)

            second_path = os.path.join(iconset, prefix + '@2x.png')
            im_w2 = provided_images.get(w * 2,
                                        im.resize((w * 2, w * 2),
                                                  Image.LANCZOS))
            im_w2.save(second_path)
            last_w = w * 2

        # iconutil -c icns -o {} {}
        convert_cmd = ["iconutil", "-c", "icns", "-o", filename, iconset]
        # Discard both output streams; the original piped stdout and then
        # closed the pipe, which could SIGPIPE a chatty child process.
        with open(os.devnull, 'wb') as devnull:
            retcode = call(convert_cmd, stdout=devnull, stderr=devnull)
    finally:
        # remove the temporary files
        shutil.rmtree(iconset)

    if retcode:
        raise CalledProcessError(retcode, convert_cmd)
# Register the plugin with PIL; saving requires iconutil and so is only
# available on macOS.
Image.register_open(IcnsImageFile.format, IcnsImageFile,
                    lambda x: x[:4] == b'icns')
Image.register_extension(IcnsImageFile.format, '.icns')

if sys.platform == 'darwin':
    Image.register_save(IcnsImageFile.format, _save)

    Image.register_mime(IcnsImageFile.format, "image/icns")


if __name__ == '__main__':

    if len(sys.argv) < 2:
        print("Syntax: python IcnsImagePlugin.py [file]")
        sys.exit()

    # Dump every stored size as out-<w>-<h>-<scale>.png, then the best
    # size as out.png.
    imf = IcnsImageFile(open(sys.argv[1], 'rb'))
    for size in imf.info['sizes']:
        imf.size = size
        imf.load()
        im = imf.im
        im.save('out-%s-%s-%s.png' % size)
    im = Image.open(sys.argv[1])
    im.save("out.png")
    # BUG FIX: sys.platform is 'win32' on Windows (never 'windows'), so
    # the original preview branch was unreachable.
    if sys.platform == 'win32':
        os.startfile("out.png")
| 29.781407
| 78
| 0.545347
|
4a10b439da4a083603296fd82c22cba3cdc76b7f
| 430
|
py
|
Python
|
lib/MotifEnsemble/Utils/MakeNewReport.py
|
arwyer/MotifEnsemble
|
9a003d8a874ecaa9b9cc492495c2f29d09266535
|
[
"MIT"
] | 1
|
2019-07-20T05:53:22.000Z
|
2019-07-20T05:53:22.000Z
|
lib/MotifEnsemble/Utils/MakeNewReport.py
|
man4ish/MotifEnsemble
|
cf68e01a82bd3b1fa524ca5370763c82b222d0d3
|
[
"MIT"
] | 1
|
2019-07-08T17:48:12.000Z
|
2019-07-08T17:48:49.000Z
|
lib/MotifEnsemble/Utils/MakeNewReport.py
|
man4ish/MotifEnsemble
|
cf68e01a82bd3b1fa524ca5370763c82b222d0d3
|
[
"MIT"
] | 1
|
2019-01-08T15:48:11.000Z
|
2019-01-08T15:48:11.000Z
|
import json
import sys
import os
class MakeNewReport:
    """Copies the static report template into an output directory and
    writes the motif data the report renders as JSON."""

    def __init__(self):
        # Stateless helper; nothing to initialise.
        pass

    def MakeReport(self, htmlDir, motifSet):
        """Populate *htmlDir* with the report template and its data file.

        :param htmlDir: destination directory for the HTML report.
        :param motifSet: JSON-serialisable motif data, written to
            ReportMotif.json inside *htmlDir*.
        """
        # NOTE(review): the copy shells out unquoted, so an htmlDir
        # containing spaces or shell metacharacters would break — confirm
        # callers always pass a sanitised scratch path.
        reportPath = '/kb/module/lib/MotifEnsemble/Utils/Report/*'
        CopyCommand = 'cp -r ' + reportPath + ' ' + htmlDir
        os.system(CopyCommand)
        jsonFName = htmlDir + '/ReportMotif.json'
        with open(jsonFName,'w') as motifjson:
            json.dump(motifSet,motifjson)
        return
| 25.294118
| 64
| 0.655814
|
4a10b483abececa748eda7041a0e19fe8a0b1fac
| 706
|
py
|
Python
|
music_library/migrations/0002_instrument.py
|
loictessier/weborchestra
|
80a7d8e3d94b84df65e8ecc52ea8b5cd200204e0
|
[
"MIT"
] | null | null | null |
music_library/migrations/0002_instrument.py
|
loictessier/weborchestra
|
80a7d8e3d94b84df65e8ecc52ea8b5cd200204e0
|
[
"MIT"
] | 4
|
2020-12-15T11:13:59.000Z
|
2021-06-10T20:28:54.000Z
|
music_library/migrations/0002_instrument.py
|
loictessier/weborchestra
|
80a7d8e3d94b84df65e8ecc52ea8b5cd200204e0
|
[
"MIT"
] | null | null | null |
# Generated by Django 3.0.9 on 2020-12-12 15:15
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    # Adds the Instrument model: a uniquely named instrument that belongs
    # to a MusicScore.  Auto-generated by makemigrations — prefer
    # generating a new migration over editing the operations by hand.

    dependencies = [
        ('music_library', '0001_initial'),
    ]

    operations = [
        migrations.CreateModel(
            name='Instrument',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.TextField(max_length=50, unique=True, verbose_name='Nom')),
                ('music_score', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='music_library.MusicScore')),
            ],
        ),
    ]
| 30.695652
| 127
| 0.621813
|
4a10b5ec7d423a992575382378affffbcc16125d
| 21,402
|
py
|
Python
|
infoblox_client/object_manager.py
|
Maciejzzzz/infoblox-client
|
2efaf4b92fb077af28a2f5e44b88b6083efaa084
|
[
"Apache-2.0"
] | 108
|
2015-11-13T14:39:00.000Z
|
2022-03-29T15:47:35.000Z
|
infoblox_client/object_manager.py
|
Maciejzzzz/infoblox-client
|
2efaf4b92fb077af28a2f5e44b88b6083efaa084
|
[
"Apache-2.0"
] | 286
|
2015-10-24T07:29:44.000Z
|
2022-03-22T01:09:55.000Z
|
infoblox_client/object_manager.py
|
Maciejzzzz/infoblox-client
|
2efaf4b92fb077af28a2f5e44b88b6083efaa084
|
[
"Apache-2.0"
] | 98
|
2015-09-24T14:34:53.000Z
|
2022-03-16T14:00:13.000Z
|
# Copyright 2015 Infoblox Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
try:
from oslo_log import log as logging
except ImportError: # pragma: no cover
import logging
from infoblox_client import exceptions as ib_ex
from infoblox_client import objects as obj
from infoblox_client import utils as ib_utils
LOG = logging.getLogger(__name__)
class InfobloxObjectManager(object):
    """High-level facade over the Infoblox WAPI.

    Wraps a connector with convenience methods for creating, searching,
    updating and deleting common NIOS objects: network views, DNS views,
    networks, IP ranges, host records, fixed addresses, DNS zones/records
    and extensible-attribute (EA) definitions.  Delete/search helpers are
    generally idempotent: missing objects are silently ignored.
    """

    def __init__(self, connector):
        # connector: infoblox_client connector used for all WAPI calls.
        self.connector = connector

    def create_network_view(self, network_view, extattrs):
        """Create a network view with the given name and EAs."""
        return obj.NetworkView.create(self.connector,
                                      name=network_view,
                                      extattrs=extattrs)

    def delete_network_view(self, network_view):
        """Delete *network_view* if present; the 'default' view is kept."""
        # never delete default network view
        if network_view == 'default':
            return
        nview = obj.NetworkView.search(self.connector,
                                       name=network_view)
        if nview:
            nview.delete()

    def create_dns_view(self, network_view, dns_view):
        """Create DNS view *dns_view* inside *network_view*."""
        return obj.DNSView.create(self.connector,
                                  name=dns_view,
                                  network_view=network_view)

    def delete_dns_view(self, dns_view):
        """Delete the named DNS view if it exists."""
        dns_view = obj.DNSView.search(self.connector,
                                      name=dns_view)
        if dns_view:
            dns_view.delete()

    def create_network(self, net_view_name, cidr, nameservers=None,
                       members=None, gateway_ip=None, dhcp_trel_ip=None,
                       network_extattrs=None):
        """Create NIOS Network and prepare DHCP options.

        Some DHCP options are valid for IPv4 only, so just skip processing
        them for IPv6 case.

        :param net_view_name: network view name
        :param cidr: network to allocate, example '172.23.23.0/24'
        :param nameservers: list of name servers hosts/ip
        :param members: list of objects.AnyMember objects that are expected
            to serve dhcp for created network
        :param gateway_ip: gateway ip for the network (valid for IPv4 only)
        :param dhcp_trel_ip: ip address of dhcp relay (valid for IPv4 only)
        :param network_extattrs: extensible attributes for network (instance of
            objects.EA)
        :returns: created network (instance of objects.Network)
        """
        ipv4 = ib_utils.determine_ip_version(cidr) == 4
        options = []
        if nameservers:
            options.append(obj.Dhcpoption(name='domain-name-servers',
                                          value=",".join(nameservers)))
        if ipv4 and gateway_ip:
            options.append(obj.Dhcpoption(name='routers',
                                          value=gateway_ip))
        if ipv4 and dhcp_trel_ip:
            options.append(obj.Dhcpoption(name='dhcp-server-identifier',
                                          num=54,
                                          value=dhcp_trel_ip))
        return obj.Network.create(self.connector,
                                  network_view=net_view_name,
                                  cidr=cidr,
                                  members=members,
                                  options=options,
                                  extattrs=network_extattrs,
                                  check_if_exists=False)

    def get_network(self, network_view, cidr):
        """Return the network matching view+cidr, or None."""
        return obj.Network.search(self.connector,
                                  network_view=network_view,
                                  cidr=cidr)

    def create_ip_range(self, network_view, start_ip, end_ip, network,
                        disable, range_extattrs):
        """Creates IPRange or fails if already exists."""
        return obj.IPRange.create(self.connector,
                                  network_view=network_view,
                                  start_addr=start_ip,
                                  end_addr=end_ip,
                                  cidr=network,
                                  disable=disable,
                                  extattrs=range_extattrs,
                                  check_if_exists=False)

    def delete_ip_range(self, network_view, start_ip, end_ip):
        """Delete the range bounded by start/end IPs, if it exists."""
        range = obj.IPRange.search(self.connector,
                                   network_view=network_view,
                                   start_addr=start_ip,
                                   end_addr=end_ip)
        if range:
            range.delete()

    def has_networks(self, network_view_name):
        """Return True if the view contains at least one network."""
        networks = obj.Network.search_all(self.connector,
                                          network_view=network_view_name)
        return bool(networks)

    def network_exists(self, network_view, cidr):
        """Deprecated, use get_network() instead."""
        LOG.warning(
            "DEPRECATION WARNING! Using network_exists() is deprecated "
            "and to be removed in next releases. "
            "Use get_network() or objects.Network.search instead")
        network = obj.Network.search(self.connector,
                                     network_view=network_view,
                                     cidr=cidr)
        return network is not None

    def delete_network(self, network_view, cidr):
        """Delete the network matching view+cidr, if it exists."""
        network = obj.Network.search(self.connector,
                                     network_view=network_view,
                                     cidr=cidr)
        if network:
            network.delete()

    def create_network_from_template(self, network_view, cidr, template,
                                     extattrs):
        """Create a network from a NIOS network template."""
        return obj.Network.create(self.connector,
                                  network_view=network_view,
                                  cidr=cidr,
                                  template=template,
                                  extattrs=extattrs,
                                  check_if_exists=False)

    def update_network_options(self, ib_network, extattrs=None):
        """Merge *extattrs* into the network's EAs and push the update."""
        if extattrs:
            if ib_network.extattrs:
                # Merge EA values as dicts
                ea_dict = ib_network.extattrs.ea_dict
                ea_dict.update(extattrs.ea_dict)
                merged_ea = obj.EA(ea_dict)
                ib_network.extattrs = merged_ea
            else:
                ib_network.extattrs = extattrs
        return ib_network.update()

    def get_host_record(self, dns_view, ip, network_view=None):
        """Return the host record for *ip* in *dns_view*, or None."""
        return obj.HostRecord.search(self.connector,
                                     view=dns_view,
                                     ip=ip,
                                     network_view=network_view)

    def find_hostname(self, dns_view, hostname, ip, network_view=None):
        """Return the host record matching name+ip in *dns_view*, or None."""
        return obj.HostRecord.search(self.connector,
                                     name=hostname,
                                     view=dns_view,
                                     ip=ip,
                                     network_view=network_view)

    def find_host_records_by_mac(self, dns_view, mac, network_view=None):
        """Return all host records whose address matches *mac* (or DUID)."""
        host_records = []
        host_records.extend(obj.HostRecord.search_all(
            self.connector, view=dns_view, mac=mac, network_view=network_view))
        # Unfortunately WAPI does not support search host records by DUID, so
        # search host addresses by duid and then search hosts by name
        ipv6_host_addresses = obj.IPv6HostAddress.search_all(
            self.connector, duid=mac, network_view=network_view)
        ipv6_hosts = []
        for addr in ipv6_host_addresses:
            hosts = obj.HostRecordV6.search_all(
                self.connector, name=addr.host, view=dns_view,
                network_view=network_view)
            for host in hosts:
                if host not in ipv6_hosts:
                    ipv6_hosts.append(host)
        host_records.extend(ipv6_hosts)
        return host_records

    def create_host_record_for_given_ip(self, dns_view, zone_auth,
                                        hostname, mac, ip, extattrs,
                                        use_dhcp, use_dns=True):
        """Create host record <hostname>.<zone_auth> bound to a fixed IP."""
        name = '.'.join([hostname, zone_auth])
        ip_obj = obj.IP.create(ip=ip, mac=mac, configure_for_dhcp=use_dhcp)
        return obj.HostRecord.create(self.connector,
                                     view=dns_view,
                                     name=name,
                                     ip=ip_obj,
                                     configure_for_dns=use_dns,
                                     extattrs=extattrs,
                                     check_if_exists=False)

    def create_host_record_from_range(self, dns_view, network_view_name,
                                      zone_auth, hostname, mac, first_ip,
                                      last_ip, extattrs, use_dhcp,
                                      use_dns=True):
        """Create a host record using the next free IP in [first, last]."""
        name = '.'.join([hostname, zone_auth])
        ip_alloc = obj.IPAllocation.next_available_ip_from_range(
            network_view_name, first_ip, last_ip)
        ip_obj = obj.IP.create(ip=ip_alloc, mac=mac,
                               configure_for_dhcp=use_dhcp)
        return obj.HostRecord.create(self.connector,
                                     view=dns_view,
                                     name=name,
                                     ip=ip_obj,
                                     configure_for_dns=use_dns,
                                     extattrs=extattrs,
                                     check_if_exists=False)

    def delete_host_record(self, dns_view, ip_address, network_view=None):
        """Delete the host record for *ip_address*, if it exists."""
        host_record = obj.HostRecord.search(self.connector,
                                            view=dns_view, ip=ip_address,
                                            network_view=network_view)
        if host_record:
            host_record.delete()

    def create_fixed_address_for_given_ip(self, network_view, mac, ip,
                                          extattrs):
        """Create a fixed address binding *mac* to a specific *ip*."""
        return obj.FixedAddress.create(self.connector,
                                       network_view=network_view,
                                       mac=mac,
                                       ip=ip,
                                       extattrs=extattrs,
                                       check_if_exists=False)

    def create_fixed_address_from_range(self, network_view, mac, first_ip,
                                        last_ip, extattrs):
        """Create a fixed address using the next free IP in [first, last]."""
        ip = obj.IPAllocation.next_available_ip_from_range(
            network_view, first_ip, last_ip)
        return obj.FixedAddress.create(self.connector,
                                       ip=ip,
                                       mac=mac,
                                       network_view=network_view,
                                       extattrs=extattrs,
                                       check_if_exists=False)

    def create_fixed_address_from_cidr(self, netview, mac, cidr, extattrs):
        """Create a fixed address using the next free IP in *cidr*."""
        ip = obj.IPAllocation.next_available_ip_from_cidr(netview, cidr)
        return obj.FixedAddress.create(self.connector,
                                       network_view=netview,
                                       ip=ip,
                                       mac=mac,
                                       extattrs=extattrs,
                                       check_if_exists=False)

    def delete_fixed_address(self, network_view, ip_address):
        """Delete the fixed address for *ip_address*, if it exists."""
        fixed_address = obj.FixedAddress.search(self.connector,
                                                network_view=network_view,
                                                ip=ip_address)
        if fixed_address:
            fixed_address.delete()

    def get_fixed_addresses_by_mac(self, network_view, mac):
        """Return all fixed addresses bound to *mac* in the view."""
        return obj.FixedAddress.search_all(
            self.connector, network_view=network_view, mac=mac)

    def add_ip_to_record(self, host_record, ip, mac, use_dhcp=True):
        """Append *ip* to an existing host record and push the update."""
        ip_obj = obj.IP.create(ip=ip, mac=mac, configure_for_dhcp=use_dhcp)
        host_record.ip.append(ip_obj)
        return host_record.update()

    def add_ip_to_host_record_from_range(self, host_record, network_view,
                                         mac, first_ip, last_ip,
                                         use_dhcp=True):
        """Append the next free IP in [first, last] to a host record."""
        ip_alloc = obj.IPAllocation.next_available_ip_from_range(
            network_view, first_ip, last_ip)
        ip_obj = obj.IP.create(ip=ip_alloc, mac=mac,
                               configure_for_dhcp=use_dhcp)
        host_record.ip.append(ip_obj)
        return host_record.update()

    def delete_ip_from_host_record(self, host_record, ip):
        """Remove *ip* from a host record and push the update."""
        host_record.ip.remove(ip)
        return host_record.update()

    def has_dns_zones(self, dns_view):
        """Return True if the DNS view contains at least one zone."""
        zones = obj.DNSZone.search_all(self.connector, view=dns_view)
        return bool(zones)

    def create_dns_zone(self, dns_view, dns_zone,
                        grid_primary=None, grid_secondaries=None,
                        zone_format=None, ns_group=None, prefix=None,
                        extattrs=None):
        """Create an authoritative DNS zone in *dns_view*."""
        return obj.DNSZone.create(self.connector,
                                  fqdn=dns_zone,
                                  view=dns_view,
                                  extattrs=extattrs,
                                  zone_format=zone_format,
                                  ns_group=ns_group,
                                  prefix=prefix,
                                  grid_primary=grid_primary,
                                  grid_secondaries=grid_secondaries)

    def delete_dns_zone(self, dns_view, dns_zone_fqdn):
        """Delete the zone with the given FQDN, if it exists."""
        dns_zone = obj.DNSZone.search(self.connector,
                                      fqdn=dns_zone_fqdn,
                                      view=dns_view)
        if dns_zone:
            dns_zone.delete()

    def update_dns_zone_attrs(self, dns_view, dns_zone_fqdn, extattrs):
        """Replace the zone's EAs with *extattrs* (no-op when falsy)."""
        if not extattrs:
            return
        dns_zone = obj.DNSZone.search(self.connector,
                                      fqdn=dns_zone_fqdn,
                                      view=dns_view)
        if dns_zone:
            dns_zone.extattrs = extattrs
            dns_zone.update()

    def update_host_record_eas(self, dns_view, ip, extattrs):
        """Replace the EAs of the host record matching *ip*."""
        host_record = obj.HostRecord.search(self.connector,
                                            view=dns_view,
                                            ip=ip)
        if host_record:
            host_record.extattrs = extattrs
            host_record.update()

    def update_fixed_address_eas(self, network_view, ip, extattrs):
        """Replace the EAs of the fixed address matching *ip*."""
        fixed_address = obj.FixedAddress.search(self.connector,
                                                network_view=network_view,
                                                ip=ip)
        if fixed_address:
            fixed_address.extattrs = extattrs
            fixed_address.update()

    def update_dns_record_eas(self, dns_view, ip, extattrs):
        """Replace the EAs of the A/AAAA and PTR records matching *ip*."""
        a_record = obj.ARecordBase.search(self.connector,
                                          ip=ip,
                                          view=dns_view)
        if a_record:
            a_record.extattrs = extattrs
            a_record.update()
        ptr_record = obj.PtrRecord.search(self.connector,
                                          ip=ip,
                                          view=dns_view)
        if ptr_record:
            ptr_record.extattrs = extattrs
            ptr_record.update()

    def bind_name_with_host_record(self, dns_view, ip, name, extattrs,
                                   network_view=None):
        """Set the name and EAs of the host record matching *ip*."""
        host_record = obj.HostRecord.search(self.connector,
                                            view=dns_view,
                                            ip=ip,
                                            network_view=network_view)
        if host_record:
            host_record.name = name
            host_record.extattrs = extattrs
            host_record.update()

    def bind_name_with_record_a(self, dns_view, ip, name, bind_list,
                                extattrs):
        """Create/refresh A (or AAAA) and PTR records per *bind_list*."""
        is_ipv4 = ib_utils.determine_ip_version(ip) == 4
        if ((is_ipv4 and 'record:a' in bind_list) or
                (not is_ipv4 and 'record:aaaa' in bind_list)):
            obj.ARecordBase.create(self.connector,
                                   view=dns_view,
                                   ip=ip,
                                   name=name,
                                   extattrs=extattrs,
                                   update_if_exists=True)
        if 'record:ptr' in bind_list:
            obj.PtrRecord.create(self.connector,
                                 view=dns_view,
                                 ip=ip,
                                 ptrdname=name,
                                 extattrs=extattrs,
                                 update_if_exists=True)

    def unbind_name_from_record_a(self, dns_view, ip, name, unbind_list):
        """Delete A/AAAA, PTR and associated records per *unbind_list*."""
        is_ipv4 = ib_utils.determine_ip_version(ip) == 4
        if ((is_ipv4 and 'record:a' in unbind_list) or
                (not is_ipv4 and 'record:aaaa' in unbind_list)):
            a_record = obj.ARecordBase.search(self.connector,
                                              view=dns_view,
                                              ip=ip,
                                              name=name)
            if a_record:
                self.delete_objects_associated_with_a_record(a_record.name,
                                                             a_record.view,
                                                             unbind_list)
                a_record.delete()
        if 'record:ptr' in unbind_list:
            ptr_record = obj.PtrRecord.search(self.connector,
                                              view=dns_view,
                                              ip=ip,
                                              ptrdname=name)
            if ptr_record:
                ptr_record.delete()

    def get_member(self, member):
        """Populate *member* from NIOS and return it."""
        member.fetch()
        return member

    def get_all_ea_definitions(self):
        """Return every extensible-attribute definition on the grid."""
        return obj.EADefinition.search_all(self.connector)

    def create_ea_definition(self, ea_def, reraise=False):
        """Create an EA definition; log (and optionally re-raise) failures."""
        try:
            return obj.EADefinition.create(self.connector,
                                           check_if_exists=False,
                                           **ea_def)
        except ib_ex.InfobloxCannotCreateObject:
            LOG.error('Unable to create Extensible Attribute Definition '
                      '%s' % ea_def)
            if reraise:
                raise

    def create_required_ea_definitions(self, required_ea_defs, reraise=False):
        """Create any of *required_ea_defs* missing from the grid.

        :returns: the list of definitions actually created.
        """
        existing_ea_defs = self.get_all_ea_definitions()
        missing_ea_defs = []
        for req_def in required_ea_defs:
            if not [ea_def for ea_def in existing_ea_defs
                    if ea_def.name == req_def['name']]:
                missing_ea_defs.append(req_def)

        created_ea_defs = []
        for ea_def in missing_ea_defs:
            if self.create_ea_definition(ea_def, reraise=reraise):
                created_ea_defs.append(ea_def)
        return created_ea_defs

    def restart_all_services(self, member):
        """Ask *member* to restart all of its services if needed."""
        if not member._ref:
            member.fetch(only_ref=True)
        self.connector.call_func('restartservices', member._ref,
                                 {'restart_option': 'RESTART_IF_NEEDED',
                                  'service_option': 'ALL'})

    def delete_objects_associated_with_a_record(self, name, view, delete_list):
        """Deletes records associated with record:a or record:aaaa."""
        search_objects = {}
        if 'record:cname' in delete_list:
            search_objects['record:cname'] = 'canonical'
        if 'record:txt' in delete_list:
            search_objects['record:txt'] = 'name'

        if not search_objects:
            return

        for obj_type, search_type in search_objects.items():
            payload = {'view': view,
                       search_type: name}
            ib_objs = self.connector.get_object(obj_type, payload)
            if ib_objs:
                for ib_obj in ib_objs:
                    self.delete_object_by_ref(ib_obj['_ref'])

    def delete_all_associated_objects(self, network_view, ip, delete_list):
        # Deprecated no-op kept for backward compatibility.
        LOG.warning(
            "DEPRECATION WARNING! Using delete_all_associated_objects() "
            "is deprecated and to be removed in next releases. "
            "Use unbind_name_from_record_a() instead.")

    def delete_object_by_ref(self, ref):
        """Delete a WAPI object by reference, ignoring 'cannot delete'."""
        try:
            self.connector.delete_object(ref)
        except ib_ex.InfobloxCannotDeleteObject:
            pass
| 44.310559
| 79
| 0.517382
|
4a10b63b4e8a9efbada0c30c59be8b75c9e0b918
| 2,568
|
py
|
Python
|
shellstreaming/core/partitioned_batch_queue.py
|
laysakura/shellstreaming
|
a1e34af507b94d51ba588ad4a039ce0115b46475
|
[
"Apache-2.0"
] | 1
|
2015-10-04T10:03:27.000Z
|
2015-10-04T10:03:27.000Z
|
shellstreaming/core/partitioned_batch_queue.py
|
laysakura/shellstreaming
|
a1e34af507b94d51ba588ad4a039ce0115b46475
|
[
"Apache-2.0"
] | null | null | null |
shellstreaming/core/partitioned_batch_queue.py
|
laysakura/shellstreaming
|
a1e34af507b94d51ba588ad4a039ce0115b46475
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
shellstreaming.core.partitioned_batch_queue
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
:synopsis:
"""
# standard modules
import threading
# 3rd party modules
import pyhashxx
# my modules
from shellstreaming.core.batch import Batch
from shellstreaming.core.batch_queue import BatchQueue
class PartitionedBatchQueue(object):
    """Queue of output batch, partitioned over several internal queues.

    Records pushed in are distributed across ``num_q`` internal
    :class:`BatchQueue` instances by hashing the value of the
    ``partition_key`` column, so each consumer (worker) receives a
    disjoint, key-consistent subset of the stream.
    """

    def __init__(self, num_q, partition_key):
        """Constructor

        :param num_q: number of internal :class:`BatchQueue`.
            Number of workers is expected to be used
        :param partition_key: column name of records in batch.
            value of this column is used to distribute record to internal queues.
        """
        self._qs = [BatchQueue() for _ in range(num_q)]
        self._key = partition_key
        self._records = 0              # records currently buffered over all queues
        self._lock = threading.Lock()  # guards self._records
        self._finished = False

    def push(self, batch):
        """Distribute *batch*'s records to the internal queues.

        Pushing ``None`` marks end-of-stream and is forwarded to every
        internal queue.
        """
        if batch is None:
            # BUG FIX: the original used map() purely for its side effect,
            # which is a no-op on Python 3 (map is lazy).  Use an explicit
            # loop so every consumer gets the end-of-stream marker.
            for q in self._qs:
                q.push(None)
            return

        with self._lock:
            self._records += len(batch)

        # [todo] - performance: splitting batch too small?
        rdef = batch.record_def()

        # distribute records using hash function
        partitioned_records = [[] for _ in self._qs]
        key_idx = rdef.colindex_by_colname(self._key)
        for rec in batch:
            val = rec[key_idx]
            h = pyhashxx.hashxx(bytes(val))  # [todo] - customize hash function?
            partitioned_records[h % len(self._qs)].append(rec)

        # really push distributed records into BatchQueue
        for q, records in zip(self._qs, partitioned_records):
            q.push(Batch(rdef, records))

    def pop(self, pop_from):
        """
        :param pop_from: queue id to pop batch from.
            Worker number is expected to be used.
        :type pop_from: int
        :returns: next batch, or ``None`` once the stream has finished.
        """
        q = self._qs[pop_from]
        batch = q.pop()
        if batch is None:
            self._finished = True
            # supply `None` again in case other consumers are informed `empty`
            self.push(None)
            return None

        with self._lock:
            self._records -= len(batch)
        return batch

    def records(self):
        """Number of buffered records, or ``None`` once the stream finished."""
        if self._finished:
            return None
        return self._records
| 28.533333
| 95
| 0.574377
|
4a10b672d508b3e1d0a09ba2042522ddeae35724
| 3,001
|
py
|
Python
|
old/qt/ml.py
|
blanpa/Machine_Learning_Demonstrator
|
d0bce6e55e0ee879fae05c2bced93ec19ecda028
|
[
"MIT"
] | null | null | null |
old/qt/ml.py
|
blanpa/Machine_Learning_Demonstrator
|
d0bce6e55e0ee879fae05c2bced93ec19ecda028
|
[
"MIT"
] | null | null | null |
old/qt/ml.py
|
blanpa/Machine_Learning_Demonstrator
|
d0bce6e55e0ee879fae05c2bced93ec19ecda028
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'qt\ml.ui'
#
# Created by: PyQt5 UI code generator 5.9.2
#
# WARNING! All changes made in this file will be lost!
from PyQt5 import QtCore, QtGui, QtWidgets
class Ui_machinelearning(object):
    # NOTE: auto-generated by pyuic5 from qt/ml.ui (see header warning).
    # Regenerate from the .ui file instead of editing this class by hand.

    def setupUi(self, machinelearning):
        """Build the widget tree, menu bar and status bar on *machinelearning*."""
        machinelearning.setObjectName("machinelearning")
        machinelearning.resize(867, 666)
        self.centralwidget = QtWidgets.QWidget(machinelearning)
        self.centralwidget.setObjectName("centralwidget")
        self.verticalLayout = QtWidgets.QVBoxLayout(self.centralwidget)
        self.verticalLayout.setObjectName("verticalLayout")
        self.label = QtWidgets.QLabel(self.centralwidget)
        font = QtGui.QFont()
        font.setFamily("E+H Serif")
        font.setPointSize(26)
        font.setBold(False)
        font.setWeight(50)
        self.label.setFont(font)
        self.label.setWordWrap(True)
        self.label.setObjectName("label")
        self.verticalLayout.addWidget(self.label)
        self.bild = QtWidgets.QLabel(self.centralwidget)
        self.bild.setObjectName("bild")
        self.verticalLayout.addWidget(self.bild)
        self.text = QtWidgets.QLabel(self.centralwidget)
        self.text.setObjectName("text")
        self.verticalLayout.addWidget(self.text)
        self.aufnehmen = QtWidgets.QPushButton(self.centralwidget)
        self.aufnehmen.setObjectName("aufnehmen")
        self.verticalLayout.addWidget(self.aufnehmen)
        self.einlernen = QtWidgets.QPushButton(self.centralwidget)
        self.einlernen.setObjectName("einlernen")
        self.verticalLayout.addWidget(self.einlernen)
        self.testen = QtWidgets.QPushButton(self.centralwidget)
        self.testen.setObjectName("testen")
        self.verticalLayout.addWidget(self.testen)
        machinelearning.setCentralWidget(self.centralwidget)
        self.menubar = QtWidgets.QMenuBar(machinelearning)
        self.menubar.setGeometry(QtCore.QRect(0, 0, 867, 31))
        self.menubar.setObjectName("menubar")
        machinelearning.setMenuBar(self.menubar)
        self.statusbar = QtWidgets.QStatusBar(machinelearning)
        self.statusbar.setObjectName("statusbar")
        machinelearning.setStatusBar(self.statusbar)

        self.retranslateUi(machinelearning)
        QtCore.QMetaObject.connectSlotsByName(machinelearning)

    def retranslateUi(self, machinelearning):
        """Apply (re)translated display strings to all widgets."""
        _translate = QtCore.QCoreApplication.translate
        machinelearning.setWindowTitle(_translate("machinelearning", "MainWindow"))
        self.label.setText(_translate("machinelearning", "machine learning"))
        self.bild.setText(_translate("machinelearning", "TextLabel"))
        self.text.setText(_translate("machinelearning", "TextLabel"))
        self.aufnehmen.setText(_translate("machinelearning", "Aufnehmen"))
        self.einlernen.setText(_translate("machinelearning", "Einlernen"))
        self.testen.setText(_translate("machinelearning", "Testen"))
| 45.469697
| 83
| 0.710763
|
4a10b86615ed0ae9282aba7bc0b2cd719879f5d9
| 8,009
|
py
|
Python
|
deps/v8/tools/push-to-trunk/bump_up_version.py
|
dberesford/io.js
|
560c82fb8340903d958d99175aa206e826d04df6
|
[
"Artistic-2.0"
] | 78
|
2016-03-03T02:27:35.000Z
|
2022-01-18T14:11:25.000Z
|
tools/push-to-trunk/bump_up_version.py
|
realjade/v8
|
f0c9cc0bbfd461c7f516799d9a58e9a7395f737e
|
[
"BSD-3-Clause-Clear",
"BSD-3-Clause"
] | 479
|
2016-02-10T00:21:41.000Z
|
2020-11-26T09:40:03.000Z
|
tools/push-to-trunk/bump_up_version.py
|
realjade/v8
|
f0c9cc0bbfd461c7f516799d9a58e9a7395f737e
|
[
"BSD-3-Clause-Clear",
"BSD-3-Clause"
] | 16
|
2016-04-15T05:02:23.000Z
|
2020-03-04T16:26:01.000Z
|
#!/usr/bin/env python
# Copyright 2014 the V8 project authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
Script for auto-increasing the version on bleeding_edge.
The script can be run regularly by a cron job. It will increase the build
level of the version on bleeding_edge if:
- the lkgr version is smaller than the version of the latest revision,
- the lkgr version is not a version change itself,
- the tree is not closed for maintenance.
The new version will be the maximum of the bleeding_edge and trunk versions +1.
E.g. latest bleeding_edge version: 3.22.11.0 and latest trunk 3.23.0.0 gives
the new version 3.23.1.0.
This script requires a depot tools git checkout. I.e. 'fetch v8'.
"""
import argparse
import os
import sys
from common_includes import *
VERSION_BRANCH = "auto-bump-up-version"
# TODO(machenbach): Add vc interface that works on git mirror.
class Preparation(Step):
  # Ensures a clean, up-to-date master checkout before any version work.
  MESSAGE = "Preparation."

  def RunStep(self):
    # TODO(machenbach): Remove after the git switch.
    if(self.Config("PERSISTFILE_BASENAME") ==
       "/tmp/v8-bump-up-version-tempfile"):
      print "This script is disabled until after the v8 git migration."
      return True

    # Check for a clean workdir.
    if not self.GitIsWorkdirClean():  # pragma: no cover
      # This is in case a developer runs this script on a dirty tree.
      self.GitStash()

    self.GitCheckout("master")
    self.GitPull()

    # Ensure a clean version branch.
    self.DeleteBranch(VERSION_BRANCH)
class GetCurrentBleedingEdgeVersion(Step):
  # Records the tip-of-master version and its commit hash for later steps.
  MESSAGE = "Get latest bleeding edge version."

  def RunStep(self):
    self.GitCheckout("master")

    # Store latest version and revision.
    self.ReadAndPersistVersion()
    self["latest_version"] = self.ArrayToVersion("")
    self["latest"] = self.GitLog(n=1, format="%H")
    print "Bleeding edge version: %s" % self["latest_version"]
# This step is pure paranoia. It forbids the script to continue if the last
# commit changed version.cc. Just in case the other bailout has a bug, this
# prevents the script from continuously commiting version changes.
class LastChangeBailout(Step):
  # Safety net: abort (return True) when the newest commit already
  # touched the version file, to avoid commit loops.
  MESSAGE = "Stop script if the last change modified the version."

  def RunStep(self):
    if VERSION_FILE in self.GitChangedFiles(self["latest"]):
      print "Stop due to recent version change."
      return True
# TODO(machenbach): Implement this for git.
class FetchLKGR(Step):
  # Fetches the last-known-good svn revision from the v8 status app.
  MESSAGE = "Fetching V8 LKGR."

  def RunStep(self):
    lkgr_url = "https://v8-status.appspot.com/lkgr"
    # Retry once after 5 seconds on failure.
    self["lkgr_svn"] = self.ReadURL(lkgr_url, wait_plan=[5])
# TODO(machenbach): Implement this for git. With a git lkgr we could simply
# checkout that revision. With svn, we have to search backwards until that
# revision is found.
class GetLKGRVersion(Step):
  # Maps the svn LKGR revision to a git hash and reads its version.
  MESSAGE = "Get bleeding edge lkgr version."

  def RunStep(self):
    self.GitCheckout("master")
    # If the commit was made from svn, there is a mapping entry in the commit
    # message.
    self["lkgr"] = self.GitLog(
        grep="^git-svn-id: [^@]*@%s [A-Za-z0-9-]*$" % self["lkgr_svn"],
        format="%H")

    # FIXME(machenbach): http://crbug.com/391712 can lead to svn lkgrs on the
    # trunk branch (rarely).
    if not self["lkgr"]:  # pragma: no cover
      self.Die("No git hash found for svn lkgr.")

    self.GitCreateBranch(VERSION_BRANCH, self["lkgr"])
    self.ReadAndPersistVersion("lkgr_")
    self["lkgr_version"] = self.ArrayToVersion("lkgr_")
    print "LKGR version: %s" % self["lkgr_version"]

    # Ensure a clean version branch.
    self.GitCheckout("master")
    self.DeleteBranch(VERSION_BRANCH)
class LKGRVersionUpToDateBailout(Step):
  """Aborts when the LKGR is itself a version change, or when master's
  version is already newer than the LKGR's."""
  MESSAGE = "Stop script if the lkgr has a renewed version."
  def RunStep(self):
    # If a version-change commit becomes the lkgr, don't bump up the version
    # again.
    if VERSION_FILE in self.GitChangedFiles(self["lkgr"]):
      print "Stop because the lkgr is a version change itself."
      return True
    # Don't bump up the version if it got updated already after the lkgr.
    if SortingKey(self["lkgr_version"]) < SortingKey(self["latest_version"]):
      print("Stop because the latest version already changed since the lkgr "
            "version.")
      return True
class GetTrunkVersion(Step):
  """Reads and persists the version on the candidates (trunk) branch."""
  MESSAGE = "Get latest trunk version."
  def RunStep(self):
    self.GitCheckout("candidates")
    self.GitPull()
    self.ReadAndPersistVersion("trunk_")
    self["trunk_version"] = self.ArrayToVersion("trunk_")
    print "Trunk version: %s" % self["trunk_version"]
class CalculateVersion(Step):
  """Computes the next version: max(trunk, lkgr) with the build number
  incremented and the patch level reset to zero."""
  MESSAGE = "Calculate the new version."
  def RunStep(self):
    if self["lkgr_build"] == "9999": # pragma: no cover
      # If version control on bleeding edge was switched off, just use the last
      # trunk version.
      self["lkgr_version"] = self["trunk_version"]
    # The new version needs to be greater than the max on bleeding edge and
    # trunk.
    max_version = max(self["trunk_version"],
                      self["lkgr_version"],
                      key=SortingKey)
    # Strip off possible leading zeros.
    self["new_major"], self["new_minor"], self["new_build"], _ = (
        map(str, map(int, max_version.split("."))))
    self["new_build"] = str(int(self["new_build"]) + 1)
    self["new_patch"] = "0"
    self["new_version"] = ("%s.%s.%s.0" %
        (self["new_major"], self["new_minor"], self["new_build"]))
    print "New version is %s" % self["new_version"]
    if self._options.dry_run: # pragma: no cover
      print "Dry run, skipping version change."
      return True
class CheckTreeStatus(Step):
  """Skips the version bump while the tree status message signals
  maintenance or a commit freeze."""
  MESSAGE = "Checking v8 tree status message."
  def RunStep(self):
    status_url = "https://v8-status.appspot.com/current?format=json"
    # Retry with increasing back-off; the status app can be flaky.
    status_json = self.ReadURL(status_url, wait_plan=[5, 20, 300, 300])
    message = json.loads(status_json)["message"]
    if re.search(r"maintenance|no commits", message, flags=re.I):
      print "Skip version change by tree status: \"%s\"" % message
      return True
class ChangeVersion(Step):
  """Commits the new version on a scratch branch and lands it via SVN or
  git, always cleaning up the scratch branch (even on failure)."""
  MESSAGE = "Bump up the version."
  def RunStep(self):
    self.GitCreateBranch(VERSION_BRANCH, "master")
    self.SetVersion(os.path.join(self.default_cwd, VERSION_FILE), "new_")
    try:
      msg = "[Auto-roll] Bump up version to %s" % self["new_version"]
      self.GitCommit("%s\n\nTBR=%s" % (msg, self._options.author),
                     author=self._options.author)
      if self._options.svn:
        self.SVNCommit("branches/bleeding_edge", msg)
      else:
        # bypass_hooks: this is an automated TBR'd commit.
        self.GitUpload(author=self._options.author,
                       force=self._options.force_upload,
                       bypass_hooks=True)
        self.GitDCommit()
      print "Successfully changed the version."
    finally:
      # Clean up.
      self.GitCheckout("master")
      self.DeleteBranch(VERSION_BRANCH)
class BumpUpVersion(ScriptsBase):
  """Script driver: wires options, config and the ordered step sequence
  for the automatic version bump."""
  def _PrepareOptions(self, parser):
    parser.add_argument("--dry_run", help="Don't commit the new version.",
                        default=False, action="store_true")
  def _ProcessOptions(self, options): # pragma: no cover
    # An author email is mandatory for real (non-dry-run) commits.
    if not options.dry_run and not options.author:
      print "Specify your chromium.org email with -a"
      return False
    options.wait_for_lgtm = False
    options.force_readline_defaults = True
    options.force_upload = True
    return True
  def _Config(self):
    return {
      "PERSISTFILE_BASENAME": "/tmp/v8-bump-up-version-tempfile",
      "PATCH_FILE": "/tmp/v8-bump-up-version-tempfile-patch-file",
    }
  def _Steps(self):
    # Order matters: all bailout checks run before ChangeVersion commits.
    return [
      Preparation,
      GetCurrentBleedingEdgeVersion,
      LastChangeBailout,
      FetchLKGR,
      GetLKGRVersion,
      LKGRVersionUpToDateBailout,
      GetTrunkVersion,
      CalculateVersion,
      CheckTreeStatus,
      ChangeVersion,
    ]
# Script entry point: run all steps and exit with their status code.
if __name__ == "__main__": # pragma: no cover
  sys.exit(BumpUpVersion().Run())
| 31.908367
| 79
| 0.677613
|
4a10b8d7fe8f97292e24927f64fd8083704d17e2
| 1,916
|
py
|
Python
|
synth.py
|
vam-google/python-texttospeech
|
5d5f6b4f408958b5602e72030ebb4d05fde5a968
|
[
"Apache-2.0"
] | null | null | null |
synth.py
|
vam-google/python-texttospeech
|
5d5f6b4f408958b5602e72030ebb4d05fde5a968
|
[
"Apache-2.0"
] | null | null | null |
synth.py
|
vam-google/python-texttospeech
|
5d5f6b4f408958b5602e72030ebb4d05fde5a968
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This script is used to synthesize generated parts of this library."""
import re
import synthtool as s
from synthtool import gcp
# Bazel-based GAPIC generator and the shared templating helper.
gapic = gcp.GAPICBazel()
common = gcp.CommonTemplates()
# Versions to generate; after the loop, `library` holds the last entry
# ("v1"), whose import-alias module is the one kept below.
versions = ["v1beta1", "v1"]
# ----------------------------------------------------------------------------
# Generate texttospeech GAPIC layer
# ----------------------------------------------------------------------------
for version in versions:
    library = gapic.py_library(
        service="texttospeech",
        version=version,
        bazel_target=f"//google/cloud/texttospeech/{version}:texttospeech-{version}-py",
        include_protos=True,
    )
    s.move(library / f"google/cloud/texttospeech_{version}")
    s.move(library / f"tests/unit/gapic/{version}")
    s.move(library / f"docs/gapic/{version}")
# Use the highest version library to generate import alias.
s.move(library / "google/cloud/texttospeech.py")
# Fix bad docstrings.
s.replace("**/gapic/*_client.py", r'\\"(.+?)-\*\\"', r'"\1-\\*"')
# ----------------------------------------------------------------------------
# Add templated files
# ----------------------------------------------------------------------------
templated_files = common.py_library(unit_cov_level=85, cov_level=85)
s.move(templated_files)
# Re-format the generated sources with black via nox.
s.shell.run(["nox", "-s", "blacken"], hide_output=False)
| 36.846154
| 88
| 0.600731
|
4a10b940d175d8ae292a4891f4eff6576a17e131
| 1,233
|
py
|
Python
|
docs/conf.py
|
jdhodges/trackfilter
|
fff52cc2a99392519846340a30992da2c5de4a80
|
[
"MIT"
] | null | null | null |
docs/conf.py
|
jdhodges/trackfilter
|
fff52cc2a99392519846340a30992da2c5de4a80
|
[
"MIT"
] | null | null | null |
docs/conf.py
|
jdhodges/trackfilter
|
fff52cc2a99392519846340a30992da2c5de4a80
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import os
# Sphinx extensions enabled for this documentation build.
extensions = [
    'sphinx.ext.autodoc',
    'sphinx.ext.autosummary',
    'sphinx.ext.coverage',
    'sphinx.ext.doctest',
    'sphinx.ext.extlinks',
    'sphinx.ext.ifconfig',
    'sphinx.ext.napoleon',
    'sphinx.ext.todo',
    'sphinx.ext.viewcode',
]
source_suffix = '.rst'
master_doc = 'index'
# Project metadata.
project = 'TrackFilter'
year = '2019'
author = 'Stephane Bruckert'
copyright = '{0}, {1}'.format(year, author)
version = release = '0.2.0'
pygments_style = 'trac'
templates_path = ['.']
# Shorthand roles for linking GitHub issues and pull requests in the docs.
extlinks = {
    'issue': ('https://github.com/mirrorfm/trackfilter/issues/%s', '#'),
    'pr': ('https://github.com/mirrorfm/trackfilter/pull/%s', 'PR #'),
}
# on_rtd is whether we are on readthedocs.org
on_rtd = os.environ.get('READTHEDOCS', None) == 'True'
if not on_rtd:  # only set the theme if we're building docs locally
    html_theme = 'sphinx_rtd_theme'
html_use_smartypants = True
html_last_updated_fmt = '%b %d, %Y'
html_split_index = False
html_sidebars = {
    '**': ['searchbox.html', 'globaltoc.html', 'sourcelink.html'],
}
html_short_title = '%s-%s' % (project, version)
# Napoleon (Google/NumPy-style docstring) parsing options.
napoleon_use_ivar = True
napoleon_use_rtype = False
napoleon_use_param = False
| 25.6875
| 72
| 0.67721
|
4a10b9ad1182ad6c90ea8b3cc58f6803341e6990
| 5,015
|
py
|
Python
|
pyocd/target/builtin/target_HC32L07x.py
|
Kris-b50122/pyOCD
|
df605c7aa04d72c5187ab652445063cc232cf378
|
[
"Apache-2.0"
] | 3
|
2019-06-05T01:32:06.000Z
|
2020-05-20T08:55:46.000Z
|
pyocd/target/builtin/target_HC32L07x.py
|
Kris-b50122/pyOCD
|
df605c7aa04d72c5187ab652445063cc232cf378
|
[
"Apache-2.0"
] | 1
|
2019-07-05T10:13:09.000Z
|
2019-07-05T10:51:43.000Z
|
pyocd/target/builtin/target_HC32L07x.py
|
Kris-b50122/pyOCD
|
df605c7aa04d72c5187ab652445063cc232cf378
|
[
"Apache-2.0"
] | 1
|
2019-01-21T03:01:53.000Z
|
2019-01-21T03:01:53.000Z
|
# pyOCD debugger
# Copyright (c) 2019 Arm Limited
# SPDX-License-Identifier: Apache-2.0
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ...flash.flash import Flash
from ...coresight.coresight_target import CoreSightTarget
from ...core.memory_map import (FlashRegion, RamRegion, MemoryMap)
from ...debug.svd.loader import SVDFile
#DEBUG_ACTIVE
# Device register written after connect (see post_connect_hook below).
# NOTE(review): presumably keeps peripherals active while the core is halted
# under debug — confirm against the HC32L07x reference manual.
DEBUG_ACTIVE = 0x40002038
DEBUG_ACTIVE_VAL = 0x00000FFF
# Flash programming algorithm blob loaded into target RAM by pyOCD.
FLASH_ALGO = {
    'load_address' : 0x20000000,
    # Flash algorithm as a hex string
    'instructions': [
    0xE00ABE00, 0x062D780D, 0x24084068, 0xD3000040, 0x1E644058, 0x1C49D1FA, 0x2A001E52, 0x4770D1F2,
    0x4770ba40, 0x4770bac0, 0x6a014809, 0xd4fc06c9, 0x62c14908, 0x62c14908, 0x22036a01, 0x62014311,
    0x60092100, 0x06c96a01, 0x2000d4fc, 0x00004770, 0x40020000, 0x00005a5a, 0x0000a5a5, 0x6a0a4909,
    0xd4fc06d2, 0x62ca4a08, 0x62ca4a08, 0x08926a0a, 0x1c920092, 0x2200620a, 0x6a086002, 0xd4fc06c0,
    0x47702000, 0x40020000, 0x00005a5a, 0x0000a5a5, 0x2000b438, 0x70084669, 0x68014846, 0x0f4a0609,
    0x700a4669, 0x89094944, 0x054968c2, 0x0ad20d49, 0x430a02d2, 0x4a4160c2, 0x49416082, 0x68036081,
    0x022d2507, 0x600343ab, 0x60816082, 0x24016803, 0x60034323, 0x051b68c3, 0x466bd5fc, 0x2b00781b,
    0x6082d025, 0x68036081, 0x43a324e0, 0x60826003, 0x466b6081, 0x2b01781b, 0x2b02d006, 0x2b03d009,
    0x2b04d00c, 0xe00ed113, 0x24026803, 0x600343a3, 0x6803e00d, 0x43a32404, 0xe0086003, 0x24086803,
    0x600343a3, 0x6803e003, 0x43a32410, 0x60826003, 0x68036081, 0x600343ab, 0x60816082, 0x24036803,
    0x43a302e4, 0x6a036003, 0x43230524, 0x481d6203, 0x62c162c2, 0x240c6a03, 0x620343a3, 0x62c162c2,
    0x60032320, 0x62c162c2, 0x60432317, 0x62c162c2, 0x6083231b, 0x62c162c2, 0x60c34b13, 0x62c162c2,
    0x61034b12, 0x62c162c2, 0x61432318, 0x62c162c2, 0x618323f0, 0x62c162c2, 0x00db237d, 0x62c261c3,
    0x230062c1, 0x630343db, 0x62c162c2, 0xbc386343, 0x47702000, 0x40002000, 0x00100c00, 0x00005a5a,
    0x0000a5a5, 0x40020000, 0x00004650, 0x000222e0, 0x4b0db430, 0x25004c0b, 0x4c0c62dc, 0x6a1c62dc,
    0x00a408a4, 0x621c1c64, 0xd9072900, 0x55445d54, 0x06e46a1c, 0x1c6dd4fc, 0xd3f7428d, 0x2000bc30,
    0x00004770, 0x00005a5a, 0x40020000, 0x0000a5a5, 0x49054806, 0x4a0662c1, 0x230062c2, 0x62c16303,
    0x634362c2, 0x47704618, 0x00005a5a, 0x40020000, 0x0000a5a5, 0x00000000
    ],
    # Relative function addresses
    'pc_init': 0x20000091,
    'pc_unInit': 0x20000211,
    'pc_program_page': 0x200001d1,
    'pc_erase_sector': 0x2000005d,
    'pc_eraseAll': 0x20000029,
    'static_base' : 0x20000000 + 0x00000020 + 0x00000214,
    'begin_stack' : 0x20000500,
    'begin_data' : 0x20000000 + 0x1000,
    'page_size' : 0x200,
    'analyzer_supported' : False,
    'analyzer_address' : 0x00000000,
    'page_buffers' : [0x20001000, 0x20001200],   # Enable double buffering
    'min_program_length' : 0x200,
}
class HC32L072(CoreSightTarget):
    """HDSC HC32L072 target: 128 KiB flash @ 0x0, 16 KiB RAM @ 0x20000000."""
    VENDOR = "HDSC"
    MEMORY_MAP = MemoryMap(
        FlashRegion( start=0x00000000, length=0x20000, sector_size=0x200,
                        is_boot_memory=True,
                        algo=FLASH_ALGO),
        RamRegion(   start=0x20000000, length=0x4000)
        )
    def __init__(self, session):
        super(HC32L072, self).__init__(session, self.MEMORY_MAP)
        self._svd_location = SVDFile.from_builtin("HC32L07x.svd")
    def post_connect_hook(self):
        # Write the debug-active value right after connecting (see the
        # DEBUG_ACTIVE constants above for the register and value).
        self.write32(DEBUG_ACTIVE, DEBUG_ACTIVE_VAL)
class HC32L073(CoreSightTarget):
    """HDSC HC32L073 target: 128 KiB flash @ 0x0, 16 KiB RAM @ 0x20000000."""
    VENDOR = "HDSC"
    MEMORY_MAP = MemoryMap(
        FlashRegion( start=0x00000000, length=0x20000, sector_size=0x200,
                        is_boot_memory=True,
                        algo=FLASH_ALGO),
        RamRegion(   start=0x20000000, length=0x4000)
        )
    def __init__(self, session):
        super(HC32L073, self).__init__(session, self.MEMORY_MAP)
        self._svd_location = SVDFile.from_builtin("HC32L07x.svd")
    def post_connect_hook(self):
        # Write the debug-active value right after connecting (see the
        # DEBUG_ACTIVE constants above for the register and value).
        self.write32(DEBUG_ACTIVE, DEBUG_ACTIVE_VAL)
class HC32F072(CoreSightTarget):
    """HDSC HC32F072 target: 128 KiB flash @ 0x0, 16 KiB RAM @ 0x20000000."""
    VENDOR = "HDSC"
    MEMORY_MAP = MemoryMap(
        FlashRegion( start=0x00000000, length=0x20000, sector_size=0x200,
                        is_boot_memory=True,
                        algo=FLASH_ALGO),
        RamRegion(   start=0x20000000, length=0x4000)
        )
    def __init__(self, session):
        super(HC32F072, self).__init__(session, self.MEMORY_MAP)
        self._svd_location = SVDFile.from_builtin("HC32L07x.svd")
    def post_connect_hook(self):
        # Write the debug-active value right after connecting (see the
        # DEBUG_ACTIVE constants above for the register and value).
        self.write32(DEBUG_ACTIVE, DEBUG_ACTIVE_VAL)
| 40.443548
| 99
| 0.722632
|
4a10ba7b3b184f9369d3ba3bc47ae150b33cb234
| 2,650
|
py
|
Python
|
gamedb/settings.py
|
mammawhy9/gamedb
|
bc9945dece1f3eb53157fc95c031a8da6425c546
|
[
"MIT"
] | 1
|
2016-10-13T16:47:08.000Z
|
2016-10-13T16:47:08.000Z
|
gamedb/settings.py
|
mammawhy9/gamedb
|
bc9945dece1f3eb53157fc95c031a8da6425c546
|
[
"MIT"
] | null | null | null |
gamedb/settings.py
|
mammawhy9/gamedb
|
bc9945dece1f3eb53157fc95c031a8da6425c546
|
[
"MIT"
] | null | null | null |
"""
Django settings for gamedb project.
Generated by 'django-admin startproject' using Django 1.8.6.
For more information on this file, see
https://docs.djangoproject.com/en/1.8/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.8/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.8/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '&2x^*51fc6u%=u=x-8k2f-deybrw^3)fingy&ctv(e7%_-u5&^'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = (
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'game',
)
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'django.middleware.security.SecurityMiddleware',
)
ROOT_URLCONF = 'gamedb.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'gamedb.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.8/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Internationalization
# https://docs.djangoproject.com/en/1.8/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.8/howto/static-files/
STATIC_URL = '/static/'
| 25.480769
| 71
| 0.698113
|
4a10ba858c26a3d757443b16c303d30676591b21
| 159
|
py
|
Python
|
compiled/construct/process_custom_no_args.py
|
smarek/ci_targets
|
c5edee7b0901fd8e7f75f85245ea4209b38e0cb3
|
[
"MIT"
] | 4
|
2017-04-08T12:55:11.000Z
|
2020-12-05T21:09:31.000Z
|
compiled/construct/process_custom_no_args.py
|
smarek/ci_targets
|
c5edee7b0901fd8e7f75f85245ea4209b38e0cb3
|
[
"MIT"
] | 7
|
2018-04-23T01:30:33.000Z
|
2020-10-30T23:56:14.000Z
|
compiled/construct/process_custom_no_args.py
|
smarek/ci_targets
|
c5edee7b0901fd8e7f75f85245ea4209b38e0cb3
|
[
"MIT"
] | 6
|
2017-04-08T11:41:14.000Z
|
2020-10-30T22:47:31.000Z
|
from construct import *
from construct.lib import *
# Parses exactly 5 raw bytes into the ``buf`` field.
# NOTE(review): named after a Kaitai "process custom" test case — presumably
# the custom processing is a no-op in this port; confirm against the suite.
process_custom_no_args = Struct(
	'buf' / FixedSized(5, GreedyBytes),
)
# Module-level alias expected by the test harness.
_schema = process_custom_no_args
| 17.666667
| 36
| 0.773585
|
4a10bafbcd65b5a1d2aec273f60fde29a7d7ab8c
| 7,734
|
py
|
Python
|
tests/test_esrgan.py
|
drcut/mmediting
|
e7f13f16dc63f1698d819248ed045983b35c0dbe
|
[
"Apache-2.0"
] | 1
|
2021-04-30T23:08:16.000Z
|
2021-04-30T23:08:16.000Z
|
tests/test_esrgan.py
|
drcut/mmediting
|
e7f13f16dc63f1698d819248ed045983b35c0dbe
|
[
"Apache-2.0"
] | null | null | null |
tests/test_esrgan.py
|
drcut/mmediting
|
e7f13f16dc63f1698d819248ed045983b35c0dbe
|
[
"Apache-2.0"
] | null | null | null |
from unittest.mock import patch
import torch
from mmcv.runner import obj_from_dict
from mmedit.models import build_model
from mmedit.models.backbones import MSRResNet
from mmedit.models.components import ModifiedVGG
from mmedit.models.losses import GANLoss, L1Loss
def test_esrgan():
    """Smoke-test the ESRGAN restorer: building, CPU (and optional GPU)
    train_step, disc_steps/disc_init_steps handling, and the ablations
    without pixel loss, perceptual loss, or style loss.

    The perceptual loss is mocked throughout, so only the surrounding
    training logic (log vars, shapes, returned batches) is exercised.
    """
    model_cfg = dict(
        type='ESRGAN',
        generator=dict(
            type='MSRResNet',
            in_channels=3,
            out_channels=3,
            mid_channels=4,
            num_blocks=1,
            upscale_factor=4),
        discriminator=dict(type='ModifiedVGG', in_channels=3, mid_channels=2),
        pixel_loss=dict(type='L1Loss', loss_weight=1.0, reduction='mean'),
        gan_loss=dict(
            type='GANLoss',
            gan_type='vanilla',
            real_label_val=1.0,
            fake_label_val=0,
            loss_weight=5e-3))
    train_cfg = None
    test_cfg = None
    # build restorer
    restorer = build_model(model_cfg, train_cfg=train_cfg, test_cfg=test_cfg)
    # test attributes
    assert restorer.__class__.__name__ == 'ESRGAN'
    assert isinstance(restorer.generator, MSRResNet)
    assert isinstance(restorer.discriminator, ModifiedVGG)
    assert isinstance(restorer.pixel_loss, L1Loss)
    assert isinstance(restorer.gan_loss, GANLoss)
    # prepare data (4x upscale: 32x32 LQ -> 128x128 GT)
    inputs = torch.rand(1, 3, 32, 32)
    targets = torch.rand(1, 3, 128, 128)
    data_batch = {'lq': inputs, 'gt': targets}
    # prepare optimizer
    optim_cfg = dict(type='Adam', lr=2e-4, betas=(0.9, 0.999))
    optimizer = {
        'generator':
        obj_from_dict(
            optim_cfg, torch.optim,
            dict(params=getattr(restorer, 'generator').parameters())),
        'discriminator':
        obj_from_dict(
            optim_cfg, torch.optim,
            dict(params=getattr(restorer, 'discriminator').parameters()))
    }
    # test train_step
    with patch.object(
            restorer,
            'perceptual_loss',
            return_value=(torch.tensor(1.0), torch.tensor(2.0))):
        outputs = restorer.train_step(data_batch, optimizer)
        assert isinstance(outputs, dict)
        assert isinstance(outputs['log_vars'], dict)
        for v in [
                'loss_perceptual', 'loss_gan', 'loss_d_real', 'loss_d_fake',
                'loss_pix'
        ]:
            assert isinstance(outputs['log_vars'][v], float)
        assert outputs['num_samples'] == 1
        assert torch.equal(outputs['results']['lq'], data_batch['lq'])
        assert torch.equal(outputs['results']['gt'], data_batch['gt'])
        assert torch.is_tensor(outputs['results']['output'])
        assert outputs['results']['output'].size() == (1, 3, 128, 128)
    # test train_step and forward_test (gpu)
    if torch.cuda.is_available():
        restorer = restorer.cuda()
        optimizer = {
            'generator':
            obj_from_dict(
                optim_cfg, torch.optim,
                dict(params=getattr(restorer, 'generator').parameters())),
            'discriminator':
            obj_from_dict(
                optim_cfg, torch.optim,
                dict(params=getattr(restorer, 'discriminator').parameters()))
        }
        data_batch = {'lq': inputs.cuda(), 'gt': targets.cuda()}
        # train_step
        with patch.object(
                restorer,
                'perceptual_loss',
                return_value=(torch.tensor(1.0).cuda(),
                              torch.tensor(2.0).cuda())):
            outputs = restorer.train_step(data_batch, optimizer)
            assert isinstance(outputs, dict)
            assert isinstance(outputs['log_vars'], dict)
            for v in [
                    'loss_perceptual', 'loss_gan', 'loss_d_real',
                    'loss_d_fake', 'loss_pix'
            ]:
                assert isinstance(outputs['log_vars'][v], float)
            assert outputs['num_samples'] == 1
            # results are moved back to CPU by train_step
            assert torch.equal(outputs['results']['lq'],
                               data_batch['lq'].cpu())
            assert torch.equal(outputs['results']['gt'],
                               data_batch['gt'].cpu())
            assert torch.is_tensor(outputs['results']['output'])
            assert outputs['results']['output'].size() == (1, 3, 128, 128)
    # test disc_steps and disc_init_steps
    data_batch = {'lq': inputs.cpu(), 'gt': targets.cpu()}
    train_cfg = dict(disc_steps=2, disc_init_steps=2)
    restorer = build_model(model_cfg, train_cfg=train_cfg, test_cfg=test_cfg)
    with patch.object(
            restorer,
            'perceptual_loss',
            return_value=(torch.tensor(1.0), torch.tensor(2.0))):
        outputs = restorer.train_step(data_batch, optimizer)
        assert isinstance(outputs, dict)
        assert isinstance(outputs['log_vars'], dict)
        for v in ['loss_d_real', 'loss_d_fake']:
            assert isinstance(outputs['log_vars'][v], float)
        assert outputs['num_samples'] == 1
        assert torch.equal(outputs['results']['lq'], data_batch['lq'])
        assert torch.equal(outputs['results']['gt'], data_batch['gt'])
        assert torch.is_tensor(outputs['results']['output'])
        assert outputs['results']['output'].size() == (1, 3, 128, 128)
    # test without pixel loss and perceptual loss
    model_cfg_ = model_cfg.copy()
    model_cfg_.pop('pixel_loss')
    restorer = build_model(model_cfg_, train_cfg=None, test_cfg=None)
    outputs = restorer.train_step(data_batch, optimizer)
    assert isinstance(outputs, dict)
    assert isinstance(outputs['log_vars'], dict)
    for v in ['loss_gan', 'loss_d_real', 'loss_d_fake']:
        assert isinstance(outputs['log_vars'][v], float)
    assert outputs['num_samples'] == 1
    assert torch.equal(outputs['results']['lq'], data_batch['lq'])
    assert torch.equal(outputs['results']['gt'], data_batch['gt'])
    assert torch.is_tensor(outputs['results']['output'])
    assert outputs['results']['output'].size() == (1, 3, 128, 128)
    # test train_step w/o loss_percep
    restorer = build_model(model_cfg, train_cfg=None, test_cfg=None)
    with patch.object(
            restorer, 'perceptual_loss',
            return_value=(None, torch.tensor(2.0))):
        outputs = restorer.train_step(data_batch, optimizer)
        assert isinstance(outputs, dict)
        assert isinstance(outputs['log_vars'], dict)
        for v in [
                'loss_style', 'loss_gan', 'loss_d_real', 'loss_d_fake',
                'loss_pix'
        ]:
            assert isinstance(outputs['log_vars'][v], float)
        assert outputs['num_samples'] == 1
        assert torch.equal(outputs['results']['lq'], data_batch['lq'])
        assert torch.equal(outputs['results']['gt'], data_batch['gt'])
        assert torch.is_tensor(outputs['results']['output'])
        assert outputs['results']['output'].size() == (1, 3, 128, 128)
    # test train_step w/o loss_style
    restorer = build_model(model_cfg, train_cfg=None, test_cfg=None)
    with patch.object(
            restorer, 'perceptual_loss',
            return_value=(torch.tensor(2.0), None)):
        outputs = restorer.train_step(data_batch, optimizer)
        assert isinstance(outputs, dict)
        assert isinstance(outputs['log_vars'], dict)
        for v in [
                'loss_perceptual', 'loss_gan', 'loss_d_real', 'loss_d_fake',
                'loss_pix'
        ]:
            assert isinstance(outputs['log_vars'][v], float)
        assert outputs['num_samples'] == 1
        assert torch.equal(outputs['results']['lq'], data_batch['lq'])
        assert torch.equal(outputs['results']['gt'], data_batch['gt'])
        assert torch.is_tensor(outputs['results']['output'])
        assert outputs['results']['output'].size() == (1, 3, 128, 128)
| 40.705263
| 78
| 0.600853
|
4a10bb7b3a4603395fed238c01d27660517b83af
| 5,764
|
py
|
Python
|
flink-python/pyflink/datastream/tests/test_check_point_config.py
|
sbairos/flink
|
0799b5c20a127110e47439668cf8f8db2e4ecbf3
|
[
"Apache-2.0"
] | 4
|
2020-05-28T03:36:05.000Z
|
2022-02-22T13:46:05.000Z
|
flink-python/pyflink/datastream/tests/test_check_point_config.py
|
sbairos/flink
|
0799b5c20a127110e47439668cf8f8db2e4ecbf3
|
[
"Apache-2.0"
] | 37
|
2020-06-25T08:23:56.000Z
|
2020-12-28T02:18:37.000Z
|
flink-python/pyflink/datastream/tests/test_check_point_config.py
|
sbairos/flink
|
0799b5c20a127110e47439668cf8f8db2e4ecbf3
|
[
"Apache-2.0"
] | 5
|
2019-11-11T01:22:27.000Z
|
2021-11-22T08:37:30.000Z
|
################################################################################
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
################################################################################
from pyflink.datastream import (CheckpointConfig, CheckpointingMode, ExternalizedCheckpointCleanup,
StreamExecutionEnvironment)
from pyflink.java_gateway import get_gateway
from pyflink.testing.test_case_utils import PyFlinkTestCase
class CheckpointConfigTests(PyFlinkTestCase):
    """Tests the PyFlink CheckpointConfig wrapper: constants mirror the Java
    CheckpointConfig, and every getter/setter pair round-trips."""
    def setUp(self):
        # Fresh environment per test; the checkpoint config is read off it.
        self.env = StreamExecutionEnvironment\
            .get_execution_environment()
        self.checkpoint_config = self.env.get_checkpoint_config()
    def test_constant(self):
        # Python-side defaults must match the Java CheckpointConfig class.
        gateway = get_gateway()
        JCheckpointConfig = gateway.jvm.org.apache.flink.streaming.api.environment.CheckpointConfig
        self.assertEqual(CheckpointConfig.DEFAULT_MAX_CONCURRENT_CHECKPOINTS,
                         JCheckpointConfig.DEFAULT_MAX_CONCURRENT_CHECKPOINTS)
        self.assertEqual(CheckpointConfig.DEFAULT_MIN_PAUSE_BETWEEN_CHECKPOINTS,
                         JCheckpointConfig.DEFAULT_MIN_PAUSE_BETWEEN_CHECKPOINTS)
        self.assertEqual(CheckpointConfig.DEFAULT_TIMEOUT, JCheckpointConfig.DEFAULT_TIMEOUT)
        self.assertEqual(CheckpointConfig.DEFAULT_MODE,
                         CheckpointingMode._from_j_checkpointing_mode(
                             JCheckpointConfig.DEFAULT_MODE))
    def test_is_checkpointing_enabled(self):
        # Disabled by default; enabled once an interval is set on the env.
        self.assertFalse(self.checkpoint_config.is_checkpointing_enabled())
        self.env.enable_checkpointing(1000)
        self.assertTrue(self.checkpoint_config.is_checkpointing_enabled())
    def test_get_set_checkpointing_mode(self):
        self.assertEqual(self.checkpoint_config.get_checkpointing_mode(),
                         CheckpointingMode.EXACTLY_ONCE)
        self.checkpoint_config.set_checkpointing_mode(CheckpointingMode.AT_LEAST_ONCE)
        self.assertEqual(self.checkpoint_config.get_checkpointing_mode(),
                         CheckpointingMode.AT_LEAST_ONCE)
        self.checkpoint_config.set_checkpointing_mode(CheckpointingMode.EXACTLY_ONCE)
        self.assertEqual(self.checkpoint_config.get_checkpointing_mode(),
                         CheckpointingMode.EXACTLY_ONCE)
    def test_get_set_checkpoint_interval(self):
        # -1 means "not enabled".
        self.assertEqual(self.checkpoint_config.get_checkpoint_interval(), -1)
        self.checkpoint_config.set_checkpoint_interval(1000)
        self.assertEqual(self.checkpoint_config.get_checkpoint_interval(), 1000)
    def test_get_set_checkpoint_timeout(self):
        self.assertEqual(self.checkpoint_config.get_checkpoint_timeout(), 600000)
        self.checkpoint_config.set_checkpoint_timeout(300000)
        self.assertEqual(self.checkpoint_config.get_checkpoint_timeout(), 300000)
    def test_get_set_min_pause_between_checkpoints(self):
        self.assertEqual(self.checkpoint_config.get_min_pause_between_checkpoints(), 0)
        self.checkpoint_config.set_min_pause_between_checkpoints(100000)
        self.assertEqual(self.checkpoint_config.get_min_pause_between_checkpoints(), 100000)
    def test_get_set_max_concurrent_checkpoints(self):
        self.assertEqual(self.checkpoint_config.get_max_concurrent_checkpoints(), 1)
        self.checkpoint_config.set_max_concurrent_checkpoints(2)
        self.assertEqual(self.checkpoint_config.get_max_concurrent_checkpoints(), 2)
    def test_get_set_fail_on_checkpointing_errors(self):
        self.assertTrue(self.checkpoint_config.is_fail_on_checkpointing_errors())
        self.checkpoint_config.set_fail_on_checkpointing_errors(False)
        self.assertFalse(self.checkpoint_config.is_fail_on_checkpointing_errors())
    def test_get_set_externalized_checkpoints_cleanup(self):
        # Cleanup mode is None until externalized checkpoints are enabled.
        self.assertFalse(self.checkpoint_config.is_externalized_checkpoints_enabled())
        self.assertIsNone(self.checkpoint_config.get_externalized_checkpoint_cleanup())
        self.checkpoint_config.enable_externalized_checkpoints(
            ExternalizedCheckpointCleanup.RETAIN_ON_CANCELLATION)
        self.assertTrue(self.checkpoint_config.is_externalized_checkpoints_enabled())
        self.assertEqual(self.checkpoint_config.get_externalized_checkpoint_cleanup(),
                         ExternalizedCheckpointCleanup.RETAIN_ON_CANCELLATION)
        self.checkpoint_config.enable_externalized_checkpoints(
            ExternalizedCheckpointCleanup.DELETE_ON_CANCELLATION)
        self.assertEqual(self.checkpoint_config.get_externalized_checkpoint_cleanup(),
                         ExternalizedCheckpointCleanup.DELETE_ON_CANCELLATION)
    def test_get_set_prefer_checkpoint_for_recovery(self):
        self.assertFalse(self.checkpoint_config.is_prefer_checkpoint_for_recovery())
        self.checkpoint_config.set_prefer_checkpoint_for_recovery(True)
        self.assertTrue(self.checkpoint_config.is_prefer_checkpoint_for_recovery())
| 41.768116
| 99
| 0.740285
|
4a10bb921a624f8d331590a97ace6627aae79f17
| 103
|
py
|
Python
|
Grid/__init__.py
|
Jack12xl/a-toy-fluid-engine
|
45ce4007ce6e804dcfdee8da307e131c9c3e7c7d
|
[
"MIT"
] | 21
|
2020-09-17T10:51:55.000Z
|
2022-03-15T20:27:00.000Z
|
Grid/__init__.py
|
Jack12xl/a-toy-fluid-engine
|
45ce4007ce6e804dcfdee8da307e131c9c3e7c7d
|
[
"MIT"
] | 8
|
2020-09-18T08:52:34.000Z
|
2021-02-07T09:27:49.000Z
|
Grid/__init__.py
|
Jack12xl/myFluid
|
45ce4007ce6e804dcfdee8da307e131c9c3e7c7d
|
[
"MIT"
] | 1
|
2020-09-20T11:10:35.000Z
|
2020-09-20T11:10:35.000Z
|
from .Grid import GRIDTYPE, Grid
from .FaceGrid import *
from .CellGrid import *
from .Sampler import *
| 25.75
| 32
| 0.76699
|
4a10bbeb47f4cc557828209f99ea00462c6eaa00
| 309
|
py
|
Python
|
sdk/identity/azure-identity/tests/perfstress_tests/__init__.py
|
rsdoherty/azure-sdk-for-python
|
6bba5326677468e6660845a703686327178bb7b1
|
[
"MIT"
] | 2,728
|
2015-01-09T10:19:32.000Z
|
2022-03-31T14:50:33.000Z
|
sdk/identity/azure-identity/tests/perfstress_tests/__init__.py
|
rsdoherty/azure-sdk-for-python
|
6bba5326677468e6660845a703686327178bb7b1
|
[
"MIT"
] | 17,773
|
2015-01-05T15:57:17.000Z
|
2022-03-31T23:50:25.000Z
|
sdk/identity/azure-identity/tests/perfstress_tests/__init__.py
|
rsdoherty/azure-sdk-for-python
|
6bba5326677468e6660845a703686327178bb7b1
|
[
"MIT"
] | 1,916
|
2015-01-19T05:05:41.000Z
|
2022-03-31T19:36:44.000Z
|
# -------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See LICENSE.txt in the project root for
# license information.
# -------------------------------------------------------------------------
| 51.5
| 75
| 0.398058
|
4a10bc10d49e9905ae56e8a2967e0d350dd4ed32
| 1,692
|
py
|
Python
|
boto3_type_annotations/boto3_type_annotations/resource_groups/client.py
|
cowboygneox/boto3_type_annotations
|
450dce1de4e066b939de7eac2ec560ed1a7ddaa2
|
[
"MIT"
] | 119
|
2018-12-01T18:20:57.000Z
|
2022-02-02T10:31:29.000Z
|
boto3_type_annotations/boto3_type_annotations/resource_groups/client.py
|
cowboygneox/boto3_type_annotations
|
450dce1de4e066b939de7eac2ec560ed1a7ddaa2
|
[
"MIT"
] | 15
|
2018-11-16T00:16:44.000Z
|
2021-11-13T03:44:18.000Z
|
boto3_type_annotations/boto3_type_annotations/resource_groups/client.py
|
cowboygneox/boto3_type_annotations
|
450dce1de4e066b939de7eac2ec560ed1a7ddaa2
|
[
"MIT"
] | 11
|
2019-05-06T05:26:51.000Z
|
2021-09-28T15:27:59.000Z
|
from typing import Optional
from botocore.client import BaseClient
from typing import Dict
from typing import Union
from botocore.paginate import Paginator
from botocore.waiter import Waiter
from typing import List
class Client(BaseClient):
    """Type-annotation stub for the AWS Resource Groups boto3 client.

    Bodies are intentionally ``pass``: these definitions exist only so IDEs
    and type checkers can resolve signatures; the real implementations are
    dispatched dynamically by botocore at runtime.
    """
    def can_paginate(self, operation_name: str = None):
        pass
    def create_group(self, Name: str, ResourceQuery: Dict, Description: str = None, Tags: Dict = None) -> Dict:
        pass
    def delete_group(self, GroupName: str) -> Dict:
        pass
    def generate_presigned_url(self, ClientMethod: str = None, Params: Dict = None, ExpiresIn: int = None, HttpMethod: str = None):
        pass
    def get_group(self, GroupName: str) -> Dict:
        pass
    def get_group_query(self, GroupName: str) -> Dict:
        pass
    def get_paginator(self, operation_name: str = None) -> Paginator:
        pass
    def get_tags(self, Arn: str) -> Dict:
        pass
    def get_waiter(self, waiter_name: str = None) -> Waiter:
        pass
    def list_group_resources(self, GroupName: str, Filters: List = None, MaxResults: int = None, NextToken: str = None) -> Dict:
        pass
    def list_groups(self, Filters: List = None, MaxResults: int = None, NextToken: str = None) -> Dict:
        pass
    def search_resources(self, ResourceQuery: Dict, MaxResults: int = None, NextToken: str = None) -> Dict:
        pass
    def tag(self, Arn: str, Tags: Dict) -> Dict:
        pass
    def untag(self, Arn: str, Keys: List) -> Dict:
        pass
    def update_group(self, GroupName: str, Description: str = None) -> Dict:
        pass
    def update_group_query(self, GroupName: str, ResourceQuery: Dict) -> Dict:
        pass
| 29.172414
| 131
| 0.655437
|
4a10bd9cf8eb9eba28cce625ed9c1fe59245238a
| 1,389
|
py
|
Python
|
python.py
|
LiamJHealy/LiamJHealy.github.io
|
8a8b6e524f3c5ff2895e7141dee9dccab3358849
|
[
"MIT"
] | null | null | null |
python.py
|
LiamJHealy/LiamJHealy.github.io
|
8a8b6e524f3c5ff2895e7141dee9dccab3358849
|
[
"MIT"
] | null | null | null |
python.py
|
LiamJHealy/LiamJHealy.github.io
|
8a8b6e524f3c5ff2895e7141dee9dccab3358849
|
[
"MIT"
] | null | null | null |
# Adjacency-list representation of a small directed graph rooted at 'A'.
graph = {
  'A' : ['B','C'],
  'B' : ['D', 'E'],
  'C' : ['F'],
  'D' : [],
  'E' : [],
  'F' : []
}
# Tracks which nodes the iterative DFS below has already printed.
visited = set()
def dfs_no_recursive(visited, graph, node):
    """Iterative (stack-based) depth-first traversal.

    Prints each reachable node exactly once and records it in *visited*,
    which is mutated in place. Neighbours are pushed in listed order, so
    they are popped -- and therefore printed -- last-to-first.
    """
    pending = [node]
    while pending:
        current = pending.pop()
        if current in visited:
            continue
        print(current)
        visited.add(current)
        pending.extend(graph[current])
# Run the iterative DFS starting from 'A'.
dfs_no_recursive(visited, graph, 'A')
# Fresh visited set for the recursive variant below.
visited = set()
def dfs_recursion(visited, graph, node):
    """Recursive depth-first traversal: print each reachable node once.

    *visited* is mutated in place and doubles as the recursion guard.
    """
    if node in visited:
        return
    print(node)
    visited.add(node)
    for child in graph[node]:
        dfs_recursion(visited, graph, child)
# Run the recursive DFS starting from 'A'.
dfs_recursion(visited, graph, 'A')
# initialise an empty visited set (reused by the BFS below)
visited = set()
def bfs(visited, graph, node):
    '''
    Breadth-first traversal from *node*, recording every reachable vertex
    in *visited* (mutated in place).

    :type visited: set
    :type graph: dictionary
    :type node: string
    :rtype: None
    '''
    # A deque gives O(1) popleft; list.pop(0) shifts the whole list (O(n)).
    from collections import deque
    # initialise the queue with the start node
    queue = deque()
    visited.add(node)
    queue.append(node)
    # while the queue is not empty
    while queue:
        # pop the first element in the queue
        s = queue.popleft()
        # add each unvisited neighbour to the end of the queue
        for neighbour in graph[s]:
            if neighbour not in visited:
                visited.add(neighbour)
                queue.append(neighbour)
# execute the function: BFS from 'A' fills 'visited' with all reachable nodes
bfs(visited, graph, 'A')
| 19.027397
| 62
| 0.550756
|
4a10be6f71f49441bab403caab4143460bf9a84b
| 6,396
|
py
|
Python
|
tests/sparseml/pytorch/sparsification/pruning/test_modifier_pruning_movement.py
|
anmarques/sparseml
|
c8352f1d896bfb1258add4e563d8163d3702b5ef
|
[
"Apache-2.0"
] | null | null | null |
tests/sparseml/pytorch/sparsification/pruning/test_modifier_pruning_movement.py
|
anmarques/sparseml
|
c8352f1d896bfb1258add4e563d8163d3702b5ef
|
[
"Apache-2.0"
] | null | null | null |
tests/sparseml/pytorch/sparsification/pruning/test_modifier_pruning_movement.py
|
anmarques/sparseml
|
c8352f1d896bfb1258add4e563d8163d3702b5ef
|
[
"Apache-2.0"
] | null | null | null |
# Copyright (c) 2021 - present / Neuralmagic, Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import pytest
import torch
from flaky import flaky
from sparseml.pytorch.sparsification import MovementPruningModifier
from sparseml.pytorch.utils import tensor_sparsity
from tests.sparseml.pytorch.helpers import LinearNet
from tests.sparseml.pytorch.optim.test_modifier import (
ScheduledUpdateModifierTest,
create_optim_adam,
create_optim_sgd,
)
from tests.sparseml.pytorch.sparsification.pruning.helpers import (
pruning_modifier_serialization_vals_test,
)
from tests.sparseml.pytorch.helpers import ( # noqa isort:skip
test_epoch,
test_loss,
test_steps_per_epoch,
)
@flaky(max_runs=3, min_passes=2)
@pytest.mark.skipif(
    os.getenv("NM_ML_SKIP_PYTORCH_TESTS", False),
    reason="Skipping pytorch tests",
)
@pytest.mark.parametrize(
    "modifier_lambda",
    [
        lambda: MovementPruningModifier(
            params="__ALL_PRUNABLE__",
            init_sparsity=0.05,
            final_sparsity=0.95,
            start_epoch=10.0,
            end_epoch=25.0,
            update_frequency=1.0,
            inter_func="cubic",
        ),
    ],
    scope="function",
)
@pytest.mark.parametrize("model_lambda", [LinearNet], scope="function")
@pytest.mark.parametrize(
    "optim_lambda",
    [create_optim_sgd, create_optim_adam],
    scope="function",
)
class TestMovementPruningModifier(ScheduledUpdateModifierTest):
    """Lifecycle tests for MovementPruningModifier across SGD and Adam."""
    def test_lifecycle(
        self,
        modifier_lambda,
        model_lambda,
        optim_lambda,
        test_steps_per_epoch, # noqa: F811
    ):
        """Drive the modifier through its full schedule and check the
        applied sparsity is None before start_epoch, monotonically
        increasing during the schedule, and pinned at final_sparsity after
        end_epoch.
        """
        modifier = modifier_lambda()
        model = model_lambda()
        optimizer = optim_lambda(model)
        self.initialize_helper(modifier, model)
        # check sparsity is not set before
        for epoch in range(int(modifier.start_epoch)):
            assert not modifier.update_ready(epoch, test_steps_per_epoch)
            assert modifier.applied_sparsity is None
        epoch = int(modifier.start_epoch)
        assert modifier.update_ready(epoch, test_steps_per_epoch)
        modifier.scheduled_update(model, optimizer, epoch, test_steps_per_epoch)
        applied_sparsities = modifier.applied_sparsity
        if not isinstance(applied_sparsities, list):
            applied_sparsities = [applied_sparsities]
        if not isinstance(modifier.init_sparsity, str):
            # scalar init_sparsity: every param starts at exactly that level
            assert all(
                applied_sparsity == modifier.init_sparsity
                for applied_sparsity in applied_sparsities
            )
        else:
            # string init_sparsity (e.g. "from_param"): compare per-param masks
            assert len(modifier._init_sparsity) == len(modifier.module_masks.layers)
            for idx, param in enumerate(modifier.module_masks.params_data):
                assert modifier._init_sparsity[idx] == tensor_sparsity(param).item()
        last_sparsities = applied_sparsities
        # check forward pass
        input_shape = model_lambda.layer_descs()[0].input_size
        test_batch = torch.randn(10, *input_shape)
        _ = model(test_batch)
        # step through the schedule; sparsity must strictly increase each update
        while epoch < modifier.end_epoch - modifier.update_frequency:
            epoch += modifier.update_frequency
            assert modifier.update_ready(epoch, test_steps_per_epoch)
            modifier.scheduled_update(model, optimizer, epoch, test_steps_per_epoch)
            applied_sparsities = modifier.applied_sparsity
            if not isinstance(applied_sparsities, list):
                applied_sparsities = [applied_sparsities]
            assert all(
                applied_sparsity > last_sparsity
                for applied_sparsity, last_sparsity in zip(
                    applied_sparsities, last_sparsities
                )
            )
            last_sparsities = applied_sparsities
            _ = model(test_batch) # check forward pass
        epoch = int(modifier.end_epoch)
        assert modifier.update_ready(epoch, test_steps_per_epoch)
        modifier.scheduled_update(model, optimizer, epoch, test_steps_per_epoch)
        def _test_final_sparsity_applied():
            # final_sparsity may be a single float or a per-param list
            final_sparsities = (
                [modifier.final_sparsity]
                if isinstance(modifier.final_sparsity, float)
                else modifier.final_sparsity
            )
            assert all(
                sparsity in final_sparsities for sparsity in modifier.applied_sparsity
            )
        _test_final_sparsity_applied()
        # past end_epoch no further updates fire and sparsity stays final
        for epoch in range(int(modifier.end_epoch) + 1, int(modifier.end_epoch) + 6):
            assert not modifier.update_ready(epoch, test_steps_per_epoch)
            _test_final_sparsity_applied()
def test_movement_pruning_yaml():
    """Round-trip test: a MovementPruningModifier built from YAML, from its
    own serialized string, and from keyword arguments must all agree.
    """
    init_sparsity = 0.05
    final_sparsity = 0.8
    start_epoch = 5.0
    end_epoch = 15.0
    update_frequency = 1.0
    params = "__ALL_PRUNABLE__"
    inter_func = "cubic"
    mask_type = "block"
    yaml_str = f"""
    !MovementPruningModifier
        init_sparsity: {init_sparsity}
        final_sparsity: {final_sparsity}
        start_epoch: {start_epoch}
        end_epoch: {end_epoch}
        update_frequency: {update_frequency}
        params: {params}
        inter_func: {inter_func}
        mask_type: {mask_type}
    """
    # load from the YAML string, then re-serialize and load again
    yaml_modifier = MovementPruningModifier.load_obj(yaml_str)
    serialized_modifier = MovementPruningModifier.load_obj(
        str(yaml_modifier)
    )  # type: MovementPruningModifier
    # construct the same modifier directly from kwargs for comparison
    obj_modifier = MovementPruningModifier(
        init_sparsity=init_sparsity,
        final_sparsity=final_sparsity,
        start_epoch=start_epoch,
        end_epoch=end_epoch,
        update_frequency=update_frequency,
        params=params,
        inter_func=inter_func,
        mask_type=mask_type,
    )
    assert isinstance(yaml_modifier, MovementPruningModifier)
    pruning_modifier_serialization_vals_test(
        yaml_modifier, serialized_modifier, obj_modifier
    )
| 33.84127
| 86
| 0.678862
|
4a10be719a72df5beb4e7571367c2983db8ff7a2
| 4,920
|
py
|
Python
|
Layer.py
|
NCTUMLlab/Si-Xun-Luo-Self-Supervised_Learning_for_Online_SpeakerDiarization
|
d4b22d55ab8d366235675a54c508c86d5eb28cd5
|
[
"MIT"
] | null | null | null |
Layer.py
|
NCTUMLlab/Si-Xun-Luo-Self-Supervised_Learning_for_Online_SpeakerDiarization
|
d4b22d55ab8d366235675a54c508c86d5eb28cd5
|
[
"MIT"
] | 1
|
2021-04-09T09:38:24.000Z
|
2021-04-09T09:38:24.000Z
|
Layer.py
|
NCTUMLlab/Si-Xun-Luo-Self-Supervised_Learning_for_Online_SpeakerDiarization
|
d4b22d55ab8d366235675a54c508c86d5eb28cd5
|
[
"MIT"
] | 2
|
2021-07-07T15:54:06.000Z
|
2022-02-01T20:36:59.000Z
|
import torch
import torch.nn as nn
import torch.nn.functional as F
class TDNN(nn.Module):
    """Time-Delay Neural Network layer: a linear kernel applied over sliding
    temporal context windows (implemented with F.unfold), followed by ReLU
    and optional dropout / batch norm.
    """
    def __init__(
        self,
        input_dim=23,
        output_dim=512,
        context_size=5,
        stride=1,
        dilation=1,
        batch_norm=False,
        dropout_p=0
    ):
        '''
        TDNN as defined by https://www.danielpovey.com/files/2015_interspeech_multisplice.pdf
        Affine transformation not applied globally to all frames but smaller windows with local context
        batch_norm: True to include batch normalisation after the non linearity
        Context size and dilation determine the frames selected
        (although context size is not really defined in the traditional sense)
        For example:
            context size 5 and dilation 1 is equivalent to [-2,-1,0,1,2]
            context size 3 and dilation 2 is equivalent to [-2, 0, 2]
            context size 1 and dilation 1 is equivalent to [0]
        '''
        super(TDNN, self).__init__()
        self.context_size = context_size
        # NOTE(review): self.stride is stored but never read in forward();
        # the unfold below always uses stride=(1, input_dim).
        self.stride = stride
        self.input_dim = input_dim
        self.output_dim = output_dim
        self.dilation = dilation
        self.dropout_p = dropout_p
        self.batch_norm = batch_norm
        # One linear kernel over the flattened (context_size * input_dim) window.
        self.kernel = nn.Linear(self.input_dim*self.context_size, self.output_dim)
        self.nonlinearity = nn.ReLU()
        if self.batch_norm:
            self.bn = nn.BatchNorm1d(output_dim)
        if self.dropout_p:
            self.drop = nn.Dropout(p=self.dropout_p)
    def forward(self, x):
        '''
        input: size (batch, seq_len, input_features)
        output: size (batch, new_seq_len, output_features)
        '''
        _, _, d = x.shape
        assert (d == self.input_dim), 'Input dimension was wrong. Expected ({}), got ({})'.format(self.input_dim, d)
        # Add a channel dim so unfold treats the sequence as a 2D "image".
        x = x.unsqueeze(1)
        # Unfold input into smaller temporal contexts
        x = F.unfold(
            x,
            (self.context_size, self.input_dim),
            stride=(1,self.input_dim),
            dilation=(self.dilation,1)
        )
        # N, output_dim*context_size, new_t = x.shape
        x = x.transpose(1,2)
        if self.dropout_p:
            x = self.drop(x)
        x = self.kernel(x)
        x = self.nonlinearity(x)
        if self.batch_norm:
            # BatchNorm1d expects (N, C, T), so swap dims around the call.
            x = x.transpose(1,2)
            x = self.bn(x)
            x = x.transpose(1,2)
        return x
class StatsPooling(nn.Module):
    """Temporal statistics pooling.

    Collapses the time axis (dim 1) of a (batch, time, features) tensor to
    the element-wise sum of the per-feature mean and sample std.
    """

    def __init__(self):
        super(StatsPooling, self).__init__()

    def forward(self, varient_length_tensor):
        # Reduce over the sequence dimension, then combine the two statistics.
        return varient_length_tensor.mean(dim=1) + varient_length_tensor.std(dim=1)
class FullyConnected(nn.Module):
    """Two 512->512 linear layers with dropout (p=0.25) between them.

    There is no activation between the layers (the ReLU was commented out
    in the original), so this is an affine stack plus dropout.
    """

    def __init__(self):
        super(FullyConnected, self).__init__()
        self.hidden1 = nn.Linear(512,512)
        self.hidden2 = nn.Linear(512,512)
        self.dropout = nn.Dropout(p=0.25)

    def forward(self, x):
        hidden = self.dropout(self.hidden1(x))
        return self.hidden2(hidden)
class MyModel(nn.Module):
    """x-vector style network: five TDNN frame layers, statistics pooling
    over time, a fully-connected head, and a softmax output.
    """

    def __init__(self):
        super(MyModel, self).__init__()
        self.frame1 = TDNN(input_dim=23, output_dim=512, context_size=5, dilation=1)
        self.frame2 = TDNN(input_dim=512, output_dim=512, context_size=3, dilation=2)
        self.frame3 = TDNN(input_dim=512, output_dim=512, context_size=3, dilation=3)
        self.frame4 = TDNN(input_dim=512, output_dim=512, context_size=1, dilation=1)
        self.frame5 = TDNN(input_dim=512, output_dim=512, context_size=1, dilation=1)
        self.pooling = StatsPooling()
        self.fully = FullyConnected()
        self.softmax = nn.Softmax(dim=1)

    def forward(self, x):
        # Frame-level TDNN stack.
        out = x
        for frame in (self.frame1, self.frame2, self.frame3, self.frame4, self.frame5):
            out = frame(out)
        # Segment-level: pool over time, project, normalise.
        out = self.pooling(out)
        out = self.fully(out)
        return self.softmax(out)
class Model(nn.Module):
    """Siamese wrapper around MyModel: embeds two stacked inputs and returns
    their cosine similarity.
    """
    def __init__(self):
        super(Model, self).__init__()
        #self.mfcc = MFCC(sample_rate=8000, n_mfcc= 23)
        self.cos = nn.CosineSimilarity(dim=1, eps=1e-6)
        self.TDNN = MyModel()
        # NOTE(review): self.softmax is created but never used in forward().
        self.softmax = nn.Softmax(dim=1)
    def load_(self):
        # NOTE(review): torch.load unpickles arbitrary objects -- only load
        # 'best.pkl' from a trusted source; filename is hardcoded.
        self.TDNN = torch.load('best.pkl')
    def save_(self,epoch_):
        # Persist the embedded network, named after the epoch number.
        torch.save(self.TDNN,str(epoch_)+'_model.pkl')
    def forward(self, x ):
        # x stacks the two utterances along dim 1; split and squeeze each.
        one = torch.squeeze(x[:,0:1,:,:])
        other = torch.squeeze(x[:,1:2,:,:])
        one = self.TDNN(one)
        other= self.TDNN(other)
        # Cosine similarity between the two embeddings.
        output = self.cos(one,other)
        return output
| 30.37037
| 116
| 0.564024
|
4a10bf985c80ef2033b7946b6ff0c5cf143f1254
| 3,483
|
py
|
Python
|
salt/returners/mongo_future_return.py
|
d--j/salt
|
579f900be67a80e1a77674bc6aa21fec836c1c4c
|
[
"Apache-2.0"
] | 2
|
2019-03-30T02:12:56.000Z
|
2021-03-08T18:59:46.000Z
|
salt/returners/mongo_future_return.py
|
epoelke/salt
|
80ae64e54f9f336d3cdb6e03e42f2a50469ec8f2
|
[
"Apache-2.0"
] | null | null | null |
salt/returners/mongo_future_return.py
|
epoelke/salt
|
80ae64e54f9f336d3cdb6e03e42f2a50469ec8f2
|
[
"Apache-2.0"
] | 1
|
2020-03-07T07:04:55.000Z
|
2020-03-07T07:04:55.000Z
|
'''
Return data to a mongodb server
Required python modules: pymongo
This returner will send data from the minions to a MongoDB server. To
configure the settings for your MongoDB server, add the following lines
to the minion config files::
mongo.db: <database name>
mongo.host: <server ip address>
mongo.user: <MongoDB username>
mongo.password: <MongoDB user password>
mongo.port: 27017
This mongo returner is being developed to replace the default mongodb returner
in the future and should not be considered API stable yet.
'''
# Import python libs
import logging
# Import third party libs
try:
import pymongo
HAS_PYMONGO = True
except ImportError:
HAS_PYMONGO = False
log = logging.getLogger(__name__)
def __virtual__():
    '''
    Only expose this returner when the pymongo dependency imported cleanly.
    '''
    return 'mongo' if HAS_PYMONGO else False
def _remove_dots(src):
output = {}
for key, val in src.iteritems():
if isinstance(val, dict):
val = _remove_dots(val)
output[key.replace('.', '-')] = val
return output
def _get_conn():
    '''
    Return a mongodb connection object

    Reads host/port/db/user/password from minion config via
    ``__salt__['config.option']`` and returns ``(connection, database)``.
    '''
    # NOTE(review): pymongo.Connection was removed in pymongo 3.x in favour
    # of MongoClient -- confirm the pinned pymongo version before upgrading.
    conn = pymongo.Connection(
        __salt__['config.option']('mongo.host'),
        __salt__['config.option']('mongo.port'))
    mdb = conn[__salt__['config.option']('mongo.db')]
    user = __salt__['config.option']('mongo.user')
    password = __salt__['config.option']('mongo.password')
    # Authentication is optional: only attempted when both values are set.
    if user and password:
        mdb.authenticate(user, password)
    return conn, mdb
def returner(ret):
    '''
    Write a minion's return data into its per-minion mongodb collection.
    '''
    conn, mdb = _get_conn()
    collection = mdb[ret['id']]
    ret_data = ret['return']
    # Dots are not allowed in mongo keys, so sanitise dict returns first.
    back = _remove_dots(ret_data) if isinstance(ret_data, dict) else ret_data
    log.debug(back)
    sdata = {ret['jid']: back, 'fun': ret['fun']}
    if 'out' in ret:
        sdata['out'] = ret['out']
    collection.insert(sdata)
def save_load(jid, load):
    '''
    Persist the load for job id *jid* in a collection named after it.
    '''
    conn, mdb = _get_conn()
    mdb[jid].insert(load)
def get_load(jid):
    '''
    Look up and return the stored load document for job id *jid*.
    '''
    conn, mdb = _get_conn()
    collection = mdb[jid]
    return collection.find_one()
def get_jid(jid):
    '''
    Gather, per collection, the return document that contains the given jid.
    '''
    conn, mdb = _get_conn()
    found = {}
    query = {jid: {'$exists': 'true'}}
    for name in mdb.collection_names():
        doc = mdb[name].find_one(query)
        if doc:
            found[name] = doc
    return found
def get_fun(fun):
    '''
    Map each collection to its stored job document for function *fun*.
    '''
    conn, mdb = _get_conn()
    results = {}
    for name in mdb.collection_names():
        doc = mdb[name].find_one({'fun': fun})
        if doc:
            results[name] = doc
    return results
def get_minions():
    '''
    Return a list of minion ids: every collection name except 20-character
    purely-numeric ones, which are job-id collections.
    '''
    conn, mdb = _get_conn()
    minions = []
    for name in mdb.collection_names():
        if len(name) == 20:
            try:
                int(name)
            except ValueError:
                pass  # 20 chars but not numeric: a minion id after all
            else:
                continue  # numeric 20-char name is a jid collection; skip
        minions.append(name)
    return minions
def get_jids():
    '''
    Return a list of job ids: collection names that are exactly 20
    characters long and purely numeric.
    '''
    conn, mdb = _get_conn()
    jids = []
    for name in mdb.collection_names():
        if len(name) != 20:
            continue
        try:
            int(name)
        except ValueError:
            continue  # 20 chars but not numeric: not a jid
        jids.append(name)
    return jids
| 21.63354
| 78
| 0.589147
|
4a10c0369302d3bdcfc5b8393f2e2f10c95309ab
| 3,286
|
py
|
Python
|
monai/metrics/compute_meandice.py
|
HastingsGreer/MONAI
|
5e12c5d52638b2e434e212819452775f67cbb0de
|
[
"Apache-2.0"
] | 1
|
2020-11-05T09:53:14.000Z
|
2020-11-05T09:53:14.000Z
|
monai/metrics/compute_meandice.py
|
HastingsGreer/MONAI
|
5e12c5d52638b2e434e212819452775f67cbb0de
|
[
"Apache-2.0"
] | null | null | null |
monai/metrics/compute_meandice.py
|
HastingsGreer/MONAI
|
5e12c5d52638b2e434e212819452775f67cbb0de
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2020 MONAI Consortium
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from monai.networks.utils import one_hot
def compute_meandice(y_pred,
                     y,
                     include_background=False,
                     to_onehot_y=True,
                     mutually_exclusive=True,
                     add_sigmoid=False,
                     logit_thresh=None):
    """Computes dice score metric from full size Tensor and collects average.
    Args:
        y_pred (torch.Tensor): input data to compute, typical segmentation model output.
            it must be One-Hot format and first dim is batch, example shape: [16, 3, 32, 32].
        y (torch.Tensor): ground truth to compute mean dice metric, the first dim is batch.
        include_background (Bool): whether to skip dice computation on the first channel of the predicted output.
        to_onehot_y (Bool): whether to convert `y` into the one-hot format.
        mutually_exclusive (Bool): if True, `y_pred` will be converted into a binary matrix using
            a combination of argmax and to_onehot.
        add_sigmoid (Bool): whether to add sigmoid function to y_pred before computation.
        logit_thresh (Float): the threshold value used to convert `y_pred` into a binary matrix.
    Note:
        This method provide two options to convert `y_pred` into a binary matrix:
        (1) when `mutually_exclusive` is True, it uses a combination of argmax and to_onehot
        (2) when `mutually_exclusive` is False, it uses a threshold `logit_thresh`
            (optionally with a sigmoid function before thresholding).
    """
    n_channels_y_pred = y_pred.shape[1]
    if mutually_exclusive:
        if logit_thresh is not None:
            raise ValueError('`logit_thresh` is incompatible when mutually_exclusive is True.')
        # Pick the winning class per pixel, then re-expand to one-hot channels.
        y_pred = torch.argmax(y_pred, dim=1, keepdim=True)
        y_pred = one_hot(y_pred, n_channels_y_pred)
    else:  # channel-wise thresholding
        if add_sigmoid:
            y_pred = torch.sigmoid(y_pred)
        if logit_thresh is not None:
            y_pred = (y_pred >= logit_thresh).float()
    if to_onehot_y:
        y = one_hot(y, n_channels_y_pred)
    if not include_background:
        # Drop channel 0 (background) unless it is the only channel present.
        y = y[:, 1:] if y.shape[1] > 1 else y
        y_pred = y_pred[:, 1:] if y_pred.shape[1] > 1 else y_pred
    # reducing only spatial dimensions (not batch nor channels)
    reduce_axis = list(range(2, y_pred.dim()))
    intersection = torch.sum(y * y_pred, reduce_axis)
    y_o = torch.sum(y, reduce_axis)
    y_pred_o = torch.sum(y_pred, reduce_axis)
    denominator = y_o + y_pred_o
    # dice = 2|A∩B| / (|A| + |B|)
    # NOTE(review): a zero denominator (class absent in both) yields nan
    # here -- confirm callers guard against empty classes.
    f = (2.0 * intersection) / denominator
    # final reduce_mean across batches and channels
    return torch.mean(f)
| 43.813333
| 113
| 0.66829
|
4a10c1ad3b41682eb2e8eacd5ecb932f5d79530a
| 403
|
py
|
Python
|
lims/addressbook/migrations/0002_auto_20180301_0958.py
|
sqilz/LIMS-Backend
|
b64e1fa512f89e4492803d44c6b8c35e4d4724cc
|
[
"MIT"
] | 12
|
2017-03-01T10:39:36.000Z
|
2022-01-04T06:17:19.000Z
|
lims/addressbook/migrations/0002_auto_20180301_0958.py
|
sqilz/LIMS-Backend
|
b64e1fa512f89e4492803d44c6b8c35e4d4724cc
|
[
"MIT"
] | 29
|
2017-04-25T14:05:08.000Z
|
2021-06-21T14:41:53.000Z
|
lims/addressbook/migrations/0002_auto_20180301_0958.py
|
sqilz/LIMS-Backend
|
b64e1fa512f89e4492803d44c6b8c35e4d4724cc
|
[
"MIT"
] | 4
|
2017-10-11T16:22:53.000Z
|
2021-02-23T15:45:21.000Z
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.3 on 2018-03-01 09:58
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
    """Auto-generated: set Address default ordering to newest-first (-id)."""
    dependencies = [
        ('addressbook', '0001_initial'),
    ]
    operations = [
        migrations.AlterModelOptions(
            name='address',
            options={'ordering': ['-id']},
        ),
    ]
| 20.15
| 48
| 0.600496
|
4a10c21fc321cdc23bea718de7ef2a5d7a901504
| 5,764
|
py
|
Python
|
torchgeo/datasets/resisc45.py
|
GIShkl/GAOFEN2021_CHANGEDETECTION
|
5b7251cb1e951a04c7effacab6c1233232158472
|
[
"MIT"
] | 3
|
2021-12-12T09:45:41.000Z
|
2022-03-10T08:34:22.000Z
|
torchgeo/datasets/resisc45.py
|
lyp19/GAOFEN2021_CHANGEDETECTION
|
5b7251cb1e951a04c7effacab6c1233232158472
|
[
"MIT"
] | null | null | null |
torchgeo/datasets/resisc45.py
|
lyp19/GAOFEN2021_CHANGEDETECTION
|
5b7251cb1e951a04c7effacab6c1233232158472
|
[
"MIT"
] | 1
|
2021-11-13T05:40:18.000Z
|
2021-11-13T05:40:18.000Z
|
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License.
"""RESISC45 dataset."""
import os
from typing import Callable, Dict, Optional, Tuple
import numpy as np
import torch
from torch import Tensor
from torchvision.datasets import ImageFolder
from .geo import VisionDataset
from .utils import download_and_extract_archive
class RESISC45(VisionDataset, ImageFolder):  # type: ignore[misc]
    """RESISC45 dataset.
    The `RESISC45 <http://www.escience.cn/people/JunweiHan/NWPU-RESISC45.html>`_
    dataset is a dataset for remote sensing image scene classification.
    Dataset features:
    * 31,500 images with 0.2-30 m per pixel resolution (256x256 px)
    * three spectral bands - RGB
    * 45 scene classes, 700 images per class
    * images extracted from Google Earth from over 100 countries
    * images conditions with high variability (resolution, weather, illumination)
    Dataset format:
    * images are three-channel jpgs
    Dataset classes:
    0. airplane
    1. airport
    2. baseball_diamond
    3. basketball_court
    4. beach
    5. bridge
    6. chaparral
    7. church
    8. circular_farmland
    9. cloud
    10. commercial_area
    11. dense_residential
    12. desert
    13. forest
    14. freeway
    15. golf_course
    16. ground_track_field
    17. harbor
    18. industrial_area
    19. intersection
    20. island
    21. lake
    22. meadow
    23. medium_residential
    24. mobile_home_park
    25. mountain
    26. overpass
    27. palace
    28. parking_lot
    29. railway
    30. railway_station
    31. rectangular_farmland
    32. river
    33. roundabout
    34. runway
    35. sea_ice
    36. ship
    37. snowberg
    38. sparse_residential
    39. stadium
    40. storage_tank
    41. tennis_court
    42. terrace
    43. thermal_power_station
    44. wetland
    If you use this dataset in your research, please cite the following paper:
    * https://doi.org/10.1109/jproc.2017.2675998
    """
    # Download location and integrity metadata for the archive.
    url = "https://drive.google.com/file/d/1DnPSU5nVSN7xv95bpZ3XQ0JhKXZOKgIv"
    md5 = "d824acb73957502b00efd559fc6cfbbb"
    filename = "NWPU-RESISC45.rar"
    directory = "NWPU-RESISC45"
    def __init__(
        self,
        root: str = "data",
        transforms: Optional[Callable[[Dict[str, Tensor]], Dict[str, Tensor]]] = None,
        download: bool = False,
        checksum: bool = False,
    ) -> None:
        """Initialize a new PatternNet dataset instance.
        Args:
            root: root directory where dataset can be found
            transforms: a function/transform that takes input sample and its target as
                entry and returns a transformed version
            download: if True, download dataset and store it in the root directory
            checksum: if True, check the MD5 of the downloaded files (may be slow)
        Raises:
            RuntimeError: if ``download=False`` and data is not found, or checksums
                don't match
        """
        self.root = root
        self.checksum = checksum
        if download:
            self._download()
        if not self._check_integrity():
            raise RuntimeError(
                "Dataset not found or corrupted. "
                + "You can use download=True to download it"
            )
        # When transform & target_transform are None, ImageFolder.__getitem__[index]
        # returns a PIL.Image and int for image and label, respectively
        super().__init__(
            root=os.path.join(root, self.directory),
            transform=None,
            target_transform=None,
        )
        # Must be set after calling super().__init__()
        self.transforms = transforms
    def __getitem__(self, index: int) -> Dict[str, Tensor]:
        """Return an index within the dataset.
        Args:
            index: index to return
        Returns:
            data and label at that index
        """
        image, label = self._load_image(index)
        sample = {"image": image, "label": label}
        if self.transforms is not None:
            sample = self.transforms(sample)
        return sample
    def __len__(self) -> int:
        """Return the number of data points in the dataset.
        Returns:
            length of the dataset
        """
        return len(self.imgs)
    def _load_image(self, index: int) -> Tuple[Tensor, Tensor]:
        """Load a single image and it's class label.
        Args:
            index: index to return
        Returns:
            the image
            the image class label
        """
        img, label = ImageFolder.__getitem__(self, index)
        array = np.array(img)
        tensor: Tensor = torch.from_numpy(array)  # type: ignore[attr-defined]
        # Convert from HxWxC to CxHxW
        tensor = tensor.permute((2, 0, 1))
        label = torch.tensor(label)  # type: ignore[attr-defined]
        return tensor, label
    def _check_integrity(self) -> bool:
        """Checks the integrity of the dataset structure.
        Returns:
            True if the dataset directories and split files are found, else False
        """
        # Only the extracted directory's existence is checked here; the MD5 is
        # verified during download, not in this method.
        filepath = os.path.join(self.root, self.directory)
        if not os.path.exists(filepath):
            return False
        return True
    def _download(self) -> None:
        """Download the dataset and extract it.
        Raises:
            AssertionError: if the checksum of split.py does not match
        """
        if self._check_integrity():
            print("Files already downloaded and verified")
            return
        # NOTE(review): the archive is a .rar file -- presumably
        # download_and_extract_archive needs rar support available; confirm.
        download_and_extract_archive(
            self.url,
            self.root,
            filename=self.filename,
            md5=self.md5 if self.checksum else None,
        )
| 27.578947
| 86
| 0.618668
|
4a10c462dc41d8922eab29aeb8642f8546f9f6ab
| 3,687
|
py
|
Python
|
language-modeling/fast_transformers/attention/local_attention.py
|
minhtannguyen/transformer-mgk
|
304ebf3781b1eb4aeef93f2757319775d2fcdbc4
|
[
"CC0-1.0"
] | 5
|
2021-11-06T16:10:31.000Z
|
2021-12-25T19:47:42.000Z
|
language-modeling/fast_transformers/attention/.ipynb_checkpoints/local_attention-checkpoint.py
|
minhtannguyen/transformer-mgk
|
304ebf3781b1eb4aeef93f2757319775d2fcdbc4
|
[
"CC0-1.0"
] | null | null | null |
language-modeling/fast_transformers/attention/.ipynb_checkpoints/local_attention-checkpoint.py
|
minhtannguyen/transformer-mgk
|
304ebf3781b1eb4aeef93f2757319775d2fcdbc4
|
[
"CC0-1.0"
] | 2
|
2021-11-30T03:36:54.000Z
|
2021-12-25T19:49:58.000Z
|
"""Implement local context attention."""
from math import sqrt
import torch
from torch.nn import Module, Dropout
from torch.nn import functional as F
from ..attention_registry import AttentionRegistry, Optional, Int, Float, \
EventDispatcherInstance
from ..events import EventDispatcher
from ..local_product import local_dot_product, local_weighted_average
class LocalAttention(Module):
    """Implement fast local attention where a query can only attend to
    neighboring keys.
    In this attention module the query Q_i can only attend to a key K_j if
    |i-j| < local_context/2.
    Arguments
    ---------
        local_context: The neighborhood to consider for local attention.
        softmax_temp: The temperature to use for the softmax attention.
                      (default: 1/sqrt(d_keys) where d_keys is computed at
                      runtime)
        attention_dropout: The dropout rate to apply to the attention
                           (default: 0.1)
        event_dispatcher: str or EventDispatcher instance to be used by this
                          module for dispatching events (default: the default
                          global dispatcher)
    """
    def __init__(self, local_context, softmax_temp=None, attention_dropout=0.1,
                 event_dispatcher=""):
        super(LocalAttention, self).__init__()
        self.local_context = local_context
        self.softmax_temp = softmax_temp
        self.dropout = Dropout(attention_dropout)
        self.event_dispatcher = EventDispatcher.get(event_dispatcher)
    def forward(self, queries, keys, values, attn_mask, query_lengths,
                key_lengths):
        """Implements the local attention.
        The attn_mask can be anything but the only values that will be
        considered will be the ones in the neighborhood of each query.
        Arguments
        ---------
            queries: (N, L, H, E) The tensor containing the queries
            keys: (N, S, H, E) The tensor containing the keys
            values: (N, S, H, D) The tensor containing the values
            attn_mask: An implementation of BaseMask that encodes where each
                       query can attend to
            query_lengths: An implementation of BaseMask that encodes how
                           many queries each sequence in the batch consists of
            key_lengths: An implementation of BaseMask that encodes how
                         many queries each sequence in the batch consists of
        """
        # Extract some shapes and compute the temperature
        N, L, H, E = queries.shape
        _, S, _, D = values.shape
        context = self.local_context
        # Temperature defaults to 1/sqrt(E) when not given at construction.
        softmax_temp = self.softmax_temp or 1./sqrt(E)
        # Permute the dimensions to NHLE instead of NLHE
        queries = queries.permute(0, 2, 1, 3).contiguous()
        keys = keys.permute(0, 2, 1, 3).contiguous()
        values = values.permute(0, 2, 1, 3).contiguous()
        # Scores are only computed inside each query's local window.
        QK = local_dot_product(
            queries,
            keys,
            attn_mask.additive_matrix_finite,
            key_lengths.lengths,
            self.local_context
        )
        # Softmax over the local window, with dropout on the weights.
        A = self.dropout(torch.softmax(softmax_temp * QK, dim=-1))
        V_new = local_weighted_average(A, values)
        # Return to the original NLHD layout.
        return V_new.permute(0, 2, 1, 3).contiguous()
# Register the attention implementation so that it becomes available in our
# builders under the key "local". Each tuple declares a constructor
# parameter and its accepted spec (required Int, optional Float, etc.).
AttentionRegistry.register(
    "local", LocalAttention,
    [
        ("local_context", Int),
        ("softmax_temp", Optional(Float)),
        ("attention_dropout", Optional(Float, 0.1)),
        ("event_dispatcher", Optional(EventDispatcherInstance, ""))
    ]
)
| 38.010309
| 79
| 0.635476
|
4a10c4b902df028cdc6778cd5f4a39f7ff67add0
| 1,942
|
py
|
Python
|
IOT/RaspberryPi/watch.py
|
syureu/Hellog2
|
f61524ffe6f2a3836d13085e9e29e2015bba9f87
|
[
"Apache-2.0"
] | null | null | null |
IOT/RaspberryPi/watch.py
|
syureu/Hellog2
|
f61524ffe6f2a3836d13085e9e29e2015bba9f87
|
[
"Apache-2.0"
] | null | null | null |
IOT/RaspberryPi/watch.py
|
syureu/Hellog2
|
f61524ffe6f2a3836d13085e9e29e2015bba9f87
|
[
"Apache-2.0"
] | null | null | null |
import time
import os
import requests
# Import watchdog, best-effort installing it on first run. Without the
# retry imports below, a fresh install would still leave Observer and
# FileSystemEventHandler undefined and the Handler class would fail with
# NameError.
try:
    from watchdog.observers import Observer
    from watchdog.events import FileSystemEventHandler
except ModuleNotFoundError as e:
    print (e)
    os.system("pip install watchdog")
    # Retry now that the package has been installed.
    from watchdog.observers import Observer
    from watchdog.events import FileSystemEventHandler
class Handler(FileSystemEventHandler):
    """Filesystem event handler: on creation of a .mfd file, notify the
    rasp_server HTTP endpoint.
    """
    def on_created(self, event):
        print (f'event type : {event.event_type}\n'
               f'event src_path : {event.src_path}')
        if event.is_directory:
            print ("Make Directory")
        else:
            # Only react to files with the .mfd extension.
            Fname, Extension = os.path.splitext(os.path.basename(event.src_path))
            if Extension == '.mfd':
                # NOTE(review): endpoint URL is hardcoded (localhost; the LAN
                # address is left commented out) and the response is unused.
                #response = requests.get("http://192.168.137.159:8888/rasp_server/watch/")
                response = requests.get("http://127.0.0.1:8888/rasp_server/watch/")
                print ("Make MFD File")
    def on_deleted(self, event):
        print ("Delete Event")
    def on_moved(self, event):
        print (f'event type : {event.event_type}\n')
class Watcher:
    """Watch a single directory (non-recursively) with watchdog, dispatching
    events to Handler until interrupted with Ctrl-C.
    """
    def __init__(self, path):
        print ("Watching ... ")
        self.event_handler = None
        self.observer = Observer()
        self.target_directory = path
        self.currentDirectorySetting()
    def currentDirectorySetting(self):
        # Changes the process working directory to the watched path.
        print ("==================================")
        print ("Current Working Directory : ", end=" ")
        os.chdir(self.target_directory)
        print ("{cwd}".format(cwd = os.getcwd()))
        print ("==================================")
    def run(self):
        # Attach the handler and poll forever; Ctrl-C stops the observer.
        self.event_handler = Handler()
        self.observer.schedule(
            self.event_handler,
            self.target_directory,
            recursive=False
        )
        self.observer.start()
        try:
            while True:
                time.sleep(1)
        except KeyboardInterrupt as e:
            print ("Stop Watching ... ")
            # NOTE(review): observer.join() is never called after stop() --
            # confirm whether the observer thread should be joined here.
            self.observer.stop()
# Watch the Raspberry Pi hash directory for new .mfd files until Ctrl-C.
myWatcher = Watcher("/home/pi/Hash")
myWatcher.run()
| 30.34375
| 89
| 0.571061
|
4a10c5a608972d4f28a7e37b6d725af299a900e0
| 474
|
py
|
Python
|
website/migrations/0005_about_us_video.py
|
munisisazade/demo_programs
|
ec80b6f7eb0161c124d0512f5075779714726b2d
|
[
"MIT"
] | null | null | null |
website/migrations/0005_about_us_video.py
|
munisisazade/demo_programs
|
ec80b6f7eb0161c124d0512f5075779714726b2d
|
[
"MIT"
] | null | null | null |
website/migrations/0005_about_us_video.py
|
munisisazade/demo_programs
|
ec80b6f7eb0161c124d0512f5075779714726b2d
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
# Generated by Django 1.10.6 on 2017-04-13 22:59
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated migration: adds an optional `video` field to about_us.

    dependencies = [
        ('website', '0004_auto_20170414_0230'),
    ]
    operations = [
        migrations.AddField(
            model_name='about_us',
            name='video',
            # Optional video reference (URL or identifier); blank/null allowed.
            field=models.CharField(blank=True, max_length=255, null=True),
        ),
    ]
| 22.571429
| 74
| 0.622363
|
4a10c5ca70a1c93a79e13deb6132b6702bb8053a
| 14,191
|
py
|
Python
|
feature_examples/pytorch/octconv/octconv_example.py
|
Paperspace/tutorials
|
8e20ffb687080c44e75dabea594d2b57acc53713
|
[
"MIT"
] | null | null | null |
feature_examples/pytorch/octconv/octconv_example.py
|
Paperspace/tutorials
|
8e20ffb687080c44e75dabea594d2b57acc53713
|
[
"MIT"
] | null | null | null |
feature_examples/pytorch/octconv/octconv_example.py
|
Paperspace/tutorials
|
8e20ffb687080c44e75dabea594d2b57acc53713
|
[
"MIT"
] | 1
|
2022-02-25T12:07:16.000Z
|
2022-02-25T12:07:16.000Z
|
# Copyright (c) 2021 Graphcore Ltd. All rights reserved.
import argparse
from tqdm import tqdm
import os
import sys
import json
import torch
import torch.nn as nn
import torchvision
import poptorch
import popart
import torch.optim as optim
import octconv
def cifar10(data_dir, train=True):
    """
    Get the normalized CIFAR-10 dataset
    """
    # Per-channel statistics of the CIFAR-10 training set.
    channel_mean = (0.4914, 0.4822, 0.4465)
    channel_std = (0.2470, 0.2435, 0.2616)
    preprocess = torchvision.transforms.Compose([
        torchvision.transforms.ToTensor(),
        torchvision.transforms.Normalize(channel_mean, channel_std),
    ])
    return torchvision.datasets.CIFAR10(data_dir,
                                        train=train,
                                        download=True,
                                        transform=preprocess)
def createConvBlock(in_channels, out_channels):
    """
    Creates a conv --> batchnorm --> relu --> maxpool block
    """
    return nn.Sequential(
        # 3x3 convolution with padding 1 preserves spatial size; bias is
        # omitted because the following batchnorm supplies the shift.
        nn.Conv2d(in_channels, out_channels, kernel_size=3, padding=1,
                  bias=False),
        nn.BatchNorm2d(out_channels),
        nn.ReLU(),
        nn.MaxPool2d(2),
    )
def applyMultiConv(module):
    """
    Applies a poptorch.MultiConv to a module
    Any data-independent convolutions in the module will be executed in parallel
    on the IPU using the PopLibs multi-convolution implementation.
    """
    # Capture the original bound forward so the wrapper can delegate to it.
    forward_impl = module.forward
    def forwardWithMultiConv(*args, **kwargs):
        # Running the original forward inside the MultiConv context lets
        # PopTorch fuse the independent convolutions it contains.
        with poptorch.MultiConv():
            return forward_impl(*args, **kwargs)
    # Monkey-patch this instance (not the class) with the wrapped forward.
    module.forward = forwardWithMultiConv
class OctConvBlock(nn.Module):
    def __init__(self, in_channels, out_channels, alpha, use_multi=True):
        """
        Module containing an octave convolution --> batchnorm --> relu --> maxpool
        Uses the Octave convolution as described in the paper
        "Drop an Octave: Reducing Spatial Redundancy in Convolutional Neural Networks with Octave Convolution"
        https://arxiv.org/pdf/1904.05049.pdf
        This module ensures that both the high and low frequency feature outputs
        (when both are present) have the same operations applied to them.
        """
        super().__init__()
        self.octconv = octconv.OctConv2d(in_channels,
                                         out_channels,
                                         kernel_size=3,
                                         padding=1,
                                         alpha=alpha)
        if use_multi:
            # Wrap the octave conv's forward in a poptorch.MultiConv context.
            applyMultiConv(self.octconv)
        norm_high = nn.BatchNorm2d(self.octconv.out_channels["high"])
        relu = nn.ReLU()
        pool = nn.MaxPool2d(2)
        self.high_seq = nn.Sequential(norm_high, relu, pool)
        # A low-frequency output path exists only when alpha_out > 0.
        self.has_low = self.octconv.alpha_out > 0.
        if self.has_low:
            norm_low = nn.BatchNorm2d(self.octconv.out_channels["low"])
            # NOTE: relu and pool instances are shared between high_seq and
            # low_seq; both are stateless so sharing is safe.
            self.low_seq = nn.Sequential(norm_low, relu, pool)
    def forward(self, input):
        out = self.octconv(input)
        if self.has_low:
            # Propagate both high and low frequency features
            y_high, y_low = out
            return self.high_seq(y_high), self.low_seq(y_low)
        else:
            return self.high_seq(out)
class ClassificationModel(nn.Module):
    def __init__(self, conv_mode="vanilla", alpha=0.5, expansion=1):
        """
        CNN model for image classification tasks.
        conv_mode: Selects the convolution implementation used in the model:
            * "vanilla": Uses the standard torch.nn.Conv2d
            * "octave": Uses octconv.OctConv2d
            * "multi-octave: Uses poptorch.MultiConv to accelerate "octave"
        alpha: Ratio of low-frequency features used in the octave convolutions.
        expansion: Factor for parametrizing the width of the model.
        """
        super().__init__()
        assert isinstance(expansion, int) and expansion > 0, \
            f"Invalid expansion \"{expansion}\". Must be a positive integer."
        self.num_channels = 16 * expansion
        if conv_mode == "vanilla":
            self._makeVanilla()
        elif conv_mode == "octave":
            self._makeOctave(alpha, use_multi=False)
        elif conv_mode == "multi-octave":
            self._makeOctave(alpha, use_multi=True)
        else:
            raise AssertionError((f"Invalid conv_mode=\"{conv_mode}\"."
                                  "Must be vanilla, octave, or multi-octave"))
        # Three 2x2 pools reduce 32x32 CIFAR inputs to 4x4 feature maps.
        self.fc = nn.Linear(self.num_channels * 4 * 4, 10)
        self.log_softmax = nn.LogSoftmax(dim=1)
        self.loss = nn.NLLLoss()
    def _makeVanilla(self):
        # Three standard conv blocks: width doubles then returns to base.
        self.convlayers = nn.Sequential(
            createConvBlock(3, self.num_channels),
            createConvBlock(self.num_channels, self.num_channels * 2),
            createConvBlock(self.num_channels * 2, self.num_channels))
    def _makeOctave(self, alpha, use_multi):
        # First block ramps alpha up from 0, last block ramps it back to 0 so
        # the network's input and output are purely high-frequency.
        self.convlayers = nn.Sequential(
            OctConvBlock(3, self.num_channels, (0., alpha), use_multi),
            OctConvBlock(self.num_channels, self.num_channels * 2, alpha,
                         use_multi),
            OctConvBlock(self.num_channels * 2, self.num_channels, (alpha, 0.),
                         use_multi))
    def forward(self, x, labels=None):
        out = self.convlayers(x)
        out = torch.flatten(out, start_dim=1)
        out = self.fc(out)
        out = self.log_softmax(out)
        if labels is None:
            # Inference model, just return the prediction
            return out
        else:
            # Training model, calculate the loss and return it along with the prediction
            loss = self.loss(out, labels)
            return out, loss
def setupOptions(args, train=True):
    """
    Setup poptorch options for either training or inference runs.
    """
    opts = poptorch.Options().deviceIterations(args.batches_per_step)
    if args.cache_dir:
        # Separate caches for training/inference to prevent overwriting.
        prefix = args.conv_mode
        suffix = "-train" if train else "-inference"
        cache = args.cache_dir + f"/{prefix}{suffix}"
        opts.enableExecutableCaching(cache)
    if args.profile_dir:
        # Enable profiling if supported
        assert not args.cache_dir, "Profiling is not supported with executable caching"
        engine_opts = {
            "autoReport.all": "true",
            "autoReport.directory": args.profile_dir,
            "profiler.format": "v3"
        }
        # Profiling is configured through the Poplar engine environment variable.
        os.environ["POPLAR_ENGINE_OPTIONS"] = json.dumps(engine_opts)
        # Use synthetic data when profiling
        opts.enableSyntheticData(True)
    return opts
def accuracy(predictions, labels):
    """
    Evaluate accuracy from model predictions against ground truth labels.
    """
    predicted_classes = torch.argmax(predictions, 1)
    # Only the trailing labels have matching predictions: during training not
    # every sample's prediction is returned, so align labels from the end.
    trailing_labels = labels[-predictions.size()[0]:]
    num_correct = torch.sum(torch.eq(predicted_classes, trailing_labels)).item()
    return num_correct / trailing_labels.size()[0] * 100.0
def setupTraining(model, args):
    """
    Setup a training run using the CIFAR-10 training dataset.
    Uses the poptorch.DataLoader so that each training iteration executed on the
    IPU will incorporate:
        * (mini-)batch size
        * device iterations
        * replica factor
        * gradient accumulation factor
    Using poptorch.DataLoaderMode.Async allows loading the dataset on a separate
    thread. This reduces the host/IPU communication overhead by using the time
    that the IPU is running to load the next batch on the CPU.

    Returns the compiled poptorch training model and its data loader.
    """
    opts = setupOptions(args, train=True)
    optimizer = optim.SGD(model.parameters(), lr=args.lr)
    model.train()  # Switch the model to training mode
    training_model = poptorch.trainingModel(model, opts, optimizer)
    dataset = cifar10(args.data_dir, train=True)
    loader = poptorch.DataLoader(opts,
                                 dataset,
                                 batch_size=args.batch_size,
                                 shuffle=True,
                                 drop_last=True,
                                 num_workers=8,
                                 mode=poptorch.DataLoaderMode.Async)
    return training_model, loader
def train(model, loader, num_epochs):
    """
    Train the model on the IPU.

    model: a compiled poptorch training model returning (predictions, losses).
    loader: poptorch DataLoader yielding (data, labels) batches.
    num_epochs: number of full passes over the loader.
    """
    num_batches = len(loader)
    for epoch in range(1, num_epochs + 1):
        print(f"Epoch {epoch}/{num_epochs}")
        bar = tqdm(loader, total=num_batches)
        for data, labels in bar:
            predictions, losses = model(data, labels)
            # Metrics are computed on the host; no gradients needed.
            with torch.no_grad():
                mean_loss = torch.mean(losses).item()
                acc = accuracy(predictions, labels)
            bar.set_description("Loss:{:0.4f} | Accuracy:{:0.2f}%".format(
                mean_loss, acc))
def setupInference(model, args):
    """
    Setup an inference run using the CIFAR-10 test dataset.
    Uses the poptorch.DataLoader so that each iteration executed on the
    IPU will incorporate:
        * (mini-)batch size
        * device iterations
        * replica factor
        * gradient accumulation factor
    Applying the poptorch.AsynchronousDataAccessor allows loading the dataset on
    a separate thread. This reduces the host/IPU communication overhead by
    using the time that the IPU is running to load the next batch on the CPU.

    Returns the compiled poptorch inference model and its data loader.
    """
    opts = setupOptions(args, train=False)
    model.eval()  # Switch the model to inference mode
    inference_model = poptorch.inferenceModel(model, opts)
    dataset = cifar10(args.data_dir, train=False)
    loader = poptorch.DataLoader(opts,
                                 dataset,
                                 batch_size=args.test_batch_size,
                                 shuffle=True,
                                 drop_last=True,
                                 num_workers=8)
    loader = poptorch.AsynchronousDataAccessor(loader)
    return inference_model, loader
def test(inference_model, loader):
    """
    Test the model on the IPU.

    Averages the per-batch accuracy over the whole loader and prints it.
    """
    num_batches = len(loader)
    sum_acc = 0.0
    with torch.no_grad():
        for data, labels in tqdm(loader, total=num_batches):
            output = inference_model(data)
            sum_acc += accuracy(output, labels)
    print("Accuracy on test set: {:0.2f}%".format(sum_acc / num_batches))
def profile(model, args):
    """
    Profile a single training iteration on the IPU using synthetic data
    """
    # NOTE(review): setupOptions is called with its default train=True here;
    # confirm that is intended for profiling runs.
    opts = setupOptions(args)
    optimizer = optim.SGD(model.parameters(), lr=args.lr)
    model.train()  # Switch the model to training mode
    training_model = poptorch.trainingModel(model, opts, optimizer)
    # Generate a random dataset for profiling
    device_batch_size = args.batch_size * args.batches_per_step
    torch.manual_seed(0)
    data = torch.randn(device_batch_size, 3, 32, 32)
    labels = torch.randint(0, 10, (device_batch_size,))
    # Run one iteration; outputs are discarded — only the profile matters.
    _, _ = training_model(data, labels)
def parseArgs():
    """
    Parse command line arguments

    Returns an argparse.Namespace with model, data, and run configuration.
    """
    parser = argparse.ArgumentParser(
        description="Octave Convolution in PopTorch")
    parser.add_argument("--conv-mode",
                        choices=["vanilla", "octave", "multi-octave"],
                        default="vanilla",
                        help="Convolution implementation used in the classification model (default: vanilla)")
    parser.add_argument(
        "--alpha",
        type=float,
        default=0.5,
        help="Ratio of low-frequency features used in octave convolutions (default: 0.5)")
    parser.add_argument("--batch-size",
                        type=int,
                        default=8,
                        help="batch size for training (default: 8)")
    parser.add_argument("--batches-per-step",
                        type=int,
                        default=50,
                        help="device iteration (default:50)")
    parser.add_argument("--test-batch-size",
                        type=int,
                        default=80,
                        help="batch size for testing (default: 80)")
    parser.add_argument("--epochs",
                        type=int,
                        default=10,
                        help="number of epochs to train (default: 10)")
    parser.add_argument("--lr",
                        type=float,
                        default=0.05,
                        help="learning rate (default: 0.05)")
    parser.add_argument(
        "--profile-dir",
        type=str,
        help="Perform a single iteration of training for profiling and place in specified folder."
    )
    parser.add_argument(
        "--cache-dir",
        type=str,
        help="Enable executable caching in the specified folder")
    parser.add_argument(
        "--data-dir",
        type=str,
        default="~/.torch/datasets",
        help="Location to use for loading the CIFAR-10 dataset from.")
    parser.add_argument("--expansion",
                        type=int,
                        default=1,
                        help="Expansion factor for tuning model width.")
    return parser.parse_args()
if __name__ == "__main__":
    # Create the model from command line args
    args = parseArgs()
    model = ClassificationModel(conv_mode=args.conv_mode,
                                alpha=args.alpha,
                                expansion=args.expansion)
    if args.profile_dir:
        # Profiling mode: run one synthetic iteration and exit.
        profile(model, args)
        sys.exit(0)
    # Train the model
    training_model, train_loader = setupTraining(model, args)
    train(training_model, train_loader, args.epochs)
    # Update the weights in model by copying from the training IPU. This updates (model.parameters())
    training_model.copyWeightsToHost()
    # Evaluate the trained model
    inference_model, test_loader = setupInference(model, args)
    test(inference_model, test_loader)
| 34.360775
| 155
| 0.599817
|
4a10c6a238c303fca6befe8bef7a66675db39977
| 29,361
|
py
|
Python
|
chb/simulation/SimulationState.py
|
sipma/CodeHawk-Binary
|
3d4a0c2958266571f8f660cb48f29339b253c07f
|
[
"MIT"
] | null | null | null |
chb/simulation/SimulationState.py
|
sipma/CodeHawk-Binary
|
3d4a0c2958266571f8f660cb48f29339b253c07f
|
[
"MIT"
] | null | null | null |
chb/simulation/SimulationState.py
|
sipma/CodeHawk-Binary
|
3d4a0c2958266571f8f660cb48f29339b253c07f
|
[
"MIT"
] | null | null | null |
# ------------------------------------------------------------------------------
# CodeHawk Binary Analyzer
# Author: Henny Sipma
# ------------------------------------------------------------------------------
# The MIT License (MIT)
#
# Copyright (c) 2016-2020 Kestrel Technology LLC
# Copyright (c) 2020 Henny Sipma
# Copyright (c) 2021 Aarno Labs LLC
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# ------------------------------------------------------------------------------
"""Top-level state representation of a simulation run.
A simulation run includes one top-level state that represents all shared entities,
such as stack, heap, and registers, and substates for the main executable (can
be a library) and (optionally) for each of the dynamically linked libraries that
contains static global memory, visible only to the submodule. Dynamically
linked libraries can be optionally included; alternatively, library functions
may be stubbed out. Global addresses include the name of the module in which
address space they are mapped. Each submodule has its own resolution of linked
library functions.
"""
from abc import ABC, abstractmethod
from typing import (
cast, Dict, List, Mapping, Optional, Sequence, TYPE_CHECKING, Union)
from chb.app.Operand import Operand
from chb.simulation.ELFSimGlobalMemory import ELFSimGlobalMemory
from chb.simulation.SimBaseMemory import SimBaseMemory
import chb.simulation.SimFileUtil as SFU
from chb.simulation.SimLocation import (
SimLocation, SimRegister, SimMemoryLocation)
from chb.simulation.SimMappedMemory import SimMappedMemory
from chb.simulation.SimProgramCounter import SimProgramCounter
from chb.simulation.SimSharedMemory import SimSharedMemory
from chb.simulation.SimMemory import SimMemory, SimStackMemory
from chb.simulation.SimStub import SimStub
from chb.simulation.SimSupport import SimSupport
import chb.simulation.SimSymbolicValue as SSV
import chb.simulation.SimValue as SV
import chb.simulation.SimUtil as SU
import chb.util.fileutil as UF
if TYPE_CHECKING:
from chb.app.AppAccess import AppAccess
from chb.elfformat.ELFSection import ELFSymbolTable
# Library functions for which the simulator's Python stub is preferred over
# the real exported symbol in a dynamically linked library; SimModule.is_exported
# reports False for these names so the stub implementation is used instead.
prefer_stubs = [
    "access",
    "cdbg_printf",
    "chdir",
    "close",
    "fflush",
    "get_current_dir_name",
    "getenv",
    "malloc",
    "mallopt",
    "memcpy",
    "memset",
    "mmap",
    "open",
    "printf",
    "putenv",
    "realloc",
    "setsid",
    "shmat",
    "shmget",
    "sigaction",
    "strcmp",
    "strcpy",
    "strlen",
    "strncpy",
    "strstr",
    "tcsetattr",
    "unsetenv"]
class SimModule:
    """An executable or dynamically linked library participating in a simulation.

    Wraps an AppAccess instance together with the address range [base, max_addr]
    at which the module is mapped, and lazily computed import/export tables.
    """

    def __init__(
            self,
            name: str,
            app: "AppAccess",
            base: str,
            max_addr: str) -> None:
        self._name = name
        self._app = app
        self._base = base  # base address in hex
        self._imports: Dict[int, str] = {}   # lazily filled; address -> symbol
        self._exports: Dict[str, int] = {}   # lazily filled; symbol -> address
        self._max_addr = max_addr

    @property
    def name(self) -> str:
        return self._name

    @property
    def app(self) -> "AppAccess":
        return self._app

    @property
    def base(self) -> str:
        """Return base address in hex."""
        return self._base

    @property
    def base_i(self) -> int:
        """Return base address as integer."""
        return int(self.base, 16)

    @property
    def max_addr(self) -> str:
        """Return maximum address in adress space."""
        return self._max_addr

    @property
    def max_addr_i(self) -> int:
        return int(self.max_addr, 16)

    @property
    def imports(self) -> Dict[int, str]:
        """Map from stub address to library-function name (computed once)."""
        if len(self._imports) == 0:
            libstubs = self.app.functionsdata.library_stubs()
            for (x, s) in libstubs.items():
                self._imports[int(x, 16)] = s
        return self._imports

    @property
    def exports(self) -> Dict[str, int]:
        """Map from exported symbol name to its address (computed once)."""
        if len(self._exports) == 0:
            symtab = cast("ELFSymbolTable", self.app.header.get_dynamic_symbol_table())
            for (sid, sym) in symtab.symbols.items():
                if sym.is_exported:
                    self._exports[sym.st_name] = int(sym.value, 16)
                    # sometimes symbols are available only with prefix __libc_
                    if sym.st_name.startswith("__libc_"):
                        self._exports[sym.st_name[7:]] = int(sym.value, 16)
        return self._exports

    def is_imported(self, addr: int) -> bool:
        return addr in self.imports

    def is_exported(self, sym: str) -> bool:
        # Symbols in prefer_stubs are deliberately reported as not exported so
        # the simulator's stub implementation takes precedence.
        if sym in prefer_stubs:
            return False
        else:
            return sym in self.exports

    def import_symbol(self, addr: int) -> str:
        """Return the library-function name imported at addr, or raise CHBError."""
        if self.is_imported(addr):
            return self.imports[addr]
        else:
            raise UF.CHBError(
                "Address " + hex(addr) + " not found in imports of " + self.name)

    def export_address(self, sym: str) -> int:
        """Return the address of exported symbol sym, or raise CHBError."""
        if self.is_exported(sym):
            # NOTE(review): debug print left in; consider logging instead.
            print("Symbol " + sym + " linked to " + self.name + ":" + hex(self.exports[sym]))
            return self.exports[sym]
        else:
            raise UF.CHBError(
                "Symbol " + sym + " not found in exports of " + self.name)

    def has_function_name(self, addr: int):
        return self.app.has_function_name(hex(addr))

    def has_function(self, addr: int):
        return self.app.has_function(hex(addr))

    def function_name(self, addr: int):
        if self.has_function_name(addr):
            return self.app.function_name(hex(addr))
        else:
            raise UF.CHBError(
                "No function name associated with " + hex(addr))

    def has_address(self, addr: int):
        # True when addr falls within this module's mapped range (inclusive).
        return addr >= self.base_i and addr <= self.max_addr_i
class ModuleSimulationState:
    """Per-module simulation state: the module's private global memory.

    Each module (main executable or dynamic library) owns an ELF-backed global
    memory region visible only within that module.
    """

    def __init__(
            self,
            simstate: "SimulationState",
            module: SimModule) -> None:
        self._module = module
        self._simstate = simstate
        # Global memory initialized from the module's ELF header.
        self._globalmem = ELFSimGlobalMemory(self, self.module.app.header)

    @property
    def module(self) -> SimModule:
        return self._module

    @property
    def modulename(self) -> str:
        return self.module.name

    @property
    def simstate(self) -> "SimulationState":
        return self._simstate

    @property
    def globalmem(self) -> ELFSimGlobalMemory:
        return self._globalmem

    def is_literal_address(self, iaddr: str, addrvalue: int) -> bool:
        # Any value above the module base is treated as a potential address.
        return addrvalue > self.module.base_i

    def resolve_literal_address(
            self, iaddr: str, addrvalue: int) -> SSV.SimGlobalAddress:
        """Return a global address in this module, or an undefined address if
        addrvalue lies outside the module's mapped range."""
        if self.module.has_address(addrvalue):
            return SSV.mk_global_address(addrvalue, modulename=self.modulename)
        else:
            return SSV.mk_undefined_global_address(self.modulename)

    def set_memval(
            self,
            iaddr: str,
            address: SSV.SimGlobalAddress,
            srcval: SV.SimValue) -> None:
        """Write srcval to this module's global memory at address."""
        self.globalmem.set(iaddr, address, srcval)

    def memval(
            self,
            iaddr: str,
            address: SSV.SimGlobalAddress,
            size: int,
            signextend: bool = False) -> SV.SimValue:
        """Read size bytes from this module's global memory.

        NOTE(review): the signextend flag is accepted but not forwarded to
        globalmem.get — confirm whether sign extension is handled elsewhere.
        """
        return self.globalmem.get(iaddr, address, size)
class TraversalEdge:
    """Counts traversals of a (source fn, callsite, destination fn) call edge."""

    def __init__(self, src: str, callsite: str, dst: str) -> None:
        self._src = src
        self._callsite = callsite
        self._dst = dst
        self._traversals: int = 0

    @property
    def src(self) -> str:
        return self._src

    @property
    def callsite(self) -> str:
        return self._callsite

    @property
    def dst(self) -> str:
        return self._dst

    @property
    def traversals(self) -> int:
        return self._traversals

    def traverse(self) -> None:
        """Record one traversal of this edge."""
        self._traversals += 1

    def __str__(self) -> str:
        columns = [
            self.src.ljust(8),
            self.callsite.ljust(8),
            self.dst.ljust(8),
            str(self.traversals).rjust(4),
        ]
        return "  " + "  ".join(columns)
class SimulationInitializer:
    """Hook object invoked by SimulationState at construction time; subclasses
    override do_initialization to seed registers/memory for a run."""
    def __init__(self) -> None:
        pass
    def do_initialization(self, simstate: "SimulationState") -> None:
        # Default: no initialization performed.
        pass
class SimulationDataDisplay:
    """Hook object used by SimulationState.__str__ to render register contents;
    subclasses override display_registers for architecture-specific output."""
    def __init__(self) -> None:
        pass
    def display_registers(self, simstate: "SimulationState") -> str:
        # Default display shows nothing useful.
        return "none"
class SimulationTrace:
    """Accumulates the instruction trace, call-edge traversal counts, and
    application calls observed during a simulation run."""

    def __init__(self) -> None:
        self._trace: List[str] = []
        self._delayed_trace: List[str] = []
        self._traversaledges: Dict[str, Dict[str, Dict[str, TraversalEdge]]] = {}
        self._appcalls: List[str] = []

    @property
    def trace(self) -> List[str]:
        return self._trace

    @property
    def delayed_trace(self) -> List[str]:
        return self._delayed_trace

    def reset_delayed_trace(self) -> None:
        self._delayed_trace = []

    def add(self, s: str) -> None:
        self._trace.append(s)

    def add_delayed(self, s: str) -> None:
        self._delayed_trace.append(s)

    def include_delayed(self) -> None:
        """Flush any delayed entries into the main trace."""
        if len(self.delayed_trace) > 0:
            self._trace.extend(self.delayed_trace)
            self.reset_delayed_trace()

    def add_appcall(self, s: str) -> None:
        self._appcalls.append(s)

    @property
    def traversaledges(self) -> Dict[str, Dict[str, Dict[str, TraversalEdge]]]:
        return self._traversaledges

    def traverse_edge(self, src: str, callsite: str, dst: str) -> None:
        """Record one traversal of the (src, callsite, dst) edge, creating the
        edge on first use."""
        per_callsite = self.traversaledges.setdefault(src, {})
        per_dst = per_callsite.setdefault(callsite, {})
        edge = per_dst.setdefault(dst, TraversalEdge(src, callsite, dst))
        edge.traverse()

    def traversals(self) -> str:
        """Render all edges, sorted by source, callsite, and destination."""
        rendered: List[str] = []
        for src in sorted(self.traversaledges):
            per_callsite = self.traversaledges[src]
            for callsite in sorted(per_callsite):
                per_dst = per_callsite[callsite]
                for dst in sorted(per_dst):
                    rendered.append(str(per_dst[dst]))
        return "\n".join(rendered)

    def __str__(self) -> str:
        return ("\n".join(self.trace)
                + "\n\nTraversals\n"
                + self.traversals()
                + "\n\nApplication calls\n"
                + "\n".join("  " + a for a in self._appcalls))
class SimulationState:
    def __init__(
            self,
            startaddr: str,
            mainx: SimModule,
            simprogramcounter: SimProgramCounter,
            siminitializer: SimulationInitializer = SimulationInitializer(),
            simsupport: SimSupport = SimSupport(),
            simdatadisplay: SimulationDataDisplay = SimulationDataDisplay(),
            dynlibs: Sequence[SimModule] = [],
            stubs: Mapping[str, SimStub] = {},
            bigendian: bool = False) -> None:
        """Create the shared simulation state.

        startaddr: hex address where simulation starts.
        mainx: the main executable (or library) module.
        simprogramcounter: architecture-specific program-counter handler.
        siminitializer/simsupport/simdatadisplay: customization hooks.
        dynlibs: dynamically linked libraries included in the simulation.
        stubs: library-function stubs, indexed by function name.
        bigendian: byte order used for memory accesses.
        """
        self._startaddr = startaddr
        self._mainx = mainx
        self._simprogramcounter = simprogramcounter
        self._siminitializer = siminitializer
        self._dynlibs = dynlibs
        self._simsupport = simsupport
        self._simdatadisplay = simdatadisplay
        self._stubs = stubs
        self._bigendian = bigendian
        # module states
        self._modulestates: Dict[str, ModuleSimulationState] = {}
        # registers and memory (registers are assumed to be 32 bits wide)
        self.registers: Dict[str, SV.SimValue] = {}
        self.stackmem = SimStackMemory(self)
        self.basemem: Dict[str, SimBaseMemory] = {}
        self.mappedmem: Dict[str, SimMappedMemory] = {}
        self.sharedmem: Dict[int, SimSharedMemory] = {}  # indexed by id returned by shmget
        # log
        self.fnlog: Dict[str, List[str]] = {}
        # trace
        self._trace = SimulationTrace()
        # initialization
        self.siminitializer.do_initialization(self)
        self.simsupport.do_initialization(self)
    @property
    def startaddr(self) -> str:
        # Hex address where the simulation starts.
        return self._startaddr

    @property
    def mainx(self) -> SimModule:
        # The main executable module.
        return self._mainx

    @property
    def siminitializer(self) -> SimulationInitializer:
        return self._siminitializer

    @property
    def simsupport(self) -> SimSupport:
        return self._simsupport

    @property
    def simdatadisplay(self) -> SimulationDataDisplay:
        return self._simdatadisplay

    @property
    def dynlibs(self) -> Sequence[SimModule]:
        """Return dynamically linked libraries that are included in the simulation."""
        return self._dynlibs

    @property
    def stubs(self) -> Mapping[str, SimStub]:
        # Library-function stubs, indexed by function name.
        return self._stubs

    @property
    def bigendian(self) -> bool:
        return self._bigendian

    @property
    def modulestates(self) -> Dict[str, ModuleSimulationState]:
        """Per-module states, created lazily on first access (main executable
        plus every included dynamic library)."""
        if len(self._modulestates) == 0:
            self._modulestates[self.mainx.name] = ModuleSimulationState(self, self.mainx)
            for d in self.dynlibs:
                self._modulestates[d.name] = ModuleSimulationState(self, d)
        return self._modulestates

    @property
    def trace(self) -> SimulationTrace:
        return self._trace
    # --- program counter ---

    @property
    def simprogramcounter(self) -> SimProgramCounter:
        return self._simprogramcounter

    @property
    def programcounter(self) -> SSV.SimGlobalAddress:
        # Current program counter, delegated to the architecture handler.
        return self.simprogramcounter.programcounter

    @property
    def modulename(self) -> str:
        # Name of the module currently executing.
        return self.simprogramcounter.modulename

    @property
    def modulestate(self) -> ModuleSimulationState:
        # State of the module currently executing.
        return self.modulestates[self.modulename]

    @property
    def module(self) -> SimModule:
        return self.modulestate.module

    @property
    def function_address(self) -> str:
        """Return the hex address of the current function."""
        return self.simprogramcounter.function_address

    def set_function_address(self, faddr: str) -> None:
        self.simprogramcounter.set_function_address(faddr)

    def set_programcounter(self, pc: SSV.SimGlobalAddress) -> None:
        self.simprogramcounter.set_programcounter(pc)

    def increment_programcounter(self) -> None:
        self.simprogramcounter.increment_programcounter(self)
    # --- import symbol resolution ---

    def resolve_import_symbol(self, importsym: str) -> SSV.SimGlobalAddress:
        """Return the address of importsym in the first dynamic library that
        exports it, or an undefined address if no included library does."""
        for dynlib in self.dynlibs:
            if dynlib.is_exported(importsym):
                faddr = dynlib.export_address(importsym)
                return SSV.mk_global_address(faddr, dynlib.name)
        else:
            # for/else: reached only when no library exported the symbol.
            return SSV.mk_undefined_global_address(self.modulename)

    def is_import_symbol_stubbed(self, importsym: str) -> bool:
        # Currently no import symbols are forced to their stub here.
        return False

    def has_stub(self, name: str) -> bool:
        return name in self.stubs

    def stub_functioncall(self, iaddr: str, name: str) -> None:
        """Simulate a call to library function `name` via its stub: run any
        registered intercept, execute the stub, record it in the trace, and
        set the program counter to the return address."""
        if name in self.stubs:
            if self.simsupport.has_call_intercept(name):
                intercept = self.simsupport.call_intercept(name)
                intercept.do_before(iaddr, self)
            returnaddr = self.simprogramcounter.returnaddress(iaddr, self)
            stub = self.stubs[name]
            msg = stub.simulate(iaddr, self)
            self.trace.add("  ".ljust(15) + iaddr + "  " + msg)
            self.simprogramcounter.set_programcounter(returnaddr)
        else:
            raise SU.CHBSimError(self, iaddr, "Missing stub: " + name)
    # --- simulation values ---

    def set(
            self,
            iaddr: str,
            dstop: Operand,
            srcval: SV.SimValue) -> SimLocation:
        """Assign srcval to the location denoted by operand dstop and return
        that location. Undefined literal assignments are logged."""
        size = dstop.size  # NOTE(review): currently unused — confirm intent.
        if srcval.is_literal and (not srcval.is_defined):
            self.add_logmsg(iaddr, "Assigning undefined value to " + str(dstop))
        lhs = self.lhs(iaddr, dstop)
        if lhs.is_register:
            lhs = cast(SimRegister, lhs)
            self.set_register(iaddr, lhs.register, srcval)
        elif lhs.is_memory_location:
            lhs = cast(SimMemoryLocation, lhs)
            self.set_memval(iaddr, lhs.simaddress, srcval)
        else:
            raise SU.CHBSimError(self, iaddr, "lhs not recognized: " + str(lhs))
        return lhs
    def rhs(self, iaddr: str, op: Operand, opsize: int = 4) -> SV.SimValue:
        """Evaluate operand op as a right-hand-side value of opsize bytes.

        Registers and immediates are returned directly; indirect register
        operands are dereferenced according to the kind of value currently
        held in the register (string address, symbol, address, or literal).
        """
        if op.is_register:
            return self.regval(iaddr, op.register, opsize=opsize)
        elif op.is_immediate:
            return SV.mk_simvalue(op.value, size=opsize)
        elif op.is_indirect_register:
            regval = self.regval(iaddr, op.indirect_register)
            offset = op.offset
            if not regval.is_defined:
                # Undefined base: propagate an undefined value of this size.
                return SV.mk_undefined_simvalue(opsize)
            if regval.is_string_address and opsize == 1:
                # Single-byte access into a symbolic string.
                regval = cast(SSV.SimStringAddress, regval)
                return self.rhs_string_char(iaddr, regval, offset)
            elif regval.is_symbol:
                regval = cast(SSV.SimSymbol, regval)
                return self.rhs_symbol(iaddr, regval, offset, opsize)
            elif regval.is_address:
                regval = cast(SSV.SimAddress, regval)
                return self.memval(iaddr, regval.add_offset(offset), opsize)
            elif regval.is_literal:
                # Literal base value: interpret it as a global address.
                regval = cast(SV.SimLiteralValue, regval)
                return self.rhs_literal_address(iaddr, regval.value, offset, opsize)
            else:
                raise SU.CHBSimError(
                    self,
                    iaddr,
                    "Unable to resolve indirect register operand: " + str(op))
        else:
            raise SU.CHBSimError(
                self, iaddr, "Operand " + str(op) + " not recognized in rhs")
    def rhs_string_char(
            self, iaddr: str, addr: SSV.SimStringAddress, offset: int) -> SV.SimValue:
        """Return the byte at offset within a symbolic string; the position one
        past the end yields the NUL terminator."""
        regstr = addr.stringval
        if offset == len(regstr):
            # Reading the implicit NUL terminator.
            return SV.simZerobyte
        elif offset < len(regstr):
            return SV.mk_simbytevalue(ord(regstr[offset]))
        else:
            raise SU.CHBSimError(
                self,
                iaddr,
                ("Access of string value out of bounds. String: "
                 + regstr
                 + "; offset: "
                 + str(offset)))

    def rhs_symbol(
            self,
            iaddr: str,
            sym: SSV.SimSymbol,
            offset: int,
            opsize: int) -> SV.SimValue:
        """Read opsize bytes at offset within the base-memory region named
        after the symbol, creating the region lazily on first access."""
        base = sym.name
        if base not in self.basemem:
            self.basemem[base] = SimBaseMemory(self, base)
            self.add_logmsg(iaddr, "Initialize base memory for " + base)
        addr: SSV.SimAddress = SSV.mk_base_address(base, offset=offset)
        return self.memval(iaddr, addr, opsize)

    def rhs_literal_address(
            self,
            iaddr: str,
            addrvalue: int,
            offset: int,
            opsize: int) -> SV.SimValue:
        """Interpret a literal value as a global address and read from it."""
        addr = self.resolve_literal_address(iaddr, addrvalue)
        return self.memval(iaddr, addr.add_offset(offset), opsize)

    def get_string_from_memaddr(self, iaddr: str, saddr: SSV.SimAddress) -> str:
        """Read a NUL-terminated string starting at saddr; reading stops at the
        first zero byte or at the first undefined/non-literal byte."""
        result = ""
        offset = 0
        while True:
            srcaddr = saddr.add_offset(offset)
            srcval = self.memval(iaddr, srcaddr, 1)
            if srcval.is_defined and srcval.is_literal:
                srcval = cast(SV.SimLiteralValue, srcval)
                if srcval.value == 0:
                    break
                else:
                    result += chr(srcval.value)
                    offset += 1
            else:
                break
        return result
    # --- locations ---

    def compute_indirect_address(self, iaddr: str, op: Operand) -> SSV.SimAddress:
        """Resolve an indirect register operand (base register + offset) to an
        address; a literal base value is resolved as a global address."""
        regval = self.regval(iaddr, op.indirect_register)
        if regval.is_address:
            regval = cast(SSV.SimAddress, regval)
            return regval.add_offset(op.offset)
        elif regval.is_literal:
            return self.resolve_literal_address(iaddr, regval.literal_value + op.offset)
        else:
            raise UF.CHBError("Indirect address cannot be resolved: " + str(op))

    def resolve_literal_address(
            self, iaddr: str,
            addrvalue: int) -> SSV.SimGlobalAddress:
        """Resolve a literal value to a global address: first in the current
        module's address space, then in any shared-memory segment."""
        addr = self.modulestates[self.modulename].resolve_literal_address(
            iaddr, addrvalue)
        if addr.is_defined:
            return addr
        # elif addrvalue == 2:
        #    return SFU.sim_openfile("sim_stderr", "w")
        else:
            for shmid in self.sharedmem:
                if self.sharedmem[shmid].has_address(addrvalue):
                    addr = SSV.mk_global_address(addrvalue, "shared:" + str(shmid))
                    return addr
            else:
                # for/else: no shared segment contained the value.
                raise SU.CHBSimError(
                    self,
                    iaddr,
                    ("Unable to resolve address: "
                     + hex(addrvalue)
                     + " in "
                     + self.modulename))

    def lhs(self, iaddr: str, op: Operand) -> SimLocation:
        """Resolve operand op to an assignable location (register or memory)."""
        if op.is_register:
            return SimRegister(op.register)
        elif op.is_indirect_register:
            addr = self.compute_indirect_address(iaddr, op)
            return SimMemoryLocation(addr)
        elif op.is_immediate:
            # An immediate used as an lhs denotes an absolute memory address.
            addr = self.resolve_literal_address(iaddr, op.value)
            return SimMemoryLocation(addr)
        else:
            raise SU.CHBSimError(
                self, iaddr, "Unable to determine location for " + str(op))
    # --- registers ---

    def set_register(self, iaddr: str, reg: str, srcval: SV.SimValue) -> None:
        """Assign srcval to register reg."""
        self.registers[reg] = srcval

    def regval(self, iaddr: str, reg: str, opsize: int = 4) -> SV.SimValue:
        """Return the value of register reg, narrowed to opsize bytes.

        Registers are 32 bits wide; opsize 1 and 2 extract the low byte/word
        of a defined literal value. Unknown registers yield an undefined value
        (logged) rather than raising.
        """
        if reg in self.registers:
            v = self.registers[reg]
            if opsize == 4:
                return v
            elif opsize == 1:
                if v.is_literal and v.is_defined:
                    v = cast(SV.SimDoubleWordValue, v)
                    return v.simbyte1
                else:
                    return SV.simUndefinedByte
            elif opsize == 2:
                if v.is_literal and v.is_defined:
                    v = cast(SV.SimDoubleWordValue, v)
                    return v.lowword
                else:
                    return SV.simUndefinedWord
            else:
                raise SU.CHBSimError(
                    self,
                    iaddr,
                    "regval with opsize: "
                    + str(opsize)
                    + " not recognized")
        else:
            self.add_logmsg(iaddr, "no value for register: " + reg)
            return SV.mk_undefined_simvalue(opsize)
# --- memory ---
def set_memval(
self,
iaddr: str,
address: SSV.SimAddress,
srcval: SV.SimValue) -> None:
try:
if address.is_global_address:
address = cast(SSV.SimGlobalAddress, address)
name = address.modulename
if name in self.modulestates:
self.modulestates[name].set_memval(iaddr, address, srcval)
elif name.startswith("shared"):
shmid = int(name[7:])
if shmid in self.sharedmem:
self.sharedmem[shmid].set(iaddr, address, srcval)
else:
raise SU.CHBSimError(
self,
iaddr,
("Shared memory with identifier "
+ str(shmid)
+ " not found"))
else:
raise SU.CHBSimError(
self,
iaddr,
("Module name not recognized: "
+ name
+ " ("
+ ", ".join(name for name in self.modulestates)
+ ")"))
elif address.is_stack_address:
self.stackmem.set(iaddr, address, srcval)
elif address.is_base_address:
address = cast(SSV.SimBaseAddress, address)
base = address.base
if base not in self.basemem:
self.basemem[base] = SimBaseMemory(
self, base, buffersize=address.buffersize)
self.add_logmsg(iaddr, "initialize base memory for " + base)
self.basemem[base].set(iaddr, address, srcval)
else:
raise SU.CHBSimError(
self, iaddr, "Address not recognized: " + str(address))
except SU.CHBSimError as e:
self.add_logmsg(iaddr, "error in set_memval: " + str(e))
raise SU.CHBSimError(
self, iaddr, "set_memval: " + str(address) + ": " + str(e))
    def memval(
            self,
            iaddr: str,
            address: SSV.SimAddress,
            size: int,
            signextend: bool = False) -> SV.SimValue:
        """Read size bytes from the memory region addressed by address.

        Mirrors the dispatch in set_memval: global addresses resolve through
        the owning module state or a "shared:<id>" shared-memory segment;
        stack addresses read from this state's stack memory; base addresses
        read from a previously created base-memory region.

        On any resolution failure the error is logged and an undefined value
        of the requested size is returned instead of raising.

        NOTE(review): the signextend parameter is unused in this body --
        confirm whether sub-word reads are expected to sign-extend.
        """
        try:
            if address.is_global_address:
                address = cast(SSV.SimGlobalAddress, address)
                name = address.modulename
                if name in self.modulestates:
                    return self.modulestates[name].memval(iaddr, address, size)
                # "shared:<id>": route the read to the shared-memory segment
                elif name.startswith("shared:"):
                    shmid = int(name[7:])
                    if shmid in self.sharedmem:
                        return self.sharedmem[shmid].get(iaddr, address, size)
                    else:
                        raise SU.CHBSimError(
                            self,
                            iaddr,
                            ("Shared memory with identifier "
                             + str(shmid)
                             + " not found"))
                else:
                    raise SU.CHBSimError(
                        self,
                        iaddr,
                        ("Module name not recognized: "
                         + name
                         + " ("
                         + ", ".join(name for name in self.modulestates)
                         + ")"))
            elif address.is_stack_address:
                return self.stackmem.get(iaddr, address, size)
            elif address.is_base_address:
                address = cast(SSV.SimBaseAddress, address)
                if address.base in self.basemem:
                    return self.basemem[address.base].get(iaddr, address, size)
                else:
                    raise SU.CHBSimError(
                        self,
                        iaddr,
                        ("Base of base address: "
                         + address.base
                         + " not found in state's basemem"))
            else:
                raise SU.CHBSimError(
                    self, iaddr, "Address not recognized: " + str(address))
        except SU.CHBSimError as e:
            # degrade gracefully: log and return an undefined value
            self.add_logmsg(
                iaddr,
                ("no value for memory address: "
                 + str(address)
                 + " ("
                 + str(e)
                 + ")"))
            return SV.mk_undefined_simvalue(size)
# --- logging ---
def add_logmsg(self, key: str, msg: str) -> None:
self.fnlog.setdefault(key, [])
self.fnlog[key].append(msg)
    def __str__(self) -> str:
        """Render a human-readable dump: program counter, registers, stack
        memory, and any accumulated per-address log messages."""
        lines: List[str] = []
        lines.append("")
        lines.append(str(self.simprogramcounter))
        lines.append("")
        # registers
        lines.append(self.simdatadisplay.display_registers(self))
        lines.append("")
        # stack
        lines.append("-" * 80)
        lines.append("Stack memory:")
        lines.append("-" * 80)
        lines.append(str(self.stackmem))
        lines.append("=" * 80)
        lines.append("")
        # log messages (omitted entirely when nothing was logged)
        if self.fnlog:
            lines.append('-' * 80)
            lines.append('Log messages:')
            lines.append('-' * 80)
            for a in sorted(self.fnlog):
                # one header per address with its message count
                lines.append(' ' + str(a) + ' (' + str(len(self.fnlog[a])) + ')')
                for x in self.fnlog[a]:
                    lines.append(' ' + str(x))
            lines.append('=' * 80)
        return "\n".join(lines)
| 33.904157
| 93
| 0.565274
|
4a10c6af6f00d1fbef221397bb0f2553d1e966c5
| 4,251
|
py
|
Python
|
server/catgenome/demo_scripts/reg_file.py
|
ismael-dev88/NGB
|
7d5e74489968d3ec6edfbf65e1599b982d49141c
|
[
"MIT"
] | 1
|
2017-08-22T09:42:22.000Z
|
2017-08-22T09:42:22.000Z
|
server/catgenome/demo_scripts/reg_file.py
|
react-dev26/NGB-master
|
b3646f92d7d9802d5bb83ac4b7a59031b2fe71a7
|
[
"MIT"
] | null | null | null |
server/catgenome/demo_scripts/reg_file.py
|
react-dev26/NGB-master
|
b3646f92d7d9802d5bb83ac4b7a59031b2fe71a7
|
[
"MIT"
] | null | null | null |
import requests
import json
import argparse
import sys
import os.path
import urlparse
import urllib
# REST endpoint template; {0} is the file-type-specific registration path.
base_url = "http://localhost:8080/catgenome/restapi/{0}/register"
# Maps supported file extensions (without leading dot, .gz already stripped
# by the determin*Type helpers) to NGB resource types used in the URL.
file_types = {
    'vcf': "vcf",
    'vcf.gz': "vcf",
    'gff': "gene",
    'gtf': "gene",
    'gff.gz': "gene",
    'gtf.gz': "gene",
    'gff3': "gene",
    'gff3.gz': "gene",
    'bam':"bam",
    'seg':'seg',
    'seg.gz':'seg',
    'bw':'wig',
    'bed':'bed',
    'bed.gz':"bed",
    'vg': "vg",
    'maf': "maf",
    'maf.gz': "maf"
}
def checkBamIndex(fileType, args):
    """Validate that a local BAM file comes with an existing .bai index.

    No-op for non-BAM types.  Prints an error and exits with status 1 when
    the index is missing or has the wrong extension.
    """
    if fileType == 'bam':
        # BAM files cannot be registered without an accompanying index
        if args.index is None or not os.path.isfile(args.index):
            print("Error: Index file is required for BAM files")
            sys.exit(1)
        indexExtension = os.path.splitext(args.index)[1][1:]
        if indexExtension != 'bai':
            print("Error: Unsupported BAM index extension: required BAI file, found {0}".format(indexExtension))
            sys.exit(1)
def determinFileType(args):
    """Determine the extension (with .gz stripped) of a local file.

    Exits with status 1 when the file (or its index, if given) does not
    exist, or when the extension is not a supported file_types key.
    Returns the bare extension string, e.g. "vcf" for sample.vcf.gz.
    """
    if not os.path.isfile(args.path):
        print("Error: File " + args.path + " does not exist")
        sys.exit(1)
    if args.index is not None and not os.path.isfile(args.index):
        print("Error: Index file " + args.index + " does not exist")
        sys.exit(1)
    extension = os.path.splitext(args.path)[1][1:]
    if extension == 'gz':
        # strip the .gz suffix and inspect the inner extension (vcf.gz -> vcf)
        extension = os.path.splitext(args.path[:-3])[1][1:]
    if extension not in file_types:
        print("Error: File extension '{0}' is not supported".format(extension))
        sys.exit(1)
    return extension
def determinUrlType(args):
    """Determine the extension (with .gz stripped) from a URL's path.

    Exits with status 1 when the extension is not a supported file_types
    key.  Returns the bare extension string.
    """
    path = urlparse.urlparse(args.path).path
    extension = os.path.splitext(path)[1][1:]
    if extension == 'gz':
        # strip the .gz suffix and inspect the inner extension
        extension = os.path.splitext(path[:-3])[1][1:]
    if extension not in file_types:
        print("Error: File extension '{0}' is not supported".format(extension))
        sys.exit(1)
    return extension
def checkBamUrlIndex(fileType, args):
    """Validate that a BAM URL comes with a .bai index URL.

    No-op for non-BAM types.  Prints an error and exits with status 1 when
    the index URL is missing or its path has the wrong extension.
    """
    if fileType == 'bam':
        if args.index is None:
            print("Error: Index file is required for BAM files")
            sys.exit(1)
        path = urlparse.urlparse(args.index).path
        indexExtension = os.path.splitext(path)[1][1:]
        if indexExtension != 'bai':
            print("Error: Unsupported BAM index extension: required BAI file, found {0}".format(indexExtension))
            sys.exit(1)
def main():
    """Parse command-line arguments and register a file with the NGB server.

    Resolves the file type (from a local path or URL), validates BAM
    indexes, builds the JSON payload and POSTs it to the type-specific
    registration endpoint.  Exits with status 1 on validation or server
    errors.
    """
    parser = argparse.ArgumentParser(description='Registers reference in NGB')
    parser.add_argument('path', metavar='PATH', help='Path to file')
    parser.add_argument('referenceId', metavar='REFERENCE_ID', type=int, help='ID of the reference, for which file is being registered')
    parser.add_argument('--name', metavar='NAME', help='Desired name for file in the system')
    parser.add_argument('--index', metavar='INDEX_PATH', help='Path to index file')
    parser.add_argument('--type', metavar='TYPE', help='Type of a registered resource: FILE, URL')
    parser.add_argument('--indexType', metavar='INDEX_TYPE', help='Type of a registered index resource: FILE, URL')
    args = parser.parse_args()
    fileType = None
    # resolve the file type and validate indexes for local files vs URLs
    if args.type is None or args.type.lower() == 'file':
        extension = determinFileType(args)
        fileType = file_types[extension]
        checkBamIndex(fileType, args)
    elif args.type.lower() == 'url':
        extension = determinUrlType(args)
        fileType = file_types[extension]
        checkBamUrlIndex(fileType, args)
    url = base_url.format(fileType)
    payload_obj = {
        'path': args.path,
        'referenceId': args.referenceId
    }
    if args.name is not None:
        payload_obj['name'] = args.name
        print("Registering file '{0}' with name '{1}', please wait...".format(args.path, args.name))
    else:
        print("Registering file '{0}', please wait...".format(args.path))
    if args.index is not None:
        payload_obj['indexPath'] = args.index
    if args.type is not None:
        payload_obj['type'] = args.type.upper()
        # the index type defaults to the resource type unless given explicitly
        if args.indexType is None:
            payload_obj['indexType'] = args.type.upper()
        else:
            payload_obj['indexType'] = args.indexType.upper()
    headers = {
        'content-type': "application/json",
        'cache-control': "no-cache"
    }
    response = requests.request("POST", url, data=json.dumps(payload_obj), headers=headers)
    resp_obj = json.loads(response.text)
    # a missing 'payload' key signals a server-side error response
    if 'payload' not in resp_obj:
        print("Error: " + response.text)
        sys.exit(1)
    payload = resp_obj['payload']
    print("Registered file '{0}' with ID '{1}'".format(payload['name'], payload['id']))
# Run only when invoked as a script (not on import).
if __name__ == "__main__":
    main()
| 28.530201
| 133
| 0.677252
|
4a10c85975d5f31fb10a2d9a2829f3c553104c4a
| 8,214
|
py
|
Python
|
registry.py
|
oleksandr-k12/ua-ownership-registry-parser
|
0110d5180e24f2ea5a7673b2dbfccd4abd615083
|
[
"MIT"
] | null | null | null |
registry.py
|
oleksandr-k12/ua-ownership-registry-parser
|
0110d5180e24f2ea5a7673b2dbfccd4abd615083
|
[
"MIT"
] | null | null | null |
registry.py
|
oleksandr-k12/ua-ownership-registry-parser
|
0110d5180e24f2ea5a7673b2dbfccd4abd615083
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from pyparsing import *
import os.path
import re,sys
import csv
import settings
# All printable (non-whitespace) unicode characters; used as the character
# set for the pyparsing Word tokens built in parse().
unicodePrintables = ''.join(chr(c) for c in range(sys.maxunicode) if not chr(c).isspace())
# Captures the total area in square meters, e.g. "Загальна площа (кв.м): 45,6".
area_regex = r"Загальна площа \(кв\.м\)\: (\d{1,2}[\.,]?\d?)"
# Captures the apartment number, e.g. "квартира 12".
apart_num_regex=r"квартира (\d{1,3})"
# Matches a building number; currently unused in this module.
building_regex=r"будинок \d{1,2}a?"
def sanitize_property_type(prop_type):
    """Strip the trailing "residential object" marker and return the bare
    property-type string."""
    joined = ''.join(prop_type)
    bare_type, _livable = joined.split(", об'єкт житлової нерухомості:")
    return bare_type.strip()
def sanitize_area(area):
    """Extract the numeric area (sq.m) from a token sequence or string.

    Sequences are joined with spaces first.  Returns the captured number
    with the decimal comma normalized to a point, or the (joined) input
    unchanged when the area pattern is not present.
    """
    if not isinstance(area, str):
        area = ' '.join(area)
    # re.search returns the first match directly (replaces finditer loop)
    match = re.search(area_regex, area)
    if match:
        return match.group(1).replace(',', '.')
    return area
def sanitize_address(address):
    """Return the apartment number from an address token sequence.

    Tokens are joined without separators.  When no apartment number is
    present the non-residential placeholder is returned instead.
    """
    joined = ''.join(address)
    # re.search returns the first match directly (replaces finditer loop)
    match = re.search(apart_num_regex, joined)
    if match:
        return match.group(1)
    return 'нежитл. прим.'
def sanitize_owner(raw_owner):
    """Split a raw owner token sequence into (person, regnum, citizenship).

    The registration number follows either the tax-id marker or, when the
    owner has no tax id, the absence-reason marker.
    """
    ID_NUMBER_LITERAL = ', реєстраційний номер облікової картки платника податків: '
    NO_ID_LITERAL = ', причина відсутності РНОКПП: '
    CITIZEN_LITERAL = ', країна громадянства: '
    joined = ' '.join(raw_owner)
    marker = ID_NUMBER_LITERAL if ID_NUMBER_LITERAL in joined else NO_ID_LITERAL
    person, tail = joined.split(marker)
    regnum, citizen = tail.split(CITIZEN_LITERAL)
    return (person, regnum, citizen)
def postprocess_owner_basis(raw_basis):
    """Join the ownership-basis token sequence into a single string."""
    return " ".join(raw_basis)
def parse(filename):
    """Parse a text dump of an ownership-registry certificate.

    Builds a pyparsing grammar for the "actual information" section of the
    certificate (property records with owners, shares and bases), parses the
    whole file, prints a human-readable report of every apartment record,
    and returns the pyparsing tokens.
    """
    with open(filename, encoding='utf-8') as f:
        data = f.readlines()
    data = ''.join(data)
    # these tokens should be suppressed
    NL = LineEnd()
    # like стор. 2 з 206
    page_numbers = Literal('стор.') + Word(nums) + Literal('з')+Word(nums)
    # qrcodes on every page like RRP-4HH2EL59B
    qrcode=Literal("RRP-")+Word(unicodePrintables)
    # a run of printable characters, possibly containing spaces
    words = Word(unicodePrintables, unicodePrintables + ' ')
    # useful info start
    DOC_START=Literal('З ДЕРЖАВНОГО РЕЄСТРУ РЕЧОВИХ ПРАВ НА НЕРУХОМЕ МАЙНО')
    DATA_REGISTRY_HEADER=Literal('ВІДОМОСТІ')
    #marks start of second snapshot part of certificate
    SNAPSHOT_REGISTRY_START=Literal('З РЕЄСТРУ ПРАВ ВЛАСНОСТІ НА НЕРУХОМЕ МАЙНО')
    # headers of blocks in actual part of certificate
    AC_HEADER_1 = Literal('Актуальна інформація про об’єкт нерухомого майна')
    AC_HEADER_2 = Literal('Актуальна інформація про право власності')
    RECORD_NUMBER=Literal('Номер запису про право власності / довірчої власності: ')
    DATA_HEADER_OLD_1=Literal('ВІДОМОСТІ ПРО ОБ’ЄКТ НЕРУХОМОГО МАЙНА')
    DATA_HEADER_OLD_2=Literal('ВІДОМОСТІ ПРО ПРАВА ВЛАСНОСТІ')
    STOP_LITERAL_INFO = Literal('Відомості про реєстрацію')
    ADDRESS=Literal('Адреса:')
    NOMOVE_OBJECT=Literal('Об’єкт нерухомого')
    OF_PROPERTY=Literal('майна:')
    SHARE=Literal('Розмір частки: ')
    OBJ_DESCR=Literal('Опис об’єкта: ')
    record_num=RECORD_NUMBER+Word(nums)
    ac_address=ADDRESS+OneOrMore(~AC_HEADER_2+words)('address')
    owner_basis_literal=Literal('Підстава для державної')
    owner_basis_literal_2 = Literal('реєстрації:')
    record_basis=Literal('Підстава внесення')
    owner_basis = ZeroOrMore(~record_basis + words)
    share=SHARE+words('share')
    owner_stop_list = STOP_LITERAL_INFO | RECORD_NUMBER
    owner=Literal('Власники: ') + OneOrMore(~owner_stop_list + words)('owner')
    stop_list=AC_HEADER_1 | record_num | DATA_REGISTRY_HEADER
    trash=ZeroOrMore(~stop_list+words)
    trash_3=ZeroOrMore(~DATA_HEADER_OLD_2+words)
    ac_property_type=OneOrMore(~OBJ_DESCR+words)('prop_type')
    # ac_area=AREA+words('area')
    ac_area=OBJ_DESCR + OneOrMore(~ADDRESS +words)('area')
    # property type and address block in snapshot part
    ADDRESS_NOMOVE=Literal('Адреса нерухомого')
    ADDRESS_NOMOVE_2=Literal('майна:')
    PROP_TYPE=Literal('Тип майна:')
    sn_property_type=PROP_TYPE+OneOrMore(~ADDRESS_NOMOVE+words)('prop_type')
    address_stop_list=Literal('Загальна площа') | DATA_HEADER_OLD_2
    sn_address=OneOrMore(~address_stop_list +words)('address')
    SN_AREA_START=Literal('Загальна площа')
    sn_area=(SN_AREA_START+words)('area')
    sn_owner=(Literal('ПІБ:')+words('owner'))
    SN_DATE_REGISTRY=Literal('Дата прийняття рішення')
    sn_share=(Literal('Частка власності:')+words('share'))
    SN_EMERSION_REASON=Literal('Підстава виникнення')
    SN_REGISTRATION_MARK=Literal('ВІДМІТКА ПРО РЕЄСТРАЦІЮ')
    SN_REGISTRATION_MARK_DATE=Literal('Дата реєстрації')+words
    DATA_ABSENT=Literal('Відомості про права власності відсутні')
    basis_reason_stop=SN_EMERSION_REASON | DATA_HEADER_OLD_1|SN_REGISTRATION_MARK|SN_DATE_REGISTRY
    basis_mark_stop_list=SN_DATE_REGISTRY|DATA_HEADER_OLD_1
    basis_mark_stop=ZeroOrMore(~basis_mark_stop_list+words)
    sh_basis=(SN_EMERSION_REASON+Literal('права власності:')+ZeroOrMore(~basis_reason_stop +words)('basis')+basis_mark_stop)
    # one ownership record in the actual part: number, basis, share, owners
    ownership_record=Group(record_num+SkipTo(owner_basis_literal+owner_basis_literal_2, include=True)+owner_basis('basis')+record_basis+\
        SkipTo(SHARE)+share+owner+trash)
    sn_ownership=Group(SN_DATE_REGISTRY+SkipTo(sn_owner)+sn_owner+SkipTo(sn_share)+sn_share+sh_basis)
    actual_record = Group(AC_HEADER_1+SkipTo(NOMOVE_OBJECT+OF_PROPERTY,include=True)+ac_property_type+ac_area+ac_address+AC_HEADER_2+OneOrMore(ownership_record)('records'))
    snapshot_start=DATA_REGISTRY_HEADER.suppress()+SNAPSHOT_REGISTRY_START.suppress()
    snapshot_record=Group(DATA_HEADER_OLD_1+SkipTo(PROP_TYPE)+sn_property_type+SkipTo(ADDRESS_NOMOVE_2,include=True)+sn_address+ZeroOrMore(sn_area)+trash_3+DATA_HEADER_OLD_2+(OneOrMore(sn_ownership)('records')|DATA_ABSENT))
    # grammar = SkipTo(DOC_START, include=True).suppress()+OneOrMore(actual_record)('apartments')+snapshot_start+OneOrMore(snapshot_record)('old')
    grammar = SkipTo(DOC_START, include=True).suppress()+OneOrMore(actual_record)('apartments')
    grammar.ignore(NL)
    grammar.ignore(page_numbers)
    grammar.ignore(qrcode)
    # print ("start")
    result = []
    tokens = grammar.parseString(data, parseAll=True)
    # print a per-apartment report of the actual records
    for apt in tokens.apartments:
        print("=================")
        print (sanitize_address(apt.address))
        print (sanitize_property_type(apt.prop_type))
        print (sanitize_area(apt.area))
        for record in apt.records:
            print (sanitize_owner(record.owner))
            print (record.share)
            print (postprocess_owner_basis(record.basis))
    # NOTE(review): the grammar no longer produces an 'old' result (the
    # snapshot branch above is commented out), so tokens.old is presumably
    # empty and this loop does nothing -- confirm before relying on it.
    for old_apt in tokens.old:
        print (sanitize_address(old_apt.address))
        print (sanitize_property_type(old_apt.prop_type))
        print (sanitize_area(old_apt.area))
        for record in old_apt.records:
            print (record.owner, record.share)
            test = (" ".join(record.basis).replace('Львівської міської ради','ЛМР').replace('Франківською районною адміністрацією','ФРА').replace('Львівського міського нотаріального округу','ЛМНО').replace('департаменту економічної політики','ДЕП'))
            print (test)
    return tokens
def output_csv(tokens, replacements):
    """Write the parsed apartment records to settings.OUT_FILE as '|'-separated CSV.

    One row per ownership record; the basis text is shortened by applying
    every key -> value substitution from replacements.
    Side effect: registers the 'singlequote' csv dialect globally.
    """
    with open(settings.OUT_FILE, 'w', newline='', encoding='utf-8') as csvfile:
        fieldnames = ('номер', 'тип', 'площа', 'власник', 'ід.номер власника','частка','підстава')
        # unquoted, pipe-delimited output; '|' doubles as the escape char
        csv.register_dialect('singlequote',
            quotechar='',
            escapechar='|',
            doublequote = False,
            quoting=csv.QUOTE_NONE,
            delimiter='|'
        )
        writer = csv.DictWriter(csvfile, fieldnames=fieldnames, dialect = 'singlequote')
        writer.writeheader()
        for apt in tokens.apartments:
            for record in apt.records:
                row = {}
                row['номер'] = sanitize_address(apt.address)
                row['тип'] = sanitize_property_type(apt.prop_type)
                row['площа'] = sanitize_area(apt.area)
                person, regnum, citizen = sanitize_owner(record.owner)
                row['власник'] = person
                row ['ід.номер власника'] = regnum
                row['частка'] = record.share
                basis = postprocess_owner_basis(record.basis)
                # shorten boilerplate institution names via the replacement map
                for k,v in replacements.items():
                    basis = basis.replace(k,v)
                row['підстава'] = basis
                writer.writerow(row)
# Script entry: parse the configured input dump and write the CSV report.
# NOTE(review): runs on import; this script has no __main__ guard.
tokens = parse(settings.INPUT_FILE)
output_csv(tokens, settings.REPLACEMENTS)
| 41.276382
| 244
| 0.711833
|
4a10c91ca2e0e2b0f075142f066f9d1fbb2339cf
| 141
|
py
|
Python
|
pattern/7.py
|
itspuneet/itspuneet
|
d44f78afcff275aa56f03bba738ac3e4f2c30843
|
[
"bzip2-1.0.6"
] | null | null | null |
pattern/7.py
|
itspuneet/itspuneet
|
d44f78afcff275aa56f03bba738ac3e4f2c30843
|
[
"bzip2-1.0.6"
] | null | null | null |
pattern/7.py
|
itspuneet/itspuneet
|
d44f78afcff275aa56f03bba738ac3e4f2c30843
|
[
"bzip2-1.0.6"
] | null | null | null |
# Print a centered star pyramid of height 5.
for row in range(5):
    # leading spaces shrink as the row of stars widens
    print(' ' * (4 - row) + '*' * (2 * row + 1))
| 20.142857
| 27
| 0.425532
|
4a10c9733d178760c0444b0a93779322fc838422
| 23,329
|
py
|
Python
|
exemplos/exemplo1/blockly/build.py
|
danihalls/propesq
|
77fb62896215ffa4a2500e1fc6d53feeb004f03d
|
[
"MIT"
] | null | null | null |
exemplos/exemplo1/blockly/build.py
|
danihalls/propesq
|
77fb62896215ffa4a2500e1fc6d53feeb004f03d
|
[
"MIT"
] | null | null | null |
exemplos/exemplo1/blockly/build.py
|
danihalls/propesq
|
77fb62896215ffa4a2500e1fc6d53feeb004f03d
|
[
"MIT"
] | null | null | null |
#!/usr/bin/python2.7
# Compresses the core Blockly files into a single JavaScript file.
#
# Copyright 2012 Google Inc.
# https://developers.google.com/blockly/
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Usage: build.py <0 or more of accessible, core, generators, langfiles>
# build.py with no parameters builds all files.
# core builds blockly_compressed, blockly_uncompressed, and blocks_compressed.
# accessible builds blockly_accessible_compressed,
# blockly_accessible_uncompressed, and blocks_compressed.
# generators builds every <language>_compressed.js.
# langfiles builds every msg/js/<LANG>.js file.
# This script generates four versions of Blockly's core files. The first pair
# are:
# blockly_compressed.js
# blockly_uncompressed.js
# The compressed file is a concatenation of all of Blockly's core files which
# have been run through Google's Closure Compiler. This is done using the
# online API (which takes a few seconds and requires an Internet connection).
# The uncompressed file is a script that loads in each of Blockly's core files
# one by one. This takes much longer for a browser to load, but is useful
# when debugging code since line numbers are meaningful and variables haven't
# been renamed. The uncompressed file also allows for a faster developement
# cycle since there is no need to rebuild or recompile, just reload.
#
# The second pair are:
# blockly_accessible_compressed.js
# blockly_accessible_uncompressed.js
# These files are analogous to blockly_compressed and blockly_uncompressed,
# but also include the visually-impaired module for Blockly.
#
# This script also generates:
# blocks_compressed.js: The compressed Blockly language blocks.
# javascript_compressed.js: The compressed Javascript generator.
# python_compressed.js: The compressed Python generator.
# dart_compressed.js: The compressed Dart generator.
# lua_compressed.js: The compressed Lua generator.
# msg/js/<LANG>.js for every language <LANG> defined in msg/js/<LANG>.json.
import sys
# The build tooling relies on Python-2-only modules (httplib, urllib API),
# so fail fast on Python 3.
if sys.version_info[0] != 2:
  raise Exception("Blockly build only compatible with Python 2.x.\n"
                  "You are using: " + sys.version)

# Validate the requested bundle names before doing any work.
for arg in sys.argv[1:len(sys.argv)]:
  if (arg != 'core' and
      arg != 'accessible' and
      arg != 'generators' and
      arg != 'langfiles' and
      arg != 'demo'):
    raise Exception("Invalid argument: \"" + arg + "\". Usage: build.py <0 or more of accessible," +
                    " core, generators, langfiles, demo>")
import errno, glob, httplib, json, os, re, subprocess, threading, urllib
def import_path(fullpath):
  """Import a file with full path specification.

  Allows one to import from any directory, something __import__ does not do.

  Args:
    fullpath: Path and filename of import.

  Returns:
    An imported module.
  """
  path, filename = os.path.split(fullpath)
  filename, ext = os.path.splitext(filename)
  sys.path.append(path)
  try:
    module = __import__(filename)
    reload(module)  # Might be out of date.
  finally:
    # Restore sys.path even when the import fails.
    del sys.path[-1]
  return module
# Banner prepended to every generated JavaScript file to discourage hand edits.
HEADER = ("// Do not edit this file; automatically generated by build.py.\n"
          "'use strict';\n")
class Gen_uncompressed(threading.Thread):
  """Generate a JavaScript file that loads Blockly's raw files.

  Runs in a separate thread.
  """
  def __init__(self, search_paths, target_filename):
    threading.Thread.__init__(self)
    self.search_paths = search_paths
    self.target_filename = target_filename

  def run(self):
    # The generated file is a bootstrap script: it locates Blockly's
    # directory, emits goog.addDependency lines for every source file,
    # and requires every provided namespace once Closure has loaded.
    f = open(self.target_filename, 'w')
    f.write(HEADER)
    f.write("""
var isNodeJS = !!(typeof module !== 'undefined' && module.exports &&
                  typeof window === 'undefined');

if (isNodeJS) {
  var window = {};
  require('closure-library');
}

window.BLOCKLY_DIR = (function() {
  if (!isNodeJS) {
    // Find name of current directory.
    var scripts = document.getElementsByTagName('script');
    var re = new RegExp('(.+)[\/]blockly_(.*)uncompressed\.js$');
    for (var i = 0, script; script = scripts[i]; i++) {
      var match = re.exec(script.src);
      if (match) {
        return match[1];
      }
    }
    alert('Could not detect Blockly\\'s directory name.');
  }
  return '';
})();

window.BLOCKLY_BOOT = function() {
  var dir = '';
  if (isNodeJS) {
    require('closure-library');
    dir = 'blockly';
  } else {
    // Execute after Closure has loaded.
    if (!window.goog) {
      alert('Error: Closure not found. Read this:\\n' +
          'developers.google.com/blockly/guides/modify/web/closure');
    }
    dir = window.BLOCKLY_DIR.match(/[^\\/]+$/)[0];
  }
""")
    add_dependency = []
    base_path = calcdeps.FindClosureBasePath(self.search_paths)
    for dep in calcdeps.BuildDependenciesFromFiles(self.search_paths):
      add_dependency.append(calcdeps.GetDepsLine(dep, base_path))
    add_dependency.sort()  # Deterministic build.
    add_dependency = '\n'.join(add_dependency)
    # Find the Blockly directory name and replace it with a JS variable.
    # This allows blockly_uncompressed.js to be compiled on one computer and be
    # used on another, even if the directory name differs.
    m = re.search('[\\/]([^\\/]+)[\\/]core[\\/]blockly.js', add_dependency)
    add_dependency = re.sub('([\\/])' + re.escape(m.group(1)) +
        '([\\/](core|accessible)[\\/])', '\\1" + dir + "\\2', add_dependency)
    f.write(add_dependency + '\n')
    # Collect every namespace provided by Blockly's own sources (Closure's
    # files, which live above the current directory, are excluded).
    provides = []
    for dep in calcdeps.BuildDependenciesFromFiles(self.search_paths):
      if not dep.filename.startswith(os.pardir + os.sep):  # '../'
        provides.extend(dep.provides)
    provides.sort()  # Deterministic build.
    f.write('\n')
    f.write('// Load Blockly.\n')
    for provide in provides:
      f.write("goog.require('%s');\n" % provide)
    f.write("""
delete this.BLOCKLY_DIR;
delete this.BLOCKLY_BOOT;
};

if (isNodeJS) {
  window.BLOCKLY_BOOT();
  module.exports = Blockly;
} else {
  // Delete any existing Closure (e.g. Soy's nogoog_shim).
  document.write('<script>var goog = undefined;</script>');
  // Load fresh Closure Library.
  document.write('<script src="' + window.BLOCKLY_DIR +
      '/../closure-library/closure/goog/base.js"></script>');
  document.write('<script>window.BLOCKLY_BOOT();</script>');
}
""")
    f.close()
    print("SUCCESS: " + self.target_filename)
class Gen_compressed(threading.Thread):
  """Generate a JavaScript file that contains all of Blockly's core and all
  required parts of Closure, compiled together.

  Uses the Closure Compiler's online API.
  Runs in a separate thread.
  """
  def __init__(self, search_paths, bundles):
    threading.Thread.__init__(self)
    self.search_paths = search_paths
    self.bundles = bundles

  def run(self):
    """Dispatch the compile jobs selected by the requested bundles."""
    if ('core' in self.bundles):
      self.gen_core()
    if ('accessible' in self.bundles):
      self.gen_accessible()
    if ('core' in self.bundles or 'accessible' in self.bundles):
      self.gen_blocks()
    if ('generators' in self.bundles):
      self.gen_generator("javascript")
      self.gen_generator("python")
      self.gen_generator("php")
      self.gen_generator("dart")
      self.gen_generator("lua")
    if ('demo' in self.bundles):
      self.gen_together()

  def gen_together(self):
    """Compile the fixed-advanced demo with ADVANCED_OPTIMIZATIONS."""
    target_filename = os.path.join("demos", "fixed-advanced", "main_compressed.js")
    # Define the parameters for the POST request.
    params = [
        ("compilation_level", "ADVANCED_OPTIMIZATIONS"),
        ("use_closure_library", "true"),
        ("generate_exports", "true"),
        ("output_format", "json"),
        ("output_info", "compiled_code"),
        ("output_info", "warnings"),
        ("output_info", "errors"),
        ("output_info", "statistics"),
        # debug options (to make the uglified code readable)
        # ("formatting", "pretty_print"),
        # ("formatting", "print_input_delimiter"),
        # ("debug", "true"),
      ]
    # Read in all the source files.
    filenames = calcdeps.CalculateDependencies(self.search_paths,
        [os.path.join("demos", "fixed-advanced", "main.js")])
    filenames.sort()  # Deterministic build.
    for filename in filenames:
      # Filter out the Closure files (the compiler will add them).
      if filename.startswith(os.pardir + os.sep):  # '../'
        continue
      f = open(filename)
      params.append(("js_code", "".join(f.readlines())))
      f.close()
    externs = [os.path.join("externs", "svg-externs.js")]
    for filename in externs:
      f = open(filename)
      params.append(("js_externs", "".join(f.readlines())))
      f.close()
    self.do_compile(params, target_filename, filenames, "")

  def gen_core(self):
    """Compile Blockly core into blockly_compressed.js."""
    target_filename = "blockly_compressed.js"
    # Define the parameters for the POST request.
    params = [
        ("compilation_level", "SIMPLE_OPTIMIZATIONS"),
        ("use_closure_library", "true"),
        ("output_format", "json"),
        ("output_info", "compiled_code"),
        ("output_info", "warnings"),
        ("output_info", "errors"),
        ("output_info", "statistics"),
      ]
    # Read in all the source files.
    filenames = calcdeps.CalculateDependencies(self.search_paths,
        [os.path.join("core", "blockly.js")])
    filenames.sort()  # Deterministic build.
    for filename in filenames:
      # Filter out the Closure files (the compiler will add them).
      if filename.startswith(os.pardir + os.sep):  # '../'
        continue
      f = open(filename)
      params.append(("js_code", "".join(f.readlines())))
      f.close()
    self.do_compile(params, target_filename, filenames, "")

  def gen_accessible(self):
    """Compile the accessibility bundle into blockly_accessible_compressed.js.

    (A second, identical definition of this method previously shadowed this
    one; it lacked the deterministic filenames.sort() and has been removed.)
    """
    target_filename = "blockly_accessible_compressed.js"
    # Define the parameters for the POST request.
    params = [
        ("compilation_level", "SIMPLE_OPTIMIZATIONS"),
        ("use_closure_library", "true"),
        ("language_out", "ES5"),
        ("output_format", "json"),
        ("output_info", "compiled_code"),
        ("output_info", "warnings"),
        ("output_info", "errors"),
        ("output_info", "statistics"),
      ]
    # Read in all the source files.
    filenames = calcdeps.CalculateDependencies(self.search_paths,
        [os.path.join("accessible", "app.component.js")])
    filenames.sort()  # Deterministic build.
    for filename in filenames:
      # Filter out the Closure files (the compiler will add them).
      if filename.startswith(os.pardir + os.sep):  # '../'
        continue
      f = open(filename)
      params.append(("js_code", "".join(f.readlines())))
      f.close()
    self.do_compile(params, target_filename, filenames, "")

  def gen_blocks(self):
    """Compile the block definitions into blocks_compressed.js."""
    target_filename = "blocks_compressed.js"
    # Define the parameters for the POST request.
    params = [
        ("compilation_level", "SIMPLE_OPTIMIZATIONS"),
        ("output_format", "json"),
        ("output_info", "compiled_code"),
        ("output_info", "warnings"),
        ("output_info", "errors"),
        ("output_info", "statistics"),
      ]
    # Read in all the source files.
    # Add Blockly.Blocks to be compatible with the compiler.
    params.append(("js_code", "goog.provide('Blockly');goog.provide('Blockly.Blocks');"))
    filenames = glob.glob(os.path.join("blocks", "*.js"))
    filenames.sort()  # Deterministic build.
    for filename in filenames:
      f = open(filename)
      params.append(("js_code", "".join(f.readlines())))
      f.close()
    # Remove Blockly.Blocks to be compatible with Blockly.
    remove = "var Blockly={Blocks:{}};"
    self.do_compile(params, target_filename, filenames, remove)

  def gen_generator(self, language):
    """Compile the code generator for one language into <language>_compressed.js."""
    target_filename = language + "_compressed.js"
    # Define the parameters for the POST request.
    params = [
        ("compilation_level", "SIMPLE_OPTIMIZATIONS"),
        ("output_format", "json"),
        ("output_info", "compiled_code"),
        ("output_info", "warnings"),
        ("output_info", "errors"),
        ("output_info", "statistics"),
      ]
    # Read in all the source files.
    # Add Blockly.Generator to be compatible with the compiler.
    params.append(("js_code", "goog.provide('Blockly.Generator');"))
    filenames = glob.glob(
        os.path.join("generators", language, "*.js"))
    filenames.sort()  # Deterministic build.
    filenames.insert(0, os.path.join("generators", language + ".js"))
    for filename in filenames:
      f = open(filename)
      params.append(("js_code", "".join(f.readlines())))
      f.close()
    filenames.insert(0, "[goog.provide]")
    # Remove Blockly.Generator to be compatible with Blockly.
    remove = "var Blockly={Generator:{}};"
    self.do_compile(params, target_filename, filenames, remove)

  def do_compile(self, params, target_filename, filenames, remove):
    """POST params to the Closure Compiler service, report diagnostics, and
    write the compiled output (with `remove` stripped) to target_filename."""
    # Send the request to Google.
    headers = {"Content-type": "application/x-www-form-urlencoded"}
    conn = httplib.HTTPSConnection("closure-compiler.appspot.com")
    conn.request("POST", "/compile", urllib.urlencode(params), headers)
    response = conn.getresponse()
    json_str = response.read()
    conn.close()
    # Parse the JSON response.
    json_data = json.loads(json_str)

    def file_lookup(name):
      # Map the service's "Input_<n>" names back to local filenames.
      if not name.startswith("Input_"):
        return "???"
      n = int(name[6:]) - 1
      return filenames[n]

    if json_data.has_key("serverErrors"):
      errors = json_data["serverErrors"]
      for error in errors:
        print("SERVER ERROR: %s" % target_filename)
        print(error["error"])
    elif json_data.has_key("errors"):
      errors = json_data["errors"]
      for error in errors:
        print("FATAL ERROR")
        print(error["error"])
        if error["file"]:
          print("%s at line %d:" % (
              file_lookup(error["file"]), error["lineno"]))
          print(error["line"])
          print((" " * error["charno"]) + "^")
      sys.exit(1)
    else:
      if json_data.has_key("warnings"):
        warnings = json_data["warnings"]
        for warning in warnings:
          print("WARNING")
          print(warning["warning"])
          if warning["file"]:
            print("%s at line %d:" % (
                file_lookup(warning["file"]), warning["lineno"]))
            print(warning["line"])
            print((" " * warning["charno"]) + "^")
        print()  # NOTE(review): under Python 2 this prints "()" -- confirm intent

      if not json_data.has_key("compiledCode"):
        print("FATAL ERROR: Compiler did not return compiledCode.")
        sys.exit(1)

      code = HEADER + "\n" + json_data["compiledCode"]
      code = code.replace(remove, "")

      # Trim down Google's (and only Google's) Apache licences.
      # The Closure Compiler preserves these.
      LICENSE = re.compile("""/\\*
[\w ]+
Copyright \\d+ Google Inc.
https://developers.google.com/blockly/
Licensed under the Apache License, Version 2.0 \(the "License"\);
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
\\*/""")
      code = re.sub(LICENSE, "", code)

      stats = json_data["statistics"]
      original_b = stats["originalSize"]
      compressed_b = stats["compressedSize"]
      if original_b > 0 and compressed_b > 0:
        f = open(target_filename, "w")
        f.write(code)
        f.close()
        original_kb = int(original_b / 1024 + 0.5)
        compressed_kb = int(compressed_b / 1024 + 0.5)
        ratio = int(float(compressed_b) / float(original_b) * 100 + 0.5)
        print("SUCCESS: " + target_filename)
        print("Size changed from %d KB to %d KB (%d%%)." % (
            original_kb, compressed_kb, ratio))
      else:
        print("UNKNOWN ERROR")
class Gen_langfiles(threading.Thread):
  """Generate JavaScript file for each natural language supported.

  Runs in a separate thread.
  """
  def __init__(self, force_gen):
    threading.Thread.__init__(self)
    self.force_gen = force_gen

  def _rebuild(self, srcs, dests):
    """Return True if any file in srcs is newer than any file in dests.

    A missing destination forces a rebuild; a missing source aborts the
    build.  On any other stat error the error is printed and None is
    returned (treated as "no rebuild" by the caller).
    """
    # Determine whether any of the files in srcs is newer than any in dests.
    try:
      return (max(os.path.getmtime(src) for src in srcs) >
              min(os.path.getmtime(dest) for dest in dests))
    except OSError as e:
      # Was a file not found?
      if e.errno == errno.ENOENT:
        # If it was a source file, we can't proceed.
        if e.filename in srcs:
          print("Source file missing: " + e.filename)
          sys.exit(1)
        else:
          # If a destination file was missing, rebuild.
          return True
      else:
        # str(e): concatenating the OSError object itself raised TypeError.
        print("Error checking file creation times: " + str(e))

  def run(self):
    """Regenerate msg/json/*.json (when stale) and every msg/js/<LANG>.js."""
    # The files msg/json/{en,qqq,synonyms}.json depend on msg/messages.js.
    if (self.force_gen or
        self._rebuild([os.path.join("msg", "messages.js")],
                      [os.path.join("msg", "json", f) for f in
                       ["en.json", "qqq.json", "synonyms.json"]])):
      try:
        subprocess.check_call([
            "python",
            os.path.join("i18n", "js_to_json.py"),
            "--input_file", "msg/messages.js",
            "--output_dir", "msg/json/",
            "--quiet"])
      except (subprocess.CalledProcessError, OSError) as e:
        # Documentation for subprocess.check_call says that CalledProcessError
        # will be raised on failure, but I found that OSError is also possible.
        print("Error running i18n/js_to_json.py: ", e)
        sys.exit(1)

    # Checking whether it is necessary to rebuild the js files would be a lot of
    # work since we would have to compare each <lang>.json file with each
    # <lang>.js file. Rebuilding is easy and cheap, so just go ahead and do it.
    try:
      # Use create_messages.py to create .js files from .json files.
      cmd = [
          "python",
          os.path.join("i18n", "create_messages.py"),
          "--source_lang_file", os.path.join("msg", "json", "en.json"),
          "--source_synonym_file", os.path.join("msg", "json", "synonyms.json"),
          "--source_constants_file", os.path.join("msg", "json", "constants.json"),
          "--key_file", os.path.join("msg", "json", "keys.json"),
          "--output_dir", os.path.join("msg", "js"),
          "--quiet"]
      json_files = glob.glob(os.path.join("msg", "json", "*.json"))
      # meta files are inputs to create_messages.py, not languages
      json_files = [file for file in json_files if not
          (file.endswith(("keys.json", "synonyms.json", "qqq.json", "constants.json")))]
      cmd.extend(json_files)
      subprocess.check_call(cmd)
    except (subprocess.CalledProcessError, OSError) as e:
      print("Error running i18n/create_messages.py: ", e)
      sys.exit(1)

    # Output list of .js files created.
    for f in json_files:
      # This assumes the path to the current directory does not contain "json".
      f = f.replace("json", "js")
      if os.path.isfile(f):
        print("SUCCESS: " + f)
      else:
        print("FAILED to create " + f)
if __name__ == "__main__":
  # Locate Closure's calcdeps helper; give directory-rename hints on failure.
  try:
    calcdeps = import_path(os.path.join(
        os.path.pardir, "closure-library", "closure", "bin", "calcdeps.py"))
  except ImportError:
    if os.path.isdir(os.path.join(os.path.pardir, "closure-library-read-only")):
      # Dir got renamed when Closure moved from Google Code to GitHub in 2014.
      # BUG FIX: the concatenated message lines were missing a separating
      # space ("...renamed from'closure-library-read-only'").
      print("Error: Closure directory needs to be renamed from "
            "'closure-library-read-only' to 'closure-library'.\n"
            "Please rename this directory.")
    elif os.path.isdir(os.path.join(os.path.pardir, "google-closure-library")):
      # When Closure is installed by npm, it is named "google-closure-library".
      #calcdeps = import_path(os.path.join(
      #    os.path.pardir, "google-closure-library", "closure", "bin", "calcdeps.py"))
      print("Error: Closure directory needs to be renamed from "
            "'google-closure-library' to 'closure-library'.\n"
            "Please rename this directory.")
    else:
      print("""Error: Closure not found. Read this:
developers.google.com/blockly/guides/modify/web/closure""")
    sys.exit(1)

  core_search_paths = calcdeps.ExpandDirectories(
      ["core", os.path.join(os.path.pardir, "closure-library")])
  core_search_paths.sort()  # Deterministic build.
  full_search_paths = calcdeps.ExpandDirectories(
      ["accessible", "core", os.path.join(os.path.pardir, "closure-library")])
  full_search_paths.sort()  # Deterministic build.

  # With no command-line arguments, build the default set of targets.
  if len(sys.argv) == 1:
    args = ['core', 'accessible', 'generators', 'defaultlangfiles']
  else:
    args = sys.argv

  # Uncompressed and compressed are run in parallel threads.
  # Uncompressed is limited by processor speed.
  if 'core' in args:
    Gen_uncompressed(core_search_paths, 'blockly_uncompressed.js').start()
  if 'accessible' in args:
    Gen_uncompressed(full_search_paths, 'blockly_accessible_uncompressed.js').start()

  if 'demo' in args:
    all_search_paths = calcdeps.ExpandDirectories(
        ["accessible", "core", "blocks", os.path.join("demos", "fixed-advanced"),
         os.path.join("msg", "js"), os.path.join(os.path.pardir, "closure-library")])
    all_search_paths.sort()  # Deterministic build.
    print("Compressing " + str(len(all_search_paths)) + " files...")
    Gen_compressed(all_search_paths, args).start()
  else:
    # Compressed is limited by network and server speed.
    Gen_compressed(full_search_paths, args).start()

  # This is run locally in a separate thread
  # defaultlangfiles checks for changes in the msg files, while manually asking
  # to build langfiles will force the messages to be rebuilt.
  if 'langfiles' in args or 'defaultlangfiles' in args:
    Gen_langfiles('langfiles' in args).start()
| 37.08903
| 158
| 0.646277
|
4a10ca83bd67f685eee9e2eae6116527b2664310
| 24,426
|
py
|
Python
|
src/issues/migrations/0027_auto__add_field_proposalvoteargumentranking_user.py
|
ofirr/OpenCommunity
|
7786ac2996530af8f545f4398c071793c73634c8
|
[
"BSD-3-Clause"
] | null | null | null |
src/issues/migrations/0027_auto__add_field_proposalvoteargumentranking_user.py
|
ofirr/OpenCommunity
|
7786ac2996530af8f545f4398c071793c73634c8
|
[
"BSD-3-Clause"
] | null | null | null |
src/issues/migrations/0027_auto__add_field_proposalvoteargumentranking_user.py
|
ofirr/OpenCommunity
|
7786ac2996530af8f545f4398c071793c73634c8
|
[
"BSD-3-Clause"
] | null | null | null |
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
    """South schema migration: adds the ``user`` foreign key to
    ``ProposalVoteArgumentRanking`` (records who cast each argument vote).

    Auto-generated by South; the frozen ``models`` dict below is a snapshot
    of every model the migration may reference and should not be hand-edited.
    """

    def forwards(self, orm):
        """Apply the migration: create the new column."""
        # Adding field 'ProposalVoteArgumentRanking.user'
        # default=1 backfills existing rows with user id 1 (keep_default=False
        # removes that default from the schema afterwards).
        db.add_column(u'issues_proposalvoteargumentranking', 'user',
                      self.gf('django.db.models.fields.related.ForeignKey')(default=1, related_name='argument_votes', to=orm['users.OCUser']),
                      keep_default=False)

    def backwards(self, orm):
        """Revert the migration: drop the column added by forwards()."""
        # Deleting field 'ProposalVoteArgumentRanking.user'
        db.delete_column(u'issues_proposalvoteargumentranking', 'user_id')

    # Frozen ORM snapshot (generated by South's freeze machinery).
    models = {
        u'auth.group': {
            'Meta': {'object_name': 'Group'},
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
            'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
        },
        u'auth.permission': {
            'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
            'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
        },
        u'communities.community': {
            'Meta': {'object_name': 'Community'},
            'allow_links_in_emails': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'board_name': ('django.db.models.fields.CharField', [], {'default': "u'Board'", 'max_length': '200'}),
            'default_quorum': ('django.db.models.fields.PositiveSmallIntegerField', [], {'null': 'True', 'blank': 'True'}),
            'email_invitees': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'inform_system_manager': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'is_public': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_index': 'True'}),
            'issue_ranking_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'logo': ('django.db.models.fields.files.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
            'official_identifier': ('django.db.models.fields.CharField', [], {'max_length': '300', 'null': 'True', 'blank': 'True'}),
            'referendum_ends_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
            'referendum_started': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'referendum_started_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
            'register_missing_board_members': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'straw_voting_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'uid': ('django.db.models.fields.CharField', [], {'default': "'ch8u0qg40cwnc8uiwyebo45y'", 'unique': 'True', 'max_length': '24'}),
            'upcoming_meeting_comments': ('ocd.base_models.HTMLField', [], {'null': 'True', 'blank': 'True'}),
            'upcoming_meeting_guests': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
            'upcoming_meeting_is_published': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'upcoming_meeting_location': ('django.db.models.fields.CharField', [], {'max_length': '300', 'null': 'True', 'blank': 'True'}),
            'upcoming_meeting_participants': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'+'", 'blank': 'True', 'to': u"orm['users.OCUser']"}),
            'upcoming_meeting_published_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
            'upcoming_meeting_scheduled_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
            'upcoming_meeting_started': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'upcoming_meeting_summary': ('ocd.base_models.HTMLField', [], {'null': 'True', 'blank': 'True'}),
            'upcoming_meeting_title': ('django.db.models.fields.CharField', [], {'max_length': '300', 'null': 'True', 'blank': 'True'}),
            'upcoming_meeting_version': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
            'voting_ends_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'})
        },
        u'communities.communityconfidentialreason': {
            'Meta': {'ordering': "['community']", 'unique_together': "(('community', 'title'),)", 'object_name': 'CommunityConfidentialReason'},
            'community': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'confidential_reasons'", 'to': u"orm['communities.Community']"}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'title': ('django.db.models.fields.CharField', [], {'max_length': '255'})
        },
        u'contenttypes.contenttype': {
            'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
            'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
        },
        u'issues.issue': {
            'Meta': {'ordering': "['order_in_upcoming_meeting', 'title']", 'object_name': 'Issue'},
            'abstract': ('ocd.base_models.HTMLField', [], {'null': 'True', 'blank': 'True'}),
            'active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'calculated_score': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
            'community': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'issues'", 'to': u"orm['communities.Community']"}),
            'completed': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'confidential_reason': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['communities.CommunityConfidentialReason']", 'null': 'True', 'blank': 'True'}),
            'content': ('ocd.base_models.HTMLField', [], {'null': 'True', 'blank': 'True'}),
            'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
            'created_by': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'issues_created'", 'to': u"orm['users.OCUser']"}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'is_confidential': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'is_published': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'length_in_minutes': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
            'order_by_votes': ('django.db.models.fields.FloatField', [], {'default': '0', 'null': 'True', 'blank': 'True'}),
            'order_in_upcoming_meeting': ('django.db.models.fields.IntegerField', [], {'default': '0', 'null': 'True', 'blank': 'True'}),
            'status': ('django.db.models.fields.IntegerField', [], {'default': '1'}),
            'title': ('django.db.models.fields.CharField', [], {'max_length': '300'}),
            'uid': ('django.db.models.fields.CharField', [], {'default': "'vwws1hl3tqwbxvihusyu9zq2'", 'unique': 'True', 'max_length': '24'})
        },
        u'issues.issueattachment': {
            'Meta': {'ordering': "('created_at',)", 'object_name': 'IssueAttachment'},
            'active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'agenda_item': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'attachments'", 'null': 'True', 'to': u"orm['meetings.AgendaItem']"}),
            'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
            'created_by': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'files_created'", 'to': u"orm['users.OCUser']"}),
            'file': ('django.db.models.fields.files.FileField', [], {'max_length': '200'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'issue': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'attachments'", 'to': u"orm['issues.Issue']"}),
            'ordinal': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True', 'blank': 'True'}),
            'title': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'uid': ('django.db.models.fields.CharField', [], {'default': "'qln2qqr1z11fcqsmsg2mhlvr'", 'unique': 'True', 'max_length': '24'})
        },
        u'issues.issuecomment': {
            'Meta': {'ordering': "('created_at',)", 'object_name': 'IssueComment'},
            'active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'content': ('ocd.base_models.HTMLField', [], {}),
            'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
            'created_by': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'issue_comments_created'", 'to': u"orm['users.OCUser']"}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'issue': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'comments'", 'to': u"orm['issues.Issue']"}),
            'last_edited_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
            'last_edited_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'issue_comments_last_edited'", 'null': 'True', 'to': u"orm['users.OCUser']"}),
            'meeting': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['meetings.Meeting']", 'null': 'True', 'blank': 'True'}),
            'ordinal': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True', 'blank': 'True'}),
            'uid': ('django.db.models.fields.CharField', [], {'default': "'gt7uowu03vandmyzyp4uvjcp'", 'unique': 'True', 'max_length': '24'}),
            'version': ('django.db.models.fields.PositiveIntegerField', [], {'default': '1'})
        },
        u'issues.issuecommentrevision': {
            'Meta': {'object_name': 'IssueCommentRevision'},
            'comment': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'revisions'", 'to': u"orm['issues.IssueComment']"}),
            'content': ('django.db.models.fields.TextField', [], {}),
            'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
            'created_by': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'issue_comment_versions_created'", 'to': u"orm['users.OCUser']"}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'version': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True', 'blank': 'True'})
        },
        u'issues.issuerankingvote': {
            'Meta': {'object_name': 'IssueRankingVote'},
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'issue': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'ranking_votes'", 'to': u"orm['issues.Issue']"}),
            'rank': ('django.db.models.fields.PositiveIntegerField', [], {}),
            'voted_by': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['users.OCUser']"})
        },
        u'issues.proposal': {
            'Meta': {'object_name': 'Proposal'},
            'active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'assigned_to': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
            'assigned_to_user': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'proposals_assigned'", 'null': 'True', 'to': u"orm['users.OCUser']"}),
            'community_members': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True', 'blank': 'True'}),
            'confidential_reason': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['communities.CommunityConfidentialReason']", 'null': 'True', 'blank': 'True'}),
            'content': ('ocd.base_models.HTMLField', [], {'null': 'True', 'blank': 'True'}),
            'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
            'created_by': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'proposals_created'", 'to': u"orm['users.OCUser']"}),
            'decided_at_meeting': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['meetings.Meeting']", 'null': 'True', 'blank': 'True'}),
            'due_by': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'is_confidential': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'issue': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'proposals'", 'to': u"orm['issues.Issue']"}),
            'register_board_votes': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'status': ('django.db.models.fields.IntegerField', [], {'default': '1'}),
            'task_completed': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'title': ('django.db.models.fields.CharField', [], {'max_length': '300'}),
            'type': ('django.db.models.fields.PositiveIntegerField', [], {}),
            'uid': ('django.db.models.fields.CharField', [], {'default': "'vukvb20bqrhnluwslnelthay'", 'unique': 'True', 'max_length': '24'}),
            'votes_con': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True', 'blank': 'True'}),
            'votes_pro': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True', 'blank': 'True'})
        },
        u'issues.proposalvote': {
            'Meta': {'unique_together': "(('proposal', 'user'),)", 'object_name': 'ProposalVote'},
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'proposal': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'votes'", 'to': u"orm['issues.Proposal']"}),
            'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'votes'", 'to': u"orm['users.OCUser']"}),
            'value': ('django.db.models.fields.SmallIntegerField', [], {'default': '0'})
        },
        u'issues.proposalvoteargument': {
            'Meta': {'object_name': 'ProposalVoteArgument'},
            'argument': ('django.db.models.fields.TextField', [], {}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'proposal_vote': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'arguments'", 'to': u"orm['issues.ProposalVote']"})
        },
        u'issues.proposalvoteargumentranking': {
            'Meta': {'object_name': 'ProposalVoteArgumentRanking'},
            'argument': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['issues.ProposalVoteArgument']"}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'argument_votes'", 'to': u"orm['users.OCUser']"}),
            'value': ('django.db.models.fields.SmallIntegerField', [], {'default': '0'})
        },
        u'issues.proposalvoteboard': {
            'Meta': {'unique_together': "(('proposal', 'user'),)", 'object_name': 'ProposalVoteBoard'},
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'proposal': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['issues.Proposal']"}),
            'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'board_votes'", 'to': u"orm['users.OCUser']"}),
            'value': ('django.db.models.fields.SmallIntegerField', [], {'default': '0'}),
            'voted_by_chairman': ('django.db.models.fields.BooleanField', [], {'default': 'False'})
        },
        u'issues.voteresult': {
            'Meta': {'unique_together': "(('proposal', 'meeting'),)", 'object_name': 'VoteResult'},
            'community_members': ('django.db.models.fields.PositiveIntegerField', [], {}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'meeting': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['meetings.Meeting']"}),
            'proposal': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'results'", 'to': u"orm['issues.Proposal']"}),
            'votes_con': ('django.db.models.fields.PositiveIntegerField', [], {}),
            'votes_pro': ('django.db.models.fields.PositiveIntegerField', [], {})
        },
        u'meetings.agendaitem': {
            'Meta': {'ordering': "('meeting__created_at', 'order')", 'unique_together': "(('meeting', 'issue'),)", 'object_name': 'AgendaItem'},
            'background': ('ocd.base_models.HTMLField', [], {'null': 'True', 'blank': 'True'}),
            'closed': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'is_confidential': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'issue': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'agenda_items'", 'to': u"orm['issues.Issue']"}),
            'meeting': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'agenda'", 'to': u"orm['meetings.Meeting']"}),
            'order': ('django.db.models.fields.PositiveIntegerField', [], {'default': '100'})
        },
        u'meetings.meeting': {
            'Meta': {'ordering': "('-held_at',)", 'object_name': 'Meeting'},
            'agenda_items': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'meetings'", 'blank': 'True', 'through': u"orm['meetings.AgendaItem']", 'to': u"orm['issues.Issue']"}),
            'comments': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
            'community': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'meetings'", 'to': u"orm['communities.Community']"}),
            'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
            'created_by': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'meetings_created'", 'to': u"orm['users.OCUser']"}),
            'guests': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
            'held_at': ('django.db.models.fields.DateTimeField', [], {}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'location': ('django.db.models.fields.CharField', [], {'max_length': '300', 'null': 'True', 'blank': 'True'}),
            'participants': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'participated_in_meeting'", 'symmetrical': 'False', 'through': u"orm['meetings.MeetingParticipant']", 'to': u"orm['users.OCUser']"}),
            'scheduled_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
            'summary': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
            'title': ('django.db.models.fields.CharField', [], {'max_length': '300', 'null': 'True', 'blank': 'True'}),
            'uid': ('django.db.models.fields.CharField', [], {'default': "'kx8oqxz77596drsplwf2ubfe'", 'unique': 'True', 'max_length': '24'})
        },
        u'meetings.meetingparticipant': {
            'Meta': {'unique_together': "(('meeting', 'ordinal'), ('meeting', 'user'))", 'object_name': 'MeetingParticipant'},
            'default_group_name': ('django.db.models.fields.CharField', [], {'max_length': '50', 'null': 'True', 'blank': 'True'}),
            'display_name': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'is_absent': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'meeting': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'participations'", 'to': u"orm['meetings.Meeting']"}),
            'ordinal': ('django.db.models.fields.PositiveIntegerField', [], {}),
            'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'participations'", 'to': u"orm['users.OCUser']"})
        },
        u'taggit.tag': {
            'Meta': {'object_name': 'Tag'},
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '100'}),
            'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '100'})
        },
        u'taggit.taggeditem': {
            'Meta': {'object_name': 'TaggedItem'},
            'content_type': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'taggit_taggeditem_tagged_items'", 'to': u"orm['contenttypes.ContentType']"}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'object_id': ('django.db.models.fields.IntegerField', [], {'db_index': 'True'}),
            'tag': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'taggit_taggeditem_items'", 'to': u"orm['taggit.Tag']"})
        },
        u'users.ocuser': {
            'Meta': {'object_name': 'OCUser'},
            'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'display_name': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
            'email': ('django.db.models.fields.EmailField', [], {'unique': 'True', 'max_length': '255', 'db_index': 'True'}),
            'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
            'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
        }
    }

    complete_apps = ['issues']
| 89.472527
| 235
| 0.580406
|
4a10cb201725257dc5b5bb2c60ab6aa2c699f5c0
| 3,255
|
py
|
Python
|
app/views.py
|
DFilyushin/librusec
|
fd6d7a99037aac4c1112f648397830284f4165f9
|
[
"Apache-2.0"
] | 2
|
2017-12-14T11:50:16.000Z
|
2021-12-27T13:42:16.000Z
|
app/views.py
|
DFilyushin/librusec
|
fd6d7a99037aac4c1112f648397830284f4165f9
|
[
"Apache-2.0"
] | null | null | null |
app/views.py
|
DFilyushin/librusec
|
fd6d7a99037aac4c1112f648397830284f4165f9
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
import os
import time
from app import librusec
from flask import render_template, request, abort, send_file, g
from models import Authors, Books, Genre
from readlib import extract_book
RUSSIAN_LETTERS = u'АБВГДЕЖЗИКЛМНОПРСТУФХЦЧШЩЭЮЯ'
def get_author_by_id(id_author):
    """Return the author's display name, "<first_name> <last_name>".

    Raises if no author with that id exists (Query.one()).
    """
    writer = Authors.query.filter_by(id=id_author).one()
    full_name = writer.first_name + ' ' + writer.last_name
    return full_name
def get_genres():
    """Return the {id_genre: ru_genre} lookup table, cached on Flask's ``g``.

    The table is loaded from the database once per request context.
    """
    if getattr(g, 'genre', None) is None:
        g.genre = {row.id_genre: row.ru_genre for row in Genre.query.all()}
    return g.genre
def process_books(books_rows):
    """Replace each row's genre codes with readable Russian genre names.

    Mutates every row's ``genre`` attribute in place (colon-separated codes
    become a comma-separated name list) and returns the same rows.
    """
    genre_names = get_genres()
    for book in books_rows:
        # Drop the last character — assumes genre ends with a ':' delimiter
        # before splitting into individual codes (TODO confirm format).
        codes = book.genre[:-1].split(':')
        book.genre = ', '.join(genre_names.get(code, code) for code in codes)
    return books_rows
@librusec.route('/')
def index():
    """Render the start page with the Russian alphabet for author browsing."""
    # list() over a string yields one entry per letter; the generator
    # expression list(i for i in ...) was a redundant detour.
    letters = list(RUSSIAN_LETTERS)
    return render_template("index.html", letters=letters)
@librusec.route('/authors', methods=['GET'])
def authors():
    """List authors whose last name starts with the selected letter.

    Query args:
        letter: 1-based index into RUSSIAN_LETTERS; empty/absent renders
            the page with no author list.

    Responds 404 when ``letter`` is not an integer.
    """
    writers = None
    letter = request.args.get('letter', '')
    if letter != '':
        try:
            idx = int(letter)
        except ValueError:
            abort(404)
        symbol = RUSSIAN_LETTERS[idx - 1:idx]
        # .query is a class-level attribute; instantiating the model
        # (Authors().query) just to reach it was wasteful.
        writers = Authors.query.filter(
            Authors.last_name.like('%s%%' % symbol)).order_by(Authors.last_name)
    letters = list(RUSSIAN_LETTERS)
    return render_template("authors.html", letters=letters, authors=writers)
@librusec.route('/authorbook', methods=['GET'])
def book_by_author():
    """Render the list of books written by the author given as ``?id=<int>``.

    Responds 404 when the id is not an integer.
    """
    get_id = request.args.get('id', '')
    try:
        id_author = int(get_id)
        author = get_author_by_id(id_author)
    except ValueError:
        abort(404)
    # NOTE(review): get_author_by_id uses Query.one(), which raises
    # NoResultFound (not ValueError) for an unknown id — that case is not
    # caught here and would surface as a 500; confirm whether intended.
    books = Books.query.filter(Books.authors.any(id=id_author))
    # Replace genre codes with readable names (mutates the rows).
    books2 = process_books(books)
    return render_template("books.html", books=books2, author=author)
@librusec.route('/search', methods=['POST'])
def search():
    """Search books by title prefix and authors by last-name prefix.

    Renders both result sets together with the elapsed lookup time.
    """
    text = request.form['search_text']
    start_time = time.time()
    # Prefix match via LIKE.  NOTE(review): '%' or '_' typed by the user
    # pass through unescaped and act as wildcards — confirm acceptable.
    books = Books.query.filter(Books.name.like('%s%%' % text.lower()))
    author_book = Books.query.filter(Books.authors.any(Authors.last_name.like('%s%%' % text.lower())))
    elapsed = (time.time() - start_time)
    return render_template("books.html", books=books, author_books=author_book, find_text=text, elapsed=elapsed)
@librusec.route('/download/<int:id_book>', methods=['GET'])
def download(id_book):
    """Send the requested book, extracted from its zip archive, as a download.

    :param id_book: primary key of the book to extract.
    :return: Flask file response with the book attached.
    """
    book = Books.query.filter_by(id=id_book).one()
    # extract_book returns a filesystem path (its basename is reused as the
    # download file name below).
    id_file = extract_book(id_book, book.type)
    file_name = os.path.basename(id_file)
    # NOTE(review): 'document/fb2' is not a registered MIME type — confirm
    # whether something like 'application/x-fictionbook+xml' was intended.
    return send_file(filename_or_fp=id_file,
                     mimetype='document/fb2',
                     attachment_filename=file_name,
                     as_attachment=True)
@librusec.errorhandler(404)
def page_not_found(e):
    """Render the custom 404 page with the matching status code."""
    body = render_template('404.html')
    return body, 404
| 28.304348
| 112
| 0.642089
|
4a10cb46f6c287d3fd6044431f36648f16095773
| 3,204
|
py
|
Python
|
train_emotion.py
|
cuciureansergiu/facial_emotion
|
944416d1c2958c1e44fec2337acaf8e495ba8923
|
[
"MIT"
] | null | null | null |
train_emotion.py
|
cuciureansergiu/facial_emotion
|
944416d1c2958c1e44fec2337acaf8e495ba8923
|
[
"MIT"
] | null | null | null |
train_emotion.py
|
cuciureansergiu/facial_emotion
|
944416d1c2958c1e44fec2337acaf8e495ba8923
|
[
"MIT"
] | 1
|
2021-01-15T14:12:02.000Z
|
2021-01-15T14:12:02.000Z
|
"""Train an SVM facial-emotion classifier on HOG features of the KDEF faces."""
from sklearn.model_selection import train_test_split
from sklearn.metrics import confusion_matrix
from sklearn import datasets, svm, metrics
from matplotlib.pyplot import imread
from fnmatch import fnmatch as match
from zipfile import ZipFile
from util import rgb2gray
from cv2 import resize
import wget, os, re
from hog import hog
import numpy as np
import progressbar
import pickle
import sys

print("Training facial emotion model")

# Download and unpack the KDEF face database on first run.
if (os.path.isdir('./KDEF_and_AKDEF') == False):
    print("Getting KDEF database")
    url = 'https://www.kdef.se/download/KDEF_and_AKDEF.zip'
    filename = wget.download(url)
    with ZipFile('KDEF_and_AKDEF.zip', 'r') as zipObj:
        # Extract all the contents of zip file in different directory
        zipObj.extractall('./')

data_path = './KDEF_and_AKDEF/KDEF/'

# Maps the two-letter KDEF emotion code (taken from the file name) to a label.
dictionary = {
    "AF" : "afraid",
    "AN" : "angry",
    "DI" : "disgusted",
    "HA" : "happy",
    "NE" : "neutral",
    "SA" : "sad",
    "SU" : "surprised"
}

# Every input image is resized to img_w x img_h before feature extraction.
img_w = 512
img_h = 512

# NOTE(review): this rebinds the imported `hog` class name to an instance,
# shadowing the class for the rest of the script.
hog = hog()
hog.summary()

timecount = 0
avg = 0  # NOTE(review): never used below.

# 140 actors * 7 emotions
bar = progressbar.ProgressBar(maxval=140*7, \
    widgets=[progressbar.Bar('=', '[', ']'),\
    ' ', progressbar.ETA(),' ', progressbar.Percentage()])
bar.start()

labels = []
data = np.zeros((1, int(hog.feature_size * 64))) # Create empty image so the other can stack here
print("Reading the input data")
for person in os.listdir(data_path):
    for image in os.listdir(data_path + person):
        # Only front facing profiles
        if match(image, '*S.JPG'):
            # File names look like <letters><digits><letters>...; the last
            # letter of the trailing alpha group is dropped to get the
            # two-letter emotion code — TODO confirm naming scheme.
            keyword = re.match(r"([a-z]+)([0-9]+)([a-z]+)", image, re.I)
            category = keyword.groups()[2]
            category = category[:-1]
            img = imread(data_path + person + "/" + image)
            img = resize(img, (img_w, img_h))
            img = rgb2gray(img)
            feature = []
            # One HOG feature vector per non-overlapping 64x64 patch,
            # concatenated into a single row.
            for i in range(0, img_w, 64):
                for j in range(0, img_h, 64):
                    patch = img[i:i+64, j:j+64]
                    feature = np.concatenate((feature, [hog.get_feature(patch)]), axis=None)
            data = np.vstack((data,feature))
            # Label = position of the emotion code in `dictionary`.
            labels.append(list(dictionary).index(category))
            bar.update(timecount := timecount + 1)  # walrus: requires Python 3.8+
data = data[1:] # Delete the dummy feature
labels = np.array(labels)
bar.finish()

classifier = svm.SVC()
# shuffle=False keeps the split deterministic across runs.
data_train, data_test, labels_train, label_test = train_test_split(
    data, labels, test_size=0.1, shuffle=False)
print("Training the model")
classifier.fit(data_train, labels_train)
pickle.dump(classifier, open('emotion.svm', 'wb'))

print("Testing the model")
predicted = classifier.predict(data_test)
print("Classification report for classifier %s:\n%s\n"
      % (classifier, metrics.classification_report(label_test, predicted)))
disp = metrics.plot_confusion_matrix(classifier, data_test, label_test)
disp.figure_.suptitle("Confusion Matrix")
print("Confusion matrix:\n%s" % disp.confusion_matrix)

#Save the confusion matrix to a file
f = open("report.txt", "a")
f.write("Classification report for classifier\n")
f.write(str(metrics.classification_report(label_test, predicted)))
f.write(str(confusion_matrix(label_test, predicted)))
f.close()
| 31.411765
| 97
| 0.662921
|
4a10cc7d02edddd296b56c9e78aaa00d6401fcc4
| 3,423
|
py
|
Python
|
rcsb/exdb/seq/LigandNeighborMappingExtractor.py
|
rcsb/py-rcsb_exdb
|
79fc3fd7d3c5909d9e138eb633378f9f8bf0ee5d
|
[
"Apache-2.0"
] | null | null | null |
rcsb/exdb/seq/LigandNeighborMappingExtractor.py
|
rcsb/py-rcsb_exdb
|
79fc3fd7d3c5909d9e138eb633378f9f8bf0ee5d
|
[
"Apache-2.0"
] | 1
|
2022-02-10T14:31:03.000Z
|
2022-02-21T22:06:42.000Z
|
rcsb/exdb/seq/LigandNeighborMappingExtractor.py
|
rcsb/py-rcsb_exdb
|
79fc3fd7d3c5909d9e138eb633378f9f8bf0ee5d
|
[
"Apache-2.0"
] | 3
|
2020-12-03T17:26:23.000Z
|
2020-12-03T17:26:48.000Z
|
##
# File: LigandNeighborMappingExtractor.py
# Date: 28-Jun-2021 jdw
#
# Utilities to extract ligand neighbor mapping details from the exchange collections.
#
# Updates:
#
##
__docformat__ = "google en"
__author__ = "John Westbrook"
__email__ = "jwest@rcsb.rutgers.edu"
__license__ = "Apache 2.0"
import logging
from rcsb.exdb.utils.ObjectExtractor import ObjectExtractor
logger = logging.getLogger(__name__)
class LigandNeighborMappingExtractor(object):
    """Utilities to extract ligand neighbor mapping details from the exchange collections."""

    def __init__(self, cfgOb):
        # Configuration object used by ObjectExtractor to reach the exchange database.
        self.__cfgOb = cfgOb

    def getLigandNeighbors(self):
        """Extract unique chemical component ids involved in neighbor interactions with each
        polymer and branched entity instance.

        Returns:
            dict: {'entryId_entityId': [(chem_comp_id, isBound), ...], }; empty dict on failure
        """
        return self.__extractLigandNeighbors()

    def __extractLigandNeighbors(self):
        """Extract unique chemical component ids involved in neighbor interactions with each
        polymer and branched entity instance.

        Returns:
            dict: {'entryId_entityId': [(chem_comp_id, isBound), ...], }; empty dict on failure
        """
        rD = {}
        try:
            databaseName = "pdbx_core"
            collectionName = "pdbx_core_polymer_entity_instance"
            obEx = ObjectExtractor(
                self.__cfgOb,
                databaseName=databaseName,
                collectionName=collectionName,
                cacheFilePath=None,
                useCache=False,
                keyAttribute="rcsb_id",
                uniqueAttributes=["rcsb_id"],
                cacheKwargs=None,
                objectLimit=None,
                selectionQuery=None,
                selectionList=[
                    "rcsb_id",
                    "rcsb_polymer_entity_instance_container_identifiers.entry_id",
                    "rcsb_polymer_entity_instance_container_identifiers.entity_id",
                    "rcsb_polymer_entity_instance_container_identifiers.asym_id",
                    "rcsb_ligand_neighbors.ligand_comp_id",
                    "rcsb_ligand_neighbors.ligand_is_bound",
                ],
            )
            eCount = obEx.getCount()
            logger.info("Total neighbor count (%d)", eCount)
            objD = obEx.getObjects()
            for _, peiD in objD.items():
                try:
                    entryId = peiD["rcsb_polymer_entity_instance_container_identifiers"]["entry_id"]
                    entityId = peiD["rcsb_polymer_entity_instance_container_identifiers"]["entity_id"]
                    ky = entryId + "_" + entityId
                    # An entity may have multiple instances (asym ids); a set ensures each
                    # (comp_id, is_bound) pair is recorded once per entity.
                    for lnD in peiD["rcsb_ligand_neighbors"] if "rcsb_ligand_neighbors" in peiD else []:
                        if "ligand_comp_id" in lnD and "ligand_is_bound" in lnD:
                            rD.setdefault(ky, set()).add((lnD["ligand_comp_id"], lnD["ligand_is_bound"]))
                        else:
                            logger.warning("%s %s missing details lnD %r", entryId, entityId, lnD)
                except Exception as e:
                    # Skip malformed documents but keep processing the remainder.
                    logger.exception("Failing with %s", str(e))
            rD = {k: list(v) for k, v in rD.items()}
            logger.info("Unique instance %d", len(rD))
            return rD
        except Exception as e:
            logger.exception("Failing with %s", str(e))
        # Fix: previously fell through returning None on failure; callers expect a dict.
        return {}
| 40.270588
| 105
| 0.595968
|
4a10ccf7354625e6145ca9834092f260db3077a2
| 139
|
py
|
Python
|
src/_data/data_generator.py
|
aerotog/sorts
|
a9ccc8425cf6d155d16cf275bf7e4e5512e68e49
|
[
"MIT"
] | null | null | null |
src/_data/data_generator.py
|
aerotog/sorts
|
a9ccc8425cf6d155d16cf275bf7e4e5512e68e49
|
[
"MIT"
] | null | null | null |
src/_data/data_generator.py
|
aerotog/sorts
|
a9ccc8425cf6d155d16cf275bf7e4e5512e68e49
|
[
"MIT"
] | null | null | null |
def main():
    """Placeholder entry point.

    TODO: autogenerate unsorted and sorted int lists for the sorting demos.
    """
    print('HELLO WORLD')


if __name__ == "__main__":
    main()
| 17.375
| 61
| 0.654676
|
4a10cd0b6f9d47984a88105bee275f06fbac7cf9
| 2,253
|
py
|
Python
|
cvxpy/expressions/variables/semidef_var.py
|
quantopian/cvxpy
|
7deee4d172470aa8f629dab7fead50467afa75ff
|
[
"Apache-2.0"
] | 5
|
2017-08-31T01:37:00.000Z
|
2022-03-24T04:23:09.000Z
|
cvxpy/expressions/variables/semidef_var.py
|
quantopian/cvxpy
|
7deee4d172470aa8f629dab7fead50467afa75ff
|
[
"Apache-2.0"
] | null | null | null |
cvxpy/expressions/variables/semidef_var.py
|
quantopian/cvxpy
|
7deee4d172470aa8f629dab7fead50467afa75ff
|
[
"Apache-2.0"
] | 6
|
2017-02-09T19:37:07.000Z
|
2021-01-07T00:17:54.000Z
|
"""
Copyright 2013 Steven Diamond, Eric Chu
This file is part of CVXPY.
CVXPY is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
CVXPY is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with CVXPY. If not, see <http://www.gnu.org/licenses/>.
"""
from cvxpy.expressions.constants.constant import Constant
from cvxpy.expressions.variables.variable import Variable
from cvxpy.expressions.variables.symmetric import upper_tri_to_full
from cvxpy.constraints.semidefinite import SDP
from cvxpy.expressions import cvxtypes
import cvxpy.lin_ops.lin_utils as lu
def Semidef(n, name=None):
    """Return an n x n expression constrained to be positive semidefinite.

    Backed by a SemidefUpperTri variable holding only the upper triangular
    entries; a constant scatter matrix expands them to the full n x n matrix.
    """
    upper_tri = SemidefUpperTri(n, name)
    expander = Constant(upper_tri_to_full(n))
    return cvxtypes.reshape()(expander * upper_tri, n, n)
class SemidefUpperTri(Variable):
    """Variable holding the upper triangular part of a positive semidefinite matrix."""

    def __init__(self, n, name=None):
        self.n = n
        # An n x n symmetric matrix has n(n+1)/2 free upper-triangular entries.
        entries = n * (n + 1) // 2
        super(SemidefUpperTri, self).__init__(entries, 1, name)

    def get_data(self):
        """Returns info needed to reconstruct the expression besides the args."""
        return [self.n, self.name]

    def canonicalize(self):
        """Variable must be semidefinite and symmetric."""
        # `self.size` / `self.id` come from the Variable base class.
        tri = lu.create_var((self.size[0], 1), self.id)
        # Constant scatter matrix mapping triangular entries to the full matrix.
        scatter = lu.create_const(upper_tri_to_full(self.n),
                                  (self.n * self.n, self.size[0]),
                                  sparse=True)
        expanded = lu.mul_expr(scatter, tri, (self.n * self.n, 1))
        expanded = lu.reshape(expanded, (self.n, self.n))
        # Symmetry already holds by construction, so the SDP constraint skips it.
        return (tri, [SDP(expanded, enforce_sym=False)])

    def __repr__(self):
        """String to recreate the object."""
        return "SemidefUpperTri(%d)" % self.n
| 35.761905
| 79
| 0.699512
|
4a10cd352a9478cc5e60f5aab1ae65098dd621fb
| 1,071
|
py
|
Python
|
bims/tests/test_fish_form_views.py
|
Christiaanvdm/django-bims
|
f92a63156c711b2d53c5f8ea06867cd64cee9eb9
|
[
"MIT"
] | null | null | null |
bims/tests/test_fish_form_views.py
|
Christiaanvdm/django-bims
|
f92a63156c711b2d53c5f8ea06867cd64cee9eb9
|
[
"MIT"
] | null | null | null |
bims/tests/test_fish_form_views.py
|
Christiaanvdm/django-bims
|
f92a63156c711b2d53c5f8ea06867cd64cee9eb9
|
[
"MIT"
] | null | null | null |
# coding=utf-8
"""Tests for models."""
from django.test import TestCase, Client
from allauth.utils import get_user_model
from bims.tests.model_factories import (
LocationSiteF,
)
class TestFishFormView(TestCase):
    """Exercises the fish form view."""

    def setUp(self):
        """Create a test client and a location site fixture before each test."""
        self.client = Client()
        self.location_site = LocationSiteF.create()

    def test_get_fish_form(self):
        """GET the fish form as a logged-in superuser and verify a context exists."""
        account = get_user_model().objects.create(
            is_staff=True,
            is_active=True,
            is_superuser=True,
            username='@.test')
        account.set_password('psst')
        account.save()

        logged_in = self.client.login(
            username='@.test',
            password='psst'
        )
        self.assertTrue(logged_in)

        url = '/fish-form/?siteId={}'.format(self.location_site.id)
        response = self.client.get(url)
        self.assertIsNotNone(response.context)
| 23.282609
| 51
| 0.549953
|
4a10cef7146700cd4cfba96bb2f8759dac74f298
| 1,374
|
py
|
Python
|
tests/python/mkl/test_subgraph.py
|
leeesangwon/incubator-mxnet
|
0514233103baff5e1581cf2057f561f7a36616c2
|
[
"Apache-2.0"
] | 211
|
2016-06-06T08:32:36.000Z
|
2021-07-03T16:50:16.000Z
|
tests/python/mkl/test_subgraph.py
|
leeesangwon/incubator-mxnet
|
0514233103baff5e1581cf2057f561f7a36616c2
|
[
"Apache-2.0"
] | 42
|
2017-01-05T02:45:13.000Z
|
2020-08-11T23:45:27.000Z
|
tests/python/mkl/test_subgraph.py
|
leeesangwon/incubator-mxnet
|
0514233103baff5e1581cf2057f561f7a36616c2
|
[
"Apache-2.0"
] | 58
|
2016-10-27T07:37:08.000Z
|
2021-07-03T16:50:17.000Z
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import sys
import os
import mxnet as mx
import numpy as np
import unittest
import ctypes
import pytest
def test_float64_fallback():
    """A FullyConnected layer must execute on float64 inputs (non-default dtype)."""
    fc = mx.sym.FullyConnected(
        mx.sym.Variable('in'),
        mx.sym.Variable('w'),
        mx.sym.Variable('b'),
        num_hidden=2)

    dtype = 'float64'
    arg_arrays = {
        'in': mx.nd.array([[2, 3, 4]], dtype=dtype),
        'w': mx.nd.array([[1, 2, 3], [4, 5, 6]], dtype=dtype),
        'b': mx.nd.array([7, 8], dtype=dtype),
    }
    executor = fc._bind(mx.cpu(), arg_arrays, args_grad=None, grad_req='write')
    executor.forward()
    # Block until the computation completes so failures surface in this test.
    executor.outputs[0].wait_to_read()
| 34.35
| 68
| 0.695779
|
4a10cf34b8408dd29fb37942c42c5c24d8047da8
| 567
|
py
|
Python
|
encoding/models/sseg/__init__.py
|
ruijieren98/DANet
|
e38d61e371179833c08888fd5a1ee444cf5bd875
|
[
"MIT"
] | 2,190
|
2018-09-11T11:44:50.000Z
|
2022-03-30T15:20:11.000Z
|
encoding/models/sseg/__init__.py
|
haoah316/DANet
|
56a612ec1ed5c2573ebc8df04ad08475fbf13a52
|
[
"MIT"
] | 138
|
2018-09-12T21:51:15.000Z
|
2022-03-22T13:24:51.000Z
|
encoding/models/sseg/__init__.py
|
haoah316/DANet
|
56a612ec1ed5c2573ebc8df04ad08475fbf13a52
|
[
"MIT"
] | 531
|
2018-09-12T06:46:10.000Z
|
2022-03-30T13:14:28.000Z
|
from .base import *
from .fcn import *
from .psp import *
from .fcfpn import *
from .atten import *
from .encnet import *
from .deeplab import *
from .upernet import *
from .dran import *
from .danet import *
def get_segmentation_model(name, **kwargs):
    """Look up a segmentation model constructor by (case-insensitive) name.

    Raises KeyError for unknown names.
    """
    constructors = {
        'fcn': get_fcn,
        'psp': get_psp,
        'fcfpn': get_fcfpn,
        'atten': get_atten,
        'encnet': get_encnet,
        'upernet': get_upernet,
        'deeplab': get_deeplab,
        'dran': get_dran,
        'danet': get_danet,
    }
    return constructors[name.lower()](**kwargs)
| 22.68
| 43
| 0.604938
|
4a10d0244c55a7f931e4acdd8292b76cf230eaf4
| 4,901
|
py
|
Python
|
lib/ansible/modules/network/solace/solace_topic.py
|
streetster/ansible-solace
|
ed802eee2b37a50e34973d89c420ed58f105d493
|
[
"MIT"
] | 5
|
2019-07-02T10:58:51.000Z
|
2020-05-28T21:23:47.000Z
|
lib/ansible/modules/network/solace/solace_topic.py
|
mkst/ansible-solace
|
ed802eee2b37a50e34973d89c420ed58f105d493
|
[
"MIT"
] | 3
|
2019-07-14T10:06:32.000Z
|
2020-05-25T09:42:49.000Z
|
lib/ansible/modules/network/solace/solace_topic.py
|
mkst/ansible-solace
|
ed802eee2b37a50e34973d89c420ed58f105d493
|
[
"MIT"
] | 2
|
2019-07-02T11:34:19.000Z
|
2020-04-06T15:54:15.000Z
|
#!/usr/bin/env python
# Copyright (c) 2019, Mark Street <mkst@protonmail.com>
# MIT License
"""Ansible-Solace Module for configuring Topics"""
import ansible.module_utils.network.solace.solace_utils as su
from ansible.module_utils.basic import AnsibleModule
ANSIBLE_METADATA = {
'metadata_version': '0.1.0',
'status': ['preview'],
'supported_by': 'community'
}
DOCUMENTATION = '''
---
module: solace_topic
short_description: todo
description:
- "todo"
- "Reference documentation: todo"
options:
name:
description:
      - This is the name of the Topic Endpoint being configured
required: true
msg_vpn:
description:
      - The message vpn the Topic Endpoint is on/created
required: true
settings:
description:
- JSON dictionary of additional configuration, see Reference documentation
required: false
state:
description:
      - Target state of the Topic Endpoint, present/absent
required: false
host:
description:
- Hostname of Solace Broker, default is "localhost"
required: false
port:
description:
- Management port of Solace Broker, default is 8080
required: false
secure_connection:
description:
- If true use https rather than http for querying
required: false
username:
description:
- Administrator username for Solace Broker, default is "admin"
required: false
password:
description:
- Administrator password for Solace Broker, default is "admin"
required: false
timeout:
description:
- Connection timeout when making requests, defaults to 1 (second)
required: false
x_broker:
description:
- Custom HTTP header with the broker virtual router id, if using a SMEPv2 Proxy/agent infrastructure
required: false
author:
- Mark Street (mkst@protonmail.com)
- Swen-Helge Huber (swen-helge.huber@solace.com)
- Ricardo Gomez-Ulmke (ricardo.gomez-ulmke@solace.com)
'''
EXAMPLES = '''
todo
'''
RETURN = '''
response:
description: The response back from the Solace Sempv2 request
type: dict
'''
class SolaceTopicTask(su.SolaceTask):
    """Solace task implementing CRUD operations for topic endpoints via SEMPv2."""

    # SEMPv2 attribute used to identify a topic endpoint in lookups.
    LOOKUP_ITEM_KEY = 'topicEndpointName'

    def __init__(self, module):
        su.SolaceTask.__init__(self, module)

    def lookup_item(self):
        # Name of the topic endpoint being managed.
        return self.module.params['name']

    def get_args(self):
        return [self.module.params['msg_vpn']]

    def get_func(self, solace_config, vpn, lookup_item_value):
        """Pull configuration for all Topic/Endpoints associated with a given VPN"""
        uri = [su.SEMP_V2_CONFIG, su.MSG_VPNS, vpn, su.TOPIC_ENDPOINTS, lookup_item_value]
        return su.get_configuration(solace_config, uri, self.LOOKUP_ITEM_KEY)

    def create_func(self, solace_config, vpn, topic, settings=None):
        """Create a Topic/Endpoint"""
        mandatory = {
            'msgVpnName': vpn,
            'topicEndpointName': topic
        }
        payload = su.merge_dicts({}, mandatory, settings)
        uri = [su.SEMP_V2_CONFIG, su.MSG_VPNS, vpn, su.TOPIC_ENDPOINTS]
        return su.make_post_request(solace_config, uri, payload)

    def update_func(self, solace_config, vpn, lookup_item_value, settings):
        """Update an existing Topic/Endpoint"""
        uri = [su.SEMP_V2_CONFIG, su.MSG_VPNS, vpn, su.TOPIC_ENDPOINTS, lookup_item_value]
        return su.make_patch_request(solace_config, uri, settings)

    def delete_func(self, solace_config, vpn, lookup_item_value):
        """Delete a Topic/Endpoint"""
        uri = [su.SEMP_V2_CONFIG, su.MSG_VPNS, vpn, su.TOPIC_ENDPOINTS, lookup_item_value]
        return su.make_delete_request(solace_config, uri)
def run_module():
    """Entrypoint to module.

    Builds the Ansible argument spec, instantiates the task and exits with its result.
    """
    module_args = dict(
        name=dict(type='str', required=True),
        msg_vpn=dict(type='str', required=True),
        host=dict(type='str', default='localhost'),
        port=dict(type='int', default=8080),
        secure_connection=dict(type='bool', default=False),
        username=dict(type='str', default='admin'),
        password=dict(type='str', default='admin', no_log=True),
        # Fix: 'require' was a typo for 'required' (invalid argument-spec key);
        # the intent — an optional argument — is unchanged.
        settings=dict(type='dict', required=False),
        state=dict(default='present', choices=['absent', 'present']),
        timeout=dict(default='1', required=False),
        x_broker=dict(type='str', default='')
    )
    module = AnsibleModule(
        argument_spec=module_args,
        supports_check_mode=True
    )

    solace_task = SolaceTopicTask(module)
    result = solace_task.do_task()
    module.exit_json(**result)
def main():
    """Script entry point: delegate to run_module()."""
    run_module()


if __name__ == '__main__':
    main()

###
# The End.
| 29.70303
| 112
| 0.652316
|
4a10d1958299ef6601cfdc9c5f32d3e54839d185
| 311
|
py
|
Python
|
automol/mult/_mult.py
|
snelliott/automol
|
d1f7d51c1bbe06ba7569ea7c75304618cebee198
|
[
"Apache-2.0"
] | 2
|
2021-03-01T14:23:25.000Z
|
2021-11-28T19:17:08.000Z
|
automol/mult/_mult.py
|
avcopan/autochem
|
19d9b0402568170ae5ca0adecb460d36f258a9bd
|
[
"Apache-2.0"
] | 1
|
2021-02-12T21:02:22.000Z
|
2021-02-12T21:35:33.000Z
|
automol/mult/_mult.py
|
avcopan/autochem
|
19d9b0402568170ae5ca0adecb460d36f258a9bd
|
[
"Apache-2.0"
] | 7
|
2019-12-18T20:11:06.000Z
|
2020-10-14T08:54:16.000Z
|
""" Calculate spins (2S) from spin multiplicities.
"""
def spin(mult):
    """Return the spin (2Ms) for a given spin multiplicity.

    The spin equals the number of unpaired electrons: mult = 2S + 1,
    hence 2S = mult - 1.

    :param mult: multiplicity
    :type mult: int
    :rtype: int
    """
    return mult - 1
| 22.214286
| 65
| 0.623794
|
4a10d1f7105b97c9f63dd46bb838943530fb62d3
| 3,136
|
py
|
Python
|
generic_api/generics/client.py
|
guestready/generic_api
|
4830995ec2f6ea77b1b3bff1d86d4152530b0942
|
[
"BSD-2-Clause"
] | 1
|
2020-11-24T07:49:37.000Z
|
2020-11-24T07:49:37.000Z
|
generic_api/generics/client.py
|
guestready/generic_api
|
4830995ec2f6ea77b1b3bff1d86d4152530b0942
|
[
"BSD-2-Clause"
] | null | null | null |
generic_api/generics/client.py
|
guestready/generic_api
|
4830995ec2f6ea77b1b3bff1d86d4152530b0942
|
[
"BSD-2-Clause"
] | null | null | null |
import json
import requests
class GenericClient:
    """Composable HTTP API client.

    Concrete clients plug in three collaborator classes:

    * ``session_class`` (required) -- supplies base URL, headers and params.
    * ``errors_handler_class`` (required) -- validates each response.
    * ``retries_handler_class`` (optional) -- decides whether a response is
      eligible for a retry.
    """

    session_class = None
    errors_handler_class = None
    retries_handler_class = None

    def __init__(self, *args, **kwargs):
        pass

    def get_session_class(self, *args, **kwargs):
        """Return the session class; subclasses must provide one."""
        assert self.session_class is not None, (
            "'%s' should either include a `session_class` attribute, "
            "or override the `get_session_class()` method."
            % self.__class__.__name__
        )
        return self.session_class

    def get_session_context(self, *args, **kwargs):
        """Extra context passed to the session constructor; override as needed."""
        return {}

    def get_session(self, *args, **kwargs):
        """
        Return instantiated session
        """
        session_class = self.get_session_class()
        kwargs['context'] = self.get_session_context()
        return session_class(*args, **kwargs)

    def get_errors_handler_class(self, *args, **kwargs):
        """Return the errors-handler class; subclasses must provide one."""
        assert self.errors_handler_class is not None, (
            "'%s' should either include a `errors_handler_class` attribute, "
            "or override the `get_errors_handler_class()` method."
            % self.__class__.__name__
        )
        return self.errors_handler_class

    def get_errors_handler_context(self, *args, **kwargs):
        """Extra context passed to the errors handler; override as needed."""
        return {}

    def get_errors_handler(self, *args, **kwargs):
        """Return an instantiated errors handler."""
        errors_handler_class = self.get_errors_handler_class()
        kwargs['context'] = self.get_errors_handler_context()
        return errors_handler_class(*args, **kwargs)

    def get_retries_handler_class(self, *args, **kwargs):
        """Return the retries-handler class, or None when retries are disabled."""
        return self.retries_handler_class

    def get_retries_handler_context(self, *args, **kwargs):
        """Extra context passed to the retries handler; override as needed."""
        return {}

    def get_retries_handler(self, *args, **kwargs):
        """Return an instantiated retries handler, or None when not configured."""
        retries_handler_class = self.get_retries_handler_class()
        if retries_handler_class:
            kwargs['context'] = self.get_retries_handler_context()
            return retries_handler_class(*args, **kwargs)
        else:
            return None

    def get_base_url(self):
        """Return the API base URL exposed by the session."""
        return self.get_session().get_base_url()

    def _request(self, http_method, endpoint_url, data=None, params=None, retries_handler=None):
        """Issue one HTTP request, retrying while the retries handler allows it.

        :param http_method: a ``requests`` function such as ``requests.get``.
        :param endpoint_url: absolute URL to call.
        :param data: optional JSON-serializable request body.
        :param params: optional extra query parameters (merged with session params).
        :param retries_handler: internal -- carries retry state across recursive calls.
        :return: whatever the errors handler's ``validate`` returns.
        """
        # Fix: `params` previously defaulted to a shared mutable `{}`;
        # use the None sentinel idiom instead (behavior otherwise unchanged).
        if params is None:
            params = {}
        session = self.get_session()
        retries_handler = retries_handler or self.get_retries_handler()
        error_handler = self.get_errors_handler()

        request_headers = session.headers()
        # Session-level params take precedence over caller-supplied ones.
        request_params = {**params, **session.params()}

        response = http_method(
            endpoint_url,
            params=request_params,
            headers=request_headers,
            data=json.dumps(data) if data else None
        )

        if retries_handler and retries_handler.is_eligible(response):
            retries_handler.increment()
            # Re-enter with the same handler so the retry budget is shared.
            return self._request(http_method, endpoint_url, data, params, retries_handler)

        return error_handler.validate(response)

    def get(self, endpoint_url, *args, **kwargs):
        """HTTP GET against ``endpoint_url``."""
        return self._request(requests.get, endpoint_url, *args, **kwargs)

    def post(self, endpoint_url, data, *args, **kwargs):
        """HTTP POST of ``data`` (JSON-encoded) to ``endpoint_url``."""
        return self._request(requests.post, endpoint_url, data=data, *args, **kwargs)
| 33.361702
| 94
| 0.651148
|
4a10d28c852a0f22b6e89ee68f58c481f59f9262
| 42,445
|
py
|
Python
|
sdk/python/pulumi_aws/acmpca/certificate_authority.py
|
RafalSumislawski/pulumi-aws
|
7c8a335d327c173aa32c8b3d98816e760db329fa
|
[
"ECL-2.0",
"Apache-2.0"
] | 1
|
2021-11-10T16:33:40.000Z
|
2021-11-10T16:33:40.000Z
|
sdk/python/pulumi_aws/acmpca/certificate_authority.py
|
RafalSumislawski/pulumi-aws
|
7c8a335d327c173aa32c8b3d98816e760db329fa
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
sdk/python/pulumi_aws/acmpca/certificate_authority.py
|
RafalSumislawski/pulumi-aws
|
7c8a335d327c173aa32c8b3d98816e760db329fa
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
from . import outputs
from ._inputs import *
__all__ = ['CertificateAuthorityArgs', 'CertificateAuthority']
# NOTE: this class is emitted by the Pulumi Terraform Bridge (tfgen); keep manual
# changes to comments only so regeneration does not create conflicts.
@pulumi.input_type
class CertificateAuthorityArgs:
    def __init__(__self__, *,
                 certificate_authority_configuration: pulumi.Input['CertificateAuthorityCertificateAuthorityConfigurationArgs'],
                 enabled: Optional[pulumi.Input[bool]] = None,
                 permanent_deletion_time_in_days: Optional[pulumi.Input[int]] = None,
                 revocation_configuration: Optional[pulumi.Input['CertificateAuthorityRevocationConfigurationArgs']] = None,
                 tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
                 type: Optional[pulumi.Input[str]] = None):
        """
        The set of arguments for constructing a CertificateAuthority resource.
        :param pulumi.Input['CertificateAuthorityCertificateAuthorityConfigurationArgs'] certificate_authority_configuration: Nested argument containing algorithms and certificate subject information. Defined below.
        :param pulumi.Input[bool] enabled: Boolean value that specifies whether certificate revocation lists (CRLs) are enabled. Defaults to `false`.
        :param pulumi.Input[int] permanent_deletion_time_in_days: The number of days to make a CA restorable after it has been deleted, must be between 7 to 30 days, with default to 30 days.
        :param pulumi.Input['CertificateAuthorityRevocationConfigurationArgs'] revocation_configuration: Nested argument containing revocation configuration. Defined below.
        :param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: Specifies a key-value map of user-defined tags that are attached to the certificate authority. .If configured with a provider `default_tags` configuration block present, tags with matching keys will overwrite those defined at the provider-level.
        :param pulumi.Input[str] type: The type of the certificate authority. Defaults to `SUBORDINATE`. Valid values: `ROOT` and `SUBORDINATE`.
        """
        # The required configuration is stored unconditionally; optional arguments
        # are stored only when provided so unset values stay absent from the input map.
        pulumi.set(__self__, "certificate_authority_configuration", certificate_authority_configuration)
        if enabled is not None:
            pulumi.set(__self__, "enabled", enabled)
        if permanent_deletion_time_in_days is not None:
            pulumi.set(__self__, "permanent_deletion_time_in_days", permanent_deletion_time_in_days)
        if revocation_configuration is not None:
            pulumi.set(__self__, "revocation_configuration", revocation_configuration)
        if tags is not None:
            pulumi.set(__self__, "tags", tags)
        if type is not None:
            pulumi.set(__self__, "type", type)

    @property
    @pulumi.getter(name="certificateAuthorityConfiguration")
    def certificate_authority_configuration(self) -> pulumi.Input['CertificateAuthorityCertificateAuthorityConfigurationArgs']:
        """
        Nested argument containing algorithms and certificate subject information. Defined below.
        """
        return pulumi.get(self, "certificate_authority_configuration")

    @certificate_authority_configuration.setter
    def certificate_authority_configuration(self, value: pulumi.Input['CertificateAuthorityCertificateAuthorityConfigurationArgs']):
        pulumi.set(self, "certificate_authority_configuration", value)

    @property
    @pulumi.getter
    def enabled(self) -> Optional[pulumi.Input[bool]]:
        """
        Boolean value that specifies whether certificate revocation lists (CRLs) are enabled. Defaults to `false`.
        """
        return pulumi.get(self, "enabled")

    @enabled.setter
    def enabled(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "enabled", value)

    @property
    @pulumi.getter(name="permanentDeletionTimeInDays")
    def permanent_deletion_time_in_days(self) -> Optional[pulumi.Input[int]]:
        """
        The number of days to make a CA restorable after it has been deleted, must be between 7 to 30 days, with default to 30 days.
        """
        return pulumi.get(self, "permanent_deletion_time_in_days")

    @permanent_deletion_time_in_days.setter
    def permanent_deletion_time_in_days(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "permanent_deletion_time_in_days", value)

    @property
    @pulumi.getter(name="revocationConfiguration")
    def revocation_configuration(self) -> Optional[pulumi.Input['CertificateAuthorityRevocationConfigurationArgs']]:
        """
        Nested argument containing revocation configuration. Defined below.
        """
        return pulumi.get(self, "revocation_configuration")

    @revocation_configuration.setter
    def revocation_configuration(self, value: Optional[pulumi.Input['CertificateAuthorityRevocationConfigurationArgs']]):
        pulumi.set(self, "revocation_configuration", value)

    @property
    @pulumi.getter
    def tags(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:
        """
        Specifies a key-value map of user-defined tags that are attached to the certificate authority. .If configured with a provider `default_tags` configuration block present, tags with matching keys will overwrite those defined at the provider-level.
        """
        return pulumi.get(self, "tags")

    @tags.setter
    def tags(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]):
        pulumi.set(self, "tags", value)

    @property
    @pulumi.getter
    def type(self) -> Optional[pulumi.Input[str]]:
        """
        The type of the certificate authority. Defaults to `SUBORDINATE`. Valid values: `ROOT` and `SUBORDINATE`.
        """
        return pulumi.get(self, "type")

    @type.setter
    def type(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "type", value)
@pulumi.input_type
class _CertificateAuthorityState:
def __init__(__self__, *,
arn: Optional[pulumi.Input[str]] = None,
certificate: Optional[pulumi.Input[str]] = None,
certificate_authority_configuration: Optional[pulumi.Input['CertificateAuthorityCertificateAuthorityConfigurationArgs']] = None,
certificate_chain: Optional[pulumi.Input[str]] = None,
certificate_signing_request: Optional[pulumi.Input[str]] = None,
enabled: Optional[pulumi.Input[bool]] = None,
not_after: Optional[pulumi.Input[str]] = None,
not_before: Optional[pulumi.Input[str]] = None,
permanent_deletion_time_in_days: Optional[pulumi.Input[int]] = None,
revocation_configuration: Optional[pulumi.Input['CertificateAuthorityRevocationConfigurationArgs']] = None,
serial: Optional[pulumi.Input[str]] = None,
status: Optional[pulumi.Input[str]] = None,
tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
tags_all: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
type: Optional[pulumi.Input[str]] = None):
"""
Input properties used for looking up and filtering CertificateAuthority resources.
:param pulumi.Input[str] arn: Amazon Resource Name (ARN) of the certificate authority.
:param pulumi.Input[str] certificate: Base64-encoded certificate authority (CA) certificate. Only available after the certificate authority certificate has been imported.
:param pulumi.Input['CertificateAuthorityCertificateAuthorityConfigurationArgs'] certificate_authority_configuration: Nested argument containing algorithms and certificate subject information. Defined below.
:param pulumi.Input[str] certificate_chain: Base64-encoded certificate chain that includes any intermediate certificates and chains up to root on-premises certificate that you used to sign your private CA certificate. The chain does not include your private CA certificate. Only available after the certificate authority certificate has been imported.
:param pulumi.Input[str] certificate_signing_request: The base64 PEM-encoded certificate signing request (CSR) for your private CA certificate.
:param pulumi.Input[bool] enabled: Boolean value that specifies whether certificate revocation lists (CRLs) are enabled. Defaults to `false`.
:param pulumi.Input[str] not_after: Date and time after which the certificate authority is not valid. Only available after the certificate authority certificate has been imported.
:param pulumi.Input[str] not_before: Date and time before which the certificate authority is not valid. Only available after the certificate authority certificate has been imported.
:param pulumi.Input[int] permanent_deletion_time_in_days: The number of days to make a CA restorable after it has been deleted, must be between 7 to 30 days, with default to 30 days.
:param pulumi.Input['CertificateAuthorityRevocationConfigurationArgs'] revocation_configuration: Nested argument containing revocation configuration. Defined below.
:param pulumi.Input[str] serial: Serial number of the certificate authority. Only available after the certificate authority certificate has been imported.
:param pulumi.Input[str] status: Status of the certificate authority.
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: Specifies a key-value map of user-defined tags that are attached to the certificate authority. .If configured with a provider `default_tags` configuration block present, tags with matching keys will overwrite those defined at the provider-level.
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags_all: A map of tags assigned to the resource, including those inherited from the provider .
:param pulumi.Input[str] type: The type of the certificate authority. Defaults to `SUBORDINATE`. Valid values: `ROOT` and `SUBORDINATE`.
"""
if arn is not None:
pulumi.set(__self__, "arn", arn)
if certificate is not None:
pulumi.set(__self__, "certificate", certificate)
if certificate_authority_configuration is not None:
pulumi.set(__self__, "certificate_authority_configuration", certificate_authority_configuration)
if certificate_chain is not None:
pulumi.set(__self__, "certificate_chain", certificate_chain)
if certificate_signing_request is not None:
pulumi.set(__self__, "certificate_signing_request", certificate_signing_request)
if enabled is not None:
pulumi.set(__self__, "enabled", enabled)
if not_after is not None:
pulumi.set(__self__, "not_after", not_after)
if not_before is not None:
pulumi.set(__self__, "not_before", not_before)
if permanent_deletion_time_in_days is not None:
pulumi.set(__self__, "permanent_deletion_time_in_days", permanent_deletion_time_in_days)
if revocation_configuration is not None:
pulumi.set(__self__, "revocation_configuration", revocation_configuration)
if serial is not None:
pulumi.set(__self__, "serial", serial)
if status is not None:
pulumi.set(__self__, "status", status)
if tags is not None:
pulumi.set(__self__, "tags", tags)
if tags_all is not None:
pulumi.set(__self__, "tags_all", tags_all)
if type is not None:
pulumi.set(__self__, "type", type)
    # --- State accessor pairs; each simply proxies pulumi.get/pulumi.set on the
    # --- named property of this state bag.
    @property
    @pulumi.getter
    def arn(self) -> Optional[pulumi.Input[str]]:
        """
        Amazon Resource Name (ARN) of the certificate authority.
        """
        return pulumi.get(self, "arn")
    @arn.setter
    def arn(self, value: Optional[pulumi.Input[str]]):
        """Setter for ``arn``."""
        pulumi.set(self, "arn", value)
    @property
    @pulumi.getter
    def certificate(self) -> Optional[pulumi.Input[str]]:
        """
        Base64-encoded certificate authority (CA) certificate. Only available after the certificate authority certificate has been imported.
        """
        return pulumi.get(self, "certificate")
    @certificate.setter
    def certificate(self, value: Optional[pulumi.Input[str]]):
        """Setter for ``certificate``."""
        pulumi.set(self, "certificate", value)
    @property
    @pulumi.getter(name="certificateAuthorityConfiguration")
    def certificate_authority_configuration(self) -> Optional[pulumi.Input['CertificateAuthorityCertificateAuthorityConfigurationArgs']]:
        """
        Nested argument containing algorithms and certificate subject information. Defined below.
        """
        return pulumi.get(self, "certificate_authority_configuration")
    @certificate_authority_configuration.setter
    def certificate_authority_configuration(self, value: Optional[pulumi.Input['CertificateAuthorityCertificateAuthorityConfigurationArgs']]):
        """Setter for ``certificate_authority_configuration``."""
        pulumi.set(self, "certificate_authority_configuration", value)
    @property
    @pulumi.getter(name="certificateChain")
    def certificate_chain(self) -> Optional[pulumi.Input[str]]:
        """
        Base64-encoded certificate chain that includes any intermediate certificates and chains up to root on-premises certificate that you used to sign your private CA certificate. The chain does not include your private CA certificate. Only available after the certificate authority certificate has been imported.
        """
        return pulumi.get(self, "certificate_chain")
    @certificate_chain.setter
    def certificate_chain(self, value: Optional[pulumi.Input[str]]):
        """Setter for ``certificate_chain``."""
        pulumi.set(self, "certificate_chain", value)
    @property
    @pulumi.getter(name="certificateSigningRequest")
    def certificate_signing_request(self) -> Optional[pulumi.Input[str]]:
        """
        The base64 PEM-encoded certificate signing request (CSR) for your private CA certificate.
        """
        return pulumi.get(self, "certificate_signing_request")
    @certificate_signing_request.setter
    def certificate_signing_request(self, value: Optional[pulumi.Input[str]]):
        """Setter for ``certificate_signing_request``."""
        pulumi.set(self, "certificate_signing_request", value)
    @property
    @pulumi.getter
    def enabled(self) -> Optional[pulumi.Input[bool]]:
        """
        Boolean value that specifies whether certificate revocation lists (CRLs) are enabled. Defaults to `false`.
        """
        return pulumi.get(self, "enabled")
    @enabled.setter
    def enabled(self, value: Optional[pulumi.Input[bool]]):
        """Setter for ``enabled``."""
        pulumi.set(self, "enabled", value)
    @property
    @pulumi.getter(name="notAfter")
    def not_after(self) -> Optional[pulumi.Input[str]]:
        """
        Date and time after which the certificate authority is not valid. Only available after the certificate authority certificate has been imported.
        """
        return pulumi.get(self, "not_after")
    @not_after.setter
    def not_after(self, value: Optional[pulumi.Input[str]]):
        """Setter for ``not_after``."""
        pulumi.set(self, "not_after", value)
    @property
    @pulumi.getter(name="notBefore")
    def not_before(self) -> Optional[pulumi.Input[str]]:
        """
        Date and time before which the certificate authority is not valid. Only available after the certificate authority certificate has been imported.
        """
        return pulumi.get(self, "not_before")
    @not_before.setter
    def not_before(self, value: Optional[pulumi.Input[str]]):
        """Setter for ``not_before``."""
        pulumi.set(self, "not_before", value)
    @property
    @pulumi.getter(name="permanentDeletionTimeInDays")
    def permanent_deletion_time_in_days(self) -> Optional[pulumi.Input[int]]:
        """
        The number of days to make a CA restorable after it has been deleted, must be between 7 to 30 days, with default to 30 days.
        """
        return pulumi.get(self, "permanent_deletion_time_in_days")
    @permanent_deletion_time_in_days.setter
    def permanent_deletion_time_in_days(self, value: Optional[pulumi.Input[int]]):
        """Setter for ``permanent_deletion_time_in_days``."""
        pulumi.set(self, "permanent_deletion_time_in_days", value)
    @property
    @pulumi.getter(name="revocationConfiguration")
    def revocation_configuration(self) -> Optional[pulumi.Input['CertificateAuthorityRevocationConfigurationArgs']]:
        """
        Nested argument containing revocation configuration. Defined below.
        """
        return pulumi.get(self, "revocation_configuration")
    @revocation_configuration.setter
    def revocation_configuration(self, value: Optional[pulumi.Input['CertificateAuthorityRevocationConfigurationArgs']]):
        """Setter for ``revocation_configuration``."""
        pulumi.set(self, "revocation_configuration", value)
    @property
    @pulumi.getter
    def serial(self) -> Optional[pulumi.Input[str]]:
        """
        Serial number of the certificate authority. Only available after the certificate authority certificate has been imported.
        """
        return pulumi.get(self, "serial")
    @serial.setter
    def serial(self, value: Optional[pulumi.Input[str]]):
        """Setter for ``serial``."""
        pulumi.set(self, "serial", value)
    @property
    @pulumi.getter
    def status(self) -> Optional[pulumi.Input[str]]:
        """
        Status of the certificate authority.
        """
        return pulumi.get(self, "status")
    @status.setter
    def status(self, value: Optional[pulumi.Input[str]]):
        """Setter for ``status``."""
        pulumi.set(self, "status", value)
    @property
    @pulumi.getter
    def tags(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:
        """
        Specifies a key-value map of user-defined tags that are attached to the certificate authority. .If configured with a provider `default_tags` configuration block present, tags with matching keys will overwrite those defined at the provider-level.
        """
        return pulumi.get(self, "tags")
    @tags.setter
    def tags(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]):
        """Setter for ``tags``."""
        pulumi.set(self, "tags", value)
    @property
    @pulumi.getter(name="tagsAll")
    def tags_all(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:
        """
        A map of tags assigned to the resource, including those inherited from the provider .
        """
        return pulumi.get(self, "tags_all")
    @tags_all.setter
    def tags_all(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]):
        """Setter for ``tags_all``."""
        pulumi.set(self, "tags_all", value)
    @property
    @pulumi.getter
    def type(self) -> Optional[pulumi.Input[str]]:
        """
        The type of the certificate authority. Defaults to `SUBORDINATE`. Valid values: `ROOT` and `SUBORDINATE`.
        """
        return pulumi.get(self, "type")
    @type.setter
    def type(self, value: Optional[pulumi.Input[str]]):
        """Setter for ``type``."""
        pulumi.set(self, "type", value)
class CertificateAuthority(pulumi.CustomResource):
    # NOTE(review): generated Pulumi resource class. The two @overload stubs
    # that follow only document the supported __init__ call signatures; the
    # real dispatcher is the plain __init__ further down.
    @overload
    def __init__(__self__,
                 resource_name: str,
                 opts: Optional[pulumi.ResourceOptions] = None,
                 certificate_authority_configuration: Optional[pulumi.Input[pulumi.InputType['CertificateAuthorityCertificateAuthorityConfigurationArgs']]] = None,
                 enabled: Optional[pulumi.Input[bool]] = None,
                 permanent_deletion_time_in_days: Optional[pulumi.Input[int]] = None,
                 revocation_configuration: Optional[pulumi.Input[pulumi.InputType['CertificateAuthorityRevocationConfigurationArgs']]] = None,
                 tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
                 type: Optional[pulumi.Input[str]] = None,
                 __props__=None):
        """
        Provides a resource to manage AWS Certificate Manager Private Certificate Authorities (ACM PCA Certificate Authorities).
        > **NOTE:** Creating this resource will leave the certificate authority in a `PENDING_CERTIFICATE` status, which means it cannot yet issue certificates. To complete this setup, you must fully sign the certificate authority CSR available in the `certificate_signing_request` attribute and import the signed certificate using the AWS SDK, CLI or Console. This provider can support another resource to manage that workflow automatically in the future.
        ## Example Usage
        ### Basic
        ```python
        import pulumi
        import pulumi_aws as aws
        example = aws.acmpca.CertificateAuthority("example",
            certificate_authority_configuration=aws.acmpca.CertificateAuthorityCertificateAuthorityConfigurationArgs(
                key_algorithm="RSA_4096",
                signing_algorithm="SHA512WITHRSA",
                subject=aws.acmpca.CertificateAuthorityCertificateAuthorityConfigurationSubjectArgs(
                    common_name="example.com",
                ),
            ),
            permanent_deletion_time_in_days=7)
        ```
        ### Enable Certificate Revocation List
        ```python
        import pulumi
        import pulumi_aws as aws
        example_bucket = aws.s3.Bucket("exampleBucket")
        acmpca_bucket_access = pulumi.Output.all(example_bucket.arn, example_bucket.arn).apply(lambda exampleBucketArn, exampleBucketArn1: aws.iam.get_policy_document(statements=[aws.iam.GetPolicyDocumentStatementArgs(
            actions=[
                "s3:GetBucketAcl",
                "s3:GetBucketLocation",
                "s3:PutObject",
                "s3:PutObjectAcl",
            ],
            resources=[
                example_bucket_arn,
                f"{example_bucket_arn1}/*",
            ],
            principals=[aws.iam.GetPolicyDocumentStatementPrincipalArgs(
                identifiers=["acm-pca.amazonaws.com"],
                type="Service",
            )],
        )]))
        example_bucket_policy = aws.s3.BucketPolicy("exampleBucketPolicy",
            bucket=example_bucket.id,
            policy=acmpca_bucket_access.json)
        example_certificate_authority = aws.acmpca.CertificateAuthority("exampleCertificateAuthority",
            certificate_authority_configuration=aws.acmpca.CertificateAuthorityCertificateAuthorityConfigurationArgs(
                key_algorithm="RSA_4096",
                signing_algorithm="SHA512WITHRSA",
                subject=aws.acmpca.CertificateAuthorityCertificateAuthorityConfigurationSubjectArgs(
                    common_name="example.com",
                ),
            ),
            revocation_configuration=aws.acmpca.CertificateAuthorityRevocationConfigurationArgs(
                crl_configuration=aws.acmpca.CertificateAuthorityRevocationConfigurationCrlConfigurationArgs(
                    custom_cname="crl.example.com",
                    enabled=True,
                    expiration_in_days=7,
                    s3_bucket_name=example_bucket.id,
                ),
            ),
            opts=pulumi.ResourceOptions(depends_on=[example_bucket_policy]))
        ```
        ## Import
        `aws_acmpca_certificate_authority` can be imported by using the certificate authority Amazon Resource Name (ARN), e.g.,
        ```sh
        $ pulumi import aws:acmpca/certificateAuthority:CertificateAuthority example arn:aws:acm-pca:us-east-1:123456789012:certificate-authority/12345678-1234-1234-1234-123456789012
        ```
        :param str resource_name: The name of the resource.
        :param pulumi.ResourceOptions opts: Options for the resource.
        :param pulumi.Input[pulumi.InputType['CertificateAuthorityCertificateAuthorityConfigurationArgs']] certificate_authority_configuration: Nested argument containing algorithms and certificate subject information. Defined below.
        :param pulumi.Input[bool] enabled: Boolean value that specifies whether certificate revocation lists (CRLs) are enabled. Defaults to `false`.
        :param pulumi.Input[int] permanent_deletion_time_in_days: The number of days to make a CA restorable after it has been deleted, must be between 7 to 30 days, with default to 30 days.
        :param pulumi.Input[pulumi.InputType['CertificateAuthorityRevocationConfigurationArgs']] revocation_configuration: Nested argument containing revocation configuration. Defined below.
        :param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: Specifies a key-value map of user-defined tags that are attached to the certificate authority. .If configured with a provider `default_tags` configuration block present, tags with matching keys will overwrite those defined at the provider-level.
        :param pulumi.Input[str] type: The type of the certificate authority. Defaults to `SUBORDINATE`. Valid values: `ROOT` and `SUBORDINATE`.
        """
        ...
    @overload
    def __init__(__self__,
                 resource_name: str,
                 args: CertificateAuthorityArgs,
                 opts: Optional[pulumi.ResourceOptions] = None):
        """
        Provides a resource to manage AWS Certificate Manager Private Certificate Authorities (ACM PCA Certificate Authorities).
        > **NOTE:** Creating this resource will leave the certificate authority in a `PENDING_CERTIFICATE` status, which means it cannot yet issue certificates. To complete this setup, you must fully sign the certificate authority CSR available in the `certificate_signing_request` attribute and import the signed certificate using the AWS SDK, CLI or Console.

        This overload accepts every resource property bundled into a single
        ``CertificateAuthorityArgs`` object instead of individual keyword
        arguments; refer to the keyword-argument ``__init__`` overload for
        full usage examples and import instructions.

        :param str resource_name: The name of the resource.
        :param CertificateAuthorityArgs args: The arguments to use to populate this resource's properties.
        :param pulumi.ResourceOptions opts: Options for the resource.
        """
        ...
    def __init__(__self__, resource_name: str, *args, **kwargs):
        # Dispatch between the two documented overloads: a single
        # CertificateAuthorityArgs bundle, or individual keyword arguments.
        resource_args, opts = _utilities.get_resource_args_opts(CertificateAuthorityArgs, pulumi.ResourceOptions, *args, **kwargs)
        if resource_args is not None:
            # Args-object form: expand its fields into keyword arguments.
            __self__._internal_init(resource_name, opts, **resource_args.__dict__)
        else:
            __self__._internal_init(resource_name, *args, **kwargs)
    def _internal_init(__self__,
                 resource_name: str,
                 opts: Optional[pulumi.ResourceOptions] = None,
                 certificate_authority_configuration: Optional[pulumi.Input[pulumi.InputType['CertificateAuthorityCertificateAuthorityConfigurationArgs']]] = None,
                 enabled: Optional[pulumi.Input[bool]] = None,
                 permanent_deletion_time_in_days: Optional[pulumi.Input[int]] = None,
                 revocation_configuration: Optional[pulumi.Input[pulumi.InputType['CertificateAuthorityRevocationConfigurationArgs']]] = None,
                 tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
                 type: Optional[pulumi.Input[str]] = None,
                 __props__=None):
        # Normalize and validate the resource options before use.
        if opts is None:
            opts = pulumi.ResourceOptions()
        if not isinstance(opts, pulumi.ResourceOptions):
            raise TypeError('Expected resource options to be a ResourceOptions instance')
        if opts.version is None:
            opts.version = _utilities.get_version()
        if opts.id is None:
            # No id: we are creating a new resource, so build the property
            # bag from the supplied arguments.  __props__ is reserved for the
            # get()/lookup path and must not be passed here.
            if __props__ is not None:
                raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
            __props__ = CertificateAuthorityArgs.__new__(CertificateAuthorityArgs)
            # certificate_authority_configuration is required unless the
            # engine is rehydrating the resource from an existing URN.
            if certificate_authority_configuration is None and not opts.urn:
                raise TypeError("Missing required property 'certificate_authority_configuration'")
            __props__.__dict__["certificate_authority_configuration"] = certificate_authority_configuration
            __props__.__dict__["enabled"] = enabled
            __props__.__dict__["permanent_deletion_time_in_days"] = permanent_deletion_time_in_days
            __props__.__dict__["revocation_configuration"] = revocation_configuration
            __props__.__dict__["tags"] = tags
            __props__.__dict__["type"] = type
            # Output-only properties start as None; the engine fills them in.
            __props__.__dict__["arn"] = None
            __props__.__dict__["certificate"] = None
            __props__.__dict__["certificate_chain"] = None
            __props__.__dict__["certificate_signing_request"] = None
            __props__.__dict__["not_after"] = None
            __props__.__dict__["not_before"] = None
            __props__.__dict__["serial"] = None
            __props__.__dict__["status"] = None
            __props__.__dict__["tags_all"] = None
        super(CertificateAuthority, __self__).__init__(
            'aws:acmpca/certificateAuthority:CertificateAuthority',
            resource_name,
            __props__,
            opts)
    @staticmethod
    def get(resource_name: str,
            id: pulumi.Input[str],
            opts: Optional[pulumi.ResourceOptions] = None,
            arn: Optional[pulumi.Input[str]] = None,
            certificate: Optional[pulumi.Input[str]] = None,
            certificate_authority_configuration: Optional[pulumi.Input[pulumi.InputType['CertificateAuthorityCertificateAuthorityConfigurationArgs']]] = None,
            certificate_chain: Optional[pulumi.Input[str]] = None,
            certificate_signing_request: Optional[pulumi.Input[str]] = None,
            enabled: Optional[pulumi.Input[bool]] = None,
            not_after: Optional[pulumi.Input[str]] = None,
            not_before: Optional[pulumi.Input[str]] = None,
            permanent_deletion_time_in_days: Optional[pulumi.Input[int]] = None,
            revocation_configuration: Optional[pulumi.Input[pulumi.InputType['CertificateAuthorityRevocationConfigurationArgs']]] = None,
            serial: Optional[pulumi.Input[str]] = None,
            status: Optional[pulumi.Input[str]] = None,
            tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
            tags_all: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
            type: Optional[pulumi.Input[str]] = None) -> 'CertificateAuthority':
        """
        Get an existing CertificateAuthority resource's state with the given name, id, and optional extra
        properties used to qualify the lookup.
        :param str resource_name: The unique name of the resulting resource.
        :param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
        :param pulumi.ResourceOptions opts: Options for the resource.
        :param pulumi.Input[str] arn: Amazon Resource Name (ARN) of the certificate authority.
        :param pulumi.Input[str] certificate: Base64-encoded certificate authority (CA) certificate. Only available after the certificate authority certificate has been imported.
        :param pulumi.Input[pulumi.InputType['CertificateAuthorityCertificateAuthorityConfigurationArgs']] certificate_authority_configuration: Nested argument containing algorithms and certificate subject information. Defined below.
        :param pulumi.Input[str] certificate_chain: Base64-encoded certificate chain that includes any intermediate certificates and chains up to root on-premises certificate that you used to sign your private CA certificate. The chain does not include your private CA certificate. Only available after the certificate authority certificate has been imported.
        :param pulumi.Input[str] certificate_signing_request: The base64 PEM-encoded certificate signing request (CSR) for your private CA certificate.
        :param pulumi.Input[bool] enabled: Boolean value that specifies whether certificate revocation lists (CRLs) are enabled. Defaults to `false`.
        :param pulumi.Input[str] not_after: Date and time after which the certificate authority is not valid. Only available after the certificate authority certificate has been imported.
        :param pulumi.Input[str] not_before: Date and time before which the certificate authority is not valid. Only available after the certificate authority certificate has been imported.
        :param pulumi.Input[int] permanent_deletion_time_in_days: The number of days to make a CA restorable after it has been deleted, must be between 7 to 30 days, with default to 30 days.
        :param pulumi.Input[pulumi.InputType['CertificateAuthorityRevocationConfigurationArgs']] revocation_configuration: Nested argument containing revocation configuration. Defined below.
        :param pulumi.Input[str] serial: Serial number of the certificate authority. Only available after the certificate authority certificate has been imported.
        :param pulumi.Input[str] status: Status of the certificate authority.
        :param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: Specifies a key-value map of user-defined tags that are attached to the certificate authority. .If configured with a provider `default_tags` configuration block present, tags with matching keys will overwrite those defined at the provider-level.
        :param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags_all: A map of tags assigned to the resource, including those inherited from the provider .
        :param pulumi.Input[str] type: The type of the certificate authority. Defaults to `SUBORDINATE`. Valid values: `ROOT` and `SUBORDINATE`.
        """
        # Force the provider id of the resource being looked up into opts.
        opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
        # Populate a state bag with whatever qualifying values were supplied;
        # unspecified entries stay None and are resolved by the provider.
        __props__ = _CertificateAuthorityState.__new__(_CertificateAuthorityState)
        __props__.__dict__["arn"] = arn
        __props__.__dict__["certificate"] = certificate
        __props__.__dict__["certificate_authority_configuration"] = certificate_authority_configuration
        __props__.__dict__["certificate_chain"] = certificate_chain
        __props__.__dict__["certificate_signing_request"] = certificate_signing_request
        __props__.__dict__["enabled"] = enabled
        __props__.__dict__["not_after"] = not_after
        __props__.__dict__["not_before"] = not_before
        __props__.__dict__["permanent_deletion_time_in_days"] = permanent_deletion_time_in_days
        __props__.__dict__["revocation_configuration"] = revocation_configuration
        __props__.__dict__["serial"] = serial
        __props__.__dict__["status"] = status
        __props__.__dict__["tags"] = tags
        __props__.__dict__["tags_all"] = tags_all
        __props__.__dict__["type"] = type
        return CertificateAuthority(resource_name, opts=opts, __props__=__props__)
    # --- Read-only output properties (resolved by the Pulumi engine) ---
    @property
    @pulumi.getter
    def arn(self) -> pulumi.Output[str]:
        """
        Amazon Resource Name (ARN) of the certificate authority.
        """
        return pulumi.get(self, "arn")
    @property
    @pulumi.getter
    def certificate(self) -> pulumi.Output[str]:
        """
        Base64-encoded certificate authority (CA) certificate. Only available after the certificate authority certificate has been imported.
        """
        return pulumi.get(self, "certificate")
    @property
    @pulumi.getter(name="certificateAuthorityConfiguration")
    def certificate_authority_configuration(self) -> pulumi.Output['outputs.CertificateAuthorityCertificateAuthorityConfiguration']:
        """
        Nested argument containing algorithms and certificate subject information. Defined below.
        """
        return pulumi.get(self, "certificate_authority_configuration")
    @property
    @pulumi.getter(name="certificateChain")
    def certificate_chain(self) -> pulumi.Output[str]:
        """
        Base64-encoded certificate chain that includes any intermediate certificates and chains up to root on-premises certificate that you used to sign your private CA certificate. The chain does not include your private CA certificate. Only available after the certificate authority certificate has been imported.
        """
        return pulumi.get(self, "certificate_chain")
    @property
    @pulumi.getter(name="certificateSigningRequest")
    def certificate_signing_request(self) -> pulumi.Output[str]:
        """
        The base64 PEM-encoded certificate signing request (CSR) for your private CA certificate.
        """
        return pulumi.get(self, "certificate_signing_request")
    @property
    @pulumi.getter
    def enabled(self) -> pulumi.Output[Optional[bool]]:
        """
        Boolean value that specifies whether certificate revocation lists (CRLs) are enabled. Defaults to `false`.
        """
        return pulumi.get(self, "enabled")
    @property
    @pulumi.getter(name="notAfter")
    def not_after(self) -> pulumi.Output[str]:
        """
        Date and time after which the certificate authority is not valid. Only available after the certificate authority certificate has been imported.
        """
        return pulumi.get(self, "not_after")
    @property
    @pulumi.getter(name="notBefore")
    def not_before(self) -> pulumi.Output[str]:
        """
        Date and time before which the certificate authority is not valid. Only available after the certificate authority certificate has been imported.
        """
        return pulumi.get(self, "not_before")
    @property
    @pulumi.getter(name="permanentDeletionTimeInDays")
    def permanent_deletion_time_in_days(self) -> pulumi.Output[Optional[int]]:
        """
        The number of days to make a CA restorable after it has been deleted, must be between 7 to 30 days, with default to 30 days.
        """
        return pulumi.get(self, "permanent_deletion_time_in_days")
    @property
    @pulumi.getter(name="revocationConfiguration")
    def revocation_configuration(self) -> pulumi.Output[Optional['outputs.CertificateAuthorityRevocationConfiguration']]:
        """
        Nested argument containing revocation configuration. Defined below.
        """
        return pulumi.get(self, "revocation_configuration")
    @property
    @pulumi.getter
    def serial(self) -> pulumi.Output[str]:
        """
        Serial number of the certificate authority. Only available after the certificate authority certificate has been imported.
        """
        return pulumi.get(self, "serial")
    @property
    @pulumi.getter
    def status(self) -> pulumi.Output[str]:
        """
        Status of the certificate authority.
        """
        return pulumi.get(self, "status")
    @property
    @pulumi.getter
    def tags(self) -> pulumi.Output[Optional[Mapping[str, str]]]:
        """
        Specifies a key-value map of user-defined tags that are attached to the certificate authority. .If configured with a provider `default_tags` configuration block present, tags with matching keys will overwrite those defined at the provider-level.
        """
        return pulumi.get(self, "tags")
    @property
    @pulumi.getter(name="tagsAll")
    def tags_all(self) -> pulumi.Output[Mapping[str, str]]:
        """
        A map of tags assigned to the resource, including those inherited from the provider .
        """
        return pulumi.get(self, "tags_all")
    @property
    @pulumi.getter
    def type(self) -> pulumi.Output[Optional[str]]:
        """
        The type of the certificate authority. Defaults to `SUBORDINATE`. Valid values: `ROOT` and `SUBORDINATE`.
        """
        return pulumi.get(self, "type")
| 54.277494
| 456
| 0.693886
|
4a10d494acf3644366a1c341e0c6f706046f7c05
| 166
|
py
|
Python
|
hlso/__init__.py
|
holtgrewe/clsify
|
ad5efecd1d0114898e10498434bded32840d9a4b
|
[
"MIT"
] | 19
|
2019-07-25T17:36:16.000Z
|
2021-12-24T06:52:40.000Z
|
hlso/__init__.py
|
holtgrewe/clsify
|
ad5efecd1d0114898e10498434bded32840d9a4b
|
[
"MIT"
] | 31
|
2019-05-13T11:21:50.000Z
|
2020-06-19T09:33:16.000Z
|
hlso/__init__.py
|
holtgrewe/clsify
|
ad5efecd1d0114898e10498434bded32840d9a4b
|
[
"MIT"
] | 2
|
2020-03-05T15:36:30.000Z
|
2021-12-22T04:48:36.000Z
|
"""Package init: expose the versioneer-derived version string."""
from ._version import get_versions as _get_versions
#: The application's version from versioneer.
__version__ = _get_versions()["version"]
# Remove the helper so it does not leak into the public namespace.
del _get_versions  # noqa
| 27.666667
| 51
| 0.795181
|
4a10d506cbc6f4894dd9ef097012de3ae6f88e18
| 33,718
|
py
|
Python
|
stix_shifter_modules/darktrace/test/stix_translation/test_darktrace_stix_to_query.py
|
lakshmi51974368/stix-shifter
|
36d71c172a5fc5b97d872e623753b0dd1bf4fe6c
|
[
"Apache-2.0"
] | 33
|
2018-05-25T17:07:28.000Z
|
2019-09-30T10:08:53.000Z
|
stix_shifter_modules/darktrace/test/stix_translation/test_darktrace_stix_to_query.py
|
lakshmi51974368/stix-shifter
|
36d71c172a5fc5b97d872e623753b0dd1bf4fe6c
|
[
"Apache-2.0"
] | 54
|
2018-06-01T18:17:24.000Z
|
2019-09-30T18:36:15.000Z
|
stix_shifter_modules/darktrace/test/stix_translation/test_darktrace_stix_to_query.py
|
subbyte/stix-shifter
|
36d71c172a5fc5b97d872e623753b0dd1bf4fe6c
|
[
"Apache-2.0"
] | 37
|
2018-07-24T13:29:46.000Z
|
2019-09-29T19:06:27.000Z
|
from stix_shifter.stix_translation import stix_translation
import unittest
import re
# Shared translator instance used by every test case below.
translation = stix_translation.StixTranslation()
def _remove_timestamp_from_query(expected_query):
pattern1 = r"@fields.epochdate\s:>\d{0,10}.\d{0,3}\s*AND\s*@fields.epochdate\s:<\d{0,10}.\d{0,3}"
pattern2 = r"\,\s*\'timeframe\'\:\s*\'custom\'\,\s*\'time\'([^>]*)?\'\}"
combined_pat = r'|'.join((pattern1, pattern2))
if isinstance(expected_query, list):
return [re.sub(combined_pat, '', str(query)) for query in expected_query]
elif isinstance(expected_query, str):
return re.sub(combined_pat, '', expected_query)
class TestqueryTranslator(unittest.TestCase):
    """
    Unit tests for the Darktrace STIX-pattern-to-query translation.
    """
    # NOTE(review): this __main__ guard sits inside the class body, so it
    # executes while the class is still being defined; conventionally it
    # belongs at module level after the class definition — verify intent.
    if __name__ == "__main__":
        unittest.main()
    def _test_query_assertions(self, actual_query, expected_query):
        """
        Assert that each translated query equals the expected query at the
        same index, after checking the container shapes.
        """
        self.assertIsInstance(expected_query, list)
        self.assertIsInstance(actual_query, dict)
        self.assertIsInstance(actual_query['queries'], list)
        # Compare position-by-position; extra expected entries are ignored.
        for index, each_query in enumerate(actual_query.get('queries'), start=0):
            self.assertEqual(each_query, expected_query[index])
    def test_mac_address_query(self):
        """Translate a mac-addr equality pattern into a Darktrace search."""
        stix_pattern = "[mac-addr:value = '12:2f:23:46:35:5b']"
        actual_query = translation.translate('darktrace', 'query', '{}', stix_pattern)
        actual_query['queries'] = _remove_timestamp_from_query(actual_query['queries'])
        expected_query = [{
            "search": "(@fields.mac:\"12:2f:23:46:35:5b\" AND (@fields.epochdate :>1651064608.076 AND "
                      "@fields.epochdate :<1651064908.076))",
            "fields": [],
            "timeframe": "custom",
            "time": {
                "from": "2022-04-27T13:03:28.076000Z",
                "to": "2022-04-27T13:08:28.076000Z"
            },
            "size": 10000
        }]
        expected_query = _remove_timestamp_from_query(expected_query)
        self._test_query_assertions(actual_query, expected_query)
    def test_file_query(self):
        """Translate a file-name equality pattern into a Darktrace search."""
        stix_pattern = "[file:name = 'some_file.exe']"
        actual_query = translation.translate('darktrace', 'query', '{}', stix_pattern)
        actual_query['queries'] = _remove_timestamp_from_query(actual_query['queries'])
        expected_query = [{
            "search": "(@fields.filename:\"some_file.exe\" AND (@fields.epochdate :>1649152970.31 AND"
                      " @fields.epochdate :<1649153270.31))",
            "fields": [],
            "timeframe": "custom",
            "time": {
                "from": "2022-04-05T10:02:50.310000Z",
                "to": "2022-04-05T10:07:50.310000Z"
            },
            "size": 10000
        }]
        expected_query = _remove_timestamp_from_query(expected_query)
        self._test_query_assertions(actual_query, expected_query)
    def test_user_account_query(self):
        """One STIX field maps to two Darktrace fields joined with OR."""
        stix_pattern = "[user-account:account_login = 'anonymous']"
        actual_query = translation.translate('darktrace', 'query', '{}', stix_pattern)
        actual_query['queries'] = _remove_timestamp_from_query(actual_query['queries'])
        expected_query = [{
            "search": "((@fields.user:\"anonymous\" OR @fields.username:\"anonymous\") AND "
                      "(@fields.epochdate :>1649240843.165 AND @fields.epochdate :<1649241143.165))",
            "fields": [],
            "timeframe": "custom",
            "time": {
                "from": "2022-04-06T10:27:23.165000Z",
                "to": "2022-04-06T10:32:23.165000Z"
            },
            "size": 10000
        }]
        expected_query = _remove_timestamp_from_query(expected_query)
        self._test_query_assertions(actual_query, expected_query)
    def test_network_query_not_equals(self):
        """!= translates to an existence check AND NOT the value."""
        stix_pattern = "[network-traffic:dst_port!=3389]"
        actual_query = translation.translate('darktrace', 'query', '{}', stix_pattern)
        actual_query['queries'] = _remove_timestamp_from_query(actual_query['queries'])
        expected_query = [{
            "search": "(((@fields.dest_port:* AND NOT @fields.dest_port:3389) OR "
                      "(@fields.dst_p:* AND NOT @fields.dst_p:3389)) AND "
                      "(@fields.epochdate :>1649310118.117 AND @fields.epochdate :<1649310418.117))",
            "fields": [],
            "timeframe": "custom",
            "time": {
                "from": "2022-04-07T05:41:58.117000Z",
                "to": "2022-04-07T05:46:58.117000Z"
            },
            "size": 10000
        }]
        expected_query = _remove_timestamp_from_query(expected_query)
        self._test_query_assertions(actual_query, expected_query)
    def test_network_query_greater_than(self):
        """> translates to the :> range operator on both mapped fields."""
        stix_pattern = "[network-traffic:dst_port>3389]"
        actual_query = translation.translate('darktrace', 'query', '{}', stix_pattern)
        actual_query['queries'] = _remove_timestamp_from_query(actual_query['queries'])
        expected_query = [{
            "search": "((@fields.dest_port:>3389 OR @fields.dst_p:>3389) AND "
                      "(@fields.epochdate :>1646092800.0 AND @fields.epochdate :<1648810800.003))",
            "fields": [],
            "timeframe": "custom",
            "time": {
                "from": "2022-03-01T00:00:00.000000Z",
                "to": "2022-04-01T11:00:00.003000Z"
            },
            "size": 10000
        }]
        expected_query = _remove_timestamp_from_query(expected_query)
        self._test_query_assertions(actual_query, expected_query)
    def test_network_query_less_than(self):
        """< translates to the :< range operator on both mapped fields."""
        stix_pattern = "[network-traffic:src_port<62298]"
        actual_query = translation.translate('darktrace', 'query', '{}', stix_pattern)
        actual_query['queries'] = _remove_timestamp_from_query(actual_query['queries'])
        expected_query = [{
            "search": "((@fields.source_port:<62298 OR @fields.src_p:<62298) "
                      "AND (@fields.epochdate :>1646092800.0 AND @fields.epochdate :<1648810800.003))",
            "fields": [],
            "timeframe": "custom",
            "time": {
                "from": "2022-03-01T00:00:00.000000Z",
                "to": "2022-04-01T11:00:00.003000Z"
            },
            "size": 10000
        }]
        expected_query = _remove_timestamp_from_query(expected_query)
        self._test_query_assertions(actual_query, expected_query)
def test_network_query_grater_than_or_equal(self):
    """src_port '>=' is rewritten as ':>' with the bound lowered by one (58771 -> :>58770)."""
    stix_pattern = "[network-traffic:src_port >= 58771]"
    actual_query = translation.translate('darktrace', 'query', '{}', stix_pattern)
    actual_query['queries'] = _remove_timestamp_from_query(actual_query['queries'])
    expected_query = [{
        "search": "((@fields.source_port:>58770 OR @fields.src_p:>58770) "
                  "AND (@fields.epochdate :>1646092800.0 AND @fields.epochdate :<1648810800.003))",
        "fields": [],
        "timeframe": "custom",
        "time": {
            "from": "2022-03-01T00:00:00.000000Z",
            "to": "2022-04-01T11:00:00.003000Z"
        },
        "size": 10000
    }]
    expected_query = _remove_timestamp_from_query(expected_query)
    self._test_query_assertions(actual_query, expected_query)
def test_network_query_less_than_or_equal(self):
    """src_port '<=' is rewritten as ':<' with the bound raised by one (58771 -> :<58772)."""
    stix_pattern = "[network-traffic:src_port <= 58771]"
    actual_query = translation.translate('darktrace', 'query', '{}', stix_pattern)
    actual_query['queries'] = _remove_timestamp_from_query(actual_query['queries'])
    expected_query = [{
        "search": "((@fields.source_port:<58772 OR @fields.src_p:<58772) "
                  "AND (@fields.epochdate :>1646092800.0 AND @fields.epochdate :<1648810800.003))",
        "fields": [],
        "timeframe": "custom",
        "time": {
            "from": "2022-03-01T00:00:00.000000Z",
            "to": "2022-04-01T11:00:00.003000Z"
        },
        "size": 10000
    }]
    expected_query = _remove_timestamp_from_query(expected_query)
    self._test_query_assertions(actual_query, expected_query)
def test_network_query_less_than_or_equal_str(self):
    """A quoted numeric bound behaves the same as an unquoted one for '<='."""
    stix_pattern = "[network-traffic:src_port <= '58771']"
    actual_query = translation.translate('darktrace', 'query', '{}', stix_pattern)
    actual_query['queries'] = _remove_timestamp_from_query(actual_query['queries'])
    expected_query = [{
        "search": "((@fields.source_port:<58772 OR @fields.src_p:<58772) "
                  "AND (@fields.epochdate :>1646092800.0 AND @fields.epochdate :<1648810800.003))",
        "fields": [],
        "timeframe": "custom",
        "time": {
            "from": "2022-03-01T00:00:00.000000Z",
            "to": "2022-04-01T11:00:00.003000Z"
        },
        "size": 10000
    }]
    expected_query = _remove_timestamp_from_query(expected_query)
    self._test_query_assertions(actual_query, expected_query)
def test_network_query_grater_than_or_equal_str(self):
    """A quoted numeric bound behaves the same as an unquoted one for '>='."""
    stix_pattern = "[network-traffic:src_port >= '58771']"
    actual_query = translation.translate('darktrace', 'query', '{}', stix_pattern)
    actual_query['queries'] = _remove_timestamp_from_query(actual_query['queries'])
    expected_query = [{
        "search": "((@fields.source_port:>58770 OR @fields.src_p:>58770) AND "
                  "(@fields.epochdate :>1646092800.0 AND @fields.epochdate :<1648810800.003))",
        "fields": [],
        "timeframe": "custom",
        "time": {
            "from": "2022-03-01T00:00:00.000000Z",
            "to": "2022-04-01T11:00:00.003000Z"
        },
        "size": 10000
    }]
    expected_query = _remove_timestamp_from_query(expected_query)
    self._test_query_assertions(actual_query, expected_query)
def test_network_query_like(self):
    """LIKE on protocols[*] becomes a wildcard match on the proto/protocol fields."""
    stix_pattern = "[network-traffic:protocols[*] LIKE 'tcp']"
    actual_query = translation.translate('darktrace', 'query', '{}', stix_pattern)
    actual_query['queries'] = _remove_timestamp_from_query(actual_query['queries'])
    expected_query = [{
        "search": "((@fields.proto:*tcp* OR @fields.protocol:*tcp*) AND "
                  "(@fields.epochdate :>1646092800.0 AND @fields.epochdate :<1648810800.003))",
        "fields": [],
        "timeframe": "custom",
        "time": {
            "from": "2022-03-01T00:00:00.000000Z",
            "to": "2022-04-01T11:00:00.003000Z"
        },
        "size": 10000
    }]
    expected_query = _remove_timestamp_from_query(expected_query)
    self._test_query_assertions(actual_query, expected_query)
def test_software_match(self):
    """MATCHES maps to a Lucene regex query (/.../) on @fields.name."""
    stix_pattern = "[software:name MATCHES 'Windows']"
    actual_query = translation.translate('darktrace', 'query', '{}', stix_pattern)
    actual_query['queries'] = _remove_timestamp_from_query(actual_query['queries'])
    expected_query = [{
        "search": "(@fields.name:/Windows/ AND (@fields.epochdate :>1646092800.0 AND "
                  "@fields.epochdate :<1648810800.003))",
        "fields": [],
        "timeframe": "custom",
        "time": {
            "from": "2022-03-01T00:00:00.000000Z",
            "to": "2022-04-01T11:00:00.003000Z"
        },
        "size": 10000
    }]
    expected_query = _remove_timestamp_from_query(expected_query)
    self._test_query_assertions(actual_query, expected_query)
def test_network_traffic_in(self):
    """An IN list becomes a parenthesised OR of quoted values per field."""
    stix_pattern = "[network-traffic:protocols[*] IN ('tcp','dns')]"
    actual_query = translation.translate('darktrace', 'query', '{}', stix_pattern)
    actual_query['queries'] = _remove_timestamp_from_query(actual_query['queries'])
    expected_query = [{
        "search": "((@fields.proto:(\"tcp\" OR \"dns\") OR @fields.protocol:(\"tcp\" OR \"dns\"))"
                  " AND (@fields.epochdate :>1646092800.0 AND @fields.epochdate :<1648810800.003))",
        "fields": [],
        "timeframe": "custom",
        "time": {
            "from": "2022-03-01T00:00:00.000000Z",
            "to": "2022-04-01T11:00:00.003000Z"
        },
        "size": 10000
    }]
    expected_query = _remove_timestamp_from_query(expected_query)
    self._test_query_assertions(actual_query, expected_query)
def test_network_traffic_port_query(self):
    """OR of src_port/dst_port equality clauses combines both field pairs in one search."""
    stix_pattern = "[network-traffic:src_port = 62298 OR network-traffic:dst_port = 3389]"
    actual_query = translation.translate('darktrace', 'query', '{}', stix_pattern)
    actual_query['queries'] = _remove_timestamp_from_query(actual_query['queries'])
    expected_query = [{
        "search": "(((@fields.dest_port:3389 OR @fields.dst_p:3389) OR (@fields.source_port:62298 OR "
                  "@fields.src_p:62298)) AND (@fields.epochdate :>1646092800.0 AND "
                  "@fields.epochdate :<1648724400.003))",
        "fields": [],
        "timeframe": "custom",
        "time": {
            "from": "2022-03-01T00:00:00.000000Z",
            "to": "2022-03-31T11:00:00.003000Z"
        },
        "size": 10000
    }]
    expected_query = _remove_timestamp_from_query(expected_query)
    self._test_query_assertions(actual_query, expected_query)
def test_x_darktrace_http_query_equals(self):
    """http-request-ext request_method maps to @fields.method."""
    stix_pattern = "[network-traffic:extensions.'http-request-ext'.request_method='GET']"
    actual_query = translation.translate('darktrace', 'query', '{}', stix_pattern)
    actual_query['queries'] = _remove_timestamp_from_query(actual_query['queries'])
    expected_query = [{
        "search": "(@fields.method:\"GET\" AND (@fields.epochdate :>1651065067.574 AND "
                  "@fields.epochdate :<1651065367.574))",
        "fields": [],
        "timeframe": "custom",
        "time": {
            "from": "2022-04-27T13:11:07.574000Z",
            "to": "2022-04-27T13:16:07.574000Z"
        },
        "size": 10000
    }]
    expected_query = _remove_timestamp_from_query(expected_query)
    self._test_query_assertions(actual_query, expected_query)
def test_x_http_or_x509_query(self):
    """AND across the HTTP method and x509 serial combines both mapped fields."""
    stix_pattern = "[network-traffic:extensions.'http-request-ext'.request_method='GET' AND " \
                   "x509-certificate:serial_number='76FDB38B8D5AA88844250EFE0EA89026']"
    actual_query = translation.translate('darktrace', 'query', '{}', stix_pattern)
    actual_query['queries'] = _remove_timestamp_from_query(actual_query['queries'])
    expected_query = [{
        "search": "((@fields.certificate_serial:\"76FDB38B8D5AA88844250EFE0EA89026\" AND "
                  "@fields.method:\"GET\") AND (@fields.epochdate :>1651065261.707 AND "
                  "@fields.epochdate :<1651065561.707))",
        "fields": [],
        "timeframe": "custom",
        "time": {
            "from": "2022-04-27T13:14:21.707000Z",
            "to": "2022-04-27T13:19:21.707000Z"
        },
        "size": 10000
    }]
    expected_query = _remove_timestamp_from_query(expected_query)
    self._test_query_assertions(actual_query, expected_query)
def test_network_traffic_port_and_query(self):
    """AND of two port comparisons nests both translated clauses under one time window."""
    stix_pattern = "[network-traffic:src_port > 53331 AND network-traffic:dst_port < 3380]"
    actual_query = translation.translate('darktrace', 'query', '{}', stix_pattern)
    actual_query['queries'] = _remove_timestamp_from_query(actual_query['queries'])
    expected_query = [{
        "search": "(((@fields.dest_port:<3380 OR @fields.dst_p:<3380) AND "
                  "(@fields.source_port:>53331 OR @fields.src_p:>53331)) AND "
                  "(@fields.epochdate :>1646092800.0 AND @fields.epochdate :<1648724400.003))",
        "fields": [],
        "timeframe": "custom",
        "time": {
            "from": "2022-03-01T00:00:00.000000Z",
            "to": "2022-03-31T11:00:00.003000Z"
        },
        "size": 10000
    }]
    expected_query = _remove_timestamp_from_query(expected_query)
    self._test_query_assertions(actual_query, expected_query)
def test_equal_or_operator(self):
    """OR across two observation expressions merges into a single search clause."""
    stix_pattern = "[software:name='Windows'] OR [file:created = '1648122134.845304']"
    actual_query = translation.translate('darktrace', 'query', '{}', stix_pattern)
    actual_query['queries'] = _remove_timestamp_from_query(actual_query['queries'])
    expected_query = [{
        "search": "((@fields.name:\"Windows\" OR @fields.epochdate:1648122134.845304) "
                  "AND (@fields.epochdate :>1649152875.85 AND @fields.epochdate :<1649153175.85))",
        "fields": [],
        "timeframe": "custom",
        "time": {
            "from": "2022-04-05T10:01:15.850000Z",
            "to": "2022-04-05T10:06:15.850000Z"
        },
        "size": 10000
    }]
    expected_query = _remove_timestamp_from_query(expected_query)
    self._test_query_assertions(actual_query, expected_query)
def test_multi_list_or_operator(self):
    """Nested OR of observation lists honours the explicit START/STOP window."""
    stix_pattern = "[user-account:account_login = 'anonymous'] OR " \
                   "([file:name = 'input.csv'] OR [software:version='3.2.1']) " \
                   "START t'2022-03-01T00:00:00.000Z' STOP t'2022-03-31T11:00:00.003Z'"
    actual_query = translation.translate('darktrace', 'query', '{}', stix_pattern)
    actual_query['queries'] = _remove_timestamp_from_query(actual_query['queries'])
    expected_query = [{
        "search": "((@fields.user:\"anonymous\" OR @fields.username:\"anonymous\") OR "
                  "((@fields.filename:\"input.csv\" OR @fields.version:\"3.2.1\") AND "
                  "(@fields.epochdate :>1646092800.0 AND @fields.epochdate :<1648724400.003)))",
        "fields": [],
        "timeframe": "custom",
        "time": {
            "from": "2022-03-01T00:00:00.000000Z",
            "to": "2022-03-31T11:00:00.003000Z"
        },
        "size": 10000
    }]
    expected_query = _remove_timestamp_from_query(expected_query)
    self._test_query_assertions(actual_query, expected_query)
def test_x_oca_asset(self):
    """x-oca-asset:hostname maps to @fields.host."""
    stix_pattern = "[x-oca-asset:hostname = '169.254.169.254']"
    actual_query = translation.translate('darktrace', 'query', '{}', stix_pattern)
    actual_query['queries'] = _remove_timestamp_from_query(actual_query['queries'])
    expected_query = [{
        "search": "(@fields.host:\"169.254.169.254\" AND (@fields.epochdate :>1649154023.919 "
                  "AND @fields.epochdate :<1649154323.919))",
        "fields": [],
        "timeframe": "custom",
        "time": {
            "from": "2022-04-05T10:20:23.919000Z",
            "to": "2022-04-05T10:25:23.919000Z"
        },
        "size": 10000
    }]
    expected_query = _remove_timestamp_from_query(expected_query)
    self._test_query_assertions(actual_query, expected_query)
def test_software_match_cap_error(self):
    """A '^' anchor inside MATCHES is rejected by translation."""
    stix_pattern = "[software:name MATCHES 'Windows^']"
    actual_query = translation.translate('darktrace', 'query', '{}', stix_pattern)
    assert actual_query['success'] is False
def test_mac_match_error(self):
    """MATCHES is unsupported for mac-addr values."""
    stix_pattern = "[mac-addr:value MATCHES '12:2f:23:46:35:5b']"
    actual_query = translation.translate('darktrace', 'query', '{}', stix_pattern)
    assert actual_query['success'] is False
def test_software_match_dollar_error(self):
    """A '$' anchor inside MATCHES is rejected by translation."""
    stix_pattern = "[software:name MATCHES '$Windows']"
    actual_query = translation.translate('darktrace', 'query', '{}', stix_pattern)
    assert actual_query['success'] is False
def test_x509_like(self):
    """LIKE is unsupported for x509-certificate:version."""
    stix_pattern = "[x509-certificate:version LIKE '12']"
    actual_query = translation.translate('darktrace', 'query', '{}', stix_pattern)
    assert actual_query['success'] is False
def test_mac_query_like(self):
    """LIKE is unsupported for mac-addr values."""
    stix_pattern = "[mac-addr:value LIKE '12:2f:23:46:35:5b']"
    actual_query = translation.translate('darktrace', 'query', '{}', stix_pattern)
    assert actual_query['success'] is False
def test_network_query_less_than_or_equal_string_error(self):
    """A non-numeric bound for '<=' fails translation."""
    stix_pattern = "[network-traffic:src_port <= 'five']"
    actual_query = translation.translate('darktrace', 'query', '{}', stix_pattern)
    assert actual_query['success'] is False
def test_ipv4_query_time_error(self):
    """Malformed START/STOP timestamps fail pattern validation with invalid_parameter."""
    stix_pattern = "[ipv4-addr:value = '172.31.81.98'] START t'2021-10-09T11:.0Z' STOP t'2021-10:00.003Z'"
    actual_query = translation.translate('darktrace', 'query', '{}', stix_pattern, {'validate_pattern': 'true'})
    assert actual_query['success'] is False
    assert actual_query['code'] == 'invalid_parameter'
def test_ipv4_query(self):
    """An ipv4-addr value fans out across every IP-bearing darktrace field."""
    stix_pattern = "[ipv4-addr:value = '172.31.81.98']"
    actual_query = translation.translate('darktrace', 'query', '{}', stix_pattern)
    actual_query['queries'] = _remove_timestamp_from_query(actual_query['queries'])
    expected_query = [{
        "search": "((@fields.source_ip:\"172.31.81.98\" OR @fields.dest_ip:\"172.31.81.98\" OR "
                  "@fields.src:\"172.31.81.98\" OR @fields.dst:\"172.31.81.98\" OR "
                  "@fields.ip:\"172.31.81.98\" OR @fields.subnet_mask:\"172.31.81.98\" OR "
                  "@fields.released_ip:\"172.31.81.98\" OR @fields.requested_ip:\"172.31.81.98\" OR "
                  "@fields.assigned_ip:\"172.31.81.98\") AND (@fields.epochdate :>1650946804.243 "
                  "AND @fields.epochdate :<1650947104.243))",
        "fields": [],
        "timeframe": "custom",
        "time": {
            "from": "2022-04-26T04:20:04.243000Z",
            "to": "2022-04-26T04:25:04.243000Z"
        },
        "size": 10000
    }]
    expected_query = _remove_timestamp_from_query(expected_query)
    self._test_query_assertions(actual_query, expected_query)
def test_email_query(self):
    """An email-addr value fans out across sender/recipient fields within the given window."""
    stix_pattern = "[email-addr:value = 'shahtanveer@gmail.com'] " \
                   "START t'2022-03-01T00:00:00.000Z' STOP t'2022-04-05T11:00:00.003Z'"
    actual_query = translation.translate('darktrace', 'query', '{}', stix_pattern)
    actual_query['queries'] = _remove_timestamp_from_query(actual_query['queries'])
    expected_query = [{
        "search": "((@fields.mailfrom:\"shahtanveer@gmail.com\" OR @fields.rcptto:\"shahtanveer@gmail.com\" OR "
                  "@fields.from:\"shahtanveer@gmail.com\" OR @fields.to:\"shahtanveer@gmail.com\" OR "
                  "@fields.cc:\"shahtanveer@gmail.com\") AND (@fields.epochdate :>1646092800.0 AND "
                  "@fields.epochdate :<1649156400.003))",
        "fields": [],
        "timeframe": "custom",
        "time": {
            "from": "2022-03-01T00:00:00.000000Z",
            "to": "2022-04-05T11:00:00.003000Z"
        },
        "size": 10000
    }]
    expected_query = _remove_timestamp_from_query(expected_query)
    self._test_query_assertions(actual_query, expected_query)
def test_network_query_packets(self):
    """src_packets maps to the pkts_recv/orig_pkts field pair."""
    stix_pattern = "[network-traffic:src_packets = 10]"
    actual_query = translation.translate('darktrace', 'query', '{}', stix_pattern)
    actual_query['queries'] = _remove_timestamp_from_query(actual_query['queries'])
    expected_query = [{
        "search": "((@fields.pkts_recv:10 OR @fields.orig_pkts:10) AND "
                  "(@fields.epochdate :>1650947018.067 AND @fields.epochdate :<1650947318.067))",
        "fields": [],
        "timeframe": "custom",
        "time": {
            "from": "2022-04-26T04:23:38.067000Z",
            "to": "2022-04-26T04:28:38.067000Z"
        },
        "size": 10000
    }]
    expected_query = _remove_timestamp_from_query(expected_query)
    self._test_query_assertions(actual_query, expected_query)
def test_ipv4_query_time(self):
    """The ipv4-addr fan-out honours an explicit START/STOP window."""
    stix_pattern = "[ipv4-addr:value = '172.31.81.98'] START t'2022-03-01T00:00:00.000Z'" \
                   " STOP t'2022-03-31T11:00:00.003Z'"
    actual_query = translation.translate('darktrace', 'query', '{}', stix_pattern)
    actual_query['queries'] = _remove_timestamp_from_query(actual_query['queries'])
    expected_query = [{
        "search": "((@fields.source_ip:\"172.31.81.98\" OR @fields.dest_ip:\"172.31.81.98\" OR "
                  "@fields.src:\"172.31.81.98\" OR @fields.dst:\"172.31.81.98\" OR @fields.ip:\"172.31.81.98\""
                  " OR @fields.subnet_mask:\"172.31.81.98\" OR @fields.released_ip:\"172.31.81.98\""
                  " OR @fields.requested_ip:\"172.31.81.98\" OR @fields.assigned_ip:\"172.31.81.98\")"
                  " AND (@fields.epochdate :>1646092800.0 AND @fields.epochdate :<1648724400.003))",
        "fields": [],
        "timeframe": "custom",
        "time": {
            "from": "2022-03-01T00:00:00.000000Z",
            "to": "2022-03-31T11:00:00.003000Z"
        },
        "size": 10000
    }]
    expected_query = _remove_timestamp_from_query(expected_query)
    self._test_query_assertions(actual_query, expected_query)
def test_ipv4_query_or(self):
    """OR of an ipv4 value and a port comparison merges into one search clause."""
    stix_pattern = "[ipv4-addr:value = '172.31.81.98' OR network-traffic:src_port > 62298]"
    actual_query = translation.translate('darktrace', 'query', '{}', stix_pattern)
    actual_query['queries'] = _remove_timestamp_from_query(actual_query['queries'])
    expected_query = [{
        "search": "(((@fields.source_port:>62298 OR @fields.src_p:>62298) OR (@fields.source_ip:\"172.31.81.98\" OR"
                  " @fields.dest_ip:\"172.31.81.98\" OR @fields.src:\"172.31.81.98\" OR "
                  "@fields.dst:\"172.31.81.98\" OR @fields.ip:\"172.31.81.98\" OR "
                  "@fields.subnet_mask:\"172.31.81.98\" OR @fields.released_ip:\"172.31.81.98\" OR "
                  "@fields.requested_ip:\"172.31.81.98\" OR @fields.assigned_ip:\"172.31.81.98\")) AND "
                  "(@fields.epochdate :>1650947192.945 AND @fields.epochdate :<1650947492.945))",
        "fields": [],
        "timeframe": "custom",
        "time": {
            "from": "2022-04-26T04:26:32.945000Z",
            "to": "2022-04-26T04:31:32.945000Z"
        },
        "size": 10000
    }]
    expected_query = _remove_timestamp_from_query(expected_query)
    self._test_query_assertions(actual_query, expected_query)
def test_domain_name_and_mac(self):
    """AND across observations ORs the domain-name and mac clauses in one search."""
    stix_pattern = "[domain-name:value='ec2.internal'] AND [mac-addr:value = '12:2f:23:46:35:5b']"
    actual_query = translation.translate('darktrace', 'query', '{}', stix_pattern)
    actual_query['queries'] = _remove_timestamp_from_query(actual_query['queries'])
    expected_query = [{
        "search": "(((@fields.domain_name:\"ec2.internal\" OR @fields.query:\"ec2.internal\") OR "
                  "@fields.mac:\"12:2f:23:46:35:5b\") AND (@fields.epochdate :>1651064544.474 AND "
                  "@fields.epochdate :<1651064844.474))",
        "fields": [],
        "timeframe": "custom",
        "time": {
            "from": "2022-04-27T13:02:24.474000Z",
            "to": "2022-04-27T13:07:24.474000Z"
        },
        "size": 10000
    }]
    expected_query = _remove_timestamp_from_query(expected_query)
    self._test_query_assertions(actual_query, expected_query)
def test_set_or_operator(self):
    """A parenthesised OR of observations shares a single START/STOP window."""
    stix_pattern = "([ipv4-addr:value = '172.31.81.98'] OR [mac-addr:value = '12:2f:23:46:35:5b'])" \
                   " START t'2022-03-01T00:00:00.000Z' STOP t'2022-03-31T11:00:00.003Z'"
    actual_query = translation.translate('darktrace', 'query', '{}', stix_pattern)
    actual_query['queries'] = _remove_timestamp_from_query(actual_query['queries'])
    expected_query = [{
        "search": "(((@fields.source_ip:\"172.31.81.98\" OR @fields.dest_ip:\"172.31.81.98\" OR "
                  "@fields.src:\"172.31.81.98\" OR @fields.dst:\"172.31.81.98\" OR @fields.ip:\"172.31.81.98\" OR "
                  "@fields.subnet_mask:\"172.31.81.98\" OR @fields.released_ip:\"172.31.81.98\" OR "
                  "@fields.requested_ip:\"172.31.81.98\" OR @fields.assigned_ip:\"172.31.81.98\") OR "
                  "@fields.mac:\"12:2f:23:46:35:5b\") AND (@fields.epochdate :>1646092800.0 AND "
                  "@fields.epochdate :<1648724400.003))",
        "fields": [],
        "timeframe": "custom",
        "time": {
            "from": "2022-03-01T00:00:00.000000Z",
            "to": "2022-03-31T11:00:00.003000Z"
        },
        "size": 10000
    }]
    expected_query = _remove_timestamp_from_query(expected_query)
    self._test_query_assertions(actual_query, expected_query)
def test_two_sets_or_operator(self):
    """Two ANDed observation groups collapse into nested OR clauses."""
    stix_pattern = "([network-traffic:dst_port = '3389'] AND [domain-name:value = 'sample']) AND " \
                   "([software:name = 'word'] OR [mac-addr:value = '12:2f:23:46:35:5b']) " \
                   "START t'2022-03-01T00:00:00.000Z' STOP t'2022-03-31T11:00:00.003Z'"
    actual_query = translation.translate('darktrace', 'query', '{}', stix_pattern)
    actual_query['queries'] = _remove_timestamp_from_query(actual_query['queries'])
    expected_query = [{
        "search": "(((@fields.dest_port:3389 OR @fields.dst_p:3389) OR (@fields.domain_name:\"sample\" OR "
                  "@fields.query:\"sample\")) OR ((@fields.name:\"word\" OR @fields.mac:\"12:2f:23:46:35:5b\") AND "
                  "(@fields.epochdate :>1646092800.0 AND @fields.epochdate :<1648724400.003)))",
        "fields": [],
        "timeframe": "custom",
        "time": {
            "from": "2022-03-01T00:00:00.000000Z",
            "to": "2022-03-31T11:00:00.003000Z"
        },
        "size": 10000
    }]
    expected_query = _remove_timestamp_from_query(expected_query)
    self._test_query_assertions(actual_query, expected_query)
def test_comparison_and_operator(self):
    """'!=' becomes 'field:* AND NOT field:value' across every email field."""
    stix_pattern = "[email-message:from_ref.value = 'shahtanveer@gmail.com'] AND " \
                   "[email-addr:value != 'first@mail.com']"
    actual_query = translation.translate('darktrace', 'query', '{}', stix_pattern)
    actual_query['queries'] = _remove_timestamp_from_query(actual_query['queries'])
    expected_query = [{
        "search": "((@fields.mailfrom:\"shahtanveer@gmail.com\" OR "
                  "((@fields.mailfrom:* AND NOT @fields.mailfrom:\"first@mail.com\") OR "
                  "(@fields.rcptto:* AND NOT @fields.rcptto:\"first@mail.com\") OR "
                  "(@fields.from:* AND NOT @fields.from:\"first@mail.com\") OR "
                  "(@fields.to:* AND NOT @fields.to:\"first@mail.com\") OR "
                  "(@fields.cc:* AND NOT @fields.cc:\"first@mail.com\"))) AND "
                  "(@fields.epochdate :>1650950251.463 AND @fields.epochdate :<1650950551.463))",
        "fields": [],
        "timeframe": "custom",
        "time": {
            "from": "2022-04-26T05:17:31.463000Z",
            "to": "2022-04-26T05:22:31.463000Z"
        },
        "size": 10000
    }]
    expected_query = _remove_timestamp_from_query(expected_query)
    self._test_query_assertions(actual_query, expected_query)
def test_combinedcomparison_and_or_operator(self):
    """ANDed observations with an explicit window OR the ipv4 and mac clauses."""
    stix_pattern = "[ipv4-addr:value = '172.31.81.98'] AND [mac-addr:value = '12:2f:23:46:35:5b'] "\
                   "START t'2022-03-01T00:00:00.000Z' STOP t'2022-03-31T11:00:00.003Z'"
    actual_query = translation.translate('darktrace', 'query', '{}', stix_pattern)
    actual_query['queries'] = _remove_timestamp_from_query(actual_query['queries'])
    expected_query = [{
        "search": "((@fields.source_ip:\"172.31.81.98\" OR @fields.dest_ip:\"172.31.81.98\" OR "
                  "@fields.src:\"172.31.81.98\" OR @fields.dst:\"172.31.81.98\" OR @fields.ip:\"172.31.81.98\" OR "
                  "@fields.subnet_mask:\"172.31.81.98\" OR @fields.released_ip:\"172.31.81.98\" OR "
                  "@fields.requested_ip:\"172.31.81.98\" OR @fields.assigned_ip:\"172.31.81.98\") OR "
                  "(@fields.mac:\"12:2f:23:46:35:5b\" AND (@fields.epochdate :>1646092800.0 AND "
                  "@fields.epochdate :<1648724400.003)))",
        "fields": [],
        "timeframe": "custom",
        "time": {
            "from": "2022-03-01T00:00:00.000000Z",
            "to": "2022-03-31T11:00:00.003000Z"
        },
        "size": 10000
    }]
    expected_query = _remove_timestamp_from_query(expected_query)
    self._test_query_assertions(actual_query, expected_query)
| 51.635528
| 120
| 0.582508
|
4a10d56a59d038b94acf72e4f0b3196ce70495dc
| 380
|
py
|
Python
|
pyfortified_cache/utils.py
|
jeff00seattle/pyfortified-cache
|
60a20721b382c21c17868efd79a5c4870829a765
|
[
"MIT"
] | null | null | null |
pyfortified_cache/utils.py
|
jeff00seattle/pyfortified-cache
|
60a20721b382c21c17868efd79a5c4870829a765
|
[
"MIT"
] | null | null | null |
pyfortified_cache/utils.py
|
jeff00seattle/pyfortified-cache
|
60a20721b382c21c17868efd79a5c4870829a765
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import hashlib
import ujson as json
def create_hash_key(key):
    """Build a deterministic MD5 hex digest to use as a cache key.

    Args:
        key: Either a string (hashed verbatim) or a dict (serialized to JSON
            with sorted keys so logically-equal dicts hash identically).

    Returns:
        32-character hexadecimal MD5 digest of the key.

    Raises:
        ValueError: If key is None or not a str/dict.
    """
    if key is None:
        raise ValueError("Parameter 'key' not defined.")
    if isinstance(key, str):
        key_str = key
    elif isinstance(key, dict):
        key_str = json.dumps(key, sort_keys=True)
    else:
        # Previously an unsupported type fell through and crashed with
        # UnboundLocalError on key_str; fail with an explicit error instead.
        raise ValueError("Parameter 'key' must be a str or dict.")
    # MD5 is acceptable here: the digest is a cache-lookup key, not a
    # security credential.
    return hashlib.md5(key_str.encode('utf-8')).hexdigest()
| 23.75
| 59
| 0.647368
|
4a10d67e41b295daab125dcfa1c00b343a498edf
| 722
|
py
|
Python
|
tests/test_02_cmd.py
|
MichaelKim0407/ini-to-env
|
aee310f31b2ba2077b081be2327a2cba45dd9f65
|
[
"MIT"
] | null | null | null |
tests/test_02_cmd.py
|
MichaelKim0407/ini-to-env
|
aee310f31b2ba2077b081be2327a2cba45dd9f65
|
[
"MIT"
] | null | null | null |
tests/test_02_cmd.py
|
MichaelKim0407/ini-to-env
|
aee310f31b2ba2077b081be2327a2cba45dd9f65
|
[
"MIT"
] | null | null | null |
import sys
from ini2env import cmd
def test_cmd_direct(ini_file_name, capsys, monkeypatch):
    """When stdout looks like a TTY, cmd prints a sourcing hint on stderr plus exports."""
    # Force the TTY branch regardless of how pytest captures stdout.
    monkeypatch.setattr(sys.stdout, 'isatty', lambda: True)
    cmd([ini_file_name()])
    captured = capsys.readouterr()
    assert captured.err == '''\
# source this output to export environment variables
'''
    assert captured.out.splitlines() == [
        'export HELLO=WORLD',
        'export HOST=localhost',
        'export PORT=8000',
    ]
def test_cmd_source(ini_file_name, capsys):
    """Without a TTY (piped/sourced), cmd emits only the export lines and no stderr hint."""
    cmd([ini_file_name()])
    captured = capsys.readouterr()
    assert captured.err == ''
    assert captured.out.splitlines() == [
        'export HELLO=WORLD',
        'export HOST=localhost',
        'export PORT=8000',
    ]
| 24.896552
| 59
| 0.646814
|
4a10d6b040d37b5c6fce93e090aa5bd286524778
| 21
|
py
|
Python
|
project_name/__init__.py
|
oskarmenrus/empty_project_template
|
3985a5221387759f1949e09ca36faa9cca4dc2c3
|
[
"MIT"
] | 18
|
2017-11-30T15:36:13.000Z
|
2021-12-19T11:19:56.000Z
|
clean/datazoo/__init__.py
|
ahhuisg/ML-Data-Prep-Zoo
|
195733b5767d69c9992456f1380e6c646e30a5ae
|
[
"Apache-2.0"
] | 2
|
2020-02-07T06:52:45.000Z
|
2021-04-26T20:24:58.000Z
|
clean/datazoo/__init__.py
|
ahhuisg/ML-Data-Prep-Zoo
|
195733b5767d69c9992456f1380e6c646e30a5ae
|
[
"Apache-2.0"
] | 6
|
2018-01-30T15:03:29.000Z
|
2021-12-12T08:37:50.000Z
|
# Package version string; bump on release.
__version__ = '0.1'
| 10.5
| 20
| 0.619048
|
4a10d946a2ebb028f4f842ada64418181e3233d6
| 54,251
|
py
|
Python
|
wsi/filter.py
|
steermomo/WSI_extract
|
9db3bca2e59617a244c0f09d05146bfb44fb96e7
|
[
"Apache-2.0"
] | null | null | null |
wsi/filter.py
|
steermomo/WSI_extract
|
9db3bca2e59617a244c0f09d05146bfb44fb96e7
|
[
"Apache-2.0"
] | null | null | null |
wsi/filter.py
|
steermomo/WSI_extract
|
9db3bca2e59617a244c0f09d05146bfb44fb96e7
|
[
"Apache-2.0"
] | null | null | null |
# ------------------------------------------------------------------------
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# ------------------------------------------------------------------------
import math
import multiprocessing
import numpy as np
import os
import scipy.ndimage.morphology as sc_morph
import skimage.color as sk_color
import skimage.exposure as sk_exposure
import skimage.feature as sk_feature
import skimage.filters as sk_filters
import skimage.future as sk_future
import skimage.morphology as sk_morphology
import skimage.segmentation as sk_segmentation
from configs import cfg as default_cfg
from wsi import slide
from wsi import util
from wsi.util import Time
def filter_rgb_to_grayscale(np_img, output_type="uint8"):
    """Convert an RGB NumPy array (h, w, c) to a grayscale array (h, w).

    Args:
      np_img: RGB image as a NumPy array.
      output_type: Type of array to return ("float" or "uint8").

    Returns:
      Grayscale image as a NumPy array with shape (h, w).
    """
    timer = Time()
    # ITU-R 709 luma weights; another common choice is [0.299, 0.587, 0.114].
    luma_weights = [0.2125, 0.7154, 0.0721]
    gray = np.dot(np_img[..., :3], luma_weights)
    if output_type != "float":
        gray = gray.astype("uint8")
    util.np_info(gray, "Gray", timer.elapsed())
    return gray
def filter_complement(np_img, output_type="uint8"):
    """Return the complement (inverse) of an image.

    Args:
      np_img: Image as a NumPy array.
      output_type: Type of array to return ("float" or "uint8").

    Returns:
      Complement image as a NumPy array.
    """
    timer = Time()
    # Invert around full scale: 1.0 for float images, 255 for uint8 images.
    full_scale = 1.0 if output_type == "float" else 255
    inverted = full_scale - np_img
    util.np_info(inverted, "Complement", timer.elapsed())
    return inverted
def filter_hysteresis_threshold(np_img, low=50, high=100, output_type="uint8"):
    """Apply a two-level (hysteresis) threshold, producing a binary image.

    Args:
      np_img: Image as a NumPy array.
      low: Low threshold.
      high: High threshold.
      output_type: Type of array to return (bool, float, or uint8).

    Returns:
      NumPy array (bool, float, or uint8) where True/1.0/255 mark pixels
      above the hysteresis threshold.
    """
    timer = Time()
    result = sk_filters.apply_hysteresis_threshold(np_img, low, high)
    if output_type == "float":
        result = result.astype(float)
    elif output_type != "bool":
        result = (255 * result).astype("uint8")
    util.np_info(result, "Hysteresis Threshold", timer.elapsed())
    return result
def filter_otsu_threshold(np_img, output_type="uint8"):
    """Binarize an image with Otsu's method, keeping pixels above the threshold.

    Args:
      np_img: Image as a NumPy array.
      output_type: Type of array to return (bool, float, or uint8).

    Returns:
      NumPy array (bool, float, or uint8) where True/1.0/255 mark pixels
      above the global Otsu threshold.
    """
    timer = Time()
    threshold = sk_filters.threshold_otsu(np_img)
    result = np_img > threshold
    if output_type == "float":
        result = result.astype(float)
    elif output_type != "bool":
        result = result.astype("uint8") * 255
    util.np_info(result, "Otsu Threshold", timer.elapsed())
    return result
def filter_local_otsu_threshold(np_img, disk_size=3, output_type="uint8"):
    """Compute a per-pixel (local) Otsu threshold image over a disk neighborhood.

    Args:
      np_img: Image as a NumPy array.
      disk_size: Radius of the disk structuring element used per pixel.
      output_type: Type of array to return (bool, float, or uint8).

    Returns:
      NumPy array of local Otsu threshold values in the requested type.
    """
    timer = Time()
    footprint = sk_morphology.disk(disk_size)
    result = sk_filters.rank.otsu(np_img, footprint)
    if output_type == "float":
        result = result.astype(float)
    elif output_type != "bool":
        # NOTE(review): rank.otsu returns threshold *values* (not a binary
        # mask), so multiplying by 255 can overflow uint8 — confirm intent.
        result = result.astype("uint8") * 255
    util.np_info(result, "Otsu Local Threshold", timer.elapsed())
    return result
def filter_entropy(np_img, neighborhood=9, threshold=5, output_type="uint8"):
    """Threshold an image by local entropy (complexity).

    Args:
      np_img: Image as a NumPy array.
      neighborhood: Size of the square neighborhood (height and width of 1s).
      threshold: Entropy threshold value.
      output_type: Type of array to return (bool, float, or uint8).

    Returns:
      NumPy array (bool, float, or uint8) where True/1.0/255 mark pixels
      whose local entropy exceeds the threshold.
    """
    timer = Time()
    footprint = np.ones((neighborhood, neighborhood))
    result = sk_filters.rank.entropy(np_img, footprint) > threshold
    if output_type == "float":
        result = result.astype(float)
    elif output_type != "bool":
        result = result.astype("uint8") * 255
    util.np_info(result, "Entropy", timer.elapsed())
    return result
def filter_canny(np_img, sigma=1, low_threshold=0, high_threshold=25, output_type="uint8"):
    """Detect edges with the Canny algorithm, producing a binary edge map.

    Args:
      np_img: Image as a NumPy array.
      sigma: Width (std dev) of the Gaussian smoothing kernel.
      low_threshold: Low hysteresis threshold value.
      high_threshold: High hysteresis threshold value.
      output_type: Type of array to return (bool, float, or uint8).

    Returns:
      NumPy array (bool, float, or uint8) representing the Canny edge map.
    """
    timer = Time()
    edges = sk_feature.canny(np_img,
                             sigma=sigma,
                             low_threshold=low_threshold,
                             high_threshold=high_threshold)
    if output_type == "float":
        edges = edges.astype(float)
    elif output_type != "bool":
        edges = edges.astype("uint8") * 255
    util.np_info(edges, "Canny Edges", timer.elapsed())
    return edges
def mask_percent(np_img):
    """Determine the percentage of a NumPy array that is masked (0 values).

    For a 3-channel RGB array, a pixel counts as masked only when all three
    channels are zero; for any other shape, each zero element counts.

    Args:
      np_img: Image as a NumPy array (2D mask or 3-channel RGB).

    Returns:
      The percentage (0-100) of the array that is masked.
    """
    if (len(np_img.shape) == 3) and (np_img.shape[2] == 3):
        # Sum the channels in a wide dtype: adding uint8 channels directly
        # wraps modulo 256 (e.g. 128 + 128 + 0 == 0), which would miscount
        # bright pixels as masked.
        np_sum = (np_img[:, :, 0].astype(np.uint64)
                  + np_img[:, :, 1]
                  + np_img[:, :, 2])
        mask_percentage = 100 - np.count_nonzero(np_sum) / np_sum.size * 100
    else:
        mask_percentage = 100 - np.count_nonzero(np_img) / np_img.size * 100
    return mask_percentage
def tissue_percent(np_img):
    """Determine the percentage of a NumPy array that is tissue (not masked).

    Args:
      np_img: Image as a NumPy array.

    Returns:
      The percentage (0-100) of the array that is tissue.
    """
    masked = mask_percent(np_img)
    return 100 - masked
def filter_remove_small_objects(np_img, min_size=3000, avoid_overmask=True, overmask_thresh=95, output_type="uint8"):
    """
    Filter image to remove small objects (connected components) less than a particular minimum size. If avoid_overmask
    is True, this function can recursively call itself with progressively smaller minimum size objects to remove to
    reduce the amount of masking that this filter performs.
    Args:
      np_img: Image as a NumPy array of type bool.
      min_size: Minimum size of small object to remove.
      avoid_overmask: If True, avoid masking above the overmask_thresh percentage.
      overmask_thresh: If avoid_overmask is True, avoid masking above this threshold percentage value.
      output_type: Type of array to return (bool, float, or uint8).
    Returns:
      NumPy array (bool, float, or uint8).
    """
    t = Time()
    rem_sm = np_img.astype(bool)  # make sure mask is boolean
    rem_sm = sk_morphology.remove_small_objects(rem_sm, min_size=min_size)
    mask_percentage = mask_percent(rem_sm)
    if (mask_percentage >= overmask_thresh) and (min_size >= 1) and (avoid_overmask is True):
        # Integer halving keeps min_size integral for the %d format below.
        new_min_size = min_size // 2
        print("Mask percentage %3.2f%% >= overmask threshold %3.2f%% for Remove Small Objs size %d, so try %d" % (
            mask_percentage, overmask_thresh, min_size, new_min_size))
        # Request a bool result from the recursion so the output_type
        # conversion below is applied exactly once. (Passing output_type
        # through caused a double uint8 conversion: 255 * 255 wraps to 1.)
        rem_sm = filter_remove_small_objects(
            np_img, new_min_size, avoid_overmask, overmask_thresh, "bool")
    np_img = rem_sm
    if output_type == "bool":
        pass
    elif output_type == "float":
        np_img = np_img.astype(float)
    else:
        np_img = np_img.astype("uint8") * 255
    util.np_info(np_img, "Remove Small Objs", t.elapsed())
    return np_img
def filter_remove_small_holes(np_img, min_size=3000, output_type="uint8"):
    """
    Fill in holes (small background regions) below a particular size.
    Args:
      np_img: Image as a NumPy array of type bool.
      min_size: Remove small holes below this size.
      output_type: Type of array to return (bool, float, or uint8).
    Returns:
      NumPy array (bool, float, or uint8).
    """
    timer = Time()
    # NOTE(review): newer scikit-image renames min_size to area_threshold for
    # remove_small_holes — confirm against the pinned scikit-image version.
    result = sk_morphology.remove_small_holes(np_img, min_size=min_size)
    if output_type == "float":
        result = result.astype(float)
    elif output_type != "bool":
        result = result.astype("uint8") * 255
    util.np_info(result, "Remove Small Holes", timer.elapsed())
    return result
def filter_contrast_stretch(np_img, low=40, high=60):
    """
    Increase contrast by stretching intensities between two percentile bounds
    derived from 0-255 range values.
    Args:
      np_img: Image as a NumPy array (gray or RGB).
      low: Range low value (0 to 255).
      high: Range high value (0 to 255).
    Returns:
      Image as NumPy array with contrast enhanced.
    """
    timer = Time()
    # Map the 0-255 range values onto 0-100 percentiles.
    percentile_bounds = (low * 100 / 255, high * 100 / 255)
    low_p, high_p = np.percentile(np_img, percentile_bounds)
    stretched = sk_exposure.rescale_intensity(np_img, in_range=(low_p, high_p))
    util.np_info(stretched, "Contrast Stretch", timer.elapsed())
    return stretched
def filter_histogram_equalization(np_img, nbins=256, output_type="uint8"):
    """
    Increase contrast in a gray or RGB image using histogram equalization.
    Args:
      np_img: Image as a NumPy array (gray or RGB).
      nbins: Number of histogram bins.
      output_type: Type of array to return (float or uint8).
    Returns:
      NumPy array (float or uint8) with contrast enhanced by histogram equalization.
    """
    timer = Time()
    # Convert uint8 input to floats when a non-default bin count is requested,
    # so that nbins values other than 256 take effect.
    if np_img.dtype == "uint8" and nbins != 256:
        np_img = np_img / 255
    equalized = sk_exposure.equalize_hist(np_img, nbins=nbins)
    if output_type != "float":
        equalized = (equalized * 255).astype("uint8")
    util.np_info(equalized, "Hist Equalization", timer.elapsed())
    return equalized
def filter_adaptive_equalization(np_img, nbins=256, clip_limit=0.01, output_type="uint8"):
    """
    Enhance local contrast in a gray or RGB image with contrast-limited
    adaptive histogram equalization (CLAHE).
    Args:
      np_img: Image as a NumPy array (gray or RGB).
      nbins: Number of histogram bins.
      clip_limit: Clipping limit; a higher value increases contrast.
      output_type: Type of array to return (float or uint8).
    Returns:
      NumPy array (float or uint8) with contrast enhanced by adaptive equalization.
    """
    timer = Time()
    equalized = sk_exposure.equalize_adapthist(
        np_img, nbins=nbins, clip_limit=clip_limit)
    if output_type != "float":
        equalized = (equalized * 255).astype("uint8")
    util.np_info(equalized, "Adapt Equalization", timer.elapsed())
    return equalized
def filter_local_equalization(np_img, disk_size=50):
    """
    Equalize a grayscale image using local histograms computed over a disk
    structuring element.
    Args:
      np_img: Grayscale image as a NumPy array.
      disk_size: Radius of the disk structuring element used for the local histograms.
    Returns:
      NumPy array with contrast enhanced using local equalization.
    """
    timer = Time()
    equalized = sk_filters.rank.equalize(
        np_img, selem=sk_morphology.disk(disk_size))
    util.np_info(equalized, "Local Equalization", timer.elapsed())
    return equalized
def filter_rgb_to_hed(np_img, output_type="uint8"):
    """
    Convert RGB channels to HED (Hematoxylin - Eosin - Diaminobenzidine)
    channels and rescale to the requested output range.
    Args:
      np_img: RGB image as a NumPy array.
      output_type: Type of array to return (float or uint8).
    Returns:
      NumPy array (float or uint8) with HED channels.
    """
    timer = Time()
    hed = sk_color.rgb2hed(np_img)
    if output_type == "float":
        hed = sk_exposure.rescale_intensity(hed, out_range=(0.0, 1.0))
    else:
        rescaled = sk_exposure.rescale_intensity(hed, out_range=(0, 255))
        hed = rescaled.astype("uint8")
    util.np_info(hed, "RGB to HED", timer.elapsed())
    return hed
def filter_rgb_to_hsv(np_img, display_np_info=True):
    """
    Convert RGB channels to HSV (Hue, Saturation, Value).
    Args:
      np_img: RGB image as a NumPy array.
      display_np_info: If True, display NumPy array info and filter time.
    Returns:
      Image as NumPy array in HSV representation.
    """
    timer = Time() if display_np_info else None
    hsv = sk_color.rgb2hsv(np_img)
    if timer is not None:
        util.np_info(hsv, "RGB to HSV", timer.elapsed())
    return hsv
def filter_hsv_to_h(hsv, output_type="int", display_np_info=True):
    """
    Obtain hue values from HSV NumPy array as a 1-dimensional array. If output as an int array, the original float
    values are multiplied by 360 for their degree equivalents for simplicity. For more information, see
    https://en.wikipedia.org/wiki/HSL_and_HSV
    Args:
      hsv: HSV image as a NumPy array.
      output_type: Type of array to return (float or int).
      display_np_info: If True, display NumPy array info and filter time.
    Returns:
      Hue values (float or int) as a 1-dimensional NumPy array.
    """
    if display_np_info:
        t = Time()
    h = hsv[:, :, 0]
    h = h.flatten()  # flatten() copies, so the in-place *= below is safe
    if output_type == "int":
        h *= 360
        h = h.astype("int")
    if display_np_info:
        # Report info on the hue output (previously reported the hsv input,
        # inconsistent with every other filter in this module).
        util.np_info(h, "HSV to H", t.elapsed())
    return h
def filter_hsv_to_s(hsv):
    """
    Extract the saturation channel of an HSV image as a flat array (experimental).
    Args:
      hsv: HSV image as a NumPy array.
    Returns:
      Saturation values as a 1-dimensional NumPy array.
    """
    return hsv[:, :, 1].flatten()
def filter_hsv_to_v(hsv):
    """
    Extract the value channel of an HSV image as a flat array (experimental).
    Args:
      hsv: HSV image as a NumPy array.
    Returns:
      Value values as a 1-dimensional NumPy array.
    """
    return hsv[:, :, 2].flatten()
def filter_hed_to_hematoxylin(np_img, output_type="uint8"):
    """
    Extract the Hematoxylin channel (channel 0) from an HED image and rescale
    it (e.g. to 0-255 for uint8) for increased contrast.
    Args:
      np_img: HED image as a NumPy array.
      output_type: Type of array to return (float or uint8).
    Returns:
      NumPy array for Hematoxylin channel.
    """
    timer = Time()
    hema_channel = np_img[:, :, 0]
    if output_type == "float":
        hema_channel = sk_exposure.rescale_intensity(hema_channel, out_range=(0.0, 1.0))
    else:
        rescaled = sk_exposure.rescale_intensity(hema_channel, out_range=(0, 255))
        hema_channel = rescaled.astype("uint8")
    util.np_info(hema_channel, "HED to Hematoxylin", timer.elapsed())
    return hema_channel
def filter_hed_to_eosin(np_img, output_type="uint8"):
    """
    Extract the Eosin channel (channel 1) from an HED image and rescale it
    (e.g. to 0-255 for uint8) for increased contrast.
    Args:
      np_img: HED image as a NumPy array.
      output_type: Type of array to return (float or uint8).
    Returns:
      NumPy array for Eosin channel.
    """
    timer = Time()
    eosin_channel = np_img[:, :, 1]
    if output_type == "float":
        eosin_channel = sk_exposure.rescale_intensity(eosin_channel, out_range=(0.0, 1.0))
    else:
        rescaled = sk_exposure.rescale_intensity(eosin_channel, out_range=(0, 255))
        eosin_channel = rescaled.astype("uint8")
    util.np_info(eosin_channel, "HED to Eosin", timer.elapsed())
    return eosin_channel
def filter_binary_fill_holes(np_img, output_type="bool"):
    """
    Fill holes inside binary objects (bool, float, or uint8 input).
    Args:
      np_img: Binary image as a NumPy array.
      output_type: Type of array to return (bool, float, or uint8).
    Returns:
      NumPy array (bool, float, or uint8) where holes have been filled.
    """
    timer = Time()
    if np_img.dtype == "uint8":
        np_img = np_img / 255  # scale 0/255 values down to 0/1
    filled = sc_morph.binary_fill_holes(np_img)
    if output_type == "float":
        filled = filled.astype(float)
    elif output_type != "bool":
        filled = filled.astype("uint8") * 255
    util.np_info(filled, "Binary Fill Holes", timer.elapsed())
    return filled
def filter_binary_erosion(np_img, disk_size=5, iterations=1, output_type="uint8"):
    """
    Erode a binary object (bool, float, or uint8) with a disk structuring element.
    Args:
      np_img: Binary image as a NumPy array.
      disk_size: Radius of the disk structuring element used for erosion.
      iterations: How many times to repeat the erosion.
      output_type: Type of array to return (bool, float, or uint8).
    Returns:
      NumPy array (bool, float, or uint8) where edges have been eroded.
    """
    timer = Time()
    if np_img.dtype == "uint8":
        np_img = np_img / 255  # scale 0/255 values down to 0/1
    eroded = sc_morph.binary_erosion(
        np_img, sk_morphology.disk(disk_size), iterations=iterations)
    if output_type == "float":
        eroded = eroded.astype(float)
    elif output_type != "bool":
        eroded = eroded.astype("uint8") * 255
    util.np_info(eroded, "Binary Erosion", timer.elapsed())
    return eroded
def filter_binary_dilation(np_img, disk_size=5, iterations=1, output_type="uint8"):
    """
    Dilate a binary object (bool, float, or uint8) with a disk structuring element.
    Args:
      np_img: Binary image as a NumPy array.
      disk_size: Radius of the disk structuring element used for dilation.
      iterations: How many times to repeat the dilation.
      output_type: Type of array to return (bool, float, or uint8).
    Returns:
      NumPy array (bool, float, or uint8) where edges have been dilated.
    """
    timer = Time()
    if np_img.dtype == "uint8":
        np_img = np_img / 255  # scale 0/255 values down to 0/1
    dilated = sc_morph.binary_dilation(
        np_img, sk_morphology.disk(disk_size), iterations=iterations)
    if output_type == "float":
        dilated = dilated.astype(float)
    elif output_type != "bool":
        dilated = dilated.astype("uint8") * 255
    util.np_info(dilated, "Binary Dilation", timer.elapsed())
    return dilated
def filter_binary_opening(np_img, disk_size=3, iterations=1, output_type="uint8"):
    """
    Open a binary object (bool, float, or uint8): an erosion followed by a
    dilation. Opening can be used to remove small objects.
    Args:
      np_img: Binary image as a NumPy array.
      disk_size: Radius of the disk structuring element used for opening.
      iterations: How many times to repeat.
      output_type: Type of array to return (bool, float, or uint8).
    Returns:
      NumPy array (bool, float, or uint8) following binary opening.
    """
    timer = Time()
    if np_img.dtype == "uint8":
        np_img = np_img / 255  # scale 0/255 values down to 0/1
    opened = sc_morph.binary_opening(
        np_img, sk_morphology.disk(disk_size), iterations=iterations)
    if output_type == "float":
        opened = opened.astype(float)
    elif output_type != "bool":
        opened = opened.astype("uint8") * 255
    util.np_info(opened, "Binary Opening", timer.elapsed())
    return opened
def filter_binary_closing(np_img, disk_size=3, iterations=1, output_type="uint8"):
    """
    Close a binary object (bool, float, or uint8): a dilation followed by an
    erosion. Closing can be used to remove small holes.
    Args:
      np_img: Binary image as a NumPy array.
      disk_size: Radius of the disk structuring element used for closing.
      iterations: How many times to repeat.
      output_type: Type of array to return (bool, float, or uint8).
    Returns:
      NumPy array (bool, float, or uint8) following binary closing.
    """
    timer = Time()
    if np_img.dtype == "uint8":
        np_img = np_img / 255  # scale 0/255 values down to 0/1
    closed = sc_morph.binary_closing(
        np_img, sk_morphology.disk(disk_size), iterations=iterations)
    if output_type == "float":
        closed = closed.astype(float)
    elif output_type != "bool":
        closed = closed.astype("uint8") * 255
    util.np_info(closed, "Binary Closing", timer.elapsed())
    return closed
def filter_kmeans_segmentation(np_img, compactness=10, n_segments=800):
    """
    Segment an RGB image with SLIC K-means (color/space proximity) and color
    each segment with its average color.
    Args:
      np_img: RGB image as a NumPy array.
      compactness: Color proximity versus space proximity factor.
      n_segments: The number of segments.
    Returns:
      NumPy array (uint8) 3-channel RGB image where each segment has been
      colored based on the average color for that segment.
    """
    timer = Time()
    segment_labels = sk_segmentation.slic(
        np_img, compactness=compactness, n_segments=n_segments)
    averaged = sk_color.label2rgb(segment_labels, np_img, kind='avg')
    util.np_info(averaged, "K-Means Segmentation", timer.elapsed())
    return averaged
def filter_rag_threshold(np_img, compactness=10, n_segments=800, threshold=9):
    """
    Segment an RGB image with SLIC K-means, build a region adjacency graph on
    the segments, merge similar regions below the threshold, and color each
    resulting region with its average color.
    Args:
      np_img: RGB image as a NumPy array.
      compactness: Color proximity versus space proximity factor.
      n_segments: The number of segments.
      threshold: Threshold value for combining regions.
    Returns:
      NumPy array (uint8) 3-channel RGB image of the merged regions, each
      colored by its average color.
    """
    timer = Time()
    segment_labels = sk_segmentation.slic(
        np_img, compactness=compactness, n_segments=n_segments)
    rag = sk_future.graph.rag_mean_color(np_img, segment_labels)
    merged_labels = sk_future.graph.cut_threshold(segment_labels, rag, threshold)
    merged = sk_color.label2rgb(merged_labels, np_img, kind='avg')
    util.np_info(merged, "RAG Threshold", timer.elapsed())
    return merged
def filter_threshold(np_img, threshold, output_type="bool"):
    """
    Build a mask that is set wherever an input pixel exceeds the threshold.
    Args:
      np_img: Image as a NumPy array.
      threshold: The threshold value to exceed.
      output_type: Type of array to return (bool, float, or uint8).
    Returns:
      NumPy array where True, 1.0, or 255 marks input pixels that exceed the
      threshold value.
    """
    timer = Time()
    result = np_img > threshold
    if output_type == "float":
        result = result.astype(float)
    elif output_type != "bool":
        result = result.astype("uint8") * 255
    util.np_info(result, "Threshold", timer.elapsed())
    return result
def filter_green_channel(np_img, green_thresh=200, avoid_overmask=True, overmask_thresh=90, output_type="bool"):
    """
    Create a mask to filter out pixels with a green channel value greater than a particular threshold, since hematoxylin
    and eosin are purplish and pinkish, which do not have much green to them.
    Args:
      np_img: RGB image as a NumPy array.
      green_thresh: Green channel threshold value (0 to 255). If value is greater than green_thresh, mask out pixel.
      avoid_overmask: If True, avoid masking above the overmask_thresh percentage.
      overmask_thresh: If avoid_overmask is True, avoid masking above this threshold percentage value.
      output_type: Type of array to return (bool, float, or uint8).
    Returns:
      NumPy array representing a mask where pixels above a particular green channel threshold have been masked out.
    """
    t = Time()
    g = np_img[:, :, 1]
    gr_ch_mask = (g < green_thresh) & (g > 0)
    mask_percentage = mask_percent(gr_ch_mask)
    if (mask_percentage >= overmask_thresh) and (green_thresh < 255) and (avoid_overmask is True):
        new_green_thresh = math.ceil((255 - green_thresh) / 2 + green_thresh)
        print(
            "Mask percentage %3.2f%% >= overmask threshold %3.2f%% for Remove Green Channel green_thresh=%d, so try %d" % (
                mask_percentage, overmask_thresh, green_thresh, new_green_thresh))
        # Request a bool result from the recursion so the output_type
        # conversion below is applied exactly once. (Passing output_type
        # through caused a double uint8 conversion: 255 * 255 wraps to 1.)
        gr_ch_mask = filter_green_channel(
            np_img, new_green_thresh, avoid_overmask, overmask_thresh, "bool")
    np_img = gr_ch_mask
    if output_type == "bool":
        pass
    elif output_type == "float":
        np_img = np_img.astype(float)
    else:
        np_img = np_img.astype("uint8") * 255
    util.np_info(np_img, "Filter Green Channel", t.elapsed())
    return np_img
def filter_red(rgb, red_lower_thresh, green_upper_thresh, blue_upper_thresh, output_type="bool",
               display_np_info=False):
    """
    Mask out reddish colors: pixels whose red channel is above a lower
    threshold while green and blue are below their upper thresholds.
    Args:
      rgb: RGB image as a NumPy array.
      red_lower_thresh: Red channel lower threshold value.
      green_upper_thresh: Green channel upper threshold value.
      blue_upper_thresh: Blue channel upper threshold value.
      output_type: Type of array to return (bool, float, or uint8).
      display_np_info: If True, display NumPy array info and filter time.
    Returns:
      NumPy array representing the mask.
    """
    if display_np_info:
        timer = Time()
    is_red = (rgb[:, :, 0] > red_lower_thresh) & \
             (rgb[:, :, 1] < green_upper_thresh) & \
             (rgb[:, :, 2] < blue_upper_thresh)
    result = ~is_red
    if output_type == "float":
        result = result.astype(float)
    elif output_type != "bool":
        result = result.astype("uint8") * 255
    if display_np_info:
        util.np_info(result, "Filter Red", timer.elapsed())
    return result
def filter_red_pen(rgb, output_type="bool"):
    """
    Create a mask to filter out red pen marks from a slide by intersecting a
    set of red-color masks that cover the observed pen shades.
    Args:
      rgb: RGB image as a NumPy array.
      output_type: Type of array to return (bool, float, or uint8).
    Returns:
      NumPy array representing the mask.
    """
    timer = Time()
    # (red_lower_thresh, green_upper_thresh, blue_upper_thresh) triples.
    thresholds = [
        (150, 80, 90),
        (110, 20, 30),
        (185, 65, 105),
        (195, 85, 125),
        (220, 115, 145),
        (125, 40, 70),
        (200, 120, 150),
        (100, 50, 65),
        (85, 25, 45),
    ]
    result = filter_red(rgb, *thresholds[0])
    for red_lo, green_hi, blue_hi in thresholds[1:]:
        result = result & filter_red(rgb, red_lo, green_hi, blue_hi)
    if output_type == "float":
        result = result.astype(float)
    elif output_type != "bool":
        result = result.astype("uint8") * 255
    util.np_info(result, "Filter Red Pen", timer.elapsed())
    return result
def filter_green(rgb, red_upper_thresh, green_lower_thresh, blue_lower_thresh, output_type="bool",
                 display_np_info=False):
    """
    Mask out greenish colors: pixels whose red channel is below an upper
    threshold while green and blue are above their lower thresholds. For green
    ink the green and blue channels track together, hence a blue lower (not
    upper) threshold.
    Args:
      rgb: RGB image as a NumPy array.
      red_upper_thresh: Red channel upper threshold value.
      green_lower_thresh: Green channel lower threshold value.
      blue_lower_thresh: Blue channel lower threshold value.
      output_type: Type of array to return (bool, float, or uint8).
      display_np_info: If True, display NumPy array info and filter time.
    Returns:
      NumPy array representing the mask.
    """
    if display_np_info:
        timer = Time()
    is_green = (rgb[:, :, 0] < red_upper_thresh) & \
               (rgb[:, :, 1] > green_lower_thresh) & \
               (rgb[:, :, 2] > blue_lower_thresh)
    result = ~is_green
    if output_type == "float":
        result = result.astype(float)
    elif output_type != "bool":
        result = result.astype("uint8") * 255
    if display_np_info:
        util.np_info(result, "Filter Green", timer.elapsed())
    return result
def filter_green_pen(rgb, output_type="bool"):
    """
    Create a mask to filter out green pen marks from a slide by intersecting a
    set of green-color masks that cover the observed pen shades.
    Args:
      rgb: RGB image as a NumPy array.
      output_type: Type of array to return (bool, float, or uint8).
    Returns:
      NumPy array representing the mask.
    """
    timer = Time()
    # (red_upper_thresh, green_lower_thresh, blue_lower_thresh) triples.
    thresholds = [
        (150, 160, 140),
        (70, 110, 110),
        (45, 115, 100),
        (30, 75, 60),
        (195, 220, 210),
        (225, 230, 225),
        (170, 210, 200),
        (20, 30, 20),
        (50, 60, 40),
        (30, 50, 35),
        (65, 70, 60),
        (100, 110, 105),
        (165, 180, 180),
        (140, 140, 150),
        (185, 195, 195),
    ]
    result = filter_green(rgb, *thresholds[0])
    for red_hi, green_lo, blue_lo in thresholds[1:]:
        result = result & filter_green(rgb, red_hi, green_lo, blue_lo)
    if output_type == "float":
        result = result.astype(float)
    elif output_type != "bool":
        result = result.astype("uint8") * 255
    util.np_info(result, "Filter Green Pen", timer.elapsed())
    return result
def filter_blue(rgb, red_upper_thresh, green_upper_thresh, blue_lower_thresh, output_type="bool",
                display_np_info=False):
    """
    Mask out blueish colors: pixels whose red and green channels are below
    their upper thresholds while blue is above its lower threshold.
    Args:
      rgb: RGB image as a NumPy array.
      red_upper_thresh: Red channel upper threshold value.
      green_upper_thresh: Green channel upper threshold value.
      blue_lower_thresh: Blue channel lower threshold value.
      output_type: Type of array to return (bool, float, or uint8).
      display_np_info: If True, display NumPy array info and filter time.
    Returns:
      NumPy array representing the mask.
    """
    if display_np_info:
        timer = Time()
    is_blue = (rgb[:, :, 0] < red_upper_thresh) & \
              (rgb[:, :, 1] < green_upper_thresh) & \
              (rgb[:, :, 2] > blue_lower_thresh)
    result = ~is_blue
    if output_type == "float":
        result = result.astype(float)
    elif output_type != "bool":
        result = result.astype("uint8") * 255
    if display_np_info:
        util.np_info(result, "Filter Blue", timer.elapsed())
    return result
def filter_blue_pen(rgb, output_type="bool"):
    """
    Create a mask to filter out blue pen marks from a slide by intersecting a
    set of blue-color masks that cover the observed pen shades.
    Args:
      rgb: RGB image as a NumPy array.
      output_type: Type of array to return (bool, float, or uint8).
    Returns:
      NumPy array representing the mask.
    """
    timer = Time()
    # (red_upper_thresh, green_upper_thresh, blue_lower_thresh) triples.
    thresholds = [
        (60, 120, 190),
        (120, 170, 200),
        (175, 210, 230),
        (145, 180, 210),
        (37, 95, 160),
        (30, 65, 130),
        (130, 155, 180),
        (40, 35, 85),
        (30, 20, 65),
        (90, 90, 140),
        (60, 60, 120),
        (110, 110, 175),
    ]
    result = filter_blue(rgb, *thresholds[0])
    for red_hi, green_hi, blue_lo in thresholds[1:]:
        result = result & filter_blue(rgb, red_hi, green_hi, blue_lo)
    if output_type == "float":
        result = result.astype(float)
    elif output_type != "bool":
        result = result.astype("uint8") * 255
    util.np_info(result, "Filter Blue Pen", timer.elapsed())
    return result
def filter_grays(rgb, tolerance=15, output_type="bool"):
    """
    Create a mask to filter out pixels where the red, green, and blue channel values are similar.
    Args:
      rgb: RGB image as a NumPy array.
      tolerance: Tolerance value to determine how similar the values must be in order to be filtered out.
      output_type: Type of array to return (bool, float, or uint8).
    Returns:
      NumPy array representing a mask where pixels with similar red, green, and blue values have been masked out.
    """
    t = Time()
    # Use Python's int: np.int was deprecated in NumPy 1.20 and removed in
    # 1.24. A signed type is also required so channel differences don't wrap.
    rgb = rgb.astype(int)
    rg_diff = abs(rgb[:, :, 0] - rgb[:, :, 1]) <= tolerance
    rb_diff = abs(rgb[:, :, 0] - rgb[:, :, 2]) <= tolerance
    gb_diff = abs(rgb[:, :, 1] - rgb[:, :, 2]) <= tolerance
    result = ~(rg_diff & rb_diff & gb_diff)
    if output_type == "bool":
        pass
    elif output_type == "float":
        result = result.astype(float)
    else:
        result = result.astype("uint8") * 255
    util.np_info(result, "Filter Grays", t.elapsed())
    return result
def uint8_to_bool(np_img):
    """
    Convert a NumPy array of uint8 (255, 0) values to bool (True, False) values.
    Args:
      np_img: Binary image as NumPy array of uint8 (255, 0) values.
    Returns:
      NumPy array of bool (True, False) values.
    """
    return (np_img / 255).astype(bool)
def apply_image_filters(np_img, cfg, slide_num=None, info=None, save=False, display=False):
    """
    Apply filters to image as NumPy array and optionally save and/or display filtered images.

    Pipeline: build five masks (not green channel, not gray, no red/green/blue
    pen), intersect them, remove small objects, and return the RGB image with
    the combined mask applied.
    Args:
      np_img: Image as NumPy array.
      cfg: Configuration object, passed through to save_display for save paths.
      slide_num: The slide number (used for saving/displaying).
      info: Dictionary of slide information (used for HTML display).
      save: If True, save image.
      display: If True, display image.
    Returns:
      Resulting filtered image as a NumPy array.
    """
    rgb = np_img
    save_display(save, cfg, display, info, rgb, slide_num, 1, "Original", "rgb")
    # H&E stains are purplish/pinkish, so strongly green pixels are background.
    mask_not_green = filter_green_channel(rgb)
    rgb_not_green = util.mask_rgb(rgb, mask_not_green)
    save_display(save, cfg, display, info, rgb_not_green,
                 slide_num, 2, "Not Green", "rgb-not-green")
    # Grayish pixels (similar R/G/B values) are background, not tissue.
    mask_not_gray = filter_grays(rgb)
    rgb_not_gray = util.mask_rgb(rgb, mask_not_gray)
    save_display(save, cfg, display, info, rgb_not_gray,
                 slide_num, 3, "Not Gray", "rgb-not-gray")
    mask_no_red_pen = filter_red_pen(rgb)
    rgb_no_red_pen = util.mask_rgb(rgb, mask_no_red_pen)
    save_display(save, cfg, display, info, rgb_no_red_pen,
                 slide_num, 4, "No Red Pen", "rgb-no-red-pen")
    mask_no_green_pen = filter_green_pen(rgb)
    rgb_no_green_pen = util.mask_rgb(rgb, mask_no_green_pen)
    save_display(save, cfg, display, info, rgb_no_green_pen,
                 slide_num, 5, "No Green Pen", "rgb-no-green-pen")
    mask_no_blue_pen = filter_blue_pen(rgb)
    rgb_no_blue_pen = util.mask_rgb(rgb, mask_no_blue_pen)
    save_display(save, cfg, display, info, rgb_no_blue_pen,
                 slide_num, 6, "No Blue Pen", "rgb-no-blue-pen")
    # Intersect all boolean masks: a pixel survives only if every filter keeps it.
    mask_gray_green_pens = mask_not_gray & mask_not_green & mask_no_red_pen & mask_no_green_pen & mask_no_blue_pen
    rgb_gray_green_pens = util.mask_rgb(rgb, mask_gray_green_pens)
    save_display(save, cfg, display, info, rgb_gray_green_pens, slide_num, 7, "Not Gray, Not Green, No Pens",
                 "rgb-no-gray-no-green-no-pens")
    # Drop isolated specks that survived the color filters.
    mask_remove_small = filter_remove_small_objects(
        mask_gray_green_pens, min_size=500, output_type="bool")
    rgb_remove_small = util.mask_rgb(rgb, mask_remove_small)
    save_display(save, cfg, display, info, rgb_remove_small, slide_num, 8,
                 "Not Gray, Not Green, No Pens,\nRemove Small Objects",
                 "rgb-not-green-not-gray-no-pens-remove-small")
    img = rgb_remove_small
    return img
def apply_filters_to_image(slide_num, cfg, save=True, display=False):
    """
    Apply a set of filters to an image and optionally save and/or display filtered images.
    Args:
      slide_num: The slide number.
      cfg: Configuration object (provides FILTER.DIR and SLIDE.THUMBNAIL_SIZE).
      save: If True, save filtered images.
      display: If True, display filtered images to screen.
    Returns:
      Tuple consisting of 1) the resulting filtered image as a NumPy array, and 2) dictionary of image information
      (used for HTML page generation).
    """
    t = Time()
    print("Processing slide #%d" % slide_num)
    info = dict()
    if save and not os.path.exists(cfg.FILTER.DIR):
        os.makedirs(cfg.FILTER.DIR)
    img_path = slide.get_training_image_path(slide_num)
    np_orig = slide.open_image_np(img_path)
    # Pass cfg as the second positional argument. The previous call omitted
    # cfg, which shifted slide_num and info into the wrong parameters of
    # apply_image_filters(np_img, cfg, slide_num, info, ...).
    filtered_np_img = apply_image_filters(
        np_orig, cfg, slide_num, info, save=save, display=display)
    if save:
        t1 = Time()
        result_path = slide.get_filter_image_result(slide_num)
        pil_img = util.np_to_pil(filtered_np_img)
        pil_img.save(result_path)
        print("%-20s | Time: %-14s Name: %s" %
              ("Save Image", str(t1.elapsed()), result_path))
        t1 = Time()
        thumbnail_path = slide.get_filter_thumbnail_result(slide_num)
        slide.save_thumbnail(pil_img, cfg.SLIDE.THUMBNAIL_SIZE, thumbnail_path)
        print("%-20s | Time: %-14s Name: %s" %
              ("Save Thumbnail", str(t1.elapsed()), thumbnail_path))
    print("Slide #%03d processing time: %s\n" % (slide_num, str(t.elapsed())))
    return filtered_np_img, info
def save_display(save, cfg, display, info, np_img, slide_num, filter_num, display_text, file_text,
                 display_mask_percentage=True):
    """
    Optionally save an image and/or display the image.
    Args:
      save: If True, save filtered images.
      cfg: Configuration object, passed through to save_filtered_image.
      display: If True, display filtered images to screen.
      info: Dictionary to store filter information, or None to skip recording.
      np_img: Image as a NumPy array.
      slide_num: The slide number.
      filter_num: The filter number.
      display_text: Filter display name.
      file_text: Filter name for file.
      display_mask_percentage: If True, display mask percentage on displayed slide.
    """
    mask_percentage = None
    if display_mask_percentage:
        mask_percentage = mask_percent(np_img)
        display_text = display_text + \
            "\n(" + mask_percentage_text(mask_percentage) + " masked)"
    # Prefix display text with slide and/or filter identifiers when available.
    if slide_num is None and filter_num is None:
        pass
    elif filter_num is None:
        display_text = "S%03d " % slide_num + display_text
    elif slide_num is None:
        display_text = "F%03d " % filter_num + display_text
    else:
        display_text = "S%03d-F%03d " % (slide_num, filter_num) + display_text
    if display:
        util.display_img(np_img, display_text)
    if save:
        save_filtered_image(np_img, cfg, slide_num, filter_num, file_text)
    if info is not None:
        # NOTE(review): the key assumes slide_num and filter_num are both
        # non-None; calling with info set and either of them None would raise
        # a TypeError here — confirm callers always supply both.
        info[slide_num * 1000 + filter_num] = (
            slide_num, filter_num, display_text, file_text, mask_percentage)
def mask_percentage_text(mask_percentage):
    """
    Format a mask percentage as a display string (two decimal places plus '%').
    Args:
      mask_percentage: The mask percentage.
    Returns:
      The mask percentage formatted as a string.
    """
    return f"{mask_percentage:3.2f}%"
def image_cell(slide_num, filter_num, display_text, file_text):
    """
    Generate HTML for viewing a processed image.
    Args:
      slide_num: The slide number.
      filter_num: The filter number.
      display_text: Filter display name.
      file_text: Filter name for file.
    Returns:
      HTML for a table cell for viewing a filtered image.
    """
    filt_img = slide.get_filter_image_path(slide_num, filter_num, file_text)
    filt_thumb = slide.get_filter_thumbnail_path(
        slide_num, filter_num, file_text)
    # Removed an unused call to slide.get_filter_image_filename whose result
    # was never referenced.
    return " <td>\n" + \
           " <a target=\"_blank\" href=\"%s\">%s<br/>\n" % (filt_img, display_text) + \
           " <img src=\"%s\" />\n" % (filt_thumb) + \
           " </a>\n" + \
           " </td>\n"
def html_header(page_title):
    """
    Generate an HTML header for previewing images.
    Args:
      page_title: Text placed in the page's title element.
    Returns:
      HTML header for viewing images.
    """
    pieces = (
        "<!DOCTYPE html PUBLIC \"-//W3C//DTD XHTML 1.0 Strict//EN\" ",
        "\"http://www.w3.org/TR/xhtml1/DTD/xhtml1-strict.dtd\">\n",
        "<html xmlns=\"http://www.w3.org/1999/xhtml\" lang=\"en\" xml:lang=\"en\">\n",
        " <head>\n",
        " <title>%s</title>\n" % page_title,
        " <style type=\"text/css\">\n",
        " img { border: 2px solid black; }\n",
        " td { border: 2px solid black; }\n",
        " </style>\n",
        " </head>\n",
        " <body>\n",
    )
    return "".join(pieces)
def html_footer():
  """
  Return the closing body/html tags that terminate a preview page.
  Returns:
    HTML footer for viewing images.
  """
  return "</body>\n</html>\n"
def save_filtered_image(np_img, cfg, slide_num, filter_num, filter_text):
  """
  Save a filtered image and its thumbnail to the file system, printing timings.
  Args:
    np_img: Image as a NumPy array.
    cfg: Configuration object; cfg.SLIDE.THUMBNAIL_SIZE sets the thumbnail size.
    slide_num: The slide number.
    filter_num: The filter number.
    filter_text: Descriptive text to add to the image filename.
  """
  t = Time()
  filepath = slide.get_filter_image_path(slide_num, filter_num, filter_text)
  pil_img = util.np_to_pil(np_img)
  pil_img.save(filepath)
  # Report elapsed time for saving the full-size image.
  print("%-20s | Time: %-14s Name: %s" %
     ("Save Image", str(t.elapsed()), filepath))
  t1 = Time()
  thumbnail_filepath = slide.get_filter_thumbnail_path(
    slide_num, filter_num, filter_text)
  slide.save_thumbnail(pil_img, cfg.SLIDE.THUMBNAIL_SIZE, thumbnail_filepath)
  # Report elapsed time for the thumbnail separately.
  print("%-20s | Time: %-14s Name: %s" %
     ("Save Thumbnail", str(t1.elapsed()), thumbnail_filepath))
def generate_filter_html_result(html_page_info):
  """
  Generate HTML to view the filtered images. If slide.FILTER_PAGINATE is True, the results will be paginated.
  Args:
    html_page_info: Dictionary of image information keyed by
      slide_num * 1000 + filter_num; each value is a tuple
      (slide_num, filter_num, display_text, file_text, ...).
  """
  if not slide.FILTER_PAGINATE:
    # Single page: one table row per slide, one cell per filter stage.
    html = ""
    html += html_header("Filtered Images")
    html += " <table>\n"
    row = 0
    for key in sorted(html_page_info):
      value = html_page_info[key]
      current_row = value[0]
      # value[0] is the slide number; a new slide starts a new table row.
      if current_row > row:
        html += " <tr>\n"
        row = current_row
      html += image_cell(value[0], value[1], value[2], value[3])
      # Close the row when the next consecutive key is absent, i.e. this
      # was the last filter entry for the slide.
      next_key = key + 1
      if next_key not in html_page_info:
        html += " </tr>\n"
    html += " </table>\n"
    html += html_footer()
    text_file = open(os.path.join(
      slide.FILTER_HTML_DIR, "filters.html"), "w")
    text_file.write(html)
    text_file.close()
  else:
    # Paginated output: FILTER_PAGINATION_SIZE slides per page.
    # Recover the distinct slide numbers from the composite keys.
    slide_nums = set()
    for key in html_page_info:
      slide_num = math.floor(key / 1000)
      slide_nums.add(slide_num)
    slide_nums = sorted(list(slide_nums))
    total_len = len(slide_nums)
    page_size = slide.FILTER_PAGINATION_SIZE
    num_pages = math.ceil(total_len / page_size)
    for page_num in range(1, num_pages + 1):
      start_index = (page_num - 1) * page_size
      end_index = (page_num * page_size) if (page_num <
                        num_pages) else total_len
      page_slide_nums = slide_nums[start_index:end_index]
      html = ""
      html += html_header("Filtered Images, Page %d" % page_num)
      # Prev/next navigation; page 1 is "filters.html", later pages are
      # "filters-<n>.html".
      html += " <div style=\"font-size: 20px\">"
      if page_num > 1:
        if page_num == 2:
          html += "<a href=\"filters.html\"><</a> "
        else:
          html += "<a href=\"filters-%d.html\"><</a> " % (
            page_num - 1)
      html += "Page %d" % page_num
      if page_num < num_pages:
        html += " <a href=\"filters-%d.html\">></a> " % (
          page_num + 1)
      html += "</div>\n"
      html += " <table>\n"
      for slide_num in page_slide_nums:
        html += " <tr>\n"
        # Walk consecutive filter numbers for this slide until a key
        # is missing (filter numbering is assumed contiguous from 1).
        filter_num = 1
        lookup_key = slide_num * 1000 + filter_num
        while lookup_key in html_page_info:
          value = html_page_info[lookup_key]
          html += image_cell(value[0], value[1], value[2], value[3])
          lookup_key += 1
        html += " </tr>\n"
      html += " </table>\n"
      html += html_footer()
      if page_num == 1:
        text_file = open(os.path.join(
          slide.FILTER_HTML_DIR, "filters.html"), "w")
      else:
        text_file = open(os.path.join(
          slide.FILTER_HTML_DIR, "filters-%d.html" % page_num), "w")
      text_file.write(html)
      text_file.close()
def apply_filters_to_image_list(image_num_list, save, display, cfg):
  """
  Apply the filter pipeline to every slide number in a list.
  Args:
    image_num_list: List of image (slide) numbers.
    save: If True, save filtered images.
    display: If True, display filtered images to screen.
    cfg: Configuration object forwarded to apply_filters_to_image.
  Returns:
    Tuple of (the input list of image numbers, merged dict of image filter information).
  """
  html_page_info = {}
  for image_num in image_num_list:
    _, filter_info = apply_filters_to_image(
      image_num, cfg, save=save, display=display)
    html_page_info.update(filter_info)
  return image_num_list, html_page_info
def apply_filters_to_image_range(start_ind: int, end_ind: int, save: bool, display: bool, cfg: default_cfg) -> tuple:
  """
  Apply the filter pipeline to slides numbered start_ind..end_ind (both inclusive).
  Args:
    start_ind: Starting index (inclusive).
    end_ind: Ending index (inclusive).
    save: If True, save filtered images.
    display: If True, display filtered images to screen.
    cfg: Configuration object forwarded to apply_filters_to_image.
  Returns:
    Tuple of (start_ind, end_ind, merged dict of image filter information).
  """
  html_page_info = {}
  for slide_number in range(start_ind, end_ind + 1):
    _, filter_info = apply_filters_to_image(
      slide_number, cfg, save=save, display=display)
    html_page_info.update(filter_info)
  return start_ind, end_ind, html_page_info
def singleprocess_apply_filters_to_images(save=True, display=False, html=True, image_num_list=None, cfg=None):
  """
  Apply a set of filters to training images and optionally save and/or display the filtered images.
  Args:
    save: If True, save filtered images.
    display: If True, display filtered images to screen.
    html: If True, generate HTML page to display filtered images.
    image_num_list: Optionally specify a list of image slide numbers.
    cfg: Configuration object forwarded to the per-image filter functions.
      The original code omitted this argument entirely, so both branches
      raised TypeError: apply_filters_to_image_list() and
      apply_filters_to_image_range() require cfg as their last parameter.
      Added as a trailing keyword with a default to stay backward-compatible.
  """
  t = Time()
  print("Applying filters to images\n")
  if image_num_list is not None:
    _, info = apply_filters_to_image_list(image_num_list, save, display, cfg)
  else:
    num_training_slides = slide.get_num_training_slides()
    # Only the merged filter-info dict is needed; the echoed start/end
    # indices are already known here.
    _, _, info = apply_filters_to_image_range(
      1, num_training_slides, save, display, cfg)
  print("Time to apply filters to all images: %s\n" % str(t.elapsed()))
  if html:
    generate_filter_html_result(info)
def multiprocess_apply_filters_to_images(cfg: default_cfg, save=True, display=False, html=True, image_num_list=None):
  """
  Apply a set of filters to all training images using multiple processes (one process per core).
  Args:
    cfg: Configuration object; cfg.FILTER.DIR is created when saving.
    save: If True, save filtered images.
    display: If True, display filtered images to screen (multiprocessed display not recommended).
    html: If True, generate HTML page to display filtered images.
    image_num_list: Optionally specify a list of image slide numbers.
  """
  timer = Time()
  print("Applying filters to images (multiprocess)\n")
  if save and not os.path.exists(cfg.FILTER.DIR):
    os.makedirs(cfg.FILTER.DIR)
  # how many processes to use
  num_processes = multiprocessing.cpu_count()
  pool = multiprocessing.Pool(num_processes)
  if image_num_list is not None:
    num_train_images = len(image_num_list)
  else:
    num_train_images = slide.get_num_training_slides()
  # Never spawn more processes than there are images to process.
  if num_processes > num_train_images:
    num_processes = num_train_images
  images_per_process = num_train_images / num_processes
  print("Number of processes: " + str(num_processes))
  print("Number of training images: " + str(num_train_images))
  # Build one task tuple per worker: either a sublist of slide numbers or
  # an inclusive (start, end) index range, matching the two worker functions.
  tasks = []
  for num_process in range(1, num_processes + 1):
    start_index = (num_process - 1) * images_per_process + 1
    end_index = num_process * images_per_process
    start_index = int(start_index)
    end_index = int(end_index)
    if image_num_list is not None:
      sublist = image_num_list[start_index - 1:end_index]
      tasks.append((sublist, save, display, cfg))
      print("Task #" + str(num_process) +
         ": Process slides " + str(sublist))
    else:
      tasks.append((start_index, end_index, save, display, cfg))
      if start_index == end_index:
        print("Task #" + str(num_process) +
           ": Process slide " + str(start_index))
      else:
        print("Task #" + str(num_process) + ": Process slides " +
           str(start_index) + " to " + str(end_index))
  # start tasks
  results = []
  for t in tasks:
    if image_num_list is not None:
      results.append(pool.apply_async(apply_filters_to_image_list, t))
    else:
      results.append(pool.apply_async(apply_filters_to_image_range, t))
  # Collect each worker's filter info and merge into one dict for the report.
  html_page_info = dict()
  for result in results:
    if image_num_list is not None:
      (image_nums, html_page_info_res) = result.get()
      html_page_info.update(html_page_info_res)
      print("Done filtering slides: %s" % image_nums)
    else:
      (start_ind, end_ind, html_page_info_res) = result.get()
      html_page_info.update(html_page_info_res)
      if (start_ind == end_ind):
        print("Done filtering slide %d" % start_ind)
      else:
        print("Done filtering slides %d through %d" %
           (start_ind, end_ind))
  if html:
    generate_filter_html_result(html_page_info)
  print("Time to apply filters to all images (multiprocess): %s\n" %
     str(timer.elapsed()))
# if __name__ == "__main__":
# slide.training_slide_to_image(2)
# singleprocess_apply_filters_to_images(image_num_list=[2], display=True)
# singleprocess_apply_filters_to_images()
# multiprocess_apply_filters_to_images()
| 35.644547
| 123
| 0.651435
|
4a10d98ded44ebfbdb2cf0610bcbeb6426d83afc
| 9,974
|
py
|
Python
|
python/foglamp/plugins/south/roxtec/roxtec.py
|
foglamp/foglamp-south-roxtec
|
7978d13a89e4727e1720e6cdd756d59974f1f180
|
[
"Apache-2.0"
] | null | null | null |
python/foglamp/plugins/south/roxtec/roxtec.py
|
foglamp/foglamp-south-roxtec
|
7978d13a89e4727e1720e6cdd756d59974f1f180
|
[
"Apache-2.0"
] | null | null | null |
python/foglamp/plugins/south/roxtec/roxtec.py
|
foglamp/foglamp-south-roxtec
|
7978d13a89e4727e1720e6cdd756d59974f1f180
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
# FOGLAMP_BEGIN
# See: http://foglamp.readthedocs.io/
# FOGLAMP_END
"""HTTP Listener handler for Roxtec transit data"""
import asyncio
import copy
import os
import ssl
import logging
import datetime
from threading import Thread
from aiohttp import web
from foglamp.common import logger
from foglamp.common.web import middleware
from foglamp.plugins.common import utils
import async_ingest
__author__ = "Mark Riddoch, Ashish Jabble"
__copyright__ = "Copyright (c) 2018 Dianomic Systems"
__license__ = "Apache 2.0"
__version__ = "${VERSION}"
_LOGGER = logger.setup(__name__, level=logging.INFO)
_FOGLAMP_DATA = os.getenv("FOGLAMP_DATA", default=None)
_FOGLAMP_ROOT = os.getenv("FOGLAMP_ROOT", default='/usr/local/foglamp')
c_callback = None
c_ingest_ref = None
loop = None
t = None
_DEFAULT_CONFIG = {
'plugin': {
'description': 'Roxtec South Plugin',
'type': 'string',
'default': 'roxtec',
'readonly': 'true'
},
'port': {
'description': 'Port to listen on',
'type': 'integer',
'default': '8608',
'order': '2',
'displayName': 'Port'
},
'httpsPort': {
'description': 'Port to accept HTTPS connections on',
'type': 'integer',
'default': '1608',
'order': '5',
'displayName': 'Https Port'
},
'enableHttp': {
'description': 'Enable HTTP connections',
'type': 'boolean',
'default': 'false',
'order': '4',
'displayName': 'Enable Http'
},
'certificateName': {
'description': 'Certificate file name',
'type': 'string',
'default': 'foglamp',
'order': '6',
'displayName': 'Certificate Name'
},
'host': {
'description': 'Address to accept data on',
'type': 'string',
'default': '0.0.0.0',
'order': '1',
'displayName': 'Host'
},
'uri': {
'description': 'URI to accept data on',
'type': 'string',
'default': 'transit',
'order': '3',
'displayName': 'URI'
}
}
def plugin_info():
    """Return the plugin metadata dict consumed by the FogLAMP south service."""
    return dict(
        name='Roxtec Transit',
        version='1.7.0',
        mode='async',
        type='south',
        interface='1.0',
        config=_DEFAULT_CONFIG,
    )
def plugin_init(config):
    """Initialises the Roxtec Transit south plugin.

    Args:
        config: JSON configuration document for the South plugin configuration category
    Returns:
        handle: independent deep copy of the configuration, used in future
        calls to the plugin (so later mutations don't touch the input).
    """
    return copy.deepcopy(config)
def plugin_start(data):
    """Start the HTTP/HTTPS listener on a dedicated event-loop thread.

    Args:
        data: plugin handle returned by plugin_init; mutated in place to
            record the aiohttp app, handler and (asynchronously) the server.
    """
    global loop, t
    try:
        host = data['host']['value']
        port = data['port']['value']
        uri = data['uri']['value']
        loop = asyncio.new_event_loop()
        app = web.Application(middlewares=[middleware.error_middleware], loop=loop)
        # Both PUT and POST on the configured URI go to the same handler.
        app.router.add_route('PUT', '/{}'.format(uri), RoxtecTransitIngest.render_put)
        app.router.add_route('POST', '/{}'.format(uri), RoxtecTransitIngest.render_put)
        handler = app.make_handler(loop=loop)
        # SSL context
        ssl_ctx = None
        # HTTPS is used exactly when plain HTTP is disabled in the config.
        is_https = True if data['enableHttp']['value'] == 'false' else False
        if is_https:
            port = data['httpsPort']['value']
            cert_name = data['certificateName']['value']
            ssl_ctx = ssl.create_default_context(ssl.Purpose.CLIENT_AUTH)
            cert, key = get_certificate(cert_name)
            _LOGGER.info('Loading TLS certificate %s and key %s', cert, key)
            ssl_ctx.load_cert_chain(cert, key)
        server_coro = loop.create_server(handler, host, port, ssl=ssl_ctx)
        future = asyncio.ensure_future(server_coro, loop=loop)
        data['app'] = app
        data['handler'] = handler
        data['server'] = None
        def f_callback(f):
            # _LOGGER.info(repr(f.result()))
            """ <Server sockets=
            [<socket.socket fd=17, family=AddressFamily.AF_INET, type=2049,proto=6, laddr=('0.0.0.0', 6683)>]>"""
            # Store the created Server on the handle so plugin_shutdown
            # can close it later.
            data['server'] = f.result()
        future.add_done_callback(f_callback)
        def run():
            global loop
            loop.run_forever()
        # Run the event loop on a background thread so plugin_start returns.
        t = Thread(target=run)
        t.start()
    except Exception as e:
        _LOGGER.exception(str(e))
def plugin_reconfigure(handle, new_config):
    """ Reconfigures the plugin
    it should be called when the configuration of the plugin is changed during the operation of the South service;
    The new configuration category should be passed.
    Implemented as a full restart: shut down the running listener, then
    re-initialise and start with the new configuration.
    Args:
        handle: handle returned by the plugin initialisation call
        new_config: JSON object representing the new configuration category for the category
    Returns:
        new_handle: new handle to be used in the future calls
    Raises:
    """
    _LOGGER.info("Old config for Roxtec plugin {} \n new config {}".format(handle, new_config))
    global loop
    plugin_shutdown(handle)
    new_handle = plugin_init(new_config)
    plugin_start(new_handle)
    return new_handle
def plugin_shutdown(handle):
    """ Shutdowns the plugin doing required cleanup, to be called prior to the South service being shut down.
    Args:
        handle: handle returned by the plugin initialisation call
    Returns:
    Raises:
        Re-raises any exception encountered during teardown (after logging).
    """
    _LOGGER.info('Roxtec Transit plugin shutting down.')
    global loop
    try:
        app = handle['app']
        handler = handle['handler']
        server = handle['server']
        # Close the listening server first so no new connections arrive,
        # then schedule app/handler shutdown on the plugin's own loop.
        if server:
            server.close()
            asyncio.ensure_future(server.wait_closed(), loop=loop)
            asyncio.ensure_future(app.shutdown(), loop=loop)
            asyncio.ensure_future(handler.shutdown(60.0), loop=loop)
            asyncio.ensure_future(app.cleanup(), loop=loop)
        # Stop the background event loop started by plugin_start.
        loop.stop()
    except Exception as e:
        _LOGGER.exception(str(e))
        raise
def plugin_register_ingest(handle, callback, ingest_ref):
    """Required plugin interface component to communicate to South C server.

    Stores the opaque C objects in module globals for later use by the
    request handler.

    Args:
        handle: handle returned by the plugin initialisation call
        callback: C opaque object required to passed back to C->ingest method
        ingest_ref: C opaque object required to passed back to C->ingest method
    """
    global c_callback, c_ingest_ref
    c_callback, c_ingest_ref = callback, ingest_ref
def get_certificate(cert_name):
    """Resolve the TLS certificate/key file pair for *cert_name*.

    Looks in $FOGLAMP_DATA/etc/certs, or $FOGLAMP_ROOT/data/etc/certs when
    FOGLAMP_DATA is unset. Falls back to the default 'foglamp' pair when the
    named files are missing.

    Args:
        cert_name: base name of the certificate files (without extension).
    Returns:
        Tuple of (certificate file path, key file path).
    Raises:
        RuntimeError: if neither the named nor the default pair exists.
    """
    if _FOGLAMP_DATA:
        certs_dir = os.path.expanduser(_FOGLAMP_DATA + '/etc/certs')
    else:
        certs_dir = os.path.expanduser(_FOGLAMP_ROOT + '/data/etc/certs')
    cert = certs_dir + '/{}.cert'.format(cert_name)
    key = certs_dir + '/{}.key'.format(cert_name)
    if not os.path.isfile(cert) or not os.path.isfile(key):
        _LOGGER.warning("%s certificate files are missing. Hence using default certificate.", cert_name)
        cert = certs_dir + '/foglamp.cert'
        key = certs_dir + '/foglamp.key'
    if not os.path.isfile(cert) or not os.path.isfile(key):
        _LOGGER.error("Certificates are missing")
        # Include a message so callers see why startup failed; the original
        # raised a bare RuntimeError with no context.
        raise RuntimeError("Certificate files are missing in {}".format(certs_dir))
    return cert, key
class RoxtecTransitIngest(object):
    """Handles incoming sensor readings from Roxtec Transit Listener"""
    @staticmethod
    async def render_put(request):
        """Store sensor readings from Roxtec to FogLAMP
        Args:
            request:
        The payload block decodes to JSON similar to the following:
        .. code-block:: python
            {
            "guard_id": "444DF705F0F8",
            "gateway_id": "device-0",
            "state": 70,
            "transit_id": "t11",
            "battery": 4,
            "pressure": 722,
            "temperature": 0,
            "last_seen": 1533816739126
            }
        Example:
            curl --insecure -X PUT https://localhost:1608/transit -d '[{ "guard_id": "444DF705F0F8", "gateway_id": "device-0", "state": 70, "transit_id": "t11", "battery": 4, "pressure": 722, "temperature": 0, "last_seen": 1533816739126 }]'
            curl -X PUT http://localhost:8608/transit -d '[{ "guard_id": "444DF705F0F8", "gateway_id": "device-0", "state": 70, "transit_id": "t11", "battery": 4, "pressure": 722, "temperature": 0, "last_seen": 1533816739126 }]'
        """
        try:
            message = {'result': 'success'}
            payload_block = await request.json()
            if type(payload_block) is not list:
                raise ValueError('Payload block must be a valid list')
            for payload in payload_block:
                # One asset per guard device.
                asset = "Guard " + payload['guard_id']
                # 'last_seen' is epoch milliseconds; convert to seconds
                # for datetime.fromtimestamp.
                epoch_ms = payload['last_seen'] / 1000.0
                timestamp = datetime.datetime.fromtimestamp(epoch_ms).strftime('%Y-%m-%d %H:%M:%S.%f')
                readings = {
                    "gateway_id": payload['gateway_id'],
                    "state": payload['state'],
                    "battery": payload['battery'],
                    "pressure": payload['pressure'],
                    "temperature": payload['temperature']
                }
                # 'transit_id' is optional in the payload.
                if 'transit_id' in payload and payload['transit_id'] is not None:
                    readings['transit_id'] = payload['transit_id']
                data = {
                    'asset': asset,
                    'timestamp': timestamp,
                    'readings': readings
                }
                # Hand the reading to the C ingest layer registered via
                # plugin_register_ingest.
                async_ingest.ingest_callback(c_callback, c_ingest_ref, data)
        except (KeyError, ValueError, TypeError) as e:
            # Malformed payloads map to 400.
            _LOGGER.exception("%d: %s", web.HTTPBadRequest.status_code, e)
            raise web.HTTPBadRequest(reason=e)
        except Exception as ex:
            # Anything else maps to 500.
            _LOGGER.exception("%d: %s", web.HTTPInternalServerError.status_code, str(ex))
            raise web.HTTPInternalServerError(reason=str(ex))
        return web.json_response(message)
| 32.07074
| 240
| 0.593042
|
4a10da36514cd9855df971a413996b3b8fa4d981
| 347
|
py
|
Python
|
tests/trace_test.py
|
al-fontes-jr/bardolph
|
209bba49765c729d8f1479903593043cef274aab
|
[
"Apache-2.0"
] | null | null | null |
tests/trace_test.py
|
al-fontes-jr/bardolph
|
209bba49765c729d8f1479903593043cef274aab
|
[
"Apache-2.0"
] | 16
|
2020-06-15T11:04:10.000Z
|
2022-03-28T05:39:10.000Z
|
tests/trace_test.py
|
al-fontes-jr/bardolph
|
209bba49765c729d8f1479903593043cef274aab
|
[
"Apache-2.0"
] | 1
|
2020-06-24T02:01:04.000Z
|
2020-06-24T02:01:04.000Z
|
#!/usr/bin/env python
from bardolph.lib.trace import trace_call, trace_call_enable
_output = ""
def test_callback(msg):
_output += msg
class TraceTest:
    """Example class whose method is wrapped by the trace_call decorator."""
    @trace_call
    def fn1(self, x, y):
        # The body only prints its arguments; the @trace_call decorator is
        # presumably what emits the trace output when tracing is enabled —
        # see trace_call_enable in main(). TODO confirm decorator semantics.
        print("This is fn1:", x, y)
def main():
    """Enable call tracing and invoke the traced example method once."""
    trace_call_enable(True)
    TraceTest().fn1("a", "b")
# Script entry point.
if __name__ == '__main__':
    main()
| 16.52381
| 60
| 0.634006
|
4a10db97dd8920532b664b998f47af70e87fac24
| 7,506
|
py
|
Python
|
schwarz/mailqueue/testutils.py
|
FelixSchwarz/mailqueue-runner
|
8e1a53ac4b363ced55636b1dda042c605386fc81
|
[
"0BSD"
] | 3
|
2019-12-15T18:17:16.000Z
|
2022-03-18T23:00:49.000Z
|
schwarz/mailqueue/testutils.py
|
FelixSchwarz/mailqueue-runner
|
8e1a53ac4b363ced55636b1dda042c605386fc81
|
[
"0BSD"
] | 24
|
2018-10-12T21:13:00.000Z
|
2020-11-12T23:03:31.000Z
|
schwarz/mailqueue/testutils.py
|
FelixSchwarz/mailqueue-runner
|
8e1a53ac4b363ced55636b1dda042c605386fc81
|
[
"0BSD"
] | null | null | null |
# -*- coding: utf-8 -*-
# SPDX-License-Identifier: MIT
from __future__ import absolute_import, print_function, unicode_literals
from email.message import Message
from io import BytesIO
import logging
import os
from pymta import SMTPCommandParser
from pymta.test_util import BlackholeDeliverer
from schwarz.log_utils import ForwardingLogger
from .maildir_utils import move_message
from .queue_runner import enqueue_message, MaildirBackedMsg
from .smtpclient import SMTPClient
__all__ = [
'assert_did_log_message',
'create_ini',
'fake_smtp_client',
'info_logger',
'inject_example_message',
'SocketMock',
]
def message():
    """Return a minimal example ``Message`` with one header and a body."""
    example = Message()
    example.set_payload('MsgBody')
    example['Header'] = 'somevalue'
    return example
def inject_example_message(queue_path, sender=b'foo@site.example', recipient=None, recipients=None, msg_bytes=None, target_folder='new', queue_date=None):
    """Enqueue an example message and return it as a MaildirBackedMsg.

    Either *recipient* (single) or *recipients* (iterable) may be given, not
    both; when neither is given a default recipient is used. When *msg_bytes*
    is omitted the example ``message()`` fixture is enqueued instead.
    """
    msg_bytes = message() if msg_bytes is None else msg_bytes
    if recipient and recipients:
        raise ValueError('inject_example_message() got conflicting parameters: recipient=%r, recipients=%r' % (recipient, recipients))
    if (recipient is None) and (recipients is None):
        recipients = (b'bar@site.example',)
    elif recipient:
        recipients = (recipient,)
    queued_path = enqueue_message(msg_bytes, queue_path, sender, recipients, queue_date=queue_date)
    if target_folder != 'new':
        queued_path = move_message(queued_path, target_folder=target_folder, open_file=False)
    return MaildirBackedMsg(queued_path)
def create_ini(hostname, port, fs=None):
    """Build a minimal mqrunner config.

    Returns the config text when *fs* is falsy; otherwise writes it to
    'config.ini' on the (fake) filesystem and returns the file path.
    """
    config_str = '[mqrunner]\nsmtp_hostname = %s\nsmtp_port = %d' % (hostname, port)
    if not fs:
        return config_str
    config_path = fs.create_file('config.ini', contents=config_str.encode('ascii'))
    return str(config_path.path)
# --- helpers to capture/check logged messages --------------------------------
def info_logger(log_capture):
    """Return a capture-backed logger that forwards INFO and above."""
    return get_capture_logger(log_capture, logging.INFO)
def get_capture_logger(log_capture, level):
    """Build a ForwardingLogger whose records end up in *log_capture*.

    A throwaway backing logger is attached to the capture, and a
    ForwardingLogger forwards records at or above *level* to it.
    """
    backing_logger = logging.Logger('__dummy__')
    connect_to_log_capture(backing_logger, log_capture)
    return ForwardingLogger(forward_to=backing_logger, forward_minlevel=level)
def connect_to_log_capture(logger, log_capture):
    """Install *log_capture* as the sole handler of *logger*.

    Mirrors testfixtures' LogCapture.install() so the capture records the
    logger's output; the previous handler/level state is stashed in
    ``lc.old`` (keys defined by testfixtures, including its historical
    'progagate' spelling — do not "fix" that key).
    """
    lc = log_capture
    name = logger.name
    # -------------------------------------------------------------------------
    # code copied (with small adaptations) from Simplistix/testfixtures (MIT)
    # LogCapture.install() in testfixtures/logcapture.py (git 61683a80)
    lc.old['levels'][name] = logger.level
    lc.old['handlers'][name] = logger.handlers
    lc.old['disabled'][name] = logger.disabled
    lc.old['progagate'][name] = logger.propagate
    logger.setLevel(lc.level)
    logger.handlers = [lc]
    logger.disabled = False
    if lc.propagate is not None:
        logger.propagate = lc.propagate
    lc.instances.add(lc)
    # -------------------------------------------------------------------------
# -------------------------------------------------------------------------
def assert_did_log_message(log_capture, expected_msg):
    """Raise AssertionError unless *expected_msg* was logged into *log_capture*."""
    records = log_capture.records
    if not records:
        raise AssertionError('no messages logged')
    logged = [record.msg for record in records]
    if expected_msg not in logged:
        raise AssertionError('message not logged: "%s" - did log %s' % (expected_msg, logged))
# --- test helpers to simulate a SMTP server ----------------------------------
# Prefer the stdlib mock (Python 3); fall back to the standalone package.
try:
    from unittest import mock
except ImportError:
    try:
        import mock
    except ImportError:
        # Python 2 without "mock" library installed:
        # "stub_socket_creation()" will fail but at least users can import
        # this module/use other functionality.
        mock = None
def stub_socket_creation(socket_mock):
    """Patch smtplib's socket creation so it yields *socket_mock*.

    Returns a mock.patch object targeting the vendored smtplib's
    ``socket.create_connection``; while active, "connections" return the
    mock (or whatever a test's 'connect' override produces/raises).
    """
    connect_override = socket_mock._overrides.get('connect', None)
    def mock_create_connection(host_port, timeout, source_address):
        # A test-supplied 'connect' override wins (it may raise to simulate
        # connection failures).
        if connect_override:
            return connect_override()
        return socket_mock
    socket_func = 'schwarz.mailqueue.lib.smtplib_py37.socket.create_connection'
    if mock is None:
        raise ValueError('Please install the "mock" library.')
    return mock.patch(socket_func, new=mock_create_connection)
def fake_smtp_client(socket_mock=None, policy=None, overrides=None, **client_args):
    """Build an SMTPClient whose socket is an in-process SocketMock (no network).

    Extra keyword args in *client_args* are forwarded to SMTPClient unchanged.
    """
    if socket_mock is None:
        socket_mock = SocketMock(policy=policy, overrides=overrides)
    hostname = 'site.invalid'
    has_connect_override = ('connect' in socket_mock._overrides)
    # Empty host delays connecting (see the comment below).
    remote_host = hostname if not has_connect_override else ''
    with stub_socket_creation(socket_mock):
        # by default SMTPClient tries to open a connection in "__init__()" when
        # the "host" parameter is specified.
        # If the test tries to override "connect" we delay the connection:
        # Some tests might want to simulate exceptions during ".connect()" and
        # it is much nicer if these exceptions are raised later (even though
        # the caller must stub out the "socket.create_connection()" function
        # again).
        client = SMTPClient(host=remote_host, port=123, **client_args)
    if has_connect_override:
        # Restore the intended host/server so a later explicit connect works.
        client._host = hostname
        client.server = socket_mock
    return client
class FakeChannel(object):
    """Minimal stand-in for pymta's channel that records server responses."""
    def __init__(self):
        # When True, write() becomes a no-op (lets tests drop server output).
        self._ignore_write_operations = False
        self.server_responses = []
    def write(self, data_bytes):
        if not self._ignore_write_operations:
            self.server_responses.append(data_bytes)
    def close(self):
        # Nothing to release for the fake channel.
        pass
    def drain_responses(self):
        """Return all buffered responses as ASCII bytes, emptying the buffer."""
        pieces = []
        while self.server_responses:
            pieces.append(self.server_responses.pop(0))
        return ''.join(pieces).encode('ASCII')
class SocketMock(object):
    """Socket replacement that answers SMTP commands via an in-process pymta
    SMTPCommandParser — no real network I/O takes place."""
    def __init__(self, policy=None, overrides=None, authenticator=None):
        self.command_parser = None
        self.deliverer = BlackholeDeliverer()
        self.channel = FakeChannel()
        self.policy = policy
        # Buffer of server reply bytes, filled lazily from the channel.
        self.reply_data = None
        # This attribute is actually not used in the SocketMock itself but it
        # simplifies some test code where we need to store some information to
        # "override" default behaviors.
        # Instead of adding yet another "state" variable just keep it here.
        self._overrides = overrides or {}
        self._authenticator = authenticator
    @property
    def received_messages(self):
        # Messages "delivered" by the fake server.
        return self.deliverer.received_messages
    # --- "socket" API --------------------------------------------------------
    def makefile(self, *args):
        # smtplib calls makefile() to get a file-like object; set up the
        # command parser lazily here and serve reads from self.
        self.command_parser = SMTPCommandParser(
            self.channel,
            '127.0.0.1', 2525,
            self.deliverer,
            policy=self.policy,
            authenticator=self._authenticator,
        )
        self.reply_data = BytesIO()
        return self
    def readline(self, size):
        # Pull any pending server output into the buffer before reading.
        self._drain_responses()
        return self.reply_data.readline(size)
    def sendall(self, data):
        # Client bytes are fed straight into the SMTP command parser.
        if isinstance(data, bytes):
            data = data.decode('ASCII')
        self.command_parser.process_new_data(data)
    def close(self):
        pass
    def _drain_responses(self):
        # Append new server output at the end of the buffer while preserving
        # the current read position.
        reply_bytes = self.channel.drain_responses()
        previous_position = self.reply_data.tell()
        self.reply_data.seek(0, os.SEEK_END)
        self.reply_data.write(reply_bytes)
        self.reply_data.seek(previous_position, os.SEEK_SET)
| 34.589862
| 154
| 0.66307
|
4a10dc092692424bb5fc29241e84b3add6a70fbc
| 5,748
|
py
|
Python
|
src/Constrained_PG_CartPole.py
|
bva-bme/Constrained_Policy_Gradient
|
2331f55ff3bf06e2276662517c34cc45d5a51da8
|
[
"MIT"
] | null | null | null |
src/Constrained_PG_CartPole.py
|
bva-bme/Constrained_Policy_Gradient
|
2331f55ff3bf06e2276662517c34cc45d5a51da8
|
[
"MIT"
] | null | null | null |
src/Constrained_PG_CartPole.py
|
bva-bme/Constrained_Policy_Gradient
|
2331f55ff3bf06e2276662517c34cc45d5a51da8
|
[
"MIT"
] | null | null | null |
from PolicyNet import NeuralNet
from Functions import select_action, compute_gain, compute_logprobs, compute_policy_evolution_safety_region_simplified,\
compute_policy_evolution_safety_eq, compute_policy_evolution_safety_ineq, safe_states_cartpole, \
safe_triangle_cartpole, compute_learning_stats, plot_data
import gym
import torch
import numpy as np
import pickle
# setting device on GPU if available, else CPU
if torch.cuda.is_available():
    device = torch.device('cuda')
    print("GPU available.")
else:
    device = torch.device('cpu')
    print("GPU not available.")
# Constraint type selector used by main():
# 0: equality
# 1: inequality
# 2: regional
CONSTRAINT_TYPE = 1
def main():
    """Run constrained policy-gradient training on CartPole-v0.

    Trains num_seeds independent agents for num_episodes episodes each,
    applying the safety constraint selected by CONSTRAINT_TYPE during the
    first num_constr_ep episodes, then plots averaged learning statistics.
    """
    # Set up the environment
    env = gym.make('CartPole-v0')
    state_num = 4
    action_num = 2
    num_episodes = 200
    num_seeds = 5
    num_constr_ep = 50
    discount_factor = 1
    n_win_ticks = 195
    learning_rate = 0.0001
    hidden_width = 5000
    # Set up the constraints
    if CONSTRAINT_TYPE == 0:  # Equality
        safe_state_batch, safe_action_batch = safe_states_cartpole()
    elif CONSTRAINT_TYPE == 1:  # Inequality
        safe_state_batch, safe_action_batch = safe_states_cartpole()
    elif CONSTRAINT_TYPE == 2:  # Regional
        safe_state_batch0, safe_action_batch0 = safe_triangle_cartpole(0)
        safe_state_batch1, safe_action_batch1 = safe_triangle_cartpole(1)
        safe_state_batch = safe_state_batch0 + safe_state_batch1
        safe_action_batch = safe_action_batch0 + safe_action_batch1
    else:
        safe_state_batch = []
        safe_action_batch = []
    # Logging
    gains_logged = []
    avg_gains_logged = []
    loss_logged = []
    # Random seeds
    for i in range(num_seeds):
        # Create the agent
        agent = NeuralNet(state_num, action_num, hidden_width, learning_rate)
        episode_gains = []
        mean_score = []
        e = 0
        # Run a trajectory with the current policy
        while e < num_episodes:
            reward_batch = []
            logprob_batch = []
            action_batch = []
            state_batch = []
            state = env.reset()
            done = False
            while not done:
                action, log_prob = select_action(agent, state)
                state_batch.append(torch.from_numpy(state).float().unsqueeze(0))
                state, reward, done, _ = env.step(action)
                reward_batch.append(reward)
                action_batch.append(action)
                logprob_batch.append(log_prob)
            # Update the policy
            gain_batch = compute_gain(reward_batch, discount_factor)
            state_batch_tensor = torch.cat(state_batch)
            if e < num_constr_ep:  # to speed up learning, train only for the first x episodes.
                if CONSTRAINT_TYPE == 0:  # Equality
                    gain_learn = compute_policy_evolution_safety_eq(agent, state_batch_tensor, action_batch,
                                                                    gain_batch, safe_state_batch, safe_action_batch,
                                                                    device)
                    safe_logprobs = compute_logprobs(agent, safe_state_batch, safe_action_batch)
                elif CONSTRAINT_TYPE == 1:  # Inequality
                    gain_learn = compute_policy_evolution_safety_ineq(agent, state_batch_tensor, action_batch,
                                                                      gain_batch, safe_state_batch, safe_action_batch,
                                                                      learning_rate, device)
                    safe_logprobs = compute_logprobs(agent, safe_state_batch, safe_action_batch)
                elif CONSTRAINT_TYPE == 2:  # Regional
                    gain_learn, safe_state_batch_reg, safe_action_batch_reg, dpx_ = \
                        compute_policy_evolution_safety_region_simplified(agent, state_batch_tensor, action_batch,
                                                                          gain_batch, safe_state_batch,
                                                                          safe_action_batch, learning_rate, device)
                    safe_logprobs = compute_logprobs(agent, safe_state_batch_reg, safe_action_batch_reg)
            else:
                # After the constrained phase: train unconstrained (empty
                # safety batches).
                gain_learn = []
                safe_state_batch = []
                safe_action_batch = []
                safe_logprobs = []
            logprob_learn = logprob_batch + safe_logprobs
            # Train with the safety appended data
            agent.train_network(logprob_learn, gain_learn, 1)
            episode_gains.append(len(reward_batch))
            print("Episode ", e, " gain: ", len(reward_batch))
            # Check if won
            if e > 100:
                # Rolling mean over the last 100 episodes (CartPole "solved"
                # criterion is mean >= n_win_ticks).
                mean_score.append(np.mean(episode_gains[-100:]))
                if mean_score[-1] >= n_win_ticks:
                    print('Ran {} episodes. Solved after {} trials'.format(e, e - 100))
            else:
                mean_score.append(np.mean(episode_gains))
            e = e + 1
        loss_logged.append(agent.loss_arr)
        gains_logged.append(episode_gains)
        avg_gains_logged.append(mean_score)
        # Save the trained agent for the last seed.
        if e == num_episodes and i == num_seeds-1:
            pickle.dump(agent, open("cartpole_agent_ineq.p", "wb"))
        del agent
    env.close()
    # Plot learning stats
    data = compute_learning_stats(avg_gains_logged)
    plot_data(data, n_win_ticks)
if __name__ == '__main__':
    main()
| 38.837838
| 121
| 0.573417
|
4a10dc4faa01b920bf8ee6df80d057f69429037d
| 1,053
|
py
|
Python
|
projects/practice/main.py
|
jeongroseok/person-reid-research
|
f70463c6f55bb72d7c6376811d002c9d48d490f6
|
[
"MIT"
] | null | null | null |
projects/practice/main.py
|
jeongroseok/person-reid-research
|
f70463c6f55bb72d7c6376811d002c9d48d490f6
|
[
"MIT"
] | null | null | null |
projects/practice/main.py
|
jeongroseok/person-reid-research
|
f70463c6f55bb72d7c6376811d002c9d48d490f6
|
[
"MIT"
] | null | null | null |
from torchreid.data import datamanager
import torchreid
import torch
if __name__ == '__main__':
    # Data manager: loads the GRID dataset for both training and testing.
    data_manager = torchreid.data.ImageDataManager(
        root='data',
        sources='grid',
        targets='grid',
        batch_size_train=64,
        workers=0,
        transforms=['random_flip', 'random_crop'],
        train_sampler='RandomIdentitySampler'  # this is important
    )
    # Small backbone, trained from scratch (pretrained=False); requires CUDA.
    model = torchreid.models.build_model(
        name='resnet18',
        num_classes=data_manager.num_train_pids,
        loss='softmax',
        pretrained=False
    ).cuda()
    # torchreid.utils.load_pretrained_weights(
    #     model, './log/model/model.pth.tar-3'
    # )
    optimizer = torchreid.optim.build_optimizer(model, optim='adam', lr=0.0003)
    scheduler = torchreid.optim.build_lr_scheduler(optimizer, stepsize=20)
    engine = torchreid.engine.ImageSoftmaxEngine(
        data_manager, model, optimizer, scheduler=scheduler
    )
    # engine.run(max_epoch=1, print_freq=1)
    # Evaluation-only run: computes ranking metrics and saves visrank images.
    engine.run(test_only=True, visrank=True)
    print('done')
| 27.710526
| 79
| 0.666667
|
4a10dd1aae146b4ddfb4671a643932da8343ad81
| 19,732
|
py
|
Python
|
libcpab/libcpab/core/tesselation.py
|
BGU-CS-VIL/CDTNCA
|
f46c490811c57d27dda196e64330c002d42e3824
|
[
"MIT"
] | null | null | null |
libcpab/libcpab/core/tesselation.py
|
BGU-CS-VIL/CDTNCA
|
f46c490811c57d27dda196e64330c002d42e3824
|
[
"MIT"
] | null | null | null |
libcpab/libcpab/core/tesselation.py
|
BGU-CS-VIL/CDTNCA
|
f46c490811c57d27dda196e64330c002d42e3824
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
Created on Sun Nov 18 14:23:25 2018
@author: nsde
"""
# %%
import numpy as np
from .utility import make_hashable, check_if_file_exist, null, save_obj, load_obj
# %%
class Tesselation(object):
    """ Base tesselation class. This class is not meant to be instantiated
    directly; it describes the structure that must be implemented in
    1D, 2D, and 3D. Additionally, some functionality is shared across
    the different dimensions.
    Args:
        nc: list with number of cells
        domain_min: value of the lower bound(s) of the domain
        domain_max: value of the upper bound(s) of the domain
        zero_boundary: bool, if true the velocity is zero on the boundary
        volume_perservation: bool, if true volume is perserved
    Methods that should not be implemented in subclasses:
        @get_cell_centers:
        @create_continuity_constrains:
        @create_zero_trace_constrains:
    Methods that should be implemented in subclasses:
        @find_verts:
        @find_verts_outside:
        @create_zero_boundary_constrains:
    """
    def __init__(self, nc, domain_min, domain_max,
                 zero_boundary=True, volume_perservation=False, circularity=False,
                 direc=None, override=False):
        """ Initilization of the class that creates the constrain matrix L
        Arguments:
            nc: list, number of cells in each dimension
            domain_min: list, lower domain bound in each dimension
            domain_max: list, upper domain bound in each dimension
            zero_boundary: bool, determines if the velocity at the boundary is zero
            volume_perservation: bool, determine if the transformation is
                volume perservating
            circularity: bool, if true adds wrap-around (periodicity) constrains
            direc: string, where to store the basis
            override: bool, determines if we should calculate the basis even
                if it already exists
        """
        # Save parameters
        self.nc = nc
        self.domain_min = domain_min
        self.domain_max = domain_max
        self.zero_boundary = zero_boundary
        self.volume_perservation = volume_perservation
        self.circularity = circularity
        self.dir = direc
        # Cache filename encodes every option that changes the basis, so a
        # stale basis is never loaded for a different configuration.
        # 'vo' (valid outside) is by construction the negation of 'zb'.
        # NOTE(review): direc defaults to None; the concatenation below assumes
        # a directory string ending in '/' was passed — confirm callers do so.
        self._basis_file = self.dir + \
            'cpab_basis_dim' + str(len(self.nc)) + '_tess' + \
            '_'.join([str(e) for e in self.nc]) + '_' + \
            'vo' + str(int(not self.zero_boundary)) + '_' + \
            'zb' + str(int(self.zero_boundary)) + '_' + \
            'vp' + str(int(self.volume_perservation)) + '_' + \
            'cr' + str(int(self.circularity))
        # Check if file exist else calculate the basis
        if not check_if_file_exist(self._basis_file + '.pkl') or override:
            # Get vertices
            self.find_verts()
            # Find shared vertices
            self.find_shared_verts()
            # find auxility vertices, if transformation is valid outside
            if not zero_boundary: self.find_verts_outside()
            # Get continuity constrains
            self.L = self.create_continuity_constrains()
            # If zero boundary, add constrains
            if zero_boundary:
                temp = self.create_zero_boundary_constrains()
                self.L = np.concatenate((self.L, temp), axis=0)
            # If volume perservation, add constrains
            if volume_perservation:
                temp = self.create_zero_trace_constrains()
                self.L = np.concatenate((self.L, temp), axis=0)
            # If circularity, add wrap-around constrains
            if circularity:
                temp = self.create_circularity_constrains()
                self.L = np.concatenate((self.L, temp), axis=0)
            # Find null space: any parameter vector in the span of B satisfies
            # all constrains in L by construction.
            self.B = null(self.L)
            # Save to file (the whole instance dict, so loading restores state)
            save_obj(self.__dict__, self._basis_file)
        else:
            # Basis already computed for this exact configuration -> load it
            self.__dict__ = load_obj(self._basis_file)
    def get_cell_centers(self):
        """ Get the centers of all the cells """
        # Mean over each cell's vertices; the slice drops the trailing
        # homogeneous coordinate.
        return np.mean(self.verts[:, :, :self.ndim], axis=1)
    def find_verts(self):
        """ Function that should find the different vertices of all cells in
        the tesselation """
        raise NotImplementedError
    def find_shared_verts(self):
        """ Find pairs of cells that share ndim-vertices. It is these pairs,
        where we need to add continuity constrains at """
        # Iterate over all pairs of cell to find cells with intersecting cells
        # NOTE: O(nC^2) pairwise scan — fine for small tesselations.
        shared_v, shared_v_idx = [], []
        for i in range(self.nC):
            for j in range(self.nC):
                if i != j:
                    vi = make_hashable(self.verts[i])
                    vj = make_hashable(self.verts[j])
                    shared_verts = set(vi).intersection(vj)
                    # Exactly ndim shared vertices = a shared boundary;
                    # store each unordered pair (i, j) only once.
                    if len(shared_verts) == self.ndim and (j, i) not in shared_v_idx:
                        shared_v.append(list(shared_verts)[:self.ndim])
                        shared_v_idx.append((i, j))
        # Save result
        self.shared_v = np.asarray(shared_v)
        self.shared_v_idx = shared_v_idx
    def find_verts_outside(self):
        """ If the transformation should be valid outside, this function should
        add additional auxilliry points to the tesselation that secures
        continuity outside the domain """
        raise NotImplementedError
    def create_continuity_constrains(self):
        """ This function goes through all pairs (i,j) of cells that share a
        boundary. In N dimension we need to add N*N constrains (one for each
        dimension times one of each vertex in the boundary) """
        Ltemp = np.zeros(shape=(0, self.n_params * self.nC))
        for idx, (i, j) in enumerate(self.shared_v_idx):
            for vidx in range(self.ndim):
                for k in range(self.ndim):
                    # One row per (shared vertex, output dimension): the
                    # affine maps of cells i and j must agree on that vertex.
                    index1 = self.n_params * i + k * (self.ndim + 1)
                    index2 = self.n_params * j + k * (self.ndim + 1)
                    row = np.zeros(shape=(1, self.n_params * self.nC))
                    row[0, index1:index1 + (self.ndim + 1)] = self.shared_v[idx][vidx]
                    row[0, index2:index2 + (self.ndim + 1)] = -self.shared_v[idx][vidx]
                    Ltemp = np.vstack((Ltemp, row))
        return Ltemp
    def create_zero_boundary_constrains(self):
        """ Function that creates a constrain matrix L, containing constrains that
        secure 0 velocity at the boundary """
        raise NotImplementedError
    def create_zero_trace_constrains(self):
        """ The volume perservation constrains, that corresponds to the trace
        of each matrix being 0. These can be written general for all dims."""
        Ltemp = np.zeros((self.nC, self.n_params * self.nC))
        # 'row' picks out the diagonal entries of one cell's ndim x (ndim+1)
        # affine matrix; their sum (the trace) is constrained to zero.
        row = np.concatenate((np.eye(self.ndim), np.zeros((self.ndim, 1))), axis=1).flatten()
        for c in range(self.nC):
            Ltemp[c, self.n_params * c:self.n_params * (c + 1)] = row
        return Ltemp
    def create_circularity_constrains(self):
        """ Wrap-around (periodicity) constrains; dimension specific. """
        raise NotImplementedError
# %%
class Tesselation1D(Tesselation):
    """1D tesselation: the interval is split into nc[0] equal cells, each
    carrying an affine velocity field with 2 parameters."""
    def __init__(self, nc, domain_min, domain_max,
                 zero_boundary=True, volume_perservation=False,
                 circularity=False, direc=None, override=False):
        # 1D parameters: every cell holds an affine map a*x + b
        self.n_params = 2
        self.nC = np.prod(nc)
        self.ndim = 1
        # Delegate the actual basis construction to the base class
        super(Tesselation1D, self).__init__(nc, domain_min, domain_max,
            zero_boundary, volume_perservation, circularity, direc, override)
    def find_verts(self):
        """Cell idx spans [grid[idx], grid[idx + 1]]; every vertex carries a
        trailing homogeneous coordinate of 1."""
        grid = np.linspace(self.domain_min[0], self.domain_max[0], self.nc[0] + 1)
        self.verts = np.asarray([((grid[idx], 1), (grid[idx + 1], 1))
                                 for idx in range(self.nc[0])])
        self.cells = list(range(self.nc[0]))
    def find_verts_outside(self):
        """Nothing to do: 1D continuity needs no auxiliary vertices."""
        pass
    def create_zero_boundary_constrains(self):
        """Two rows forcing zero velocity at both end points of the domain."""
        constrains = np.zeros((2, self.n_params * self.nC))
        constrains[0, :2] = [self.domain_min[0], 1]
        constrains[1, -2:] = [self.domain_max[0], 1]
        return constrains
    def create_circularity_constrains(self):
        """One row forcing equal velocity at the two end points (wrap-around)."""
        constrains = np.zeros((1, self.n_params * self.nC))
        constrains[0, :2] = [self.domain_min[0], 1]
        constrains[0, -2:] = [-self.domain_max[0], -1]
        return constrains
# %%
class Tesselation2D(Tesselation):
    """ 2D tesselation: each rectangular cell is split into 4 triangles that
    meet at the cell center; each triangle carries a 6-parameter affine map. """
    def __init__(self, nc, domain_min, domain_max,
                 zero_boundary=True, volume_perservation=False,
                 circularity=False, direc=None, override=False):
        # 2D parameters (each triangle: 2x3 affine matrix -> 6 parameters)
        self.n_params = 6
        self.nC = 4 * np.prod(nc)  # 4 triangle per cell
        self.ndim = 2
        # Initialize super class
        super(Tesselation2D, self).__init__(nc, domain_min, domain_max,
            zero_boundary, volume_perservation, circularity, direc, override)
    def find_verts(self):
        """ Triangle vertices in homogeneous coordinates (x, y, 1); each cell
        contributes its 4 triangles (center first, then two corners). """
        Vx = np.linspace(self.domain_min[0], self.domain_max[0], self.nc[0] + 1)
        Vy = np.linspace(self.domain_min[1], self.domain_max[1], self.nc[1] + 1)
        # Find cell index and verts for each cell
        cells, verts = [], []
        for i in range(self.nc[1]):
            for j in range(self.nc[0]):
                ul = tuple([Vx[j], Vy[i], 1])
                ur = tuple([Vx[j + 1], Vy[i], 1])
                ll = tuple([Vx[j], Vy[i + 1], 1])
                lr = tuple([Vx[j + 1], Vy[i + 1], 1])
                center = [(Vx[j] + Vx[j + 1]) / 2, (Vy[i] + Vy[i + 1]) / 2, 1]
                center = tuple(center)
                verts.append((center, ul, ur))  # order matters!
                verts.append((center, ur, lr))  # order matters!
                verts.append((center, lr, ll))  # order matters!
                verts.append((center, ll, ul))  # order matters!
                # cell index is (col, row, triangle): 0=top,1=right,2=bottom,3=left
                cells.append((j, i, 0))
                cells.append((j, i, 1))
                cells.append((j, i, 2))
                cells.append((j, i, 3))
        # Convert to array
        self.verts = np.asarray(verts)
        self.cells = cells
    def find_verts_outside(self):
        """ Add one auxiliary vertex per pair of adjacent boundary triangles so
        continuity also holds outside the domain. """
        shared_v, shared_v_idx = [], []
        # BUGFIX: np.bool was deprecated in NumPy 1.20 and removed in 1.24;
        # the builtin bool is the documented replacement.
        left = np.zeros((self.nC, self.nC), dtype=bool)
        right = np.zeros((self.nC, self.nC), dtype=bool)
        top = np.zeros((self.nC, self.nC), dtype=bool)
        bottom = np.zeros((self.nC, self.nC), dtype=bool)
        for i in range(self.nC):
            for j in range(self.nC):
                vi = make_hashable(self.verts[i])
                vj = make_hashable(self.verts[j])
                shared_verts = set(vi).intersection(vj)
                mi = self.cells[i]
                mj = self.cells[j]
                # leftmost col, left triangle, adjacent rows
                if mi[0] == mj[0] == 0 and \
                        mi[2] == mj[2] == 3 and \
                        np.abs(mi[1] - mj[1]) == 1:
                    left[i, j] = True
                # rightmost col, right triangle, adjacent rows
                if mi[0] == mj[0] == self.nc[0] - 1 and \
                        mi[2] == mj[2] == 1 and \
                        np.abs(mi[1] - mj[1]) == 1:
                    right[i, j] = True
                # uppermost row, upper triangle, adjacent cols
                if mi[1] == mj[1] == 0 and \
                        mi[2] == mj[2] == 0 and \
                        np.abs(mi[0] - mj[0]) == 1:
                    top[i, j] = True
                # lowermost row, lower triangle, adjacent cols
                if mi[1] == mj[1] == self.nc[1] - 1 and \
                        mi[2] == mj[2] == 2 and \
                        np.abs(mi[0] - mj[0]) == 1:
                    bottom[i, j] = True
                if len(shared_verts) == 1 and \
                        any([left[i, j], right[i, j], top[i, j], bottom[i, j]]) and \
                        (j, i) not in shared_v_idx:
                    v_aux = list(shared_verts)[0]  # v_aux is a tuple
                    v_aux = list(v_aux)  # Now v_aux is a list (i.e. mutable)
                    if left[i, j] or right[i, j]:
                        v_aux[0] -= 10  # Create a new vertex with the same y
                    elif top[i, j] or bottom[i, j]:
                        v_aux[1] -= 10  # Create a new vertex with the same x
                    else:
                        # Unreachable: the any([...]) guard above guarantees
                        # one of the four flags is set.
                        raise ValueError("WTF?")
                    shared_verts = [tuple(shared_verts)[0], tuple(v_aux)]
                    shared_v.append(shared_verts)
                    shared_v_idx.append((i, j))
        # Concat to the current list of vertices
        if shared_v:
            self.shared_v = np.concatenate((self.shared_v, shared_v))
            self.shared_v_idx = np.concatenate((self.shared_v_idx, shared_v_idx))
    def create_zero_boundary_constrains(self):
        """ One row per (boundary vertex, velocity component): triangles with a
        vertex on the domain boundary get zero velocity there. """
        xmin, ymin = self.domain_min
        xmax, ymax = self.domain_max
        Ltemp = np.zeros(shape=(0, 6 * self.nC))
        for c in range(self.nC):
            for v in self.verts[c]:
                if (v[0] == xmin or v[0] == xmax):
                    row = np.zeros(shape=(6 * self.nC))
                    row[(6 * c):(6 * (c + 1))] = np.append(v, np.zeros((1, 3)))
                    Ltemp = np.vstack((Ltemp, row))
                if (v[1] == ymin or v[1] == ymax):
                    row = np.zeros(shape=(6 * self.nC))
                    row[(6 * c):(6 * (c + 1))] = np.append(np.zeros((1, 3)), v)
                    Ltemp = np.vstack((Ltemp, row))
        return Ltemp
# %%
class Tesselation3D(Tesselation):
    """ 3D tesselation where each cubic cell is split into 5 tetrahedra;
    each tetrahedron carries a 12-parameter (3x4) affine map. """
    def __init__(self, nc, domain_min, domain_max,
                 zero_boundary=True, volume_perservation=False,
                 circularity=False, direc=None, override=False):
        # 3D parameters (each tetrahedron: 3x4 affine matrix -> 12 parameters)
        self.n_params = 12
        self.nC = 5 * np.prod(nc)  # 5 tetrahedra per cell (1 central + 4 corner)
        self.ndim = 3
        # Initialize super class
        super(Tesselation3D, self).__init__(nc, domain_min, domain_max,
            zero_boundary, volume_perservation, circularity, direc, override)
    def find_verts(self):
        """ Split every cube into 5 tetrahedra, alternating the corner
        labelling in a 3D checkerboard pattern. """
        Vx = np.linspace(self.domain_min[0], self.domain_max[0], self.nc[0] + 1)
        Vy = np.linspace(self.domain_min[1], self.domain_max[1], self.nc[1] + 1)
        Vz = np.linspace(self.domain_min[2], self.domain_max[2], self.nc[2] + 1)
        # Find cell index and verts for each cell
        cells, verts = [], []
        for i in range(self.nc[2]):
            for j in range(self.nc[1]):
                for k in range(self.nc[0]):
                    # Cube corners in homogeneous coordinates (x, y, z, 1).
                    # Naming: u/l = upper/lower (y), l/r = left/right (x),
                    # trailing 0/1 = the two z-slices (presumably — verify).
                    ul0 = tuple([Vx[k], Vy[j], Vz[i], 1])
                    ur0 = tuple([Vx[k + 1], Vy[j], Vz[i], 1])
                    ll0 = tuple([Vx[k], Vy[j + 1], Vz[i], 1])
                    lr0 = tuple([Vx[k + 1], Vy[j + 1], Vz[i], 1])
                    ul1 = tuple([Vx[k], Vy[j], Vz[i + 1], 1])
                    ur1 = tuple([Vx[k + 1], Vy[j], Vz[i + 1], 1])
                    ll1 = tuple([Vx[k], Vy[j + 1], Vz[i + 1], 1])
                    lr1 = tuple([Vx[k + 1], Vy[j + 1], Vz[i + 1], 1])
                    # Checkerboard flag: rotate the corner labels on alternating
                    # cells — NOTE(review): presumably so the tetrahedra of
                    # neighbouring cubes share faces; confirm against the paper.
                    tf = False
                    if k % 2 == 0:
                        if (i % 2 == 0 and j % 2 == 1) or (i % 2 == 1 and j % 2 == 0):
                            tf = True
                    else:
                        if (i % 2 == 0 and j % 2 == 0) or (i % 2 == 1 and j % 2 == 1):
                            tf = True
                    if tf:
                        ul0, ur0, lr0, ll0 = ur0, lr0, ll0, ul0
                        ul1, ur1, lr1, ll1 = ur1, lr1, ll1, ul1
                    # ORDER MATTERS
                    verts.append((ll1, ur1, ul0, lr0))  # central part
                    verts.append((ul1, ur1, ll1, ul0))
                    verts.append((lr1, ur1, ll1, lr0))
                    verts.append((ll0, ul0, lr0, ll1))
                    verts.append((ur0, ul0, lr0, ur1))
                    # cell index: (x, y, z, tetrahedron-within-cell)
                    for l in range(5):
                        cells.append((k, j, i, l))
        # Convert to array
        self.verts = np.asarray(verts)
        self.cells = cells
    def find_verts_outside(self):
        """ Add one auxiliary point (pushed outside the domain) per pair of
        boundary tetrahedra from the same cube, per boundary axis. """
        shared_verts, shared_verts_idx = [], []
        # Iterate over all pairs of cells
        for i in range(self.nC):
            for j in range(self.nC):
                if i != j:
                    # Add constrains for each side
                    for d in range(self.ndim):
                        # Get cell vertices
                        vi = self.verts[i]
                        vj = self.verts[j]
                        ci = self.cells[i]
                        cj = self.cells[j]
                        # Conditions for adding a constrain: both tetrahedra
                        # have a full face (3 vertices) on the same domain wall
                        upper_cond = sum(vi[:, d] == self.domain_min[d]) == 3 and \
                            sum(vj[:, d] == self.domain_min[d]) == 3
                        lower_cond = sum(vi[:, d] == self.domain_max[d]) == 3 and \
                            sum(vj[:, d] == self.domain_max[d]) == 3
                        dist_cond = (sum([abs(i1 - i2) for i1, i2 in zip(ci[:3], cj[:3])]) == 0)  # same cell
                        idx_cond = (j, i) not in shared_verts_idx
                        if (upper_cond or lower_cond) and dist_cond and idx_cond:
                            # Find the shared points
                            vi = make_hashable(vi)
                            vj = make_hashable(vj)
                            sv = set(vi).intersection(vj)
                            # Auxiliary point: midpoint of the first vertices,
                            # offset by one unit out of the domain along axis d
                            center = [(v1 + v2) / 2.0 for v1, v2 in zip(vi[0], vj[0])]
                            center[d] += (-1) if upper_cond else (+1)
                            shared_verts.append(list(sv.union([tuple(center)])))
                            shared_verts_idx.append((i, j))
        # Add to already found pairs
        if shared_verts:
            self.shared_v = np.concatenate((self.shared_v, np.asarray(shared_verts)))
            self.shared_v_idx += shared_verts_idx
    def create_zero_boundary_constrains(self):
        """ One row per (boundary vertex, velocity component): tetrahedra with
        a vertex on a domain wall get zero velocity there. """
        xmin, ymin, zmin = self.domain_min
        xmax, ymax, zmax = self.domain_max
        Ltemp = np.zeros(shape=(0, 12 * self.nC))
        for c in range(self.nC):
            for v in self.verts[c]:
                if (v[0] == xmin or v[0] == xmax):
                    row = np.zeros(shape=(12 * self.nC))
                    row[(12 * c):(12 * (c + 1))] = np.concatenate([v, np.zeros((8,))])
                    Ltemp = np.vstack((Ltemp, row))
                if (v[1] == ymin or v[1] == ymax):
                    row = np.zeros(shape=(12 * self.nC))
                    row[(12 * c):(12 * (c + 1))] = np.concatenate([np.zeros((4,)), v, np.zeros((4,))])
                    Ltemp = np.vstack((Ltemp, row))
                if (v[2] == zmin or v[2] == zmax):
                    row = np.zeros(shape=(12 * self.nC))
                    row[(12 * c):(12 * (c + 1))] = np.concatenate([np.zeros((8,)), v])
                    Ltemp = np.vstack((Ltemp, row))
        return Ltemp
# %%
if __name__ == "__main__":
    # Smoke test: build one small tesselation (and its basis) per dimension.
    # NOTE(review): direc is left at its default of None here, and __init__
    # concatenates self.dir with the basis filename — confirm these calls are
    # expected to work without a storage directory.
    tess1 = Tesselation1D([5], [0], [1], zero_boundary=True, volume_perservation=True)
    tess2 = Tesselation2D([2, 2], [0, 0], [1, 1], zero_boundary=False, volume_perservation=True)
    tess3 = Tesselation3D([2, 2, 2], [0, 0, 0], [1, 1, 1], zero_boundary=True, volume_perservation=False)
| 42.434409
| 109
| 0.507399
|
4a10dd850fcb379c931cf638ca4c95ea63f3e146
| 10,335
|
py
|
Python
|
tools/mutation.py
|
12yuens2/reconfigurable_organisms
|
af4b011b04918cedcb863b9f26d02bfac3b0d4ef
|
[
"CC0-1.0"
] | 788
|
2019-05-07T22:09:16.000Z
|
2022-03-24T13:14:17.000Z
|
tools/mutation.py
|
12yuens2/reconfigurable_organisms
|
af4b011b04918cedcb863b9f26d02bfac3b0d4ef
|
[
"CC0-1.0"
] | 8
|
2020-01-15T09:02:02.000Z
|
2021-02-18T05:10:11.000Z
|
tools/mutation.py
|
12yuens2/reconfigurable_organisms
|
af4b011b04918cedcb863b9f26d02bfac3b0d4ef
|
[
"CC0-1.0"
] | 179
|
2020-01-13T21:23:18.000Z
|
2022-03-24T13:24:03.000Z
|
import numpy as np
import random
import copy
import inspect
def create_new_children_through_mutation(pop, print_log, new_children=None, mutate_network_probs=None,
                                         prob_generating_func=None, max_mutation_attempts=1500):
    """Create copies, with modification, of existing individuals in the population.
    Parameters
    ----------
    pop : Population class
        This provides the individuals to mutate.
    print_log : PrintLog()
        For logging
    new_children : a list of new children created outside this function (may be empty)
        This is useful if creating new children through multiple functions, e.g. Crossover and Mutation.
    mutate_network_probs : probability, float between 0 and 1 (inclusive)
        The probability of mutating each network.
    prob_generating_func : func
        Used to recalculate the mutate_network_probs for each individual
    max_mutation_attempts : int
        Maximum number of invalid mutation attempts to allow before giving up on mutating a particular individual.
    Returns
    -------
    new_children : list
        A list of new individual SoftBots.
    """
    if new_children is None:
        new_children = []
    random.shuffle(pop.individuals)
    repeats = max(pop.learning_trials, 1)
    while len(new_children) < pop.pop_size * repeats:
        for ind in pop:
            clone = copy.deepcopy(ind)
            if prob_generating_func is not None:
                mutate_network_probs = prob_generating_func()
            # Networks with probability exactly 1 MUST mutate; 'required' is
            # the minimum number of selected networks.
            if mutate_network_probs is None:
                required = 0
            else:
                required = mutate_network_probs.count(1)
            if mutate_network_probs is None:
                # uniformly select networks
                selection = np.random.random(len(clone.genotype)) < 1 / float(len(clone.genotype))
            else:
                # use probability distribution
                selection = np.random.random(len(clone.genotype)) < mutate_network_probs
            # don't select any frozen networks (used to freeze aspects of genotype during evolution)
            for idx in range(len(selection)):
                if clone.genotype[idx].freeze or clone.genotype[idx].switch:  # also don't select a switch
                    selection[idx] = False
            # if too few selected, select one more (random, unfrozen, non-switch)
            if np.sum(selection) <= required:
                order = np.random.permutation(range(len(selection)))
                for idx in order:
                    if not clone.genotype[idx].freeze and not clone.genotype[idx].switch:
                        selection[idx] = True
                        break
            # it's possible that none are selected if using learning trials and the only unfrozen nets are also switches
            selected_networks = np.arange(len(clone.genotype))[selection].tolist()
            # Remember the parent's objective values for later comparison.
            for rank, goal in pop.objective_dict.items():
                setattr(clone, "parent_{}".format(goal["name"]), getattr(clone, goal["name"]))
            clone.parent_genotype = ind.genotype
            clone.parent_id = clone.id
            # note: the id increment happens at the end (learning-trials support)
            for name, details in clone.genotype.to_phenotype_mapping.items():
                details["old_state"] = copy.deepcopy(details["state"])
            for selected_net_idx in selected_networks:
                mutation_counter = 0
                done = False
                while not done:
                    mutation_counter += 1
                    candidate = copy.deepcopy(clone)
                    # perform mutation(s)
                    for _ in range(candidate.genotype[selected_net_idx].num_consecutive_mutations):
                        if not clone.genotype[selected_net_idx].direct_encoding:
                            # using CPPNs: pick exactly one mutation operator by
                            # flipping one positional argument of mutate() to 1.
                            # BUGFIX: inspect.getargspec was removed in Python
                            # 3.11; getfullargspec is the documented replacement
                            # and exposes the same .args attribute.
                            mut_func_args = inspect.getfullargspec(candidate.genotype[selected_net_idx].mutate)
                            mut_func_args = [0 for _ in range(1, len(mut_func_args.args))]  # this is correct (skips 'self')
                            choice = random.choice(range(len(mut_func_args)))
                            mut_func_args[choice] = 1
                            variation_type, variation_degree = candidate.genotype[selected_net_idx].mutate(*mut_func_args)
                        else:
                            # direct encoding with possibility of evolving mutation rate
                            # TODO: enable cppn mutation rate evolution
                            rate = None
                            for net in clone.genotype:
                                if "mutation_rate" in net.output_node_names:
                                    rate = net.values  # evolved mutation rates, one for each voxel
                            if "mutation_rate" not in candidate.genotype[selected_net_idx].output_node_names:
                                # use evolved mutation rates
                                variation_type, variation_degree = candidate.genotype[selected_net_idx].mutate(rate)
                            else:
                                # this is the mutation rate itself (use predefined meta-mutation rate)
                                variation_type, variation_degree = candidate.genotype[selected_net_idx].mutate()
                        if variation_degree != "":
                            candidate.variation_type = "{0}({1})".format(variation_type, variation_degree)
                        else:
                            candidate.variation_type = str(variation_type)
                        candidate.genotype.express()
                    if candidate.genotype[selected_net_idx].allow_neutral_mutations:
                        done = True
                        clone = copy.deepcopy(candidate)  # SAM: ensures change is made to every net
                        break
                    else:
                        # only accept mutations that changed the phenotype AND
                        # left it valid
                        for name, details in candidate.genotype.to_phenotype_mapping.items():
                            new = details["state"]
                            old = details["old_state"]
                            # BUGFIX: np.bool was removed in numpy >= 1.24;
                            # the builtin bool is equivalent here.
                            changes = np.array(new != old, dtype=bool)
                            if np.any(changes) and candidate.phenotype.is_valid():
                                done = True
                                clone = copy.deepcopy(candidate)  # SAM: ensures change is made to every net
                                break
                    if mutation_counter > max_mutation_attempts:
                        print_log.message("Couldn't find a successful mutation in {} attempts! "
                                          "Skipping this network.".format(max_mutation_attempts))
                        num_edges = len(clone.genotype[selected_net_idx].graph.edges())
                        num_nodes = len(clone.genotype[selected_net_idx].graph.nodes())
                        print_log.message("num edges: {0}; num nodes {1}".format(num_edges, num_nodes))
                        break
                # end while
                if not clone.genotype[selected_net_idx].direct_encoding:
                    for output_node in clone.genotype[selected_net_idx].output_node_names:
                        clone.genotype[selected_net_idx].graph.node[output_node]["old_state"] = ""
            # reset all objectives we calculate in VoxCad to unevaluated values
            for rank, goal in pop.objective_dict.items():
                if goal["tag"] is not None:
                    setattr(clone, goal["name"], goal["worst_value"])
            if pop.learning_trials <= 1:  # default is zero but one is equivalent for now
                clone.id = pop.max_id
                pop.max_id += 1
                new_children.append(clone)
            else:
                # one learning "family" per clone: mutate its switch nets and
                # add one child per learning trial
                clone.learning_id = clone.id
                for this_net in clone.genotype:
                    if this_net.switch and not this_net.freeze:
                        this_net.mutate()
                new_children += pop.get_learning_trials_for_single_ind(clone)
    # SAM: random individuals are now added in algorithms.py after mutation
    return new_children
def genome_wide_mutation(pop, print_log):
    """Mutate every network of every individual (selection probability 1)."""
    every_network = [1] * len(pop[0].genotype)
    return create_new_children_through_mutation(pop, print_log,
                                                mutate_network_probs=every_network)
| 46.764706
| 122
| 0.553169
|
4a10e12ddde3b97e766f0ac674090cab99a7459a
| 4,848
|
py
|
Python
|
alipay/aop/api/domain/Invoice.py
|
antopen/alipay-sdk-python-all
|
8e51c54409b9452f8d46c7bb10eea7c8f7e8d30c
|
[
"Apache-2.0"
] | 213
|
2018-08-27T16:49:32.000Z
|
2021-12-29T04:34:12.000Z
|
alipay/aop/api/domain/Invoice.py
|
antopen/alipay-sdk-python-all
|
8e51c54409b9452f8d46c7bb10eea7c8f7e8d30c
|
[
"Apache-2.0"
] | 29
|
2018-09-29T06:43:00.000Z
|
2021-09-02T03:27:32.000Z
|
alipay/aop/api/domain/Invoice.py
|
antopen/alipay-sdk-python-all
|
8e51c54409b9452f8d46c7bb10eea7c8f7e8d30c
|
[
"Apache-2.0"
] | 59
|
2018-08-27T16:59:26.000Z
|
2022-03-25T10:08:15.000Z
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.constant.ParamConstants import *
class Invoice(object):
    """Value object for an Alipay invoice.

    All attributes default to None; `to_alipay_dict` serializes only truthy
    attributes, and `from_alipay_dict` restores whichever keys are present.
    """

    # Attribute names handled uniformly by to_alipay_dict / from_alipay_dict.
    # Keeping them in one table removes ~90 lines of copy-pasted branches.
    _FIELDS = ('email', 'invoice_content', 'invoice_fee', 'invoice_title',
               'invoice_type', 'phone', 'receiver', 'tax_number', 'title_type')

    def __init__(self):
        self._email = None
        self._invoice_content = None
        self._invoice_fee = None
        self._invoice_title = None
        self._invoice_type = None
        self._phone = None
        self._receiver = None
        self._tax_number = None
        self._title_type = None

    @property
    def email(self):
        return self._email

    @email.setter
    def email(self, value):
        self._email = value

    @property
    def invoice_content(self):
        return self._invoice_content

    @invoice_content.setter
    def invoice_content(self, value):
        self._invoice_content = value

    @property
    def invoice_fee(self):
        return self._invoice_fee

    @invoice_fee.setter
    def invoice_fee(self, value):
        self._invoice_fee = value

    @property
    def invoice_title(self):
        return self._invoice_title

    @invoice_title.setter
    def invoice_title(self, value):
        self._invoice_title = value

    @property
    def invoice_type(self):
        return self._invoice_type

    @invoice_type.setter
    def invoice_type(self, value):
        self._invoice_type = value

    @property
    def phone(self):
        return self._phone

    @phone.setter
    def phone(self, value):
        self._phone = value

    @property
    def receiver(self):
        return self._receiver

    @receiver.setter
    def receiver(self, value):
        self._receiver = value

    @property
    def tax_number(self):
        return self._tax_number

    @tax_number.setter
    def tax_number(self, value):
        self._tax_number = value

    @property
    def title_type(self):
        return self._title_type

    @title_type.setter
    def title_type(self, value):
        self._title_type = value

    def to_alipay_dict(self):
        """Serialize to a dict, omitting falsy attributes (as before).

        Nested objects exposing to_alipay_dict() are serialized recursively.
        """
        params = dict()
        for name in self._FIELDS:
            value = getattr(self, name)
            if value:  # falsy values (None, '', 0) are skipped, as originally
                if hasattr(value, 'to_alipay_dict'):
                    value = value.to_alipay_dict()
                params[name] = value
        return params

    @staticmethod
    def from_alipay_dict(d):
        """Build an Invoice from a dict; returns None for a falsy input."""
        if not d:
            return None
        o = Invoice()
        for name in Invoice._FIELDS:
            if name in d:
                setattr(o, name, d[name])
        return o
| 30.111801
| 81
| 0.582096
|
4a10e1dd5a83344bfe2902553a93ab1276d8addd
| 1,463
|
py
|
Python
|
scripts/dummygridmeter.py
|
stevepbyrne/dbus-systemcalc-py
|
4d50ca36af51bbe1e3040cb63f60ef262da5d397
|
[
"MIT"
] | null | null | null |
scripts/dummygridmeter.py
|
stevepbyrne/dbus-systemcalc-py
|
4d50ca36af51bbe1e3040cb63f60ef262da5d397
|
[
"MIT"
] | null | null | null |
scripts/dummygridmeter.py
|
stevepbyrne/dbus-systemcalc-py
|
4d50ca36af51bbe1e3040cb63f60ef262da5d397
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
# Dummy grid-meter D-Bus service (Python 2): publishes static AC power values
# so other components can be tested without real metering hardware.
# takes data from the dbus, does calculations with it, and puts it back on
from dbus.mainloop.glib import DBusGMainLoop
import gobject
import argparse
import logging
import sys
import os
# our own packages
sys.path.insert(1, os.path.join(os.path.dirname(__file__), '../ext/velib_python'))
from dbusdummyservice import DbusDummyService
# Argument parsing
parser = argparse.ArgumentParser(
    description='dummygridmeter.py demo run'
)
parser.add_argument(
    "-n", "--name", help="the D-Bus service you want me to claim", type=str,
    default="com.victronenergy.grid.ttyUSB0")
parser.add_argument(
    # string default "0" is fine: argparse runs string defaults through type=int
    "-p", "--position", help="position (and instance): 0=grid, 1=output, 2=genset", type=int,
    default="0")
args = parser.parse_args()
# Init logging
logging.basicConfig(level=logging.DEBUG)
logging.info(__file__ + " is starting up, use -h argument to see optional arguments")
# Have a mainloop, so we can send/receive asynchronous calls to and from dbus
DBusGMainLoop(set_as_default=True)
# Static demo values: per-phase powers plus their total (150+200+250 = 600 W)
pvac_output = DbusDummyService(
    servicename=args.name,
    deviceinstance=args.position,
    productname='Grid meter (dummy)',
    paths={
        '/Ac/L1/Power': {'initial': 150},
        '/Ac/L2/Power': {'initial': 200},
        '/Ac/L3/Power': {'initial': 250},
        '/Ac/Power': {'initial': 600}})
# NOTE: Python 2 print statement — this script is not Python 3 compatible.
print 'Connected to dbus, and switching over to gobject.MainLoop() (= event based)'
mainloop = gobject.MainLoop()
mainloop.run()
| 29.26
| 93
| 0.710185
|
4a10e28bf724205e3717136bb70eb2ff28b86cde
| 1,967
|
py
|
Python
|
14 - Convolutional Neural Network/train.py
|
rendywijayaa/Deep_Learning
|
ee87064030ec0649d7ff3ea1e78113f7315e9684
|
[
"MIT"
] | 12
|
2020-04-16T03:02:33.000Z
|
2021-10-16T12:05:47.000Z
|
14 - Convolutional Neural Network/train.py
|
rendywijayaa/Deep_Learning
|
ee87064030ec0649d7ff3ea1e78113f7315e9684
|
[
"MIT"
] | null | null | null |
14 - Convolutional Neural Network/train.py
|
rendywijayaa/Deep_Learning
|
ee87064030ec0649d7ff3ea1e78113f7315e9684
|
[
"MIT"
] | 14
|
2020-03-07T02:44:46.000Z
|
2022-03-23T06:39:32.000Z
|
import torch
from torch import nn, optim
from torch.utils.data import DataLoader
from torchvision import datasets, transforms
from jcopdl.callback import Callback, set_config
from src.model import CNN
from src.train_utils import loop_fn
import config as cfg
# Select the GPU when available; train() moves the model here.
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
def train():
    """Build the data pipeline, train the CNN, and stop when the
    early-stopping callback fires on the test score."""
    # Augmented pipeline for training; deterministic resize/crop for testing.
    augment = transforms.Compose([
        transforms.RandomRotation(15),
        transforms.RandomResizedCrop(cfg.CROP_SIZE, scale=(0.8, 1.0)),
        transforms.RandomHorizontalFlip(),
        transforms.ToTensor()
    ])
    evaluate = transforms.Compose([
        transforms.Resize(70),
        transforms.CenterCrop(cfg.CROP_SIZE),
        transforms.ToTensor()
    ])
    # Datasets & loaders
    train_set = datasets.ImageFolder(cfg.TRAIN_DIR, transform=augment)
    train_loader = DataLoader(train_set, batch_size=cfg.BATCH_SIZE, shuffle=True, num_workers=4)
    test_set = datasets.ImageFolder(cfg.TEST_DIR, transform=evaluate)
    test_loader = DataLoader(test_set, batch_size=cfg.BATCH_SIZE, shuffle=True)
    # Experiment configuration recorded alongside checkpoints by the callback
    config = set_config({"batch_size": cfg.BATCH_SIZE, "crop_size": cfg.CROP_SIZE})
    # Model, loss, optimizer, callback
    model = CNN().to(device)
    criterion = nn.NLLLoss()
    optimizer = optim.AdamW(model.parameters(), lr=0.001)
    callback = Callback(model, config, outdir=cfg.OUTDIR)
    # Train indefinitely; early stopping on the test score breaks the loop.
    while True:
        train_cost, train_score = loop_fn("train", train_set, train_loader, model, criterion, optimizer, device)
        with torch.no_grad():
            test_cost, test_score = loop_fn("test", test_set, test_loader, model, criterion, optimizer, device)
        callback.log(train_cost, test_cost, train_score, test_score)
        callback.save_checkpoint()
        if callback.early_stopping(model, monitor="test_score"):
            break
# Entry point: only train when executed as a script, not on import.
if __name__ == "__main__":
    train()
| 32.245902
| 111
| 0.691408
|
4a10e4ac1198a337389355d249341bca56cfca4b
| 1,317
|
py
|
Python
|
model/lstm_model.py
|
FFTYYY/Poem
|
8836a32d21997afef5381c6ff8fd71c8adeebb75
|
[
"MIT"
] | 7
|
2020-02-07T06:08:36.000Z
|
2022-03-15T07:27:01.000Z
|
model/lstm_model.py
|
FFTYYY/Poem
|
8836a32d21997afef5381c6ff8fd71c8adeebb75
|
[
"MIT"
] | 2
|
2020-02-07T06:15:13.000Z
|
2020-12-08T02:33:36.000Z
|
model/lstm_model.py
|
FFTYYY/Poem
|
8836a32d21997afef5381c6ff8fd71c8adeebb75
|
[
"MIT"
] | null | null | null |
import torch as tc
import torch.nn as nn
import torch.nn.functional as F
from .lstm import LSTM
import pdb
import fitlog
fitlog.commit(__file__)
class Model(nn.Module):
    """LSTM encoder-decoder: encodes the condition sequence x into a single
    vector, adds it to every position of the embedded target sequence y, and
    decodes per-position scores over the vocabulary."""
    def __init__(self , vocab , d_model = 512 , dropout = 0.0):
        """
        params:
            vocab: vocabulary; len(vocab) is the output size, index 0 is padding
            d_model: embedding / hidden size
            dropout: dropout rate passed to both LSTMs
        """
        super().__init__()
        # index 0 is the padding token (its embedding stays zero)
        self.embedding = nn.Embedding(len(vocab) , d_model , padding_idx = 0)
        # encoder condenses the whole of x into one vector (output_mode = "vec")
        self.x_encoder = LSTM(d_model , d_model , 2 , True , dropout , output_mode = "vec")
        self.x_outer = nn.Linear(self.x_encoder.out_dim , d_model)
        self.y_inputer = nn.Linear(d_model , d_model)
        # decoder returns one vector per position (output_mode = "seq")
        self.decoder = LSTM(d_model , d_model , 2 , True , dropout , output_mode = "seq")
        self.output_ln = nn.Linear(self.decoder.out_dim , len(vocab))
        #----- hyper params -----
        self.vocab = vocab
        self.d_model = d_model
    def forward(self , x , y):
        '''
        params:
            x: (bsz , x_len) token ids of the condition sequence
            y: (bsz , y_len) token ids of the target-side input sequence
        return:
            (bsz , y_len , len(vocab)) unnormalized scores over the vocabulary
        '''
        bsz , x_len = x.size()
        bsz , y_len = y.size()
        d_model = self.d_model
        # padding masks: True where the token is real (id != 0)
        x_mask = (x != 0)
        y_mask = (y != 0)
        x = self.embedding(x)
        y = self.embedding(y)
        x = self.x_encoder(x , mask = x_mask)
        x = F.relu(self.x_outer(x)) # (bsz , d_model)
        # broadcast the encoded condition vector onto every target position
        y = y + x.view(bsz , 1 , d_model)
        y = F.relu(self.y_inputer(y))
        y = self.decoder(y , mask = y_mask) #(bsz , y_len , out_dim)
        y = self.output_ln(y)
        return y
| 22.322034
| 86
| 0.629461
|
4a10e565e95ce8c4f8d22d7f5e1e8d946da8de30
| 1,126
|
py
|
Python
|
molecule/java-min/tests/test_role.py
|
dedayoa/ansible-role-java
|
af984f8b977915564df50fc87e10cd1e06114420
|
[
"MIT"
] | null | null | null |
molecule/java-min/tests/test_role.py
|
dedayoa/ansible-role-java
|
af984f8b977915564df50fc87e10cd1e06114420
|
[
"MIT"
] | null | null | null |
molecule/java-min/tests/test_role.py
|
dedayoa/ansible-role-java
|
af984f8b977915564df50fc87e10cd1e06114420
|
[
"MIT"
] | null | null | null |
import pytest
@pytest.mark.parametrize('command', [
'java',
'javac'
])
def test_java_tools(host, command):
cmd = host.run('. /etc/profile && ' + command + ' -version')
assert cmd.rc == 0
assert ' 1.8.0_' in cmd.stderr
@pytest.mark.parametrize('version_dir_pattern', [
'jdk8u[0-9b-]+$'
])
def test_java_installed(host, version_dir_pattern):
java_home = host.check_output('find %s | grep --color=never -E %s',
'/opt/java/',
version_dir_pattern)
java_exe = host.file(java_home + '/bin/java')
assert java_exe.exists
assert java_exe.is_file
assert java_exe.user == 'root'
assert java_exe.group == 'root'
assert oct(java_exe.mode) == '0o755'
@pytest.mark.parametrize('fact_group_name', [
'java'
])
def test_facts_installed(host, fact_group_name):
fact_file = host.file('/etc/ansible/facts.d/' + fact_group_name + '.fact')
assert fact_file.exists
assert fact_file.is_file
assert fact_file.user == 'root'
assert fact_file.group == 'root'
assert oct(fact_file.mode) == '0o644'
| 26.186047
| 78
| 0.62611
|
4a10e573301df9c973a16c6bd9fcbbcdd11c21bd
| 2,490
|
py
|
Python
|
src/collectors/nvidia_gpu/test/testnvidia_gpu.py
|
fasrc/Diamond
|
cd0270a53de2ea9d4fccd63c14b4ecef3e75f741
|
[
"MIT"
] | null | null | null |
src/collectors/nvidia_gpu/test/testnvidia_gpu.py
|
fasrc/Diamond
|
cd0270a53de2ea9d4fccd63c14b4ecef3e75f741
|
[
"MIT"
] | null | null | null |
src/collectors/nvidia_gpu/test/testnvidia_gpu.py
|
fasrc/Diamond
|
cd0270a53de2ea9d4fccd63c14b4ecef3e75f741
|
[
"MIT"
] | null | null | null |
#!/usr/bin/python
# coding=utf-8
##########################################################################
from test import CollectorTestCase
from test import get_collector_config
from test import unittest
from test import patch
from test import Mock
from diamond.collector import Collector
from nvidia_gpu import NvidiaGPUCollector
##########################################################################
class TestNvidiaGPUCollector(CollectorTestCase):
def setUp(self):
config = get_collector_config('NvidiaGPUCollector', {
})
self.collector = NvidiaGPUCollector(config, None)
def test_import(self):
self.assertTrue(NvidiaGPUCollector)
@patch.object(Collector, 'publish')
def test_should_publish_gpu_stat(self, publish_mock):
output_mock = Mock(
return_value=(self.getFixture('nvidia_smi').getvalue(), '')
)
collector_mock = patch.object(
NvidiaGPUCollector,
'run_command',
output_mock
)
collector_mock.start()
self.collector.collect()
collector_mock.stop()
metrics = {
'gpu_0.memory.total': 4095,
'gpu_0.memory.used': 2670,
'gpu_0.memory.free': 1425,
'gpu_0.utilization.gpu': 0,
'gpu_0.utilization.memory': 0,
'gpu_0.temperature.gpu': 53,
'gpu_1.memory.total': 4095,
'gpu_1.memory.used': 2670,
'gpu_1.memory.free': 1425,
'gpu_1.utilization.gpu': 0,
'gpu_1.utilization.memory': 0,
'gpu_1.temperature.gpu': 44,
'gpu_2.memory.total': 4095,
'gpu_2.memory.used': 1437,
'gpu_2.memory.free': 2658,
'gpu_2.utilization.gpu': 0,
'gpu_2.utilization.memory': 0,
'gpu_2.temperature.gpu': 48,
'gpu_3.memory.total': 4095,
'gpu_3.memory.used': 1437,
'gpu_3.memory.free': 2658,
'gpu_3.utilization.gpu': 0,
'gpu_3.utilization.memory': 0,
'gpu_3.temperature.gpu': 44
}
self.setDocExample(collector=self.collector.__class__.__name__,
metrics=metrics,
defaultpath=self.collector.config['path'])
self.assertPublishedMany(publish_mock, metrics)
##########################################################################
if __name__ == "__main__":
unittest.main()
| 31.518987
| 74
| 0.536546
|
4a10e600359b6bd0bdbb2a2e65603e10484878a3
| 1,051
|
py
|
Python
|
ddtrace/internal/runtime/constants.py
|
dchengrove/dd-trace-py
|
549e8d532679c2a7dd0c572ac0a0eb520f6e4d49
|
[
"BSD-3-Clause"
] | 1
|
2020-03-10T01:45:56.000Z
|
2020-03-10T01:45:56.000Z
|
ddtrace/internal/runtime/constants.py
|
dchengrove/dd-trace-py
|
549e8d532679c2a7dd0c572ac0a0eb520f6e4d49
|
[
"BSD-3-Clause"
] | null | null | null |
ddtrace/internal/runtime/constants.py
|
dchengrove/dd-trace-py
|
549e8d532679c2a7dd0c572ac0a0eb520f6e4d49
|
[
"BSD-3-Clause"
] | null | null | null |
GC_COUNT_GEN0 = 'runtime.python.gc.count.gen0'
GC_COUNT_GEN1 = 'runtime.python.gc.count.gen1'
GC_COUNT_GEN2 = 'runtime.python.gc.count.gen2'
THREAD_COUNT = 'runtime.python.thread_count'
MEM_RSS = 'runtime.python.mem.rss'
CPU_TIME_SYS = 'runtime.python.cpu.time.sys'
CPU_TIME_USER = 'runtime.python.cpu.time.user'
CPU_PERCENT = 'runtime.python.cpu.percent'
CTX_SWITCH_VOLUNTARY = 'runtime.python.cpu.ctx_switch.voluntary'
CTX_SWITCH_INVOLUNTARY = 'runtime.python.cpu.ctx_switch.involuntary'
GC_RUNTIME_METRICS = set([
GC_COUNT_GEN0,
GC_COUNT_GEN1,
GC_COUNT_GEN2,
])
PSUTIL_RUNTIME_METRICS = set([
THREAD_COUNT,
MEM_RSS,
CTX_SWITCH_VOLUNTARY,
CTX_SWITCH_INVOLUNTARY,
CPU_TIME_SYS,
CPU_TIME_USER,
CPU_PERCENT,
])
DEFAULT_RUNTIME_METRICS = GC_RUNTIME_METRICS | PSUTIL_RUNTIME_METRICS
SERVICE = 'service'
LANG_INTERPRETER = 'lang_interpreter'
LANG_VERSION = 'lang_version'
TRACER_TAGS = set([
SERVICE,
])
PLATFORM_TAGS = set([
LANG_INTERPRETER,
LANG_VERSION
])
DEFAULT_RUNTIME_TAGS = TRACER_TAGS
| 23.355556
| 69
| 0.765937
|
4a10e61a1019e510eace17bf367128f30d23ba14
| 48
|
py
|
Python
|
First_Test_File.py
|
technicSupFan/Network_Scanner_Python
|
0d4a64209835c98cf28ca301abf493101f1c7898
|
[
"CC0-1.0"
] | null | null | null |
First_Test_File.py
|
technicSupFan/Network_Scanner_Python
|
0d4a64209835c98cf28ca301abf493101f1c7898
|
[
"CC0-1.0"
] | null | null | null |
First_Test_File.py
|
technicSupFan/Network_Scanner_Python
|
0d4a64209835c98cf28ca301abf493101f1c7898
|
[
"CC0-1.0"
] | null | null | null |
import time
print("Hello World")
time.sleep(1)
| 9.6
| 20
| 0.729167
|
4a10e6e3533437efca93a19b05fe3b7086706f53
| 146
|
py
|
Python
|
src/hogpong/constants.py
|
SarunasAzna/hogpong
|
bd4a7aca0ec4ffa25a691721dc4088f2a96874d2
|
[
"MIT"
] | null | null | null |
src/hogpong/constants.py
|
SarunasAzna/hogpong
|
bd4a7aca0ec4ffa25a691721dc4088f2a96874d2
|
[
"MIT"
] | null | null | null |
src/hogpong/constants.py
|
SarunasAzna/hogpong
|
bd4a7aca0ec4ffa25a691721dc4088f2a96874d2
|
[
"MIT"
] | null | null | null |
RIGTH_SIDE = "right"
LEFT_SIDE = "left"
TOP_SIDE = "top"
BOTTOM_SIDE = "botton"
SIDE_ENUMERATION = (LEFT_SIDE, RIGTH_SIDE, TOP_SIDE, BOTTOM_SIDE)
| 24.333333
| 65
| 0.753425
|
4a10e8682e7754b8e9172b1ac1537363f9a6ae10
| 1,569
|
py
|
Python
|
Jumpscale/tutorials/base/tutorials/cache/example_class.py
|
threefoldtech/JumpscaleX
|
5fb073a82aeb0e66fc7d9660c45a1e31bc094bfa
|
[
"Apache-2.0"
] | 2
|
2019-05-09T07:21:25.000Z
|
2019-08-05T06:37:53.000Z
|
Jumpscale/tutorials/base/tutorials/cache/example_class.py
|
threefoldtech/JumpscaleX
|
5fb073a82aeb0e66fc7d9660c45a1e31bc094bfa
|
[
"Apache-2.0"
] | 664
|
2018-12-19T12:43:44.000Z
|
2019-08-23T04:24:42.000Z
|
Jumpscale/tutorials/base/tutorials/cache/example_class.py
|
threefoldtech/jumpscale10
|
5fb073a82aeb0e66fc7d9660c45a1e31bc094bfa
|
[
"Apache-2.0"
] | 7
|
2019-05-03T07:14:37.000Z
|
2019-08-05T12:36:52.000Z
|
from Jumpscale import j
import time
JSBASE = j.application.JSBaseClass
class TutorialCacheClass(j.application.JSBaseClass):
"""
"""
def __init__(self):
JSBASE.__init__(self)
self.value = 1
#
def amounts_servers_active(self, reload=False):
def do(me=None):
print("self.value:%s" % me.value)
# SLOW FUNCTION
# Get something from webservice but it could fail
# ok to cache for 60 sec
x = j.data.idgenerator.generateRandomInt(1, 5)
print(x)
if x == 2:
# simulator that once and a while I get the return from internet, here 1 time on 10 it works
return me.value
else:
msg = "could not fetch info, there was simulated network error"
print(msg)
time.sleep(0.05)
raise RuntimeError(msg)
return self._cache.get("amounts_servers_active", do, expire=60, retry=100, refresh=reload, me=self)
def main():
c = TutorialCacheClass()
c.cache.reset()
c.value = 1
print("FIRST QUERY, value needs to be 1")
assert c.amounts_servers_active() == 1
c.value = 2
print("2nd QUERY, value needs to be 1, because cache will return")
# now will go immediate because cached
assert c.amounts_servers_active() == 1
# now will empty cache
c.value = 3
print("2nd QUERY, value needs to be 3, because cache is emptied")
assert c.amounts_servers_active(reload=True) == 3
print("test done ok")
| 29.603774
| 108
| 0.599745
|
4a10e9c65192b606e3bef9d9976acbb93fb42f72
| 6,836
|
py
|
Python
|
regularfrynance/utils.py
|
regularfry/regularfrynance
|
38b6e6a0d20b9e3d06c31f2856f79309727fe15b
|
[
"Apache-2.0"
] | null | null | null |
regularfrynance/utils.py
|
regularfry/regularfrynance
|
38b6e6a0d20b9e3d06c31f2856f79309727fe15b
|
[
"Apache-2.0"
] | null | null | null |
regularfrynance/utils.py
|
regularfry/regularfrynance
|
38b6e6a0d20b9e3d06c31f2856f79309727fe15b
|
[
"Apache-2.0"
] | 1
|
2020-09-03T01:38:35.000Z
|
2020-09-03T01:38:35.000Z
|
#!/usr/bin/env python
#
# Yahoo! Finance market data downloader (+fix for Pandas Datareader)
# https://github.com/regularfry/regularfrynance
#
# Copyright 2020 Alex Young
# Copyright 2017-2019 Ran Aroussi
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import requests as _requests
import re as _re
import pandas as _pd
import numpy as _np
import sys as _sys
import re as _re
try:
import ujson as _json
except ImportError:
import json as _json
from .app_page import AppPage
def empty_df(index=[]):
empty = _pd.DataFrame(
index=index,
data={
"Open": _np.nan,
"High": _np.nan,
"Low": _np.nan,
"Close": _np.nan,
"Adj Close": _np.nan,
"Volume": _np.nan,
},
)
empty.index.name = "Date"
return empty
def get(url, proxy=None, **kwargs):
response = _requests.get(url=url, proxies=proxy, **kwargs)
response.raise_for_status()
return response.text
def nullify_empty_dicts(data):
return _json.loads(_json.dumps(data).replace("{}", "null"))
def select_raw_values(data):
json_str = _json.dumps(data)
return _json.loads(_re.sub(r"\{[\'|\"]raw[\'|\"]:(.*?),(.*?)\}", r"\1", json_str))
def get_json(url, proxy=None):
html = get(url, proxy)
if "QuoteSummaryStore" not in html:
html = get(url, proxy)
if "QuoteSummaryStore" not in html:
return {}
data = AppPage(html).quote_summary_store()
return select_raw_values(nullify_empty_dicts(data))
def camel2title(o):
return [_re.sub("([a-z])([A-Z])", "\\1 \\2", i).title() for i in o]
def auto_adjust(data):
df = data.copy()
ratio = df["Close"] / df["Adj Close"]
df["Adj Open"] = df["Open"] / ratio
df["Adj High"] = df["High"] / ratio
df["Adj Low"] = df["Low"] / ratio
df.drop(["Open", "High", "Low", "Close"], axis=1, inplace=True)
df.rename(
columns={
"Adj Open": "Open",
"Adj High": "High",
"Adj Low": "Low",
"Adj Close": "Close",
},
inplace=True,
)
df = df[["Open", "High", "Low", "Close", "Volume"]]
return df[["Open", "High", "Low", "Close", "Volume"]]
def back_adjust(data):
""" back-adjusted data to mimic true historical prices """
df = data.copy()
ratio = df["Adj Close"] / df["Close"]
df["Adj Open"] = df["Open"] * ratio
df["Adj High"] = df["High"] * ratio
df["Adj Low"] = df["Low"] * ratio
df.drop(["Open", "High", "Low", "Adj Close"], axis=1, inplace=True)
df.rename(
columns={"Adj Open": "Open", "Adj High": "High", "Adj Low": "Low"}, inplace=True
)
return df[["Open", "High", "Low", "Close", "Volume"]]
def parse_quotes(data, tz=None):
timestamps = data["timestamp"]
ohlc = data["indicators"]["quote"][0]
volumes = ohlc["volume"]
opens = ohlc["open"]
closes = ohlc["close"]
lows = ohlc["low"]
highs = ohlc["high"]
adjclose = closes
if "adjclose" in data["indicators"]:
adjclose = data["indicators"]["adjclose"][0]["adjclose"]
quotes = _pd.DataFrame(
{
"Open": opens,
"High": highs,
"Low": lows,
"Close": closes,
"Adj Close": adjclose,
"Volume": volumes,
}
)
quotes.index = _pd.to_datetime(timestamps, unit="s")
quotes.sort_index(inplace=True)
if tz is not None:
quotes.index = quotes.index.tz_localize(tz)
return quotes
def parse_actions(data, tz=None):
dividends = _pd.DataFrame(columns=["Dividends"])
splits = _pd.DataFrame(columns=["Stock Splits"])
if "events" in data:
if "dividends" in data["events"]:
dividends = _pd.DataFrame(data=list(data["events"]["dividends"].values()))
dividends.set_index("date", inplace=True)
dividends.index = _pd.to_datetime(dividends.index, unit="s")
dividends.sort_index(inplace=True)
if tz is not None:
dividends.index = dividends.index.tz_localize(tz)
dividends.columns = ["Dividends"]
if "splits" in data["events"]:
splits = _pd.DataFrame(data=list(data["events"]["splits"].values()))
splits.set_index("date", inplace=True)
splits.index = _pd.to_datetime(splits.index, unit="s")
splits.sort_index(inplace=True)
if tz is not None:
splits.index = splits.index.tz_localize(tz)
splits["Stock Splits"] = splits["numerator"] / splits["denominator"]
splits = splits["Stock Splits"]
return dividends, splits
class ProgressBar:
def __init__(self, iterations, text="completed"):
self.text = text
self.iterations = iterations
self.prog_bar = "[]"
self.fill_char = "*"
self.width = 50
self.__update_amount(0)
self.elapsed = 1
def completed(self):
if self.elapsed > self.iterations:
self.elapsed = self.iterations
self.update_iteration(1)
print("\r" + str(self), end="")
_sys.stdout.flush()
print()
def animate(self, iteration=None):
if iteration is None:
self.elapsed += 1
iteration = self.elapsed
else:
self.elapsed += iteration
print("\r" + str(self), end="")
_sys.stdout.flush()
self.update_iteration()
def update_iteration(self, val=None):
val = val if val is not None else self.elapsed / float(self.iterations)
self.__update_amount(val * 100.0)
self.prog_bar += " %s of %s %s" % (self.elapsed, self.iterations, self.text)
def __update_amount(self, new_amount):
percent_done = int(round((new_amount / 100.0) * 100.0))
all_full = self.width - 2
num_hashes = int(round((percent_done / 100.0) * all_full))
self.prog_bar = (
"[" + self.fill_char * num_hashes + " " * (all_full - num_hashes) + "]"
)
pct_place = (len(self.prog_bar) // 2) - len(str(percent_done))
pct_string = "%d%%" % percent_done
self.prog_bar = self.prog_bar[0:pct_place] + (
pct_string + self.prog_bar[pct_place + len(pct_string) :]
)
def __str__(self):
return str(self.prog_bar)
| 29.465517
| 88
| 0.587624
|
4a10e9e9f20fea6ca41d6978196a0a2e1af6d489
| 6,712
|
py
|
Python
|
scripts/lib/xpedite/util/__init__.py
|
dhruvshekhawat/Xpedite
|
e857cb623e22a4384265dafa8e8437f038f77b95
|
[
"Apache-2.0"
] | null | null | null |
scripts/lib/xpedite/util/__init__.py
|
dhruvshekhawat/Xpedite
|
e857cb623e22a4384265dafa8e8437f038f77b95
|
[
"Apache-2.0"
] | null | null | null |
scripts/lib/xpedite/util/__init__.py
|
dhruvshekhawat/Xpedite
|
e857cb623e22a4384265dafa8e8437f038f77b95
|
[
"Apache-2.0"
] | null | null | null |
"""
Utility methods
This module provides utility methods for
1. file system operations - make or clean up directories, touch files etc
2. formating and normalizing strings
3. collecting cpu and memory info
4. etc ...
Author: Manikandan Dhamodharan, Morgan Stanley
"""
from __future__ import division
import sys
import os
import time
import shutil
import tempfile
import logging
from collections import OrderedDict
from xpedite.dependencies import Package, DEPENDENCY_LOADER
DEPENDENCY_LOADER.load(Package.Six, Package.PyCpuInfo)
LOGGER = logging.getLogger(__name__)
def attachPdb(_, frame):
"""Attaches PDB instance to python process"""
import pdb
pdb.Pdb().set_trace(frame)
def timeAction(action, delegate):
"""
Measures elapsed time for execution of a delegate
:param action: Description of the delegate
:param delegate: Callable to perform a task
"""
begin = time.time()
retVal = delegate()
elapsed = time.time() - begin
if elapsed > 10:
LOGGER.warn('timed action exceeded threshold %s completed in %s.1f seconds', action, elapsed)
return retVal
def shell(cmd, cwd=None, closeFds=True):
"""
Executes a shell command
:param cmd: Command string
:param cwd: Current working directory (Default value = None)
:param closeFds: If True, all file descriptors except 0, 1 and 2 will
be closed before the child process is executed. Defaults to True.
:returns: a (return code, std out, std error) triplet
:rtype: tuple of int, str, str
"""
import subprocess
if cwd:
process = subprocess.Popen(
cmd, shell=True, stderr=subprocess.PIPE, stdout=subprocess.PIPE, cwd=cwd, close_fds=closeFds
)
else:
process = subprocess.Popen(cmd, shell=True, stderr=subprocess.PIPE, stdout=subprocess.PIPE, close_fds=closeFds)
stdout, stderr = process.communicate()
return (process.returncode, stdout, stderr)
def makeUniqueId():
"""Returns an unique identifier for css selector"""
return str(time.time()).replace('.', '_')
def mkTempFilePath():
"""Creates a temporary directory in the file system"""
fd, tempPath = tempfile.mkstemp()
os.close(fd)
return tempPath
def formatHumanReadable(byteCount, suffix='B'):
"""Formats size using human friendly units"""
for unit in ['', 'Ki', 'Mi', 'Gi', 'Ti', 'Pi', 'Ei', 'Zi']:
if abs(byteCount) < 1024.0:
return '%3.1f %s%s' % (byteCount, unit, suffix)
byteCount /= 1024.0
return '%.1f %s%s' % (byteCount, 'Yi', suffix)
def persist(filePath, iterable, lineDelimiter=None):
"""
Persists the given iterable to file system in text format
:param filePath: path to the file
:param iterable: Iterable to persist
:param lineDelimiter: delimiter for lines in the file (Default value = None)
"""
with open(filePath, 'w') as fileHandle:
for item in iterable:
fileHandle.write(str(item))
if lineDelimiter:
fileHandle.write(lineDelimiter)
def parseAddress(ipStr):
"""
Parses ip address and port from a string
:param ipStr: String with ip address and port delimited by colon
"""
words = ipStr.split(':')
if len(words) != 2:
errMsg = 'ill-formatted address {}. expect address in format - <ip4-address>:<port>'.format(ipStr)
LOGGER.error(errMsg)
raise Exception(errMsg)
return (words[0], int(words[1]))
def parsePort(port):
"""
Converts port in string format to an int
:param port: a string or integer value
:returns: an integer port number
:rtype: int
"""
result = None
try:
result = int(port)
except ValueError:
import socket
result = socket.getservbyname(port)
return result
def promptUser():
"""Awaits a key press from console"""
LOGGER.info('press return key to continue...')
sys.stdin.read(1)
def removeFiles(path):
"""
Removes a file or directory from file system
:param path: path to file or directory
"""
if os.path.isdir(path):
shutil.rmtree(path, ignore_errors=True)
else:
try:
os.remove(path)
except OSError:
pass
def mkdir(path, clean=False):
"""
Creates a directory, optionally cleaning its contents
:param path: Path to the directory
:param clean: Flag to indicate clean up (Default value = False)
"""
if clean:
removeFiles(path)
if os.path.exists(path):
if os.path.isdir(path):
if not os.access(path, os.W_OK):
raise Exception('Path {} is not writable'.format(path))
else:
raise Exception('Path {} is not a directory'.format(path))
else:
os.makedirs(path)
def logPath(name=None):
"""
Returns the path of xpedite log directory
:param name: Optional suffix for the log path (Default value = None)
"""
from xpedite.dependencies import CONFIG
logpath = CONFIG.logDir
if name:
logpath = os.path.join(logpath, name)
return logpath
def makeLogPath(name=None):
"""
Creates the directory for storing log files
:param name: Suffix for the log directory
"""
path = logPath(name)
mkdir(path)
if not os.path.isdir(path):
raise Exception('Could not create run directory {0}'.format(path))
return path
def touch(path):
"""
Touches a file in the given path
:param path: path to touch
"""
with open(path, 'a'):
pass
def getCpuInfo():
"""Loads cpu info for localhost"""
from cpuinfo import cpuinfo
fullCpuInfo = cpuinfo.get_cpu_info()
return fullCpuInfo
def getCpuId(cpuInfo=None):
"""Returns cpu model for localhost"""
cpuInfo = cpuInfo if cpuInfo else getCpuInfo()
return '{}-{}-{:02X}'.format(cpuInfo['vendor_id'], cpuInfo['family'], cpuInfo['model'])
def meminfo(remoteConnection=None):
"""
Loads memory info for localhost
:param remoteConnection: handle to remote rpyc connection (Default value = None)
"""
meminfoPath = '/proc/meminfo'
if remoteConnection:
tempFilePath = mkTempFilePath()
import rpyc
rpyc.utils.classic.download(remoteConnection, meminfoPath, tempFilePath)
meminfoPath = tempFilePath
meminfoMap = OrderedDict()
with open(meminfoPath) as fileHandle:
for line in fileHandle:
meminfoMap[line.split(':')[0]] = line.split(':')[1].strip()
return meminfoMap
def compressText(data):
"""
returns compressed data in base64 format
:param data: Data to be compressed
"""
import zlib
import base64
import six
compressor = zlib.compressobj(9, zlib.DEFLATED, zlib.MAX_WBITS | 16)
zContent = compressor.compress(six.ensure_binary(data)) + compressor.flush()
return base64.b64encode(zContent)
def loadTextFile(path):
"""
Loads contents of the given file
:param path: Path of the file to load
"""
import six
with open(path, 'rb') as fileHandle:
return six.ensure_str(fileHandle.read())
| 25.716475
| 115
| 0.694875
|
4a10ea30e1b34f88cd907a10183816999b7d38d7
| 975
|
py
|
Python
|
serenity_pypeline/task.py
|
Bplotka/serenity-pypeline
|
7f355d95e21a0832d5c812e4a8a1055bc26267d4
|
[
"Apache-2.0"
] | 1
|
2016-01-16T22:56:07.000Z
|
2016-01-16T22:56:07.000Z
|
serenity_pypeline/task.py
|
Bplotka/serenity-pypeline
|
7f355d95e21a0832d5c812e4a8a1055bc26267d4
|
[
"Apache-2.0"
] | null | null | null |
serenity_pypeline/task.py
|
Bplotka/serenity-pypeline
|
7f355d95e21a0832d5c812e4a8a1055bc26267d4
|
[
"Apache-2.0"
] | null | null | null |
from serenity_pypeline.logger import log
class NotInitializedError(Exception):
pass
class Task(object):
def __init__(self, classpath):
self._klass = self._get_class(classpath)
self._obj = None
self.next_success = []
self.next_error = []
self.input = None
def _get_class(self, classpath):
classpath, classname = tuple(classpath.split(':'))
klass = __import__(classpath, fromlist=[classname])
klass = getattr(klass, classname)
return klass
def init_class(self, config):
self._obj = self._klass(config)
def run(self, **kwargs):
if self._obj is None:
raise NotInitializedError("Class is not initialized")
return self._obj.run(**kwargs)
def add_success(self, obj):
self.next_success.append(obj)
def add_error(self, obj):
self.next_error.append(obj)
def is_initialized(self):
return not (self._obj is None)
| 25.657895
| 65
| 0.635897
|
4a10ea877694ab083a823406c63feef271d35c2d
| 138
|
py
|
Python
|
examples/dio_test.py
|
willdickson/panel_comm
|
4a770fa8ecc7db644b9bdb918dce2e39f5913dbf
|
[
"BSD-3-Clause"
] | 1
|
2021-06-23T06:07:49.000Z
|
2021-06-23T06:07:49.000Z
|
examples/dio_test.py
|
willdickson/panel_comm
|
4a770fa8ecc7db644b9bdb918dce2e39f5913dbf
|
[
"BSD-3-Clause"
] | null | null | null |
examples/dio_test.py
|
willdickson/panel_comm
|
4a770fa8ecc7db644b9bdb918dce2e39f5913dbf
|
[
"BSD-3-Clause"
] | 1
|
2021-05-21T23:46:10.000Z
|
2021-05-21T23:46:10.000Z
|
import sys
import time
from panel_comm import PanelComm
chan = int(sys.argv[1])
ctlr = PanelComm(baudrate=921600)
ctlr.dio_test(chan)
| 12.545455
| 33
| 0.768116
|
4a10eabab3f7cffcb62ebfcac28aac57f096b74b
| 297
|
py
|
Python
|
decorators.py
|
KenjiEmura/django-REST-framework
|
544a187ebf5f0314e8b9ccf5c26c95c43eec15ca
|
[
"MIT"
] | null | null | null |
decorators.py
|
KenjiEmura/django-REST-framework
|
544a187ebf5f0314e8b9ccf5c26c95c43eec15ca
|
[
"MIT"
] | null | null | null |
decorators.py
|
KenjiEmura/django-REST-framework
|
544a187ebf5f0314e8b9ccf5c26c95c43eec15ca
|
[
"MIT"
] | null | null | null |
def f1(func):
def wrapper(*args, **kwargs):
print("Started")
val = func(*args, **kwargs)
print("Ended")
return val
return wrapper
@f1
def f(a, b=9):
print(a, b)
@f1
def add(x, y):
print("Inside add function")
return x + y
print(add(4, 5))
| 13.5
| 35
| 0.525253
|
4a10eb1a87395eb9a37acc6bd062eb65680644b4
| 2,436
|
py
|
Python
|
SRC/experiment.py
|
cyrilgalitzine/SDE_inference
|
e64e9c5cdf4c13bf3ba67071949c71b0a1b6d8fe
|
[
"Apache-2.0"
] | 1
|
2019-07-28T19:17:36.000Z
|
2019-07-28T19:17:36.000Z
|
SRC/experiment.py
|
cyrilgalitzine/SDE_inference
|
e64e9c5cdf4c13bf3ba67071949c71b0a1b6d8fe
|
[
"Apache-2.0"
] | null | null | null |
SRC/experiment.py
|
cyrilgalitzine/SDE_inference
|
e64e9c5cdf4c13bf3ba67071949c71b0a1b6d8fe
|
[
"Apache-2.0"
] | null | null | null |
from control import *
from time_series import *
from equation import *
from error_model import *
import numpy as np
class Experiment():
def __init__(self,Nrep,Type,Name):
self.Nrep= Nrep #Number of replicates
self.Type = Type #Type of experiment
self.Name = Name
class TimeSeriesExperiment(Experiment):
def __init__(self,name1):
Experiment.__init__(self,1,'Time Series',name1)
def simulate_experiment(self,control):
self.Nrep = control.Nrep_sim
print('sdssd',self.Nrep)
#Conditions for the start conditions:
if self.Nrep == control.X0_sim.size:
IC = control.X0_sim
elif control.X0_sim.size == 1:
IC = np.random.poisson(control.X0_sim[0],control.Nrep_sim)
else:
print("Wrong size for initial conditions for simulated data")
print(IC[0])
#Create list of time series with all the replicates:
TS = []
for irep in range(self.Nrep ):
print('simulating'+str(irep))
T1 = time_series.simulate_data(irep,control.Equation_sim,control.Error_sim,control.hetmodel_sim,IC[irep],control.T_sim,control.Ntime_sim)
print(T1.Ntime)
TS.append(T1)
self.TS = TS
self.hetmodel = control.hetmodel#inherit the het model of the simulation
def write_file_experiment(self):
print('Writing simulated data to file')
appended_data = []
for irep in range(self.Nrep):
TS = self.TS[irep]
df=pd.DataFrame({'t':TS.t, 'x':TS.x, 'replicate':np.repeat(irep,TS.t.size)})
appended_data.append(df)
appended_data = pd.concat(appended_data, axis=0)
appended_data.to_csv(data_file_name)
print(appended_data)
#Create a data frame now:
#Time value, replicate, type, Experiment name:
def read_file_experiment(self,Input):
print('Reading data file:'+data_file_name)
df_read = pd.read_csv(data_file_name, index_col=0)
#Count the number of replicates:
allrep = df_read.replicate.unique()
self.Nrep = df_read.replicate.unique().size
#Check that the error model is properly specified:
#if(Input.param_error.size == 1 & Input.param_error.size)
#Create list of time series with all the replicates:
TS = []
for irep in allrep:
print('reading replicate '+str(irep))
df_rep = df_read[df_read.replicate == irep]
t = np.array(df_rep.t.values); x = np.array(df_rep.x.values);
T1 = time_series(x[0],x.size,Input.equation,Input.error_model,Input.hetmodel,irep)
T1.initialize(t,x)
TS.append(T1)
self.TS = TS
self.hetmodel = Input.hetmodel
| 24.857143
| 140
| 0.720443
|
4a10eb4e07bba24797020c7ff05a43bdc754b47b
| 4,351
|
py
|
Python
|
brunch-recsys-flask.py
|
goodvc78/brunch-recsys
|
0b322db92bc97d14c7327e32e993fd16c3a3c5e9
|
[
"Apache-2.0"
] | 4
|
2016-05-02T03:08:02.000Z
|
2020-04-26T09:02:51.000Z
|
brunch-recsys-flask.py
|
goodvc78/brunch-recsys
|
0b322db92bc97d14c7327e32e993fd16c3a3c5e9
|
[
"Apache-2.0"
] | null | null | null |
brunch-recsys-flask.py
|
goodvc78/brunch-recsys
|
0b322db92bc97d14c7327e32e993fd16c3a3c5e9
|
[
"Apache-2.0"
] | 2
|
2017-12-21T03:41:24.000Z
|
2018-04-01T15:50:58.000Z
|
#!/usr/local/bin/python3
from flask import Flask, request
from flask_restful import Resource, Api
from json import dumps
import gensim.models.word2vec as word2vec
import pprint, pickle
from scipy.spatial import distance
from flask.ext.cors import CORS
## word2vec model path
model_path = '/Users/goodvc/Data/brunch-recsys/resource/b2v.latest.model'
writer_info_path = '/Users/goodvc/Data/brunch-recsys/resource/writer.pkl'
class BrunchRecsys:
def __init__(self, model_path, writer_info_path):
self.model_path = model_path
self.writer_info_path = writer_info_path
self.load()
def load(self):
## word2vec model load
self.b2v = word2vec.Word2Vec.load(self.model_path)
## writer_info pickle load
pkl_file = open(self.writer_info_path, 'rb')
self.writer_info = pickle.load(pkl_file)
pkl_file.close()
## for A to Z
self.NX = self.b2v.syn0
def parseid(self, writers):
positive = []
negative = []
for wid in writers.split(':'):
if wid[0] == '+':
positive.append(wid[1:])
elif wid[0] == '-':
negative.append(wid[1:])
else :
positive.append(wid)
return (positive, negative)
def get_writer_info(self, id):
if id not in self.writer_info:
return None
writer = self.writer_info[id]
if writer.get('documents',0) < 1 or writer.get('followers',0) < 10 :
return None
return writer
def most_similar(self, writers):
## parse id
(positive, negative) = self.parseid( writers )
neighbors = self.b2v.most_similar(positive=positive, negative=negative, topn=20)
similars = []
for (id, similarity) in neighbors:
writer = self.get_writer_info(id)
if None == writer:
continue
writer['similarity'] = similarity
similars.append(writer)
return similars
def nestest(self, NX, v1):
dist = distance.cdist( NX, v1.reshape(1,len(v1)), 'cosine' )
nearest_idx = dist.argmin()
if (NX[nearest_idx] == v1).all() == True:
dist[nearest_idx] = 1
return nearest_idx
def a2z_writers(self, a, z, max_steps=100):
av = self.b2v[a]
zv = self.b2v[z]
sv = (zv - av) / max_steps
exists = set([a,z])
writers = [a]
for n in range(0,max_steps):
nv = av+(sv*n)
idx = self.nestest(self.NX, nv)
name = self.b2v.index2word[idx]
if not name in exists :
writers.append(name)
exists.add(name)
writers.append(z)
result = []
for name in writers:
writer = self.get_writer_info(name)
print(a,name)
if None != writer:
writer['similarity'] = self.b2v.similarity(a,name)
result.append(writer)
return result
###############################################
recsys = BrunchRecsys(model_path, writer_info_path)
app = Flask(__name__)
CORS(app)
api = Api(app)
class SimilarTo(Resource):
def get(self, writers):
result = { 'result':0 }
similars = recsys.most_similar(writers)
if None == similars or len(similars) < 1:
result['reason'] = "there is on similars "
return result
result['result'] = 1
result['data'] = similars
return result
class AtoZ(Resource):
def get(self, writers):
result = { 'result':0 }
ids = writers.split(':',1)
if len(ids) != 2 :
result['reason'] = "not enough id list : {}".format(writers)
return result;
atozList = recsys.a2z_writers( ids[0], ids[1] )
result['data'] = atozList
result['result'] = 1 if len(atozList)>2 else 0
return result
api.add_resource(SimilarTo, '/most_similar/<string:writers>')
api.add_resource(AtoZ, '/a2z/<string:writers>')
## test
def test_similarity():
pprint.pprint(recsys.most_similar('goodvc78'))
def test_atoz():
pprint.pprint(recsys.a2z_writers('goodvc78','paranmoja'))
if __name__ == '__main__':
#app.debug = True
app.run()
##test_similarity();
##test_atoz()
| 29.80137
| 88
| 0.571823
|
4a10ed6e7402ccca2cae69d82c015606e8c21259
| 8,824
|
py
|
Python
|
pandas/tests/api/test_api.py
|
jess010/pandas
|
9872d6757e5117dce070981141cee562f675694e
|
[
"PSF-2.0",
"Apache-2.0",
"BSD-3-Clause-No-Nuclear-License-2014",
"MIT",
"ECL-2.0",
"BSD-3-Clause"
] | null | null | null |
pandas/tests/api/test_api.py
|
jess010/pandas
|
9872d6757e5117dce070981141cee562f675694e
|
[
"PSF-2.0",
"Apache-2.0",
"BSD-3-Clause-No-Nuclear-License-2014",
"MIT",
"ECL-2.0",
"BSD-3-Clause"
] | null | null | null |
pandas/tests/api/test_api.py
|
jess010/pandas
|
9872d6757e5117dce070981141cee562f675694e
|
[
"PSF-2.0",
"Apache-2.0",
"BSD-3-Clause-No-Nuclear-License-2014",
"MIT",
"ECL-2.0",
"BSD-3-Clause"
] | null | null | null |
# -*- coding: utf-8 -*-
import sys
from warnings import catch_warnings
import pytest
import pandas as pd
from pandas import api
from pandas.util import testing as tm
class Base(object):
def check(self, namespace, expected, ignored=None):
# see which names are in the namespace, minus optional
# ignored ones
# compare vs the expected
result = sorted([f for f in dir(namespace) if not f.startswith('_')])
if ignored is not None:
result = sorted(list(set(result) - set(ignored)))
expected = sorted(expected)
tm.assert_almost_equal(result, expected)
class TestPDApi(Base):
    """Verify the exact set of names exposed at the top-level ``pandas`` namespace."""
    # these are optionally imported based on testing
    # & need to be ignored
    ignored = ['tests', 'locale', 'conftest']
    # top-level sub-packages
    lib = ['api', 'compat', 'core', 'errors', 'pandas',
           'plotting', 'test', 'testing', 'tools', 'tseries',
           'util', 'options', 'io']
    # these are already deprecated; awaiting removal
    deprecated_modules = ['stats', 'datetools', 'parser',
                          'json', 'lib', 'tslib']
    # misc
    misc = ['IndexSlice', 'NaT']
    # top-level classes
    classes = ['Categorical', 'CategoricalIndex', 'DataFrame', 'DateOffset',
               'DatetimeIndex', 'ExcelFile', 'ExcelWriter', 'Float64Index',
               'Grouper', 'HDFStore', 'Index', 'Int64Index', 'MultiIndex',
               'Period', 'PeriodIndex', 'RangeIndex', 'UInt64Index',
               'Series', 'SparseArray', 'SparseDataFrame',
               'SparseSeries', 'Timedelta',
               'TimedeltaIndex', 'Timestamp', 'Interval', 'IntervalIndex']
    # these are already deprecated; awaiting removal
    deprecated_classes = ['WidePanel', 'TimeGrouper', 'Expr', 'Term']
    # these should be deprecated in the future
    deprecated_classes_in_future = ['Panel']
    # external modules exposed in pandas namespace
    modules = ['np', 'datetime']
    # top-level functions
    funcs = ['bdate_range', 'concat', 'crosstab', 'cut',
             'date_range', 'interval_range', 'eval',
             'factorize', 'get_dummies',
             'infer_freq', 'isna', 'isnull', 'lreshape',
             'melt', 'notna', 'notnull', 'offsets',
             'merge', 'merge_ordered', 'merge_asof',
             'period_range',
             'pivot', 'pivot_table', 'qcut',
             'show_versions', 'timedelta_range', 'unique',
             'value_counts', 'wide_to_long']
    # top-level option funcs
    funcs_option = ['reset_option', 'describe_option', 'get_option',
                    'option_context', 'set_option',
                    'set_eng_float_format']
    # top-level read_* funcs
    funcs_read = ['read_clipboard', 'read_csv', 'read_excel', 'read_fwf',
                  'read_gbq', 'read_hdf', 'read_html', 'read_json',
                  'read_msgpack', 'read_pickle', 'read_sas', 'read_sql',
                  'read_sql_query', 'read_sql_table', 'read_stata',
                  'read_table', 'read_feather', 'read_parquet']
    # top-level to_* funcs
    funcs_to = ['to_datetime', 'to_msgpack',
                'to_numeric', 'to_pickle', 'to_timedelta']
    # top-level to deprecate in the future
    deprecated_funcs_in_future = []
    # these are already deprecated; awaiting removal
    deprecated_funcs = ['ewma', 'ewmcorr', 'ewmcov', 'ewmstd', 'ewmvar',
                        'ewmvol', 'expanding_apply', 'expanding_corr',
                        'expanding_count', 'expanding_cov', 'expanding_kurt',
                        'expanding_max', 'expanding_mean', 'expanding_median',
                        'expanding_min', 'expanding_quantile',
                        'expanding_skew', 'expanding_std', 'expanding_sum',
                        'expanding_var', 'rolling_apply',
                        'rolling_corr', 'rolling_count', 'rolling_cov',
                        'rolling_kurt', 'rolling_max', 'rolling_mean',
                        'rolling_median', 'rolling_min', 'rolling_quantile',
                        'rolling_skew', 'rolling_std', 'rolling_sum',
                        'rolling_var', 'rolling_window',
                        'pnow', 'match', 'groupby', 'get_store',
                        'plot_params', 'scatter_matrix']
    def test_api(self):
        # The union of all the name lists above must exactly equal the public
        # contents of the pandas namespace (minus the ignored entries).
        self.check(pd,
                   self.lib + self.misc +
                   self.modules + self.deprecated_modules +
                   self.classes + self.deprecated_classes +
                   self.deprecated_classes_in_future +
                   self.funcs + self.funcs_option +
                   self.funcs_read + self.funcs_to +
                   self.deprecated_funcs_in_future +
                   self.deprecated_funcs,
                   self.ignored)
class TestApi(Base):
    """Check that ``pandas.api`` exposes only the allowed sub-namespaces."""
    allowed = ['types', 'extensions']
    def test_api(self):
        self.check(api, self.allowed)
class TestTesting(Base):
    """Check the public surface of the ``pandas.testing`` module."""
    funcs = ['assert_frame_equal', 'assert_series_equal',
             'assert_index_equal']
    def test_testing(self):
        from pandas import testing
        self.check(testing, self.funcs)
class TestDatetoolsDeprecation(object):
    """Accessing ``pd.datetools`` members must emit a FutureWarning."""
    def test_deprecation_access_func(self):
        # Calling a function through the deprecated module warns.
        with tm.assert_produces_warning(FutureWarning,
                                        check_stacklevel=False):
            pd.datetools.to_datetime('2016-01-01')
    def test_deprecation_access_obj(self):
        # Plain attribute access through the deprecated module also warns.
        with tm.assert_produces_warning(FutureWarning,
                                        check_stacklevel=False):
            pd.datetools.monthEnd
class TestTopLevelDeprecations(object):
    """Each deprecated top-level pandas name must emit a FutureWarning when used."""
    # top-level API deprecations
    # GH 13790
    def test_pnow(self):
        with tm.assert_produces_warning(FutureWarning,
                                        check_stacklevel=False):
            pd.pnow(freq='M')
    def test_term(self):
        with tm.assert_produces_warning(FutureWarning,
                                        check_stacklevel=False):
            pd.Term('index>=date')
    def test_expr(self):
        with tm.assert_produces_warning(FutureWarning,
                                        check_stacklevel=False):
            pd.Expr('2>1')
    def test_match(self):
        with tm.assert_produces_warning(FutureWarning,
                                        check_stacklevel=False):
            pd.match([1, 2, 3], [1])
    def test_groupby(self):
        with tm.assert_produces_warning(FutureWarning,
                                        check_stacklevel=False):
            pd.groupby(pd.Series([1, 2, 3]), [1, 1, 1])
    def test_TimeGrouper(self):
        with tm.assert_produces_warning(FutureWarning,
                                        check_stacklevel=False):
            pd.TimeGrouper(freq='D')
    # GH 15940
    def test_get_store(self):
        # Requires PyTables; the store must still be closed after the
        # deprecated accessor hands it out.
        pytest.importorskip('tables')
        with tm.ensure_clean() as path:
            with tm.assert_produces_warning(FutureWarning,
                                            check_stacklevel=False):
                s = pd.get_store(path)
                s.close()
class TestJson(object):
    """The deprecated ``pd.json`` shim must remain callable (warning suppressed)."""
    def test_deprecation_access_func(self):
        with catch_warnings(record=True):
            pd.json.dumps([])
class TestParser(object):
    """The deprecated ``pd.parser`` shim must remain accessible (warning suppressed)."""
    def test_deprecation_access_func(self):
        with catch_warnings(record=True):
            pd.parser.na_values
class TestLib(object):
    """The deprecated ``pd.lib`` shim must remain callable (warning suppressed)."""
    def test_deprecation_access_func(self):
        with catch_warnings(record=True):
            pd.lib.infer_dtype('foo')
class TestTSLib(object):
    """The deprecated ``pd.tslib`` shim must remain callable (warning suppressed)."""
    def test_deprecation_access_func(self):
        with catch_warnings(record=True):
            pd.tslib.Timestamp('20160101')
class TestTypes(object):
    """Importing from the legacy ``pandas.types`` location must warn but still work."""
    def test_deprecation_access_func(self):
        with tm.assert_produces_warning(
                FutureWarning, check_stacklevel=False):
            from pandas.types.concat import union_categoricals
            c1 = pd.Categorical(list('aabc'))
            c2 = pd.Categorical(list('abcd'))
            # The relocated function must still behave normally.
            union_categoricals(
                [c1, c2],
                sort_categories=True,
                ignore_order=True)
class TestCDateRange(object):
    """``cdate_range`` is deprecated and must emit a FutureWarning."""
    def test_deprecation_cdaterange(self):
        # GH17596
        from pandas.core.indexes.datetimes import cdate_range
        with tm.assert_produces_warning(FutureWarning,
                                        check_stacklevel=False):
            cdate_range('2017-01-01', '2017-12-31')
class TestCategoricalMove(object):
    """Importing from the old ``pandas.core.categorical`` path must warn."""
    def test_categorical_move(self):
        # May have been cached by another import, e.g. pickle tests.
        sys.modules.pop("pandas.core.categorical", None)
        with tm.assert_produces_warning(FutureWarning):
            from pandas.core.categorical import Categorical  # noqa
| 33.679389
| 78
| 0.579102
|
4a10ed6ee4b64cb95b94c25085ffc029731a83ab
| 1,124
|
py
|
Python
|
thingsboard_gateway/connectors/connector.py
|
pticaTor/thingsboard-gateway
|
c56e9bd382693d6fb8b48135fd095b254ade56ec
|
[
"Apache-2.0"
] | null | null | null |
thingsboard_gateway/connectors/connector.py
|
pticaTor/thingsboard-gateway
|
c56e9bd382693d6fb8b48135fd095b254ade56ec
|
[
"Apache-2.0"
] | null | null | null |
thingsboard_gateway/connectors/connector.py
|
pticaTor/thingsboard-gateway
|
c56e9bd382693d6fb8b48135fd095b254ade56ec
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2020. ThingsBoard
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
from abc import ABC, abstractmethod
log = logging.getLogger("connector")
class Connector(ABC):
    """Abstract base class that every gateway connector must implement."""

    @abstractmethod
    def open(self):
        """Start the connector."""

    @abstractmethod
    def close(self):
        """Stop the connector and release its resources."""

    @abstractmethod
    def get_name(self):
        """Return the connector's configured name."""

    @abstractmethod
    def is_connected(self):
        """Return whether the connector currently has a live connection."""

    @abstractmethod
    def on_attributes_update(self, content):
        """Handle an attributes-update message (*content*) from the platform."""

    @abstractmethod
    def server_side_rpc_handler(self, content):
        """Handle a server-side RPC request (*content*) from the platform."""
| 24.434783
| 78
| 0.679715
|
4a10ed72ceb8835b23c3c3344ff19988b0aa7f61
| 1,190
|
py
|
Python
|
setup.py
|
Bogdanp/fargate_scraper
|
6c94e531bfb88b9aa5b530490b4bae94e91f346b
|
[
"Apache-2.0"
] | 6
|
2018-07-18T09:59:17.000Z
|
2021-07-05T05:45:26.000Z
|
setup.py
|
Bogdanp/fargate_scraper
|
6c94e531bfb88b9aa5b530490b4bae94e91f346b
|
[
"Apache-2.0"
] | 2
|
2018-08-22T18:59:58.000Z
|
2019-08-19T13:58:25.000Z
|
setup.py
|
Bogdanp/fargate_scraper
|
6c94e531bfb88b9aa5b530490b4bae94e91f346b
|
[
"Apache-2.0"
] | null | null | null |
import os
from setuptools import setup
def rel(*xs):
    """Return an absolute path built from components *xs*, relative to this file's directory."""
    base = os.path.abspath(os.path.dirname(__file__))
    return os.path.join(base, *xs)
# Extract __version__ from fargate_scraper.py without importing the module
# (importing could trigger side effects or unmet dependencies at build time).
with open(rel("fargate_scraper.py"), "r") as f:
    version_marker = "__version__ = "
    for line in f:
        if line.startswith(version_marker):
            _, version = line.split(version_marker)
            # Strip the trailing newline and the surrounding double quotes.
            version = version.strip().strip('"')
            break
    else:
        # for/else: no line matched the marker.
        raise RuntimeError("Version marker not found.")
setup(
    name="fargate_scraper",
    version=version,
    description="A CLI utility for scraping metrics endpoints from AWS Fargate.",
    long_description="Visit https://github.com/Bogdanp/fargate_scraper for more information.",
    # Single-module distribution: no packages, just the one .py file.
    packages=[],
    py_modules=["fargate_scraper"],
    install_requires=["boto3"],
    extras_require={
        "dev": [
            "bumpversion",
            "flake8",
            "flake8-quotes",
            "isort",
            "pytest",
            "pytest-cov",
            "twine",
        ]
    },
    python_requires=">=3.5",
    entry_points={
        "console_scripts": [
            "fargate-scraper = fargate_scraper:main",
        ],
    },
    include_package_data=True,
)
| 24.791667
| 94
| 0.583193
|
4a10ed88cf61f6261892c41625306b4123a8c336
| 2,947
|
py
|
Python
|
dilema.py
|
DevAlek/PrisionDilemma
|
447872b491df6b6be4b069f73abe76781665a946
|
[
"MIT"
] | null | null | null |
dilema.py
|
DevAlek/PrisionDilemma
|
447872b491df6b6be4b069f73abe76781665a946
|
[
"MIT"
] | null | null | null |
dilema.py
|
DevAlek/PrisionDilemma
|
447872b491df6b6be4b069f73abe76781665a946
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
from types import MethodType as method
from __init__ import loadStrategies
def align(string: str) -> str:
    """Return *string* with every ': ' separator turned into a colon followed by a tab."""
    return ':\t'.join(string.split(': '))
class Tournament:
    """Round-robin iterated prisoner's-dilemma tournament between loaded strategies."""

    class Battle:
        """One head-to-head series of `duration` rounds between two strategies."""

        def __init__(self, strategy1: method, strategy2: method, duration: int = 30, price: int = 5, cost: int = 1) -> object:
            self.strategy1 = strategy1
            self.strategy2 = strategy2
            self.duration = duration
            # Display names derived from the strategy function names,
            # e.g. 'strategies.tit_for_tat' -> 'Tit for tat'.
            self.name1 = self.strategy1.__name__.replace('_', ' ').replace('strategies.', '').capitalize()
            self.name2 = self.strategy2.__name__.replace('_', ' ').replace('strategies.', '').capitalize()
            self.price = price  # points a player earns when its opponent cooperates
            self.cost = cost    # points a player earns for its own defection
            self.string = ''
            self.rank = {name: 0 for name in (self.name1, self.name2)}
            # Index 0 holds player 1's history, index 1 player 2's; each list is
            # seeded with one neutral entry so strategies always see a past round.
            self.memory = ([''], [''])
            self.turns = ([True], [True])
            self.game = {}

        def start(self) -> None:
            """Play every round, then fill self.rank, self.game and self.string."""
            for day in range(self.duration):
                # Each strategy sees its opponent's move history and its own memory,
                # and returns a (cooperate: bool, memory_note: str) pair.
                today1 = self.strategy1.run(self.turns[1], self.memory[0])
                today2 = self.strategy2.run(self.turns[0], self.memory[1])
                self.turns[0].append(today1[0])
                self.turns[1].append(today2[0])
                self.memory[0].append(today1[1])
                self.memory[1].append(today2[1])
                # Defensive re-initialisation; names are already present from __init__.
                if not self.name1 in self.rank:
                    self.rank[self.name1] = 0
                if not self.name2 in self.rank:
                    self.rank[self.name2] = 0
                # Score: `price` when the opponent cooperated this round,
                # plus `cost` when the player itself defected.
                self.rank[self.name1] += (self.price * self.turns[1][-1]) + (self.cost * (not self.turns[0][-1]))
                self.rank[self.name2] += (self.price * self.turns[0][-1]) + (self.cost * (not self.turns[1][-1]))
            self.game = {'turns': {self.name1: self.turns[0], self.name2: self.turns[1]}, 'rank': self.rank}
            # Build a "Name: score" listing, highest score first.
            temp = [f'{name}: {self.rank[name]}' for name in self.rank]
            temp.sort(key=(lambda x: int(x.split(': ')[-1])), reverse=True)
            # NOTE(review): strList is not defined in this module -- presumably a
            # text-formatting helper; confirm where it should be imported from.
            self.string = strList('\n'.join(temp), 10)
            del temp

    def __init__(self, duration: int = 30, price: int = 5, cost: int = 1) -> object:
        self.duration = duration
        self.strategies = []
        self.rank = {}
        self.game = {}
        self.string = ''
        self.price = price
        self.cost = cost

    def getStrategies(self) -> None:
        """Load all strategy callables via loadStrategies()."""
        self.strategies = loadStrategies()

    def new(self) -> None:
        """Run a full round-robin: every ordered pair of distinct strategies battles once."""
        self.getStrategies()
        for strategy1 in self.strategies:
            for strategy2 in self.strategies:
                if strategy1 == strategy2: continue
                battle = Tournament.Battle(strategy1, strategy2, self.duration, self.price, self.cost)
                battle.start()
                if not battle.name1 in self.rank:
                    self.rank[battle.name1] = 0
                if not battle.name2 in self.rank:
                    self.rank[battle.name2] = 0
                # Accumulate each player's battle score into the tournament total.
                self.rank[battle.name1] += battle.rank[battle.name1]
                self.rank[battle.name2] += battle.rank[battle.name2]
                self.game[f'{battle.name1} x {battle.name2}'] = battle.game['turns']
                del battle
        temp = [f'{name}: {self.rank[name]}' for name in self.rank]
        temp.sort(key=(lambda x: int(x.split(': ')[-1])), reverse=True)
        # NOTE(review): strList is not defined in this module; confirm its origin.
        self.string = strList('\n'.join(temp), 10)
        del temp

    def show(self) -> None:
        """Print the formatted ranking.

        Fixed: the original declared ``def show()`` without ``self``, so
        ``tournament.show()`` raised TypeError and ``self`` was unresolved.
        """
        print(self.string)
| 31.688172
| 120
| 0.638276
|
4a10ee2d453fafc02e338f73f4d7a90d27cbed01
| 549
|
py
|
Python
|
setup.py
|
LucasAlegre/rl-visualization
|
b20506c3417190576156e77a9dac5d872b99ffa3
|
[
"MIT"
] | null | null | null |
setup.py
|
LucasAlegre/rl-visualization
|
b20506c3417190576156e77a9dac5d872b99ffa3
|
[
"MIT"
] | null | null | null |
setup.py
|
LucasAlegre/rl-visualization
|
b20506c3417190576156e77a9dac5d872b99ffa3
|
[
"MIT"
] | null | null | null |
from setuptools import setup, find_packages
# Runtime dependencies installed alongside the package.
# NOTE(review): find_packages is imported but unused -- packages are listed
# explicitly in setup() below.
REQUIRED = ['gym', 'numpy', 'pandas', 'matplotlib', 'seaborn', 'flask']
# Use the README as the long description shown on PyPI.
with open("README.md", "r") as fh:
    long_description = fh.read()
setup(
    name='rl-visualization',
    version='0.1',
    packages=['rl_visualization',],
    install_requires=REQUIRED,
    author='LucasAlegre',
    author_email='lucasnale@gmail.com',
    long_description=long_description,
    url='https://github.com/LucasAlegre/rl-visualization',
    license="MIT",
    description='Reinforcement Learning Visualization.'
)
| 28.894737
| 71
| 0.695811
|
4a10eea9ab8a30e4cb9cffae378036bcb87eaf95
| 3,969
|
py
|
Python
|
ucsmsdk/mometa/cimcvmedia/CimcvmediaMountConfigPolicy.py
|
anoop1984/python_sdk
|
c4a226bad5e10ad233eda62bc8f6d66a5a82b651
|
[
"Apache-2.0"
] | null | null | null |
ucsmsdk/mometa/cimcvmedia/CimcvmediaMountConfigPolicy.py
|
anoop1984/python_sdk
|
c4a226bad5e10ad233eda62bc8f6d66a5a82b651
|
[
"Apache-2.0"
] | null | null | null |
ucsmsdk/mometa/cimcvmedia/CimcvmediaMountConfigPolicy.py
|
anoop1984/python_sdk
|
c4a226bad5e10ad233eda62bc8f6d66a5a82b651
|
[
"Apache-2.0"
] | null | null | null |
"""This module contains the general information for CimcvmediaMountConfigPolicy ManagedObject."""
import sys, os
from ...ucsmo import ManagedObject
from ...ucscoremeta import UcsVersion, MoPropertyMeta, MoMeta
from ...ucsmeta import VersionMeta
class CimcvmediaMountConfigPolicyConsts():
    """String constants for CimcvmediaMountConfigPolicy property values."""
    # Allowed value for the `int_id` property.
    INT_ID_NONE = "none"
    # Allowed values for the `policy_owner` property.
    POLICY_OWNER_LOCAL = "local"
    POLICY_OWNER_PENDING_POLICY = "pending-policy"
    POLICY_OWNER_POLICY = "policy"
    # Allowed values for the `retry_on_mount_fail` property.
    RETRY_ON_MOUNT_FAIL_NO = "no"
    RETRY_ON_MOUNT_FAIL_YES = "yes"
class CimcvmediaMountConfigPolicy(ManagedObject):
    """This is CimcvmediaMountConfigPolicy class."""
    consts = CimcvmediaMountConfigPolicyConsts()
    # Properties that form the relative name "mnt-cfg-policy-[name]".
    naming_props = set([u'name'])
    # Managed-object metadata: class name, XML tag, RN pattern, introduced
    # version, access, flags, privileges, parents, children, verbs.
    mo_meta = MoMeta("CimcvmediaMountConfigPolicy", "cimcvmediaMountConfigPolicy", "mnt-cfg-policy-[name]", VersionMeta.Version222c, "InputOutput", 0x1ff, [], ["admin", "ls-compute", "ls-config", "ls-config-policy", "ls-server", "ls-server-policy", "ls-storage", "ls-storage-policy"], [u'orgOrg'], [u'cimcvmediaConfigMountEntry'], ["Add", "Get", "Remove", "Set"])
    # Per-property metadata: type, access, mask, length bounds, regex and
    # allowed/range values as enforced by the UCS model.
    prop_meta = {
        "child_action": MoPropertyMeta("child_action", "childAction", "string", VersionMeta.Version222c, MoPropertyMeta.INTERNAL, 0x2, None, None, r"""((deleteAll|ignore|deleteNonPresent),){0,2}(deleteAll|ignore|deleteNonPresent){0,1}""", [], []),
        "descr": MoPropertyMeta("descr", "descr", "string", VersionMeta.Version222c, MoPropertyMeta.READ_WRITE, 0x4, None, None, r"""[ !#$%&\(\)\*\+,\-\./:;\?@\[\]_\{\|\}~a-zA-Z0-9]{0,256}""", [], []),
        "dn": MoPropertyMeta("dn", "dn", "string", VersionMeta.Version222c, MoPropertyMeta.READ_ONLY, 0x8, 0, 256, None, [], []),
        "int_id": MoPropertyMeta("int_id", "intId", "string", VersionMeta.Version222c, MoPropertyMeta.INTERNAL, None, None, None, None, ["none"], ["0-4294967295"]),
        "name": MoPropertyMeta("name", "name", "string", VersionMeta.Version222c, MoPropertyMeta.NAMING, 0x10, None, None, r"""[\-\.:_a-zA-Z0-9]{1,16}""", [], []),
        "policy_level": MoPropertyMeta("policy_level", "policyLevel", "uint", VersionMeta.Version222c, MoPropertyMeta.READ_ONLY, None, None, None, None, [], []),
        "policy_owner": MoPropertyMeta("policy_owner", "policyOwner", "string", VersionMeta.Version222c, MoPropertyMeta.READ_WRITE, 0x20, None, None, None, ["local", "pending-policy", "policy"], []),
        "retry_on_mount_fail": MoPropertyMeta("retry_on_mount_fail", "retryOnMountFail", "string", VersionMeta.Version222c, MoPropertyMeta.READ_WRITE, 0x40, None, None, None, ["no", "yes"], []),
        "rn": MoPropertyMeta("rn", "rn", "string", VersionMeta.Version222c, MoPropertyMeta.READ_ONLY, 0x80, 0, 256, None, [], []),
        "sacl": MoPropertyMeta("sacl", "sacl", "string", VersionMeta.Version302a, MoPropertyMeta.READ_ONLY, None, None, None, r"""((none|del|mod|addchild|cascade),){0,4}(none|del|mod|addchild|cascade){0,1}""", [], []),
        "status": MoPropertyMeta("status", "status", "string", VersionMeta.Version222c, MoPropertyMeta.READ_WRITE, 0x100, None, None, r"""((removed|created|modified|deleted),){0,3}(removed|created|modified|deleted){0,1}""", [], []),
    }
    # Map from UCS XML attribute names to Python attribute names.
    prop_map = {
        "childAction": "child_action",
        "descr": "descr",
        "dn": "dn",
        "intId": "int_id",
        "name": "name",
        "policyLevel": "policy_level",
        "policyOwner": "policy_owner",
        "retryOnMountFail": "retry_on_mount_fail",
        "rn": "rn",
        "sacl": "sacl",
        "status": "status",
    }
    def __init__(self, parent_mo_or_dn, name, **kwargs):
        # `name` is the naming property; remaining properties start unset and
        # may be supplied via **kwargs (handled by ManagedObject).
        self._dirty_mask = 0
        self.name = name
        self.child_action = None
        self.descr = None
        self.int_id = None
        self.policy_level = None
        self.policy_owner = None
        self.retry_on_mount_fail = None
        self.sacl = None
        self.status = None
        ManagedObject.__init__(self, "CimcvmediaMountConfigPolicy", parent_mo_or_dn, **kwargs)
| 58.367647
| 363
| 0.654825
|
4a10eefbc0876c134c10d3843b87112c1b06ee27
| 8,153
|
py
|
Python
|
contrib/devtools/update-translations.py
|
we-conceived-the/ball
|
e313455fdcb5b3a06076fbe5053bef9a57f4483f
|
[
"MIT"
] | null | null | null |
contrib/devtools/update-translations.py
|
we-conceived-the/ball
|
e313455fdcb5b3a06076fbe5053bef9a57f4483f
|
[
"MIT"
] | null | null | null |
contrib/devtools/update-translations.py
|
we-conceived-the/ball
|
e313455fdcb5b3a06076fbe5053bef9a57f4483f
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
# Copyright (c) 2014 Wladimir J. van der Laan
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
'''
Run this script from the root of the repository to update all translations from
transifex.
It will do the following automatically:
- fetch all translations using the tx tool
- post-process them into valid and committable format
- remove invalid control characters
- remove location tags (makes diffs less noisy)
TODO:
- auto-add new translations to the build system according to the translation process
'''
from __future__ import division, print_function
import subprocess
import re
import sys
import os
import io
import xml.etree.ElementTree as ET
# Name of the transifex command-line tool
TX = 'tx'
# Name of the source language file (skipped during post-processing)
SOURCE_LANG = 'ball_en.ts'
# Directory with locale files
LOCALE_DIR = 'src/qt/locale'
# Minimum number of messages for a translation to be kept at all
MIN_NUM_MESSAGES = 10
def check_at_repository_root():
    """Exit with status 1 unless the current directory is the repository root (has .git)."""
    if os.path.exists('.git'):
        return
    print('No .git directory found')
    print('Execute this script at the root of the repository', file=sys.stderr)
    exit(1)
def fetch_all_translations():
    """Pull every translation from Transifex via the `tx` tool; exit(1) on failure."""
    rc = subprocess.call([TX, 'pull', '-f', '-a'])
    if rc:
        print('Error while fetching translations', file=sys.stderr)
        exit(1)
def find_format_specifiers(s):
    '''Find all format specifiers in a string.

    Returns the single character following each '%' in *s*, in order of
    appearance. A lone '%' at the very end of the string has no following
    character and is skipped (with a diagnostic), as before.
    '''
    pos = 0
    specifiers = []
    while True:
        percent = s.find('%', pos)
        if percent < 0:
            break
        try:
            specifiers.append(s[percent + 1])
        except IndexError:
            # Fixed: the original bare `except:` swallowed *every* exception
            # (including KeyboardInterrupt/SystemExit); only a trailing '%'
            # can legitimately raise here, and it raises IndexError.
            print('Failed to get specifier')
        pos = percent + 2
    return specifiers
def split_format_specifiers(specifiers):
    '''Split format specifiers between numeric (Qt) and others (strprintf)'''
    digit_chars = set('123456789')
    numeric = [spec for spec in specifiers if spec in digit_chars]
    other = [spec for spec in specifiers if spec not in digit_chars]
    # If both numeric format specifiers and "others" are used, assume we're dealing
    # with a Qt-formatted message. In the case of Qt formatting (see https://doc.qt.io/qt-5/qstring.html#arg)
    # only numeric formats are replaced at all. This means "(percentage: %1%)" is valid, without needing
    # any kind of escaping that would be necessary for strprintf. Without this, this function
    # would wrongly detect '%)' as a printf format specifier.
    if numeric:
        other = []
    # numeric (Qt) can be present in any order, others (strprintf) must be in specified order
    return set(numeric), other
def sanitize_string(s):
    '''Sanitize string for printing'''
    # Collapse newlines to spaces so messages stay on one log line.
    return ' '.join(s.split('\n'))
def check_format_specifiers(source, translation, errors, numerus):
    """Return True when *translation*'s format specifiers match *source*'s.

    Appends a human-readable message to *errors* on parse failure or mismatch.
    """
    source_f = split_format_specifiers(find_format_specifiers(source))
    # assert that no source messages contain both Qt and strprintf format specifiers
    # if this fails, go change the source as this is hacky and confusing!
    #assert(not(source_f[0] and source_f[1]))
    try:
        translation_f = split_format_specifiers(find_format_specifiers(translation))
    except IndexError:
        errors.append("Parse error in translation for '%s': '%s'" % (sanitize_string(source), sanitize_string(translation)))
        return False
    if source_f == translation_f:
        return True
    # Allow numerus translations to omit %n specifier (usually when it only has one possible value)
    if numerus and source_f == (set(), ['n']) and translation_f == (set(), []) and translation.find('%') == -1:
        return True
    errors.append("Mismatch between '%s' and '%s'" % (sanitize_string(source), sanitize_string(translation)))
    return False
def all_ts_files(suffix=''):
    """Yield (filename, filepath) for each translation .ts file in LOCALE_DIR.

    Only files ending in '.ts' + *suffix* are considered; the source language
    file is skipped, and *suffix* is stripped from the yielded filename.
    """
    wanted_ending = '.ts' + suffix
    for entry in os.listdir(LOCALE_DIR):
        # process only language files, and do not process source language
        if not entry.endswith(wanted_ending) or entry == SOURCE_LANG + suffix:
            continue
        if suffix:  # remove provided suffix
            entry = entry[0:-len(suffix)]
        yield (entry, os.path.join(LOCALE_DIR, entry))
# Control characters stripped from translation files before XML parsing.
# Note: the range also removes TAB (\x09) but keeps LF (\x0a) and CR (\x0d).
FIX_RE = re.compile(b'[\x00-\x09\x0b\x0c\x0e-\x1f]')
def remove_invalid_characters(s):
    '''Remove invalid characters from translation string'''
    cleaned = FIX_RE.sub(b'', s)
    return cleaned
# Override cdata escape function to make our output match Qt's (optional, just for cleaner diffs for
# comparison, disable by default)
# Holds the original ET._escape_cdata once the override is installed (see
# postprocess_translations with reduce_diff_hacks=True).
_orig_escape_cdata = None
def escape_cdata(text):
    """Escape character data like Qt: run the original ET escaper, then also escape quotes."""
    text = _orig_escape_cdata(text)
    text = text.replace("'", '&apos;')
    text = text.replace('"', '&quot;')
    return text
def postprocess_translations(reduce_diff_hacks=False):
    """Validate and clean all fetched .ts files in LOCALE_DIR.

    Renames each file to *.orig, strips invalid control characters, clears
    translations whose format specifiers don't match the source, removes
    location tags and unfinished messages, drops near-empty files, and
    writes the cleaned XML back. Returns True if any specifier errors were
    found. With reduce_diff_hacks=True, output XML is normalized to match
    Qt's own formatting for cleaner diffs.
    """
    print('Checking and postprocessing...')
    if reduce_diff_hacks:
        global _orig_escape_cdata
        _orig_escape_cdata = ET._escape_cdata
        ET._escape_cdata = escape_cdata
    # Work from renamed originals so the cleaned file can replace the old one.
    for (filename,filepath) in all_ts_files():
        os.rename(filepath, filepath+'.orig')
    have_errors = False
    for (filename,filepath) in all_ts_files('.orig'):
        # pre-fixups to cope with transifex output
        parser = ET.XMLParser(encoding='utf-8') # need to override encoding because 'utf8' is not understood only 'utf-8'
        with open(filepath + '.orig', 'rb') as f:
            data = f.read()
        # remove control characters; this must be done over the entire file otherwise the XML parser will fail
        data = remove_invalid_characters(data)
        tree = ET.parse(io.BytesIO(data), parser=parser)
        # iterate over all messages in file
        root = tree.getroot()
        for context in root.findall('context'):
            for message in context.findall('message'):
                numerus = message.get('numerus') == 'yes'
                source = message.find('source').text
                translation_node = message.find('translation')
                # pick all numerusforms
                if numerus:
                    translations = [i.text for i in translation_node.findall('numerusform')]
                else:
                    translations = [translation_node.text]
                for translation in translations:
                    if translation is None:
                        continue
                    errors = []
                    valid = check_format_specifiers(source, translation, errors, numerus)
                    for error in errors:
                        print('%s: %s' % (filename, error))
                    if not valid: # set type to unfinished and clear string if invalid
                        translation_node.clear()
                        translation_node.set('type', 'unfinished')
                        have_errors = True
                # Remove location tags
                for location in message.findall('location'):
                    message.remove(location)
                # Remove entire message if it is an unfinished translation
                if translation_node.get('type') == 'unfinished':
                    context.remove(message)
        # check if document is (virtually) empty, and remove it if so
        # (the cleaned file is simply never written; only the .orig remains)
        num_messages = 0
        for context in root.findall('context'):
            for message in context.findall('message'):
                num_messages += 1
        if num_messages < MIN_NUM_MESSAGES:
            print('Removing %s, as it contains only %i messages' % (filepath, num_messages))
            continue
        # write fixed-up tree
        # if diff reduction requested, replace some XML to 'sanitize' to qt formatting
        if reduce_diff_hacks:
            out = io.BytesIO()
            tree.write(out, encoding='utf-8')
            out = out.getvalue()
            out = out.replace(b' />', b'/>')
            with open(filepath, 'wb') as f:
                f.write(out)
        else:
            tree.write(filepath, encoding='utf-8')
    return have_errors
if __name__ == '__main__':
    check_at_repository_root()
    # Fetching from Transifex is disabled here; run it manually when needed.
    # fetch_all_translations()
    postprocess_translations()
| 38.457547
| 124
| 0.633509
|
4a10efe7ea780bc2be077d29259028ecaf97d542
| 6,458
|
py
|
Python
|
zaqar/tests/tempest_plugin/api_schema/response/v2/queues.py
|
ISCAS-VDI/zaqar
|
5c1aedbef1930565a46cc60b1a9d5d5e238f174d
|
[
"Apache-2.0"
] | null | null | null |
zaqar/tests/tempest_plugin/api_schema/response/v2/queues.py
|
ISCAS-VDI/zaqar
|
5c1aedbef1930565a46cc60b1a9d5d5e238f174d
|
[
"Apache-2.0"
] | null | null | null |
zaqar/tests/tempest_plugin/api_schema/response/v2/queues.py
|
ISCAS-VDI/zaqar
|
5c1aedbef1930565a46cc60b1a9d5d5e238f174d
|
[
"Apache-2.0"
] | null | null | null |
# Copyright (c) 2016 HuaWei, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Schema for one entry of a 'links' array (pagination/navigation link).
list_link = {
    'type': 'object',
    'properties': {
        'rel': {'type': 'string'},
        'href': {
            'type': 'string',
            'format': 'uri'
        }
    },
    'required': ['href', 'rel']
}
# Schema for a single queue entry in a queue listing.
list_queue = {
    'type': 'object',
    'properties': {
        'name': {'type': 'string'},
        'href': {
            'type': 'string',
            'format': 'uri'
        },
        'metadata': {'type': 'object'}
    },
    'required': ['name', 'href']
}
# Expected response for listing queues (200, or 204 when empty).
list_queues = {
    'status_code': [200, 204],
    'response_body': {
        'type': 'object',
        'properties': {
            'links': {
                'type': 'array',
                'items': list_link,
                'maxItems': 1
            },
            'queues': {
                'type': 'array',
                'items': list_queue
            }
        },
        'required': ['links', 'queues']
    }
}
# Message age in seconds; never negative.
age = {
    'type': 'number',
    'minimum': 0
}
# Link to the oldest/newest message inside queue stats.
message_link = {
    'type': 'object',
    'properties': {
        'href': {
            'type': 'string',
            'format': 'uri'
        },
        'age': age,
        'created': {
            'type': 'string',
            'format': 'date-time'
        }
    },
    'required': ['href', 'age', 'created']
}
# Per-queue message counters; 'oldest'/'newest' links are optional.
messages = {
    'type': 'object',
    'properties': {
        'free': {'type': 'number'},
        'claimed': {'type': 'number'},
        'total': {'type': 'number'},
        'oldest': message_link,
        'newest': message_link
    },
    'required': ['free', 'claimed', 'total']
}
# Expected response for the queue-stats endpoint.
queue_stats = {
    'status_code': [200],
    'response_body': {
        'type': 'object',
        'properties': {
            'messages': messages
        },
        'required': ['messages']
    }
}
# Non-empty list of resource href strings (e.g. created message URIs).
resource_schema = {
    'type': 'array',
    'items': {
        'type': 'string'
    },
    'minItems': 1
}
# Expected response for posting messages to a queue.
# Fixed: 'required' was placed at the top level (sibling of 'response_body'),
# where the body validator never sees it; every other schema in this module
# keeps 'required' inside 'response_body', so move it there.
post_messages = {
    'status_code': [201],
    'response_body': {
        'type': 'object',
        'properties': {
            'resources': resource_schema,
            'partial': {'type': 'boolean'}
        },
        'required': ['resources', 'partial']
    }
}
# Message time-to-live in seconds; at least 1.
message_ttl = {
    'type': 'number',
    'minimum': 1
}
# Exactly one pagination link for a message listing.
list_messages_links = {
    'type': 'array',
    'maxItems': 1,
    'minItems': 1,
    'items': {
        'type': 'object',
        'properties': {
            'rel': {'type': 'string'},
            'href': {'type': 'string'}
        },
        'required': ['rel', 'href']
    }
}
# Non-empty array of messages returned by a listing.
list_messages_response = {
    'type': 'array',
    'minItems': 1,
    'items': {
        'type': 'object',
        'properties': {
            'href': {'type': 'string'},
            'ttl': message_ttl,
            'age': age,
            'body': {'type': 'object'}
        },
        'required': ['href', 'ttl', 'age', 'body']
    }
}
# Expected response for listing messages (200, or 204 when empty).
# Fixed: 'required' was placed at the top level (sibling of 'response_body'),
# where the body validator never sees it; moved inside 'response_body' to
# match every other schema in this module.
list_messages = {
    'status_code': [200, 204],
    'response_body': {
        'type': 'object',
        'properties': {
            'links': list_messages_links,
            'messages': list_messages_response
        },
        'required': ['links', 'messages']
    }
}
# Schema for one message including its server-assigned id.
single_message = {
    'type': 'object',
    'properties': {
        'href': {'type': 'string'},
        'ttl': message_ttl,
        'age': age,
        'body': {'type': 'object'},
        'id': {'type': 'string'}
    },
    'required': ['href', 'ttl', 'age', 'body', 'id']
}
# Expected response when fetching one message by id.
get_single_message = {
    'status_code': [200],
    'response_body': single_message
}
# Expected response when fetching several messages by id.
get_multiple_messages = {
    'status_code': [200],
    'response_body': {
        'type': 'object',
        'properties': {
            'messages': {
                "type": "array",
                "items": single_message,
                "minItems": 1,
            }
        }
    }
}
# Schema for a message returned inside a claim.
# NOTE(review): not referenced by claim_messages below (which reuses
# single_message); possibly dead or used elsewhere -- confirm.
messages_claimed = {
    'type': 'object',
    'properties': {
        'href': {
            'type': 'string',
            'format': 'uri'
        },
        'ttl': message_ttl,
        'age': {'type': 'number'},
        'body': {'type': 'object'},
        'id': {'type': 'string'}
    },
    'required': ['href', 'ttl', 'age', 'body', 'id']
}
# Expected response when claiming messages (204 when nothing to claim).
claim_messages = {
    'status_code': [201, 204],
    'response_body': {
        'type': 'object',
        'properties': {
            'messages': {
                "type": "array",
                "items": single_message,
                "minItems": 1,
            }
        }
    }
}
# Claim time-to-live in seconds; at least 1.
claim_ttl = {
    'type': 'number',
    'minimum': 1
}
# Expected response when querying an existing claim.
query_claim = {
    'status_code': [200],
    'response_body': {
        'type': 'object',
        'properties': {
            'age': {'type': 'number'},
            'ttl': claim_ttl,
            'messages': {
                'type': 'array',
                'minItems': 1
            }
        },
        'required': ['ttl', 'age', 'messages']
    }
}
# Expected response when creating a subscription.
create_subscription = {
    'status_code': [201],
    'response_body': {
        'type': 'object',
        'properties': {
            'subscription_id': {'type': 'string'},
        },
        'required': ['subscription_id']
    }
}
# Schema for one subscription object.
single_subscription = {
    'type': 'object',
    'properties': {
        'subscriber': {'type': 'string'},
        'source': {'type': 'string'},
        'options': {'type': 'object'},
        'id': {'type': 'string'},
        'ttl': message_ttl,
    },
    'required': ['subscriber', 'source', 'options', 'id', 'ttl']
}
# Expected response when showing one subscription.
show_single_subscription = {
    'status_code': [200],
    'response_body': single_subscription
}
# Expected response when listing subscriptions.
list_subscriptions = {
    'status_code': [200],
    'response_body': {
        'type': 'object',
        'properties': {
            'subscriptions': {
                "type": "array",
                "items": single_subscription,
            },
            'links': {
                'type': 'array',
                'items': list_link,
                'maxItems': 1
            },
        },
        'required': ['subscriptions', 'links']
    }
}
| 21.671141
| 69
| 0.452152
|
4a10f0a0c900cff2098af22630589997685d8475
| 6,571
|
py
|
Python
|
pynucastro/nucdata/elements.py
|
zingale/pynucastro
|
85b027c0d584046e749888e79e78c611bbf69cb4
|
[
"BSD-3-Clause"
] | 6
|
2016-03-31T20:51:07.000Z
|
2018-01-04T19:30:41.000Z
|
pynucastro/nucdata/elements.py
|
zingale/pynucastro
|
85b027c0d584046e749888e79e78c611bbf69cb4
|
[
"BSD-3-Clause"
] | 19
|
2016-03-16T10:03:18.000Z
|
2018-01-18T18:30:36.000Z
|
pynucastro/nucdata/elements.py
|
zingale/pynucastro
|
85b027c0d584046e749888e79e78c611bbf69cb4
|
[
"BSD-3-Clause"
] | 2
|
2016-03-29T14:42:27.000Z
|
2017-03-27T20:19:11.000Z
|
class Element(object):
    """A chemical element: symbol abbreviation, full name, and atomic number Z."""

    def __init__(self, abbreviation=None, name=None, Z=None):
        # All fields default to None so partially-specified elements are allowed.
        self.abbreviation, self.name, self.Z = abbreviation, name, Z
class UnidentifiedElement(Exception):
    """Raised when an element abbreviation cannot be found in the periodic table.

    Fixed: user-defined exceptions should derive from Exception, not
    BaseException (which is reserved for interpreter-level exits like
    KeyboardInterrupt/SystemExit); the empty __init__ was redundant.
    """
class PeriodicTable(object):
    """Static lookup table mapping element symbols and atomic numbers (Z) to
    :class:`Element` instances. All access is via classmethods; no instance
    is required."""

    # Keyed by lowercase symbol; covers Z = 1..118.
    table = {'h': Element('h', 'hydrogen', 1),
             'he': Element('he', 'helium', 2),
             'li': Element('li', 'lithium', 3),
             'be': Element('be', 'beryllium', 4),
             'b': Element('b', 'boron', 5),
             'c': Element('c', 'carbon', 6),
             'n': Element('n', 'nitrogen', 7),
             'o': Element('o', 'oxygen', 8),
             'f': Element('f', 'fluorine', 9),
             'ne': Element('ne', 'neon', 10),
             'na': Element('na', 'sodium', 11),
             'mg': Element('mg', 'magnesium', 12),
             'al': Element('al', 'aluminum', 13),
             'si': Element('si', 'silicon', 14),
             'p': Element('p', 'phosphorus', 15),
             's': Element('s', 'sulfur', 16),
             'cl': Element('cl', 'chlorine', 17),
             'ar': Element('ar', 'argon', 18),
             'k': Element('k', 'potassium', 19),
             'ca': Element('ca', 'calcium', 20),
             'sc': Element('sc', 'scandium', 21),
             'ti': Element('ti', 'titanium', 22),
             'v': Element('v', 'vanadium', 23),
             'cr': Element('cr', 'chromium', 24),
             'mn': Element('mn', 'manganese', 25),
             'fe': Element('fe', 'iron', 26),
             'co': Element('co', 'cobalt', 27),
             'ni': Element('ni', 'nickel', 28),
             'cu': Element('cu', 'copper', 29),
             'zn': Element('zn', 'zinc', 30),
             'ga': Element('ga', 'gallium', 31),
             'ge': Element('ge', 'germanium', 32),
             'as': Element('as', 'arsenic', 33),
             'se': Element('se', 'selenium', 34),
             'br': Element('br', 'bromine', 35),
             'kr': Element('kr', 'krypton', 36),
             'rb': Element('rb', 'rubidium', 37),
             'sr': Element('sr', 'strontium', 38),
             'y': Element('y', 'yttrium', 39),
             'zr': Element('zr', 'zirconium', 40),
             'nb': Element('nb', 'niobium', 41),
             'mo': Element('mo', 'molybdenum', 42),
             'tc': Element('tc', 'technetium', 43),
             'ru': Element('ru', 'ruthenium', 44),
             'rh': Element('rh', 'rhodium', 45),
             'pd': Element('pd', 'palladium', 46),
             'ag': Element('ag', 'silver', 47),
             'cd': Element('cd', 'cadmium', 48),
             'in': Element('in', 'indium', 49),
             'sn': Element('sn', 'tin', 50),
             'sb': Element('sb', 'antimony', 51),
             'te': Element('te', 'tellurium', 52),
             'i': Element('i', 'iodine', 53),
             'xe': Element('xe', 'xenon', 54),
             'cs': Element('cs', 'cesium', 55),
             'ba': Element('ba', 'barium', 56),
             'la': Element('la', 'lanthanum', 57),
             'ce': Element('ce', 'cerium', 58),
             'pr': Element('pr', 'praseodymium', 59),
             'nd': Element('nd', 'neodymium', 60),
             'pm': Element('pm', 'promethium', 61),
             'sm': Element('sm', 'samarium', 62),
             'eu': Element('eu', 'europium', 63),
             'gd': Element('gd', 'gadolinium', 64),
             'tb': Element('tb', 'terbium', 65),
             'dy': Element('dy', 'dysprosium', 66),
             'ho': Element('ho', 'holmium', 67),
             'er': Element('er', 'erbium', 68),
             'tm': Element('tm', 'thulium', 69),
             'yb': Element('yb', 'ytterbium', 70),
             'lu': Element('lu', 'lutetium', 71),
             'hf': Element('hf', 'hafnium', 72),
             'ta': Element('ta', 'tantalum', 73),
             'w': Element('w', 'tungsten', 74),
             're': Element('re', 'rhenium', 75),
             'os': Element('os', 'osmium', 76),
             'ir': Element('ir', 'iridium', 77),
             'pt': Element('pt', 'platinum', 78),
             'au': Element('au', 'gold', 79),
             'hg': Element('hg', 'mercury', 80),
             'tl': Element('tl', 'thallium', 81),
             'pb': Element('pb', 'lead', 82),
             'bi': Element('bi', 'bismuth', 83),
             'po': Element('po', 'polonium', 84),
             'at': Element('at', 'astatine', 85),
             'rn': Element('rn', 'radon', 86),
             'fr': Element('fr', 'francium', 87),
             'ra': Element('ra', 'radium', 88),
             'ac': Element('ac', 'actinium', 89),
             'th': Element('th', 'thorium', 90),
             'pa': Element('pa', 'protactinium', 91),
             'u': Element('u', 'uranium', 92),
             'np': Element('np', 'neptunium', 93),
             'pu': Element('pu', 'plutonium', 94),
             'am': Element('am', 'americium', 95),
             'cm': Element('cm', 'curium', 96),
             'bk': Element('bk', 'berkelium', 97),
             'cf': Element('cf', 'californium', 98),
             'es': Element('es', 'einsteinium', 99),
             'fm': Element('fm', 'fermium', 100),
             'md': Element('md', 'mendelevium', 101),
             'no': Element('no', 'nobelium', 102),
             'lr': Element('lr', 'lawrencium', 103),
             'rf': Element('rf', 'rutherfordium', 104),
             'db': Element('db', 'dubnium', 105),
             'sg': Element('sg', 'seaborgium', 106),
             'bh': Element('bh', 'bohrium', 107),
             'hs': Element('hs', 'hassium', 108),
             'mt': Element('mt', 'meitnerium', 109),
             'ds': Element('ds', 'darmstadtium', 110),
             'rg': Element('rg', 'roentgenium', 111),
             'cn': Element('cn', 'copernicium', 112),
             'nh': Element('nh', 'nihonium', 113),
             'fl': Element('fl', 'flerovium', 114),
             'mc': Element('mc', 'moscovium', 115),
             'lv': Element('lv', 'livermorium', 116),
             'ts': Element('ts', 'tennessine', 117),
             'og': Element('og', 'oganesson', 118)}

    @classmethod
    def lookup_abbreviation(cls, abbrev):
        """Return the Element for a (case-insensitive) symbol.

        :raises UnidentifiedElement: if the symbol is not in the table.
        """
        try:
            return cls.table[abbrev.lower()]
        except KeyError:
            # Bug fix: a dict lookup raises KeyError, not IndexError. The old
            # `except IndexError` never matched, so unknown symbols escaped
            # as a raw KeyError instead of UnidentifiedElement.
            raise UnidentifiedElement

    @classmethod
    def lookup_Z(cls, Z):
        """Return the Element with atomic number ``Z``, or None if unknown."""
        # Linear scan over values: the table is keyed by symbol, not Z.
        for element in cls.table.values():
            if element.Z == Z:
                return element
        return None
| 44.100671
| 61
| 0.433876
|
4a10f0b157c713a6c404c0ae1af6a87eea21b300
| 1,026
|
py
|
Python
|
manage.py
|
krnr/appointments
|
3f132be1dd895b2aa9c07c5a8ebdebbd47ddfbf2
|
[
"MIT"
] | null | null | null |
manage.py
|
krnr/appointments
|
3f132be1dd895b2aa9c07c5a8ebdebbd47ddfbf2
|
[
"MIT"
] | null | null | null |
manage.py
|
krnr/appointments
|
3f132be1dd895b2aa9c07c5a8ebdebbd47ddfbf2
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
import os
import sys
if __name__ == '__main__':
    # Default to the local settings module unless the environment already
    # specifies one.
    os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'config.settings.local')
    try:
        from django.core.management import execute_from_command_line
    except ImportError:
        # The above import may fail for some other reason. Ensure that the
        # issue is really that Django is missing to avoid masking other
        # exceptions on Python 2.
        try:
            import django # noqa
        except ImportError:
            raise ImportError(
                "Couldn't import Django. Are you sure it's installed and "
                "available on your PYTHONPATH environment variable? Did you "
                "forget to activate a virtual environment?"
            )
        raise
    # This allows easy placement of apps within the interior
    # dr_dre directory.
    # NOTE(review): 'dr_dre' looks copied from another project's manage.py —
    # confirm it matches this repository's actual app directory name.
    current_path = os.path.dirname(os.path.abspath(__file__))
    sys.path.append(os.path.join(current_path, 'dr_dre'))
    execute_from_command_line(sys.argv)
| 34.2
| 77
| 0.655945
|
4a10f0c1061677b54fd864956da113105ebc690a
| 18,279
|
py
|
Python
|
utils/sparse_image_warp.py
|
iwaterxt/sparse_image_warp_pytorch
|
399848cf7c6bca0af33fc0e139ca426208d84c70
|
[
"MIT"
] | null | null | null |
utils/sparse_image_warp.py
|
iwaterxt/sparse_image_warp_pytorch
|
399848cf7c6bca0af33fc0e139ca426208d84c70
|
[
"MIT"
] | null | null | null |
utils/sparse_image_warp.py
|
iwaterxt/sparse_image_warp_pytorch
|
399848cf7c6bca0af33fc0e139ca426208d84c70
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
This implementation is modified from https://github.com/zcaceres/spec_augment
MIT License
Copyright (c) 2019 Zach Caceres
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
import random
import torch
def sparse_image_warp(img_tensor,
                      source_control_point_locations,
                      dest_control_point_locations,
                      interpolation_order=2,
                      regularization_weight=0.0,
                      num_boundaries_points=0):
    """Warp ``img_tensor`` so that the source control points move onto the
    destination control points, interpolating a dense flow field between
    them with a polyharmonic spline.

    Returns ``(warped_image, dense_flows)``.
    """
    device = img_tensor.device
    batch_size, image_height, image_width = img_tensor.shape

    # Per-control-point displacement vectors.
    control_point_flows = (dest_control_point_locations
                           - source_control_point_locations)

    # Interpolate those sparse displacements onto every pixel of the grid.
    flat_grid = get_flat_grid_locations(image_height, image_width, device)
    flat_flows = interpolate_spline(
        dest_control_point_locations,
        control_point_flows,
        flat_grid,
        interpolation_order,
        regularization_weight)

    dense_flows = create_dense_flows(
        flat_flows, batch_size, image_height, image_width)
    warped = dense_image_warp(img_tensor, dense_flows)
    return warped, dense_flows
def get_grid_locations(image_height, image_width, device):
    """Return an (H, W, 2) tensor of (y, x) coordinates over the image grid."""
    rows = torch.linspace(0, image_height - 1, image_height, device=device)
    cols = torch.linspace(0, image_width - 1, image_width, device=device)
    grid_y, grid_x = torch.meshgrid(rows, cols)
    return torch.stack((grid_y, grid_x), -1)
def flatten_grid_locations(grid_locations, image_height, image_width):
    """Collapse an (H, W, 2) coordinate grid into an (H*W, 2) list."""
    return grid_locations.reshape(image_height * image_width, 2)
def get_flat_grid_locations(image_height, image_width, device):
    """Return an (H*W, 2) tensor of (y, x) coordinates covering the grid,
    in row-major order."""
    ys = torch.linspace(0, image_height - 1, image_height, device=device)
    xs = torch.linspace(0, image_width - 1, image_width, device=device)
    grid_y, grid_x = torch.meshgrid(ys, xs)
    stacked = torch.stack((grid_y, grid_x), -1)
    return stacked.reshape(image_height * image_width, 2)
def create_dense_flows(flattened_flows, batch_size, image_height, image_width):
    """Reshape flat per-pixel flows back into a [B, H, W, 2] flow field."""
    return flattened_flows.reshape(batch_size, image_height, image_width, 2)
def interpolate_spline(train_points, train_values, query_points, order, regularization_weight=0.0,):
    """Fit a polyharmonic spline through (train_points, train_values) and
    evaluate it at query_points."""
    # Fit the spline coefficients, then evaluate at the query locations.
    w, v = solve_interpolation(
        train_points, train_values, order, regularization_weight)
    return apply_interpolation(query_points, train_points, w, v, order)
def solve_interpolation(train_points, train_values, order, regularization_weight):
    """Solve for polyharmonic-spline coefficients fitting ``train_values``
    at ``train_points``.

    :param train_points: ``[b, n, d]`` interpolation centers
    :param train_values: ``[b, n, k]`` values observed at the centers
    :param order: interpolation order passed to :func:`phi`
    :param regularization_weight: accepted for interface parity; not applied
        in this implementation
    :returns: ``(w, v)`` — ``w [b, n, k]`` RBF weights and ``v [b, d+1, k]``
        linear-term weights
    """
    device = train_points.device
    b, n, d = train_points.shape
    k = train_values.shape[-1]
    c = train_points
    f = train_values.float()
    matrix_a = phi(cross_squared_distance_matrix(c, c), order)  # [b, n, n]
    # Append ones to the feature values for the bias term in the linear model.
    ones = torch.ones_like(c[:, :, :1], dtype=train_points.dtype, device=device)
    matrix_b = torch.cat((c, ones), 2).float()  # [b, n, d + 1]
    # [b, n + d + 1, n]
    left_block = torch.cat((matrix_a, torch.transpose(matrix_b, 2, 1)), 1)
    num_b_cols = matrix_b.shape[2]  # d + 1
    # The original code used `0 * torch.randn(...) / 1e10`, which is exactly
    # zeros but needlessly consumed global RNG state; plain zeros work with
    # torch.linalg.solve.
    lhs_zeros = torch.zeros((b, num_b_cols, num_b_cols), device=device)
    right_block = torch.cat((matrix_b, lhs_zeros), 1)  # [b, n + d + 1, d + 1]
    lhs = torch.cat((left_block, right_block), 2)  # [b, n + d + 1, n + d + 1]
    rhs_zeros = torch.zeros((b, d + 1, k), dtype=train_points.dtype, device=device).float()
    rhs = torch.cat((f, rhs_zeros), 1)  # [b, n + d + 1, k]
    # torch.solve was deprecated in 1.9 and removed in 1.13; prefer the
    # torch.linalg API and fall back to the legacy call on old installs.
    try:
        X = torch.linalg.solve(lhs, rhs)
    except AttributeError:  # PyTorch < 1.9 has no torch.linalg.solve
        X, _ = torch.solve(rhs, lhs)
    w = X[:, :n, :]
    v = X[:, n:, :]
    return w, v
def cross_squared_distance_matrix(x, y):
    """Pairwise squared distances between rows of two batched matrices.

    Args:
        x: [batch_size, n, d] float `Tensor`
        y: [batch_size, m, d] float `Tensor`
    Returns:
        [batch_size, n, m] float `Tensor` where entry (b, i, j) is
        ||x[b, i, :] - y[b, j, :]||^2.
    """
    # Expand ||a - b||^2 as a.a - 2 a.b + b.b, broadcasting the norms.
    xx = (x * x).sum(dim=-1).unsqueeze(2)          # [b, n, 1]
    yy = (y * y).sum(dim=-1).unsqueeze(1)          # [b, 1, m]
    xy = torch.bmm(x, y.transpose(1, 2))           # [b, n, m]
    return (xx - 2 * xy + yy).float()
def phi(r, order):
    """Coordinate-wise polyharmonic-spline basis function phi_k applied to r.

    See https://en.wikipedia.org/wiki/Polyharmonic_spline for the definition.

    Args:
        r: input tensor (squared distances)
        order: interpolation order k
    Returns:
        phi_k evaluated coordinate-wise on r.
    """
    # Clamp away from zero before sqrt/log: sqrt(0) has an undefined gradient
    # and log(0) is -inf.
    eps = torch.tensor(1e-10, device=r.device)
    safe_r = torch.max(r, eps)
    if order == 1:
        return torch.sqrt(safe_r)
    if order == 2:
        return 0.5 * r * torch.log(safe_r)
    if order == 4:
        return 0.5 * torch.square(r) * torch.log(safe_r)
    if order % 2 == 0:
        return 0.5 * torch.pow(safe_r, 0.5 * order) * torch.log(safe_r)
    return torch.pow(safe_r, 0.5 * order)
def apply_interpolation(query_points, train_points, w, v, order):
    """Evaluate the fitted polyharmonic-spline model at ``query_points``.

    Args:
        query_points: `[b, m, d]` x values to evaluate the interpolation at
        train_points: `[b, n, d]` interpolation centers
        w: `[b, n, k]` weights on each interpolation center
        v: `[b, d, k]` weights on each input dimension
        order: order of the interpolation
    Returns:
        Interpolated values at ``query_points``.
    """
    query_points = query_points.unsqueeze(0)
    # RBF term: basis functions of the distances to the centers, weighted by w.
    dists = cross_squared_distance_matrix(
        query_points.float(), train_points.float())
    rbf_term = torch.matmul(phi(dists, order), w)
    # Linear term: affine model over bias-padded query coordinates.
    bias = torch.ones_like(query_points[..., :1])
    padded_queries = torch.cat((query_points, bias), 2).float()
    linear_term = torch.matmul(padded_queries, v)
    return rbf_term + linear_term
def dense_image_warp(image, flow):
    """Image warping using per-pixel flow vectors.
    Apply a non-linear warp to the image, where the warp is specified by a dense
    flow field of offset vectors that define the correspondences of pixel values
    in the output image back to locations in the source image. Specifically, the
    pixel value at output[b, j, i, c] is
    images[b, j - flow[b, j, i, 0], i - flow[b, j, i, 1], c].
    The locations specified by this formula do not necessarily map to an int
    index. Therefore, the pixel value is obtained by bilinear
    interpolation of the 4 nearest pixels around
    (b, j - flow[b, j, i, 0], i - flow[b, j, i, 1]). For locations outside
    of the image, we use the nearest pixel values at the image boundary.
    Args:
      image: 4-D float `Tensor` with shape `[batch, height, width, channels]`.
      flow: A 4-D float `Tensor` with shape `[batch, height, width, 2]`.
      name: A name for the operation (optional).
      Note that image and flow can be of type tf.half, tf.float32, or tf.float64,
      and do not necessarily have to be the same type.
    Returns:
      A 4-D float `Tensor` with shape`[batch, height, width, channels]`
      and same type as input image.
    Raises:
      ValueError: if height < 2 or width < 2 or the inputs have the wrong number
        of dimensions.
    """
    # NOTE(review): despite the docstring above (inherited from TensorFlow),
    # this implementation actually takes a 3-D `[batch, height, width]` image —
    # the unsqueeze below adds the channel axis before unpacking 4 dims.
    image = image.unsqueeze(3)  # add a single channel dimension to image tensor
    batch_size, height, width, channels = image.shape
    device = image.device
    # The flow is defined on the image grid. Turn the flow into a list of query
    # points in the grid space.
    grid_x, grid_y = torch.meshgrid(
        torch.arange(width, device=device), torch.arange(height, device=device))
    # Stack as (y, x) to match the flow's channel convention.
    stacked_grid = torch.stack((grid_y, grid_x), dim=2).float()
    # Reorder to [1, height, width, 2] so it broadcasts against the flow.
    batched_grid = stacked_grid.unsqueeze(-1).permute(3, 1, 0, 2)
    # Query the source image at (grid - flow), per the formula above.
    query_points_on_grid = batched_grid - flow
    query_points_flattened = torch.reshape(query_points_on_grid, [batch_size, height * width, 2])
    # Compute values at the query points, then reshape the result back to the
    # image grid.
    interpolated = interpolate_bilinear(image, query_points_flattened)
    interpolated = torch.reshape(interpolated, [batch_size, height, width, channels])
    return interpolated
def interpolate_bilinear(grid,
                         query_points,
                         name='interpolate_bilinear',
                         indexing='ij'):
    """Similar to Matlab's interp2 function.
    Finds values for query points on a grid using bilinear interpolation.
    Args:
      grid: a 4-D float `Tensor` of shape `[batch, height, width, channels]`.
      query_points: a 3-D float `Tensor` of N points with shape `[batch, N, 2]`.
      name: a name for the operation (optional).
      indexing: whether the query points are specified as row and column (ij),
        or Cartesian coordinates (xy).
    Returns:
      values: a 3-D `Tensor` with shape `[batch, N, channels]`
    Raises:
      ValueError: if the indexing mode is invalid, or if the shape of the inputs
        invalid.
    """
    if indexing != 'ij' and indexing != 'xy':
        raise ValueError('Indexing mode must be \'ij\' or \'xy\'')
    shape = grid.shape
    if len(shape) != 4:
        msg = 'Grid must be 4 dimensional. Received size: '
        raise ValueError(msg + str(grid.shape))
    batch_size, height, width, channels = grid.shape
    shape = [batch_size, height, width, channels]
    query_type = query_points.dtype
    grid_type = grid.dtype
    grid_device = grid.device
    num_queries = query_points.shape[1]
    # Per-axis fractional offsets and the integer floor/ceil neighbor indices,
    # accumulated in index_order (row axis first for 'ij').
    alphas = []
    floors = []
    ceils = []
    index_order = [0, 1] if indexing == 'ij' else [1, 0]
    unstacked_query_points = query_points.unbind(2)
    for dim in index_order:
        queries = unstacked_query_points[dim]
        size_in_indexing_dimension = shape[dim + 1]
        # max_floor is size_in_indexing_dimension - 2 so that max_floor + 1
        # is still a valid index into the grid.
        max_floor = torch.tensor(size_in_indexing_dimension - 2, dtype=query_type, device=grid_device)
        min_floor = torch.tensor(0.0, dtype=query_type, device=grid_device)
        # Clamp floor into [0, size - 2]; out-of-range queries snap to the edge.
        maxx = torch.max(min_floor, torch.floor(queries))
        floor = torch.min(maxx, max_floor)
        int_floor = floor.long()
        floors.append(int_floor)
        ceil = int_floor + 1
        ceils.append(ceil)
        # alpha has the same type as the grid, as we will directly use alpha
        # when taking linear combinations of pixel values from the image.
        alpha = queries - floor
        min_alpha = torch.tensor(0.0, dtype=grid_type, device=grid_device)
        max_alpha = torch.tensor(1.0, dtype=grid_type, device=grid_device)
        alpha = torch.min(torch.max(min_alpha, alpha), max_alpha)
        # Expand alpha to [b, n, 1] so we can use broadcasting
        # (since the alpha values don't depend on the channel).
        alpha = torch.unsqueeze(alpha, 2)
        alphas.append(alpha)
    flattened_grid = torch.reshape(grid, [batch_size * height * width, channels])
    batch_offsets = torch.reshape(torch.arange(batch_size, device=grid_device) * height * width, [batch_size, 1])
    # This wraps array_ops.gather. We reshape the image data such that the
    # batch, y, and x coordinates are pulled into the first dimension.
    # Then we gather. Finally, we reshape the output back. It's possible this
    # code would be made simpler by using array_ops.gather_nd.
    # NOTE(review): `flattened_grid.t()` is [channels, b*h*w] while the gather
    # index is [batch, n]; this appears to rely on channels == 1 (as produced
    # by dense_image_warp above) — confirm before using multi-channel grids.
    def gather(y_coords, x_coords, name):
        linear_coordinates = batch_offsets + y_coords * width + x_coords
        gathered_values = torch.gather(flattened_grid.t(), 1, linear_coordinates)
        return torch.reshape(gathered_values, [batch_size, num_queries, channels])
    # grab the pixel values in the 4 corners around each query point
    top_left = gather(floors[0], floors[1], 'top_left')
    top_right = gather(floors[0], ceils[1], 'top_right')
    bottom_left = gather(ceils[0], floors[1], 'bottom_left')
    bottom_right = gather(ceils[0], ceils[1], 'bottom_right')
    # Standard bilinear blend: interpolate along x at the top and bottom rows,
    # then blend the two results along y.
    interp_top = alphas[1] * (top_right - top_left) + top_left
    interp_bottom = alphas[1] * (bottom_right - bottom_left) + bottom_left
    interp = alphas[0] * (interp_bottom - interp_top) + interp_top
    return interp
def specaug(spec, W=80, F=13, T=70, num_freq_masks=2, num_time_masks=2, p=0.2, replace_with_zero=False):
    """SpecAugment: time warp + frequency masking + time masking.

    Reference: SpecAugment: A Simple Data Augmentation Method for Automatic
    Speech Recognition (https://arxiv.org/pdf/1904.08779.pdf).
    This implementation modified from https://github.com/zcaceres/spec_augment

    :param torch.Tensor spec: input tensor with the shape (T, dim)
    :param int W: time warp parameter
    :param int F: maximum width of each freq mask
    :param int T: maximum width of each time mask
    :param int num_freq_masks: number of frequency masks
    :param int num_time_masks: number of time masks
    :param bool replace_with_zero: if True, masked parts are filled with 0;
        otherwise with the spectrogram mean
    """
    # Compute the fill value before any augmentation touches the input.
    pad_value = 0 if replace_with_zero else spec.mean()
    # Internals operate on (dim, T); transpose in and back out.
    augmented = time_warp(spec.transpose(0, 1), W=W)
    augmented = freq_mask(augmented, F=F, num_masks=num_freq_masks,
                          pad_value=pad_value)
    augmented = time_mask(augmented, T=T, num_masks=num_time_masks, p=p,
                          pad_value=pad_value)
    return augmented.transpose(0, 1)
def time_warp(spec, W=5):
    """Time warping
    :param torch.Tensor spec: input tensor with shape (dim, T)
    :param int W: time warp parameter
    """
    # Degenerate cases: no warp requested, or the spectrogram is too short to
    # pick a warp point at least W frames from either edge.
    if W == 0:
        return spec
    num_mel_channels = spec.shape[0]
    spec_len = spec.shape[1]
    if spec_len <= 2*W:
        return spec
    spec = spec.unsqueeze(0)
    device = spec.device
    # Anchor row at the vertical center of the spectrogram.
    y = num_mel_channels / 2.0
    point_to_warp = random.randrange(W, spec_len-W)
    # Uniform distribution from (0,W) with chance to be up to W negative
    dist_to_warp = random.randrange(-W, W)
    # Control points: the warp point plus the four image corners pinned in
    # place, so only the interior stretches/compresses along time.
    src_pts, dest_pts = (torch.tensor([[[y, point_to_warp], [0, 0], [0, spec_len - 1], [num_mel_channels - 1, 0], [num_mel_channels - 1, spec_len - 1]]], device=device),
                         torch.tensor([[[y, point_to_warp + dist_to_warp], [0, 0], [0, spec_len - 1], [num_mel_channels - 1, 0], [num_mel_channels - 1, spec_len - 1]]], device=device))
    warped_spectro, dense_flows = sparse_image_warp(spec, src_pts, dest_pts)
    return warped_spectro.squeeze(3).squeeze(0)
def freq_mask(spec, F=30, num_masks=1, pad_value=0):
    """Frequency masking: zero (or pad) up to ``num_masks`` random bands of
    mel channels.

    :param torch.Tensor spec: input tensor with shape (dim, T)
    :param int F: maximum width of each mask
    :param int num_masks: number of masks
    :param pad_value: value written into the masked band
    """
    cloned = spec.unsqueeze(0).clone()
    num_mel_channels = cloned.shape[1]
    for i in range(0, num_masks):
        # Robustness: randrange(0, 0) raises ValueError; with F <= 0 there is
        # nothing to mask, so return the input unmasked instead of crashing.
        if F <= 0:
            break
        f = random.randrange(0, F)
        # Robustness: a mask at least as wide as the spectrum would make the
        # next randrange's upper bound non-positive and crash; skip it.
        if f >= num_mel_channels:
            continue
        f_zero = random.randrange(0, num_mel_channels - f)
        # avoids randrange error if values are equal and range is empty
        if (f_zero == f_zero + f):
            return cloned.squeeze(0)
        mask_end = random.randrange(f_zero, f_zero + f)
        cloned[0][f_zero:mask_end] = pad_value
    return cloned.squeeze(0)
def time_mask(spec, T=40, num_masks=1, p=0.2, pad_value=0):
    """Time masking: zero (or pad) up to ``num_masks`` random spans of frames.

    :param torch.Tensor spec: input tensor with shape (dim, T)
    :param int T: maximum width of each mask
    :param int num_masks: number of masks
    :param float p: cap on the total masked fraction of the sequence
    :param pad_value: value written into the masked span
    """
    cloned = spec.unsqueeze(0).clone()
    spec_len = cloned.shape[2]
    # Cap the per-mask width so the masks cover at most fraction p of frames.
    T = min(T, int(spec_len * p / num_masks))
    # Robustness: short inputs can drive T to 0, and randrange(0, 0) raises
    # ValueError; return the input unmasked instead of crashing.
    if T <= 0:
        return cloned.squeeze(0)
    for i in range(0, num_masks):
        t = random.randrange(0, T)
        t_zero = random.randrange(0, spec_len - t)
        # avoids randrange error if values are equal and range is empty
        if (t_zero == t_zero + t):
            return cloned.squeeze(0)
        mask_end = random.randrange(t_zero, t_zero + t)
        cloned[0][:, t_zero:mask_end] = pad_value
    return cloned.squeeze(0)
| 39.736957
| 184
| 0.678538
|
4a10f3533496f5730c2c86f6c2512b9c32f79355
| 6,995
|
py
|
Python
|
capnpy/compiler/structor.py
|
wridgers/capnpy
|
63546597cc94434a271187f2e5af60f02e086caa
|
[
"MIT"
] | 1
|
2019-05-29T19:47:53.000Z
|
2019-05-29T19:47:53.000Z
|
capnpy/compiler/structor.py
|
wridgers/capnpy
|
63546597cc94434a271187f2e5af60f02e086caa
|
[
"MIT"
] | null | null | null |
capnpy/compiler/structor.py
|
wridgers/capnpy
|
63546597cc94434a271187f2e5af60f02e086caa
|
[
"MIT"
] | 4
|
2018-01-28T23:44:41.000Z
|
2019-09-28T17:50:14.000Z
|
"""
Structor -> struct ctor -> struct construtor :)
"""
import struct
from capnpy.type import Types
from capnpy.schema import Field, Type, Value
from capnpy.compiler.fieldtree import FieldTree, Node
class Structor(object):
    """
    Structor -> struct ctor -> struct constructor.

    Emits the source code of a ``__new`` static constructor for a Cap'n Proto
    struct, writing each field into a segment builder at its wire offset.

    Some terminology:
    - argnames: the name of arguments taken by the ctor
    - params: [(argname, default)], for each argname in argnames
    """
    def __init__(self, m, struct, fields):
        # m: the module-level code-generation context (provides .code etc.)
        # struct: the schema node being compiled; sizes are in 8-byte words.
        self.m = m
        self.struct = struct
        self.data_size = struct.dataWordCount
        self.ptrs_size = struct.pointerCount
        self.fieldtree = FieldTree(m, self.struct)
        self.argnames, self.params = self.fieldtree.get_args_and_params()
    def slot_offset(self, f):
        # Byte offset of field f within the struct: slot.offset is in units of
        # the field's own size; pointer fields live after the data section.
        offset = f.slot.offset * f.slot.get_size()
        if f.slot.type.is_pointer():
            offset += self.data_size*8
        return offset
    def emit(self):
        ## generate a constructor which looks like this
        ## @staticmethod
        ## def __new(x=0, y=0, z=None):
        ##     builder = _SegmentBuilder()
        ##     pos = builder.allocate(24)
        ##     builder.write_int64(pos + 0, x)
        ##     builder.write_int64(pos + 8, y)
        ##     builder.alloc_text(pos + 16, z)
        ##     return builder.as_string()
        #
        # the parameters have the same order as fields
        code = self.m.code
        argnames = self.argnames
        # Duplicate names would silently shadow each other in the generated
        # signature, so fail loudly here.
        if len(argnames) != len(set(argnames)):
            raise ValueError("Duplicate field name(s): %s" % argnames)
        #
        code.w('@staticmethod')
        with code.cdef_('__new', self.params) as ns:
            # Total allocation: data words plus pointer words, in bytes.
            ns.length = (self.data_size + self.ptrs_size)*8
            ns.cdef_var('_SegmentBuilder', 'builder')
            ns.cdef_var('long', 'pos')
            ns.w('builder = _SegmentBuilder()')
            ns.w('pos = builder.allocate({length})')
            # One tag-tracking variable per union, so writing two fields of
            # the same union can be detected at runtime.
            for union in self.fieldtree.all_unions():
                ns.w('{union}__curtag = None', union=union.varname)
            for node in self.fieldtree.children:
                self.handle_node(node)
            ns.w('return builder.as_string()')
    def handle_node(self, node):
        # For union members, guard the write with a tag check and record the
        # discriminant; plain fields are emitted directly.
        if node.f.is_part_of_union():
            ns = self.m.code.new_scope()
            ns.varname = node.varname
            ns.union = node.parent.union.varname
            ns.offset = node.parent.union.offset
            ns.tagval = node.f.discriminantValue
            ns.tagname = self.m._field_name(node.f)
            ns.ifmt = 'ord(%r)' % Types.int16.fmt
            # Only write the union member (and its 16-bit tag) when the caller
            # actually passed a value for it.
            with ns.block('if {varname} is not _undefined:'):
                ns.w('{union}__curtag = _check_tag({union}__curtag, {tagname!r})')
                ns.w('builder.write_int16({offset}, {tagval})')
                self._handle_node(node)
        else:
            self._handle_node(node)
    def _handle_node(self, node):
        # Dispatch on the field's schema type to the matching emitter.
        # NOTE(review): the order matters — is_nullable is checked before
        # is_group, and is_primitive before is_bool; presumably the schema
        # predicates are mutually exclusive where needed — confirm.
        f = node.f
        if f.is_nullable(self.m):
            self.handle_nullable(node)
        elif f.is_group():
            self.handle_group(node)
        elif f.is_text():
            self.handle_text(node)
        elif f.is_data():
            self.handle_data(node)
        elif f.is_struct():
            self.handle_struct(node)
        elif f.is_list():
            self.handle_list(node)
        elif f.is_primitive() or f.is_enum():
            self.handle_primitive(node)
        elif f.is_bool():
            self.handle_bool(node)
        elif f.is_void():
            pass # nothing to do
        else:
            self.m.code.w("raise NotImplementedError('Unsupported field type: {f}')",
                          f=node.f.shortrepr())
    def handle_group(self, node):
        # def __init__(self, position, ...):
        #     ...
        #     position_x, position_y = position
        #     builder.write_...(..., position_x)
        #     builder.write_...(..., position_y)
        #     ...
        #
        # 1. unpack the tuple into various indepented variables
        ns = self.m.code.new_scope()
        ns.group = node.varname
        argnames = [child.varname for child in node.children]
        ns.args = self.m.code.args(argnames)
        ns.w('{args}, = {group}')
        #
        # 2. recursively handle all the children
        for child in node.children:
            self.handle_node(child)
    def handle_nullable(self, node):
        # Emit code that splits a nullable argument into an is_null flag and a
        # value, e.g.:
        # def __init__(self, ..., x, ...):
        #     ...
        #     if x is None:
        #         x_is_null = 1
        #         x_value = 0
        #     else:
        #         x_is_null = 0
        #         x_value = x
        #
        ns = self.m.code.new_scope()
        ns.fname = node.varname
        ns.ww(
        """
            if {fname} is None:
                {fname}_is_null = 1
                {fname}_value = 0
            else:
                {fname}_is_null = 0
                {fname}_value = {fname}
        """)
        # The flag/value children are then written as ordinary fields.
        for child in node.children:
            self.handle_node(child)
    def handle_text(self, node):
        # Text is a pointer field: allocate the string out-of-line.
        self.m.code.w('builder.alloc_text(pos + {offset}, {arg})',
                      arg=node.varname, offset=self.slot_offset(node.f))
    def handle_data(self, node):
        # Raw bytes are allocated out-of-line like text.
        self.m.code.w('builder.alloc_data(pos + {offset}, {arg})',
                      arg=node.varname, offset=self.slot_offset(node.f))
    def handle_struct(self, node):
        ## @staticmethod
        ## def __new(x=0, y=<some struct>):
        ##     builder = _SegmentBuilder()
        ##     pos = builder.allocate(16)
        ##     ...
        ##     builder.copy_from_struct(pos+8, SomeStruct, y)
        ns = self.m.code.new_scope()
        ns.fname = node.varname
        ns.offset = self.slot_offset(node.f)
        ns.structname = node.f.slot.type.runtime_name(self.m)
        ns.w('builder.copy_from_struct(pos + {offset}, {structname}, {fname})')
    def handle_list(self, node):
        # Lists are copied element-wise given the runtime element type.
        ns = self.m.code.new_scope()
        ns.fname = node.varname
        ns.offset = self.slot_offset(node.f)
        t = node.f.slot.type.list.elementType
        ns.list_item_type = t.list_item_type(self.m)
        ns.w('builder.copy_from_list(pos + {offset}, {list_item_type}, {fname})')
    def handle_primitive(self, node):
        ns = self.m.code.new_scope()
        ns.arg = node.varname
        # Cap'n Proto stores primitives XOR-ed with their declared default.
        if node.f.slot.hadExplicitDefault:
            ns.default_ = node.f.slot.defaultValue.as_pyobj()
            ns.w('{arg} ^= {default_}')
        #
        ns.type = node.f.slot.get_typename()
        ns.offset = self.slot_offset(node.f)
        ns.w('builder.write_{type}(pos + {offset}, {arg})')
    def handle_bool(self, node):
        # Bools are bit-packed: slot.offset is in bits, split into byte + bit.
        ns = self.m.code.new_scope()
        ns.arg = node.varname
        ns.byteoffset, ns.bitoffset = divmod(node.f.slot.offset, 8)
        # Same XOR-with-default encoding as other primitives.
        if node.f.slot.hadExplicitDefault:
            ns.default_ = node.f.slot.defaultValue.as_pyobj()
            ns.w('{arg} ^= {default_}')
        ns.w('builder.write_bool({byteoffset}, {bitoffset}, {arg})')
| 36.056701
| 85
| 0.549535
|
4a10f43766e2bb42fecde58283be694f01683338
| 4,278
|
py
|
Python
|
simpleml/datasets/__init__.py
|
aolopez/SimpleML
|
9e3237c243863400372a493164a107b74f770ef0
|
[
"BSD-3-Clause"
] | 15
|
2018-08-19T19:36:23.000Z
|
2021-11-09T17:47:18.000Z
|
simpleml/datasets/__init__.py
|
aolopez/SimpleML
|
9e3237c243863400372a493164a107b74f770ef0
|
[
"BSD-3-Clause"
] | 75
|
2020-10-11T17:58:59.000Z
|
2022-03-29T22:34:54.000Z
|
simpleml/datasets/__init__.py
|
aolopez/SimpleML
|
9e3237c243863400372a493164a107b74f770ef0
|
[
"BSD-3-Clause"
] | 4
|
2018-04-30T23:09:42.000Z
|
2022-01-19T08:03:18.000Z
|
'''
Import modules to register class names in global registry
Define convenience classes composed of different mixins
'''
__author__ = 'Elisha Yadgaran'
import pandas as pd
import logging
from .base_dataset import Dataset
from .pandas_mixin import BasePandasDatasetMixin, SingleLabelPandasDatasetMixin, MultiLabelPandasDatasetMixin
from .numpy_mixin import NumpyDatasetMixin
from simpleml.utils.errors import DatasetError
LOGGER = logging.getLogger(__name__)
class _PandasDatasetPipelineBuildMixin(object):
    # Shared build routine for pandas-backed datasets derived from a dataset
    # pipeline: transforms every named split and concatenates the results
    # into self.dataframe.
    def build_dataframe(self) -> None:
        '''
        Transform raw dataset via dataset pipeline for production ready dataset
        Overwrite this method to disable raw dataset requirement
        '''
        if self.pipeline is None:
            raise DatasetError('Must set pipeline before building dataframe')
        split_names = self.pipeline.get_split_names()
        # One transformed split object per named split (e.g. TRAIN/TEST).
        splits = [self.pipeline.transform(X=None, split=split_name) for split_name in split_names]
        merged_splits = [self.merge_split(split) for split in splits]
        # NOTE(review): `if splits[0].y` relies on y being None/falsy-safe;
        # if y is a pandas DataFrame, bare truthiness raises ValueError
        # ("truth value ... is ambiguous") — confirm y's type upstream.
        if splits[0].y and not self.config['label_columns']: # Propagate old labels to new dataset
            self.config['label_columns'] = splits[0].y.columns.tolist()
        if len(merged_splits) > 1: # Combine multiple splits
            # Join row wise - drop index in case duplicates exist
            self.dataframe = pd.concat(merged_splits, axis=0, ignore_index=True)
        else:
            self.dataframe = merged_splits[0]
# Mixin implementations for convenience
class PandasDataset(BasePandasDatasetMixin, Dataset, _PandasDatasetPipelineBuildMixin):
    '''
    Composed mixin class with pandas helper methods and a predefined build
    routine, assuming dataset pipeline existence.
    Deprecated: use `SingleLabelPandasDataset` or `MultiLabelPandasDataset`.
    WARNING: this class will fail if build_dataframe is not overwritten or a
    pipeline provided!
    '''
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # warn that this class is deprecated and will be removed.
        # Logger.warn is a deprecated alias of Logger.warning (deprecated
        # since Python 3.3); use the canonical spelling.
        LOGGER.warning('PandasDataset class is deprecated and will be removed in a future release! Use `SingleLabelPandasDataset` or `MultiLabelPandasDataset` instead')
class SingleLabelPandasDataset(SingleLabelPandasDatasetMixin, Dataset, _PandasDatasetPipelineBuildMixin):
    '''
    Composed mixin class with pandas helper methods and a predefined build
    routine (from _PandasDatasetPipelineBuildMixin), assuming dataset
    pipeline existence.
    Expects labels to only be a single column (1 label per sample).
    WARNING: this class will fail if build_dataframe is not overwritten or a
    pipeline provided!
    '''
class MultiLabelPandasDataset(MultiLabelPandasDatasetMixin, Dataset, _PandasDatasetPipelineBuildMixin):
    '''
    Composed mixin class with pandas helper methods and a predefined build
    routine (from _PandasDatasetPipelineBuildMixin), assuming dataset
    pipeline existence.
    Expects multiple labels across many columns (multi labels per sample).
    WARNING: this class will fail if build_dataframe is not overwritten or a
    pipeline provided!
    '''
class NumpyDataset(Dataset, NumpyDatasetMixin):
    '''
    Composed mixin class with numpy helper methods and a predefined build
    routine, assuming dataset pipeline existence.
    WARNING: this class will fail if build_dataframe is not overwritten or a
    pipeline provided!
    '''
    def build_dataframe(self) -> None:
        '''
        Transform raw dataset via dataset pipeline for production ready dataset
        Overwrite this method to disable raw dataset requirement
        '''
        if self.pipeline is None:
            raise DatasetError('Must set pipeline before building dataframe')
        split_names = self.pipeline.get_split_names()
        # (split_name, transformed split) pairs, one per named split.
        splits = [(split_name, self.pipeline.transform(X=None, split=split_name)) for split_name in split_names]
        # NOTE(review): bare truthiness of `.y` assumes it is None or an
        # unambiguous object; a DataFrame/ndarray here would raise — confirm.
        if splits[0][1].y and not self.config['label_columns']:
            # If there is a Y, explicitly label it
            self.config['label_columns'] = ['y']
        y_label = self.config['label_columns'][0]
        # Assume propagating logic since there is no clear way to join
        # Store splits as a nested dict: {split_name: {'X': ..., y_label: ...}}.
        self._external_file = {
            split_name: {
                'X': split.X,
                y_label: split.y
            } for split_name, split in splits
        }
| 35.94958
| 165
| 0.710846
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.