id stringlengths 1 265 | text stringlengths 6 5.19M | dataset_id stringclasses 7
values |
|---|---|---|
43402 | <reponame>The-Kristina/CellComp
# TODO: Find out if you can reconstruct chopped trees from the tracker:
# Script: scan a cellID details dump and plot the lineage tree of every
# root cell (generation 0) that is not flagged in column 7.
import sys
sys.path.append("../")
from Cell_IDs_Analysis.Plotter_Lineage_Trees import PlotLineageTree

# Hard-coded experiment paths (single position of one movie).
raw_file = "/Volumes/lowegrp/Data/Kristina/MDCK_90WT_10Sc_NoComp/17_07_24/pos13/analysis/channel_RFP/cellIDdetails_raw.txt"
xml_file = "/Volumes/lowegrp/Data/Kristina/MDCK_90WT_10Sc_NoComp/17_07_24/pos13/tracks/tracks_type2.xml"

for line in open(raw_file, "r"):
    line = line.rstrip().split("\t")
    # Skip the header row and malformed/short rows.
    if line[0] == "Cell_ID" or len(line) < 8:
        continue
    # NOTE(review): assumes column 5 is the generation and column 7 is a
    # boolean-ish flag ("False" = keep) — confirm against the file format.
    if int(line[5]) == 0 and line[7] == "False":
        print (line)
        PlotLineageTree(root_ID=int(line[0]), cell_ID=int(line[0]), xml_file=xml_file, show=True)
| StarcoderdataPython |
43202 | <filename>project/admin.py
from django.contrib import admin
from project.models import Code

# Expose the Code model in the Django admin with the default ModelAdmin.
admin.site.register(Code)
| StarcoderdataPython |
1754976 | <reponame>sunjinbo/hipython
lst = [6, 7, 3, 0, 4, 8, 15, 4]  # shared demo input, mutated in place by the sorts below
# Bubble sort (original comment: 冒泡排序算法)
def bubbleSort(arr):
    """Sort *arr* in place in DESCENDING order using bubble sort.

    Behaviour (descending) is kept from the original; fixes: the local
    counter no longer shadows the builtin ``sum``, the swap uses tuple
    assignment, and a clean pass ends the sort early.
    """
    n = len(arr)
    for i in range(n - 1):
        swapped = False
        for j in range(n - i - 1):
            # A larger right neighbour bubbles toward the front -> descending.
            if arr[j] < arr[j + 1]:
                arr[j], arr[j + 1] = arr[j + 1], arr[j]
                swapped = True
        if not swapped:  # already ordered; remaining passes would be no-ops
            break
# Insertion sort (original comment: 插入排序算法)
def insertionSort(arr):
    """Sort *arr* in place in ASCENDING order using insertion sort.

    Fix: the original bound ``sum = len(arr)``, shadowing the builtin;
    the length is now taken directly in the range call.
    """
    for i in range(1, len(arr)):
        target = arr[i]          # element being inserted into the sorted prefix
        j = i
        # Shift larger prefix elements one slot right.
        while j > 0 and target < arr[j - 1]:
            arr[j] = arr[j - 1]
            j -= 1
        arr[j] = target
# Selection sort (original comment: 选择排序算法)
def selectionSort(arr):
    """Sort *arr* in place in DESCENDING order using selection sort.

    Behaviour (descending) is kept; fixes: the locals no longer shadow the
    builtins ``sum`` and ``max``, and the swap uses tuple assignment.
    """
    n = len(arr)
    for i in range(n):
        best = i  # index of the largest element in arr[i:]
        for j in range(i, n):
            if arr[j] > arr[best]:
                best = j
        arr[i], arr[best] = arr[best], arr[i]
# Quicksort (original comment: 快速排序算法)
def quickSort(aar, low, heigh):
    """Sort ``aar[low:heigh+1]`` in place in ascending order (Hoare-style).

    BUG FIX: the original recursed with ``quickSort(aar, 0, index - 1)``,
    re-sorting the already-finalized prefix on every call (still correct,
    but wasteful); the left recursion now starts at *low*.
    """
    if low < heigh:
        index = getPivot(aar, low, heigh)
        quickSort(aar, low, index - 1)
        quickSort(aar, index + 1, heigh)


def getPivot(aar, low, heigh):
    """Partition ``aar[low:heigh+1]`` around its first element; return the
    pivot's final index."""
    # The pivot is the first element of the range.
    pivot = aar[low]
    while low < heigh:
        # Walk the high pointer left past elements >= pivot.
        while low < heigh and aar[heigh] >= pivot:
            heigh = heigh - 1
        # Element smaller than the pivot moves to the low slot.
        aar[low] = aar[heigh]
        # Walk the low pointer right past elements <= pivot.
        while low < heigh and aar[low] <= pivot:
            low = low + 1
        # Element larger than the pivot moves to the high slot.
        aar[heigh] = aar[low]
    # low == heigh here: the pivot's correct position.
    aar[low] = pivot
    return low
# Functional quicksort — returns a new list instead of sorting in place.
def quickSort2(arr):
    """Return a new ascending-sorted list (first element used as pivot)."""
    if len(arr) < 2:
        return arr
    pivot, *rest = arr
    smaller = [value for value in rest if value <= pivot]
    larger = [value for value in rest if value > pivot]
    return quickSort2(smaller) + [pivot] + quickSort2(larger)
# Demo code (original comment: 测试代码 = "test code").
# NOTE(review): ``lst`` is mutated in place by each sort in turn, so only
# the first call sorts the original data; later calls receive an
# already-ordered list (e.g. insertionSort gets bubbleSort's descending
# output). bubbleSort/selectionSort sort descending, the others ascending.
bubbleSort(lst)
print("bubble sort:")
print(lst)
print('\n')
insertionSort(lst)
print("insertion sort:")
print(lst)
print('\n')
selectionSort(lst)
print("selection sort:")
print(lst)
print('\n')
quickSort(lst, 0, len(lst) - 1)
print("quick sort:")
print(lst)
print('\n')
# Quicksort-2 demo (original comment: 测试快速排序) — fresh input, pure function.
lst = [6, 7, 3, 0, 4, 8, 15, 4]
lst = quickSort2(lst)
print("quick sort 2:")
print(lst)
print('\n')
| StarcoderdataPython |
3211483 | <reponame>Cronologium/secretsanta
import random
import sys
from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives import serialization, hashes
from cryptography.hazmat.primitives.asymmetric import padding
from cryptography.hazmat.primitives.asymmetric import rsa
PEOPLE_FILE = 'people.txt'  # default input: one participant name per line
KEY_SIZE = 2048  # RSA modulus size in bits; each ciphertext block is KEY_SIZE // 8 bytes
def encode_int(x):
    """Encode ``x`` (mod 10000) as four ASCII digits, least-significant first."""
    digits = '{:04d}'.format(x % 10000)
    return digits[::-1]
def decode_int(b):
    """Inverse of :func:`encode_int`: read four least-significant-first digits."""
    return sum(int(b[i]) * 10 ** i for i in range(4))
def list_people(people_file):
    """Read one participant name per line from *people_file*.

    A single trailing newline is stripped from each line; the last line may
    legitimately lack one.
    """
    with open(people_file, 'r') as f:
        raw_lines = f.readlines()
    return [name[:-1] if name.endswith('\n') else name for name in raw_lines]
def open_ticket(ticket):
    """Open a ``<name>.ticket`` file and print which person its owner drew.

    Ticket layout: 4-digit cert length header, then the owner's PEM private
    key, then one OAEP ciphertext per participant. Only the one ciphertext
    encrypted for this key decrypts successfully; the others raise ValueError.
    """
    my_key = None
    encrypted_people = []
    with open(ticket, 'rb') as f:
        # NOTE(review): decode_int indexes the 4 bytes read here; on Python 3
        # ``int(b[0])`` on bytes yields the code point, not the digit — this
        # header parsing looks Python-2-era. Confirm the target interpreter.
        cert_length = decode_int(f.read(4))
        my_key = serialization.load_pem_private_key(
            f.read(cert_length),
            password=None,
            backend=default_backend()
        )
        # Remaining bytes: fixed-size ciphertext blocks, one per participant.
        encrypted_people_bytes = f.read()
        encrypted_people = [encrypted_people_bytes[i:i + KEY_SIZE // 8] for i in
                            range(0, len(encrypted_people_bytes), KEY_SIZE // 8)]
    # NOTE(review): ``found`` is never set True; the function returns directly
    # on a successful decrypt, so the final branch only runs when no block
    # decrypted — the flag is effectively dead.
    found = False
    for encrypted_person in encrypted_people:
        try:
            plaintext = my_key.decrypt(
                encrypted_person,
                padding.OAEP(
                    mgf=padding.MGF1(algorithm=hashes.SHA256()),
                    algorithm=hashes.SHA256(),
                    label=None
                )
            )
            print('{0}, your person is {1}'.format(ticket.split('.')[0], plaintext))
            return
        except ValueError:
            # Wrong recipient — OAEP decryption fails; try the next block.
            continue
    if found is False:
        print('{0}, you got nobody :(\n(Could not find a valid person)'.format(ticket.split('.')[0]))
def cmp_pb_keys(key1, key2):
    """Return True when two private keys expose the same public key (PEM bytes)."""
    def public_pem(key):
        return key.public_key().public_bytes(
            encoding=serialization.Encoding.PEM,
            format=serialization.PublicFormat.SubjectPublicKeyInfo,
        )
    return public_pem(key1) == public_pem(key2)
def write_ticket(person, cert, people):
    """Write ``<person>.ticket``: 4-digit cert-length header + PEM key + ciphertexts.

    BUG FIX: :func:`encode_int` returns ``str`` while *cert*/*people* are
    ``bytes``; the original ``str + bytes`` concatenation raises TypeError on
    Python 3. The header is now encoded to ASCII bytes first (a no-op on
    Python 2, where str is bytes).
    """
    with open('{0}.ticket'.format(person), 'wb') as f:
        f.write(encode_int(len(cert)).encode('ascii') + cert + people)
def generate_tickets(people=None):
    """Generate one RSA key per participant, encrypt each name for one key,
    derange the assignment so nobody draws themselves, and write tickets.

    :param people: list of names; defaults to the contents of PEOPLE_FILE.
    """
    if people is None:
        people = list_people(PEOPLE_FILE)
    private_keys = []
    crypto_text = []
    for line in people:
        # Strip a trailing newline when names come straight from a file.
        if line.endswith('\n'):
            line = line[:-1]
        private_key = rsa.generate_private_key(
            public_exponent=65537,
            key_size=KEY_SIZE,
            backend=default_backend()
        )
        # NOTE(review): ``line`` is a str here; cryptography's encrypt()
        # requires bytes on Python 3 — this call looks Python-2-era. Confirm.
        crypto_text.append(private_key.public_key().encrypt(
            line,
            padding.OAEP(
                mgf=padding.MGF1(algorithm=hashes.SHA256()),
                algorithm=hashes.SHA256(),
                label=None
            )
        ))
        private_keys.append(private_key)
    # Remember the original key order: position x's ciphertext names people[x].
    init_order = [prv_key for prv_key in private_keys]
    # Reshuffle until no key stays at its own index (rejection-sampled derangement).
    ok = False
    while ok is False:
        random.shuffle(private_keys)
        ok = True
        for x in range(len(init_order)):
            if cmp_pb_keys(init_order[x], private_keys[x]):
                #print('[DEBUG] Not ok, {0} gets himself'.format(people[x]))
                ok = False
    # Each ticket gets its shuffled private key plus ALL ciphertexts; only the
    # one matching the key decrypts.
    for x in range(len(private_keys)):
        pem = private_keys[x].private_bytes(
            encoding = serialization.Encoding.PEM,
            format = serialization.PrivateFormat.PKCS8,
            encryption_algorithm = serialization.NoEncryption()
        )
        write_ticket(people[x], pem, b''.join(crypto_text))
if __name__ == '__main__':
    # CLI: ``gen [names...]`` | ``open <name>`` | ``test``.
    # NOTE(review): no argument validation — running with no args raises IndexError.
    if sys.argv[1] == 'gen':
        if len(sys.argv) > 3:
            generate_tickets(sys.argv[2:])
        else:
            generate_tickets()
    elif sys.argv[1] == 'open':
        open_ticket('{0}.ticket'.format(sys.argv[2]))
    elif sys.argv[1] == 'test':
        # End-to-end smoke test with three synthetic participants.
        generate_tickets(['test1', 'test2', 'test3'])
        for x in range(1, 4):
            open_ticket('test{0}.ticket'.format(x))
1789000 | from statistics import mean
import sys
import matplotlib.pyplot as plot
import numpy
# Voltage-log files, one per battery run (run N maps to a brand via
# get_brand_name below).
filenames = \
    [ "data/run_1"
    , "data/run_2"
    , "data/run_3"
    , "data/run_4"
    , "data/run_5"
    , "data/run_6"
    ]

# Parse each run into a (time_seconds, voltage) pair of parallel lists;
# samples are assumed to be 30 s apart.
data = []
i = 0  # NOTE(review): incremented below but never read — leftover counter.
for filename in filenames:
    data_y = []
    data_x = []
    file = open(filename, "r")
    lines = file.read().split("\n")
    file.close()
    j = 0
    for line in lines:
        if line != "":
            data_y.append(float(line))
            data_x.append(j * 30)
        j += 1
    i += 1
    data.append((data_x, data_y))
# Align all runs on the shortest recording so they can be compared point-wise.
smallest_dataset_size = min((len(dataset[0]) for dataset in data), default=0)

# Truncate every run's time and voltage series to the common length.
normalised_data = [
    (dataset[0][:smallest_dataset_size], dataset[1][:smallest_dataset_size])
    for dataset in data
]
# NOTE(review): disabled "--averaged" mode — the commented-out helpers below
# averaged runs 1-3 with runs 4-6 pairwise; kept for reference.
# def average_dataset(i, dataset_1, dataset_2):
#     # print(dataset_2[1][i])
#     # print(mean([dataset_1[1][i], dataset_2[1][i]]))
#     return mean([dataset_1[1][i], dataset_2[1][i]])
# def averaged_data(data_1, data_2):
#     data = []
#     j = 0
#     for dataset in data_1:
#         averaged_dataset = []
#         for (i, x) in enumerate(dataset[1]):
#             averaged_dataset.append(average_dataset(i, dataset, data_2[j]))
#         print((averaged_dataset))
#         data.append((data_1[0], averaged_dataset))
#         j += 1
#     return data
# # print(len(normalised_data[0:3]))
# final_data = averaged_data(normalised_data[0:3], normalised_data[3:6]) if len(sys.argv) == 2 and sys.argv[1] == "--averaged" else normalised_data

# Plot the raw (truncated) series directly.
final_data = normalised_data
figure, axis = plot.subplots()
def get_brand_name(run: int) -> str:
    """Map a 1-based run number to its battery brand; runs cycle every 3.

    BUG FIX: the original recursed once per 3 runs and never terminated for
    ``run <= 0`` (RecursionError); modular arithmetic with explicit input
    validation replaces the recursion. Valid inputs map identically.
    """
    if run < 1:
        raise ValueError("run numbers are 1-based, got {!r}".format(run))
    brands = ("Chevron", "Energizer Max Plus", "Eveready Gold")
    return brands[(run - 1) % 3]
# Plot every run, labelled with its brand, and save PNG + SVG copies.
j = 1  # 1-based run counter for get_brand_name
for run in final_data:
    label = get_brand_name(j)
    axis.plot(run[0], run[1], label=label)
    j += 1
axis.set_xlabel("Time (seconds)")
axis.set_ylabel("Voltage (volts)")
axis.set_title("AAA Battery Voltage Over Time, Organised by Brand")
axis.legend()
figure.savefig("graph.png", format="png")
figure.savefig("graph.svg", format="svg")
def chunk(lst, n):
    """Yield successive n-tuples from *lst*, dropping any short remainder.

    All zip columns share one iterator, so each tuple consumes n fresh items.
    """
    cursor = iter(lst)
    columns = [cursor for _ in range(n)]
    return zip(*columns)
# Print two-hourly mean-voltage reports (240 samples x 30 s = 2 hours).
chunked = []
for dataset in normalised_data:
    chunked.append(list(chunk(dataset[1], 240))) # Seperates into 2-hour chunks
report_data = []
for dataset in chunked:
    dataset_report = []
    # NOTE(review): this loop variable shadows the ``chunk`` function above;
    # it works only because ``chunked`` was fully built first.
    for chunk in dataset:
        dataset_report.append(round(mean(chunk), ndigits=3))
    report_data.append(dataset_report)
report = "# Recorded Data (every 2 hours)\n\n"
k = 1  # 1-based run counter for get_brand_name
for dataset in report_data:
    report += "## " + get_brand_name(k) + "\n" + "\n"
    for item in dataset:
        report += str(item) + "\n"
    report += "\n"
    k += 1
print(report)
file = open("log.txt", "w")
file.write(report)
file.close()
129005 | <reponame>DarwishMenna/pathways-backend
import logging
from django.utils import translation
from django.contrib.gis.geos import Point
from human_services.locations.models import Location, ServiceAtLocation, LocationAddress
from human_services.organizations.models import Organization
from human_services.services.models import Service
from human_services.addresses.models import Address, AddressType
from human_services.phone_at_location.models import PhoneNumberType, PhoneAtLocation
from taxonomies.models import TaxonomyTerm
from bc211.exceptions import XmlParseException
LOGGER = logging.getLogger(__name__)
def save_records_to_database(organizations, counters):
    """Persist every successfully parsed organization, updating *counters*."""
    for parsed_organization in handle_parser_errors(organizations):
        save_organization(parsed_organization, counters)
def save_organization(organization, counters):
    """Save one organization (English locale active) and all of its locations."""
    translation.activate('en')
    organization_record = build_organization_active_record(organization)
    organization_record.save()
    counters.count_organization()
    LOGGER.debug('Organization "%s" "%s"', organization.id, organization.name)
    save_locations(organization.locations, counters)
def handle_parser_errors(generator):
    """Yield organizations from *generator*, logging and skipping parse errors.

    BUG FIX: when the source generator was exhausted, the StopIteration from
    ``next()`` escaped this generator's frame, which PEP 479 (Python 3.7+)
    converts into a RuntimeError after the last organization. Exhaustion now
    terminates the generator cleanly.
    """
    organization_id = ''
    while True:
        try:
            organization = next(generator)
        except StopIteration:
            return
        except XmlParseException as error:
            # Report which record broke, using the id of the last good one.
            LOGGER.error('Error importing the organization immediately after the one with id "%s": %s',
                         organization_id, error.__str__())
            continue
        organization_id = organization.id
        yield organization
def build_organization_active_record(record):
    """Copy the parsed organization DTO onto a new Organization model instance."""
    active_record = Organization()
    for field in ('id', 'name', 'description', 'website', 'email'):
        setattr(active_record, field, getattr(record, field))
    return active_record
def save_locations(locations, counters):
    """Save each location, then its optional services, addresses and phones."""
    for location in locations:
        active_record = build_location_active_record(location)
        active_record.save()
        counters.count_location()
        LOGGER.debug('Location "%s" "%s"', location.id, location.name)
        # Each related collection is optional on the parsed DTO.
        if location.services:
            save_services(location.services, counters)
        if location.physical_address:
            create_address_for_location(active_record, location.physical_address, counters)
        if location.postal_address:
            create_address_for_location(active_record, location.postal_address, counters)
        if location.phone_numbers:
            create_phone_numbers_for_location(active_record, location.phone_numbers, counters)
def build_location_active_record(record):
    """Copy a parsed location DTO onto a new Location model instance.

    The GIS point is only set when the DTO carries spatial coordinates.
    """
    active_record = Location()
    active_record.id = record.id
    active_record.name = record.name
    active_record.organization_id = record.organization_id
    if record.spatial_location is not None:
        active_record.point = Point(record.spatial_location.longitude,
                                    record.spatial_location.latitude)
    active_record.description = record.description
    return active_record
def build_service_active_record(record):
    """Copy the parsed service DTO onto a new Service model instance."""
    active_record = Service()
    for field in ('id', 'name', 'organization_id', 'description'):
        setattr(active_record, field, getattr(record, field))
    return active_record
def build_service_at_location_active_record(record):
    """Build the join row linking service ``record.id`` to location ``record.site_id``."""
    link = ServiceAtLocation()
    link.service_id = record.id
    link.location_id = record.site_id
    return link
def save_services(services, counters):
    """Save each not-yet-imported service plus its location link and taxonomy."""
    for service in services:
        if not service_already_exists(service):
            active_record = build_service_active_record(service)
            active_record.save()
            counters.count_service()
            LOGGER.debug('Service "%s" "%s"', service.id, service.name)
            # NOTE(review): the next two calls are placed inside the
            # "not exists" branch — ``active_record`` is only bound here, so
            # placing them outside would raise; confirm against the upstream
            # source (indentation was lost in this copy).
            save_service_at_location(service)
            save_service_taxonomy_terms(service.taxonomy_terms, active_record, counters)
def service_already_exists(service):
    """Return True when a Service row with this id is already stored."""
    matching = Service.objects.filter(pk=service.id)
    return matching.exists()
def save_service_at_location(service):
    """Create and save the service/location join row for *service*."""
    link = build_service_at_location_active_record(service)
    link.save()
    LOGGER.debug('Service at location: %s %s', service.id, service.site_id)
def save_service_taxonomy_terms(taxonomy_terms, service_active_record, counters):
    """Attach each taxonomy term (created on demand) to the service record."""
    for taxonomy_term in taxonomy_terms:
        taxonomy_term_active_record = create_taxonomy_term_active_record(
            taxonomy_term,
            counters
        )
        service_active_record.taxonomy_terms.add(taxonomy_term_active_record)
        LOGGER.debug('Imported service taxonomy term')
    # NOTE(review): placed after the loop (save once) — the original file's
    # indentation was lost in this copy; confirm upstream.
    service_active_record.save()
def create_taxonomy_term_active_record(record, counters):
    """Fetch or insert the TaxonomyTerm row for (taxonomy_id, name)."""
    term, was_created = TaxonomyTerm.objects.get_or_create(
        taxonomy_id=record.taxonomy_id,
        name=record.name
    )
    if was_created:
        counters.count_taxonomy_term()
        LOGGER.debug('Taxonomy term "%s" "%s"', record.taxonomy_id, record.name)
    return term
def create_address_for_location(location, address_dto, counters):
    """Create (or reuse) the Address row and attach it to *location*."""
    address_record = create_address(address_dto, counters)
    address_type = AddressType.objects.get(pk=address_dto.address_type_id)
    create_location_address(location, address_record, address_type)
def create_address(address_dto, counters):
    """Fetch or insert the Address row matching *address_dto*; count inserts."""
    address_record, was_created = Address.objects.get_or_create(
        address=address_dto.address_lines,
        city=address_dto.city,
        country=address_dto.country,
        state_province=address_dto.state_province,
        postal_code=address_dto.postal_code,
        attention=None
    )
    if was_created:
        counters.count_address()
        LOGGER.debug('Address: %s %s', address_record.id, address_record.address)
    return address_record
def create_location_address(location, address, address_type):
    """Create and save the LocationAddress join row; return the saved row.

    BUG FIX: the original returned the result of ``.save()`` — ``None`` for a
    Django model — instead of the record itself; the instance is now kept and
    returned after saving.
    """
    active_record = LocationAddress(address=address, location=location,
                                    address_type=address_type)
    active_record.save()
    LOGGER.debug('Location address')
    return active_record
def create_phone_numbers_for_location(location, phone_number_dtos, counters):
    """Create phone-number rows (and their types on demand) for *location*."""
    for dto in phone_number_dtos:
        number_type, type_created = PhoneNumberType.objects.get_or_create(
            id=dto.phone_number_type_id
        )
        if type_created:
            counters.count_phone_number_types()
            LOGGER.debug('PhoneNumberType: "%s"', number_type.id)
        phone = PhoneAtLocation.objects.create(
            location=location,
            phone_number_type=number_type,
            phone_number=dto.phone_number
        )
        counters.count_phone_at_location()
        LOGGER.debug('PhoneNumber: "%s" "%s"', phone.id, phone.phone_number)
| StarcoderdataPython |
3272883 | <filename>gym_gui_environments/pyside_gui_environments/src/backend/car_configurator.py
from PySide6.QtCore import Slot
from PySide6.QtWidgets import QFrame, QComboBox, QPushButton
from gym_gui_environments.pyside_gui_environments.src.utils.alert_dialogs import WarningDialog
from gym_gui_environments.pyside_gui_environments.src.utils.utils import SignalHandler
# Option labels shown in the configurator comboboxes.
CAR_MODELS = ["Car A", "Car B", "Car C"]  # pragma: no cover
TIRE_VARIANTS = ["18 Inch", "19 Inch", "20 Inch", "22 Inch"]  # pragma: no cover
INTERIOR_VARIANTS = ["Modern", "Vintage", "Sport"]  # pragma: no cover
PROPULSION_SYSTEMS = ["Combustion Engine A", "Combustion Engine B", "Combustion Engine C", "Electric Motor A",
                      "Electric Motor B"]  # pragma: no cover
# Compatibility matrix implemented by the initialize_car_* / update_cars_by_*
# methods below:
"""
Allowed configurations:
Car A:
- Tires: 20 and 22 Inch
- Interior: Modern, Vintage
- Propulsion System: Combustion Engine A, C
Car B:
- Tires: 18, 19 and 20 Inch
- Interior: Modern, Sport
- Propulsion System: Electric Motor A, B
Car C:
- Tires: 19, 20 and 22 Inch
- Interior: Vintage, Sport
- Propulsion System: Combustion Engine B, C and Electric Motor A, B
"""  # pragma: no cover
class CarConfigurator:
    """Drives the step-by-step car configuration flow (model -> tires ->
    interior -> propulsion), keeping widget visibility and per-part
    availability flags in sync and emitting signals on every change."""

    def __init__(self,
                 car_model_selection_frame: QFrame, car_model_selection_combobox: QComboBox,
                 tire_selection_frame: QFrame, tire_selection_combobox: QComboBox,
                 interior_design_frame: QFrame, interior_design_combobox: QComboBox,
                 propulsion_system_frame: QFrame, propulsion_system_combobox: QComboBox,
                 show_configuration_button: QPushButton):  # pragma: no cover
        # Injected widgets, one frame + combobox per configuration step.
        self.car_model_selection_frame = car_model_selection_frame
        self.car_model_selection_combobox = car_model_selection_combobox
        self.tire_selection_frame = tire_selection_frame
        self.tire_selection_combobox = tire_selection_combobox
        self.interior_design_frame = interior_design_frame
        self.interior_design_combobox = interior_design_combobox
        self.propulsion_system_frame = propulsion_system_frame
        self.propulsion_system_combobox = propulsion_system_combobox
        self.show_configuration_button = show_configuration_button
        self.signal_handler = SignalHandler()
        # Availability flags — every model and part starts enabled.
        self.car_a = True
        self.car_b = True
        self.car_c = True
        self.tire_18_inch = True
        self.tire_19_inch = True
        self.tire_20_inch = True
        self.tire_22_inch = True
        self.modern_interior = True
        self.vintage_interior = True
        self.sport_interior = True
        self.combustion_engine_a = True
        self.combustion_engine_b = True
        self.combustion_engine_c = True
        self.electric_motor_a = True
        self.electric_motor_b = True
        # Current selections; None until the user completes each step.
        self.selected_car = None
        self.selected_tire = None
        self.selected_interior = None
        self.selected_propulsion_system = None
        self._initialize()
        self._connect()
    def _initialize(self):  # pragma: no cover
        """One-time widget setup: keep hidden frames' layout space, then reset."""
        # This keeps the space in the layout, even when the widget is not visible
        default_size_policy = self.tire_selection_frame.sizePolicy()
        default_size_policy.setRetainSizeWhenHidden(True)
        self.tire_selection_frame.setSizePolicy(default_size_policy)
        self.interior_design_frame.setSizePolicy(default_size_policy)
        self.propulsion_system_frame.setSizePolicy(default_size_policy)
        # Use the size policy from the button because I do not know if it is different to the one from the QFrame
        button_size_policy = self.show_configuration_button.sizePolicy()
        button_size_policy.setRetainSizeWhenHidden(True)
        self.show_configuration_button.setSizePolicy(button_size_policy)
        self._reset()
    def _connect(self):  # pragma: no cover
        """Wire each combobox's activated signal (user pick) to its handler."""
        self.car_model_selection_combobox.activated.connect(self.change_selected_car)
        self.tire_selection_combobox.activated.connect(self.change_selected_tire)
        self.interior_design_combobox.activated.connect(self.change_selected_interior)
        self.propulsion_system_combobox.activated.connect(self.change_selected_propulsion_system)
        self.show_configuration_button.clicked.connect(self.print_selected_configuration)
    def _hide_all_boxes(self):
        """Hide every step after the model picker and broadcast the new state."""
        # Make the configurations invisible
        self.tire_selection_frame.setVisible(False)
        self.interior_design_frame.setVisible(False)
        self.propulsion_system_frame.setVisible(False)
        self.show_configuration_button.setVisible(False)
        # Tell listeners which widgets are currently active (visible).
        self.signal_handler.changed_active_car_configurator_widgets.emit([
            (False, self.tire_selection_frame),
            (False, self.interior_design_frame),
            (False, self.propulsion_system_frame),
            (False, self.show_configuration_button)
        ])
def _reset(self):
cars = []
if self.car_a:
cars.append("Car A")
if self.car_b:
cars.append("Car B")
if self.car_c:
cars.append("Car C")
self.car_model_selection_combobox.clear()
self.car_model_selection_combobox.addItems(cars)
self._hide_all_boxes()
def update_cars_by_tire(self):
car_a_disabled = True
car_b_disabled = True
car_c_disabled = True
if self.tire_18_inch:
car_b_disabled = False
if self.tire_19_inch:
car_b_disabled = False
car_c_disabled = False
if self.tire_20_inch:
car_a_disabled = False
car_b_disabled = False
car_c_disabled = False
if self.tire_22_inch:
car_a_disabled = False
car_c_disabled = False
return car_a_disabled, car_b_disabled, car_c_disabled
def update_cars_by_interior(self):
car_a_disabled = True
car_b_disabled = True
car_c_disabled = True
if self.modern_interior:
car_a_disabled = False
car_b_disabled = False
if self.vintage_interior:
car_a_disabled = False
car_c_disabled = False
if self.sport_interior:
car_b_disabled = False
car_c_disabled = False
return car_a_disabled, car_b_disabled, car_c_disabled
def update_cars_by_propulsion_system(self):
car_a_disabled = True
car_b_disabled = True
car_c_disabled = True
if self.combustion_engine_a:
car_a_disabled = False
if self.combustion_engine_b:
car_c_disabled = False
if self.combustion_engine_c:
car_a_disabled = False
car_c_disabled = False
if self.electric_motor_a:
car_b_disabled = False
car_c_disabled = False
if self.electric_motor_b:
car_b_disabled = False
car_c_disabled = False
return car_a_disabled, car_b_disabled, car_c_disabled
    def update_comboboxes(self):
        """Recompute model availability from all three part categories, emit
        which models were newly disabled, and reset the workflow."""
        disabled_cars = []
        car_a_disabled_tire, car_b_disabled_tire, car_c_disabled_tire = self.update_cars_by_tire()
        car_a_disabled_interior, car_b_disabled_interior, car_c_disabled_interior = self.update_cars_by_interior()
        car_a_disabled_propulsion, car_b_disabled_propulsion, car_c_disabled_propulsion = self.update_cars_by_propulsion_system()
        # A model stays available only if every category can still supply it;
        # record a model in disabled_cars only on a True -> False transition.
        if not car_a_disabled_tire and not car_a_disabled_interior and not car_a_disabled_propulsion:
            self.car_a = True
        else:
            if self.car_a:
                disabled_cars.append("Car A")
            self.car_a = False
        if not car_b_disabled_tire and not car_b_disabled_interior and not car_b_disabled_propulsion:
            self.car_b = True
        else:
            if self.car_b:
                disabled_cars.append("Car B")
            self.car_b = False
        if not car_c_disabled_tire and not car_c_disabled_interior and not car_c_disabled_propulsion:
            self.car_c = True
        else:
            if self.car_c:
                disabled_cars.append("Car C")
            self.car_c = False
        if len(disabled_cars) > 0:
            self.signal_handler.disabled_cars.emit(', '.join(car for car in disabled_cars))
        self._reset()
def initialize_car_a(self):
tires = []
if self.tire_20_inch:
tires.append("20 Inch")
if self.tire_22_inch:
tires.append("22 Inch")
self.tire_selection_combobox.addItems(tires)
interiors = []
if self.modern_interior:
interiors.append("Modern")
if self.vintage_interior:
interiors.append("Vintage")
self.interior_design_combobox.addItems(interiors)
propulsion_systems = []
if self.combustion_engine_a:
propulsion_systems.append("Combustion Engine A")
if self.combustion_engine_c:
propulsion_systems.append("Combustion Engine C")
self.propulsion_system_combobox.addItems(propulsion_systems)
def initialize_car_b(self):
tires = []
if self.tire_18_inch:
tires.append("18 Inch")
if self.tire_19_inch:
tires.append("19 Inch")
if self.tire_20_inch:
tires.append("20 Inch")
self.tire_selection_combobox.addItems(tires)
interiors = []
if self.modern_interior:
interiors.append("Modern")
if self.sport_interior:
interiors.append("Sport")
self.interior_design_combobox.addItems(interiors)
propulsion_systems = []
if self.electric_motor_a:
propulsion_systems.append("Electric Motor A")
if self.electric_motor_b:
propulsion_systems.append("Electric Motor B")
self.propulsion_system_combobox.addItems(propulsion_systems)
def initialize_car_c(self):
tires = []
if self.tire_19_inch:
tires.append("19 Inch")
if self.tire_20_inch:
tires.append("20 Inch")
if self.tire_22_inch:
tires.append("22 Inch")
self.tire_selection_combobox.addItems(tires)
interiors = []
if self.vintage_interior:
interiors.append("Vintage")
if self.sport_interior:
interiors.append("Sport")
self.interior_design_combobox.addItems(interiors)
propulsion_systems = []
if self.combustion_engine_b:
propulsion_systems.append("Combustion Engine B")
if self.combustion_engine_c:
propulsion_systems.append("Combustion Engine C")
if self.electric_motor_a:
propulsion_systems.append("Electric Motor A")
if self.electric_motor_b:
propulsion_systems.append("Electric Motor B")
self.propulsion_system_combobox.addItems(propulsion_systems)
    @Slot()
    def change_selected_car(self):
        """Record the chosen model, repopulate the option boxes for it, and
        reveal the tire step (hiding all later steps)."""
        selected_car = self.car_model_selection_combobox.currentText()
        initialize_function = None
        if selected_car == "Car A":
            self.selected_car = "Car A"
            initialize_function = self.initialize_car_a
        elif selected_car == "Car B":
            self.selected_car = "Car B"
            initialize_function = self.initialize_car_b
        elif selected_car == "Car C":
            self.selected_car = "Car C"
            initialize_function = self.initialize_car_c
        assert self.selected_car in CAR_MODELS
        self.tire_selection_combobox.clear()
        self.interior_design_combobox.clear()
        self.propulsion_system_combobox.clear()
        # Initialize the content of the comboboxes depending on the selected cars as the cars can not have all possible
        # parts
        initialize_function()
        self.tire_selection_frame.setVisible(True)
        self.interior_design_frame.setVisible(False)
        self.propulsion_system_frame.setVisible(False)
        self.show_configuration_button.setVisible(False)
        self.signal_handler.changed_active_car_configurator_widgets.emit([
            (True, self.tire_selection_frame),
            (False, self.interior_design_frame),
            (False, self.propulsion_system_frame),
            (False, self.show_configuration_button)
        ])
@Slot()
def change_selected_tire(self):
selected_tire = self.tire_selection_combobox.currentText()
if selected_tire == "18 Inch":
self.selected_tire = "18 Inch"
elif selected_tire == "19 Inch":
self.selected_tire = "19 Inch"
elif selected_tire == "20 Inch":
self.selected_tire = "20 Inch"
elif selected_tire == "22 Inch":
self.selected_tire = "22 Inch"
assert self.selected_tire in TIRE_VARIANTS
self.interior_design_frame.setVisible(True)
self.propulsion_system_frame.setVisible(False)
self.show_configuration_button.setVisible(False)
self.signal_handler.changed_active_car_configurator_widgets.emit([
(True, self.tire_selection_frame),
(True, self.interior_design_frame),
(False, self.propulsion_system_frame),
(False, self.show_configuration_button)
])
@Slot()
def change_selected_interior(self):
selected_interior = self.interior_design_combobox.currentText()
if selected_interior == "Modern":
self.selected_interior = "Modern"
elif selected_interior == "Vintage":
self.selected_interior = "Vintage"
elif selected_interior == "Sport":
self.selected_interior = "Sport"
assert self.selected_interior in INTERIOR_VARIANTS
self.propulsion_system_frame.setVisible(True)
self.show_configuration_button.setVisible(False)
self.signal_handler.changed_active_car_configurator_widgets.emit([
(True, self.tire_selection_frame),
(True, self.interior_design_frame),
(True, self.propulsion_system_frame),
(False, self.show_configuration_button)
])
@Slot()
def change_selected_propulsion_system(self):
selected_propulsion_system = self.propulsion_system_combobox.currentText()
if selected_propulsion_system == "Combustion Engine A":
self.selected_propulsion_system = "Combustion Engine A"
elif selected_propulsion_system == "Combustion Engine B":
self.selected_propulsion_system = "Combustion Engine B"
elif selected_propulsion_system == "Combustion Engine C":
self.selected_propulsion_system = "Combustion Engine C"
elif selected_propulsion_system == "Electric Motor A":
self.selected_propulsion_system = "Electric Motor A"
elif selected_propulsion_system == "Electric Motor B":
self.selected_propulsion_system = "Electric Motor B"
assert self.selected_propulsion_system in PROPULSION_SYSTEMS
self.show_configuration_button.setVisible(True)
self.signal_handler.changed_active_car_configurator_widgets.emit([
(True, self.tire_selection_frame),
(True, self.interior_design_frame),
(True, self.propulsion_system_frame),
(True, self.show_configuration_button)
])
@Slot()
def print_selected_configuration(self):
selected_car = None
if self.selected_car == "Car A":
selected_car = "Car A"
elif self.selected_car == "Car B":
selected_car = "Car B"
elif self.selected_car == "Car C":
selected_car = "Car C"
assert selected_car in CAR_MODELS
selected_tire = None
if self.selected_tire == "18 Inch":
selected_tire = "18 Inch"
elif self.selected_tire == "19 Inch":
selected_tire = "19 Inch"
elif self.selected_tire == "20 Inch":
selected_tire = "20 Inch"
elif self.selected_tire == "22 Inch":
selected_tire = "22 Inch"
assert selected_tire in TIRE_VARIANTS
selected_interior = None
if self.selected_interior == "Modern":
selected_interior = "Modern"
elif self.selected_interior == "Vintage":
selected_interior = "Vintage"
elif self.selected_interior == "Sport":
selected_interior = "Sport"
assert selected_interior in INTERIOR_VARIANTS
selected_propulsion_system = None
if self.selected_propulsion_system == "Combustion Engine A":
selected_propulsion_system = "Combustion Engine A"
elif self.selected_propulsion_system == "Combustion Engine B":
selected_propulsion_system = "Combustion Engine B"
elif self.selected_propulsion_system == "Combustion Engine C":
selected_propulsion_system = "Combustion Engine C"
elif self.selected_propulsion_system == "Electric Motor A":
selected_propulsion_system = "Electric Motor A"
elif self.selected_propulsion_system == "Electric Motor B":
selected_propulsion_system = "Electric Motor B"
assert selected_propulsion_system in PROPULSION_SYSTEMS
selected_configuration_message = (f"Selected {selected_car} with the following configuration:\n" +
f"Tires: {selected_tire}\n" +
f"Interior: {selected_interior}\n" +
f"Propulsion System: {selected_propulsion_system}")
self.signal_handler.car_configured.emit(selected_configuration_message)
self._reset()
# Methods for changing the settings
@Slot(bool)
def change_18_inch_tire(self, checked: bool):
if checked:
self.tire_18_inch = True
else:
self.tire_18_inch = False
self.update_comboboxes()
@Slot(bool)
def change_19_inch_tire(self, checked: bool):
if checked:
self.tire_19_inch = True
else:
self.tire_19_inch = False
self.update_comboboxes()
@Slot(bool)
def change_20_inch_tire(self, checked: bool):
if checked:
self.tire_20_inch = True
else:
self.tire_20_inch = False
self.update_comboboxes()
@Slot(bool)
def change_22_inch_tire(self, checked: bool):
if checked:
self.tire_22_inch = True
else:
self.tire_22_inch = False
self.update_comboboxes()
@Slot(bool)
def change_modern_interior(self, checked: bool):
if checked:
self.modern_interior = True
else:
self.modern_interior = False
self.update_comboboxes()
@Slot(bool)
def change_vintage_interior(self, checked: bool):
if checked:
self.vintage_interior = True
else:
self.vintage_interior = False
self.update_comboboxes()
@Slot(bool)
def change_sport_interior(self, checked: bool):
if checked:
self.sport_interior = True
else:
self.sport_interior = False
self.update_comboboxes()
@Slot(bool)
def change_combustion_engine_a(self, checked: bool):
if checked:
self.combustion_engine_a = True
else:
self.combustion_engine_a = False
self.update_comboboxes()
@Slot(bool)
def change_combustion_engine_b(self, checked: bool):
if checked:
self.combustion_engine_b = True
else:
self.combustion_engine_b = False
self.update_comboboxes()
@Slot(bool)
def change_combustion_engine_c(self, checked: bool):
if checked:
self.combustion_engine_c = True
else:
self.combustion_engine_c = False
self.update_comboboxes()
@Slot(bool)
def change_electric_motor_a(self, checked: bool):
if checked:
self.electric_motor_a = True
else:
self.electric_motor_a = False
self.update_comboboxes()
@Slot(bool)
def change_electric_motor_b(self, checked: bool):
if checked:
self.electric_motor_b = True
else:
self.electric_motor_b = False
self.update_comboboxes()
# NOTE(review): these two slots take ``settings_dialog`` as their first
# parameter and read ``settings_dialog.settings_dialog`` — they look like
# methods of a settings-dialog class defined elsewhere; confirm placement
# against the upstream file (indentation was lost in this copy).
@Slot(str)
def show_disabled_cars_error_dialog(settings_dialog, disabled_cars: str):
    """Pop a warning dialog listing the car model(s) disabled by the stock change."""
    disabled_cars_dialog = WarningDialog(
        warning_text=f"Disabled the following car(s): {disabled_cars}",
        parent=settings_dialog.settings_dialog
    )
    disabled_cars_dialog.show()


@Slot(str)
def show_car_configuration_dialog(settings_dialog, car_configuration: str):
    """Pop a dialog echoing the finished car configuration summary."""
    car_configuration_dialog = WarningDialog(warning_text=car_configuration, parent=settings_dialog.settings_dialog)
    car_configuration_dialog.show()
| StarcoderdataPython |
3362681 | <reponame>PhilR8/regulations-site
from unittest import TestCase
from django.urls import reverse
class UrlTests(TestCase):
    """URL reversing for the reader view."""

    def test_chrome_section_url(self):
        # (reverse args, expected path) pairs — plain and dated versions.
        cases = [
            (('201', '2', '2012-1123'), '/201/2/2012-1123/'),
            (('201', '2', '2012-1123_20121011'), '/201/2/2012-1123_20121011/'),
        ]
        for args, expected in cases:
            resolved = reverse('reader_view', args=args)
            self.assertEqual(resolved, expected)
| StarcoderdataPython |
31466 | <filename>traceback_test.py
import traceback
class A:
    """Helper that dumps information about the calling stack frame."""

    def __init__(self):
        pass

    def tb(self):
        """Print the extracted stack, then the caller frame's name and locals."""
        stack = traceback.extract_stack()
        print(stack)
        caller = stack[-2]
        print(caller.name)
        print(caller.locals)
def another_function():
    """Invoke lumberstack twice, each time with a fresh A instance."""
    for _ in range(2):
        lumberstack(A())
def lumberstack(a):
    """Delegate to *a*'s stack-dumping method (adds one frame to the trace)."""
    a.tb()
# Exercise the stack inspection; the string literal below records the
# expected console output of this script for reference.
another_function()

"""
[<FrameSummary file traceback_test.py, line 23 in <module>>,
<FrameSummary file traceback_test.py, line 16 in another_function>,
<FrameSummary file traceback_test.py, line 21 in lumberstack>,
<FrameSummary file traceback_test.py, line 9 in tb>]
lumberstack
None
[<FrameSummary file traceback_test.py, line 23 in <module>>,
<FrameSummary file traceback_test.py, line 17 in another_function>,
<FrameSummary file traceback_test.py, line 21 in lumberstack>,
<FrameSummary file traceback_test.py, line 9 in tb>]
lumberstack
None
"""
1705109 | # import bagpy
from bagpy import bagreader
import pandas as pd
import numpy as np
import pickle as pkl
import os
import rosbag
class RobotTraj():
    """Computes robot end-effector path lengths from recorded ROS bag files."""

    def __init__(self) -> None:
        # Topic holding end-effector poses; other_topics kept for reference.
        self.desired_topic = '/anna/end_effector/states'
        self.other_topics = ['/anna/joint/states', '/anna/keyframe/states']
        self.users = ['user2','user4','user7','user8','user10','user14','user16','user18','user20'] # narration
        self.users += ['user3','user5','user6','user9','user11','user12','user15','user17','user19'] # natural
        self.tasks = ['cutting', 'box']
        self.demo_types = ['video', 'kt']
        self.data_dir = '/Volumes/Seagate Portable Drive/audio_study/kinesthetic/'
        # key "<user>_<task>_<demo_type>" -> total path length (meters, presumably)
        self.path_len = {}
        # key -> {elapsed seconds rounded to 0.1s -> cumulative distance}
        self.path_len_time = {}

    def compute_path_len(self):
        """Fill path_len / path_len_time from the newest bag of each condition."""
        # iterate through users
        for user in self.users:
            for task in self.tasks:
                for demo_type in self.demo_types:
                    key = user+'_'+task+'_'+demo_type
                    if key not in self.path_len_time:
                        self.path_len_time[key] = {}
                    print(user,task,demo_type)
                    bagfiles = []
                    bag_path = os.path.join(self.data_dir,user,task,demo_type)
                    for dirname, dirs, files in os.walk(bag_path):
                        for filename in files:
                            fn,ex = os.path.splitext(filename)
                            if ex == '.bag':
                                bagfiles.append(filename)
                    # reverse sort so bagfiles[0] is the lexicographically last
                    # (i.e. most recent) recording
                    bagfiles.sort(reverse=True)
                    bag = os.path.join(bag_path,bagfiles[0])
                    bag_audio = rosbag.Bag(bag)
                    total_dist = 0.
                    i = 0
                    for idx, data in enumerate(bag_audio.read_messages(topics=[self.desired_topic])):
                        topic, msg, t = data
                        # print(msg)
                        if i==0:
                            # First message: remember start pose and start time (ns).
                            x,y,z = msg.pose.position.x,msg.pose.position.y,msg.pose.position.z
                            old = np.array((x,y,z))
                            t_start = t.secs*pow(10,9) + t.nsecs
                            curr_time = 0
                        else:
                            # Accumulate Euclidean distance between consecutive poses.
                            x,y,z = msg.pose.position.x,msg.pose.position.y,msg.pose.position.z
                            newl = np.array((x,y,z))
                            total_dist += np.linalg.norm(newl-old)
                            t_new = t.secs*pow(10,9) + t.nsecs
                            curr_time = t_new-t_start
                            old = np.copy(newl)
                        # print(curr_time*pow(10,-9))
                        curr_time = curr_time*pow(10,-9)
                        # print(round(curr_time,1))
                        # save distance upto every 0.1 second
                        self.path_len_time[key][round(curr_time,1)] = total_dist
                        i+=1
                    print(f'Total distance traveled by the arm is {total_dist}')
                    self.path_len[key] = total_dist

    def save_path_len(self):
        """Compute path lengths and pickle both result dictionaries."""
        self.compute_path_len()
        filename = '../../data/robot_path_len.pkl'
        with open(filename, 'wb') as handle:
            pkl.dump(self.path_len, handle, protocol=pkl.HIGHEST_PROTOCOL)
        filename = '../../data/robot_path_len_time.pkl'
        with open(filename, 'wb') as handle:
            pkl.dump(self.path_len_time, handle, protocol=pkl.HIGHEST_PROTOCOL)
def main():
    """Compute and persist robot end-effector path lengths."""
    RobotTraj().save_path_len()


if __name__ == "__main__":
    main()
4800446 | <gh_stars>1-10
# -*- coding: utf-8 -*-
"""
Copyright [2009-2020] EMBL-European Bioinformatics Institute
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import typing as ty
from rnacentral_pipeline.databases import data
def as_exon(location) -> data.Exon:
    """Convert a biopython location into a one-based Exon."""
    start = location.start + 1
    stop = int(location.end)
    return data.Exon(start=start, stop=stop)
def as_exons(feature) -> ty.Tuple[str, ty.List[data.Exon]]:
    """Collect the strand and all Exons that make up *feature*."""
    location = feature.location
    # Compound locations expose their pieces via ".parts"; simple ones do not.
    parts = getattr(location, "parts", [location])
    return parts[0].strand, [as_exon(part) for part in parts]
def regions(record, feature) -> ty.List[data.SequenceRegion]:
    """Build the one-based SequenceRegion list described by *feature*."""
    accessions = record.annotations["accessions"]
    # A trailing '-' on the first entry means the accession got split up.
    if len(accessions) > 1 and accessions[0][-1] == "-":
        accessions = ["".join(accessions)]
    acc = accessions[0]
    assert ":" in acc, "Invalid accession (%s) for %s" % (acc, record)
    fields = acc.split(":")
    strand, exons = as_exons(feature)
    region = data.SequenceRegion(
        chromosome=fields[2],
        strand=strand,
        exons=exons,
        assembly_id=fields[1],
        coordinate_system=data.CoordinateSystem.one_based(),
    )
    return [region]
| StarcoderdataPython |
119629 | from aiohttp import web
from redbull import Manager
mg = Manager(web.Application())


@mg.api()
async def say_hi(name: str, please: bool):
    "Says hi if you say please"
    return 'hi ' + name if please else 'um hmm'


mg.run()
| StarcoderdataPython |
3333615 | import requests
import json
from math import ceil
from sync_dl_ytapi.helpers import getHttpErr
import sync_dl.config as cfg
def getItemIds(credJson, plId):
    """Return [(songId, playlistItemId), ...] for every item in playlist plId.

    Pages through the YouTube Data API playlistItems endpoint; returns an
    empty list if any page cannot be fetched after retries.
    """
    requestURL = "https://youtube.googleapis.com/youtube/v3/playlistItems?part=contentDetails&maxResults=25&pageToken={pageToken}&playlistId={plId}"

    def makeRequest(pageToken, pageNum, attempts=3):
        # Fetch one page, retrying up to `attempts` times; None on failure.
        getRequest = lambda pageToken: requests.get(
            requestURL.format(pageToken=pageToken, plId=plId),
            headers={
                'Authorization': 'Bearer ' + credJson["token"],
                'Accept': 'application/json',
            }
        )
        response = getRequest(pageToken)
        i = 1
        while not response.ok and i < attempts:
            cfg.logger.debug(response.content.decode("utf8"))
            cfg.logger.error(getHttpErr(response))
            cfg.logger.error(f"Failed to get item Ids for page {pageNum}")
            cfg.logger.info(f"Retrying...")
            response = getRequest(pageToken)
            i += 1
        if not response.ok:
            cfg.logger.debug(response.content.decode("utf8"))
            cfg.logger.error(getHttpErr(response))
            cfg.logger.error(f"Max number of attempts reached for page {pageNum}")
            return
        return json.loads(response.content)

    # First page uses an empty page token.
    response = makeRequest('', 0)
    if not response:
        return []
    ids = []  # contains tuples (songId, plItemId)
    for item in response['items']:
        ids.append((item["contentDetails"]['videoId'], item["id"]))
    # repeat the above process for all pages
    plLen = response['pageInfo']['totalResults']
    pageLen = response['pageInfo']['resultsPerPage']
    numPages = ceil(plLen / pageLen)
    for i in range(1, numPages):  # 0th page already dealt with
        response = makeRequest(response['nextPageToken'], i)
        if not response:
            return []
        for item in response['items']:
            ids.append((item["contentDetails"]['videoId'], item["id"]))
    return ids
def moveSong(credJson, plId, songId, plItemId, index, attempts=3):
    '''
    Move the playlist item identified by songId/plItemId to position *index*
    in playlist plId, retrying up to *attempts* times.
    Returns True on success, False on failure.
    '''
    # TODO sanitize/ clamp input index
    putRequest = lambda: requests.put('https://youtube.googleapis.com/youtube/v3/playlistItems?part=snippet',
        headers={
            'Authorization': 'Bearer ' + credJson["token"],
            'Accept': 'application/json',
            'Content-Type': 'application/json',
        },
        # Raw JSON body; doubled braces are f-string escapes for literal { }.
        data=f'{{"id": "{plItemId}","snippet": {{"playlistId": "{plId}","position": {index},"resourceId": {{"kind": "youtube#video","videoId": "{songId}" }} }} }}'
    )
    i = 1
    response = putRequest()
    while not response.ok and i < attempts:
        cfg.logger.debug(response.content)
        cfg.logger.error(getHttpErr(response))
        cfg.logger.error(f"Failed attempt to move Song ID: {songId} to Index: {index}")
        cfg.logger.info(f"Retrying...")
        response = putRequest()
        i += 1
    if not response.ok:
        cfg.logger.error(f"Max Attempts to Move Song ID: {songId} to Index: {index} Reached")
        return False
    title = json.loads(response.content)["snippet"]["title"]
    cfg.logger.info(f'Moved Song: {title} to Index: {index}')
    return True
| StarcoderdataPython |
3292725 | <filename>bot/commands/advice_cmd.py
import discord, datetime, random, time
from bot.commands.command import Command
class AdviceCMD(Command):
    """Discord command that replies with COVID-19 safety advice in an embed."""

    async def run(self, message, raw_args):
        """Send three random tips, or all tips when '-all' / '-a' is passed."""
        allInfo = False
        args = []
        # Split "-flag word ..." style arguments into lists of words.
        for rarg in raw_args.split('-'):
            arg = rarg.split(' ')
            for i in arg:
                if i == '':
                    arg.remove('')
            if len(arg) > 0:
                args.append(arg)
        for arg in args:
            if len(arg) < 1:
                continue
            if arg[0] == "all" or arg[0] == "a":
                allInfo = True
        embed = discord.Embed(colour=discord.Colour(0x1d837e), timestamp=datetime.datetime.utcnow())
        embed.set_thumbnail(
            url="https://upload.wikimedia.org/wikipedia/commons/thumb/8/82/SARS-CoV-2_without_background.png/220px-SARS-CoV-2_without_background.png")
        embed.set_footer(text="Covid Watch - Coronavirus Statistics",
                         icon_url="https://upload.wikimedia.org/wikipedia/commons/thumb/8/82/SARS-CoV-2_without_background.png/220px-SARS-CoV-2_without_background.png")
        advices = ['Make sure to **wear a mask** in public', 'Try to avoid contact with people!',
                   'Dont visit your elders; you could get them sick!',
                   'Seniors, People with medical issues, or infants are at higher risk',
                   'Stay 6 feet apart from the public',
                   'Remember that you can make it through the pandemic',
                   'Dont freak out. Staying calm and following the rules helps',
                   'Covid cannot spread through mosquito, flea, or tick bites',
                   'Always wash your hands after going out.',
                   'Being healthy doesnt necessarily mean you are immune to covid.',
                   'Feel free to call people you know; They probably need attention as much as you do!',
                   'Covid will pass. There is no need to freak out about it',
                   'Fight back by staying inside!']
        final_advice = ""
        if allInfo:
            # Enumerate every tip in order.
            i = 0
            for adv in advices:
                i += 1
                final_advice = final_advice + "**`" + str(i) + "-`**" + adv + "\n"
        else:
            # Pick 3 distinct random tips; the substring check avoids repeats.
            for i in range(3):
                while True:
                    ii = random.randint(0, len(advices) - 1)
                    if not advices[ii] in final_advice:
                        final_advice = final_advice + "**`" + str(i + 1) + "-`**" + advices[ii] + "\n"
                        break
        embed.add_field(name="Advice", value=final_advice)
        await message.channel.send(content=" \n**`Coronavirus Advice`**\n*`Please stay safe`*", embed=embed)
| StarcoderdataPython |
1655337 | import io
import functools
import PIL.Image
import numpy as np
from fastapi import FastAPI
from pydantic import BaseModel
from fastapi.responses import JSONResponse
from starlette.responses import StreamingResponse
import uvicorn
from fastapi.middleware.cors import CORSMiddleware
from sm.browser import utils
from sm.browser.main import preprocess_dataset_peaks, DatasetBrowser
app = FastAPI()

# Permissive CORS policy (any origin/method/header) -- development grade.
origins = ["*"]
app.add_middleware(
    CORSMiddleware,
    allow_origins=origins,
    allow_credentials=True,
    allow_methods=["*"],
    allow_headers=["*"],
)
@functools.lru_cache(maxsize=128)
def load_dataset_browser(s3_path: str):
    """Create (and memoize per path) a DatasetBrowser for the given S3 path."""
    return DatasetBrowser(s3_path)
class DatasetPreprocessItem(BaseModel):
    """Request body for /preprocess."""
    # S3 location of the dataset to preprocess.
    s3_path: str
@app.post("/preprocess")
async def preprocess(item: DatasetPreprocessItem):
    """Precompute peak data for the dataset stored at item.s3_path."""
    preprocess_dataset_peaks(item.s3_path)
    return {"status": "ok"}
class PixelSearchItem(BaseModel):
    """Request body for /search_pixel: dataset path plus pixel coordinates."""
    s3_path: str
    x: int
    y: int
class MzSearchItem(BaseModel):
    """Request body for /search: dataset path, target m/z and ppm tolerance."""
    s3_path: str
    mz: float
    ppm: int = 3
class PngStreamingResponse(StreamingResponse):
    """StreamingResponse preconfigured with a PNG media type."""
    media_type = "image/png"
@app.post("/search", response_class=PngStreamingResponse)
async def perform_search(item: MzSearchItem):
    """Render the m/z window (mz +/- ppm) as a PNG ion image."""
    dataset_browser = load_dataset_browser(item.s3_path)
    mz_lo, mz_hi = utils.mz_ppm_bin(mz=item.mz, ppm=item.ppm)
    rgba_array = dataset_browser.search(mz_lo, mz_hi)
    # Scale floats in [0, 1] to 8-bit channels before PNG encoding.
    image = PIL.Image.fromarray((rgba_array * 255).astype(np.uint8), mode="RGBA")
    fp = io.BytesIO()
    image.save(fp, format="PNG")
    fp.seek(0)
    return PngStreamingResponse(fp)
@app.post("/search_pixel")
async def perform_search(item: PixelSearchItem):
    """Return search results for the pixel at (x, y) as JSON."""
    # NOTE(review): this redefines the module-level name `perform_search`
    # used by the /search handler above. Both routes are registered at
    # decoration time so they still work, but consider a distinct name.
    dataset_browser = load_dataset_browser(item.s3_path)
    return JSONResponse(dataset_browser.search_pixel(item.x, item.y))
# Run a local development server when executed directly.
if __name__ == "__main__":
    uvicorn.run(app)
| StarcoderdataPython |
3370373 | # Copyright (c) OpenMMLab. All rights reserved.
import logging
import os.path as osp
import pytest
import torch
import torch.nn as nn
from mmcv.runner import build_runner
from mmcv.runner.fp16_utils import auto_fp16
from mmcv.utils import IS_IPU_AVAILABLE
# The IPU hook wrapper can only be imported when an IPU runtime exists.
if IS_IPU_AVAILABLE:
    from mmcv.device.ipu.hook_wrapper import IPUFp16OptimizerHook

# Marks tests to be skipped unless an IPU environment is available.
skip_no_ipu = pytest.mark.skipif(
    not IS_IPU_AVAILABLE, reason='test case under ipu environment')
# TODO Once the model training and inference interfaces
# of MMCLS and MMDET are unified,
# construct the model according to the unified standards
class ToyModel(nn.Module):
    """Minimal conv/bn/relu model exposing the mmcv train_step interface."""

    def __init__(self):
        super().__init__()
        self.conv = nn.Conv2d(3, 3, 1)
        self.bn = nn.BatchNorm2d(3)
        self.relu = nn.ReLU6()
        # Flag toggled by fp16 utilities; False means full precision.
        self.fp16_enabled = False

    @auto_fp16(apply_to=('img', ))
    def forward(self, img, return_loss=True, **kwargs):
        """Return a loss dict in train mode, or raw features otherwise."""
        x = self.conv(img)
        x = self.bn(x)
        x = self.relu(x)
        if return_loss:
            # Simple MSE against kwargs['gt_label']; duplicated in list/dict
            # form to exercise the runner's loss parsing.
            loss = ((x - kwargs['gt_label'])**2).sum()
            return {
                'loss': loss,
                'loss_list': [loss, loss],
                'loss_dict': {
                    'loss1': loss
                }
            }
        return x

    def _parse_losses(self, losses):
        # Returns (loss, log_vars); both are the same tensor here.
        return losses['loss'], losses['loss']

    def train_step(self, data, optimizer=None, **kwargs):
        """One training step: forward pass plus loss bookkeeping."""
        losses = self(**data)
        loss, log_vars = self._parse_losses(losses)
        outputs = dict(
            loss=loss, log_vars=log_vars, num_samples=len(data['img'].data))
        return outputs
@skip_no_ipu
def test_ipu_hook_wrapper(tmp_path):
    """End-to-end check of IPU runner hook registration and its error paths."""
    model = ToyModel()
    dummy_input = {
        'data': {
            'img': torch.rand((16, 3, 10, 10)),
            'gt_label': torch.rand((16, 3, 10, 10))
        }
    }
    dir_name = 'a_tmp_dir'
    working_dir = osp.join(tmp_path, dir_name)
    optimizer = torch.optim.SGD(model.parameters(), lr=0.1)
    default_args = dict(
        model=model,
        work_dir=working_dir,
        optimizer=optimizer,
        logger=logging.getLogger())
    cfg = dict(type='IPUEpochBasedRunner', max_epochs=1)
    dummy_runner = build_runner(cfg, default_args=default_args)

    # learning policy
    lr_config = dict(policy='step', step=[1, 150])
    # test optimizer config
    optimizer_config = dict(
        grad_clip=dict(max_norm=2), detect_anomalous_params=True)

    # test building ipu_lr_hook_class
    dummy_runner.register_training_hooks(
        lr_config=lr_config, optimizer_config=None, timer_config=None)

    # test _set_lr()
    output = dummy_runner.model.train_step(**dummy_input)
    dummy_runner.outputs = output
    dummy_runner.call_hook('before_train_epoch')

    # test building ipu_optimizer_hook_class: gradient clipping unsupported
    with pytest.raises(
            NotImplementedError, match='IPU does not support gradient clip'):
        dummy_runner.register_training_hooks(
            lr_config=None,
            optimizer_config=optimizer_config,
            timer_config=None)

    # test fp16 optimizer hook: invalid loss_scale values must be rejected
    lr_config = dict(policy='step', step=[1, 150])
    optimizer_config = dict(grad_clip=dict(max_norm=2))
    dummy_runner.hooks.pop(0)

    with pytest.raises(NotImplementedError, match='IPU mode does not support'):
        optimizer_config = IPUFp16OptimizerHook(
            loss_scale='dynamic', distributed=False)
    with pytest.raises(NotImplementedError, match='IPU mode supports single'):
        optimizer_config = IPUFp16OptimizerHook(
            loss_scale={}, distributed=False)
    with pytest.raises(ValueError, match='loss_scale should be float'):
        optimizer_config = IPUFp16OptimizerHook(
            loss_scale=[], distributed=False)
    optimizer_config = IPUFp16OptimizerHook(loss_scale=2.0, distributed=False)
    dummy_runner.register_training_hooks(
        lr_config=lr_config,
        optimizer_config=optimizer_config,
        timer_config=None)
    dummy_runner.call_hook('after_train_iter')
| StarcoderdataPython |
3258073 | <gh_stars>10-100
# coding: utf-8
'''Package initializer; intentionally contains no code.'''
| StarcoderdataPython |
4801427 | <gh_stars>1-10
import unittest
from spacer.yama import z3_dict_to_cli, z3_yaml_to_cli, z3_yaml_to_name
class Z3DictCliTest(unittest.TestCase):
    """Tests conversion of Z3 option dicts / YAML profiles to CLI arguments."""

    def test_dict(self):
        """Nested option dict flattens to sorted z3 CLI flags."""
        opts = {
            'fixedpoint': {
                'xform': {
                    'slice': False,
                    'inline_linear': False
                }
            },
            'z3': {
                'st': '',
                'v': 1,
                'T': 950,
                'memory': 4096
            }
        }
        cli = z3_dict_to_cli(opts)
        cli = sorted(cli)
        expect = [
            '-T:950', '-memory:4096', '-st', '-v:1',
            'fixedpoint.xform.inline_linear=false',
            'fixedpoint.xform.slice=false'
        ]
        self.assertEqual(cli, expect)

    def test_yaml(self):
        """YAML option text converts to the same sorted CLI flags."""
        cli = z3_yaml_to_cli("""
        fixedpoint:
          xform:
            slice: false
            inline_linear: false
        z3:
          T: 950
          memory: 4096
          st: ''
          v: 1
        """)
        cli = sorted(cli)
        expect = [
            '-T:950', '-memory:4096', '-st', '-v:1',
            'fixedpoint.xform.inline_linear=false',
            'fixedpoint.xform.slice=false'
        ]
        self.assertEqual(cli, expect)

    def test_yaml_name(self):
        """This YAML profile maps to the named preset 'high'."""
        name = z3_yaml_to_name("""
        fixedpoint:
          xform:
            slice: false
            inline_linear: false
        z3:
          T: 950
          memory: 4096
          st: ''
          v: 1
        """)
        self.assertEqual(name, "high")
| StarcoderdataPython |
41157 | <gh_stars>0
# -*- coding: utf-8 -*-
# (c) 2020 <NAME> <<EMAIL>>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
from units.compat.mock import MagicMock
import pytest
from ansible.module_utils.six import PY3
from ansible.utils.display import Display, get_text_width, initialize_locale
def test_get_text_width():
    """Display width: CJK chars count 2, control/backspace chars count 0."""
    initialize_locale()
    assert get_text_width(u'コンニチハ') == 10
    assert get_text_width(u'abコcd') == 6
    assert get_text_width(u'café') == 4
    assert get_text_width(u'four') == 4
    assert get_text_width(u'\u001B') == 0
    assert get_text_width(u'ab\u0000') == 2
    assert get_text_width(u'abコ\u0000') == 4
    assert get_text_width(u'🚀🐮') == 4
    assert get_text_width(u'\x08') == 0
    assert get_text_width(u'\x08\x08') == 0
    assert get_text_width(u'ab\x08cd') == 3
    assert get_text_width(u'ab\x1bcd') == 3
    assert get_text_width(u'ab\x7fcd') == 3
    assert get_text_width(u'ab\x94cd') == 3
    # Only text (unicode) input is accepted.
    pytest.raises(TypeError, get_text_width, 1)
    pytest.raises(TypeError, get_text_width, b'four')
pytest.raises(TypeError, get_text_width, b'four')
@pytest.mark.skipif(PY3, reason='Fallback only happens reliably on py2')
def test_get_text_width_no_locale():
    """Without an initialized locale, wide chars trigger EnvironmentError."""
    pytest.raises(EnvironmentError, get_text_width, u'🚀🐮')
def test_Display_banner_get_text_width(monkeypatch):
    """Banner pads the emoji text (display width 4) with 75 trailing stars."""
    initialize_locale()
    display = Display()
    display_mock = MagicMock()
    monkeypatch.setattr(display, 'display', display_mock)

    display.banner(u'🚀🐮', color=False, cows=False)
    args, kwargs = display_mock.call_args
    msg = args[0]
    stars = u' %s' % (75 * u'*')
    assert msg.endswith(stars)
@pytest.mark.skipif(PY3, reason='Fallback only happens reliably on py2')
def test_Display_banner_get_text_width_fallback(monkeypatch):
    """Fallback path treats the emoji as width 2, so 77 stars pad the line."""
    display = Display()
    display_mock = MagicMock()
    monkeypatch.setattr(display, 'display', display_mock)

    display.banner(u'🚀🐮', color=False, cows=False)
    args, kwargs = display_mock.call_args
    msg = args[0]
    stars = u' %s' % (77 * u'*')
    assert msg.endswith(stars)
| StarcoderdataPython |
3285372 | from django.contrib.auth.forms import AuthenticationForm, UserCreationForm
from django import forms
from users.models import User
class InviteUserForm(forms.Form):
    """Form for inviting a new user by email address."""

    email = forms.CharField()

    def clean(self):
        """Reject emails that already belong to an existing user."""
        cleaned_data = super().clean()
        # Use .get(): if the email field itself failed validation the key is
        # absent from cleaned_data and direct indexing raises KeyError.
        email = cleaned_data.get("email")
        if email and User.objects.check_if_email_already_exists(email):
            raise forms.ValidationError("The email you entered is from an existing user.")
        return cleaned_data
class SignUpForm(UserCreationForm):
    """User registration form keyed on email only."""

    class Meta:
        model = User
        fields = ["email"]
class LoginForm(AuthenticationForm):
    """Login form with a simplified invalid-login error message."""

    error_messages = {"invalid_login": ("Please enter a correct %(username)s and password.")}
| StarcoderdataPython |
# Load the iris dataset
from sklearn import datasets

iris = datasets.load_iris()
features, labels = iris.data, iris.target

# Hold out half of the samples as a test set
from sklearn.model_selection import train_test_split

X_train, X_test, y_train, y_test = train_test_split(features, labels, test_size=.5)

# Fit a simple k-nearest-neighbours classifier
# (a tree.DecisionTreeClassifier would work the same way)
from sklearn.neighbors import KNeighborsClassifier

iris_classifier = KNeighborsClassifier()
iris_classifier.fit(X_train, y_train)

# Predict on the held-out half and report accuracy
predictions = iris_classifier.predict(X_test)

from sklearn.metrics import accuracy_score

print(accuracy_score(y_test, predictions))
| StarcoderdataPython |
1687817 | """
Created on Jan 29, 2021
@file: runner.py
@desc: Run experiments given set of hyperparameters for the unmixing problem.
@author: laugh12321
@contact: <EMAIL>
"""
import os
os.environ["TF_CPP_MIN_LOG_LEVEL"] = "2"
import tensorflow as tf
from config.get_config import get_config
from src.model import enums
from src.utils.utils import parse_train_size, subsample_test_set
from src.utils import prepare_data, artifacts_reporter
from src.model import evaluate_unmixing, train_unmixing
from src.model.models import rnn_supervised, pixel_based_cnn, \
pixel_based_fnnc, pixel_based_dacn
# Literature hyperparameters settings:
# Maps each model builder's name to its default learning rate from the
# corresponding publication.
LEARNING_RATES = {
    rnn_supervised.__name__: 1e-3,
    pixel_based_cnn.__name__: 1e-2,
    pixel_based_fnnc.__name__: 1e-4,
    pixel_based_dacn.__name__: 9e-4
}
def run_experiments(*,
                    data_file_path: str,
                    ground_truth_path: str = None,
                    train_size: int or float,
                    val_size: float = 0.1,
                    sub_test_size: int = None,
                    n_runs: int = 4,
                    model_name: str,
                    dest_path: str = None,
                    sample_size: int,
                    n_classes: int,
                    lr: float = None,
                    batch_size: int = 256,
                    epochs: int = 100,
                    verbose: int = 1,
                    shuffle: bool = True,
                    patience: int = 15):
    """
    Function for running experiments on unmixing given a set of hyper parameters.

    :param data_file_path: Path to the data file. Supported types are: .npy.
    :param ground_truth_path: Path to the ground-truth data file.
    :param train_size: If float, should be between 0.0 and 1.0.
        If int, specifies the number of samples in the training set.
        Defaults to 0.8
    :type train_size: Union[int, float]
    :param val_size: Should be between 0.0 and 1.0. Represents the
        percentage of samples from the training set to be
        extracted as a validation set.
        Defaults to 0.1.
    :param sub_test_size: Number of pixels to subsample the test set
        instead of performing the inference on all
        samples that are not in the training set.
    :param n_runs: Number of total experiment runs.
    :param model_name: Name of the model, it serves as a key in the
        dictionary holding all functions returning models.
    :param dest_path: Path to where all experiment runs will be saved as
        subdirectories in this directory.
    :param sample_size: Size of the input sample.
    :param n_classes: Number of classes.
    :param lr: Learning rate for the model, i.e., regulates
        the size of the step in the gradient descent process.
    :param batch_size: Size of the batch used in training phase,
        it is the size of samples per gradient step.
    :param epochs: Number of epochs for model to train.
    :param verbose: Verbosity mode used in training, (0, 1 or 2).
    :param shuffle: Boolean indicating whether to shuffle datasets.
    :param patience: Number of epochs without improvement in order to
        stop the training phase.
    """
    for experiment_id in range(n_runs):
        # Each run gets its own subdirectory, seeded by its run index.
        experiment_dest_path = os.path.join(dest_path,
                                            '{}_{}'.format(enums.Experiment.EXPERIMENT, str(experiment_id)))
        os.makedirs(experiment_dest_path, exist_ok=True)

        # Apply default literature hyper parameters:
        if lr is None and model_name in LEARNING_RATES:
            lr = LEARNING_RATES[model_name]

        # Prepare data for unmixing:
        data = prepare_data.main(data_file_path=data_file_path,
                                 ground_truth_path=ground_truth_path,
                                 train_size=parse_train_size(train_size),
                                 val_size=val_size,
                                 seed=experiment_id)

        # Subsample the test set to constitute a constant size:
        if sub_test_size is not None:
            subsample_test_set(data[enums.Dataset.TEST], sub_test_size)

        # Train the model:
        train_unmixing.train(model_name=model_name,
                             dest_path=experiment_dest_path,
                             data=data,
                             sample_size=sample_size,
                             n_classes=n_classes,
                             lr=lr,
                             batch_size=batch_size,
                             epochs=epochs,
                             verbose=verbose,
                             shuffle=shuffle,
                             patience=patience,
                             seed=experiment_id)

        # Evaluate the model:
        evaluate_unmixing.evaluate(
            model_name=model_name,
            data=data,
            dest_path=experiment_dest_path,
            batch_size=batch_size)

        # Drop the keras graph between runs to avoid memory growth.
        tf.keras.backend.clear_session()

    # Aggregate per-run artifacts into one report for the whole experiment.
    artifacts_reporter.collect_artifacts_report(
        experiments_path=dest_path,
        dest_path=dest_path)
if __name__ == '__main__':
    args = get_config(filename='./config/config.json')
    # Run the full grid of (model, dataset) experiment combinations.
    for model_name in args.model_names:
        for data_name in args.dataset:
            dest_path = os.path.join(args.save_path,
                                     '{}_{}'.format(str(model_name), str(data_name)))
            base_path = os.path.join(args.path, data_name)
            data_file_path = os.path.join(base_path, data_name + '.npy')
            ground_truth_path = os.path.join(base_path, data_name + '_gt.npy')
            # Dataset-specific input size and endmember count.
            if data_name == 'urban':
                sample_size, n_classes = 162, 6
            else:
                sample_size, n_classes = 157, 4
            run_experiments(data_file_path=data_file_path,
                            ground_truth_path=ground_truth_path,
                            dest_path=dest_path,
                            train_size=args.train_size,
                            val_size=args.val_size,
                            model_name=model_name,
                            sample_size=sample_size,
                            n_classes=n_classes,
                            batch_size=args.batch_size,
                            epochs=args.epochs,
                            verbose=args.verbose,
                            patience=args.patience,
                            n_runs=args.n_runs)
| StarcoderdataPython |
1741462 | <filename>openhab_creator/output/__init__.py
from openhab_creator.output.color import Color
| StarcoderdataPython |
3209589 | <filename>video_to_gif.py
import numpy as np
import cv2
def mse(imageA, imageB):
    """Return the Mean Squared Error between two same-sized images.

    The squared pixel differences are summed and divided by the number of
    pixels (rows * cols). Lower values mean the images are more similar.
    """
    diff = imageA.astype("float") - imageB.astype("float")
    return np.sum(diff ** 2) / float(imageA.shape[0] * imageA.shape[1])
cap = cv2.VideoCapture('Sample2.mp4')
count = 0
Sensitivity = 1 * 2000
i = 0
NewVideoFrame=0
fps = cap.get(cv2.cv.CV_CAP_PROP_FPS)
LengthofFrames = int(cap.get(cv2.cv.CV_CAP_PROP_FRAME_COUNT))
print fps
print LengthofFrames
fourcc = cv2.cv.CV_FOURCC(*'DIVX')
while cap.isOpened():
ret,frame = cap.read()
width = 1080
height = 720
if i == 0:
video = cv2.VideoWriter('gifart.avi',fourcc,fps,(width,height))
InitialFrame = frame
cv2.imshow('window-name',frame)
cv2.imwrite("frame%d.jpg" % count, frame)
elif i < LengthofFrames:
mseval = mse(frame,InitialFrame)
if mseval > Sensitivity:
cv2.imshow('window-name',frame)
cv2.imwrite("frame%d.jpg" % count, frame)
InitialFrame = frame
print count
print mseval
video.write(frame)
count = count + 1
i = i + 1
video.release
cap.release()
| StarcoderdataPython |
3242587 | <reponame>gengxf0505/pxt
# Transpiler test fixture: exercises `for ... in range(...)` translation.
# The "WRONG" mutation of the loop variable is deliberate test input --
# do not "fix" it.
for i in range(5):
    pass

for i in range(5):
    i = i + 7 # WRONG
    i = i - (1 + i)

# Range bounds built from constant arithmetic expressions.
for t in range(2 + 7 * 3 / 1 % 7 - 9):
    pass

for t in range(2 + 7 * 3 / 1 % 7 - 9 + 1):
    pass
1624258 | <gh_stars>0
import pytest
import unittest
from src.gmaillabelcreate.gmaillabel import (define_label, VALUE_ERROR_DEFINE_LABEL_TEXT,
VALUE_ERROR_DEFINE_LABEL_COLOR_TEXT)
def test_define_label():
    """define_label maps its keyword args onto the Gmail label payload keys."""
    color_dict = {
        'pending': {'textColor': '#ffffff', 'backgroundColor': '#c2c2c2'},
    }
    correct_out = {
        "name": "test",
        "color": color_dict['pending'],
        "messageListVisibility": "mlv",
        "labelListVisibility": "llv",
        "type": "tp"
    }
    assert correct_out == define_label("test", color=color_dict['pending'], mlv="mlv", llv="llv", tp="tp")
class TestDefineLabelValues:
    """Validates that define_label rejects non-string and malformed inputs."""

    @pytest.mark.parametrize("args, excpected_output", [
        ((1, True, 1.1, None, (1), [1], {1: 1}), (VALUE_ERROR_DEFINE_LABEL_TEXT.format(["name"]))),
    ])
    def test_args_values(self, args, excpected_output):
        # tests to check 'name' argument: every non-str value must raise.
        for arg in args:
            with pytest.raises(ValueError) as exec_info:
                define_label(arg)
            assert str(exec_info.value) == excpected_output

    @pytest.mark.parametrize("kwargs, excpected_output", [
        (({"mlv": 1}, {"mlv": True}, {"mlv": 1.1}, {"mlv": None}, {"mlv": (1)}, {"mlv": [1]}, {"mlv": {1: 1}}), (VALUE_ERROR_DEFINE_LABEL_TEXT.format(["mlv"]))),
        (({"llv": 1}, {"llv": True}, {"llv": 1.1}, {"llv": None}, {"llv": (1)}, {"llv": [1]}, {"llv": {1: 1}}), (VALUE_ERROR_DEFINE_LABEL_TEXT.format(["llv"]))),
        (({"tp": 1}, {"tp": True}, {"tp": 1.1}, {"tp": None}, {"tp": (1)}, {"tp": [1]}, {"tp": {1: 1}}), (VALUE_ERROR_DEFINE_LABEL_TEXT.format(["tp"]))),
        # Color dicts must contain both textColor and backgroundColor.
        (({"color": {"test": 1, "textColor": "#ffffff"}},), (VALUE_ERROR_DEFINE_LABEL_COLOR_TEXT.format(["backgroundColor"]))),
        (({"color": {"test": 1, "backgroundColor": "#ffffff"}},), (VALUE_ERROR_DEFINE_LABEL_COLOR_TEXT.format(["textColor"]))),
    ])
    def test_kwargs_values(self, kwargs, excpected_output):
        # tests to check keyword arguments: each bad value must raise.
        for kwarg in kwargs:
            with pytest.raises(ValueError) as exec_info:
                define_label("test_name", **kwarg)
            assert str(exec_info.value) == excpected_output
| StarcoderdataPython |
1760175 | import sys
from datetime import datetime
# Open log file handle; None means log to stdout only (see set_log/close_log).
LOG_FILE = None
# Optional callback invoked by error() instead of exiting.
ERROR_CB = None
# Named timer start times recorded by start_timer().
TIME_CBS = {}
def start_timer(tname):
    """Record the current time under timer name *tname*."""
    TIME_CBS[tname] = datetime.now()
def end_timer(tname, msg=""):
    """Log the time elapsed since start_timer(tname), with an optional *msg*."""
    diff = datetime.now() - TIME_CBS[tname]
    log("[timer] Completed in %s |%s| %s"%(diff, msg, tname), important=True)
def set_log(fname):
    """Start appending log output to *fname*.

    Closes any previously opened log file first; the original simply
    overwrote LOG_FILE and leaked the old file handle.
    """
    global LOG_FILE
    close_log()
    LOG_FILE = open(fname, "a")
global LOG_FILE
if LOG_FILE:
LOG_FILE.close()
LOG_FILE = None
def log(msg, level=0, important=False):
from cantools import config
s = "%s%s"%(" " * level, msg)
if config.log.timestamp:
s = "* %s : %s"%(datetime.now(), s)
if important:
s = "\n%s"%(s,)
if LOG_FILE:
LOG_FILE.write("%s\n"%(s,))
print(s)
def set_error(f):
    """Register *f* as the callback error() invokes instead of exiting."""
    global ERROR_CB
    ERROR_CB = f
def error(msg, *lines):
    """Log an error and its detail *lines*, then call ERROR_CB or exit."""
    log("error: %s"%(msg,), important=True)
    for line in lines:
        log(line, 1)
    log("goodbye")
    if ERROR_CB:
        ERROR_CB(msg)
    else:
        # No callback registered: terminate the process.
        sys.exit()
129802 | #!/usr/bin/env python
#
# Copyright (c) 2017 Idiap Research Institute, http://www.idiap.ch/
# Written by <NAME> <<EMAIL>>
#
import argparse
import os
from os import path
from keras import backend as K
from keras.losses import get as get_loss
from keras.utils.generic_utils import Progbar
import numpy as np
from importance_sampling import models
from importance_sampling.datasets import CIFAR10, CIFAR100, MNIST, \
OntheflyAugmentedImages, ImageNetDownsampled, PennTreeBank, ZCAWhitening
from importance_sampling.model_wrappers import OracleWrapper
from importance_sampling.reweighting import BiasedReweightingPolicy
from importance_sampling.utils import tf_config
from importance_sampling.utils.functional import compose, partial, ___
def build_grad(network):
    """Return a K.function computing the network's flattened gradient.

    The returned function takes [inputs, targets, sample_weights] and
    yields a single 1-D tensor concatenating all weight gradients of the
    weighted mean loss.
    """
    x = network.input
    y = network.output
    # Sparse losses take integer class ids of shape (N, 1) instead of
    # one-hot targets with the output's shape.
    target_shape = (None, 1) if "sparse" in network.loss else K.int_shape(y)
    y_true = K.placeholder(shape=target_shape)
    sample_weights = K.placeholder(shape=(None,))

    l = K.mean(sample_weights * get_loss(network.loss)(y_true, y))
    grads = network.optimizer.get_gradients(l, network.trainable_weights)
    # Flatten every per-weight gradient and stack into one long vector.
    grad = K.concatenate([
        K.reshape(g, (-1,))
        for g in grads
    ])

    return K.function(
        [x, y_true, sample_weights],
        [grad]
    )
def build_grad_batched(network, batch_size):
    """Compute the average gradient by splitting the inputs in batches of size
    'batch_size' and averaging."""
    grad = build_grad(network)

    def inner(inputs):
        X, y, w = inputs
        N = len(X)
        g = 0
        for i in range(0, N, batch_size):
            # Weight each batch's mean gradient by the batch's total sample
            # weight so dividing by w.sum() yields the weighted average.
            g = g + w[i:i+batch_size].sum() * grad([
                X[i:i+batch_size],
                y[i:i+batch_size],
                w[i:i+batch_size]
            ])[0]
        return [g / w.sum()]

    return inner
def load_dataset(dataset):
    """Instantiate and return the dataset object registered under *dataset*.

    Raises KeyError for unknown dataset names.
    """
    datasets = {
        "mnist": MNIST,
        "cifar10": CIFAR10,
        "cifar100": CIFAR100,
        # "*-augmented" variants wrap the base dataset with on-the-fly
        # shift/flip augmentation; "*-whitened-*" additionally applies ZCA.
        "cifar10-augmented": compose(
            partial(OntheflyAugmentedImages, ___, dict(
                featurewise_center=False,
                samplewise_center=False,
                featurewise_std_normalization=False,
                samplewise_std_normalization=False,
                zca_whitening=False,
                rotation_range=0,
                width_shift_range=0.1,
                height_shift_range=0.1,
                horizontal_flip=True,
                vertical_flip=False
            )),
            CIFAR10
        ),
        "cifar10-whitened-augmented": compose(
            partial(OntheflyAugmentedImages, ___, dict(
                featurewise_center=False,
                samplewise_center=False,
                featurewise_std_normalization=False,
                samplewise_std_normalization=False,
                zca_whitening=False,
                rotation_range=0,
                width_shift_range=0.1,
                height_shift_range=0.1,
                horizontal_flip=True,
                vertical_flip=False
            ), N=15*10**5),
            ZCAWhitening,
            CIFAR10
        ),
        "cifar100-augmented": compose(
            partial(OntheflyAugmentedImages, ___, dict(
                featurewise_center=False,
                samplewise_center=False,
                featurewise_std_normalization=False,
                samplewise_std_normalization=False,
                zca_whitening=False,
                rotation_range=0,
                width_shift_range=0.1,
                height_shift_range=0.1,
                horizontal_flip=True,
                vertical_flip=False
            )),
            CIFAR100
        ),
        "cifar100-whitened-augmented": compose(
            partial(OntheflyAugmentedImages, ___, dict(
                featurewise_center=False,
                samplewise_center=False,
                featurewise_std_normalization=False,
                samplewise_std_normalization=False,
                zca_whitening=False,
                rotation_range=0,
                width_shift_range=0.1,
                height_shift_range=0.1,
                horizontal_flip=True,
                vertical_flip=False
            ), N=15*10**5),
            ZCAWhitening,
            CIFAR100
        ),
        # Downsampled ImageNet is loaded from the IMAGENET env variable path.
        "imagenet-32x32": partial(
            ImageNetDownsampled,
            os.getenv("IMAGENET"),
            size=32
        ),
        "ptb": partial(PennTreeBank, 20),
    }

    return datasets[dataset]()
def uniform_score(x, y, batch_size=None):
    """Assign every sample the same importance score of 1.

    ``y`` and ``batch_size`` are accepted only for signature compatibility
    with the model-based scoring functions.
    """
    n_samples = len(x)
    return np.ones(n_samples)
def main(argv):
    """Measure the variance reduction of importance-sampling scores by
    comparing sampled gradient estimates against the full gradient.

    Fixes applied in review:
      * ``--score`` uses ``nargs="+"`` but defaulted to the *string*
        ``"loss"``; iterating that default yielded single characters.
        The default is now the list ``["loss"]``.
      * The estimate buffer ``gs`` was hard-coded to 10 rows while the
        loop runs ``args.samples`` times (IndexError for more samples,
        zero-row skew for fewer).  It is now sized by ``args.samples``.
      * ``print`` statements were Python-2 only; the single-argument
        function form below runs under both Python 2 and 3.
    """
    parser = argparse.ArgumentParser(
        description=("Compute the variance reduction achieved by different "
                     "importance sampling methods")
    )
    parser.add_argument(
        "model",
        choices=[
            "small_cnn", "cnn", "wide_resnet_28_2", "lstm_lm"
        ],
        help="Choose the type of the model"
    )
    parser.add_argument(
        "weights",
        help="The file containing the model weights"
    )
    parser.add_argument(
        "dataset",
        choices=[
            "mnist", "cifar10", "cifar100", "cifar10-augmented",
            "cifar100-augmented", "imagenet-32x32", "ptb",
            "cifar10-whitened-augmented", "cifar100-whitened-augmented"
        ],
        help="Choose the dataset to compute the loss"
    )
    parser.add_argument(
        "--samples",
        type=int,
        default=10,
        help="How many samples to choose"
    )
    parser.add_argument(
        "--score",
        choices=["gnorm", "full_gnorm", "loss", "ones"],
        nargs="+",
        default=["loss"],
        help="Choose a score to perform sampling with"
    )
    parser.add_argument(
        "--batch_size",
        type=int,
        default=128,
        help="The batch size for computing the loss"
    )
    parser.add_argument(
        "--inner_batch_size",
        type=int,
        default=32,
        help=("The batch size to use for gradient computations "
              "(to decrease memory usage)")
    )
    parser.add_argument(
        "--sample_size",
        type=int,
        default=1024,
        help="The sample size to compute the variance reduction"
    )
    parser.add_argument(
        "--random_seed",
        type=int,
        default=0,
        help="A seed for the PRNG (mainly used for dataset generation)"
    )
    parser.add_argument(
        "--save_scores",
        help="Directory to save the scores in"
    )
    args = parser.parse_args(argv)

    np.random.seed(args.random_seed)

    dataset = load_dataset(args.dataset)
    network = models.get(args.model)(dataset.shape, dataset.output_size)
    network.load_weights(args.weights)
    grad = build_grad_batched(network, args.inner_batch_size)
    reweighting = BiasedReweightingPolicy()

    # Compute the full gradient on a fixed random subset of the training set
    idxs = np.random.choice(len(dataset.train_data), args.sample_size)
    x, y = dataset.train_data[idxs]
    full_grad = grad([x, y, np.ones(len(x))])[0]

    # Sample and approximate the gradient with each requested score
    for score_metric in args.score:
        if score_metric != "ones":
            model = OracleWrapper(network, reweighting, score=score_metric)
            score = model.score
        else:
            score = uniform_score

        # One gradient estimate per sampling round (was hard-coded to 10)
        gs = np.zeros(shape=(args.samples,) + full_grad.shape, dtype=np.float32)
        print("Calculating %s..." % (score_metric,))
        scores = score(x, y, batch_size=1)
        p = scores / scores.sum()
        pb = Progbar(args.samples)
        for i in range(args.samples):
            pb.update(i)
            idxs = np.random.choice(args.sample_size, args.batch_size, p=p)
            w = reweighting.sample_weights(idxs, scores).ravel()
            gs[i] = grad([x[idxs], y[idxs], w])[0]
        pb.update(args.samples)

        # Distance and cosine alignment of each estimate w.r.t. the full gradient
        norms = np.sqrt(((full_grad - gs)**2).sum(axis=1))
        alignment = gs.dot(full_grad[:, np.newaxis]) / np.sqrt(np.sum(full_grad**2))
        alignment /= np.sqrt((gs**2).sum(axis=1, keepdims=True))
        print("Mean of norms of diff %s" % np.mean(norms))
        print("Variance of norms of diff %s" % np.var(norms))
        print("Mean of alignment %s" % np.mean(alignment))
        print("Variance of alignment %s" % np.var(alignment))
        if args.save_scores:
            np.savetxt(
                path.join(args.save_scores, score_metric + ".txt"),
                scores
            )
if __name__ == "__main__":
    # Script entry point: forward CLI arguments (without the program name).
    import sys
    main(sys.argv[1:])
| StarcoderdataPython |
1767966 | <reponame>Ortus-Team/Moim
# -*- coding: utf-8 -*-
# Generated by Django 1.11.6 on 2018-01-02 21:26
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Initial migration for the event app: creates the ``Event`` model."""

    initial = True

    # Event references Category and Tag, so those apps' initial
    # migrations must be applied first.
    dependencies = [
        ('tag', '0001_initial'),
        ('category', '0001_initial'),
    ]

    operations = [
        migrations.CreateModel(
            name='Event',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('title', models.CharField(max_length=255)),
                ('slug', models.SlugField(default='', max_length=256)),
                ('event_date', models.DateTimeField(blank=True, null=True)),
                ('pub_date', models.DateTimeField(blank=True, null=True)),
                ('access_level', models.CharField(max_length=10)),
                ('location', models.CharField(max_length=200)),
                ('body', models.TextField(blank=True, default='', null=True)),
                ('category', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='categories', to='category.Category')),
                ('tags', models.ManyToManyField(blank=True, related_name='tags', to='tag.Tag')),
            ],
            options={
                # Newest events first by default.
                'ordering': ('-pub_date',),
            },
        ),
    ]
| StarcoderdataPython |
1648546 | import pytest
from cognite import config
from tests.conftest import TEST_API_KEY, TEST_PROJECT
MOCK_URL = 'http://another.url/'  # temporary base URL installed by change_url
NUM_OF_RETRIES = 5  # retry count installed by change_number_of_retries
@pytest.fixture
def change_url():
    # Point the client at MOCK_URL for the test, then restore the default.
    config.set_base_url(MOCK_URL)
    yield
    config.set_base_url()
@pytest.fixture
def change_number_of_retries():
    # Override the retry count for the test, then restore the default.
    config.set_number_of_retries(NUM_OF_RETRIES)
    yield
    config.set_number_of_retries()
@pytest.mark.usefixtures('unset_config_variables')
def test_get_config_variables_when_not_set():
    """With no configuration, both values fall back to empty strings."""
    assert config.get_config_variables(None, None) == ('', '')
def test_get_config_variables_when_set():
    """Configured values are returned when no explicit arguments are given."""
    assert config.get_config_variables(None, None) == (TEST_API_KEY, TEST_PROJECT)
def test_get_config_variables_when_set_explicitly():
    """Explicit arguments take precedence over configured values."""
    api_key, project = config.get_config_variables('some_other_key', 'some_other_project')
    assert api_key == 'some_other_key'
    assert project == 'some_other_project'
@pytest.mark.usefixtures('change_url')
def test_set_base_url():
    """The overridden base URL is used for both versioned and raw lookups."""
    versioned = config.get_base_url(api_version=0.5)
    assert versioned == MOCK_URL + "0.5"
    assert config.get_base_url() == MOCK_URL + "<version>"
@pytest.mark.usefixtures('change_number_of_retries')
def test_set_number_of_retries():
    # The fixture sets the retry count to NUM_OF_RETRIES for this test.
    assert config.get_number_of_retries() == NUM_OF_RETRIES
| StarcoderdataPython |
3373039 | <gh_stars>100-1000
# Licensed to the StackStorm, Inc ('StackStorm') under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from lib.base import BaseGithubAction
class ListDeploymentsAction(BaseGithubAction):
    """StackStorm action that lists the deployments of a GitHub repository."""

    def run(self, api_user, repository, github_type):
        """Return a list of deployment summaries for ``repository``."""
        enterprise = self._is_enterprise(github_type)

        # When acting on behalf of a specific user, switch to their token.
        if api_user:
            self.token = self._get_user_token(api_user, enterprise)

        deployments = self._request(
            "GET",
            "/repos/{}/deployments".format(repository),
            None,
            self.token,
            enterprise)

        # Fields copied verbatim from each deployment record, in output order.
        copied_fields = (
            "statuses_url", "repository_url", "ref", "task", "payload",
            "environment", "description", "created_at", "updated_at")

        results = []
        for deployment in deployments:
            entry = {"creator": deployment["creator"]["login"]}
            for field in copied_fields:
                entry[field] = deployment[field]
            results.append(entry)
        return results
| StarcoderdataPython |
1748620 | <reponame>eabase/pyreadline3
# -*- coding: UTF-8 -*-
# Example snippet to use in a PYTHONSTARTUP file
from __future__ import absolute_import, print_function, unicode_literals
try:
    import atexit
    # pyreadline3.rlmain.config_path=r"c:\xxx\pyreadlineconfig.ini"
    import readline
    import pyreadline3.rlmain
    import pyreadline3.unicode_helper
    #
    #
    # Normally the codepage for pyreadline3 is set to be sys.stdout.encoding
    # if you need to change this uncomment the following line
    # pyreadline3.unicode_helper.pyreadline_codepage="utf8"
except ImportError:
    print("Module readline not available.")
else:
    # Import tab completion functionality.
    import rlcompleter

    # Override the completer's callable postfix so that completing a
    # callable does NOT automatically append "(".
    completer_obj = rlcompleter.Completer()

    def nop(val, word):
        # Return the completion word unchanged (no "(" appended).
        return word

    completer_obj._callable_postfix = nop
    readline.set_completer(completer_obj.complete)

    # Activate tab completion and persist history across sessions.
    readline.parse_and_bind("tab: complete")
    readline.read_history_file()
    atexit.register(readline.write_history_file)
    # Remove the helper names from the interactive namespace.
    del readline, rlcompleter, atexit
| StarcoderdataPython |
1652974 | <reponame>u93/multa-metrics-collector
from aws_cdk import core
from multacdkrecipies import (
AwsApiGatewayLambdaFanOutBE,
AwsApiGatewayLambdaPipes,
AwsIotAnalyticsSimplePipeline,
AwsLambdaFunctionsCluster,
AwsLambdaLayerVenv,
AwsS3BucketsCluster,
AwsSsmString,
AwsUserServerlessBackend,
)
from src.infrastructure.configs import (
analytics_cold_path_config,
base_configs,
serverless_rest_api_configs,
user_backend_config,
)
class BaseStack(core.Stack):
    """
    Base Stack for MultaMetrics Backend. Will contain common configurations and resources for other Stacks
    """

    def __init__(self, scope: core.Construct, id: str, config=None, **kwargs) -> None:
        super().__init__(scope, id, **kwargs)
        # Shared virtualenv-based Lambda layer used by the helper functions.
        self._base_stack_lambdalayer = AwsLambdaLayerVenv(
            self,
            id=f"BaseStack-LambdaLayer-{config['environ']}",
            prefix="multa_backend",
            environment=config["environ"],
            configuration=config["config"]["BASE_CONFIG_LAMBDA_LAYER"],
        )
        layer_arn = self._base_stack_lambdalayer.lambda_layer.layer_version_arn

        # SSM parameters holding the base configuration values.
        self._base_stack_backend_ssm = AwsSsmString(
            self,
            id=f"BaseStack-Ssm-{config['environ']}",
            prefix="multa_backend",
            environment=config["environ"],
            configuration=config["config"]["BASE_CONFIG_SSM"],
        )

        # Attach the shared layer to every helper function definition
        # (mutates the configuration dict in place) before creating them.
        for lambda_function in config["config"]["BASE_CONFIG_FUNCTIONS"]["functions"]:
            lambda_function["layers"].append(layer_arn)

        self._helper_functions = AwsLambdaFunctionsCluster(
            self,
            id=f"BaseBackend-Functions-{config['environ']}",
            prefix="multa_backend",
            environment=config["environ"],
            configuration=config["config"]["BASE_CONFIG_FUNCTIONS"],
        )

        self._base_backend_buckets = AwsS3BucketsCluster(
            self,
            id=f"BaseBackend-Buckets-{config['environ']}",
            prefix="multa-backend",
            environment=config["environ"],
            configuration=config["config"]["BASE_CONFIG_BUCKETS"],
        )
class UserBackendStack(core.Stack):
    """
    User Backend Stack for MultaMetrics Backend. Will contain resources necessary to handle users and auth.
    """

    def __init__(self, scope: core.Construct, id: str, config=None, **kwargs) -> None:
        super().__init__(scope, id, **kwargs)
        # Lambda layer shared by the authorizer and Cognito trigger functions.
        self._user_serverless_backend_lambdalayer = AwsLambdaLayerVenv(
            self,
            id=f"UserServerlessBE-LambdaLayer-{config['environ']}",
            prefix="multa_backend",
            environment=config["environ"],
            configuration=config["config"]["USER_BACKEND_LAMBDA_LAYER"],
        )
        layer_arn = self._user_serverless_backend_lambdalayer.lambda_layer.layer_version_arn

        self._user_serverless_backend_ssm = AwsSsmString(
            self,
            id=f"UserServerlessBE-Ssm-{config['environ']}",
            prefix="multa_backend",
            environment=config["environ"],
            configuration=config["config"]["USER_BACKEND_SSM"],
        )

        # Attach the shared layer to the API authorizer and to the Cognito
        # post-confirmation trigger (the configuration is mutated in place).
        config["config"]["USER_BACKEND"]["authorizer_function"]["origin"]["layers"].append(layer_arn)
        config["config"]["USER_BACKEND"]["user_pool"]["triggers"]["post_confirmation"]["layers"].append(layer_arn)
        self._user_serverless_backend = AwsUserServerlessBackend(
            self,
            id=f"UserServerlessBE-{config['environ']}",
            prefix="multa_backend",
            environment=config["environ"],
            configuration=config["config"]["USER_BACKEND"],
        )

    def lambda_authorizer_arn(self):
        """Return the ARN of the Lambda authorizer created by this stack."""
        return self._user_serverless_backend.authorizer_function.function_arn
class UserApisBackend(core.Stack):
    """
    API Constructs to be used by MultaMetrics frontend to handle user settings, plans, roles, organizations.
    """

    def __init__(self, scope: core.Construct, id: str, config=None, **kwargs) -> None:
        super().__init__(scope, id, **kwargs)
        # NOTE(review): the construct id below says "AnalyticsColdPath" and
        # looks like a copy/paste from AnalyticsColdPathStack.  Renaming it
        # would change the CloudFormation logical id of a deployed resource,
        # so it is deliberately left as-is.
        self._fan_out_api_lambdalayer = AwsLambdaLayerVenv(
            self,
            id=f"AnalyticsColdPath-LambdaLayer-{config['environ']}",
            prefix="multa_backend",
            environment=config["environ"],
            configuration=config["config"]["SERVERLESS_REST_API_LAMBDA_LAYER"],
        )
        layer_arn = self._fan_out_api_lambdalayer.lambda_layer.layer_version_arn

        # Attach the shared layer to every handler in the REST API resource
        # tree (mutates the configuration dict in place).
        for function in config["config"]["SERVERLESS_REST_API"]["api"]["resource_trees"]:
            function["handler"]["layers"].append(layer_arn)
        self._serverless_rest_api = AwsApiGatewayLambdaPipes(
            self,
            id=f"ServerlessRestApi-{config['environ']}",
            prefix="multa_backend",
            environment=config["environ"],
            configuration=config["config"]["SERVERLESS_REST_API"],
        )
class AnalyticsHotPathStack(core.Stack):
    """
    Hot Analytics Stack for MultaMetrics Backend. Will contain resources necessary for a rapid ingestion and
    representation of data.
    """

    def __init__(self, scope: core.Construct, id: str, config=None, **kwargs) -> None:
        # Placeholder stack: no resources are defined yet.
        super().__init__(scope, id, **kwargs)
class AnalyticsColdPathStack(core.Stack):
    """
    Cold Analytics Stack for MultaMetrics Backend. Will contain resources necessary for a storage, ingestion and
    analysis and representation of timeseries data.
    """

    def __init__(self, scope: core.Construct, id: str, config=None, **kwargs) -> None:
        super().__init__(scope, id, **kwargs)
        # Lambda layer shared by all fan-out API functions.
        self._fan_out_api_lambdalayer = AwsLambdaLayerVenv(
            self,
            id=f"AnalyticsColdPath-LambdaLayer-{config['environ']}",
            prefix="multa_backend",
            environment=config["environ"],
            configuration=config["config"]["ANALYTICS_FAN_OUT_LAMBDA_LAYER"],
        )
        layer_arn = self._fan_out_api_lambdalayer.lambda_layer.layer_version_arn

        # Attach the shared layer to the fan-out functions and to the API
        # authorizer (mutates the configuration dict in place).
        for function in config["config"]["ANALYTICS_FAN_OUT_API"]["functions"]:
            function["layers"].append(layer_arn)
        config["config"]["ANALYTICS_FAN_OUT_API"]["api"]["authorizer_function"]["origin"]["layers"].append(layer_arn)
        # The construct only needs to be attached to the stack scope, so it
        # is not stored on self.
        fan_out_api = AwsApiGatewayLambdaFanOutBE(
            self,
            id=f"AnalyticsColdPath-{config['environ']}",
            prefix="multa_backend",
            environment=config["environ"],
            configuration=config["config"]["ANALYTICS_FAN_OUT_API"],
        )
app = core.App()

# One BaseStack per configured environment.
for environment, configuration in base_configs.BASE_CONFIGS.items():
    config = dict(environ=environment, config=configuration)
    base_backend = BaseStack(app, id=f"BackendBaseStack-{environment}", config=config)

# User backend stacks; remember each environment's Lambda authorizer ARN so
# the REST API stacks below can reference it.
lambda_authorizers = dict()
for environment, configuration in user_backend_config.USER_BACKEND_CONFIGS.items():
    config = dict(environ=environment, config=configuration)
    user_backend = UserBackendStack(app, id=f"UserOrganizationStack-{environment}", config=config)
    lambda_authorizers[environment] = user_backend.lambda_authorizer_arn()

for environment, configuration in serverless_rest_api_configs.SERVERLESS_REST_API_CONFIGS.items():
    # Inject the previously captured authorizer ARN into the API config.
    configuration["SERVERLESS_REST_API"]["api"]["authorizer_function"]["imported"]["arn"] = lambda_authorizers[
        environment
    ]
    config = dict(environ=environment, config=configuration)
    UserApisBackend(app, id=f"UserApisBackend-{environment}", config=config)

for environment, configuration in analytics_cold_path_config.ANALYTICS_COLD_PATH_CONFIGS.items():
    config = dict(environ=environment, config=configuration)
    AnalyticsColdPathStack(app, id=f"AnalyticsColdPathStack-{environment}", config=config)

app.synth()
| StarcoderdataPython |
177104 | <filename>Streamlit/dataViewer/dataViewer.py<gh_stars>1-10
# !/usr/bin/env python
# -*- coding: utf-8 -*-
__author__ = "Bruce_H_Cottman"
__license__ = "MIT License"
import streamlit as st
import pandas as pd
from pydataset import data
# Table of every dataset available through pydataset, ordered by id.
df_data = data()
df_data = df_data.sort_values('dataset_id')
df_data = df_data.reset_index(drop=True)
st.dataframe(df_data, width=900, height=150)  # choices

option = st.selectbox(
    'select a dataset to display dataset and chart the dataset numeric columns?',
    df_data['dataset_id'])

# Show and chart the chosen dataset if it loaded as tabular data.
dataset = data(option)
if isinstance(dataset, (pd.DataFrame, pd.Series)):
    st.dataframe(dataset, height=150)
    st.line_chart(dataset)
| StarcoderdataPython |
33991 | """Top-level package for sta-etl."""
__author__ = """<NAME>"""
__email__ = '<EMAIL>'
__version__ = '0.1.0'
#from sta_etl import *
| StarcoderdataPython |
137734 | <filename>CursoEmVideo/pythonProject/ex025.py<gh_stars>0
import itertools
# Read the user's full name and lower-case it so the search for
# "silva" is case-insensitive.
nome = input('Digite seu nome completo: ').lower()

# The original used str.find() and compared the index's sign with an
# if/elif pair; a membership test expresses the same check directly.
if 'silva' in nome:
    print('Você tem Silva no seu nome!')
else:
    print('Você não tem Silva no seu nome!')
4839359 | import unittest
from .recorder import Recorder
class TestRecorder(unittest.TestCase):
    """Unit tests for Recorder's sorting, aggregation and averaging."""

    def setUp(self):
        pass

    def test_sorts_correctly(self):
        # After an explicit sort_records(), the record with the highest
        # test_score must come first.
        records = [
            {'test_score': 0.8410596026490066,
             'params': {'n_estimators': 260,
                        'subsample': 0.7800000000000002,
                        'min_split_loss': 0.75,
                        'max_depth': 8}},
            {'test_score': 0.8675496688741722,
             'params': {'n_estimators': 260,
                        'subsample': 0.9600000000000004,
                        'min_split_loss': 0.75,
                        'max_depth': 8}},
            {'test_score': 0.8543046357615894,
             'params': {'n_estimators': 260,
                        'subsample': 0.6000000000000001,
                        'min_split_loss': 0.75,
                        'max_depth': 8}},
            {'test_score': 0.8807947019867549,
             'params': {'n_estimators': 260,
                        'subsample': 0.9400000000000004,
                        'min_split_loss': 0.75,
                        'max_depth': 8}}
        ]
        r = Recorder()
        r.records = records
        r.sort_records()
        self.assertEqual(r.best_params()["test_score"], 0.8807947019867549)
        self.assertEqual(r.best_params()["params"]["subsample"], 0.9400000000000004)
        self.assertEqual(r.best_n_params(2)[1]["test_score"], 0.8675496688741722)
        self.assertEqual(r.best_n_params(2)[1]["params"]["subsample"], 0.9600000000000004)

    def test_auto_sort(self):
        # Same expectations as above, but without calling sort_records():
        # best_params()/best_n_params() must sort on their own.
        records = [
            {'test_score': 0.8410596026490066,
             'params': {'n_estimators': 260,
                        'subsample': 0.7800000000000002,
                        'min_split_loss': 0.75,
                        'max_depth': 8}},
            {'test_score': 0.8675496688741722,
             'params': {'n_estimators': 260,
                        'subsample': 0.9600000000000004,
                        'min_split_loss': 0.75,
                        'max_depth': 8}},
            {'test_score': 0.8543046357615894,
             'params': {'n_estimators': 260,
                        'subsample': 0.6000000000000001,
                        'min_split_loss': 0.75,
                        'max_depth': 8}},
            {'test_score': 0.8807947019867549,
             'params': {'n_estimators': 260,
                        'subsample': 0.9400000000000004,
                        'min_split_loss': 0.75,
                        'max_depth': 8}}
        ]
        r = Recorder()
        r.records = records
        self.assertEqual(r.best_params()["test_score"], 0.8807947019867549)
        self.assertEqual(r.best_params()["params"]["subsample"], 0.9400000000000004)
        self.assertEqual(r.best_n_params(2)[1]["test_score"], 0.8675496688741722)
        self.assertEqual(r.best_n_params(2)[1]["params"]["subsample"], 0.9600000000000004)

    def test_aggregate_and_average_sort(self):
        # aggeregate() (sic - the typo is part of Recorder's public API)
        # sums scores element-wise; average_scores(n) divides them by n.
        records = [
            {'test_score': 0.9,
             'train_score': 0.7,
             'params': {'n_estimators': 260,
                        'subsample': 0.7800000000000002,
                        'min_split_loss': 0.75,
                        'max_depth': 8}},
            {'test_score': 0.60,
             'train_score': 1.0,
             'params': {'n_estimators': 260,
                        'subsample': 0.9600000000000004,
                        'min_split_loss': 0.75,
                        'max_depth': 8}},
            {'test_score': 0.8543046357615894,
             'train_score': 0.6,
             'params': {'n_estimators': 260,
                        'subsample': 0.6000000000000001,
                        'min_split_loss': 0.75,
                        'max_depth': 8}},
        ]
        r = Recorder()
        r.records = records
        # Aggregating a list of a different length must be rejected.
        self.assertRaises(ValueError, r.aggeregate, records[:1])
        r.aggeregate(records)
        self.assertEqual(r.records[0]["test_score"], 1.8)
        self.assertEqual(r.records[1]["test_score"], 1.2)
        r.average_scores(3)
        self.assertAlmostEqual(r.records[0]["test_score"], 0.6)
        self.assertAlmostEqual(r.records[1]["test_score"], 0.4)

    def test_aggregate_checks_sort(self):
        # Aggregating records whose parameter order does not match must
        # raise an AssertionError.  Note records2 is a reversed view over
        # the same dict objects, not copies.
        records = [
            {'test_score': 0.9,
             'train_score': 0.7,
             'params': {'n_estimators': 260,
                        'subsample': 0.7800000000000002,
                        'min_split_loss': 0.75,
                        'max_depth': 8}},
            {'test_score': 0.60,
             'train_score': 1.0,
             'params': {'n_estimators': 260,
                        'subsample': 0.9600000000000004,
                        'min_split_loss': 0.75,
                        'max_depth': 8}},
        ]
        records2 = records[::-1]
        r = Recorder()
        r.records = records
        self.assertRaises(AssertionError, r.aggeregate, records2)
1667115 | import json
import requests
from requests.auth import HTTPBasicAuth
def auth(username='mark', email='<EMAIL>', password='<PASSWORD>'):
    """Log in against the remote API and return the auth token.

    Returns the token string on success, or None (after printing an
    error) on any non-200 response.
    """
    credentials = {
        "username": username,
        "email": email,
        "password": password
    }
    response = requests.post('https://pcuav.pythonanywhere.com/auth/login/', data=credentials)
    status_code = int(response.status_code)
    if status_code != 200:
        print(
            "Error In Authentication: Please check api at https://pcuav.pythonanywhere.com/auth/login/ | error code:",
            status_code
        )
        return None
    return json.loads(response.content)['key']
def post_image(path=None):
    """Authenticate and upload an image to the capture endpoint.

    Parameters
    ----------
    path : str, optional
        Path of the image file to upload.  Falls back to the original
        hard-coded screenshot path when omitted, preserving the previous
        behaviour.

    Bug fixes applied in review: the ``path`` argument used to be ignored
    (a hard-coded ``_path`` was always used), and the file handle was
    never closed.
    """
    # STEP1: CALLS AUTHENTICATION
    token = auth()
    base_url = 'https://pcuav.pythonanywhere.com/api/capture/'
    headers = {'Authorization': "Token {}".format(token)}
    data = {
        "x_axis": "2637",
        "y_axis": "3123",
    }

    # STEP2: SETTINGS API — honour the caller-supplied path.
    if path is None:
        path = r"C:\Users\DEEBYTE COMPUTERS\Pictures\Screen\Screenshot 2021-11-14 234009.png"
    # Context manager guarantees the file is closed after the upload.
    with open(path, 'rb') as image_file:
        files = {'image': image_file}
        auth_response = requests.post(url=base_url, headers=headers, data=data, files=files)

    # STEP3: CHECK STATUS
    if auth_response.status_code == 201:
        print(auth_response.json())
    else:
        print("something Wrong Error Code:", auth_response.status_code)
        print(auth_response.content)
if __name__ == '__main__':
    # Run a one-off upload when executed as a script.
    post_image()
| StarcoderdataPython |
1762285 | <filename>src/jupyrest/jupyrest/executors.py<gh_stars>0
from abc import ABC, abstractmethod
from datetime import datetime
from dataclasses import dataclass
from nbformat.notebooknode import NotebookNode
from typing import Optional
from nbclient.client import NotebookClient
from nbclient.exceptions import CellExecutionError, CellTimeoutError
import logging
from opentelemetry import trace
logger = logging.getLogger(__name__)
tracer = trace.get_tracer(__name__)
class BaseNotebookExeuctor(ABC):
    """Abstract interface for notebook executors.

    NOTE(review): the class name contains a typo ("Exeuctor"); renaming
    it would break importers, so it is kept as-is.
    """

    @abstractmethod
    def get_kernelspec_language(self) -> str:
        """Return the language name recorded in the notebook kernelspec."""
        pass

    @abstractmethod
    async def execute_notebook_async(self, notebook: NotebookNode) -> Optional[str]:
        """Executes a notebook in place. Returns an exception string if any.

        Args:
            notebook (NotebookNode): notebook to execute

        Returns:
            Optional[str]: exception
        """
        pass
class OtelNotebookClient(NotebookClient):
    """A NotebookClient that emits OpenTelemetry Spans."""

    async def async_execute(self, reset_kc: bool = False, **kwargs) -> NotebookNode:
        # Wrap the whole notebook execution in a single span so the run
        # appears as one traced operation.
        with tracer.start_as_current_span("nbclient.async_execute"):
            return await super().async_execute(reset_kc=reset_kc, **kwargs)
class IPythonNotebookExecutor(BaseNotebookExeuctor):
    """Notebook executor backed by a local IPython kernel."""

    def __init__(
        self, kernel_name="python3", timeout_seconds=600, language="python"
    ) -> None:
        self._kernel_name = kernel_name
        self._timeout_seconds = timeout_seconds
        self._language = language

    def get_kernelspec_language(self) -> str:
        """Return the kernelspec language configured for this executor."""
        return self._language

    async def execute_notebook_async(self, notebook: NotebookNode) -> Optional[str]:
        """Execute ``notebook`` in place; return an error string or None."""
        try:
            client = OtelNotebookClient(
                nb=notebook,
                timeout=self._timeout_seconds,
                kernel_name=self._kernel_name,
                log=logger,
            )
            await client.async_execute()
        except CellExecutionError as cell_error:
            # A notebook that calls sys.exit(0) surfaces as a SystemExit
            # with an empty or "0" value; treat that as success.
            if cell_error.ename == "SystemExit" and cell_error.evalue in ("", "0"):
                return None
            return str(cell_error)
        except CellTimeoutError as timeout_error:
            return str(timeout_error)
        return None
| StarcoderdataPython |
129964 | __all__ = ['BaseController']
import json
from pyramid.renderers import render
from pyramid.view import view_config
from horus.views import BaseController
@view_config(http_cache=(0, {'must-revalidate': True}),
             renderer='templates/embed.txt', route_name='embed')
def embed(request, standalone=True):
    """Return the asset URL map for the embed script.

    When served standalone, the response is marked as JavaScript.
    """
    if standalone:
        request.response.content_type = 'application/javascript'
        request.response.charset = 'UTF-8'
    assets = request.webassets_env
    return {
        bundle: json.dumps(assets[bundle].urls())
        for bundle in ('inject', 'jquery', 'raf')
    }
@view_config(renderer='templates/home.pt', route_name='index')
def home(request):
    """Render the home page with the embed snippet pre-rendered into it."""
    embed_code = render('templates/embed.txt', embed(request, False), request)
    return {'embed': embed_code}
def includeme(config):
    """Wire up the horus authentication views and scan this module.

    The original body repeated eight nearly identical ``config.add_view``
    calls; they are table-driven here.  Each entry is
    (view class, attribute, renderer, route name); a None renderer means
    the view is registered without one.
    """
    views = (
        ('horus.views.AuthController', 'login',
         'h:templates/auth.pt', 'login'),
        ('horus.views.AuthController', 'logout',
         None, 'logout'),
        ('horus.views.ForgotPasswordController', 'forgot_password',
         'h:templates/auth.pt', 'forgot_password'),
        ('horus.views.ForgotPasswordController', 'reset_password',
         'h:templates/auth.pt', 'reset_password'),
        ('horus.views.RegisterController', 'register',
         'h:templates/auth.pt', 'register'),
        ('horus.views.RegisterController', 'activate',
         'h:templates/auth.pt', 'activate'),
        ('horus.views.ProfileController', 'profile',
         'h:templates/auth.pt', 'profile'),
    )
    for view, attr, renderer, route_name in views:
        kwargs = dict(attr=attr, route_name=route_name)
        if renderer is not None:
            kwargs['renderer'] = renderer
        config.add_view(view, **kwargs)

    config.scan(__name__)
| StarcoderdataPython |
3266396 | import tradius.models.accounting
import tradius.models.ippool
import tradius.models.groups
import tradius.models.group_attrs
import tradius.models.virtual
import tradius.models.nas
import tradius.models.users
import tradius.models.user_attrs
import tradius.models.user_groups
| StarcoderdataPython |
4800106 | '''setup for GameOfLife
'''
from setuptools import setup, find_packages
from codecs import open
from os import path
here = path.abspath(path.dirname(__file__))
# Long description shown on PyPI (reStructuredText).
long_description = '''
GameOfLife is a python3 package that provides two classes
that together implement Conway's Game of Life.
Install the GameOfLife package using pip::
$ sudo pip3 install GameOfLife
Or clone the git repository::
$ git clone https://github.com/JnyJny/GameOfLife.git
$ cd GameOfLife
$ sudo python3 setup.py install
Also included in the package are several demos:
- [N]CGameOfLife: displays the simulation in a terminal window using curses.
- [N]PGameOfLife: displays the simulation in a PyGame window.
The 'N' variants use a Numpy based World and may be more performant.
'''

# Read the version from the VERSION file (dropping the trailing newline);
# fall back to 0.0.0 when the file is absent, e.g. in a bare checkout.
try:
    with open(path.join(here, 'VERSION'), encoding='utf-8') as f:
        version = f.read()[:-1]
except FileNotFoundError:
    version = '0.0.0'

# GitHub archive URL template, filled in with the version below.
download_url = 'https://github.com/JnyJny/GameOfLife/archive/{}.tar.gz'

setup(name='GameOfLife',
      version=version,
      description="Conway's Game of Life - Cellular Automata.",
      long_description=long_description,
      url='https://github.com/JnyJny/GameOfLife',
      download_url=download_url.format(version),
      author="<NAME>",
      author_email="<EMAIL>",
      license='MIT',
      classifiers=['Development Status :: 3 - Alpha',
                   'Intended Audience :: Developers',
                   'Operating System :: POSIX',
                   'Environment :: Console :: Curses',
                   'Topic :: Software Development :: Libraries :: Python Modules',
                   'Topic :: Scientific/Engineering :: Mathematics',
                   'Topic :: Scientific/Engineering :: Artificial Life',
                   'Topic :: Games/Entertainment :: Simulation',
                   'License :: OSI Approved :: MIT License',
                   'Programming Language :: Python :: 3',
                   'Programming Language :: Python :: 3.4'],
      keywords='conway game life cellular automata simulation',
      packages=find_packages(exclude=['contrib']),
      test_suite='GameOfLife.tests',
      scripts=['contrib/CGameOfLife.py', 'contrib/PGameOfLife.py'],
      install_requires=[],
      extras_require={},
      package_data={},
      data_files=[],
      )
| StarcoderdataPython |
1657186 | <gh_stars>1-10
#!/usr/bin/env python3
# Written by jack @ nyi
# Licensed under FreeBSD's 3 clause BSD license. see LICENSE
'''This class calls the system's "ping" command and stores the results'''
class sys_ping:
    '''this class is a python wrapper for UNIX system ping command, subclass ping does the work, last stores data from the last sysping.ping'''

    def ping(target, count, opts):
        '''Run the system ``ping`` command and return the sequence data.

        Returns a dict mapping icmp sequence number -> round-trip time
        (both strings), -1 when ``count`` is not numeric, or
        ``{-1: "error: ping host unreachable"}`` when ping exits non-zero.
        Also populates the ``last`` subclass with the parsed statistics.

        Review fix: the bare ``except:`` around ``int(count)`` is narrowed
        to the exceptions int() can actually raise, so unrelated errors
        (e.g. KeyboardInterrupt) are no longer swallowed.
        '''
        import subprocess
        # Validate that the ping count is usable as a number.
        try:
            int(count)
        except (TypeError, ValueError):
            return -1
        count = str(count)
        indata = ""
        sys_ping.last.opts = opts
        sys_ping.last.host = target
        # Run ping and capture its output.  ping exits non-zero (e.g. 2)
        # when the host cannot be resolved/reached.
        try:
            if opts is None:
                indata = subprocess.check_output(["ping", "-c", count, target], stderr=subprocess.STDOUT)
            else:
                sys_ping.last.opts = opts
                indata = subprocess.check_output(["ping", "-c", count, opts, target], stderr=subprocess.STDOUT)
            sys_ping.last.success = True
        except subprocess.CalledProcessError:
            sys_ping.last.success = False
            return {-1: "error: ping host unreachable"}
        # Normalise the captured bytes into a list of output lines.
        # NOTE(review): strip("b'") removes those characters from both
        # ends; it works for typical ping output but is fragile.
        indata = str(indata).strip("b'")
        indata = indata.strip()
        indata = indata.split('\\n')
        # Last line is blank; discard it.
        indata.pop()
        # Next-to-last line holds min/avg/max/mdev separated by '/'.
        avg_line = indata.pop()
        avg_line = avg_line.split()[3]
        avg_line = avg_line.split("/")
        sys_ping.last.min_time = avg_line[0]
        sys_ping.last.avg_time = avg_line[1]
        sys_ping.last.max_time = avg_line[2]
        sys_ping.last.mdev_time = avg_line[3]
        # Then the summary line: packets sent/received, loss, total time.
        sum_line = indata.pop()
        sum_line = sum_line.split()
        sys_ping.last.sent = sum_line[0]
        sys_ping.last.recieved = sum_line[3]
        sys_ping.last.pct_loss = sum_line[5]
        sys_ping.last.op_time = sum_line[9]
        # Discard the separator line and the blank line above it.
        indata.pop()
        indata.pop()
        # Remaining lines are the per-packet results; the first is a header.
        sequence = {}
        del(indata[0])
        # Build a dict of icmp_seq -> time from "icmp_seq=<n> ... time=<ms>".
        for line in indata:
            line = line.split()
            seq = line[4].split("=")[1]
            time = line[6].split("=")[1]
            sequence[seq] = time
        sys_ping.last.sequence = sequence
        return sequence

    class last:
        '''This class stores data from last sys_ping.ping()'''
        # min/avg/max/mdev round-trip times from the averages line.
        min_time, avg_time, max_time, mdev_time = 0, 0, 0, 0
        # Packet counts, loss percentage and total operation time.
        sent, recieved, pct_loss, op_time = 0, 0, 0, 0
        host = ""
        opts = ""
        success = ""
        sequence = {}
| StarcoderdataPython |
1710173 | <filename>PitchForMajorParties.py
def pitch(iSupport, iOppose, ThirdPartyCandidates, heOrShe):
    # Satirical two-party stump-speech generator (Python 2 print
    # statements).  The "\b" backspace characters erase the space that
    # the print statement's comma separator inserts, so punctuation
    # appears attached to the preceding word.
    for peep in ThirdPartyCandidates:
        print "It does not matter that", peep, "is not", iOppose, "\b."
    print "What matters is that", iSupport, "is not", iOppose, "\b!"
    print "Therefore you must support", iSupport, "\b!"
    print heOrShe, "is the only one that is not", iOppose, "that can win"
    print "(because people like me support", iSupport, "\b)!"
    print "It does not matter how", iSupport, "stands on the issues."
    print "Let's jump off this cliff together!"
    print "Who cares what your mom told you when you were a kid!"
    print "That does not matter now!"
    print "We cannot allow", iOppose, "to win!"
ThirdPartyCandidates = ['<NAME>', '<NAME>']

# Generate the identical pitch from both sides of the aisle.
pitch('Hillary', 'Trump', ThirdPartyCandidates, 'She')
print("\n")
pitch('Trump', 'Hillary', ThirdPartyCandidates, 'He')
| StarcoderdataPython |
3277609 | <reponame>DavideGalilei/paymentbot
from typing import Dict, List
from pyrogram import Client, ContinuePropagation
from pyrogram.types import Update, User, Chat
from pyrogram.raw.types import UpdateBotShippingQuery, UpdateBotPrecheckoutQuery
# Registries of user-supplied callbacks, populated via the decorators below.
_on_shipping_query_handlers: List[callable] = []
_on_checkout_query_handlers: List[callable] = []
def on_shipping_query(func: callable):
    """Decorator: register ``func`` as a shipping-query handler."""
    _on_shipping_query_handlers.append(func)
    return func
def on_checkout_query(func: callable):
    """Decorator: register ``func`` as a pre-checkout-query handler."""
    _on_checkout_query_handlers.append(func)
    return func
@Client.on_raw_update()
async def _raw(bot: Client, update: Update, users: Dict[int, User], chats: Dict[int, Chat]):
    """Dispatch raw payment updates to the registered handler lists."""
    if isinstance(update, UpdateBotShippingQuery):
        handlers = _on_shipping_query_handlers
    elif isinstance(update, UpdateBotPrecheckoutQuery):
        handlers = _on_checkout_query_handlers
    else:
        # Not a payment update: let other pyrogram handlers process it.
        raise ContinuePropagation()
    for handler in handlers:
        await handler(bot, update, users, chats)
"""
{
"_": "types.UpdateBotShippingQuery",
"query_id": 1028888578233737147,
"user_id": 239556789,
"payload": "b'239556789_bought'",
"shipping_address": {
"_": "types.PostAddress",
"street_line1": "a",
"street_line2": "a",
"city": "Rome",
"state": "a",
"country_iso2": "IT",
"post_code": "1010"
}
}
{
"_": "types.UpdateBotPrecheckoutQuery",
"query_id": 1028888575142874651,
"user_id": 239556789,
"payload": "b'239556789_bought'",
"currency": "EUR",
"total_amount": 42000,
"info": {
"_": "types.PaymentRequestedInfo",
"name": "asddsadsa",
"phone": "393331341834",
"email": "<EMAIL>",
"shipping_address": {
"_": "types.PostAddress",
"street_line1": "a",
"street_line2": "a",
"city": "Rome",
"state": "a",
"country_iso2": "IT",
"post_code": "1010"
}
},
"shipping_option_id": "asd"
}
"""
| StarcoderdataPython |
1663570 | from car import Car
# Build a Car and exercise its accessors (Car comes from the local "car"
# module; presumably a tutorial-style example class — not visible here).
my_new_car = Car('toyota', 'corolla', 2014)
print(my_new_car.get_descriptive_name())
# Set the odometer attribute directly, then print it via read_odometer().
my_new_car.odometer_reading = 23
my_new_car.read_odometer()
| StarcoderdataPython |
3341755 | <reponame>mgb4/wdisp
import os.path
# Server location used by the URL helpers below.
address = "http://localhost"
port = 4300


def wwwroot():
    """Return the absolute path of the static-file root next to this module."""
    # os.path.join is portable; the original hard-coded a "/" separator.
    return os.path.join(os.path.dirname(__file__), "wwwroot")


def root_url():
    """Return the server base URL, e.g. "http://localhost:4300/"."""
    return f"{address}:{port}/"


def url_for(path):
    """Return the absolute API URL for *path* (expected to start with "/")."""
    return f"{address}:{port}/api{path}"
| StarcoderdataPython |
3345912 | import cv2
import torch
import numpy as np
import BboxToolkit as bt
PI = np.pi


def regular_theta(theta, mode='180', start=-PI/2):
    """Wrap *theta* into the half-open interval [start, start + cycle).

    The cycle length is 2*pi for mode '360' and pi for mode '180'.
    """
    assert mode in ['360', '180']
    if mode == '360':
        cycle = 2 * PI
    else:
        cycle = PI
    # Shift so the window begins at zero, wrap modulo the cycle, shift back.
    return (theta - start) % cycle + start
def mintheta_obb(rbox):
    """Canonicalize oriented boxes so the rotation angle has minimal magnitude.

    Args:
        rbox: array (..., 6) of [x, y, w, h, theta, cls].

    Returns:
        Array of the same shape describing the same geometry, using whichever
        of the two equivalent representations (as-is, or rotated 90 degrees
        with w/h swapped) has the smaller absolute angle.
    """
    x, y, w, h, theta, cls = np.split(rbox, 6, axis=-1)
    # Two equivalent parameterizations of the same rectangle.
    theta1 = regular_theta(theta)
    theta2 = regular_theta(theta + PI / 2)
    abs_theta1 = np.abs(theta1)
    abs_theta2 = np.abs(theta2)
    # Choosing the 90-degree-rotated representation swaps width and height.
    w_regular = np.where(abs_theta1 < abs_theta2, w, h)
    h_regular = np.where(abs_theta1 < abs_theta2, h, w)
    theta_regular = np.where(abs_theta1 < abs_theta2, theta1, theta2)
    # theta_regular = np.where(theta_regular == -PI / 4, PI / 4, theta_regular)
    return np.concatenate([x, y, w_regular, h_regular, theta_regular, cls], axis=-1)
# def regular_obb(obboxes):
# x, y, w, h, theta = obboxes.unbind(dim=-1)
# w_regular = torch.where(w > h, w, h)
# h_regular = torch.where(w > h, h, w)
# theta_regular = torch.where(w > h, theta, theta+PI/2)
# theta_regular = regular_theta(theta_regular)
# return torch.stack([x, y, w_regular, h_regular, theta_regular], dim=-1)
# def mintheta_obb(obboxes):
# ''''
# norm the theta to (-45, 45), and its w, h need to be exchanged
# '''
# x, y, w, h, theta = obboxes.unbind(dim=-1)
# theta1 = regular_theta(theta)
# theta2 = regular_theta(theta + pi/2)
# abs_theta1 = torch.abs(theta1)
# abs_theta2 = torch.abs(theta2)
# w_regular = torch.where(abs_theta1 < abs_theta2, w, h)
# h_regular = torch.where(abs_theta1 < abs_theta2, h, w)
# theta_regular = torch.where(abs_theta1 < abs_theta2, theta1, theta2)
# obboxes = torch.stack([x, y, w_regular, h_regular, theta_regular], dim=-1)
# return obboxes
def poly2obb_np(rbox):
    """Convert a polygon (8 coords, optionally + class) to an oriented box.

    Args:
        rbox: array whose last axis is 8 ([x1, y1, ..., x4, y4]) or
            9 (same plus a trailing class value).

    Returns:
        Array shaped like the input with the polygon replaced by
        [x, y, w, h, theta] (plus cls when present), with one extra leading
        axis added by the trailing ``res[None, :]``.

    Raises:
        NotImplementedError: for any other last-axis size.
    """
    if rbox.shape[-1] == 9:
        res = np.empty((*rbox.shape[:-1], 6))
        res[..., 5] = rbox[..., 8]
        rbox = rbox[..., :8].reshape(1, -1, 2).astype(np.float32)
    elif rbox.shape[-1] == 8:
        res = np.empty([*rbox.shape[:-1], 5])
        rbox = rbox.reshape(1, -1, 2).astype(np.float32)
    else:
        raise NotImplementedError
    # NOTE(review): reshape(1, -1, 2) flattens all leading axes into a single
    # point list, so a single minAreaRect covers every input point — this
    # looks intended for one polygon per call; confirm against callers.
    (x, y), (w, h), angle = cv2.minAreaRect(rbox)
    # OpenCV reports the angle in degrees; normalize so w >= h and convert
    # to radians with the sign convention used across this module.
    if w >= h:
        angle = -angle
    else:
        w, h = h, w
        angle = -90 - angle
    theta = angle / 180 * PI
    res[..., 0] = x
    res[..., 1] = y
    res[..., 2] = w
    res[..., 3] = h
    res[..., 4] = theta
    return res[None, :]
def poly2obb(polys):
    """Convert polygon tensors (..., 2k) to oriented boxes (..., 5).

    Each polygon is reduced to its minimum-area rectangle with OpenCV on the
    CPU; the result is created on *polys*' device/dtype via new_tensor.
    """
    polys_np = polys.detach().cpu().numpy()
    # Remember the leading (batch) shape so it can be restored at the end.
    order = polys_np.shape[:-1]
    num_points = polys_np.shape[-1] // 2
    polys_np = polys_np.reshape(-1, num_points, 2)
    polys_np = polys_np.astype(np.float32)
    obboxes = []
    for poly in polys_np:
        (x, y), (w, h), angle = cv2.minAreaRect(poly)
        # Normalize so w >= h; convert OpenCV's degrees to radians.
        if w >= h:
            angle = -angle
        else:
            w, h = h, w
            angle = -90 - angle
        theta = angle / 180 * PI
        obboxes.append([x, y, w, h, theta])
    if not obboxes:
        # Empty input: keep a well-formed (0, 5) result.
        obboxes = np.zeros((0, 5))
    else:
        obboxes = np.array(obboxes)
    obboxes = obboxes.reshape(*order, 5)
    return polys.new_tensor(obboxes)
# def rectpoly2obb(polys):
# theta = torch.atan2(-(polys[..., 3] - polys[..., 1]),
# polys[..., 2] - polys[..., 0])
# Cos, Sin = torch.cos(theta), torch.sin(theta)
# Matrix = torch.stack([Cos, -Sin, Sin, Cos], dim=-1)
# Matrix = Matrix.view(*Matrix.shape[:-1], 2, 2)
# x = polys[..., 0::2].mean(-1)
# y = polys[..., 1::2].mean(-1)
# center = torch.stack([x, y], dim=-1).unsqueeze(-2)
# center_polys = polys.view(*polys.shape[:-1], 4, 2) - center
# rotate_polys = torch.matmul(center_polys, Matrix.transpose(-1, -2))
# xmin, _ = torch.min(rotate_polys[..., :, 0], dim=-1)
# xmax, _ = torch.max(rotate_polys[..., :, 0], dim=-1)
# ymin, _ = torch.min(rotate_polys[..., :, 1], dim=-1)
# ymax, _ = torch.max(rotate_polys[..., :, 1], dim=-1)
# w = xmax - xmin
# h = ymax - ymin
# obboxes = torch.stack([x, y, w, h, theta], dim=-1)
# return regular_obb(obboxes)
def poly2hbb(polys):
    """Convert polygons (..., 2k) to tight axis-aligned boxes (..., 4).

    Returns boxes in [xmin, ymin, xmax, ymax] order.
    """
    num_points = polys.size(-1) // 2
    points = polys.view(*polys.shape[:-1], num_points, 2)
    # Per-coordinate extremes over the point axis give the enclosing box.
    top_left = points.min(dim=-2).values
    bottom_right = points.max(dim=-2).values
    return torch.cat([top_left, bottom_right], dim=-1)
def obb2poly(obboxes):
    """Convert oriented boxes [cx, cy, w, h, theta] to 4-corner polygons (..., 8)."""
    center, w, h, theta = torch.split(obboxes, [2, 1, 1, 1], dim=-1)
    cos_t, sin_t = torch.cos(theta), torch.sin(theta)
    # Half-extent vectors along the box's width and height axes
    # (image coordinates: y grows downward, hence the sign pattern).
    half_w = torch.cat([w / 2 * cos_t, -w / 2 * sin_t], dim=-1)
    half_h = torch.cat([-h / 2 * sin_t, -h / 2 * cos_t], dim=-1)
    corners = [
        center + half_w + half_h,
        center + half_w - half_h,
        center - half_w - half_h,
        center - half_w + half_h,
    ]
    return torch.cat(corners, dim=-1)
def obb2hbb(obboxes, mode="xyxy"):
    """Convert oriented boxes [cx, cy, w, h, theta] to their axis-aligned hull.

    Args:
        obboxes: tensor (..., 5).
        mode: "xyxy" -> [xmin, ymin, xmax, ymax]; "cxcywh" -> [cx, cy, w, h].

    Returns:
        Tensor (..., 4) of horizontal boxes.

    Raises:
        ValueError: if *mode* is not a supported format.
    """
    center, w, h, theta = torch.split(obboxes, [2, 1, 1, 1], dim=-1)
    Cos, Sin = torch.cos(theta), torch.sin(theta)
    # Projected half-extents of the rotated box on the x/y axes.
    x_bias = torch.abs(w/2 * Cos) + torch.abs(h/2 * Sin)
    y_bias = torch.abs(w/2 * Sin) + torch.abs(h/2 * Cos)
    bias = torch.cat([x_bias, y_bias], dim=-1)
    if mode == "xyxy":
        return torch.cat([center - bias, center + bias], dim=-1)
    if mode == "cxcywh":
        # Bug fix: the original concatenated along dim 0 (default), which
        # produced a malformed shape for batched input; bias is already
        # non-negative, so the redundant .abs() is dropped too.
        return torch.cat([center, 2 * bias], dim=-1)
    # The original silently returned the 5-dim input for unknown modes.
    raise ValueError(f"unsupported mode: {mode!r}")
def hbb2poly(hbboxes):
    """Expand horizontal boxes [l, t, r, b] into 4-corner polygons (..., 8)."""
    left, top, right, bottom = hbboxes.unbind(-1)
    # Corners in order: top-left, top-right, bottom-right, bottom-left.
    corners = (left, top, right, top, right, bottom, left, bottom)
    return torch.stack(corners, dim=-1)
def hbb2obb(hbboxes):
    """Lift horizontal boxes [xmin, ymin, xmax, ymax] to oriented boxes (..., 5).

    The longer side becomes the width: when the box is taller than wide,
    w/h are swapped and theta is set to -pi/2 so the result is canonical.
    """
    cx = (hbboxes[..., 0] + hbboxes[..., 2]) * 0.5
    cy = (hbboxes[..., 1] + hbboxes[..., 3]) * 0.5
    width = hbboxes[..., 2] - hbboxes[..., 0]
    height = hbboxes[..., 3] - hbboxes[..., 1]
    zero = cx.new_zeros(cx.shape)
    wide = torch.stack([cx, cy, width, height, zero], dim=-1)
    tall = torch.stack([cx, cy, height, width, zero - PI / 2], dim=-1)
    return torch.where((width >= height)[..., None], wide, tall)
# Dispatch table used by bbox2type: (source format, target format) -> converter.
_type_func_map = {
    ('poly', 'obb'): poly2obb,
    ('poly', 'hbb'): poly2hbb,
    ('obb', 'poly'): obb2poly,
    ('obb', 'hbb'): obb2hbb,
    ('hbb', 'poly'): hbb2poly,
    ('hbb', 'obb'): hbb2obb
}
def get_bbox_type(bboxes, with_score=False):
    """Infer the box format name ('hbb', 'obb', 'poly' or 'notype') from the last dim."""
    dim = bboxes.size(-1)
    if with_score:
        # One trailing channel holds a confidence score, not a coordinate.
        dim -= 1
    return {4: 'hbb', 5: 'obb', 8: 'poly'}.get(dim, 'notype')
def bbox2type(bboxes, to_type):
    """Convert *bboxes* to the requested format ('hbb', 'obb' or 'poly').

    Returns the input unchanged when it is already in the target format.

    Raises:
        ValueError: when the input's last dimension matches no known format.
    """
    assert to_type in ['hbb', 'obb', 'poly']
    ori_type = get_bbox_type(bboxes)
    if ori_type == 'notype':
        raise ValueError('Not a bbox type')
    if ori_type == to_type:
        return bboxes
    # Look up the converter for this (source, target) pair.
    trans_func = _type_func_map[(ori_type, to_type)]
    return trans_func(bboxes)
| StarcoderdataPython |
3257484 | import os
import math
from pygears import gear
from pygears.typing import Fixp, Tuple
from pygears_vivado.vivmod import SVVivModuleInst
from pygears.core.gear import InSig
# TODO: Make it work properly with widths that are not multiple of 8
@gear(hdl={'hdlgen_cls': SVVivModuleInst},
      sigmap={'aclk': 'clk'},
      signals=[InSig('aclk', 1)])
async def cordic(
        s_axis_phase: Fixp[3, 'W'],
        *,
        output_width=b'input_width',
        input_width=b'W',
        functional_selection="Sin_and_Cos",
        architectural_configuration="Word_Serial", # Word_Serial, Parallel
        pipelining_mode="Optimal", # Optimal, Maximum
        phase_format="Scaled_Radians",
        flow_control="Blocking", # NonBlocking, Blocking
        out_tready=True,
        data_format="SignedFraction",
        _w_out=b'((W+7)//8)*8') -> {
            'm_axis_dout': Tuple[Fixp[2, '_w_out'], Fixp[2, '_w_out']]
        }:
    """Simulation model of the Xilinx CORDIC IP in Sin_and_Cos mode.

    The phase input is in scaled radians (value * pi), so for every sample p
    the gear yields the pair (cos(pi*p), sin(pi*p)) as fixed-point values.
    The keyword parameters mirror the IP's configuration options and are
    consumed by the Vivado HDL generator rather than this Python body.
    """
    async with s_axis_phase as p:
        yield (Fixp[2, output_width](math.cos(math.pi * float(p))),
               Fixp[2, output_width](math.sin(math.pi * float(p))))
| StarcoderdataPython |
156932 | import tweepy
import os
from utilities.time_management import *
from utilities.config import *
import django
os.environ["DJANGO_SETTINGS_MODULE"] = "portal.settings"
django.setup()
from details.models import Entities, Topic
auth = tweepy.OAuthHandler(CONSUMER_KEY, CONSUMER_SECRET)
auth.set_access_token(ACCESS_TOKEN, ACCESS_TOKEN_SECRET)
class Tweet:
    """Plain value object holding the display fields of one fetched tweet."""

    def __init__(self, id, number, text, name, handle, date, dp_url):
        # id: tweet id; number: 1-based position within the fetched batch;
        # handle: author's @screen_name; dp_url: profile-image URL.
        self.id, self.number = id, number
        self.text, self.name, self.handle = text, name, handle
        self.date, self.dp_url = date, dp_url
def get_tweets(topic_id, count, since_id=None):
    """Fetch up to *count* recent English tweets for the topic's first keyword.

    Args:
        topic_id: primary key of the Topic row to search for.
        count: maximum number of tweets to request from the API.
        since_id: only return tweets newer than this tweet id, if given.

    Returns:
        A list of Tweet value objects; empty when the DB lookup or the
        Twitter API call fails (best-effort contract of the original).
    """
    tweets = []
    try:
        api = tweepy.API(auth)
        topic = Topic.objects.get(topic_id=topic_id)
        # NOTE(review): an Entities model instance is passed as the query
        # string; presumably its __str__ yields the keyword — confirm.
        keyword = Entities.objects.filter(topic=topic)[0]
        new_tweets = api.search(q=keyword, count=count, since_id=since_id, lang='en')
        for cnt, tweet in enumerate(new_tweets, start=1):
            # Convert the UTC creation time to a local display string.
            date = get_tweet_date_time_string(
                convert_datetime_to_local(tweet.created_at.replace(tzinfo=pytz.UTC)))
            tweets.append(Tweet(tweet.id, cnt, tweet.text, tweet.author.name,
                                '@' + tweet.author.screen_name, date,
                                tweet.author.profile_image_url_https))
    except Exception:
        # The original used a bare "except: None", which also swallowed
        # SystemExit/KeyboardInterrupt; keep the best-effort behaviour but
        # narrow the catch to ordinary exceptions.
        pass
    return tweets
| StarcoderdataPython |
50093 | <reponame>resteasy/examples
import http.client, urllib.parse
from M2Crypto import BIO, SMIME, X509
# Fetch an S/MIME-encrypted payload from the local RESTEasy example server.
conn = http.client.HTTPConnection("localhost:9095")
conn.request("GET", "/smime/encrypted")
res = conn.getresponse()
if res.status != 200:
    print((res.status))
    raise Exception("Failed to connect")
# Keep the response's Content-Type so the MIME message can be rebuilt below.
contentType = res.getheader("content-type")
data = res.read()
# Need to reconstruct a Mail message with content type
# as SMIME wants it in that format
bio = BIO.MemoryBuffer(b"Content-Type: ")
bio.write(contentType)
bio.write("\r\n\r\n")
bio.write(data)
s = SMIME.SMIME()
# Private key + certificate used to decrypt the PKCS#7 envelope.
s.load_key('src/main/resources/private.pem', 'src/main/resources/cert.pem')
p7, d = SMIME.smime_load_pkcs7_bio(bio)
out = s.decrypt(p7)
print("--- Received Data ---")
# It may contain headers like Content-Type, so you'll have to parse it
print(out)
3331066 | <filename>src/sudoku_solver.py
from __future__ import division
import pyomo.environ as pyo
from data_maker import DataMaker
class SudokuSolver:
    """Solve 9x9 sudokus as a binary program with Pyomo and the CPLEX solver.

    Binary variable x[i, j, n] == 1 places digit n in cell (i, j); the four
    constraint families below encode the standard sudoku rules.
    """
    def __init__(self):
        self.dataMaker = DataMaker()
        self.model = pyo.AbstractModel()
        self.initialize_model()
    @staticmethod
    def obj_expression(model):
        # Every feasible grid scores the same (81); the objective merely
        # gives the solver something well-defined to maximize.
        return pyo.summation(model.x)
    @staticmethod
    def rows_constraint_rule(model, n, i):
        # Digit n appears exactly once in row i.
        return sum(model.x[i, j, n]for j in model.j) == 1
    @staticmethod
    def columns_constraint_rule(model, n, j):
        # Digit n appears exactly once in column j.
        return sum(model.x[i, j, n]for i in model.i) == 1
    @staticmethod
    def no_overlap_constraint_rule(model, i, j):
        # Cell (i, j) holds exactly one digit.
        return sum(model.x[i, j, n]for n in model.n) == 1
    @staticmethod
    def squares_constraint_rule(model, n, r, c):
        # Digit n appears exactly once in the 3x3 block indexed by (r, c);
        # blocks are 1-based, hence the 3*r - 2 .. 3*r ranges.
        i_range = pyo.RangeSet(3 * r - 2, 3 * r)
        j_range = pyo.RangeSet(3 * c - 2, 3 * c)
        return sum(model.x[i, j, n] for i in i_range for j in j_range) == 1
    def initialize_model(self):
        """Declare the abstract model's parameters, index sets, variable and constraints."""
        self.model.I = pyo.Param(within=pyo.NonNegativeIntegers)
        self.model.J = pyo.Param(within=pyo.NonNegativeIntegers)
        self.model.N = pyo.Param(within=pyo.NonNegativeIntegers)
        self.model.R = pyo.Param(within=pyo.NonNegativeIntegers)
        self.model.C = pyo.Param(within=pyo.NonNegativeIntegers)
        self.model.i = pyo.RangeSet(1, self.model.I)
        self.model.j = pyo.RangeSet(1, self.model.J)
        self.model.n = pyo.RangeSet(1, self.model.N)
        self.model.r = pyo.RangeSet(1, self.model.R)
        self.model.c = pyo.RangeSet(1, self.model.C)
        self.model.x = pyo.Var(self.model.i,
                               self.model.j,
                               self.model.n,
                               domain=pyo.Binary)
        self.model.obj = pyo.Objective(
            rule=self.obj_expression, sense=pyo.maximize)
        self.model.rows = pyo.Constraint(
            self.model.n,
            self.model.i,
            rule=self.rows_constraint_rule)
        self.model.cols = pyo.Constraint(
            self.model.n,
            self.model.j,
            rule=self.columns_constraint_rule)
        self.model.overlap = pyo.Constraint(
            self.model.i,
            self.model.j,
            rule=self.no_overlap_constraint_rule)
        self.model.squares = pyo.Constraint(
            self.model.n,
            self.model.r,
            self.model.c,
            rule=self.squares_constraint_rule)
    def fix_variables(self, sudokuGrid):
        """Pin the variables for every pre-filled (non-zero) clue in the grid."""
        for i in range(0, 9):
            for j in range(0, 9):
                n = sudokuGrid[i][j]
                if(n != 0):
                    # Grid indices are 0-based, model indices 1-based.
                    self.instance.x[i+1, j+1, n].fix(1)
    def solve(self, sudokuGrid):
        """Solve *sudokuGrid* (9x9 list of lists, 0 = empty) and return the full grid."""
        # NOTE(review): self.instance is created here rather than in __init__,
        # so fix_variables/extractSolution are only valid after solve() runs.
        self.instance = self.model.create_instance(
            self.dataMaker.make_indices())
        self.fix_variables(sudokuGrid)
        solver = pyo.SolverFactory('cplex')
        solver.solve(self.instance)
        return self.extractSolution(self.instance)
    def extractSolution(self, instance):
        """Read the solved binary variables back into a 9x9 grid of digits."""
        solutionGrid = [[0 for i in range(0, 9)] for j in range(0, 9)]
        for i in instance.i:
            for j in instance.j:
                for n in instance.n:
                    if(instance.x[i, j, n].value == 1):
                        solutionGrid[i-1][j-1] = n
        return solutionGrid
    def print_solution(self, instance):
        """Pretty-print a solved instance's grid to stdout, row by row."""
        for i in instance.i:
            for j in instance.j:
                print(" ", end="")
                for n in instance.n:
                    if(instance.x[i, j, n].value == 1):
                        print(n, end="")
            print()
| StarcoderdataPython |
1794621 | __all__ = ["Log", "Progbar", "RandomSeeds", "ModelParamStore", "DefaultDict", "Visualize"]
| StarcoderdataPython |
3335867 | import random
from typing import Optional
from fastapi import WebSocket
from starlette.responses import HTMLResponse
from starlette.websockets import WebSocketDisconnect
from app.ws.schema import BroadcastMsgReq
from app.ws import manager
from app.ws.ws import html
async def ws_view(
    websocket: WebSocket, client_id: int, session: Optional[str] = "qeqe", transport: Optional[str] = "websocket"
):
    """
    Websocket endpoint: authenticate the connection, then pump incoming
    JSON messages until the client disconnects.
    """
    # connect_with_token returns falsy on failed authentication; in that
    # case the socket is simply dropped without registering a session.
    res = await manager.connect_with_token(websocket)
    if not res:
        return
    try:
        while True:
            await manager.receive_json(websocket)
    except WebSocketDisconnect:
        manager.remove_session(websocket)
async def test_html_view():
    """
    Serve the bundled test HTML page (manual websocket smoke test).
    """
    return HTMLResponse(html)
async def sockjs_info_view(t: Optional[int]):
    """
    Fake SockJS /info endpoint.

    The *t* query parameter is SockJS's cache-buster and is intentionally
    unused. NOTE(review): presumably the entropy value is only consumed by
    the SockJS client for session randomness — confirm nothing
    security-sensitive depends on random.randint here.
    """
    data = {
        "entropy": random.randint(1, 2147483647),
    }
    return data
async def broadcast_msg_view(req: BroadcastMsgReq):
    """
    Broadcast *req.msg* to every connected websocket session.
    """
    # The original bound req.msg to an unused local; broadcast directly.
    await manager.broadcast(req.msg)
    return "success"
| StarcoderdataPython |
1642731 | import os
from flask import Flask
from flask_jwt import JWT
from .utils.errors import APIError
def create_app(test_config=None):
    """Application factory: build and wire the Flask app.

    Args:
        test_config: optional mapping overriding instance/config.py
            (used by tests); when None the on-disk config is loaded.

    Returns:
        The configured Flask application.
    """
    # create and configure the app
    app = Flask(__name__)
    if test_config is None:
        # load the instance config, if it exists, when not testing
        app.config.from_pyfile('config.py')
    else:
        # load the test config if passed in
        app.config.from_mapping(test_config)
    # ensure the instance folder exists
    try:
        os.makedirs(app.instance_path)
    except OSError:
        pass
    # a simple page that says hello
    @app.route('/healthcheck')
    def healthcheck():
        return 'Hello World!'
    from . import auth
    app.register_blueprint(auth.bp)
    # Constructing JWT registers the /auth endpoint on the app as a side
    # effect; the local binding itself is not used afterwards.
    jwt = JWT(app, auth.authenticate, auth.identity)
    from .routes import user
    app.register_blueprint(user.bp)
    from .routes import house
    app.register_blueprint(house.bp)
    # All APIError subclasses are serialized to JSON responses.
    app.register_error_handler(APIError, APIError.to_json)
    return app
| StarcoderdataPython |
65881 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for object_detection.core.preprocessor."""
import numpy as np
import six
import tensorflow as tf
from object_detection.tensorflow_detect.core import standard_fields as fields, \
preprocessor, preprocessor_cache
if six.PY2:
import mock # pylint: disable=g-import-not-at-top
else:
from unittest import mock # pylint: disable=g-import-not-at-top
class PreprocessorTest(tf.test.TestCase):
  def createColorfulTestImage(self):
    """Return a 1x200x400x3 uint8 image made of four solid color quadrants.

    Top row: red | yellow; bottom row: magenta | gray. The asymmetric layout
    makes flips and rotations detectable in the preprocessor tests.
    """
    ch255 = tf.fill([1, 100, 200, 1], tf.constant(255, dtype=tf.uint8))
    ch128 = tf.fill([1, 100, 200, 1], tf.constant(128, dtype=tf.uint8))
    ch0 = tf.fill([1, 100, 200, 1], tf.constant(0, dtype=tf.uint8))
    imr = tf.concat([ch255, ch0, ch0], 3)
    img = tf.concat([ch255, ch255, ch0], 3)
    imb = tf.concat([ch255, ch0, ch255], 3)
    imw = tf.concat([ch128, ch128, ch128], 3)
    # Stitch the quadrants: concat along width (axis 2), then height (axis 1).
    imu = tf.concat([imr, img], 2)
    imd = tf.concat([imb, imw], 2)
    im = tf.concat([imu, imd], 1)
    return im
def createTestImages(self):
images_r = tf.constant([[[128, 128, 128, 128], [0, 0, 128, 128],
[0, 128, 128, 128], [192, 192, 128, 128]]],
dtype=tf.uint8)
images_r = tf.expand_dims(images_r, 3)
images_g = tf.constant([[[0, 0, 128, 128], [0, 0, 128, 128],
[0, 128, 192, 192], [192, 192, 128, 192]]],
dtype=tf.uint8)
images_g = tf.expand_dims(images_g, 3)
images_b = tf.constant([[[128, 128, 192, 0], [0, 0, 128, 192],
[0, 128, 128, 0], [192, 192, 192, 128]]],
dtype=tf.uint8)
images_b = tf.expand_dims(images_b, 3)
images = tf.concat([images_r, images_g, images_b], 3)
return images
def createEmptyTestBoxes(self):
boxes = tf.constant([[]], dtype=tf.float32)
return boxes
def createTestBoxes(self):
boxes = tf.constant(
[[0.0, 0.25, 0.75, 1.0], [0.25, 0.5, 0.75, 1.0]], dtype=tf.float32)
return boxes
def createTestLabelScores(self):
return tf.constant([1.0, 0.5], dtype=tf.float32)
def createTestLabelScoresWithMissingScore(self):
return tf.constant([0.5, np.nan], dtype=tf.float32)
def createTestMasks(self):
mask = np.array([
[[255.0, 0.0, 0.0],
[255.0, 0.0, 0.0],
[255.0, 0.0, 0.0]],
[[255.0, 255.0, 0.0],
[255.0, 255.0, 0.0],
[255.0, 255.0, 0.0]]])
return tf.constant(mask, dtype=tf.float32)
def createTestKeypoints(self):
keypoints = np.array([
[[0.1, 0.1], [0.2, 0.2], [0.3, 0.3]],
[[0.4, 0.4], [0.5, 0.5], [0.6, 0.6]],
])
return tf.constant(keypoints, dtype=tf.float32)
def createTestKeypointsInsideCrop(self):
keypoints = np.array([
[[0.4, 0.4], [0.5, 0.5], [0.6, 0.6]],
[[0.4, 0.4], [0.5, 0.5], [0.6, 0.6]],
])
return tf.constant(keypoints, dtype=tf.float32)
def createTestKeypointsOutsideCrop(self):
keypoints = np.array([
[[0.1, 0.1], [0.2, 0.2], [0.3, 0.3]],
[[0.1, 0.1], [0.2, 0.2], [0.3, 0.3]],
])
return tf.constant(keypoints, dtype=tf.float32)
def createKeypointFlipPermutation(self):
return np.array([0, 2, 1], dtype=np.int32)
def createTestLabels(self):
labels = tf.constant([1, 2], dtype=tf.int32)
return labels
def createTestBoxesOutOfImage(self):
boxes = tf.constant(
[[-0.1, 0.25, 0.75, 1], [0.25, 0.5, 0.75, 1.1]], dtype=tf.float32)
return boxes
def createTestMultiClassScores(self):
return tf.constant([[1.0, 0.0], [0.5, 0.5]], dtype=tf.float32)
def expectedImagesAfterNormalization(self):
images_r = tf.constant([[[0, 0, 0, 0], [-1, -1, 0, 0],
[-1, 0, 0, 0], [0.5, 0.5, 0, 0]]],
dtype=tf.float32)
images_r = tf.expand_dims(images_r, 3)
images_g = tf.constant([[[-1, -1, 0, 0], [-1, -1, 0, 0],
[-1, 0, 0.5, 0.5], [0.5, 0.5, 0, 0.5]]],
dtype=tf.float32)
images_g = tf.expand_dims(images_g, 3)
images_b = tf.constant([[[0, 0, 0.5, -1], [-1, -1, 0, 0.5],
[-1, 0, 0, -1], [0.5, 0.5, 0.5, 0]]],
dtype=tf.float32)
images_b = tf.expand_dims(images_b, 3)
images = tf.concat([images_r, images_g, images_b], 3)
return images
def expectedMaxImageAfterColorScale(self):
images_r = tf.constant([[[0.1, 0.1, 0.1, 0.1], [-0.9, -0.9, 0.1, 0.1],
[-0.9, 0.1, 0.1, 0.1], [0.6, 0.6, 0.1, 0.1]]],
dtype=tf.float32)
images_r = tf.expand_dims(images_r, 3)
images_g = tf.constant([[[-0.9, -0.9, 0.1, 0.1], [-0.9, -0.9, 0.1, 0.1],
[-0.9, 0.1, 0.6, 0.6], [0.6, 0.6, 0.1, 0.6]]],
dtype=tf.float32)
images_g = tf.expand_dims(images_g, 3)
images_b = tf.constant([[[0.1, 0.1, 0.6, -0.9], [-0.9, -0.9, 0.1, 0.6],
[-0.9, 0.1, 0.1, -0.9], [0.6, 0.6, 0.6, 0.1]]],
dtype=tf.float32)
images_b = tf.expand_dims(images_b, 3)
images = tf.concat([images_r, images_g, images_b], 3)
return images
def expectedMinImageAfterColorScale(self):
images_r = tf.constant([[[-0.1, -0.1, -0.1, -0.1], [-1, -1, -0.1, -0.1],
[-1, -0.1, -0.1, -0.1], [0.4, 0.4, -0.1, -0.1]]],
dtype=tf.float32)
images_r = tf.expand_dims(images_r, 3)
images_g = tf.constant([[[-1, -1, -0.1, -0.1], [-1, -1, -0.1, -0.1],
[-1, -0.1, 0.4, 0.4], [0.4, 0.4, -0.1, 0.4]]],
dtype=tf.float32)
images_g = tf.expand_dims(images_g, 3)
images_b = tf.constant([[[-0.1, -0.1, 0.4, -1], [-1, -1, -0.1, 0.4],
[-1, -0.1, -0.1, -1], [0.4, 0.4, 0.4, -0.1]]],
dtype=tf.float32)
images_b = tf.expand_dims(images_b, 3)
images = tf.concat([images_r, images_g, images_b], 3)
return images
def expectedImagesAfterLeftRightFlip(self):
images_r = tf.constant([[[0, 0, 0, 0], [0, 0, -1, -1],
[0, 0, 0, -1], [0, 0, 0.5, 0.5]]],
dtype=tf.float32)
images_r = tf.expand_dims(images_r, 3)
images_g = tf.constant([[[0, 0, -1, -1], [0, 0, -1, -1],
[0.5, 0.5, 0, -1], [0.5, 0, 0.5, 0.5]]],
dtype=tf.float32)
images_g = tf.expand_dims(images_g, 3)
images_b = tf.constant([[[-1, 0.5, 0, 0], [0.5, 0, -1, -1],
[-1, 0, 0, -1], [0, 0.5, 0.5, 0.5]]],
dtype=tf.float32)
images_b = tf.expand_dims(images_b, 3)
images = tf.concat([images_r, images_g, images_b], 3)
return images
def expectedImagesAfterUpDownFlip(self):
images_r = tf.constant([[[0.5, 0.5, 0, 0], [-1, 0, 0, 0],
[-1, -1, 0, 0], [0, 0, 0, 0]]],
dtype=tf.float32)
images_r = tf.expand_dims(images_r, 3)
images_g = tf.constant([[[0.5, 0.5, 0, 0.5], [-1, 0, 0.5, 0.5],
[-1, -1, 0, 0], [-1, -1, 0, 0]]],
dtype=tf.float32)
images_g = tf.expand_dims(images_g, 3)
images_b = tf.constant([[[0.5, 0.5, 0.5, 0], [-1, 0, 0, -1],
[-1, -1, 0, 0.5], [0, 0, 0.5, -1]]],
dtype=tf.float32)
images_b = tf.expand_dims(images_b, 3)
images = tf.concat([images_r, images_g, images_b], 3)
return images
def expectedImagesAfterRot90(self):
images_r = tf.constant([[[0, 0, 0, 0], [0, 0, 0, 0],
[0, -1, 0, 0.5], [0, -1, -1, 0.5]]],
dtype=tf.float32)
images_r = tf.expand_dims(images_r, 3)
images_g = tf.constant([[[0, 0, 0.5, 0.5], [0, 0, 0.5, 0],
[-1, -1, 0, 0.5], [-1, -1, -1, 0.5]]],
dtype=tf.float32)
images_g = tf.expand_dims(images_g, 3)
images_b = tf.constant([[[-1, 0.5, -1, 0], [0.5, 0, 0, 0.5],
[0, -1, 0, 0.5], [0, -1, -1, 0.5]]],
dtype=tf.float32)
images_b = tf.expand_dims(images_b, 3)
images = tf.concat([images_r, images_g, images_b], 3)
return images
def expectedBoxesAfterLeftRightFlip(self):
boxes = tf.constant([[0.0, 0.0, 0.75, 0.75], [0.25, 0.0, 0.75, 0.5]],
dtype=tf.float32)
return boxes
def expectedBoxesAfterUpDownFlip(self):
boxes = tf.constant([[0.25, 0.25, 1.0, 1.0], [0.25, 0.5, 0.75, 1.0]],
dtype=tf.float32)
return boxes
def expectedBoxesAfterRot90(self):
boxes = tf.constant(
[[0.0, 0.0, 0.75, 0.75], [0.0, 0.25, 0.5, 0.75]], dtype=tf.float32)
return boxes
def expectedMasksAfterLeftRightFlip(self):
mask = np.array([
[[0.0, 0.0, 255.0],
[0.0, 0.0, 255.0],
[0.0, 0.0, 255.0]],
[[0.0, 255.0, 255.0],
[0.0, 255.0, 255.0],
[0.0, 255.0, 255.0]]])
return tf.constant(mask, dtype=tf.float32)
def expectedMasksAfterUpDownFlip(self):
mask = np.array([
[[255.0, 0.0, 0.0],
[255.0, 0.0, 0.0],
[255.0, 0.0, 0.0]],
[[255.0, 255.0, 0.0],
[255.0, 255.0, 0.0],
[255.0, 255.0, 0.0]]])
return tf.constant(mask, dtype=tf.float32)
def expectedMasksAfterRot90(self):
mask = np.array([
[[0.0, 0.0, 0.0],
[0.0, 0.0, 0.0],
[255.0, 255.0, 255.0]],
[[0.0, 0.0, 0.0],
[255.0, 255.0, 255.0],
[255.0, 255.0, 255.0]]])
return tf.constant(mask, dtype=tf.float32)
def expectedLabelScoresAfterThresholding(self):
return tf.constant([1.0], dtype=tf.float32)
def expectedBoxesAfterThresholding(self):
return tf.constant([[0.0, 0.25, 0.75, 1.0]], dtype=tf.float32)
def expectedLabelsAfterThresholding(self):
return tf.constant([1], dtype=tf.float32)
def expectedMultiClassScoresAfterThresholding(self):
return tf.constant([[1.0, 0.0]], dtype=tf.float32)
def expectedMasksAfterThresholding(self):
mask = np.array([
[[255.0, 0.0, 0.0],
[255.0, 0.0, 0.0],
[255.0, 0.0, 0.0]]])
return tf.constant(mask, dtype=tf.float32)
def expectedKeypointsAfterThresholding(self):
keypoints = np.array([
[[0.1, 0.1], [0.2, 0.2], [0.3, 0.3]]
])
return tf.constant(keypoints, dtype=tf.float32)
def expectedLabelScoresAfterThresholdingWithMissingScore(self):
return tf.constant([np.nan], dtype=tf.float32)
def expectedBoxesAfterThresholdingWithMissingScore(self):
return tf.constant([[0.25, 0.5, 0.75, 1]], dtype=tf.float32)
def expectedLabelsAfterThresholdingWithMissingScore(self):
return tf.constant([2], dtype=tf.float32)
  def testRgbToGrayscale(self):
    """_rgb_to_grayscale must match tf.image.rgb_to_grayscale exactly."""
    images = self.createTestImages()
    grayscale_images = preprocessor._rgb_to_grayscale(images)
    expected_images = tf.image.rgb_to_grayscale(images)
    with self.test_session() as sess:
      (grayscale_images, expected_images) = sess.run(
          [grayscale_images, expected_images])
      self.assertAllEqual(expected_images, grayscale_images)
  def testNormalizeImage(self):
    """normalize_image should map uint8 [0, 256) values into [-1, 1]."""
    preprocess_options = [(preprocessor.normalize_image, {
        'original_minval': 0,
        'original_maxval': 256,
        'target_minval': -1,
        'target_maxval': 1
    })]
    images = self.createTestImages()
    tensor_dict = {fields.InputDataFields.image: images}
    tensor_dict = preprocessor.preprocess(tensor_dict, preprocess_options)
    images = tensor_dict[fields.InputDataFields.image]
    images_expected = self.expectedImagesAfterNormalization()
    with self.test_session() as sess:
      (images_, images_expected_) = sess.run(
          [images, images_expected])
      images_shape_ = images_.shape
      images_expected_shape_ = images_expected_.shape
      # Normalization must preserve the image shape as well as the values.
      expected_shape = [1, 4, 4, 3]
      self.assertAllEqual(images_expected_shape_, images_shape_)
      self.assertAllEqual(images_shape_, expected_shape)
      self.assertAllClose(images_, images_expected_)
  def testRetainBoxesAboveThreshold(self):
    """Boxes with score <= threshold are dropped along with labels/scores."""
    boxes = self.createTestBoxes()
    labels = self.createTestLabels()
    label_scores = self.createTestLabelScores()
    # Scores are [1.0, 0.5]; threshold 0.6 keeps only the first box.
    (retained_boxes, retained_labels,
     retained_label_scores) = preprocessor.retain_boxes_above_threshold(
         boxes, labels, label_scores, threshold=0.6)
    with self.test_session() as sess:
      (retained_boxes_, retained_labels_, retained_label_scores_,
       expected_retained_boxes_, expected_retained_labels_,
       expected_retained_label_scores_) = sess.run([
           retained_boxes, retained_labels, retained_label_scores,
           self.expectedBoxesAfterThresholding(),
           self.expectedLabelsAfterThresholding(),
           self.expectedLabelScoresAfterThresholding()])
      self.assertAllClose(
          retained_boxes_, expected_retained_boxes_)
      self.assertAllClose(
          retained_labels_, expected_retained_labels_)
      self.assertAllClose(
          retained_label_scores_, expected_retained_label_scores_)
def testRetainBoxesAboveThresholdWithMultiClassScores(self):
boxes = self.createTestBoxes()
labels = self.createTestLabels()
label_scores = self.createTestLabelScores()
multiclass_scores = self.createTestMultiClassScores()
(_, _, _,
retained_multiclass_scores) = preprocessor.retain_boxes_above_threshold(
boxes,
labels,
label_scores,
multiclass_scores=multiclass_scores,
threshold=0.6)
with self.test_session() as sess:
(retained_multiclass_scores_,
expected_retained_multiclass_scores_) = sess.run([
retained_multiclass_scores,
self.expectedMultiClassScoresAfterThresholding()
])
self.assertAllClose(retained_multiclass_scores_,
expected_retained_multiclass_scores_)
def testRetainBoxesAboveThresholdWithMasks(self):
boxes = self.createTestBoxes()
labels = self.createTestLabels()
label_scores = self.createTestLabelScores()
masks = self.createTestMasks()
_, _, _, retained_masks = preprocessor.retain_boxes_above_threshold(
boxes, labels, label_scores, masks, threshold=0.6)
with self.test_session() as sess:
retained_masks_, expected_retained_masks_ = sess.run([
retained_masks, self.expectedMasksAfterThresholding()])
self.assertAllClose(
retained_masks_, expected_retained_masks_)
def testRetainBoxesAboveThresholdWithKeypoints(self):
boxes = self.createTestBoxes()
labels = self.createTestLabels()
label_scores = self.createTestLabelScores()
keypoints = self.createTestKeypoints()
(_, _, _, retained_keypoints) = preprocessor.retain_boxes_above_threshold(
boxes, labels, label_scores, keypoints=keypoints, threshold=0.6)
with self.test_session() as sess:
(retained_keypoints_,
expected_retained_keypoints_) = sess.run([
retained_keypoints,
self.expectedKeypointsAfterThresholding()])
self.assertAllClose(
retained_keypoints_, expected_retained_keypoints_)
def testRetainBoxesAboveThresholdWithMissingScore(self):
boxes = self.createTestBoxes()
labels = self.createTestLabels()
label_scores = self.createTestLabelScoresWithMissingScore()
(retained_boxes, retained_labels,
retained_label_scores) = preprocessor.retain_boxes_above_threshold(
boxes, labels, label_scores, threshold=0.6)
with self.test_session() as sess:
(retained_boxes_, retained_labels_, retained_label_scores_,
expected_retained_boxes_, expected_retained_labels_,
expected_retained_label_scores_) = sess.run([
retained_boxes, retained_labels, retained_label_scores,
self.expectedBoxesAfterThresholdingWithMissingScore(),
self.expectedLabelsAfterThresholdingWithMissingScore(),
self.expectedLabelScoresAfterThresholdingWithMissingScore()])
self.assertAllClose(
retained_boxes_, expected_retained_boxes_)
self.assertAllClose(
retained_labels_, expected_retained_labels_)
self.assertAllClose(
retained_label_scores_, expected_retained_label_scores_)
  def testFlipBoxesLeftRight(self):
    """_flip_boxes_left_right should mirror box x-coordinates about 0.5."""
    boxes = self.createTestBoxes()
    flipped_boxes = preprocessor._flip_boxes_left_right(boxes)
    expected_boxes = self.expectedBoxesAfterLeftRightFlip()
    with self.test_session() as sess:
      flipped_boxes, expected_boxes = sess.run([flipped_boxes, expected_boxes])
      self.assertAllEqual(flipped_boxes.flatten(), expected_boxes.flatten())
def testFlipBoxesUpDown(self):
boxes = self.createTestBoxes()
flipped_boxes = preprocessor._flip_boxes_up_down(boxes)
expected_boxes = self.expectedBoxesAfterUpDownFlip()
with self.test_session() as sess:
flipped_boxes, expected_boxes = sess.run([flipped_boxes, expected_boxes])
self.assertAllEqual(flipped_boxes.flatten(), expected_boxes.flatten())
def testRot90Boxes(self):
boxes = self.createTestBoxes()
rotated_boxes = preprocessor._rot90_boxes(boxes)
expected_boxes = self.expectedBoxesAfterRot90()
with self.test_session() as sess:
rotated_boxes, expected_boxes = sess.run([rotated_boxes, expected_boxes])
self.assertAllEqual(rotated_boxes.flatten(), expected_boxes.flatten())
def testFlipMasksLeftRight(self):
test_mask = self.createTestMasks()
flipped_mask = preprocessor._flip_masks_left_right(test_mask)
expected_mask = self.expectedMasksAfterLeftRightFlip()
with self.test_session() as sess:
flipped_mask, expected_mask = sess.run([flipped_mask, expected_mask])
self.assertAllEqual(flipped_mask.flatten(), expected_mask.flatten())
def testFlipMasksUpDown(self):
test_mask = self.createTestMasks()
flipped_mask = preprocessor._flip_masks_up_down(test_mask)
expected_mask = self.expectedMasksAfterUpDownFlip()
with self.test_session() as sess:
flipped_mask, expected_mask = sess.run([flipped_mask, expected_mask])
self.assertAllEqual(flipped_mask.flatten(), expected_mask.flatten())
def testRot90Masks(self):
test_mask = self.createTestMasks()
rotated_mask = preprocessor._rot90_masks(test_mask)
expected_mask = self.expectedMasksAfterRot90()
with self.test_session() as sess:
rotated_mask, expected_mask = sess.run([rotated_mask, expected_mask])
self.assertAllEqual(rotated_mask.flatten(), expected_mask.flatten())
def _testPreprocessorCache(self,
                           preprocess_options,
                           test_boxes=False,
                           test_masks=False,
                           test_keypoints=False,
                           num_runs=4):
  """Applies `preprocess_options` several times with one shared cache and
  asserts every run reproduces the previous run's outputs exactly.

  Args:
    preprocess_options: list of (function, kwargs) preprocessing steps.
    test_boxes: if True, also feed groundtruth boxes and classes, and
      compare the boxes output across runs.
    test_masks: if True, also feed and compare instance masks.
    test_keypoints: if True, also feed and compare keypoints.
    num_runs: number of times the pipeline is applied with the same cache.
  """
  cache = preprocessor_cache.PreprocessorCache()
  images = self.createTestImages()
  boxes = self.createTestBoxes()
  classes = self.createTestLabels()
  masks = self.createTestMasks()
  keypoints = self.createTestKeypoints()
  preprocessor_arg_map = preprocessor.get_default_func_arg_map(
      include_instance_masks=test_masks, include_keypoints=test_keypoints)
  out = []
  for i in range(num_runs):
    tensor_dict = {
        fields.InputDataFields.image: images,
    }
    # num_outputs counts how many tensors get fetched per run below: the
    # image always, plus one each for boxes, masks, keypoints when enabled.
    # (Classes are fed alongside boxes but are not fetched/compared.)
    num_outputs = 1
    if test_boxes:
      tensor_dict[fields.InputDataFields.groundtruth_boxes] = boxes
      tensor_dict[fields.InputDataFields.groundtruth_classes] = classes
      num_outputs += 1
    if test_masks:
      tensor_dict[fields.InputDataFields.groundtruth_instance_masks] = masks
      num_outputs += 1
    if test_keypoints:
      tensor_dict[fields.InputDataFields.groundtruth_keypoints] = keypoints
      num_outputs += 1
    # Passing the same `cache` makes random decisions replay across runs.
    out.append(preprocessor.preprocess(
        tensor_dict, preprocess_options, preprocessor_arg_map, cache))
  with self.test_session() as sess:
    # Fetch order must match the num_outputs accounting above: for each run,
    # image first, then (optionally) boxes, masks, keypoints.
    to_run = []
    for i in range(num_runs):
      to_run.append(out[i][fields.InputDataFields.image])
      if test_boxes:
        to_run.append(out[i][fields.InputDataFields.groundtruth_boxes])
      if test_masks:
        to_run.append(
            out[i][fields.InputDataFields.groundtruth_instance_masks])
      if test_keypoints:
        to_run.append(out[i][fields.InputDataFields.groundtruth_keypoints])
    out_array = sess.run(to_run)
    # Each fetched tensor must equal the corresponding tensor from the
    # previous run, i.e. the entry num_outputs positions earlier.
    for i in range(num_outputs, len(out_array)):
      self.assertAllClose(out_array[i], out_array[i - num_outputs])
def testRandomHorizontalFlip(self):
  """Output must equal either the original or the left-right flipped tensors."""
  options = [(preprocessor.random_horizontal_flip, {})]
  orig_images = self.expectedImagesAfterNormalization()
  orig_boxes = self.createTestBoxes()
  flip_images = self.expectedImagesAfterLeftRightFlip()
  flip_boxes = self.expectedBoxesAfterLeftRightFlip()
  processed = preprocessor.preprocess(
      {fields.InputDataFields.image: orig_images,
       fields.InputDataFields.groundtruth_boxes: orig_boxes},
      options)
  out_images = processed[fields.InputDataFields.image]
  out_boxes = processed[fields.InputDataFields.groundtruth_boxes]
  # If the output equals either candidate, one factor of each product is
  # zero, so the product itself must be all zeros.
  boxes_mismatch = tf.multiply(
      tf.squared_difference(out_boxes, flip_boxes),
      tf.squared_difference(out_boxes, orig_boxes))
  images_mismatch = tf.multiply(
      tf.squared_difference(out_images, flip_images),
      tf.squared_difference(out_images, orig_images))
  with self.test_session() as sess:
    (images_mismatch_, images_zero_, boxes_mismatch_, boxes_zero_) = sess.run(
        [images_mismatch, tf.zeros_like(images_mismatch),
         boxes_mismatch, tf.zeros_like(boxes_mismatch)])
  self.assertAllClose(boxes_mismatch_, boxes_zero_)
  self.assertAllClose(images_mismatch_, images_zero_)
def testRandomHorizontalFlipWithEmptyBoxes(self):
  """Flipping with zero boxes must still work and keep boxes empty."""
  options = [(preprocessor.random_horizontal_flip, {})]
  orig_images = self.expectedImagesAfterNormalization()
  empty_boxes = self.createEmptyTestBoxes()
  flip_images = self.expectedImagesAfterLeftRightFlip()
  reference_boxes = self.createEmptyTestBoxes()
  processed = preprocessor.preprocess(
      {fields.InputDataFields.image: orig_images,
       fields.InputDataFields.groundtruth_boxes: empty_boxes},
      options)
  out_images = processed[fields.InputDataFields.image]
  out_boxes = processed[fields.InputDataFields.groundtruth_boxes]
  # The image must equal either the original or the flipped candidate.
  images_mismatch = tf.multiply(
      tf.squared_difference(out_images, flip_images),
      tf.squared_difference(out_images, orig_images))
  with self.test_session() as sess:
    (images_mismatch_, images_zero_, out_boxes_, reference_boxes_) = sess.run(
        [images_mismatch, tf.zeros_like(images_mismatch),
         out_boxes, reference_boxes])
  self.assertAllClose(out_boxes_, reference_boxes_)
  self.assertAllClose(images_mismatch_, images_zero_)
def testRandomHorizontalFlipWithCache(self):
  """Cached horizontal flip (with keypoint permutation) must replay identically."""
  permutation = self.createKeypointFlipPermutation()
  options = [(preprocessor.random_horizontal_flip,
              {'keypoint_flip_permutation': permutation})]
  self._testPreprocessorCache(
      options, test_boxes=True, test_masks=True, test_keypoints=True)
def testRunRandomHorizontalFlipWithMaskAndKeypoints(self):
  """random_horizontal_flip runs end-to-end with masks and keypoints.

  Only checks that the pipeline executes and yields tensors for boxes,
  masks and keypoints; exact flipped values are covered by the
  deterministic flip tests.
  """
  image_height = 3
  image_width = 3
  images = tf.random_uniform([1, image_height, image_width, 3])
  boxes = self.createTestBoxes()
  masks = self.createTestMasks()
  keypoints = self.createTestKeypoints()
  keypoint_flip_permutation = self.createKeypointFlipPermutation()
  tensor_dict = {
      fields.InputDataFields.image: images,
      fields.InputDataFields.groundtruth_boxes: boxes,
      fields.InputDataFields.groundtruth_instance_masks: masks,
      fields.InputDataFields.groundtruth_keypoints: keypoints
  }
  # Keypoints require an explicit permutation so paired (left/right)
  # keypoints swap when the image is mirrored.  (The original test first
  # assigned preprocess_options without the permutation and immediately
  # overwrote it; that dead assignment is removed here.)
  preprocess_options = [
      (preprocessor.random_horizontal_flip,
       {'keypoint_flip_permutation': keypoint_flip_permutation})]
  preprocessor_arg_map = preprocessor.get_default_func_arg_map(
      include_instance_masks=True, include_keypoints=True)
  tensor_dict = preprocessor.preprocess(
      tensor_dict, preprocess_options, func_arg_map=preprocessor_arg_map)
  boxes = tensor_dict[fields.InputDataFields.groundtruth_boxes]
  masks = tensor_dict[fields.InputDataFields.groundtruth_instance_masks]
  keypoints = tensor_dict[fields.InputDataFields.groundtruth_keypoints]
  with self.test_session() as sess:
    boxes, masks, keypoints = sess.run([boxes, masks, keypoints])
    self.assertIsNotNone(boxes)
    self.assertIsNotNone(masks)
    self.assertIsNotNone(keypoints)
def testRandomVerticalFlip(self):
  """Output must equal either the original or the up-down flipped tensors."""
  options = [(preprocessor.random_vertical_flip, {})]
  orig_images = self.expectedImagesAfterNormalization()
  orig_boxes = self.createTestBoxes()
  flip_images = self.expectedImagesAfterUpDownFlip()
  flip_boxes = self.expectedBoxesAfterUpDownFlip()
  processed = preprocessor.preprocess(
      {fields.InputDataFields.image: orig_images,
       fields.InputDataFields.groundtruth_boxes: orig_boxes},
      options)
  out_images = processed[fields.InputDataFields.image]
  out_boxes = processed[fields.InputDataFields.groundtruth_boxes]
  # If the output equals either candidate, one factor of each product is
  # zero, so the product itself must be all zeros.
  boxes_mismatch = tf.multiply(
      tf.squared_difference(out_boxes, flip_boxes),
      tf.squared_difference(out_boxes, orig_boxes))
  images_mismatch = tf.multiply(
      tf.squared_difference(out_images, flip_images),
      tf.squared_difference(out_images, orig_images))
  with self.test_session() as sess:
    (images_mismatch_, images_zero_, boxes_mismatch_, boxes_zero_) = sess.run(
        [images_mismatch, tf.zeros_like(images_mismatch),
         boxes_mismatch, tf.zeros_like(boxes_mismatch)])
  self.assertAllClose(boxes_mismatch_, boxes_zero_)
  self.assertAllClose(images_mismatch_, images_zero_)
def testRandomVerticalFlipWithEmptyBoxes(self):
  """Vertical flip with zero boxes must still work and keep boxes empty."""
  options = [(preprocessor.random_vertical_flip, {})]
  orig_images = self.expectedImagesAfterNormalization()
  empty_boxes = self.createEmptyTestBoxes()
  flip_images = self.expectedImagesAfterUpDownFlip()
  reference_boxes = self.createEmptyTestBoxes()
  processed = preprocessor.preprocess(
      {fields.InputDataFields.image: orig_images,
       fields.InputDataFields.groundtruth_boxes: empty_boxes},
      options)
  out_images = processed[fields.InputDataFields.image]
  out_boxes = processed[fields.InputDataFields.groundtruth_boxes]
  # The image must equal either the original or the flipped candidate.
  images_mismatch = tf.multiply(
      tf.squared_difference(out_images, flip_images),
      tf.squared_difference(out_images, orig_images))
  with self.test_session() as sess:
    (images_mismatch_, images_zero_, out_boxes_, reference_boxes_) = sess.run(
        [images_mismatch, tf.zeros_like(images_mismatch),
         out_boxes, reference_boxes])
  self.assertAllClose(out_boxes_, reference_boxes_)
  self.assertAllClose(images_mismatch_, images_zero_)
def testRandomVerticalFlipWithCache(self):
  """Cached vertical flip (with keypoint permutation) must replay identically."""
  permutation = self.createKeypointFlipPermutation()
  options = [(preprocessor.random_vertical_flip,
              {'keypoint_flip_permutation': permutation})]
  self._testPreprocessorCache(
      options, test_boxes=True, test_masks=True, test_keypoints=True)
def testRunRandomVerticalFlipWithMaskAndKeypoints(self):
  """random_vertical_flip runs end-to-end with masks and keypoints.

  Only checks that the pipeline executes and yields tensors for boxes,
  masks and keypoints; exact flipped values are covered by the
  deterministic flip tests.
  """
  image_height = 3
  image_width = 3
  images = tf.random_uniform([1, image_height, image_width, 3])
  boxes = self.createTestBoxes()
  masks = self.createTestMasks()
  keypoints = self.createTestKeypoints()
  keypoint_flip_permutation = self.createKeypointFlipPermutation()
  tensor_dict = {
      fields.InputDataFields.image: images,
      fields.InputDataFields.groundtruth_boxes: boxes,
      fields.InputDataFields.groundtruth_instance_masks: masks,
      fields.InputDataFields.groundtruth_keypoints: keypoints
  }
  # Keypoints require an explicit permutation so paired (top/bottom)
  # keypoints swap when the image is flipped.  (The original test first
  # assigned preprocess_options without the permutation and immediately
  # overwrote it; that dead assignment is removed here.)
  preprocess_options = [
      (preprocessor.random_vertical_flip,
       {'keypoint_flip_permutation': keypoint_flip_permutation})]
  preprocessor_arg_map = preprocessor.get_default_func_arg_map(
      include_instance_masks=True, include_keypoints=True)
  tensor_dict = preprocessor.preprocess(
      tensor_dict, preprocess_options, func_arg_map=preprocessor_arg_map)
  boxes = tensor_dict[fields.InputDataFields.groundtruth_boxes]
  masks = tensor_dict[fields.InputDataFields.groundtruth_instance_masks]
  keypoints = tensor_dict[fields.InputDataFields.groundtruth_keypoints]
  with self.test_session() as sess:
    boxes, masks, keypoints = sess.run([boxes, masks, keypoints])
    self.assertIsNotNone(boxes)
    self.assertIsNotNone(masks)
    self.assertIsNotNone(keypoints)
def testRandomRotation90(self):
  """Output must equal either the original or the 90-degree rotated tensors."""
  options = [(preprocessor.random_rotation90, {})]
  orig_images = self.expectedImagesAfterNormalization()
  orig_boxes = self.createTestBoxes()
  rot_images = self.expectedImagesAfterRot90()
  rot_boxes = self.expectedBoxesAfterRot90()
  processed = preprocessor.preprocess(
      {fields.InputDataFields.image: orig_images,
       fields.InputDataFields.groundtruth_boxes: orig_boxes},
      options)
  out_images = processed[fields.InputDataFields.image]
  out_boxes = processed[fields.InputDataFields.groundtruth_boxes]
  # If the output equals either candidate, one factor of each product is
  # zero, so the product itself must be all zeros.
  boxes_mismatch = tf.multiply(
      tf.squared_difference(out_boxes, rot_boxes),
      tf.squared_difference(out_boxes, orig_boxes))
  images_mismatch = tf.multiply(
      tf.squared_difference(out_images, rot_images),
      tf.squared_difference(out_images, orig_images))
  with self.test_session() as sess:
    (images_mismatch_, images_zero_, boxes_mismatch_, boxes_zero_) = sess.run(
        [images_mismatch, tf.zeros_like(images_mismatch),
         boxes_mismatch, tf.zeros_like(boxes_mismatch)])
  self.assertAllClose(boxes_mismatch_, boxes_zero_)
  self.assertAllClose(images_mismatch_, images_zero_)
def testRandomRotation90WithEmptyBoxes(self):
  """Rotation with zero boxes must still work and keep boxes empty."""
  options = [(preprocessor.random_rotation90, {})]
  orig_images = self.expectedImagesAfterNormalization()
  empty_boxes = self.createEmptyTestBoxes()
  rot_images = self.expectedImagesAfterRot90()
  reference_boxes = self.createEmptyTestBoxes()
  processed = preprocessor.preprocess(
      {fields.InputDataFields.image: orig_images,
       fields.InputDataFields.groundtruth_boxes: empty_boxes},
      options)
  out_images = processed[fields.InputDataFields.image]
  out_boxes = processed[fields.InputDataFields.groundtruth_boxes]
  # The image must equal either the original or the rotated candidate.
  images_mismatch = tf.multiply(
      tf.squared_difference(out_images, rot_images),
      tf.squared_difference(out_images, orig_images))
  with self.test_session() as sess:
    (images_mismatch_, images_zero_, out_boxes_, reference_boxes_) = sess.run(
        [images_mismatch, tf.zeros_like(images_mismatch),
         out_boxes, reference_boxes])
  self.assertAllClose(out_boxes_, reference_boxes_)
  self.assertAllClose(images_mismatch_, images_zero_)
def testRandomRotation90WithCache(self):
  """Cached rotation90 must replay identically across runs."""
  options = [(preprocessor.random_rotation90, {})]
  self._testPreprocessorCache(
      options, test_boxes=True, test_masks=True, test_keypoints=True)
def testRunRandomRotation90WithMaskAndKeypoints(self):
  """random_rotation90 runs end-to-end with masks and keypoints.

  Only checks that the pipeline executes and yields tensors for boxes,
  masks and keypoints; exact rotated values are covered by the
  deterministic rot90 tests.
  """
  preprocess_options = [(preprocessor.random_rotation90, {})]
  image_height = 3
  image_width = 3
  images = tf.random_uniform([1, image_height, image_width, 3])
  boxes = self.createTestBoxes()
  masks = self.createTestMasks()
  keypoints = self.createTestKeypoints()
  tensor_dict = {
      fields.InputDataFields.image: images,
      fields.InputDataFields.groundtruth_boxes: boxes,
      fields.InputDataFields.groundtruth_instance_masks: masks,
      fields.InputDataFields.groundtruth_keypoints: keypoints
  }
  preprocessor_arg_map = preprocessor.get_default_func_arg_map(
      include_instance_masks=True, include_keypoints=True)
  tensor_dict = preprocessor.preprocess(
      tensor_dict, preprocess_options, func_arg_map=preprocessor_arg_map)
  boxes = tensor_dict[fields.InputDataFields.groundtruth_boxes]
  masks = tensor_dict[fields.InputDataFields.groundtruth_instance_masks]
  keypoints = tensor_dict[fields.InputDataFields.groundtruth_keypoints]
  with self.test_session() as sess:
    boxes, masks, keypoints = sess.run([boxes, masks, keypoints])
    # assertIsNotNone gives a clearer failure message than
    # assertTrue(x is not None).
    self.assertIsNotNone(boxes)
    self.assertIsNotNone(masks)
    self.assertIsNotNone(keypoints)
def testRandomPixelValueScale(self):
  """Scaled pixels must stay within +/-10% of the normalized originals."""
  options = [
      (preprocessor.normalize_image,
       {'original_minval': 0, 'original_maxval': 255,
        'target_minval': 0, 'target_maxval': 1}),
      (preprocessor.random_pixel_value_scale, {}),
  ]
  images = self.createTestImages()
  processed = preprocessor.preprocess(
      {fields.InputDataFields.image: images}, options)
  # Bounds are derived from the raw images, rescaled to the [0, 1] range.
  lower_bound = tf.to_float(images) * 0.9 / 255.0
  upper_bound = tf.to_float(images) * 1.1 / 255.0
  scaled = processed[fields.InputDataFields.image]
  above_lower = tf.greater_equal(scaled, lower_bound)
  below_upper = tf.less_equal(scaled, upper_bound)
  all_true = tf.fill([1, 4, 4, 3], True)
  with self.test_session() as sess:
    above_lower_, below_upper_, all_true_ = sess.run(
        [above_lower, below_upper, all_true])
  self.assertAllClose(above_lower_, all_true_)
  self.assertAllClose(below_upper_, all_true_)
def testRandomPixelValueScaleWithCache(self):
  """Cached pixel-value scaling must replay identically across runs."""
  options = [
      (preprocessor.normalize_image,
       {'original_minval': 0, 'original_maxval': 255,
        'target_minval': 0, 'target_maxval': 1}),
      (preprocessor.random_pixel_value_scale, {}),
  ]
  self._testPreprocessorCache(
      options, test_boxes=True, test_masks=False, test_keypoints=False)
def testRandomImageScale(self):
  """Scaled image height/width must lie within [0.5x, 2x] of the originals."""
  options = [(preprocessor.random_image_scale, {})]
  original = self.createTestImages()
  processed = preprocessor.preprocess(
      {fields.InputDataFields.image: original}, options)
  scaled = processed[fields.InputDataFields.image]
  with self.test_session() as sess:
    original_shape, scaled_shape = sess.run(
        [tf.shape(original), tf.shape(scaled)])
  # Axes 1 and 2 are the spatial dimensions of the NHWC tensor.
  for axis in (1, 2):
    self.assertTrue(original_shape[axis] * 0.5 <= scaled_shape[axis])
    self.assertTrue(original_shape[axis] * 2.0 >= scaled_shape[axis])
def testRandomImageScaleWithCache(self):
  """Cached image scaling must replay identically across runs."""
  options = [(preprocessor.random_image_scale, {})]
  self._testPreprocessorCache(
      options, test_boxes=False, test_masks=False, test_keypoints=False)
def testRandomRGBtoGray(self):
  """random_rgb_to_gray either leaves the image unchanged or grays it.

  For each channel c, diff1 is zero when the output equals the input
  (no conversion happened), and diff2 is zero when two output channels
  are equal (the image was converted to gray, so r == g == b).  Exactly
  one of the two cases must hold, so the product diff1 * diff2 is always
  all zeros.
  """
  preprocess_options = [(preprocessor.random_rgb_to_gray, {})]
  images_original = self.createTestImages()
  tensor_dict = {fields.InputDataFields.image: images_original}
  tensor_dict = preprocessor.preprocess(tensor_dict, preprocess_options)
  images_gray = tensor_dict[fields.InputDataFields.image]
  images_gray_r, images_gray_g, images_gray_b = tf.split(
      value=images_gray, num_or_size_splits=3, axis=3)
  images_r, images_g, images_b = tf.split(
      value=images_original, num_or_size_splits=3, axis=3)
  # Red: unchanged (out_r == in_r) or gray (out_r == out_g).
  images_r_diff1 = tf.squared_difference(tf.to_float(images_r),
                                         tf.to_float(images_gray_r))
  images_r_diff2 = tf.squared_difference(tf.to_float(images_gray_r),
                                         tf.to_float(images_gray_g))
  images_r_diff = tf.multiply(images_r_diff1, images_r_diff2)
  # Green: unchanged (out_g == in_g) or gray (out_g == out_b).
  images_g_diff1 = tf.squared_difference(tf.to_float(images_g),
                                         tf.to_float(images_gray_g))
  images_g_diff2 = tf.squared_difference(tf.to_float(images_gray_g),
                                         tf.to_float(images_gray_b))
  images_g_diff = tf.multiply(images_g_diff1, images_g_diff2)
  # Blue: unchanged (out_b == in_b) or gray (out_b == out_r).
  images_b_diff1 = tf.squared_difference(tf.to_float(images_b),
                                         tf.to_float(images_gray_b))
  images_b_diff2 = tf.squared_difference(tf.to_float(images_gray_b),
                                         tf.to_float(images_gray_r))
  images_b_diff = tf.multiply(images_b_diff1, images_b_diff2)
  image_zero1 = tf.constant(0, dtype=tf.float32, shape=[1, 4, 4, 1])
  with self.test_session() as sess:
    (images_r_diff_, images_g_diff_, images_b_diff_, image_zero1_) = sess.run(
        [images_r_diff, images_g_diff, images_b_diff, image_zero1])
    self.assertAllClose(images_r_diff_, image_zero1_)
    self.assertAllClose(images_g_diff_, image_zero1_)
    self.assertAllClose(images_b_diff_, image_zero1_)
def testRandomRGBtoGrayWithCache(self):
  """Cached RGB-to-gray conversion must replay identically across runs."""
  options = [(preprocessor.random_rgb_to_gray, {'probability': 0.5})]
  self._testPreprocessorCache(
      options, test_boxes=False, test_masks=False, test_keypoints=False)
def testRandomAdjustBrightness(self):
  """Brightness adjustment must preserve the image shape."""
  options = [
      (preprocessor.normalize_image,
       {'original_minval': 0, 'original_maxval': 255,
        'target_minval': 0, 'target_maxval': 1}),
      (preprocessor.random_adjust_brightness, {}),
  ]
  original = self.createTestImages()
  processed = preprocessor.preprocess(
      {fields.InputDataFields.image: original}, options)
  adjusted = processed[fields.InputDataFields.image]
  with self.test_session() as sess:
    original_shape, adjusted_shape = sess.run(
        [tf.shape(original), tf.shape(adjusted)])
  self.assertAllEqual(original_shape, adjusted_shape)
def testRandomAdjustBrightnessWithCache(self):
  """Cached brightness adjustment must replay identically across runs."""
  options = [
      (preprocessor.normalize_image,
       {'original_minval': 0, 'original_maxval': 255,
        'target_minval': 0, 'target_maxval': 1}),
      (preprocessor.random_adjust_brightness, {}),
  ]
  self._testPreprocessorCache(
      options, test_boxes=False, test_masks=False, test_keypoints=False)
def testRandomAdjustContrast(self):
  """Contrast adjustment must preserve the image shape."""
  options = [
      (preprocessor.normalize_image,
       {'original_minval': 0, 'original_maxval': 255,
        'target_minval': 0, 'target_maxval': 1}),
      (preprocessor.random_adjust_contrast, {}),
  ]
  original = self.createTestImages()
  processed = preprocessor.preprocess(
      {fields.InputDataFields.image: original}, options)
  adjusted = processed[fields.InputDataFields.image]
  with self.test_session() as sess:
    original_shape, adjusted_shape = sess.run(
        [tf.shape(original), tf.shape(adjusted)])
  self.assertAllEqual(original_shape, adjusted_shape)
def testRandomAdjustContrastWithCache(self):
  """Cached contrast adjustment must replay identically across runs."""
  options = [
      (preprocessor.normalize_image,
       {'original_minval': 0, 'original_maxval': 255,
        'target_minval': 0, 'target_maxval': 1}),
      (preprocessor.random_adjust_contrast, {}),
  ]
  self._testPreprocessorCache(
      options, test_boxes=False, test_masks=False, test_keypoints=False)
def testRandomAdjustHue(self):
  """Hue adjustment must preserve the image shape."""
  options = [
      (preprocessor.normalize_image,
       {'original_minval': 0, 'original_maxval': 255,
        'target_minval': 0, 'target_maxval': 1}),
      (preprocessor.random_adjust_hue, {}),
  ]
  original = self.createTestImages()
  processed = preprocessor.preprocess(
      {fields.InputDataFields.image: original}, options)
  adjusted = processed[fields.InputDataFields.image]
  with self.test_session() as sess:
    original_shape, adjusted_shape = sess.run(
        [tf.shape(original), tf.shape(adjusted)])
  self.assertAllEqual(original_shape, adjusted_shape)
def testRandomAdjustHueWithCache(self):
  """Cached hue adjustment must replay identically across runs."""
  options = [
      (preprocessor.normalize_image,
       {'original_minval': 0, 'original_maxval': 255,
        'target_minval': 0, 'target_maxval': 1}),
      (preprocessor.random_adjust_hue, {}),
  ]
  self._testPreprocessorCache(
      options, test_boxes=False, test_masks=False, test_keypoints=False)
def testRandomDistortColor(self):
  """Color distortion must preserve the image shape."""
  options = [
      (preprocessor.normalize_image,
       {'original_minval': 0, 'original_maxval': 255,
        'target_minval': 0, 'target_maxval': 1}),
      (preprocessor.random_distort_color, {}),
  ]
  original = self.createTestImages()
  processed = preprocessor.preprocess(
      {fields.InputDataFields.image: original}, options)
  distorted = processed[fields.InputDataFields.image]
  with self.test_session() as sess:
    original_shape, distorted_shape = sess.run(
        [tf.shape(original), tf.shape(distorted)])
  self.assertAllEqual(original_shape, distorted_shape)
def testRandomDistortColorWithCache(self):
  """Cached color distortion must replay identically across runs."""
  options = [
      (preprocessor.normalize_image,
       {'original_minval': 0, 'original_maxval': 255,
        'target_minval': 0, 'target_maxval': 1}),
      (preprocessor.random_distort_color, {}),
  ]
  self._testPreprocessorCache(
      options, test_boxes=False, test_masks=False, test_keypoints=False)
def testRandomJitterBoxes(self):
  """Jittering boxes must preserve the boxes tensor shape."""
  options = [(preprocessor.random_jitter_boxes, {})]
  boxes = self.createTestBoxes()
  processed = preprocessor.preprocess(
      {fields.InputDataFields.groundtruth_boxes: boxes}, options)
  jittered = processed[fields.InputDataFields.groundtruth_boxes]
  with self.test_session() as sess:
    expected_shape, actual_shape = sess.run(
        [tf.shape(boxes), tf.shape(jittered)])
  self.assertAllEqual(expected_shape, actual_shape)
def testRandomCropImage(self):
  """Random crop keeps tensor ranks and a 3-channel image."""
  options = [
      (preprocessor.normalize_image,
       {'original_minval': 0, 'original_maxval': 255,
        'target_minval': 0, 'target_maxval': 1}),
      (preprocessor.random_crop_image, {}),
  ]
  images = self.createTestImages()
  boxes = self.createTestBoxes()
  labels = self.createTestLabels()
  distorted = preprocessor.preprocess(
      {fields.InputDataFields.image: images,
       fields.InputDataFields.groundtruth_boxes: boxes,
       fields.InputDataFields.groundtruth_classes: labels},
      options)
  distorted_images = distorted[fields.InputDataFields.image]
  distorted_boxes = distorted[fields.InputDataFields.groundtruth_boxes]
  # Channel count is statically known even though the crop size is dynamic.
  self.assertEqual(3, distorted_images.get_shape()[3])
  with self.test_session() as sess:
    ranks = sess.run([tf.rank(boxes), tf.rank(distorted_boxes),
                      tf.rank(images), tf.rank(distorted_images)])
  boxes_rank, distorted_boxes_rank, images_rank, distorted_images_rank = ranks
  self.assertAllEqual(boxes_rank, distorted_boxes_rank)
  self.assertAllEqual(images_rank, distorted_images_rank)
def testRandomCropImageWithCache(self):
  """Cached crop (after random gray + normalize) must replay identically."""
  options = [
      (preprocessor.random_rgb_to_gray, {'probability': 0.5}),
      (preprocessor.normalize_image,
       {'original_minval': 0, 'original_maxval': 255,
        'target_minval': 0, 'target_maxval': 1}),
      (preprocessor.random_crop_image, {}),
  ]
  self._testPreprocessorCache(
      options, test_boxes=True, test_masks=False, test_keypoints=False)
def testRandomCropImageGrayscale(self):
  """Cropping a grayscale-converted image keeps ranks and one channel."""
  options = [
      (preprocessor.rgb_to_gray, {}),
      (preprocessor.normalize_image,
       {'original_minval': 0, 'original_maxval': 255,
        'target_minval': 0, 'target_maxval': 1}),
      (preprocessor.random_crop_image, {}),
  ]
  images = self.createTestImages()
  boxes = self.createTestBoxes()
  labels = self.createTestLabels()
  distorted = preprocessor.preprocess(
      {fields.InputDataFields.image: images,
       fields.InputDataFields.groundtruth_boxes: boxes,
       fields.InputDataFields.groundtruth_classes: labels},
      options)
  distorted_images = distorted[fields.InputDataFields.image]
  distorted_boxes = distorted[fields.InputDataFields.groundtruth_boxes]
  # rgb_to_gray collapses the image to a single channel.
  self.assertEqual(1, distorted_images.get_shape()[3])
  with self.test_session() as sess:
    ranks = sess.run([tf.rank(boxes), tf.rank(distorted_boxes),
                      tf.rank(images), tf.rank(distorted_images)])
  boxes_rank, distorted_boxes_rank, images_rank, distorted_images_rank = ranks
  self.assertAllEqual(boxes_rank, distorted_boxes_rank)
  self.assertAllEqual(images_rank, distorted_images_rank)
def testRandomCropImageWithBoxOutOfImage(self):
  """Random crop must handle boxes partially outside the image."""
  options = [
      (preprocessor.normalize_image,
       {'original_minval': 0, 'original_maxval': 255,
        'target_minval': 0, 'target_maxval': 1}),
      (preprocessor.random_crop_image, {}),
  ]
  images = self.createTestImages()
  boxes = self.createTestBoxesOutOfImage()
  labels = self.createTestLabels()
  distorted = preprocessor.preprocess(
      {fields.InputDataFields.image: images,
       fields.InputDataFields.groundtruth_boxes: boxes,
       fields.InputDataFields.groundtruth_classes: labels},
      options)
  distorted_images = distorted[fields.InputDataFields.image]
  distorted_boxes = distorted[fields.InputDataFields.groundtruth_boxes]
  with self.test_session() as sess:
    ranks = sess.run([tf.rank(boxes), tf.rank(distorted_boxes),
                      tf.rank(images), tf.rank(distorted_images)])
  boxes_rank, distorted_boxes_rank, images_rank, distorted_images_rank = ranks
  self.assertAllEqual(boxes_rank, distorted_boxes_rank)
  self.assertAllEqual(images_rank, distorted_images_rank)
def testRandomCropImageWithRandomCoefOne(self):
  """With random_coef=1.0 the crop is always skipped, so every output
  (image, boxes, labels, label scores) must equal its input exactly."""
  preprocessing_options = [(preprocessor.normalize_image, {
      'original_minval': 0,
      'original_maxval': 255,
      'target_minval': 0,
      'target_maxval': 1
  })]
  images = self.createTestImages()
  boxes = self.createTestBoxes()
  labels = self.createTestLabels()
  label_scores = self.createTestLabelScores()
  tensor_dict = {
      fields.InputDataFields.image: images,
      fields.InputDataFields.groundtruth_boxes: boxes,
      fields.InputDataFields.groundtruth_classes: labels,
      fields.InputDataFields.groundtruth_label_scores: label_scores
  }
  # Normalize first so the crop step below sees the same image tensor
  # that is later compared against the crop output.
  tensor_dict = preprocessor.preprocess(tensor_dict, preprocessing_options)
  images = tensor_dict[fields.InputDataFields.image]
  preprocessing_options = [(preprocessor.random_crop_image, {
      'random_coef': 1.0
  })]
  distorted_tensor_dict = preprocessor.preprocess(tensor_dict,
                                                  preprocessing_options)
  distorted_images = distorted_tensor_dict[fields.InputDataFields.image]
  distorted_boxes = distorted_tensor_dict[
      fields.InputDataFields.groundtruth_boxes]
  distorted_labels = distorted_tensor_dict[
      fields.InputDataFields.groundtruth_classes]
  distorted_label_scores = distorted_tensor_dict[
      fields.InputDataFields.groundtruth_label_scores]
  boxes_shape = tf.shape(boxes)
  distorted_boxes_shape = tf.shape(distorted_boxes)
  images_shape = tf.shape(images)
  distorted_images_shape = tf.shape(distorted_images)
  with self.test_session() as sess:
    (boxes_shape_, distorted_boxes_shape_, images_shape_,
     distorted_images_shape_, images_, distorted_images_,
     boxes_, distorted_boxes_, labels_, distorted_labels_,
     label_scores_, distorted_label_scores_) = sess.run(
         [boxes_shape, distorted_boxes_shape, images_shape,
          distorted_images_shape, images, distorted_images,
          boxes, distorted_boxes, labels, distorted_labels,
          label_scores, distorted_label_scores])
    # Shapes and values must all be untouched by the (skipped) crop.
    self.assertAllEqual(boxes_shape_, distorted_boxes_shape_)
    self.assertAllEqual(images_shape_, distorted_images_shape_)
    self.assertAllClose(images_, distorted_images_)
    self.assertAllClose(boxes_, distorted_boxes_)
    self.assertAllEqual(labels_, distorted_labels_)
    self.assertAllEqual(label_scores_, distorted_label_scores_)
def testRandomCropWithMockSampleDistortedBoundingBox(self):
  """Pins the sampled crop window via mock and checks exact outputs.

  tf.image.sample_distorted_bounding_box is patched to return a fixed
  (begin, size, bbox) crop, making the otherwise-random crop
  deterministic.  The expected constants show that the crop keeps only
  the last two input boxes (labels 7 and 11); the first box falls outside
  the crop window and is dropped.
  """
  preprocessing_options = [(preprocessor.normalize_image, {
      'original_minval': 0,
      'original_maxval': 255,
      'target_minval': 0,
      'target_maxval': 1
  })]
  images = self.createColorfulTestImage()
  boxes = tf.constant([[0.1, 0.1, 0.8, 0.3],
                       [0.2, 0.4, 0.75, 0.75],
                       [0.3, 0.1, 0.4, 0.7]], dtype=tf.float32)
  labels = tf.constant([1, 7, 11], dtype=tf.int32)
  tensor_dict = {
      fields.InputDataFields.image: images,
      fields.InputDataFields.groundtruth_boxes: boxes,
      fields.InputDataFields.groundtruth_classes: labels,
  }
  tensor_dict = preprocessor.preprocess(tensor_dict, preprocessing_options)
  images = tensor_dict[fields.InputDataFields.image]
  preprocessing_options = [(preprocessor.random_crop_image, {})]
  with mock.patch.object(
      tf.image,
      'sample_distorted_bounding_box') as mock_sample_distorted_bounding_box:
    # Fixed crop: begin=[6, 143, 0], size=[190, 237, -1] (-1 = all
    # channels), plus the matching normalized bounding box.
    mock_sample_distorted_bounding_box.return_value = (tf.constant(
        [6, 143, 0], dtype=tf.int32), tf.constant(
            [190, 237, -1], dtype=tf.int32), tf.constant(
                [[[0.03, 0.3575, 0.98, 0.95]]], dtype=tf.float32))
    distorted_tensor_dict = preprocessor.preprocess(tensor_dict,
                                                    preprocessing_options)
    distorted_boxes = distorted_tensor_dict[
        fields.InputDataFields.groundtruth_boxes]
    distorted_labels = distorted_tensor_dict[
        fields.InputDataFields.groundtruth_classes]
    # Boxes re-expressed in the cropped image's coordinate frame.
    expected_boxes = tf.constant([[0.178947, 0.07173, 0.75789469, 0.66244733],
                                  [0.28421, 0.0, 0.38947365, 0.57805908]],
                                 dtype=tf.float32)
    expected_labels = tf.constant([7, 11], dtype=tf.int32)
    with self.test_session() as sess:
      (distorted_boxes_, distorted_labels_,
       expected_boxes_, expected_labels_) = sess.run(
           [distorted_boxes, distorted_labels,
            expected_boxes, expected_labels])
      self.assertAllClose(distorted_boxes_, expected_boxes_)
      self.assertAllEqual(distorted_labels_, expected_labels_)
def testRandomCropImageWithMultiClassScores(self):
preprocessing_options = []
preprocessing_options.append((preprocessor.normalize_image, {
'original_minval': 0,
'original_maxval': 255,
'target_minval': 0,
'target_maxval': 1
}))
preprocessing_options.append((preprocessor.random_crop_image, {}))
images = self.createTestImages()
boxes = self.createTestBoxes()
labels = self.createTestLabels()
multiclass_scores = self.createTestMultiClassScores()
tensor_dict = {
fields.InputDataFields.image: images,
fields.InputDataFields.groundtruth_boxes: boxes,
fields.InputDataFields.groundtruth_classes: labels,
fields.InputDataFields.multiclass_scores: multiclass_scores
}
distorted_tensor_dict = preprocessor.preprocess(tensor_dict,
preprocessing_options)
distorted_images = distorted_tensor_dict[fields.InputDataFields.image]
distorted_boxes = distorted_tensor_dict[
fields.InputDataFields.groundtruth_boxes]
distorted_multiclass_scores = distorted_tensor_dict[
fields.InputDataFields.multiclass_scores]
boxes_rank = tf.rank(boxes)
distorted_boxes_rank = tf.rank(distorted_boxes)
images_rank = tf.rank(images)
distorted_images_rank = tf.rank(distorted_images)
multiclass_scores_rank = tf.rank(multiclass_scores)
distorted_multiclass_scores_rank = tf.rank(distorted_multiclass_scores)
with self.test_session() as sess:
(boxes_rank_, distorted_boxes_, distorted_boxes_rank_, images_rank_,
distorted_images_rank_, multiclass_scores_rank_,
distorted_multiclass_scores_rank_,
distorted_multiclass_scores_) = sess.run([
boxes_rank, distorted_boxes, distorted_boxes_rank, images_rank,
distorted_images_rank, multiclass_scores_rank,
distorted_multiclass_scores_rank, distorted_multiclass_scores
])
self.assertAllEqual(boxes_rank_, distorted_boxes_rank_)
self.assertAllEqual(images_rank_, distorted_images_rank_)
self.assertAllEqual(multiclass_scores_rank_,
distorted_multiclass_scores_rank_)
self.assertAllEqual(distorted_boxes_.shape[0],
distorted_multiclass_scores_.shape[0])
  def testStrictRandomCropImageWithLabelScores(self):
    """Tests that _strict_random_crop_image forwards label scores.

    The sampled crop window is mocked to begin=[6, 143, 0],
    size=[190, 237, -1], so the cropped outputs are deterministic.
    """
    image = self.createColorfulTestImage()[0]
    boxes = self.createTestBoxes()
    labels = self.createTestLabels()
    label_scores = self.createTestLabelScores()
    with mock.patch.object(
        tf.image,
        'sample_distorted_bounding_box'
    ) as mock_sample_distorted_bounding_box:
      mock_sample_distorted_bounding_box.return_value = (
          tf.constant([6, 143, 0], dtype=tf.int32),
          tf.constant([190, 237, -1], dtype=tf.int32),
          tf.constant([[[0.03, 0.3575, 0.98, 0.95]]], dtype=tf.float32))
      new_image, new_boxes, new_labels, new_label_scores = (
          preprocessor._strict_random_crop_image(
              image, boxes, labels, label_scores))
      with self.test_session() as sess:
        new_image, new_boxes, new_labels, new_label_scores = (
            sess.run(
                [new_image, new_boxes, new_labels, new_label_scores])
        )
        # Both boxes survive the crop; scores stay aligned with them.
        expected_boxes = np.array(
            [[0.0, 0.0, 0.75789469, 1.0],
             [0.23157893, 0.24050637, 0.75789469, 1.0]], dtype=np.float32)
        self.assertAllEqual(new_image.shape, [190, 237, 3])
        self.assertAllEqual(new_label_scores, [1.0, 0.5])
        self.assertAllClose(
            new_boxes.flatten(), expected_boxes.flatten())
  def testStrictRandomCropImageWithMasks(self):
    """Tests that _strict_random_crop_image crops instance masks too.

    With the crop window mocked to size [190, 237], the masks must come out
    with matching spatial dimensions.
    """
    image = self.createColorfulTestImage()[0]
    boxes = self.createTestBoxes()
    labels = self.createTestLabels()
    masks = tf.random_uniform([2, 200, 400], dtype=tf.float32)
    with mock.patch.object(
        tf.image,
        'sample_distorted_bounding_box'
    ) as mock_sample_distorted_bounding_box:
      mock_sample_distorted_bounding_box.return_value = (
          tf.constant([6, 143, 0], dtype=tf.int32),
          tf.constant([190, 237, -1], dtype=tf.int32),
          tf.constant([[[0.03, 0.3575, 0.98, 0.95]]], dtype=tf.float32))
      new_image, new_boxes, new_labels, new_masks = (
          preprocessor._strict_random_crop_image(
              image, boxes, labels, masks=masks))
      with self.test_session() as sess:
        new_image, new_boxes, new_labels, new_masks = sess.run(
            [new_image, new_boxes, new_labels, new_masks])
        expected_boxes = np.array(
            [[0.0, 0.0, 0.75789469, 1.0],
             [0.23157893, 0.24050637, 0.75789469, 1.0]], dtype=np.float32)
        # Image and masks share the cropped 190x237 spatial extent.
        self.assertAllEqual(new_image.shape, [190, 237, 3])
        self.assertAllEqual(new_masks.shape, [2, 190, 237])
        self.assertAllClose(
            new_boxes.flatten(), expected_boxes.flatten())
  def testStrictRandomCropImageWithKeypoints(self):
    """Tests that _strict_random_crop_image transforms keypoints.

    Keypoints landing outside the (mocked) crop window become NaN; the rest
    are re-normalized to the cropped image.
    """
    image = self.createColorfulTestImage()[0]
    boxes = self.createTestBoxes()
    labels = self.createTestLabels()
    keypoints = self.createTestKeypoints()
    with mock.patch.object(
        tf.image,
        'sample_distorted_bounding_box'
    ) as mock_sample_distorted_bounding_box:
      mock_sample_distorted_bounding_box.return_value = (
          tf.constant([6, 143, 0], dtype=tf.int32),
          tf.constant([190, 237, -1], dtype=tf.int32),
          tf.constant([[[0.03, 0.3575, 0.98, 0.95]]], dtype=tf.float32))
      new_image, new_boxes, new_labels, new_keypoints = (
          preprocessor._strict_random_crop_image(
              image, boxes, labels, keypoints=keypoints))
      with self.test_session() as sess:
        new_image, new_boxes, new_labels, new_keypoints = sess.run(
            [new_image, new_boxes, new_labels, new_keypoints])
        expected_boxes = np.array([
            [0.0, 0.0, 0.75789469, 1.0],
            [0.23157893, 0.24050637, 0.75789469, 1.0],], dtype=np.float32)
        # The first instance's keypoints fall outside the crop -> NaN.
        expected_keypoints = np.array([
            [[np.nan, np.nan],
             [np.nan, np.nan],
             [np.nan, np.nan]],
            [[0.38947368, 0.07173],
             [0.49473682, 0.24050637],
             [0.60000002, 0.40928277]]
        ], dtype=np.float32)
        self.assertAllEqual(new_image.shape, [190, 237, 3])
        self.assertAllClose(
            new_boxes.flatten(), expected_boxes.flatten())
        self.assertAllClose(
            new_keypoints.flatten(), expected_keypoints.flatten())
  def testRunRandomCropImageWithMasks(self):
    """Tests random_crop_image via preprocess() with instance masks.

    The crop window is mocked (begin=[6, 143, 0], size=[190, 237, -1]) so
    the cropped image/mask shapes and surviving boxes are deterministic.
    """
    image = self.createColorfulTestImage()
    boxes = self.createTestBoxes()
    labels = self.createTestLabels()
    masks = tf.random_uniform([2, 200, 400], dtype=tf.float32)
    tensor_dict = {
        fields.InputDataFields.image: image,
        fields.InputDataFields.groundtruth_boxes: boxes,
        fields.InputDataFields.groundtruth_classes: labels,
        fields.InputDataFields.groundtruth_instance_masks: masks,
    }
    # Extend the default arg map so masks flow through the preprocess call.
    preprocessor_arg_map = preprocessor.get_default_func_arg_map(
        include_instance_masks=True)
    preprocessing_options = [(preprocessor.random_crop_image, {})]
    with mock.patch.object(
        tf.image,
        'sample_distorted_bounding_box'
    ) as mock_sample_distorted_bounding_box:
      mock_sample_distorted_bounding_box.return_value = (
          tf.constant([6, 143, 0], dtype=tf.int32),
          tf.constant([190, 237, -1], dtype=tf.int32),
          tf.constant([[[0.03, 0.3575, 0.98, 0.95]]], dtype=tf.float32))
      distorted_tensor_dict = preprocessor.preprocess(
          tensor_dict, preprocessing_options, func_arg_map=preprocessor_arg_map)
      distorted_image = distorted_tensor_dict[fields.InputDataFields.image]
      distorted_boxes = distorted_tensor_dict[
          fields.InputDataFields.groundtruth_boxes]
      distorted_labels = distorted_tensor_dict[
          fields.InputDataFields.groundtruth_classes]
      distorted_masks = distorted_tensor_dict[
          fields.InputDataFields.groundtruth_instance_masks]
      with self.test_session() as sess:
        (distorted_image_, distorted_boxes_, distorted_labels_,
         distorted_masks_) = sess.run(
             [distorted_image, distorted_boxes, distorted_labels,
              distorted_masks])
        expected_boxes = np.array([
            [0.0, 0.0, 0.75789469, 1.0],
            [0.23157893, 0.24050637, 0.75789469, 1.0],
        ], dtype=np.float32)
        # Image and masks are cropped to the same 190x237 extent.
        self.assertAllEqual(distorted_image_.shape, [1, 190, 237, 3])
        self.assertAllEqual(distorted_masks_.shape, [2, 190, 237])
        self.assertAllEqual(distorted_labels_, [1, 2])
        self.assertAllClose(
            distorted_boxes_.flatten(), expected_boxes.flatten())
  def testRunRandomCropImageWithKeypointsInsideCrop(self):
    """Tests random_crop_image with keypoints that all lie inside the crop.

    The crop window is mocked, so every keypoint survives and is simply
    re-normalized to the cropped image.
    """
    image = self.createColorfulTestImage()
    boxes = self.createTestBoxes()
    labels = self.createTestLabels()
    keypoints = self.createTestKeypointsInsideCrop()
    tensor_dict = {
        fields.InputDataFields.image: image,
        fields.InputDataFields.groundtruth_boxes: boxes,
        fields.InputDataFields.groundtruth_classes: labels,
        fields.InputDataFields.groundtruth_keypoints: keypoints
    }
    preprocessor_arg_map = preprocessor.get_default_func_arg_map(
        include_keypoints=True)
    preprocessing_options = [(preprocessor.random_crop_image, {})]
    with mock.patch.object(
        tf.image,
        'sample_distorted_bounding_box'
    ) as mock_sample_distorted_bounding_box:
      mock_sample_distorted_bounding_box.return_value = (
          tf.constant([6, 143, 0], dtype=tf.int32),
          tf.constant([190, 237, -1], dtype=tf.int32),
          tf.constant([[[0.03, 0.3575, 0.98, 0.95]]], dtype=tf.float32))
      distorted_tensor_dict = preprocessor.preprocess(
          tensor_dict, preprocessing_options, func_arg_map=preprocessor_arg_map)
      distorted_image = distorted_tensor_dict[fields.InputDataFields.image]
      distorted_boxes = distorted_tensor_dict[
          fields.InputDataFields.groundtruth_boxes]
      distorted_labels = distorted_tensor_dict[
          fields.InputDataFields.groundtruth_classes]
      distorted_keypoints = distorted_tensor_dict[
          fields.InputDataFields.groundtruth_keypoints]
      with self.test_session() as sess:
        (distorted_image_, distorted_boxes_, distorted_labels_,
         distorted_keypoints_) = sess.run(
             [distorted_image, distorted_boxes, distorted_labels,
              distorted_keypoints])
        expected_boxes = np.array([
            [0.0, 0.0, 0.75789469, 1.0],
            [0.23157893, 0.24050637, 0.75789469, 1.0],
        ], dtype=np.float32)
        # Both instances keep all three keypoints, re-normalized to the crop.
        expected_keypoints = np.array([
            [[0.38947368, 0.07173],
             [0.49473682, 0.24050637],
             [0.60000002, 0.40928277]],
            [[0.38947368, 0.07173],
             [0.49473682, 0.24050637],
             [0.60000002, 0.40928277]]
        ])
        self.assertAllEqual(distorted_image_.shape, [1, 190, 237, 3])
        self.assertAllEqual(distorted_labels_, [1, 2])
        self.assertAllClose(
            distorted_boxes_.flatten(), expected_boxes.flatten())
        self.assertAllClose(
            distorted_keypoints_.flatten(), expected_keypoints.flatten())
  def testRunRandomCropImageWithKeypointsOutsideCrop(self):
    """Tests random_crop_image with keypoints that all lie outside the crop.

    With the crop window mocked, every keypoint falls outside the window and
    must be replaced by NaN while the boxes themselves are still clipped.
    """
    image = self.createColorfulTestImage()
    boxes = self.createTestBoxes()
    labels = self.createTestLabels()
    keypoints = self.createTestKeypointsOutsideCrop()
    tensor_dict = {
        fields.InputDataFields.image: image,
        fields.InputDataFields.groundtruth_boxes: boxes,
        fields.InputDataFields.groundtruth_classes: labels,
        fields.InputDataFields.groundtruth_keypoints: keypoints
    }
    preprocessor_arg_map = preprocessor.get_default_func_arg_map(
        include_keypoints=True)
    preprocessing_options = [(preprocessor.random_crop_image, {})]
    with mock.patch.object(
        tf.image,
        'sample_distorted_bounding_box'
    ) as mock_sample_distorted_bounding_box:
      mock_sample_distorted_bounding_box.return_value = (
          tf.constant([6, 143, 0], dtype=tf.int32),
          tf.constant([190, 237, -1], dtype=tf.int32),
          tf.constant([[[0.03, 0.3575, 0.98, 0.95]]], dtype=tf.float32))
      distorted_tensor_dict = preprocessor.preprocess(
          tensor_dict, preprocessing_options, func_arg_map=preprocessor_arg_map)
      distorted_image = distorted_tensor_dict[fields.InputDataFields.image]
      distorted_boxes = distorted_tensor_dict[
          fields.InputDataFields.groundtruth_boxes]
      distorted_labels = distorted_tensor_dict[
          fields.InputDataFields.groundtruth_classes]
      distorted_keypoints = distorted_tensor_dict[
          fields.InputDataFields.groundtruth_keypoints]
      with self.test_session() as sess:
        (distorted_image_, distorted_boxes_, distorted_labels_,
         distorted_keypoints_) = sess.run(
             [distorted_image, distorted_boxes, distorted_labels,
              distorted_keypoints])
        expected_boxes = np.array([
            [0.0, 0.0, 0.75789469, 1.0],
            [0.23157893, 0.24050637, 0.75789469, 1.0],
        ], dtype=np.float32)
        # All keypoints of both instances land outside the crop -> NaN.
        expected_keypoints = np.array([
            [[np.nan, np.nan],
             [np.nan, np.nan],
             [np.nan, np.nan]],
            [[np.nan, np.nan],
             [np.nan, np.nan],
             [np.nan, np.nan]],
        ])
        self.assertAllEqual(distorted_image_.shape, [1, 190, 237, 3])
        self.assertAllEqual(distorted_labels_, [1, 2])
        self.assertAllClose(
            distorted_boxes_.flatten(), expected_boxes.flatten())
        self.assertAllClose(
            distorted_keypoints_.flatten(), expected_keypoints.flatten())
def testRunRetainBoxesAboveThreshold(self):
boxes = self.createTestBoxes()
labels = self.createTestLabels()
label_scores = self.createTestLabelScores()
tensor_dict = {
fields.InputDataFields.groundtruth_boxes: boxes,
fields.InputDataFields.groundtruth_classes: labels,
fields.InputDataFields.groundtruth_label_scores: label_scores
}
preprocessing_options = [
(preprocessor.retain_boxes_above_threshold, {'threshold': 0.6})
]
preprocessor_arg_map = preprocessor.get_default_func_arg_map(
include_label_scores=True)
retained_tensor_dict = preprocessor.preprocess(
tensor_dict, preprocessing_options, func_arg_map=preprocessor_arg_map)
retained_boxes = retained_tensor_dict[
fields.InputDataFields.groundtruth_boxes]
retained_labels = retained_tensor_dict[
fields.InputDataFields.groundtruth_classes]
retained_label_scores = retained_tensor_dict[
fields.InputDataFields.groundtruth_label_scores]
with self.test_session() as sess:
(retained_boxes_, retained_labels_,
retained_label_scores_, expected_retained_boxes_,
expected_retained_labels_, expected_retained_label_scores_) = sess.run(
[retained_boxes, retained_labels, retained_label_scores,
self.expectedBoxesAfterThresholding(),
self.expectedLabelsAfterThresholding(),
self.expectedLabelScoresAfterThresholding()])
self.assertAllClose(retained_boxes_, expected_retained_boxes_)
self.assertAllClose(retained_labels_, expected_retained_labels_)
self.assertAllClose(
retained_label_scores_, expected_retained_label_scores_)
def testRunRetainBoxesAboveThresholdWithMasks(self):
boxes = self.createTestBoxes()
labels = self.createTestLabels()
label_scores = self.createTestLabelScores()
masks = self.createTestMasks()
tensor_dict = {
fields.InputDataFields.groundtruth_boxes: boxes,
fields.InputDataFields.groundtruth_classes: labels,
fields.InputDataFields.groundtruth_label_scores: label_scores,
fields.InputDataFields.groundtruth_instance_masks: masks
}
preprocessor_arg_map = preprocessor.get_default_func_arg_map(
include_label_scores=True,
include_instance_masks=True)
preprocessing_options = [
(preprocessor.retain_boxes_above_threshold, {'threshold': 0.6})
]
retained_tensor_dict = preprocessor.preprocess(
tensor_dict, preprocessing_options, func_arg_map=preprocessor_arg_map)
retained_masks = retained_tensor_dict[
fields.InputDataFields.groundtruth_instance_masks]
with self.test_session() as sess:
(retained_masks_, expected_masks_) = sess.run(
[retained_masks,
self.expectedMasksAfterThresholding()])
self.assertAllClose(retained_masks_, expected_masks_)
def testRunRetainBoxesAboveThresholdWithKeypoints(self):
boxes = self.createTestBoxes()
labels = self.createTestLabels()
label_scores = self.createTestLabelScores()
keypoints = self.createTestKeypoints()
tensor_dict = {
fields.InputDataFields.groundtruth_boxes: boxes,
fields.InputDataFields.groundtruth_classes: labels,
fields.InputDataFields.groundtruth_label_scores: label_scores,
fields.InputDataFields.groundtruth_keypoints: keypoints
}
preprocessor_arg_map = preprocessor.get_default_func_arg_map(
include_label_scores=True,
include_keypoints=True)
preprocessing_options = [
(preprocessor.retain_boxes_above_threshold, {'threshold': 0.6})
]
retained_tensor_dict = preprocessor.preprocess(
tensor_dict, preprocessing_options, func_arg_map=preprocessor_arg_map)
retained_keypoints = retained_tensor_dict[
fields.InputDataFields.groundtruth_keypoints]
with self.test_session() as sess:
(retained_keypoints_, expected_keypoints_) = sess.run(
[retained_keypoints,
self.expectedKeypointsAfterThresholding()])
self.assertAllClose(retained_keypoints_, expected_keypoints_)
def testRandomCropToAspectRatioWithCache(self):
preprocess_options = [(preprocessor.random_crop_to_aspect_ratio, {})]
self._testPreprocessorCache(preprocess_options,
test_boxes=True,
test_masks=False,
test_keypoints=False)
  def testRunRandomCropToAspectRatioWithMasks(self):
    """Tests random_crop_to_aspect_ratio via preprocess() with masks.

    preprocessor._random_integer is mocked to return 0 so the crop offset is
    fixed, making the surviving box/label/mask values deterministic.
    """
    image = self.createColorfulTestImage()
    boxes = self.createTestBoxes()
    labels = self.createTestLabels()
    masks = tf.random_uniform([2, 200, 400], dtype=tf.float32)
    tensor_dict = {
        fields.InputDataFields.image: image,
        fields.InputDataFields.groundtruth_boxes: boxes,
        fields.InputDataFields.groundtruth_classes: labels,
        fields.InputDataFields.groundtruth_instance_masks: masks
    }
    preprocessor_arg_map = preprocessor.get_default_func_arg_map(
        include_instance_masks=True)
    preprocessing_options = [(preprocessor.random_crop_to_aspect_ratio, {})]
    with mock.patch.object(preprocessor,
                           '_random_integer') as mock_random_integer:
      mock_random_integer.return_value = tf.constant(0, dtype=tf.int32)
      distorted_tensor_dict = preprocessor.preprocess(
          tensor_dict, preprocessing_options, func_arg_map=preprocessor_arg_map)
      distorted_image = distorted_tensor_dict[fields.InputDataFields.image]
      distorted_boxes = distorted_tensor_dict[
          fields.InputDataFields.groundtruth_boxes]
      distorted_labels = distorted_tensor_dict[
          fields.InputDataFields.groundtruth_classes]
      distorted_masks = distorted_tensor_dict[
          fields.InputDataFields.groundtruth_instance_masks]
      with self.test_session() as sess:
        (distorted_image_, distorted_boxes_, distorted_labels_,
         distorted_masks_) = sess.run([
             distorted_image, distorted_boxes, distorted_labels, distorted_masks
         ])
        # Only the first box/label/mask survives the aspect-ratio crop.
        expected_boxes = np.array([0.0, 0.5, 0.75, 1.0], dtype=np.float32)
        self.assertAllEqual(distorted_image_.shape, [1, 200, 200, 3])
        self.assertAllEqual(distorted_labels_, [1])
        self.assertAllClose(distorted_boxes_.flatten(),
                            expected_boxes.flatten())
        self.assertAllEqual(distorted_masks_.shape, [1, 200, 200])
  def testRunRandomCropToAspectRatioWithKeypoints(self):
    """Tests random_crop_to_aspect_ratio via preprocess() with keypoints.

    preprocessor._random_integer is mocked to return 0 so the crop offset is
    fixed, making the surviving box/label/keypoint values deterministic.
    """
    image = self.createColorfulTestImage()
    boxes = self.createTestBoxes()
    labels = self.createTestLabels()
    keypoints = self.createTestKeypoints()
    tensor_dict = {
        fields.InputDataFields.image: image,
        fields.InputDataFields.groundtruth_boxes: boxes,
        fields.InputDataFields.groundtruth_classes: labels,
        fields.InputDataFields.groundtruth_keypoints: keypoints
    }
    preprocessor_arg_map = preprocessor.get_default_func_arg_map(
        include_keypoints=True)
    preprocessing_options = [(preprocessor.random_crop_to_aspect_ratio, {})]
    with mock.patch.object(preprocessor,
                           '_random_integer') as mock_random_integer:
      mock_random_integer.return_value = tf.constant(0, dtype=tf.int32)
      distorted_tensor_dict = preprocessor.preprocess(
          tensor_dict, preprocessing_options, func_arg_map=preprocessor_arg_map)
      distorted_image = distorted_tensor_dict[fields.InputDataFields.image]
      distorted_boxes = distorted_tensor_dict[
          fields.InputDataFields.groundtruth_boxes]
      distorted_labels = distorted_tensor_dict[
          fields.InputDataFields.groundtruth_classes]
      distorted_keypoints = distorted_tensor_dict[
          fields.InputDataFields.groundtruth_keypoints]
      with self.test_session() as sess:
        (distorted_image_, distorted_boxes_, distorted_labels_,
         distorted_keypoints_) = sess.run([
             distorted_image, distorted_boxes, distorted_labels,
             distorted_keypoints
         ])
        # Only the first instance survives; its keypoints are rescaled to
        # the 200x200 crop.
        expected_boxes = np.array([0.0, 0.5, 0.75, 1.0], dtype=np.float32)
        expected_keypoints = np.array(
            [[0.1, 0.2], [0.2, 0.4], [0.3, 0.6]], dtype=np.float32)
        self.assertAllEqual(distorted_image_.shape, [1, 200, 200, 3])
        self.assertAllEqual(distorted_labels_, [1])
        self.assertAllClose(distorted_boxes_.flatten(),
                            expected_boxes.flatten())
        self.assertAllClose(distorted_keypoints_.flatten(),
                            expected_keypoints.flatten())
def testRandomPadToAspectRatioWithCache(self):
preprocess_options = [(preprocessor.random_pad_to_aspect_ratio, {})]
self._testPreprocessorCache(preprocess_options,
test_boxes=True,
test_masks=True,
test_keypoints=True)
def testRunRandomPadToAspectRatioWithMinMaxPaddedSizeRatios(self):
image = self.createColorfulTestImage()
boxes = self.createTestBoxes()
labels = self.createTestLabels()
tensor_dict = {
fields.InputDataFields.image: image,
fields.InputDataFields.groundtruth_boxes: boxes,
fields.InputDataFields.groundtruth_classes: labels
}
preprocessor_arg_map = preprocessor.get_default_func_arg_map()
preprocessing_options = [(preprocessor.random_pad_to_aspect_ratio,
{'min_padded_size_ratio': (4.0, 4.0),
'max_padded_size_ratio': (4.0, 4.0)})]
distorted_tensor_dict = preprocessor.preprocess(
tensor_dict, preprocessing_options, func_arg_map=preprocessor_arg_map)
distorted_image = distorted_tensor_dict[fields.InputDataFields.image]
distorted_boxes = distorted_tensor_dict[
fields.InputDataFields.groundtruth_boxes]
distorted_labels = distorted_tensor_dict[
fields.InputDataFields.groundtruth_classes]
with self.test_session() as sess:
distorted_image_, distorted_boxes_, distorted_labels_ = sess.run([
distorted_image, distorted_boxes, distorted_labels])
expected_boxes = np.array(
[[0.0, 0.125, 0.1875, 0.5], [0.0625, 0.25, 0.1875, 0.5]],
dtype=np.float32)
self.assertAllEqual(distorted_image_.shape, [1, 800, 800, 3])
self.assertAllEqual(distorted_labels_, [1, 2])
self.assertAllClose(distorted_boxes_.flatten(),
expected_boxes.flatten())
def testRunRandomPadToAspectRatioWithMasks(self):
image = self.createColorfulTestImage()
boxes = self.createTestBoxes()
labels = self.createTestLabels()
masks = tf.random_uniform([2, 200, 400], dtype=tf.float32)
tensor_dict = {
fields.InputDataFields.image: image,
fields.InputDataFields.groundtruth_boxes: boxes,
fields.InputDataFields.groundtruth_classes: labels,
fields.InputDataFields.groundtruth_instance_masks: masks
}
preprocessor_arg_map = preprocessor.get_default_func_arg_map(
include_instance_masks=True)
preprocessing_options = [(preprocessor.random_pad_to_aspect_ratio, {})]
distorted_tensor_dict = preprocessor.preprocess(
tensor_dict, preprocessing_options, func_arg_map=preprocessor_arg_map)
distorted_image = distorted_tensor_dict[fields.InputDataFields.image]
distorted_boxes = distorted_tensor_dict[
fields.InputDataFields.groundtruth_boxes]
distorted_labels = distorted_tensor_dict[
fields.InputDataFields.groundtruth_classes]
distorted_masks = distorted_tensor_dict[
fields.InputDataFields.groundtruth_instance_masks]
with self.test_session() as sess:
(distorted_image_, distorted_boxes_, distorted_labels_,
distorted_masks_) = sess.run([
distorted_image, distorted_boxes, distorted_labels, distorted_masks
])
expected_boxes = np.array(
[[0.0, 0.25, 0.375, 1.0], [0.125, 0.5, 0.375, 1.0]], dtype=np.float32)
self.assertAllEqual(distorted_image_.shape, [1, 400, 400, 3])
self.assertAllEqual(distorted_labels_, [1, 2])
self.assertAllClose(distorted_boxes_.flatten(),
expected_boxes.flatten())
self.assertAllEqual(distorted_masks_.shape, [2, 400, 400])
def testRunRandomPadToAspectRatioWithKeypoints(self):
image = self.createColorfulTestImage()
boxes = self.createTestBoxes()
labels = self.createTestLabels()
keypoints = self.createTestKeypoints()
tensor_dict = {
fields.InputDataFields.image: image,
fields.InputDataFields.groundtruth_boxes: boxes,
fields.InputDataFields.groundtruth_classes: labels,
fields.InputDataFields.groundtruth_keypoints: keypoints
}
preprocessor_arg_map = preprocessor.get_default_func_arg_map(
include_keypoints=True)
preprocessing_options = [(preprocessor.random_pad_to_aspect_ratio, {})]
distorted_tensor_dict = preprocessor.preprocess(
tensor_dict, preprocessing_options, func_arg_map=preprocessor_arg_map)
distorted_image = distorted_tensor_dict[fields.InputDataFields.image]
distorted_boxes = distorted_tensor_dict[
fields.InputDataFields.groundtruth_boxes]
distorted_labels = distorted_tensor_dict[
fields.InputDataFields.groundtruth_classes]
distorted_keypoints = distorted_tensor_dict[
fields.InputDataFields.groundtruth_keypoints]
with self.test_session() as sess:
(distorted_image_, distorted_boxes_, distorted_labels_,
distorted_keypoints_) = sess.run([
distorted_image, distorted_boxes, distorted_labels,
distorted_keypoints
])
expected_boxes = np.array(
[[0.0, 0.25, 0.375, 1.0], [0.125, 0.5, 0.375, 1.0]], dtype=np.float32)
expected_keypoints = np.array([
[[0.05, 0.1], [0.1, 0.2], [0.15, 0.3]],
[[0.2, 0.4], [0.25, 0.5], [0.3, 0.6]],
], dtype=np.float32)
self.assertAllEqual(distorted_image_.shape, [1, 400, 400, 3])
self.assertAllEqual(distorted_labels_, [1, 2])
self.assertAllClose(distorted_boxes_.flatten(),
expected_boxes.flatten())
self.assertAllClose(distorted_keypoints_.flatten(),
expected_keypoints.flatten())
def testRandomPadImageWithCache(self):
preprocess_options = [(preprocessor.normalize_image, {
'original_minval': 0,
'original_maxval': 255,
'target_minval': 0,
'target_maxval': 1,}), (preprocessor.random_pad_image, {})]
self._testPreprocessorCache(preprocess_options,
test_boxes=True,
test_masks=True,
test_keypoints=True)
def testRandomPadImage(self):
preprocessing_options = [(preprocessor.normalize_image, {
'original_minval': 0,
'original_maxval': 255,
'target_minval': 0,
'target_maxval': 1
})]
images = self.createTestImages()
boxes = self.createTestBoxes()
labels = self.createTestLabels()
tensor_dict = {
fields.InputDataFields.image: images,
fields.InputDataFields.groundtruth_boxes: boxes,
fields.InputDataFields.groundtruth_classes: labels,
}
tensor_dict = preprocessor.preprocess(tensor_dict, preprocessing_options)
images = tensor_dict[fields.InputDataFields.image]
preprocessing_options = [(preprocessor.random_pad_image, {})]
padded_tensor_dict = preprocessor.preprocess(tensor_dict,
preprocessing_options)
padded_images = padded_tensor_dict[fields.InputDataFields.image]
padded_boxes = padded_tensor_dict[
fields.InputDataFields.groundtruth_boxes]
boxes_shape = tf.shape(boxes)
padded_boxes_shape = tf.shape(padded_boxes)
images_shape = tf.shape(images)
padded_images_shape = tf.shape(padded_images)
with self.test_session() as sess:
(boxes_shape_, padded_boxes_shape_, images_shape_,
padded_images_shape_, boxes_, padded_boxes_) = sess.run(
[boxes_shape, padded_boxes_shape, images_shape,
padded_images_shape, boxes, padded_boxes])
self.assertAllEqual(boxes_shape_, padded_boxes_shape_)
self.assertTrue((images_shape_[1] >= padded_images_shape_[1] * 0.5).all)
self.assertTrue((images_shape_[2] >= padded_images_shape_[2] * 0.5).all)
self.assertTrue((images_shape_[1] <= padded_images_shape_[1]).all)
self.assertTrue((images_shape_[2] <= padded_images_shape_[2]).all)
self.assertTrue(np.all((boxes_[:, 2] - boxes_[:, 0]) >= (
padded_boxes_[:, 2] - padded_boxes_[:, 0])))
self.assertTrue(np.all((boxes_[:, 3] - boxes_[:, 1]) >= (
padded_boxes_[:, 3] - padded_boxes_[:, 1])))
def testRandomCropPadImageWithCache(self):
preprocess_options = [(preprocessor.normalize_image, {
'original_minval': 0,
'original_maxval': 255,
'target_minval': 0,
'target_maxval': 1,}), (preprocessor.random_crop_pad_image, {})]
self._testPreprocessorCache(preprocess_options,
test_boxes=True,
test_masks=True,
test_keypoints=True)
def testRandomCropPadImageWithRandomCoefOne(self):
preprocessing_options = [(preprocessor.normalize_image, {
'original_minval': 0,
'original_maxval': 255,
'target_minval': 0,
'target_maxval': 1
})]
images = self.createTestImages()
boxes = self.createTestBoxes()
labels = self.createTestLabels()
tensor_dict = {
fields.InputDataFields.image: images,
fields.InputDataFields.groundtruth_boxes: boxes,
fields.InputDataFields.groundtruth_classes: labels,
}
tensor_dict = preprocessor.preprocess(tensor_dict, preprocessing_options)
images = tensor_dict[fields.InputDataFields.image]
preprocessing_options = [(preprocessor.random_crop_pad_image, {
'random_coef': 1.0
})]
padded_tensor_dict = preprocessor.preprocess(tensor_dict,
preprocessing_options)
padded_images = padded_tensor_dict[fields.InputDataFields.image]
padded_boxes = padded_tensor_dict[
fields.InputDataFields.groundtruth_boxes]
boxes_shape = tf.shape(boxes)
padded_boxes_shape = tf.shape(padded_boxes)
images_shape = tf.shape(images)
padded_images_shape = tf.shape(padded_images)
with self.test_session() as sess:
(boxes_shape_, padded_boxes_shape_, images_shape_,
padded_images_shape_, boxes_, padded_boxes_) = sess.run(
[boxes_shape, padded_boxes_shape, images_shape,
padded_images_shape, boxes, padded_boxes])
self.assertAllEqual(boxes_shape_, padded_boxes_shape_)
self.assertTrue((images_shape_[1] >= padded_images_shape_[1] * 0.5).all)
self.assertTrue((images_shape_[2] >= padded_images_shape_[2] * 0.5).all)
self.assertTrue((images_shape_[1] <= padded_images_shape_[1]).all)
self.assertTrue((images_shape_[2] <= padded_images_shape_[2]).all)
self.assertTrue(np.all((boxes_[:, 2] - boxes_[:, 0]) >= (
padded_boxes_[:, 2] - padded_boxes_[:, 0])))
self.assertTrue(np.all((boxes_[:, 3] - boxes_[:, 1]) >= (
padded_boxes_[:, 3] - padded_boxes_[:, 1])))
def testRandomCropToAspectRatio(self):
images = self.createTestImages()
boxes = self.createTestBoxes()
labels = self.createTestLabels()
tensor_dict = {
fields.InputDataFields.image: images,
fields.InputDataFields.groundtruth_boxes: boxes,
fields.InputDataFields.groundtruth_classes: labels,
}
tensor_dict = preprocessor.preprocess(tensor_dict, [])
images = tensor_dict[fields.InputDataFields.image]
preprocessing_options = [(preprocessor.random_crop_to_aspect_ratio, {
'aspect_ratio': 2.0
})]
cropped_tensor_dict = preprocessor.preprocess(tensor_dict,
preprocessing_options)
cropped_images = cropped_tensor_dict[fields.InputDataFields.image]
cropped_boxes = cropped_tensor_dict[
fields.InputDataFields.groundtruth_boxes]
boxes_shape = tf.shape(boxes)
cropped_boxes_shape = tf.shape(cropped_boxes)
images_shape = tf.shape(images)
cropped_images_shape = tf.shape(cropped_images)
with self.test_session() as sess:
(boxes_shape_, cropped_boxes_shape_, images_shape_,
cropped_images_shape_) = sess.run([
boxes_shape, cropped_boxes_shape, images_shape, cropped_images_shape
])
self.assertAllEqual(boxes_shape_, cropped_boxes_shape_)
self.assertEqual(images_shape_[1], cropped_images_shape_[1] * 2)
self.assertEqual(images_shape_[2], cropped_images_shape_[2])
def testRandomPadToAspectRatio(self):
images = self.createTestImages()
boxes = self.createTestBoxes()
labels = self.createTestLabels()
tensor_dict = {
fields.InputDataFields.image: images,
fields.InputDataFields.groundtruth_boxes: boxes,
fields.InputDataFields.groundtruth_classes: labels,
}
tensor_dict = preprocessor.preprocess(tensor_dict, [])
images = tensor_dict[fields.InputDataFields.image]
preprocessing_options = [(preprocessor.random_pad_to_aspect_ratio, {
'aspect_ratio': 2.0
})]
padded_tensor_dict = preprocessor.preprocess(tensor_dict,
preprocessing_options)
padded_images = padded_tensor_dict[fields.InputDataFields.image]
padded_boxes = padded_tensor_dict[
fields.InputDataFields.groundtruth_boxes]
boxes_shape = tf.shape(boxes)
padded_boxes_shape = tf.shape(padded_boxes)
images_shape = tf.shape(images)
padded_images_shape = tf.shape(padded_images)
with self.test_session() as sess:
(boxes_shape_, padded_boxes_shape_, images_shape_,
padded_images_shape_) = sess.run([
boxes_shape, padded_boxes_shape, images_shape, padded_images_shape
])
self.assertAllEqual(boxes_shape_, padded_boxes_shape_)
self.assertEqual(images_shape_[1], padded_images_shape_[1])
self.assertEqual(2 * images_shape_[2], padded_images_shape_[2])
def testRandomBlackPatchesWithCache(self):
preprocess_options = []
preprocess_options.append((preprocessor.normalize_image, {
'original_minval': 0,
'original_maxval': 255,
'target_minval': 0,
'target_maxval': 1
}))
preprocess_options.append((preprocessor.random_black_patches, {
'size_to_image_ratio': 0.5
}))
self._testPreprocessorCache(preprocess_options,
test_boxes=True,
test_masks=True,
test_keypoints=True)
def testRandomBlackPatches(self):
preprocessing_options = []
preprocessing_options.append((preprocessor.normalize_image, {
'original_minval': 0,
'original_maxval': 255,
'target_minval': 0,
'target_maxval': 1
}))
preprocessing_options.append((preprocessor.random_black_patches, {
'size_to_image_ratio': 0.5
}))
images = self.createTestImages()
tensor_dict = {fields.InputDataFields.image: images}
blacked_tensor_dict = preprocessor.preprocess(tensor_dict,
preprocessing_options)
blacked_images = blacked_tensor_dict[fields.InputDataFields.image]
images_shape = tf.shape(images)
blacked_images_shape = tf.shape(blacked_images)
with self.test_session() as sess:
(images_shape_, blacked_images_shape_) = sess.run(
[images_shape, blacked_images_shape])
self.assertAllEqual(images_shape_, blacked_images_shape_)
def testRandomResizeMethodWithCache(self):
preprocess_options = []
preprocess_options.append((preprocessor.normalize_image, {
'original_minval': 0,
'original_maxval': 255,
'target_minval': 0,
'target_maxval': 1
}))
preprocess_options.append((preprocessor.random_resize_method, {
'target_size': (75, 150)
}))
self._testPreprocessorCache(preprocess_options,
test_boxes=True,
test_masks=True,
test_keypoints=True)
def testRandomResizeMethod(self):
preprocessing_options = []
preprocessing_options.append((preprocessor.normalize_image, {
'original_minval': 0,
'original_maxval': 255,
'target_minval': 0,
'target_maxval': 1
}))
preprocessing_options.append((preprocessor.random_resize_method, {
'target_size': (75, 150)
}))
images = self.createTestImages()
tensor_dict = {fields.InputDataFields.image: images}
resized_tensor_dict = preprocessor.preprocess(tensor_dict,
preprocessing_options)
resized_images = resized_tensor_dict[fields.InputDataFields.image]
resized_images_shape = tf.shape(resized_images)
expected_images_shape = tf.constant([1, 75, 150, 3], dtype=tf.int32)
with self.test_session() as sess:
(expected_images_shape_, resized_images_shape_) = sess.run(
[expected_images_shape, resized_images_shape])
self.assertAllEqual(expected_images_shape_,
resized_images_shape_)
def testResizeImageWithMasks(self):
"""Tests image resizing, checking output sizes."""
in_image_shape_list = [[60, 40, 3], [15, 30, 3]]
in_masks_shape_list = [[15, 60, 40], [10, 15, 30]]
height = 50
width = 100
expected_image_shape_list = [[50, 100, 3], [50, 100, 3]]
expected_masks_shape_list = [[15, 50, 100], [10, 50, 100]]
for (in_image_shape, expected_image_shape, in_masks_shape,
expected_mask_shape) in zip(in_image_shape_list,
expected_image_shape_list,
in_masks_shape_list,
expected_masks_shape_list):
in_image = tf.random_uniform(in_image_shape)
in_masks = tf.random_uniform(in_masks_shape)
out_image, out_masks, _ = preprocessor.resize_image(
in_image, in_masks, new_height=height, new_width=width)
out_image_shape = tf.shape(out_image)
out_masks_shape = tf.shape(out_masks)
with self.test_session() as sess:
out_image_shape, out_masks_shape = sess.run(
[out_image_shape, out_masks_shape])
self.assertAllEqual(out_image_shape, expected_image_shape)
self.assertAllEqual(out_masks_shape, expected_mask_shape)
def testResizeImageWithMasksTensorInputHeightAndWidth(self):
"""Tests image resizing, checking output sizes."""
in_image_shape_list = [[60, 40, 3], [15, 30, 3]]
in_masks_shape_list = [[15, 60, 40], [10, 15, 30]]
height = tf.constant(50, dtype=tf.int32)
width = tf.constant(100, dtype=tf.int32)
expected_image_shape_list = [[50, 100, 3], [50, 100, 3]]
expected_masks_shape_list = [[15, 50, 100], [10, 50, 100]]
for (in_image_shape, expected_image_shape, in_masks_shape,
expected_mask_shape) in zip(in_image_shape_list,
expected_image_shape_list,
in_masks_shape_list,
expected_masks_shape_list):
in_image = tf.random_uniform(in_image_shape)
in_masks = tf.random_uniform(in_masks_shape)
out_image, out_masks, _ = preprocessor.resize_image(
in_image, in_masks, new_height=height, new_width=width)
out_image_shape = tf.shape(out_image)
out_masks_shape = tf.shape(out_masks)
with self.test_session() as sess:
out_image_shape, out_masks_shape = sess.run(
[out_image_shape, out_masks_shape])
self.assertAllEqual(out_image_shape, expected_image_shape)
self.assertAllEqual(out_masks_shape, expected_mask_shape)
def testResizeImageWithNoInstanceMask(self):
"""Tests image resizing, checking output sizes."""
in_image_shape_list = [[60, 40, 3], [15, 30, 3]]
in_masks_shape_list = [[0, 60, 40], [0, 15, 30]]
height = 50
width = 100
expected_image_shape_list = [[50, 100, 3], [50, 100, 3]]
expected_masks_shape_list = [[0, 50, 100], [0, 50, 100]]
for (in_image_shape, expected_image_shape, in_masks_shape,
expected_mask_shape) in zip(in_image_shape_list,
expected_image_shape_list,
in_masks_shape_list,
expected_masks_shape_list):
in_image = tf.random_uniform(in_image_shape)
in_masks = tf.random_uniform(in_masks_shape)
out_image, out_masks, _ = preprocessor.resize_image(
in_image, in_masks, new_height=height, new_width=width)
out_image_shape = tf.shape(out_image)
out_masks_shape = tf.shape(out_masks)
with self.test_session() as sess:
out_image_shape, out_masks_shape = sess.run(
[out_image_shape, out_masks_shape])
self.assertAllEqual(out_image_shape, expected_image_shape)
self.assertAllEqual(out_masks_shape, expected_mask_shape)
def testResizeToRangePreservesStaticSpatialShape(self):
"""Tests image resizing, checking output sizes."""
in_shape_list = [[60, 40, 3], [15, 30, 3], [15, 50, 3]]
min_dim = 50
max_dim = 100
expected_shape_list = [[75, 50, 3], [50, 100, 3], [30, 100, 3]]
for in_shape, expected_shape in zip(in_shape_list, expected_shape_list):
in_image = tf.random_uniform(in_shape)
out_image, _ = preprocessor.resize_to_range(
in_image, min_dimension=min_dim, max_dimension=max_dim)
self.assertAllEqual(out_image.get_shape().as_list(), expected_shape)
def testResizeToRangeWithDynamicSpatialShape(self):
"""Tests image resizing, checking output sizes."""
in_shape_list = [[60, 40, 3], [15, 30, 3], [15, 50, 3]]
min_dim = 50
max_dim = 100
expected_shape_list = [[75, 50, 3], [50, 100, 3], [30, 100, 3]]
for in_shape, expected_shape in zip(in_shape_list, expected_shape_list):
in_image = tf.placeholder(tf.float32, shape=(None, None, 3))
out_image, _ = preprocessor.resize_to_range(
in_image, min_dimension=min_dim, max_dimension=max_dim)
out_image_shape = tf.shape(out_image)
with self.test_session() as sess:
out_image_shape = sess.run(out_image_shape,
feed_dict={in_image:
np.random.randn(*in_shape)})
self.assertAllEqual(out_image_shape, expected_shape)
def testResizeToRangeWithPadToMaxDimensionReturnsCorrectShapes(self):
in_shape_list = [[60, 40, 3], [15, 30, 3], [15, 50, 3]]
min_dim = 50
max_dim = 100
expected_shape_list = [[100, 100, 3], [100, 100, 3], [100, 100, 3]]
for in_shape, expected_shape in zip(in_shape_list, expected_shape_list):
in_image = tf.placeholder(tf.float32, shape=(None, None, 3))
out_image, _ = preprocessor.resize_to_range(
in_image,
min_dimension=min_dim,
max_dimension=max_dim,
pad_to_max_dimension=True)
self.assertAllEqual(out_image.shape.as_list(), expected_shape)
out_image_shape = tf.shape(out_image)
with self.test_session() as sess:
out_image_shape = sess.run(
out_image_shape, feed_dict={in_image: np.random.randn(*in_shape)})
self.assertAllEqual(out_image_shape, expected_shape)
def testResizeToRangeWithPadToMaxDimensionReturnsCorrectTensor(self):
in_image_np = np.array([[[0, 1, 2]]], np.float32)
ex_image_np = np.array(
[[[0, 1, 2], [123.68, 116.779, 103.939]],
[[123.68, 116.779, 103.939], [123.68, 116.779, 103.939]]], np.float32)
min_dim = 1
max_dim = 2
in_image = tf.placeholder(tf.float32, shape=(None, None, 3))
out_image, _ = preprocessor.resize_to_range(
in_image,
min_dimension=min_dim,
max_dimension=max_dim,
pad_to_max_dimension=True,
per_channel_pad_value=(123.68, 116.779, 103.939))
with self.test_session() as sess:
out_image_np = sess.run(out_image, feed_dict={in_image: in_image_np})
self.assertAllClose(ex_image_np, out_image_np)
def testResizeToRangeWithMasksPreservesStaticSpatialShape(self):
"""Tests image resizing, checking output sizes."""
in_image_shape_list = [[60, 40, 3], [15, 30, 3]]
in_masks_shape_list = [[15, 60, 40], [10, 15, 30]]
min_dim = 50
max_dim = 100
expected_image_shape_list = [[75, 50, 3], [50, 100, 3]]
expected_masks_shape_list = [[15, 75, 50], [10, 50, 100]]
for (in_image_shape, expected_image_shape, in_masks_shape,
expected_mask_shape) in zip(in_image_shape_list,
expected_image_shape_list,
in_masks_shape_list,
expected_masks_shape_list):
in_image = tf.random_uniform(in_image_shape)
in_masks = tf.random_uniform(in_masks_shape)
out_image, out_masks, _ = preprocessor.resize_to_range(
in_image, in_masks, min_dimension=min_dim, max_dimension=max_dim)
self.assertAllEqual(out_masks.get_shape().as_list(), expected_mask_shape)
self.assertAllEqual(out_image.get_shape().as_list(), expected_image_shape)
def testResizeToRangeWithMasksAndPadToMaxDimension(self):
"""Tests image resizing, checking output sizes."""
in_image_shape_list = [[60, 40, 3], [15, 30, 3]]
in_masks_shape_list = [[15, 60, 40], [10, 15, 30]]
min_dim = 50
max_dim = 100
expected_image_shape_list = [[100, 100, 3], [100, 100, 3]]
expected_masks_shape_list = [[15, 100, 100], [10, 100, 100]]
for (in_image_shape,
expected_image_shape, in_masks_shape, expected_mask_shape) in zip(
in_image_shape_list, expected_image_shape_list,
in_masks_shape_list, expected_masks_shape_list):
in_image = tf.placeholder(tf.float32, shape=(None, None, 3))
in_masks = tf.placeholder(tf.float32, shape=(None, None, None))
out_image, out_masks, _ = preprocessor.resize_to_range(
in_image,
in_masks,
min_dimension=min_dim,
max_dimension=max_dim,
pad_to_max_dimension=True)
out_image_shape = tf.shape(out_image)
out_masks_shape = tf.shape(out_masks)
with self.test_session() as sess:
out_image_shape, out_masks_shape = sess.run(
[out_image_shape, out_masks_shape],
feed_dict={
in_image: np.random.randn(*in_image_shape),
in_masks: np.random.randn(*in_masks_shape)
})
self.assertAllEqual(out_image_shape, expected_image_shape)
self.assertAllEqual(out_masks_shape, expected_mask_shape)
def testResizeToRangeWithMasksAndDynamicSpatialShape(self):
"""Tests image resizing, checking output sizes."""
in_image_shape_list = [[60, 40, 3], [15, 30, 3]]
in_masks_shape_list = [[15, 60, 40], [10, 15, 30]]
min_dim = 50
max_dim = 100
expected_image_shape_list = [[75, 50, 3], [50, 100, 3]]
expected_masks_shape_list = [[15, 75, 50], [10, 50, 100]]
for (in_image_shape, expected_image_shape, in_masks_shape,
expected_mask_shape) in zip(in_image_shape_list,
expected_image_shape_list,
in_masks_shape_list,
expected_masks_shape_list):
in_image = tf.placeholder(tf.float32, shape=(None, None, 3))
in_masks = tf.placeholder(tf.float32, shape=(None, None, None))
in_masks = tf.random_uniform(in_masks_shape)
out_image, out_masks, _ = preprocessor.resize_to_range(
in_image, in_masks, min_dimension=min_dim, max_dimension=max_dim)
out_image_shape = tf.shape(out_image)
out_masks_shape = tf.shape(out_masks)
with self.test_session() as sess:
out_image_shape, out_masks_shape = sess.run(
[out_image_shape, out_masks_shape],
feed_dict={
in_image: np.random.randn(*in_image_shape),
in_masks: np.random.randn(*in_masks_shape)
})
self.assertAllEqual(out_image_shape, expected_image_shape)
self.assertAllEqual(out_masks_shape, expected_mask_shape)
def testResizeToRangeWithInstanceMasksTensorOfSizeZero(self):
"""Tests image resizing, checking output sizes."""
in_image_shape_list = [[60, 40, 3], [15, 30, 3]]
in_masks_shape_list = [[0, 60, 40], [0, 15, 30]]
min_dim = 50
max_dim = 100
expected_image_shape_list = [[75, 50, 3], [50, 100, 3]]
expected_masks_shape_list = [[0, 75, 50], [0, 50, 100]]
for (in_image_shape, expected_image_shape, in_masks_shape,
expected_mask_shape) in zip(in_image_shape_list,
expected_image_shape_list,
in_masks_shape_list,
expected_masks_shape_list):
in_image = tf.random_uniform(in_image_shape)
in_masks = tf.random_uniform(in_masks_shape)
out_image, out_masks, _ = preprocessor.resize_to_range(
in_image, in_masks, min_dimension=min_dim, max_dimension=max_dim)
out_image_shape = tf.shape(out_image)
out_masks_shape = tf.shape(out_masks)
with self.test_session() as sess:
out_image_shape, out_masks_shape = sess.run(
[out_image_shape, out_masks_shape])
self.assertAllEqual(out_image_shape, expected_image_shape)
self.assertAllEqual(out_masks_shape, expected_mask_shape)
  def testResizeToRange4DImageTensor(self):
    # resize_to_range accepts a single [H, W, C] image; a batched rank-4
    # tensor must be rejected with a ValueError.
    image = tf.random_uniform([1, 200, 300, 3])
    with self.assertRaises(ValueError):
      preprocessor.resize_to_range(image, 500, 600)
  def testResizeToRangeSameMinMax(self):
    """When min_dimension == max_dimension the output is min_dim x min_dim.

    Square inputs of various sizes must all be rescaled to 320x320.
    """
    in_shape_list = [[312, 312, 3], [299, 299, 3]]
    min_dim = 320
    max_dim = 320
    expected_shape_list = [[320, 320, 3], [320, 320, 3]]
    for in_shape, expected_shape in zip(in_shape_list, expected_shape_list):
      in_image = tf.random_uniform(in_shape)
      out_image, _ = preprocessor.resize_to_range(
          in_image, min_dimension=min_dim, max_dimension=max_dim)
      out_image_shape = tf.shape(out_image)
      with self.test_session() as sess:
        out_image_shape = sess.run(out_image_shape)
      self.assertAllEqual(out_image_shape, expected_shape)
def testResizeToMinDimensionTensorShapes(self):
in_image_shape_list = [[60, 55, 3], [15, 30, 3]]
in_masks_shape_list = [[15, 60, 55], [10, 15, 30]]
min_dim = 50
expected_image_shape_list = [[60, 55, 3], [50, 100, 3]]
expected_masks_shape_list = [[15, 60, 55], [10, 50, 100]]
for (in_image_shape, expected_image_shape, in_masks_shape,
expected_mask_shape) in zip(in_image_shape_list,
expected_image_shape_list,
in_masks_shape_list,
expected_masks_shape_list):
in_image = tf.placeholder(tf.float32, shape=(None, None, 3))
in_masks = tf.placeholder(tf.float32, shape=(None, None, None))
in_masks = tf.random_uniform(in_masks_shape)
out_image, out_masks, _ = preprocessor.resize_to_min_dimension(
in_image, in_masks, min_dimension=min_dim)
out_image_shape = tf.shape(out_image)
out_masks_shape = tf.shape(out_masks)
with self.test_session() as sess:
out_image_shape, out_masks_shape = sess.run(
[out_image_shape, out_masks_shape],
feed_dict={
in_image: np.random.randn(*in_image_shape),
in_masks: np.random.randn(*in_masks_shape)
})
self.assertAllEqual(out_image_shape, expected_image_shape)
self.assertAllEqual(out_masks_shape, expected_mask_shape)
def testResizeToMinDimensionWithInstanceMasksTensorOfSizeZero(self):
"""Tests image resizing, checking output sizes."""
in_image_shape_list = [[60, 40, 3], [15, 30, 3]]
in_masks_shape_list = [[0, 60, 40], [0, 15, 30]]
min_dim = 50
expected_image_shape_list = [[75, 50, 3], [50, 100, 3]]
expected_masks_shape_list = [[0, 75, 50], [0, 50, 100]]
for (in_image_shape, expected_image_shape, in_masks_shape,
expected_mask_shape) in zip(in_image_shape_list,
expected_image_shape_list,
in_masks_shape_list,
expected_masks_shape_list):
in_image = tf.random_uniform(in_image_shape)
in_masks = tf.random_uniform(in_masks_shape)
out_image, out_masks, _ = preprocessor.resize_to_min_dimension(
in_image, in_masks, min_dimension=min_dim)
out_image_shape = tf.shape(out_image)
out_masks_shape = tf.shape(out_masks)
with self.test_session() as sess:
out_image_shape, out_masks_shape = sess.run(
[out_image_shape, out_masks_shape])
self.assertAllEqual(out_image_shape, expected_image_shape)
self.assertAllEqual(out_masks_shape, expected_mask_shape)
  def testResizeToMinDimensionRaisesErrorOn4DImage(self):
    # resize_to_min_dimension expects a single [H, W, C] image; a batched
    # rank-4 tensor must raise ValueError.
    image = tf.random_uniform([1, 200, 300, 3])
    with self.assertRaises(ValueError):
      preprocessor.resize_to_min_dimension(image, 500)
  def testScaleBoxesToPixelCoordinates(self):
    """Tests box scaling, checking scaled values."""
    # Boxes are normalized [ymin, xmin, ymax, xmax]; the expected output is
    # each coordinate multiplied by the 60x40 image size (y * 60, x * 40).
    in_shape = [60, 40, 3]
    in_boxes = [[0.1, 0.2, 0.4, 0.6],
                [0.5, 0.3, 0.9, 0.7]]
    expected_boxes = [[6., 8., 24., 24.],
                      [30., 12., 54., 28.]]
    in_image = tf.random_uniform(in_shape)
    # NOTE(review): `in_boxes` is rebound here from a Python list to a
    # tf.constant; a distinct name would be clearer.
    in_boxes = tf.constant(in_boxes)
    _, out_boxes = preprocessor.scale_boxes_to_pixel_coordinates(
        in_image, boxes=in_boxes)
    with self.test_session() as sess:
      out_boxes = sess.run(out_boxes)
      self.assertAllClose(out_boxes, expected_boxes)
def testScaleBoxesToPixelCoordinatesWithKeypoints(self):
"""Tests box and keypoint scaling, checking scaled values."""
in_shape = [60, 40, 3]
in_boxes = self.createTestBoxes()
in_keypoints = self.createTestKeypoints()
expected_boxes = [[0., 10., 45., 40.],
[15., 20., 45., 40.]]
expected_keypoints = [
[[6., 4.], [12., 8.], [18., 12.]],
[[24., 16.], [30., 20.], [36., 24.]],
]
in_image = tf.random_uniform(in_shape)
_, out_boxes, out_keypoints = preprocessor.scale_boxes_to_pixel_coordinates(
in_image, boxes=in_boxes, keypoints=in_keypoints)
with self.test_session() as sess:
out_boxes_, out_keypoints_ = sess.run([out_boxes, out_keypoints])
self.assertAllClose(out_boxes_, expected_boxes)
self.assertAllClose(out_keypoints_, expected_keypoints)
  def testSubtractChannelMean(self):
    """Tests whether channel means have been subtracted."""
    with self.test_session():
      image = tf.zeros((240, 320, 3))
      means = [1, 2, 3]
      actual = preprocessor.subtract_channel_mean(image, means=means)
      actual = actual.eval()
      # A zero image minus per-channel means [1, 2, 3] must yield constant
      # channels of -1, -2 and -3 respectively.
      self.assertTrue((actual[:, :, 0] == -1).all())
      self.assertTrue((actual[:, :, 1] == -2).all())
      self.assertTrue((actual[:, :, 2] == -3).all())
  def testOneHotEncoding(self):
    """Tests one hot encoding of multiclass labels."""
    with self.test_session():
      labels = tf.constant([1, 4, 2], dtype=tf.int32)
      one_hot = preprocessor.one_hot_encoding(labels, num_classes=5)
      one_hot = one_hot.eval()
      # The per-label one-hot rows collapse into a single multi-hot vector:
      # classes 1, 2 and 4 are set; classes 0 and 3 are not.
      self.assertAllEqual([0, 1, 1, 0, 1], one_hot)
def testSSDRandomCropWithCache(self):
preprocess_options = [
(preprocessor.normalize_image, {
'original_minval': 0,
'original_maxval': 255,
'target_minval': 0,
'target_maxval': 1
}),
(preprocessor.ssd_random_crop, {})]
self._testPreprocessorCache(preprocess_options,
test_boxes=True,
test_masks=False,
test_keypoints=False)
def testSSDRandomCrop(self):
preprocessing_options = [
(preprocessor.normalize_image, {
'original_minval': 0,
'original_maxval': 255,
'target_minval': 0,
'target_maxval': 1
}),
(preprocessor.ssd_random_crop, {})]
images = self.createTestImages()
boxes = self.createTestBoxes()
labels = self.createTestLabels()
tensor_dict = {
fields.InputDataFields.image: images,
fields.InputDataFields.groundtruth_boxes: boxes,
fields.InputDataFields.groundtruth_classes: labels,
}
distorted_tensor_dict = preprocessor.preprocess(tensor_dict,
preprocessing_options)
distorted_images = distorted_tensor_dict[fields.InputDataFields.image]
distorted_boxes = distorted_tensor_dict[
fields.InputDataFields.groundtruth_boxes]
images_rank = tf.rank(images)
distorted_images_rank = tf.rank(distorted_images)
boxes_rank = tf.rank(boxes)
distorted_boxes_rank = tf.rank(distorted_boxes)
with self.test_session() as sess:
(boxes_rank_, distorted_boxes_rank_, images_rank_,
distorted_images_rank_) = sess.run(
[boxes_rank, distorted_boxes_rank, images_rank,
distorted_images_rank])
self.assertAllEqual(boxes_rank_, distorted_boxes_rank_)
self.assertAllEqual(images_rank_, distorted_images_rank_)
def testSSDRandomCropWithMultiClassScores(self):
preprocessing_options = [(preprocessor.normalize_image, {
'original_minval': 0,
'original_maxval': 255,
'target_minval': 0,
'target_maxval': 1
}), (preprocessor.ssd_random_crop, {})]
images = self.createTestImages()
boxes = self.createTestBoxes()
labels = self.createTestLabels()
multiclass_scores = self.createTestMultiClassScores()
tensor_dict = {
fields.InputDataFields.image: images,
fields.InputDataFields.groundtruth_boxes: boxes,
fields.InputDataFields.groundtruth_classes: labels,
fields.InputDataFields.multiclass_scores: multiclass_scores,
}
preprocessor_arg_map = preprocessor.get_default_func_arg_map(
include_multiclass_scores=True)
distorted_tensor_dict = preprocessor.preprocess(
tensor_dict, preprocessing_options, func_arg_map=preprocessor_arg_map)
distorted_images = distorted_tensor_dict[fields.InputDataFields.image]
distorted_boxes = distorted_tensor_dict[
fields.InputDataFields.groundtruth_boxes]
distorted_multiclass_scores = distorted_tensor_dict[
fields.InputDataFields.multiclass_scores]
images_rank = tf.rank(images)
distorted_images_rank = tf.rank(distorted_images)
boxes_rank = tf.rank(boxes)
distorted_boxes_rank = tf.rank(distorted_boxes)
multiclass_scores_rank = tf.rank(multiclass_scores)
distorted_multiclass_scores_rank = tf.rank(distorted_multiclass_scores)
with self.test_session() as sess:
(boxes_rank_, distorted_boxes_, distorted_boxes_rank_, images_rank_,
distorted_images_rank_, multiclass_scores_rank_,
distorted_multiclass_scores_,
distorted_multiclass_scores_rank_) = sess.run([
boxes_rank, distorted_boxes, distorted_boxes_rank, images_rank,
distorted_images_rank, multiclass_scores_rank,
distorted_multiclass_scores, distorted_multiclass_scores_rank
])
self.assertAllEqual(boxes_rank_, distorted_boxes_rank_)
self.assertAllEqual(images_rank_, distorted_images_rank_)
self.assertAllEqual(multiclass_scores_rank_,
distorted_multiclass_scores_rank_)
self.assertAllEqual(distorted_boxes_.shape[0],
distorted_multiclass_scores_.shape[0])
def testSSDRandomCropPad(self):
images = self.createTestImages()
boxes = self.createTestBoxes()
labels = self.createTestLabels()
preprocessing_options = [
(preprocessor.normalize_image, {
'original_minval': 0,
'original_maxval': 255,
'target_minval': 0,
'target_maxval': 1
}),
(preprocessor.ssd_random_crop_pad, {})]
tensor_dict = {
fields.InputDataFields.image: images,
fields.InputDataFields.groundtruth_boxes: boxes,
fields.InputDataFields.groundtruth_classes: labels,
}
distorted_tensor_dict = preprocessor.preprocess(tensor_dict,
preprocessing_options)
distorted_images = distorted_tensor_dict[fields.InputDataFields.image]
distorted_boxes = distorted_tensor_dict[
fields.InputDataFields.groundtruth_boxes]
images_rank = tf.rank(images)
distorted_images_rank = tf.rank(distorted_images)
boxes_rank = tf.rank(boxes)
distorted_boxes_rank = tf.rank(distorted_boxes)
with self.test_session() as sess:
(boxes_rank_, distorted_boxes_rank_, images_rank_,
distorted_images_rank_) = sess.run([
boxes_rank, distorted_boxes_rank, images_rank, distorted_images_rank
])
self.assertAllEqual(boxes_rank_, distorted_boxes_rank_)
self.assertAllEqual(images_rank_, distorted_images_rank_)
def testSSDRandomCropFixedAspectRatioWithCache(self):
preprocess_options = [
(preprocessor.normalize_image, {
'original_minval': 0,
'original_maxval': 255,
'target_minval': 0,
'target_maxval': 1
}),
(preprocessor.ssd_random_crop_fixed_aspect_ratio, {})]
self._testPreprocessorCache(preprocess_options,
test_boxes=True,
test_masks=False,
test_keypoints=False)
def _testSSDRandomCropFixedAspectRatio(self,
include_label_scores,
include_multiclass_scores,
include_instance_masks,
include_keypoints):
images = self.createTestImages()
boxes = self.createTestBoxes()
labels = self.createTestLabels()
preprocessing_options = [(preprocessor.normalize_image, {
'original_minval': 0,
'original_maxval': 255,
'target_minval': 0,
'target_maxval': 1
}), (preprocessor.ssd_random_crop_fixed_aspect_ratio, {})]
tensor_dict = {
fields.InputDataFields.image: images,
fields.InputDataFields.groundtruth_boxes: boxes,
fields.InputDataFields.groundtruth_classes: labels,
}
if include_label_scores:
label_scores = self.createTestLabelScores()
tensor_dict[fields.InputDataFields.groundtruth_label_scores] = (
label_scores)
if include_multiclass_scores:
multiclass_scores = self.createTestMultiClassScores()
tensor_dict[fields.InputDataFields.multiclass_scores] = (
multiclass_scores)
if include_instance_masks:
masks = self.createTestMasks()
tensor_dict[fields.InputDataFields.groundtruth_instance_masks] = masks
if include_keypoints:
keypoints = self.createTestKeypoints()
tensor_dict[fields.InputDataFields.groundtruth_keypoints] = keypoints
preprocessor_arg_map = preprocessor.get_default_func_arg_map(
include_label_scores=include_label_scores,
include_multiclass_scores=include_multiclass_scores,
include_instance_masks=include_instance_masks,
include_keypoints=include_keypoints)
distorted_tensor_dict = preprocessor.preprocess(
tensor_dict, preprocessing_options, func_arg_map=preprocessor_arg_map)
distorted_images = distorted_tensor_dict[fields.InputDataFields.image]
distorted_boxes = distorted_tensor_dict[
fields.InputDataFields.groundtruth_boxes]
images_rank = tf.rank(images)
distorted_images_rank = tf.rank(distorted_images)
boxes_rank = tf.rank(boxes)
distorted_boxes_rank = tf.rank(distorted_boxes)
with self.test_session() as sess:
(boxes_rank_, distorted_boxes_rank_, images_rank_,
distorted_images_rank_) = sess.run(
[boxes_rank, distorted_boxes_rank, images_rank,
distorted_images_rank])
self.assertAllEqual(boxes_rank_, distorted_boxes_rank_)
self.assertAllEqual(images_rank_, distorted_images_rank_)
  def testSSDRandomCropFixedAspectRatio(self):
    # Baseline variant: no optional tensors included.
    self._testSSDRandomCropFixedAspectRatio(include_label_scores=False,
                                            include_multiclass_scores=False,
                                            include_instance_masks=False,
                                            include_keypoints=False)
  def testSSDRandomCropFixedAspectRatioWithMultiClassScores(self):
    # Variant with per-box multiclass score tensors included.
    self._testSSDRandomCropFixedAspectRatio(include_label_scores=False,
                                            include_multiclass_scores=True,
                                            include_instance_masks=False,
                                            include_keypoints=False)
  def testSSDRandomCropFixedAspectRatioWithMasksAndKeypoints(self):
    # Variant with instance masks and keypoints included.
    self._testSSDRandomCropFixedAspectRatio(include_label_scores=False,
                                            include_multiclass_scores=False,
                                            include_instance_masks=True,
                                            include_keypoints=True)
  def testSSDRandomCropFixedAspectRatioWithLabelScoresMasksAndKeypoints(self):
    # Fullest variant: label scores, instance masks and keypoints included.
    self._testSSDRandomCropFixedAspectRatio(include_label_scores=True,
                                            include_multiclass_scores=False,
                                            include_instance_masks=True,
                                            include_keypoints=True)
  def testConvertClassLogitsToSoftmax(self):
    # Temperature-scaled softmax: logits are divided by `temperature` before
    # the softmax, so [1, 0] at T=2 gives softmax([0.5, 0]) ~= [0.622, 0.378].
    multiclass_scores = tf.constant(
        [[1.0, 0.0], [0.5, 0.5], [1000, 1]], dtype=tf.float32)
    temperature = 2.0
    converted_multiclass_scores = (
        preprocessor.convert_class_logits_to_softmax(
            multiclass_scores=multiclass_scores, temperature=temperature))
    # Extra outer nesting because sess.run is given a one-element fetch list.
    expected_converted_multiclass_scores = [[[0.62245935, 0.37754068],
                                             [0.5, 0.5], [1, 0]]]
    with self.test_session() as sess:
      (converted_multiclass_scores_) = sess.run([converted_multiclass_scores])
      self.assertAllClose(converted_multiclass_scores_,
                          expected_converted_multiclass_scores)
if __name__ == '__main__':
  tf.test.main()  # Discover and run all test cases in this module.
| StarcoderdataPython |
176307 | <reponame>PullRequest-Agent/paramak
"""
This python script demonstrates the creation of a breeder blanket from points
"""
import paramak
def main(filename="blanket_from_points.stp"):
    """Build a 180-degree breeder blanket from a point outline and export it.

    The outline alternates straight and spline connections between points,
    and the resulting solid is written to an STP file at `filename`.
    """
    outline = [
        (538, 305, "straight"),
        (538, -305, "straight"),
        (322, -305, "spline"),
        (470, 0, "spline"),
        (322, 305, "straight"),
    ]
    blanket = paramak.RotateMixedShape(rotation_angle=180, points=outline)
    blanket.export_stp(filename=filename)


if __name__ == "__main__":
    main()
| StarcoderdataPython |
53257 | <filename>keras/datasets/sin.py
# -*- coding: utf-8 -*-
import cPickle
import sys, os
import numpy as np
# written by zhaowuxia @ 2015/5/30
# used to generate noisy sine-wave datasets
def generate_data(sz, T, diff_start, diff_T):
    """Build `sz` noisy sine sequences of length T+1 and split off the last step.

    Returns (X, Y) where X has shape (sz, T, 1) (the first T samples of each
    sequence) and Y has shape (sz, 1) (the final sample).  When diff_start is
    true each sequence gets a random phase offset; when diff_T is true each
    sequence gets a random cycle count in 1..10, otherwise 5 cycles.  A small
    constant offset (uniform in [0, 1/T)) is added to every point of a
    sequence.
    """
    two_pi = 2 * np.pi
    base = np.array(range(T + 1)).astype(float) / T
    sequences = []
    for _ in range(sz):
        phase = np.random.random() if diff_start else 0
        cycles = (np.random.randint(10) + 1) if diff_T else 5
        sequences.append(np.sin((base + phase) * two_pi * cycles) + np.random.random() / T)
    cube = np.array(sequences).reshape(sz, T + 1, 1)
    return (cube[:, :T], cube[:, -1])
def load_data(sz, T, path="sin.pkl", diff_start = False, diff_T = False):
    """Load a cached (X, Y) sine dataset from `path`, generating it on a miss.

    The generated dataset is pickled to `path` so subsequent calls reuse it.
    Raises AssertionError if a cached file was generated with a different
    sz/T than requested (a stale cache would otherwise silently return
    wrong-sized data).
    """
    if not os.path.exists(path):
        # Original printed a tuple, e.g. "('sin.pkl', 'not exists')"; use a
        # single formatted string instead (this file is Python 2).
        print('%s does not exist; generating data' % path)
        data = generate_data(sz, T, diff_start, diff_T)
        # Context managers close the handles; the original left the file
        # objects dangling after dump/load.
        with open(path, 'wb') as f:
            cPickle.dump(data, f)
    else:
        print('%s exists; loading cached data' % path)
        with open(path, 'rb') as f:
            data = cPickle.load(f)
    assert(data[0].shape[0] == sz)
    assert(data[0].shape[1] == T)
    return data  # (X, Y)
| StarcoderdataPython |
4816752 | <filename>randomizer/v_randomizer.py
import argparse, sys
import string
import random
#--------------#
# Argument #
#--------------#
def get_args():
    """Parse the command line and return the populated argparse namespace."""
    arg_parser = argparse.ArgumentParser()
    for flags, dest, help_text in (
            (('-o', '--output'), 'output', 'Path of the output file to be created'),
            (('-l', '--seqlen'), 'seqlen', 'The length of random protein sequences to be generated'),
            (('-n', '--seqnum'), 'seqnum', 'The number of random protein sequences to be generated')):
        arg_parser.add_argument(*flags, dest=dest, help=help_text)
    return arg_parser.parse_args()
if __name__ == '__main__':
    args = get_args()
    seqlen = int(args.seqlen)
    seqnum = int(args.seqnum)

    # 20 standard amino acids plus 'X' (unknown), with per-residue weights
    # (random.choices normalizes them; they need not sum to 100).
    aa_fullList = ['A', 'R', 'N', 'D', 'C', 'Q', 'E', 'G', 'H', 'I', 'L', 'K', 'M', 'F', 'P', 'S', 'T', 'W', 'Y', 'V', 'X']
    aa_weights = [6.64, 4.22, 5.2, 5.04, 2.56, 3.72, 5.28, 6.2, 1.92, 5.55, 9.13, 5.86, 2.33, 4.45, 4.28, 6.7, 7.07, 1.35, 3.99, 7.34, 1.16]

    # Write one FASTA record per random sequence.  The original kept three
    # parallel counters (`counter`, the loop variable plus a dead `i += 1`)
    # and an unused `random_list`; a single 1-based loop index suffices.
    with open(args.output, 'w') as f:
        for seq_index in range(1, seqnum + 1):
            sequence = ''.join(random.choices(aa_fullList, weights=aa_weights, k=seqlen))
            f.write(f">Sequence {seq_index}\n{sequence}\n")
| StarcoderdataPython |
3244344 | <gh_stars>1-10
#!/usr/bin/python2.4
# encoding: utf-8
"""
db.py
High-level functions for interacting with the ddG database.
Created by <NAME> 2012.
Copyright (c) 2012 __UCSF__. All rights reserved.
"""
import sys
import os
import string
import glob
import traceback
import random
import datetime
import zipfile
import StringIO
import gzip
import pprint
import json
from io import BytesIO
try:
import pandas
except ImportError:
pass
try:
import matplotlib
# A non-interactive backend to generate PNGs. matplotlib.use('PS') is used for PS files. If used, this command must be run before importing matplotlib.pyplot.
matplotlib.use("AGG")
import matplotlib.pyplot as plt
import textwrap
except ImportError:
plt=None
from sqlalchemy import and_, func
from sqlalchemy.orm import load_only
from klab.bio.pdb import PDB
from klab.bio.basics import residue_type_3to1_map as aa1, dssp_elision
from klab.bio.basics import Mutation
from klab.bio.pdbtm import PDBTM
from klab.fs.fsio import write_file, read_file, open_temp_file
from klab.process import Popen
from klab.constants import rosetta_weights
from klab import colortext
from klab.stats.misc import get_xy_dataset_statistics
from klab.general.strutil import remove_trailing_line_whitespace
from klab.hash.md5 import get_hexdigest
from klab.fs.fsio import read_file, get_file_lines, write_file, write_temp_file
from klab.db.sqlalchemy_interface import row_to_dict, get_or_create_in_transaction, get_single_record_from_query
from klab.rosetta.input_files import Mutfile, Resfile
from kddg.api import dbi
from kddg.api.data import DataImportInterface, json_dumps
import kddg.api.schema as dbmodel
from kddg.api.layers import *
from kddg.api import settings
sys_settings = settings.load()
# Exception types used by the ddG API code in this module.
class FatalException(Exception): pass
class PartialDataException(Exception): pass
class SanityCheckException(Exception): pass
DeclarativeBase = dbmodel.DeclarativeBase
class MutationSet(object):
    '''A simple ordered container for point mutations.

    Each mutation is stored as a (chain_ID, residue_ID, wildtype_AA,
    mutant_AA) tuple in self.mutations, in insertion order.

    This class is a leftover from Lin's work and should probably be folded
    into an API function along with the functions that call this. A function
    exists elsewhere to create a list of all mutations for a PDB file (see
    the ubiquitin complex project) which could replace most of this code.
    '''

    def __init__(self):
        # Ordered list of (chainID, residueID, wildtypeAA, mutantAA) tuples.
        self.mutations = []

    def addMutation(self, chainID, residueID, wildtypeAA, mutantAA):
        '''Record a single point mutation.'''
        self.mutations.append((chainID, residueID, wildtypeAA, mutantAA))

    def getChains(self):
        '''Return the sorted, de-duplicated list of chain IDs in the mutations.'''
        return sorted({m[0] for m in self.mutations})
class ddG(object):
'''This is the base database API class. It should not be used directly to create interface objects. Instead, use one
of the derived classes e.g. MonomericStabilityDDGInterface or the clean user API which hides internal functionality.
The clean API is instantiated as in the example below:
from kddg.api.monomer import get_interface as get_protein_stability_interface
stability_api = get_protein_stability_interface(read_file('ddgdb.pw'))
stability_api.help()
Objects of this class and derived subclasses has three main members:
self.DDG_db - a database interface used to interact directly with the database via MySQL commands
self.DDG_db_utf - the same interface but with UTF support. This should be used when dealing with UTF fields e.g. publication data
self.prediction_data_path - this is the location on the file server where output form jobs of the derived class type (e.g. binding affinity jobs) should be stored.
'''
GET_JOB_FN_CALL_COUNTER_MAX = 10
def __init__(self, passwd = None, username = sys_settings.database.username, hostname = sys_settings.database.hostname, rosetta_scripts_path = None, rosetta_database_path = None, port = sys_settings.database.port, file_content_buffer_size = None):
if passwd:
passwd = passwd.strip()
self.DDG_db = dbi.ddGDatabase(passwd = <PASSWORD>, username = username, hostname = hostname, port = port)
self.DDG_db_utf = dbi.ddGDatabase(passwd = <PASSWORD>, username = username, hostname = hostname, use_utf = True, port = port)
self.prediction_data_path = None
self.rosetta_scripts_path = rosetta_scripts_path
self.rosetta_database_path = rosetta_database_path
self.PredictionTable = self._get_sqa_prediction_table()
# Before continuing, make sure that the SQLAlchemy definitions match the table definitions
dbmodel.test_schema_against_database_instance(self.DDG_db)
# This counter is used to check the number of times get_job is called and raise an exception if this exceeds a certain amount
# If the API is misused then get_job may be called infinitely on one job - this is meant to protect against that
self._get_job_fn_call_counter = {}
self._get_job_fn_call_counter_max = ddG.GET_JOB_FN_CALL_COUNTER_MAX
# Caching dictionaries
self.cached_score_method_details = None
self.prediction_scores_cache = {}
# Create an instance of the import API
try:
self.importer = DataImportInterface.get_interface_with_config_file()
except Exception, e:
colortext.warning('The data import interface could not be set up. Some features in the API rely on this interface. Please check your configuration file.\n{0}\n{1}'.format(e, traceback.format_exc()))
    def __del__(self):
        # Intentionally a no-op: database connections are not closed on destruction
        # (the previous close calls are kept below for reference).
        pass #self.DDG_db.close() #self.ddGDataDB.close()
#########################################################################################
## Public API
#########################################################################################
#########################################################################################
## Broken API layer
##
## This section contains useful functions which need to be updated to work with the new
## schema or code
#########################################################################################
#== Alien functions ====================================================================
#==
#== These functions do not belong here.
@alien
def write_abacus_graph(self, graph_filename, graph_title, labels, data):
'''NOTE: This function should be generalized and moved into the klab repository.
This is a simple function wrapper around create_abacus_graph which writes the graph to file.'''
byte_stream = self.create_abacus_graph(graph_title, labels, data)
write_file(graph_filename, byte_stream.getvalue(), 'wb')
    @alien
    def create_abacus_graph(self, graph_title, labels, data):
        '''NOTE: This function should be generalized and moved into the klab repository.
        This function creates an 'abacus graph' from a set of data. Even though this is technically a scatterplot,
        I call this an abacus graph because it is looks like rows of beads on lines.
        The function takes a graph title, a set of labels (one per row of data), and an array of data where each row
        should have the same number of columns.
        A byte stream for the graph (currently PNG format but we could parameterize this) is returned. This may be
        written directly to a binary file or streamed for online display.
        Returns None if matplotlib is unavailable (plt is falsy).
        '''
        # plt is presumably None when matplotlib failed to import at module load - TODO confirm against the module imports.
        if plt:
            assert(data)
            image_dpi = 300.0
            horizontal_margin = 400.0 # an estimate of the horizontal space not used by the graph
            horizontal_spacing = 100.0 # an estimate of the horizontal space between points on the same line
            vertical_margin = 100.0 # an estimate of the vertical space not used by the graph
            vertical_spacing = 50.0 # the rough amount of pixels between abacus lines
            point_size = 50 # the size of datapoints in points^2.
            y_offset = 1.0
            # Each data row is (ddg_value, [0/1, 0/1, ...]); all rows must have the same number of columns.
            points_per_line = set([len(line[1]) for line in data])
            assert(len(points_per_line) == 1)
            points_per_line = points_per_line.pop()
            assert(len(labels) == points_per_line)
            number_of_lines = float(len(data))
            number_of_labels = float(len(labels))
            height_in_inches = max(600/image_dpi, (vertical_margin + (vertical_spacing * number_of_lines)) / image_dpi) # Use a minimum of 600 pixels in height. This avoids graphs with a small number of lines (<=10) not to become squashed.
            width_in_inches = max(700/image_dpi, (horizontal_margin + (horizontal_spacing * points_per_line)) / image_dpi) # Use a minimum of 600 pixels in width. This avoids graphs with a small number of labels (e.g. 1) not to become squashed.
            graph_color_scheme = matplotlib.cm.jet
            #y_offset = (1.75 * data_length) / 128
            #image_dpi = (400 * data_length) / 128
            #image_dpi = 400
            #point_sizes = {1 : 100, 64: 75, 128: 50, 192: 25, 256: 10}
            #index = round(data_length / 64.0) * 64
            #point_size = point_sizes.get(index, 10)
            fig = plt.figure(figsize=(width_in_inches, height_in_inches)) # figsize is specified in inches - w, h
            fig.set_dpi(image_dpi)
            # Create three identically-sized lists. Each triple is of an x-coordinate, a y-coordinate, and the DDG value
            # and corresponds to a 1 in the matrix i.e. we should draw a point/abacus bead at these coordinates.
            x_values = []
            y_values = []
            x_coordinate_skip = 3 # x-axis distance between two points
            y_coordinate_skip = 7 # y-axis distance between two points
            ddg_values = []
            y = 0
            for line in data:
                x = 0
                y += y_coordinate_skip
                w = line[0]
                #plt.text(30, y, str('%.3f' % line[0]), fontdict=None, withdash=True, fontsize=9)
                for point in line[1]:
                    x += x_coordinate_skip
                    if point == 1:
                        x_values.append(x)
                        y_values.append(y)
                        ddg_values.append(line[0])
            # Draw the scatter plot
            plt.scatter(x_values, y_values, c=ddg_values, s=point_size, cmap=graph_color_scheme, edgecolors='none', zorder=99)
            # Define the limits of the cartesian coordinates. Add extra space on the right for the DDG values.
            extra_space = 1.3
            plt.axis((0, (points_per_line + 1 + extra_space) * x_coordinate_skip, -15, (y_coordinate_skip * number_of_lines) + 15))
            plt.tick_params(
                axis='both',          # changes apply to the x-axis
                which='both',         # both major and minor ticks are affected
                bottom='off',         # ticks along the bottom edge are off
                left='off',           # ticks along the left edge are off
                labelleft='off',      # labels along the bottom edge are off
                top='off',            # ticks along the top edge are off
                labelbottom='off')    # labels along the bottom edge are off
            # Add the mutation labels at the botom of the diagram
            x = 1.9
            for i in range(len(labels)):
                l = labels[i]
                plt.text(x, -5 + ((i % 2) * -5), l, fontdict=None, withdash=True, fontsize=6)
                x += x_coordinate_skip
            # Draw one horizontal guide line per data row and annotate every third row
            # with its DDG value; also draw a thicker black line where the values cross zero.
            added_zero_line = False
            last_y_value = 0
            y = 0
            for line in data:
                x = 0
                y += 7
                plt.plot([1, 25], [y, y], color='#999999', linestyle='-', linewidth=0.1)
                # Add a DDG value on every third line
                if y % 21 == 7:
                    plt.text(((points_per_line + 0.6) * x_coordinate_skip) , y-y_offset, str('%.3f' % line[0]), fontdict=None, withdash=True, fontsize=6)
                if not added_zero_line:
                    if line[0] > 0:
                        plt.plot([1, 25], [0.5 + ((y + last_y_value) / 2), 0.5 + ((y + last_y_value) / 2)], color='k', linestyle='-', linewidth=1)
                        added_zero_line = True
                    else:
                        last_y_value = y
            plt.text(((points_per_line + 0.6) * x_coordinate_skip), y-y_offset, str('%.3f' % line[0]), fontdict=None, withdash=True, fontsize=6)
            # Set the colorbar font size and then add a colorbar
            #cbar.ax.tick_params(labelsize=6)
            #plt.colorbar(use_gridspec=True)
            #ax = fig.add_subplot(111)
            # Add a title. Note: doing this after the colorbar code below messes up the alignment.
            # Adjust the wrap length to the width of the graph
            wrap_length = 40 + max(0, (points_per_line - 3) * 14)
            graph_title = "\n".join(textwrap.wrap(graph_title, wrap_length))
            plt.title(graph_title, fontdict={'fontsize' : 6})
            from mpl_toolkits.axes_grid1 import make_axes_locatable
            ax = fig.add_subplot(111)
            divider = make_axes_locatable(ax)
            cax = divider.append_axes("right", size="5%", pad=0.05) # pad specifies the padding between the right of the graph and the left of the colorbar, size seems to specify the width of the colobar relative to the graph
            CS3 = plt.contourf([[0,0],[0,0]], ddg_values, cmap=graph_color_scheme)
            #plt.colorbar(CS3)
            #cbar = fig.colorbar(CS3, format='%.2f')
            cbar = fig.colorbar(CS3, format='%.2f', cax=cax)
            cbar.set_label('$\Delta\Delta$G',size=6)
            cbar.ax.tick_params(labelsize=5)
            # Use the tight_layout command to tighten up the spaces. The pad, w_pad, and h_pad parameters are specified in fraction of fontsize.
            plt.tight_layout(pad=0.5)
            #quadmesh = ax.pcolormesh(theta,phi,data)
            #cb = fig.colorbar(quadmesh,ax=ax, shrink=.5, pad=.2, aspect=10)
            #cax = ax.imshow(ddg_values, interpolation='nearest', cmap=matplotlib.cm.coolwarm)
            #cbar = fig.colorbar(cax, ticks=[-1, 0, 1])
            #surf = ax.contourf(X,Y,Z, 8, cmap=cm.jet)
            #cbar = fig.colorbar(surf, use_gridspec=True, shrink=0.5, aspect=20, fraction=.12,pad=.02)
            #cbar.set_label('Activation',size=18)
            # Serialize the figure to an in-memory PNG and release the figure resources.
            byte_stream = BytesIO()
            plt.savefig(byte_stream, dpi=image_dpi, format="png")
            plt.close(fig)
            return byte_stream
        else:
            return None
    @alien
    def get_flattened_prediction_results(self, PredictionSet):
        '''This is defined here as an API function but should be defined as a stored procedure.

        Returns a pair (records, score_ids) where records is the list of joined
        prediction/experiment/score rows for the given PredictionSet (Kellogg
        protocol-16 and Noah 8A scores only) and score_ids maps the keys
        'kellogg' and 'noah8A' to the corresponding ScoreMethod IDs.
        '''
        # @todo: this is the monomeric stability implementation - move this into that API
        #Ubiquitin scan: 1UBQ p16
        #Prediction.Scores no longer exists
        # Resolve the two hard-coded score methods used by this report.
        kellogg_score_id = self.get_score_method_id('global', method_type = 'protocol 16', method_authors = 'kellogg', fuzzy = True)
        noah_score_id = self.get_score_method_id('local', method_type = 'position', method_parameters = '8Å radius', method_authors = '<NAME>', fuzzy = False)
        score_ids = {}
        score_ids['kellogg'] = kellogg_score_id
        score_ids['noah8A'] = noah_score_id
        # The inner query flattens each experiment's mutations into a single
        # comma-separated string; the outer query joins that to the predictions
        # and their overall (StructureID=-1) DDG scores.
        records = self.DDG_db.execute_select('''
SELECT Prediction.ID AS PredictionID, Prediction.ExperimentID, Experiment.PDBFileID, ExperimentMutations.FlattenedMutations, TIMEDIFF(Prediction.EndDate, Prediction.StartDate) AS TimeTaken, PredictionStructureScore.ScoreMethodID, PredictionStructureScore.DDG
FROM Prediction INNER JOIN
(
  SELECT ExperimentID, GROUP_CONCAT(Mutation SEPARATOR ', ') AS FlattenedMutations FROM
  (
    SELECT ExperimentID, CONCAT(Chain, ' ', WildTypeAA, ResidueID, MutantAA) As Mutation FROM ExperimentMutation
  ) AS FlattenedMutation
  GROUP BY ExperimentID
) AS ExperimentMutations
ON Prediction.ExperimentID=ExperimentMutations.ExperimentID
INNER JOIN Experiment ON Prediction.ExperimentID=Experiment.ID
INNER JOIN PredictionStructureScore ON Prediction.ID=PredictionStructureScore.PredictionID
WHERE Prediction.PredictionSet=%s AND Prediction.Status="done" AND ScoreType="DDG" AND StructureID=-1 AND (ScoreMethodID=%s OR ScoreMethodID=%s)
ORDER BY ScoreMethodID''', parameters=(PredictionSet, kellogg_score_id, noah_score_id))
        return records, score_ids
#== Broken functions ====================================================================
#@informational or part of a filter API
    @brokenfn
    def get_publications_for_result_set(self, result_set):
        '''This should be fixed once the filter API has been rewritten to work with the new DB schema. It returns a list of publications associated with the filter result set.

        NOTE: this function is deliberately disabled - the unconditional raise
        below makes everything after it unreachable. The old implementation is
        kept for reference until it is ported to the new schema.
        '''
        raise Exception('')
        # --- Unreachable legacy implementation below ---
        from ddgfilters import ExperimentResultSet, StructureResultSet
        if result_set:
            structures = None
            experiments = None
            # Derive experiment and structure result sets from whatever set type was passed in.
            if result_set.isOfClass(ExperimentResultSet):
                experiments = result_set
            elif ExperimentResultSet in result_set.__class__.allowed_restrict_sets:
                experiments, experiment_map = result_set.getExperiments()
            if result_set.isOfClass(StructureResultSet):
                structures = result_set
            elif StructureResultSet in result_set.__class__.allowed_restrict_sets:
                structures, structure_map = result_set.getStructures()
            if structures:
                colortext.printf("\nRelated publications for structures:", "lightgreen")
                for id in sorted(structures.IDs):
                    pubs = self.DDG_db.callproc("GetPublications", parameters=(id,))
                    print(id)
                    for pub in pubs:
                        print("\t%s: %s" % (pub["Type"], pub["PublicationID"]))
            if experiments:
                colortext.printf("\nRelated publications for experiments:", "lightgreen")
                for id in sorted(experiments.IDs):
                    pubs = self.DDG_db.callproc("GetExperimentPublications", parameters=(id,))
                    print(id)
                    for pub in pubs:
                        print("\t%s: %s" % (pub["Type"], pub["SourceLocation.ID"]))
                experimentsets = [e[0] for e in self.DDG_db.execute_select("SELECT DISTINCT Source FROM Experiment WHERE ID IN (%s)" % ','.join(map(str, list(experiments.IDs))), cursorClass = dbi.StdCursor)]
                if experimentsets:
                    colortext.printf("\nRelated publications for experiment-set sources:", "lightgreen")
                    for id in sorted(experimentsets):
                        print(id)
                        pubs = self.DDG_db.execute_select("SELECT ID, Type FROM SourceLocation WHERE SourceID=%s", parameters = (id,))
                        for pub in pubs:
                            print("\t%s: %s" % (pub["Type"], pub["ID"]))
        else:
            raise Exception("Empty result set.")
#@analysis_api
    @brokenfn
    def analyze(self, prediction_result_set, outpath = os.getcwd()):
        '''This function needs to be rewritten and renamed. It calls the analysis module (which creates LaTeX reports) to generate correlation and MAE graphs.

        NOTE: this function is deliberately disabled - the unconditional raise
        below makes everything after it unreachable. The old implementation is
        kept for reference until the analysis import is fixed.
        '''
        raise Exception('The import of analysis was commented out - presumably some change in DB structure or API broke the import. This code probably needs to be fixed.')
        # --- Unreachable legacy implementation below ---
        import analysis
        PredictionIDs = sorted(list(prediction_result_set.getFilteredIDs()))
        colortext.printf("Analyzing %d records:" % len(PredictionIDs), "lightgreen")
        #results = self.DDG_db.execute_select("SELECT ID, ExperimentID, ddG FROM Prediction WHERE ID IN (%s)" % join(map(str, PredictionIDs), ","))
        #for r in results:
        #    r["ddG"] = pickle.loads(r["ddG"])
        #    predicted_score = r["ddG"]["data"]["ddG"]
        #    experimental_scores = [expscore["ddG"] for expscore in self.DDG_db.callproc("GetScores", parameters = r["ExperimentID"])]
        #    mean_experimental_score = float(sum(experimental_scores)) / float(len(experimental_scores))
        results = self.DDG_db.execute_select("SELECT ID, ExperimentID, ddG FROM Prediction WHERE ID IN (%s)" % ','.join(map(str, PredictionIDs)))
        analysis.plot(analysis._R_mean_unsigned_error, analysis._createMAEFile, results, "my_plot1.pdf", average_fn = analysis._mean)
        analysis.plot(analysis._R_correlation_coefficient, analysis._createAveragedInputFile, results, "my_plot2.pdf", average_fn = analysis._mean)
        colortext.printf("Done", "lightgreen")
        #score.ddgTestScore
#score.ddgTestScore
#== Deprecated functions =================================================================
@deprecated
def create_PredictionSet(self, PredictionSetID, halted = True, Priority = 5, BatchSize = 40, allow_existing_prediction_set = False, contains_protein_stability_predictions = True, contains_binding_affinity_predictions = False): raise Exception('This function has been deprecated. Use add_prediction_set instead.')
@deprecated
def charge_PredictionSet_by_number_of_residues(self, PredictionSet): raise Exception('This function has been deprecated. Use _charge_prediction_set_by_residue_count instead.')
@deprecated
def createPredictionsFromUserDataSet(self, userdatasetTextID, PredictionSet, ProtocolID, KeepHETATMLines, StoreOutput = False, Description = {}, InputFiles = {}, quiet = False, testonly = False, only_single_mutations = False, shortrun = False): raise Exception('This function has been deprecated. Use add_prediction_run instead.')
@deprecated
def add_predictions_by_pdb_id(self, pdb_ID, PredictionSet, ProtocolID, status = 'active', priority = 5, KeepHETATMLines = False, strip_other_chains = True): raise Exception('This function has been deprecated. Use add_jobs_by_pdb_id instead.')
@deprecated
def addPrediction(self, experimentID, UserDataSetExperimentID, PredictionSet, ProtocolID, KeepHETATMLines, PDB_ID = None, StoreOutput = False, ReverseMutation = False, Description = {}, InputFiles = {}, testonly = False, strip_other_chains = True): raise Exception('This function has been deprecated. Use add_job instead.')
@deprecated
def add_pdb_file(self, filepath, pdb_id): raise Exception('This function has been deprecated. Use the kddg.api.data.add_pdb_* functions instead.')
@deprecated
def getPublications(self, result_set): raise Exception('This function has been deprecated. Use get_publications_for_result_set instead.')
@deprecated
def getData(self, predictionID): raise Exception('This function has been deprecated. Use get_job_data instead.')
@deprecated
def dumpData(self, outfile, predictionID): raise Exception('This function has been deprecated. Use write_job_data_to_disk instead (note the change in argument order).')
@deprecated
def get_amino_acids_for_analysis(self): raise Exception('This function has been deprecated. Use get_amino_acid_details instead.')
@deprecated
def get_pdb_details_for_analysis(self, pdb_ids, cached_pdb_details = None): raise Exception('This function has been deprecated. Use get_pdb_details instead.')
@deprecated
def add_pdb_file_content(self, pdb_content): raise Exception('This function may never have been used and should be removed.') # return self._add_file_content(pdb_content, rm_trailing_line_whitespace = True, forced_mime_type = 'chemical/x-pdb')
@deprecated
def create_pymol_session(self, download_dir, prediction_id, task_number, keep_files = True): raise Exception('This function has been deprecated. Use create_pymol_session_in_memory and write_pymol_session instead.''')
    @deprecated
    def createDummyExperiment(self, pdbID, mutationset, chains, sourceID, ddG, ExperimentSetName = "DummySource"):
        #todo: elide createDummyExperiment, createDummyExperiment_ankyrin_repeat, and add_mutant
        # NOTE: deliberately disabled - the raise below makes the remaining code
        # unreachable; it is kept for reference.
        raise Exception("Out of date function.")
        Experiment = dbi.ExperimentSet(pdbID, ExperimentSetName)
        for m in mutationset.mutations:
            Experiment.addMutation(m[0], m[1], m[2], m[3])
        for c in chains:
            Experiment.addChain(c)
        Experiment.addExperimentalScore(sourceID, ddG, pdbID)
        Experiment.commit(self.DDG_db)
    @deprecated
    def createDummyExperiment_ankyrin_repeat(self, pdbID, mutations, chain):
        # NOTE: deliberately disabled - the raise below makes the remaining code
        # unreachable; it is kept for reference.
        raise Exception("Out of date function.")
        #todo: elide createDummyExperiment, createDummyExperiment_ankyrin_repeat, and add_mutant
        experiment = dbi.ExperimentDefinition(self.DDG_db, pdbID, interface = None)
        experiment.addChain(chain)
        for m in mutations:
            experiment.addMutation(m)
        experiment.commit(False)
#@analysis_api
    @deprecated
    def test_results(self, output_dir, PredictionSet):
        '''Download the archives for all predictions in PredictionSet into output_dir and print a colored
        wildtype/mutant sequence comparison for each prediction.'''
        PredictionIDs = []
        # NOTE(review): get_flattened_prediction_results in this class returns a
        # (records, score_ids) tuple; iterating that tuple directly as below looks
        # wrong unless a subclass overrides it to return just the records - confirm.
        results = self.get_flattened_prediction_results(PredictionSet)
        mutation_lists = {}
        for r in results:
            PredictionIDs.append(r['PredictionID'])
            mutation_lists[r['PredictionID']] = r['FlattenedMutations']
        # NOTE(review): debug leftovers - the hard-coded list below overwrites the
        # random sample above, and both are unused since the loops below iterate
        # PredictionIDs (see the commented-out '#RandomPredictionIDs' markers).
        RandomPredictionIDs = [PredictionIDs[random.randint(0, len(PredictionIDs) - 1)] for k in range(10)]
        RandomPredictionIDs = [54090L, 53875L, 54085L, 54079L, 54008L, 53853L, 53952L, 54056L, 53935L, 53893L]
        # Retrieve and unzip results
        if not(os.path.exists(output_dir)):
            os.mkdir(output_dir)
        for PredictionID in PredictionIDs:#RandomPredictionIDs:
            if not(os.path.exists(os.path.join(output_dir, str(PredictionID)))):
                colortext.message('Retrieving archive for Prediction %d.' % PredictionID)
                self.write_job_data_to_disk(PredictionID, output_dir)
        # Get the sequences of the wildtype and mutant structures
        count = 0
        for PredictionID in PredictionIDs:#RandomPredictionIDs:
            wildtype_sequences = set()
            mutation_sequences = set()
            working_dir = os.path.join(os.path.join(output_dir, str(PredictionID)))
            # Collect the (single-chain) sequences from the mutant and repacked-wildtype PDB files.
            for f in glob.glob(os.path.join(working_dir, '*.pdb')):
                if os.path.split(f)[1].startswith('mut_'):
                    p = PDB.from_filepath(f)
                    assert(len(p.atom_sequences) == 1)
                    sequence = str(p.atom_sequences.values()[0])
                    mutation_sequences.add(sequence)
                elif os.path.split(f)[1].startswith('repacked_wt_'):
                    p = PDB.from_filepath(f)
                    assert(len(p.atom_sequences) == 1)
                    sequence = str(p.atom_sequences.values()[0])
                    wildtype_sequences.add(sequence)
            # All output structures of each type are expected to share one sequence.
            assert(len(wildtype_sequences) == 1)
            assert(len(mutation_sequences) == 1)
            wildtype_sequence = wildtype_sequences.pop()
            mutation_sequence = mutation_sequences.pop()
            colortext.message('Prediction %d. Mutations: %s' % (PredictionID, mutation_lists[PredictionID]))
            assert(len(wildtype_sequence) == len(mutation_sequence))
            # Print the two sequences with the differing positions highlighted.
            s = ''
            t = ''
            for x in range(len(wildtype_sequence)):
                if wildtype_sequence[x] != mutation_sequence[x]:
                    s += colortext.make(wildtype_sequence[x], color="green")
                    t += colortext.make(mutation_sequence[x], color="yellow")
                else:
                    s += wildtype_sequence[x]
                    t += mutation_sequence[x]
            print(s)
            print(t)
    @deprecated
    def add_mutant(self, pdb_ID, mutant_mutations):
        '''Use this function to add one set of mutations ON THE SAME CHAIN (i.e. corresponding to one mutant) to the database.
           todo: generalize this to allow different chains

           NOTE: deliberately disabled - the raise below makes the remaining code
           unreachable; it is kept for reference.
        '''
        raise Exception("Out of date function.")
        #todo: elide createDummyExperiment, createDummyExperiment_ankyrin_repeat, and add_mutant
        chains = set([m.Chain for m in mutant_mutations])
        assert(len(chains) == 1)
        colortext.warning("Adding mutation: %s." % ', '.join(map(str, mutant_mutations)))
        self.createDummyExperiment_ankyrin_repeat(pdb_ID, mutant_mutations, chains.pop())
###########################################################################################
## Information layer
##
## This layer is for functions which extract data from the database.
###########################################################################################
#== Information API =======================================================================
@informational_misc
def get_amino_acid_details(self):
'''This function returns a dictionary of canonical amino acid details e.g. polarity, aromaticity, size etc.'''
amino_acids = {}
polarity_map = {'polar' : 'P', 'charged' : 'C', 'hydrophobic' : 'H'}
aromaticity_map = {'aliphatic' : 'L', 'aromatic' : 'R', 'neither' : '-'}
results = self.DDG_db.execute_select('SELECT * FROM AminoAcid')
for r in results:
if r['Code'] != 'X':
amino_acids[r['Code']] = dict(
LongCode = r['LongCode'],
Name = r['Name'],
Polarity = polarity_map.get(r['Polarity'], 'H'),
Aromaticity = aromaticity_map[r['Aromaticity']],
Size = r['Size'],
van_der_Waals_volume = r['Volume']
)
amino_acids['Y']['Polarity'] = 'H' # tyrosine is a special case
return amino_acids
    @informational_misc
    def get_publication(self, ID):
        '''Returns the information (title, publication, authors etc.) for a publication.

        Raises an exception if no publication exists with the given ID.
        NOTE: when no PMID identifier record exists, PubMedID is the empty list
        returned by the query rather than None.
        '''
        r = self.DDG_db_utf.execute_select('SELECT * FROM Publication WHERE ID=%s', parameters=(ID,))
        if not r:
            raise Exception('No publication exists with ID %s.' % str(ID))
        r = r[0]
        # Look up the PubMed identifier, if one was recorded for this publication.
        pubmed_id = self.DDG_db_utf.execute_select('SELECT * FROM PublicationIdentifier WHERE SourceID=%s AND Type="PMID"', parameters=(r['ID'],))
        if pubmed_id:
            pubmed_id = pubmed_id[0]['ID']
        # Authors are returned in their stored ordering (AuthorOrder).
        authors = self.DDG_db_utf.execute_select('SELECT * FROM PublicationAuthor WHERE PublicationID=%s ORDER BY AuthorOrder', parameters=(r['ID'],))
        authorlist = []
        for a in authors:
            authorlist.append(dict(FirstName = a['FirstName'], MiddleNames = a['MiddleNames'], Surname = a['Surname']))
        pub_details = dict(
            Title = r['Title'],
            Publication = r['Publication'],
            Volume = r['Volume'],
            StartPage = r['StartPage'],
            EndPage = r['EndPage'],
            PublicationYear = r['PublicationYear'],
            PublicationDate = r['PublicationDate'],
            DOI = r['DOI'],
            URL = r['URL'],
            PubMedID = pubmed_id,
            Authors = authorlist,
        )
        # Normalize the date to an ISO string and synthesize a URL from the DOI if none was stored.
        if pub_details['PublicationDate']:
            pub_details['PublicationDate'] = pub_details['PublicationDate'].strftime('%Y-%m-%d')
        if not pub_details['URL'] and pub_details['DOI']:
            pub_details['URL'] = 'https://dx.doi.org/%s' % pub_details['DOI']
        return pub_details
@informational_misc
def get_publications(self):
'''Returns the information (title, publication, authors etc.) for all publications.'''
publications = {}
for r in self.DDG_db.execute_select('SELECT ID FROM Publication'):
publications[r['ID']] = self.get_publication(r['ID'])
return publications
def _cache_all_score_method_details(self):
'''Helper function for get_score_method_details.'''
score_methods = {}
for r in self.get_session(utf = True).query(dbmodel.ScoreMethod):
score_methods[r.ID] = row_to_dict(r)
self.cached_score_method_details = score_methods
return score_methods
    @informational_misc
    def get_pdb_residues_by_pos(self, pdb_id, strip_res_ids = False):
        '''Returns a mapping chain_id -> residue_id -> reside_aa.

        If strip_res_ids is True, surrounding whitespace is stripped from the residue IDs used as keys.
        NOTE(review): this raises a KeyError if a PDBResidue row references a chain
        with no corresponding PDBChain row - presumably the schema guarantees that
        cannot happen; confirm.
        '''
        chain_residue_by_pos = {}
        # First build one empty dict per chain, then fill in the residues.
        for c in self.get_session().query(dbmodel.PDBChain).filter(dbmodel.PDBChain.PDBFileID == pdb_id):
            chain_residue_by_pos[c.Chain] = {}
        for r in self.get_session().query(dbmodel.PDBResidue).filter(dbmodel.PDBResidue.PDBFileID == pdb_id):
            chain_residue_by_pos[r.Chain][r.ResidueID.strip() if strip_res_ids else r.ResidueID] = r.ResidueAA
        return chain_residue_by_pos
@informational_misc
def get_score_method_details(self, score_method_id = None, allow_recaching = True):
'''Returns all score method details, unless a score method id is passed, then only those details are returned'''
if not self.cached_score_method_details or (score_method_id and not(score_method_id in self.cached_score_method_details)):
self._cache_all_score_method_details()
if score_method_id:
# Returns ScoreMethod record for specific score_method_id
if score_method_id in self.cached_score_method_details:
return self.cached_score_method_details[score_method_id]
else:
# We have already refreshed the cache, so fail
raise Exception("score_method_id {0} isn't in score methods table".format(score_method_id))
else:
# Returns all defined ScoreMethod records
return self.cached_score_method_details
def output_score_method_information(self, score_method_id, output_directory, analysis_set_id = None, take_lowest = None, expectn = None):
'''Outputs details about score method to a txt file in the specified output directory'''
score_method_details = sorted([(k, v) for k, v in self.get_score_method_details(score_method_id = score_method_id).iteritems()])
with open(os.path.join(output_directory, 'score_method.txt'), 'w') as f:
f.write('Score method ID: %s\n' % str(score_method_id))
if analysis_set_id:
f.write('Analysis set ID: %s\n' % str(analysis_set_id))
if take_lowest:
f.write('Take lowest (TopX): %s scores\n' % str(take_lowest))
if expectn:
f.write('Expected number of output structures): %s\n' % str(expectn))
if len(score_method_details) > 0:
f.write('\nScore method details\n')
for key, value in score_method_details:
f.write('%s: %s\n' % (str(key), str(value)))
@informational_misc
def get_score_method_id(self, method_name, method_type = None, method_parameters = None, method_authors = None, fuzzy = True):
'''Returns the ID for the ScoreMethod with the specified parameters.
If fuzzy is True then the string matching uses LIKE rather than equality.
e.g. method_id = self.get_score_method_id('interface', method_authors = 'kyle')
'''
if fuzzy:
match_phrase = 'LIKE %s'
method_name = '%{0}%'.format(method_name)
if method_type: method_type = '%{0}%'.format(method_type)
if method_parameters: method_parameters = '%{0}%'.format(method_parameters)
if method_authors: method_authors = '%{0}%'.format(method_authors)
else:
match_phrase = '=%s'
condition_parameters = [method_name]
conditions = ['MethodName {0}'.format(match_phrase)]
if method_type:
conditions.append('MethodType {0}'.format(match_phrase))
condition_parameters.append(method_type)
if method_parameters:
conditions.append('Parameters {0}'.format(match_phrase))
condition_parameters.append(method_parameters)
if method_authors:
conditions.append('Authors {0}'.format(match_phrase))
condition_parameters.append(method_authors)
conditions = ' AND '.join(conditions)
condition_parameters = tuple(condition_parameters)
results = self.DDG_db_utf.execute_select('SELECT ID FROM ScoreMethod WHERE {0}'.format(conditions), parameters=condition_parameters)
if not results:
raise Exception('Error: No ScoreMethod records were found using the criteria: {0}'.format(', '.join(map(str, [s for s in [method_name, method_type, method_parameters] if s]))))
elif len(results) > 1:
raise Exception('Error: Multiple ScoreMethod records were found using the criteria: {0}'.format(', '.join(map(str, [s for s in [method_name, method_type, method_parameters] if s]))))
else:
return results[0]['ID']
@informational_misc
def get_score_dict(self, prediction_id = None, score_method_id = None, score_type = None, structure_id = None, prediction_structure_scores_table = None, prediction_id_field = None):
'''Returns a dict with keys for all fields in the Score table. The optional arguments can be used to set the
corresponding fields of the dict. All other fields are set to None.'''
if prediction_structure_scores_table == None:
prediction_structure_scores_table = self._get_prediction_structure_scores_table()
if prediction_id_field == None:
prediction_id_field = self._get_prediction_id_field()
# Relax the typing
if structure_id: structure_id = int(structure_id)
if prediction_id: prediction_id = int(prediction_id)
if score_method_id: score_method_id = int(score_method_id)
if score_type:
allowed_score_types = self._get_allowed_score_types()
if score_type not in allowed_score_types:
raise Exception('"{0}" is not an allowed score type. Allowed types are: "{1}".'.format(score_type, '", "'.join(sorted(allowed_score_types))))
fieldnames = set([f for f in self.DDG_db.FieldNames.__dict__[prediction_structure_scores_table].__dict__.keys() if not(f.startswith('_'))])
d = dict.fromkeys(fieldnames, None)
if prediction_id_field != None:
d[prediction_id_field] = prediction_id
d['ScoreMethodID'] = score_method_id
d['ScoreType'] = score_type
d['StructureID'] = structure_id
return d
@informational_pdb
def get_pdb_chain_coordinates(self, pdb_id, chain_id):
'''Read a saved dataframe.'''
zipped_coordinates = self.DDG_db.execute_select('SELECT Coordinates FROM PDBChain WHERE PDBFileID=%s AND Chain=%s AND Coordinates IS NOT NULL', parameters=(pdb_id, chain_id))
if zipped_coordinates:
assert(len(zipped_coordinates) == 1)
buf = BytesIO(zipped_coordinates[0]['Coordinates'])
gf = gzip.GzipFile(fileobj=buf, mode="rb")
residue_matrix = None
try:
store = pandas.read_hdf(gf)
residue_matrix = store['dataframe']
store.close()
except NotImplementedError, e:
# "Support for generic buffers has not been implemented"
try:
nfname = None
f, nfname = open_temp_file('/tmp', suffix = '.hdf5')
f.close()
write_file(nfname, gf.read(), ftype = 'wb')
store = pandas.HDFStore(nfname)
residue_matrix = store['dataframe']
store.close()
os.remove(nfname)
print('get_pdb_chain_coordinates here')
except:
if nfname: os.remove(nfname)
raise
return residue_matrix
return None
@informational_pdb
def get_pdb_chains_for_prediction(self, prediction_id):
'''Returns the PDB file ID and a list of chains for the prediction.'''
raise Exception('Abstract method. This needs to be overridden by a subclass.')
@informational_pdb
def get_chain_sets_for_mutatagenesis(self, mutagenesis_id, complex_id = None):
'''Gets a list of possibilities for the associated complex and calls get_chains_for_mutatagenesis on each.
This function assumes that a complex structure is required i.e. that all chains in the PDB chain set are in the same PDB file.
This is a useful method for listing the possible complexes to use in a prediction or to determine whether one
may be missing. and we need to update the database.'''
raise Exception('Abstract method. This needs to be overridden by a subclass.')
@informational_pdb
def get_chains_for_mutatagenesis(self, mutagenesis_id, pdb_file_id, pdb_set_number, complex_id = None, tsession = None):
'''Returns the PDB chains used in the mutagenesis.
Note: At present, monomeric data e.g. protein stability does not have the notion of complex in our database
but this abstraction is planned so that multiple choices of PDB file and chain can be easily represented.'''
raise Exception('Abstract method. This needs to be overridden by a subclass.')
@informational_pdb
def get_pdb_mutations_for_mutagenesis(self, mutagenesis_id, pdb_file_id, set_number, complex_id = None):
'''Returns the PDB mutations for a mutagenesis experiment as well as the PDB residue information.'''
raise Exception('Abstract method. This needs to be overridden by a subclass.')
    @informational_pdb
    def get_pdb_details(self, pdb_ids, cached_pdb_details = None):
        '''Returns the details stored in the database about the PDB files associated with pdb_ids e.g. chains, resolution,
           technique used to determine the structure etc.

           NOTE: deliberately disabled - the raise below makes everything after it
           (including the delegating return) unreachable; the legacy implementation
           is kept for reference.
        '''
        raise Exception('Replace this with a call to kddg.api.data.py::DataImportInterface.get_pdb_details()')
        # --- Unreachable legacy implementation below ---
        return self.importer.get_pdb_details(pdb_ids, cached_pdb_details = None)
        pdbs = {}
        cached_pdb_ids = []
        if cached_pdb_details:
            cached_pdb_ids = set(cached_pdb_details.keys())
        for pdb_id in pdb_ids:
            if pdb_id in cached_pdb_ids:
                pdbs[pdb_id] = cached_pdb_details[pdb_id]
            else:
                record = self.DDG_db.execute_select('SELECT * FROM PDBFile WHERE ID=%s', parameters=(pdb_id,))[0]
                p = PDB(record['Content'])
                pdb_chain_lengths = {}
                for chain_id, s in p.atom_sequences.iteritems():
                    pdb_chain_lengths[chain_id] = len(s)
                # todo: get the list of protein chains and PDB residues from the database and assert that they are the same
                # as what were extracted from the PDB file.
                # maybe change 'chains' below to 'protein_chains'
                pdbs[pdb_id] = dict(
                    chains = pdb_chain_lengths,
                    TM = record['Transmembrane'],
                    Technique = record['Techniques'],
                    XRay = record['Techniques'].find('X-RAY') != -1,
                    Resolution = record['Resolution'],
                )
        return pdbs
    @informational_pdb
    def get_prediction_set_pdb_chain_details(self, PredictionSet, cached_pdb_details = None, restrict_to_pdbs = set()):
        '''Used by the analysis API. This could be combined with get_pdb_details.

        Returns a dict pdb_id -> {Chains, Resolution, MethodOfDetermination} for every
        PDB file referenced by the prediction set, optionally filtered to restrict_to_pdbs
        and reusing entries from cached_pdb_details where available.
        '''
        # Find the PDB ids referenced by the prediction set via the user dataset
        # experiment table; fall back to the Experiment table if that yields nothing.
        pdb_ids = [r['PDBFileID'] for r in self.DDG_db.execute_select('SELECT DISTINCT PDBFileID FROM {0} INNER JOIN {1} ON {1}ID={1}.ID WHERE PredictionSet=%s ORDER BY PDBFileID'.format(self._get_prediction_table(), self._get_user_dataset_experiment_table()), parameters=(PredictionSet,))]
        if not pdb_ids:
            # Deliberate best-effort: if the fallback query fails (e.g. the schema
            # lacks the Experiment join), we proceed with an empty id list.
            try:
                pdb_ids = [r['PDBFileID'] for r in self.DDG_db.execute_select('SELECT DISTINCT PDBFileID FROM {0} INNER JOIN {1} ON {1}ID={1}.ID WHERE PredictionSet=%s ORDER BY PDBFileID'.format(self._get_prediction_table(), 'Experiment'), parameters=(PredictionSet,))]
            except: pass
        if restrict_to_pdbs:
            pdb_ids = sorted(set(pdb_ids).intersection(restrict_to_pdbs))
        pdbs = {}
        cached_pdb_ids = []
        if cached_pdb_details:
            cached_pdb_ids = set(cached_pdb_details.keys())
        for pdb_id in pdb_ids:
            if pdb_id in cached_pdb_ids:
                pdbs[pdb_id] = cached_pdb_details[pdb_id]
            else:
                # Parse the stored PDB content to extract per-chain sequences and types.
                record = self.DDG_db.execute_select('SELECT * FROM PDBFile WHERE ID=%s', parameters=(pdb_id,))[0]
                p = PDB(record['Content'])
                d = {}
                chain_ids = set(p.chain_types.keys()).union(set(p.seqres_chain_order)).union(set(p.atom_sequences.keys()))
                d['Chains'] = dict.fromkeys(chain_ids)
                for chain_id in chain_ids:
                    d['Chains'][chain_id] = dict(
                        Sequence = str(p.atom_sequences.get(chain_id) or ''),
                        Type = p.chain_types.get(chain_id),
                    )
                d['Resolution'] = p.get_resolution()
                d['MethodOfDetermination'] = p.get_techniques()
                pdbs[pdb_id] = d
        return pdbs
@informational_job
def get_development_protocol(self, development_protocol_id):
    '''Possibly temporary function which returns a DevelopmentProtocol record from the database.

       Abstract method: concrete subclasses of this API must override this.'''
    raise Exception('Abstract method. This needs to be overridden by a subclass.')
@informational_job
def get_complex_details(self, complex_id):
    '''Returns the database record for the given complex.

       Abstract method: concrete subclasses of this API must override this.'''
    raise Exception('Abstract method. This needs to be overridden by a subclass.')
@informational_job
def get_job_description(self, prediction_id):
'''Returns the details necessary to run the job.'''
try:
prediction = self.get_session().query(self.PredictionTable).filter(self.PredictionTable.ID == prediction_id).one()
except Exception, e:
raise colortext.Exception('No details could be found for prediction #{0} in the database.\n{1}\n{2}'.format(prediction_id, str(e), traceback.format_exc()))
return str(prediction)
@informational_job
def get_job_details(self, prediction_id, include_files = True, truncate_content = None):
    '''Returns the details necessary to run the job.

       Abstract method: concrete subclasses of this API must implement this.'''
    raise Exception('This function needs to be implemented by subclasses of the API.')
@informational_misc
def get_file_content_cache_stats(self):
    '''Returns basic statistics on the file content cache access.

       Pure pass-through to the importer's implementation.'''
    return self.importer.get_file_content_cache_stats()
@informational_job
def get_job_files(self, prediction_id, truncate_content = None, set_pdb_occupancy_one = True):
    '''Returns a dict mapping the stages (e.g. 'input', 'output', 'analysis') of a job with the files associated with
       that stage.

       If truncate_content is set, it should be an integer specifying the amount of characters to include. This is useful
       to see if the file header is as expected. Passing truncate_content = 0 skips reading the file content entirely
       and returns None for the content-related fields.
    '''
    assert(truncate_content == None or (isinstance(truncate_content, int) and truncate_content >= 0))
    job_files = {}
    prediction_record = self.get_session().query(self.PredictionTable).filter(self.PredictionTable.ID == prediction_id).one()
    for pf in prediction_record.files:
        r = row_to_dict(pf)
        if truncate_content != 0:
            fcontent = pf.content
            r['MIMEType'] = fcontent.MIMEType
            r['Filesize'] = fcontent.Filesize
            r['MD5HexDigest'] = fcontent.MD5HexDigest
            file_content = self.importer.get_file_content_from_cache(fcontent.ID)
            if set_pdb_occupancy_one and pf.Filetype == 'PDB': # Set all occupancies to 1
                pdb = PDB(file_content.split("\n"))
                pdb.fillUnoccupied()
                r['Content'] = pdb.get_content()
            else:
                r['Content'] = file_content
            if truncate_content:
                # NOTE(review): truncation is applied to the raw cached content, so a truncated PDB does
                # not reflect the occupancy fix above - presumably acceptable for header inspection.
                if len(file_content) > int(truncate_content):
                    r['Content'] = '%s...' % file_content[:int(truncate_content)]
        else:
            # truncate_content == 0: caller only wants metadata, so avoid the cache read entirely.
            # (Bug fix: a duplicate r['Content'] = None assignment was removed from this branch.)
            r['Content'] = None
            r['MIMEType'] = None
            r['Filesize'] = None
            r['MD5HexDigest'] = None
        job_stage = r['Stage']
        del r['Stage']
        # Group the file records by stage.
        job_files.setdefault(job_stage, []).append(r)
    return job_files
@informational_job
def get_prediction_set_details(self, prediction_set_id):
    '''Returns the PredictionSet record from the database as a dict, augmented with a per-status job
       count summary under the 'Job status summary' key, or None if no such PredictionSet exists.'''
    query = self.get_session().query(dbmodel.PredictionSet).filter(dbmodel.PredictionSet.ID == prediction_set_id)
    if query.count() != 1:
        return None
    details = row_to_dict(query.one())
    details['Job status summary'] = self._get_prediction_set_status_counts(prediction_set_id)
    return details
def _get_prediction_set_status_counts(self, prediction_set_id):
    '''Returns a summary of the prediction job statuses for the prediction set as a dict
       mapping each Status value to the number of predictions with that status.'''
    status_counts = {}
    rows = self.get_session().query(self.PredictionTable.Status, func.count(self.PredictionTable.Status)).filter(self.PredictionTable.PredictionSet == prediction_set_id).group_by(self.PredictionTable.Status)
    for status, num_jobs in rows:
        status_counts[status] = num_jobs
    return status_counts
def get_session(self, new_session = False, autoflush = True, autocommit = False, utf = False):
    '''Returns a SQLAlchemy session, delegating directly to the importer's session factory.'''
    return self.importer.get_session(new_session = new_session, autoflush = autoflush, autocommit = autocommit, utf = utf)
@informational_job
def get_prediction_ids(self, prediction_set_id):
    '''Returns the list of Prediction IDs associated with the PredictionSet.

       Asserts first that the prediction set has the type expected by this API.'''
    self._assert_prediction_set_is_correct_type(prediction_set_id)
    prediction_rows = self.get_session().query(self.PredictionTable).filter(self.PredictionTable.PredictionSet == prediction_set_id)
    return [record.ID for record in prediction_rows]
def _get_prediction_set_prediction_table_rows(self, prediction_set_id):
    '''Returns a dict mapping Prediction ID -> Prediction record for all predictions in the PredictionSet.

       Asserts first that the prediction set has the type expected by this API.'''
    self._assert_prediction_set_is_correct_type(prediction_set_id)
    rows_by_id = {}
    for record in self.get_session().query(self.PredictionTable).filter(self.PredictionTable.PredictionSet == prediction_set_id):
        rows_by_id[record.ID] = record
    return rows_by_id
@informational_job
def get_defined_user_datasets(self, tsession = None):
    '''Return a dict detailing the defined UserDataSets, their tagged subsets (if any), and the mutagenesis counts
       (i.e. the number of prediction cases) of both the user datasets and the associated tagged subsets.

       The returned dict is keyed by UserDataSet.TextID; each value is the UserDataSet record as a dict
       with two added keys: 'MutagenesisCount' (number of user dataset experiments) and 'Subsets'
       (a dict mapping tag name -> {'MutagenesisCount': n}).'''
    # Use a fresh session unless the caller supplied one.
    tsession = tsession or self.get_session(new_session = True)
    d = {}
    # Only consider datasets of the type handled by this API (e.g. binding affinity vs. stability).
    user_datasets = tsession.query(dbmodel.UserDataSet).filter(dbmodel.UserDataSet.DatasetType == self._get_prediction_dataset_type())
    for uds in user_datasets:
        uds = row_to_dict(uds)
        # Count the user dataset experiments (prediction cases) belonging to this dataset.
        q = tsession.query(func.count(self._get_sqa_user_dataset_experiment_table().ID).label('MutagenesisCount')).filter(self._get_sqa_user_dataset_experiment_table().UserDataSetID == uds['ID']).one()
        uds['MutagenesisCount'] = q[0]
        d[uds['TextID']] = uds
        subsets = {}
        # Not all APIs define a tag table; skip the subset aggregation when there is none.
        if self._get_user_dataset_experiment_tag_table():
            # Count cases per tag for this dataset.
            for tagged_subset in tsession.query(
                self._get_sqa_user_dataset_experiment_tag_table().Tag, func.count(self._get_sqa_user_dataset_experiment_tag_table().Tag).label('MutagenesisCount')).filter(and_(
                    self._get_sqa_user_dataset_experiment_table().ID == self._get_sqa_user_dataset_experiment_tag_table_udsid(),
                    self._get_sqa_user_dataset_experiment_table().UserDataSetID == uds['ID'])).group_by(self._get_sqa_user_dataset_experiment_tag_table().Tag):
                subsets[tagged_subset[0]] = dict(MutagenesisCount = tagged_subset[1])
        uds['Subsets'] = subsets
    return d
@informational_job
def get_user_dataset_experiments(self, tsession, user_dataset_name, tagged_subset = None):
    '''Returns a list of UserDataSet experiment records for the given user dataset.

       If tagged_subset is given, only experiments tagged with that subset name are returned;
       the extra join against the tag table restricts the result accordingly.'''
    udse = self._get_sqa_user_dataset_experiment_table()
    udse_tag = self._get_sqa_user_dataset_experiment_tag_table()
    if tagged_subset:
        # Join experiment -> tag table -> UserDataSet and filter by both dataset name and tag.
        return tsession.query(udse).filter(and_(
            udse.ID == self._get_sqa_user_dataset_experiment_tag_table_udsid(),
            udse.UserDataSetID == dbmodel.UserDataSet.ID,
            dbmodel.UserDataSet.TextID == user_dataset_name,
            udse_tag.Tag == tagged_subset))
    else:
        # No tag restriction: join experiment -> UserDataSet by dataset name only.
        return tsession.query(udse).filter(and_(
            udse.UserDataSetID == dbmodel.UserDataSet.ID,
            dbmodel.UserDataSet.TextID == user_dataset_name))
@informational_job
def get_user_dataset_experiment_details(self, user_dataset_experiment_id, user_dataset_id = None):
    '''Returns all the data relating to a user dataset experiment.

       Abstract method: concrete subclasses of this API must override this.'''
    raise Exception('Abstract method. This needs to be overridden by a subclass.')
@informational_job
def get_dataset_experiment_details(self, dataset_experiment_id, dataset_id = None):
    '''Returns the experimental data relating to a dataset experiment.

       Abstract method: concrete subclasses of this API must override this.'''
    raise Exception('Abstract method. This needs to be overridden by a subclass.')
@informational_job
def export_dataset_to_json(self, dataset_id):
    '''Returns the dataset information serialized as a JSON string.'''
    dataset = self._export_dataset(dataset_id)
    return json_dumps(dataset)
@informational_job
def export_dataset_to_csv(self, dataset_id):
    '''Returns the dataset information in CSV format.

       Abstract method: concrete subclasses of this API must override this.'''
    raise Exception('Abstract method. This needs to be overridden by a subclass.')
@informational_job
def get_predictions_experimental_details(self, prediction_id, userdatset_experiment_ids_to_subset_ddgs = None, include_files = False, reference_ids = set(), include_experimental_data = True):
    '''Returns a dict containing the experimental details for the Prediction. This is what is used by export_prediction_cases_to_json etc.

       Abstract method: concrete subclasses of this API must override this.
       NOTE(review): reference_ids uses a mutable default argument; overriding subclasses should
       accumulate into the caller-supplied set rather than relying on the default.'''
    raise Exception('Abstract method. This needs to be overridden by a subclass.')
@informational_job
def get_experimental_ddgs_by_analysis_set(self, user_dataset_experiment_id = None, reference_ids = set()):
    '''Returns a mapping from UserPPDataSetExperimentIDs to dicts mapping analysis Subsets to a dicts containing the
       record identifier triple (subset, section, record number), the experimental DDG values, the mean of those values,
       and whether the values / one of the values are derived from other measurements e.g.
            23 : {
                 'BeAtMuSiC' : {'Cases' : set([('BeAtMuSiC', 'Main', 1408L)]),
                                'DDGs' : [{'IsDerivedValue': 0L,
                                           'Value': 2.9802478611},
                                          {'IsDerivedValue': 0L,
                                           'Value': 2.1978328374}],
                                'IsDerivedValue' : 0L,
                                'MeanDDG' : 2.5890403492500003},
                 'SKEMPI'    : {'Cases' : set([('SKEMPI', 'Non-derivative', 1L)]),
                                'DDGs' : [{'IsDerivedValue': 0L, 'Value': 2.9802478611},
                                          {'IsDerivedValue': 0L, 'Value': 2.1978328374}],
                                'IsDerivedValue' : 0L,
                                'MeanDDG' : 2.5890403492500003},
                 'ZEMu'      : {'Cases' : set([('ZEMu', 'Main', 1144L)]),
                                'DDGs' : [{'IsDerivedValue': 0L, 'Value': 2.1978328374}],
                                'IsDerivedValue' : 0L,
                                'MeanDDG' : 2.1978328374}}
            ...

       This can be used to: i) generate histograms showing the spread of experimental values for a dataset; or
       ii) to add columns to an analysis dataframe so that, once created, it can be analyzed over multiple analysis sets.

       Abstract method: concrete subclasses of this API must override this.'''
    raise Exception('Abstract method. This needs to be overridden by a subclass.')
@informational_job
def get_prediction_set_case_details(self, prediction_set_id, retrieve_references = True, include_experimental_data = True, prediction_table_rows_cache = None):
    '''Returns a dict containing the case information for prediction cases in the prediction set with a structure
       expected by the analysis class.

       The returned dict has three keys: 'Data' (UserDataSetExperimentID -> per-case experimental details,
       each including its 'PredictionID'), 'References' (publication records for any references collected
       while gathering the data, if retrieve_references is True), and 'PredictionSet' (the set's record).'''
    # Read the Prediction details
    reference_ids = set()
    prediction_ids = self.get_prediction_ids(prediction_set_id)
    # Bug fix: prediction_table_rows_cache defaults to None but was indexed unconditionally in the
    # loop below, raising a TypeError whenever the caller did not supply a cache. Build it on demand.
    if prediction_table_rows_cache is None:
        prediction_table_rows_cache = self._get_prediction_set_prediction_table_rows(prediction_set_id)
    userdatset_experiment_ids_to_subset_ddgs = {}
    if include_experimental_data:
        # reference_ids is populated (by side effect) with publication references as DDGs are read.
        userdatset_experiment_ids_to_subset_ddgs = self.get_experimental_ddgs_by_analysis_set(reference_ids = reference_ids)
    prediction_cases = {}
    for prediction_id in prediction_ids:
        UserDataSetExperimentID = self._get_sqa_predictions_user_dataset_experiment_id(prediction_table_rows_cache[prediction_id])
        experimental_details = self.get_predictions_experimental_details(prediction_id, userdatset_experiment_ids_to_subset_ddgs, include_experimental_data = include_experimental_data)
        experimental_details['PredictionID'] = prediction_id
        prediction_cases[UserDataSetExperimentID] = experimental_details
    references = {}
    if retrieve_references:
        for reference_id in sorted(reference_ids):
            references[reference_id] = self.get_publication(reference_id)
    return dict(
        Data = prediction_cases,
        References = references,
        PredictionSet = self.get_prediction_set_details(prediction_set_id)
    )
@informational_job
def export_prediction_cases_to_json(self, prediction_set_id, retrieve_references = True):
    '''A JSON wrapper to get_prediction_set_case_details.

       Abstract method: concrete subclasses of this API must override this.'''
    raise Exception('Abstract method. This needs to be overridden by a subclass.')
def _export_dataset(self, dataset_id):
    '''Returns a dict containing the dataset information.

       Abstract method: concrete subclasses of this API must override this.'''
    raise Exception('Abstract method. This needs to be overridden by a subclass.')
###########################################################################################
## Prediction creation/management layer
##
###########################################################################################
#== Job creation API ===========================================================
#
# This part of the API is responsible for inserting prediction jobs in the database via
# the trickle-down proteomics paradigm.
@job_creator
def add_prediction_set(self, prediction_set_id, halted = True, priority = 5, batch_size = 40,
                       allow_existing_prediction_set = False, contains_protein_stability_predictions = True, contains_binding_affinity_predictions = False,
                       series_name = None, series_color = 'ff0000', series_alpha = 1.0, description = None):
    '''Adds a new PredictionSet (a construct used to group Predictions) to the database.
       If a PredictionSet is halted then running schedulers will not kick off the jobs. Otherwise, they will be queued
       depending on the priority of the PredictionSet (higher numbers mean higher priority).
       batch_size defines the number of jobs to be added as once as an array job.

       Returns True if a PredictionSet with the same ID did not previously exist.
       The priority and batch size can be modified while a scheduler is running and will affect the next round of
       predictions to be queued.
       Raises an exception or returns False otherwise depending on the value of allow_existing_prediction_set.'''
    if halted:
        Status = 'halted'
    else:
        Status = 'active'
    # (A redundant, unused SELECT against PredictionSet was removed here; prediction_set_exists()
    # below performs the same lookup.)
    if self.prediction_set_exists(prediction_set_id):
        if allow_existing_prediction_set == False:
            raise Exception('The PredictionSet %s already exists.' % prediction_set_id)
        else:
            return False
    d = dict(
        ID = prediction_set_id,
        Status = Status,
        Priority = priority,
        ProteinStability = contains_protein_stability_predictions,
        BindingAffinity = contains_binding_affinity_predictions,
        BatchSize = batch_size,
        SeriesName = series_name,
        SeriesColor = series_color,
        SeriesAlpha = series_alpha,
        Description = description,
    )
    self.DDG_db.insertDictIfNew("PredictionSet", d, ['ID'], locked = False)
    return True
def prediction_set_exists(self, prediction_set_id):
    '''Return True if a PredictionSet record with this ID exists in the database, False otherwise.

       Since ID is the primary key, at most one matching record is expected; this is asserted.'''
    records = self.DDG_db.execute_select('SELECT * FROM PredictionSet WHERE ID=%s', parameters=(prediction_set_id,))
    if not records:
        return False
    assert(len(records) == 1)
    return True
@job_creator
def destroy_prediction_set(self, prediction_set_id):
    '''This function removes the PredictionSet from the database.
       THIS CANNOT BE UNDONE.

       For safety, we should only allow PredictionSets with no corresponding scores to be removed.
       It fits into the job_creator category since usually these empty PredictionSets will have been created while
       setting up a job.

       Preconditions (each raises colortext.Exception if violated): the set exists, its CanBeDeleted
       flag is set, it has no associated structure scores, and all of its jobs are still "queued".'''
    can_be_deleted = self.DDG_db.execute_select('SELECT CanBeDeleted FROM PredictionSet WHERE ID=%s', parameters=(prediction_set_id,))
    if len(can_be_deleted) == 0:
        raise colortext.Exception('The prediction set "%s" does not exist.' % prediction_set_id)
    elif can_be_deleted[0]['CanBeDeleted'] == 0:
        raise colortext.Exception('The prediction set "%s" is not allowed to be deleted. Change the CanBeDeleted property on its record first.' % prediction_set_id)
    # params[0] is the prediction table, params[1] the structure scores table for this API.
    params = (self._get_prediction_table(), self._get_prediction_structure_scores_table())
    # Refuse to delete if any scores exist for predictions in this set.
    qry = 'SELECT COUNT({0}.ID) AS NumRecords FROM {0} INNER JOIN {1} ON {0}.ID={1}.{0}ID WHERE PredictionSet=%s'.format(*params)
    existing_scores = self.DDG_db.execute_select(qry, parameters=(prediction_set_id,))
    if existing_scores[0]['NumRecords'] > 0:
        raise colortext.Exception('Cannot remove a prediction set with associated scores.')
    # Refuse to delete if any job has started running (Status other than "queued").
    qry = 'SELECT COUNT(ID) AS NumRecords FROM {0} WHERE Status <> "queued" AND PredictionSet=%s'.format(*params)
    jobs_in_flux = self.DDG_db.execute_select(qry, parameters=(prediction_set_id,))
    if jobs_in_flux[0]['NumRecords'] > 0:
        raise colortext.Exception('Cannot remove a prediction set unless all jobs are set as "queued".')
    # Use a transaction to prevent a partial deletion
    self.DDG_db._get_connection()
    con = self.DDG_db.connection
    try:
        # "with con" commits on success and rolls back on exception, so the three DELETEs below
        # are applied atomically.
        with con:
            cur = con.cursor()
            # Delete the associated file records
            delete_files_qry = 'DELETE {0}File FROM {0}File INNER JOIN {0} ON {0}.ID={0}File.{0}ID WHERE PredictionSet=%s'.format(*params)
            cur.execute(delete_files_qry, (prediction_set_id, ))
            # Delete the predictions
            delete_predictions_qry = 'DELETE FROM {0} WHERE PredictionSet=%s'.format(*params)
            cur.execute(delete_predictions_qry, (prediction_set_id, ))
            # Finally delete the PredictionSet record itself.
            cur.execute('DELETE FROM PredictionSet WHERE ID=%s', (prediction_set_id, ))
    except Exception, e:
        raise colortext.Exception('An exception occurred removing the PredictionSet from the database: "%s".\n%s' % (str(e), traceback.format_exc()))
@job_creator
def start_prediction_set(self, PredictionSetID):
    '''Sets the Status of a PredictionSet to "active" so that running schedulers will queue its jobs.'''
    self._set_prediction_set_status(PredictionSetID, 'active')
@job_creator
def stop_prediction_set(self, PredictionSetID):
    '''Sets the Status of a PredictionSet to "halted" so that running schedulers will not kick off its jobs.'''
    self._set_prediction_set_status(PredictionSetID, 'halted')
@job_creator
def add_job(self, *args, **kwargs):
    '''Add a single prediction job to a prediction set. This should not typically be called - add_prediction_run
       is generally what should be called instead.

       Abstract method: concrete subclasses of this API must implement this.'''
    raise Exception('This function needs to be implemented by subclasses of the API.')
@job_creator
def add_jobs_by_pdb_id(self, *args, **kwargs):
    '''This function adds predictions for all Experiments corresponding to pdb_ID to the specified prediction set.
       This is useful for custom runs e.g. when we are using the DDG scheduler for design rather than for benchmarking.
       Variants of this function were used before for CypA and ubiquitin runs.

       This is currently unimplemented but ask Shane if we need this functionality again.'''
    raise Exception('This function needs to be implemented by subclasses of the API.')
def _add_prediction_run_preconditions(self, tsession, prediction_set_id, user_dataset_name, tagged_subset):
    '''Check to make sure that the prediction set, user dataset, and optional tagged subset make sense for this API.
       Raises a colortext.Exception describing the first failed check.
       Returns the set of allowed_user_datasets.
    '''
    prediction_set = get_single_record_from_query(tsession.query(dbmodel.PredictionSet).filter(dbmodel.PredictionSet.ID == prediction_set_id))
    if not prediction_set:
        raise colortext.Exception('The prediction set "%s" does not exist in the database.' % prediction_set_id)
    elif getattr(prediction_set, self._get_prediction_type()) != 1:
        # e.g. a binding-affinity API requires PredictionSet.BindingAffinity == 1.
        raise colortext.Exception('The prediction set "%s" is not the correct type ("%s") for this API.' % (prediction_set_id, self._get_prediction_type()))
    allowed_user_datasets = self.get_defined_user_datasets(tsession)
    if user_dataset_name not in allowed_user_datasets:
        raise colortext.Exception('The user dataset "%s" does not exist in the database.' % user_dataset_name)
    if tagged_subset and tagged_subset not in allowed_user_datasets[user_dataset_name]['Subsets']:
        raise colortext.Exception('The tagged subset "%s" of user dataset "%s" does not exist in the database.' % (tagged_subset, user_dataset_name))
    return allowed_user_datasets
@job_creator
def add_prediction_run(self, *args, **kwargs):
    '''Adds all jobs corresponding to a user dataset e.g. add_prediction_run("my first run", "AllBindingAffinityData", tagged_subset = "ZEMu").

       Abstract method: concrete subclasses of this API must implement this.'''
    raise Exception('This function needs to be implemented by subclasses of the API.')
@job_creator
def merge_prediction_run(self, from_prediction_set_id, to_prediction_set_id, create_if_does_not_exist = True, series_color = 'ff0000'):
    '''Adds all of the jobs from from_prediction_set_id to to_prediction_set_id.
       When to_prediction_set_id is empty, this function makes a clone of from_prediction_set_id.

       Abstract method: concrete subclasses of this API must implement this.'''
    raise Exception('This function needs to be implemented by subclasses of the API.')
@job_creator
def add_job_by_user_dataset_record(self, *args, **kwargs):
    '''Uses the UserDataSet record to get most of the information needed to set up the job e.g. PDB complex, mutagenesis details.

       Abstract method: concrete subclasses of this API must implement this.'''
    raise Exception('This function needs to be implemented by subclasses of the API.')
@job_creator
def clone_prediction_run(self, existing_prediction_set, new_prediction_set, *args, **kwargs):
    '''add_prediction_run sets up a full run of dataset predictions but is slow as it needs to perform a lot of
       calculations and parsing. If you want to test the same dataset with slightly different parameters (e.g. a
       different protocol) then these calculations can be reused which reduces the overhead considerably.
       clone_prediction_run was written with this in mind. It copies the list of predictions and their setup (input
       files etc.) from an existing prediction set to an empty prediction set.

       Not implemented yet.'''
    raise Exception('not implemented yet')
@job_creator
def add_development_protocol_command_lines(self, development_protocol_id):
    '''Possibly temporary function used to add protocol command lines for methods in development.

       Abstract method: concrete subclasses of this API must override this.'''
    raise Exception('Abstract method. This needs to be overridden by a subclass.')
#== Input file generation API ===========================================================
#
# This part of the API is responsible for creating input files for predictions
@job_input
def create_resfile(self, prediction_id):
    '''This function returns the resfile content for the prediction. It is usually not called directly by the user but
       is available for convenience and debugging.

       Abstract method: concrete subclasses of this API must implement this.'''
    # todo: is this being used?
    raise Exception('This function needs to be implemented by subclasses of the API.')
@job_input
def create_mutfile(self, prediction_id):
    '''This function returns the mutfile content for the prediction. It is usually not called directly by the user but
       is available for convenience and debugging.

       Abstract method: concrete subclasses of this API must implement this.'''
    # todo: is this being used?
    raise Exception('This function needs to be implemented by subclasses of the API.')
#== Job execution/completion API ===========================================================
#
# This part of the API is responsible for starting jobs and setting them as failed or
# completed
@job_execution
def get_queued_jobs(self, prediction_set_id, order_by = 'Cost', order_order_asc = False, include_files = True, truncate_content = None):
    '''An iterator to return the details of the queued prediction records in this prediction set.
       An exception is raised if the prediction set is halted.

       Assuming Cost is filled in and is representative of the expected runtime, it makes sense to request jobs ordered
       by Cost and order_order_asc = False rather than by ID as longer jobs can then be kicked off before shorter jobs.

       Usage:
           for prediction_record in ppi_api.get_queued_jobs(prediction_set_id, include_files = True, truncate_content = 30):
               pprint.pprint(prediction_record)
    '''
    if self.get_prediction_set_details(prediction_set_id)['Status'] == 'halted':
        raise Exception('The prediction set is halted so no job details can be returned.')
    self._assert_prediction_set_exists(prediction_set_id)
    for job_id in self.get_queued_job_list(prediction_set_id, order_by = order_by, order_order_asc = order_order_asc):
        # Guard against runaway callers: count how often each job's details have been requested.
        self._get_job_fn_call_counter[job_id] = self._get_job_fn_call_counter.get(job_id, 0)
        self._get_job_fn_call_counter[job_id] += 1
        if self._get_job_fn_call_counter[job_id] > self._get_job_fn_call_counter_max:
            # Deliberately drop the database handles so the (presumably buggy) caller cannot keep
            # hammering the database, then fail loudly.
            self.DDG_db = None
            self.DDG_db_utf = None
            raise Exception('get_job was called %d times for this prediction. This is probably a bug in the calling code.' % self._get_job_fn_call_counter[job_id])
        yield(self.get_job_details(job_id, include_files = include_files, truncate_content = truncate_content))
@job_execution
def get_queued_job_list(self, prediction_set_id, order_by = 'Cost', order_order_asc = False):
    '''A generator yielding the IDs of the queued prediction records in this prediction set.

       Assuming Cost is filled in and is representative of the expected runtime, it makes sense to request jobs ordered
       by Cost and order_order_asc = False rather than by ID as longer jobs can then be kicked off before shorter jobs.

       Usage:
           for prediction_id in ppi_api.get_queued_job_list(prediction_set_id):
               print(prediction_id)
    '''
    assert((order_by in ['Cost', 'ID']) and isinstance(order_order_asc, bool))
    sort_direction = 'ASC' if order_order_asc else 'DESC'
    qry = 'SELECT ID FROM {0} WHERE PredictionSet=%s AND Status="queued" ORDER BY {1} {2}'.format(self._get_prediction_table(), order_by, sort_direction)
    for record in self.DDG_db.execute_select(qry, parameters=(prediction_set_id,)):
        yield record['ID']
@job_execution
def start_job(self, prediction_id, prediction_set_id):
    '''Sets the job status to "active". prediction_set must be passed and is used as a sanity check.

       Abstract method: concrete subclasses of this API must implement this.'''
    raise Exception('This function needs to be implemented by subclasses of the API.')
@job_execution
def get_max_number_of_cluster_jobs(self, prediction_set_id, priority):
    '''Returns the maximum number of cluster jobs that schedulers should run for this interface.'''
    # Bug fix: execute_select returns a list of row dicts; the previous code indexed the list
    # itself with the string key 'Value', which raised a TypeError at runtime. Take the first
    # (only) row before extracting the value.
    results = self.DDG_db.execute_select('SELECT Value FROM _DBCONSTANTS WHERE VariableName="MaxClusterJobs"')
    return results[0]['Value']
def _assert_prediction_set_exists(self, prediction_set_id):
if len(self.DDG_db.execute_select('SELECT * FROM PredictionSet WHERE ID=%s', parameters=(prediction_set_id,))) != 1:
raise Exception('The PredictionSet %s does not exist.' % prediction_set_id)
@job_execution
def alter_prediction_set_priority(self, prediction_set_id, priority):
    '''Modify the priority for a PredictionSet. Higher values give the PredictionSet more priority over other running PredictionSets.'''
    priority = int(priority)
    assert(priority > 0)
    self._assert_prediction_set_exists(prediction_set_id)
    # Bug fix: this UPDATE was previously issued via execute_select, which is the read-only query
    # path (compare fail_job, which correctly uses execute for its UPDATE). Use the write path so
    # the change is handled/committed properly.
    self.DDG_db.execute('UPDATE PredictionSet SET Priority=%s WHERE ID=%s', parameters=(priority, prediction_set_id,))
@job_execution
def alter_prediction_set_batch_size(self, prediction_set_id, batch_size):
    '''Modify the batch size for a PredictionSet. The batch size is the number of jobs which will be submitted together
       during subsequent job submissions.'''
    batch_size = int(batch_size)
    assert(batch_size > 0)
    self._assert_prediction_set_exists(prediction_set_id)
    # Bug fix: this UPDATE was previously issued via execute_select, which is the read-only query
    # path (compare fail_job, which correctly uses execute for its UPDATE). Use the write path so
    # the change is handled/committed properly.
    self.DDG_db.execute('UPDATE PredictionSet SET BatchSize=%s WHERE ID=%s', parameters=(batch_size, prediction_set_id,))
@job_execution
def set_job_temporary_protocol_field(self, prediction_id, prediction_set_id, temporary_protocol_field):
    '''Possibly temporary function which sets fields in the temporary protocol field.

       Not implemented yet.'''
    raise Exception('not implemented yet')
@job_completion
def fail_job(self, prediction_id, prediction_set, maxvmem, ddgtime, errors = None):
    '''Mark the prediction as failed, recording peak memory usage, DDG run time and any error text.
       prediction_set must be passed and is used as a sanity check.'''
    # Verify that the prediction belongs to the stated prediction set before writing anything.
    self._check_prediction(prediction_id, prediction_set)
    update_qry = 'UPDATE {0} SET Status="failed", maxvmem=%s, DDGTime=%s, Errors=%s WHERE ID=%s'.format(self._get_prediction_table())
    self.DDG_db.execute(update_qry, parameters=(maxvmem, ddgtime, errors, prediction_id,))
@job_completion
def extract_data(self, prediction_set_id, root_directory = None, force = False, score_method_id = None):
    '''Extracts the data for the prediction set run and stores it into the database.

       For all PredictionIDs associated with the PredictionSet:
         - looks for a subdirectory of root_directory with the same name as the ID e.g. /some/folder/21412
         - call extract_data_for_case

       Note: we do not use a transaction at this level. We could but it may end up being a very large transaction
       depending on the dataset size. It seems to make more sense to me to use transactions at the single prediction
       level i.e. in extract_data_for_case

       root_directory defaults to sys_settings.[api].prediction_data_path.
       If force is True then existing records should be overridden.
    '''
    root_directory = root_directory or self.prediction_data_path
    prediction_ids = self.get_prediction_ids(prediction_set_id)
    # First pass: fail early if any job directory is missing before extracting anything.
    for prediction_id in prediction_ids:
        # Bug fix: prediction IDs are integers; os.path.join requires string components, so the
        # previous call without str() raised at runtime.
        job_path = os.path.join(root_directory, str(prediction_id))
        if not os.path.exists(job_path):
            raise Exception('The folder {0} for Prediction #{1} does not exist.'.format(job_path, prediction_id))
    # Second pass: extract and store the scores case by case.
    for prediction_id in prediction_ids:
        self.extract_data_for_case(prediction_id, root_directory = root_directory, force = force, score_method_id = score_method_id)
@job_completion
def extract_data_for_case(self, prediction_id, root_directory = None, score_method_id = None, force = False):
    '''Extracts the data for the prediction case (e.g. by processing stdout) and stores it in the Prediction*StructureScore
       table.

       The scores are returned to prevent the caller having to run another query.

       If force is False and the expected number of records for the case exists in the database, these are returned.
       Otherwise, the data are extracted, stored using a database transaction to prevent partial storage, and returned.

       Note:
       We use a lot of functions here: extract_data_for_case, parse_prediction_scores, store_scores.
       This may seem like overkill but I think it could allow us to reuse a lot of the code since the tables for
       PredictionPPIStructureScore and PredictionStructureScore are very similar (but are different since the underlying
       foreign tables PredictionPPI and Prediction are at least currently separate).

       parse_prediction_scores only returns dicts for database storage so it can be useful for debugging during development.
       store_scores stores scores in the database (passed as a list of dicts) but does not care from where they came.
       extract_data_for_case calls parse_prediction_scores to get the scores and the store_scores to commit them to the database.
    '''
    root_directory = root_directory or self.prediction_data_path # defaults to sys_settings.[api].prediction_data_path
    prediction_set = self.get_job_details(prediction_id, include_files = False)['PredictionSet']
    # todo: implement force behavior (the force parameter is currently unused in this implementation)
    # Create a list of dicts for the PredictionPPIStructureScore table
    scores = self.parse_prediction_scores(prediction_id, root_directory = root_directory, score_method_id = score_method_id)
    # Store the dicts as PredictionPPIStructureScore records
    if len(scores) > 0:
        self.store_scores(prediction_set, prediction_id, scores)
    return scores
@job_completion
def parse_prediction_scores(self, prediction_id, root_directory = None, score_method_id = None):
    '''Returns a list of dicts suitable for database storage e.g. PredictionStructureScore or PredictionPPIStructureScore records.

       Abstract method: concrete subclasses of this API must override this.'''
    raise Exception('Abstract method. This needs to be overridden by a subclass. Returns a dict suitable for database storage e.g. PredictionStructureScore or PredictionPPIStructureScore records.')
@job_completion
def store_scores_for_many_predictions(self, prediction_set, scores, safe = True, prediction_structure_scores_table = None, prediction_id_field = None):
'''Stores scores for many predictions.
scores should be a list of dicts suitable for database storage e.g. PredictionStructureScore or
PredictionPPIStructureScore records.
'''
prediction_id_field = prediction_id_field or self._get_prediction_id_field()
prediction_structure_scores_table = prediction_structure_scores_table or self._get_prediction_structure_scores_table()
if safe:
# Sanity checks
for score in scores:
if prediction_id_field not in score:
raise Exception('The score record is missing a {0} field: {1}.'.format(prediction_id_field, pprint.pformat(score)))
self._check_prediction(score[prediction_id_field], prediction_set)
con = self.DDG_db.connection
cursor = con.cursor()
sql_query = None
if safe:
params_to_insert = set()
else:
params_to_insert = []
for score in scores:
if safe:
sql, params, record_exists = self.DDG_db.create_insert_dict_string(prediction_structure_scores_table, score, PKfields = [prediction_id_field, 'ScoreMethodID', 'ScoreType', 'StructureID'], check_existing = True)
else:
sql, params, record_exists = self.DDG_db.create_insert_dict_string(prediction_structure_scores_table, score, PKfields = [prediction_id_field, 'ScoreMethodID', 'ScoreType', 'StructureID'], check_existing = False)
if sql_query:
assert( sql == sql_query )
else:
sql_query = sql
if safe:
if params in params_to_insert or record_exists:
print params
print params_list
raise Exception('Duplicate params')
params_to_insert.add(params)
else:
params_to_insert.append(params)
with con:
db_cursor = con.cursor()
if safe:
db_cursor.executemany(sql_query, [x for x in params_to_insert])
else:
# print params_to_insert
db_cursor.executemany(sql_query, params_to_insert)
def remove_scores(self, prediction_set, prediction_id, score_method_id, prediction_structure_scores_table = None, prediction_id_field = None, test_mode = False):
prediction_structure_scores_table = prediction_structure_scores_table or self._get_prediction_structure_scores_table()
prediction_id_field = prediction_id_field or self._get_prediction_id_field()
if score_method_id != None:
query = 'DELETE FROM %s WHERE %s=%s AND ScoreMethodID=%s' % (
prediction_structure_scores_table, prediction_id_field, str(prediction_id), str(score_method_id)
)
else:
query = 'DELETE FROM %s WHERE %s=%s' % (
prediction_structure_scores_table, prediction_id_field, str(prediction_id)
)
if test_mode:
print query
self.DDG_db.execute(query)
@job_completion
def store_scores(self, prediction_set, prediction_id, scores, prediction_structure_scores_table = None, prediction_id_field = None):
'''Stores scores for one prediction.
scores should be a list of dicts suitable for database storage e.g. PredictionStructureScore or
PredictionPPIStructureScore records.
This function uses a transaction so if any of the insertions fail then they are all rolled back.
The default scores table and prediction_id_field can be (evilly) overridden to put scores in the wrong table
'''
if prediction_set:
# Only check prediction is in prediction set if prediction set is passed in
self._check_prediction(prediction_id, prediction_set)
if prediction_id_field == None:
# Only check for self-consistency if we are not (evilly) overriding everything that is good in the world
self._check_scores_for_main_fields(scores, prediction_id)
if prediction_structure_scores_table == None:
# Only check for self-consistency if we are not (evilly) overriding everything our forefathers died for
self._check_score_fields(scores)
prediction_structure_scores_table = prediction_structure_scores_table or self._get_prediction_structure_scores_table()
prediction_id_field = prediction_id_field or self._get_prediction_id_field()
try:
con = self.DDG_db.connection
with con:
db_cursor = con.cursor()
for score in scores:
sql, params, record_exists = self.DDG_db.create_insert_dict_string(prediction_structure_scores_table, score, PKfields = [prediction_id_field, 'ScoreMethodID', 'ScoreType', 'StructureID'], check_existing = True)
if not record_exists:
db_cursor.execute(sql, params)
except Exception, e:
print sql, params, record_exists
raise colortext.Exception('Failed to insert scores for Prediction #{0}: "{1}".\n{2}'.format(prediction_id, str(e), traceback.format_exc()))
@job_completion
def complete_job(self, prediction_id, prediction_set, scores, maxvmem, ddgtime):
'''Sets a job to 'completed' and stores scores. prediction_set must be passed and is used as a sanity check.'''
raise Exception('This function needs to be implemented by subclasses of the API.')
###########################################################################################
## Prediction results layer
##
## This part of the API for returning data about completed predictions.
###########################################################################################
@job_results
def get_ddg_scores_per_structure(self, prediction_id):
'''Returns the list of all DDG scores for a prediction_id. NOTE: Consider allowing the score method to be passed as a parameter.'''
raise Exception('Abstract method. This needs to be overridden by a subclass.')
    @job_results
    def get_prediction_data_path(self):
        '''Returns the file server path under which archived prediction data (zip files) is stored.

           This is the same value as self.prediction_data_path; exposed as a method for API clients.'''
        return self.prediction_data_path
@job_results
def get_job_data(self, prediction_id):
'''Returns (in memory) the contents of the zip file corresponding to the prediction.'''
job_data_path = os.path.join(self.prediction_data_path, '%d.zip' % prediction_id)
if os.path.exists(job_data_path):
return read_file(job_data_path, binary = True)
@job_results
def write_job_archive_to_file(self, prediction_id, output_filename):
'''Writes the contents of the zip file corresponding to the prediction.'''
job_data_path = os.path.join(self.prediction_data_path, '%d.zip' % prediction_id)
assert(output_filename != job_data_path) # do not overwrite the existing file or allow to extract in place
write_file(output_filename, self.get_job_data(prediction_id))
    @job_results
    def write_job_data_to_disk(self, prediction_id, output_path):
        '''Saves the job output for the prediction to the specified path.

           The archive zip is copied into output_path, extracted there with unzip, and the temporary
           zip copy is then removed. Raises if the extraction subprocess reports a non-zero error code.'''
        # output_path must already exist and must not be the canonical archive directory
        # (that would overwrite/extract over the stored archives).
        assert(os.path.exists(output_path))
        assert(output_path != self.prediction_data_path) # do not overwrite the existing file or allow to extract in place
        archive = self.get_job_data(prediction_id)
        write_file(os.path.join(output_path, '%d.zip' % prediction_id), archive, 'wb')
        # NOTE(review): Popen here appears to be a project wrapper (first argument is a working directory,
        # second is an argument list; exposes .errorcode/.stdout/.stderr) rather than subprocess.Popen -- confirm.
        p = Popen(output_path, ['unzip', '%d.zip' % prediction_id])
        # The temporary zip copy is removed whether or not extraction succeeded.
        os.remove(os.path.join(output_path, '%d.zip' % prediction_id))
        if p.errorcode != 0:
            raise colortext.Exception(p.stderr)
        else:
            colortext.warning(p.stdout)
@job_results
def extract_sge_job_stdout_from_archive(self, prediction_id):
'''Returns the stdout files created during the prediction.
The files are returned as a dict mapping with the type of output file (e.g. ddg_monomer step) to the content
of the stdout files.
'''
# Retrieve and unzip results in memory
archive = self.get_job_data(prediction_id)
zipped_content = zipfile.ZipFile(BytesIO(archive), 'r', zipfile.ZIP_DEFLATED)
try:
stdout_file_names = {}
stdout_file_list = [l for l in sorted(zipped_content.namelist()) if (l.find('cmd.o') != -1)]
for f in stdout_file_list:
tokens = os.path.split(f)
assert(tokens[0].isdigit())
title = tokens[1].split('_')[0]
assert(stdout_file_names.get(title) == None)
stdout_file_names[title] = f
stdout_files = {}
for stdout_type, filename in stdout_file_names.iteritems():
stdout_files[stdout_type] = zipped_content.open(filename, 'r').read()
zipped_content.close()
return stdout_files
except Exception, e:
zipped_content.close()
raise Exception(str(e))
###########################################################################################
## Analysis layer
##
## This part of the API is responsible for running analysis on completed predictions
###########################################################################################
@analysis_api
def get_top_x_scores(self, prediction_id, score_method_id, score_type, x, component = 'total', order_by = 'ASC'):
'''get_top_x_ddg_stability'''
results = self.DDG_db.execute_select('SELECT * FROM {0} WHERE {1}=%s AND ScoreMethodID=%s AND ScoreType=%s ORDER BY {2} {3}'.format(self._get_prediction_structure_scores_table(), self._get_prediction_id_field(), component, order_by), parameters=(prediction_id, score_method_id, score_type))
if len(results) < x:
raise Exception('The top {0} best scores were requested but only {1} results are stored in the database.'.format(x, len(results)))
results = results[:x]
return [{
'PredictionID' : r[self._get_prediction_id_field()],
'ScoreMethodID' : score_method_id,
'ScoreType' : score_type,
'StructureID' : r['StructureID'],
component : r[component],
} for r in results]
@analysis_api
def get_prediction_scores(self, prediction_id, expectn = None):
'''Returns the scores for the prediction using nested dicts with the structure:
ScoreMethodID -> StructureID -> ScoreType -> database record
'''
cache_id = (prediction_id, expectn)
if cache_id in self.prediction_scores_cache:
return self.prediction_scores_cache[cache_id]
scores = {}
for r in self.DDG_db.execute_select('SELECT * FROM {0} WHERE {1}=%s'.format(self._get_prediction_structure_scores_table(), self._get_prediction_id_field()), parameters=(prediction_id,)):
ScoreMethodID = r['ScoreMethodID']
ScoreType = r['ScoreType']
StructureID = r['StructureID']
if StructureID == -1:
StructureID = 'None' # usually this indicates an overall or aggregate value
scores[ScoreMethodID] = scores.get(ScoreMethodID, {})
scores[ScoreMethodID][StructureID] = scores[ScoreMethodID].get(StructureID, {})
scores[ScoreMethodID][StructureID][ScoreType] = r
del scores[ScoreMethodID][StructureID][ScoreType]['ScoreMethodID']
del scores[ScoreMethodID][StructureID][ScoreType]['StructureID']
del scores[ScoreMethodID][StructureID][ScoreType]['ScoreType']
del scores[ScoreMethodID][StructureID][ScoreType][self._get_prediction_id_field()]
del scores[ScoreMethodID][StructureID][ScoreType]['ID']
if expectn != None:
for score_method_id, score_method_scores in scores.iteritems():
num_cases = 0
for k in score_method_scores.keys():
if isinstance(k, int) or isinstance(k, long):
num_cases += 1
if num_cases < expectn:
print 'Expected scores for at least {0} runs with score method {1}; found {2}. Prediction id: {3}.'.format(expectn, score_method_id, num_cases, prediction_id)
self.prediction_scores_cache[cache_id] = scores
return scores
@analysis_api
def get_top_x_ddg(self, prediction_id, score_method_id, top_x = 3, expectn = None):
'''Returns the TopX value for the prediction. Typically, this is the mean value of the top X predictions for a
case computed using the associated Score records in the database.'''
raise Exception('This function needs to be implemented by subclasses of the API.')
@analysis_api
def get_existing_analysis(self, prediction_set_id):
'''Returns the summary statistics for all existing dataframes in the database.
Unlike get_analysis_dataframe, this function does not create any dataframes.'''
raise Exception('This function needs to be implemented by subclasses of the API.')
@analysis_api
def get_analysis_dataframe(self, prediction_set_id,
experimental_data_exists = True,
prediction_set_series_name = None, prediction_set_description = None, prediction_set_credit = None,
prediction_set_color = None, prediction_set_alpha = None,
use_existing_benchmark_data = True,
include_derived_mutations = False,
use_single_reported_value = False,
ddg_analysis_type = None,
take_lowest = 3,
burial_cutoff = 0.25,
stability_classication_experimental_cutoff = 1.0,
stability_classication_predicted_cutoff = 1.0,
report_analysis = True,
silent = False,
root_directory = None, # where to find the prediction data on disk
score_method_id = None,
expectn = None,
allow_failures = False,
extract_data_for_case_if_missing = True,
):
'''This function uses experimental data from the database and prediction data from the Prediction*StructureScore
table to build a pandas dataframe and store it in the database. See .analyze for an explanation of the
parameters.
The dataframes mostly contain redundant data so their storage could be seen to break a key database design
principal. However, we store the dataframe in the database as it can take a while to build it from scratch and
pre-built dataframes can be used to run quick analysis, for rapid development of the analysis methods, or to
plug into webservers where responsiveness is important.
If use_existing_benchmark_data is True and the dataframe already exists then it is returned as a BenchmarkRun object.
Otherwise, it is built from the Prediction*StructureScore records.
If the Prediction*StructureScore records do not exist, this function falls back into extract_data_for_case
to generate them in which case root_directory needs to be specified (this is the only use for the root_directory
parameter).
'''
raise Exception('Abstract method. This needs to be overridden by a subclass.')
@analysis_api
def get_prediction_data(self, prediction_id, score_method_id, main_ddg_analysis_type, expectn = None, extract_data_for_case_if_missing = True, root_directory = None, prediction_table_rows_cache = None, dataframe_type = None):
'''Returns a dictionary with values relevant to predictions e.g. binding affinity, monomeric stability.'''
prediction_data = {}
# Add memory and runtime
if prediction_table_rows_cache != None:
prediction = prediction_table_rows_cache[prediction_id]
prediction_data['UserDataSetExperimentID'] = self._get_sqa_predictions_user_dataset_experiment_id(prediction)
if prediction.DDGTime == None:
prediction_data['RunTime'] = 0.0
else:
prediction_data['RunTime'] = float(prediction.DDGTime)
if prediction.maxvmem == None:
prediction_data['MaxMemory'] = 0.0
else:
prediction_data['MaxMemory'] = float(prediction.maxvmem)
else:
raise Exception("Not implemented. Write a function to get the data only for this prediction_id here")
return self._get_prediction_data(prediction_id, score_method_id, main_ddg_analysis_type, expectn = expectn, extract_data_for_case_if_missing = extract_data_for_case_if_missing, root_directory = root_directory, prediction_data = prediction_data, dataframe_type = dataframe_type)
def _get_prediction_data(self, prediction_id, score_method_id, main_ddg_analysis_type, top_x = 3, expectn = None, extract_data_for_case_if_missing = True, root_directory = None):
'''Returns a dictionary with values relevant to predictions e.g. binding affinity, monomeric stability.'''
raise Exception('Abstract method. This needs to be overridden by a subclass.')
    def _get_analysis_dataframe(self, benchmark_run_class,
            dataframe_type = None,
            prediction_set_id = None,
            experimental_data_exists = True,
            prediction_set_series_name = None, prediction_set_description = None, prediction_set_credit = None,
            ddg_analysis_type = None,
            prediction_set_color = None, prediction_set_alpha = None,
            use_existing_benchmark_data = True,
            include_derived_mutations = False,
            use_single_reported_value = False,
            take_lowest = 3,
            burial_cutoff = 0.25,
            stability_classication_experimental_cutoff = 1.0,
            stability_classication_predicted_cutoff = 1.0,
            report_analysis = True,
            silent = False,
            root_directory = None, # where to find the prediction data on disk
            score_method_id = None,
            expectn = None,
            allow_failures = False,
            extract_data_for_case_if_missing = True,
            debug = False,
            restrict_to = set(),    # NOTE(review): mutable default argument -- safe only if never mutated in place; confirm
            remove_cases = set(),   # NOTE(review): mutable default argument -- safe only if never mutated in place; confirm
            ):
        '''This 'private' function does most of the work for get_analysis_dataframe.

           It either (a) loads a previously stored, gzipped pandas HDF store from the AnalysisDataFrame table
           (when use_existing_benchmark_data is True and a matching record exists) or (b) builds a new dataframe
           from the per-prediction data, stores it in AnalysisDataFrame, and returns a benchmark_run_class
           instance wrapping it.'''
        # Reconcile take_lowest (TopX) with ddg_analysis_type: exactly one of the two ways of specifying
        # the analysis type may be passed. A 'DDG_TopN' analysis type implies take_lowest = N.
        if take_lowest:
            assert( ddg_analysis_type == None )
            ddg_analysis_type = 'DDG_Top%d' % take_lowest
        else:
            assert( ddg_analysis_type != None and take_lowest == None )
            if ddg_analysis_type.startswith( 'DDG_Top' ):
                take_lowest = int( ddg_analysis_type[7:] )
        assert(dataframe_type != None and prediction_set_id != None)
        # Try to fetch a previously stored dataframe (a gzipped pandas HDF store blob) matching the exact
        # analysis parameters.
        hdf_store_blob = None
        if use_existing_benchmark_data:
            if take_lowest == None:
                hdf_store_blob = self.DDG_db.execute_select('''
                    SELECT PandasHDFStore FROM AnalysisDataFrame WHERE
                    PredictionSet=%s AND DataFrameType=%s AND ContainsExperimentalData=%s AND ScoreMethodID=%s AND UseSingleReportedValue=%s AND TopX IS NULL AND BurialCutoff=%s AND
                    StabilityClassicationExperimentalCutoff=%s AND StabilityClassicationPredictedCutoff=%s AND
                    IncludesDerivedMutations=%s AND DDGAnalysisType=%s''', parameters=(
                    prediction_set_id, dataframe_type, experimental_data_exists, score_method_id, use_single_reported_value, burial_cutoff,
                    stability_classication_experimental_cutoff, stability_classication_predicted_cutoff, include_derived_mutations, ddg_analysis_type))
            else:
                # KAB TODO: to ask Shane - why does passing None not correctly change to IS NULL?
                hdf_store_blob = self.DDG_db.execute_select('''
                    SELECT PandasHDFStore FROM AnalysisDataFrame WHERE
                    PredictionSet=%s AND DataFrameType=%s AND ContainsExperimentalData=%s AND ScoreMethodID=%s AND UseSingleReportedValue=%s AND TopX=%s AND BurialCutoff=%s AND
                    StabilityClassicationExperimentalCutoff=%s AND StabilityClassicationPredictedCutoff=%s AND
                    IncludesDerivedMutations=%s AND DDGAnalysisType=%s''', parameters=(
                    prediction_set_id, dataframe_type, experimental_data_exists, score_method_id, use_single_reported_value, take_lowest, burial_cutoff,
                    stability_classication_experimental_cutoff, stability_classication_predicted_cutoff, include_derived_mutations, ddg_analysis_type))
            if hdf_store_blob:
                # The stored blob is gzip-compressed; inflate it in memory.
                assert(len(hdf_store_blob) == 1)
                mem_zip = StringIO.StringIO()
                mem_zip.write(hdf_store_blob[0]['PandasHDFStore'])
                mem_zip.seek(0)
                hdf_store_blob = gzip.GzipFile(fileobj = mem_zip, mode='rb').read()
        if not(use_existing_benchmark_data and hdf_store_blob):
            # Create this cache if we are going to end up using it in if statements below
            prediction_table_rows_cache = self._get_prediction_set_prediction_table_rows(prediction_set_id)
        else:
            prediction_table_rows_cache = None
        # This dict is similar to dataset_cases in the benchmark capture (dataset.json)
        prediction_set_case_details = None
        # NOTE(review): prediction_ids is never populated below, so num_predictions_in_prediction_set and
        # the working_prediction_ids count are always computed from an empty list -- this looks like a bug; confirm.
        prediction_ids = []
        if not(use_existing_benchmark_data and hdf_store_blob):
            print('Retrieving the associated experimental data for the user dataset.')
            prediction_set_case_details = self.get_prediction_set_case_details(prediction_set_id, retrieve_references = True, include_experimental_data = experimental_data_exists, prediction_table_rows_cache = prediction_table_rows_cache)
            UserDataSetExperimentIDs = prediction_set_case_details['Data'].keys()
            prediction_set_case_details = prediction_set_case_details['Data']
        analysis_data = {}
        top_level_dataframe_attributes = {}
        if not(use_existing_benchmark_data and hdf_store_blob):
            if extract_data_for_case_if_missing and not silent:
                print('Computing the best/top/whatever values for each prediction case, extracting data if need be.')
            elif not extract_data_for_case_if_missing and not silent:
                print('Computing the best/top/whatever values for each prediction case; skipping missing data without attempting to extract.')
            num_predictions_in_prediction_set = len(prediction_ids)
            failed_cases = set()
            ## get_job_description(self, prediction_id)
            # Gather the per-prediction analysis values for every case in the user dataset.
            for UserDataSetExperimentID in UserDataSetExperimentIDs:
                try:
                    prediction_id = prediction_set_case_details[UserDataSetExperimentID]['PredictionID']
                    prediction_id_data = self.get_prediction_data(prediction_id, score_method_id, ddg_analysis_type, expectn = expectn, extract_data_for_case_if_missing = extract_data_for_case_if_missing, root_directory = root_directory, dataframe_type = dataframe_type, prediction_table_rows_cache = prediction_table_rows_cache)
                    analysis_data[UserDataSetExperimentID] = prediction_id_data
                    # The UserDataSetExperimentID is the dict key; store the prediction id in the record instead.
                    del analysis_data[UserDataSetExperimentID]['UserDataSetExperimentID']
                    analysis_data[UserDataSetExperimentID]['PredictionID'] = prediction_id
                except FatalException, e:
                    raise
                except PartialDataException, e:
                    if not allow_failures:
                        raise Exception('Prediction {0} has partial data. Skipping.'.format(prediction_id))
                    failed_cases.add(prediction_id)
                except Exception, e:
                    raise Exception('An error occurred during the best/top/whatever computation: {0}.\n{1}'.format(str(e), traceback.format_exc()))
                    # NOTE(review): this line is unreachable (it follows an unconditional raise) -- confirm intent.
                    failed_cases.add(prediction_id)
                # In debug mode, stop after 20 cases to keep iteration fast.
                if debug and len(analysis_data) >= 20:
                    break
            if failed_cases:
                colortext.error('Failed to determine the best/top/whatever score for {0}/{1} predictions. Continuing with the analysis ignoring these cases.'.format(len(failed_cases), len(prediction_ids)))
            working_prediction_ids = sorted(set(prediction_ids).difference(failed_cases))
            top_level_dataframe_attributes = dict(
                num_predictions_in_prediction_set = num_predictions_in_prediction_set,
                num_predictions_in_dataframe = len(working_prediction_ids),
                dataframe_type = dataframe_type,
                contains_experimental_data = experimental_data_exists,
            )
            # Only pull PDB data for cases where we have data
            restrict_to_pdbs = set([prediction_set_case_details[k]['Structure']['PDBFileID'] for k in analysis_data])
        # Resolve display metadata (series name/color/alpha, description, credit), preferring explicit
        # arguments over the PredictionSet record.
        prediction_set_details = self.get_prediction_set_details(prediction_set_id)
        prediction_set_series_name = prediction_set_series_name or prediction_set_details['SeriesName'] or prediction_set_details['ID']
        prediction_set_description = prediction_set_description or prediction_set_details['Description']
        prediction_set_color = prediction_set_color or prediction_set_details['SeriesColor']
        prediction_set_alpha = prediction_set_alpha or prediction_set_details['SeriesAlpha']
        score_method_details = self.get_score_method_details( score_method_id = score_method_id )
        additional_join_parameters = {
            'score_method' : {
                'short_name' : score_method_details['MethodName'],
                'long_name' : '%s - %s' % (score_method_details['MethodType'], score_method_details['Authors']),
            },
            'prediction_set_id' : {
                'short_name' : prediction_set_id,
            },
            'ddg_analysis_type' : {
                'short_name' : ddg_analysis_type[4:],
                'long_name' : ddg_analysis_type,
            },
        }
        # Initialize the BindingAffinityBenchmarkRun object
        # Note: prediction_set_case_details, analysis_data, and top_level_dataframe_attributes will not be filled in
        benchmark_run = benchmark_run_class(
            prediction_set_series_name,
            prediction_set_case_details,
            analysis_data,
            contains_experimental_data = experimental_data_exists,
            additional_join_parameters = additional_join_parameters,
            store_data_on_disk = False,
            calculate_scalar_adjustments = False,
            benchmark_run_directory = None,
            use_single_reported_value = use_single_reported_value,
            description = prediction_set_description,
            dataset_description = prediction_set_description,
            credit = prediction_set_credit,
            include_derived_mutations = include_derived_mutations,
            generate_plots = False,
            report_analysis = report_analysis,
            silent = silent,
            burial_cutoff = burial_cutoff,
            stability_classication_x_cutoff = stability_classication_experimental_cutoff,
            stability_classication_y_cutoff = stability_classication_predicted_cutoff,
            use_existing_benchmark_data = False,
            recreate_graphs = False,
            misc_dataframe_attributes = top_level_dataframe_attributes,
            restrict_to = restrict_to,
            remove_cases = remove_cases,
        )
        if not(use_existing_benchmark_data and hdf_store_blob):
            # Build the dataframe from scratch and replace any stored dataframe with the same parameters.
            hdf_store_blob = benchmark_run.create_dataframe(pdb_data = self.get_prediction_set_pdb_chain_details(prediction_set_id, restrict_to_pdbs = restrict_to_pdbs))
            d = dict(
                PredictionSet = prediction_set_id,
                DataFrameType = dataframe_type,
                ContainsExperimentalData = experimental_data_exists,
                ScoreMethodID = score_method_id,
                UseSingleReportedValue = use_single_reported_value,
                TopX = take_lowest,
                BurialCutoff = burial_cutoff,
                StabilityClassicationExperimentalCutoff = stability_classication_experimental_cutoff,
                StabilityClassicationPredictedCutoff = stability_classication_predicted_cutoff,
                IncludesDerivedMutations = include_derived_mutations,
                DDGAnalysisType = ddg_analysis_type,
                SeriesName = prediction_set_series_name,
                SeriesColor = prediction_set_color,
                SeriesAlpha = prediction_set_alpha,
                Description = prediction_set_description,
                Credit = prediction_set_credit,
                DDGAnalysisTypeDescription = benchmark_run.ddg_analysis_type_description,
                PandasHDFStore = hdf_store_blob,
            )
            self.DDG_db.execute('''DELETE FROM AnalysisDataFrame WHERE PredictionSet=%s AND DataFrameType=%s AND ContainsExperimentalData=%s AND ScoreMethodID=%s AND UseSingleReportedValue=%s AND TopX=%s AND
                                BurialCutoff=%s AND StabilityClassicationExperimentalCutoff=%s AND StabilityClassicationPredictedCutoff=%s AND
                                IncludesDerivedMutations=%s AND DDGAnalysisType=%s''',
                                parameters = (prediction_set_id, dataframe_type, experimental_data_exists, score_method_id, use_single_reported_value, take_lowest,
                                burial_cutoff, stability_classication_experimental_cutoff, stability_classication_predicted_cutoff,
                                include_derived_mutations, ddg_analysis_type
                                ))
            self.DDG_db.insertDictIfNew('AnalysisDataFrame', d, ['PredictionSet', 'DataFrameType', 'ContainsExperimentalData', 'ScoreMethodID', 'UseSingleReportedValue', 'TopX', 'BurialCutoff',
                                                                 'StabilityClassicationExperimentalCutoff', 'StabilityClassicationPredictedCutoff',
                                                                 'IncludesDerivedMutations', 'DDGAnalysisType'], locked = False)
        else:
            # Reuse the previously stored dataframe.
            benchmark_run.read_dataframe_from_content(hdf_store_blob)
        return benchmark_run
# if use_existing_benchmark_data and dataframe exists: return dataframe
# else retrieve all of the Score records from the database
# if a record does not exist:
# if root_directory then call extract_data_for_case to create an analysis dataframe and store it in the database
# store the number of complete Score records as a column in the dataframe (to indicate whether analysis is being performed on a full set of data)
#
# For Shane: this extracts the dataset_description and dataset_cases data that DDGBenchmarkManager currently takes care of in the capture.
# The analysis_data variable of DDGBenchmarkManager should be compiled via queries calls to the Prediction*StructureScore table.
def map_prediction_ids(self, first_prediction_set_id, second_prediction_set_id):
'''
Returns pairs of prediction IDs corresponding to ther same underlying UserDataSet.
Useful when input for a prediction run is based on the saved output files of another run.
'''
first_prediction_set_case_details = self.get_prediction_set_case_details(
first_prediction_set_id,
retrieve_references = False,
include_experimental_data = False,
prediction_table_rows_cache = self._get_prediction_set_prediction_table_rows(first_prediction_set_id),
)
second_prediction_set_case_details = self.get_prediction_set_case_details(
second_prediction_set_id,
retrieve_references = False,
include_experimental_data = False,
prediction_table_rows_cache = self._get_prediction_set_prediction_table_rows(second_prediction_set_id),
)
first_UserDataSetExperimentIDs = set( first_prediction_set_case_details['Data'].keys() )
second_UserDataSetExperimentIDs = set( second_prediction_set_case_details['Data'].keys() )
assert( first_UserDataSetExperimentIDs == second_UserDataSetExperimentIDs )
return_list = []
for UserDataSetExperimentID in first_UserDataSetExperimentIDs:
return_list.append( (
first_prediction_set_case_details['Data'][UserDataSetExperimentID]['PredictionID'],
second_prediction_set_case_details['Data'][UserDataSetExperimentID]['PredictionID'],
) )
return sorted( return_list )
    @analysis_api
    def analyze(self, prediction_set_ids,
            prediction_set_series_names = {}, prediction_set_descriptions = {}, prediction_set_credits = {}, prediction_set_colors = {}, prediction_set_alphas = {},
            use_published_data = False,
            use_existing_benchmark_data = True, recreate_graphs = False,
            include_derived_mutations = False,
            expectn = 50,
            use_single_reported_value = False,
            take_lowest = 3,
            burial_cutoff = 0.25,
            stability_classication_experimental_cutoff = 1.0,
            stability_classication_predicted_cutoff = 1.0,
            output_directory = None,
            generate_plots = True,
            report_analysis = True,
            silent = False,
            root_directory = None,
            restrict_to = set(),
            remove_cases = set(),
            ):
        '''Runs the analyses for the specified PredictionSets and cross-analyzes the sets against each other if appropriate.

           * Analysis setup arguments *

           prediction_set_ids is a list of PredictionSet IDs. Each PredictionSet will be analyzed separately and appropriate
           pairs will be cross-analyzed.
           prediction_set_series_names, prediction_set_descriptions, and prediction_set_credits are mappings from PredictionSet IDs
           to series names (in plots), descriptions, and credits respectively. These details are stored in PredictionSet so
           they are optional arguments. If passed, these mappings will override the PredictionSet values in the database
           which allows the user to customize the analysis reports. Likewise, prediction_set_colors and prediction_set_alphas
           are mappings to series colors and transparency values for use in the plots.
           use_published_data. todo: implement later. This should include any published data e.g. the Kellogg et al. data for protein stability.
           use_existing_benchmark_data and recreate_graphs are data creation arguments i.e. "should we use existing data or create it from scratch?"
           include_derived_mutations is used to filter out dataset cases with derived mutations.
           expectn declares how many predictions we expect to see per dataset case. If the actual number is less than expectn
           then a warning will be included in the analysis.

           * Dataframe arguments *

           use_single_reported_value is specific to ddg_monomer. If this is True then the DDG value reported by the application is used and take_lowest is ignored. This is inadvisable - take_lowest = 3 is a better default.
           take_lowest AKA Top_X. Specifies how many of the best-scoring groups of structures to consider when calculating the predicted DDG value.
           burial_cutoff defines what should be considered buried (DSSPExposure field). Values around 1.0 are fully exposed, values of 0.0 are fully buried. For technical reasons, the DSSP value can exceed 1.0 but usually not by much.
           stability_classication_experimental_cutoff AKA x_cutoff. This defines the neutral mutation range for experimental values in kcal/mol i.e. values between -1.0 and 1.0 kcal/mol are considered neutral by default.
           stability_classication_predicted_cutoff AKA y_cutoff. This defines the neutral mutation range for predicted values in energy units.

           * Reporting arguments *

           output_directory : The directory in which to save plots and reports.
           generate_plots : if plots are not needed, setting this to False can shorten the analysis time.
           report_analysis : Whether or not to print analysis to stdout.
           silent = False : Whether or not anything should be printed to stdout (True is useful for webserver interaction).
        '''
        raise Exception('Abstract method. This needs to be overridden by a subclass.')
        # NOTE(review): everything below this raise is unreachable -- it appears to be design scaffolding
        # kept as notes for implementers of the concrete subclasses.
        # colors, alpha, and default series name and descriptions are taken from PredictionSet records
        # The order (if p1 before p2 then p1 will be on the X-axis in comparative plots) in comparative analysis plots is determined by the order in PredictionSets
        assert(take_lowest > 0 and (int(take_lowest) == take_lowest))
        assert(0 <= burial_cutoff <= 2.0)
        assert(stability_classication_experimental_cutoff > 0)
        assert(stability_classication_predicted_cutoff > 0)
        # assert PredictionSet for PredictionSet in PredictionSets is in the database
        # calls get_analysis_dataframe(options) over all PredictionSets
        # if output_directory is set, save files
        # think about how to handle this in-memory. Maybe return a dict like:
        #"run_analyis" -> benchmark_name -> {analysis_type -> object}
        #"comparative_analysis" -> (benchmark_name_1, benchmark_name_2) -> {analysis_type -> object}
        # comparative analysis
            # only compare dataframes with the exact same points
            # allow cutoffs, take_lowest to differ but report if they do so
@analysis_api
def determine_best_pair(self, prediction_id, score_method_id = 1):
'''This returns the best wildtype/mutant pair for a prediction given a scoring method. NOTE: Consider generalising this to the n best pairs.'''
raise Exception('Abstract method. This needs to be overridden by a subclass.')
@analysis_api
def create_abacus_graph_for_a_single_structure(self, PredictionSet, scoring_method, scoring_type, graph_title = None, PredictionIDs = None, graph_filename = None, cached_results = None, num_datapoints = 0):
    '''This function creates an abacus graph for one PDB file. It is useful when scanning all mutations at all positions
    on small proteins e.g. ubiquitin to show which mutations at which positions are likely to improve the stability or
    binding affinity.
    The num_datapoints variable is mainly for debugging - I was tuning the resolution/DPI to fit the number of datapoints.'''
    # NOTE(review): this guard makes everything below unreachable until it is removed.
    raise Exception('This should work or nearly work. Test it again when we have real data. Does it assume single point mutations?')
    # Use caller-supplied results when given, otherwise fetch them for the PredictionSet.
    results = cached_results
    if not results:
        results = self.get_flattened_prediction_results(PredictionSet)
    # This plot only makes sense for a single structure - fail loudly otherwise.
    pdb_ids = set()
    for r in results:
        pdb_ids.add(r['PDBFileID'])
    if len(pdb_ids) != 1:
        raise Exception('This function is only meant to be called when the PredictionSet or the set of results contains records for a single structure. The set of results contains %d structures.' % len(pdb_ids))
    # Key each record by (predicted ddG, ExperimentID) so iterating sorted keys walks records in ddG order.
    sortable_results = {}
    for r in results:
        if (not PredictionIDs) or (r['PredictionID'] in PredictionIDs):
            sortable_results[(json.loads(r['Scores'])['data'][scoring_method][scoring_type]['ddG'], r['ExperimentID'])] = r
    count = 0
    # Collect every individual mutation seen across all records, keyed by residue number for stable sorting.
    set_of_mutations = set()
    # NOTE(review): iteritems() and string.strip (below) are Python 2 only - this method needs porting for Python 3.
    for k, r in sorted(sortable_results.iteritems()):
        #if r['FlattenedMutations'].find('A E141L') != -1 and r['FlattenedMutations'].find('A S142A') != -1 and r['FlattenedMutations'].find('A L78Y') != -1:
        # print('%f, %s' % (k[0], r['FlattenedMutations']))
        #if r['FlattenedMutations'].find('A W103M') != -1 and r['FlattenedMutations'].find('A F70Y') != -1:
        # if r['FlattenedMutations'].find('A E141L') == -1 and r['FlattenedMutations'].find('A S142A') == -1 and r['FlattenedMutations'].find('A L78Y') == -1:
        # print('%f, %s' % (k[0], r['FlattenedMutations']))
        # NOTE(review): hard-coded mutation names below look like leftover debugging filters for one dataset.
        if r['FlattenedMutations'].find('A W103M') != -1 and r['FlattenedMutations'].find('A F70Y') != -1:
            if r['FlattenedMutations'].find('A E141L') == -1 and r['FlattenedMutations'].find('A S142A') == -1 and r['FlattenedMutations'].find('A L78Y') == -1:
                #print('%f, %s' % (k[0], r['FlattenedMutations']))
                count += 1
        #A E141L, A S142A
        # Split the comma-separated mutation string, e.g. 'A E141L, A S142A', into individual mutations.
        mutations = [m for m in map(string.strip, r['FlattenedMutations'].split(',')) if m]
        for m in mutations:
            # (residue number, full mutation string) - the int residue number drives the sort order.
            set_of_mutations.add((int(m.split()[1][1:-1]), m))
        #if r['FlattenedMutations'].find('A L78Y') == -1:
        # print('%f, %s' % (k[0], r['FlattenedMutations']))
        # #count += 1
    # Build one row per record: the predicted ddG plus a 0/1 incidence vector over the sorted mutation set.
    pruned_data = []
    for k, r in sorted(sortable_results.iteritems()):
        line = []
        #print(json.loads(r['Scores'])['data'][scoring_method][scoring_type]['ddG'], r['FlattenedMutations'])
        for m in sorted(set_of_mutations):
            if r['FlattenedMutations'].find(m[1]) != -1:
                line.append(1)
            else:
                line.append(0)
        pruned_data.append((json.loads(r['Scores'])['data'][scoring_method][scoring_type]['ddG'], line))
    # Column labels are the mutation strings without the chain prefix, e.g. 'E141L'.
    labels = [m[1].split()[1] for m in sorted(set_of_mutations)]
    graph_title = graph_title or r'$\Delta\Delta$G predictions for %s (%s.%s)' % (PredictionSet, scoring_method.replace(',0A', '.0$\AA$').replace('_', ' '), scoring_type)
    # Optionally truncate the rows (debugging aid - see the docstring note about num_datapoints).
    pruned_data = pruned_data[0:num_datapoints or len(pruned_data)]
    colortext.message('Creating graph with %d datapoints...' % len(pruned_data))
    # The abacus plot needs at least two rows that contain at least one set bit.
    number_of_non_zero_datapoints = 0
    for p in pruned_data:
        if 1 in p[1]:
            number_of_non_zero_datapoints += 1
            if number_of_non_zero_datapoints > 1:
                break
    if number_of_non_zero_datapoints < 2:
        raise Exception('The dataset must contain at least two non-zero points.')
    # Write to disk when a filename was given, otherwise return the in-memory graph object.
    if graph_filename:
        return self.write_abacus_graph(graph_filename, graph_title, labels, pruned_data, scoring_method, scoring_type)
    else:
        return self.create_abacus_graph(graph_title, labels, pruned_data, scoring_method, scoring_type)
################################################################################################
## Application layer
## These functions combine the database and prediction data with useful klab
################################################################################################
#== PyMOL API ===========================================================
@app_pymol
def create_pymol_session_in_memory(self, prediction_id, task_number, pymol_executable = '/var/www/tg2/tg2env/designdb/pymol/pymol/pymol'):
    '''Build and return (in memory) a PyMOL session for a pair of structures.

    Abstract: concrete subclasses must provide the implementation.
    '''
    raise Exception('Abstract method. This needs to be overridden by a subclass.')
@app_pymol
def write_pymol_session(self, prediction_id, task_number, output_filepath, pymol_executable = '/var/www/tg2/tg2env/designdb/pymol/pymol/pymol'):
    '''Build the PyMOL session for a pair of structures and write it to output_filepath.'''
    # Delegate session construction to the in-memory builder, then persist the bytes.
    session_contents = self.create_pymol_session_in_memory(prediction_id, task_number, pymol_executable = pymol_executable)
    write_file(output_filepath, session_contents, 'wb')
@general_data_entry
def associate_pdb_file_with_project(self, pdb_file_id, project_id, notes = None):
    '''Create (or look up) the ProjectPDBFile record linking a PDB file to a project.

    :param pdb_file_id: primary key of the PDB file record.
    :param project_id: primary key of the project record.
    :param notes: optional free-text notes stored on the association.
    :return: the ProjectPDBFile record.
    '''
    tsession = self.get_session(new_session = True)
    record = None
    try:
        record = get_or_create_in_transaction(tsession, dbmodel.ProjectPDBFile, dict(
            PDBFileID = pdb_file_id,
            ProjectID = project_id,
            Notes = notes,
        ))
        tsession.commit()
        tsession.close()
    except Exception:
        # Fixed: was 'except Exception, e' (Python 2-only syntax); the bound name was never used.
        tsession.rollback()
        tsession.close()
        raise
    return record
@general_data_entry
def add_dataset(self, user_id, long_id, short_id, description, has_stability_ddg_records, has_binding_affinity_ddg_records, has_binding_affinity_de_records, ddg_convention, dataset_creation_start_date = None, dataset_creation_end_date = None, publication_ids = None, existing_session = None):
    '''Adds a DataSet record. This is typically called before add_user_dataset_case which adds the user dataset
    experiment records (e.g. UserDataSetExperiment or UserPPDataSetExperiment records).
    :param user_id: User ID for the user adding this dataset to the database.
    :param long_id: This should be a descriptive name e.g. "SSM_Psd95-CRIPT_Rama_10.1038/nature11500" which describes the type of dataset (SSM on the Psd95-CRIPT complex) and includes the DOI of the associated publication.
    :param short_id: A short ID which will be used to refer to the dataset by humans e.g. "Psd95-CRIPT".
    :param description: A description of the dataset.
    :param has_stability_ddg_records: Does the dataset contain DDG data for monomeric stability assays?
    :param has_binding_affinity_ddg_records: Does the dataset contain DDG data for binding affinity assays?
    :param has_binding_affinity_de_records: Does the dataset contain DeltaE data for binding affinity assays?
    :param ddg_convention: Either "Rosetta" (negative values indicate higher stability or binding) or "ProTherm" (negative values indicate lower stability or binding).
    :param dataset_creation_start_date: The date when the dataset was first created. For publication datasets, this should be the publication date. For updated resources like ProTherm, this should be the publication date for the first revision.
    :param dataset_creation_end_date: The date when the dataset was last modified or finalized. For publication datasets, this should be the publication date. For updated resources like ProTherm, this should be the publication date for the latest revision.
    :param publication_ids: A list of Publication.ID field values from the associated publications.
    :return: The SQLAlchemy DataSet object.
    '''
    publication_ids = publication_ids or []  # fixed: avoid a mutable default argument
    tsession = existing_session or self.get_session(new_session = True)
    # Validate that the requesting user exists before touching any dataset tables.
    try:
        user_record = tsession.query(dbmodel.User).filter(dbmodel.User.ID == user_id).one()
    except Exception:  # narrowed from a bare except so e.g. KeyboardInterrupt is not swallowed
        raise Exception('Could not retrieve a record for user "{0}".'.format(user_id))
    if not (ddg_convention == 'Rosetta' or ddg_convention == 'ProTherm'):
        raise Exception('The DDG convention should be specified as either "Rosetta" (negative values indicate higher stability or binding) or "ProTherm" (negative values indicate lower stability or binding).')
    if (len(long_id) > 128) or (len(short_id) > 32):
        raise Exception('The long ID is limited to 128 characters and the short ID is limited to 32 characters.')
    dataset_dict = {}
    try:
        dataset_dict = dict(
            ID = long_id,
            ShortID = short_id,
            UserID = user_id,
            Description = description,
            DatasetType = self._get_prediction_dataset_type(),
            ContainsStabilityDDG = has_stability_ddg_records,
            ContainsBindingAffinityDDG = has_binding_affinity_ddg_records,
            ContainsBindingAffinityDE = has_binding_affinity_de_records,
            CreationDateStart = dataset_creation_start_date,
            CreationDateEnd = dataset_creation_end_date,
            DDGConvention = ddg_convention,
        )
        data_set = get_or_create_in_transaction(tsession, dbmodel.DataSet, dataset_dict, variable_columns = ['Description', 'CreationDateStart', 'CreationDateEnd'])
        data_set_id = data_set.ID
        # Link the dataset to each associated publication.
        for publication_id in publication_ids:
            dataset_reference = get_or_create_in_transaction(tsession, dbmodel.DataSetReference, dict(
                DataSetID = data_set_id,
                Publication = publication_id,
            ))
        # Only manage the transaction when we created the session ourselves.
        if existing_session is None:
            tsession.commit()
            tsession.close()
        return data_set
    except Exception as e:  # fixed: was 'except Exception, e' (Python 2-only syntax)
        colortext.error('An exception occurred while adding the dataset:\n\n{0}\n\n{1}\n{2}'.format(pprint.pformat(dataset_dict), str(e), traceback.format_exc()))
        if existing_session is None:
            tsession.rollback()
            tsession.close()
        raise
@general_data_entry
def add_user_dataset(self, user_id, text_id, description, analyze_ddg, analyze_de, existing_session = None):
    '''Adds a UserDataSet record. This is typically called before add_user_dataset_case which adds the user dataset
    experiment records (e.g. UserDataSetExperiment or UserPPDataSetExperiment records).

    :param analyze_ddg: whether DDG analysis applies to this dataset.
    :param analyze_de: whether DeltaE analysis applies to this dataset.
    :return: The SQLAlchemy UserDataSet object.
    '''
    # One timestamp for both FirstCreated and LastModified on the new record.
    dt = datetime.datetime.now()
    tsession = existing_session or self.get_session(new_session = True)
    # Validate that the requesting user exists before creating the dataset record.
    try:
        user_record = tsession.query(dbmodel.User).filter(dbmodel.User.ID == user_id).one()
    except Exception:  # narrowed from a bare except so e.g. KeyboardInterrupt is not swallowed
        raise Exception('Could not retrieve a record for user "{0}".'.format(user_id))
    user_dataset_dict = {}
    try:
        user_dataset_dict = dict(
            TextID = text_id,
            UserID = user_id,
            Description = description,
            DatasetType = self._get_prediction_dataset_type(),
            AnalyzeDDG = analyze_ddg,
            AnalyzeDE = analyze_de,
            FirstCreated = dt,
            LastModified = dt,
        )
        user_data_set = get_or_create_in_transaction(tsession, dbmodel.UserDataSet, user_dataset_dict, missing_columns = ['ID'], variable_columns = ['Description', 'FirstCreated', 'LastModified'])
        # Only manage the transaction when we created the session ourselves.
        if existing_session is None:
            tsession.commit()
            tsession.close()
        return user_data_set
    except Exception as e:  # fixed: was 'except Exception, e' (Python 2-only syntax)
        colortext.error('An exception occurred while adding the user dataset:\n\n{0}\n\n{1}\n{2}'.format(pprint.pformat(user_dataset_dict), str(e), traceback.format_exc()))
        if existing_session is None:
            tsession.rollback()
            tsession.close()
        raise
@general_data_entry
def add_ddg_user_dataset(self, user_id, text_id, description, existing_session = None):
    '''Convenience wrapper around add_user_dataset for datasets containing only DDG data.'''
    return self.add_user_dataset(user_id, text_id, description, analyze_ddg = True, analyze_de = False, existing_session = existing_session)
@general_data_entry
def add_de_user_dataset(self, user_id, text_id, description, existing_session = None):
    '''Convenience wrapper around add_user_dataset for datasets containing only DeltaE data.'''
    return self.add_user_dataset(user_id, text_id, description, analyze_ddg = False, analyze_de = True, existing_session = existing_session)
################################################################################################
## Private API layer
## These are helper functions used internally by the class but which are not intended for export
################################################################################################
###########################################################################################
## Subclass layer
##
## These functions need to be implemented by subclasses
###########################################################################################
def _get_sqa_prediction_table(self): return None
def _get_sqa_prediction_structure_scores_table(self): return None
def _get_prediction_table(self): return None
def _get_prediction_structure_scores_table(self): return None
def _get_prediction_id_field(self): return self._get_prediction_table() + 'ID'
def _get_prediction_type(self): return None
def _get_prediction_dataset_type(self): return None
def _get_prediction_type_description(self): return None
def _get_user_dataset_experiment_table(self): return None
def _get_user_dataset_experiment_tag_table(self): return None
def _get_allowed_score_types(self): return None
###########################################################################################
## Assertion layer
##
## These functions check pre- and post-conditions
###########################################################################################
def _check_prediction(self, prediction_id, prediction_set):
'''Sanity check: Asserts that a Prediction belongs in the expected PredictionSet.'''
prediction_table = self._get_prediction_table()
if not self.DDG_db.execute_select('SELECT * FROM {0} WHERE ID=%s AND PredictionSet=%s'.format(prediction_table), parameters=(prediction_id, prediction_set)):
raise Exception('{0} record #{1} does not belong to PredictionSet {2}.'.format(prediction_table, prediction_id, prediction_set))
def _check_scores_for_main_fields(self, scores, prediction_id):
'''Sanity check: Asserts that the identifying fields for the scores make sense for this interface.'''
prediction_id_field = self._get_prediction_id_field()
score_method_details = self.get_score_method_details()
allowed_score_types = self._get_allowed_score_types()
int_type = type(1)
for score in scores:
assert(prediction_id_field in score and score[prediction_id_field] == prediction_id)
assert('ScoreMethodID' in score and score['ScoreMethodID'] in score_method_details)
assert('ScoreType' in score and score['ScoreType'] in allowed_score_types)
assert('StructureID' in score and type(score['StructureID']) == int_type)
def _check_score_fields(self, scores):
'''Sanity check: Asserts that the fields for the scores are represented in the database table.'''
fieldnames = set([f for f in self.DDG_db.FieldNames.__dict__[self._get_prediction_structure_scores_table()].__dict__.keys() if not(f.startswith('_'))])
for score in scores:
score_keys = score.keys()
if sorted(fieldnames.intersection(score_keys)) != sorted(score_keys):
print score_keys
print fieldnames
raise Exception('These score table fieldnames were not recognized: %s.'.format(', '.join(sorted(set(score_keys).difference(fieldnames)))))
###########################################################################################
## Prediction layer
##
## This part of the API is responsible for inserting prediction jobs in the database via
## the trickle-down proteomics paradigm.
###########################################################################################
#== Job creation/management API ===========================================================
#
# This part of the API is responsible for inserting prediction jobs in the database via the
# trickle-down proteomics paradigm.
# PredictionSet interface
def _assert_prediction_set_is_correct_type(self, PredictionSetID):
    '''Raise if the PredictionSet does not exist or does not contain predictions of this interface's type.

    NOTE(review): the original docstring claimed this returns the Prediction IDs; it does not -
    it only validates and raises on failure.
    '''
    # Subclasses must have declared their prediction type before this check can run.
    assert(self._get_prediction_type() and self._get_prediction_type_description())
    # The PredictionSet details must flag this prediction type (value 1) for the set to be usable here.
    if (self.get_prediction_set_details(PredictionSetID) or {}).get(self._get_prediction_type()) != 1:
        raise Exception('This PredictionSet either does not exist or else contains no %s predictions.' % self._get_prediction_type_description())
def _set_prediction_set_status(self, PredictionSetID, status):
    '''Sets the Status of a PredictionSet to 'halted' or 'active'.'''
    tsession = self.get_session()
    assert(status == 'halted' or status == 'active')
    assert(self.get_prediction_set_details(PredictionSetID))
    # Bug fix: the original called .one().update({Column: value}). Query.one() returns a model
    # instance, which does not accept that Query.update signature - the UPDATE must be issued
    # on the Query itself (i.e. without the .one()).
    tsession.query(dbmodel.PredictionSet).filter(dbmodel.PredictionSet.ID == PredictionSetID).update({
        dbmodel.PredictionSet.Status : status,
    })
    tsession.commit()
# Prediction setup interface
def _add_prediction_file(self, tsession, prediction_id, file_content, filename, filetype, filerole, stage, rm_trailing_line_whitespace = False, forced_mime_type = None, file_content_id = None):
    '''This function adds file content to the database and then creates a record associating that content with a prediction.
    This call must be made within an existing session (tsession). This is crucial for many of the database functions
    as they rely on transactions rolling back on failure.

    :param file_content: raw file contents; may be None only when file_content_id identifies existing content.
    :param file_content_id: pass the ID of already-stored FileContent to skip re-adding the content.
    :return: the FileContent ID that the prediction's file record points at.
    '''
    prediction_table = self._get_prediction_table()
    # Add the file contents to the database
    if filetype == 'PDB':
        forced_mime_type = forced_mime_type or 'chemical/x-pdb'
    if file_content_id is None:
        assert(file_content is not None)
        file_content_id = self.importer._add_file_content(file_content, tsession = tsession, rm_trailing_line_whitespace = rm_trailing_line_whitespace, forced_mime_type = forced_mime_type)
    # Link the file contents to the prediction
    prediction_file_record = dict(
        FileContentID = file_content_id,
        Filename = filename,
        Filetype = filetype,
        FileRole = filerole,
        Stage = stage,
    )
    prediction_id_field, db_table = None, None
    if prediction_table == 'Prediction':
        prediction_id_field = 'PredictionID'
        db_table = dbmodel.PredictionFile
    elif prediction_table == 'PredictionPPI':
        prediction_id_field = 'PredictionPPIID'
        db_table = dbmodel.PredictionPPIFile
    else:
        # Bug fix: the original used raise('...'), which raises a TypeError ("exceptions must
        # derive from BaseException") instead of surfacing the intended message.
        raise Exception('Invalid table "%s" passed.' % prediction_table)
    prediction_file_record[prediction_id_field] = prediction_id
    # Create the database record
    # Note: We have already searched the file cache and database for uniqueness so we do NOT call get_or_create_in_transaction
    # here. This turns out to be a huge time saver since get_or_create_in_transaction will, in this case,
    # look up the FileContent.Content field which is an expensive operation.
    existing_records = [r for r in tsession.execute('SELECT * FROM {0} WHERE {1}=:{1} AND FileContentID=:FileContentID AND Filename=:Filename AND Filetype=:Filetype AND FileRole=:FileRole AND Stage=:Stage'.format(prediction_table + 'File', prediction_id_field), prediction_file_record)]
    if existing_records:
        assert(len(existing_records) == 1)
    else:
        prediction_file_record = db_table(**prediction_file_record)
        tsession.add(prediction_file_record)
        tsession.flush()
    return file_content_id
def _strip_pdb(self, pdb_file_id, chains):
raise Exception('assert that chains exist in PDBChain table. reads PDB content from the database. call PDB class functions to strip to chains.')
def _add_residue_map_json_to_prediction(self, tsession, prediction_id, residue_mapping_json, map_type):
assert(isinstance(residue_mapping_json, str))
assert(isinstance(json.loads(residue_mapping_json), dict))
if map_type == 'Rosetta residue->PDB residue map':
filename = 'rosetta2pdb.resmap.json'
elif map_type == 'PDB residue->Rosetta residue map':
filename = 'pdb2rosetta.resmap.json'
else:
raise colortext.Exception('Unexpected map type "{0}".'.format(map_type))
return self._add_prediction_file(tsession, prediction_id, residue_mapping_json, filename, 'RosettaPDBMapping', map_type, 'Input', rm_trailing_line_whitespace = True, forced_mime_type = "application/json")
def _add_stripped_pdb_to_prediction(self, prediction_id):
    '''Intended to strip the prediction's PDB to its chains and attach it as an input file; currently disabled.'''
    # todo: this is not being called (and should be) - see _add_job in kddg.api.ppi.py
    raise Exception('reimplement')
    # NOTE(review): everything below is unreachable until the raise above is removed. It also
    # references `tsession`, which is not defined in this scope, so it needs fixing when reinstated.
    pdb_file_id, chains = self.get_pdb_chains_for_prediction(prediction_id)
    pdb_content = self._strip_pdb(pdb_file_id, chains)
    filename = '%s_%s' % (pdb_file_id, ''.join(sorted(chains)))
    return self._add_prediction_file(tsession, prediction_id, pdb_content, filename, 'PDB', 'StrippedPDB', 'Input', rm_trailing_line_whitespace = True, forced_mime_type = 'chemical/x-pdb')
def _add_resfile_to_prediction(self, tsession, prediction_id, mutations, resfile_name):
    '''Serialize the mutations as a resfile and attach it to the prediction as an input file.'''
    resfile = Resfile.from_mutageneses(mutations)
    return self._add_prediction_file(tsession, prediction_id, str(resfile), resfile_name, 'Resfile', 'Resfile', 'Input', rm_trailing_line_whitespace = True, forced_mime_type = 'text/plain')
def _add_mutfile_to_prediction(self, tsession, prediction_id, rosetta_mutations, mutfile_name):
    '''Serialize the (Rosetta-numbered) mutations as a mutfile and attach it to the prediction as an input file.'''
    mutfile = Mutfile.from_mutagenesis(rosetta_mutations)
    return self._add_prediction_file(tsession, prediction_id, str(mutfile), mutfile_name, 'Mutfile', 'Mutfile', 'Input', rm_trailing_line_whitespace = True, forced_mime_type = 'text/plain')
def _add_ligand_params_files_to_prediction(self, tsession, prediction_id, pdb_file_id):
    '''Attach every stored ligand params file for the given PDB to the prediction as an input file.'''
    ligand_records = tsession.query(dbmodel.PDBLigandFile).filter(dbmodel.PDBLigandFile.PDBFileID == pdb_file_id)
    for ligand_record in ligand_records:
        code = ligand_record.PDBLigandCode
        # The params content already exists in FileContent, so pass its ID rather than the content.
        self._add_prediction_file(tsession, prediction_id, None, '{0}.params'.format(code), 'Params', '{0} params file'.format(code), 'Input', rm_trailing_line_whitespace = False, forced_mime_type = 'text/plain', file_content_id = ligand_record.ParamsFileContentID)
    return None
def _create_resfile_from_pdb_mutations(self, stripped_pdb, pdb_mutations):
'''This function takes a PDB object to be used in a DDG job (i.e. usually stripped to certain chains but with the
original PDB numbering) and a list of mutations using the original PDB numbering. Resfiles use PDB numbering
so no mapping needs to be done.'''
if not pdb_mutations:
raise Exception("There needs to be at least one mutation.")
try:
resfile = []
for mutation in pdb_mutations:
# Check that the expected wildtype exists in the PDB
stripped_pdb.assert_wildtype_matches(mutation)
chain, resid, mt = mutation.Chain, mutation.ResidueID.strip(), mutation.MutantAA
#resfile.append("%(resid)s %(chain)s PIKAA %(mt)s" % vars())
resfile.append("%(resid)s %(chain)s PIKAA %(mt)s" % vars())
assert(resfile)
return '\n'.join(["NATAA", "start"] + resfile)
except:
raise Exception("An error occurred creating a resfile for the ddG job.")
def _create_mutfile_from_pdb_mutations(self, stripped_pdb, pdb_mutations):
'''This function takes a PDB object to be used in a DDG job (i.e. usually stripped to certain chains but with the
original PDB numbering)) and a list of mutations using the original PDB numbering. Since mutfiles use Rosetta
numbering, we need to map the residue IDs from PDB numbering to Rosetta numbering.'''
if not pdb_mutations:
raise Exception("There needs to be at least one mutation.")
try:
# Map the mutations from PDB numbering to Rosetta numbering
rosetta_mutations = stripped_pdb.map_pdb_residues_to_rosetta_residues(pdb_mutations)
assert(len(rosetta_mutations) == len(pdb_mutations))
mutfile = []
for x in len(pdb_mutations):
pdb_mutation = pdb_mutations[x]
rosetta_mutation = pdb_mutations[x]
# Check that the expected wildtype exists in the PDB
stripped_pdb.assert_wildtype_matches(pdb_mutation)
wt, resid, mt = rosetta_mutation.WildTypeAA, rosetta_mutation.ResidueID, rosetta_mutation.MutantAA
mutfile.append("%(wt)s %(resid)s %(mt)s" % vars())
assert(mutfile)
return '\n'.join(["total %d" % len(rosetta_mutations), "%d" % len(rosetta_mutations)] + mutfile)
except:
raise Exception("An error occurred creating a mutfile for the ddG job.")
| StarcoderdataPython |
4843110 | <reponame>aliborji/ShapeDefence
# Adversarial-training driver for the ShapeDefence experiments: trains/loads a clean
# classifier, then for each perturbation budget evaluates it under attack, adversarially
# trains robust variants, and logs all accuracies to a per-configuration results file.
from __future__ import print_function
import argparse
from lib import *
from config import *
from model import model_dispatcher
from utils import *
import os
os.environ['KMP_DUPLICATE_LIB_OK'] = 'True'  # tolerate duplicate OpenMP runtimes (common torch/MKL clash)
# torch is brought in by the star imports above - TODO confirm which module exports it.
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
############### IMPORTANT: Specify your edge detector in config.py #######################################################
# Training settings
parser = argparse.ArgumentParser(description='shape defence adv training')
parser.add_argument('--epochs', type=int, default=20, help='num epochs')
parser.add_argument('--batch_size', type=int, default=100, help='training batch size')
parser.add_argument('--attack', type=str, default='FGSM', help='attack type FGSM or PGD')
parser.add_argument('--net_type', type=str, default='rgb', help='edge, rgb, grayedge, rgbedge')
parser.add_argument('--data_dir', type=str, default='MNIST', help='data directory')
parser.add_argument('--model', type=str, default='mnist', help='which model implemented in model.py')
parser.add_argument('--classes', type=int, default=10, help='number of classes')
parser.add_argument('--inp_size', type=int, default=28, help='size of the input image')
parser.add_argument('--sigmas', nargs='+', type=int)  # perturbation budgets, in 0-255 pixel units
parser.add_argument('--load_model', type=str, default='', help='path to the trained model')
# NOTE(review): --alpha is parsed but never used below (acknowledged in its own help text).
parser.add_argument('--alpha', type=int, default=.5, help='loss balance ratio') # not implemented yet in the utils model!! TODO
opt = parser.parse_args()
print(opt)
# eg\
# python train.py --net_type rgbedge --model gtsrb --sigmas 8 32 --data_dir GTSRB --classes 43 --epochs 10 --inp_size 64 --load_model gtsrb_rgbedge.pth
# Unpack CLI options into local names (original example values kept as inline comments).
num_epochs = opt.epochs # 20
batch_size = opt.batch_size # 100
attack_type = opt.attack #'FGSM'
net_type = opt.net_type #'grayedge'
data_dir = opt.data_dir #'MNIST'
which_model = opt.model #'mnist'
n_classes = opt.classes # 10
inp_size = opt.inp_size# 28
sigmas = opt.sigmas
# Results layout: ./Res<data>/<attack>/... holds checkpoints and the accuracy log.
if not os.path.exists(f'./Res{data_dir}'):
    os.mkdir(f'./Res{data_dir}')
if not os.path.exists(f'./Res{data_dir}/{attack_type}'):
    os.mkdir(f'././Res{data_dir}/{attack_type}')  # NOTE(review): '././' is redundant but resolves to the same path
fo = open(f'./Res{data_dir}/{attack_type}/results_{net_type}.txt', 'a+')  # append-mode log, closed at the bottom
# --------------------------------------------------------------------------------------------------------------------------------------------
# Train a model first
if opt.load_model: # if model exist just load it
    save_path = opt.load_model
    net, dataloader_dict, criterior, optimizer = model_dispatcher(which_model, net_type, data_dir, inp_size, n_classes)
    load_model(net, save_path)
    net.to(device)
else:
    save_path = f'./Res{data_dir}/{attack_type}/{data_dir}_{net_type}.pth'
    net, dataloader_dict, criterior, optimizer = model_dispatcher(which_model, net_type, data_dir, inp_size, n_classes)
    net.to(device)
    train_model(net, dataloader_dict, criterior, optimizer, num_epochs, save_path)
# Test the clean model on clean and attacks
acc, images = test_model_clean(net, dataloader_dict)
print('Accuracy of original model on clean images: %f ' % acc)
fo.write('Accuracy of original model on clean images: %f \n' % acc)
# One evaluate / adversarially-train / re-evaluate cycle per perturbation budget.
for eps_t in sigmas: #[8,32,64]:
    print(f'eps_t={eps_t}')
    fo.write(f'eps_t={eps_t} \n')
    epsilons = [eps_t/255]  # attack strength normalized to the [0, 1] pixel range
    # Test the clean model on clean and attacks (reload a fresh copy of the clean weights)
    net, dataloader_dict, criterior, optimizer = model_dispatcher(which_model, net_type, data_dir, inp_size, n_classes)
    load_model(net, save_path)
    net.to(device)
    acc_attack, images = test_model_attack(net, dataloader_dict, epsilons, attack_type, net_type, redetect_edge=False)
    print('Accuracy of clean model on adversarial images: %f %%' % acc_attack[0])
    fo.write('Accuracy of clean model on adversarial images: %f \n' % acc_attack[0])
    # Edge-augmented nets can additionally re-extract edges from the attacked image before classifying.
    if (net_type.lower() in ['grayedge', 'rgbedge']):
        acc_attack, images = test_model_attack(net, dataloader_dict, epsilons, attack_type, net_type, redetect_edge=True)
        print('Accuracy of clean model on adversarial images with redetect_edge: %f %%' % acc_attack[0])
        fo.write('Accuracy of clean model on adversarial images with redetect_edge: %f \n' % acc_attack[0])
    # --------------------------------------------------------------------------------------------------------------------------------------------
    # Now perform adversarial training
    save_path_robust = f'./Res{data_dir}/{attack_type}/{data_dir}_{net_type}_{eps_t}_robust_{eps_t}.pth'
    # if train_phase:
    net_robust, dataloader_dict, criterior, optimizer = model_dispatcher(which_model, net_type, data_dir, inp_size, n_classes)
    net_robust.to(device)
    train_robust_model(net_robust, dataloader_dict, criterior, optimizer, num_epochs, save_path_robust, attack_type, eps=eps_t/255, net_type=net_type, redetect_edge=False)
    # --------------------------------------------------------------------------------------------------------------------------------------------
    # Test the robust model on clean and attacks
    net_robust, dataloader_dict, criterior, optimizer = model_dispatcher(which_model, net_type, data_dir, inp_size, n_classes)
    load_model(net_robust, save_path_robust)
    # load_model(net_robust, f'./{attack_type}-imagenette2-160/imagenette2-160_rgbedge_{eps_t}_robust_{eps_t}.pth')
    # load_model(net_robust, f'./{attack_type}-gtsrb/gtsrb_rgbedge_{eps_t}_robust_{eps_t}.pth')
    net_robust.to(device)
    acc, images = test_model_clean(net_robust, dataloader_dict)
    print('Accuracy of robust model on clean images: %f %%' % acc)
    fo.write('Accuracy of robust model on clean images: %f \n' % acc)
    acc_attack, images = test_model_attack(net_robust, dataloader_dict, epsilons, attack_type, net_type, redetect_edge=False)
    print('Accuracy of robust model on adversarial images: %f %%' % acc_attack[0])
    fo.write('Accuracy of robust model on adversarial images: %f \n' % acc_attack[0])
    if (net_type.lower() in ['grayedge', 'rgbedge']):
        acc_attack, images = test_model_attack(net_robust, dataloader_dict, epsilons, attack_type, net_type, redetect_edge=True)
        print('Accuracy of robust model on adversarial images with redetect_edge: %f %%' % acc_attack[0])
        fo.write('Accuracy of robust model on adversarial images with redetect_edge: %f \n' % acc_attack[0])
    # --------------------------------------------------------------------------------------------------------------------------------------------
    # Now perform adversarial training with redetect
    # The redetect variants only apply to edge-augmented nets; skip to the next budget otherwise.
    if not (net_type.lower() in ['grayedge', 'rgbedge']): continue
    save_path_robust = f'./Res{data_dir}/{attack_type}/{data_dir}_{net_type}_{eps_t}_robust_{eps_t}_redetect.pth'
    net_robust, dataloader_dict, criterior, optimizer = model_dispatcher(which_model, net_type, data_dir, inp_size, n_classes)
    net_robust.to(device)
    train_robust_model(net_robust, dataloader_dict, criterior, optimizer, num_epochs, save_path_robust, attack_type, eps=eps_t/255, net_type=net_type, redetect_edge=True)
    acc, images = test_model_clean(net_robust, dataloader_dict)
    print('Accuracy of robust redetect model on clean images: %f %%' % acc)
    fo.write('Accuracy of robust redetect model on clean images: %f \n' % acc)
    acc_attack, images = test_model_attack(net_robust, dataloader_dict, epsilons, attack_type, net_type, redetect_edge=False)
    print('Accuracy of robust redetect model on adversarial images: %f %%' % acc_attack[0])
    fo.write('Accuracy of robust redetect model on adversarial images: %f \n' % acc_attack[0])
    acc_attack, images = test_model_attack(net_robust, dataloader_dict, epsilons, attack_type, net_type, redetect_edge=True)
    # NOTE(review): 'redtect' below is a typo in the original log message (kept - it is a runtime string).
    print('Accuracy of robust redtect model on adversarial images with redetect_edge: %f %%' % acc_attack[0])
    fo.write('Accuracy of robust redetect model on adversarial images with redetect_edge: %f \n' % acc_attack[0])
fo.close()
| StarcoderdataPython |
3232412 |
class Node:
    '''A binary-tree node holding a value and optional left/right children.'''

    def __init__(self, value = None, left = None, right = None):
        # Store the payload and child links; everything defaults to None.
        self.value, self.left, self.right = value, left, right
class K_aryTree:
    '''Binary tree wrapper exposing a pre-order traversal.

    (Despite the name, the current implementation only walks binary nodes
    via their .left/.right attributes.)
    '''

    def __init__(self, root = None):
        self.root = root

    def pre_order(self):
        '''Return node values in root-left-right order; an empty tree yields [].'''
        values = []
        stack = [self.root]
        while stack:
            node = stack.pop()
            if node is None:
                continue
            values.append(node.value)
            # Push right first so the left subtree is processed first (LIFO order).
            stack.append(node.right)
            stack.append(node.left)
        return values
def fizz_buzz_tree(tree):
    '''Pre-order traverse the tree rooted at *tree*, replacing each node value divisible
    by 3 with "Fizz", by 5 with "Buzz", and by both with "FizzBuzz" (in place), and
    return the list of (possibly replaced) values in visit order.

    Returns "Empty Tree" when there is nothing to process (interface preserved from the
    original implementation).

    Bug fixes versus the original:
    - traverse() was defined but never called, so the function always returned [].
    - the divisible-by-3 branch used '==' (a no-op comparison) instead of assignment.
    - recursing into a missing child dereferenced root.value on None (AttributeError).
    - the result list shadowed the builtin name 'list'.
    '''
    if tree is None or tree.value is None:
        return "Empty Tree"
    visited = []

    def traverse(node):
        # Stop at missing children (and at sentinel None values).
        if node is None or node.value is None:
            return
        if node.value % 3 == 0 and node.value % 5 == 0:
            node.value = "FizzBuzz"
        elif node.value % 3 == 0:
            node.value = "Fizz"
        elif node.value % 5 == 0:
            node.value = "Buzz"
        visited.append(node.value)
        traverse(node.left)
        traverse(node.right)

    traverse(tree)
    return visited
| StarcoderdataPython |
4835459 | # -*- coding: utf-8 -*-
"""
walle-web
:copyright: © 2015-2019 walle-web.io
:created time: 2018-11-24 07:12:13
:author: <EMAIL>
"""
from datetime import datetime
from sqlalchemy import String, Integer, Text, DateTime
from walle import model
from walle.model.database import SurrogatePK, db, Model
from walle.model.user import UserModel
from walle.service.extensions import permission
from walle.service.rbac.role import *
# Project configuration table.
class ProjectModel(SurrogatePK, Model):
    """ORM model describing one deployable project: repo settings, deploy
    targets, hook scripts, notification channel and audit flag."""

    # Table name.
    __tablename__ = 'projects'

    # NOTE(review): evaluated once at import time, so the created_at/updated_at
    # defaults below are the process start time, not the row write time —
    # confirm this is intended.
    current_time = datetime.now()
    status_close = 0
    status_open = 1
    task_audit_true = 1
    task_audit_false = 0

    # Table columns.
    id = db.Column(Integer, primary_key=True, autoincrement=True)
    user_id = db.Column(Integer)
    name = db.Column(String(100))
    environment_id = db.Column(Integer)
    space_id = db.Column(Integer)
    status = db.Column(Integer)
    master = db.Column(String(100))
    version = db.Column(String(40))
    excludes = db.Column(Text)
    is_include = db.Column(Integer)
    target_root = db.Column(String(200))
    target_releases = db.Column(String(200))
    server_ids = db.Column(Text)
    task_vars = db.Column(Text)
    prev_deploy = db.Column(Text)
    post_deploy = db.Column(Text)
    prev_release = db.Column(Text)
    post_release = db.Column(Text)
    keep_version_num = db.Column(Integer)
    repo_url = db.Column(String(200))
    repo_username = db.Column(String(50))
    repo_password = db.Column(String(50))
    repo_mode = db.Column(String(50))
    repo_type = db.Column(String(10))
    notice_type = db.Column(String(10))
    notice_hook = db.Column(Text)
    task_audit = db.Column(Integer)
    created_at = db.Column(DateTime, default=current_time)
    updated_at = db.Column(DateTime, default=current_time, onupdate=current_time)

    def list(self, page=0, size=10, kw=None, space_id=None, environment_id=None):
        """
        Return one page of non-removed projects plus the total match count.

        :param page: zero-based page index
        :param size: rows per page
        :param kw: optional substring filter on the project name
        :param space_id: optional space filter
        :param environment_id: optional environment filter
        :return: (project_list, count) where each item is a to_json() dict
                 augmented with environment_name and space_name
        """
        # self.status_remove is presumably inherited from the base Model —
        # TODO confirm.
        query = self.query.filter(ProjectModel.status.notin_([self.status_remove]))
        if kw:
            query = query.filter(ProjectModel.name.like('%' + kw + '%'))
        # Join environments so projects of removed environments are hidden.
        EnvironmentModel = model.environment.EnvironmentModel
        query = query.join(EnvironmentModel, EnvironmentModel.id == ProjectModel.environment_id)
        query = query.filter(EnvironmentModel.status.notin_([self.status_remove]))
        # Join spaces for the same reason.
        SpaceModel = model.space.SpaceModel
        query = query.join(SpaceModel, SpaceModel.id == ProjectModel.space_id)
        query = query.filter(SpaceModel.status.notin_([self.status_remove]))
        if environment_id:
            query = query.filter(ProjectModel.environment_id == environment_id)
        if space_id:
            query = query.filter(ProjectModel.space_id == space_id)
        query = query.add_columns(EnvironmentModel.name, SpaceModel.name)
        count = query.count()
        data = query.order_by(ProjectModel.id.desc()).offset(int(size) * int(page)).limit(size).all()
        project_list = []
        # Each row is a tuple: (ProjectModel, environment_name, space_name).
        for p in data:
            item = p[0].to_json()
            item['environment_name'] = p[1]
            item['space_name'] = p[2]
            project_list.append(item)
        return project_list, count

    def item(self, id=None):
        """
        Fetch a single non-removed project (with its server records) as a dict.

        :param id: project id; falls back to self.id when omitted
        :return: project dict, or [] when no matching row exists
        """
        id = id if id else self.id
        data = self.query.filter(ProjectModel.status.notin_([self.status_remove])).filter_by(id=id).first()
        if not data:
            return []
        project_info = data.to_json()
        ServerModel = model.server.ServerModel
        server_ids = project_info['server_ids']
        # Expand the comma-separated id string into full server records.
        project_info['servers_info'] = ServerModel.fetch_by_id(list(map(int, server_ids.split(','))))
        return project_info

    def add(self, *args, **kwargs):
        """Insert a new project row and return it as a dict."""
        data = dict(*args)
        project = ProjectModel(**data)
        db.session.add(project)
        db.session.commit()
        return project.to_json()

    def update(self, *args, **kwargs):
        """Update this row with the given field dict (delegates to the base Model)."""
        update_data = dict(*args)
        return super(ProjectModel, self).update(**update_data)

    def remove(self, role_id=None):
        """
        Soft-delete a project by flipping its status to status_remove.

        :param role_id: project id; falls back to self.id when omitted
        :return: commit result
        """
        role_id = role_id if role_id else self.id
        ProjectModel.query.filter_by(id=role_id).update({'status': self.status_remove})
        ret = db.session.commit()
        return ret

    def to_json(self):
        """Serialize the row to a plain dict, resolving master uids to users
        and appending the permission flags from enable()."""
        item = {
            'id': self.id,
            'user_id': self.user_id,
            'name': self.name,
            'environment_id': self.environment_id,
            'space_id': self.space_id,
            'status': self.status,
            'master': UserModel.fetch_by_uid(self.master.split(',')) if self.master else '',
            'version': self.version,
            'excludes': self.excludes,
            'is_include': self.is_include,
            'target_root': self.target_root,
            'target_releases': self.target_releases,
            'server_ids': self.server_ids,
            'task_vars': self.task_vars,
            'prev_deploy': self.prev_deploy,
            'post_deploy': self.post_deploy,
            'prev_release': self.prev_release,
            'post_release': self.post_release,
            'keep_version_num': self.keep_version_num,
            'repo_url': self.repo_url,
            'repo_username': self.repo_username,
            'repo_password': self.repo_password,
            'repo_mode': self.repo_mode,
            'repo_type': self.repo_type,
            'notice_type': self.notice_type,
            'notice_hook': self.notice_hook,
            'task_audit': self.task_audit,
            'created_at': self.created_at.strftime('%Y-%m-%d %H:%M:%S'),
            'updated_at': self.updated_at.strftime('%Y-%m-%d %H:%M:%S'),
        }
        item.update(self.enable())
        return item

    def enable(self):
        """Return per-row UI permission flags based on the current user's role."""
        return {
            'enable_view': True,
            'enable_update': permission.role_upper_developer(),
            'enable_delete': permission.enable_uid(self.user_id) or permission.role_upper_developer(),
            'enable_create': False,
            'enable_online': False,
            'enable_audit': False,
            'enable_block': False,
        }
| StarcoderdataPython |
10514 | <reponame>gillins/pyshepseg
#Copyright 2021 <NAME> and <NAME>. All rights reserved.
#
#Permission is hereby granted, free of charge, to any person
#obtaining a copy of this software and associated documentation
#files (the "Software"), to deal in the Software without restriction,
#including without limitation the rights to use, copy, modify,
#merge, publish, distribute, sublicense, and/or sell copies of the
#Software, and to permit persons to whom the Software is furnished
#to do so, subject to the following conditions:
#
#The above copyright notice and this permission notice shall be
#included in all copies or substantial portions of the Software.
#
#THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
#EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
#OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
#IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR
#ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF
#CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
#WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
# NOTE(review): numpy.distutils is deprecated (and unavailable on Python
# >= 3.12); nothing here builds C extensions, so plain setuptools would do —
# confirm before migrating.
from numpy.distutils.core import setup
import pyshepseg

# Distribution metadata; the version is read from the package itself so the
# module and the distribution cannot drift apart.
setup(name='pyshepseg',
      version=pyshepseg.SHEPSEG_VERSION,
      description='Python implementation of the image segmentation algorithm described by Shepherd et al',
      author='<NAME> and <NAME>',
      scripts=['bin/test_pyshepseg.py', 'bin/test_pyshepseg_tiling.py',
               'bin/test_pyshepseg_subset.py'],
      packages=['pyshepseg'],
      license='LICENSE.txt',
      url='https://github.com/ubarsc/pyshepseg'
      )
| StarcoderdataPython |
118738 | # Generated by Django 2.2 on 2019-08-06 15:18
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Auto-generated: wire Community to the user model (administrator FK,
    banned/invited users and moderators M2M relations)."""

    initial = True

    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
        ('communities', '0001_initial'),
    ]

    operations = [
        migrations.AddField(
            model_name='community',
            name='administrator',
            field=models.ForeignKey(default='', on_delete=django.db.models.deletion.DO_NOTHING, to=settings.AUTH_USER_MODEL),
        ),
        migrations.AddField(
            model_name='community',
            name='banned_users',
            field=models.ManyToManyField(blank=True, related_name='banned_users', to=settings.AUTH_USER_MODEL),
        ),
        migrations.AddField(
            model_name='community',
            name='invited_users',
            field=models.ManyToManyField(blank=True, related_name='invited_users', to=settings.AUTH_USER_MODEL),
        ),
        migrations.AddField(
            model_name='community',
            name='moderators',
            field=models.ManyToManyField(blank=True, related_name='community_moderators', to=settings.AUTH_USER_MODEL),
        ),
    ]
| StarcoderdataPython |
1783624 | <reponame>jyotti/backlog-toolbox<filename>backlog_toolbox/__init__.py
# coding:utf-8
# Package author metadata for backlog_toolbox.
__author__ = '<NAME>'
| StarcoderdataPython |
4834682 | <reponame>carlosjpc/panditas<gh_stars>1-10
from panditas.models import DataFlow, DataSet, MergeMultipleRule, MergeRule
from panditas.transformation_rules import ConstantColumn
def test_data_set_dependencies():
    """Two CSV data sets get sequential positions and empty depends_on lists."""
    data_flow = DataFlow(
        name="Test Dependent Data Sets",
        steps=[
            DataSet(df_path="claims.csv", name="claims", source="csv"),
            DataSet(df_path="policies.csv", name="policies", source="csv"),
        ],
    )
    assert data_flow.steps[0].name == "claims"
    assert data_flow.steps[1].name == "policies"
    assert data_flow.steps[0].position == 0
    assert data_flow.steps[1].position == 1
    assert data_flow.steps[0].depends_on == []
    assert data_flow.steps[1].depends_on == []
def test_data_set_get_columns():
    """TODO: DataSet column introspection is not covered yet."""
    pass
def test_data_set_dependencies_manual():
    """An explicitly supplied depends_on list is preserved as given."""
    data_flow = DataFlow(
        name="Test Data Sets",
        steps=[
            DataSet(df_path="claims.csv", name="claims", source="csv"),
            DataSet(
                df_path="policies.csv",
                name="policies",
                source="csv",
                depends_on=["claims"],
            ),
        ],
    )
    assert data_flow.steps[0].name == "claims"
    assert data_flow.steps[1].name == "policies"
    assert data_flow.steps[0].position == 0
    assert data_flow.steps[1].position == 1
    assert data_flow.steps[0].depends_on == []
    assert data_flow.steps[1].depends_on == ["claims"]
def test_dependencies():
    """A transformation step implicitly depends on the preceding data set."""
    data_flow = DataFlow(
        name="Test Dependencies with Constant",
        steps=[
            DataSet(df_path="claims.csv", name="claims", source="csv"),
            ConstantColumn(
                column_name="new",
                column_value="THIS IS A CONSTANT VALUE",
                name="add_constant",
            ),
        ],
    )
    assert data_flow.steps[0].name == "claims"
    assert data_flow.steps[1].name == "add_constant"
    assert data_flow.steps[0].position == 0
    assert data_flow.steps[1].position == 1
    assert data_flow.steps[0].depends_on == []
    assert data_flow.steps[1].depends_on == ["claims"]
def test_dependencies_merge():
    """A MergeRule depends on both of its input data sets."""
    data_flow = DataFlow(
        name="Test Merge",
        steps=[
            DataSet(df_path="df_one.csv", name="df_one", source="csv"),
            DataSet(df_path="df_two.csv", name="df_two", source="csv"),
            MergeRule(
                left_data_set="df_one",
                right_data_set="df_two",
                merge_type="inner",
                name="merge_data_sets",
            ),
        ],
    )
    assert data_flow.steps[0].name == "df_one"
    assert data_flow.steps[1].name == "df_two"
    assert data_flow.steps[2].name == "merge_data_sets"
    assert data_flow.steps[0].position == 0
    assert data_flow.steps[1].position == 1
    assert data_flow.steps[2].position == 2
    assert data_flow.steps[0].depends_on == []
    assert data_flow.steps[1].depends_on == []
    assert data_flow.steps[2].depends_on == ["df_one", "df_two"]
def test_dependencies_merge_multiple():
    """A MergeMultipleRule depends on every data set it lists."""
    data_flow = DataFlow(
        name="Test Merge Multiple",
        steps=[
            DataSet(df_path="claims.csv", name="claims", source="csv"),
            DataSet(df_path="policies.csv", name="policies", source="csv"),
            DataSet(df_path="agencies.csv", name="agencies", source="csv"),
            MergeMultipleRule(
                data_sets=["claims", "policies", "agencies"],
                name="merge_facts_dims",
                merge_types=["inner", "inner", "inner"],
            ),
        ],
    )
    assert data_flow.steps[0].depends_on == []
    assert data_flow.steps[1].depends_on == []
    assert data_flow.steps[2].depends_on == []
    assert data_flow.steps[3].depends_on == ["claims", "policies", "agencies"]
| StarcoderdataPython |
16951 | <reponame>zhiyuli/HydroLearn<filename>src/apps/core/migrations/0005_auto_20180417_1219.py
# -*- coding: utf-8 -*-
# Generated by Django 1.11.7 on 2018-04-17 17:19
from __future__ import unicode_literals
from django.db import migrations
import django_extensions.db.fields
class Migration(migrations.Migration):
    """Auto-generated: add Topic.ref_id (random, unique) and repoint the slug
    to auto-populate from it."""

    dependencies = [
        ('core', '0004_auto_20180417_1218'),
    ]

    operations = [
        migrations.AddField(
            model_name='topic',
            name='ref_id',
            field=django_extensions.db.fields.RandomCharField(blank=True, editable=False, length=8, unique=True),
        ),
        migrations.AlterField(
            model_name='topic',
            name='slug',
            field=django_extensions.db.fields.AutoSlugField(blank=True, default='', editable=False, help_text='Please enter a unique slug for this Topic (can autogenerate from name field)', max_length=64, populate_from=('ref_id',), unique=True, verbose_name='slug'),
        ),
    ]
| StarcoderdataPython |
1748708 | <reponame>Izacht13/pyCML<gh_stars>0
"""
pyCasual
"""
# Prefer setuptools; fall back to distutils only on very old environments.
try:
    from setuptools import setup
except ImportError:
    from distutils.core import setup

# Distribution metadata for the pycasual single-module package.
setup(
    name="pycasual",
    version="0.0.3",
    url="http://github.com/izacht13/pyCasual/",
    license="MIT",
    author="<NAME>",
    author_email="<EMAIL>",
    description="An interpreter for Casual Markup Language.",
    py_modules=["pycasual"],
    scripts=["pycasual.py"],
    keywords=["web html markup"],
    platforms="any",
    classifiers=[
        # BUGFIX: "Development Status :: 1 - Alpha" is not a registered trove
        # classifier (valid early-stage values are "1 - Planning",
        # "2 - Pre-Alpha", "3 - Alpha"); PyPI rejects unknown classifiers.
        "Development Status :: 3 - Alpha",
        "Intended Audience :: Developers",
        'Topic :: Software Development',
        "License :: OSI Approved :: MIT License",
        "Programming Language :: Python :: 3.5",
        "Programming Language :: Python :: 3.6"
    ]
)
1769255 | <reponame>qgerome/openhexa-app
# Generated by Django 3.2.7 on 2021-09-30 09:56
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated: allow Instance.name to be blank."""

    dependencies = [
        ("connector_dhis2", "0018_dataset"),
    ]

    operations = [
        migrations.AlterField(
            model_name="instance",
            name="name",
            field=models.TextField(blank=True),
        ),
    ]
| StarcoderdataPython |
3200981 | <reponame>ZhangFly/MUISeverSourceCode
import numpy as np
from scipy import signal
from scipy import fftpack
def execute(context):
if not context.data is None:
context.data = np.sqrt(context.data**2 + fftpack.hilbert(context.data)**2)
context.prev = __name__ | StarcoderdataPython |
1667238 | <filename>noxfile.py
# Copyright (c) 2020 <NAME>, <NAME>. All rights
# reserved.
# This work is licensed under the terms of the MIT license.
# For a copy, see <https://opensource.org/licenses/MIT>
"""Nox test automation file."""
from typing import List
import nox
# Pinned tool-chains shared by the sessions below.
requirements: List[str] = ["-r", "requirements.txt"]
test_requirements: List[str] = [
    *requirements,
    "pytest==5.4.3",
    "pytest-cov==2.10.0",
]
format_requirements: List[str] = ["black==19.10b0", "isort==4.3.21"]
lint_requirements: List[str] = [
    *requirements,
    *format_requirements,
    "pylint==2.5.3",
    "mypy==0.782",
    "flake8==3.8.3",
    "pycodestyle==2.6.0",
]
# Directories that linting/formatting sessions operate on.
python_target_files = ["etsiit_bot/", "tests/"]
# Interpreter versions the `tests` session is parametrized over.
python = ["3.6", "3.7", "3.8"]
nox.options.reuse_existing_virtualenvs = True
nox.options.stop_on_first_error = False
###############################################################################
# Linting
###############################################################################
@nox.session(name="lintpy")
def lint_python(session):
    """Lint Python source code with the pinned linters and style checkers."""
    session.log("# Linting Python files...")
    session.install(*lint_requirements)
    session.run("pylint", *python_target_files)
    session.run("mypy", *python_target_files)
    session.run("flake8", *python_target_files)
    session.run("pycodestyle", *python_target_files)
    # --check/--diff: report formatting drift without rewriting files.
    session.run("black", "-l", "79", "--check", "--diff", *python_target_files)
    session.run("isort", "-rc", "--check-only", "--diff", *python_target_files)
@nox.session(name="lintmd")
def lint_markdown(session):
    """Lint Markdown files."""
    session.log("# Linting Markdown files...")
    # external=True: mdl comes from the host system, not the session venv.
    session.run("mdl", "--style", ".mdl.rb", ".", external=True)
###############################################################################
# Formating
###############################################################################
@nox.session(name="format")
def python_format(session):
    """Format Python source code in place (black, then isort)."""
    session.log("# Formating Python files...")
    session.install(*format_requirements)
    session.run("black", "-l", "79", *python_target_files)
    session.run("isort", *python_target_files)
###############################################################################
# Testing
###############################################################################
@nox.session(python=python)
def tests(session):
    """Run python tests."""
    session.log("# Running tests...")
    session.install(*test_requirements)
    session.run(
        "pytest",
        # NOTE(review): dummy values — presumably consumed by the bot's
        # configuration at import time so tests need no real secrets; verify.
        env={
            "REPO_ROOT": "REPO_ROOT_dummy",
            "TELEGRAM_TOKEN": "TELEGRAM_TOKEN_dummy",
            "PROJECT_NAME": "PROJECT_NAME_dummy",
            "PORT": "123",
        },
    )
| StarcoderdataPython |
class CRMSystemError(Exception):
    """Domain exception carrying a numeric error code alongside its message."""

    def __init__(self, errorCode, errorMessage, *args, **kwargs):
        """Record the code/message pair and forward the message to Exception."""
        super().__init__(errorMessage, *args, **kwargs)
        self.errorCode = errorCode
        self.errorMessage = errorMessage

    def __str__(self):
        """Render as "<code> - <message>"."""
        return f"{self.errorCode} - {self.errorMessage}"
| StarcoderdataPython |
132909 | <filename>loaner/web_app/backend/actions/request_shelf_audit_test.py
# Copyright 2018 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for backend.actions.request_shelf_audit."""
import mock
from loaner.web_app.backend.lib import send_email # pylint: disable=unused-import
from loaner.web_app.backend.models import shelf_model
from loaner.web_app.backend.testing import loanertest
class RequestShelfAuditTest(loanertest.ActionTestCase):
"""Test the NotifyShelfAuditors Action class."""
def setUp(self):
self.testing_action = 'request_shelf_audit'
super(RequestShelfAuditTest, self).setUp()
def test_run_no_shelf(self):
self.assertRaisesRegexp( # Raises generic because imported != loaded.
Exception, '.*did not receive a shelf.*', self.action.run)
@mock.patch('__main__.send_email.send_shelf_audit_email')
def test_run_success(self, mock_sendshelfauditemail):
shelf = shelf_model.Shelf.enroll(
loanertest.USER_EMAIL, 'US-BLD', 24, 'Overlook Hotel', 40.6892534,
-74.0466891, 1.0, loanertest.USER_EMAIL)
self.action.run(shelf=shelf)
mock_sendshelfauditemail.assert_called_with(shelf)
self.assertTrue(shelf.audit_requested)
if __name__ == '__main__':
loanertest.main()
| StarcoderdataPython |
92482 | import numpy as np
from .. import inf
from ... import blm
from . import learning
from .prior import prior
class model:
    """Gaussian-process model combining a likelihood and a GP prior.

    All hyperparameters live in one flat vector ``self.params``:
    likelihood parameters first, then prior (mean, covariance) parameters.
    """

    def __init__(self, lik, mean, cov, inf='exact'):
        """Assemble the model from a likelihood and prior mean/covariance.

        :param lik: likelihood object (exposes num_params / params)
        :param mean: prior mean function
        :param cov: prior covariance (kernel)
        :param inf: inference method name; only 'exact' is implemented
        """
        self.lik = lik
        self.prior = prior(mean=mean, cov=cov)
        self.inf = inf
        self.num_params = self.lik.num_params + self.prior.num_params
        self.params = self.cat_params(self.lik.params, self.prior.params)
        self.stats = ()

    def cat_params(self, lik_params, prior_params):
        """Concatenate likelihood and prior parameters into one flat vector."""
        return np.append(lik_params, prior_params)

    def decomp_params(self, params=None):
        """Split a flat parameter vector into (lik_params, prior_params)."""
        if params is None:
            params = np.copy(self.params)
        lik_params = params[0:self.lik.num_params]
        prior_params = params[self.lik.num_params:]
        return lik_params, prior_params

    def set_params(self, params):
        """Store *params* and push the split pieces down to lik and prior."""
        self.params = params
        lik_params, prior_params = self.decomp_params(params)
        self.lik.set_params(lik_params)
        self.prior.set_params(prior_params)

    def sub_sampling(self, X, t, N):
        """Randomly pick at most N rows of (X, t); N=None returns all rows
        (in shuffled order). N >= len(X) returns the data unchanged."""
        num_data = X.shape[0]
        if N is None or N < num_data:
            index = np.random.permutation(num_data)
            subX = X[index[0:N], :]
            subt = t[index[0:N]]
        else:
            subX = X
            subt = t
        return subX, subt

    def export_blm(self, num_basis):
        """Export an equivalent Bayesian linear model via the kernel's random
        feature expansion (requires the kernel to implement rand_expans)."""
        if not hasattr(self.prior.cov, "rand_expans"):
            # BUGFIX: the original message was truncated ("The kernel must be.").
            raise ValueError(
                'The kernel must support random feature expansion '
                '(rand_expans).')
        basis_params = self.prior.cov.rand_expans(num_basis)
        basis = blm.basis.fourier(basis_params)
        # NOTE: intentionally shadows the module-level `prior` factory here.
        prior = blm.prior.gauss(num_basis)
        lik = blm.lik.gauss(blm.lik.linear(basis, bias=self.prior.get_mean(1)),
                            blm.lik.cov(self.lik.params))
        blr = blm.model(lik, prior)
        return blr

    def eval_marlik(self, params, X, t, N=None):
        """Evaluate the marginal likelihood on a random subsample of size N."""
        subX, subt = self.sub_sampling(X, t, N)
        # BUGFIX: `self.inf is 'exact'` relied on string interning; use ==.
        if self.inf == 'exact':
            return inf.exact.eval_marlik(self, subX, subt, params=params)
        # BUGFIX: previously fell through and raised UnboundLocalError.
        raise NotImplementedError(
            'inference method %r is not supported' % (self.inf,))

    def get_grad_marlik(self, params, X, t, N=None):
        """Gradient of the marginal likelihood w.r.t. the hyperparameters."""
        subX, subt = self.sub_sampling(X, t, N)
        if self.inf == 'exact':
            return inf.exact.get_grad_marlik(self, subX, subt, params=params)
        raise NotImplementedError(
            'inference method %r is not supported' % (self.inf,))

    def get_params_bound(self):
        """Collect box constraints for all hyperparameters (lik, mean, cov)."""
        # BUGFIX: `bound` was undefined when the likelihood has no parameters.
        bound = []
        if self.lik.num_params != 0:
            bound = self.lik.get_params_bound()
        if self.prior.mean.num_params != 0:
            bound.extend(self.prior.mean.get_params_bound())
        if self.prior.cov.num_params != 0:
            bound.extend(self.prior.cov.get_params_bound())
        return bound

    def prepare(self, X, t, params=None):
        """Precompute and cache inference statistics for the training data."""
        if params is None:
            params = np.copy(self.params)
        if self.inf == 'exact':
            self.stats = inf.exact.prepare(self, X, t, params)

    def get_post_fmean(self, X, Z, params=None):
        """Posterior mean of the latent function at test inputs Z."""
        if params is None:
            params = np.copy(self.params)
        if self.inf == 'exact':
            return inf.exact.get_post_fmean(self, X, Z, params)
        raise NotImplementedError(
            'inference method %r is not supported' % (self.inf,))

    def get_post_fcov(self, X, Z, params=None, diag=True):
        """Posterior (co)variance of the latent function at test inputs Z."""
        if params is None:
            params = np.copy(self.params)
        if self.inf == 'exact':
            return inf.exact.get_post_fcov(self, X, Z, params, diag)
        raise NotImplementedError(
            'inference method %r is not supported' % (self.inf,))

    def post_sampling(self, X, Z, params=None, N=1, alpha=1):
        """Draw N samples of the latent function at Z from the posterior."""
        if params is None:
            params = np.copy(self.params)
        # BUGFIX: the resolved params were previously dropped (params=None
        # was passed down, always falling back to self.params).
        fmean = self.get_post_fmean(X, Z, params=params)
        fcov = self.get_post_fcov(X, Z, params=params, diag=False)
        return np.random.multivariate_normal(fmean, fcov * alpha**2, N)

    def predict_sampling(self, X, Z, params=None, N=1):
        """Draw N predictive samples (latent + observation noise) at Z."""
        if params is None:
            params = np.copy(self.params)
        ndata = Z.shape[0]
        fmean = self.get_post_fmean(X, Z, params=params)
        fcov = self.get_post_fcov(X, Z, params=params, diag=False) \
            + self.lik.get_cov(ndata)
        return np.random.multivariate_normal(fmean, fcov, N)

    def print_params(self):
        """Pretty-print the current hyperparameters to stdout."""
        print('\n')
        if self.lik.num_params != 0:
            print('likelihood parameter = ', self.lik.params)
        if self.prior.mean.num_params != 0:
            print('mean parameter in GP prior: ', self.prior.mean.params)
        print('covariance parameter in GP prior: ', self.prior.cov.params)
        print('\n')

    def get_cand_params(self, X, t):
        """Heuristic initial values for all hyperparameters from the data."""
        params = np.zeros(self.num_params)
        if self.lik.num_params != 0:
            params[0:self.lik.num_params] = self.lik.get_cand_params(t)
        temp = self.lik.num_params
        if self.prior.mean.num_params != 0:
            params[temp:temp + self.prior.mean.num_params] \
                = self.prior.mean.get_cand_params(t)
            temp += self.prior.mean.num_params
        if self.prior.cov.num_params != 0:
            params[temp:] = self.prior.cov.get_cand_params(X, t)
        return params

    def fit(self, X, t, config):
        """Optimize the hyperparameters with the method named in *config*."""
        method = config.learning.method
        if method == 'adam':
            adam = learning.adam(self, config)
            params = adam.run(X, t)
        if method in ('bfgs', 'batch'):
            bfgs = learning.batch(self, config)
            params = bfgs.run(X, t)
        self.set_params(params)
| StarcoderdataPython |
4835151 | """
Handle labels
"""
from lib.amech_io import parser
from routines.pf.models.typ import need_fake_wells
def make_pes_label_dct(rxn_lst, pes_idx, spc_dct, spc_model_dct):
    """ Builds a dictionary that matches the mechanism name to the labels used
        in the MESS input and output files for the whole PES
    """
    pes_label_dct = {}
    for rxn in rxn_lst:
        # NOTE(review): debug prints left in — consider routing through a logger.
        print('rxn\n', rxn)
        print()
        chn_idx = rxn['chn_idx']
        # Well models decide whether fake wells are inserted for this channel.
        pf_models = parser.model.pf_model_info(
            spc_model_dct[rxn['model'][1]]['pf'])
        print(pf_models)
        rwell_model = pf_models['rwells']
        pwell_model = pf_models['pwells']
        tsname = 'ts_{:g}_{:g}'.format(pes_idx, chn_idx)
        # Accumulate labels channel by channel into one PES-wide dict.
        pes_label_dct.update(
            _make_channel_label_dct(
                tsname, chn_idx, pes_label_dct, rxn, spc_dct,
                rwell_model, pwell_model))
        print('pes_label dct')
        print(pes_label_dct)
        print()
    return pes_label_dct
def _make_channel_label_dct(tsname, chn_idx, label_dct, rxn, spc_dct,
                            rwell_model, pwell_model):
    """ Builds a dictionary that matches the mechanism name to the labels used
        in the MESS input and output files

        Label scheme: 'P<i>' bimolecular products/reactants, 'W<i>' wells,
        'F<i>' fake wells, 'FRB'/'FPB' phase-space-theory barriers,
        'B<chn_idx>' the channel's transition state.
        Mutates and returns *label_dct*.
    """
    # Initialize idxs for bimol, well, and fake species
    pidx, widx, fidx = 1, 1, 1
    for val in label_dct.values():
        if 'P' in val:
            pidx += 1
        elif 'W' in val:
            widx += 1
        elif 'F' in val:
            fidx += 1

    # Determine the idxs for the channel reactants
    reac_label = ''
    bimol = bool(len(rxn['reacs']) > 1)
    # Try both orderings of the species key before minting a new label.
    well_dct_key1 = '+'.join(rxn['reacs'])
    well_dct_key2 = '+'.join(rxn['reacs'][::-1])
    if well_dct_key1 not in label_dct:
        if well_dct_key2 in label_dct:
            well_dct_key1 = well_dct_key2
        else:
            if bimol:
                reac_label = 'P' + str(pidx)
                pidx += 1
                label_dct[well_dct_key1] = reac_label
            else:
                reac_label = 'W' + str(widx)
                widx += 1
                label_dct[well_dct_key1] = reac_label
    if not reac_label:
        reac_label = label_dct[well_dct_key1]

    # Determine the idxs for the channel products
    prod_label = ''
    bimol = bool(len(rxn['prods']) > 1)
    well_dct_key1 = '+'.join(rxn['prods'])
    well_dct_key2 = '+'.join(rxn['prods'][::-1])
    if well_dct_key1 not in label_dct:
        if well_dct_key2 in label_dct:
            well_dct_key1 = well_dct_key2
        else:
            if bimol:
                prod_label = 'P' + str(pidx)
                label_dct[well_dct_key1] = prod_label
            else:
                prod_label = 'W' + str(widx)
                label_dct[well_dct_key1] = prod_label
    if not prod_label:
        prod_label = label_dct[well_dct_key1]

    # Determine idxs for any fake wells if they are needed
    # NOTE(review): reac_label/prod_label and the fake/pst labels below are
    # computed but never returned — presumably kept for side effects on
    # label_dct only; confirm before removing.
    fake_wellr_label = ''
    if need_fake_wells(spc_dct[tsname]['class'], rwell_model):
        well_dct_key1 = 'F' + '+'.join(rxn['reacs'])
        well_dct_key2 = 'F' + '+'.join(rxn['reacs'][::-1])
        if well_dct_key1 not in label_dct:
            if well_dct_key2 in label_dct:
                well_dct_key1 = well_dct_key2
            else:
                fake_wellr_label = 'F' + str(fidx)
                fidx += 1
                label_dct[well_dct_key1] = fake_wellr_label
                # pst_r_label = 'FRB' + str(int(tsname.replace('ts_', ''))+1)
                pst_r_label = 'FRB' + str(chn_idx)
                label_dct[well_dct_key1.replace('F', 'FRB')] = pst_r_label
            if not fake_wellr_label:
                fake_wellr_label = label_dct[well_dct_key1]
                pst_r_label = label_dct[well_dct_key1.replace('F', 'FRB')]
        else:
            fake_wellr_label = label_dct[well_dct_key1]
    fake_wellp_label = ''
    if need_fake_wells(spc_dct[tsname]['class'], pwell_model):
        well_dct_key1 = 'F' + '+'.join(rxn['prods'])
        well_dct_key2 = 'F' + '+'.join(rxn['prods'][::-1])
        if well_dct_key1 not in label_dct:
            if well_dct_key2 in label_dct:
                well_dct_key1 = well_dct_key2
            else:
                fake_wellp_label = 'F' + str(fidx)
                fidx += 1
                label_dct[well_dct_key1] = fake_wellp_label
                # pst_p_label = 'FPB' + str(int(tsname.replace('ts_', ''))+1)
                pst_p_label = 'FPB' + str(chn_idx)
                label_dct[well_dct_key1.replace('F', 'FPB')] = pst_p_label
            if not fake_wellp_label:
                print('label test', label_dct, well_dct_key1)
                fake_wellp_label = label_dct[well_dct_key1]
                # Self-reactions reuse the reactant-side barrier label.
                if rxn['prods'] == rxn['reacs'] or rxn['prods'] == rxn['reacs'][::-1]:
                    pst_p_label = label_dct[well_dct_key1.replace('F', 'FRB')]
                else:
                    pst_p_label = label_dct[well_dct_key1.replace('F', 'FPB')]
        else:
            fake_wellp_label = label_dct[well_dct_key1]

    # The channel's transition state keyed by its ts name.
    label_dct[tsname] = 'B' + str(chn_idx)
    return label_dct
| StarcoderdataPython |
3311276 | import os
import sys
import sqlite3
# This is the Windows Path
# NOTE(review): getenv may return None on non-Windows hosts, making the
# concatenation raise TypeError — confirm the script is Windows-only.
PathName = os.getenv('localappdata') + '\\Google\\Chrome\\User Data\\Default\\'
# Exit early when Chrome's profile directory is absent.
if (os.path.isdir(PathName) == False):
    print('[!] Chrome Doesn\'t exists')
    sys.exit(0)
def DownloadsHash():
    """Dump the GUID of every Chrome download into downloadhash.txt.

    Reads the `downloads` table of the profile's History SQLite database.
    """
    downloadsPath = PathName + 'History'
    connexion = sqlite3.connect(downloadsPath)
    try:
        c = connexion.cursor()
        hashes = c.execute("SELECT guid from downloads ")
        # BUGFIX: the original never closed the output file or the DB
        # connection (which keeps Chrome's History database locked).
        with open('downloadhash.txt', 'w', encoding="utf-8") as hash_file:
            for haash in hashes:
                haash = str(haash)
                hash_file.write("\n,%s" % haash)
    finally:
        connexion.close()
    print("Hash file wrote successfully 1:)")
def DownloadsFullReport():
    """Dump every row of Chrome's `downloads` table into downloadfull.txt."""
    downloadsPath = PathName + 'History'
    connexion = sqlite3.connect(downloadsPath)
    try:
        c = connexion.cursor()
        downloads = c.execute("SELECT * from downloads ")
        # BUGFIX: close the output file and DB connection deterministically.
        with open('downloadfull.txt', 'w', encoding="utf-8") as download_full_file:
            for download in downloads:
                download = str(download)
                download_full_file.write("\n,%s" % download)
    finally:
        connexion.close()
    print("Download full report wrote successfully 1:)")
def History():
    """Dump every row of Chrome's `urls` (browsing history) table into history.txt."""
    historyPath = PathName + 'History'
    connexion = sqlite3.connect(historyPath)
    try:
        c = connexion.cursor()
        urls = c.execute("SELECT * from urls")
        # BUGFIX: close the output file and DB connection deterministically.
        with open('history.txt', 'w', encoding="utf-8") as history_file:
            for url in urls:
                url = str(url)
                history_file.write("\n,%s" % url)
    finally:
        connexion.close()
    print("History file wrote successfully 1:)")
def Cookies():
    """Dump every row of Chrome's `cookies` table into cookies.txt."""
    cokkiesPath = PathName + 'Cookies'
    connexion = sqlite3.connect(cokkiesPath)
    try:
        c = connexion.cursor()
        cookies = c.execute("SELECT * from cookies")
        # BUGFIX: close the output file and DB connection deterministically.
        with open('cookies.txt', 'w', encoding="utf-8") as cookie_file:
            for cookie in cookies:
                cookie = str(cookie)
                cookie_file.write("\n,%s" % cookie)
    finally:
        connexion.close()
    print("Cookie file wrote successfully 1:)")
def _ask_int(prompt="?> "):
    """Keep prompting until the user types an integer; return it.

    BUGFIX: the original retried exactly once, so a second invalid entry
    crashed with an uncaught ValueError.
    """
    while True:
        try:
            return int(input(prompt))
        except ValueError:
            print("Enter number of your choice!")


def mainMenu():
    """Print the top-level menu and dispatch to the selected dump routine."""
    print('[*]Make sure Google Chrom is closed')
    print("1.History Dump")
    print("2.Downloads Dump")
    print("3.Cookies Dump")
    want = _ask_int()
    if want == 1:
        History()
    elif want == 2:
        print("1.Download Hash Dump")
        print("2.Full Report")
        want_for_downloads = _ask_int()
        if want_for_downloads == 1:
            DownloadsHash()
        elif want_for_downloads == 2:
            DownloadsFullReport()
    elif want == 3:
        Cookies()
def banner():
    """Print the ASCII-art banner and author/usage notes."""
    # NOTE(review): the f-prefix is unnecessary (no placeholders) but harmless.
    print(f"""
 $$$$$$\ $$\
$$ __$$\ $$ |
$$ / \__|$$$$$$$\ $$$$$$\ $$$$$$\ $$$$$$\$$$$\
$$ | $$ __$$\ $$ __$$\ $$ __$$\ $$ _$$ _$$\
$$ | $$ | $$ |$$ | \__|$$ / $$ |$$ / $$ / $$ |
$$ | $$\ $$ | $$ |$$ | $$ | $$ |$$ | $$ | $$ |
\$$$$$$ |$$ | $$ |$$ | \$$$$$$ |$$ | $$ | $$ |
 \______/ \__| \__|\__| \______/ \__| \__| \__|
$$$$$$$\
$$ __$$\
$$ | $$ |$$\ $$\ $$$$$$\$$$$\ $$$$$$\
$$ | $$ |$$ | $$ |$$ _$$ _$$\ $$ __$$\
$$ | $$ |$$ | $$ |$$ / $$ / $$ |$$ / $$ |
$$ | $$ |$$ | $$ |$$ | $$ | $$ |$$ | $$ |
$$$$$$$ |\$$$$$$ |$$ | $$ | $$ |$$$$$$$ |
\_______/ \______/ \__| \__| \__|$$ ____/
$$ |
$$ |
\__|
[*] Make sure Google Chrom is Closed
[i]Author: <NAME>
[i]Follow: twitter.com/akklaontweet
[i]Website: www.akilabandara.rf.gd
""")
def main():
    """Entry point: show the banner, then run the interactive menu."""
    banner()
    mainMenu()


main()
111790 | <reponame>JackTriton/OoT-Randomizer
import importlib.util
import os
import ListE
from Utils import data_path as dataPath
# Maps the `set` selector to the attribute name holding that table on a
# language module (either the external ListX.py or the bundled ListE).
_LANG_TABLES = {
    "items": "NEW_ITEMS_X",
    "navi": "NAVI_X",
    "shop": "SHOP_X",
    "misc": "MISC_X",
    "text": "textTableX",
    "special": "specialTableX",
    "reward text": "reward_text_X",
    "reward list": "reward_list_X",
    "dungeon list": "dungeon_list_X",
    "hint": "hintTableX",
    "trial": "trial_X",
    "goal": "goalTableX",
    "return": "retX",
}


def getLang(world, set, name=None):
    """Look up a localisation table (or a single entry of it) for the
    configured language.

    When ``world.settings.language_selection == "extra"`` the tables are loaded
    from ``ListX.py`` inside ``world.settings.lang_path``; otherwise the
    bundled ``ListE`` module is used.  ``set`` selects which table; with
    ``name`` given, the single entry ``table[name]`` is returned instead of
    the whole table.  ``set == "data"`` returns a filesystem path rather than
    a table.  Unknown selectors return None, matching the original if/elif
    chain's implicit fall-through.

    This replaces ~150 lines of duplicated if/elif dispatch with one
    table-driven lookup; behaviour is unchanged.
    """
    if world.settings.language_selection == "extra":
        lang_dir = world.settings.lang_path
        # Load the user-supplied language module from disk.
        spec = importlib.util.spec_from_file_location(
            "ListX", os.path.join(lang_dir, "ListX.py"))
        lang = importlib.util.module_from_spec(spec)
        spec.loader.exec_module(lang)
        if set == "data":
            data_dir = os.path.join(lang_dir, "data")
            return data_dir if name is None else os.path.join(data_dir, name)
    else:
        lang = ListE
        if set == "data":
            return dataPath() if name is None else dataPath(name)
    if set in _LANG_TABLES:
        table = getattr(lang, _LANG_TABLES[set])
        return table if name is None else table[name]
    return None
# DataName: AddressStart #AddressEnd
dataList = {
"title": 0x01795300, # 0x017AE300
"continue_JP": 0x00862000, # 0x00862980
"continue_EN": 0x00862980, # 0x00863300
"IDTTitle_JP": 0x00864000, # 0x00864600
"DDCTitle_JP": 0x00864600, # 0x00864C00
"JJBTitle_JP": 0x00864C00, # 0x00865200
"FoTTitle_JP": 0x00865200, # 0x00865800
"FiTTitle_JP": 0x00865800, # 0x00865E00
"WaTTitle_JP": 0x00865E00, # 0x00866400
"SpTTitle_JP": 0x00866400, # 0x00866A00
"ShTTitle_JP": 0x00866A00, # 0x00867000
"BoWTitle_JP": 0x00867000, # 0x00867600
"IcCTitle_JP": 0x00867600, # 0x00867C00
"To_Equip_JP": 0x00867C00, # 0x00867F80
"To_Decide_JP": 0x00867F80, # 0x00868380
"To_Play_M_JP": 0x00868380, # 0x00868880
"To_Select_I_JP": 0x00868880, # 0x00869080
"To_Map_JP": 0x00869080, # 0x00869880
"To_Quest_Stat_JP": 0x00869880, # 0x0086A080
"To_Equip_JP": 0x0086A080, # 0x0086A880
"Save_Text_JP": 0x0086A880, # 0x0086B200
"Saved_Text_JP": 0x0086B200, # 0x0086BB80
"Yes_JP": 0x0086BB80, # 0x0086BE80
"No_JP": 0x0086BE80, # 0x0086C180
"Cur_Pos_JP": 0x0086C180, # 0x0086C280
"Equip_1,0_JP": 0x0086C280, # 0x0086CC80
"Select_I_0,0_JP": 0x0086CC80, # 0x0086D680
"Select_I_1,0_JP": 0x0086D680, # 0x0086E080
"Select_I_2,0_JP": 0x0086E080, # 0x0086EA80
"Map_1,0_JP": 0x0086EA80, # 0x0086F480
"Quest_Stat_0,0_JP": 0x0086F480, # 0x0086FE80
"Quest_Status_1,0_JP": 0x0086FE80, # 0x00870880
"Quest_Status_2,0_JP": 0x00870880, # 0x00871280
"Save_1,0_JP": 0x00871280, # 0x00872000
"IDTTitle_EN": 0x00872000, # 0x00872600
"DDCTitle_EN": 0x00872600, # 0x00872C00
"JJBTitle_EN": 0x00873C00, # 0x00873200
"FoTTitle_EN": 0x00873200, # 0x00873800
"FiTTitle_EN": 0x00873800, # 0x00873E00
"WaTTitle_EN": 0x00873E00, # 0x00874400
"SpTTitle_EN": 0x00874400, # 0x00874A00
"ShTTitle_EN": 0x00874A00, # 0x00875000
"BoWTitle_EN": 0x00875000, # 0x00875600
"IcCTitle_EN": 0x00875600, # 0x00875C00
"To_Equip_EN": 0x00875C00, # 0x00875F80
"To_Decide_EN": 0x00875F80, # 0x00876380
"To_Play_M_EN": 0x00876380, # 0x00876880
"To_Select_I_EN": 0x00876880, # 0x00877080
"To_Map_EN": 0x00877080, # 0x00877880
"To_Quest_Stat_EN": 0x00877880, # 0x00878080
"To_Equip_EN": 0x00878080, # 0x00878880
"Save_Text_EN": 0x00878880, # 0x00879200
"Saved_Text_EN": 0x00879200, # 0x00879B80
"Yes_EN": 0x00879B80, # 0x00879E80
"No_EN": 0x00879E80, # 0x0087A180
"Cur_Pos_EN": 0x0087A180, # 0x0087A280
"Equip_1,0_EN": 0x0087A280, # 0x0087AC80
"Select_I_0,0_EN": 0x0087AC80, # 0x0087B680
"Select_I_1,0_EN": 0x0087B680, # 0x0087C080
"Select_I_2,0_EN": 0x0087C080, # 0x0087CA80
"Map_1,0_EN": 0x0087CA80, # 0x0087D480
"Quest_Stat_0,0_EN": 0x0087D480, # 0x0087DE80
"Quest_Status_1,0_EN": 0x0087DE80, # 0x0087E880
"Quest_Status_2,0_EN": 0x0087E880, # 0x0087F280
"Save_1,0_EN": 0x0087F280, # 0x00880000
"Stick_JP": 0x00880000, # 0x00880400
"Nut_JP": 0x00880400, # 0x00880800
"Bomb_JP": 0x00880800, # 0x00880C00
"Bow_JP": 0x00880C00, # 0x00881000
"Fire_Arrow_JP": 0x00881000, # 0x00881400
"Din_Fire_JP": 0x00881400, # 0x00881800
"Slingshot_JP": 0x00881800, # 0x00881C00
"FOcarina_JP": 0x00881C00, # 0x00882000
"TOcarina_JP": 0x00882000, # 0x00882400
"Bombchu_JP": 0x00882400, # 0x00882800
"Hookshot_JP": 0x00882800, # 0x00882C00
"Longshot_JP": 0x00882C00, # 0x00883000
"Ice_Arrow_JP": 0x00883000, # 0x00883400
"Farore_Wind_JP": 0x00883400, # 0x00883800
"Boomerang_JP": 0x00883800, # 0x00883C00
"LoTruth_JP": 0x00883C00, # 0x00884000
"Beans_JP": 0x00884000, # 0x00884400
"Hammer_JP": 0x00884400, # 0x00884800
"Light_Arrow_JP": 0x00884800, # 0x00884C00
"Nayru_Love_JP": 0x00884C00, # 0x00885000
"Bottle_JP": 0x00885000, # 0x00885400
"Red_Potion_JP": 0x00885400, # 0x00885800
"Green_Potion_JP": 0x00885800, # 0x00885C00
"Blue_Potion_JP": 0x00885C00, # 0x00886000
"Fairy_JP": 0x00886000, # 0x00886400
"Fish_JP": 0x00886400, # 0x00886800
"Milk_JP": 0x00886800, # 0x00886C00
"RLetter_JP": 0x00886C00, # 0x00887000
"Blue_Fire_JP": 0x00887000, # 0x00887400
"Bug_JP": 0x00887400, # 0x00887800
"B_Poe_JP": 0x00887800, # 0x00887C00
"Milk_H_JP": 0x00887C00, # 0x00888000
"Poe_JP": 0x00888000, # 0x00888400
"Weird_Egg_JP": 0x00888400, # 0x00888800
"Cucco_JP": 0x00888800, # 0x00888C00
"ZLetter_JP": 0x00888C00, # 0x00889000
"Keaton_JP": 0x00889000, # 0x00889400
"Skull_JP": 0x00889400, # 0x00889800
"Spook_JP": 0x00889800, # 0x00889C00
"Bunny_JP": 0x00889C00, # 0x0088A000
"Goron_JP": 0x0088A000, # 0x0088A400
"Zora_JP": 0x0088A400, # 0x0088A800
"Gerudo_JP": 0x0088A800, # 0x0088AC00
"MoTruth_JP": 0x0088AC00, # 0x0088B000
"SOLD_OUT_JP": 0x0088B000, # 0x0088B400
"Pocket_Egg_JP": 0x0088B400, # 0x0088B800
"Pocket_Cucco_JP": 0x0088B800, # 0x0088BC00
"Cojiro_JP": 0x0088BC00, # 0x0088C000
"Mushroom_JP": 0x0088C000, # 0x0088C400
"OPotion_JP": 0x0088C400, # 0x0088C800
"Saw_JP": 0x0088C800, # 0x0088CC00
"GoronSB_JP": 0x0088CC00, # 0x0088D000
"Prescription_JP": 0x0088D000, # 0x0088D400
"Frog_JP": 0x0088D400, # 0x0088D800
"Eye_Drop_JP": 0x0088D800, # 0x0088DC00
"Claim_JP": 0x0088DC00, # 0x0088EC00
"KSword_JP": 0x0088EC00, # 0x0088F000
"MSword_JP": 0x0088F000, # 0x0088F400
"BKnife_JP": 0x0088F400, # 0x0088F800
"DekuS_JP": 0x0088F800, # 0x0088FC00
"HylianS_JP": 0x0088FC00, # 0x00890000
"MirrorS_JP": 0x00890000, # 0x00890400
"KTunic_JP": 0x00890400, # 0x00890800
"GTunic_JP": 0x00890800, # 0x00890C00
"ZTunic_JP": 0x00890C00, # 0x00891000
"KBoots_JP": 0x00891000, # 0x00891400
"IBoots_JP": 0x00891400, # 0x00891800
"HBoots_JP": 0x00891800, # 0x00891C00
"BuBag30_JP": 0x00891C00, # 0x00892000
"BuBag40_JP": 0x00892000, # 0x00892400
"BuBag50_JP": 0x00892400, # 0x00892800
"Quiver30_JP": 0x00892800, # 0x00892C00
"Quiver40_JP": 0x00892C00, # 0x00893000
"Quiver50_JP": 0x00893000, # 0x00893400
"BombBag20_JP": 0x00893400, # 0x00893800
"BombBag30_JP": 0x00893800, # 0x00893C00
"BombBag40_JP": 0x00893C00, # 0x00894000
"GoronBracelet_JP": 0x00894000, # 0x00894400
"SilverGauntlets_JP": 0x00894400, # 0x00894800
"GoldenGauntlets_JP": 0x00894800, # 0x00894C00
"SilverScale_JP": 0x00894C00, # 0x00895000
"GoldenScale_JP": 0x00895000, # 0x00895400
"GiantsKnife B_JP": 0x00895400, # 0x00895800
"AdultsWallet_JP": 0x00895800, # 0x00895C00
"GiantsWallet_JP": 0x00895C00, # 0x00896000
"DekuSeeds_JP": 0x00896000, # 0x00896400
"FishPole_JP": 0x00896400, # 0x00896800
"MoF_JP": 0x00896800, # 0x00896C00
"BoF_JP": 0x00896C00, # 0x00897000
"SoW_JP": 0x00897000, # 0x00897400
"RoS_JP": 0x00897400, # 0x00897800
"NoS_JP": 0x00897800, # 0x00897C00
"PoL_JP": 0x00897C00, # 0x00898000
"ZeL_JP": 0x00898000, # 0x00898400
"EpS_JP": 0x00898400, # 0x00898800
"SaS_JP": 0x00898800, # 0x00898C00
"SunS_JP": 0x00898C00, # 0x00899000
"SoT_JP": 0x00899000, # 0x00899400
"SoS_JP": 0x00899400, # 0x00899800
"ForMedal_JP": 0x00899800, # 0x00899C00
"FirMedal_JP": 0x00899C00, # 0x0089A000
"WatMedal_JP": 0x0089A000, # 0x0089A400
"SpiMedal_JP": 0x0089A400, # 0x0089A800
"ShaMedal_JP": 0x0089A800, # 0x0089AC00
"LigMedal_JP": 0x0089AC00, # 0x0089B000
"Emerald_JP": 0x0089B000, # 0x0089B400
"Ruby_JP": 0x0089B400, # 0x0089B800
"Sapphire_JP": 0x0089B800, # 0x0089BC00
"Agony_JP": 0x0089BC00, # 0x0089C000
"Gerudo_Card_JP": 0x0089C000, # 0x0089C400
"GS_JP": 0x0089C400, # 0x0089C800
"HContainer_JP": 0x0089C800, # 0x0089D000
"Boss_Key_JP": 0x0089D000, # 0x0089D400
"Compass_JP": 0x0089D400, # 0x0089D800
"Dungeon_Map_JP": 0x0089D800, # 0x0089E800
"BSword_JP": 0x0089E800, # 0x0089EC00
"Stick_EN": 0x0089EC00, # 0x0089F000
"Nut_EN": 0x0089F000, # 0x0089F400
"Bomb_EN": 0x0089F400, # 0x0089F800
"Bow_EN": 0x0089F800, # 0x0089FC00
"Fire_Arrow_EN": 0x0089FC00, # 0x008A0000
"Din_Fire_EN": 0x008A0000, # 0x008A0400
"Slingshot_EN": 0x008A0400, # 0x008A0800
"FOcarina_EN": 0x008A0800, # 0x008A0C00
"TOcarina_EN": 0x008A0C00, # 0x008A1000
"Bombchu_EN": 0x008A1000, # 0x008A1400
"Hookshot_EN": 0x008A1400, # 0x008A1800
"Longshot_EN": 0x008A1800, # 0x008A1C00
"Ice_Arrow_EN": 0x008A1C00, # 0x008A2000
"Farore_Wind_EN": 0x008A2000, # 0x008A2400
"Boomerang_EN": 0x008A2400, # 0x008A2800
"LoTruth_EN": 0x008A2800, # 0x008A2C00
"Beans_EN": 0x008A2C00, # 0x008A3000
"Hammer_EN": 0x008A3000, # 0x008A3400
"Light_Arrow_EN": 0x008A3400, # 0x008A3800
"Nayru_Love_EN": 0x008A3800, # 0x008A3C00
"Bottle_EN": 0x008A3C00, # 0x008A4000
"Red_Potion_EN": 0x008A4000, # 0x008A4400
"Green_Potion_EN": 0x008A4400, # 0x008A4800
"Blue_Potion_EN": 0x008A4800, # 0x008A4C00
"Fairy_EN": 0x008A4C00, # 0x008A5000
"Fish_EN": 0x008A5000, # 0x008A5400
"Milk_EN": 0x008A5400, # 0x008A5800
"RLetter_EN": 0x008A5800, # 0x008A5C00
"Blue_Fire_EN": 0x008A5C00, # 0x008A6000
"Bug_EN": 0x008A6000, # 0x008A6400
"B_Poe_EN": 0x008A6400, # 0x008A6800
"Milk_H_EN": 0x008A6800, # 0x008A6C00
"Poe_EN": 0x008A6C00, # 0x008A7000
"Weird_Egg_EN": 0x008A7000, # 0x008A7400
"Cucco_EN": 0x008A7400, # 0x008A7800
"ZLetter_EN": 0x008A7800, # 0x008A7C00
"Keaton_EN": 0x008A7C00, # 0x008A8000
"Skull_EN": 0x008A8000, # 0x008A8400
"Spook_EN": 0x008A8400, # 0x008A8800
"Bunny_EN": 0x008A8800, # 0x008A8C00
"Goron_EN": 0x008A8C00, # 0x008A9000
"Zora_EN": 0x008A9000, # 0x008A9400
"Gerudo_EN": 0x008A9400, # 0x008A9800
"MoTruth_EN": 0x008A9800, # 0x008A9C00
"SOLD_OUT_EN": 0x008A9C00, # 0x008AA000
"Pocket_Egg_EN": 0x008AA000, # 0x008AA400
"Pocket_Cucco_EN": 0x008AA400, # 0x008AA800
"Cojiro_EN": 0x008AA800, # 0x008AAC00
"Mushroom_EN": 0x008AAC00, # 0x008AB000
"OPotion_EN": 0x008AB000, # 0x008AB400
"Saw_EN": 0x008AB400, # 0x008AB800
"GoronSB_EN": 0x008AB800, # 0x008ABC00
"Prescription_EN": 0x008ABC00, # 0x008AC000
"Frog_EN": 0x008AC000, # 0x008AC400
"Eye_Drop_EN": 0x008AC400, # 0x008AC800
"Claim_EN": 0x008AC800, # 0x008AD800
"KSword_EN": 0x008AD800, # 0x008ADC00
"MSword_EN": 0x008ADC00, # 0x008AE000
"BKnife_EN": 0x008AE000, # 0x008AE400
"DekuS_EN": 0x008AE400, # 0x008AE800
"HylianS_EN": 0x008AE800, # 0x008AEC00
"MirrorS_EN": 0x008AEC00, # 0x008AF000
"KTunic_EN": 0x008AF000, # 0x008AF400
"GTunic_EN": 0x008AF400, # 0x008AF800
"ZTunic_EN": 0x008AF800, # 0x008AFC00
"KBoots_EN": 0x008AFC00, # 0x008B0000
"IBoots_EN": 0x008B0000, # 0x008B0400
"HBoots_EN": 0x008B0400, # 0x008B0800
"BuBag30_EN": 0x008B0800, # 0x008B0C00
"BuBag40_EN": 0x008B0C00, # 0x008B1000
"BuBag50_EN": 0x008B1000, # 0x008B1400
"Quiver30_EN": 0x008B1400, # 0x008B1800
"Quiver40_EN": 0x008B1800, # 0x008B1C00
"Quiver50_EN": 0x008B1C00, # 0x008B2000
"BombBag20_EN": 0x008B2000, # 0x008B2400
"BombBag30_EN": 0x008B2400, # 0x008B2800
"BombBag40_EN": 0x008B2800, # 0x008B2C00
"GoronBracelet_EN": 0x008B2C00, # 0x008B3000
"SilverGauntlets_EN": 0x008B3000, # 0x008B3400
"GoldenGauntlets_EN": 0x008B3400, # 0x008B3800
"SilverScale_EN": 0x008B3800, # 0x008B3C00
"GoldenScale_EN": 0x008B3C00, # 0x008B4000
"GiantsKnife B_EN": 0x008B4000, # 0x008B4400
"AdultsWallet_EN": 0x008B4400, # 0x008B4800
"GiantsWallet_EN": 0x008B4800, # 0x008B4C00
"DekuSeeds_EN": 0x008B4C00, # 0x008B5000
"FishPole_EN": 0x008B5000, # 0x008B5400
"MoF_EN": 0x008B5400, # 0x008B5800
"BoF_EN": 0x008B5800, # 0x008B5C00
"SoW_EN": 0x008B5C00, # 0x008B6000
"RoS_EN": 0x008B6000, # 0x008B6400
"NoS_EN": 0x008B6400, # 0x008B6800
"PoL_EN": 0x008B6800, # 0x008B6C00
"ZeL_EN": 0x008B6C00, # 0x008B7000
"EpS_EN": 0x008B7000, # 0x008B7400
"SaS_EN": 0x008B7400, # 0x008B7800
"SunS_EN": 0x008B7800, # 0x008B7C00
"SoT_EN": 0x008B7C00, # 0x008B8000
"SoS_EN": 0x008B8000, # 0x008B8400
"ForMedal_EN": 0x008B8400, # 0x008B8800
"FirMedal_EN": 0x008B8800, # 0x008B8C00
"WatMedal_EN": 0x008B8C00, # 0x008B9000
"SpiMedal_EN": 0x008B9000, # 0x008B9400
"ShaMedal_EN": 0x008B9400, # 0x008B9800
"LigMedal_EN": 0x008B9800, # 0x008B9C00
"Emerald_EN": 0x008B9C00, # 0x008BA000
"Ruby_EN": 0x008BA000, # 0x008BA400
"Sapphire_EN": 0x008BA400, # 0x008BA800
"Agony_EN": 0x008BA800, # 0x008BAC00
"Gerudo_Card_EN": 0x008BAC00, # 0x008BB000
"GS_EN": 0x008BB000, # 0x008BB400
"HContainer_EN": 0x008BB400, # 0x008BBC00
"Boss_Key_EN": <KEY>, # 0x008BC000
"Compass_EN": 0x008BC000, # 0x008BC400
"Dungeon_Map_EN": 0x008BC400, # 0x008BD400
"BSword_EN": 0x008BD400, # 0x008BD800
"Wasteland_JP": 0x008BE000, # 0x008BE400
"Fortress_JP": 0x008BE400, # 0x008BE800
"Valley_JP": 0x008BE800, # 0x008BEC00
"Lakeside_JP": 0x008BEC00, # 0x008BF000
"LLR_JP": 0x008BF000, # 0x008BF400
"Market_JP": 0x008BF400, # 0x008BF800
"Field_JP": 0x008BF800, # 0x008BFC00
"Mountain_JP": 0x008BFC00, # 0x008C0000
"Village_JP": 0x008C0000, # 0x008C0400
"Woods_JP": 0x008C0400, # 0x008C0800
"Forest_JP": 0x008C0800, # 0x008C0C00
"Domain_JP": 0x008C0C00, # 0x008C1000
"Wasteland_EN": 0x008C1000, # 0x008C1400
"Fortress_EN": 0x008C1400, # 0x008C1800
"Valley_EN": 0x008C1800, # 0x008C1C00
"Lakeside_EN": 0x008C1C00, # 0x008C2000
"LLR_EN": 0x008C2000, # 0x008C2400
"Market_EN": 0x008C2400, # 0x008C2800
"Field_EN": 0x008C2800, # 0x008C2C00
"Mountain_EN": 0x008C2C00, # 0x008C3000
"Village_EN": 0x008C3000, # 0x008C3400
"Woods_EN": 0x008C3400, # 0x008C3800
"Forest_EN": 0x008C3800, # 0x008C3C00
"Domain_EN": 0x008C3C00, # 0x008E0000
"Attack_JP": 0x008E0000, # 0x008E0180
"Check_JP": 0x008E0180, # 0x008E0300
"Enter_JP": 0x008E0300, # 0x008E0480
"Return_JP": 0x008E0480, # 0x008E0600
"Open_JP": 0x008E0600, # 0x008E0780
"Jump_JP": 0x008E0780, # 0x008E0900
"Decide_JP": 0x008E0900, # 0x008E0A80
"Dive_JP": 0x008E0A80, # 0x008E0C00
"Faster_JP": 0x008E0C00, # 0x008E0D80
"Throw_JP": 0x008E0D80, # 0x008E0F00
"Climb_JP": 0x008E1080, # 0x008E1200
"Drop_JP": 0x008E1200, # 0x008E1380
"Down_JP": 0x008E1380, # 0x008E1500
"Save_JP": 0x008E1500, # 0x008E1680
"Speak_JP": 0x008E1680, # 0x008E1800
"Next_JP": 0x008E1800, # 0x008E1980
"Grab_JP": 0x008E1980, # 0x008E1B00
"Stop_JP": 0x008E1B00, # 0x008E1C80
"PutAway_JP": 0x008E1C80, # 0x008E1E00
"Reel_JP": 0x008E1E00, # 0x008E1F80
"Attack_EN": 0x008E2B80, # 0x008E2D00
"Check_EN": 0x008E2D00, # 0x008E2E80
"Enter_EN": 0x008E2E80, # 0x008E3000
"Return_EN": 0x008E3000, # 0x008E3180
"Open_EN": 0x008E3180, # 0x008E3300
"Jump_EN": 0x008E3300, # 0x008E3480
"Decide_EN": 0x008E3480, # 0x008E3600
"Dive_EN": 0x008E3600, # 0x008E3780
"Faster_EN": 0x008E3780, # 0x008E3900
"Throw_EN": 0x008E3900, # 0x008E3A80
"Climb_EN": 0x008E3C00, # 0x008E3D80
"Drop_EN": 0x008E3D80, # 0x008E3F00
"Down_EN": 0x008E3F00, # 0x008E4080
"Save_EN": 0x008E4080, # 0x008E4200
"Speak_EN": 0x008E4200, # 0x008E4380
"Next_EN": 0x008E4380, # 0x008E4500
"Grab_EN": 0x008E4500, # 0x008E4680
"Stop_EN": 0x008E4680, # 0x008E4800
"PutAway_EN": 0x008E4800, # 0x008E4980
"Reel_EN": 0x008E4980, # 0x008E4B00
}
| StarcoderdataPython |
1703543 | <gh_stars>1-10
# Copyright (c) Aetheros, Inc. See COPYRIGHT
#!/usr/bin/env python
import os, sys, json, time
sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
from client.onem2m.OneM2MPrimitive import OneM2MPrimitive
from client.onem2m.resource.Subscription import Subscription
from client.cse.CSE import CSE
from client.ae.AE import AE
def main():
    """oneM2M demo flow: register an AE with the CSE, discover containers,
    create/retrieve/update a subscription on one of them, and always attempt
    to de-register the AE on the way out.
    """
    pn_cse = None  # fix: defined before the try so the finally block cannot hit NameError if CSE() raises
    try:
        AE_ID = '1234567890'
        # host, port, resource
        CSE_HOST = 'dev9.usw1.aws.corp.grid-net.com'
        CSE_PORT = 21300

        # Create an instance of CSE
        pn_cse = CSE(CSE_HOST, CSE_PORT)

        # Create an AE instance to register with the CSE.
        req_ae = AE(
            {
                AE.M2M_ATTR_APP_ID : 'N_SB_AE_1',
                AE.M2M_ATTR_APP_NAME : 'N_SB_AE_1',
                AE.M2M_ATTR_AE_ID : AE_ID,
                AE.M2M_ATTR_POINT_OF_ACCESS: ['http://localhost:7000'],
            }
        )

        print('Registering AE "{}" with CSE @ {}'.format(req_ae.aei, CSE_HOST))

        # Register ae
        res = pn_cse.register_ae(req_ae)
        res.dump('Register AE')

        if res.rsc != OneM2MPrimitive.M2M_RSC_CREATED:
            print('Could not register AE\nExiting...')
            sys.exit()  # SystemExit still runs the finally block below

        print('AE registration successful:')

        # Discover containers.
        print('Discovering containers:')
        containers = pn_cse.discover_containers()
        containers.dump('Discover Containers')
        print('Retrieved {} containers\n'.format(len(containers)))

        # Pick a container resource to work with.
        containerUri = containers[0]

        # Create a subscription to the container.
        print('Subscribing to container: {}'.format(containerUri))
        sub_res = pn_cse.create_subscription(containerUri, '10.250.10.142:8080')
        sub_res.dump('Create Subscription')
        print('Subscription created: {}'.format(sub_res.cn))

        print('Retrieving subscription: {}'.format(sub_res.cn))
        res = pn_cse.retrieve_resource(sub_res.cn)
        res.dump('Retrieve Subscription')

        # Use the returned subscription resource as update target.
        res_sub_dict = res.pc['m2m:sub']

        # Strip attributes that must not be sent back in an UPDATE request.
        update_sub_dict = {}
        for k, v in res_sub_dict.items():
            if k not in ('ct', 'lt', 'pi', 'ri', 'rn', 'ty'):
                update_sub_dict[k] = v

        sub = Subscription(update_sub_dict)

        print('Updating subscription resource: {}'.format(sub_res.cn))
        sub.ct = ''  # NOTE(review): re-adds the (just-stripped) creation-time attribute -- confirm the CSE tolerates this
        res = pn_cse.update_resource(sub_res.cn, sub)
        res.dump('Update Subscription')

        print('Retrieving subscription: {}'.format(sub_res.cn))
        res = pn_cse.retrieve_resource(sub_res.cn)
        res.dump('Retrieve Subscription')
    except Exception as err:
        print('Exception raised...\n')
        print(err)
    finally:
        print('Cleaning up...')
        # Clean up AE -- only if the client was constructed and registered.
        if pn_cse is not None and pn_cse.ae is not None:
            del_res = pn_cse.delete_ae()
            del_res.dump('Delete AE')


if __name__ == '__main__':
    main()
| StarcoderdataPython |
# Request headers for scraping zhihu.com with a pre-authenticated session.
headers = {
    'Cookie': '316558|dbebe0ac7c8cf0185517814d52954e37cb8eb2f7"',  # NOTE(review): trailing '"' looks like a paste artifact -- confirm the cookie value; it will also be stale
    'Host':'www.zhihu.com',
    'Referer':'http://www.zhihu.com/',
    'User-Agent':'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_10_1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/39.0.2171.95 Safari/537.36',
    'Accept-Encoding':'gzip'
}
| StarcoderdataPython |
3297348 | <reponame>inbo/speciesbim
from helpers import get_database_connection, get_config, setup_log_file, execute_sql_from_jinja_string, \
insert_or_get_scientificnameid
from csv import reader
import time
import logging
FIELDS_ANNEXSCIENTIFICNAME = ('scientificNameId', 'scientificNameInAnnex', 'isScientificName', 'annexCode', 'remarks')
def _load_annex_data_from_file(path):
""" Read taxa from file with list of taxa (names) contained in official annexes
Return a dictionary of names and their corrected versions (which are equal to the names if no correction is needed)
"""
with open(path) as csvfile:
annex_data = reader(csvfile)
annex_scientificnames = []
fields = next(annex_data)
print("Columns in " + path + ": " + ", ".join(fields))
for row in annex_data:
scientific_name_corrected = row[2]
annex_scientificnames.append({'scientificNameId': None,
'scientificNameInAnnex': row[1],
'scientificName': scientific_name_corrected,
'authorship': row[3],
'isScientificName': (scientific_name_corrected != ''),
'annexCode': row[0],
'remarks': row[5]})
return annex_scientificnames
def populate_annex_scientificname(conn, config_parser, annex_file):
    """ Populate the table annexscientificname

    If taxa-limit in configuration file is not a empty string but a number n, then the first n taxa are imported into
    the table
    """
    annex_names = _load_annex_data_from_file(path=annex_file)
    message_n_names_in_annex_file = f"Number of taxa listed in official annexes and ordinances: {len(annex_names)}"
    print(message_n_names_in_annex_file)
    logging.info(message_n_names_in_annex_file)
    # Optional cap on how many rows to import (empty string means "all").
    n_taxa_max = config_parser.get('annex_scientificname', 'taxa-limit')
    if len(n_taxa_max) > 0:
        n_taxa_max = int(n_taxa_max)
    else:
        n_taxa_max = len(annex_names)
    start = time.time()
    counter_insertions = 0
    for annex_entry in annex_names:
        if counter_insertions < n_taxa_max:
            dict_for_annexscientificname = {k: annex_entry[k] for k in FIELDS_ANNEXSCIENTIFICNAME}
            if (dict_for_annexscientificname['isScientificName'] is True):
                dict_for_scientificname = { k: annex_entry[k] for k in annex_entry.keys() - FIELDS_ANNEXSCIENTIFICNAME }
                if dict_for_scientificname['authorship'] == '':
                    dict_for_scientificname['authorship'] = None
                # fix: authorship used to be passed the scientific name
                # (copy-paste error), which also defeated the
                # empty-string -> None normalisation just above.
                id_scn = insert_or_get_scientificnameid(conn,
                                                        scientific_name=dict_for_scientificname['scientificName'],
                                                        authorship=dict_for_scientificname['authorship'])
                dict_for_annexscientificname['scientificNameId'] = id_scn
            # insert in annexscientificname
            template = """INSERT INTO annexscientificname ({{ col_names | surround_by_quote | join(', ') | sqlsafe
            }}) VALUES {{ values | inclause }} """
            execute_sql_from_jinja_string(
                conn,
                template,
                context={'col_names': tuple(dict_for_annexscientificname.keys()),
                         'values': tuple(dict_for_annexscientificname.values())}
            )
            counter_insertions += 1
            # running infos on screen (no logging)
            if counter_insertions % 20 == 0:
                elapsed_time = time.time() - start
                expected_time = elapsed_time / counter_insertions * (n_taxa_max - counter_insertions)
                info_message = "\r" + \
                               f"{counter_insertions}/{n_taxa_max} taxa inserted in annexscientificname in" + \
                               f" {round(elapsed_time, 2)}s." + \
                               f" Expected time to go: {round(expected_time, 2)}s."
                print(info_message, end="", flush=True)
        else:
            break
    # Logging and statistics
    end = time.time()
    n_taxa_inserted = f"\nTotal number of taxa inserted in annexscientificname: {counter_insertions}"
    print(n_taxa_inserted)
    logging.info(n_taxa_inserted)
    elapsed_time = f"Table annexscientificname populated in {round(end - start)}s."
    print(elapsed_time)
    logging.info(elapsed_time)
if __name__ == "__main__":
connection = get_database_connection()
config = get_config()
setup_log_file("./logs/populate_annexscientificname_log.csv")
annex_file_path = "../data/raw/official_annexes.csv"
# for demo
annex_file_path_demo = "../data/raw/official_annexes_demo.csv"
demo = config.getboolean('demo_mode', 'demo')
if not demo:
populate_annex_scientificname(conn=connection, config_parser=config, annex_file=annex_file_path)
else:
populate_annex_scientificname(conn=connection, config_parser=config, annex_file=annex_file_path_demo)
| StarcoderdataPython |
22255 | <gh_stars>0
# coding: utf-8
#
# Copyright 2022 :Barry-Thomas-Paul: Moss
#
# Licensed under the Apache License, Version 2.0 (the "License")
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http: // www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Interface Class
# this is a auto generated file generated by Cheetah
# Libre Office Version: 7.3
# Namespace: com.sun.star.packages
import typing
from abc import abstractmethod
from ..uno.x_interface import XInterface as XInterface_8f010a43
if typing.TYPE_CHECKING:
from ..io.x_input_stream import XInputStream as XInputStream_98d40ab4
class XDataSinkEncrSupport(XInterface_8f010a43):
    """
    Allows to get access to the stream of a PackageStream.

    See Also:
        `API XDataSinkEncrSupport <https://api.libreoffice.org/docs/idl/ref/interfacecom_1_1sun_1_1star_1_1packages_1_1XDataSinkEncrSupport.html>`_
    """
    # Generated UNO interface metadata consumed by the Python-UNO bridge.
    __ooo_ns__: str = 'com.sun.star.packages'
    __ooo_full_ns__: str = 'com.sun.star.packages.XDataSinkEncrSupport'
    __ooo_type_name__: str = 'interface'
    __pyunointerface__: str = 'com.sun.star.packages.XDataSinkEncrSupport'

    # NOTE: auto-generated stub -- the abstract method bodies are
    # intentionally just docstrings; concrete behaviour is supplied by the
    # UNO runtime object this interface is bound to.
    @abstractmethod
    def getDataStream(self) -> 'XInputStream_98d40ab4':
        """
        Allows to get access to the data of the PackageStream.

        In case stream is encrypted one and the key for the stream is not set, an exception must be thrown.

        Raises:
            com.sun.star.packages.WrongPasswordException: ``WrongPasswordException``
            com.sun.star.packages.zip.ZipException: ``ZipException``
            com.sun.star.io.IOException: ``IOException``
        """
    @abstractmethod
    def getPlainRawStream(self) -> 'XInputStream_98d40ab4':
        """
        Allows to get access to the raw data of the stream as it is stored in the package.

        Raises:
            com.sun.star.io.IOException: ``IOException``
            com.sun.star.packages.NoEncryptionException: ``NoEncryptionException``
        """
    @abstractmethod
    def getRawStream(self) -> 'XInputStream_98d40ab4':
        """
        Allows to get access to the data of the PackageStream as to raw stream.

        In case stream is not encrypted an exception will be thrown.

        The difference of raw stream is that it contains header for encrypted data, so an encrypted stream can be copied from one PackageStream to another one without decryption.

        Raises:
            com.sun.star.packages.NoEncryptionException: ``NoEncryptionException``
            com.sun.star.io.IOException: ``IOException``
        """
    @abstractmethod
    def setDataStream(self, aStream: 'XInputStream_98d40ab4') -> None:
        """
        Allows to set a data stream for the PackageStream.

        In case PackageStream is marked as encrypted the data stream will be encrypted on storing.

        Raises:
            com.sun.star.io.IOException: ``IOException``
        """
    @abstractmethod
    def setRawStream(self, aStream: 'XInputStream_98d40ab4') -> None:
        """
        Allows to set raw stream for the PackageStream.

        The PackageStream object can not be marked as encrypted one, an exception will be thrown in such case.

        Raises:
            com.sun.star.packages.EncryptionNotAllowedException: ``EncryptionNotAllowedException``
            com.sun.star.packages.NoRawFormatException: ``NoRawFormatException``
            com.sun.star.io.IOException: ``IOException``
        """

__all__ = ['XDataSinkEncrSupport']
| StarcoderdataPython |
3268160 | <filename>schdst.py
import os,subprocess
from tkinter import *
from tkinter.ttk import *
import tkinter as tk
from tkinter import filedialog
# --- Main window setup: a 13h x 60min grid of schedule-slot buttons. ---
root = Tk()
root.wm_iconbitmap('@/home/s2/Documents/hds/favicon.xbm')
root.wm_title("X=10Hrs to 22Hrs <::Set Schedule::> Y=0Mins to 59Mins")
root.geometry('1175x650')
global doc  # NOTE(review): `global` at module scope is a no-op; doc/btn are module-level anyway
doc=[]
global btn
#f=open("/home/s2/Documents/hds/choda.txt","r")
#fn=f.read()
#f.close()
# Path of the persisted schedule file (one "HH=MM=path=duration" line per slot).
fpt="/home/s2/Documents/hds/"
fN="schd"
fn1=fpt+fN+".txt"
print(fn1)
LABEL_BG = "#ccc"
# Virtual grid size vs. the visible viewport size (scrolled via the canvas).
ROWS, COLS = 1050, 650
ROWS_DISP = 400
COLS_DISP = 650
master_frame = tk.Frame(bg="Light Blue", bd=3, relief=tk.RIDGE)
master_frame.grid(sticky=tk.NSEW)
master_frame.columnconfigure(0, weight=1)
frame2 = tk.Frame(master_frame)
frame2.grid(row=3, column=0, sticky=tk.NW)
# The button grid lives inside a canvas so it can be scrolled vertically.
canvas = tk.Canvas(frame2, bg="white")
canvas.grid(row=0, column=0)
vsbar = tk.Scrollbar(frame2, orient=tk.VERTICAL, command=canvas.yview)
vsbar.grid(row=0, column=1, sticky=tk.NS)
canvas.configure(yscrollcommand=vsbar.set)
buttons_frame = tk.Frame(canvas, bg="gray", bd=2)
def open_file(h, m):
    """Attach a media file to the schedule slot at hour-index h (0 -> 10:00)
    and minute m.

    Slots live in the global `doc` list as "HH=MM=path=duration_seconds"
    strings; a clip longer than one minute also blocks the following slots.
    """
    print(h, m)
    global doc
    file = filedialog.askopenfile(mode='r', filetypes=[('Videos & Images', '*.*')])
    if file is None:
        return  # user cancelled the dialog
    f = file.name
    file.close()  # fix: only the path is needed; the original leaked the open handle
    if ".mp4" in f or ".vlc" in f:
        # Ask ffprobe for the clip duration in seconds.
        dur = subprocess.run(["ffprobe", "-v", "error", "-show_entries", "format=duration",
                              "-of", "default=noprint_wrappers=1:nokey=1", f],
                             stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
        dr1 = dur.stdout.decode('utf-8')
        print("dr1", dr1)
        dr = str(round(float(dr1)))
    else:
        dr = "60"  # images get a fixed one-minute slot
    f1 = str(h + 10) + "=" + str(m) + "=" + f + "=" + dr
    doc[h * 60 + m] = f1
    # Mark the following minutes as blocked for the clip's whole duration.
    k = round(int(dr) / 60)
    tp = 0
    while tp < (k - 1):
        doc[h * 60 + m + tp + 1] = str(h + 10) + "=" + str(m + tp + 1) + "=" + "n" + str(h + 10) + str(m + tp + 1) + "=" + "0"
        tp += 1
    print(file, f1)
    print("doc[slot]", doc[h * 60 + m])  # fix: debug print indexed doc[h*m], the wrong slot
    cnr0()
def clear():
    """Destroy every widget currently gridded inside buttons_frame."""
    # fix: dropped the unused `global var` declaration and the local name
    # `list`, which shadowed the builtin.
    for widget in buttons_frame.grid_slaves():
        widget.destroy()
def cnr0():
    """Persist the schedule list to disk, then rebuild the button grid."""
    with open(fn1, 'w+') as schedule_file:
        schedule_file.writelines("%s\n" % entry for entry in doc)
    cnr1()
def cnr1():
    """Rebuild the schedule grid from the global `doc` list.

    Each entry is "HH=MM=path=duration_seconds".  For every slot a button is
    drawn; slots covered by a longer clip are rendered as disabled buttons.
    """
    da=[]
    dh=[]   # hour column index (0-based, 0 -> 10:00)
    dm=[]   # minute row index
    dp=[]   # button label (file basename)
    dx=[]   # clip length rounded to whole minutes
    for d in doc:
        da=d.split("=")
        #print(len(da),da[0],da[1],da[2],da[3])
        du=float(da[3])/60
        #print(du,round(du))
        dt=os.path.basename(da[2])
        da0=int(da[0])-10
        #print(da0)
        dh.append(int(da[0])-10)
        dm.append(int(da[1]))
        dp.append(dt)
        dx.append(round(du))
    print(dp)
    clear()
    t=0
    vh=0
    vm=0
    vx=0#print("dh",dh,"dm",dm)
    while t<(len(doc)):
        vm=dm[t]
        vh=dh[t]
        print(vh,vm)
        # vx counts down the minutes still covered by the previous clip;
        # while it is running, slots are shown disabled.
        # NOTE(review): the `vx-2>0` threshold looks off-by-one vs. the
        # blocking logic in open_file -- confirm against intended behaviour.
        if vx-2>0:
            btn=Button(buttons_frame, text ="Disable", state='disabled').grid(row=dm[t],column=dh[t],columnspan=1)
            vx-=1
        else:
            # Bind the slot coordinates as defaults to avoid the
            # late-binding-closure pitfall.
            btn=Button(buttons_frame, text =dp[t], command = lambda vh=vh,vm=vm:open_file(vh,vm)).grid(row=dm[t],column=dh[t],columnspan=1)
            vx=dx[t]
        t+=1
    # Axis labels: hours along the bottom, minutes down the right side.
    for g in range(10,23,1):
        Label(buttons_frame, text = (str(g)+"Hrs"),font =('Times New Roman', 12)).grid(row=60,column=g-10,columnspan=1)
    for h in range(0,60,1):
        Label(buttons_frame, text = (str(h)+"Mins"),font =('Times New Roman', 12)).grid(row=h,column=24,columnspan=1)
for g in range(10,23,1):
Label(buttons_frame, text = (str(g)+"Hrs"),font =('Times New Roman', 12)).grid(row=60,column=g-10,columnspan=1)
for h in range(0,60,1):
Label(buttons_frame, text = (str(h)+"Mins"),font =('Times New Roman', 12)).grid(row=h,column=24,columnspan=1)
with open(fn1,'r+') as filehandle:
for line in filehandle:
currentPlace = line[:-1]
doc.append(currentPlace)
if len(doc)>0:
cnr1()
canvas.create_window((0,0), window=buttons_frame, anchor=tk.NW)
buttons_frame.update_idletasks()
bbox = canvas.bbox(tk.ALL)
w, h = bbox[2]-bbox[1], bbox[3]-bbox[1]
dw, dH = int((w/COLS) * COLS_DISP), int((h/ROWS) * ROWS_DISP)
canvas.configure(scrollregion=bbox, width=dw, height=dH)
mainloop() | StarcoderdataPython |
1799415 | <filename>catkin_ws/src/00-infrastructure/duckietown/include/duckietown_utils/image_conversions.py<gh_stars>1-10
class ImageConversions():
    # We only instantiate the bridge once
    # Process-wide CvBridge singleton; populated lazily by get_cv_bridge().
    bridge = None
def get_cv_bridge():
    """Return the process-wide CvBridge, constructing it lazily on first call."""
    cached = ImageConversions.bridge
    if cached is not None:
        return cached
    from cv_bridge import CvBridge  # @UnresolvedImport
    ImageConversions.bridge = CvBridge()
    return ImageConversions.bridge
def rgb_from_imgmsg(msg):
    """Decode a ROS Image message into an RGB (rgb8) array."""
    return get_cv_bridge().imgmsg_to_cv2(msg, "rgb8")
def bgr_from_imgmsg(msg):
    """Decode a ROS Image message into a BGR (bgr8) array."""
    return get_cv_bridge().imgmsg_to_cv2(msg, "bgr8")
def d8n_image_msg_from_cv_image(cv_image, image_format, same_timestamp_as = None):
    """
    Makes an Image message from a CV image.

    If same_timestamp_as is not None, the timestamp is copied from that
    message's header.

    image_format: 'bgr8' or 'mono' or similar
    """
    msg = get_cv_bridge().cv2_to_imgmsg(cv_image, image_format)
    if same_timestamp_as is not None:
        msg.header.stamp = same_timestamp_as.header.stamp
    return msg
def pil_from_CompressedImage(msg):
    """Decode the payload of a ROS CompressedImage message into a PIL image."""
    from PIL import ImageFile  # @UnresolvedImport
    parser = ImageFile.Parser()
    parser.feed(msg.data)
    return parser.close()
def rgb_from_pil(im):
    """Convert a PIL image (or any array-like) into a uint8 numpy array."""
    import numpy as np
    arr = np.asarray(im)
    return arr.astype(np.uint8)
def rgb_from_ros(msg):
    """Convert either a CompressedImage or a plain Image message to RGB."""
    is_compressed = 'CompressedImage' in type(msg).__name__
    if is_compressed:
        return rgb_from_pil(pil_from_CompressedImage(msg))
    return rgb_from_imgmsg(msg)

# Backwards-compatible alias kept for existing callers.
numpy_from_ros_compressed = rgb_from_ros
| StarcoderdataPython |
3337919 | <gh_stars>0
import importlib
import numpy as np
import tensorflow as tf
from keras.layers import Activation, Dense, Input
from keras.models import Model, Sequential
class VAE(object):
    """
    Variational Autoencoder
    This object is composed of an Encoder and a Decoder object
    """
    def __init__(self, input_data: np.ndarray):
        # NOTE(review): `inputs` and `z_inputs` are created but never used --
        # presumably leftovers from a Keras functional-API version; confirm
        # before removing.
        inputs = Input(shape=(784,))
        z_inputs = Input(shape=(50,))
        input_data = tf.convert_to_tensor(input_data)
        # NOTE(review): model() returns (reconstruction, loss), so self.ae is
        # a 2-tuple of tensors, not a Keras Model.
        self.ae = self.model(input_data)
    def model(self, input_data: np.ndarray):
        """
        x -> mu, log_sigma_sq -> N(mu, log_sigma_sq) -> Z -> x
        """
        self.input = input_data
        # Placeholder latent variable; overwritten by the reparameterized
        # sample further below.
        self.z = tf.Variable(tf.zeros(shape=[1,50], dtype='float32'))
        #TODO Add modularity for encoder architecture
        # instead of using a vanilla MLP like outlined
        # below, we may want to use something like a CNNVAE
        # Encoder MLP: 784 -> 300 -> 200 -> 100.
        with tf.variable_scope("enc_input_hidden_1"):
            self.x = Dense(300, activation='relu', input_dim=784)(self.input)
        with tf.variable_scope("enc_hidden_1_hidden_2"):
            self.x = Dense(200, activation='relu', input_dim=300)(self.x)
        with tf.variable_scope("enc_hidden_2_hidden_3"):
            self.x = Dense(100, activation='relu', input_dim=200)(self.x)
        # Borrowed from https://github.com/tegg89/VAE-Tensorflow/blob/master/model.py
        # Linear heads producing the 30-dim latent mean and log-variance.
        encoder_mu_weights = tf.Variable(tf.random_normal([100, 30], stddev=0.1), name='encoder_mu_weights')
        encoder_sigma_weights = tf.Variable(tf.random_normal([100, 30], stddev=0.1), name='encoder_sigma_weights')
        encoder_mu_bias = tf.Variable(tf.zeros([30]), name="encoder_mu_bias")
        encoder_sigma_bias = tf.Variable(tf.zeros([30]), name="encoder_sigma_bias")
        with tf.variable_scope("encoder_mu"):
            encoder_mu = tf.matmul(self.x, encoder_mu_weights) + encoder_mu_bias
        with tf.variable_scope("encoder_sigma_bias"):
            encoder_sigma = tf.matmul(self.x, encoder_sigma_weights) + encoder_sigma_bias
        # Sample an epsilon and generate a z from the latent space provided by the encoder
        # as outlined in "Autoencoding Variational Bayes" : Kingma et al.
        # http://arxiv.org/abs/1312.6114
        epsilon = tf.random_normal(tf.shape(encoder_sigma), name='epsilon')
        # encoder_sigma holds log-variance; exp(0.5 * logvar) is the std-dev.
        sample_encoder = tf.exp(0.5 * encoder_sigma)
        kl_divergence = -0.5 * tf.reduce_sum(1. + encoder_sigma - tf.pow(encoder_mu, 2) - tf.exp(encoder_sigma), reduction_indices=1)
        # Reparameterization trick: z = mu + sigma * epsilon.
        self.z = encoder_mu + tf.multiply(sample_encoder, epsilon)
        # NOTE(review): z is 30-dim here but the next Dense declares
        # input_dim=100; Keras ignores input_dim when called on a tensor, so
        # the declared dims below are misleading -- worth cleaning up.
        with tf.variable_scope("enc_hidden_3_z"):
            self.z = Dense(50, activation='relu', input_dim=100)(self.z)
        # Decoder MLP: 50 -> 50 -> 100 -> 200 -> 300 -> 784.
        with tf.variable_scope("dec_z_hidden_1"):
            self.z = Dense(50, activation='relu', input_dim=50)(self.z)
        with tf.variable_scope("dec_hidden_1_hidden_2"):
            self.z = Dense(100, activation='relu', input_dim=50)(self.z)
        with tf.variable_scope("dec_hidden_2_hidden_3"):
            self.z = Dense(200, activation='relu', input_dim=100)(self.z)
        with tf.variable_scope("dec_hidden_3_hidden_4"):
            self.z = Dense(300, activation='relu', input_dim=200)(self.z)
        with tf.variable_scope("dec_hidden_4_reconstruction"):
            self.reconstruction = Dense(784, activation='relu', input_dim=300)(self.z)
        # ELBO: reconstruction cross-entropy plus KL term, averaged over batch.
        binary_cross_entropy = tf.reduce_sum(tf.nn.sigmoid_cross_entropy_with_logits(logits=self.reconstruction, labels=self.input), reduction_indices=1)
        self.loss = tf.reduce_mean(kl_divergence + binary_cross_entropy)
        tf.print(self.loss)
        return self.reconstruction, self.loss
#TODO create loss function method for future inheritance / class extension
# def loss_function(self, reconstructed_x, x):
# """
# Loss function as defined in "Autoencoding Variational Bayes" : Kingma, et al.
# http://arxiv.org/abs/1312.6114
# Arguments:
# reconstructed_x {np.ndarray} -- reconstruction of input x
# x {np.ndarray} -- original input x
# """
# binary_cross_entropy = tf.reduce_sum(tf.nn.sigmoid_cross_entropy_with_logits(reconstructed_x, x), reduction_indices=1)
# #kl_divergence = -0.5 * tf.reduce_sum(1. + self.Encoder.)
| StarcoderdataPython |
55415 | <filename>google_fanyi/fanyi.py<gh_stars>0
from winreg import REG_QWORD
import requests
from fake_useragent import UserAgent
import random
import urllib.parse
import time
class GoogleFanyi():
    """Minimal client for Google Translate's internal ``batchexecute`` RPC.

    Builds the ``MkEWBc`` form payload for a zh-CN -> en translation of
    *query* and POSTs it to the batchexecute endpoint.
    """

    def __init__(self, query):
        # RPC envelope: [[["MkEWBc", "[[query, src, dst, true], [null]]", ...]]]
        form_data = f'[[["MkEWBc","[[\"{query}\",\"zh-CN\",\"en\",true],[null]]",null,"generic"]]]'
        data = urllib.parse.quote(form_data)
        self.data = {
            'f.req': data
        }
        self.url = 'https://translate.google.cn/_/TranslateWebserverUi/data/batchexecute'
        self.headers = {
            'referer': 'https://translate.google.cn/',
            'content-type': 'application/x-www-form-urlencoded;charset=UTF-8',
            'User-Agent': UserAgent(verify_ssl=False).random,
            'x-goog-batchexecute-bgr': self.get_bgr()
        }
        # Session cookies captured from a browser session; NID is redacted.
        self.cookies = {
            'NID': '<KEY>',
            'ga': 'GA1.3.1609092802.1648347034',
            'gid': 'GA1.3.1355829628.1648347034',
            'OTZ': '6434051_24_24__24_'
        }

    def make_params(self):
        """Assemble the query-string parameters for the batchexecute call."""
        params = {
            'rpcids': 'MkEWBc',
            'source-path': '/',
            'f.sid': '8460628132913231713',
            'bl': self.get_bl(),
            'hl': 'zh-CN',
            'soc-app': '1',
            'soc-platform': '1',
            'soc-device': '1',
            '_reqid': self.make_reqid(),
            'rt': 'c'
        }
        return params

    def get_bl(self):
        """Build the 'bl' build-label, e.g. 'boq_translate-webserver_20220324.06_p0'.

        Bug fix: the original used strftime('%Y%M/%D'), where %M is the
        *minute* and %D expands to 'mm/dd/yy' with slashes -- producing a
        malformed label. The label needs a compact YYYYMMDD date.
        """
        date = time.strftime('%Y%m%d')
        bl = 'boq_translate-webserver_' + date + '.06_p0'
        return bl

    def make_reqid(self):
        """Return a pseudo-random request id ending in the fixed suffix 67742."""
        _reqid = str(random.randint(1, 20)) + '67742'
        return _reqid

    def get_bgr(self):
        # Anti-abuse token generation is not implemented; header stays None.
        pass

    def get_contents(self):
        """POST the translation request; print and return the raw response text."""
        response = requests.post(
            url=self.url,
            params=self.make_params(),
            data=self.data,
            headers=self.headers,
            cookies=self.cookies
        ).text
        print(response)
        # Returning the text keeps existing print-only callers working while
        # letting new callers consume the result.
        return response
if __name__=='__main__':
    # Demo: translate the Chinese query "金融" ("finance") to English.
    fanyi = GoogleFanyi('金融')
    fanyi.get_contents()
| StarcoderdataPython |
102111 | <reponame>leucinw/leucinwChemTools
#===================================
# <NAME> #
# <EMAIL> #
# University of Texas at Austin #
#===================================
''' Usage: python matchTXYZ.py template.txyz dealwith.(t)xyz
# Assign the atom types of the template.txyz file to the dealwith txyz or xyz file.
# Especially suitable for the case that their atoms are different in order.
'''
import os
import sys
import numpy as np
def readTXYZ(TXYZ):
    """Parse a Tinker .txyz file.

    Returns (atoms, coord, order, types, connections), where entry i of
    each list describes atom i: element symbol, [x, y, z] floats, atom
    index string, atom-type string, and the connected atom indices
    (as strings).
    """
    atoms, coord, order, types, connections = [], [], [], [], []
    with open(TXYZ) as fh:
        records = fh.readlines()[1:]  # first line is the atom-count header
    for record in records:
        fields = record.split()
        order.append(fields[0])
        atoms.append(fields[1])
        coord.append([float(f) for f in fields[2:5]])
        types.append(fields[5])
        connections.append(fields[6:])
    return atoms, coord, order, types, connections
def fingerprint(TXYZ):
    """Compute a topological fingerprint string for every atom in a .txyz file.

    Each atom is described by its element symbol plus the character-sorted
    concatenation of its neighbourhood signatures out to 5 bond shells, so
    two atoms receive equal fingerprints iff their local environments match
    to that depth.

    Returns a list of fingerprint strings, one per atom, in file order.

    This refactors the original five copy-pasted "level" blocks into a
    single loop; the output is unchanged because every shell beyond the
    first is character-sorted, which makes neighbour ordering irrelevant.
    """
    atoms, elements = np.loadtxt(TXYZ, usecols=(0, 1), dtype='str', skiprows=1, unpack=True)
    with open(TXYZ) as fh:
        connections = [line.split()[6:] for line in fh.readlines()[1:]]
    atom_ele_dict = dict(zip(atoms, elements))
    # Shell 1: element symbols of the direct neighbours, sorted as symbols
    # (e.g. ['Cl', 'H'] -> 'ClH'), matching the original behaviour.
    shell = {atom: ''.join(sorted(atom_ele_dict[c] for c in con))
             for atom, con in zip(atoms, connections)}
    shells = [shell]
    # Shells 2..5: concatenate the neighbours' previous-shell signatures
    # and character-sort the result.
    for _ in range(4):
        prev = shells[-1]
        shell = {atom: ''.join(sorted(''.join(prev[c] for c in con)))
                 for atom, con in zip(atoms, connections)}
        shells.append(shell)
    # Final fingerprint: element + '-' + character-sorted union of all shells.
    return [atom_ele_dict[atom] + '-' + ''.join(sorted(''.join(s[atom] for s in shells)))
            for atom in atoms]
def main():
    """Assign the atom types of sys.argv[1] (template .txyz) to sys.argv[2],
    matching atoms by topological fingerprint, and write '<dealwith>_2'."""
    template = sys.argv[1]
    dealwith = sys.argv[2]
    if os.path.splitext(dealwith)[1] == ".xyz":
        xyz = dealwith
        # NOTE(review): replace() rewrites the FIRST "xyz" anywhere in the
        # path, so a directory name containing "xyz" would be corrupted.
        dealwith = dealwith.replace("xyz", "txyz")
        # Convert plain xyz to Tinker txyz via Open Babel (must be on PATH).
        obstr = "obabel -ixyz %s -otxyz -O %s"%(xyz, dealwith)
        os.system(obstr)
    fname = dealwith + "_2"
    fp1 = fingerprint(template)
    fp2 = fingerprint(dealwith)
    newidx = []
    for i in fp2:
        if i in fp1:
            idx = fp1.index(i)
            newidx.append(idx)
            # Blank out the matched fingerprint so duplicate environments
            # map one-to-one instead of all hitting the same template atom.
            fp1[idx] = ' '
        else:
            # NOTE(review): on a mismatch this only prints and continues,
            # leaving newidx short and the output file silently truncated.
            print(f"Error:{template} and {dealwith} could not match!")
    atoms, coord, _, _, connections = readTXYZ(dealwith)
    _, _, _,types, _ = readTXYZ(template)
    with open(fname, 'w') as f:
        f.write("%3s\n"%len(atoms))
        for i in range(len(newidx)):
            idx = int(newidx[i])
            # Geometry/connectivity from dealwith, atom type from template.
            f.write("%3s%3s%12.6f%12.6f%12.6f %s %s\n"%(i+1,atoms[i], coord[i][0], coord[i][1], coord[i][2], types[idx], ' '.join(connections[i])))
    return
if __name__ == "__main__":
    main()
| StarcoderdataPython |
154774 | # Generated by Django 2.0 on 2018-02-27 02:10
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Auto-generated Django migration (do not hand-edit the operations):
    adds Category and Topic models and links them to Event and Team."""
    dependencies = [
        ('events', '0010_userprofile_send_notifications'),
    ]
    operations = [
        migrations.CreateModel(
            name='Category',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=256)),
                ('description', models.TextField()),
                ('img_url', models.URLField()),
            ],
        ),
        migrations.CreateModel(
            name='Topic',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=256)),
                ('description', models.TextField()),
                ('category', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='events.Category')),
            ],
        ),
        migrations.AlterField(
            model_name='event',
            name='tags',
            field=models.CharField(blank=True, help_text='Comma-separates list of tags', max_length=128, null=True, verbose_name='Keyword Tags'),
        ),
        migrations.AddField(
            model_name='event',
            name='topics',
            field=models.ManyToManyField(blank=True, to='events.Topic'),
        ),
        migrations.AddField(
            model_name='team',
            name='category',
            field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, to='events.Category'),
        ),
        migrations.AddField(
            model_name='team',
            name='topics',
            field=models.ManyToManyField(blank=True, to='events.Topic'),
        ),
    ]
| StarcoderdataPython |
3282044 | <reponame>cocoaaa/vision
import os
import shutil
import tempfile
import contextlib
import unittest
import argparse
import sys
import torch
import errno
import __main__
@contextlib.contextmanager
def get_tmp_dir(src=None, **kwargs):
    """Context manager yielding a fresh temporary directory.

    When *src* is given, the directory is a recursive copy of it.
    The directory and its contents are removed on exit.
    """
    path = tempfile.mkdtemp(**kwargs)
    if src is not None:
        # copytree requires a non-existent destination, so drop the empty dir.
        os.rmdir(path)
        shutil.copytree(src, path)
    try:
        yield path
    finally:
        shutil.rmtree(path)
# Expected-output "accept" mode: refresh the recorded expect files instead of
# asserting against them. Enabled via EXPECTTEST_ACCEPT or the --accept flag.
ACCEPT = os.getenv('EXPECTTEST_ACCEPT')
parser = argparse.ArgumentParser(add_help=False)
parser.add_argument('--accept', action='store_true')
args, remaining = parser.parse_known_args()
if not ACCEPT:
    ACCEPT = args.accept
# Strip --accept from argv so downstream consumers (e.g. unittest) don't
# choke on an unknown flag.
for i, arg in enumerate(sys.argv):
    if arg == '--accept':
        del sys.argv[i]
        break
class MapNestedTensorObjectImpl(object):
    """Callable that applies *tensor_map_fn* to every torch.Tensor found in
    an arbitrarily nested structure of dicts, lists and tuples, leaving all
    other leaves untouched. Dict keys are mapped as well as values."""

    def __init__(self, tensor_map_fn):
        self.tensor_map_fn = tensor_map_fn

    def __call__(self, object):
        if isinstance(object, torch.Tensor):
            return self.tensor_map_fn(object)
        if isinstance(object, dict):
            return {self(key): self(value) for key, value in object.items()}
        if isinstance(object, tuple):
            return tuple(self(item) for item in object)
        if isinstance(object, list):
            return [self(item) for item in object]
        # Any other leaf is passed through unchanged.
        return object
def map_nested_tensor_object(object, tensor_map_fn):
    """Convenience wrapper: apply *tensor_map_fn* to every tensor in *object*."""
    return MapNestedTensorObjectImpl(tensor_map_fn)(object)
# adapted from TestCase in torch/test/common_utils to accept non-string
# inputs and set maximum binary size
class TestCase(unittest.TestCase):
    """unittest.TestCase extension with pickled expected-output support
    (adapted from torch test/common_utils, capped binary size)."""
    def assertExpected(self, output, subname=None, rtol=None, atol=None):
        r"""
        Test that a python value matches the recorded contents of a file
        derived from the name of this test and subname. The value must be
        pickable with `torch.save`. This file
        is placed in the 'expect' directory in the same directory
        as the test script. You can automatically update the recorded test
        output using --accept.
        If you call this multiple times in a single function, you must
        give a unique subname each time.
        """
        def remove_prefix(text, prefix):
            if text.startswith(prefix):
                return text[len(prefix):]
            return text
        # NB: we take __file__ from the module that defined the test
        # class, so we place the expect directory where the test script
        # lives, NOT where test/common_utils.py lives.
        module_id = self.__class__.__module__
        munged_id = remove_prefix(self.id(), module_id + ".")
        test_file = os.path.realpath(sys.modules[module_id].__file__)
        expected_file = os.path.join(os.path.dirname(test_file),
                                     "expect",
                                     munged_id)
        subname_output = ""
        if subname:
            expected_file += "_" + subname
            subname_output = " ({})".format(subname)
        expected_file += "_expect.pkl"
        expected = None
        def accept_output(update_type):
            # Persist the new expected value, then guard against bloated fixtures.
            print("Accepting {} for {}{}:\n\n{}".format(update_type, munged_id, subname_output, output))
            torch.save(output, expected_file)
            MAX_PICKLE_SIZE = 50 * 1000  # 50 KB
            binary_size = os.path.getsize(expected_file)
            self.assertTrue(binary_size <= MAX_PICKLE_SIZE)
        try:
            expected = torch.load(expected_file)
        except IOError as e:
            if e.errno != errno.ENOENT:
                raise
            elif ACCEPT:
                return accept_output("output")
            else:
                raise RuntimeError(
                    ("I got this output for {}{}:\n\n{}\n\n"
                     "No expect file exists; to accept the current output, run:\n"
                     "python {} {} --accept").format(munged_id, subname_output, output, __main__.__file__, munged_id))
        if ACCEPT:
            # NOTE(review): assertNestedTensorObjectsEqual returns None, so
            # `equal` is always falsy here and accept mode re-records the
            # output even when it already matches -- likely unintended.
            equal = False
            try:
                equal = self.assertNestedTensorObjectsEqual(output, expected, rtol=rtol, atol=atol)
            except Exception:
                equal = False
            if not equal:
                return accept_output("updated output")
        else:
            self.assertNestedTensorObjectsEqual(output, expected, rtol=rtol, atol=atol)
    def assertNestedTensorObjectsEqual(self, a, b, rtol=None, atol=None):
        """Recursively compare nested dict/list/tuple structures, using
        torch.testing.assert_allclose for tensor leaves."""
        self.assertEqual(type(a), type(b))
        if isinstance(a, torch.Tensor):
            torch.testing.assert_allclose(a, b, rtol=rtol, atol=atol)
        elif isinstance(a, dict):
            self.assertEqual(len(a), len(b))
            for key, value in a.items():
                self.assertTrue(key in b, "key: " + str(key))
                self.assertNestedTensorObjectsEqual(value, b[key], rtol=rtol, atol=atol)
        elif isinstance(a, (list, tuple)):
            self.assertEqual(len(a), len(b))
            for val1, val2 in zip(a, b):
                self.assertNestedTensorObjectsEqual(val1, val2, rtol=rtol, atol=atol)
        else:
            self.assertEqual(a, b)
| StarcoderdataPython |
3248941 | # -*- coding: utf-8 -*-
##############################################################################
#
# Copyright (c) 2016 Zope Foundation and Contributors.
# All Rights Reserved.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE.
#
##############################################################################
""" """
from .redis import REDIS_AVAILABLE
from .redis import RedisCacheDriver
from .memcached import MEMCACHED_AVAILABLE
from .memcached import MemcachedCacheDriver
__all__ = (
'REDIS_AVAILABLE',
'RedisCacheDriver',
'MEMCACHED_AVAILABLE',
'MemcachedCacheDriver',
)
| StarcoderdataPython |
41801 | import pyeccodes.accessors as _
def load(h):
    """Register the mars.origin -> centre alias for class-8 messages."""
    if h.get_l('class') != 8:
        return
    h.alias('mars.origin', 'centre')
1643676 | <reponame>Zirkuit/statistico<gh_stars>0
from http.server import HTTPServer, BaseHTTPRequestHandler
import subprocess
import os
class Serv(BaseHTTPRequestHandler):
    """Tiny status-page handler: every GET regenerates index.html with the
    current Raspberry Pi stats (IP, CPU load, memory, disk, uptime) and
    serves it back."""

    @staticmethod
    def _sh(cmd):
        """Run a shell pipeline and return its decoded, stripped output.

        Bug fix: the original embedded str(bytes) in the page, rendering
        literal b'...' markers; check_output returns bytes and must be
        decoded.
        """
        return subprocess.check_output(cmd, shell=True).decode("utf-8").strip()

    def do_GET(self):
        # Rebuild the page on every request. Opening with "w" truncates, so
        # no pre-delete is needed (the original checked the filesystem root
        # "/index.html" but actually wrote to the CWD -- fixed here).
        with open("index.html", "w") as f:
            f.write('<!DOCTYPE html> <html lang="en"> <head> <meta charset="UTF-8"> <meta name="viewport" content="width=device-width, initial-scale=1.0"> <title>Raspberry Pi Stats</title></head><body>')
            ip = self._sh("hostname -I | cut -d' ' -f1")
            cpu = self._sh("top -bn1 | grep load | awk '{printf \"CPU Load: %.2f\", $(NF-2)}'")
            mem = self._sh("free -m | awk 'NR==2{printf \"Mem: %s/%sMB %.2f%%\", $3,$2,$3*100/$2 }'")
            disk = self._sh("df -h | awk '$NF==\"/\"{printf \"Disk: %d/%dGB %s\", $3,$2,$5}'")
            up = self._sh("uptime")
            f.write("<p>IP Address: " + ip + "</p>")
            f.write("<p>CPU Usage: " + cpu + "</p>")
            f.write("<p>Memory Usage: " + mem + "</p>")
            f.write("<p>Disk Usage: " + disk + "</p>")
            f.write("<p>Uptime: " + up + "</p>")
            f.write("</body></html>")
        self.path = '/index.html'
        try:
            with open(self.path[1:]) as page:
                file_to_open = page.read()
            self.send_response(200)
        except OSError:
            # Narrowed from a bare except: only a missing/unreadable file
            # should yield a 404; programming errors must surface.
            file_to_open = "File not found"
            self.send_response(404)
        self.end_headers()
        self.wfile.write(file_to_open.encode("utf-8"))
# Bind the stats handler to the Pi's hard-coded LAN address on port 8080.
httpd = HTTPServer(('192.168.0.16', 8080), Serv)
httpd.serve_forever() | StarcoderdataPython |
748 | <filename>scripts/run_rbf_comparison_car_air_top5.py
# -*- coding: utf-8 -*-
"""
Created on Tue Mar 19 16:26:35 2019
@author: Administrator
"""
# Forked from run_rbf_comparison.py
from __future__ import division
from __future__ import print_function
from __future__ import absolute_import
from __future__ import unicode_literals
import math
import copy
import numpy as np
import pandas as pd
import sklearn.linear_model as linear_model
import sklearn.preprocessing as preprocessing
import scipy
import scipy.linalg as slin
import scipy.sparse.linalg as sparselin
import scipy.sparse as sparse
import random
import sys
sys.path.append("C:/Tang/influence-release-master") #设置自定义包的搜索路径
from load_vehicles import load_vehicles
import tensorflow as tf
from tensorflow.contrib.learn.python.learn.datasets import base
from sklearn.metrics.pairwise import rbf_kernel
from influence.inceptionModel import BinaryInceptionModel
from influence.smooth_hinge import SmoothHinge
from influence.binaryLogisticRegressionWithLBFGS import BinaryLogisticRegressionWithLBFGS
import influence.dataset as dataset
from influence.dataset import DataSet
from influence.dataset_poisoning import generate_inception_features
#%%
def get_Y_pred_correct_inception(model):
    """Return, for each test example, the probability the model assigns to
    that example's true class."""
    labels = model.data_sets.test.labels
    # Labels stored as {-1, +1} are remapped onto {0, 1} class indices.
    if np.min(labels) < -0.5:
        labels = (np.copy(labels) + 1) / 2
    preds = model.sess.run(model.preds, feed_dict=model.all_test_feed_dict)
    return np.array([preds[i, int(label)] for i, label in enumerate(labels)],
                    dtype=float)
# Experiment configuration: binary car-vs-air task with a small training
# set and a larger held-out test set per class.
num_classes = 2
num_train_ex_per_class = 40
num_test_ex_per_class = 300
dataset_name = 'carair_%s_%s' % (num_train_ex_per_class, num_test_ex_per_class)
image_data_sets = load_vehicles(
    num_train_ex_per_class=num_train_ex_per_class,
    num_test_ex_per_class=num_test_ex_per_class)
weight_decay = 0.001
initial_learning_rate = 0.001
keep_probs = None
decay_epochs = [1000, 10000]
#%%
### Generate kernelized feature vectors
X_train = image_data_sets.train.x
X_test = image_data_sets.test.x
# Remap {0, 1} labels to {-1, +1} for the hinge-loss models below.
Y_train = np.copy(image_data_sets.train.labels) * 2 - 1
Y_test = np.copy(image_data_sets.test.labels) * 2 - 1
num_train = X_train.shape[0]
num_test = X_test.shape[0]
X_stacked = np.vstack((X_train, X_test))
gamma = 0.05
# NOTE(review): weight_decay is redefined here (0.001 above, 0.0001 now);
# confirm which value the RBF experiment is meant to use.
weight_decay = 0.0001
# RBF kernel over train+test jointly; bandwidth scales with train-set size.
K = rbf_kernel(X_stacked, gamma = gamma / num_train)
# =============================================================================
# L = slin.cholesky(K, lower=True)
# L_train = L[:num_train, :num_train]
# L_test = L[num_train:, :num_train]
# =============================================================================
# Split the joint kernel matrix into train-train and test-train blocks,
# which serve as the precomputed feature representation for the RBF model.
K_train = K[:num_train, :num_train]
K_test = K[num_train:, :num_train]
### Compare top 5 influential examples from each network
test_idx = 0
## RBF
input_channels = 1
weight_decay = 0.001
batch_size = num_train
initial_learning_rate = 0.001
keep_probs = None
max_lbfgs_iter = 1000
use_bias = False
decay_epochs = [1000, 10000]
tf.reset_default_graph()
X_train = image_data_sets.train.x
Y_train = image_data_sets.train.labels * 2 - 1
train = DataSet(K_train, Y_train)
test = DataSet(K_test, Y_test)
data_sets = base.Datasets(train=train, validation=None, test=test)
input_dim = data_sets.train.x.shape[1]
# Train with hinge
print('Train rbf with hinge...')
rbf_model = SmoothHinge(
    temp=0,
    use_bias=use_bias,
    input_dim=input_dim,
    weight_decay=weight_decay,
    num_classes=num_classes,
    batch_size=batch_size,
    data_sets=data_sets,
    initial_learning_rate=initial_learning_rate,
    keep_probs=keep_probs,
    decay_epochs=decay_epochs,
    mini_batch=False,
    train_dir='output7',
    log_dir='log',
    model_name='carair_rbf_hinge_t-0')
rbf_model.train()
hinge_W = rbf_model.sess.run(rbf_model.params)[0]
# Then load weights into smoothed version
# (temp=0.001 gives a differentiable hinge so influence can be computed).
print('Load weights into smoothed version...')
tf.reset_default_graph()
rbf_model = SmoothHinge(
    temp=0.001,
    use_bias=use_bias,
    input_dim=input_dim,
    weight_decay=weight_decay,
    num_classes=num_classes,
    batch_size=batch_size,
    data_sets=data_sets,
    initial_learning_rate=initial_learning_rate,
    keep_probs=keep_probs,
    decay_epochs=decay_epochs,
    mini_batch=False,
    train_dir='output7',
    log_dir='log',
    model_name='car_air_rbf_hinge_t-0.001')
params_feed_dict = {}
params_feed_dict[rbf_model.W_placeholder] = hinge_W
rbf_model.sess.run(rbf_model.set_params_op, feed_dict=params_feed_dict)
# Influence of every training point on the loss at test point 0.
rbf_predicted_loss_diffs = rbf_model.get_influence_on_test_loss(
    [test_idx],
    np.arange(len(rbf_model.data_sets.train.labels)),
    force_refresh=True)
#%%
## Inception
dataset_name = 'carair_40_300'
test_idx = 0
# Generate inception features
print('Generate inception features...')
img_side = 299
num_channels = 3
num_train_ex_per_class = 40
num_test_ex_per_class = 300
batch_size = 20 #TODO: adjust according to the configuration
# reset_default_graph resets the current thread so everything defined in the
# tf session is discarded, i.e. it starts a fresh neural-network session.
tf.reset_default_graph()
full_model_name = '%s_inception' % dataset_name
# The statement below defines a binary Inception classifier.
full_model = BinaryInceptionModel(
    img_side=img_side,
    num_channels=num_channels,
    weight_decay=weight_decay,
    num_classes=num_classes,
    batch_size=batch_size,
    data_sets=image_data_sets,
    initial_learning_rate=initial_learning_rate,
    keep_probs=keep_probs,
    decay_epochs=decay_epochs,
    mini_batch=True,
    train_dir='output9',
    log_dir='log',
    model_name=full_model_name)
# The code below uses Inception's convolutional layers to generate features.
train_inception_features_val = generate_inception_features(
    full_model,
    image_data_sets.train.x,
    image_data_sets.train.labels,
    batch_size=batch_size)
test_inception_features_val = generate_inception_features(
    full_model,
    image_data_sets.test.x,
    image_data_sets.test.labels,
    batch_size=batch_size)
train = DataSet(
    train_inception_features_val,
    image_data_sets.train.labels)
test = DataSet(
    test_inception_features_val,
    image_data_sets.test.labels)
# train_f = np.load('G:/output/%s_inception_features_new_train.npz' % dataset_name)
# train = DataSet(train_f['inception_features_val'], train_f['labels'])
# test_f = np.load('G:/output/%s_inception_features_new_test.npz' % dataset_name)
# test = DataSet(test_f['inception_features_val'], test_f['labels'])
validation = None
# The code above runs the Inception model up to (but excluding) the fully
# connected layer, so each image yields a 2048-dimensional feature vector.
data_sets = base.Datasets(train=train, validation=validation, test=test)
# train_f = np.load('G:/output/%s_inception_features_new_train.npz' % dataset_name)
# train = DataSet(train_f['inception_features_val'], train_f['labels'])
# test_f = np.load('G:/output/%s_inception_features_new_test.npz' % dataset_name)
# test = DataSet(test_f['inception_features_val'], test_f['labels'])
# validation = None
# data_sets = base.Datasets(train=train, validation=validation, test=test)
# The code below fits a binary logistic regression on the Inception
# convolutional features, replacing the network's own FC head.
print('Train logistic regression after inception...')
input_dim = 2048
weight_decay = 0.001
batch_size = 20
initial_learning_rate = 0.001
keep_probs = None
decay_epochs = [1000, 10000]
max_lbfgs_iter = 1000
num_classes = 2
tf.reset_default_graph()
inception_model = BinaryLogisticRegressionWithLBFGS(
    input_dim=input_dim,
    weight_decay=weight_decay,
    max_lbfgs_iter=max_lbfgs_iter,
    num_classes=num_classes,
    batch_size=batch_size,
    data_sets=data_sets,
    initial_learning_rate=initial_learning_rate,
    keep_probs=keep_probs,
    decay_epochs=decay_epochs,
    mini_batch=False,
    train_dir='output9',
    log_dir='log',
    model_name='%s_inception_onlytop' % dataset_name)
inception_model.train()
# =============================================================================
# inception_predicted_loss_diffs = inception_model.get_influence_on_test_loss(
# [test_idx],
# np.arange(len(inception_model.data_sets.train.labels)),
# force_refresh=True)
#
# x_test = X_test[test_idx, :]
# y_test = Y_test[test_idx]
#
#
# distances = dataset.find_distances(x_test, X_train)
# flipped_idx = Y_train != y_test
# rbf_margins_test = rbf_model.sess.run(rbf_model.margin, feed_dict=rbf_model.all_test_feed_dict)
# rbf_margins_train = rbf_model.sess.run(rbf_model.margin, feed_dict=rbf_model.all_train_feed_dict)
# inception_Y_pred_correct = get_Y_pred_correct_inception(inception_model)
#
#
# np.savez(
# 'output7/rbf_carair_results_%s' % test_idx,
# test_idx=test_idx,
# distances=distances,
# flipped_idx=flipped_idx,
# rbf_margins_test=rbf_margins_test,
# rbf_margins_train=rbf_margins_train,
# inception_Y_pred_correct=inception_Y_pred_correct,
# rbf_predicted_loss_diffs=rbf_predicted_loss_diffs,
# inception_predicted_loss_diffs=inception_predicted_loss_diffs
# )
# =============================================================================
#%%
print('Save results...')
#rand_test = random.sample(range(1, 600),50)
#np.savez('output7/rand_test_point', rand_test=rand_test)
# For every test point, compute per-training-point influence scores under
# both models and archive everything needed for the comparison plots.
for test_idx in range(1, 600):
    rbf_predicted_loss_diffs = rbf_model.get_influence_on_test_loss(
        [test_idx],
        np.arange(len(rbf_model.data_sets.train.labels)),
        force_refresh=True)
    inception_predicted_loss_diffs = inception_model.get_influence_on_test_loss(
        [test_idx],
        np.arange(len(inception_model.data_sets.train.labels)),
        force_refresh=True)
    x_test = X_test[test_idx, :]
    y_test = Y_test[test_idx]
    distances = dataset.find_distances(x_test, X_train)
    # Training points whose label differs from this test point's label.
    flipped_idx = Y_train != y_test
    # NOTE(review): the margins and inception_Y_pred_correct below do not
    # depend on test_idx; recomputing them every iteration is wasted work
    # and could be hoisted out of the loop.
    rbf_margins_test = rbf_model.sess.run(rbf_model.margin, feed_dict=rbf_model.all_test_feed_dict)
    rbf_margins_train = rbf_model.sess.run(rbf_model.margin, feed_dict=rbf_model.all_train_feed_dict)
    inception_Y_pred_correct = get_Y_pred_correct_inception(inception_model)
    np.savez(
        'output9/rbf_carair_results_%s' % test_idx,
        test_idx=test_idx,
        distances=distances,
        flipped_idx=flipped_idx,
        rbf_margins_test=rbf_margins_test,
        rbf_margins_train=rbf_margins_train,
        inception_Y_pred_correct=inception_Y_pred_correct,
        rbf_predicted_loss_diffs=rbf_predicted_loss_diffs,
        inception_predicted_loss_diffs=inception_predicted_loss_diffs
        )
)
| StarcoderdataPython |
160854 | import tensorflow as tf
from tfLego.model.Model import Model
class NeuralNetwork(Model):
def __init__(self, *args, **kwargs):
super().__init__(loss=tf.losses.mean_squared_error, *args, **kwargs)
| StarcoderdataPython |
135744 | <reponame>al-arz/the-tale<gh_stars>10-100
import smart_imports
smart_imports.all()
urlpatterns = [django_urls.url(r'^tokens/', django_urls.include((old_views.resource_patterns(views.TokensResource), 'tokens')))]
| StarcoderdataPython |
1678368 | <reponame>oferbaharav/lambdata
import unittest
import pandas as pd
from df_utils import check_dataframe_na
class TestDfUtils(unittest.TestCase):
    """Unit tests for df_utils.check_dataframe_na."""

    def test_check_dataframe_na(self):
        # A frame without missing values is reported as NA-free.
        clean = pd.DataFrame({'a': [0, 1, 2], 'b': [1, 1, 1]})
        self.assertFalse(check_dataframe_na(clean))
        # Introducing NaNs flips the result.
        dirty = pd.DataFrame({'a': [0, 1, float('nan')], 'b': [1, 1, float('nan')]})
        self.assertTrue(check_dataframe_na(dirty))
if __name__ == '__main__':
    # Allow running this test module directly with `python <file>`.
    unittest.main()
| StarcoderdataPython |
4802456 | <filename>quizmake/__init__.py<gh_stars>1-10
# !/usr/bin/env python3
# -*- coding: utf-8 -*-
"""Initiate the file."""
__author__ = "jnguyen1098"
__copyright__ = "Copyright 2020, jnguyen1098"
__credits__ = ["jnguyen1098"]
__license__ = "MIT"
__maintainer__ = "jnguyen1098"
__status__ = "Planning"
__version__ = "0.1.6"
# from .core import main
# from .corpus import Corpus, corn
# __all__ = ["Corpus", "corn", "main"]
| StarcoderdataPython |
145420 | <reponame>headstrongsolutions/Jarvis_Screen
import RPi.GPIO as GPIO
class transistor_switch:
    """Drives a transistor switch wired to BCM pin 13."""
    # describes the type
    typeDescription = 'Transistor Switch on pin 13 - initally set to on'

    def __init__(self):
        self.switch_pin = 13
        self.switch_state = True
        GPIO.setmode(GPIO.BCM)
        GPIO.setup(self.switch_pin, GPIO.OUT)
        # Bug fix: actually drive the pin to the advertised initial "on"
        # state; previously only the attribute was set and the output pin
        # stayed at its post-setup (low) level.
        GPIO.output(self.switch_pin, self.switch_state)

    def switch(self, switch_state):
        """Set the switch to *switch_state* (None re-asserts the current
        state) and return the resulting state."""
        if switch_state is not None:
            self.switch_state = switch_state
        GPIO.output(self.switch_pin, self.switch_state)
        return self.switch_state
import time
# Manual smoke test: toggle the switch on/off/on with 3 s pauses, printing
# the reported state after each transition.
screen = transistor_switch()
screen.switch(True)
print(screen.switch_state)
time.sleep(3)
screen.switch(False)
print(screen.switch_state)
time.sleep(3)
screen.switch(True)
print(screen.switch_state)
time.sleep(3)
GPIO.cleanup() | StarcoderdataPython |
4822562 | <filename>introduction-to-data-visualization-in-python/1. Customizing plots/script_8.py
import pandas as pd
# Load the per-year percentages of bachelor's degrees earned by women.
percent_bachelors_degrees_women_usa = pd.read_csv('datasets/percent-bachelors-degrees-women-usa.csv')
year = percent_bachelors_degrees_women_usa['Year']
physical_sciences = percent_bachelors_degrees_women_usa['Physical Sciences']
computer_science = percent_bachelors_degrees_women_usa['Computer Science']
# NOTE(review): `health` and `education` are extracted but never plotted here.
health = percent_bachelors_degrees_women_usa['Health Professions']
education = percent_bachelors_degrees_women_usa['Education']
# Import matplotlib.pyplot
import matplotlib.pyplot as plt
# Compute the maximum enrollment of women in Computer Science: cs_max
cs_max = computer_science.max()
# Calculate the year in which there was maximum enrollment of women in Computer Science: yr_max
yr_max = year[computer_science.argmax()]
# Plot with legend as before
plt.plot(year, computer_science, color='red', label='Computer Science')
plt.plot(year, physical_sciences, color='blue', label='Physical Sciences')
plt.legend(loc='lower right')
# Add a black arrow annotation
plt.annotate('Maximum', xy=(yr_max, cs_max), xytext=(yr_max+5, cs_max+5), arrowprops=dict(facecolor='black'))
# Add axis labels and title
plt.xlabel('Year')
plt.ylabel('Enrollment (%)')
plt.title('Undergraduate enrollment of women')
plt.show() | StarcoderdataPython |
1717204 | # -*- coding: utf-8 -*-
__author__ = '<NAME>'
__email__ = '<EMAIL>'
__version__ = '0.1.0'
from .h5shelve import open
| StarcoderdataPython |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.