## Problem: Finding Numbers in a Haystack
import re

# The data file is expected in the same directory as this script
num_all_list = []
with open("regex_sum_42.txt") as textfile_handle:
    # Numbers can appear anywhere in a line, and a line can hold any
    # number of them (including none)
    for line in textfile_handle:
        line = line.rstrip()
        # Extract every run of digits from the line
        num_line_list = re.findall('[0-9]+', line)
        # Skip lines without any number
        if len(num_line_list) == 0:
            continue
        # Convert the extracted strings to integers and collect them
        for num in num_line_list:
            num_all_list.append(int(num))
# Sum all integers found across the file
sum_num_all = sum(num_all_list)
print(sum_num_all)
# Example: http://py4e-data.dr-chuck.net/regex_sum_42.txt
# (There are 90 values with a sum=445833)
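The same computation can also be collapsed into a single pass; a minimal equivalent sketch, assuming the same input file:

import re

# Sum every run of digits in the file in one generator expression
with open("regex_sum_42.txt") as handle:
    print(sum(int(num) for line in handle for num in re.findall('[0-9]+', line)))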
## PyQt5 Widget: 7-Day Linear Regression Forecast Chart
from numpy import asarray
from datetime import datetime, timedelta
from PyQt5.QtCore import Qt
from PyQt5.QtChart import QChart, QLineSeries, QBarCategoryAxis, QValueAxis
from PyQt5.QtGui import QPainter
from core import AppCore
from widget.GeometryRestoreWidget import GeometryRestoreWidget
from gen.ui_AnalysisWidget import Ui_AnalysisWidget
from qt.ChartWidget import ChartWidget
from widget.LinearTestWidget import LinearTestWidget
class AnalysisWidget(GeometryRestoreWidget):
    """
    Widget to display analysis graphs from
    transformations performed on DB data
    """
    core = AppCore()
    ui = None
    linear_analysis_widget = None
    linear_test_widget = None
    linear_analysis_chart = None
    linear_analysis_series = QLineSeries()

    def __init__(self, parent=None):
        """
        Create analysis widget
        :param parent: Parent widget
        """
        # Restore geometry
        super().__init__("AnalysisWidget", parent)
        # Load UI
        self.ui = Ui_AnalysisWidget()
        self.ui.setupUi(self)
        # Setup analysis widget
        self.linear_analysis_widget = ChartWidget()
        # Setup analysis chart
        self.linear_analysis_chart = QChart()
        self.linear_analysis_chart.setTheme(QChart.ChartThemeBlueCerulean)
        self.linear_analysis_chart.setBackgroundVisible(False)
        self.linear_analysis_chart.setAnimationOptions(QChart.SeriesAnimations)
        self.linear_analysis_chart.legend().setVisible(True)
        self.linear_analysis_chart.legend().setAlignment(Qt.AlignBottom)
        self.linear_analysis_widget.ui.chartView.setRenderHint(QPainter.Antialiasing)
        self.linear_analysis_widget.ui.chartView.setChart(self.linear_analysis_chart)
        # Add to display
        self.ui.linearRegTab.layout().addWidget(self.linear_analysis_widget)
        # Create test widget
        self.linear_test_widget = LinearTestWidget()
        self.ui.linearTestTab.layout().addWidget(self.linear_test_widget)
        # Update analysis from test model config changes
        self.linear_test_widget.model_updated.connect(self.update_linear_analysis)

    def update_linear_analysis(self):
        """
        Populate the linear analysis for N days using
        the configuration from the test widget
        """
        # Load the most recent open value (a parameterized query would be
        # preferable here to avoid SQL injection)
        query = ("SELECT open FROM time_series_daily_adjusted WHERE symbol = '{}' "
                 "ORDER BY timestamp DESC LIMIT 1".format(self.linear_test_widget.symbol))
        self.core.data_store.cursor.execute(query)
        value = self.core.data_store.cursor.fetchall()
        if len(value) == 0:
            # No data available for this symbol
            return
        # Create a chart using the values; clear any existing
        # series and axes from the chart first (on the first run
        # there are no axes yet, so iterate over whatever exists)
        if len(self.linear_analysis_chart.series()) > 0:
            self.linear_analysis_chart.removeAllSeries()
        self.linear_analysis_series = QLineSeries()
        for axis in self.linear_analysis_chart.axes(Qt.Horizontal) + self.linear_analysis_chart.axes(Qt.Vertical):
            self.linear_analysis_chart.removeAxis(axis)
        # Predict 7 days ahead using the model generated
        # through the configuration widget for training and
        # test, starting with the current open value
        value = value[0][0]
        n = 0
        categories = []
        max_value = value
        min_value = value
        self.linear_analysis_series.append(n, value)
        categories.append((datetime.utcnow() + timedelta(days=n)).strftime("%Y-%m-%d"))
        while n < 7:
            n += 1
            # Feed each prediction back in as the next day's input
            prediction = self.linear_test_widget.model.predict(asarray(value).reshape(-1, 1))
            value = prediction.flatten()[0]
            categories.append((datetime.utcnow() + timedelta(days=n)).strftime("%Y-%m-%d"))
            self.linear_analysis_series.append(n, value)
            if value > max_value:
                max_value = value
            if value < min_value:
                min_value = value
        # Series names
        self.linear_analysis_series.setName("Forecast close values")
        self.linear_analysis_chart.setTitle(self.linear_test_widget.symbol + " Linear regression 7-day forecast")
        # Add series
        self.linear_analysis_chart.addSeries(self.linear_analysis_series)
        # Axis setup
        x_axis = QBarCategoryAxis()
        x_axis.setTitleText("Date")
        x_axis.setLabelsAngle(-90)
        x_axis.setCategories(categories)
        self.linear_analysis_chart.addAxis(x_axis, Qt.AlignBottom)
        self.linear_analysis_series.attachAxis(x_axis)
        y_axis = QValueAxis()
        y_axis.setLabelFormat("%f")
        y_axis.setTitleText("Value (USD)")
        pad = max_value - min_value
        y_axis.setRange(min_value - pad, max_value + pad)
        self.linear_analysis_chart.addAxis(y_axis, Qt.AlignLeft)
        self.linear_analysis_series.attachAxis(y_axis)

    def update_analysis(self, symbol):
        """
        Update the analysis configuration widget to let
        the user dynamically configure the parameters to
        use for linear regression training and display
        the test data
        """
        # Update test/train display
        self.linear_test_widget.update_symbol(symbol)
        # Perform initial analysis
        self.update_linear_analysis()
## Generated Protocol Buffer Module (task.proto)
# -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: task.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
from easy_command_sdk.model.inspection import user_or_user_group_pb2 as easy__command__sdk_dot_model_dot_inspection_dot_user__or__user__group__pb2
DESCRIPTOR = _descriptor.FileDescriptor(
name='task.proto',
package='inspection',
syntax='proto3',
serialized_options=_b('ZDgo.easyops.local/contracts/protorepo-models/easyops/model/inspection'),
serialized_pb=_b('\n\ntask.proto\x12\ninspection\x1a:easy_command_sdk/model/inspection/user_or_user_group.proto\"\xc8\x04\n\x0eInspectionTask\x12\x18\n\x10inspectionTaskId\x18\x01 \x01(\t\x12\x0c\n\x04name\x18\x02 \x01(\t\x12\x13\n\x0bisAllNotify\x18\x03 \x01(\x08\x12\x1c\n\x14notifyPassComparator\x18\x04 \x01(\t\x12\x13\n\x0bnotifyScore\x18\x05 \x01(\x02\x12-\n\x04\x61rgs\x18\x06 \x03(\x0b\x32\x1f.inspection.InspectionTask.Args\x12\x39\n\nnotifyUser\x18\x07 \x01(\x0b\x32%.inspection.InspectionUserOrUserGroup\x12>\n\x0fnotifyUserGroup\x18\x08 \x01(\x0b\x32%.inspection.InspectionUserOrUserGroup\x12\x10\n\x08taskType\x18\t \x01(\t\x12\x1a\n\x12performanceTargets\x18\n \x01(\t\x12\x17\n\x0fqueryStrategyId\x18\x0b \x01(\t\x12\x15\n\rtaskScheduler\x18\x0c \x01(\t\x12\x33\n\x07targets\x18\r \x03(\x0b\x32\".inspection.InspectionTask.Targets\x12\x0c\n\x04memo\x18\x0e \x01(\t\x12\x12\n\ntemplateId\x18\x0f \x01(\t\x12\x14\n\x0ctemplateName\x18\x10 \x01(\t\x1a\x32\n\x04\x41rgs\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t\x12\x0e\n\x06source\x18\x03 \x01(\t\x1a\x1d\n\x07Targets\x12\x12\n\ninstanceId\x18\x01 \x01(\tBFZDgo.easyops.local/contracts/protorepo-models/easyops/model/inspectionb\x06proto3')
,
dependencies=[easy__command__sdk_dot_model_dot_inspection_dot_user__or__user__group__pb2.DESCRIPTOR,])
_INSPECTIONTASK_ARGS = _descriptor.Descriptor(
name='Args',
full_name='inspection.InspectionTask.Args',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='key', full_name='inspection.InspectionTask.Args.key', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='value', full_name='inspection.InspectionTask.Args.value', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='source', full_name='inspection.InspectionTask.Args.source', index=2,
number=3, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=590,
serialized_end=640,
)
_INSPECTIONTASK_TARGETS = _descriptor.Descriptor(
name='Targets',
full_name='inspection.InspectionTask.Targets',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='instanceId', full_name='inspection.InspectionTask.Targets.instanceId', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=642,
serialized_end=671,
)
_INSPECTIONTASK = _descriptor.Descriptor(
name='InspectionTask',
full_name='inspection.InspectionTask',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='inspectionTaskId', full_name='inspection.InspectionTask.inspectionTaskId', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='name', full_name='inspection.InspectionTask.name', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='isAllNotify', full_name='inspection.InspectionTask.isAllNotify', index=2,
number=3, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='notifyPassComparator', full_name='inspection.InspectionTask.notifyPassComparator', index=3,
number=4, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='notifyScore', full_name='inspection.InspectionTask.notifyScore', index=4,
number=5, type=2, cpp_type=6, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='args', full_name='inspection.InspectionTask.args', index=5,
number=6, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='notifyUser', full_name='inspection.InspectionTask.notifyUser', index=6,
number=7, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='notifyUserGroup', full_name='inspection.InspectionTask.notifyUserGroup', index=7,
number=8, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='taskType', full_name='inspection.InspectionTask.taskType', index=8,
number=9, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='performanceTargets', full_name='inspection.InspectionTask.performanceTargets', index=9,
number=10, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='queryStrategyId', full_name='inspection.InspectionTask.queryStrategyId', index=10,
number=11, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='taskScheduler', full_name='inspection.InspectionTask.taskScheduler', index=11,
number=12, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='targets', full_name='inspection.InspectionTask.targets', index=12,
number=13, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='memo', full_name='inspection.InspectionTask.memo', index=13,
number=14, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='templateId', full_name='inspection.InspectionTask.templateId', index=14,
number=15, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='templateName', full_name='inspection.InspectionTask.templateName', index=15,
number=16, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[_INSPECTIONTASK_ARGS, _INSPECTIONTASK_TARGETS, ],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=87,
serialized_end=671,
)
_INSPECTIONTASK_ARGS.containing_type = _INSPECTIONTASK
_INSPECTIONTASK_TARGETS.containing_type = _INSPECTIONTASK
_INSPECTIONTASK.fields_by_name['args'].message_type = _INSPECTIONTASK_ARGS
_INSPECTIONTASK.fields_by_name['notifyUser'].message_type = easy__command__sdk_dot_model_dot_inspection_dot_user__or__user__group__pb2._INSPECTIONUSERORUSERGROUP
_INSPECTIONTASK.fields_by_name['notifyUserGroup'].message_type = easy__command__sdk_dot_model_dot_inspection_dot_user__or__user__group__pb2._INSPECTIONUSERORUSERGROUP
_INSPECTIONTASK.fields_by_name['targets'].message_type = _INSPECTIONTASK_TARGETS
DESCRIPTOR.message_types_by_name['InspectionTask'] = _INSPECTIONTASK
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
InspectionTask = _reflection.GeneratedProtocolMessageType('InspectionTask', (_message.Message,), {
'Args' : _reflection.GeneratedProtocolMessageType('Args', (_message.Message,), {
'DESCRIPTOR' : _INSPECTIONTASK_ARGS,
'__module__' : 'task_pb2'
# @@protoc_insertion_point(class_scope:inspection.InspectionTask.Args)
})
,
'Targets' : _reflection.GeneratedProtocolMessageType('Targets', (_message.Message,), {
'DESCRIPTOR' : _INSPECTIONTASK_TARGETS,
'__module__' : 'task_pb2'
# @@protoc_insertion_point(class_scope:inspection.InspectionTask.Targets)
})
,
'DESCRIPTOR' : _INSPECTIONTASK,
'__module__' : 'task_pb2'
# @@protoc_insertion_point(class_scope:inspection.InspectionTask)
})
_sym_db.RegisterMessage(InspectionTask)
_sym_db.RegisterMessage(InspectionTask.Args)
_sym_db.RegisterMessage(InspectionTask.Targets)
DESCRIPTOR._options = None
# @@protoc_insertion_point(module_scope)
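A hedged usage sketch of the generated message class (field values are illustrative):

# task_pb2 exposes InspectionTask with nested Args and Targets messages
task = InspectionTask(name='disk check', taskType='scheduled')
task.args.add(key='threshold', value='90', source='user')
task.targets.add(instanceId='host-01')
data = task.SerializeToString()  # wire-format bytes; parse back with InspectionTask.FromString(data)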
## Redis Worker Configuration from Environment Variables
import os
DB_HOST = os.environ["REDIS_HOST"]
DB_PORT = int(os.environ["REDIS_PORT"])
DB_NAME = int(os.environ["REDIS_ID"])
DB_QUEUE = os.environ["INPUT_QUEUE"]
BATCH_SIZE = 16
SERVER_SLEEP = 0.25
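A minimal sketch of how these settings might be consumed, assuming the standard redis package (the queue-reading call is illustrative, not part of this module):

import redis

# Connect using the environment-derived settings above
db = redis.StrictRedis(host=DB_HOST, port=DB_PORT, db=DB_NAME)
# Read up to BATCH_SIZE queued items; a worker would sleep SERVER_SLEEP seconds when idle
batch = db.lrange(DB_QUEUE, 0, BATCH_SIZE - 1)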
## ROS Node: Smooth Differential-Drive Controller
#!/usr/bin/env python
import roslib
import rospy
import math
import time
import numpy as np
import os
from std_msgs.msg import Int32MultiArray
from std_msgs.msg import Int32
from rospy_tutorials.msg import Floats
from gpiozero import PWMOutputDevice
# initialize all variables
# current position
Xc = 0
Yc = 0
# final position
Xf = 0
Yf = 0
# goal angle
Theta_g = 0
# current angle
Theta_c = 0
# initial value of flag
Flag = 0
R = 3.25  # radius of wheel, cm
L = 18.5  # separation distance, cm
# define publishers
pub1 = rospy.Publisher('Wr_target_Rob3', Int32, queue_size=10)
pub2 = rospy.Publisher('Wl_target_Rob3', Int32, queue_size=10)
pub3 = rospy.Publisher('Flag_3', Int32, queue_size=10)
# get data from the callbacks (goal angle, current and final positions)
def callback1(data):
    try:
        global Theta_g
        Theta_g = data.data[0]
    except IndexError:
        pass

def callback2(data):
    global Xc, Yc, Theta_c  # c = current position
    Xc = data.data[0]
    Yc = data.data[1]
    Theta_c = data.data[2]

def callback3(data):
    global Xf, Yf  # f = final position
    Xf = data.data[0]
    Yf = data.data[1]

# run the smooth function
def callback4(data):
    smooth()

# set up the subscribers
def listener():
    rospy.init_node('SmoothController_Rob3')
    rospy.Subscriber('theta_goal_Rob3', Int32MultiArray, callback1)
    rospy.Subscriber('rob3_CurrentPose', Int32MultiArray, callback2)
    rospy.Subscriber('robot3_goal_cm', Int32MultiArray, callback3)
    rospy.Subscriber('len_route3', Int32, callback4)

def smooth():
    # get the error in the global reference frame
    if (Xf > 0) and (Yf > 0):
        global Flag
        errorX = Xf - Xc
        errorY = Yf - Yc
        error_th = Theta_c - Theta_g
        error_th_rad = error_th * (math.pi / 180)
        theta_rad = Theta_c * (math.pi / 180)
        # get error in the robot's reference frame
        gr_X = round((errorX * math.cos(theta_rad)) + (errorY * math.sin(theta_rad)), 2)
        gr_Y = round((-errorX * math.sin(theta_rad)) + (errorY * math.cos(theta_rad)), 2)
        # calculate rho and alpha
        rho = round(math.sqrt(gr_X ** 2 + gr_Y ** 2), 2)
        alpha = round(error_th_rad, 2)
        if alpha > math.pi:  # keep alpha within [-pi, pi]
            alpha = alpha - (math.pi * 2)
        # define gains
        K_rho = 0.14
        K_alpha = 0.3102
        # calculate control commands
        while (abs(gr_X) <= 4) and (abs(gr_Y) <= 4):
            print('Reached the goal')
            # if the goal is reached, set angular velocities to zero and raise the flag
            WR = 0
            WL = 0
            Flag = 1
            # publish angular velocities and the raised flag
            pub1.publish(WR)
            pub2.publish(WL)
            pub3.publish(Flag)
            # update the error
            listener()
            errorX = Xf - Xc
            errorY = Yf - Yc
            error_th = Theta_c - Theta_g
            theta_rad = Theta_c * (math.pi / 180)
            gr_X = round((errorX * math.cos(theta_rad)) + (errorY * math.sin(theta_rad)), 2)
            gr_Y = round((-errorX * math.sin(theta_rad)) + (errorY * math.cos(theta_rad)), 2)
        # reset flag
        Flag = 0
        pub3.publish(Flag)
        # calculate linear and angular velocity
        V = round(K_rho * rho, 2)
        V = max(min(15, V), 1.8)
        W = round(K_alpha * alpha, 2)
        # kinematics: differential-drive conversion, wheel angular speed = (V +/- W*L/2) / R
        WR = round(abs((V + ((W * L) / 2)) / R))  # right wheel angular velocity
        WL = round(abs((V - ((W * L) / 2)) / R))  # left wheel angular velocity
        pub1.publish(WR)
        pub2.publish(WL)
        listener()
        # print(WR, WL)
        os.system('clear')

if __name__ == '__main__':
    while not rospy.is_shutdown():
        listener()
        rospy.spin()
## Django Form: Strike Filtering
from datetime import datetime
from django import forms
from .models import Location
class StrikeFilterForm(forms.Form):
    daterange = forms.CharField(label='Date', max_length=23)
    country__name = forms.ChoiceField(label='Country', choices=())
    province = forms.CharField(label='Province', max_length=100, required=False)
    town = forms.CharField(label='City / Town', max_length=100, required=False)

    def __init__(self, *args, **kwargs):
        super(StrikeFilterForm, self).__init__(*args, **kwargs)
        country_choices = [
            (l, l) for l in Location.objects.all().values_list(
                'country__name', flat=True).distinct()]
        country_choices.append(('all', ''))
        self.fields['country__name'] = forms.ChoiceField(choices=country_choices)

    def get_values(self):
        """
        Get filter values that can be unpacked into a queryset filter.
        """
        if not self.is_valid():
            return {}
        # Only retrieve existing data.
        data = {}
        for item in self.cleaned_data:
            if self.cleaned_data[item] not in ['', None]:
                data[item] = self.cleaned_data[item]
        # Set province key
        if 'province' in data:
            data['location'] = data.pop('province')
        # Set country default value
        if data.get('country__name', '') == 'all':
            data.pop('country__name')
        return data

    def clean_daterange(self):
        """
        Parses and validates the daterange string.
        (Note: calling self.is_valid() from inside a clean_<field> method
        would be circular, so the raw cleaned value is used directly.)
        """
        error = forms.ValidationError("Date range must be 'mm/dd/yyyy - mm/dd/yyyy'.")
        daterange = self.cleaned_data['daterange']
        dates = daterange.split(' - ')
        if len(dates) != 2:
            raise error
        try:
            daterange = {
                'date__gte': datetime.strptime(dates[0], '%m/%d/%Y').date(),
                'date__lte': datetime.strptime(dates[1], '%m/%d/%Y').date(),
            }
        except ValueError:
            raise error
        return daterange
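A hedged usage sketch for a view: get_values() returns keyword arguments for a queryset filter, except that the cleaned 'daterange' value is itself a {'date__gte': ..., 'date__lte': ...} dict and has to be folded in (Strike is a hypothetical model name):

form = StrikeFilterForm(request.GET)
if form.is_valid():
    filters = form.get_values()
    filters.update(filters.pop('daterange', {}))
    strikes = Strike.objects.filter(**filters)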
## Synthetic Model Generation
"""Model generation"""
from abc import ABC
from collections import namedtuple
from copy import copy
import functools
import itertools
import numpy as np
from scipy.special import expit # pylint: disable = no-name-in-module
import sympy
from sympy.utilities.lambdify import lambdify
from synmod import constants
from synmod.aggregators import Aggregator, TabularAggregator
Polynomial = namedtuple("Polynomial", ["relevant_feature_map", "sym_polynomial_fn", "polynomial_fn"])
# pylint: disable = invalid-name
class Model(ABC):
    """Model base class"""
    def __init__(self, aggregator, polynomial, X=None):
        # pylint: disable = unused-argument
        self._aggregator = aggregator  # object to perform aggregation over time and generate feature vector
        # relevant_feature_map: mapping from frozensets containing one or more feature names to their polynomial coefficients
        self.relevant_feature_map, self.sym_polynomial_fn, self._polynomial_fn = polynomial

    @property
    def relevant_feature_names(self):
        """Convenience function to get feature names"""
        return list(functools.reduce(set.union, self.relevant_feature_map, set()))

    def predict(self, X, **kwargs):
        """Predict outputs on input instances"""

class Classifier(Model):
    """Classification model"""
    def __init__(self, aggregator, polynomial, X):
        super().__init__(aggregator, polynomial)
        assert X is not None
        self._threshold = np.median(self._polynomial_fn(self._aggregator.operate(X).transpose(), 0))

    def predict(self, X, **kwargs):
        """
        Predict output probabilities on instances in X by aggregating features over time, applying a polynomial,
        thresholding, then applying a sigmoid.

        Parameters
        ----------
        X: Matrix/tensor
            Instances to predict model outputs for
        labels: bool, optional, default False
            Flag to return output labels instead of probabilities
        noise: 1D float array, optional, default 0
            Noise term(s) to add to polynomial before applying sigmoid
        """
        labels = kwargs.get("labels", False)
        noise = kwargs.get("noise", 0)
        values = expit(self._polynomial_fn(self._aggregator.operate(X).transpose(), noise) - self._threshold)  # Sigmoid output
        if labels:
            values = (values > 0.5).astype(np.int32)
        return values

class Regressor(Model):
    """Regression model"""
    def predict(self, X, **kwargs):
        """
        Predict outputs on instances in X by aggregating features over time and applying a polynomial

        Parameters
        ----------
        X: Matrix/tensor
            Instances to predict model outputs for
        noise: 1D float array, optional, default 0
            Noise term(s) to add to polynomial
        """
        noise = kwargs.get("noise", 0)  # TODO: this is the noise multiplier
        return self._polynomial_fn(self._aggregator.operate(X).transpose(), noise)

def get_model(args, features, instances):
    """Generate and return model"""
    args = copy(args)
    args.rng = np.random.default_rng(args.seed)  # Reset RNG for consistent model independent of instances
    # Select relevant features
    relevant_features = get_relevant_features(args)
    polynomial = gen_polynomial(args, relevant_features)
    if args.synthesis_type == constants.TABULAR:
        aggregator = TabularAggregator()
    else:
        # Select time window for each feature
        windows = [feature.window for feature in features]
        for fid, _ in enumerate(features):
            relevance = "relevant" if fid in relevant_features else "irrelevant"
            args.logger.info(f"Window for {relevance} feature id {fid}: ({windows[fid][0]}, {windows[fid][1]})")
        aggregator = Aggregator([feature.aggregation_fn for feature in features], windows, instances, args.standardize_features)
    # Select model
    model_class = {constants.CLASSIFIER: Classifier, constants.REGRESSOR: Regressor}[args.model_type]
    return model_class(aggregator, polynomial, instances)

def get_window(args):
    """Randomly select appropriate window for model to operate in"""
    # TODO: allow soft-edged windows (smooth decay of influence of feature values outside window)
    right = args.sequence_length - 1  # Anchor half the windows on the right
    if args.rng.uniform() < 0.5:
        right = args.rng.choice(range(args.sequence_length // 2, args.sequence_length))
    left = args.rng.choice(range(0, right))
    return (left, right)

def gen_polynomial(args, relevant_features):
    """Generate polynomial which decides the ground truth and noisy model"""
    # Note: using sympy to build function appears to be 1.5-2x slower than erstwhile raw numpy implementation (for linear terms)
    sym_features = sympy.symbols([f"x_{x}" for x in range(args.num_features)])
    sym_noise = sympy.Symbol("beta", real=True)  # multiplier for irrelevant features in approximate model
    relevant_feature_map = {}  # map of relevant feature sets to coefficients
    # Generate polynomial expression
    # Pairwise interaction terms
    sym_polynomial_fn = 0
    sym_polynomial_fn = update_interaction_terms(args, relevant_features, relevant_feature_map, sym_features, sym_polynomial_fn)
    # Linear terms
    sym_polynomial_fn = update_linear_terms(args, relevant_features, relevant_feature_map, sym_features, sym_noise, sym_polynomial_fn)
    args.logger.info(f"Ground truth polynomial:\ny = {sym_polynomial_fn}")
    # Generate model expression
    polynomial_fn = lambdify([sym_features, sym_noise], sym_polynomial_fn, "numpy")
    return Polynomial(relevant_feature_map, sym_polynomial_fn, polynomial_fn)

def get_relevant_features(args):
    """Get set of relevant feature identifiers"""
    num_relevant_features = max(1, round(args.num_features * args.fraction_relevant_features))
    coefficients = np.zeros(args.num_features)
    coefficients[:num_relevant_features] = 1
    args.rng.shuffle(coefficients)
    relevant_features = {idx for idx in range(args.num_features) if coefficients[idx]}
    return relevant_features

def update_interaction_terms(args, relevant_features, relevant_feature_map, sym_features, sym_polynomial_fn):
    """Pairwise interaction terms for polynomial"""
    # TODO: higher-order interactions
    num_relevant_features = len(relevant_features)
    num_interactions = int(min(args.num_interactions, num_relevant_features * (num_relevant_features - 1) // 2))
    if not num_interactions:
        return sym_polynomial_fn
    potential_pairs = list(itertools.combinations(sorted(relevant_features), 2))
    potential_pairs_arr = np.empty(len(potential_pairs), dtype=object)  # np.object is removed in newer numpy
    potential_pairs_arr[:] = potential_pairs
    interaction_pairs = args.rng.choice(potential_pairs_arr, size=num_interactions, replace=False)
    for interaction_pair in interaction_pairs:
        coefficient = args.rng.uniform()
        if args.model_type == constants.CLASSIFIER:
            coefficient *= args.rng.choice([-1, 1])  # Randomly flip sign
        relevant_feature_map[frozenset(interaction_pair)] = coefficient
        sym_polynomial_fn += coefficient * functools.reduce(lambda sym_x, y: sym_x * sym_features[y], interaction_pair, 1)
    return sym_polynomial_fn

# pylint: disable = too-many-arguments
def update_linear_terms(args, relevant_features, relevant_feature_map, sym_features, sym_noise, sym_polynomial_fn):
    """Order one terms for polynomial"""
    interaction_features = set()
    for interaction in relevant_feature_map.keys():
        interaction_features.update(interaction)
    # Let half the interaction features have nonzero interaction coefficients but zero linear coefficients
    interaction_only_features = []
    if interaction_features and args.include_interaction_only_features:
        interaction_only_features = args.rng.choice(sorted(interaction_features),
                                                    len(interaction_features) // 2,
                                                    replace=False)
    linear_features = sorted(relevant_features.difference(interaction_only_features))
    coefficients = sym_noise * np.ones(args.num_features)
    coefficients[list(relevant_features)] = 1
    coefficients *= args.rng.uniform(-1, 1, size=args.num_features)
    for linear_feature in linear_features:
        relevant_feature_map[frozenset([linear_feature])] = coefficients[linear_feature]
    sym_polynomial_fn += coefficients.dot(sym_features)
    return sym_polynomial_fn
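A standalone sketch of the sympy-to-numpy pipeline that gen_polynomial relies on (symbols and coefficients here are illustrative):

import numpy as np
import sympy
from sympy.utilities.lambdify import lambdify

x = sympy.symbols('x_0 x_1')
beta = sympy.Symbol('beta', real=True)
expr = 0.5 * x[0] + beta * 0.2 * x[1]  # one relevant term, one noise-scaled term
fn = lambdify([x, beta], expr, 'numpy')
print(fn(np.array([1.0, 2.0]), 0))  # beta=0 drops the noise term, leaving 0.5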
## Lino Users Plugin: Desktop UI
# -*- coding: UTF-8 -*-
# Copyright 2011-2018 Rumma & Ko Ltd
# License: BSD (see file COPYING for details)
"""Desktop UI for this plugin.
Documentation is in :doc:`/specs/users` and :doc:`/dev/users`
"""
from __future__ import unicode_literals
from textwrap import wrap
from django.conf import settings
from django.db import models
from lino.api import dd, rt, _
from lino.core import actions
from lino.core.roles import SiteAdmin, SiteUser
from lino.core.utils import djangoname
from .choicelists import UserTypes
from .actions import SendWelcomeMail, SignIn, SignInWithSocialAuth
def mywrap(t, ls=80):
    t = '\n'.join([
        ln.strip() for ln in t.splitlines() if ln.strip()])
    return '\n'.join(wrap(t, ls))

class UserDetail(dd.DetailLayout):
    box1 = """
    username user_type:20 partner
    first_name last_name initials
    email language time_zone
    id created modified
    """

    main = """
    box1 #MembershipsByUser:20
    remarks:40 AuthoritiesGiven:20 SocialAuthsByUser:30
    """

    main_m = """
    username
    user_type
    partner
    first_name last_name
    initials
    email language time_zone
    id created modified
    remarks
    AuthoritiesGiven
    """

class UserInsertLayout(dd.InsertLayout):
    window_size = (60, 'auto')

    main = """
    username email
    first_name last_name
    partner
    language user_type
    """

class Users(dd.Table):
    #~ debug_actions  = True
    model = 'users.User'
    #~ order_by = "last_name first_name".split()
    order_by = ["username"]
    active_fields = 'partner'
    parameters = dict(
        user_type=UserTypes.field(blank=True))
    simple_parameters = ['user_type']
    #~ column_names = 'username first_name last_name is_active is_staff is_expert is_superuser *'
    column_names = 'username user_type first_name last_name *'
    detail_layout = 'users.UserDetail'
    insert_layout = UserInsertLayout()
    column_names_m = 'mobile_item *'

    @classmethod
    def render_list_item(cls, obj, ar):
        return "<p>{}</p>".format(obj.username)

    #~ @classmethod
    #~ def get_row_permission(cls,action,user,obj):
        #~ """
        #~ Only system managers may edit other users.
        #~ See also :meth:`User.disabled_fields`.
        #~ """
        #~ if not super(Users,cls).get_row_permission(action,user,obj):
            #~ return False
        #~ if user.level >= UserLevel.manager: return True
        #~ if action.readonly: return True
        #~ if user is not None and user == obj: return True
        #~ return False

class AllUsers(Users):
    required_roles = dd.login_required(SiteAdmin)
    send_welcome_email = SendWelcomeMail()

class UsersOverview(Users):
    required_roles = set([])
    column_names = 'username user_type language'
    exclude = dict(user_type='')
    sign_in = SignIn()
    # if settings.SITE.social_auth_backends is None:
    #     sign_in = SignIn()
    # else:
    #     sign_in = SignInWithSocialAuth()

class MySettings(Users):
    # use_as_default_table = False
    # hide_top_toolbar = True
    required_roles = dd.login_required()
    default_list_action_name = 'detail'
    # detail_layout = 'users.UserDetail'

    @classmethod
    def get_default_action(cls):
        return actions.ShowDetail(cls.detail_layout, hide_navigator=True)

class Authorities(dd.Table):
    required_roles = dd.login_required(SiteAdmin)
    model = 'users.Authority'

class AuthoritiesGiven(Authorities):
    required_roles = dd.login_required()
    master_key = 'user'
    label = _("Authorities given")
    column_names = 'authorized'
    auto_fit_column_widths = True

class AuthoritiesTaken(Authorities):
    required_roles = dd.login_required()
    master_key = 'authorized'
    label = _("Authorities taken")
    column_names = 'user'
    auto_fit_column_widths = True

if settings.SITE.social_auth_backends:

    try:
        import social_django
    except ImportError:
        raise Exception(
            "Sites with social_auth_backends must also install PSA "
            "into their environment: "
            "$ pip install social-auth-app-django")

    class SocialAuths(dd.Table):
        label = _("Third-party authorizations")
        required_roles = dd.login_required(SiteAdmin)
        model = 'social_django.UserSocialAuth'

    class SocialAuthsByUser(SocialAuths):
        required_roles = dd.login_required(SiteUser)
        master_key = 'user'

else:

    class SocialAuthsByUser(dd.Dummy):
        pass

class UserRoles(dd.VirtualTable):
    label = _("User roles")
    required_roles = dd.login_required(SiteAdmin)

    @classmethod
    def get_data_rows(self, ar):
        return settings.SITE.user_roles

    @dd.displayfield(_("Name"))
    def name(self, obj, ar):
        return djangoname(obj)

    @dd.displayfield(_("Description"))
    def description(self, obj, ar):
        return mywrap(obj.__doc__ or '', 40)

    @classmethod
    def setup_columns(cls):
        def w(ut):
            def func(fld, obj, ar):
                if isinstance(ut.role, obj):
                    return "☑"
                return ""
            return func
        names = []
        for ut in UserTypes.get_list_items():
            name = "ut" + ut.value
            # vf = dd.VirtualField(
            #     models.BooleanField(str(ut.value)), w(ut))
            vf = dd.VirtualField(
                dd.DisplayField(str(ut.value)), w(ut))
            cls.add_virtual_field(name, vf)
            names.append(name + ":3")
        # cls.column_names = "name:20 description:40 " + ' '.join(names)
        cls.column_names = "name:20 " + ' '.join(names)
## BAM Header Check and Repair
"""
Checks the bam header:
* to make sure all rgs have the same sample
* enforce PL to be ILLUMINA
Writes out a new header with the aliquot submitter id as the SM
and/or PL as ILLUMINA as needed.
@author: Kyle Hernandez
"""
import os
import time
import sys
import pysam
import argparse
import logging
PLATFORM = "ILLUMINA"
def main(args: argparse.Namespace) -> None:
    """
    Main wrapper for processing bam file headers.
    """
    logger.info("Extracting bam header...")
    bam = pysam.AlignmentFile(args.input_bam, mode="rb")
    try:
        pass_sm = check_samples(bam)
        pass_pl = check_platforms(bam)
        conditionally_generate_new_header(
            bam, pass_sm, pass_pl, args.aliquot_id, args.output_header
        )
    finally:
        bam.close()

def check_samples(bam: pysam.AlignmentFile) -> bool:
    """
    Checks the bam readgroups for missing SM fields and mismatched
    SMs.
    """
    samples = []
    for item in bam.header["RG"]:
        if not item.get("SM", "").strip():
            logger.warning("Unable to find sample in rg {}".format(item))
            return False
        else:
            samples.append(item["SM"])
    if len(set(samples)) != 1:
        logger.warning("Found multiple sample IDs! {}".format(set(samples)))
        return False
    return True

def check_platforms(bam: pysam.AlignmentFile) -> bool:
    """
    Checks whether the bam rgs all have PL set to PLATFORM
    """
    for item in bam.header["RG"]:
        if not item.get("PL", "").strip():
            logger.warning("Unable to find platform in rg {}".format(item))
            return False
        elif item["PL"] != PLATFORM:
            logger.warning(
                "Found readgroup with platform != '{}' - {}".format(PLATFORM, item)
            )
            return False
    return True

def conditionally_generate_new_header(
    bam: pysam.AlignmentFile,
    pass_sm: bool,
    pass_pl: bool,
    aliquot_id: str,
    out_file: str,
) -> None:
    """
    If pass_sm or pass_pl are False, generates the new bam header, otherwise does nothing.
    """
    if pass_sm and pass_pl:
        logger.info("No issues detected. No header written.")
    else:
        logger.info("Detected RG problems, will create new header.")
        fix_header = {}
        for key, vals in bam.header.items():
            if key not in fix_header:
                fix_header[key] = []
            if key == "RG":
                for item in vals:
                    if not pass_sm:
                        item["SM"] = aliquot_id
                    if not pass_pl:
                        item["PL"] = PLATFORM
                    fix_header[key].append(item)
            else:
                fix_header[key] = vals
        obam = pysam.AlignmentFile(out_file, mode="w", header=fix_header)
        obam.close()

def setup_logger():
    """
    Sets up the logger.
    """
    logger = logging.getLogger("check_bam_header")
    LoggerFormat = "[%(levelname)s] [%(asctime)s] [%(name)s] - %(message)s"
    logger.setLevel(level=logging.INFO)
    handler = logging.StreamHandler(sys.stderr)
    formatter = logging.Formatter(LoggerFormat, datefmt="%Y%m%d %H:%M:%S")
    handler.setFormatter(formatter)
    logger.addHandler(handler)
    return logger

if __name__ == "__main__":
    """
    CLI Entrypoint.
    """
    start = time.time()
    logger = setup_logger()
    logger.info("-" * 80)
    logger.info("check_bam_header_samples.py")
    logger.info("Program Args: {0}".format(" ".join(sys.argv)))
    logger.info("-" * 80)

    p = argparse.ArgumentParser(
        "Utility for checking samples in bam header and fixing if needed"
    )
    p.add_argument("--input_bam", required=True, help="Input bam file.")
    p.add_argument(
        "--aliquot_id",
        required=True,
        help="Aliquot id to use for sample name if new header is needed.",
    )
    p.add_argument(
        "--output_header",
        required=True,
        help="Output header file name if a new header is needed.",
    )
    args = p.parse_args()

    # Process
    logger.info("Processing bam file {0}...".format(args.input_bam))
    main(args)

    # Done
    logger.info("Finished, took {0} seconds.".format(time.time() - start))
## AndroidFTPBackup: Shared Helper Globals
from typing import Optional

from AndroidFTPBackup.utils import FileHelper, ConfigHelper

# Module-level helper slots; None until assigned elsewhere
configHelper: Optional[ConfigHelper] = None
fileHelper: Optional[FileHelper] = None
## Computing Tree Height Without Deep Recursion
# python3
import sys, threading
from collections import deque
def compute_height_brute_force(n, parents):
    # Replace this code with a faster implementation
    max_height = 0
    for vertex in range(n):
        height = 0
        current = vertex
        while current != -1:
            height += 1
            current = parents[current]
        max_height = max(max_height, height)
    return max_height

class Tree:
    ''' a sample class to refresh your memory about the tree data structure '''
    def __init__(self, value, children=None):
        self._value = value
        # avoid a mutable default argument: a shared default list would leak between instances
        self._children = children if children is not None else []  # a list of subtrees (recursive)

    def __str__(self):
        ans = "["
        ans += str(self._value)
        for child in self._children:
            ans += ", "
            ans += str(child)
        return ans + "]"

    @property
    def value(self):
        return self._value

    def children(self):
        for child in self._children:
            yield child

    def height(self):
        height = 1
        for child in self._children:
            height = max(height, child.height() + 1)
        return height

def compute_height_recursive(n, parents):
    ''' This function only works for trees of moderate size (around 2,000 nodes);
    on trees with more than 100,000 nodes it fails with a stack overflow.
    For large inputs, deep recursion is a bad idea, and even memoization cannot save it.
    Whenever the input data may be very large, prefer an iterative algorithm.
    '''
    X = {}  # height for each subtree, for memoization

    def build_tree_height(node):
        if node not in X:
            if node not in parents:  # a leaf
                X[node] = 1
                return X[node]
            children = []
            for node_id, node_parent in enumerate(parents):
                if node_parent == node:
                    if node_id not in X:
                        X[node_id] = build_tree_height(node_id)
                    children.append(X[node_id])
            X[node] = max(children) + 1
        return X[node]

    for node in range(n):
        if parents[node] == -1:
            root = node
        X[node] = build_tree_height(node)
    return X[root]

def compute_height_BFS(n, parents):
    ''' In fact, trees are just a special form of undirected/directed graphs, depending on how you model them:
    all the graph algorithms you have learned can be slightly modified and then applied to trees.
    For instance, computing the height/depth of a tree is pretty much the same as counting the
    total number of layers a breadth-first search needs to fully traverse the graph.
    Here we replace the tree recursion with a BFS traversal, since BFS runs in linear time.
    To apply BFS we build a tree graph and avoid any recursion, so the Tree class is not used.
    '''
    G = {}  # represent the tree graph by adjacency lists {parent: [children], ...}
    for child, parent in enumerate(parents):
        if child not in G:
            G[child] = []
        if parent == -1:
            root = child
        if parent not in G:
            G[parent] = [child]
        else:
            G[parent].append(child)
    Q = deque([root])
    layer = {root: 1}
    while Q:
        node = Q.popleft()
        for child in G[node]:
            layer[child] = layer[node] + 1
            Q.append(child)
    # print("G:", G)  # for debugging
    # print("layer:", layer)  # for debugging
    return max(layer.values())

def main():
    n = int(input())
    parents = list(map(int, input().split()))
    print(compute_height_BFS(n, parents))

# In Python, the default limit on recursion depth is rather low,
# so raise it here for this problem. Note that to take advantage
# of the bigger stack, we have to launch the computation in a new thread.
sys.setrecursionlimit(10**7)  # max depth of recursion
threading.stack_size(2**27)  # new thread will get a stack of this size
threading.Thread(target=main).start()
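A quick usage example for the expected stdin format (first line: node count; second line: each node's parent, -1 marking the root):

# echo '5
# 4 -1 4 1 1' | python this_script.py
# prints 3: node 1 is the root, node 4 is its child, and nodes 0 and 2 hang off node 4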
## Instagram Login with Selenium
import time
import pickle # To work with cookies
import json
from selenium.webdriver.support.wait import WebDriverWait
class Login():
    def __init__(self, driver, profile, password):
        self.profile = profile
        self.driver = driver
        self.password = password

    def run(self):
        self.driver.get('https://www.instagram.com/')  # open the site and log in without cookies
        self.driver.implicitly_wait(20)
        print("Logging into Instagram")
        self.driver.find_element_by_name('username').send_keys(self.profile)  # enter the username
        self.driver.find_element_by_name('password').send_keys(self.password)  # enter the password
        self.driver.find_element_by_xpath('//*[@id="loginForm"]/div/div[3]/button').click()
        print("Login successfully completed")
        time.sleep(5)
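A hedged usage sketch (the webdriver setup is illustrative; any selenium 3 driver works):

# from selenium import webdriver
# driver = webdriver.Firefox()
# Login(driver, 'my_profile', 'my_password').run()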
## Exponentiation Is Right-Associative
a = (2 ** 2)
b = (2 ** 2)
c = 2
print("a ** b ** c =", a ** b ** c)
## Floor-Plan Map Generation from 3D Meshes
# Code adapted from Fei Xia
import glob
import os
import cv2
import meshcut
import numpy as np
from tqdm import tqdm
from PIL import Image
def load_obj_np(filename_obj, normalization=False, texture_size=4, load_texture=False,
                texture_wrapping='REPEAT', use_bilinear=True):
    """Load Wavefront .obj file into numpy array

    This function only supports vertices (v x x x) and faces (f x x x).
    """
    # load vertices
    vertices = []
    with open(filename_obj) as f:
        lines = f.readlines()
    for line in lines:
        if len(line.split()) == 0:
            continue
        if line.split()[0] == 'v':
            vertices.append([float(v) for v in line.split()[1:4]])
    vertices = np.vstack(vertices).astype(np.float32)

    # load faces
    faces = []
    for line in lines:
        if len(line.split()) == 0:
            continue
        if line.split()[0] == 'f':
            vs = line.split()[1:]
            nv = len(vs)
            v0 = int(vs[0].split('/')[0])
            for i in range(nv - 2):
                v1 = int(vs[i + 1].split('/')[0])
                v2 = int(vs[i + 2].split('/')[0])
                faces.append((v0, v1, v2))
    faces = np.vstack(faces).astype(np.int32) - 1

    # load textures
    textures = None
    assert load_texture is False  # Since the block below is commented out
    # if load_texture:
    #     for line in lines:
    #         if line.startswith('mtllib'):
    #             filename_mtl = os.path.join(os.path.dirname(filename_obj), line.split()[1])
    #             textures = load_textures(filename_obj, filename_mtl, texture_size,
    #                                      texture_wrapping=texture_wrapping,
    #                                      use_bilinear=use_bilinear)
    #     if textures is None:
    #         raise Exception('Failed to load textures.')
    #     textures = textures.cpu().numpy()

    assert normalization is False  # Since the block below is commented out
    # # normalize into a unit cube centered zero
    # if normalization:
    #     vertices -= vertices.min(0)[0][None, :]
    #     vertices /= torch.abs(vertices).max()
    #     vertices *= 2
    #     vertices -= vertices.max(0)[0][None, :] / 2

    if load_texture:
        return vertices, faces, textures
    else:
        return vertices, faces

def get_hist_num_faces(obj_filepath):
    vertices, faces = load_obj_np(obj_filepath)
    z_faces = []
    weights = []
    z = np.array([0, 0, 1])
    for face in tqdm(faces):
        normal = np.cross(vertices[face[2]] - vertices[face[1]], vertices[face[1]] - vertices[face[0]])
        dist = np.dot(normal, z) / np.linalg.norm(normal)
        if dist < -0.99:
            z_faces.append(vertices[face[0]][-1])
            # weight each downward-facing triangle by its area (Heron's formula)
            a = np.linalg.norm(vertices[face[2]] - vertices[face[1]])
            b = np.linalg.norm(vertices[face[2]] - vertices[face[0]])
            c = np.linalg.norm(vertices[face[0]] - vertices[face[1]])
            s = (a + b + c) / 2
            area = (s * (s - a) * (s - b) * (s - c)) ** 0.5
            weights.append(area)
    hist = np.histogram(np.array(z_faces), bins=100, weights=np.array(weights))
    return hist

def get_floor_height(hist, n_floors=1):
    heights = []
    for i in range(n_floors):
        pos = np.where(hist[0] == np.max(hist[0]))[0][0]
        height = (hist[1][pos] + hist[1][pos + 1]) / 2.0
        hist[0][np.abs(hist[1][1:] - height) < 0.5] = 0
        heights.append(height)
    return heights

def gen_map(obj_filepath, mesh_dir, img_filename_format='floor_{}.png'):
    vertices, faces = load_obj_np(obj_filepath)
    xmin, ymin, _ = vertices.min(axis=0)
    xmax, ymax, _ = vertices.max(axis=0)
    max_length = np.max([np.abs(xmin), np.abs(ymin), np.abs(xmax), np.abs(ymax)])
    max_length = int(np.ceil(max_length))  # np.int is removed in newer numpy
    with open(os.path.join(mesh_dir, 'floors.txt')) as f:
        floors = map(float, f.readlines())
    floors = sorted(floors)
    print(floors)
    for i_floor, floor in enumerate(floors):
        z = float(floor) + 0.5
        cross_section = meshcut.cross_section(vertices, faces, plane_orig=(0, 0, z), plane_normal=(0, 0, 1))
        floor_map = np.ones((2 * max_length * 100, 2 * max_length * 100))
        for item in cross_section:
            for i in range(len(item) - 1):
                # cv2.line expects integer pixel coordinates
                x1, x2 = ((item[i:i + 2, 0] + max_length) * 100).astype(int)
                y1, y2 = ((item[i:i + 2, 1] + max_length) * 100).astype(int)
                cv2.line(floor_map, (x1, y1), (x2, y2), color=(0, 0, 0), thickness=2)
        cur_img = Image.fromarray((floor_map * 255).astype(np.uint8))
        # cur_img = Image.fromarray(np.flipud(cur_img))
        img_filename = img_filename_format.format(i_floor)
        cur_img.save(os.path.join(mesh_dir, img_filename))
        write_yaml(mesh_dir, np.array(cur_img), img_filename, 'floor_{}.yaml'.format(i_floor),
                   resolution=0.01)

def get_obj_filepath(mesh_dir):
    return mesh_dir + '/mesh_z_up.obj'

def get_n_floors(mesh_dir):
    return 1

# def get_n_floors(mesh_dir):
#     house_seg_filepaths = glob.glob(os.path.join(mesh_dir, 'house_segmentations', '*.house'))
#     assert len(house_seg_filepaths) == 1
#     with open(house_seg_filepaths[0]) as f:
#         content = f.readlines()
#     content = [x.strip() for x in content]
#     n_levels = 0
#     for line in content:
#         if line.startswith('L '):
#             n_levels += 1
#     return n_levels

def fill_template(map_filepath, resolution, origin):  # NOTE: Copied from generate_map_yaml.py
    """Return a string that contains the contents for the yaml file, filling out the blanks where
    appropriate.

    Args:
        map_filepath: Absolute path to map file (e.g. PNG).
        resolution: Resolution of each pixel in the map in meters.
        origin: (x, y, yaw) origin of the map in map coordinates.
    """
    template = """image: MAP_FILEPATH
resolution: RESOLUTION
origin: [ORIGIN_X, ORIGIN_Y, YAW]
negate: 0
occupied_thresh: 0.65
free_thresh: 0.196
"""
    template = template.replace('MAP_FILEPATH', map_filepath)
    template = template.replace('RESOLUTION', str(resolution))
    template = template.replace('ORIGIN_X', str(origin[0]))
    template = template.replace('ORIGIN_Y', str(origin[1]))
    template = template.replace('YAW', str(origin[2]))
    return template

def write_yaml(mesh_dir, map_img, map_img_filepath, yaml_filename, resolution=0.01):  # NOTE: Copied from generate_map_yaml.py
    origin_px_coord = (map_img.shape[0] / 2, map_img.shape[1] / 2)  # (row, col)
    cur_origin_map_coord = (-float(origin_px_coord[1]) * resolution,
                            float(origin_px_coord[0] - map_img.shape[0]) * resolution,
                            0.0)  # (x, y, yaw)
    yaml_content = fill_template(map_img_filepath, resolution=resolution,
                                 origin=cur_origin_map_coord)
    cur_yaml_filepath = os.path.join(mesh_dir, yaml_filename)
    print('Writing to:', cur_yaml_filepath)
    with open(cur_yaml_filepath, 'w') as f:
        f.write(yaml_content)

def generate_floorplan(mesh_dir):
    obj_filepath = get_obj_filepath(mesh_dir)
    # Generate floors.txt file
    print(mesh_dir)
    n_floors = get_n_floors(mesh_dir)  # Get number of floors
    hist = get_hist_num_faces(obj_filepath)
    hist = list(hist)
    hist[0] = np.nan_to_num(hist[0])
    hist = tuple(hist)
    heights = get_floor_height(hist, n_floors=n_floors)
    with open(os.path.join(mesh_dir, 'floors.txt'), 'w') as f:
        for height in heights:
            f.write("{}\n".format(height))
    gen_map(obj_filepath, mesh_dir)  # Generate floor maps

import sys

if __name__ == '__main__':
    generate_floorplan(sys.argv[1])
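A hedged invocation sketch: the script expects a mesh directory containing mesh_z_up.obj and writes floors.txt plus one PNG/YAML pair per floor back into it:

# python generate_floorplan.py /path/to/mesh_dir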
## Ranger Plugin: zoxide Directory Jumping
import os.path
import ranger.api
import ranger.core.fm
import ranger.ext.signals
from subprocess import Popen, PIPE
hook_init_prev = ranger.api.hook_init
def hook_init(fm):
    def zoxide_add(signal):
        path = signal.new.path
        process = Popen(["zoxide", "add", path])
        process.wait()
    fm.signal_bind("cd", zoxide_add)
    return hook_init_prev(fm)

ranger.api.hook_init = hook_init

class z(ranger.api.commands.Command):
    """
    :z
    Jump around with zoxide (z)
    """

    def execute(self):
        results = self.query(self.args[1:])
        if results and os.path.isdir(results[0]):
            self.fm.cd(results[0])

    def query(self, args):
        try:
            p = Popen(
                ["zoxide", "query"] + list(args),
                stdout=PIPE,
                stderr=PIPE
            )
            stdout, stderr = p.communicate()
            if p.returncode == 0:
                output = stdout.decode("utf-8").strip()
                if output:
                    return output.splitlines()
                self.fm.notify("zoxide: no matching directory", bad=True)
            else:
                output = stderr.decode("utf-8").strip() or "zoxide: unexpected error"
                self.fm.notify(output, bad=True)
        except Exception as e:
            self.fm.notify(e, bad=True)
        # return an empty list on any failure so callers can index safely
        return []

    def tab(self, tabnum):
        results = self.query(self.args[1:])
        return ["z {}".format(x) for x in results]
## True Division Usecases (__future__ import)
from __future__ import division
# These functions have their own module in order to be compiled with the right
# __future__ flag (and be tested alongside the 2.x legacy division operator).
def truediv_usecase(x, y):
    return x / y

def itruediv_usecase(x, y):
    x /= y
    return x
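A quick check of the semantics being exercised (values are arbitrary):

# With the __future__ import, / is true division even for ints:
# truediv_usecase(1, 2) == 0.5 (not 0, as under the 2.x legacy operator)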
## TensorFlow Eager Backprop Tests
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
import numpy as np
from tensorflow.python import pywrap_tensorflow
from tensorflow.python.eager import backprop
from tensorflow.python.eager import context
from tensorflow.python.eager import function
from tensorflow.python.eager import test
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import test_util
from tensorflow.python.layers.pooling import max_pooling3d
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import custom_gradient
from tensorflow.python.ops import embedding_ops
from tensorflow.python.ops import gradients
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn_grad # pylint: disable=unused-import
from tensorflow.python.ops import nn_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.ops import variables
from tensorflow.python.training import training
class BackpropTest(test.TestCase):

  @test_util.run_in_graph_and_eager_modes
  def testAggregateGradients(self):

    def fn(x):
      ind1 = constant_op.constant(np.array([0, 1]))
      ind2 = constant_op.constant(np.array([2, 3]))
      ind3 = constant_op.constant(np.array([1, 3]))
      # A mixture of IndexedSlices and dense tensor to aggregate.
      g1 = embedding_ops.embedding_lookup(x, ind1)
      g2 = embedding_ops.embedding_lookup(x, ind2)
      g3 = embedding_ops.embedding_lookup(x, ind3)
      g4 = math_ops.reduce_sum(x * constant_op.constant(2.0))
      return g1 * g2 * g3 * g4

    var_np = np.random.rand(4, 2).astype(np.float32)
    var = constant_op.constant(var_np)
    grad = backprop.gradients_function(fn, [0])(var)[0]
    grad = self.evaluate(ops.convert_to_tensor(grad))

    if not context.executing_eagerly():
      tf_var = array_ops.constant(var_np, dtypes.float32)
      tf_ind1 = array_ops.constant([0, 1])
      tf_ind2 = array_ops.constant([2, 3])
      tf_ind3 = array_ops.constant([1, 3])
      tf_g1 = embedding_ops.embedding_lookup(tf_var, tf_ind1)
      tf_g2 = embedding_ops.embedding_lookup(tf_var, tf_ind2)
      tf_g3 = embedding_ops.embedding_lookup(tf_var, tf_ind3)
      tf_g4 = math_ops.reduce_sum(tf_var * 2.0, reduction_indices=(0, 1))
      tf_y = tf_g1 * tf_g2 * tf_g3 * tf_g4
      tf_grad = gradients.gradients(tf_y, [tf_var])[0]
      tf_dense_grad = math_ops.unsorted_segment_sum(
          tf_grad.values, tf_grad.indices, tf_grad.dense_shape[0])
      self.assertAllClose(grad, self.evaluate(tf_dense_grad))

  def testImplicitGradWithResourceVariable(self):
    x = resource_variable_ops.ResourceVariable(
        initial_value=constant_op.constant(1.0), name='x')

    def fn():
      b = constant_op.constant(2.0)
      c = math_ops.add(x.value(), b)
      return math_ops.add(c, constant_op.constant(3.0))

    grads_and_vars = backprop.implicit_grad(fn)()
    self.assertAllEqual(grads_and_vars[0][0], 1.0)
    self.assertAllEqual(id(grads_and_vars[0][1]), id(x))

  def testGradientInsideLoop(self):
    with ops.Graph().as_default():
      v = resource_variable_ops.ResourceVariable(1.0)

      def body(_):
        _ = v + 1.0  # This reads the variable inside the loop context

        with backprop.GradientTape() as t:
          result = v * 2
        self.assertTrue(t.gradient(result, v) is not None)
        return 1.0

      control_flow_ops.while_loop(lambda i: False, body, [1.0])

  def testWhereGradient(self):
    # Note: where is special because only some of its arguments are of
    # differentiable dtypes.

    def f(x):
      return array_ops.where(x < 10, x, x * x)

    g = backprop.gradients_function(f)
    self.assertAllEqual(g(5.)[0], 1.0)
    self.assertAllEqual(g(50.)[0], 100.0)

  def testTwoTargets(self):
    with backprop.GradientTape() as t:
      x = constant_op.constant(3.0)
      y = constant_op.constant(2.0)
      t.watch([x, y])
      xx = 2 * x
      yy = 3 * y
    dx, dy = t.gradient([xx, yy], [x, y])
    self.assertAllEqual(dx, 2.0)
    self.assertAllEqual(dy, 3.0)

  def testOutputGradUsedInComputation(self):
    with backprop.GradientTape() as t:
      x = constant_op.constant(3.0)
      y = constant_op.constant(2.0)
      t.watch([x, y])
      loss = x * y
    dx, = t.gradient([loss, x], [x], output_gradients=[1.0, 2.0])
    self.assertAllEqual(dx, 4.0)

  def testDy(self):

    def f(x):
      return x

    grad_fn = backprop.gradients_function(f)
    self.assertAllEqual(2., grad_fn(1., dy=2.)[0])

  def testGradientInteger(self):

    def f(x):
      return x + x

    int_tensor = constant_op.constant(1)
    self.assertEqual(backprop.gradients_function(f)(int_tensor)[0], None)

  def testErrors(self):

    @custom_gradient.custom_gradient
    def f(x):
      def grad(_):
        raise RuntimeError('x')
      return x, grad

    # TODO(apassos) raise the right error here
    with self.assertRaises(RuntimeError):
      backprop.gradients_function(f)(constant_op.constant(1.0))

  def testGradientsFunctionInCustomGradient(self):

    @custom_gradient.custom_gradient
    def f(x):
      (y,) = backprop.gradients_function(lambda x: x * x)(x)

      def grad(dy):
        return [2 * dy]

      return y, grad

    self.assertAllEqual(f(1.0), 2.0)

  def testImplicitGradOverEmbeddingLookup(self):
    batch_size = 8
    embedding_size = 512
    vocab_size = 1000
    lrn_rate = 0.1
    random_init = random_ops.random_uniform([vocab_size, embedding_size])

    x = array_ops.ones((batch_size), dtypes.int64)
    embedding = resource_variable_ops.ResourceVariable(
        initial_value=random_init, dtype=dtypes.float32, name='embedding')

    def f():
      embedded_x = embedding_ops.embedding_lookup(embedding, x)
      return constant_op.constant(1.0, dtypes.float32) - embedded_x

    grad = backprop.implicit_grad(f)()[0][0]
    opt = training.GradientDescentOptimizer(lrn_rate)

    with ops.Graph().as_default(), self.cached_session():
      tf_x = array_ops.ones((batch_size), dtypes.int64)
      # TODO(ashankar,apassos): Change to ResourceVariable.
      tf_embedding = variables.Variable(
          random_init.numpy(), name='tf_embedding')
      tf_embedded_x = embedding_ops.embedding_lookup(tf_embedding, tf_x)
      tf_y = 1.0 - tf_embedded_x
      tf_grad = gradients.gradients(tf_y, [tf_embedding])[0]
      tf_opt = training.GradientDescentOptimizer(0.1)
      tf_embedding.initializer.run()

      self.assertAllClose(tf_grad.indices.eval(), grad.indices)
      self.assertAllClose(tf_grad.values.eval(), grad.values)

      tf_opt.apply_gradients([(tf_grad, tf_embedding)]).run()
      expected = self.evaluate(tf_embedding)
    opt.apply_gradients([(grad, embedding)])
    self.assertAllClose(expected, embedding.read_value())

  def testImplicitGradOrdering(self):
    v0 = resource_variable_ops.ResourceVariable(1.0)
    v1 = resource_variable_ops.ResourceVariable(2.0)

    def f():
      x = v1 * v1
      y = v0 * v0
      return x + y

    grads = backprop.implicit_grad(f)()
    ordered_variables = [x[1] for x in grads]
    self.assertTrue(ordered_variables[0] is v0)
    self.assertTrue(ordered_variables[1] is v1)

  def testTapeNoOpGradient(self):
    x = constant_op.constant(3.0)
    with backprop.GradientTape() as t:
      t.watch(x)
      y = x
    self.assertEqual(t.gradient(y, x).numpy(), 1.0)

  def testTapeIdentityGradientIsIdentity(self):
    x = constant_op.constant(3.0)
    with backprop.GradientTape() as t:
      t.watch(x)
      y = array_ops.identity(x)
    self.assertEqual(t.gradient(y, x).numpy(), 1.0)

  def testTapeGradientMultiTargetOneIsSource(self):
    x = constant_op.constant(2.0)
    with backprop.GradientTape() as t:
      t.watch(x)
      y = x * x
    self.assertEqual(t.gradient([x, y], x).numpy(), 5.0)

  def testTapeNoOpGradientWithMultiTargetAllSource(self):
    x = constant_op.constant(3.0)
    with backprop.GradientTape() as t:
      t.watch(x)
      y = x
    self.assertEqual(t.gradient([y, y], x).numpy(), 2.0)

  def testTapeNoOpGradientWithMultiTargetMultiSource(self):
    x = constant_op.constant(3.0)
    y = constant_op.constant(5.0)
    with backprop.GradientTape() as t:
      t.watch(x)
      t.watch(y)
      z = y * y
    self.assertAllEqual(t.gradient([x, y, z], [x, y]), [1.0, 11.0])

  def testTapeNoOpOnVariableIsIdentity(self):
    v0 = resource_variable_ops.ResourceVariable(1.0)
    with backprop.GradientTape() as t:
      y = v0.read_value()
    self.assertEqual(t.gradient(y, v0).numpy(), 1.0)

  @test_util.assert_no_new_tensors
  @test_util.assert_no_garbage_created
  def testTapeNoOpGradient2By2(self):
    a_2_by_2 = constant_op.constant(2.0, shape=[2, 2])
    with backprop.GradientTape(persistent=True) as tape:
      tape.watch(a_2_by_2)
    dy_dy = tape.gradient(a_2_by_2, [a_2_by_2])[0]
    self.assertAllEqual(dy_dy.numpy(),
                        constant_op.constant(1.0, shape=[2, 2]).numpy())

  @test_util.assert_no_new_pyobjects_executing_eagerly
  def testTapeNoOpGradientMultiTarget2By2(self):
    a_2_by_2 = constant_op.constant(2.0, shape=[2, 2])
    with backprop.GradientTape(persistent=True) as tape:
      tape.watch(a_2_by_2)
    dy_dy = tape.gradient([a_2_by_2, a_2_by_2], [a_2_by_2])[0]
    self.assertAllEqual(dy_dy.numpy(),
                        constant_op.constant(2.0, shape=[2, 2]).numpy())

  def testTapeStopRecording(self):
    with backprop.GradientTape() as t:
      x = resource_variable_ops.ResourceVariable(1.0)
      with t.stop_recording():
        y = x * x
    self.assertEqual(t.gradient(y, x), None)

  def testTapeStopStartRecording(self):
    with backprop.GradientTape(persistent=True) as t:
      x = resource_variable_ops.ResourceVariable(1.0)
x2 = x * 2 # This should be differentiated through.
with t.stop_recording():
y = x2 * x2
z = x2 * x2
self.assertEqual(t.gradient(y, x2), None)
# If the x*2 was not differentiated through, this would be 2.0, not 4.0
self.assertEqual(t.gradient(z, x2).numpy(), 4.0)
def testTapeReset(self):
with backprop.GradientTape() as t:
v = resource_variable_ops.ResourceVariable(1.0)
loss = v * v
t.reset()
loss += v * v
self.assertAllEqual(t.gradient(loss, v), 2.0)
def testAutomaticWatchedVariables(self):
with backprop.GradientTape() as t:
self.assertEqual(0, len(t.watched_variables()))
v = resource_variable_ops.ResourceVariable(1.0)
loss = v * v
self.assertAllEqual([v], t.watched_variables())
t.reset()
self.assertEqual(0, len(t.watched_variables()))
loss += v * v
self.assertAllEqual([v], t.watched_variables())
def testExplicitWatchedVariables(self):
with backprop.GradientTape() as t:
self.assertEqual(0, len(t.watched_variables()))
v = resource_variable_ops.ResourceVariable(1.0)
t.watch(v)
self.assertAllEqual([v], t.watched_variables())
t.reset()
self.assertEqual(0, len(t.watched_variables()))
t.watch(v)
self.assertAllEqual([v], t.watched_variables())
@test_util.assert_no_new_tensors
def testGradientNone(self):
def loss(x, l):
return math_ops.reduce_mean(
nn_ops.softmax_cross_entropy_with_logits(logits=x, labels=l),
constant_op.constant([0]))
logits = constant_op.constant([[0.0, 0.0]])
labels = constant_op.constant([[1.0, 0.0]])
# softmax_cross_entropy_with_logits returns two outputs and in this case the
# gradient wrt the second is None.
g, = backprop.gradients_function(loss, [0])(logits, labels)
self.assertAllEqual(g.numpy(), [[-0.5, 0.5]])
@test_util.run_in_graph_and_eager_modes
def testGradientWithinTapeBlock(self):
v1 = resource_variable_ops.ResourceVariable(1.)
self.evaluate(v1.initializer)
with backprop.GradientTape() as t:
loss = 2 * v1
grad = t.gradient(loss, v1)
self.assertAllEqual(self.evaluate(grad), 2.0)
with backprop.GradientTape(persistent=True) as t:
loss = 2 * v1
grad = t.gradient(loss, v1)
self.assertAllEqual(self.evaluate(grad), 2.0)
@test_util.run_in_graph_and_eager_modes
def testNestedSelfContexts(self):
v1 = resource_variable_ops.ResourceVariable(1.)
self.evaluate(v1.initializer)
with backprop.GradientTape() as t:
with self.assertRaises(ValueError):
with t:
pass
@test_util.assert_no_new_tensors
def testSecondGrad(self):
def first(x):
l = constant_op.constant([[0.0]])
x = nn_ops.softmax_cross_entropy_with_logits(labels=l, logits=x)
x = math_ops.reduce_sum(x, constant_op.constant([0]))
return x
def second(x):
grad = backprop.gradients_function(first, [0])(x)[0]
return math_ops.reduce_sum(grad, constant_op.constant([0]))
f = constant_op.constant([[0.1]])
grad = backprop.gradients_function(second, [0])(f)[0]
self.assertAllEqual([[0.0]], grad)
@test_util.run_in_graph_and_eager_modes
def testWatchingIsTapeLocal(self):
x1 = resource_variable_ops.ResourceVariable(2.0, trainable=False)
x2 = resource_variable_ops.ResourceVariable(2.0, trainable=False)
with backprop.GradientTape() as tape1:
with backprop.GradientTape() as tape2:
tape1.watch(x1)
tape2.watch([x1, x2])
y = x1 ** 3
z = x2 ** 2
dy, dz = tape2.gradient([y, z], [x1, x2])
d2y, d2z = tape1.gradient([dy, dz], [x1, x2])
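    # tape1 watched only x1, so d2y = d(3 * x1**2)/dx1 = 6 * x1 = 12.0 at
    # x1 = 2.0, while the gradient for the unwatched x2 is None.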
self.evaluate([x1.initializer, x2.initializer])
self.assertEqual(self.evaluate(d2y), 12.0)
self.assertIsNone(d2z)
@test_util.assert_no_new_tensors
def testMakeVJP(self):
def f(x):
return x * x
wrapped_fn = backprop.make_vjp(f, persistent=False)
result, vjp = wrapped_fn(constant_op.constant(3.0))
self.assertAllEqual(result, 9.0)
self.assertAllEqual(vjp(2.0)[0], 12.0)
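    # For f(x) = x * x at x = 3.0 the forward value is 9.0, and the VJP maps
    # a cotangent v to v * df/dx, so vjp(2.0) = 2.0 * (2 * 3.0) = 12.0.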
def testPersistentMakeVJP(self):
def f(x):
return x * x
wrapped_fn = backprop.make_vjp(f, persistent=True)
_, vjp = wrapped_fn(constant_op.constant(3.0))
vjp_result1 = vjp(2.0)[0]
vjp_result2 = vjp(2.0)[0]
    self.assertAllEqual(vjp_result1, 12.0)
    self.assertAllEqual(vjp_result2, 12.0)
@test_util.assert_no_new_tensors
def testGradGrad(self):
def sq(x):
return x * x
def grad(x):
value = backprop.gradients_function(sq, [0])(x)[0]
return value
gradgrad = backprop.gradients_function(grad, [0])
self.assertAllEqual(gradgrad(constant_op.constant(3.0))[0], 2.0)
@test_util.assert_no_new_tensors
def testGradGradExp(self):
def grad(x):
value = backprop.gradients_function(math_ops.exp, [0])(x)[0]
return value
gradgrad = backprop.gradients_function(grad, [0])
self.assertAllEqual(gradgrad(constant_op.constant(0.0))[0], 1.0)
@test_util.assert_no_new_tensors
def testStopGradient(self):
grad = backprop.gradients_function(
lambda x: array_ops.stop_gradient(math_ops.argmax(x)))
self.assertAllEqual(grad([0.0])[0], None)
@test_util.assert_no_new_tensors
def testArgmax(self):
def argmax(x):
i = math_ops.argmax(x)
return array_ops.stop_gradient(i)
grad = backprop.gradients_function(argmax)
self.assertAllEqual(grad([0.0])[0], None)
@test_util.assert_no_new_tensors
def testGPU(self):
if not context.context().num_gpus():
self.skipTest('No GPUs found')
def fn(x):
with context.device('/gpu:0'):
b = constant_op.constant(2.0)
c = math_ops.add(x.gpu(), b)
        # TODO(apassos): remove cpu below by making TensorVSpace aware
        # of devices.
return math_ops.add(c, constant_op.constant(3.0)).cpu()
grad = backprop.gradients_function(fn, [0])(constant_op.constant(1.0))[0]
self.assertAllEqual(grad, 1.0)
@test_util.assert_no_new_tensors
def testGPUImplicitGrad(self):
if not context.context().num_gpus():
self.skipTest('No GPU found')
with context.device('gpu:0'):
v = resource_variable_ops.ResourceVariable(
constant_op.constant(1.0), name='v')
def f():
with context.device('gpu:0'):
return v.read_value()
self.assertEqual(
backprop.implicit_grad(f)()[0][0].cpu().numpy(), 1.0)
@test_util.assert_no_new_tensors
def testCPU(self):
def fn(x):
b = constant_op.constant(2.0)
c = math_ops.add(x, b)
return math_ops.add(c, constant_op.constant(3.0))
grad = backprop.gradients_function(fn, [0])(constant_op.constant(1.0))[0]
self.assertAllEqual(grad, 1.0)
@test_util.assert_no_new_tensors
def testTensorCopyGPU2CPU2GPU(self):
if not context.context().num_gpus():
self.skipTest('No GPUs found')
def f(a, b):
return a.cpu() + b.cpu()
with context.device('/gpu:0'):
a = constant_op.constant(1.0)
b = constant_op.constant(2.0)
grad = backprop.gradients_function(f, [0])(a, b)[0]
self.assertAllEqual(grad, 1.0)
@test_util.assert_no_new_tensors
def testEmptyParams(self):
def fn(a, b):
return a * b
x = constant_op.constant(1.0)
y = constant_op.constant(2.0)
dx, dy = backprop.gradients_function(fn)(x, y)
self.assertAllEqual(dx, y.numpy())
self.assertAllEqual(dy, x.numpy())
@test_util.assert_no_new_tensors
def testUnconnectedNone(self):
v = resource_variable_ops.ResourceVariable(
1.0, name='testUnconnectedNone')
def f():
v.read_value()
return constant_op.constant(1.0)
self.assertEqual(backprop.implicit_grad(f)()[0][0], None)
@test_util.assert_no_new_tensors
def testGradientTapeReEnterContext(self):
g = backprop.GradientTape()
with g:
x = constant_op.constant(3.0)
g.watch(x)
y = 2*x
with g:
z = 2*y
grad = g.gradient(target=z, sources=[x])
self.assertEqual(self.evaluate(grad), [4.0])
@test_util.assert_no_new_tensors
@test_util.run_in_graph_and_eager_modes
def testGradientTapeRepeatedSource(self):
with backprop.GradientTape(persistent=False) as g:
x = constant_op.constant(3.0)
g.watch(x)
y = 2 * x
grad = g.gradient(target=y, sources=[x, x])
self.assertEqual(self.evaluate(grad), [2.0, 2.0])
@test_util.assert_no_new_tensors
@test_util.run_in_graph_and_eager_modes
def testPersistentGradientTapeRepeatedSource(self):
with backprop.GradientTape(persistent=True) as g:
x = constant_op.constant(3.0)
y = constant_op.constant(5.0)
g.watch(x)
g.watch(y)
z = x * x + x * y
grad = g.gradient(target=z, sources=[x, x])
self.assertEqual(self.evaluate(grad), [11.0, 11.0])
grad = g.gradient(target=z, sources=[y, x])
self.assertEqual(self.evaluate(grad), [3.0, 11.0])
@test_util.assert_no_new_tensors
@test_util.run_in_graph_and_eager_modes
def testGradientTapeStructure(self):
with backprop.GradientTape(persistent=True) as g:
# Using different constant values because constant tensors are
      # cached, leading to a different gradient than what one might expect.
x1 = constant_op.constant(3.0)
x2 = constant_op.constant(3.1)
x3 = constant_op.constant(3.2)
g.watch(x1)
g.watch(x2)
g.watch(x3)
y = x1 + 2 * x2 + 3 * x3
self.assertEqual(self.evaluate(g.gradient(y, x1)), [1.0])
self.assertEqual(self.evaluate(g.gradient(y, (x1,))), (1.0,))
self.assertEqual(self.evaluate(g.gradient(y, (x1, x2))), (1.0, 2.0))
self.assertEqual(self.evaluate(g.gradient(y, [(x1, x2), (x2, x3)])),
[(1.0, 2.0), (2.0, 3.0)])
self.assertEqual(self.evaluate(g.gradient(y, (x1, x2, [x1, x3]))),
(1.0, 2.0, [1.0, 3.0]))
self.assertEqual(self.evaluate(g.gradient(y, [x1, {'x2': x2, 'x3': x3}])),
[1.0, {'x2': 2.0, 'x3': 3.0}])
@test_util.assert_no_new_tensors
@test_util.run_in_graph_and_eager_modes
def testGradientTape(self):
with backprop.GradientTape() as g:
x = constant_op.constant(3.0)
g.watch(x)
y = x * x
with backprop.GradientTape() as gg:
gg.watch(y)
z = 2 * y
inner_grad = gg.gradient(z, [y])[0]
self.assertEqual(self.evaluate(inner_grad), 2.0)
y += inner_grad
grad = g.gradient(y, [x])[0]
self.assertEqual(self.evaluate(grad), 6.0)
@test_util.assert_no_new_tensors
@test_util.run_in_graph_and_eager_modes
  def testGradientTapeCalledOnConstantTarget(self):
with backprop.GradientTape() as g:
x = variables.Variable([3.0])
y = variables.Variable([2.0])
with self.assertRaisesRegexp(
ValueError,
'GradientTape.gradient is not supported for variable targets.'):
g.gradient(x, y)
@test_util.run_in_graph_and_eager_modes
def testGradientTapeWithCond(self):
x = constant_op.constant(3.0)
def true_fn():
return x
def false_fn():
return x * x
with backprop.GradientTape() as g:
g.watch(x)
y = control_flow_ops.cond(x < x, true_fn, false_fn)
if not context.executing_eagerly():
with self.assertRaisesRegexp(NotImplementedError, 'tf.gradients'):
dy = g.gradient(y, [x])[0]
else:
dy = g.gradient(y, [x])[0]
self.assertEqual(self.evaluate(dy), 6.0)
@test_util.run_in_graph_and_eager_modes
def testGradientTapeWithWhileLoop(self):
i = constant_op.constant(1)
x = constant_op.constant(2.)
def cond(i, _):
return i < 3
def body(i, x):
return i + 1, x * 2
with backprop.GradientTape() as g:
g.watch([x])
_, y = control_flow_ops.while_loop(cond, body, [i, x])
if not context.executing_eagerly():
with self.assertRaisesRegexp(NotImplementedError, 'tf.gradients'):
dy = g.gradient(y, [x])[0]
else:
dy = g.gradient(y, [x])[0]
self.assertEqual(self.evaluate(dy), 4.0)
@test_util.assert_no_new_tensors
def testGradientTapeGradientCalledMultipleTimes(self):
with backprop.GradientTape() as g:
x = constant_op.constant(3.0)
g.watch(x)
y = x * x
z = y * y
g.gradient(z, [x])
with self.assertRaisesRegexp(
RuntimeError, 'GradientTape.gradient can only be called once'):
g.gradient(y, [x])
@test_util.assert_no_new_tensors
@test_util.run_in_graph_and_eager_modes
def testPersistentTape(self):
with backprop.GradientTape(persistent=True) as g:
x = constant_op.constant(3.0)
g.watch(x)
y = x * x
z = y * y
dz_dx = g.gradient(z, [x])[0]
self.assertEqual(self.evaluate(dz_dx), 4 * 3 * 3 * 3)
dy_dx = g.gradient(y, [x])[0]
self.assertEqual(self.evaluate(dy_dx), 2 * 3)
del g
@test_util.assert_no_new_tensors
@test_util.run_in_graph_and_eager_modes
def testHigherOrderGradient(self):
with backprop.GradientTape(persistent=True) as g:
x = constant_op.constant(3.0)
g.watch(x)
y = x ** 3 # y := x^3
dy_dx = g.gradient(y, x) # dy/dx := 3x^2
d2y_dx2 = g.gradient(dy_dx, x) # d2y/dx2 := 6x
d3y_dx3 = g.gradient(d2y_dx2, x) # d3y/dx3 := 6
x = 3
self.assertEqual(self.evaluate(y), x ** 3)
self.assertEqual(self.evaluate(dy_dx), 3 * x ** 2)
self.assertEqual(self.evaluate(d2y_dx2), 6 * x)
self.assertEqual(self.evaluate(d3y_dx3), 6)
del g
@test_util.assert_no_new_tensors
@test_util.run_in_graph_and_eager_modes
def testPersistentNestedTape(self):
with backprop.GradientTape(persistent=True) as g:
x = constant_op.constant(3.0)
g.watch(x)
y = x * x
with backprop.GradientTape(persistent=True) as gg:
gg.watch(y)
z = 2 * y
for _ in range(2):
inner_grad = gg.gradient(z, [y])[0]
self.assertEqual(self.evaluate(inner_grad), 2.0)
y += inner_grad
del gg
grad = g.gradient(y, [x])[0]
self.assertEqual(self.evaluate(grad), 6.0)
grad = g.gradient(z, [x])[0]
self.assertEqual(self.evaluate(grad), 12.0)
del g
@test_util.assert_no_new_tensors
@test_util.run_in_graph_and_eager_modes
def testGradientTapeVariable(self):
v = resource_variable_ops.ResourceVariable(1.0, name='v')
self.evaluate(v.initializer)
with backprop.GradientTape() as g:
y = v * v
grad = g.gradient(y, [v])[0]
self.assertAllEqual(self.evaluate(grad), 2.0)
@test_util.assert_no_new_tensors
@test_util.run_in_graph_and_eager_modes
def testNestedGradients(self):
x = constant_op.constant(3.0)
with backprop.GradientTape() as g:
g.watch(x)
y = x * x
z = y * y
dz_dx, dz_dy = g.gradient(z, [x, y])
self.assertEqual(self.evaluate(dz_dx), 108.0)
self.assertEqual(self.evaluate(dz_dy), 18.0)
@test_util.assert_no_new_tensors
@test_util.run_in_graph_and_eager_modes
def testUnconnectedGradientsDefault(self):
x = constant_op.constant(1.0)
y = constant_op.constant(3.0)
with backprop.GradientTape() as g:
g.watch([x, y])
z = y * 2
dz_dx = g.gradient(z, x)
self.assertEqual(dz_dx, None)
@test_util.assert_no_new_tensors
@test_util.run_in_graph_and_eager_modes
def testUnconnectedGradientsZeros(self):
x = constant_op.constant(1.0, shape=[2, 2])
y = constant_op.constant(3.0)
with backprop.GradientTape() as g:
g.watch([x, y])
z = y * 2
dz_dx = g.gradient(z, x, unconnected_gradients='zero')
self.assertAllEqual([[0.0, 0.0], [0.0, 0.0]], self.evaluate(dz_dx))
@test_util.assert_no_new_tensors
@test_util.run_in_graph_and_eager_modes
def testUnknownUnconnectedGradientsValueGiven(self):
x = constant_op.constant(1.0)
y = constant_op.constant(1.0)
with backprop.GradientTape() as g:
g.watch([x, y])
z = y * 2
with self.assertRaisesRegexp(
ValueError, "Unknown value for unconnected_gradients: 'nonsense'"):
g.gradient(z, x, unconnected_gradients='nonsense')
@test_util.run_in_graph_and_eager_modes
def testUnconnectedGradientsNestedDefunZeros(self):
@function.defun
def f(x):
return x * x
@function.defun
def h(y):
z = f(y)
return array_ops.stop_gradient(z)
x = constant_op.constant(1.0)
with backprop.GradientTape() as g:
g.watch(x)
y = h(x)
dy_dx = g.gradient(y, x, unconnected_gradients='zero')
self.assertEqual(0.0, self.evaluate(dy_dx))
@test_util.assert_no_new_tensors
def testEmptyParamsForValueAndGradFunction(self):
def fn(a, b):
return a * b
val_and_grads_fn = backprop.val_and_grad_function(fn)
x = 2.0
y = 3.0
val, (dx, dy) = val_and_grads_fn(x, y)
self.assertAllClose(val, x * y)
self.assertAllEqual(dx, y)
self.assertAllEqual(dy, x)
@test_util.assert_no_new_tensors
def testNonEmptyParamsForValueAndGradFunction(self):
def fn(a, b):
return a * b
val_and_grad_fn = backprop.val_and_grad_function(fn, params=[1])
x = 2.0
y = 3.0
val, grads = val_and_grad_fn(x, y)
self.assertAllClose(val, x * y)
self.assertEqual(1, len(grads))
self.assertAllEqual(grads[0], x)
@test_util.assert_no_new_tensors
def testTensorCopyCPU2GPU2CPU(self):
if not context.context().num_gpus():
self.skipTest('No GPUs found')
# forward: a (cpu->gpu) -> add (gpu) -> c (gpu->cpu) -> add (cpu) -> e (cpu)
# back: e (cpu) -> add (cpu) -> c (cpu->gpu) -> add (gpu) -> grad (gpu->cpu)
def f(a, b):
with context.device('/gpu:0'):
c = math_ops.add(a.gpu(0), b.gpu(0))
return math_ops.add(c.cpu(), constant_op.constant(3.0))
with context.device('/cpu:0'):
a = constant_op.constant(1.0)
b = constant_op.constant(2.0)
grad = backprop.gradients_function(f, [0])(a, b)[0]
self.assertAllEqual(grad, 1.0)
def testGetAttrType(self):
typ = backprop.op_attr_type('Add', 'T')
self.assertEqual(typ, pywrap_tensorflow.TF_ATTR_TYPE)
def testGetAttrList(self):
typ = backprop.op_attr_type('MaxPool', 'ksize')
self.assertEqual(typ, [pywrap_tensorflow.TF_ATTR_INT])
def testMakeAttrType(self):
self.assertEqual(dtypes.float32,
backprop.make_attr(pywrap_tensorflow.TF_ATTR_TYPE, 1))
def testMakeAttrTypeList(self):
self.assertEqual([dtypes.float32],
backprop.make_attr([pywrap_tensorflow.TF_ATTR_TYPE], [1]))
def testMulType(self):
def mul(x):
return math_ops._mul_dispatch(x, x) # pylint: disable=protected-access
self.assertAllEqual(
backprop.gradients_function(mul)(3.0)[0].numpy(),
6.0)
def testMakeAttrShape(self):
for s in ([], None, [1, 2, 3], [None, None], [1, None, 3]):
expected = tensor_shape.TensorShape(s).as_proto()
actual = backprop.make_attr(pywrap_tensorflow.TF_ATTR_SHAPE, s)
self.assertEqual(
expected,
actual,
msg=('For shape %r, expected %r != %r actual' % (s, expected,
actual)))
def testMakeAttrShapeList(self):
shape_list = [[], None, [1, 2, 3], [None, None], [1, None, 3]]
self.assertEqual(
[tensor_shape.TensorShape(s).as_proto() for s in shape_list],
backprop.make_attr([pywrap_tensorflow.TF_ATTR_SHAPE], shape_list))
def testArgsGradientFunction(self):
def f(*args):
return args[0] * args[0]
grad = backprop.gradients_function(f)
self.assertAllEqual(grad(1.0)[0], 2.0)
def testPartial(self):
def f(x, y):
return x * y
part = functools.partial(f, constant_op.constant(2.0))
self.assertAllEqual(
backprop.gradients_function(part)(constant_op.constant(1.0))[0],
2.0)
def testReturnSameThing(self):
def f(x):
return x, 2 * x
self.assertAllEqual(backprop.gradients_function(f)(1.0)[0], 3.0)
@test_util.assert_no_new_tensors
def testExceptionSafety(self):
def f(unused_x):
raise ValueError()
try:
backprop.gradients_function(f)(1.0)
except ValueError:
pass
def real_f(x):
return x * x
self.assertAllEqual(backprop.gradients_function(real_f)(1.0)[0], 2.0)
@test_util.assert_no_new_tensors
def testMultiValueConvertToTensor(self):
x = resource_variable_ops.ResourceVariable(
initial_value=array_ops.constant([1.0]), name='x')
def fn():
a = math_ops.add(x.value(), 1.0)
# Make sure convert_to_tensor works correctly with list of TensorNodes.
b = array_ops.stack([a, a], axis=0)
return math_ops.reduce_mean(b)
grad = backprop.implicit_grad(fn)()[0][0]
self.assertAllEqual([1.0], grad)
def testOutput(self):
def multiout(x):
return x + 2, x * x
x = constant_op.constant([0.0, 1.0, 2.0])
grad = backprop.gradients_function(multiout)(x)[0]
self.assertAllEqual([1.0, 3.0, 5.0], grad)
def testMultiValuePreservesIfNotDiffedAgainst(self):
def tfe_conv2d(timage, tkernel, conv2dstrides):
return nn_ops.conv2d(timage, tkernel, conv2dstrides, 'SAME')
i = constant_op.constant([[[[1.0]]]])
k = constant_op.constant([[[[2.0]]]])
s = [1, 1, 1, 1]
grad = backprop.gradients_function(tfe_conv2d, params=(0,))(i, k, s)[0]
self.assertAllEqual([[[[2.0]]]], grad)
def testSameObjectForMultipleArguments(self):
def f(x, y):
return math_ops.multiply(x, y)
g = backprop.gradients_function(f)
def np_g(x, y):
dx, dy = g(x, y)
return [dx.numpy(), dy.numpy()]
x = constant_op.constant(1.)
self.assertAllEqual([1., 1.], np_g(x, x))
x = 1.
self.assertAllEqual([1., 1.], np_g(x, x))
x = constant_op.constant([[1.]])
self.assertAllEqual([[[1.]], [[1.]]], np_g(x, x))
x = [[1.]]
self.assertAllEqual([[[1.]], [[1.]]], np_g(x, x))
v = resource_variable_ops.ResourceVariable(
initial_value=1., name='testSameObjectForMultipleArguments.Variable')
self.assertAllEqual([1., 1.], np_g(v, v))
@test_util.assert_no_new_tensors
def testImplicitGradientsCustomGradientAndCachedVariableValue(self):
@custom_gradient.custom_gradient
def my_square(x):
result = math_ops.square(x)
def grad(dr):
return 2 * dr * x + 1
return result, grad
x = resource_variable_ops.ResourceVariable(
initial_value=3., name='X.' + self.id())
def f():
return my_square(x)
g = backprop.implicit_grad(f)
grads_and_vars = g()
self.assertEqual(1, len(grads_and_vars))
grad, var = grads_and_vars[0]
self.assertAllEqual(7, grad)
self.assertAllEqual(x, var)
@test_util.assert_no_new_tensors
def testCustomGradient(self):
@custom_gradient.custom_gradient
def my_mul(x, y):
result = x*y
def grad(dr):
return [dr*y, dr*x]
return result, grad
lr = 0.25
x = resource_variable_ops.ResourceVariable(2., name='x')
def loss(x):
return my_mul(2., x.read_value())
loss_grads_fn = backprop.implicit_val_and_grad(loss)
losses = []
for _ in range(5):
loss, grads_and_vars = loss_grads_fn(x)
losses.append(loss.numpy())
for (grad, var) in grads_and_vars:
var.assign_sub(lr*grad)
self.assertAllEqual(losses, [4.0, 3., 2., 1., 0.])
@test_util.assert_no_new_tensors
def testCustomGradientIdentity(self):
@custom_gradient.custom_gradient
def my_identity(x):
def grad(dresult):
return [2 * dresult]
return x, grad
self.assertAllEqual(backprop.gradients_function(my_identity)(1.0)[0], 2.0)
def testDifferentiatingFunctionThatReturnsNone(self):
def fn(x, y):
result = x*y # pylint: disable=unused-variable
x = constant_op.constant(1)
y = constant_op.constant(2)
loss_grads_fn = backprop.implicit_val_and_grad(fn)
with self.assertRaisesRegexp(
ValueError, 'Cannot differentiate a function that returns None; '
'did you forget to return a value from fn?'):
loss_grads_fn(x, y)
val_and_grads_fn = backprop.val_and_grad_function(fn)
with self.assertRaisesRegexp(
ValueError, 'Cannot differentiate a function that returns None; '
'did you forget to return a value from fn?'):
val_and_grads_fn(x, y)
def testZerosCacheDoesntLeakAcrossGraphs(self):
with ops.Graph().as_default():
def get_grad():
with ops.Graph().as_default(), self.cached_session():
t = constant_op.constant(1, dtype=dtypes.float32, shape=(10, 4))
x = constant_op.constant(2, dtype=dtypes.float32, shape=(10, 4))
with backprop.GradientTape() as tape:
tape.watch(x)
x1, _ = array_ops.split(x, num_or_size_splits=2, axis=1)
y1 = x1**2
y = array_ops.concat([y1, t], axis=1)
return self.evaluate(tape.gradient(y, x))
grad1 = get_grad()
grad2 = get_grad()
self.assertAllEqual(grad1, grad2)
@test_util.run_in_graph_and_eager_modes
def testSelectivelyWatchVariables(self):
x1 = resource_variable_ops.ResourceVariable(1.0)
x2 = resource_variable_ops.ResourceVariable(1.0)
with backprop.GradientTape(watch_accessed_variables=False) as tape:
tape.watch(x2)
y = x1**2
z = x2**3
self.assertTupleEqual(tape.watched_variables(), (x2,))
dy, dz = tape.gradient([y, z], [x1, x2])
self.evaluate([x1.initializer, x2.initializer])
self.assertIsNone(dy)
self.assertEqual(self.evaluate(dz), 3.0)
@test_util.run_in_graph_and_eager_modes
def testDifferentiatingScalarCache(self):
    # In the following test, if x2 = x1 (i.e. the objects are exactly the
    # same), then y is essentially 2*x1, and dy/dx1 = 2.
# When we had a pure scalar cache in eager, this would be the case. This
# test prevents us from going back to that case.
with backprop.GradientTape(persistent=False) as g:
x1 = constant_op.constant(3.0)
x2 = constant_op.constant(3.0)
g.watch(x1)
g.watch(x2)
y = x1 + x2
grad = g.gradient(target=y, sources=[x1])
self.assertEqual(self.evaluate(grad), [1.0])
def testVariablesAndConstantsProduceTheSameGradients(self):
# In the following test, differentiating [y, z] against [a, b] gives:
# (dy/da + dz/da, dy/db + dz/db).
# If a and b are the same constant, dz/da will not be 0 (which it should
# be).
    # This is solved by using variables, since doing a read_value on a
    # variable produces a new tensor with a corresponding TensorHandle
    # rather than reusing the same tensor (which would happen if we were
    # using a cache and reusing EagerTensor objects).
def get_grads(a, b):
with backprop.GradientTape() as tape:
tape.watch([a, b])
y = a**3
z = b**2
return tape.gradient([y, z], [a, b])
gradients_constants = get_grads(
constant_op.constant(2.0), constant_op.constant(2.0))
gradients_variables = get_grads(
resource_variable_ops.ResourceVariable(2.0),
resource_variable_ops.ResourceVariable(2.0))
self.assertAllEqual(gradients_constants, gradients_variables)
def testUnknownShapes(self):
with ops.Graph().as_default():
with backprop.GradientTape() as tape:
a = array_ops.placeholder(dtype=dtypes.float32, shape=None)
tape.watch(a)
b = a**3
db_da = tape.gradient(b, a)
with self.cached_session() as sess:
self.assertEqual((8.0, 12.0), sess.run((b, db_da), feed_dict={a: 2.0}))
@test_util.run_in_graph_and_eager_modes
def testCustomGradientInEagerAndGraph(self):
@custom_gradient.custom_gradient
def f(x):
y = x * x
def grad(dy):
return [4 * dy]
return y, grad
with backprop.GradientTape() as t:
c = constant_op.constant(1.0)
t.watch(c)
g = f(c)
self.assertAllEqual(self.evaluate(t.gradient(g, c)), 4.0)
@test_util.run_in_graph_and_eager_modes
def testMaxPooling3DGradient(self):
def forward(a):
r = max_pooling3d(a, pool_size=pool_size, strides=strides, padding='SAME')
return r
input_sizes = [1, 3, 2, 4, 1]
pool_size = (2, 2, 1)
strides = (1, 1, 1)
total_size = np.prod(input_sizes)
x = np.arange(1, total_size + 1, dtype=np.float32)
aa = constant_op.constant(x, shape=input_sizes, dtype=dtypes.float32)
da = backprop.gradients_function(forward)(aa)
if not context.executing_eagerly():
tf_aa = constant_op.constant(x, shape=input_sizes, dtype=dtypes.float32)
tf_max = max_pooling3d(
tf_aa, pool_size=pool_size, strides=strides, padding='SAME')
tf_da = gradients.gradients(tf_max, [tf_aa])
self.assertAllEqual(da[0], tf_da[0].eval())
if __name__ == '__main__':
test.main()
|
nilq/baby-python
|
python
|
"""
Manage sound and music
"""
from engine.const import CONST
snd_manager = None
class SndManager():
def __init__(self):
self.sounds = {}
self.permanent_sound = []
self.playlist = []
self.music_index = 0
self.music = None
self.sounds_playing = []
def set_playlist(self,music_list):
pass
def add_music_to_playlist(self, name):
pass
def play_music(self,name):
pass
def update_music_status(self):
pass
def sanitize_sounds(self,delete_sounds=[]):
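        """Drop cached sounds. With no argument, remove every sound that is
        not marked permanent; otherwise remove only the given names.
        """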
del_snd_tmp = []
if delete_sounds == []:
for snd_filename in self.sounds.keys():
if snd_filename not in self.permanent_sound:
del_snd_tmp.append(snd_filename)
else:
del_snd_tmp = delete_sounds
for snd_filename in del_snd_tmp:
del self.sounds[snd_filename]
snd_manager = SndManager()
if CONST.render == 'sfml':
from render_engine.sfml_engine.sfml_snd_manager import SFMLSndManager
snd_manager = SFMLSndManager()
'''
elif CONST.render == 'pookoo':
def set_playlist(music_list):
"""
Set a new playlist and play the first element
"""
global playlist, music
music = pookoo.audio.AudioStreamObject(playlist[0])
def add_music_to_playlist(self, name):
"""
Add a music at the end of the playlist
"""
global playlist
playlist.append(name)
def fadeout_music(t=0):
"""TODO: Fadeout and then stop it after time t (seconds)"""
pass
def play_music(name):
"""
Set the playlist as one element and play it
"""
global playlist
set_playlist([name])
def update_music_status():
"""
Switch to next music if it's over,
must be called to have smooth transition
"""
global music, music_index, playlist, sounds_playing
if CONST.render == 'sfml':
pass
def check_music_status():
"""
Return True if a music is currently playing
"""
global music
if CONST.render == 'sfml':
return music.status == sfml.Music.STOPPED
def load_sound(name, permanent=False):
"""Load a sound in the system and returns it"""
global sounds, permanent_sound
try:
sounds[name]
except KeyError:
if CONST.render == 'sfml':
sounds[name] = sfml.SoundBuffer.from_file(name)
elif CONST.render == 'pookoo':
sounds[name] = pookoo.audio.AudioSoundObject(name)
if permanent:
permanent_sound.append(name)
return sounds[name]
def play_sound(sound):
"""
Plays a given sound
"""
global sounds_playing
if CONST.render == 'sfml':
sound_playing = sfml.Sound(sound)
sound_playing.play()
sounds_playing.append(sound_playing)
'''
|
nilq/baby-python
|
python
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import base
import cw
class Summary(base.CWBinaryBase):
"""見出しデータ(Summary.wsm)。
type:見出しデータには"-1"の値を付与する。
"""
def __init__(self, parent, f, yadodata=False, nameonly=False, materialdir="Material", image_export=True,
wpt120=False):
base.CWBinaryBase.__init__(self, parent, f, yadodata, materialdir, image_export)
self.type = -1
self.image = f.image()
self.name = f.string()
if nameonly:
return
self.description = f.string()
self.author = f.string()
self.required_coupons = f.string(True)
self.required_coupons_num = f.dword()
self.area_id = f.dword()
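        # area_id also encodes the data version as an offset:
        #   0-19999: version 0 (used as-is)
        #   20000-39999: version 2 (minus 20000)
        #   40000-49999: version 4 (minus 40000)
        #   70000+: version 7 (minus 70000; versions 5 and 6 do not exist)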
if self.area_id <= 19999:
self.version = 0
elif self.area_id <= 39999:
self.version = 2
self.area_id = self.area_id - 20000
elif self.area_id <= 49999:
self.version = 4
self.area_id = self.area_id - 40000
else:
            # versions 5 and 6 do not exist
self.version = 7
self.area_id = self.area_id - 70000
steps_num = f.dword()
self.steps = [Step(self, f) for _cnt in xrange(steps_num)]
flags_num = f.dword()
self.flags = [Flag(self, f) for _cnt in xrange(flags_num)]
if wpt120:
return
        _w = f.dword()  # unknown
if 0 < self.version:
self.level_min = f.dword()
self.level_max = f.dword()
else:
self.level_min = 0
self.level_max = 0
        # Tags and skin type; these are filled in after loading finishes
self.skintype = ""
self.tags = ""
self.data = None
def get_data(self):
if self.data is None:
if self.image:
self.imgpath = self.export_image()
else:
self.imgpath = ""
self.data = cw.data.make_element("Summary")
prop = cw.data.make_element("Property")
e = cw.data.make_element("Name", self.name)
prop.append(e)
e = cw.data.make_element("ImagePath", self.imgpath)
prop.append(e)
e = cw.data.make_element("Author", self.author)
prop.append(e)
e = cw.data.make_element("Description", self.description)
prop.append(e)
e = cw.data.make_element("Level")
e.set("min", str(self.level_min))
e.set("max", str(self.level_max))
prop.append(e)
e = cw.data.make_element("RequiredCoupons", self.required_coupons)
e.set("number", str(self.required_coupons_num))
prop.append(e)
e = cw.data.make_element("StartAreaId", str(self.area_id))
prop.append(e)
e = cw.data.make_element("Tags", self.tags)
prop.append(e)
e = cw.data.make_element("Type", self.skintype)
prop.append(e)
self.data.append(prop)
e = cw.data.make_element("Flags")
for flag in self.flags:
e.append(flag.get_data())
self.data.append(e)
e = cw.data.make_element("Steps")
for step in self.steps:
e.append(step.get_data())
self.data.append(e)
e = cw.data.make_element("Labels", "")
self.data.append(e)
return self.data
@staticmethod
def unconv(f, data):
image = None
name = ""
description = ""
author = ""
required_coupons = ""
required_coupons_num = 0
area_id = 0
steps = []
flags = []
variants = []
level_min = 0
level_max = 0
for e in data:
if e.tag == "Property":
for prop in e:
if prop.tag == "Name":
name = prop.text
elif prop.tag in ("ImagePath", "ImagePaths"):
image = base.CWBinaryBase.import_image(f, prop)
elif prop.tag == "Author":
author = prop.text
elif prop.tag == "Description":
description = prop.text
elif prop.tag == "Level":
level_min = int(prop.get("min"))
level_max = int(prop.get("max"))
elif prop.tag == "RequiredCoupons":
required_coupons = prop.text
required_coupons_num = int(prop.get("number"))
elif prop.tag == "StartAreaId":
level_max = int(prop.text)
elif e.tag == "Flags":
flags = e
elif e.tag == "Steps":
steps = e
elif e.tag == "Variants":
variants = e
f.write_image(image)
f.write_string(name)
f.write_string(description)
f.write_string(author)
f.write_string(required_coupons, True)
f.write_dword(required_coupons_num)
f.write_dword(area_id + 40000)
f.write_dword(len(steps))
for step in steps:
Step.unconv(f, step)
f.write_dword(len(flags))
for flag in flags:
Flag.unconv(f, flag)
for variant in variants:
f.check_wsnversion("4", u"コモン")
        f.write_dword(0)  # unknown
f.write_dword(level_min)
f.write_dword(level_max)
class Step(base.CWBinaryBase):
"""ステップ定義。"""
def __init__(self, parent, f, yadodata=False):
base.CWBinaryBase.__init__(self, parent, f, yadodata)
self.name = f.string()
self.default = f.dword()
self.variable_names = [f.string() for _cnt in xrange(10)]
self.data = None
def get_data(self):
if self.data is None:
self.data = cw.data.make_element("Step")
self.data.set("default", str(self.default))
e = cw.data.make_element("Name", self.name)
self.data.append(e)
e = cw.data.make_element("Value0", self.variable_names[0])
self.data.append(e)
e = cw.data.make_element("Value1", self.variable_names[1])
self.data.append(e)
e = cw.data.make_element("Value2", self.variable_names[2])
self.data.append(e)
e = cw.data.make_element("Value3", self.variable_names[3])
self.data.append(e)
e = cw.data.make_element("Value4", self.variable_names[4])
self.data.append(e)
e = cw.data.make_element("Value5", self.variable_names[5])
self.data.append(e)
e = cw.data.make_element("Value6", self.variable_names[6])
self.data.append(e)
e = cw.data.make_element("Value7", self.variable_names[7])
self.data.append(e)
e = cw.data.make_element("Value8", self.variable_names[8])
self.data.append(e)
e = cw.data.make_element("Value9", self.variable_names[9])
self.data.append(e)
return self.data
@staticmethod
def unconv(f, data):
name = ""
default = int(data.get("default"))
if data.getbool(".", "spchars", False):
f.check_wsnversion("2", u"ステップ値中の特殊文字の展開")
variable_names = [""] * 10
for e in data:
if e.tag == "Name":
name = e.text
elif e.tag.startswith("Value"):
variable_names[int(e.tag[5:])] = e.text
f.write_string(name)
f.write_dword(default)
for variable_name in variable_names:
f.write_string(variable_name)
class Flag(base.CWBinaryBase):
"""フラグ定義。"""
def __init__(self, parent, f, yadodata=False):
base.CWBinaryBase.__init__(self, parent, f, yadodata)
self.name = f.string()
self.default = f.bool()
self.variable_names = [f.string() for _cnt in xrange(2)]
self.data = None
def get_data(self):
if self.data is None:
self.data = cw.data.make_element("Flag")
self.data.set("default", str(self.default))
e = cw.data.make_element("Name", self.name)
self.data.append(e)
e = cw.data.make_element("True", self.variable_names[0])
self.data.append(e)
e = cw.data.make_element("False", self.variable_names[1])
self.data.append(e)
return self.data
@staticmethod
def unconv(f, data):
name = ""
default = cw.util.str2bool(data.get("default"))
if data.getbool(".", "spchars", False):
f.check_wsnversion("2", u"フラグ値中の特殊文字の展開")
variable_names = [""] * 2
for e in data:
if e.tag == "Name":
name = e.text
elif e.tag == "True":
variable_names[0] = e.text
elif e.tag == "False":
variable_names[1] = e.text
f.write_string(name)
f.write_bool(default)
for variable_name in variable_names:
f.write_string(variable_name)
def main():
pass
if __name__ == "__main__":
main()
|
nilq/baby-python
|
python
|
from django.urls import path
from moim.views import *
urlpatterns = [
path('', MoimView.as_view()),
path('<int:moim_id>/', MoimDetailView.as_view()),
path('<int:moim_id>/apply/', MoimApplyView.as_view())
]
|
nilq/baby-python
|
python
|
#
# PySNMP MIB module PPP-SEC-MIB (http://snmplabs.com/pysmi)
# ASN.1 source file:///Users/davwang4/Dev/mibs.snmplabs.com/asn1/PPP-SEC-MIB
# Produced by pysmi-0.3.4 at Wed May 1 14:41:52 2019
# On host DAVWANG4-M-1475 platform Darwin version 18.5.0 by user davwang4
# Using Python version 3.7.3 (default, Mar 27 2019, 09:23:15)
#
ObjectIdentifier, OctetString, Integer = mibBuilder.importSymbols("ASN1", "ObjectIdentifier", "OctetString", "Integer")
NamedValues, = mibBuilder.importSymbols("ASN1-ENUMERATION", "NamedValues")
SingleValueConstraint, ConstraintsUnion, ConstraintsIntersection, ValueSizeConstraint, ValueRangeConstraint = mibBuilder.importSymbols("ASN1-REFINEMENT", "SingleValueConstraint", "ConstraintsUnion", "ConstraintsIntersection", "ValueSizeConstraint", "ValueRangeConstraint")
ppp, = mibBuilder.importSymbols("PPP-LCP-MIB", "ppp")
NotificationGroup, ModuleCompliance = mibBuilder.importSymbols("SNMPv2-CONF", "NotificationGroup", "ModuleCompliance")
MibIdentifier, Counter32, MibScalar, MibTable, MibTableRow, MibTableColumn, ObjectIdentity, iso, Unsigned32, Counter64, IpAddress, ModuleIdentity, Bits, TimeTicks, Integer32, NotificationType, Gauge32 = mibBuilder.importSymbols("SNMPv2-SMI", "MibIdentifier", "Counter32", "MibScalar", "MibTable", "MibTableRow", "MibTableColumn", "ObjectIdentity", "iso", "Unsigned32", "Counter64", "IpAddress", "ModuleIdentity", "Bits", "TimeTicks", "Integer32", "NotificationType", "Gauge32")
DisplayString, TextualConvention = mibBuilder.importSymbols("SNMPv2-TC", "DisplayString", "TextualConvention")
pppSecurity = MibIdentifier((1, 3, 6, 1, 2, 1, 10, 23, 2))
pppSecurityProtocols = MibIdentifier((1, 3, 6, 1, 2, 1, 10, 23, 2, 1))
pppSecurityPapProtocol = MibIdentifier((1, 3, 6, 1, 2, 1, 10, 23, 2, 1, 1))
pppSecurityChapMD5Protocol = MibIdentifier((1, 3, 6, 1, 2, 1, 10, 23, 2, 1, 2))
pppSecurityConfigTable = MibTable((1, 3, 6, 1, 2, 1, 10, 23, 2, 2), )
if mibBuilder.loadTexts: pppSecurityConfigTable.setStatus('mandatory')
if mibBuilder.loadTexts: pppSecurityConfigTable.setDescription('Table containing the configuration and preference parameters for PPP Security.')
pppSecurityConfigEntry = MibTableRow((1, 3, 6, 1, 2, 1, 10, 23, 2, 2, 1), ).setIndexNames((0, "PPP-SEC-MIB", "pppSecurityConfigLink"), (0, "PPP-SEC-MIB", "pppSecurityConfigPreference"))
if mibBuilder.loadTexts: pppSecurityConfigEntry.setStatus('mandatory')
if mibBuilder.loadTexts: pppSecurityConfigEntry.setDescription('Security configuration information for a particular PPP link.')
pppSecurityConfigLink = MibTableColumn((1, 3, 6, 1, 2, 1, 10, 23, 2, 2, 1, 1), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 2147483647))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: pppSecurityConfigLink.setStatus('mandatory')
if mibBuilder.loadTexts: pppSecurityConfigLink.setDescription("The value of ifIndex that identifies the entry in the interface table that is associated with the local PPP entity's link for which this particular security algorithm shall be attempted. A value of 0 indicates the default algorithm - i.e., this entry applies to all links for which explicit entries in the table do not exist.")
pppSecurityConfigPreference = MibTableColumn((1, 3, 6, 1, 2, 1, 10, 23, 2, 2, 1, 2), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 2147483647))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: pppSecurityConfigPreference.setStatus('mandatory')
if mibBuilder.loadTexts: pppSecurityConfigPreference.setDescription('The relative preference of the security protocol identified by pppSecurityConfigProtocol. Security protocols with lower values of pppSecurityConfigPreference are tried before protocols with higher values of pppSecurityConfigPreference.')
pppSecurityConfigProtocol = MibTableColumn((1, 3, 6, 1, 2, 1, 10, 23, 2, 2, 1, 3), ObjectIdentifier()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: pppSecurityConfigProtocol.setStatus('mandatory')
if mibBuilder.loadTexts: pppSecurityConfigProtocol.setDescription('Identifies the security protocol to be attempted on the link identified by pppSecurityConfigLink at the preference level identified by pppSecurityConfigPreference. ')
pppSecurityConfigStatus = MibTableColumn((1, 3, 6, 1, 2, 1, 10, 23, 2, 2, 1, 4), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("invalid", 1), ("valid", 2))).clone('valid')).setMaxAccess("readwrite")
if mibBuilder.loadTexts: pppSecurityConfigStatus.setStatus('mandatory')
if mibBuilder.loadTexts: pppSecurityConfigStatus.setDescription('Setting this object to the value invalid(1) has the effect of invalidating the corresponding entry in the pppSecurityConfigTable. It is an implementation-specific matter as to whether the agent removes an invalidated entry from the table. Accordingly, management stations must be prepared to receive tabular information from agents that corresponds to entries not currently in use. Proper interpretation of such entries requires examination of the relevant pppSecurityConfigStatus object.')
pppSecuritySecretsTable = MibTable((1, 3, 6, 1, 2, 1, 10, 23, 2, 3), )
if mibBuilder.loadTexts: pppSecuritySecretsTable.setStatus('mandatory')
if mibBuilder.loadTexts: pppSecuritySecretsTable.setDescription('Table containing the identities and secrets used by the PPP authentication protocols. As this table contains secret information, it is expected that access to this table be limited to those SNMP Party-Pairs for which a privacy protocol is in use for all SNMP messages that the parties exchange. This table contains both the ID and secret pair(s) that the local PPP entity will advertise to the remote entity and the pair(s) that the local entity will expect from the remote entity. This table allows for multiple id/secret password pairs to be specified for a particular link by using the pppSecuritySecretsIdIndex object.')
pppSecuritySecretsEntry = MibTableRow((1, 3, 6, 1, 2, 1, 10, 23, 2, 3, 1), ).setIndexNames((0, "PPP-SEC-MIB", "pppSecuritySecretsLink"), (0, "PPP-SEC-MIB", "pppSecuritySecretsIdIndex"))
if mibBuilder.loadTexts: pppSecuritySecretsEntry.setStatus('mandatory')
if mibBuilder.loadTexts: pppSecuritySecretsEntry.setDescription('Secret information.')
pppSecuritySecretsLink = MibTableColumn((1, 3, 6, 1, 2, 1, 10, 23, 2, 3, 1, 1), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 2147483647))).setMaxAccess("readonly")
if mibBuilder.loadTexts: pppSecuritySecretsLink.setStatus('mandatory')
if mibBuilder.loadTexts: pppSecuritySecretsLink.setDescription('The link to which this ID/Secret pair applies. By convention, if the value of this object is 0 then the ID/Secret pair applies to all links.')
pppSecuritySecretsIdIndex = MibTableColumn((1, 3, 6, 1, 2, 1, 10, 23, 2, 3, 1, 2), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 2147483647))).setMaxAccess("readonly")
if mibBuilder.loadTexts: pppSecuritySecretsIdIndex.setStatus('mandatory')
if mibBuilder.loadTexts: pppSecuritySecretsIdIndex.setDescription('A unique value for each ID/Secret pair that has been defined for use on this link. This allows multiple ID/Secret pairs to be defined for each link. How the local entity selects which pair to use is a local implementation decision.')
pppSecuritySecretsDirection = MibTableColumn((1, 3, 6, 1, 2, 1, 10, 23, 2, 3, 1, 3), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("local-to-remote", 1), ("remote-to-local", 2)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: pppSecuritySecretsDirection.setStatus('mandatory')
if mibBuilder.loadTexts: pppSecuritySecretsDirection.setDescription('This object defines the direction in which a particular ID/Secret pair is valid. If this object is local-to-remote then the local PPP entity will use the ID/Secret pair when attempting to authenticate the local PPP entity to the remote PPP entity. If this object is remote-to-local then the local PPP entity will expect the ID/Secret pair to be used by the remote PPP entity when the remote PPP entity attempts to authenticate itself to the local PPP entity.')
pppSecuritySecretsProtocol = MibTableColumn((1, 3, 6, 1, 2, 1, 10, 23, 2, 3, 1, 4), ObjectIdentifier()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: pppSecuritySecretsProtocol.setStatus('mandatory')
if mibBuilder.loadTexts: pppSecuritySecretsProtocol.setDescription('The security protocol (e.g. CHAP or PAP) to which this ID/Secret pair applies.')
pppSecuritySecretsIdentity = MibTableColumn((1, 3, 6, 1, 2, 1, 10, 23, 2, 3, 1, 5), OctetString().subtype(subtypeSpec=ValueSizeConstraint(0, 255))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: pppSecuritySecretsIdentity.setStatus('mandatory')
if mibBuilder.loadTexts: pppSecuritySecretsIdentity.setDescription('The Identity of the ID/Secret pair. The actual format, semantics, and use of pppSecuritySecretsIdentity depends on the actual security protocol used. For example, if pppSecuritySecretsProtocol is pppSecurityPapProtocol then this object will contain a PAP Peer-ID. If pppSecuritySecretsProtocol is pppSecurityChapMD5Protocol then this object would contain the CHAP NAME parameter.')
pppSecuritySecretsSecret = MibTableColumn((1, 3, 6, 1, 2, 1, 10, 23, 2, 3, 1, 6), OctetString().subtype(subtypeSpec=ValueSizeConstraint(0, 255))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: pppSecuritySecretsSecret.setStatus('mandatory')
if mibBuilder.loadTexts: pppSecuritySecretsSecret.setDescription('The secret of the ID/Secret pair. The actual format, semantics, and use of pppSecuritySecretsSecret depends on the actual security protocol used. For example, if pppSecuritySecretsProtocol is pppSecurityPapProtocol then this object will contain a PAP Password. If pppSecuritySecretsProtocol is pppSecurityChapMD5Protocol then this object would contain the CHAP MD5 Secret.')
pppSecuritySecretsStatus = MibTableColumn((1, 3, 6, 1, 2, 1, 10, 23, 2, 3, 1, 7), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("invalid", 1), ("valid", 2))).clone('valid')).setMaxAccess("readwrite")
if mibBuilder.loadTexts: pppSecuritySecretsStatus.setStatus('mandatory')
if mibBuilder.loadTexts: pppSecuritySecretsStatus.setDescription('Setting this object to the value invalid(1) has the effect of invalidating the corresponding entry in the pppSecuritySecretsTable. It is an implementation-specific matter as to whether the agent removes an invalidated entry from the table. Accordingly, management stations must be prepared to receive tabular information from agents that corresponds to entries not currently in use. Proper interpretation of such entries requires examination of the relevant pppSecuritySecretsStatus object.')
mibBuilder.exportSymbols("PPP-SEC-MIB", pppSecurityConfigPreference=pppSecurityConfigPreference, pppSecurity=pppSecurity, pppSecuritySecretsStatus=pppSecuritySecretsStatus, pppSecurityConfigLink=pppSecurityConfigLink, pppSecuritySecretsProtocol=pppSecuritySecretsProtocol, pppSecurityChapMD5Protocol=pppSecurityChapMD5Protocol, pppSecuritySecretsLink=pppSecuritySecretsLink, pppSecuritySecretsSecret=pppSecuritySecretsSecret, pppSecuritySecretsIdentity=pppSecuritySecretsIdentity, pppSecuritySecretsDirection=pppSecuritySecretsDirection, pppSecurityPapProtocol=pppSecurityPapProtocol, pppSecuritySecretsTable=pppSecuritySecretsTable, pppSecuritySecretsEntry=pppSecuritySecretsEntry, pppSecurityConfigProtocol=pppSecurityConfigProtocol, pppSecurityConfigStatus=pppSecurityConfigStatus, pppSecurityConfigEntry=pppSecurityConfigEntry, pppSecurityConfigTable=pppSecurityConfigTable, pppSecuritySecretsIdIndex=pppSecuritySecretsIdIndex, pppSecurityProtocols=pppSecurityProtocols)
|
nilq/baby-python
|
python
|
import random
import math
Menor = int(input("Enter the lower limit: "))
Maior = int(input("Enter the upper limit: "))
## Returns a number between x and y (both inclusive)
Rand = random.randint(Menor,Maior)
# Minimum number of guesses = log2(upper limit - lower limit + 1)
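# Example: with limits 1..100 there are 100 candidates, and
# round(log2(100)) = round(6.64) = 7 guesses suffice, since each guess
# halves the remaining interval (binary search).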
print("\n\t\tVocê tem apenas ", round(math.log(Maior - Menor + 1, 2))," chances para adivinhar o número!\n")
Tentativas = round(math.log(Maior - Menor + 1, 2))
Cont = 0
while Cont < Tentativas:
Cont += 1
Chute = int(input("Tente um número : "))
if Rand == Chute:
print("Parabéns, você acertou em ", Cont, " tentativa(s)!!")
break
elif Rand > Chute:
print("Arriscou um valor muito baixo...")
elif Rand < Chute:
print("Arriscou um valor muito alto...")
if Cont >= Tentativas:
print("\n\tO número era %d."%Rand)
print("\tBoa sorte na próxima vez !!")
|
nilq/baby-python
|
python
|
class News:
def __init__(self,title,description,urlToImage,content,url):
self.title=title
self.description=description
self.urlToImage=urlToImage
self.content=content
self.url=url
class Sources:
def __init__(self, id, name, description, url, category):
self.id = id
self.name = name
self.description = description
self.url = url
self.category = category
class Articles:
def __init__(self,title,author,description,url,urlToImage,publishedAt):
self.title=title
self.author=author
self.description=description
self.url=url
self.urlToImage=urlToImage
self.publishedAt=publishedAt
|
nilq/baby-python
|
python
|
"""Some utilities for caching pages."""
import zlib
from beaker.util import func_namespace
from mako.runtime import capture
def cache_content(request, key, do_work):
"""Argh!
Okay, so. Use this when you want to cache the BODY of a page but not
the CHROME (i.e., wrapper or base or whatever).
``request``
The pyramid.request.Request object for the current request.
``key``
The key that uniquely identifies this particular rendering of this
page content.
``do_work``
Some function that will stuff a bunch of expensive data in c. This
will only be called if the page hasn't yet been cached. It'll be
passed the key.
The name and module of this function will be used as part of the cache
key.
Also, DO NOT FORGET TO wrap the cachable part of your template in a
<%lib:cache_content> tag, or nothing will get cached!
If a page body is pulled from cache, c.timer.from_cache will be set to
True. If the page had to be generated, it will be set to False. (If
this function wasn't involved at all, it will be set to None.)
"""
cache = request.environ.get('beaker.cache', None)
c = request.tmpl_context
# Content needs to be cached per-language
# TODO(pyramid)
#key = u"{0}/{1}".format(key, c.lang)
key += u';' + c.game_language.identifier
if request.session.get('cheat_obdurate', False):
key += u';obdurate'
# If the cache isn't configured for whatever reason (such as when we're
# running in a test environment), just skip it.
if cache is None:
# call do_work immediately so that it isn't skipped during testing
# (since tests don't call the renderer)
do_work(request, key)
def skip_cache(context, mako_def):
mako_def.body()
c._cache_me = skip_cache
return
namespace = func_namespace(do_work)
# Cache for... ten hours? Sure, whatever
# TODO: use get_cache_region instead
content_cache = cache.get_cache('content_cache:' + namespace,
expiretime=36000)
# XXX This is dumb. Caches don't actually respect the 'enabled'
# setting, so we gotta fake it.
if not content_cache.nsargs.get('enabled', True):
def skip_cache(context, mako_def):
do_work(request, key)
mako_def.body()
c._cache_me = skip_cache
return
# These pages can be pretty big. In the case of e.g. memcached, that's
# a lot of RAM spent on giant pages that consist half of whitespace.
# Solution: gzip everything. Use level 1 for speed!
def cache_me(context, mako_def):
c.timer.from_cache = True
def generate_page():
c.timer.from_cache = False
do_work(request, key)
data = capture(context, mako_def.body).encode('utf8')
return zlib.compress(data, 1)
data = content_cache.get_value(key=key, createfunc=generate_page)
context.write(
zlib.decompress(data).decode('utf8')
)
c._cache_me = cache_me
return
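# Minimal usage sketch (illustrative; ``view_pokemon`` and
# ``expensive_stats_query`` are hypothetical names, not part of this module):
#
#     def view_pokemon(request):
#         def do_work(request, key):
#             request.tmpl_context.stats = expensive_stats_query(key)
#         cache_content(request, key=u'pokemon/25', do_work=do_work)
#         ...
#
# The template must then wrap its cachable body in <%lib:cache_content>,
# presumably invoking the c._cache_me hook set above; otherwise nothing
# gets cached.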
|
nilq/baby-python
|
python
|
import os
import pickle
import numpy as np
import soundfile as sf
from scipy import signal
from scipy.signal import get_window
import librosa
from librosa.filters import mel
from numpy.random import RandomState
from pathlib import Path
import ipdb
from tqdm import tqdm
def butter_highpass(cutoff, fs, order=5):
nyq = 0.5 * fs
normal_cutoff = cutoff / nyq
b, a = signal.butter(order, normal_cutoff, btype='high', analog=False)
return b, a
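# A 30 Hz high-pass built with butter_highpass (see b, a below) strips DC
# offset and low-frequency drift before the STFT; signal.filtfilt applies it
# forward and backward, so the filtering adds no phase shift.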
def pySTFT(x, fft_length=1024, hop_length=256):
x = np.pad(x, int(fft_length//2), mode='reflect')
noverlap = fft_length - hop_length
shape = x.shape[:-1]+((x.shape[-1]-noverlap)//hop_length, fft_length)
strides = x.strides[:-1]+(hop_length*x.strides[-1], x.strides[-1])
result = np.lib.stride_tricks.as_strided(x, shape=shape,
strides=strides)
fft_window = get_window('hann', fft_length, fftbins=True)
result = np.fft.rfft(fft_window * result, n=fft_length).T
return np.abs(result)
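# Shape check for pySTFT (illustrative): 1 s of 16 kHz audio is padded to
# 17024 samples, giving (17024 - 768) // 256 = 63 frames, and rfft of a
# 1024-sample window returns 1024 // 2 + 1 = 513 bins, so the result is a
# (513, 63) magnitude array.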
mel_basis = mel(16000, 1024, fmin=90, fmax=7600, n_mels=80).T
min_level = np.exp(-100 / 20 * np.log(10))
b, a = butter_highpass(30, 16000, order=5)
# audio file directory
rootDir = './wavs'
# rootDir = './kids_speech/wav/'
# spectrogram directory
rootDirs = [
'../data/LibriTTS/train-clean-100',
'../data/kids_speech/wavs'
]
# rootDir = '/home/shacharm/Projects/ug/data/LibriTTS/train-clean-100'
# rootDir = '/home/shacharm/Projects/ug/data/kids_speech/wavs'
targetDir = './spmel'
for rootDir in rootDirs:
    assert Path(rootDir).exists(), "{} does not exist".format(rootDir)
dirName, subdirList, _ = next(os.walk(rootDir))
print('Found directory: %s' % dirName)
SAMPLE_RATE = 16000
for subdir in tqdm(sorted(subdirList)):
if False:
files = (Path(rootDir) / subdir).glob('**/*.wav')
if not os.path.exists(os.path.join(targetDir, subdir)):
os.makedirs(os.path.join(targetDir, subdir))
_,_, fileList = next(os.walk(os.path.join(dirName,subdir)))
try:
prng = RandomState(int(subdir[1:]))
except:
prng = RandomState()
for fileName in tqdm(list((Path(rootDir) / subdir).glob('**/*.wav'))):
targetSubDir = targetDir / fileName.relative_to(rootDir).parent
targetSubDir.mkdir(parents=True, exist_ok=True)
targetFile = (targetSubDir / fileName.stem).with_suffix('.npy')
if targetFile.exists():
continue
# Read audio file
#x, fs = sf.read(os.path.join(dirName,subdir,fileName))
x, fs = sf.read(str(fileName))
x = librosa.resample(x, fs, SAMPLE_RATE)
fs = SAMPLE_RATE
# Remove drifting noise
y = signal.filtfilt(b, a, x)
            # Add a little random noise for model robustness
wav = y * 0.96 + (prng.rand(y.shape[0])-0.5)*1e-06
# Compute spect
D = pySTFT(wav).T
# Convert to mel and normalize
D_mel = np.dot(D, mel_basis)
D_db = 20 * np.log10(np.maximum(min_level, D_mel)) - 16
S = np.clip((D_db + 100) / 100, 0, 1)
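            # min-max scaling of the log magnitude: -100 dB maps to 0.0 and
            # 0 dB maps to 1.0 (min_level floors D_db at -116 dB, which
            # clips to 0)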
# save spect
np.save(targetFile, S.astype(np.float32), allow_pickle=False)
|
nilq/baby-python
|
python
|
import tkinter as tk
from tkinter import messagebox as mbox
from tkinter import filedialog
from Phase0 import phase0
from Phase0_1 import phase0_1
from Phase1 import phase1
from Phase2 import phase2
from Phase3 import phase3
from form_viewer import form_viewer
#Tk class generating
root = tk.Tk()
# screen size
root.geometry("700x500")
# screen title
root.title("N1MM to JARL log converter")
# Parameters
folder_path = tk.StringVar()
form_file = tk.StringVar()
adif_file = tk.StringVar()
log_file = tk.StringVar()
HL_file = tk.StringVar()
Ph0_data = []
Callsign = ""
FD_coe = 1
Contest_name = ""
Multi = ""
def ask_form():
""" form.txtファイル選択ボタンの動作
"""
global path
global folder_path
path = filedialog.askdirectory()
# form_f = filedialog.askopenfilename(filetypes = [('テキストファイル','*.txt')] , initialdir = './' )
folder_path.set(path)
# form_file.set(form_f)
# print( "-------- ask_input() " )
# print( "path : ", path )
# print( "folder_path: ", folder_path )
# print( "form_f ; ", form_f )
# print( "form_file : ",form_file )
return
def ask_adif():
""" adif.adiファイル選択ボタンの動作
"""
# path = filedialog.askdirectory()
adif_f = filedialog.askopenfilename(filetypes = [('テキストファイル','*.adi')] , initialdir = './' )
# folder_path.set(path)
adif_file.set(adif_f)
print( "-------- ask_adif() " )
# print( "path: ", path )
# print( "folder_path: ", folder_path )
print( "adif_f ; ", adif_f )
print( "adif_file: ",adif_file )
return
def data_clear():
Remarks1.delete(0, tk.END)
My_multi.delete(0, tk.END)
Guest.set(False)
FD_contest.set(False)
Multi_Op.set(False)
Contest_type.set(False)
AA_contest.set(False)
Power_code.set(False)
JST_convert_flag.set(False)
# Time_convert.set(False)
QSLyesno.set(False)
form_file.set('')
adif_file.set('')
def ok_click() :
Multi = My_multi.get()
mbox.showinfo('My Multi', Multi )
return Callsign
def log_generate() :
Guest_op =Guest.get()
FD = FD_contest.get()
Mop = Multi_Op.get()
# form = form_file.get()
# file_path = folder_path.get()
Ph0_data = phase0(Guest_op, FD, Mop )
Callsign = Ph0_data[0]
FD_coe = int(Ph0_data[1])
Contest_name = phase0_1( Callsign )
# mbox.showinfo('Log Remarks', 'Remark: ' + a )
    # Run Phase1:
    # rejoin ADIF log lines that were split across multiple lines
    phase1( Callsign )
    # Run Phase2:
    # generate the score summary and copy the score to the JARL summary sheet
    phase2( Callsign , FD_coe , Contest_name )
    # Run Phase3
Multi = My_multi.get()
QSL = QSLyesno.get()
JST_conv = JST_convert_flag.get()
Power = Power_code.get()
AA = AA_contest.get()
phase3( Callsign , Contest_name, QSL, JST_conv, Power, Multi, AA, Remarks1.get() )
def form_view() :
form_viewer()
def closing():
# exit()
root.destroy()
# Checkbox on/off variables
Guest = tk.BooleanVar()
Guest.set(False)
FD_contest = tk.BooleanVar()
FD_contest.set(False)
Multi_Op = tk.BooleanVar()
Multi_Op.set(False)
Contest_type = tk.BooleanVar()
Contest_type.set(False)
AA_contest = tk.BooleanVar()
AA_contest.set(False)
Power_code = tk.BooleanVar()
Power_code.set(False)
JST_convert_flag = tk.BooleanVar()
JST_convert_flag.set(False)
#Time_convert = tk.BooleanVar()
#Time_convert.set(False)
QSLyesno = tk.BooleanVar()
QSLyesno.set(False)
# check buttons
check_Guest = tk.Checkbutton(root, variable = Guest, text ="ゲストオペ運用ですか?")
check_Guest.place(x=140, y=50)
check_FD_contest = tk.Checkbutton(root, variable = FD_contest , text ="FDコンテストですか?")
check_FD_contest.place(x=140, y=70)
check_Multi_Op = tk.Checkbutton(root, variable = Multi_Op , text ="マルチオペ運用ですか?")
check_Multi_Op.place(x=140, y=90)
check_Contest_type = tk.Checkbutton(root, variable = Contest_type , text ="通常のContestですか?")
check_Contest_type.place(x=140, y=110)
check_AA_contest = tk.Checkbutton(root, variable = AA_contest , text ="ALL Asia DX contestですか?")
check_AA_contest.place(x=140, y=130)
check_Power_code = tk.Checkbutton(root, variable = Power_code , text ="1.2GHzバンド以上のパワーコードをMからLに変換します?")
check_Power_code.place(x=140, y=150)
check_JST_convert_flag = tk.Checkbutton(root, variable = JST_convert_flag , text ="ロギングはUTCでJSTに変換しますか?")
check_JST_convert_flag.place(x=140, y=170)
#check_Time_convert = tk.Checkbutton(root, variable = Time_convert , text ="UTCをJSTに変換しますか?")
#check_Time_convert.place(x=140, y=190)
check_QSLyesno = tk.Checkbutton(root, variable = QSLyesno , text ="QSLカードを発行しますか?")
check_QSLyesno.place(x=140, y=190)
# label
label_contest_number = tk.Label( text="My Contest Multi: ")
label_contest_number.place(x=30, y=230)
label_remarks1 = tk.Label( text="Hamlog Remarks1: ")
label_remarks1.place(x=30, y=250)
label_top = tk.Label( text ="N1MM+ ADIFファイルからJARLコンテストログ生成ツール")
label_top.pack()
label_term1 = tk.Label( text ="1.パラメータ設定")
label_term1.place(x=10,y=30)
label_term2 = tk.Label( text ="2.")
label_term2.place(x=10,y=350)
label_term3 = tk.Label( text ="3.")
label_term3.place(x=10,y=390)
# Widget creation (form.txt file)
#form_label = tk.Label(root, text="データフォルダ指定")
#form_label.place(x=30, y=290)
#form_box = tk.Entry(root, textvariable= form_file, width=80)
#form_box = tk.Entry(root, textvariable= folder_path, width=80)
#form_box.place(x=145, y=290)
#form_btn = tk.Button(root, text="参照", command=ask_form)
#form_btn.place(x=650, y=290)
# Widget creation (ADIF file)
#output_label = tk.Label(root, text="ADIFファイル:")
#output_label.place(x=30, y=310)
#output_box = tk.Entry(root, textvariable=adif_file, width=80)
#output_box.place(x=145, y=310)
#output_btn = tk.Button(root, text="参照", command=ask_adif)
#output_btn.place(x=650, y=310)
# text box
My_multi = tk.Entry(width=10)
My_multi.place(x=145,y=230)
Remarks1 = tk.Entry(width=40)
Remarks1.place(x=145,y=250)
clear_Button = tk.Button(root,text='パラメータClear', command = data_clear )
#clear_Button.pack( fill = 'none', padx=20, side = 'bottom' )
clear_Button.place(x=40 , y=50)
okButton =tk.Button( root, text='form.txtファイルの確認と修正', command = form_view )
#okButton.pack( fill = 'none', padx=20, side = 'bottom' )
okButton.place(x=40 , y=350)
generateButton =tk.Button( root, text='コンテストログ生成', command = log_generate )
#generateButton.pack( fill = 'none', padx=20, side = 'bottom' )
generateButton.place(x=40 , y=390)
closeButton =tk.Button( root, text='Close', command = closing )
closeButton.place(x=370 , y=470)
root.mainloop()
|
nilq/baby-python
|
python
|
import numpy as np
import os
import tensorflow as tf
from sandbox.rocky.tf.optimizers.conjugate_gradient_optimizer import FiniteDifferenceHvp, ConjugateGradientOptimizer
from hgail.algos.gail import GAIL
import auto_validator
import hyperparams
import utils
# setup
args = hyperparams.parse_args()
exp_dir = utils.set_up_experiment(exp_name=args.exp_name, phase='imitate')
saver_dir = os.path.join(exp_dir, 'imitate', 'log')
saver_filepath = os.path.join(saver_dir, 'checkpoint')
np.savez(os.path.join(saver_dir, 'args'), args=args)
summary_writer = tf.summary.FileWriter(os.path.join(exp_dir, 'imitate', 'summaries'))
# build components
env, act_low, act_high = utils.build_ngsim_env(args, exp_dir, vectorize=args.vectorize)
data = utils.load_data(
args.expert_filepath,
act_low=act_low,
act_high=act_high,
min_length=args.env_H + args.env_primesteps,
clip_std_multiple=args.normalize_clip_std_multiple,
ngsim_filename=args.ngsim_filename
)
critic = utils.build_critic(args, data, env, summary_writer)
policy = utils.build_policy(args, env)
recognition_model = utils.build_recognition_model(args, env, summary_writer)
baseline = utils.build_baseline(args, env)
reward_handler = utils.build_reward_handler(args, summary_writer)
validator = auto_validator.AutoValidator(
summary_writer,
data['obs_mean'],
data['obs_std'],
render=args.validator_render,
render_every=args.render_every,
flat_recurrent=args.policy_recurrent
)
# build algo
saver = tf.train.Saver(max_to_keep=100, keep_checkpoint_every_n_hours=.5)
sampler_args = dict(n_envs=args.n_envs) if args.vectorize else None
if args.policy_recurrent:
optimizer = ConjugateGradientOptimizer(
max_backtracks=50,
hvp_approach=FiniteDifferenceHvp(base_eps=1e-5)
)
else:
optimizer = None
algo = GAIL(
critic=critic,
recognition=recognition_model,
reward_handler=reward_handler,
env=env,
policy=policy,
baseline=baseline,
validator=validator,
batch_size=args.batch_size,
max_path_length=args.max_path_length,
n_itr=args.n_itr,
discount=args.discount,
step_size=args.trpo_step_size,
saver=saver,
saver_filepath=saver_filepath,
force_batch_sampler=False if args.vectorize else True,
sampler_args=sampler_args,
snapshot_env=False,
plot=False,
optimizer=optimizer,
optimizer_args=dict(
max_backtracks=50,
debug_nan=True
)
)
# run it
with tf.Session() as session:
    # running the initialization here to allow for later loading
    # NOTE: rllab batchpolopt runs this initialization before training as
    # well, which would overwrite anything loaded after this point, so you
    # have to comment out that initialization for any loading to work
session.run(tf.global_variables_initializer())
# loading
if args.params_filepath != '':
algo.load(args.params_filepath)
# run training
algo.train(sess=session)
|
nilq/baby-python
|
python
|
# Copyright (c) 2019, CMCC Technologies Co., Ltd.
# Copyright (c) 2019, ZTE Corporation.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import logging
from lcm.pub.database.models import NSLcmOpOccModel
from lcm.pub.exceptions import NSLCMException
from lcm.ns.const import NS_OCC_BASE_URI, NS_INSTANCE_BASE_URI
logger = logging.getLogger(__name__)
FILTERS = {
'id': 'id',
'operationState': 'operation_state',
'stateEnteredTime': 'state_entered_time',
'startTime': 'start_time',
'nsInstanceId': 'ns_instance_id',
'operation': 'operation'
}
class QueryNsLcmOpOcc:
def __init__(self, data, lcm_op_occ_id=''):
self.ns_lcm_op_occ_id = lcm_op_occ_id
self.params = data
def query_multi_ns_lcm_op_occ(self):
query_data = {}
logger.debug("QueryMultiNsLcmOpOccs--get--biz::> Check for filters in query params" % self.params)
for query, value in list(self.params.items()):
if query in FILTERS:
query_data[FILTERS[query]] = value
# Query the database with filters if the request has fields in request params, else fetch all records
if query_data:
lcm_ops = NSLcmOpOccModel.objects.filter(**query_data)
else:
lcm_ops = NSLcmOpOccModel.objects.all()
if not lcm_ops.exists():
return []
# raise NSLCMException('LCM Operation Occurances do not exist')
return [self.fill_resp_data(lcm_op) for lcm_op in lcm_ops]
def fill_resp_data(self, lcm_op):
NS_LCM_OP_OCC_URI = NS_OCC_BASE_URI % lcm_op.id
resp_data = {
'id': lcm_op.id,
'operationState': lcm_op.operation_state,
'stateEnteredTime': lcm_op.state_entered_time,
'startTime': lcm_op.start_time,
'nsInstanceId': lcm_op.ns_instance_id,
'operation': lcm_op.operation,
'isAutomaticInvocation': lcm_op.is_automatic_invocation,
'operationParams': json.loads(lcm_op.operation_params),
'isCancelPending': lcm_op.is_cancel_pending,
'cancelMode': lcm_op.cancel_mode,
'error': None if not lcm_op.error else json.loads(lcm_op.error),
'resourceChanges': None if not lcm_op.resource_changes else json.loads(lcm_op.resource_changes),
'_links': {
'self': {'href': NS_LCM_OP_OCC_URI},
'nsInstance': {'href': NS_INSTANCE_BASE_URI % lcm_op.ns_instance_id},
'retry': {'href': NS_LCM_OP_OCC_URI + '/retry'},
'rollback': {'href': NS_LCM_OP_OCC_URI + '/rollback'},
'continue': {'href': NS_LCM_OP_OCC_URI + '/continue'},
'fail': {'href': NS_LCM_OP_OCC_URI + '/fail'},
'cancel': {'href': NS_LCM_OP_OCC_URI + '/cancel'}
} # json.loads(lcm_op.links)
}
return resp_data
def query_single_ns_lcm_op_occ(self):
lcm_op = NSLcmOpOccModel.objects.filter(id=self.ns_lcm_op_occ_id)
if not lcm_op.exists():
            raise NSLCMException('LCM Operation Occurrence does not exist')
resp_data = self.fill_resp_data(lcm_op[0])
return resp_data
|
nilq/baby-python
|
python
|
# Computes the transition temperature Tc from the temperature dependence of the leading
# Bethe-Salpeter eigenvalue.
#
# Usage: python compute_tc.py T=*
#
# Author: Urs R. Haehner (haehneru@itp.phys.ethz.ch)
import numpy as np
from scipy import optimize
from matplotlib import pyplot as plt
import h5py
import os
import sys
################################################################################
# Computes the temperature at which an instability occurs, i.e. the temperature T where the leading
# eigenvalue eigval crosses 1.
# Uses a fit function of the form eigval(T) = p0/(T-p1)^p2.
# The transition temperature Tc is then given by Tc = p0^(1/p2) + p1.
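# (Setting eigval(Tc) = 1 gives (Tc - p1)^p2 = p0, hence Tc = p0^(1/p2) + p1,
# as computed below.)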
def computeTransitionTemp(T, eigval):
    # leastsq needs array arithmetic, but the caller builds plain lists
    T = np.asarray(T, dtype=float)
    eigval = np.asarray(eigval, dtype=float)
    print('\nTemperature/eigenvalue pairs for fit:')
    for T_ind, T_val in enumerate(T):
        print(str(T_val) + '\t' + str(eigval[T_ind]))
fitfunc = lambda p, x: p[0] / pow((x-p[1]), p[2]) # Target function
errfunc = lambda p, x, y: fitfunc(p, x) - y # Distance to the target function
p0 = [1., 0., 1.] # Initial guess for the parameters
p, success = optimize.leastsq(errfunc, p0[:], args=(T, eigval))
Tc = pow(p[0], 1./p[2]) + p[1]
print('\nTc = ' + '{0:.3g}'.format(Tc))
T_fine = np.linspace(T[0], T[-1], 100)
l_fine = fitfunc(p, T_fine)
return Tc, T_fine, l_fine
################################################################################
dirs = sys.argv[1:] # T=... directories
T = []
eigval = []
# Read leading eigenvalue for each temperature.
for d in dirs:
filename = d + '/analysis.hdf5'
if (os.path.isfile(filename)):
T.append(float(d[2:]))
print('Reading ' + filename)
data = h5py.File(filename,'r')
# Store real part of leading eigenvalue (imaginary part = 0).
# Eigenvalues are sorted w.r.t. size in decreasing order.
leading_eigenvalues = data['analysis-functions']['leading-eigenvalues']['data'][:]
eigval.append(leading_eigenvalues[0][0])
data.close()
# Compute the transition temperature Tc.
Tc, T_fine, eigval_fine = computeTransitionTemp(T, eigval)
# Plot temperature dependence of leading eigenvalue.
filename = 'eigval_vs_temp.pdf'
print('\nPlotting temperature dependence of leading eigenvalue: ' + filename)
xmin = T_fine[0]-0.005
xmax = T_fine[-1]+0.005
plt.plot(T_fine, eigval_fine, '--', label=r'$T_c$ = '+'{0:.3g}'.format(Tc))
plt.plot(T, eigval, 'o')
plt.hlines(1., xmin, xmax, 'k')
plt.xlim(xmin, xmax)
plt.xticks([0.07, 0.08, 0.09, 0.1])
plt.xlabel(r'$T/t$')
plt.ylabel(r'$\lambda_d$')
plt.legend(loc='best')
plt.grid()
plt.savefig(filename)
|
nilq/baby-python
|
python
|
import numpy as np
import openslide
import sys
import os
from PIL import Image
from color_norm.color_normalize import reinhard_normalizer
def white_ratio(pat):
white_count = 0.0
total_count = 0.001
for x in range(0, pat.shape[0]-200, 100):
for y in range(0, pat.shape[1]-200, 100):
p = pat[x:x+200, y:y+200, :]
whiteness = (np.std(p[:,:,0]) + np.std(p[:,:,1]) + np.std(p[:,:,2])) / 3.0
if whiteness < 14:
white_count += 1.0
total_count += 1.0
return white_count/total_count
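# Heuristic: a 200x200 tile whose mean per-channel standard deviation is
# below 14 is counted as "white" (near-uniform background glass).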
def stain_normalized_tiling(slide_name, patch_size, do_actually_read_image=True):
margin = 5
try:
        oslide = openslide.OpenSlide(slide_name)
        if openslide.PROPERTY_NAME_MPP_X in oslide.properties:
            mpp = float(oslide.properties[openslide.PROPERTY_NAME_MPP_X])
        elif "XResolution" in oslide.properties:
            mpp = float(oslide.properties["XResolution"])
        elif "tiff.XResolution" in oslide.properties:
            mpp = float(oslide.properties["tiff.XResolution"])
            if mpp > 2.0:
                mpp = 10000.0/mpp
        else:
            mpp = 0.250
if mpp < 0.375:
scale_factor = 1
else:
scale_factor = 2
pw = patch_size
width = oslide.dimensions[0]
height = oslide.dimensions[1]
    except Exception:
        print('Error in {}: exception caught, exiting'.format(slide_name))
        raise Exception('{}: exception caught, exiting'.format(slide_name))
n40X = reinhard_normalizer('color_norm/target_40X.png')
for x in range(1, width, pw):
for y in range(1, height, pw):
if x + pw > width - margin:
pw_x = width - x - margin
else:
pw_x = pw
if y + pw > height - margin:
pw_y = height - y - margin
else:
pw_y = pw
if pw_x <= 3 or pw_y <= 3:
continue
if do_actually_read_image:
try:
patch = oslide.read_region((x, y), 0, (pw_x, pw_y)).convert('RGB')
                except Exception:
                    print('{}: exception caught'.format(slide_name))
                    continue
else:
patch = Image.new('RGB', (pw_x, pw_y), (255, 255, 255))
ori_size0 = patch.size[0]
ori_size1 = patch.size[1]
patch = np.array(patch.resize(
(patch.size[0]*scale_factor, patch.size[1]*scale_factor), Image.ANTIALIAS))
if white_ratio(patch) < 0.25:
patch = n40X.normalize(patch)
yield patch, (x, y, pw_x, pw_y, ori_size0, ori_size1, mpp, scale_factor), (width, height)
|
nilq/baby-python
|
python
|
import win32ui
import pyautogui
from win10toast import ToastNotifier
path = pyautogui.prompt('Please enter the path below:')
path = path+"/?"
pyautogui.keyDown("win")
pyautogui.press("r")
pyautogui.keyUp("win")
pyautogui.typewrite("cmd")
pyautogui.press("enter")
pyautogui.press("enter")
pyautogui.typewrite(f"{path}")
pyautogui.press("enter")
wnd = win32ui.GetForegroundWindow()
print(wnd.GetWindowText())
if "cmd.exe" in wnd.GetWindowText():
pyautogui.typewrite("exit")
pyautogui.press("enter")
toaster = ToastNotifier()
toaster.show_toast("Testing", "File Does Not Have Any Silent Switches", threaded=True, icon_path=None, duration=3)
else:
toaster = ToastNotifier()
toaster.show_toast("Testing", "File Has Silent Switches", threaded=True, icon_path=None, duration=3)
|
nilq/baby-python
|
python
|
# Copyright 2021 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Writes metadata and label file to the Bert NL classifier models."""
from typing import List, Optional, Union
from tensorflow_lite_support.metadata.python.metadata_writers import metadata_info
from tensorflow_lite_support.metadata.python.metadata_writers import metadata_writer
from tensorflow_lite_support.metadata.python.metadata_writers import writer_utils
_MODEL_NAME = "BertNLClassifier"
_MODEL_DESCRIPTION = ("Classify the input text into a set of known categories.")
_OUTPUT_NAME = "probability"
_OUTPUT_DESCRIPTION = "Probabilities of the labels respectively."
# The input tensor names of models created by Model Maker.
_DEFAULT_ID_NAME = "serving_default_input_word_ids:0"
_DEFAULT_MASK_NAME = "serving_default_input_mask:0"
_DEFAULT_SEGMENT_ID_NAME = "serving_default_input_type_ids:0"
class MetadataWriter(metadata_writer.MetadataWriter):
"""Writes metadata into the Bert NL classifier."""
@classmethod
def create_from_metadata_info(
cls,
model_buffer: bytearray,
general_md: Optional[metadata_info.GeneralMd] = None,
input_md: Optional[metadata_info.BertInputTensorsMd] = None,
output_md: Optional[metadata_info.ClassificationTensorMd] = None):
"""Creates MetadataWriter based on general/input/output information.
Args:
model_buffer: valid buffer of the model file.
general_md: general information about the model. If not specified, default
general metadata will be generated.
input_md: input tensor information. If not specified, default input
metadata will be generated.
      output_md: output classification tensor information. If not specified,
default output metadata will be generated.
Returns:
A MetadataWriter object.
"""
if general_md is None:
general_md = metadata_info.GeneralMd(
name=_MODEL_NAME, description=_MODEL_DESCRIPTION)
if input_md is None:
input_md = metadata_info.BertInputTensorsMd(model_buffer,
_DEFAULT_ID_NAME,
_DEFAULT_MASK_NAME,
_DEFAULT_SEGMENT_ID_NAME)
if output_md is None:
output_md = metadata_info.ClassificationTensorMd(
name=_OUTPUT_NAME, description=_OUTPUT_DESCRIPTION)
if output_md.associated_files is None:
output_md.associated_files = []
return cls.create_from_metadata(
model_buffer,
model_metadata=general_md.create_metadata(),
input_metadata=input_md.create_input_tesnor_metadata(),
output_metadata=[output_md.create_metadata()],
associated_files=[
file.file_path for file in output_md.associated_files
] + input_md.get_tokenizer_associated_files(),
input_process_units=input_md.create_input_process_unit_metadata())
@classmethod
def create_for_inference(
cls,
model_buffer: bytearray,
tokenizer_md: Union[metadata_info.BertTokenizerMd,
metadata_info.SentencePieceTokenizerMd],
label_file_paths: List[str],
ids_name: str = _DEFAULT_ID_NAME,
mask_name: str = _DEFAULT_MASK_NAME,
segment_name: str = _DEFAULT_SEGMENT_ID_NAME,
):
"""Creates mandatory metadata for TFLite Support inference.
The parameters required in this method are mandatory when using TFLite
Support features, such as Task library and Codegen tool (Android Studio ML
Binding). Other metadata fields will be set to default. If other fields need
to be filled, use the method `create_from_metadata_info` to edit them.
`ids_name`, `mask_name`, and `segment_name` correspond to the `Tensor.name`
in the TFLite schema, which help to determine the tensor order when
populating metadata. The default values come from Model Maker.
Args:
model_buffer: valid buffer of the model file.
tokenizer_md: information of the tokenizer used to process the input
        string, if any. Supported tokenizers are: `BertTokenizer` [1] and
`SentencePieceTokenizer` [2]. If the tokenizer is `RegexTokenizer`
[3], refer to `nl_classifier.MetadataWriter`.
[1]:
https://github.com/tensorflow/tflite-support/blob/b80289c4cd1224d0e1836c7654e82f070f9eefaa/tensorflow_lite_support/metadata/metadata_schema.fbs#L436
[2]:
https://github.com/tensorflow/tflite-support/blob/b80289c4cd1224d0e1836c7654e82f070f9eefaa/tensorflow_lite_support/metadata/metadata_schema.fbs#L473
[3]:
https://github.com/tensorflow/tflite-support/blob/b80289c4cd1224d0e1836c7654e82f070f9eefaa/tensorflow_lite_support/metadata/metadata_schema.fbs#L475
label_file_paths: paths to the label files [4] in the classification
tensor. Pass in an empty list if the model does not have any label file.
[4]:
https://github.com/tensorflow/tflite-support/blob/b80289c4cd1224d0e1836c7654e82f070f9eefaa/tensorflow_lite_support/metadata/metadata_schema.fbs#L95
ids_name: name of the ids tensor, which represents the tokenized ids of
the input text.
mask_name: name of the mask tensor, which represents the mask with 1 for
real tokens and 0 for padding tokens.
segment_name: name of the segment ids tensor, where `0` stands for the
first sequence, and `1` stands for the second sequence if exists.
Returns:
A MetadataWriter object.
"""
input_md = metadata_info.BertInputTensorsMd(
model_buffer,
ids_name,
mask_name,
segment_name,
tokenizer_md=tokenizer_md)
output_md = metadata_info.ClassificationTensorMd(
name=_OUTPUT_NAME,
description=_OUTPUT_DESCRIPTION,
label_files=[
metadata_info.LabelFileMd(file_path=file_path)
for file_path in label_file_paths
],
tensor_type=writer_utils.get_output_tensor_types(model_buffer)[0])
return cls.create_from_metadata_info(
model_buffer, input_md=input_md, output_md=output_md)
|
nilq/baby-python
|
python
|
from lib.base import PowerDNSClient
class SuggestZone(PowerDNSClient):
def _run(self, *args, **kwargs):
return self.api.suggest_zone(*args, **kwargs)
|
nilq/baby-python
|
python
|
"""
The :mod:`websockets.server` module defines a simple WebSocket server API.
"""
import asyncio
import collections.abc
import email.message
import logging
from .compatibility import asyncio_ensure_future
from .exceptions import InvalidHandshake, InvalidOrigin
from .handshake import build_response, check_request
from .http import USER_AGENT, read_request
from .protocol import CONNECTING, OPEN, WebSocketCommonProtocol
__all__ = ['serve', 'WebSocketServerProtocol']
logger = logging.getLogger(__name__)
class WebSocketServerProtocol(WebSocketCommonProtocol):
"""
Complete WebSocket server implementation as an :class:`asyncio.Protocol`.
This class inherits most of its methods from
:class:`~websockets.protocol.WebSocketCommonProtocol`.
For the sake of simplicity, it doesn't rely on a full HTTP implementation.
Its support for HTTP responses is very limited.
"""
state = CONNECTING
def __init__(self, ws_handler, ws_server, *,
origins=None, subprotocols=None, extra_headers=None, **kwds):
self.ws_handler = ws_handler
self.ws_server = ws_server
self.origins = origins
self.subprotocols = subprotocols
self.extra_headers = extra_headers
super().__init__(**kwds)
def connection_made(self, transport):
super().connection_made(transport)
# Register the connection with the server when creating the handler
# task. (Registering at the beginning of the handler coroutine would
# create a race condition between the creation of the task, which
# schedules its execution, and the moment the handler starts running.)
self.ws_server.register(self)
self.handler_task = asyncio_ensure_future(
self.handler(), loop=self.loop)
@asyncio.coroutine
def handler(self):
# Since this method doesn't have a caller able to handle exceptions,
        # it attempts to log relevant ones and close the connection properly.
try:
try:
path = yield from self.handshake(
origins=self.origins, subprotocols=self.subprotocols,
extra_headers=self.extra_headers)
except ConnectionError as exc:
logger.info('Connection error during opening handshake', exc_info=True)
raise
except Exception as exc:
if self._is_server_shutting_down(exc):
response = ('HTTP/1.1 503 Service Unavailable\r\n\r\n'
'Server is shutting down.')
elif isinstance(exc, InvalidOrigin):
response = 'HTTP/1.1 403 Forbidden\r\n\r\n' + str(exc)
elif isinstance(exc, InvalidHandshake):
response = 'HTTP/1.1 400 Bad Request\r\n\r\n' + str(exc)
else:
logger.warning("Error in opening handshake", exc_info=True)
response = ('HTTP/1.1 500 Internal Server Error\r\n\r\n'
'See server log for more information.')
self.writer.write(response.encode())
raise
try:
yield from self.ws_handler(self, path)
except Exception as exc:
if self._is_server_shutting_down(exc):
yield from self.fail_connection(1001)
else:
logger.error("Error in connection handler", exc_info=True)
yield from self.fail_connection(1011)
raise
try:
yield from self.close()
except ConnectionError as exc:
if self._is_server_shutting_down(exc):
pass
logger.info('Connection error in closing handshake', exc_info=True)
raise
except Exception as exc:
if self._is_server_shutting_down(exc):
pass
else:
logger.warning("Error in closing handshake", exc_info=True)
raise
except Exception:
# Last-ditch attempt to avoid leaking connections on errors.
try:
self.writer.close()
except Exception: # pragma: no cover
pass
finally:
# Unregister the connection with the server when the handler task
# terminates. Registration is tied to the lifecycle of the handler
# task because the server waits for tasks attached to registered
# connections before terminating.
self.ws_server.unregister(self)
def _is_server_shutting_down(self, exc):
"""
Decide whether an exception means that the server is shutting down.
"""
return (
isinstance(exc, asyncio.CancelledError) and
self.ws_server.closing
)
@asyncio.coroutine
def handshake(self, origins=None, subprotocols=None, extra_headers=None):
"""
Perform the server side of the opening handshake.
If provided, ``origins`` is a list of acceptable HTTP Origin values.
Include ``''`` if the lack of an origin is acceptable.
If provided, ``subprotocols`` is a list of supported subprotocols in
order of decreasing preference.
If provided, ``extra_headers`` sets additional HTTP response headers.
It can be a mapping or an iterable of (name, value) pairs. It can also
be a callable taking the request path and headers in arguments.
Return the URI of the request.
"""
# Read handshake request.
try:
path, headers = yield from read_request(self.reader)
except ValueError as exc:
raise InvalidHandshake("Malformed HTTP message") from exc
self.request_headers = headers
self.raw_request_headers = list(headers.raw_items())
get_header = lambda k: headers.get(k, '')
key = check_request(get_header)
if origins is not None:
origin = get_header('Origin')
if not set(origin.split() or ['']) <= set(origins):
raise InvalidOrigin("Origin not allowed: {}".format(origin))
if subprotocols is not None:
protocol = get_header('Sec-WebSocket-Protocol')
if protocol:
client_subprotocols = [p.strip() for p in protocol.split(',')]
self.subprotocol = self.select_subprotocol(
client_subprotocols, subprotocols)
headers = []
set_header = lambda k, v: headers.append((k, v))
set_header('Server', USER_AGENT)
if self.subprotocol:
set_header('Sec-WebSocket-Protocol', self.subprotocol)
if extra_headers is not None:
if callable(extra_headers):
extra_headers = extra_headers(path, self.raw_request_headers)
if isinstance(extra_headers, collections.abc.Mapping):
extra_headers = extra_headers.items()
for name, value in extra_headers:
set_header(name, value)
build_response(set_header, key)
self.response_headers = email.message.Message()
for name, value in headers:
self.response_headers[name] = value
self.raw_response_headers = headers
# Send handshake response. Since the status line and headers only
# contain ASCII characters, we can keep this simple.
response = ['HTTP/1.1 101 Switching Protocols']
response.extend('{}: {}'.format(k, v) for k, v in headers)
response.append('\r\n')
response = '\r\n'.join(response).encode()
self.writer.write(response)
assert self.state == CONNECTING
self.state = OPEN
self.opening_handshake.set_result(True)
return path
@staticmethod
def select_subprotocol(client_protos, server_protos):
"""
Pick a subprotocol among those offered by the client.
"""
common_protos = set(client_protos) & set(server_protos)
if not common_protos:
return None
priority = lambda p: client_protos.index(p) + server_protos.index(p)
return sorted(common_protos, key=priority)[0]
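    # Example: a client offering ['chat', 'superchat'] against a server
    # supporting ['superchat'] yields the single common protocol 'superchat';
    # ties on the summed indices are broken by sorted() order.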
class WebSocketServer(asyncio.AbstractServer):
"""
Wrapper for :class:`~asyncio.Server` that triggers the closing handshake.
"""
def __init__(self, loop):
# Store a reference to loop to avoid relying on self.server._loop.
self.loop = loop
self.closing = False
self.websockets = set()
def wrap(self, server):
"""
Attach to a given :class:`~asyncio.Server`.
Since :meth:`~asyncio.BaseEventLoop.create_server` doesn't support
injecting a custom ``Server`` class, a simple solution that doesn't
rely on private APIs is to:
- instantiate a :class:`WebSocketServer`
- give the protocol factory a reference to that instance
- call :meth:`~asyncio.BaseEventLoop.create_server` with the factory
- attach the resulting :class:`~asyncio.Server` with this method
"""
self.server = server
def register(self, protocol):
self.websockets.add(protocol)
def unregister(self, protocol):
self.websockets.remove(protocol)
def close(self):
"""
Stop accepting new connections and close open connections.
"""
# Make a note that the server is shutting down. Websocket connections
# check this attribute to decide to send a "going away" close code.
self.closing = True
# Stop accepting new connections.
self.server.close()
# Close open connections. For each connection, two tasks are running:
# 1. self.worker_task shuffles messages between the network and queues
# 2. self.handler_task runs the opening handshake, the handler provided
# by the user and the closing handshake
# In the general case, cancelling the handler task will cause the
# handler provided by the user to exit with a CancelledError, which
# will then cause the worker task to terminate.
for websocket in self.websockets:
websocket.handler_task.cancel()
@asyncio.coroutine
def wait_closed(self):
"""
Wait until all connections are closed.
This method must be called after :meth:`close()`.
"""
# asyncio.wait doesn't accept an empty first argument.
if self.websockets:
# The handler or the worker task can terminate first, depending
# on how the client behaves and the server is implemented.
yield from asyncio.wait(
[websocket.handler_task for websocket in self.websockets] +
[websocket.worker_task for websocket in self.websockets],
loop=self.loop)
yield from self.server.wait_closed()
@asyncio.coroutine
def serve(ws_handler, host=None, port=None, *,
klass=WebSocketServerProtocol,
timeout=10, max_size=2 ** 20, max_queue=2 ** 5,
loop=None, legacy_recv=False,
origins=None, subprotocols=None, extra_headers=None,
**kwds):
"""
This coroutine creates a WebSocket server.
It yields a :class:`~asyncio.Server` which provides:
* a :meth:`~asyncio.Server.close` method that closes open connections with
status code 1001 and stops accepting new connections
* a :meth:`~asyncio.Server.wait_closed` coroutine that waits until closing
handshakes complete and connections are closed.
``ws_handler`` is the WebSocket handler. It must be a coroutine accepting
two arguments: a :class:`WebSocketServerProtocol` and the request URI.
:func:`serve` is a wrapper around the event loop's
:meth:`~asyncio.BaseEventLoop.create_server` method. ``host``, ``port`` as
well as extra keyword arguments are passed to
:meth:`~asyncio.BaseEventLoop.create_server`.
For example, you can set the ``ssl`` keyword argument to a
:class:`~ssl.SSLContext` to enable TLS.
The behavior of the ``timeout``, ``max_size``, and ``max_queue`` optional
    arguments is described in the documentation of
:class:`~websockets.protocol.WebSocketCommonProtocol`.
:func:`serve` also accepts the following optional arguments:
* ``origins`` defines acceptable Origin HTTP headers — include
``''`` if the lack of an origin is acceptable
* ``subprotocols`` is a list of supported subprotocols in order of
decreasing preference
* ``extra_headers`` sets additional HTTP response headers — it can be a
mapping, an iterable of (name, value) pairs, or a callable taking the
request path and headers in arguments.
Whenever a client connects, the server accepts the connection, creates a
:class:`WebSocketServerProtocol`, performs the opening handshake, and
delegates to the WebSocket handler. Once the handler completes, the server
performs the closing handshake and closes the connection.
Since there's no useful way to propagate exceptions triggered in handlers,
they're sent to the ``'websockets.server'`` logger instead. Debugging is
much easier if you configure logging to print them::
import logging
logger = logging.getLogger('websockets.server')
logger.setLevel(logging.ERROR)
logger.addHandler(logging.StreamHandler())
"""
if loop is None:
loop = asyncio.get_event_loop()
ws_server = WebSocketServer(loop)
secure = kwds.get('ssl') is not None
factory = lambda: klass(
ws_handler, ws_server,
host=host, port=port, secure=secure,
timeout=timeout, max_size=max_size, max_queue=max_queue,
loop=loop, legacy_recv=legacy_recv,
origins=origins, subprotocols=subprotocols,
extra_headers=extra_headers,
)
server = yield from loop.create_server(factory, host, port, **kwds)
ws_server.wrap(server)
return ws_server
|
nilq/baby-python
|
python
|
import os
import numpy as np
from pylearn2.utils import serial
from theano import tensor as T
from theano import function
from pylearn2ext.chbmit import CHBMIT
from tests.plot_eeg import plot_eeg_predict_seizure_period
def predict_plot(model_path, dataset):
"""
Script to perform seizure detection and plot the results.
Parameters
----------
model_path : string
Path to the directory to load the trained model.
data_path : dataset object
Dataset object.
"""
    try:
        model = serial.load(model_path)
    except Exception as e:
        print(model_path + " doesn't seem to be a valid model path, got this error when trying to load it:")
        print(e)
    print("Setting up symbolic expressions...")
X = model.get_input_space().make_theano_batch()
Y = model.fprop(X)
Y = T.argmax(Y, axis=1)
f = function([X], Y)
# Use smallish batches to avoid running out of memory
batch_size = dataset.batch_size
model.set_batch_size(batch_size)
# Dataset must be multiple of batch size of some batches will have different sizes.
# Theano convolution requires a hard-coded batch size.
m = dataset.X.shape[0]
extra = (batch_size - m) % batch_size
assert (m + extra) % batch_size == 0
if extra > 0:
dataset.X = np.concatenate((dataset.X, np.zeros((extra, dataset.X.shape[1]),
dtype=dataset.X.dtype)),
axis=0)
assert dataset.X.shape[0] % batch_size == 0
print "Performing predictions..."
y = []
for i in xrange(dataset.X.shape[0] / batch_size):
x_arg = dataset.X[i*batch_size:(i+1)*batch_size,:]
if X.ndim > 2:
x_arg = dataset.get_topological_view(x_arg)
y.append(f(x_arg.astype(X.dtype)))
y = np.concatenate(y)
assert y.ndim == 1
assert y.shape[0] == dataset.X.shape[0]
# Discard any zero-padding that was used to give the batches uniform size
y = y[:m]
extra = (dataset.n_channels - y.size) % dataset.n_channels
assert (extra + y.size) % dataset.n_channels == 0
if extra > 0:
y = np.append(y, np.zeros(extra))
# Reshape
    y = y.reshape(-1, y.shape[0] // dataset.n_channels)
sum_y = np.sum(y, 0)
plot_eeg_predict_seizure_period(X=dataset.raw_X,
y=np.repeat(sum_y, dataset.sampling_rate),
channel_labels=dataset.channel_labels,
seizure_seconds=dataset.seizure_seconds,
sampling_rate=dataset.sampling_rate,
start_second=3600,
end_second=3900,
is_scale=True,
n_X_ticks=6,
channel_th_y_lim=[-1, 6],
figure_width=800,
figure_height=600)
if __name__ == '__main__':
patient_id = 10
leave_one_out_file = 4
model_path = '../models'
data_path = '/Users/akara/Workspace/data/chbmit'
save_model_path = os.path.join(model_path, 'sdae_chbmit_p{0}_leave_{1}'.format(patient_id,
leave_one_out_file))
dataset = CHBMIT(patient_id=patient_id,
which_set='test',
preprocessor_path=os.path.join(save_model_path, 'sdae_scaler.pkl'),
data_dir=data_path,
transform='single_channel',
leave_one_out_file=leave_one_out_file,
window_size=256,
batch_size=20)
predict_plot(model_path=os.path.join(save_model_path, 'sdae_all.pkl'),
dataset=dataset)
|
nilq/baby-python
|
python
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import logging
import os
import random
import time
class IdWorker(object):
def __init__(self, worker_id, host_id):
self.worker_id = worker_id
self.host_id = host_id
self.logger = logging.getLogger("idworker")
# stats
self.ids_generated = 0
# Since epicteller start.
self.twepoch = 1577808000000
self.sequence = 0
self.worker_id_bits = 8
self.data_center_id_bits = 2
self.max_worker_id = -1 ^ (-1 << self.worker_id_bits)
self.max_data_center_id = -1 ^ (-1 << self.data_center_id_bits)
self.sequence_bits = 12
self.worker_id_shift = self.sequence_bits
self.data_center_id_shift = self.sequence_bits + self.worker_id_bits
self.timestamp_left_shift = self.sequence_bits + self.worker_id_bits + self.data_center_id_bits
self.sequence_mask = -1 ^ (-1 << self.sequence_bits)
self.last_timestamp = -1
# Sanity check for worker_id
if self.worker_id > self.max_worker_id or self.worker_id < 0:
raise Exception("worker_id", "worker id can't be greater than %i or less than 0" % self.max_worker_id)
if self.host_id > self.max_data_center_id or self.host_id < 0:
raise Exception("host_id", "data center id can't be greater than %i or less than 0" % self.max_data_center_id)
self.logger.info("worker starting. timestamp left shift %d, data center id bits %d, worker id bits %d, sequence bits %d, worker id %d" % (self.timestamp_left_shift, self.data_center_id_bits, self.worker_id_bits, self.sequence_bits, self.worker_id))
def _time_gen(self):
return int(time.time() * 1000)
def _till_next_millis(self, last_timestamp):
timestamp = self._time_gen()
while timestamp <= last_timestamp:
timestamp = self._time_gen()
return timestamp
def _next_id(self, timestamp):
if self.last_timestamp > timestamp:
self.logger.warning("clock is moving backwards. Rejecting request until %i" % self.last_timestamp)
raise Exception("Clock moved backwards. Refusing to generate id for %i milliseocnds" % self.last_timestamp)
if self.last_timestamp == timestamp:
self.sequence = (self.sequence + 1) & self.sequence_mask
if self.sequence == 0:
timestamp = self._till_next_millis(self.last_timestamp)
else:
self.sequence = 0
self.last_timestamp = timestamp
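        # Assemble the id: [timestamp delta | 2-bit host | 8-bit worker | 12-bit sequence]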
new_id = ((timestamp - self.twepoch) << self.timestamp_left_shift) | (self.host_id << self.data_center_id_shift) | (self.worker_id << self.worker_id_shift) | self.sequence
self.ids_generated += 1
return new_id
def get_worker_id(self):
return self.worker_id
def get_timestamp(self):
return self._time_gen()
def get_id(self):
timestamp = self._time_gen()
new_id = self._next_id(timestamp)
self.logger.debug("id: %i worker_id: %i host_id: %i" % (new_id, self.worker_id, self.host_id))
return new_id
def get_host_id(self):
return self.host_id
_host_id = int(os.getenv('HOST_ID', random.randint(0, 3)))
_worker_id = int(os.getenv('WORKER_ID', random.randint(0, 255)))
_worker = IdWorker(_worker_id, _host_id)
def get_id() -> int:
return _worker.get_id()
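if __name__ == '__main__':
    # Minimal smoke test (a sketch; assumes HOST_ID/WORKER_ID are unset or
    # valid integers): ids should be unique and strictly increasing.
    sample = [get_id() for _ in range(5)]
    print(sample)
    assert sample == sorted(sample) and len(set(sample)) == 5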
|
nilq/baby-python
|
python
|
""" CPG locomotion controller. """
import itertools
import os
from argparse import ArgumentParser
from pathlib import Path
import farms_pylog as pylog
import matplotlib.pyplot as plt
import networkx as nx
import numpy as np
import yaml
from farms_container import Container
from farms_network.networkx_model import NetworkXModel
from farms_network.neural_system import NeuralSystem
pylog.set_level("error")
def add_mutual_connection(network, node_1, node_2, weight, phi):
"""
Add mutual connection between two nodes
"""
network.add_edge(node_1, node_2, weight=weight, phi=phi)
network.add_edge(node_2, node_1, weight=weight, phi=-1*phi)
def add_connection_antagonist(network, node_1, node_2, **kwargs):
"""
Add mutual connection between two nodes
"""
weight = kwargs.pop('weight', 1.0)
phi = kwargs.pop('phi', 0.0)
add_mutual_connection(
network, f"{node_1}_flexion", f"{node_2}_flexion", weight=weight,
phi=phi
)
add_mutual_connection(
network, f"{node_1}_extension", f"{node_2}_extension", weight=weight,
phi=phi
)
def create_oscillator_network(export_path, **kwargs):
"""Create the drosophila reduced network.
"""
# Network properties
default_weight = kwargs.pop("default_weight", 100.0)
default_phi = kwargs.pop("default_phi", 0.0)
# Initialize di graph network
network = nx.DiGraph()
# Generate list of controlled joints in the model
sides = ('L', 'R')
positions = ('F', 'M', 'H')
segments = ('Coxa', 'Femur', 'Tibia')
nodes = [
f"joint_{side}{position}{segment}_roll"
if (position in ["M", "H"]) and (segment == "Coxa")
else f"joint_{side}{position}{segment}"
for side in sides
for position in positions
for segment in segments
]
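    # Mid ("M") and hind ("H") coxae are driven through their roll DOF
    # (e.g. "joint_RMCoxa_roll"); front coxae and all femur/tibia joints use
    # the default pitch naming.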
# Create flexion-extension oscillator for each node
for node in nodes:
network.add_node(f"{node}_flexion", model="oscillator", f=3.0,
R=1.0, a=1.0)
network.add_node(f"{node}_extension", model="oscillator", f=3.0,
R=1.0, a=1.0)
# Connect flexion-extension nodes
for node in nodes:
if node.split("_")[-1][2:] not in ['Femur', 'Tibia']:
add_mutual_connection(
network, f"{node}_flexion", f"{node}_extension",
weight=default_weight, phi=np.pi
)
# Connect leg oscillators
for side in sides:
for position in positions:
for j in range(len(segments[:-1])):
node_1 = segments[j]
node_2 = segments[j+1]
if (position in ["M", "H"]) and (segments[j] == "Coxa"):
node_1 = "Coxa_roll"
add_mutual_connection(
network, f"joint_{side}{position}{node_1}_flexion",
f"joint_{side}{position}{node_2}_flexion",
weight=default_weight, phi=np.pi/2
)
add_mutual_connection(
network, f"joint_{side}{position}{node_1}_extension",
f"joint_{side}{position}{node_2}_extension",
weight=default_weight, phi=np.pi/2
)
#: Connect base nodes
base_connections = [
['LFCoxa', 'RFCoxa', {'weight': default_weight, 'phi': np.pi}],
['LFCoxa', 'RMCoxa_roll', {'weight': default_weight, 'phi': np.pi}],
['RMCoxa_roll', 'LHCoxa_roll', {'weight': default_weight, 'phi': 0.0}],
['RFCoxa', 'LMCoxa_roll', {'weight': default_weight, 'phi': np.pi}],
['LMCoxa_roll', 'RHCoxa_roll', {'weight': default_weight, 'phi': 0.0}],
]
for n1, n2, data in base_connections:
add_connection_antagonist(network, f"joint_{n1}", f"joint_{n2}",
**data)
# Update node positions for visualization
with open('locomotion_network_node_positions.yaml', 'r') as file:
node_positions = yaml.load(file, yaml.SafeLoader)
for node, data in node_positions.items():
network.nodes[node]['x'] = data[0]
network.nodes[node]['y'] = data[1]
network.nodes[node]['z'] = data[2]
# Export graph
print(export_path)
nx.write_graphml(network, export_path)
def run_network(network_path):
""" Run the network.
Parameters
----------
network_path : <Path>
Path to the network config file
"""
# Initialize network
dt = 1e-3 #: Time step (1ms)
duration = 2
time_vec = np.arange(0, duration, dt) #: Time
container = Container(duration/dt)
net = NeuralSystem(network_path, container)
# initialize network parameters
container.initialize()
net.setup_integrator()
#: Integrate the network
pylog.debug('Begin Integration!')
for t in time_vec:
net.step(dt=dt)
container.update_log()
#: Results
container.dump(overwrite=True)
# Plot results
neural_data = container.neural
neural_outputs = neural_data.outputs.log
neural_outputs_names = neural_data.outputs.names
neural_outputs_name_id = neural_data.outputs.name_index
# Plot Intra-limb activations
for leg in ("RF", "RM", "RH", "LH", "LM", "LH"):
leg_data = np.asarray(
[
neural_outputs[:, neural_outputs_name_id[name]]
for name in neural_outputs_names
if leg in name
]
).T
leg_names = [
name for name in neural_outputs_names
if leg in name
]
fig, axs = plt.subplots(nrows=3, ncols=1)
axs[0].plot(time_vec, 1 + np.sin(leg_data[:, :2]))
axs[1].plot(time_vec, 1 + np.sin(leg_data[:, 2:4]))
axs[2].plot(time_vec, 1 + np.sin(leg_data[:, 4:]))
axs[0].axes.xaxis.set_visible(False)
axs[1].axes.xaxis.set_visible(False)
axs[0].set_title(leg_names[0].split('_')[2])
axs[1].set_title(leg_names[2].split('_')[2])
axs[2].set_title(leg_names[4].split('_')[2])
axs[2].set_xlabel("Time[s]")
# Plot Inter-limb activations
leg_data = np.asarray(
[
neural_outputs[:, neural_outputs_name_id[name]]
for name in neural_outputs_names
if "Coxa" in name and "flexion" in name
]
).T
leg_names = [
name for name in neural_outputs_names
if "Coxa" in name
]
fig, ax = plt.subplots(nrows=1, ncols=1)
ax.plot(time_vec, 1 + np.sin(leg_data[:, :]))
ax.set_title("Coxa")
ax.set_xlabel("Time[s]")
#: Show network
net.visualize_network(edge_labels=False)
plt.show()
def parse_args():
"""Parse command line arguments to generate and simulate the network.
"""
parser = ArgumentParser("Network parser")
parser.add_argument(
"--export-path", required=False, type=str,
default=(
Path(__file__).parent.absolute()
).joinpath("../config/network/locomotion_network.graphml"),
dest="export_path"
)
parser.add_argument(
"--run-network", required=False, type=bool,
default=True, dest="run_network"
)
return parser.parse_args()
if __name__ == '__main__':
    clargs = parse_args()
create_oscillator_network(clargs.export_path)
if clargs.run_network:
run_network(clargs.export_path)
|
nilq/baby-python
|
python
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import sys
import socket
import threading
import datetime
#from threading import Lock
from Utils import DebugLock as Lock
from Utils import Utils
try:
from Event import Event
from NaptSocket import NaptSocket, NaptSocketStatus
from NaptConnectionEventArgs import NaptConnectionEventArgs
except Exception as ex:
Utils.print_exception(ex)
class NaptConnection(object):
def __init__(self, client, server):
Utils.expects_type(socket.socket, client, 'client')
Utils.expects_type(socket.socket, server, 'server', True)
self.lock = Lock()
self.id = 0
self.client = NaptSocket(self, client, True)
self.server = NaptSocket(self, server, False)
        self.is_initial = True
self.is_connecting = False
self.is_connected = False
self.is_closed = False
self.tag = None
self.tls = False
self.debug = True
self.lastrecvtime = datetime.datetime.now()
self.connected = Event()
self.closed = Event()
self.client_closing = Event()
self.server_closing = Event()
self.client_closed = Event()
self.server_closed = Event()
self.client_recieved= Event()
self.server_recieved= Event()
def __str__(self):
return 'NaptConnection{ %s }' % ', '.join([
'id=%d' % self.id,
'client=%s' % str(self.client),
'server=%s' % str(self.server),
'is_connecting=%s' % str(self.is_connecting),
'is_connected=%s' % str(self.is_connected)])
# public
def connect(self, endpoint):
Utils.assertion(self.lock.locked(), 'need lock')
if self.is_connecting:
raise Exception() # InvalidOperationException
self.is_connecting = True
self.server.status = NaptSocketStatus.Connecting
threading.Thread(target = self.do_connect, args = (endpoint,), name = self.__class__.__name__).start()
# private
def do_connect(self, endpoint):
try:
self.server.connect(endpoint) # blocking
with self.lock:
if self.is_closed:
# todo close
return
self.is_connected = True
print('INVOKE: on_connected')
self.on_connected(None)
except Exception as ex:
print(' endpoint: %s' % str(endpoint))
Utils.print_exception(ex)
# private
def update_lastrecvtime(self):
self.lastrecvtime = datetime.datetime.now()
# public
def close(self):
if self.debug:
print('NaptConnection.close: %s' % str(self))
with self.lock:
if self.is_closed:
return
self.close_client()
self.close_server()
self.is_closed = True
self.on_closed(None)
# public
def close2(self):
Utils.assertion(self.lock.locked(), 'need lock')
if self.debug:
print('NaptConnection.close: %s' % str(self))
if self.is_closed:
return
self.close_client()
self.close_server()
self.is_closed = True
#self.on_closed(None) # todo lock for log
# protected virtual
def on_connected(self, e):
self.connected(self, e)
# protected virtual
def on_closed(self, e):
self.closed(self, e)
# protected virtual
def on_client_closing(self, e):
self.client_closing(self, e)
# protected virtual
def on_server_closing(self, e):
self.server_closing(self, e)
# protected virtual
def on_client_closed(self, e):
self.client_closed(self, e)
# protected virtual
def on_server_closed(self, e):
self.server_closed(self, e)
# protected virtual
def on_client_recieved(self, e): # NaptConnectionEventArgs
self.client_recieved(self, e)
# protected virtual
def on_server_recieved(self, e): # NaptConnectionEventArgs
self.server_recieved(self, e)
# internal
def recv(self, so):
Utils.expects_type(NaptSocket, so, 'so')
        self.update_lastrecvtime()
if so.is_client:
self.recv_client()
else:
self.recv_server()
# internal
def error(self, so):
Utils.expects_type(NaptSocket, so, 'so')
# todo error
# private
def recv_client(self):
try:
#data= self.client.socket.recv(4096)
data= Utils.recv(self.client.socket, 4096)
e = NaptConnectionEventArgs(self, data, 0, len(data))
if len(data) == 0: # closed
#self.close_client();
self.close()
return
print(' DATA: %s' % str(data))
self.on_client_recieved(e)
self.server.push(data, 0, len(data))
except Exception as ex: # SocketException
Utils.print_exception(ex)
self.close()
# private
def recv_server(self):
try:
#data= self.server.socket.recv(4096)
data= Utils.recv(self.server.socket, 4096)
e = NaptConnectionEventArgs(self, data, 0, len(data))
if len(data) == 0: # closed
#self.close_server()
self.close()
return
print(' DATA: %s' % str(data))
self.on_server_recieved(e)
self.client.push(data, 0, len(data))
except Exception as ex: # SocketException
Utils.print_exception(ex)
self.close()
# private
def close_client(self):
if self.debug:
print(' NaptConnection.close_client: %s' % str(self.client))
try:
self.on_client_closing(None)
if self.client.close():
self.on_client_closed(None)
except Exception as ex:
Utils.print_exception(ex)
# private void
def close_server(self):
if self.debug:
print(' NaptConnection.close_server: %s' % str(self.server))
try:
self.on_server_closing(None)
if self.server.close():
                self.on_server_closed(None)
except Exception as ex:
Utils.print_exception(ex)
|
nilq/baby-python
|
python
|
import cv2
cap = cv2.VideoCapture(0)
fgbg =cv2.createBackgroundSubtractorMOG2()
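# MOG2 maintains a per-pixel Gaussian-mixture background model; apply()
# returns a foreground mask, which Canny then reduces to moving edges.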
while (1):
ret, frame = cap.read()
fgmask = fgbg.apply(frame)
edges = cv2.Canny(fgmask,100,200)
cv2.imshow('Original', frame)
cv2.imshow('MOG2', fgmask)
cv2.imshow('Output', edges)
k = cv2.waitKey(30) & 0xff
if k == 27:
break
cap.release()
cv2.destroyAllWindows()
|
nilq/baby-python
|
python
|
import argparse
import logging
import string
import jsonlines
from Levenshtein import distance
from tqdm.auto import tqdm
from src.models.bart_seq2seq_kilt import BartSeq2Seq
from src.models.bert_binary_kilt import BertBinary
from src.utils import batch_it, chunk_it
def normalize(sent):
return (
sent.lower()
.replace(" ", "")
.translate(str.maketrans("", "", string.punctuation))
)
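# e.g. normalize("The Cat.") == "thecat": lowercased, spaces removed,
# punctuation stripped.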
def predictions_and_alternatives(model, sentences, binary):
if binary:
return [
(
p[0],
["SUPPORTS" if p[0] == "REFUTES" else "REFUTES"],
p[1],
)
for p in model.sample(sentences)
]
else:
return [
(
p[0],
list(
set(
[
a.replace(".", "")
for a in p[1:]
if (len(a) < 5 and normalize(p[0]) != normalize(a))
or distance(normalize(p[0]), normalize(a)) > 4
]
).difference({p[0]})
),
None,
)
for p in batch_it(
model.sample(
sentences,
min_length=0,
num_beams=5,
num_return_sequences=5,
),
5,
)
]
def filtered_rephrases(model, input_, rephrases, binary):
pred = model.sample(
[input_] + rephrases,
min_length=0,
num_beams=5,
num_return_sequences=1,
)
if binary:
return [r for p, r in zip(pred[1:], rephrases) if p[0] == pred[0][0]]
else:
return [
r for p, r in zip(pred[1:], rephrases) if normalize(p) == normalize(pred[0])
]
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument(
"--input_filename",
type=str,
help="Filename of the KILT dataset",
default="../datasets/structured_zeroshot-dev-new.jsonl",
)
parser.add_argument(
"--output_filename",
type=str,
help="Filename of the KILT dataset",
default="../datasets/structured_zeroshot-dev-new_annotated.jsonl",
)
parser.add_argument(
"--model",
type=str,
help="Filename of the model",
default="models/bart_seq2seq_structured_zeroshot/version_0/checkpoints/model-epoch=17-valid_acc=0.2207.ckpt",
)
parser.add_argument(
"--device",
type=str,
default="cuda:0",
)
parser.add_argument(
"--batch_size",
type=int,
default=12,
)
parser.add_argument(
"--binary",
action="store_true",
)
parser.add_argument(
"-d",
"--debug",
help="Print lots of debugging statements",
action="store_const",
dest="loglevel",
const=logging.DEBUG,
default=logging.WARNING,
)
parser.add_argument(
"-v",
"--verbose",
help="Be verbose",
action="store_const",
dest="loglevel",
const=logging.INFO,
)
args, _ = parser.parse_known_args()
logging.basicConfig(
level=args.loglevel,
format="%(asctime)s | %(levelname)s | %(name)s | %(message)s",
)
logging.info("Loading model")
if args.binary:
model = (
BertBinary.load_from_checkpoint(args.model, strict=False)
.eval()
.to(args.device)
)
else:
model = (
BartSeq2Seq.load_from_checkpoint(args.model, strict=False)
.eval()
.to(args.device)
)
model.freeze()
filename = args.input_filename
logging.info("Loading {}".format(filename))
with jsonlines.open(filename) as f:
dataset = list(f)
if not args.binary:
dataset = [
{**d, "input": q} for d in dataset for q in d["meta"]["template_questions"]
]
for docs in batch_it(tqdm(dataset, desc="Predicting"), args.batch_size):
for d, (p, a, l) in zip(
docs,
predictions_and_alternatives(
model,
[d["input"] for d in docs],
args.binary,
),
):
d["prediction"] = p
d["alternatives"] = a
d["filtered_rephrases"] = filtered_rephrases(
model,
d["input"],
d["rephrases"],
args.binary,
)
if l:
d["logit"] = l.item()
filename = args.output_filename
logging.info("Saving {}".format(filename))
with jsonlines.open(filename, "w") as f:
f.write_all(dataset)
|
nilq/baby-python
|
python
|
from ._base import *
from ..tinygrail.bigc import BigC
from ..tinygrail.model import TBid
@click.command()
@click.argument("player_name", type=TG_PLAYER)
@click.argument("character_ids", type=int, nargs=-1)
def force_view(player_name, character_ids):
for cid in character_ids:
big_c = BigC(player_name, cid)
big_c.create_bid(TBid(Price=2, Amount=2))
|
nilq/baby-python
|
python
|
from algosdk.v2client.indexer import IndexerClient
from algosdk.v2client.algod import AlgodClient
from tinyman.v1.client import TinymanMainnetClient
from tinyman.v1.pools import get_pool_info_from_account_info
import datetime
import statistics
class AlgoTools:
def __init__(self, address = None):
### Setup Stuff ###
self.indexer_address = 'https://algoexplorerapi.io/idx2'
self.indexer_token = ''
self.algod_address = 'https://algoexplorerapi.io'
self.algod_token = ''
self.address = address
# Set up API instances
self.indexer_client = IndexerClient(self.indexer_token, self.indexer_address, headers={'User-Agent': 'algosdk'})
self.algod_client = AlgodClient(self.algod_token, self.algod_address, headers={'User-Agent': 'algosdk'})
self.tiny = TinymanMainnetClient(algod_client=self.algod_client, user_address=self.address)
### End Setup ###
### Start Functions ###
def GetPools(self, address):
# Creates a dict of all tinyman pools associated with address.
# Contents of each pool will have:
# 'pair_name'
# 'pool_id'
# 'asset1'
# 'asset2'
all_pools = {}
tp = 0
algod = self.algod_client.account_info(address)
for asset in algod['assets']:
# Look for tinyman assets and pull pools.
try:
asset_info = self.algod_client.asset_info(asset['asset-id'])
except:
continue
asset_name = asset_info['params']['name']
if 'Tinyman Pool' in asset_name:
tinypool = {}
pool_info = self.algod_client.account_info(asset_info['params']['creator'])
pool = get_pool_info_from_account_info(pool_info)
asset1 = self.tiny.fetch_asset(pool['asset1_id'])
asset2 = self.tiny.fetch_asset(pool['asset2_id'])
tinypool['pair_name'] = asset_name
tinypool['pool_id'] = pool['liquidity_asset_id']
tinypool['asset1'] = asset1
tinypool['asset2'] = asset2
all_pools[tp] = tinypool
tp = tp+1
del tinypool
return all_pools
#####
    def ConvertDate(self, date):
        if isinstance(date, str):
            newdate = datetime.datetime.strptime(date, '%Y-%m-%d %H:%M:%S')
        elif isinstance(date, datetime.datetime):
            newdate = date
        else:
            raise TypeError('date must be a str or datetime.datetime')
        newstrdate = str(newdate.day) + '-' + str(newdate.month) + '-' + str(newdate.year)
        return newstrdate
#####
    def CalculateAPY(self, value_start, value_now, day1, today=None):
        # Not quite ready for prime time
        if today is None:
            # Evaluate "now" at call time, not once at function-definition time
            today = datetime.datetime.now()
        if isinstance(day1, str):
            day1_dt = datetime.datetime.strptime(day1, '%d-%m-%Y')
        else:
            day1_dt = day1
        deltadate = today - day1_dt
        # Annualize the total return: scale by 365 / elapsed days
        APY = ((value_now / value_start) - 1) * 365 / deltadate.days
        return APY
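    # Worked example (hypothetical values): value_start=100 on day1='01-01-2023'
    # and value_now=110 ninety days later gives
    # APY = ((110/100) - 1) * 365/90 ≈ 0.4056, i.e. roughly a 40.6% annualized return.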
#####
def GetPriceFromPool(self, ASSET, block_id = 0, num_blocks = 133): # 133 ~ +/-10 minutes from transaction
ALGO = self.tiny.fetch_asset(0)
pool = self.tiny.fetch_pool(ALGO, ASSET)
if block_id == 0:
# Current price
quote = pool.fetch_fixed_input_swap_quote(ALGO(1_000_000), slippage=0.01)
asset_price = 1/(quote.amount_out.amount * 10**(-ASSET.decimals))
else:
tx_past = self.indexer_client.search_transactions_by_address(pool.address,
min_round = block_id-num_blocks,
max_round = block_id+num_blocks)
groupID_last = None
algo_per_asset = []
asset_amt = 0
algo_amt = 0
for tx in tx_past['transactions']:
if 'group' not in tx:
# Skip if tx is not part of a group
continue
elif asset_amt != 0 and algo_amt != 0:
# After getting an asset value and algo value, calculate the price
algo_per_asset.append(algo_amt / asset_amt)
continue
elif tx['group'] != groupID_last:
# Start a new group transaction to calculate price
groupID_last = tx['group']
asset_amt = 0
algo_amt = 0
else:
if tx['tx-type'] == 'axfer':
if tx['asset-transfer-transaction']['asset-id'] == ASSET.id:
asset_amt = tx['asset-transfer-transaction']['amount'] * 10**(-ASSET.decimals)
elif tx['tx-type'] == 'pay':
# Check if the value is >A0.01 as this would most likely be a fee
if tx['payment-transaction']['amount'] >= 1e4:
algo_amt = tx['payment-transaction']['amount'] * 10**(-ALGO.decimals)
if len(algo_per_asset) < 10: # Use minimum 10 txns to get an average
if num_blocks >= 3192:
# Stops trying after timespan = 8 hours (+/-4 hours)
print('Could not find enough transactions to estimate price.')
asset_price = -1
else:
# Keep adding +/-10 minutes until we get enough data
print('Time band: +/-' + str(num_blocks/13.3 + 10) + ' minutes')
asset_price = self.GetPriceFromPool(ASSET, block_id, num_blocks+133)
else:
# Use the median to calculate the price to ensure lopsided trades are not included
asset_price = statistics.median(algo_per_asset)
return asset_price
#####
def ALGOtoUSD(self, price_in_algo, usdc_price_algo, usdt_price_algo):
usd_price_algo = (usdc_price_algo + usdt_price_algo) / 2
# Average of usdc and usdt in case one of them is a bit off from the dollar
asset_price_usd = price_in_algo / usd_price_algo
return asset_price_usd
### End Functions ###
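# A minimal usage sketch (the address is a hypothetical placeholder):
#
#   tools = AlgoTools(address='YOUR_ALGORAND_ADDRESS')
#   pools = tools.GetPools(tools.address)
#   for idx, pool in pools.items():
#       print(pool['pair_name'], pool['pool_id'])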
|
nilq/baby-python
|
python
|
#!/usr/bin/env python
import unittest
class test_sample_hook(unittest.TestCase):
def test_nothing(self):
#do nothing
return
|
nilq/baby-python
|
python
|
# coding: utf-8
import responses
import os
import json
import io
import watson_developer_cloud
from watson_developer_cloud.discovery_v1 import TrainingDataSet, TrainingQuery, TrainingExample
try:
from urllib.parse import urlparse, urljoin
except ImportError:
from urlparse import urlparse, urljoin
base_discovery_url = 'https://gateway.watsonplatform.net/discovery/api/v1/'
platform_url = 'https://gateway.watsonplatform.net'
service_path = '/discovery/api'
base_url = '{0}{1}'.format(platform_url, service_path)
version = '2016-12-01'
environment_id = 'envid'
collection_id = 'collid'
@responses.activate
def test_environments():
discovery_url = urljoin(base_discovery_url, 'environments')
discovery_response_body = """{
"environments": [
{
"environment_id": "string",
"name": "envname",
"description": "",
"created": "2016-11-20T01:03:17.645Z",
"updated": "2016-11-20T01:03:17.645Z",
"status": "status",
"index_capacity": {
"disk_usage": {
"used_bytes": 0,
"total_bytes": 0,
"used": "string",
"total": "string",
"percent_used": 0
},
"memory_usage": {
"used_bytes": 0,
"total_bytes": 0,
"used": "string",
"total": "string",
"percent_used": 0
}
}
}
]
}"""
responses.add(responses.GET, discovery_url,
body=discovery_response_body, status=200,
content_type='application/json')
discovery = watson_developer_cloud.DiscoveryV1('2016-11-07',
username='username',
password='password')
discovery.list_environments()
url_str = "{0}?version=2016-11-07".format(discovery_url)
assert responses.calls[0].request.url == url_str
assert responses.calls[0].response.text == discovery_response_body
assert len(responses.calls) == 1
@responses.activate
def test_get_environment():
discovery_url = urljoin(base_discovery_url, 'environments/envid')
responses.add(responses.GET, discovery_url,
body="{\"resulting_key\": true}", status=200,
content_type='application/json')
discovery = watson_developer_cloud.DiscoveryV1('2016-11-07',
username='username',
password='password')
discovery.get_environment(environment_id='envid')
url_str = "{0}?version=2016-11-07".format(discovery_url)
assert responses.calls[0].request.url == url_str
assert len(responses.calls) == 1
@responses.activate
def test_create_environment():
discovery_url = urljoin(base_discovery_url, 'environments')
responses.add(responses.POST, discovery_url,
body="{\"resulting_key\": true}", status=200,
content_type='application/json')
discovery = watson_developer_cloud.DiscoveryV1('2016-11-07',
username='username',
password='password')
discovery.create_environment(name="my name", description="my description")
assert len(responses.calls) == 1
@responses.activate
def test_update_environment():
discovery_url = urljoin(base_discovery_url, 'environments/envid')
responses.add(responses.PUT, discovery_url,
body="{\"resulting_key\": true}", status=200,
content_type='application/json')
discovery = watson_developer_cloud.DiscoveryV1('2016-11-07',
username='username',
password='password')
discovery.update_environment('envid', name="hello", description="new")
assert len(responses.calls) == 1
@responses.activate
def test_delete_environment():
discovery_url = urljoin(base_discovery_url, 'environments/envid')
responses.add(responses.DELETE, discovery_url,
body="{\"resulting_key\": true}", status=200,
content_type='application/json')
discovery = watson_developer_cloud.DiscoveryV1('2016-11-07',
username='username',
password='password')
discovery.delete_environment('envid')
assert len(responses.calls) == 1
@responses.activate
def test_collections():
discovery_url = urljoin(base_discovery_url,
'environments/envid/collections')
responses.add(responses.GET, discovery_url,
body="{\"body\": \"hello\"}", status=200,
content_type='application/json')
discovery = watson_developer_cloud.DiscoveryV1('2016-11-07',
username='username',
password='password')
discovery.list_collections('envid')
called_url = urlparse(responses.calls[0].request.url)
test_url = urlparse(discovery_url)
assert called_url.netloc == test_url.netloc
assert called_url.path == test_url.path
assert len(responses.calls) == 1
@responses.activate
def test_collection():
discovery_url = urljoin(base_discovery_url,
'environments/envid/collections/collid')
discovery_fields = urljoin(base_discovery_url,
'environments/envid/collections/collid/fields')
config_url = urljoin(base_discovery_url,
'environments/envid/configurations')
responses.add(responses.GET, config_url,
body="{\"body\": \"hello\"}",
status=200,
content_type='application/json')
responses.add(responses.GET, discovery_fields,
body="{\"body\": \"hello\"}", status=200,
content_type='application/json')
responses.add(responses.GET, discovery_url,
body="{\"body\": \"hello\"}", status=200,
content_type='application/json')
responses.add(responses.DELETE, discovery_url,
body="{\"body\": \"hello\"}", status=200,
content_type='application/json')
responses.add(responses.POST,
urljoin(base_discovery_url,
'environments/envid/collections'),
body="{\"body\": \"create\"}",
status=200,
content_type='application/json')
discovery = watson_developer_cloud.DiscoveryV1('2016-11-07',
username='username',
password='password')
discovery.create_collection(environment_id='envid',
name="name",
description="",
language="",
configuration_id='confid')
discovery.create_collection(environment_id='envid',
name="name",
language="es",
description="")
discovery.get_collection('envid', 'collid')
called_url = urlparse(responses.calls[2].request.url)
test_url = urlparse(discovery_url)
assert called_url.netloc == test_url.netloc
assert called_url.path == test_url.path
discovery.delete_collection(environment_id='envid',
collection_id='collid')
discovery.list_collection_fields(environment_id='envid',
collection_id='collid')
assert len(responses.calls) == 5
@responses.activate
def test_query():
discovery_url = urljoin(base_discovery_url,
'environments/envid/collections/collid/query')
responses.add(responses.GET, discovery_url,
body="{\"body\": \"hello\"}", status=200,
content_type='application/json')
discovery = watson_developer_cloud.DiscoveryV1('2016-11-07',
username='username',
password='password')
discovery.query('envid', 'collid', {'count': 10})
called_url = urlparse(responses.calls[0].request.url)
test_url = urlparse(discovery_url)
assert called_url.netloc == test_url.netloc
assert called_url.path == test_url.path
assert len(responses.calls) == 1
@responses.activate
def test_query_relations():
discovery_url = urljoin(
base_discovery_url,
'environments/envid/collections/collid/query_relations')
responses.add(
responses.POST,
discovery_url,
body="{\"body\": \"hello\"}",
status=200,
content_type='application/json')
discovery = watson_developer_cloud.DiscoveryV1(
'2016-11-07', username='username', password='password')
discovery.query_relations('envid', 'collid', count=10)
called_url = urlparse(responses.calls[0].request.url)
test_url = urlparse(discovery_url)
assert called_url.netloc == test_url.netloc
assert called_url.path == test_url.path
assert len(responses.calls) == 1
@responses.activate
def test_query_entities():
discovery_url = urljoin(
base_discovery_url,
'environments/envid/collections/collid/query_entities')
responses.add(
responses.POST,
discovery_url,
body="{\"body\": \"hello\"}",
status=200,
content_type='application/json')
discovery = watson_developer_cloud.DiscoveryV1(
'2016-11-07', username='username', password='password')
discovery.query_entities('envid', 'collid', {'count': 10})
called_url = urlparse(responses.calls[0].request.url)
test_url = urlparse(discovery_url)
assert called_url.netloc == test_url.netloc
assert called_url.path == test_url.path
assert len(responses.calls) == 1
@responses.activate
def test_configs():
discovery_url = urljoin(base_discovery_url,
'environments/envid/configurations')
discovery_config_id = urljoin(base_discovery_url,
'environments/envid/configurations/confid')
results = {"configurations":
[{"name": "Default Configuration",
"configuration_id": "confid"}]}
responses.add(responses.GET, discovery_url,
body=json.dumps(results),
status=200,
content_type='application/json')
responses.add(responses.GET, discovery_config_id,
body=json.dumps(results['configurations'][0]),
status=200,
content_type='application/json')
responses.add(responses.POST, discovery_url,
body=json.dumps(results['configurations'][0]),
status=200,
content_type='application/json')
responses.add(responses.PUT, discovery_config_id,
body=json.dumps(results['configurations'][0]),
status=200,
content_type='application/json')
responses.add(responses.DELETE, discovery_config_id,
body=json.dumps({'deleted': 'bogus -- ok'}),
status=200,
content_type='application/json')
discovery = watson_developer_cloud.DiscoveryV1('2016-11-07',
username='username',
password='password')
discovery.list_configurations(environment_id='envid')
discovery.get_configuration(environment_id='envid',
configuration_id='confid')
assert len(responses.calls) == 2
discovery.create_configuration(environment_id='envid',
name='my name')
discovery.update_configuration(environment_id='envid',
configuration_id='confid',
name='my new name')
discovery.delete_configuration(environment_id='envid',
configuration_id='confid')
assert len(responses.calls) == 5
@responses.activate
def test_document():
discovery_url = urljoin(base_discovery_url,
'environments/envid/preview')
config_url = urljoin(base_discovery_url,
'environments/envid/configurations')
responses.add(responses.POST, discovery_url,
body="{\"configurations\": []}",
status=200,
content_type='application/json')
responses.add(responses.GET, config_url,
body=json.dumps({"configurations":
[{"name": "Default Configuration",
"configuration_id": "confid"}]}),
status=200,
content_type='application/json')
discovery = watson_developer_cloud.DiscoveryV1('2016-11-07',
username='username',
password='password')
html_path = os.path.join(os.getcwd(), 'resources', 'simple.html')
with open(html_path) as fileinfo:
conf_id = discovery.test_configuration_in_environment(environment_id='envid',
configuration_id='bogus',
file=fileinfo)
assert conf_id is not None
conf_id = discovery.test_configuration_in_environment(environment_id='envid',
file=fileinfo)
assert conf_id is not None
assert len(responses.calls) == 2
add_doc_url = urljoin(base_discovery_url,
'environments/envid/collections/collid/documents')
doc_id_path = 'environments/envid/collections/collid/documents/docid'
update_doc_url = urljoin(base_discovery_url, doc_id_path)
del_doc_url = urljoin(base_discovery_url,
doc_id_path)
responses.add(responses.POST, add_doc_url,
body="{\"body\": []}",
status=200,
content_type='application/json')
doc_status = {
"document_id": "45556e23-f2b1-449d-8f27-489b514000ff",
"configuration_id": "2e079259-7dd2-40a9-998f-3e716f5a7b88",
"created" : "2016-06-16T10:56:54.957Z",
"updated" : "2017-05-16T13:56:54.957Z",
"status": "available",
"status_description": "Document is successfully ingested and indexed with no warnings",
"notices": []
}
responses.add(responses.GET, del_doc_url,
body=json.dumps(doc_status),
status=200,
content_type='application/json')
responses.add(responses.POST, update_doc_url,
body="{\"body\": []}",
status=200,
content_type='application/json')
responses.add(responses.DELETE, del_doc_url,
body="{\"body\": []}",
status=200,
content_type='application/json')
html_path = os.path.join(os.getcwd(), 'resources', 'simple.html')
with open(html_path) as fileinfo:
conf_id = discovery.add_document(environment_id='envid',
collection_id='collid',
file=fileinfo)
assert conf_id is not None
assert len(responses.calls) == 3
discovery.get_document_status(environment_id='envid',
collection_id='collid',
document_id='docid')
assert len(responses.calls) == 4
discovery.update_document(environment_id='envid',
collection_id='collid',
document_id='docid')
assert len(responses.calls) == 5
discovery.update_document(environment_id='envid',
collection_id='collid',
document_id='docid')
assert len(responses.calls) == 6
discovery.delete_document(environment_id='envid',
collection_id='collid',
document_id='docid')
assert len(responses.calls) == 7
conf_id = discovery.add_document(environment_id='envid',
collection_id='collid',
file=io.StringIO(u'my string of file'),
filename='file.txt')
assert len(responses.calls) == 8
conf_id = discovery.add_document(environment_id='envid',
collection_id='collid',
file=io.StringIO(u'<h1>my string of file</h1>'),
filename='file.html',
file_content_type='application/html')
assert len(responses.calls) == 9
conf_id = discovery.add_document(environment_id='envid',
collection_id='collid',
file=io.StringIO(u'<h1>my string of file</h1>'),
filename='file.html',
file_content_type='application/html',
metadata=io.StringIO(u'{"stuff": "woot!"}'))
assert len(responses.calls) == 10
@responses.activate
def test_delete_all_training_data():
training_endpoint = '/v1/environments/{0}/collections/{1}/training_data'
endpoint = training_endpoint.format(environment_id, collection_id)
url = '{0}{1}'.format(base_url, endpoint)
responses.add(responses.DELETE, url, status=204)
service = watson_developer_cloud.DiscoveryV1(version,
username='username',
password='password')
response = service.delete_all_training_data(environment_id=environment_id,
collection_id=collection_id)
assert response is None
@responses.activate
def test_list_training_data():
training_endpoint = '/v1/environments/{0}/collections/{1}/training_data'
endpoint = training_endpoint.format(environment_id, collection_id)
url = '{0}{1}'.format(base_url, endpoint)
mock_response = {
"environment_id": "string",
"collection_id": "string",
"queries": [
{
"query_id": "string",
"natural_language_query": "string",
"filter": "string",
"examples": [
{
"document_id": "string",
"cross_reference": "string",
"relevance": 0
}
]
}
]
}
responses.add(responses.GET,
url,
body=json.dumps(mock_response),
status=200,
content_type='application/json')
service = watson_developer_cloud.DiscoveryV1(version,
username='username',
password='password')
response = service.list_training_data(environment_id=environment_id,
collection_id=collection_id)
assert response == mock_response
# Verify that response can be converted to a TrainingDataSet
TrainingDataSet._from_dict(response)
@responses.activate
def test_add_training_data():
training_endpoint = '/v1/environments/{0}/collections/{1}/training_data'
endpoint = training_endpoint.format(environment_id, collection_id)
url = '{0}{1}'.format(base_url, endpoint)
natural_language_query = "why is the sky blue"
filter = "text:meteorology"
examples = [
{
"document_id": "54f95ac0-3e4f-4756-bea6-7a67b2713c81",
"relevance": 1
},
{
"document_id": "01bcca32-7300-4c9f-8d32-33ed7ea643da",
"cross_reference": "my_id_field:1463",
"relevance": 5
}
]
mock_response = {
"query_id": "string",
"natural_language_query": "string",
"filter": "string",
"examples": [
{
"document_id": "string",
"cross_reference": "string",
"relevance": 0
}
]
}
responses.add(responses.POST,
url,
body=json.dumps(mock_response),
status=200,
content_type='application/json')
service = watson_developer_cloud.DiscoveryV1(version,
username='username',
password='password')
response = service.add_training_data(
environment_id=environment_id,
collection_id=collection_id,
natural_language_query=natural_language_query,
filter=filter,
examples=examples)
assert response == mock_response
# Verify that response can be converted to a TrainingQuery
TrainingQuery._from_dict(response)
@responses.activate
def test_delete_training_data():
training_endpoint = '/v1/environments/{0}/collections/{1}/training_data/{2}'
query_id = 'queryid'
endpoint = training_endpoint.format(
environment_id, collection_id, query_id)
url = '{0}{1}'.format(base_url, endpoint)
responses.add(responses.DELETE, url, status=204)
service = watson_developer_cloud.DiscoveryV1(version,
username='username',
password='password')
response = service.delete_training_data(environment_id=environment_id,
collection_id=collection_id,
query_id=query_id)
assert response is None
@responses.activate
def test_get_training_data():
training_endpoint = '/v1/environments/{0}/collections/{1}/training_data/{2}'
query_id = 'queryid'
endpoint = training_endpoint.format(
environment_id, collection_id, query_id)
url = '{0}{1}'.format(base_url, endpoint)
mock_response = {
"query_id": "string",
"natural_language_query": "string",
"filter": "string",
"examples": [
{
"document_id": "string",
"cross_reference": "string",
"relevance": 0
}
]
}
responses.add(responses.GET,
url,
body=json.dumps(mock_response),
status=200,
content_type='application/json')
service = watson_developer_cloud.DiscoveryV1(version,
username='username',
password='password')
response = service.get_training_data(environment_id=environment_id,
collection_id=collection_id,
query_id=query_id)
assert response == mock_response
# Verify that response can be converted to a TrainingQuery
TrainingQuery._from_dict(response)
@responses.activate
def test_create_training_example():
examples_endpoint = '/v1/environments/{0}/collections/{1}/training_data' + \
'/{2}/examples'
query_id = 'queryid'
endpoint = examples_endpoint.format(
environment_id, collection_id, query_id)
url = '{0}{1}'.format(base_url, endpoint)
document_id = "string"
relevance = 0
cross_reference = "string"
mock_response = {
"document_id": "string",
"cross_reference": "string",
"relevance": 0
}
responses.add(responses.POST,
url,
body=json.dumps(mock_response),
status=201,
content_type='application/json')
service = watson_developer_cloud.DiscoveryV1(version,
username='username',
password='password')
response = service.create_training_example(
environment_id=environment_id,
collection_id=collection_id,
query_id=query_id,
document_id=document_id,
relevance=relevance,
cross_reference=cross_reference)
assert response == mock_response
# Verify that response can be converted to a TrainingExample
TrainingExample._from_dict(response)
@responses.activate
def test_delete_training_example():
examples_endpoint = '/v1/environments/{0}/collections/{1}/training_data' + \
'/{2}/examples/{3}'
query_id = 'queryid'
example_id = 'exampleid'
endpoint = examples_endpoint.format(environment_id,
collection_id,
query_id,
example_id)
url = '{0}{1}'.format(base_url, endpoint)
responses.add(responses.DELETE, url, status=204)
service = watson_developer_cloud.DiscoveryV1(version,
username='username',
password='password')
response = service.delete_training_example(
environment_id=environment_id,
collection_id=collection_id,
query_id=query_id,
example_id=example_id)
assert response is None
@responses.activate
def test_get_training_example():
examples_endpoint = '/v1/environments/{0}/collections/{1}/training_data' + \
'/{2}/examples/{3}'
query_id = 'queryid'
example_id = 'exampleid'
endpoint = examples_endpoint.format(environment_id,
collection_id,
query_id,
example_id)
url = '{0}{1}'.format(base_url, endpoint)
mock_response = {
"document_id": "string",
"cross_reference": "string",
"relevance": 0
}
responses.add(responses.GET,
url,
body=json.dumps(mock_response),
status=200,
content_type='application/json')
service = watson_developer_cloud.DiscoveryV1(version,
username='username',
password='password')
response = service.get_training_example(
environment_id=environment_id,
collection_id=collection_id,
query_id=query_id,
example_id=example_id)
assert response == mock_response
# Verify that response can be converted to a TrainingExample
TrainingExample._from_dict(response)
@responses.activate
def test_update_training_example():
examples_endpoint = '/v1/environments/{0}/collections/{1}/training_data' + \
'/{2}/examples/{3}'
query_id = 'queryid'
example_id = 'exampleid'
endpoint = examples_endpoint.format(environment_id,
collection_id,
query_id,
example_id)
url = '{0}{1}'.format(base_url, endpoint)
relevance = 0
cross_reference = "string"
mock_response = {
"document_id": "string",
"cross_reference": "string",
"relevance": 0
}
responses.add(responses.PUT,
url,
body=json.dumps(mock_response),
status=200,
content_type='application/json')
service = watson_developer_cloud.DiscoveryV1(version,
username='username',
password='password')
response = service.update_training_example(
environment_id=environment_id,
collection_id=collection_id,
query_id=query_id,
example_id=example_id,
relevance=relevance,
cross_reference=cross_reference)
assert response == mock_response
# Verify that response can be converted to a TrainingExample
TrainingExample._from_dict(response)
@responses.activate
def test_expansions():
url = 'https://gateway.watsonplatform.net/discovery/api/v1/environments/envid/collections/colid/expansions'
responses.add(
responses.GET,
url,
body='{"expansions": "results"}',
status=200,
        content_type='application/json')
responses.add(
responses.DELETE,
url,
body='{"description": "success" }',
status=200,
        content_type='application/json')
responses.add(
responses.POST,
url,
body='{"expansions": "success" }',
status=200,
        content_type='application/json')
discovery = watson_developer_cloud.DiscoveryV1('2017-11-07', username="username", password="password")
discovery.list_expansions('envid', 'colid')
assert responses.calls[0].response.json() == {"expansions": "results"}
discovery.create_expansions('envid', 'colid', [{"input_terms": "dumb", "expanded_terms": "dumb2"}])
assert responses.calls[1].response.json() == {"expansions": "success"}
discovery.delete_expansions('envid', 'colid')
assert responses.calls[2].response.json() == {"description": "success"}
assert len(responses.calls) == 3
|
nilq/baby-python
|
python
|
import glob
import datetime
import pandas as pd
current_year = datetime.datetime.today().year
def age_binner(age):
if age < 5:
return "04 and under"
elif 5 <= age <= 9:
return "05 to 09 years"
elif 10 <= age <= 14:
return "10 to 14 years"
elif 15 <= age <= 19:
return "15 to 19 years"
elif 20 <= age <= 24:
return "20 to 24 years"
elif 25 <= age <= 29:
return "25 to 29 years"
elif 30 <= age <= 34:
return "30 to 34 years"
elif 35 <= age <= 39:
return "35 to 39 years"
elif 40 <= age <= 44:
return "40 to 44 years"
elif 45 <= age <= 49:
return "45 to 49 years"
elif 50 <= age <= 54:
return "50 to 54 years"
elif 55 <= age <= 59:
return "55 to 59 years"
elif 60 <= age <= 64:
return "60 to 64 years"
elif 65 <= age <= 69:
return "65 to 69 years"
elif 70 <= age <= 74:
return "70 to 74 years"
elif 75 <= age <=79:
return "75 to 79 years"
elif 80 <= age <=84:
return "80 to 84 years"
else:
return "85 years and over"
def get_data():
    d = {}
    columns = ['state', 'sex', 'year', 'name', 'occurrences']
    for file in glob.glob('namesbystate/*.TXT'):
        print(file)
        state = file.replace(".TXT", "").replace('namesbystate/', '')
        df = pd.read_csv(file, names=columns, header=None)
        df['current_age'] = current_year - df['year']
        df['age_bin'] = df['current_age'].apply(age_binner)
        df['name'] = df['name'].str.lower()
        d[state] = df[['age_bin', 'name', 'sex', 'occurrences']].groupby(['age_bin', 'name', 'sex']).sum()
    # NOTE: pd.Panel was removed in pandas 0.25; this function needs an older pandas.
    by_state = pd.Panel.from_dict(d)
    total = by_state.minor_xs('occurrences').sum(1)
    return by_state, total
class DiscreteDistribution(object):
def __init__(self, prior):
self.posterior = prior
self._prior = prior
self.n = 0
def update(self, p):
#P(age_bin) = P(age_bin | 'Sara')*P('Sara' | Alive)*P(Alive) + ..
# boils down to the below formula.
self.posterior = p.fillna(0) + self.posterior
return
def normalize_vector(v):
return v/v.sum()
def name_distribution(name, data, prior):
try:
return normalize_vector(data.ix[name,:, :]['occurrences'].sum(level='age_bin').reindex(prior.index))
except KeyError:
return prior
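# A minimal sketch of the intended Bayesian update (exact indexing depends on
# the legacy pd.Panel layout, so treat this as illustrative only):
#
#   by_state, total = get_data()
#   prior = normalize_vector(total)          # population-wide prior over age bins
#   dist = DiscreteDistribution(prior)
#   dist.update(name_distribution('sara', by_state, prior))
#   posterior = normalize_vector(dist.posterior)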
|
nilq/baby-python
|
python
|
class MethodsManager:
"""My Methods Manager
"""
def __init__(self):
self.heap = {}
    def insert(self, elems):
        """Insert for main
        Args:
            elems (list): Tokens from user input
        """
        if elems[1][0].isupper():
            name = elems[1]
            # Does it declare a superclass?
            if len(elems) > 2 and elems[2] == ":":
                if self.have(elems[3]):
                    super_class = elems[3]
                    methods = elems[4:]
                    self.insert_simple(name, super_class, *methods)
                    str_methods = ' '.join(map(str, methods))
                    print(f"Created {name} with methods {str_methods}\n")
                else:
                    print(f"Error: {elems[3]} is not a declared class\n")
            # No superclass declared
            else:
                super_class = None
                methods = elems[2:]
                self.insert_simple(name, super_class, *methods)
                str_methods = ' '.join(map(str, methods))
                print(f"Created {name} with methods {str_methods}\n")
        else:
            print("Error: Class names must start with an uppercase letter\n")
    def insert_simple(self, name, super_class, *methods):
        """Store a class entry in the heap
        Args:
            name (string): Name of the class
            super_class (string): Name of the superclass, or None
        """
        elem = {"super": super_class, "methods": [*methods]}
        self.heap[name] = elem
    def have(self, name):
        """Check whether a class with this name exists
        Args:
            name (string): Name of the class
        Returns:
            bool: True if name is in the heap, else False
        """
        return name in self.heap
    def search_methods(self, name):
        """List the methods of a class, including inherited ones
        Args:
            name (string): Name of the class
        Returns:
            string: One line per method with the ancestor that defines it
        """
        if self.have(name):
            base = self.heap[name]
            ancestors = [name]
            # Search for ancestors
            while base["super"] is not None:
                ancestors.append(base["super"])
                base = self.heap[base["super"]]
            # Oldest first, so overriding subclasses win below
            ancestors = ancestors[::-1]
            methods_with_ancestor = {}
            # For each ancestor, record its methods
            for ancestor in ancestors:
                methods = self.heap[ancestor]["methods"]
                for method in methods:
                    methods_with_ancestor[method] = ancestor
            # Pretty print
            response = ""
            for method in methods_with_ancestor:
                response = response + f"{method} -> {methods_with_ancestor[method]} :: {method}\n"
            return response
        else:
            return None
def __str__(self):
return str(self.heap)
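# A minimal usage sketch (the leading token is assumed to be the command word
# consumed by the REPL that calls insert()):
#
#   mgr = MethodsManager()
#   mgr.insert(['CLASS', 'Animal', 'eat', 'sleep'])        # no superclass
#   mgr.insert(['CLASS', 'Dog', ':', 'Animal', 'bark'])    # Dog inherits Animal
#   print(mgr.search_methods('Dog'))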
|
nilq/baby-python
|
python
|
from dataclasses import asdict
from dataclasses import dataclass
from dataclasses import field
from typing import List
from unittest import mock
from unittest.case import TestCase
from lxml.etree import Element
from lxml.etree import QName
from tests.fixtures.books import BookForm
from tests.fixtures.books import Books
from xsdata.exceptions import ParserError
from xsdata.formats.dataclass.parsers.config import ParserConfig
from xsdata.formats.dataclass.parsers.nodes import PrimitiveNode
from xsdata.formats.dataclass.parsers.nodes import RootNode
from xsdata.formats.dataclass.parsers.nodes import SkipNode
from xsdata.formats.dataclass.parsers.xml import XmlParser
from xsdata.models.enums import EventType
class XmlParserTests(TestCase):
def setUp(self):
super(XmlParserTests, self).setUp()
self.parser = XmlParser()
self.parser.index = 10
self.parser.objects = [(QName(x), x) for x in "abcde"]
def test_parse_context_raises_exception(self):
with self.assertRaises(ParserError) as cm:
self.parser.parse_context([], Books)
self.assertEqual("Failed to create target class `Books`", str(cm.exception))
def test_add_namespace(self):
self.parser.add_namespace(("foo", "bar"))
self.assertEqual({"foo": "bar"}, self.parser.namespaces.ns_map)
@mock.patch.object(RootNode, "next_node")
@mock.patch.object(XmlParser, "emit_event")
def test_queue(self, mock_emit_event, mock_next_node):
primitive_node = PrimitiveNode(position=1, types=[int])
mock_next_node.return_value = primitive_node
element = Element("{urn:books}books")
config = ParserConfig()
root_queue_item = RootNode(
position=0,
meta=self.parser.context.build(Books),
default=None,
config=config,
)
objects = list()
queue = list()
queue.append(root_queue_item)
self.parser.queue(element, queue, objects)
self.assertEqual(2, len(queue))
self.assertEqual(root_queue_item, queue[0])
self.assertEqual(primitive_node, queue[1])
mock_emit_event.assert_called_once_with(
EventType.START, element.tag, item=root_queue_item, element=element
)
@mock.patch.object(XmlParser, "emit_event")
@mock.patch.object(PrimitiveNode, "parse_element", return_value=("q", "result"))
def test_dequeue(self, mock_parse_element, mock_emit_event):
element = Element("author", nsmap={"prefix": "uri"})
element.text = "foobar"
objects = list()
queue = list()
queue.append(PrimitiveNode(position=0, types=[str], default=None))
result = self.parser.dequeue(element, queue, objects)
self.assertEqual("result", result)
self.assertEqual(0, len(queue))
self.assertEqual(("q", result), objects[-1])
mock_parse_element.assert_called_once_with(element, objects)
mock_emit_event.assert_called_once_with(
EventType.END, element.tag, obj=result, element=element
)
@mock.patch.object(XmlParser, "emit_event")
def test_dequeue_with_none_qname(self, mock_emit_event):
element = Element("author", nsmap={"prefix": "uri"})
element.text = "foobar"
objects = list()
queue = list()
queue.append(SkipNode(position=0))
result = self.parser.dequeue(element, queue, objects)
self.assertIsNone(result)
self.assertEqual(0, len(queue))
self.assertEqual(0, len(objects))
self.assertEqual(0, mock_emit_event.call_count)
def test_emit_event(self):
mock_func = mock.Mock()
self.parser.foo_bar_element = mock_func
self.parser.emit_event("foo", "{tns}barElement", a=1, b=2)
mock_func.assert_called_once_with(a=1, b=2)
self.assertEqual({"{tns}barElement": "bar_element"}, self.parser.event_names)
class XmlParserIntegrationTest(TestCase):
def setUp(self):
super(XmlParserIntegrationTest, self).setUp()
self.books = Books(
book=[
BookForm(
id="bk001",
author="Hightower, Kim",
title="The First Book",
genre="Fiction",
price=44.95,
pub_date="2000-10-01",
review="An amazing story of nothing.",
),
BookForm(
id="bk002",
author="Nagata, Suanne",
title="Becoming Somebody",
genre="Biography",
review="A masterpiece of the fine art of gossiping.",
),
]
)
def test_parse(self):
xml = (
'<?xml version="1.0" encoding="UTF-8"?>\n'
'<brk:books xmlns:brk="urn:books">\n'
' <book id="bk001">\n'
" <author>Hightower, Kim</author>\n"
" <title>The First Book</title>\n"
" <genre>Fiction</genre>\n"
" <price>44.95</price>\n"
" <pub_date>2000-10-01</pub_date>\n"
" <review>An amazing story of nothing.</review>\n"
" </book>\n"
' <book id="bk002">\n'
" <author>Nagata, Suanne</author>\n"
" <title>Becoming Somebody</title>\n"
" <genre>Biography</genre>\n"
" <review>A masterpiece of the fine art of gossiping.</review>\n"
" </book>\n"
"</brk:books>\n"
)
parser = XmlParser()
actual = parser.from_string(xml, Books)
self.assertEqual(self.books, actual)
self.assertEqual({"brk": "urn:books"}, parser.namespaces.ns_map)
def test_parse_with_fail_on_unknown_properties_false(self):
xml = (
'<?xml version="1.0" encoding="UTF-8"?>\n'
"<books>\n"
' <book id="bk001">\n'
" <author>Hightower, Kim</author>\n"
" <title>The First Book</title>\n"
" </book>\n"
' <book id="bk002">\n'
" <author>Nagata, Suanne</author>\n"
" <title>Becoming Somebody</title>\n"
" </book>\n"
"</books>\n"
)
@dataclass
class Book:
author: str = field(metadata=dict(type="Element"))
@dataclass
class MyBooks:
class Meta:
name = "books"
book: List[Book] = field(
default_factory=list, metadata=dict(type="Element")
)
config = ParserConfig(fail_on_unknown_properties=False)
parser = XmlParser(config=config)
actual = parser.from_string(xml, MyBooks)
expected = {
"book": [{"author": "Hightower, Kim"}, {"author": "Nagata, Suanne"}]
}
self.assertEqual(expected, asdict(actual))
|
nilq/baby-python
|
python
|
# Morando Nicolò
import pandas as pd
file_path = 'filepath.csv'
data = pd.read_csv(file_path)
data.describe()
|
nilq/baby-python
|
python
|
# 0611.py
"""
ref: https://gist.github.com/jsheedy/3913ab49d344fac4d02bcc887ba4277d
ref: http://felix.abecassis.me/2011/09/opencv-morphological-skeleton/
"""
import cv2
import numpy as np
#1
src = cv2.imread('./data/T.jpg', cv2.IMREAD_GRAYSCALE)
##src = cv2.imread('alphabet.bmp', cv2.IMREAD_GRAYSCALE)
##src = cv2.bitwise_not(src)
ret, A = cv2.threshold(src, 128, 255, cv2.THRESH_BINARY)
skel_dst = np.zeros(src.shape, np.uint8)
#2
shape1 = cv2.MORPH_CROSS
shape2 = cv2.MORPH_RECT  # alternative structuring-element shape (unused here)
B = cv2.getStructuringElement(shape=shape1, ksize=(3,3))
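# Morphological skeleton (Lantuejoul): S(A) is the union over k of
# (A eroded k times by B) minus the opening of that erosion by B.
# The loop below accumulates these residues in skel_dst until the
# eroded image is empty.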
done = True
while done:
erode = cv2.erode(A, B)
## opening = cv2.dilate(erode,B)
opening = cv2.morphologyEx(erode, cv2.MORPH_OPEN, B)
tmp = cv2.subtract(erode, opening) # cv2.absdiff(erode, opening)
skel_dst = cv2.bitwise_or(skel_dst, tmp)
A = erode.copy()
done = cv2.countNonZero(A) != 0
## cv2.imshow('opening', opening)
## cv2.imshow('tmp', tmp)
## cv2.imshow('skel_dst', skel_dst)
## cv2.waitKey()
cv2.imshow('src', src)
cv2.imshow('skel_dst', skel_dst)
cv2.waitKey()
cv2.destroyAllWindows()
|
nilq/baby-python
|
python
|
import dicom  # legacy pydicom (<1.0) module name; newer versions use `import pydicom`
import argparse
import pylab
import os
import tqdm
parser = argparse.ArgumentParser(description="Generate PNG images from DICOM files")
parser.add_argument("origin", help="Source path (a file or a directory)")
parser.add_argument("--output", "-o", help="Output path", default="./")
argv = parser.parse_args()
def get_path_filelist(path):
files = os.listdir(path)
file_list = []
for f in files:
if os.path.isfile(path + '/' + f):
if '.dcm' in f:
file_list.append(path + '/' + f)
return file_list
if os.path.isdir(argv.origin):
filelist = get_path_filelist(argv.origin)
else:
    if '.dcm' not in argv.origin:
        exit("Origin is not a .dcm file.")
filelist = [argv.origin]
for file in tqdm.tqdm(filelist):
dcm = dicom.read_file(file)
filename = os.path.basename(file).replace(".dcm", "")
pylab.imsave(argv.output + '/' + filename + '.png', dcm.pixel_array, cmap=pylab.cm.bone)
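# Example invocation (the script filename is hypothetical):
#   python dcm2png.py ./dicom_scans -o ./png_out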
|
nilq/baby-python
|
python
|
import platform
from selenium.webdriver import Chrome, DesiredCapabilities
from selenium.webdriver.chrome.options import Options
from tests.util.web.platform.browser.generic import ManagedBrowser
class ChromeManagedBrowser(
ManagedBrowser
):
"""
ChromeManagedBrowser provides a Chrome edition of ManagedTestBrowser
for use in Selenium based tests.
"""
def __init__(self, url: str):
"""
Initializes the ChromeManagedBrowser to anticipate sessions targeting
the provided URL.
:param url: The URL to target when establishing new sessions.
"""
super().__init__(
url
)
self.platform = "chrome"
def __str__(self):
return str(self.__repr__())
def __repr__(self):
return str(
{
'url': self.url,
'platform': self.platform,
'headless': self.headless,
'remote_browser': f"{self.remote_browser}:"
f"{self.remote_browser_port}",
'session_active': self.session_active(),
}
)
def _get_chrome_capabilities(self) -> DesiredCapabilities:
"""
Provides a DesiredCapabilities object suitable for a Chrome webdriver
session. Specifically:
- Permit insecure SSL certs, such as what might be used in dev
:return: A DesiredCapabilities object
"""
capabilities = DesiredCapabilities.CHROME.copy()
capabilities['acceptSslCerts'] = True
capabilities['acceptInsecureCerts'] = True
return capabilities
def _get_chrome_options(self) -> Options:
"""
Provides an Options object suitable for initializing a Chrome
webdriver session. Specifically:
- Disable notifications
- Do not check for default browser status
- Download permissions and preferences
- Safe browsing OFF
- Headless per ManagedTestBrowser setting
:return: An Options object
"""
opts = Options()
        # Options for user interaction and session tracing
        opts.add_argument("--enable-logging=stderr")
        opts.add_argument("--v=1")
        opts.add_argument("--disable-notifications")
        opts.add_argument("no-default-browser-check")
# Options affecting memory and storage
opts.add_argument("--no-sandbox")
opts.add_argument("--allow-no-sandbox-job")
opts.add_argument("--disable-dev-shm-usage")
opts.add_argument("download.prompt_for_download=False")
opts.add_argument('download.default_directory="/tmp/"')
# Options permitting local files to be read
opts.add_argument("safebrowsing.enabled=False")
# Options to reduce system hardware requirements
opts.add_argument("--disable-gpu")
if self.remote_browser:
if platform.system() in ["Windows"]:
opts.add_experimental_option(
"debuggerAddress",
f"localhost:{self.remote_browser_port}"
)
else:
opts.add_argument(
f"--remote-debugging-port={self.remote_browser_port}"
)
if self.headless or self.remote_browser:
opts.add_argument("--headless")
if self.headless:
opts.add_argument("--window-size=1600,1600")
return opts
def get_new_session(self):
"""
Overrides _get_browser_session to provide an initialized Chrome
webdriver object ready for a new session.
:return: A Chrome webdriver object
"""
return Chrome(
options=self._get_chrome_options(),
desired_capabilities=self._get_chrome_capabilities(),
)
def get_new_browser(self, url, remote=False):
"""
Overrides get_new_session to provide a Chrome session.
:return: A Chrome webdriver object
"""
browser = ChromeManagedBrowser(url)
browser.remote_browser = remote
return browser
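# A minimal usage sketch (the URL is a placeholder; headless/remote_browser
# defaults are assumed to come from the ManagedBrowser base class):
#
#   browser = ChromeManagedBrowser("https://example.test")
#   driver = browser.get_new_session()
#   driver.get(browser.url)
#   driver.quit()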
|
nilq/baby-python
|
python
|
from __future__ import absolute_import
# import models into model package
from .v1_persistent_volume import V1PersistentVolume
from .v1_tcp_socket_action import V1TCPSocketAction
from .v1_resource_quota_status import V1ResourceQuotaStatus
from .v1_container_state_terminated import V1ContainerStateTerminated
from .v1_replication_controller_list import V1ReplicationControllerList
from .v1_capability import V1Capability
from .v1_pod import V1Pod
from .v1_event import V1Event
from .v1_node_daemon_endpoints import V1NodeDaemonEndpoints
from .v1_host_path_volume_source import V1HostPathVolumeSource
from .v1_config_map_key_selector import V1ConfigMapKeySelector
from .v1_volume import V1Volume
from .v1_container_state_running import V1ContainerStateRunning
from .v1_delete_options import V1DeleteOptions
from .v1_pod_template_spec import V1PodTemplateSpec
from .v1_secret_list import V1SecretList
from .v1_nfs_volume_source import V1NFSVolumeSource
from .v1_ceph_fs_volume_source import V1CephFSVolumeSource
from .v1_capabilities import V1Capabilities
from .v1_component_condition import V1ComponentCondition
from .unversioned_status import UnversionedStatus
from .v1_service_status import V1ServiceStatus
from .unversioned_status_details import UnversionedStatusDetails
from .v1_secret_volume_source import V1SecretVolumeSource
from .v1_resource_requirements import V1ResourceRequirements
from .v1_persistent_volume_claim import V1PersistentVolumeClaim
from .unversioned_patch import UnversionedPatch
from .v1_namespace_status import V1NamespaceStatus
from .v1_persistent_volume_access_mode import V1PersistentVolumeAccessMode
from .v1_resource_quota_spec import V1ResourceQuotaSpec
from .v1_persistent_volume_spec import V1PersistentVolumeSpec
from .v1_exec_action import V1ExecAction
from .v1_persistent_volume_claim_volume_source import V1PersistentVolumeClaimVolumeSource
from .v1_service_spec import V1ServiceSpec
from .v1_service_list import V1ServiceList
from .v1_persistent_volume_list import V1PersistentVolumeList
from .v1_container_status import V1ContainerStatus
from .v1_handler import V1Handler
from .v1_node_address import V1NodeAddress
from .v1_fc_volume_source import V1FCVolumeSource
from .v1_endpoint_port import V1EndpointPort
from .v1_downward_api_volume_file import V1DownwardAPIVolumeFile
from .v1_endpoint_subset import V1EndpointSubset
from .v1_limit_range_list import V1LimitRangeList
from .v1_container import V1Container
from .v1_pod_spec import V1PodSpec
from .v1_flocker_volume_source import V1FlockerVolumeSource
from .v1_persistent_volume_status import V1PersistentVolumeStatus
from .v1_rbd_volume_source import V1RBDVolumeSource
from .v1_load_balancer_ingress import V1LoadBalancerIngress
from .v1_security_context import V1SecurityContext
from .v1_service_port import V1ServicePort
from .v1_namespace import V1Namespace
from .v1_gce_persistent_disk_volume_source import V1GCEPersistentDiskVolumeSource
from .v1_endpoints_list import V1EndpointsList
from .v1_node_list import V1NodeList
from .v1_event_source import V1EventSource
from .v1_env_var_source import V1EnvVarSource
from .unversioned_list_meta import UnversionedListMeta
from .v1_limit_range_spec import V1LimitRangeSpec
from .v1_persistent_volume_claim_spec import V1PersistentVolumeClaimSpec
from .v1_replication_controller import V1ReplicationController
from .v1_namespace_list import V1NamespaceList
from .integer import Integer
from .v1_volume_mount import V1VolumeMount
from .v1_node_status import V1NodeStatus
from .v1_replication_controller_status import V1ReplicationControllerStatus
from .v1_pod_condition import V1PodCondition
from .v1_node_condition import V1NodeCondition
from .v1_pod_security_context import V1PodSecurityContext
from .v1_service_account import V1ServiceAccount
from .v1_pod_template import V1PodTemplate
from .v1_pod_list import V1PodList
from .v1_empty_dir_volume_source import V1EmptyDirVolumeSource
from .v1_node_spec import V1NodeSpec
from .v1_http_get_action import V1HTTPGetAction
from .v1_resource_quota_list import V1ResourceQuotaList
from .v1_daemon_endpoint import V1DaemonEndpoint
from .v1_service_account_list import V1ServiceAccountList
from .v1_probe import V1Probe
from .v1_namespace_spec import V1NamespaceSpec
from .v1_iscsi_volume_source import V1ISCSIVolumeSource
from .v1_event_list import V1EventList
from .v1_load_balancer_status import V1LoadBalancerStatus
from .v1_persistent_volume_claim_list import V1PersistentVolumeClaimList
from .v1_component_status import V1ComponentStatus
from .v1_git_repo_volume_source import V1GitRepoVolumeSource
from .v1_object_meta import V1ObjectMeta
from .v1_secret_key_selector import V1SecretKeySelector
from .v1_local_object_reference import V1LocalObjectReference
from .v1_flex_volume_source import V1FlexVolumeSource
from .v1_container_port import V1ContainerPort
from .v1_secret import V1Secret
from .v1_downward_api_volume_source import V1DownwardAPIVolumeSource
from .v1_container_state import V1ContainerState
from .v1_endpoints import V1Endpoints
from .v1_cinder_volume_source import V1CinderVolumeSource
from .v1_pod_status import V1PodStatus
from .v1_se_linux_options import V1SELinuxOptions
from .v1_service import V1Service
from .v1_object_reference import V1ObjectReference
from .v1_object_field_selector import V1ObjectFieldSelector
from .v1_component_status_list import V1ComponentStatusList
from .v1_lifecycle import V1Lifecycle
from .v1_node_system_info import V1NodeSystemInfo
from .json_watch_event import JsonWatchEvent
from .v1_endpoint_address import V1EndpointAddress
from .v1_aws_elastic_block_store_volume_source import V1AWSElasticBlockStoreVolumeSource
from .v1_binding import V1Binding
from .v1_node import V1Node
from .v1_resource_quota import V1ResourceQuota
from .v1_env_var import V1EnvVar
from .unversioned_status_cause import UnversionedStatusCause
from .v1_replication_controller_spec import V1ReplicationControllerSpec
from .v1_container_state_waiting import V1ContainerStateWaiting
from .v1_pod_template_list import V1PodTemplateList
from .v1_limit_range_item import V1LimitRangeItem
from .v1_finalizer_name import V1FinalizerName
from .v1_limit_range import V1LimitRange
from .v1_glusterfs_volume_source import V1GlusterfsVolumeSource
from .v1_container_image import V1ContainerImage
from .v1_persistent_volume_claim_status import V1PersistentVolumeClaimStatus
|
nilq/baby-python
|
python
|
from .logger import get_logger
|
nilq/baby-python
|
python
|
from django.test import TestCase
from django.core.management import call_command
from databuilder.tests import utils
from databuilder import models
# noinspection SpellCheckingInspection
sample_name = 'Bob Bobski'
class TestTask1(TestCase):
def setUp(self):
self.model_name = models.SampleTest.__name__.lower()
models.SampleTest.objects.create(name=sample_name)
def test_dump(self):
total_records = models.SampleTest.objects.all().count()
        print(f'Your model has {total_records} dummy record(s).')
# noinspection SpellCheckingInspection
with utils.capture(call_command, 'toandroid') as output:
self.assertIn(self.model_name, output) # CREATE Table statement
self.assertIn(sample_name, output) # INSERT Statement
|
nilq/baby-python
|
python
|
import argparse
import os
import xml.etree.ElementTree as ET
import sys
import configparser
from os import path
import codecs
import re
parser = argparse.ArgumentParser()
parser.add_argument("-raw_path", default='../raw_data/xml/schaeftlarn')
parser.add_argument("-save_path", default='../raw_data/stories/la')
parser.add_argument('-log_file', default='../logs/converting.log')
parser.add_argument('-verbose', default=False, type=lambda x: (str(x).lower() == 'true'))
args = parser.parse_args()
# xml/html tag regex
TAG_RE = re.compile(r'<[^>]+>')
def parse(path_to_file):
tree = ET.parse(path_to_file)
root = tree.getroot()
identifier = ''
for div in root.iter("{http://www.tei-c.org/ns/1.0}div"):
if 'n' in div.attrib and 'type' in div.attrib:
if 'textpart' != div.get('type'):
identifier = div.get('n')
regest = ''
for front in root.iter('{http://www.tei-c.org/ns/1.0}front'):
if '{http://www.w3.org/XML/1998/namespace}lang' in front.attrib:
# excluding non-german regests
if 'deu' == front.get('{http://www.w3.org/XML/1998/namespace}lang'):
for div in front.iter('{http://www.tei-c.org/ns/1.0}div'):
if 'subtype' in div.attrib:
if 'regest' == div.get('subtype'):
for p in div.iter('{http://www.tei-c.org/ns/1.0}p'):
                                try:
                                    regest = regest + p.text.replace(' ','').replace('\n','')
                                except AttributeError:
                                    # p.text can be None for empty <p> elements
                                    pass
text = ''
for body in root.iter('{http://www.tei-c.org/ns/1.0}body'):
for div in body.iter('{http://www.tei-c.org/ns/1.0}div'):
if 'type' in div.attrib:
if 'textpart' == div.get('type'):
for p in div.iter('{http://www.tei-c.org/ns/1.0}p'):
# get the raw text because it includes the punctuation marks
# punctuation marks are crucial for the translation quality
raw_text = str(ET.tostring(p, encoding="unicode", method="xml"))
# remove xml tags
raw_text = TAG_RE.sub('', raw_text)
raw_text = raw_text.replace(' ','').replace('\n','')
text += raw_text + ' '
return identifier, regest, text
def write_log_file(no_id_found, no_regest_found, no_text_found):
log_path = os.path.abspath(args.log_file)
print('writing the log file to: ',log_path)
file = codecs.open(log_path, 'w', 'utf-8')
file.write('no identifier:\n')
for path in no_id_found:
file.write('\n'+path)
file.write('no regest:\n')
for path in no_regest_found:
file.write('\n'+path)
file.write('no text:\n')
for path in no_text_found:
file.write('\n'+path)
file.close()
def get_files(args):
path = os.path.abspath(args.raw_path)
files = []
# r=root, d=directories, f = files
print('start to load all formulae from: '+path)
for r, d, f in os.walk(path):
for file in f:
if '.xml' in file:
if '__cts__.xml' != file and '__capitains__.xml' != file and '.lat' in file:
files.append(os.path.join(r, file))
print('found: '+str(len(files))+ ' files')
return files
if __name__ == '__main__':
files = get_files(args)
count = 0
no_regest_found = []
no_id_found = []
no_text_found = []
for f in files:
identifier, regest, text = parse(f)
        if identifier == '':
            no_id_found.append(f)
        elif regest == '':
            no_regest_found.append(f)
        elif text == '':
            no_text_found.append(f)
else:
save_path = os.path.abspath(args.save_path)
identifier = identifier.replace(':','.')
save_path = os.path.join(save_path, identifier+'.story')
file = codecs.open(save_path, 'w', 'utf-8')
file.write(text)
file.write('\n\n@highlight\n\n')
file.write(regest)
file.close()
count += 1
if args.verbose:
sys.stdout.write('.')
if(50==count):
print('.')
sys.stdout.write('\n')
sys.stdout.flush()
write_log_file(no_id_found, no_regest_found, no_text_found)
    print('Successfully loaded', count, 'files. For more info see the log file.')
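# Example invocation (the script filename is hypothetical):
#   python convert_formulae.py -raw_path ../raw_data/xml/schaeftlarn \
#       -save_path ../raw_data/stories/la -verbose true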
|
nilq/baby-python
|
python
|
#name: CurateChemStructures
#description: curating a molecules set for structural data homogenization
#top-menu: Chem | Curate...
#language: python
#sample: chem/chem_standards.csv
#tags: demo, chem, rdkit
#input: dataframe data [Input data table]
#input: column smiles {type:categorical; semType: Molecule} [Molecules, in SMILES format]
#input: bool kekulization = false
#input: bool normalization = false
#input: bool reionization = false
#input: bool neutralization = false
#input: bool tautomerization = false
#input: bool mainFragment = false
#output: dataframe curated {action:join(data); semType: Molecule} [Molecules, in SMILES format]
import numpy as np
import pandas as pd
from rdkit import Chem
from rdkit.Chem.MolStandardize import rdMolStandardize
smiles = data[smiles]
length = len(smiles)
standardized = np.full(length, None, dtype=object)
def neutralize_atoms(mol):
pattern = Chem.MolFromSmarts("[+1!h0!$([*]~[-1,-2,-3,-4]),-1!$([*]~[+1,+2,+3,+4])]")
at_matches = mol.GetSubstructMatches(pattern)
at_matches_list = [y[0] for y in at_matches]
if len(at_matches_list) > 0:
for at_idx in at_matches_list:
atom = mol.GetAtomWithIdx(at_idx)
chg = atom.GetFormalCharge()
hcount = atom.GetTotalNumHs()
atom.SetFormalCharge(0)
atom.SetNumExplicitHs(hcount - chg)
atom.UpdatePropertyCache()
return mol
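# For example, neutralize_atoms(Chem.MolFromSmiles('CC(=O)[O-]')) converts the
# acetate anion to acetic acid, i.e. Chem.MolToSmiles(...) then gives 'CC(=O)O'.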
if tautomerization:
enumerator = rdMolStandardize.TautomerEnumerator()
for n in range(0, length):
mol = Chem.MolFromSmiles(smiles[n], sanitize = True)
if mol is None or mol.GetNumAtoms() == 0:
continue
if tautomerization:
mol = enumerator.Canonicalize(mol)
if normalization:
mol = rdMolStandardize.Normalize(mol)
if reionization:
mol = rdMolStandardize.Reionize(mol)
if neutralization:
neutralize_atoms(mol)
if mainFragment:
mol = rdMolStandardize.FragmentParent(mol)
if kekulization:
Chem.Kekulize(mol)
standardized[n] = Chem.MolToSmiles(mol, kekuleSmiles = kekulization)
curated = pd.DataFrame(standardized, columns = ['curated_molecule'])
|
nilq/baby-python
|
python
|
import json
import falcon
class HealthCheck:
def on_get(self, req, resp):
resp.body = json.dumps({'status': 'happy and health!'})
resp.status = falcon.HTTP_200
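# A minimal wiring sketch (the route path is an assumption; `resp.body` above
# implies Falcon < 3, whose application class is falcon.API):
#
#   app = falcon.API()
#   app.add_route('/health', HealthCheck())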
|
nilq/baby-python
|
python
|
import attr
from operator import itemgetter, methodcaller, attrgetter
from django.conf import settings
import spacy
from .service import Service
from .states import states
from .loaders import table_loader
from .language_model import nlp
from ..forms import QuestionForm
@attr.s
class NlpMiddleware:
get_response = attr.ib()
    def __call__(self, request):
data_service = Service(initial_state=states.OK)
(
data_service.of_(request.GET)
            .filter_(lambda qd: 'q' in qd, error_code=states.NO_URL_PARAM)
.map_(QuestionForm)
.assign(fieldname='form')
.filter_(methodcaller('is_valid'),
error_code=states.INVALID_FORM)
.map_(attrgetter('cleaned_data'))
.map_(itemgetter('q'))
.assign(fieldname='question')
.maybe(nlp, error_code=states.NO_MODEL)
.assign(fieldname='document')
.map_(lambda doc: doc._.qtype)
.maybe(table_loader, error_code=states.UNRECOGNIZED)
.bind(lambda doc: methodcaller('find_answer', doc._.kb_ident),
data_from='document')
.maybe(lambda result: f"${result.min_pay} to ${result.max_pay}",
error_code=states.NO_RECORDS_FOUND)
.assign(fieldname='answer')
)
request.context = data_service
return self.get_response(request)
    def process_template_response(self, request, response):
        data_service = request.context
        if data_service.in_state(states.INVALID_FORM):
response.context_data.update(**data_service.to_dict())
return response
|
nilq/baby-python
|
python
|
# -*- coding: utf-8 -*-
"""
This module contains all functions for the response of optical elements.
Created on Wed May 22 12:15:23 2019
@author: Swarnav Banik
sbanik1@umd.edu
"""
import numpy as np
import numpy.fft as fourier
import scipy as scp
from PIL import Image
# %% Common Functions #########################################################
# The following functions take inputs
# Wave Vector k in units um
# Minimum Waist w0 in units um
# Position r,z in units um
# Lens Action ###################################################################
def SphLensAction(E,X,Y,k,f,**kwargs):
# Evaluates the response of a spherical lens at its front focal plane
# Inputs: E - 2D Field pattern
# X,Y - 2D grid representing co-ordinates
# k - Wave vector [um^-1]
# f - focal length [mm]
# FocussedAxis - Along what axis is the beam focused at the back
# focal plane
if (E.shape != X.shape or X.shape != Y.shape):
raise Exception('OpticalElements::SphLensAction::E,X and Y should have same dimensions.')
    FocAxis = 'NONE'  # default when FocussedAxis is not supplied
    for key, value in kwargs.items():
        if key == 'FocussedAxis': FocAxis = value
    f = f*10**3
Transform = fourier.fft2(E)
if FocAxis == 'X':
Transform = fourier.fftshift(Transform, axes = 0)
elif FocAxis == 'Y':
Transform = fourier.fftshift(Transform, axes = 1)
elif FocAxis == 'NONE':
Transform = fourier.fftshift(Transform)
    dx = X[0,1]-X[0,0]
    Xfrq = (2*np.pi*f/k)*fourier.fftshift(fourier.fftfreq(X.shape[1], d=dx))
    dy = Y[1,0]-Y[0,0]
    Yfrq = (2*np.pi*f/k)*fourier.fftshift(fourier.fftfreq(Y.shape[0], d=dy))
[X, Y] = np.meshgrid(Xfrq,Yfrq)
return [Transform, X, Y]
def CylLensAction(E,X,Y,k,f,**kwargs):
# Evaluates the response of a cylindrical lens at its front focal plane
# Inputs: E - 2D Field pattern
# X,Y - 2D grid representing co-ordinates
# k - Wave vector [um^-1]
# f - focal length [mm]
# FocussedAxis - Along what axis is the beam focused at the back
# focal plane
# FocusingAxis - Along what axis does the lens focus
if (E.shape != X.shape or X.shape != Y.shape):
raise Exception('OpticalElements::CylLensAction::E,X and Y should have same dimensions.')
    FocAxis = None  # required; validated below
    for key, value in kwargs.items():
        if key == 'FocusingAxis': FocAxis = value
    f = f*10**3
if FocAxis == 'X':
Transform = fourier.fft(E, axis = 1)
Transform = fourier.fftshift(Transform, axes = 1)
dx = X[0,1]-X[0,0]
Xfrq = (2*np.pi*f/k)*fourier.fftshift(fourier.fftfreq(X.shape[1], d=dx))
Yfrq = Y[:,0]
elif FocAxis == 'Y':
Transform = fourier.fft(E, axis = 0)
Transform = fourier.fftshift(Transform, axes = 0)
        dy = Y[1,0]-Y[0,0]
Yfrq = (2*np.pi*f/k)*fourier.fftshift(fourier.fftfreq(Y.shape[0], d=dy))
Xfrq = X[0,:]
    else: raise Exception('OpticalElements::CylLensAction::Focusing axis needs to be specified.')
[X, Y] = np.meshgrid(Xfrq,Yfrq)
return [Transform, X, Y]
def PiPlateAction(E,X,Y,y_offset,tilt):
# Applies the phase response of a pi plate to the incident field
# Inputs:
# X,Y - 2D grid representing co-ordinates at the plane of pi plate
# E: The light field at the plane of pi plate
# y_offset, tilt: Offset and tilt of the pi plate
# Outputs:
# The light field after passing through the pi plate
if (E.shape != X.shape or X.shape != Y.shape):
raise Exception('OpticalElements::PiPlateAction::E, X and Y should have same dimensions.')
Phase = np.angle(E)
for ii in range(Y.shape[0]):
for jj in range(Y.shape[1]):
if Y[ii,jj]>(np.tan(tilt)*X[ii,jj]+y_offset):
Phase[ii,jj] = Phase[ii,jj]+np.pi
return np.abs(E)*np.exp(1j*Phase)
def MatrixFreeProp(q_in,d):
A = 1
B = d
C = 0
D = 1
q_out = (A*q_in+B)/(C*q_in+D)
return q_out
def MatrixLens(q_in,f):
A = 1
B = 0
C = -1/f
D = 1
q_out = (A*q_in+B)/(C*q_in+D)
return q_out
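# --- Hedged usage sketch (not part of the original module) -------------------
# Demonstrates the ABCD helpers above on a Gaussian beam complex q-parameter.
# Wavelength, waist and focal values are illustrative assumptions; the only
# real requirement is that all lengths share one unit (um here).
def ExampleGaussianBeamProp():
    lam = 1.064                      # wavelength [um] (assumed)
    w0 = 100.0                       # waist radius [um] (assumed)
    q = 1j*np.pi*w0**2/lam           # q-parameter at the waist
    q = MatrixFreeProp(q, 100e3)     # propagate 100 mm of free space [um]
    q = MatrixLens(q, 50e3)          # thin lens with f = 50 mm [um]
    return q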
# Imaging #####################################################################
def ImageViaPSF(X_o, Y_o, E_o, ASF, **kwargs):
# Evaluates the response of an imaging system via the PSF
# Inputs:
# X_o,Y_o - 2D grid representing co-ordinates in object plane
# E_o: The light field at the object plane
# ASF: Amplitude Spread Function = sqrt(PSF)
# norm (optional): Normalize the ASF by some factor
# Outputs:
# I_i: The light field at the image plane
for key, value in kwargs.items():
if key == 'norm':
ASF = ASF*value
E_ft = fourier.fftshift(fourier.fft2(E_o))
ASF_ft = fourier.fftshift(fourier.fft2(ASF))
E_i = fourier.ifftshift(fourier.ifft2(E_ft*ASF_ft))
I_i = np.abs(E_i)**2
return I_i
def ASF(X_o,Y_o,R_airy,**kwargs):
# Evaluates the Amplitude Spread Function of an imaging system
# Inputs:
# X_o,Y_o - 2D grid representing co-ordinates in object plane
# R_airy: Radial extent of the PSF/ ASF
# kind (optional): Kind of ASF, default is airy
# Outputs:
# ASF: The ASF = sqrt(PSF)
kind = 'airy'
for key, value in kwargs.items():
if key == 'kind':
kind = value
R = np.sqrt(X_o**2+Y_o**2)
if kind == 'airy':
ASF = scp.special.jv(1,3.8317*R/R_airy)/(3.8317*R/R_airy)
ASF[R==0] = 0.5
if kind == 'gaussian':
R_airy = R_airy*2.672/3.8317
ASF = np.exp(-(X_o**2+Y_o**2)/R_airy**2)
ASF = ASF/np.sum(np.abs(ASF)**2)
return ASF
def PixelizeImage(I_org,X_org,Y_org,PixSize_cam):
# Pixelize the image
# Inputs:
# X_org,Y_org - 2D grid representing co-ordinates in object plane
# I_org: The image
# PixSize_cam: The pixel size of the camera
# Outputs:
# X_cam,Y_cam - 2D grid representing co-ordinates in object plane on camera
# I_cam: The pixelated image
# PixSize_cam: The pixel size on the camera
if (I_org.shape != X_org.shape or X_org.shape != Y_org.shape):
raise Exception('OpticalElements::PixelizeImage::I_org,X_org and Y_org should have same dimensions.')
if (X_org[0,0]-X_org[0,1] != Y_org[0,0]-Y_org[1,0]):
raise Exception('OpticalElements::PixelizeImage::Pixel size in X and Y are not same')
nptsx = int(round((X_org[0,-1]-X_org[0,0])/PixSize_cam))
nptsy = int(round((Y_org[-1,0]-Y_org[0,0])/PixSize_cam))
PixSize_cam = [(X_org[0,-1]-X_org[0,0])/nptsx, (Y_org[-1,0]-Y_org[0,0])/nptsy]
x = np.linspace(X_org[0,0],X_org[0,-1],nptsx)
y = np.linspace(Y_org[0,0],Y_org[-1,0],nptsy)
[X_cam,Y_cam] = np.meshgrid(x,y)
I_org_img = Image.fromarray(I_org)
I_cam_img = I_org_img.resize((nptsx,nptsy),resample=Image.BILINEAR) # PIL expects (width, height)
I_cam = np.asarray(I_cam_img)
return [X_cam,Y_cam,I_cam, PixSize_cam]
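# --- Hedged end-to-end sketch (not part of the original module) --------------
# Chains ASF -> ImageViaPSF -> PixelizeImage on a synthetic point source.
# Grid extent, airy radius and camera pixel size are illustrative assumptions.
def ExampleImagingPipeline():
    x = np.linspace(-50, 50, 256)             # object-plane axis [um] (assumed)
    [X_o, Y_o] = np.meshgrid(x, x)
    E_o = np.zeros_like(X_o, dtype=complex)
    E_o[128, 128] = 1.0                       # point emitter near the origin
    asf = ASF(X_o, Y_o, 5.0)                  # airy ASF with R_airy = 5 um (assumed)
    I_i = ImageViaPSF(X_o, Y_o, E_o, asf)
    return PixelizeImage(I_i, X_o, Y_o, 2.0)  # 2 um camera pixels (assumed)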
|
nilq/baby-python
|
python
|
# -*- coding: utf-8 -*-
from __future__ import print_function
from __future__ import division
"""
Created on Fri May 4 13:43:46 2018
@author: xingshuli
"""
import os
from keras.optimizers import SGD
from keras.preprocessing.image import ImageDataGenerator
from keras import backend as K
#from NIN_16 import NIN16
#from model_vgg16 import VGG16
#from Rnet import New_net
from Bridge_VGG19 import Bridge_VGG
from learning_rate import choose
#pre-parameters
os.environ['CUDA_VISIBLE_DEVICES'] = '0' # '1' or '0' GPU
img_height, img_width = 224, 224
if K.image_dim_ordering() == 'th':
input_shape = (3, img_width, img_height)
else:
input_shape = (img_width, img_height, 3)
batch_size = 16
epochs = 500
train_data_dir = os.path.join(os.getcwd(), 'image_Data/train')
validation_data_dir = os.path.join(os.getcwd(), 'image_Data/validation')
num_classes = 24
nb_train_samples = 10402
nb_validation_samples = 2159
#model = New_net(input_shape = input_shape, classes = num_classes)
#model = VGG16(input_shape = input_shape, classes = num_classes)
model = Bridge_VGG(input_shape = input_shape, classes = num_classes)
optimizer = SGD(lr = 0.001, momentum = 0.9, nesterov = True)
model.compile(loss = 'categorical_crossentropy', optimizer = optimizer, metrics = ['accuracy'])
model.summary()
train_datagen = ImageDataGenerator(rescale = 1. / 255,
rotation_range = 15,
width_shift_range = 0.2,
height_shift_range = 0.2,
horizontal_flip = True,
zoom_range = 0.2,
shear_range = 0.2)
test_datagen = ImageDataGenerator(rescale = 1. / 255)
train_generator = train_datagen.flow_from_directory(
train_data_dir,
target_size=(img_width, img_height),
batch_size=batch_size,
class_mode='categorical')
validation_generator = test_datagen.flow_from_directory(
validation_data_dir,
target_size=(img_width, img_height),
batch_size=batch_size,
class_mode='categorical')
#set learning rate schedule
lr_monitorable = True
lr_reduce = choose(lr_monitorable = lr_monitorable)
#set callbacks for model fit
callbacks = [lr_reduce]
#model fit
hist = model.fit_generator(
train_generator,
steps_per_epoch=nb_train_samples // batch_size,
epochs=epochs,
validation_data=validation_generator,
validation_steps=nb_validation_samples // batch_size,
callbacks=callbacks)
# save the training accuracy history to acc.txt
f = open('/home/xingshuli/Desktop/acc.txt','w')
f.write(str(hist.history['acc']))
f.close()
# save the validation accuracy history to val_acc.txt
f = open('/home/xingshuli/Desktop/val_acc.txt','w')
f.write(str(hist.history['val_acc']))
f.close()
# save the validation loss history to val_loss.txt
f = open('/home/xingshuli/Desktop/val_loss.txt', 'w')
f.write(str(hist.history['val_loss']))
f.close()
# A reasonable model accuracy is the mean of the last `patience` validation
# accuracies tracked by EarlyStopping: accur = mean(accur[-patience:])
Er_patience = 10
accur = []
with open('/home/xingshuli/Desktop/val_acc.txt','r') as f1:
data1 = f1.readlines()
for line in data1:
odom = line.strip('[]\n').split(',')
num_float = list(map(float, odom))
accur.append(num_float)
f1.close()
y = sum(accur, [])
ave = sum(y[-Er_patience:]) / len(y[-Er_patience:])
print('Validation Accuracy = %.4f' % (ave))
#save model
save_dir = os.path.join(os.getcwd(), 'Wide_ResNet_Model')
model_name = 'keras_trained_model.h5'
if not os.path.isdir(save_dir):
os.makedirs(save_dir)
save_path = os.path.join(save_dir, model_name)
model.save(save_path)
print('The model has been saved at %s' % save_path)
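# Hedged follow-up sketch (not in the original script): the saved model can
# later be restored for inference with the standard Keras loader.
# from keras.models import load_model
# restored = load_model(save_path)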
|
nilq/baby-python
|
python
|
from flask_login import current_user
from flask_wtf import FlaskForm
from wtforms import StringField, PasswordField, SubmitField, BooleanField, FileField
from wtforms.validators import DataRequired, Length, Email, EqualTo, ValidationError
from flask_wtf.file import FileField, FileAllowed, FileRequired
from app.models import User
from app.extensions import photos
class RegisterForm(FlaskForm):
username = StringField('Username', validators=[DataRequired(message='Please enter a username'), Length(4, 20, message='Must be between 4 and 20 characters')])
email = StringField('Email (make sure it is correct, otherwise the account cannot be activated)', validators=[DataRequired(message='Please enter an email address'), Email(message='Please enter a valid email address')])
password = PasswordField('Password', validators=[DataRequired(message='Please enter a password'), Length(8, 20, message='Password must be between 8 and 20 characters'),
EqualTo('confirm', message='Passwords do not match')])
confirm = PasswordField('Confirm password')
submit = SubmitField('Register')
# Check whether the username already exists
def validate_username(self, field):
user = User.query.filter_by(username=field.data).first()
if user:
raise ValidationError('This username is already taken')
# Check whether the email address is already registered
def validate_email(self, field):
user = User.query.filter_by(email=field.data).first()
if user:
raise ValidationError('This email address is already registered')
# Login form
class LoginForm(FlaskForm):
username = StringField('Username or email', validators=[DataRequired(message='Username must not be empty')])
password = PasswordField('Password', validators=[DataRequired(message='Password must not be empty')])
remember = BooleanField('Remember me', default=True)
submit = SubmitField('Log in')
# Change-password form
class UserPasswordForm(FlaskForm):
oldpwd = PasswordField('Current password', validators=[DataRequired(message='Current password must not be empty')])
newpwd = PasswordField('New password', validators=[DataRequired(message='Please enter a new password'), Length(8, 20, message='Password must be between 8 and 20 characters'),
EqualTo('confirm', message='Passwords do not match')])
confirm = PasswordField('Confirm password')
submit = SubmitField('Submit')
# Verify that the current password is correct
def validate_oldpwd(self, field):
# Get the real user object
user = current_user._get_current_object()
if not user.verify_password(field.data):
raise ValidationError('Current password is incorrect')
# The new password must differ from the old one
def validate_newpwd(self, field):
# Get the real user object
user = current_user._get_current_object()
if user.verify_password(field.data):
raise ValidationError('The new password must differ from the old one')
# Avatar upload form
class IconForm(FlaskForm):
icon = FileField('Avatar', render_kw={'class': 'btn btn-default'},
validators=[FileAllowed(photos, message='Only image files may be uploaded'), FileRequired(message='Please choose a file first')])
submit = SubmitField('Update')
# Form for entering a new email address to replace the current one
class EmailForm(FlaskForm):
email = StringField('New email (make sure it is correct, otherwise the confirmation mail cannot be received)',
validators=[DataRequired(message='Please enter the new email address'), Email(message='Please enter a valid email address')])
submit = SubmitField('Submit')
# Form for submitting a username or email to reset the password
class EUForm(FlaskForm):
username = StringField('Username or a valid email', validators=[DataRequired(message='Username must not be empty')])
submit = SubmitField('Next', render_kw={'style': "float: right"})
# Form for submitting the verification code
class AuthCodeForm(FlaskForm):
authcode = StringField('Verification code', validators=[DataRequired(message='Verification code must not be empty')])
submit = SubmitField('Submit', render_kw={'style': "float: right"})
# Password reset form
class ResetPwdForm(FlaskForm):
password = PasswordField('New password', validators=[DataRequired(message='Please enter a password'), Length(8, 20, message='Password must be between 8 and 20 characters'),
EqualTo('confirm', message='Passwords do not match')])
confirm = PasswordField('Confirm password')
submit = SubmitField('Confirm', render_kw={'style': "float: right"})
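# --- Hedged usage sketch (not part of the original module) -------------------
# A view function would typically drive RegisterForm like this; the route,
# template name and user-creation step are illustrative assumptions.
# @app.route('/register', methods=['GET', 'POST'])
# def register():
#     form = RegisterForm()
#     if form.validate_on_submit():  # also runs the validate_* hooks above
#         ...  # create the User, send the activation mail, redirect
#     return render_template('register.html', form=form)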
|
nilq/baby-python
|
python
|
import ee
import geemap
# Create a map centered at (lat, lon).
Map = geemap.Map(center=[40, -100], zoom=4)
fromFT = ee.FeatureCollection("users/wqs/Pipestem/Pipestem_HUC10")
# This function computes the feature's geometry area and adds it as a property.
def addArea(feature):
return feature.set({'areaHa': feature.geometry().area().divide(100 * 100)})
# Map the area getting function over the FeatureCollection.
areaAdded = fromFT.map(addArea)
# Print the first feature from the collection with the added property.
first = areaAdded.first()
print('First feature: ', first.getInfo())
print("areaHa: ", first.get("areaHa").getInfo())
# Display the map.
Map
|
nilq/baby-python
|
python
|
import keras
# initializer = keras.initializers.glorot_uniform(seed=0)
initializer = keras.initializers.glorot_normal()
"""
Creates Residual Network with 50 layers
"""
def create_model(input_shape=(64, 64, 3), classes=1):
# Define the input as a tensor with shape input_shape
X_input = keras.layers.Input(input_shape)
# Zero-Padding
X = keras.layers.ZeroPadding2D((3, 3))(X_input)
# Stage 1
X = keras.layers.Conv2D(64, (7, 7), strides=(2, 2), name='conv1',
kernel_initializer=initializer)(X)
X = keras.layers.BatchNormalization(axis=3, name='bn_conv1')(X)
X = keras.layers.Activation('relu')(X)
X = keras.layers.MaxPooling2D((3, 3), strides=(2, 2))(X)
# Stage 2
X = convolutional_block(X, f = 3, filters=[64, 64, 256], stage=2, block='a', s=1)
X = identity_block(X, 3, [64, 64, 256], stage=2, block='b')
X = identity_block(X, 3, [64, 64, 256], stage=2, block='c')
# Stage 3
X = convolutional_block(X, f = 3, filters=[128, 128, 512], stage=3, block='a', s=2)
X = identity_block(X, 3, [128, 128, 512], stage=3, block='b')
X = identity_block(X, 3, [128, 128, 512], stage=3, block='c')
X = identity_block(X, 3, [128, 128, 512], stage=3, block='d')
# Stage 4
X = convolutional_block(X, f = 3, filters=[256, 256, 1024], stage=4, block='a', s=2)
X = identity_block(X, 3, [256, 256, 1024], stage=4, block='b')
X = identity_block(X, 3, [256, 256, 1024], stage=4, block='c')
X = identity_block(X, 3, [256, 256, 1024], stage=4, block='d')
X = identity_block(X, 3, [256, 256, 1024], stage=4, block='e')
X = identity_block(X, 3, [256, 256, 1024], stage=4, block='f')
# Stage 5
X = convolutional_block(X, f = 3, filters=[512, 512, 2048], stage=5, block='a', s=2)
X = identity_block(X, 3, [512, 512, 2048], stage=5, block='b')
X = identity_block(X, 3, [512, 512, 2048], stage=5, block='c')
# AVGPOOL
X = keras.layers.AveragePooling2D(pool_size=(2, 2))(X)
# output layer
X = keras.layers.Flatten()(X)
X = keras.layers.Dense(classes, activation='sigmoid', name='fc{}'
.format(classes), kernel_initializer=initializer)(X)
# Create model
model = keras.models.Model(inputs=X_input, outputs=X, name='resnet50')
return model
"""
Identity Block of ResNet
"""
def identity_block(X, f, filters, stage, block):
# defining name basis
conv_name_base = 'res' + str(stage) + block + '_branch'
bn_name_base = 'bn' + str(stage) + block + '_branch'
# Retrieve Filters
F1, F2, F3 = filters
# Save the input value. You'll need this later to add back to the main path.
X_shortcut = X
# First component of main path
X = keras.layers.Conv2D(filters=F1, kernel_size=(1, 1), strides=(1,1), padding='valid',
name=conv_name_base + '2a', kernel_initializer=initializer)(X)
X = keras.layers.BatchNormalization(axis=3, name=bn_name_base + '2a')(X)
X = keras.layers.Activation('relu')(X)
X = keras.layers.Dropout(0.5)(X)
# Second component of main path
X = keras.layers.Conv2D(filters=F2, kernel_size=(f, f), strides=(1,1), padding='same',
name=conv_name_base + '2b', kernel_initializer=initializer)(X)
X = keras.layers.BatchNormalization(axis=3, name=bn_name_base + '2b')(X)
X = keras.layers.Activation('relu')(X)
X = keras.layers.Dropout(0.5)(X)
# Third component of main path
X = keras.layers.Conv2D(filters=F3, kernel_size=(1, 1), strides=(1,1), padding='valid',
name=conv_name_base + '2c', kernel_initializer=initializer)(X)
X = keras.layers.BatchNormalization(axis=3, name=bn_name_base + '2c')(X)
# Add shortcut value to main path, and pass it through a RELU activation
X = keras.layers.Add()([X, X_shortcut])
X = keras.layers.Activation('relu')(X)
return X
"""
Convolutional Block of ResNet
"""
def convolutional_block(X, f, filters, stage, block, s=2):
# defining name basis
conv_name_base = 'res' + str(stage) + block + '_branch'
bn_name_base = 'bn' + str(stage) + block + '_branch'
# Retrieve Filters
F1, F2, F3 = filters
# Save the input value
X_shortcut = X
# First component of main path
X = keras.layers.Conv2D(F1, (1, 1), strides=(s, s), name=conv_name_base + '2a',
padding='valid', kernel_initializer=initializer)(X)
X = keras.layers.BatchNormalization(axis=3, name=bn_name_base + '2a')(X)
X = keras.layers.Activation('relu')(X)
X = keras.layers.Dropout(0.5)(X)
# Second component of main path
X = keras.layers.Conv2D(F2, (f, f), strides=(1, 1), name=conv_name_base + '2b',
padding='same', kernel_initializer=initializer)(X)
X = keras.layers.BatchNormalization(axis=3, name=bn_name_base + '2b')(X)
X = keras.layers.Activation('relu')(X)
X = keras.layers.Dropout(0.5)(X)
# Third component of main path
X = keras.layers.Conv2D(F3, (1, 1), strides=(1, 1), name=conv_name_base + '2c',
padding='valid', kernel_initializer=initializer)(X)
X = keras.layers.BatchNormalization(axis=3, name=bn_name_base + '2c')(X)
X_shortcut = keras.layers.Conv2D(F3, (1, 1), strides=(s,s), name=conv_name_base + '1',
padding='valid', kernel_initializer=initializer)(X_shortcut)
X_shortcut = keras.layers.BatchNormalization(axis=3, name=bn_name_base + '1')(X_shortcut)
# Add shortcut value to main path, and pass it through a RELU activation
X = keras.layers.Add()([X, X_shortcut])
X = keras.layers.Activation('relu')(X)
return X
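# --- Hedged usage sketch (not part of the original file) ---------------------
# Builds and compiles the network for binary classification; the optimizer
# and loss below are assumptions consistent with the sigmoid output above.
if __name__ == '__main__':
    model = create_model(input_shape=(64, 64, 3), classes=1)
    model.compile(optimizer='adam', loss='binary_crossentropy', metrics=['accuracy'])
    model.summary()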
|
nilq/baby-python
|
python
|
from typing import Union, List, Any
from ..core.client import ClientBase
from ..core.connect import AsyncTCPConnection
Key = Union[int, float, str]
class MasterClient(ClientBase):
def get_shard(self, key):
return self._execute("get_shard", key)
def get_map(self):
return self._execute("get_map")
def stat(self):
return self._execute("stat")
def create_index(self, index):
return self._execute("create_index", index)
class AsyncMasterClient(MasterClient):
def __init__(self, host, port, transport_class=AsyncTCPConnection, **kwargs):
super(AsyncMasterClient, self).__init__(host, port, transport_class, **kwargs)
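# --- Hedged usage sketch (not part of the original module) -------------------
# Host and port are illustrative assumptions; ClientBase is expected to
# provide the connection handling behind _execute().
# client = MasterClient('localhost', 7333)
# print(client.get_map())
# print(client.get_shard('some-key'))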
|
nilq/baby-python
|
python
|
from fastapi import APIRouter, HTTPException
import pandas as pd
import plotly.express as px
import numpy as np
import plotly.graph_objects as go
router = APIRouter()
@router.get('/vizprices')
async def visual():
# load in airbnb dataset
DATA_PATH = 'https://raw.githubusercontent.com/Air-BnB-2-BW/data-science/master/airbnb_bw.csv'
df = pd.read_csv(DATA_PATH, index_col=0)
# NOTE: the bar data below are precomputed price-bin labels and counts;
# df is loaded above but not otherwise used inside this endpoint.
x = ['$0-25', '$25-50', '$50-75', '$75-100', '$100-125', '$125-150', '$150-175', '$175-200', '$200+']
y = [27, 272, 325, 125, 164, 93, 45, 22, 13]
fig = go.Figure(data=[go.Bar(x=x, y=y)])
fig.update_traces(marker_color='rgb(158,202,225)', marker_line_color='rgb(8,48,107)',
marker_line_width=4.5, opacity=0.6)
fig.update_layout(title_text='Cost Per Person')
fig.update_layout(width=2000,
height=1000,
margin={"r": 1, "t": 1, "l": 1, "b": 1})
# fig.show() would open a browser window on the server; the endpoint only
# needs to return the serialized figure.
return fig.to_json()
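# Hedged client-side sketch (not part of the original module): the endpoint
# returns the Plotly figure serialized as JSON, so a consumer could rebuild it.
# Host and port are assumptions for a local uvicorn run.
# import requests
# import plotly.io as pio
# resp = requests.get('http://localhost:8000/vizprices')
# fig = pio.from_json(resp.json())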
|
nilq/baby-python
|
python
|
# -*- coding: utf-8 -*-
from __future__ import absolute_import, division, print_function
from unittest import TestCase
from tests.utils import assert_equal_dict
from polyaxon_schemas.ml.hooks import StepLoggingTensorHookConfig
from polyaxon_schemas.ml.processing.pipelines import TFRecordSequencePipelineConfig
from polyaxon_schemas.ml.train import TrainConfig
class TestTrainConfigs(TestCase):
def test_train_config(self):
config_dict = {
"data_pipeline": TFRecordSequencePipelineConfig(
data_files=["~/data_file"],
meta_data_file="~/meta_data_file",
shuffle=True,
num_epochs=10,
batch_size=64,
).to_schema(),
"steps": 300,
"hooks": [
StepLoggingTensorHookConfig(
["Dense_1", "Conv2D_4"], every_n_iter=100
).to_schema()
],
}
config = TrainConfig.from_dict(config_dict)
assert_equal_dict(config.to_dict(), config_dict)
|
nilq/baby-python
|
python
|
from cereal import car
from common.realtime import DT_CTRL
from common.numpy_fast import clip
from common.params import Params
from selfdrive.car import apply_std_steer_torque_limits
from selfdrive.car.hyundai.hyundaican import create_lkas11, create_clu11, create_lfahda_mfc, \
create_scc11, create_scc12, create_scc13, create_scc14, \
create_mdps12
from selfdrive.car.hyundai.values import Buttons, CarControllerParams, CAR
from opendbc.can.packer import CANPacker
from selfdrive.config import Conversions as CV
#DIY cruise...
from common.numpy_fast import interp
import cereal.messaging as messaging
sm = messaging.SubMaster(['radarState', 'controlsState'])
VisualAlert = car.CarControl.HUDControl.VisualAlert
min_set_speed = 30 * CV.KPH_TO_MS
# Accel limits
ACCEL_HYST_GAP = 0.02 # don't change accel command for small oscillations within this value
ACCEL_MAX = 1.5 # 1.5 m/s2
ACCEL_MIN = -3.0 # -3.0 m/s2
ACCEL_SCALE = max(ACCEL_MAX, -ACCEL_MIN)
def accel_hysteresis(accel, accel_steady):
# for small accel oscillations within ACCEL_HYST_GAP, don't change the accel command
if accel > accel_steady + ACCEL_HYST_GAP:
accel_steady = accel - ACCEL_HYST_GAP
elif accel < accel_steady - ACCEL_HYST_GAP:
accel_steady = accel + ACCEL_HYST_GAP
accel = accel_steady
return accel, accel_steady
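# Illustrative behavior of accel_hysteresis (assumed inputs, not from the
# original file); the gap is ACCEL_HYST_GAP = 0.02:
# accel_hysteresis(0.51, 0.50) -> (0.50, 0.50)  # within the gap: command held
# accel_hysteresis(0.55, 0.50) -> (0.55, 0.53)  # outside the gap: steady follows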
def process_hud_alert(enabled, fingerprint, visual_alert, left_lane,
right_lane, left_lane_depart, right_lane_depart):
sys_warning = (visual_alert in [VisualAlert.steerRequired, VisualAlert.ldw])
# initialize to no line visible
sys_state = 1
if left_lane and right_lane or sys_warning: # HUD alert only display when LKAS status is active
sys_state = 3 if enabled or sys_warning else 4
elif left_lane:
sys_state = 5
elif right_lane:
sys_state = 6
# initialize to no warnings
left_lane_warning = 0
right_lane_warning = 0
if left_lane_depart:
left_lane_warning = 1 if fingerprint in [CAR.GENESIS_G90, CAR.GENESIS_G80] else 2
if right_lane_depart:
right_lane_warning = 1 if fingerprint in [CAR.GENESIS_G90, CAR.GENESIS_G80] else 2
return sys_warning, sys_state, left_lane_warning, right_lane_warning
class CarController():
def __init__(self, dbc_name, CP, VM):
self.p = CarControllerParams(CP)
self.packer = CANPacker(dbc_name)
self.apply_steer_last = 0
self.car_fingerprint = CP.carFingerprint
self.steer_rate_limited = False
self.lkas11_cnt = 0
self.scc12_cnt = 0
self.last_resume_frame = 0
self.resume_cnt = 0
self.last_lead_distance = 0
self.turning_signal_timer = 0
self.longcontrol = CP.openpilotLongitudinalControl
self.scc_live = not CP.radarOffCan
self.accel_steady = 0
# params init
self.lfamfc = Params().get("MfcSelect", encoding='utf8') == "2"
#DIY cruise...
self.released_clutch = False
self.manual_gearbox = CP.manualGearbox
self.btn_cnt = 0
self.btn_pressed = False
self.prev_btn = 0
self.gap_size = 4 #set gap size. lower number == less gap
self.btn_combo = []
def update(self, enabled, CS, frame, actuators, pcm_cancel_cmd, visual_alert,
left_lane, right_lane, left_lane_depart, right_lane_depart, set_speed, lead_visible):
# *** compute control surfaces ***
# Steering Torque
new_steer = int(round(actuators.steer * self.p.STEER_MAX))
apply_steer = apply_std_steer_torque_limits(new_steer, self.apply_steer_last, CS.out.steeringTorque, self.p)
self.steer_rate_limited = new_steer != apply_steer
# disable if steer angle reach 90 deg, otherwise mdps fault in some models
lkas_active = enabled and abs(CS.out.steeringAngleDeg) < CS.CP.maxSteeringAngleDeg
# Disable steering while turning blinker on and speed below 60 kph
if CS.out.leftBlinker or CS.out.rightBlinker:
self.turning_signal_timer = 0.5 / DT_CTRL # Disable for 0.5 Seconds after blinker turned off
# if self.turning_indicator_alert: # set and clear by interface
# lkas_active = 0
if self.turning_signal_timer > 0:
self.turning_signal_timer -= 1
if not lkas_active:
apply_steer = 0
self.apply_steer_last = apply_steer
sys_warning, sys_state, left_lane_warning, right_lane_warning = \
process_hud_alert(enabled, self.car_fingerprint, visual_alert,
left_lane, right_lane, left_lane_depart, right_lane_depart)
clu11_speed = CS.clu11["CF_Clu_Vanz"]
enabled_speed = 38 if CS.is_set_speed_in_mph else 60
if clu11_speed > enabled_speed or not lkas_active:
enabled_speed = clu11_speed
if not(min_set_speed < set_speed < 255 * CV.KPH_TO_MS):
set_speed = min_set_speed
set_speed *= CV.MS_TO_MPH if CS.is_set_speed_in_mph else CV.MS_TO_KPH
can_sends = []
can_sends.append(create_lkas11(self.packer, frame, self.car_fingerprint, apply_steer, lkas_active,
CS.lkas11, sys_warning, sys_state, enabled, left_lane, right_lane,
left_lane_warning, right_lane_warning, 0))
if CS.mdps_bus or CS.scc_bus == 1: # send lkas11 bus 1 if mdps or scc is on bus 1
can_sends.append(create_lkas11(self.packer, frame, self.car_fingerprint, apply_steer, lkas_active,
CS.lkas11, sys_warning, sys_state, enabled, left_lane, right_lane,
left_lane_warning, right_lane_warning, 1))
if frame % 2 and CS.mdps_bus: # send clu11 to mdps if it is not on bus 0
can_sends.append(create_clu11(self.packer, frame, CS.mdps_bus, CS.clu11, Buttons.NONE, enabled_speed))
if pcm_cancel_cmd and self.longcontrol:
can_sends.append(create_clu11(self.packer, frame, CS.scc_bus, CS.clu11, Buttons.CANCEL, clu11_speed))
if CS.mdps_bus: # send mdps12 to LKAS to prevent LKAS error
can_sends.append(create_mdps12(self.packer, frame, CS.mdps12))
# 20 Hz LFA MFA message
if frame % 5 == 0 and self.lfamfc:
can_sends.append(create_lfahda_mfc(self.packer, enabled))
# DIY cruise...
if enabled:
sm.update(0)
lead_data = sm['radarState'].leadOne
lead_one = sm['radarState'].leadOne
lead_two = sm['radarState'].leadTwo
vel_cruise = sm['controlsState'].vCruise #target max speed seen on screen. In km/h
if lead_one.status:
lead_data = lead_one
if lead_two.status and ((lead_one.dRel - lead_two.dRel) > 3.0):
lead_data = lead_two
lead_rel_dist = lead_data.dRel
lead_rel_vel = lead_data.vRel
lead_vel = lead_data.vLead
cruise_curr_set_speed = CS.out.cruiseState.speed #cruise speed m/s
max_cru_speed = vel_cruise * CV.KPH_TO_MS #speed limit
press_button_speed = 3 # send button presses on 2 of every 3 frames (frame % 3 < 2)
lead_speed_diff = 2.5 * CV.KPH_TO_MS #we're slower than the lead car by this amount. km/h
#button up/down combination to set gap size
#UP DOWN UP DOWN smaller gap
#DOWN UP DOWN UP bigger gap
if CS.cruise_buttons == Buttons.RES_ACCEL or CS.cruise_buttons == Buttons.SET_DECEL:
self.btn_pressed = True
if self.btn_pressed:
self.btn_cnt += 1
if self.btn_cnt > 0 and self.btn_cnt < 100:
if CS.cruise_buttons == Buttons.RES_ACCEL and not self.prev_btn == Buttons.RES_ACCEL:
self.btn_combo.append(Buttons.RES_ACCEL)
self.prev_btn = Buttons.RES_ACCEL
if CS.cruise_buttons == Buttons.SET_DECEL and not self.prev_btn == Buttons.SET_DECEL:
self.btn_combo.append(Buttons.SET_DECEL)
self.prev_btn = Buttons.SET_DECEL
else:
self.btn_cnt = 0
self.btn_pressed = False
self.prev_btn = 0
self.btn_combo = []
if self.btn_combo == [Buttons.RES_ACCEL, Buttons.SET_DECEL, Buttons.RES_ACCEL, Buttons.SET_DECEL]:
self.gap_size -= 1
self.btn_combo = []
if self.btn_combo == [Buttons.SET_DECEL, Buttons.RES_ACCEL, Buttons.SET_DECEL, Buttons.RES_ACCEL]:
self.gap_size += 1
self.btn_combo = []
#press down if high lateral acceleration
bpV = [30., 130.]
lat_acc = abs(CS.out.cruiseState.lateralAcceleration)
speed_interp = int(CS.out.vEgo * CV.MS_TO_KPH) + 4
acc_range = [0.25, 0.40]
acc_thresh = interp(speed_interp, bpV, acc_range)
#we drive slower than lead to get the gap and later the distance will pull us back in until a balance is found
lead_vel -= lead_speed_diff
#set gap
if self.gap_size > 0:
lead_vel += ((lead_rel_dist / self.gap_size) * CV.KPH_TO_MS)
#when following logic
following = lead_data.status and lead_rel_dist < 130. and lead_rel_dist > 1. and not ((CS.out.leftBlinker or CS.out.rightBlinker) and CS.out.vEgo > (60 * CV.KPH_TO_MS))
#prevents disabling cruise if speed <30km/h
if clu11_speed <= 30:
clu11_speed = 30
if following:
if cruise_curr_set_speed < lead_vel and max_cru_speed > cruise_curr_set_speed and frame % press_button_speed < 2:
can_sends.append(create_clu11(self.packer, frame, 0, CS.clu11, Buttons.RES_ACCEL, clu11_speed))
if (cruise_curr_set_speed * CV.MS_TO_KPH) > 30:
if (max_cru_speed < cruise_curr_set_speed or cruise_curr_set_speed > lead_vel) and frame % press_button_speed < 2: # parentheses so the frame gate applies to both conditions
can_sends.append(create_clu11(self.packer, frame, 0, CS.clu11, Buttons.SET_DECEL, clu11_speed))
elif not following:
if cruise_curr_set_speed < max_cru_speed and frame % press_button_speed < 2 and lat_acc < acc_thresh:
can_sends.append(create_clu11(self.packer, frame, 0, CS.clu11, Buttons.RES_ACCEL, clu11_speed))
elif (cruise_curr_set_speed > max_cru_speed and (cruise_curr_set_speed * CV.MS_TO_KPH) > 30 and frame % press_button_speed < 2) or \
(lat_acc > acc_thresh and frame % press_button_speed < 2):
can_sends.append(create_clu11(self.packer, frame, 0, CS.clu11, Buttons.SET_DECEL, clu11_speed))
if self.manual_gearbox:
if CS.out.clutchPressed and not self.released_clutch:
self.released_clutch = True
if not CS.out.clutchPressed and self.released_clutch:
can_sends.append(create_clu11(self.packer, frame, 0, CS.clu11, Buttons.SET_DECEL, clu11_speed))
if frame % press_button_speed >= 2:
self.released_clutch = False
return can_sends
|
nilq/baby-python
|
python
|
#!/usr/bin/env python3
# Copyright (c) 2019 The Khronos Group Inc.
# SPDX-License-Identifier: Apache-2.0
from itertools import product
from shared import PLATFORMS, TRUE_FALSE, VS_VERSION, make_win_artifact_name
if __name__ == "__main__":
for platform, uwp in product(PLATFORMS, TRUE_FALSE):
print(make_win_artifact_name(platform, uwp))
|
nilq/baby-python
|
python
|
import tkinter as tk
class DashboardGUI:
def __init__(self, master, interpreter):
self.master = master
self.interpreter = interpreter
h = 316
w = 480
self.top_bar_canvas = tk.Canvas(master,bg="black",height=h,width=w/20)
self.top_bar_canvas.grid(row=0,column=0,rowspan=2)
self.time_text = self.top_bar_canvas.create_text(12,0.67*h,text="IDK:IDK AM",
angle=90,
fill='white',
font=('Helvetica', '12', 'bold'))
self.sat_num_text = self.top_bar_canvas.create_text(12,0.15*h,text="0 SAT",
angle=90,
fill='white',
font=('Helvetica', '12', 'bold'))
self.speed_label_canvas = tk.Canvas(master,bg="black", height=h/2, width=w/12)
self.speed_label_canvas.grid(row=0,column=1)
self.speed_label_text = self.speed_label_canvas.create_text(20,80,text="SPEED (MPH)",
angle=90,
fill='white',
font=('Helvetica', '15', 'bold'))
self.rpm_label_canvas = tk.Canvas(master,bg="black", height=h/2, width=w/12)
self.rpm_label_canvas.grid(row=1,column=1)
self.rpm_label_text = self.rpm_label_canvas.create_text(20,80,text="CADENCE (RPM)",
angle=90,
fill='white',
font=('Helvetica', '12', 'bold'))
self.speed_canvas = tk.Canvas(master,height=h/2,width=w/6,bg='black')
self.speed_canvas.grid(row=0,column=2)
self.speed_text = self.speed_canvas.create_text(40,80,text="0.0",
angle=90,
fill='yellow',
font=('Helvetica', '50', 'bold'))
self.cadence_canvas = tk.Canvas(master,height=h/2,width=w/6,bg='black')
self.cadence_canvas.grid(row=1,column=2)
self.cadence_text = self.cadence_canvas.create_text(40,80,text="0.0",
angle=90,
fill='yellow',
font=('Helvetica', '50', 'bold'))
self.avg_speed_label_canvas = tk.Canvas(master,height=h/2,width=w/12,bg='black')
self.avg_speed_label_canvas.grid(row=0,column=3)
self.avg_speed_label_text = self.avg_speed_label_canvas.create_text(20,80,text="AVG SPEED",
angle=90,
fill='white',
font=('Helvetica', '15', 'bold'))
self.distance_label_canvas = tk.Canvas(master,height=h/2,width=w/12,bg='black')
self.distance_label_canvas.grid(row=1,column=3)
self.distance_label_text = self.distance_label_canvas.create_text(20,80,text="DISTANCE (MILES)",
angle=90,
fill='white',
font=('Helvetica', '11', 'bold'))
self.avg_speed_canvas = tk.Canvas(master,height=h/2,width=w/6,bg='black')
self.avg_speed_canvas.grid(row=0,column=4)
self.avg_speed_text = self.avg_speed_canvas.create_text(40,80,text="0.0",
angle=90,
fill='yellow',
font=('Helvetica', '50', 'bold'))
self.distance_canvas = tk.Canvas(master,height=h/2,width=w/6,bg='black')
self.distance_canvas.grid(row=1,column=4)
self.distance_text = self.distance_canvas.create_text(40,80,text="0.0",
angle=90,
fill='yellow',
font=('Helvetica', '50', 'bold'))
self.direction_label_canvas = tk.Canvas(master,height=h/2,width=w/12,bg='black')
self.direction_label_canvas.grid(row=0,column=5)
self.direction_label_text = self.direction_label_canvas.create_text(20,80,text="DIRECTION",
angle=90,
fill='white',
font=('Helvetica', '15', 'bold'))
self.incline_label_canvas = tk.Canvas(master,height=h/2,width=w/12,bg='black')
self.incline_label_canvas.grid(row=1,column=5)
self.incline_label_text = self.incline_label_canvas.create_text(20,80,text="INCLINE (DEG)",
angle=90,
fill='white',
font=('Helvetica', '13', 'bold'))
self.direction_canvas = tk.Canvas(master,height=h/2,width=w/6,bg='black')
self.direction_canvas.grid(row=0,column=6)
self.direction_text = self.direction_canvas.create_text(40,80,text="N",
angle=90,
fill='yellow',
font=('Helvetica', '50', 'bold'))
self.incline_canvas = tk.Canvas(master,height=h/2,width=w/6,bg='black')
self.incline_canvas.grid(row=1,column=6)
self.incline_text = self.incline_canvas.create_text(40,80,text="0.0",
angle=90,
fill='yellow',
font=('Helvetica', '50', 'bold'))
self.lights_button = LatchingButton(master, width=60,height=60,fills=("white","red"),command=self.toggle_lights)
self.lights_button.grid(row=0,column=7,rowspan=1)
self.lights = 0
def toggle_lights(self):
self.lights = 1 - self.lights
def update_display(self):
self.interpreter.updateData(self.lights)
self.top_bar_canvas.itemconfigure(self.sat_num_text,text='%.1f SAT'%self.interpreter.SAT)
self.top_bar_canvas.itemconfigure(self.time_text,text=self.interpreter.getDisplayTimeString())
if self.interpreter.getSpeed()>25:
self.speed_canvas.itemconfigure(self.speed_text,text='%.1f'%self.interpreter.getSpeed(),fill="red")
else:
self.speed_canvas.itemconfigure(self.speed_text,text='%.1f'%self.interpreter.getSpeed(),fill="yellow")
self.cadence_canvas.itemconfigure(self.cadence_text,text='%.0f'%self.interpreter.PED)
self.avg_speed_canvas.itemconfigure(self.avg_speed_text,text='%.0f'%self.interpreter.getAvgSpeed())
self.distance_canvas.itemconfigure(self.distance_text,text='%.0f'%self.interpreter.getDistance())
self.direction_canvas.itemconfigure(self.direction_text,text=self.interpreter.getDirection())
self.incline_canvas.itemconfigure(self.incline_text,text='%.0f'%self.interpreter.getIncline())
self.master.after(1,self.update_display) #this ensures this process continually repeats
class LatchingButton(tk.Canvas):
def __init__(self, parent, width, height, fills, command=None):
tk.Canvas.__init__(self, parent, borderwidth=1, highlightthickness=0)
self.command = command
self.fills=fills
self.fill_index = 0
padding = 4
self.oval = self.create_oval((padding,padding, width+padding, height+padding),
outline="black",
fill=self.fills[self.fill_index])
(x0,y0,x1,y1) = self.bbox("all")
width = (x1-x0) + padding
height = (y1-y0) + padding
self.configure(width=width, height=height)
self.bind("<ButtonPress-1>", self._on_press)
self.bind("<ButtonRelease-1>", self._on_release)
def _on_press(self, event):
pass
def _on_release(self, event):
self.fill_index = 1 - self.fill_index
self.itemconfigure(self.oval,fill=self.fills[self.fill_index])
if self.command is not None:
self.command()
if __name__ == '__main__':
import os
import serial
import serial.tools.list_ports
import interpreter
ports = serial.tools.list_ports.comports()
arduino_port = None
for port, desc, hwid in sorted(ports):
if desc == "Arduino Micro":
print("{}: {} [{}]".format(port, desc, hwid))
arduino_port = port
break
if arduino_port is None:
raise RuntimeError("No Arduino Micro found on any serial port")
arduino = serial.Serial(arduino_port, 115200, timeout=0.1, write_timeout=0)
path = os.path.expanduser('~/bike-computer/data/') # '~' is not expanded automatically by open()
intrptr = interpreter.Interpreter(arduino,path)
root = tk.Tk()
dbg = DashboardGUI(root, intrptr)
root.overrideredirect(True)
root.after(10, dbg.update_display)
root.mainloop()
|
nilq/baby-python
|
python
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import time, random
import numpy as np
import tensorflow as tf
from tensorflow.python.layers import core as layers_core
import argparse
from tensorflow.python.client import device_lib
from utils import *
class Option(object):
def __init__(self, d):
self.__dict__ = d
def save(self):
with open(os.path.join(self.this_expsdir, "option.txt"), "w") as f:
for key, value in sorted(self.__dict__.items(), key=lambda x: x[0]):
f.write("%s, %s\n" % (key, str(value)))
logging = tf.logging
def data_type():
return tf.float32
class PTBModel(object):
#The language model.
def __init__(self, is_training, is_test_LM=False):
self._is_training = is_training
self.batch_size = config.batch_size
self.num_steps = config.num_steps
size = config.hidden_size
vocab_size = config.vocab_size
self._input=tf.placeholder(shape=[None, config.num_steps], dtype=tf.int32)
self._target=tf.placeholder(shape=[None, config.num_steps], dtype=tf.int32)
self._sequence_length=tf.placeholder(shape=[None], dtype=tf.int32)
with tf.device("/cpu:0"):
embedding = tf.get_variable("embedding", [vocab_size, size], dtype=data_type())
inputs = tf.nn.embedding_lookup(embedding, self._input)
softmax_w = tf.get_variable(
"softmax_w", [size, vocab_size], dtype=data_type())
softmax_b = tf.get_variable("softmax_b", [vocab_size], dtype=data_type())
if is_training and config.keep_prob < 1:
inputs = tf.nn.dropout(inputs, config.keep_prob)
output = self._build_rnn_graph(inputs, self._sequence_length, is_training)
output=tf.reshape(output, [-1, config.hidden_size])
logits = tf.nn.xw_plus_b(output, softmax_w, softmax_b)
# Reshape logits to be a 3-D tensor for sequence loss
logits = tf.reshape(logits, [-1, self.num_steps, vocab_size])
self._output_prob=tf.nn.softmax(logits)
# Use the contrib sequence loss and average over the batches
mask=tf.sequence_mask(lengths=self._sequence_length, maxlen=self.num_steps, dtype=data_type())
loss = tf.contrib.seq2seq.sequence_loss(
logits,
self._target,
mask,
average_across_timesteps=True,
average_across_batch=True)
# Update the cost
self._cost = loss
#self._lr = tf.Variable(0.0, trainable=False)
tvars = tf.trainable_variables()
grads, _ = tf.clip_by_global_norm(tf.gradients(self._cost, tvars),
config.max_grad_norm)
optimizer = tf.train.AdamOptimizer()
self._train_op = optimizer.apply_gradients(
zip(grads, tvars),
global_step=tf.train.get_or_create_global_step())
def _build_rnn_graph(self, inputs, sequence_length, is_training):
return self._build_rnn_graph_lstm(inputs, sequence_length, is_training)
def _get_lstm_cell(self, is_training):
return tf.contrib.rnn.BasicLSTMCell(
config.hidden_size, forget_bias=0.0, state_is_tuple=True,
reuse=not is_training)
def _build_rnn_graph_lstm(self, inputs, sequence_length, is_training):
"""Build the inference graph using canonical LSTM cells."""
# Slightly better results can be obtained with forget gate biases
# initialized to 1 but the hyperparameters of the model would need to be
# different than reported in the paper.
def make_cell():
cell = self._get_lstm_cell( is_training)
if is_training and config.keep_prob < 1:
cell = tf.contrib.rnn.DropoutWrapper(
cell, output_keep_prob=config.keep_prob)
return cell
cell = tf.contrib.rnn.MultiRNNCell(
[make_cell() for _ in range(config.num_layers)], state_is_tuple=True)
outputs, states=tf.nn.dynamic_rnn(cell=cell, inputs=inputs, sequence_length=sequence_length, dtype=data_type())
return outputs
def run_epoch(sess, model, input, sequence_length, target=None, mode='train'):
#Runs the model on the given data.
if mode=='train':
#train language model
_,cost = sess.run([model._train_op, model._cost], feed_dict={model._input: input, model._target:target, model._sequence_length:sequence_length})
return cost
elif mode=='test':
#test language model
cost = sess.run(model._cost, feed_dict={model._input: input, model._target:target, model._sequence_length:sequence_length})
return cost
else:
#use the language model to calculate sentence probability
output_prob = sess.run(model._output_prob, feed_dict={model._input: input, model._sequence_length:sequence_length})
return output_prob
def main(config):
if config.mode=='forward' or config.mode=='use':
with tf.name_scope("forward_train"):
with tf.variable_scope("forward", reuse=None):
m_forward = PTBModel(is_training=True)
with tf.name_scope("forward_test"):
with tf.variable_scope("forward", reuse=True):
mtest_forward = PTBModel(is_training=False)
var=tf.trainable_variables()
var_forward=[x for x in var if x.name.startswith('forward')]
saver_forward=tf.train.Saver(var_forward, max_to_keep=1)
if config.mode=='backward' or config.mode=='use':
with tf.name_scope("backward_train"):
with tf.variable_scope("backward", reuse=None):
m_backward = PTBModel(is_training=True)
with tf.name_scope("backward_test"):
with tf.variable_scope("backward", reuse=True):
mtest_backward = PTBModel(is_training=False)
var=tf.trainable_variables()
var_backward=[x for x in var if x.name.startswith('backward')]
saver_backward=tf.train.Saver(var_backward, max_to_keep=1)
init = tf.global_variables_initializer()
with tf.Session() as session:
session.run(init)
input = [[3,4,5,6,6,7,8,9,4,5,6,7,8,9,2]]
sequence_length = [10]
prob_old=run_epoch(session, mtest_forward, input, sequence_length, mode='use')
print(prob_old)
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="Experiment setup")
# misc
parser.add_argument('--seed', default=33, type=int)
parser.add_argument('--gpu', default="3", type=str)
parser.add_argument('--no_train', default=False, action="store_true")
parser.add_argument('--no_preds', default=False, action="store_true")
parser.add_argument('--exps_dir', default=None, type=str)
parser.add_argument('--exp_name', default=None, type=str)
parser.add_argument('--load', default=None, type=str)
# data property
parser.add_argument('--data_path', default='data/quora/quora.txt', type=str)
parser.add_argument('--dict_path', default='data/quora/dict.pkl', type=str)
parser.add_argument('--dict_size', default=30000, type=int)
parser.add_argument('--vocab_size', default=30003, type=int)
parser.add_argument('--backward', default=False, action="store_true")
parser.add_argument('--keyword_pos', default=True, action="store_false")
# model architecture
parser.add_argument('--num_steps', default=15, type=int)
parser.add_argument('--num_layers', default=2, type=int)
parser.add_argument('--emb_size', default=256, type=int)
parser.add_argument('--hidden_size', default=300, type=int)
parser.add_argument('--dropout', default=0.0, type=float)
parser.add_argument('--model', default=0, type=int)
# optimization
parser.add_argument('--batch_size', default=128, type=int)
parser.add_argument('--epochs', default=200, type=int)
parser.add_argument('--learning_rate', default=0.001, type=float)
parser.add_argument('--weight_decay', default=0.00, type=float)
parser.add_argument('--clip_norm', default=0.00, type=float)
parser.add_argument('--no_cuda', default=False, action="store_true")
parser.add_argument('--local', default=False, action="store_true")
parser.add_argument('--threshold', default=0.1, type=float)
# evaluation
parser.add_argument('--sim', default='word_max', type=str)
parser.add_argument('--mode', default='sa', type=str)
parser.add_argument('--accuracy', default=False, action="store_true")
parser.add_argument('--top_k', default=10, type=int)
parser.add_argument('--accumulate_step', default=1, type=int)
parser.add_argument('--backward_path', default=None, type=str)
parser.add_argument('--forward_path', default=None, type=str)
# sampling
parser.add_argument('--use_data_path', default='data/input/input.txt', type=str)
parser.add_argument('--reference_path', default=None, type=str)
parser.add_argument('--pos_path', default='POS/english-models', type=str)
parser.add_argument('--emb_path', default='data/quora/emb.pkl', type=str)
parser.add_argument('--max_key', default=3, type=float)
parser.add_argument('--max_key_rate', default=0.5, type=float)
parser.add_argument('--rare_since', default=30000, type=int)
parser.add_argument('--sample_time', default=100, type=int)
parser.add_argument('--search_size', default=100, type=int)
parser.add_argument('--action_prob', default=[0.3,0.3,0.3,0.3], type=list)
parser.add_argument('--just_acc_rate', default=0.0, type=float)
parser.add_argument('--sim_mode', default='keyword', type=str)
parser.add_argument('--save_path', default='temp.txt', type=str)
parser.add_argument('--forward_save_path', default='data/tfmodel/forward.ckpt', type=str)
parser.add_argument('--backward_save_path', default='data/tfmodel/backward.ckpt', type=str)
parser.add_argument('--max_grad_norm', default=5, type=float)
parser.add_argument('--keep_prob', default=1, type=float)
d = vars(parser.parse_args())
option = Option(d)
random.seed(option.seed)
np.random.seed(option.seed)
os.environ["CUDA_VISIBLE_DEVICES"] = option.gpu
config = option
main(option)
|
nilq/baby-python
|
python
|
import numpy as np
from ..Tools.Downloading._ReadDataIndex import _ReadDataIndex
from .. import Globals
def ReadIndex(subcomp,L,prod):
'''
Reads the index file for a given data product.
Inputs
======
subcomp : string
Name of sub component of instrument
L : int
Level of data to download
prod : str
Data product to download
Available data products
=======================
subcomp L prod
efd 2 'E_spin'
efd 2 'pot'
efd 2 'spec'
hfa 2 'high'
hfa 2 'low'
hfa 2 'monit'
hfa 3 ''
ofa 2 'complex'
ofa 2 'matrix'
ofa 2 'spec'
Returns
=======
numpy.recarray
'''
if subcomp == 'hfa' and L == 3:
idxfname = Globals.DataPath + 'PWE/Index-L{:01d}-{:s}.dat'.format(L,subcomp)
datapath = Globals.DataPath + 'PWE/{:s}/L{:01d}/'.format(subcomp,L)
else:
idxfname = Globals.DataPath + 'PWE/Index-L{:01d}-{:s}-{:s}.dat'.format(L,subcomp,prod)
datapath = Globals.DataPath + 'PWE/{:s}/L{:01d}/{:s}/'.format(subcomp,L,prod)
return _ReadDataIndex(idxfname)
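# --- Hedged usage sketch (not part of the original module) -------------------
# Reads the level-2 spectrum index of the EFD sub component; see the product
# table in the docstring above for the valid (subcomp, L, prod) combinations.
# idx = ReadIndex('efd', 2, 'spec')
# print(idx.dtype.names)  # inspect the index fields (assumed numpy.recarray)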
|
nilq/baby-python
|
python
|
# -*- coding: UTF-8 -*-
# A part of NonVisual Desktop Access (NVDA)
# Copyright (C) 2007-2020 NV Access Limited, Peter Vágner
# This file is covered by the GNU General Public License.
# See the file COPYING for more details.
import time
import nvwave
import threading
import queue
from ctypes import cdll
from ctypes import *
import config
import globalVars
from logHandler import log
import os
import codecs
isSpeaking = False
onIndexReached = None
bgThread=None
bgQueue = None
player = None
espeakDLL=None
#: Keeps count of the number of bytes pushed for the current utterance.
#: This is necessary because index positions are given as ms since the start of the utterance.
_numBytesPushed = 0
#Parameter bounds
minRate=80
maxRate=450
minPitch=0
maxPitch=99
#event types
espeakEVENT_LIST_TERMINATED=0
espeakEVENT_WORD=1
espeakEVENT_SENTENCE=2
espeakEVENT_MARK=3
espeakEVENT_PLAY=4
espeakEVENT_END=5
espeakEVENT_MSG_TERMINATED=6
espeakEVENT_PHONEME=7
#position types
POS_CHARACTER=1
POS_WORD=2
POS_SENTENCE=3
#output types
AUDIO_OUTPUT_PLAYBACK=0
AUDIO_OUTPUT_RETRIEVAL=1
AUDIO_OUTPUT_SYNCHRONOUS=2
AUDIO_OUTPUT_SYNCH_PLAYBACK=3
#synth flags
espeakCHARS_AUTO=0
espeakCHARS_UTF8=1
espeakCHARS_8BIT=2
espeakCHARS_WCHAR=3
espeakSSML=0x10
espeakPHONEMES=0x100
espeakENDPAUSE=0x1000
espeakKEEP_NAMEDATA=0x2000
#speech parameters
espeakSILENCE=0
espeakRATE=1
espeakVOLUME=2
espeakPITCH=3
espeakRANGE=4
espeakPUNCTUATION=5
espeakCAPITALS=6
espeakWORDGAP=7
espeakOPTIONS=8 # reserved for misc. options. not yet used
espeakINTONATION=9
espeakRESERVED1=10
espeakRESERVED2=11
#error codes
EE_OK=0
#EE_INTERNAL_ERROR=-1
#EE_BUFFER_FULL=1
#EE_NOT_FOUND=2
# eSpeak initialization flags
espeakINITIALIZE_DONT_EXIT = 0x8000
class espeak_EVENT_id(Union):
_fields_=[
('number',c_int),
('name',c_char_p),
('string',c_char*8),
]
class espeak_EVENT(Structure):
_fields_=[
('type',c_int),
('unique_identifier',c_uint),
('text_position',c_int),
('length',c_int),
('audio_position',c_int),
('sample',c_int),
('user_data',c_void_p),
('id',espeak_EVENT_id),
]
class espeak_VOICE(Structure):
_fields_=[
('name',c_char_p),
('languages',c_char_p),
('identifier',c_char_p),
('gender',c_byte),
('age',c_byte),
('variant',c_byte),
('xx1',c_byte),
('score',c_int),
('spare',c_void_p),
]
def __eq__(self, other):
return isinstance(other, type(self)) and addressof(self) == addressof(other)
# As __eq__ was defined on this class, we must provide __hash__ to remain hashable.
# The default hash implementation is fine for our purposes.
def __hash__(self):
return super().__hash__()
# constants that can be returned by espeak_callback
CALLBACK_CONTINUE_SYNTHESIS=0
CALLBACK_ABORT_SYNTHESIS=1
def encodeEspeakString(text):
return text.encode('utf8')
def decodeEspeakString(data):
return data.decode('utf8')
t_espeak_callback=CFUNCTYPE(c_int,POINTER(c_short),c_int,POINTER(espeak_EVENT))
@t_espeak_callback
def callback(wav,numsamples,event):
try:
global player, isSpeaking, _numBytesPushed
if not isSpeaking:
return CALLBACK_ABORT_SYNTHESIS
indexes = []
for e in event:
if e.type==espeakEVENT_MARK:
indexNum = int(decodeEspeakString(e.id.name))
# e.audio_position is ms since the start of this utterance.
# Convert to bytes since the start of the utterance.
BYTES_PER_SAMPLE = 2
MS_PER_SEC = 1000
bytesPerMS = player.samplesPerSec * BYTES_PER_SAMPLE // MS_PER_SEC
indexByte = e.audio_position * bytesPerMS
# Subtract bytes in the utterance that have already been handled
# to give us the byte offset into the samples for this callback.
indexByte -= _numBytesPushed
indexes.append((indexNum, indexByte))
elif e.type==espeakEVENT_LIST_TERMINATED:
break
if not wav:
player.idle()
onIndexReached(None)
isSpeaking = False
return CALLBACK_CONTINUE_SYNTHESIS
wav = string_at(wav, numsamples * sizeof(c_short)) if numsamples>0 else b""
prevByte = 0
for indexNum, indexByte in indexes:
player.feed(wav[prevByte:indexByte],
onDone=lambda indexNum=indexNum: onIndexReached(indexNum))
prevByte = indexByte
if not isSpeaking:
return CALLBACK_ABORT_SYNTHESIS
player.feed(wav[prevByte:])
_numBytesPushed += len(wav)
return CALLBACK_CONTINUE_SYNTHESIS
except:
log.error("callback", exc_info=True)
class BgThread(threading.Thread):
def __init__(self):
super().__init__(name=f"{self.__class__.__module__}.{self.__class__.__qualname__}")
self.setDaemon(True)
def run(self):
global isSpeaking
while True:
func, args, kwargs = bgQueue.get()
if not func:
break
try:
func(*args, **kwargs)
except:
log.error("Error running function from queue", exc_info=True)
bgQueue.task_done()
def _execWhenDone(func, *args, mustBeAsync=False, **kwargs):
global bgQueue
if mustBeAsync or bgQueue.unfinished_tasks != 0:
# Either this operation must be asynchronous or there is still an operation in progress.
# Therefore, run this asynchronously in the background thread.
bgQueue.put((func, args, kwargs))
else:
func(*args, **kwargs)
def _speak(text):
global isSpeaking, _numBytesPushed
uniqueID=c_int()
# If eSpeak was interrupted while speaking SSML that changed parameters such as pitch,
# it may not reset those runtime values back to the user-configured values.
# Therefore forcefully cause eSpeak to reset its parameters each time it begins to speak again after not speaking.
if not isSpeaking:
espeakDLL.espeak_ng_Cancel()
isSpeaking = True
_numBytesPushed = 0
# eSpeak can only process compound emojis when using a UTF8 encoding
text=text.encode('utf8',errors='ignore')
flags = espeakCHARS_UTF8 | espeakSSML | espeakPHONEMES
return espeakDLL.espeak_Synth(text,0,0,0,0,flags,byref(uniqueID),0)
def speak(text):
global bgQueue
_execWhenDone(_speak, text, mustBeAsync=True)
def stop():
global isSpeaking, bgQueue
# Kill all speech from now.
# We still want parameter changes to occur, so requeue them.
params = []
try:
while True:
item = bgQueue.get_nowait()
if item[0] != _speak:
params.append(item)
bgQueue.task_done()
except queue.Empty:
# Let the exception break us out of this loop, as queue.empty() is not reliable anyway.
pass
for item in params:
bgQueue.put(item)
isSpeaking = False
player.stop()
def pause(switch):
global player
player.pause(switch)
def setParameter(param,value,relative):
_execWhenDone(espeakDLL.espeak_SetParameter,param,value,relative)
def getParameter(param,current):
return espeakDLL.espeak_GetParameter(param,current)
def getVoiceList():
voices=espeakDLL.espeak_ListVoices(None)
voiceList=[]
for voice in voices:
if not voice: break
voiceList.append(voice.contents)
return voiceList
def getCurrentVoice():
voice = espeakDLL.espeak_GetCurrentVoice()
if voice:
return voice.contents
else:
return None
def setVoice(voice):
# For some weird reason, espeak_EspeakSetVoiceByProperties throws an integer divide by zero exception.
setVoiceByName(voice.identifier)
def setVoiceByName(name):
_execWhenDone(espeakDLL.espeak_SetVoiceByName,encodeEspeakString(name))
def _setVoiceAndVariant(voice=None, variant=None):
v=getCurrentVoice()
res = decodeEspeakString(v.identifier).split("+")
if not voice:
voice = res[0]
if not variant:
if len(res) == 2:
variant = res[1]
else:
variant = "none"
if variant == "none":
espeakDLL.espeak_SetVoiceByName(encodeEspeakString(voice))
else:
try:
espeakDLL.espeak_SetVoiceByName(encodeEspeakString("%s+%s" % (voice, variant)))
except:
espeakDLL.espeak_SetVoiceByName(encodeEspeakString(voice))
def setVoiceAndVariant(voice=None, variant=None):
_execWhenDone(_setVoiceAndVariant, voice=voice, variant=variant)
def _setVoiceByLanguage(lang):
v=espeak_VOICE()
lang=lang.replace('_','-')
if lang[:2] == 'ja':
lang = 'en-us'
v.languages=encodeEspeakString(lang)
try:
espeakDLL.espeak_SetVoiceByProperties(byref(v))
except:
v.languages=encodeEspeakString("en")
espeakDLL.espeak_SetVoiceByProperties(byref(v))
def setVoiceByLanguage(lang):
_execWhenDone(_setVoiceByLanguage, lang)
def espeak_errcheck(res, func, args):
if res != EE_OK:
raise RuntimeError("%s: code %d" % (func.__name__, res))
return res
def initialize(indexCallback=None):
"""
@param indexCallback: A function which is called when eSpeak reaches an index.
It is called with one argument:
the number of the index or C{None} when speech stops.
"""
global espeakDLL, bgThread, bgQueue, player, onIndexReached
espeakDLL = cdll.LoadLibrary(os.path.join(globalVars.appDir, "synthDrivers", "espeak.dll"))
espeakDLL.espeak_Info.restype=c_char_p
espeakDLL.espeak_Synth.errcheck=espeak_errcheck
espeakDLL.espeak_SetVoiceByName.errcheck=espeak_errcheck
espeakDLL.espeak_SetVoiceByProperties.errcheck=espeak_errcheck
espeakDLL.espeak_SetParameter.errcheck=espeak_errcheck
espeakDLL.espeak_Terminate.errcheck=espeak_errcheck
espeakDLL.espeak_ListVoices.restype=POINTER(POINTER(espeak_VOICE))
espeakDLL.espeak_GetCurrentVoice.restype=POINTER(espeak_VOICE)
espeakDLL.espeak_SetVoiceByName.argtypes=(c_char_p,)
eSpeakPath = os.path.join(globalVars.appDir, "synthDrivers")
sampleRate = espeakDLL.espeak_Initialize(
AUDIO_OUTPUT_SYNCHRONOUS, 300,
os.fsencode(eSpeakPath),
# #10607: ensure espeak does not exit NVDA's process on errors such as the espeak path being invalid.
espeakINITIALIZE_DONT_EXIT
)
if sampleRate <= 0:
raise OSError(f"espeak_Initialize failed with code {sampleRate}. Given Espeak data path of {eSpeakPath}")
player = nvwave.WavePlayer(
channels=1,
samplesPerSec=sampleRate,
bitsPerSample=16,
outputDevice=config.conf["speech"]["outputDevice"],
buffered=True
)
onIndexReached = indexCallback
espeakDLL.espeak_SetSynthCallback(callback)
bgQueue = queue.Queue()
bgThread=BgThread()
bgThread.start()
def terminate():
global bgThread, bgQueue, player, espeakDLL , onIndexReached
stop()
bgQueue.put((None, None, None))
bgThread.join()
espeakDLL.espeak_Terminate()
bgThread=None
bgQueue=None
player.close()
player=None
espeakDLL=None
onIndexReached = None
def info():
# Python 3.8: a path string must be specified, a NULL is fine when what we need is version string.
return espeakDLL.espeak_Info(None).decode()
def getVariantDict():
dir = os.path.join(globalVars.appDir, "synthDrivers", "espeak-ng-data", "voices", "!v")
# Translators: name of the default espeak variant.
variantDict={"none": pgettext("espeakVarient", "none")}
for fileName in os.listdir(dir):
absFilePath = os.path.join(dir, fileName)
if os.path.isfile(absFilePath):
# In python 3, open assumes the default system encoding by default.
# This fails if Windows' "use Unicode UTF-8 for worldwide language support" option is enabled.
# The expected encoding is unknown, therefore use latin-1 to stay as close to Python 2 behavior as possible.
try:
with open(absFilePath, 'r', encoding="latin-1") as file:
for line in file:
if line.startswith('name '):
temp=line.split(" ")
if len(temp) ==2:
name=temp[1].rstrip()
break
name=None
except:
log.error("Couldn't parse espeak variant file %s" % fileName, exc_info=True)
continue
if name is not None:
variantDict[fileName]=name
return variantDict
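# --- Hedged usage sketch (not part of the original driver) -------------------
# Only meaningful inside a running NVDA instance (nvwave, config and
# globalVars are NVDA runtime modules); the mark value is an assumption.
# def onIndex(index):
#     print("index reached:", index)
# initialize(onIndex)
# speak('<mark name="1"/>Hello world')  # the SSML mark triggers the callback with 1
# terminate()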
|
nilq/baby-python
|
python
|
import mock
import pytest
from django import forms
from django.db import models
from filer.models import Image
from barbeque.filer import FilerFileField, AdminFileFormField
from barbeque.tests.factories.filer import ImageFactory
class FileModel(models.Model):
file1 = FilerFileField(null=True)
file2 = FilerFileField(blank=True)
file3 = FilerFileField()
@pytest.mark.django_db
class TestAdminFileFormField:
def test_super_not_clean(self):
field = AdminFileFormField(
mock.Mock(), Image.objects.all(), 'id', required=False)
assert field.clean('') is None
def test_without_alt_text_disabled(self):
image = ImageFactory.create(default_alt_text=None)
field = AdminFileFormField(
mock.Mock(), Image.objects.all(), 'id', alt_text_required=False)
assert isinstance(field.clean(str(image.pk)), Image)
def test_without_alt_text_enabled(self):
image = ImageFactory.create(default_alt_text=None)
field = AdminFileFormField(mock.Mock(), Image.objects.all(), 'id')
with pytest.raises(forms.ValidationError):
field.clean(str(image.pk))
def test_with_alt_text_enabled(self):
image = ImageFactory.create(default_alt_text='Test')
field = AdminFileFormField(mock.Mock(), Image.objects.all(), 'id')
assert isinstance(field.clean(str(image.pk)), Image)
def test_extensions_invalid_disabled(self):
image = ImageFactory.create(default_alt_text='Test')
field = AdminFileFormField(
mock.Mock(), Image.objects.all(), 'id')
assert isinstance(field.clean(str(image.pk)), Image)
def test_extensions_valid_enabled(self):
image = ImageFactory.create(default_alt_text='Test')
field = AdminFileFormField(
mock.Mock(), Image.objects.all(), 'id', extensions=['jpg', 'gif'])
assert isinstance(field.clean(str(image.pk)), Image)
def test_extensions_invalid_enabled(self):
image = ImageFactory.create(default_alt_text='Test')
field = AdminFileFormField(
mock.Mock(), Image.objects.all(), 'id', extensions=['png', 'gif'])
with pytest.raises(forms.ValidationError):
field.clean(str(image.pk))
class TestFilerFileField:
def test_formfield(self):
form_class = forms.models.modelform_factory(FileModel, fields='__all__')
assert isinstance(form_class().fields['file1'], AdminFileFormField)
@pytest.mark.django_db
def test_blank_null(self):
assert FileModel._meta.get_field('file1').blank is True
assert FileModel._meta.get_field('file1').null is True
assert FileModel._meta.get_field('file2').blank is True
assert FileModel._meta.get_field('file2').null is True
assert FileModel._meta.get_field('file3').blank is False
assert FileModel._meta.get_field('file3').null is False
|
nilq/baby-python
|
python
|
import shutil
from pathlib import Path
import dask.dataframe as dd
import numpy as np
import pandas as pd
from bokeh.io import export_png
from bokeh.io import output_file
from bokeh.models import Column
from bokeh.models import Div
from bokeh.plotting import figure
from bokeh.plotting import save
from sid.colors import get_colors
from sid.statistics import calculate_r_effective
from sid.statistics import calculate_r_zero
def visualize_simulation_results(
data,
outdir_path,
infection_vars,
background_vars,
window_length=7,
):
"""Visualize the results one or more simulation results.
Args:
data (str, pandas.DataFrame, Path, list): one or more simulation results,
given as DataFrames or as paths to stored (parquet) results.
outdir_path (path): path to the folder where to save the results.
Careful, all contents are removed when the function is called.
infection_vars (list): list of infection rates to plot
background_vars (list): list of background variables by whose value to group
the results. Have to be present in all simulation results.
window_length (int): How many dates to use for the reproduction numbers.
"""
colors = get_colors("categorical", 12)
if isinstance(background_vars, str):
background_vars = [background_vars]
outdir_path = Path(outdir_path)
datasets = [data] if isinstance(data, (str, pd.DataFrame, Path)) else data
datasets = [
Path(path_or_df) if isinstance(path_or_df, str) else path_or_df
for path_or_df in datasets
]
_create_folders(outdir_path, background_vars)
rates = _create_rates_for_all_data(
datasets,
infection_vars,
background_vars,
window_length,
)
for bg_var in ["general"] + background_vars:
if bg_var == "general":
title = "Rates in the General Population"
else:
title = f"Rates According to {_nice_str(bg_var)}"
rate_plots = _create_rate_plots(rates[bg_var], colors, title)
title_element = Div(text=title, style={"font-size": "150%"})
_export_plots_and_layout(
title=title_element,
plots=rate_plots,
outdir_path=outdir_path / bg_var,
)
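# A hedged usage sketch (file names, the output directory, and the variable names
# below are illustrative placeholders, not taken from the original project):
#
#   visualize_simulation_results(
#       data=["runs/sim_0.parquet", "runs/sim_1.parquet"],
#       outdir_path="figures",
#       infection_vars=["ever_infected", "infectious"],
#       background_vars=["age_group"],
#   )
#
# This would write one png per rate and an overview.html per background variable.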
def _create_folders(outdir_path, background_vars):
if outdir_path.exists():
shutil.rmtree(outdir_path)
outdir_path.mkdir()
for var in ["general"] + background_vars:
outdir_path.joinpath(var).mkdir()
def _create_rates_for_all_data(
datasets, infection_vars, background_vars, window_length
):
"""Create the statistics for each dataset and merge them into one dataset.
Args:
datasets (list): list of str, Paths to pickled DataFrames or pd.DataFrames.
infection_vars (list): list of infection rates to plot
background_vars (list): list of background variables by whose value to group
the results. Have to be present in all simulation results.
window_length (int): How many dates to use for the reproduction numbers.
Returns:
    rates (pandas.DataFrame): DataFrame with the dates as index.
The columns are a MultiIndex with four levels: The outermost is the
"bg_var" ("general" for the overall rate).
The next is the "rate" (e.g. the infectious rate or r zero),
then "bg_value", the value of the background variable and last "data_id".
"""
name_to_statistics = {}
for i, df_or_path in enumerate(datasets):
vars_for_r_zero = ["immunity", "n_has_infected", "cd_infectious_false"]
keep_vars = sorted(
set(infection_vars + background_vars + vars_for_r_zero + ["date"])
)
df_name, df = _load_data(df_or_path, keep_vars, i)
name_to_statistics[df_name] = _create_statistics(
df=df,
infection_vars=infection_vars,
background_vars=background_vars,
window_length=window_length,
)
rates = pd.concat(name_to_statistics, axis=1, names=["data_id"])
order = ["bg_var", "rate", "bg_value", "data_id"]
rates = rates.reorder_levels(order=order, axis=1)
return rates
def _load_data(df_or_path, keep_vars, i):
if isinstance(df_or_path, pd.DataFrame):
df = df_or_path[keep_vars]
df_name = i
elif isinstance(df_or_path, Path):
df = dd.read_parquet(df_or_path, engine="fastparquet")[keep_vars].compute()
df_name = df_or_path.stem
else:
raise NotImplementedError
return df_name, df
def _create_statistics(df, infection_vars, background_vars, window_length):
"""Calculate the infection rates and reproduction numbers for each date.
Args:
df (pandas.DataFrame): The simulation results.
infection_vars (list): list of infection rates to plot
background_vars (list): list of background variables by whose value to group
the results. Have to be present in all simulation results.
window_length (int): How many dates to use for the reproduction numbers.
Returns:
rates (pandas.DataFrame): DataFrame with the statistics of one simulation run.
The index are the dates. The columns are a MultiIndex with three levels:
The outermost is the "bg_var" ("general" for the overall rate).
The next is the "bg_value", the last is the "rate"
(e.g. the infectious rate or r zero).
"""
gb = df.groupby("date")
overall = gb.mean()[infection_vars]
overall["r_zero"] = gb.apply(calculate_r_zero, window_length)
overall["r_effective"] = gb.apply(calculate_r_effective, window_length)
# add column levels for later
overall.columns.name = "rate"
overall = _prepend_column_level(overall, "general", "bg_value")
overall = _prepend_column_level(overall, "general", "bg_var")
single_df_rates = [overall]
for bg_var in background_vars:
gb = df.groupby([bg_var, "date"])
infection_rates = gb.mean()[infection_vars].unstack(level=0)
r_zeros = gb.apply(calculate_r_zero, window_length).unstack(level=0)
r_zeros = _prepend_column_level(r_zeros, "r_zero", "rate")
r_eff = gb.apply(calculate_r_effective, window_length).unstack(level=0)
r_eff = _prepend_column_level(r_eff, "r_effective", "rate")
rates_by_group = pd.concat([infection_rates, r_zeros, r_eff], axis=1)
rates_by_group.columns.names = ["rate", "bg_value"]
rates_by_group = _prepend_column_level(rates_by_group, bg_var, "bg_var")
rates_by_group = rates_by_group.swaplevel("rate", "bg_value", axis=1)
single_df_rates.append(rates_by_group)
rates = pd.concat(single_df_rates, axis=1).fillna(0)
return rates
def _prepend_column_level(df, key, name):
prepended = pd.concat([df], keys=[key], names=[name], axis=1)
return prepended
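# Small worked example of the helper above (a sketch):
#
#   >>> df = pd.DataFrame({"a": [1], "b": [2]})
#   >>> _prepend_column_level(df, "r_zero", "rate").columns.tolist()
#   [('r_zero', 'a'), ('r_zero', 'b')]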
def _create_rate_plots(rates, colors, title):
"""Plot all rates for a single background variable
Args:
rates (pandas.DataFrame): DataFrame with the dates as index. The columns are a
MultiIndex with three levels: The outermost is the variable name (e.g.
infectious or r_zero). The next are the values the background variable can
take, the last "data_id".
colors (list): list of colors to use.
title (str): the plot title will be the name of the rate plus this string.
Returns:
plots (list): list of bokeh plots.
"""
vars_to_plot = rates.columns.levels[0]
plots = []
full_range_vars = [
"ever_infected",
"immunity",
"symptomatic_among_infectious",
]
for var, color in zip(vars_to_plot, colors):
y_range = (0, 1) if var in full_range_vars else None
bg_values = rates[var].columns.unique().levels[0]
for bg_val in bg_values:
plot_title = f"{_nice_str(var)} {title}"
if bg_val != "general":
plot_title += f": {bg_val}"
p = _plot_rates(
rates=rates[var][bg_val],
title=plot_title,
color=color,
y_range=y_range,
)
p.name = var if bg_val == "general" else f"{var}_{bg_val.replace(' ', '')}"
plots.append(p)
return plots
def _plot_rates(rates, title, color, y_range):
"""Plot the rates over time.
Args:
rates (DataFrame): the index are the x values, the values the y values.
Every column is plotted as a separate line.
color (str): color.
title (str): plot title.
y_range (tuple or None): range of the y axis.
Returns:
p (bokeh figure)
"""
xs = rates.index
p = figure(
tools=[],
plot_height=400,
plot_width=800,
title=title,
y_range=y_range,
x_axis_type="datetime",
)
# plot the median
p.line(x=xs, y=rates.median(axis=1), alpha=1, line_width=2.75, line_color=color)
# plot the confidence band
q5 = rates.apply(np.nanpercentile, q=5, axis=1)
q95 = rates.apply(np.nanpercentile, q=95, axis=1)
p.varea(x=xs, y1=q95, y2=q5, alpha=0.2, color=color)
# add the trajectories
for var in rates:
p.line(x=xs, y=rates[var], line_width=1, line_color=color, alpha=0.3)
p = _style(p)
return p
def _export_plots_and_layout(title, plots, outdir_path):
"""Save all plots as png and the layout as html.
Args:
title (bokeh.Div): title element.
plots (list): list of bokeh plots
outdir_path (pathlib.Path): base path to which to append the plot name to build
the path where to save each plot.
"""
for p in plots:
outpath = outdir_path / f"{p.name}.png"
output_file(outpath)
export_png(p, filename=outpath)
output_file(outdir_path / "overview.html")
save(Column(title, *plots))
def _style(p):
gray = "#808080"
p.outline_line_color = None
p.xgrid.visible = False
p.ygrid.visible = False
p.axis.minor_tick_line_color = None
p.axis.axis_line_color = gray
p.axis.major_label_text_color = gray
p.axis.major_tick_line_color = gray
return p
def _nice_str(s):
return s.replace("_", " ").title()
|
nilq/baby-python
|
python
|
import sys, logging, time, resource, gc, os
import multiprocessing
from multiprocessing import Pool
from util import print_datetime
import numpy as np
import gurobipy as grb
import torch
def estimate_weights_no_neighbors(YT, M, XT, prior_x_parameter_set, sigma_yx_inverse, X_constraint, dropout_mode, replicate):
"""Estimate weights for a single replicate in the SpiceMix model without considering neighbors.
This is essentially a benchmarking convenience function, and should return similar results to running vanilla NMF.
Args:
YT: transpose of gene expression matrix for sample, with shape (num_cells, num_genes)
M: current estimate of metagene matrix, with shape (num_genes, num_metagenes)
XT: transpose of metagene weights for sample, with shape
Returns:
New estimate of transposed metagene weight matrix XT.
"""
if dropout_mode != 'raw':
raise NotImplementedError
logging.info(f'{print_datetime()}Estimating weights without neighbors in replicate {replicate}')
_, num_metagenes = XT.shape
updated_XT = np.zeros_like(XT)
weight_model = grb.Model('X w/o n')
weight_model.Params.OptimalityTol=1e-4
weight_model.Params.FeasibilityTol=1e-4
weight_model.setParam('OutputFlag', False)
weight_model.Params.Threads = 1
weight_variables = weight_model.addVars(num_metagenes, lb=0.)
assert X_constraint == 'none'
# Adding shared components of the objective
# quadratic term in log Pr[ Y | X, Theta ]
shared_objective = 0
if dropout_mode == 'raw':
# MTM = M.T @ M * (sigma_yx_inverse**2 / 2.)
MTM = (M.T @ M + 1e-6 * np.eye(num_metagenes)) * (sigma_yx_inverse ** 2 / 2.)
shared_objective += grb.quicksum([weight_variables[index] * MTM[index, index] * weight_variables[index] for index in range(num_metagenes)])
MTM *= 2
shared_objective += grb.quicksum([weight_variables[index] * MTM[index, j] * weight_variables[j] for index in range(num_metagenes) for j in range(index+1, num_metagenes)])
del MTM
YTM = YT @ M * (-sigma_yx_inverse ** 2)
else:
raise NotImplementedError
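# Note on the algebra above (a sketch, not quoted from the original source): expanding
# (sigma_yx_inverse ** 2 / 2) * ||y - M x||^2 in x gives
#   (sigma_yx_inverse ** 2 / 2) * x^T (M^T M) x - sigma_yx_inverse ** 2 * (y^T M) x + const,
# which is why MTM carries the factor sigma_yx_inverse ** 2 / 2 and YTM the factor
# -sigma_yx_inverse ** 2; the constant y^T y term is added per cell in the loop below.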
# prior on X
prior_x_mode, *prior_x_parameters = prior_x_parameter_set
if prior_x_mode in ('Truncated Gaussian', 'Gaussian'):
mu_x, sigma_x_inv = prior_x_parameters
assert (sigma_x_inv > 0).all()
t = sigma_x_inv ** 2 / 2
shared_objective += grb.quicksum([t[metagene] * weight_variables[metagene] * weight_variables[metagene] for metagene in range(num_metagenes)])
t *= - 2 * mu_x
shared_objective += grb.quicksum([t[metagene] * weight_variables[metagene] for metagene in range(num_metagenes)])
shared_objective += np.dot(mu_x**2, sigma_x_inv**2) / 2
elif prior_x_mode in ('Exponential', 'Exponential shared', 'Exponential shared fixed'):
lambda_x, = prior_x_parameters
assert (lambda_x >= 0).all()
shared_objective += grb.quicksum([lambda_x[metagene] * weight_variables[metagene] for metagene in range(num_metagenes)])
else:
raise NotImplementedError
for cell_index, (y, yTM) in enumerate(zip(YT, YTM)):
objective = shared_objective + grb.quicksum(yTM[metagene] * weight_variables[metagene] for metagene in range(num_metagenes)) + np.dot(y, y) * sigma_yx_inverse ** 2 / 2.
weight_model.setObjective(objective, grb.GRB.MINIMIZE)
weight_model.optimize()
updated_XT[cell_index] = [weight_variables[metagene].x for metagene in range(num_metagenes)]
return updated_XT
def estimate_weights_icm(YT, E, M, XT, prior_x_parameter_set, sigma_yx_inverse, sigma_x_inverse, X_constraint, dropout_mode, pairwise_potential_mode, replicate):
r"""Estimate weights for a single replicate in the SpiceMix model using the Iterated Conditional Model (ICM).
Notes:
.. math::
\hat{X}_{\text{MAP}} &= \mathop{\arg\max}_{X \in \mathbb{R}_+^{K \times N}} \left\{ \sum_{i \in \mathcal{V}} \dots \right\} \\
s_i &= \frac{-\lambda_x^\top z_i}{(M z_i)^\top (M z_i)} \\
z_i &= \frac{}{}
We write XT in terms of size factors S such that XT = S * ZT.
Args:
YT: transpose of gene expression matrix for replicate, with shape (num_cells, num_genes)
E: adjacency list for neighborhood graph in this replicate
M: current estimate of metagene matrix, with shape (num_genes, num_metagenes)
XT: transpose of weight matrix, with shape (num_cells, num_metagenes)
prior_x_parameter_set: set of parameters defining prior distribution on weights, with structure (prior_x_mode, *prior_x_parameters)
sigma_yx_inverse: TODO
sigma_x_inverse: inverse of metagene affinity matrix
X_constraint: constraint on elements of weight matrix
dropout_mode: TODO:
pairwise_potential_mode: TODO
Returns:
New estimate of transposed metagene weight matrix XT.
"""
prior_x_mode, *prior_x_parameters = prior_x_parameter_set
num_cells, _ = YT.shape
_, num_metagenes = M.shape
MTM = None
YTM = None
# Precomputing some important matrix products
if dropout_mode == 'raw':
MTM = M.T @ M * sigma_yx_inverse**2 / 2
YTM = YT @ M * sigma_yx_inverse**2 / 2
else:
raise NotImplementedError
def calculate_objective(S, ZT):
"""Calculate current value of ICM objective.
Args:
YT: transpose of gene expression matrix for a particular sample
S: a vector of total metagene expressions for each cell
ZT: current estimate of weights for the sample, divided by the total for each cell
Returns:
value of ICM objective
"""
objective = 0
difference = YT - ( S * ZT ) @ M.T
if dropout_mode == 'raw':
difference = difference.ravel()
else:
raise NotImplementedError
objective += np.dot(difference, difference) * sigma_yx_inverse**2 / 2
if pairwise_potential_mode == 'normalized':
for neighbors, z_i in zip(E.values(), ZT):
objective += z_i @ sigma_x_inverse @ ZT[neighbors].sum(axis=0) / 2
else:
raise NotImplementedError
if prior_x_mode in ('Exponential', 'Exponential shared', 'Exponential shared fixed'):
lambda_x, = prior_x_parameters
objective += lambda_x @ (S * ZT).sum(axis=0)
del lambda_x
else:
raise NotImplementedError
objective /= YT.size
return objective
def update_s_i(z_i, yTM):
"""Calculate closed form update for s_i.
Assuming fixed value of z_i, update for s_i takes the following form:
TODO
Args:
z_i: current estimate of normalized metagene expression
neighbors: list of neighbors of current cell
yTM: row of YTM corresponding to current cell
MTM: row of MTM corresponding to current cell
Returns:
Updated estimate of s_i
"""
denominator = z_i @ MTM @ z_i
numerator = yTM @ z_i
if prior_x_mode in ('Exponential', 'Exponential shared', 'Exponential shared fixed'):
lambda_x, = prior_x_parameters
# TODO: do we need the 1/2 here?
numerator -= lambda_x @ z_i / 2
del lambda_x
else:
raise NotImplementedError
numerator = np.maximum(numerator, 0)
s_i_new = numerator / denominator
return s_i_new
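# Sketch of the closed form implemented above (read off the code, not quoted from
# the paper): with an exponential prior,
#   s_i = max(yTM . z_i - (lambda_x . z_i) / 2, 0) / (z_i^T MTM z_i)
# i.e. the unconstrained quadratic minimizer in s_i, clipped at zero.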
def update_z_i(s_i, y_i, yTM, eta):
"""Calculate update for z_i using Gurobi simplex algorithm.
Assuming fixed value of s_i, update for z_i is a linear program of the following form:
TODO
Args:
s_i: current estimate of size factor
yTM: row of YTM corresponding to current cell
eta: aggregate contribution of neighbor z_j's, weighted by affinity matrix (sigma_x_inverse)
Returns:
Updated estimate of z_i
"""
objective = 0
# Element-wise matrix multiplication (Mz_is_i)^\top(Mz_is_i)
factor = s_i**2 * MTM
objective += grb.quicksum([weight_variables[index] * factor[index, index] * weight_variables[index] for index in range(num_metagenes)])
factor *= 2
objective += grb.quicksum([weight_variables[index] * factor[index, j] * weight_variables[j] for index in range(num_metagenes) for j in range(index+1, num_metagenes)])
# Adding terms for -2 y_i M z_i s_i
factor = -2 * s_i * yTM
# TODO: fix formula below
# objective += grb.quicksum([weight_variables[index] * factor[index] for index in range(num_metagenes)])
# objective += y_i @ y_i
# objective *= sigma_yx_inverse**2 / 2
factor += eta
# factor = eta
if prior_x_mode in ('Exponential',):
lambda_x, = prior_x_parameters
factor += lambda_x * s_i
del lambda_x
elif prior_x_mode in ('Exponential shared', 'Exponential shared fixed'):
pass
else:
raise NotImplementedError
objective += grb.quicksum([weight_variables[index] * factor[index] for index in range(num_metagenes)])
# TODO: is this line necessary? Doesn't seem like z_i affects this term of the objective
objective += y_i @ y_i * sigma_yx_inverse**2 / 2
weight_model.setObjective(objective, grb.GRB.MINIMIZE)
weight_model.optimize()
z_i_new = np.array([weight_variables[index].x for index in range(num_metagenes)])
return z_i_new
global_iterations = 100
local_iterations = 100
weight_model = grb.Model('ICM')
weight_model.Params.OptimalityTol=1e-4
weight_model.Params.FeasibilityTol=1e-4
weight_model.Params.OutputFlag = False
weight_model.Params.Threads = 1
weight_model.Params.BarConvTol = 1e-6
weight_variables = weight_model.addVars(num_metagenes, lb=0.)
weight_model.addConstr(weight_variables.sum() == 1)
S = XT.sum(axis=1, keepdims=True)
ZT = XT / (S + 1e-30)
last_objective = calculate_objective(S, ZT)
best_objective, best_iteration = last_objective, -1
for global_iteration in range(global_iterations):
last_ZT = np.copy(ZT)
last_S = np.copy(S)
locally_converged = False
if pairwise_potential_mode == 'normalized':
for index, (neighbors, y_i, yTM, z_i, s_i) in enumerate(zip(E.values(), YT, YTM, ZT, S)):
eta = ZT[neighbors].sum(axis=0) @ sigma_x_inverse
for local_iteration in range(local_iterations):
s_i_new = update_s_i(z_i, yTM)
s_i_new = np.maximum(s_i_new, 1e-15)
delta_s_i = s_i_new - s_i
s_i = s_i_new
z_i_new = update_z_i(s_i, y_i, yTM, eta)
delta_z_i = z_i_new - z_i
z_i = z_i_new
locally_converged |= (np.abs(delta_s_i) / (s_i + 1e-15) < 1e-3 and np.abs(delta_z_i).max() < 1e-3)
if locally_converged:
break
if not locally_converged:
logging.warning(f'Cell {index} in the {replicate}-th replicate did not converge in {local_iterations} iterations;\ts_i = {s_i:.2e}, delta_s_i = {delta_s_i:.2e}, max delta_z_i = {np.abs(delta_z_i).max():.2e}')
ZT[index] = z_i
S[index] = s_i
else:
raise NotImplementedError
globally_converged = False
dZT = ZT - last_ZT
dS = S - last_S
current_objective = calculate_objective(S, ZT)
globally_converged |= (np.abs(dZT).max() < 1e-2 and np.abs(dS / (S + 1e-15)).max() < 1e-2 and current_objective > last_objective - 1e-4)
# TODO: do we need to keep this?
force_show_flag = False
# force_show_flag |= np.abs(dZT).max() > 1-1e-5
if global_iteration % 5 == 0 or globally_converged or force_show_flag:
print(f'>{replicate} current_objective at iteration {global_iteration} = {current_objective:.2e},\tdiff = {np.abs(dZT).max():.2e}\t{np.abs(dS).max():.2e}\t{current_objective - last_objective:.2e}')
print(
f'ZT summary statistics: '
f'# <0 = {(ZT < 0).sum().astype(float) / num_cells:.1f}, '
f'# =0 = {(ZT == 0).sum().astype(float) / num_cells:.1f}, '
f'# <1e-10 = {(ZT < 1e-10).sum().astype(float) / num_cells:.1f}, '
f'# <1e-5 = {(ZT < 1e-5).sum().astype(float) / num_cells:.1f}, '
f'# <1e-2 = {(ZT < 1e-2).sum().astype(float) / num_cells:.1f}, '
f'# >1e-1 = {(ZT > 1e-1).sum().astype(float) / num_cells:.1f}'
)
print(
f'S summary statistics: '
f'# 0 = {(S == 0).sum()}, '
f'min = {S.min():.1e}, '
f'max = {S.max():.1e}'
)
sys.stdout.flush()
# TODO: do we need this assertion still?
assert not current_objective > last_objective + 1e-6
last_objective = current_objective
if current_objective < best_objective:
best_objective, best_iteration = current_objective, global_iteration
if globally_converged:
break
del weight_model
# Enforce positivity constraint on S
XT = np.maximum(S, 1e-15) * ZT
return XT
|
nilq/baby-python
|
python
|
# visualizer.py
# Contains functions for image visualization
import cv2 as cv
import matplotlib.pyplot as plt
import numpy as np
import random
import skimage.io as io
import torch
from operator import itemgetter
from PIL import Image
from torchvision import datasets, models, transforms
from metrics import getPercentMask, calculateIoU
# Minimum fraction of an image the object must occupy in order to be considered prominent
PROMINENT_PERCENT_THRESHOLD = 0.3
# Extract images with one very prominent object and other possible smaller objects
OTHER_OBJ_THRESH = 0.1
# Maximum fraction of an image the object must occupy in order to be considered prominent
MAX_PERCENT = 0.9
# Default input dimensions
IMG_SIZE = 224
# Maximum number of objects that are considered to be prominent
MAX_PROMINENT_NUM = 4
# Displays an image
def imshow(img, show_axis=False, save=False, save_path=None):
if not show_axis: plt.axis('off')
plt.imshow(img)
if save: plt.savefig(save_path)
plt.show()
plt.clf()
# Returns bit mask for objects of interest in image
def getBitMask(annotations, cocoData):
mask = cocoData.coco.annToMask(annotations[0])
# Create conglomerate mask over all objects in image
for i in range(1, len(annotations)):  # index 0 is already in the initial mask
mask = mask | cocoData.coco.annToMask(annotations[i])
#imshow(mask)
return mask
# Returns masked image
def getMaskedImg(img, mask):
mask_arr = np.array(mask)
# Reshape to give 3rd axis for broadcasting to 3 channels
mask_arr = np.expand_dims(mask_arr, axis=-1)
masked_img = np.array(img)
masked_img = masked_img * mask_arr
return masked_img
# Given a tensor of images in NCHW format, converts to numpy images
def tensorToNpImg(tensor, img_type='mask'):
image = tensor.detach().numpy()
# Re-normalize for imshow plotting
if(img_type != 'mask'):
image = image/255
image = np.transpose(image, [1,2,0])
return image.squeeze()
def thresholdProbMask(prob_mask, threshold=0.5):
prob_mask[prob_mask>threshold] = 1
prob_mask[prob_mask<=threshold] = 0
return prob_mask
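# Example (a sketch; note the mask is modified in place and also returned):
#
#   >>> import numpy as np
#   >>> thresholdProbMask(np.array([0.2, 0.7, 0.5]))
#   array([0., 1., 0.])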
# Given model, input image, and target mask
# Evaluates output mask using model and displays it against the target
def extractProminent(model, img, target):
plt.figure()
plt.subplot(1,3,1)
plt.imshow(tensorToNpImg(img, 'img')); plt.axis('off')
plt.subplot(1,3,2)
plt.imshow(tensorToNpImg(target)); plt.axis('off')
res = torch.sigmoid(model(img.unsqueeze(0).float()))
plt.subplot(1,3,3)
generatedMask = thresholdProbMask(tensorToNpImg(res.squeeze(0)))
plt.imshow(generatedMask); plt.axis('off')
print("IoU:", calculateIoU(res, target))
# Plots curve for given train and validation arrays
# ctype={'Accuracy","Loss"}
def plotCurve(train_val, valid_val, num_epochs, ctype):
plt.title('Train vs Validation {}'.format(ctype))
plt.plot(range(num_epochs), train_val, label='Train')
plt.plot(range(num_epochs), valid_val, label='Validation')
plt.xlabel('Epoch')
plt.ylabel(ctype)
plt.legend(loc='best')
plt.show()
def plotPerformance(train_loss, valid_loss, train_acc, valid_acc, num_epochs):
# Plot loss curves
plotCurve(train_loss, valid_loss, num_epochs, ctype = 'Loss')
# Plot accuracy curves
plotCurve(train_acc, valid_acc, num_epochs, ctype = 'IoU')
# Simple erosion-dilation denoiser
def denoise(img, kernel_size=5):
return cv.morphologyEx(img, cv.MORPH_OPEN, cv.getStructuringElement(cv.MORPH_RECT,(kernel_size,kernel_size)))
|
nilq/baby-python
|
python
|
import os
import pandas as pd
# Configuration and constant definitions for the API
# Search
TEMPLATES_INDEX_FILENAME = 'templates.pkl'
SEARCH_INDEX_FILENAME = 'index_clean.pkl'  # previously: os.path.join('images', 'index_4.df')
SEARCH_READER_FN = pd.read_pickle
SEARCH_COLUMNS = ['fusion_text_glove', 'title_glove', 'ocr_glove', 'img_embedding']
SEARCH_MAX_DIMS = [300, 300, 300, 512]  # previously: [30, 30, 30, 50]
# Models
PRETRAINED_MODELS_DIR = 'pretrained'
if not os.path.isdir(PRETRAINED_MODELS_DIR):
os.makedirs(PRETRAINED_MODELS_DIR)
EMBEDDINGS_FILENAME = os.path.join(PRETRAINED_MODELS_DIR, 'glove.6B.300d_dict.pickle')
EMBEDDINGS_URL = 'https://cloud.tsinghua.edu.cn/f/0e2ab878bb5d4698b344/?dl=1'
# Temp images
ALLOWED_IMAGE_EXTENSIONS = [".jpg", ".png", ".gif"]
TEMP_IMAGES_DIR = os.path.join('images', 'external')
if not os.path.isdir(TEMP_IMAGES_DIR):
os.makedirs(TEMP_IMAGES_DIR)
|
nilq/baby-python
|
python
|
from setuptools import setup, find_packages
setup(
name='acl-iitbbs',
version='0.1',
description='Fetch attendance and results from ERP and pretty-print them on the terminal.',
author='Aman Pratap Singh',
author_email='amanprtpsingh@gmail.com',
url='https://github.com/apsknight/acl',
py_modules=['acl'],
packages=find_packages(),
install_requires=[
'Click', 'robobrowser', 'bs4', 'tabulate'
],
entry_points='''
[console_scripts]
acl=source:attendance
''',
)
|
nilq/baby-python
|
python
|
'''Helper functions to deal with datetime strings'''
from __future__ import unicode_literals, print_function
import re
from datetime import datetime
# REGEX!
DATE_RE = r'(\d{4}-\d{2}-\d{2})|(\d{4}-\d{3})'
SEC_RE = r'(:(?P<second>\d{2})(\.\d+)?)'
RAWTIME_RE = r'(?P<hour>\d{1,2})(:(?P<minute>\d{2})%s?)?' % (SEC_RE)
AMPM_RE = r'am|pm|a\.m\.|p\.m\.|AM|PM|A\.M\.|P\.M\.'
TIMEZONE_RE = r'Z|[+-]\d{2}:?\d{2}?'
TIME_RE = (r'(?P<rawtime>%s)( ?(?P<ampm>%s))?( ?(?P<tz>%s))?' %
(RAWTIME_RE, AMPM_RE, TIMEZONE_RE))
DATETIME_RE = (r'(?P<date>%s)(?P<separator>[T ])(?P<time>%s)'
% (DATE_RE, TIME_RE))
def normalize_datetime(dtstr, match=None):
"""Try to normalize a datetime string.
1. Convert 12-hour time to 24-hour time
pass match in if we have already calculated it to avoid rework
"""
match = match or (dtstr and re.match(DATETIME_RE + '$', dtstr))
if match:
datestr = match.group('date')
hourstr = match.group('hour')
minutestr = match.group('minute') or '00'
secondstr = match.group('second')
ampmstr = match.group('ampm')
separator = match.group('separator')
# convert ordinal date YYYY-DDD to YYYY-MM-DD
try:
datestr = datetime.strptime(datestr, '%Y-%j').strftime('%Y-%m-%d')
except ValueError:
# datestr was not in YYYY-DDD format
pass
# 12 to 24 time conversion
if ampmstr:
hourstr = match.group('hour')
hourint = int(hourstr)
if (ampmstr.startswith('a') or ampmstr.startswith('A')) and hourint == 12:
hourstr = '00'
if (ampmstr.startswith('p') or ampmstr.startswith('P')) and hourint < 12:
hourstr = str(hourint + 12)
dtstr = '%s%s%s:%s' % (
datestr, separator, hourstr, minutestr)
if secondstr:
dtstr += ':'+secondstr
tzstr = match.group('tz')
if tzstr:
dtstr += tzstr
return dtstr
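# Worked examples (a sketch; outputs follow from the regexes and logic above):
#
#   >>> normalize_datetime('2024-01-05 1:30pm')
#   '2024-01-05 13:30'
#   >>> normalize_datetime('2024-005T12:00:00Z')
#   '2024-01-05T12:00:00Z'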
|
nilq/baby-python
|
python
|
class SofaException(Exception):
def __init__(self, message):
super(SofaException, self).__init__(message)
class ConfigurationException(SofaException):
def __init__(self, message):
super(ConfigurationException, self).__init__(message)
|
nilq/baby-python
|
python
|
#!/usr/bin/env python
#
# Copyright (c) 2013-2018 Nest Labs, Inc.
# All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import hashlib
from nestlabs.breadcrumbs.Event import *
UID_BYTES = 2
def backslashify(aString):
return aString.replace('\n', ' \\\n')
def applyIndent(aString, indent):
return aString.replace('$', '%s' % (' '*(2*indent)))
class EventDescriptor:
"""A simple class to wrap around the event description"""
def __init__(self, filename, name, param_sizes, description):
self.filename = filename.strip('\n')
self.name = name
self.param_sizes = param_sizes
self.description = description
return None
def __getstate__(self):
return (self.filename, self.name, self.param_sizes, self.description)
def __setstate__(self, a_dict):
self.filename = a_dict[0]
self.name = a_dict[1]
self.param_sizes = a_dict[2]
self.description = a_dict[3]
def cksum(self):
stringToHash = '%s %s' % (self.name, self.name)
cksumr = hashlib.md5(stringToHash.encode())
return cksumr.hexdigest()[:UID_BYTES*2]
def __str__(self):
return "%s %s | %s" % (self.name, self.param_sizes, self.description)
def get_param_list(self):
retval = ''
i = 0
for s in range(len(self.param_sizes)):
retval += 'arg%d, ' % i
i += 1
return retval[0:-2]
def get_args(self):
retval = ''
i = 0
for sz in self.param_sizes:
retval += '$$$%s, arg%d,\n' % ( Event.get_param_size(sz[1]), i)
i += 1
return retval
def get_verify_string(self):
retval = ''
i = 0
for s in self.param_sizes:
retval += ('$$nlCHECK(sizeof(arg%d) == %s);\n' % (i, Event.get_param_size(s[1])))
i += 1
return retval
def get_macro(self, indent_val):
indent1=' '*((indent_val)*2)
indent2=' '*((indent_val + 1)*2)
aString = """\
#define nlBREADCRUMBS_%s(%s)
$do{
$$nl_breadcrumbs((k%s << %d),
%s$$$-1);
$} while(0)""" % (self.name, self.get_param_list(), self.name, UID_BYTES*8, self.get_args())
return applyIndent("%s\n\n" % backslashify(aString), 2)
|
nilq/baby-python
|
python
|
#!/usr/bin/env python
# -*- Coding: UTF-8 -*-
# @Time : 12/8/18 7:02 PM
# @Author : Terry LAI
# @Email : terry.lai@hotmail.com
# @File : keyboard.py
from pymouse import PyMouse
from pykeyboard import PyKeyboard
from socket import socket, AF_INET, SOCK_STREAM
port = 20000
# -*- coding: utf-8 -*-
client_addr = []
client_socket = {}
###########################################################################
## Python code generated with wxFormBuilder (version Sep 12 2010)
## http://www.wxformbuilder.org/
##
## PLEASE DO "NOT" EDIT THIS FILE!
###########################################################################
import wx
from socketserver import ThreadingTCPServer
###########################################################################
## Class MotionGame
###########################################################################
class MotionGame(wx.Frame):
def __init__(self, parent):
wx.Frame.__init__(self, parent, id=wx.ID_ANY, title=wx.EmptyString, pos=wx.DefaultPosition,
size=wx.Size(500, 300), style=wx.DEFAULT_FRAME_STYLE | wx.TAB_TRAVERSAL)
self.SetSizeHintsSz(wx.DefaultSize, wx.DefaultSize)
bSizer11 = wx.BoxSizer(wx.VERTICAL)
self.m_staticText1 = wx.StaticText(self, wx.ID_ANY, u"ECE 5413 Motion Game", wx.DefaultPosition, wx.DefaultSize,
0)
self.m_staticText1.Wrap(-1)
bSizer11.Add(self.m_staticText1, 0, wx.ALL | wx.ALIGN_CENTER_HORIZONTAL, 5)
self.m_button1 = wx.Button(self, wx.ID_ANY, u"Start Server", wx.DefaultPosition, wx.DefaultSize, 0)
bSizer11.Add(self.m_button1, 0, wx.ALL | wx.ALIGN_CENTER_HORIZONTAL, 5)
self.m_staticText2 = wx.StaticText(self, wx.ID_ANY, u"server is down", wx.DefaultPosition, wx.DefaultSize, 0)
self.m_staticText2.Wrap(-1)
bSizer11.Add(self.m_staticText2, 0, wx.ALL | wx.ALIGN_CENTER_HORIZONTAL, 5)
gbSizer1 = wx.GridBagSizer(0, 0)
gbSizer1.SetFlexibleDirection(wx.BOTH)
gbSizer1.SetNonFlexibleGrowMode(wx.FLEX_GROWMODE_SPECIFIED)
self.m_staticText12 = wx.StaticText(self, wx.ID_ANY, u"Game 1", wx.Point(20, 20), wx.DefaultSize,
wx.ALIGN_CENTRE)
self.m_staticText12.Wrap(-1)
gbSizer1.Add(self.m_staticText12, wx.GBPosition(0, 0), wx.GBSpan(1, 1),
wx.ALL | wx.ALIGN_CENTER_VERTICAL | wx.ALIGN_RIGHT, 5)
self.m_button2 = wx.Button(self, wx.ID_ANY, u"Set Game 1", wx.DefaultPosition, wx.DefaultSize, 0)
gbSizer1.Add(self.m_button2, wx.GBPosition(0, 1), wx.GBSpan(1, 1), wx.ALL, 5)
self.m_staticText14 = wx.StaticText(self, wx.ID_ANY, u"Player 1", wx.DefaultPosition, wx.DefaultSize, 0)
self.m_staticText14.Wrap(-1)
gbSizer1.Add(self.m_staticText14, wx.GBPosition(0, 2), wx.GBSpan(1, 1), wx.ALL, 5)
self.m_staticText4 = wx.StaticText(self, wx.ID_ANY, u"disconnected", wx.DefaultPosition, wx.DefaultSize, 0)
self.m_staticText4.Wrap(-1)
gbSizer1.Add(self.m_staticText4, wx.GBPosition(0, 3), wx.GBSpan(1, 1), wx.ALL, 5)
bSizer11.Add(gbSizer1, 1, wx.EXPAND, 5)
gbSizer11 = wx.GridBagSizer(0, 0)
gbSizer11.SetFlexibleDirection(wx.BOTH)
gbSizer11.SetNonFlexibleGrowMode(wx.FLEX_GROWMODE_SPECIFIED)
self.m_staticText121 = wx.StaticText(self, wx.ID_ANY, u"Game 2", wx.Point(20, 20), wx.DefaultSize,
wx.ALIGN_CENTRE)
self.m_staticText121.Wrap(-1)
gbSizer11.Add(self.m_staticText121, wx.GBPosition(0, 0), wx.GBSpan(1, 1),
wx.ALL | wx.ALIGN_CENTER_VERTICAL | wx.ALIGN_RIGHT, 5)
self.m_button3 = wx.Button(self, wx.ID_ANY, u"Set Game 2", wx.DefaultPosition, wx.DefaultSize, 0)
gbSizer11.Add(self.m_button3, wx.GBPosition(0, 1), wx.GBSpan(1, 1), wx.ALL, 5)
self.m_staticText141 = wx.StaticText(self, wx.ID_ANY, u"Player 1", wx.DefaultPosition, wx.DefaultSize, 0)
self.m_staticText141.Wrap(-1)
gbSizer11.Add(self.m_staticText141, wx.GBPosition(0, 2), wx.GBSpan(1, 1), wx.ALL, 5)
self.m_staticText5 = wx.StaticText(self, wx.ID_ANY, u"disconnected", wx.DefaultPosition, wx.DefaultSize, 0)
self.m_staticText5.Wrap(-1)
gbSizer11.Add(self.m_staticText5, wx.GBPosition(0, 3), wx.GBSpan(1, 1), wx.ALL, 5)
self.m_staticText40 = wx.StaticText(self, wx.ID_ANY, u"Player 2", wx.DefaultPosition, wx.DefaultSize, 0)
self.m_staticText40.Wrap(-1)
gbSizer11.Add(self.m_staticText40, wx.GBPosition(0, 4), wx.GBSpan(1, 1), wx.ALL, 5)
self.m_staticText6 = wx.StaticText(self, wx.ID_ANY, u"disconnected", wx.DefaultPosition, wx.DefaultSize, 0)
self.m_staticText6.Wrap(-1)
gbSizer11.Add(self.m_staticText6, wx.GBPosition(0, 5), wx.GBSpan(1, 1), wx.ALL, 5)
bSizer11.Add(gbSizer11, 1, wx.EXPAND, 5)
bSizer12 = wx.BoxSizer(wx.VERTICAL)
self.m_staticText57 = wx.StaticText(self, wx.ID_ANY, u"Game 2 Link: ", wx.DefaultPosition, wx.Size(50, -1), 0)
self.m_staticText57.Wrap(-1)
self.m_staticText57.SetMaxSize(wx.Size(100, -1))
bSizer12.Add(self.m_staticText57, 1, wx.ALL | wx.EXPAND, 5)
self.m_textCtrl12 = wx.TextCtrl(self, wx.ID_ANY, u"http://www.4399.com/flash/187228_1.htm", wx.DefaultPosition,
wx.DefaultSize, 0)
bSizer12.Add(self.m_textCtrl12, 0, wx.ALL | wx.EXPAND, 5)
bSizer11.Add(bSizer12, 1, wx.EXPAND, 5)
self.SetSizer(bSizer11)
self.Layout()
self.Centre(wx.BOTH)
# Connect Events
self.m_button1.Bind(wx.EVT_BUTTON, self.start_server)
self.m_button2.Bind(wx.EVT_BUTTON, self.set_game1)
self.m_button3.Bind(wx.EVT_BUTTON, self.set_game2)
def __del__(self):
pass
# Virtual event handlers, overide them in your derived class
def start_server(self, event):
frame.m_staticText2.SetLabel("Server is Running !!! ")
print("start server")
timer = threading.Timer(timer_period, fun_timer)
timer.start()
# The first argument pair is (host, port)
server = ThreadingTCPServer(('', port), EchoHandler)
server_thread = threading.Thread(target=server.serve_forever)
# Exit the server thread when the main thread terminates
server_thread.daemon = True
server_thread.start()
#sudo netstat -lntup|grep 20000
#ps -ef|grep python  # list all running python processes
#kill -9 51976  # -9 force-kills the process; sometimes a plain `kill 51976` cannot kill it
def set_game1(self, event):
global mode
global mode_1_flag
global mode_2_flag
mode_1_flag = True
mode = 1
print("Mode 1")
for key,value in client_socket.items():
value.sendall(bytes([0x11,0x22,0x33]))
def set_game2(self, event):
global mode
global mode_1_flag
global mode_2_flag
mode_2_flag = True
mode = 2
print("Mode 2")
for key,value in client_socket.items():
try:
value.sendall(bytes([0x11, 0x22, 0x33]))
except IOError:
pass
else:
pass
m = PyMouse()
k = PyKeyboard()
from socketserver import BaseRequestHandler, TCPServer
buffer_size = 10
key_flag = False
import threading
timer_period = 0.1
def fun_timer():
global key_flag
#print('Hello Timer!')
key_flag = True
global timer
timer = threading.Timer(timer_period, fun_timer)
timer.start()
previous_key = 0
mode = 1
frame =None
mode_1_flag= False
mode_2_flag= False
d = {}
# Subclass the BaseRequestHandler base class and override handle()
class EchoHandler(BaseRequestHandler):
def setup(self):
ip = self.client_address[0].strip()  # get the client's IP
port = self.client_address[1]  # get the client's port
print(ip + ":" + str(port) + " is connected!")
client_addr.append(self.client_address)  # save the address to the client list
client_socket[self.client_address] = self.request  # save the client socket
def finish(self):
print("client is disconnect!")
client_addr.remove(self.client_address)
del client_socket[self.client_addr]
def handle(self):
global key_flag
global previous_key
global mode_1_flag
global mode_2_flag
print('Got connection from', self.client_address)
print(type(self.request))
# self.request is the TCP socket connected to the client
count = 0
msg = []
while True:
# receive up to buffer_size bytes per read
temp = self.request.recv(buffer_size)
msg.extend(temp)
while len(msg) >= 2 and (msg[0]!=0xa0 or msg[1]!=0xa1):
msg.pop(0)
if len(msg)<buffer_size:
continue
if not key_flag:
continue
up = msg[2]
down = msg[3]
left = msg[4]
right = msg[5]
node = msg[6]
if node == 1:
frame.m_staticText4.SetLabel("Connected !!! ")
frame.m_staticText5.SetLabel("Connected !!! ")
if node == 2:
frame.m_staticText6.SetLabel("Connected !!! ")
if mode == 1:
key = 0
if up and not left and not right:
key =1
if down and not left and not right:
key =2
if left:
key =3
if right:
key =4
if key != 0 and previous_key != key:
print(key)
if key == 1:
k.press_key("up")
print(" node 1 up")
# else:
# k.release_key("up")
if key == 2:
k.press_key("down")
print(" node 1 down")
# else:
# k.release_key("down")
if key == 3:
k.press_key("left")
print(" node 1 left")
# else:
# k.release_key("left")
if key == 4:
k.press_key("right")
print(" node 1 right")
# else:
# k.release_key("right")
previous_key = key
if mode == 2:
if node == 1:
if up == 1:
k.press_key("up")
print(" node 1 up")
else:
k.release_key("up")
if down == 1:
k.press_key("down")
print(" node 1 down")
else:
k.release_key("down")
if left == 1:
k.press_key("left")
print(" node 1 left")
else:
k.release_key("left")
if right == 1:
k.press_key("right")
print(" node 1 right")
else:
k.release_key("right")
if node == 2:
if up == 1:
k.press_key("w")
print(" node 2 up")
else:
k.release_key("w")
if down == 1:
k.press_key("s")
print(" node 2 down")
else:
k.release_key("s")
if left == 1:
k.press_key("a")
print(" node 2 left")
else:
k.release_key("a")
if right == 1:
k.press_key("d")
print(" node 2 right")
else:
k.release_key("d")
msg = []
#key_flag = False
if __name__ == '__main__':
app = wx.App()  # instantiate the wx application / main loop
frame = MotionGame(None)  # instantiate the main window
frame.Show()  # show the window
app.MainLoop()  # start the main event loop
|
nilq/baby-python
|
python
|
from mock import MagicMock, patch
import unittest
from cassandras3.cli.restore import do_restore
from cassandras3.util.nodetool import NodeTool
class TestRestoreClient(unittest.TestCase):
@patch('cassandras3.cli.restore.ClientCache')
@patch('cassandras3.cli.restore.NodeTool')
def test_restore(self, nodetool_constructor, _):
self._setup_mocks(nodetool_constructor)
do_restore(
'us-east-1', 'localhost', 7199, 'backup-id', 'system', 'some-host', 'test')
self.mock_nodetool.restore.assert_called_with('system', 'test', 'backup-id')
@patch('cassandras3.cli.restore.ClientCache')
@patch('cassandras3.cli.restore.NodeTool')
def test_restore_no_hostname(self, nodetool_constructor, _):
self._setup_mocks(nodetool_constructor)
do_restore(
'us-east-1', 'localhost', 7199, 'backup-id', 'system', '', 'test')
self.mock_nodetool.restore.assert_called_with('system', 'test', 'backup-id')
def _setup_mocks(self, nodetool_constructor):
self.mock_nodetool = MagicMock(spec=NodeTool)
nodetool_constructor.return_value = self.mock_nodetool
|
nilq/baby-python
|
python
|
import csv
import datetime
import json
import logging
import os
import time
import click
import structlog
from dsaps import helpers
from dsaps.models import Client, Collection
logger = structlog.get_logger()
def validate_path(ctx, param, value):
"""Validates the formatting of the submitted path"""
if value[-1] == "/":
return value
else:
raise click.BadParameter("Include / at the end of the path.")
@click.group(chain=True)
@click.option(
"--url",
envvar="DSPACE_URL",
required=True,
)
@click.option(
"-e",
"--email",
envvar="DSPACE_EMAIL",
required=True,
help="The email of the user for authentication.",
)
@click.option(
"-p",
"--password",
envvar="DSPACE_PASSWORD",
required=True,
hide_input=True,
help="The password for authentication.",
)
@click.pass_context
def main(ctx, url, email, password):
ctx.obj = {}
if os.path.isdir("logs") is False:
os.mkdir("logs")
dt = datetime.datetime.utcnow().isoformat(timespec="seconds")
log_suffix = f"{dt}.log"
structlog.configure(
processors=[
structlog.stdlib.filter_by_level,
structlog.stdlib.add_log_level,
structlog.stdlib.PositionalArgumentsFormatter(),
structlog.processors.TimeStamper(fmt="iso"),
structlog.processors.JSONRenderer(),
],
context_class=dict,
logger_factory=structlog.stdlib.LoggerFactory(),
)
logging.basicConfig(
format="%(message)s",
handlers=[logging.FileHandler(f"logs/log-{log_suffix}", "w")],
level=logging.INFO,
)
logger.info("Application start")
client = Client(url)
client.authenticate(email, password)
start_time = time.time()
ctx.obj["client"] = client
ctx.obj["start_time"] = start_time
ctx.obj["log_suffix"] = log_suffix
@main.command()
@click.option(
"-m",
"--metadata-csv",
required=True,
type=click.Path(exists=True, file_okay=True, dir_okay=False),
help="The path to the CSV file of metadata for the items.",
)
@click.option(
"-f",
"--field-map",
required=True,
type=click.Path(exists=True, file_okay=True, dir_okay=False),
help="The path to JSON field mapping file.",
)
@click.option(
"-d",
"--content-directory",
required=True,
type=click.Path(exists=True, dir_okay=True, file_okay=False),
help="The full path to the content, either a directory of files "
"or a URL for the storage location.",
)
@click.option(
"-t",
"--file-type",
help="The file type to be uploaded, if limited to one file " "type.",
default="*",
)
@click.option(
"-r",
"--ingest-report",
is_flag=True,
help="Create ingest report for updating other systems.",
)
@click.option(
"-c",
"--collection-handle",
help="The handle of the collection to which items are being " "added.",
default=None,
)
@click.pass_context
def additems(
ctx,
metadata_csv,
field_map,
content_directory,
file_type,
ingest_report,
collection_handle,
):
"""Add items to a specified collection from a metadata CSV, a field
mapping file, and a directory of files. May be run in conjunction with the
newcollection CLI command."""
client = ctx.obj["client"]
start_time = ctx.obj["start_time"]
if "collection_uuid" not in ctx.obj and collection_handle is None:
raise click.UsageError(
"collection_handle option must be used or "
"additems must be run after newcollection "
"command."
)
elif "collection_uuid" in ctx.obj:
collection_uuid = ctx.obj["collection_uuid"]
else:
collection_uuid = client.get_uuid_from_handle(collection_handle)
with open(metadata_csv, "r") as csvfile, open(field_map, "r") as jsonfile:
metadata = csv.DictReader(csvfile)
mapping = json.load(jsonfile)
collection = Collection.create_metadata_for_items_from_csv(metadata, mapping)
for item in collection.items:
item.bitstreams_in_directory(content_directory, file_type)
collection.uuid = collection_uuid
items = collection.post_items(client)
if ingest_report:
report_name = metadata_csv.replace(".csv", "-ingest.csv")
helpers.create_ingest_report(items, report_name)
elapsed_time = datetime.timedelta(seconds=time.time() - start_time)
logger.info(f"Total runtime : {elapsed_time}")
@main.command()
@click.option(
"-c",
"--community-handle",
required=True,
help="The handle of the community in which to create the ," "collection.",
)
@click.option(
"-n",
"--collection-name",
required=True,
help="The name of the collection to be created.",
)
@click.pass_context
def newcollection(ctx, community_handle, collection_name):
"""Post a new collection to a specified community. Used in conjunction
with the additems CLI command to populate the new collection with
items."""
client = ctx.obj["client"]
collection_uuid = client.post_coll_to_comm(community_handle, collection_name)
ctx.obj["collection_uuid"] = collection_uuid
@main.command()
@click.option(
"-m",
"--metadata-csv",
required=True,
type=click.Path(exists=True, file_okay=True, dir_okay=False),
help="The path of the CSV file of metadata.",
)
@click.option(
"-o",
"--output-directory",
type=click.Path(exists=True, file_okay=False),
default=f"{os.getcwd()}/",
callback=validate_path,
help="The path of the output files, include / at the end of the " "path.",
)
@click.option(
"-d",
"--content-directory",
required=True,
help="The full path to the content, either a directory of files "
"or a URL for the storage location.",
)
@click.option(
"-t",
"--file-type",
help="The file type to be uploaded, if limited to one file " "type.",
default="*",
)
def reconcile(metadata_csv, output_directory, content_directory, file_type):
"""Run a reconciliation of the specified files and metadata to produce
reports of files with no metadata, metadata with no files, metadata
matched to files, and an updated version of the metadata CSV with only
the records that have matching files."""
file_ids = helpers.create_file_list(content_directory, file_type)
metadata_ids = helpers.create_metadata_id_list(metadata_csv)
metadata_matches = helpers.match_metadata_to_files(file_ids, metadata_ids)
file_matches = helpers.match_files_to_metadata(file_ids, metadata_ids)
no_files = set(metadata_ids) - set(metadata_matches)
no_metadata = set(file_ids) - set(file_matches)
helpers.create_csv_from_list(no_metadata, f"{output_directory}no_metadata")
helpers.create_csv_from_list(no_files, f"{output_directory}no_files")
helpers.create_csv_from_list(
metadata_matches, f"{output_directory}metadata_matches"
)
helpers.update_metadata_csv(metadata_csv, output_directory, metadata_matches)
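# Hypothetical invocation (URL, handles, and paths are placeholders; the chained
# click group above allows newcollection and additems to run in one call):
#
#   dsaps --url https://dspace.example.edu/rest -e user@example.edu -p secret \
#       newcollection -c 1721.1/12345 -n "New Collection" \
#       additems -m metadata.csv -f field_map.json -d /path/to/files -t pdf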
|
nilq/baby-python
|
python
|
from datetime import date, datetime, timedelta
#Yesterday as the request date for the client
def get_request_date():
dt = datetime.today() - timedelta(days=1)
return dt.strftime('%Y-%m-%d')
|
nilq/baby-python
|
python
|
# Licensed under a 3-clause BSD style license - see LICENSE.rst
from __future__ import print_function
from astropy.extern.six import BytesIO
from astropy.table import Table
from ..query import BaseQuery
from ..utils import commons
from ..utils import async_to_sync
from . import conf
__all__ = ['Heasarc', 'HeasarcClass']
@async_to_sync
class HeasarcClass(BaseQuery):
"""HEASARC query class.
"""
URL = conf.server
TIMEOUT = conf.timeout
def query_object_async(self, object_name, mission, cache=True,
get_query_payload=False):
"""TODO: document this!
(maybe start by copying over from some other service.)
"""
request_payload = dict()
request_payload['object_name'] = object_name
request_payload['tablehead'] = ('BATCHRETRIEVALCATALOG_2.0 {}'
.format(mission))
request_payload['Action'] = 'Query'
request_payload['displaymode'] = 'FitsDisplay'
if get_query_payload:
return request_payload
response = self._request('GET', self.URL, params=request_payload,
timeout=self.TIMEOUT, cache=cache)
return response
def _parse_result(self, response, verbose=False):
# if verbose is False then suppress any VOTable related warnings
if not verbose:
commons.suppress_vo_warnings()
data = BytesIO(response.content)
table = Table.read(data, hdu=1)
return table
Heasarc = HeasarcClass()
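# Hedged usage sketch: @async_to_sync generates a synchronous `query_object`
# from `query_object_async` above, so (the mission name here is illustrative):
#
#   table = Heasarc.query_object('3C 273', mission='rosmaster')
#   table.pprint()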
|
nilq/baby-python
|
python
|
"""
'FRAUDAR: Bounding Graph Fraud in the Face of Camouflage'
Spots fraudsters in the presence of camouflage or hijacked accounts. The algorithm is camouflage-resistant,
provides upper bounds on the effectiveness of fraudsters, and is effective on real-world data.
Article: https://bhooi.github.io/papers/fraudar_kdd16.pdf
"""
from UGFraud.Utils.helper import *
from UGFraud.Detector.Fraudar import *
import copy as cp
from scipy import sparse
import sys
import os
sys.path.insert(0, os.path.abspath('../../'))
def listToSparseMatrix(edgesSource, edgesDest):
m = max(edgesSource) + 1
n = max(edgesDest) + 1
M = sparse.coo_matrix(([1] * len(edgesSource), (edgesSource, edgesDest)), shape=(m, n))
M1 = M > 0
return M1.astype('int')
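# Small worked example (a sketch): two users (rows) and two products (columns).
#
#   >>> listToSparseMatrix([0, 0, 1], [0, 1, 1]).toarray()
#   array([[1, 1],
#          [0, 1]])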
@timer
def runFraudar(graph, multiple=0):
new_upriors = node_attr_filter(graph, 'types', 'user', 'prior')
new_rpriors = edge_attr_filter(graph, 'types', 'review', 'prior')
# print('Start detection on the new graph with Fraudar')
user_to_product = {}
prod_to_user = {}
u_id_dict = node_attr_filter(graph, 'types', 'user', 'types')
for u_id in u_id_dict.keys():
if u_id not in user_to_product:
user_to_product[u_id] = []
for p_id in graph[u_id].keys():
if p_id not in prod_to_user:
prod_to_user[p_id] = []
user_to_product[u_id].append(p_id)
prod_to_user[p_id].append(u_id)
u_id2idx = {}
p_id2idx = {}
idx2u_id = {}
idx2p_id = {}
i = 0
for u_id in user_to_product.keys():
u_id2idx[u_id] = i
idx2u_id[i] = u_id
i += 1
i = 0
for p_id in prod_to_user.keys():
p_id2idx[p_id] = i
idx2p_id[i] = p_id
i += 1
edgesSource = []
edgesDest = []
for u_id in u_id_dict.keys():
for p_id in graph[u_id].keys():
edgesSource.append(u_id2idx[u_id])
edgesDest.append(p_id2idx[p_id])
M = listToSparseMatrix(edgesSource, edgesDest)
# print("finished reading data ")
if multiple == 0:
# detect all dense blocks
res = detect_blocks(M, logWeightedAveDegree)
else:
# detect the top #multiple dense blocks
res = detectMultiple(M, logWeightedAveDegree, multiple)
detected_users = {}
weight_dict = {}
for lwRes in res:
detected_u_idx = lwRes[0][0]
detected_p_idx = lwRes[0][1]
weight = lwRes[1]
weight_dict[weight] = weight
for i in detected_u_idx:
uid_tmp = idx2u_id[i]
if uid_tmp not in detected_users.keys():
detected_users[uid_tmp] = weight
max_den = res[0][1]
min_den = res[-1][1]
den_interval = max_den - min_den
ranked_rpriors = [(review, new_rpriors[review]) for review in new_rpriors.keys()]
ranked_rpriors = sorted(ranked_rpriors, reverse=True, key=lambda x: x[1])
r_max, r_mean, r_min = ranked_rpriors[0][1], ranked_rpriors[int(len(ranked_rpriors) / 2)][1], ranked_rpriors[-1][1]
aux_rpriors = cp.deepcopy(new_rpriors)
for i, p in aux_rpriors.items():
new_rpriors[i] = (p - r_min) / (r_max - r_min)
user_density = {}
for u in new_upriors.keys():
if u in detected_users.keys():
user_density[u] = (detected_users[u] - min_den) / den_interval
else:
user_density[u] = 1e-6
user_prob = {}
review_prob = {}
for review in new_rpriors.keys():
review_prob.update({review: 1e-6})
user_prob.update({review[0]: 1e-6})
for user in detected_users.keys():
user_prob.update({user: user_density[user]})
for prod in graph[user].keys():
review_prob.update({(user, prod): user_density[user]})
return user_prob, review_prob
if __name__ == '__main__':
# data source
file_name = 'Yelp_graph_data.json'
G = load_graph(file_name)
review_ground_truth = edge_attr_filter(G, 'types', 'review', 'label')
# run Fraudar on the reviews
userBelief, reviewBelief = runFraudar(G, multiple=0)
reviewBelief = scale_value(reviewBelief)
review_AUC, review_AP = evaluate(review_ground_truth, reviewBelief)
print('review AUC = {}'.format(review_AUC))
print('review AP = {}'.format(review_AP))
|
nilq/baby-python
|
python
|
"""
dear Nessus dev, if you want to see where there is issues with your REST API, please modify `lying_type` and
`lying_exist` to become NOP
"""
import functools
from typing import TypeVar, Mapping, Union, Callable, Any, Optional
T = TypeVar('T')
U = TypeVar('U')
V = TypeVar('V')
JsonType = Union[int, str, bool]
class Object:
def __repr__(self) -> str:
"""
more magic, we want a generic way to repr a model, so we take the current values of self and the args to the
init function and try to match them together
:return: repr of the model
"""
classname = self.__class__.__name__
init = getattr(self, '__init__')
args = init.__code__.co_varnames[1:]
args_str = ['{{{}!r}}'.format(a) for a in args]
ret = '{classname}({args})'.format(classname=classname, args=', '.join(args_str))
values = dict()
for k, v in self.__dict__.items():
if k in args:
real_key = k
else:
real_key = next(arg for arg in args if arg.endswith(k))
values[real_key] = v
return ret.format(**values)
def lying_type(value: U, excepted_type: Callable[[U], Any], actual_type: Callable[[U], T] = lambda x: x,
default: V = ...) -> Union[T,Any]:
"""
document that we expected the given type for the given value, but it was not the case
a NOP would be `return excepted_type(value)`
:param value: value we got
:param excepted_type: type we expected
:param actual_type: real type we got
:return: type we got
"""
if default is not ...:
return default
return actual_type(value)
def __default_if_args(if_no_arg: Callable[[], T], if_arg: Callable[[Any], T], *args) -> T:
"""
if it was given one arg, call `if_arg` with it, if got no arg, call `if_no_arg`
:param if_no_arg: to call if no arg
:param if_arg: to call if arg
:param args: passed to `if_arg`
:return: result from either `if_no_arg` or `if_arg`
"""
assert len(args) in (0, 1)
if args:
return if_arg(*args)
return if_no_arg()
def lying_exist_and_type(json_dict: Mapping[str, JsonType], excepted_name: str, excepted_type: Callable[[Any], T],
actual_type: Callable[[Any], U], default: Optional[U] = None) -> U:
if excepted_name in json_dict:
return actual_type(json_dict[excepted_name])
else:
return default
def allow_to_exist(json_dict: Mapping[str, JsonType], excepted_name: str, excepted_type: Callable[[Any], T]) -> Optional[T]:
if excepted_name in json_dict:
return excepted_type(json_dict[excepted_name])
else:
return None
def lying_exist(json_dict: Mapping[str, JsonType], excepted_name: str, excepted_type: Callable[[Any], T],
default: U = ...) -> Union[T, U]:
"""
document that we expected the given key, but it was not the case
a NOP would be `return excepted_type(json_dict[excepted_name])`
:param json_dict: where to look for the value
:param excepted_name: key we expected to find
:param excepted_type: type of the value we expected to find
:param default: optional default value to return (we also use a bit of magic (`...`) to be able to pass None)
:return: either the value if existing or the default
"""
# we use this magic to be able to pass either `int` as `excepted_type` (which can take 0 or 1 args) or one of our
# `model.from_json` functions, which have to take a single arg
if default is not ...:
to_call = functools.partial(__default_if_args, lambda: default, excepted_type)
else:
to_call = excepted_type
if excepted_name in json_dict:
return to_call(json_dict[excepted_name])
else:
return to_call()
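# Worked examples (a sketch of the intended behaviour):
#
#   >>> lying_exist({'count': '3'}, 'count', int, default=0)
#   3
#   >>> lying_exist({}, 'count', int, default=0)
#   0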
|
nilq/baby-python
|
python
|
from .models import redshiftdata_backends
from ..core.models import base_decorator
mock_redshiftdata = base_decorator(redshiftdata_backends)
|
nilq/baby-python
|
python
|
import time
import unittest
from cryptography.shell_game import ShellGame
class ShellGameTests(unittest.TestCase):
def setUp(self):
self.start_time = time.time()
def tearDown(self):
t = time.time() - self.start_time
print("%s: %.3f" % (self.id(), t))
def test_1(self):
time.sleep(1)
shell = ShellGame(5, [])
self.assertEqual(5, shell.find_the_ball(), "An Empty swap does nothin")
def test_2(self):
time.sleep(2)
shell = ShellGame(0, [(0, 1), (2, 1), (0, 1)])
self.assertEqual(1, shell.find_the_ball(), "Find the ball in position 2")
def test_3(self):
time.sleep(3)
shell = ShellGame(4, [[0, 9], [9, 3], [3, 7], [7, 8], [8, 2], [4, 5]])
self.assertEqual(5, shell.find_the_ball(), "Nope! Expected 5.")
if __name__ == "__main__":
suite = unittest.TestLoader().loadTestsFromTestCase(ShellGameTests)
unittest.TextTestRunner(verbosity=0).run(suite)
|
nilq/baby-python
|
python
|
"""
Functions for interacting with timestamps and datetime objects
"""
import datetime
from typing import Optional
def to_utc_ms(dt: datetime.datetime) -> Optional[int]:
"""
Convert a datetime object to UTC epoch milliseconds
Parameters
----------
dt : datetime.datetime
    Datetime object
Returns
-------
timestamp_ms : int
    Timestamp
"""
if dt is None:
return None
return int(dt.replace(tzinfo=datetime.timezone.utc).timestamp() * 1000.0)
def from_utc_ms(utc: Optional[int]) -> Optional[datetime.datetime]:
"""
Convert a UTC epoch milliseconds timestamp to a datetime object
Parameters
----------
utc : int
Timestamp
Returns
-------
dt : datetime.datetime
Datetime object
"""
if utc is None:
return None
return datetime.datetime.fromtimestamp(utc / 1000.0, tz=datetime.timezone.utc)
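# Round-trip example (a sketch; note to_utc_ms interprets naive datetimes as UTC):
#
#   >>> to_utc_ms(datetime.datetime(2020, 1, 1))
#   1577836800000
#   >>> from_utc_ms(1577836800000)
#   datetime.datetime(2020, 1, 1, 0, 0, tzinfo=datetime.timezone.utc)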
|
nilq/baby-python
|
python
|
import datetime
import uuid
from typing import cast
from unittest import mock
from unittest.mock import ANY, patch
import pytest
import pytz
from constance.test import override_config
from django.core import mail
from django.urls.base import reverse
from django.utils import timezone
from rest_framework import status
from posthog.constants import AvailableFeature
from posthog.models import Dashboard, Organization, Team, User, organization
from posthog.models.organization import OrganizationInvite, OrganizationMembership
from posthog.test.base import APIBaseTest
from posthog.utils import get_instance_realm
MOCK_GITLAB_SSO_RESPONSE = {
"access_token": "123",
"email": "testemail@posthog.com",
"name": "John Doe",
}
class TestSignupAPI(APIBaseTest):
@classmethod
def setUpTestData(cls):
# Do not set up any test data
pass
@pytest.mark.skip_on_multitenancy
@patch("posthoganalytics.capture")
def test_api_sign_up(self, mock_capture):
# Ensure the internal system metrics org doesn't prevent org-creation
Organization.objects.create(name="PostHog Internal Metrics", for_internal_metrics=True)
response = self.client.post(
"/api/signup/",
{
"first_name": "John",
"email": "hedgehog@posthog.com",
"password": "notsecure",
"organization_name": "Hedgehogs United, LLC",
"email_opt_in": False,
},
)
self.assertEqual(response.status_code, status.HTTP_201_CREATED)
user = cast(User, User.objects.order_by("-pk")[0])
team = cast(Team, user.team)
organization = cast(Organization, user.organization)
self.assertEqual(
response.json(),
{
"id": user.pk,
"uuid": str(user.uuid),
"distinct_id": user.distinct_id,
"first_name": "John",
"email": "hedgehog@posthog.com",
"redirect_url": "/ingestion",
},
)
# Assert that the user was properly created
self.assertEqual(user.first_name, "John")
self.assertEqual(user.email, "hedgehog@posthog.com")
self.assertEqual(user.email_opt_in, False)
# Assert that the team was properly created
self.assertEqual(team.name, "Default Project")
# Assert that the org was properly created
self.assertEqual(organization.name, "Hedgehogs United, LLC")
# Assert that the sign up event & identify calls were sent to PostHog analytics
mock_capture.assert_called_once()
self.assertEqual(user.distinct_id, mock_capture.call_args.args[0])
self.assertEqual("user signed up", mock_capture.call_args.args[1])
# Assert that key properties were set properly
event_props = mock_capture.call_args.kwargs["properties"]
self.assertEqual(event_props["is_first_user"], True)
self.assertEqual(event_props["is_organization_first_user"], True)
self.assertEqual(event_props["new_onboarding_enabled"], False)
self.assertEqual(event_props["signup_backend_processor"], "OrganizationSignupSerializer")
self.assertEqual(event_props["signup_social_provider"], "")
self.assertEqual(event_props["realm"], get_instance_realm())
# Assert that the user is logged in
response = self.client.get("/api/users/@me/")
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(response.json()["email"], "hedgehog@posthog.com")
# Assert that the password was correctly saved
self.assertTrue(user.check_password("notsecure"))
@pytest.mark.skip_on_multitenancy
def test_signup_disallowed_on_self_hosted_by_default(self):
with self.settings(MULTI_TENANCY=False):
response = self.client.post(
"/api/signup/", {"first_name": "Jane", "email": "hedgehog2@posthog.com", "password": "notsecure"},
)
self.assertEqual(response.status_code, status.HTTP_201_CREATED)
response = self.client.post(
"/api/signup/", {"first_name": "Jane", "email": "hedgehog2@posthog.com", "password": "notsecure"},
)
self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)
self.assertEqual(
response.json(),
{
"attr": None,
"code": "permission_denied",
"detail": "New organizations cannot be created in this instance. Contact your administrator if you"
" think this is a mistake.",
"type": "authentication_error",
},
)
@pytest.mark.ee
def test_signup_allowed_on_self_hosted_with_env_var(self):
from ee.models.license import License, LicenseManager
super(LicenseManager, cast(LicenseManager, License.objects)).create(
key="key_123", plan="enterprise", valid_until=timezone.datetime(2038, 1, 19, 3, 14, 7), max_users=3,
)
Organization.objects.create(name="name")
User.objects.create(first_name="name", email="email@posthog.com")
count = Organization.objects.count()
with self.settings(MULTI_TENANCY=False, MULTI_ORG_ENABLED=True):
response = self.client.post(
"/api/signup/", {"first_name": "Jane", "email": "hedgehog4@posthog.com", "password": "notsecure"},
)
self.assertEqual(response.status_code, status.HTTP_201_CREATED)
self.assertEqual(response.json()["email"], "hedgehog4@posthog.com")
self.assertEqual(Organization.objects.count(), count + 1)
@pytest.mark.skip_on_multitenancy
@patch("posthoganalytics.capture")
@patch("posthoganalytics.identify")
def test_signup_minimum_attrs(self, mock_identify, mock_capture):
response = self.client.post(
"/api/signup/", {"first_name": "Jane", "email": "hedgehog2@posthog.com", "password": "notsecure"},
)
self.assertEqual(response.status_code, status.HTTP_201_CREATED)
user = cast(User, User.objects.order_by("-pk").get())
organization = cast(Organization, user.organization)
self.assertEqual(
response.json(),
{
"id": user.pk,
"uuid": str(user.uuid),
"distinct_id": user.distinct_id,
"first_name": "Jane",
"email": "hedgehog2@posthog.com",
"redirect_url": "/ingestion",
},
)
# Assert that the user & org were properly created
self.assertEqual(user.first_name, "Jane")
self.assertEqual(user.email, "hedgehog2@posthog.com")
self.assertEqual(user.email_opt_in, True) # Defaults to True
self.assertEqual(organization.name, "Jane")
# Assert that the sign up event & identify calls were sent to PostHog analytics
mock_identify.assert_called_once()
mock_capture.assert_called_once()
self.assertEqual(user.distinct_id, mock_capture.call_args.args[0])
self.assertEqual("user signed up", mock_capture.call_args.args[1])
# Assert that key properties were set properly
event_props = mock_capture.call_args.kwargs["properties"]
self.assertEqual(event_props["is_first_user"], True)
self.assertEqual(event_props["is_organization_first_user"], True)
self.assertEqual(event_props["new_onboarding_enabled"], False)
self.assertEqual(event_props["signup_backend_processor"], "OrganizationSignupSerializer")
self.assertEqual(event_props["signup_social_provider"], "")
self.assertEqual(event_props["realm"], get_instance_realm())
# Assert that the user is logged in
response = self.client.get("/api/users/@me/")
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(response.json()["email"], "hedgehog2@posthog.com")
# Assert that the password was correctly saved
self.assertTrue(user.check_password("notsecure"))
def test_cant_sign_up_without_required_attributes(self):
count: int = User.objects.count()
team_count: int = Team.objects.count()
org_count: int = Organization.objects.count()
required_attributes = [
"first_name",
"email",
"password",
]
for attribute in required_attributes:
body = {
"first_name": "Jane",
"email": "invalid@posthog.com",
"password": "notsecure",
}
body.pop(attribute)
# Make sure the endpoint works with and without the trailing slash
response = self.client.post("/api/signup", body)
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
self.assertEqual(
response.json(),
{
"type": "validation_error",
"code": "required",
"detail": "This field is required.",
"attr": attribute,
},
)
self.assertEqual(User.objects.count(), count)
self.assertEqual(Team.objects.count(), team_count)
self.assertEqual(Organization.objects.count(), org_count)
def test_cant_sign_up_with_short_password(self):
count: int = User.objects.count()
team_count: int = Team.objects.count()
response = self.client.post(
"/api/signup/", {"first_name": "Jane", "email": "failed@posthog.com", "password": "123"},
)
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
self.assertEqual(
response.json(),
{
"type": "validation_error",
"code": "password_too_short",
"detail": "This password is too short. It must contain at least 8 characters.",
"attr": "password",
},
)
self.assertEqual(User.objects.count(), count)
self.assertEqual(Team.objects.count(), team_count)
@patch("posthoganalytics.feature_enabled")
def test_default_dashboard_is_created_on_signup(self, mock_feature_enabled):
"""
Tests that the default web app dashboard is created on signup.
Note: This feature is currently behind a feature flag.
"""
response = self.client.post(
"/api/signup/",
{
"first_name": "Jane",
"email": "hedgehog75@posthog.com",
"password": "notsecure",
"redirect_url": "/ingestion",
},
)
self.assertEqual(response.status_code, status.HTTP_201_CREATED)
user: User = User.objects.order_by("-pk").get()
mock_feature_enabled.assert_any_call("new-onboarding-2822", user.distinct_id)
self.assertEqual(
response.json(),
{
"id": user.pk,
"uuid": str(user.uuid),
"distinct_id": user.distinct_id,
"first_name": "Jane",
"email": "hedgehog75@posthog.com",
"redirect_url": "/personalization",
},
)
dashboard: Dashboard = Dashboard.objects.first() # type: ignore
self.assertEqual(dashboard.team, user.team)
self.assertEqual(dashboard.items.count(), 1)
self.assertEqual(dashboard.name, "Web Analytics")
self.assertEqual(
dashboard.items.all()[0].description, "Shows a conversion funnel from sign up to watching a movie."
)
# Particularly assert that the default dashboards are not created (because we create special demo dashboards)
self.assertEqual(Dashboard.objects.filter(team=user.team).count(), 3) # Web, app & revenue demo dashboards
@mock.patch("social_core.backends.base.BaseAuth.request")
@pytest.mark.ee
def test_api_can_use_social_login_to_create_organization_if_enabled(self, mock_request):
Organization.objects.create(name="Test org")
from ee.models.license import License, LicenseManager
super(LicenseManager, cast(LicenseManager, License.objects)).create(
key="key_123", plan="enterprise", valid_until=timezone.datetime(2038, 1, 19, 3, 14, 7), max_users=3,
)
response = self.client.get(reverse("social:begin", kwargs={"backend": "gitlab"}))
self.assertEqual(response.status_code, status.HTTP_302_FOUND)
url = reverse("social:complete", kwargs={"backend": "gitlab"})
url += f"?code=2&state={response.client.session['gitlab_state']}"
mock_request.return_value.json.return_value = MOCK_GITLAB_SSO_RESPONSE
with self.settings(MULTI_ORG_ENABLED=True):
response = self.client.get(url, follow=True)
self.assertEqual(response.status_code, status.HTTP_200_OK) # because `follow=True`
self.assertRedirects(response, "/signup/finish/") # page where user will create a new org
@mock.patch("social_core.backends.base.BaseAuth.request")
@pytest.mark.ee
@pytest.mark.skip_on_multitenancy
def test_api_cannot_use_social_login_to_create_organization_if_disabled(self, mock_request):
Organization.objects.create(name="Test org")
# Even with a valid license, because `MULTI_ORG_ENABLED` is not enabled, no new organizations will be allowed.
from ee.models.license import License, LicenseManager
super(LicenseManager, cast(LicenseManager, License.objects)).create(
key="key_123", plan="enterprise", valid_until=timezone.datetime(2038, 1, 19, 3, 14, 7), max_users=3,
)
response = self.client.get(reverse("social:begin", kwargs={"backend": "gitlab"}))
self.assertEqual(response.status_code, status.HTTP_302_FOUND)
url = reverse("social:complete", kwargs={"backend": "gitlab"})
url += f"?code=2&state={response.client.session['gitlab_state']}"
mock_request.return_value.json.return_value = MOCK_GITLAB_SSO_RESPONSE
response = self.client.get(url, follow=True)
self.assertEqual(response.status_code, status.HTTP_200_OK) # because `follow=True`
self.assertRedirects(
response, "/login?error=no_new_organizations"
) # show the user an error; operation not permitted
@mock.patch("social_core.backends.base.BaseAuth.request")
@pytest.mark.ee
def test_api_social_login_to_create_organization(self, mock_request):
response = self.client.get(reverse("social:begin", kwargs={"backend": "google-oauth2"}))
self.assertEqual(response.status_code, status.HTTP_302_FOUND)
url = reverse("social:complete", kwargs={"backend": "google-oauth2"})
url += f"?code=2&state={response.client.session['google-oauth2_state']}"
mock_request.return_value.json.return_value = MOCK_GITLAB_SSO_RESPONSE
response = self.client.get(url, follow=True)
self.assertEqual(response.status_code, status.HTTP_200_OK) # because `follow=True`
self.assertRedirects(response, "/signup/finish/") # page where user will create a new org
@mock.patch("social_core.backends.base.BaseAuth.request")
@pytest.mark.skip_on_multitenancy
@pytest.mark.ee
def test_api_social_login_cannot_create_second_organization(self, mock_request):
Organization.objects.create(name="Test org")
response = self.client.get(reverse("social:begin", kwargs={"backend": "google-oauth2"}))
self.assertEqual(response.status_code, status.HTTP_302_FOUND)
url = reverse("social:complete", kwargs={"backend": "google-oauth2"})
url += f"?code=2&state={response.client.session['google-oauth2_state']}"
mock_request.return_value.json.return_value = MOCK_GITLAB_SSO_RESPONSE
response = self.client.get(url, follow=True)
self.assertEqual(response.status_code, status.HTTP_200_OK) # because `follow=True`
self.assertRedirects(
response, "/login?error=no_new_organizations"
) # show the user an error; operation not permitted
@mock.patch("social_core.backends.base.BaseAuth.request")
@pytest.mark.skip_on_multitenancy
@pytest.mark.ee
def test_social_signup_with_whitelisted_domain(self, mock_request):
new_org = Organization.objects.create(name="Hogflix Movies", domain_whitelist=["hogflix.posthog.com"])
new_project = Team.objects.create(organization=new_org, name="My First Project")
user_count = User.objects.count()
response = self.client.get(reverse("social:begin", kwargs={"backend": "google-oauth2"}))
self.assertEqual(response.status_code, 302)
url = reverse("social:complete", kwargs={"backend": "google-oauth2"})
url += f"?code=2&state={response.client.session['google-oauth2_state']}"
mock_request.return_value.json.return_value = {"access_token": "123", "email": "jane@hogflix.posthog.com"}
response = self.client.get(url, follow=True)
self.assertEqual(response.status_code, status.HTTP_200_OK) # because `follow=True`
self.assertRedirects(response, "/")
self.assertEqual(User.objects.count(), user_count + 1)
user = cast(User, User.objects.last())
self.assertEqual(user.email, "jane@hogflix.posthog.com")
self.assertEqual(user.organization, new_org)
self.assertEqual(user.team, new_project)
self.assertEqual(user.organization_memberships.count(), 1)
self.assertEqual(
cast(OrganizationMembership, user.organization_memberships.first()).level,
OrganizationMembership.Level.MEMBER,
)
@mock.patch("social_core.backends.base.BaseAuth.request")
@pytest.mark.ee
def test_social_signup_to_existing_org_with_whitelisted_domains_is_disabled_in_cloud(self, mock_request):
Organization.objects.create(name="Hogflix Movies", domain_whitelist=["hogflix.posthog.com"])
user_count = User.objects.count()
org_count = Organization.objects.count()
response = self.client.get(reverse("social:begin", kwargs={"backend": "google-oauth2"}))
self.assertEqual(response.status_code, 302)
url = reverse("social:complete", kwargs={"backend": "google-oauth2"})
url += f"?code=2&state={response.client.session['google-oauth2_state']}"
mock_request.return_value.json.return_value = {"access_token": "123", "email": "jane@hogflix.posthog.com"}
with self.settings(MULTI_TENANCY=True):
response = self.client.get(url, follow=True)
self.assertEqual(response.status_code, status.HTTP_200_OK) # because `follow=True`
self.assertRedirects(response, "/signup/finish/") # page where user will create a new org
self.assertEqual(User.objects.count(), user_count)
self.assertEqual(Organization.objects.count(), org_count)
@mock.patch("social_core.backends.base.BaseAuth.request")
@pytest.mark.skip_on_multitenancy
@pytest.mark.ee
def test_api_cannot_use_whitelist_for_different_domain(self, mock_request):
Organization.objects.create(name="Test org", domain_whitelist=["good.com"])
response = self.client.get(reverse("social:begin", kwargs={"backend": "google-oauth2"}))
self.assertEqual(response.status_code, status.HTTP_302_FOUND)
url = reverse("social:complete", kwargs={"backend": "google-oauth2"})
url += f"?code=2&state={response.client.session['google-oauth2_state']}"
mock_request.return_value.json.return_value = {"access_token": "123", "email": "alice@evil.com"}
response = self.client.get(url, follow=True)
self.assertEqual(response.status_code, status.HTTP_200_OK) # because `follow=True`
self.assertRedirects(
response, "/login?error=no_new_organizations"
) # show the user an error; operation not permitted
class TestInviteSignup(APIBaseTest):
"""
Tests the sign up process for users with an invite (i.e. existing organization).
"""
CONFIG_EMAIL = None
# Invite pre-validation
def test_api_invite_sign_up_prevalidate(self):
invite: OrganizationInvite = OrganizationInvite.objects.create(
target_email="test+19@posthog.com", organization=self.organization,
)
response = self.client.get(f"/api/signup/{invite.id}/")
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(
response.json(),
{
"id": str(invite.id),
"target_email": "t*****9@posthog.com",
"first_name": "",
"organization_name": self.CONFIG_ORGANIZATION_NAME,
},
)
def test_api_invite_sign_up_with_first_name_prevalidate(self):
invite: OrganizationInvite = OrganizationInvite.objects.create(
target_email="test+58@posthog.com", organization=self.organization, first_name="Jane"
)
response = self.client.get(f"/api/signup/{invite.id}/")
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(
response.json(),
{
"id": str(invite.id),
"target_email": "t*****8@posthog.com",
"first_name": "Jane",
"organization_name": self.CONFIG_ORGANIZATION_NAME,
},
)
def test_api_invite_sign_up_prevalidate_for_existing_user(self):
user = self._create_user("test+29@posthog.com", "test_password")
new_org = Organization.objects.create(name="Test, Inc")
invite: OrganizationInvite = OrganizationInvite.objects.create(
target_email="test+29@posthog.com", organization=new_org,
)
self.client.force_login(user)
response = self.client.get(f"/api/signup/{invite.id}/")
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(
response.json(),
{
"id": str(invite.id),
"target_email": "t*****9@posthog.com",
"first_name": "",
"organization_name": "Test, Inc",
},
)
def test_api_invite_sign_up_prevalidate_invalid_invite(self):
for invalid_invite in [uuid.uuid4(), "abc", "1234"]:
response = self.client.get(f"/api/signup/{invalid_invite}/")
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
self.assertEqual(
response.json(),
{
"type": "validation_error",
"code": "invalid_input",
"detail": "The provided invite ID is not valid.",
"attr": None,
},
)
def test_existing_user_cant_claim_invite_if_it_doesnt_match_target_email(self):
user = self._create_user("test+39@posthog.com", "test_password")
invite: OrganizationInvite = OrganizationInvite.objects.create(
target_email="test+49@posthog.com", organization=self.organization,
)
self.client.force_login(user)
response = self.client.get(f"/api/signup/{invite.id}/")
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
self.assertEqual(
response.json(),
{
"type": "validation_error",
"code": "invalid_recipient",
"detail": "This invite is intended for another email address: t*****9@posthog.com."
" You tried to sign up with test+39@posthog.com.",
"attr": None,
},
)
def test_api_invite_sign_up_prevalidate_expired_invite(self):
invite: OrganizationInvite = OrganizationInvite.objects.create(
target_email="test+59@posthog.com", organization=self.organization,
)
invite.created_at = datetime.datetime(2020, 12, 1, tzinfo=pytz.UTC)
invite.save()
response = self.client.get(f"/api/signup/{invite.id}/")
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
self.assertEqual(
response.json(),
{
"type": "validation_error",
"code": "expired",
"detail": "This invite has expired. Please ask your admin for a new one.",
"attr": None,
},
)
# Signup (using invite)
@patch("posthoganalytics.capture")
def test_api_invite_sign_up(self, mock_capture):
invite: OrganizationInvite = OrganizationInvite.objects.create(
target_email="test+99@posthog.com", organization=self.organization,
)
response = self.client.post(
f"/api/signup/{invite.id}/", {"first_name": "Alice", "password": "test_password", "email_opt_in": True},
)
self.assertEqual(response.status_code, status.HTTP_201_CREATED)
user = cast(User, User.objects.order_by("-pk")[0])
self.assertEqual(
response.json(),
{
"id": user.pk,
"uuid": str(user.uuid),
"distinct_id": user.distinct_id,
"first_name": "Alice",
"email": "test+99@posthog.com",
},
)
# User is now a member of the organization
self.assertEqual(user.organization_memberships.count(), 1)
self.assertEqual(user.organization_memberships.first().organization, self.organization) # type: ignore
# Defaults are set correctly
self.assertEqual(user.organization, self.organization)
self.assertEqual(user.team, self.team)
# Assert that the user was properly created
self.assertEqual(user.first_name, "Alice")
self.assertEqual(user.email, "test+99@posthog.com")
self.assertEqual(user.email_opt_in, True)
# Assert that the sign up event & identify calls were sent to PostHog analytics
mock_capture.assert_called_once()
self.assertEqual(user.distinct_id, mock_capture.call_args.args[0])
self.assertEqual("user signed up", mock_capture.call_args.args[1])
# Assert that key properties were set properly
event_props = mock_capture.call_args.kwargs["properties"]
self.assertEqual(event_props["is_first_user"], False)
self.assertEqual(event_props["is_organization_first_user"], False)
self.assertEqual(event_props["new_onboarding_enabled"], False)
self.assertEqual(event_props["signup_backend_processor"], "OrganizationInviteSignupSerializer")
self.assertEqual(event_props["signup_social_provider"], "")
self.assertEqual(event_props["realm"], get_instance_realm())
# Assert that the user is logged in
response = self.client.get("/api/users/@me/")
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(response.json()["email"], "test+99@posthog.com")
# Assert that the password was correctly saved
self.assertTrue(user.check_password("test_password"))
@pytest.mark.ee
def test_api_invite_sign_up_where_there_are_no_default_non_private_projects(self):
self.client.logout()
invite: OrganizationInvite = OrganizationInvite.objects.create(
target_email="test+private@posthog.com", organization=self.organization,
)
self.organization.available_features = [AvailableFeature.PROJECT_BASED_PERMISSIONING]
self.organization.save()
self.team.access_control = True
self.team.save()
response = self.client.post(
f"/api/signup/{invite.id}/", {"first_name": "Alice", "password": "test_password", "email_opt_in": True},
)
self.assertEqual(response.status_code, status.HTTP_201_CREATED)
user = cast(User, User.objects.order_by("-pk")[0])
self.assertEqual(user.organization_memberships.count(), 1)
self.assertEqual(user.organization, self.organization)
self.assertEqual(
user.current_team, None
) # User is not assigned to a project, as there are no non-private projects
self.assertEqual(user.team, None)
def test_api_invite_sign_up_where_default_project_is_private(self):
self.client.logout()
self.team.access_control = True
self.team.save()
team = Team.objects.create(name="Public project", organization=self.organization, access_control=False)
invite: OrganizationInvite = OrganizationInvite.objects.create(
target_email="test+privatepublic@posthog.com", organization=self.organization,
)
response = self.client.post(
f"/api/signup/{invite.id}/", {"first_name": "Charlie", "password": "test_password"},
)
self.assertEqual(response.status_code, status.HTTP_201_CREATED)
user = cast(User, User.objects.order_by("-pk")[0])
self.assertEqual(user.organization_memberships.count(), 1)
self.assertEqual(user.organization, self.organization)
self.assertEqual(user.current_team, team)
self.assertEqual(user.team, team)
def test_api_invite_sign_up_member_joined_email_is_not_sent_for_initial_member(self):
invite: OrganizationInvite = OrganizationInvite.objects.create(
target_email="test+100@posthog.com", organization=self.organization,
)
with self.settings(EMAIL_ENABLED=True, EMAIL_HOST="localhost", SITE_URL="http://test.posthog.com"):
response = self.client.post(
f"/api/signup/{invite.id}/", {"first_name": "Alice", "password": "test_password", "email_opt_in": True},
)
self.assertEqual(response.status_code, status.HTTP_201_CREATED)
self.assertEqual(len(mail.outbox), 0)
@override_config(EMAIL_HOST="localhost")
def test_api_invite_sign_up_member_joined_email_is_sent_for_next_members(self):
initial_user = User.objects.create_and_join(self.organization, "test+420@posthog.com", None)
invite: OrganizationInvite = OrganizationInvite.objects.create(
target_email="test+100@posthog.com", organization=self.organization,
)
with self.settings(EMAIL_ENABLED=True, SITE_URL="http://test.posthog.com"):
response = self.client.post(
f"/api/signup/{invite.id}/", {"first_name": "Alice", "password": "test_password", "email_opt_in": True},
)
self.assertEqual(response.status_code, status.HTTP_201_CREATED)
self.assertEqual(len(mail.outbox), 1)
self.assertListEqual(mail.outbox[0].to, [initial_user.email])
def test_api_invite_sign_up_member_joined_email_is_not_sent_if_disabled(self):
self.organization.is_member_join_email_enabled = False
self.organization.save()
initial_user = User.objects.create_and_join(self.organization, "test+420@posthog.com", None)
invite: OrganizationInvite = OrganizationInvite.objects.create(
target_email="test+100@posthog.com", organization=self.organization,
)
with self.settings(EMAIL_ENABLED=True, EMAIL_HOST="localhost", SITE_URL="http://test.posthog.com"):
response = self.client.post(
f"/api/signup/{invite.id}/", {"first_name": "Alice", "password": "test_password", "email_opt_in": True},
)
self.assertEqual(response.status_code, status.HTTP_201_CREATED)
self.assertEqual(len(mail.outbox), 0)
@patch("posthoganalytics.identify")
@patch("posthoganalytics.capture")
def test_existing_user_can_sign_up_to_a_new_organization(self, mock_capture, mock_identify):
user = self._create_user("test+159@posthog.com", "test_password")
new_org = Organization.objects.create(name="TestCo")
new_team = Team.objects.create(organization=new_org)
invite: OrganizationInvite = OrganizationInvite.objects.create(
target_email="test+159@posthog.com", organization=new_org,
)
self.client.force_login(user)
count = User.objects.count()
with self.settings(MULTI_TENANCY=True):
response = self.client.post(f"/api/signup/{invite.id}/")
self.assertEqual(response.status_code, status.HTTP_201_CREATED)
self.assertEqual(
response.json(),
{
"id": user.pk,
"uuid": str(user.uuid),
"distinct_id": user.distinct_id,
"first_name": "",
"email": "test+159@posthog.com",
},
)
# No new user is created
self.assertEqual(User.objects.count(), count)
# User is now a member of the organization
user.refresh_from_db()
self.assertEqual(user.organization_memberships.count(), 2)
self.assertTrue(user.organization_memberships.filter(organization=new_org).exists())
# User is now changed to the new organization
self.assertEqual(user.organization, new_org)
self.assertEqual(user.team, new_team)
# User is not changed
self.assertEqual(user.first_name, "")
self.assertEqual(user.email, "test+159@posthog.com")
# Assert that the sign up event & identify calls were sent to PostHog analytics
mock_capture.assert_called_once_with(
user.distinct_id,
"user joined organization",
properties={
"organization_id": str(new_org.id),
"user_number_of_org_membership": 2,
"org_current_invite_count": 0,
"org_current_project_count": 1,
"org_current_members_count": 1,
},
groups={"instance": ANY, "organization": str(new_org.id)},
)
mock_identify.assert_called_once()
# Assert that the user remains logged in
response = self.client.get("/api/users/@me/")
self.assertEqual(response.status_code, status.HTTP_200_OK)
@patch("posthoganalytics.capture")
def test_cannot_use_claim_invite_endpoint_to_update_user(self, mock_capture):
"""
Tests that a user cannot use the claim invite endpoint to change their name or password
(as this endpoint does not do any checks that might be required).
"""
new_org = Organization.objects.create(name="TestCo")
user = self._create_user("test+189@posthog.com", "test_password")
user2 = self._create_user("test+949@posthog.com")
user2.join(organization=new_org)
Team.objects.create(organization=new_org)
invite: OrganizationInvite = OrganizationInvite.objects.create(
target_email="test+189@posthog.com", organization=new_org,
)
self.client.force_login(user)
response = self.client.post(f"/api/signup/{invite.id}/", {"first_name": "Bob", "password": "new_password"})
self.assertEqual(response.status_code, status.HTTP_201_CREATED)
self.assertEqual(
response.json(),
{
"id": user.pk,
"uuid": str(user.uuid),
"distinct_id": user.distinct_id,
"first_name": "",
"email": "test+189@posthog.com",
}, # note the unchanged attributes
)
# User is subscribed to the new organization
user.refresh_from_db()
self.assertTrue(user.organization_memberships.filter(organization=new_org).exists())
# User is not changed
self.assertEqual(user.first_name, "")
self.assertFalse(user.check_password("new_password")) # Password is not updated
# Assert that the sign up event & identify calls were sent to PostHog analytics
mock_capture.assert_called_once_with(
user.distinct_id,
"user joined organization",
properties={
"organization_id": str(new_org.id),
"user_number_of_org_membership": 2,
"org_current_invite_count": 0,
"org_current_project_count": 1,
"org_current_members_count": 2,
},
groups={"instance": ANY, "organization": str(new_org.id)},
)
def test_cant_claim_sign_up_invite_without_required_attributes(self):
count: int = User.objects.count()
team_count: int = Team.objects.count()
org_count: int = Organization.objects.count()
required_attributes = [
"first_name",
"password",
]
invite: OrganizationInvite = OrganizationInvite.objects.create(
target_email="test+799@posthog.com", organization=self.organization,
)
for attribute in required_attributes:
body = {
"first_name": "Charlie",
"password": "test_password",
}
body.pop(attribute)
response = self.client.post(f"/api/signup/{invite.id}/", body)
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
self.assertEqual(
response.json(),
{
"type": "validation_error",
"code": "required",
"detail": "This field is required.",
"attr": attribute,
},
)
self.assertEqual(User.objects.count(), count)
self.assertEqual(Team.objects.count(), team_count)
self.assertEqual(Organization.objects.count(), org_count)
def test_cant_claim_invite_sign_up_with_short_password(self):
count: int = User.objects.count()
team_count: int = Team.objects.count()
org_count: int = Organization.objects.count()
invite: OrganizationInvite = OrganizationInvite.objects.create(
target_email="test+799@posthog.com", organization=self.organization,
)
response = self.client.post(f"/api/signup/{invite.id}/", {"first_name": "Charlie", "password": "123"})
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
self.assertEqual(
response.json(),
{
"type": "validation_error",
"code": "password_too_short",
"detail": "This password is too short. It must contain at least 8 characters.",
"attr": "password",
},
)
self.assertEqual(User.objects.count(), count)
self.assertEqual(Team.objects.count(), team_count)
self.assertEqual(Organization.objects.count(), org_count)
def test_cant_claim_invalid_invite(self):
count: int = User.objects.count()
team_count: int = Team.objects.count()
org_count: int = Organization.objects.count()
response = self.client.post(
f"/api/signup/{uuid.uuid4()}/", {"first_name": "Charlie", "password": "test_password"}
)
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
self.assertEqual(
response.json(),
{
"type": "validation_error",
"code": "invalid_input",
"detail": "The provided invite ID is not valid.",
"attr": None,
},
)
self.assertEqual(User.objects.count(), count)
self.assertEqual(Team.objects.count(), team_count)
self.assertEqual(Organization.objects.count(), org_count)
def test_cant_claim_expired_invite(self):
count: int = User.objects.count()
team_count: int = Team.objects.count()
org_count: int = Organization.objects.count()
invite: OrganizationInvite = OrganizationInvite.objects.create(
target_email="test+799@posthog.com", organization=self.organization,
)
invite.created_at = datetime.datetime(2020, 3, 3, tzinfo=pytz.UTC)
invite.save()
response = self.client.post(f"/api/signup/{invite.id}/", {"first_name": "Charlie", "password": "test_password"})
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
self.assertEqual(
response.json(),
{
"type": "validation_error",
"code": "expired",
"detail": "This invite has expired. Please ask your admin for a new one.",
"attr": None,
},
)
self.assertEqual(User.objects.count(), count)
self.assertEqual(Team.objects.count(), team_count)
self.assertEqual(Organization.objects.count(), org_count)
# Social signup (use invite)
def test_api_social_invite_sign_up(self):
Organization.objects.all().delete() # Can only create organizations in fresh instances
# simulate SSO process started
session = self.client.session
session.update({"backend": "google-oauth2"})
session.save()
response = self.client.post("/api/social_signup", {"organization_name": "Tech R Us", "email_opt_in": False})
self.assertEqual(response.status_code, status.HTTP_201_CREATED)
self.assertEqual(response.json(), {"continue_url": "/complete/google-oauth2/"})
# Check the values were saved in the session
self.assertEqual(self.client.session.get("organization_name"), "Tech R Us")
self.assertEqual(self.client.session.get("email_opt_in"), False)
self.assertEqual(self.client.session.get_expiry_age(), 3600)
def test_cannot_use_social_invite_sign_up_if_social_session_is_not_active(self):
Organization.objects.all().delete() # Can only create organizations in fresh instances
response = self.client.post("/api/social_signup", {"organization_name": "Tech R Us", "email_opt_in": False})
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
self.assertEqual(
response.json(),
{
"type": "validation_error",
"code": "invalid_input",
"detail": "Inactive social login session. Go to /login and log in before continuing.",
"attr": None,
},
)
self.assertEqual(len(self.client.session.keys()), 0) # Nothing is saved in the session
def test_cannot_use_social_invite_sign_up_without_required_attributes(self):
Organization.objects.all().delete() # Can only create organizations in fresh instances
response = self.client.post("/api/social_signup", {"email_opt_in": False})
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
self.assertEqual(
response.json(),
{
"type": "validation_error",
"code": "required",
"detail": "This field is required.",
"attr": "organization_name",
},
)
self.assertEqual(len(self.client.session.keys()), 0) # Nothing is saved in the session
|
nilq/baby-python
|
python
|
name = 'libseq'
from libseq.libseq import *
|
nilq/baby-python
|
python
|
import eel
if __name__ == '__main__':
eel.init('web')
eel.start('index.html', mode="chrome", size=(1296, 775))
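# Hedged sketch (illustrative only): eel.init('web') expects the HTML entry point
# under ./web, and Python functions become callable from the page's JavaScript
# once decorated with @eel.expose, e.g.:
#
#   @eel.expose
#   def greet(name):
#       return f"Hello, {name}!"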
|
nilq/baby-python
|
python
|
import os
def create_termuxconfig():
ATTR = ["API_ID", "API_HASH", "SESSION", "DB_URI", "LOG_CHAT", "TOKEN"]
file = open("termuxconfig.py", "w+")
file.write("class Termuxconfig:\n\ttemp = 'value'\n")
for x in ATTR:
        myvar = vars()  # grab the local namespace so attributes can be set by name
if x == "DB_URI":
value = createdb()
else:
data = input(f"\nEnter your {x}: ")
            value = int(data) if data and x == "LOG_CHAT" else f"'{data}'"
myvar[x] = value
file.write(f"""\t{x.replace('"', "")} = {value}\n""")
file.close()
return True
def startdb():
if os.path.exists("/data/data/com.termux/files/usr/var/lib/postgresql"):
os.system("pg_ctl -D $PREFIX/var/lib/postgresql start")
else:
try:
from termuxconfig import Termuxconfig
except (ImportError, ModuleNotFoundError):
os.system("cd ~ && cd Tron && ./start.sh")
try:
Termuxconfig.DB_URI
        except (AttributeError, NameError):  # termuxconfig may be missing entirely
file = open("termuxconfig.py", "a")
            file.write(f"\tDB_URI = {createdb()}\n")
file.close()
def createdb():
os.system("pkg install postgresql")
os.system("clear")
os.system("mkdir -p $PREFIX/var/lib/postgresql")
os.system("initdb $PREFIX/var/lib/postgresql")
os.system("clear")
username = str(input("\nEnter your database account username: "))
password = str(input("\nEnter your database account password: "))
dbname = str(input("\nEnter your database name: "))
print("\n")
os.system(f"createuser --superuser --pwprompt {username}")
os.system(f"createdb {dbname}")
os.system("pg_ctl -D $PREFIX/var/lib/postgresql start")
return f"'postgres://{username}:{password}@127.0.0.1:5432/{dbname}'"
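# Hedged sketch of the termuxconfig.py that create_termuxconfig() writes
# (values are illustrative; LOG_CHAT is stored as an int, the rest as strings):
#
#   class Termuxconfig:
#       temp = 'value'
#       API_ID = '123456'
#       API_HASH = '0123456789abcdef'
#       SESSION = '...'
#       DB_URI = 'postgres://user:pass@127.0.0.1:5432/mydb'
#       LOG_CHAT = -1001234567890
#       TOKEN = '...'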
|
nilq/baby-python
|
python
|
# GUI frame for the sineTransformations_function.py
try:
# for Python2
from Tkinter import * ## notice capitalized T in Tkinter
import tkFileDialog, tkMessageBox
except ImportError:
# for Python3
from tkinter import * ## notice lowercase 't' in tkinter here
from tkinter import filedialog as tkFileDialog
from tkinter import messagebox as tkMessageBox
import sys, os
from scipy.io.wavfile import read
import numpy as np
import sineTransformations_function as sT
sys.path.append(os.path.join(os.path.dirname(os.path.realpath(__file__)), '../models/'))
import utilFunctions as UF
class SineTransformations_frame:
def __init__(self, parent):
self.parent = parent
self.initUI()
def initUI(self):
choose_label = "inputFile:"
Label(self.parent, text=choose_label).grid(row=0, column=0, sticky=W, padx=5, pady=(10,2))
#TEXTBOX TO PRINT PATH OF THE SOUND FILE
self.filelocation = Entry(self.parent)
self.filelocation.focus_set()
self.filelocation["width"] = 32
self.filelocation.grid(row=0,column=0, sticky=W, padx=(70, 5), pady=(10,2))
self.filelocation.delete(0, END)
self.filelocation.insert(0, '../../sounds/mridangam.wav')
#BUTTON TO BROWSE SOUND FILE
open_file = Button(self.parent, text="...", command=self.browse_file) #see: def browse_file(self)
open_file.grid(row=0, column=0, sticky=W, padx=(340, 6), pady=(10,2)) #put it beside the filelocation textbox
#BUTTON TO PREVIEW SOUND FILE
preview = Button(self.parent, text=">", command=lambda:UF.wavplay(self.filelocation.get()), bg="gray30", fg="white")
preview.grid(row=0, column=0, sticky=W, padx=(385,6), pady=(10,2))
## SINE TRANSFORMATIONS ANALYSIS
#ANALYSIS WINDOW TYPE
wtype_label = "window:"
Label(self.parent, text=wtype_label).grid(row=1, column=0, sticky=W, padx=5, pady=(10,2))
self.w_type = StringVar()
self.w_type.set("hamming") # initial value
window_option = OptionMenu(self.parent, self.w_type, "rectangular", "hanning", "hamming", "blackman", "blackmanharris")
window_option.grid(row=1, column=0, sticky=W, padx=(65,5), pady=(10,2))
#WINDOW SIZE
M_label = "M:"
Label(self.parent, text=M_label).grid(row=1, column=0, sticky=W, padx=(180, 5), pady=(10,2))
self.M = Entry(self.parent, justify=CENTER)
self.M["width"] = 5
self.M.grid(row=1,column=0, sticky=W, padx=(200,5), pady=(10,2))
self.M.delete(0, END)
self.M.insert(0, "801")
#FFT SIZE
N_label = "N:"
Label(self.parent, text=N_label).grid(row=1, column=0, sticky=W, padx=(255, 5), pady=(10,2))
self.N = Entry(self.parent, justify=CENTER)
self.N["width"] = 5
self.N.grid(row=1,column=0, sticky=W, padx=(275,5), pady=(10,2))
self.N.delete(0, END)
self.N.insert(0, "2048")
#THRESHOLD MAGNITUDE
t_label = "t:"
Label(self.parent, text=t_label).grid(row=1, column=0, sticky=W, padx=(330,5), pady=(10,2))
self.t = Entry(self.parent, justify=CENTER)
self.t["width"] = 5
self.t.grid(row=1, column=0, sticky=W, padx=(348,5), pady=(10,2))
self.t.delete(0, END)
self.t.insert(0, "-90")
#MIN DURATION SINUSOIDAL TRACKS
minSineDur_label = "minSineDur:"
Label(self.parent, text=minSineDur_label).grid(row=2, column=0, sticky=W, padx=(5, 5), pady=(10,2))
self.minSineDur = Entry(self.parent, justify=CENTER)
self.minSineDur["width"] = 5
self.minSineDur.grid(row=2, column=0, sticky=W, padx=(87,5), pady=(10,2))
self.minSineDur.delete(0, END)
self.minSineDur.insert(0, "0.01")
#MAX NUMBER OF SINES
maxnSines_label = "maxnSines:"
Label(self.parent, text=maxnSines_label).grid(row=2, column=0, sticky=W, padx=(145,5), pady=(10,2))
self.maxnSines = Entry(self.parent, justify=CENTER)
self.maxnSines["width"] = 5
self.maxnSines.grid(row=2, column=0, sticky=W, padx=(220,5), pady=(10,2))
self.maxnSines.delete(0, END)
self.maxnSines.insert(0, "150")
#FREQUENCY DEVIATION ALLOWED
freqDevOffset_label = "freqDevOffset:"
Label(self.parent, text=freqDevOffset_label).grid(row=2, column=0, sticky=W, padx=(280,5), pady=(10,2))
self.freqDevOffset = Entry(self.parent, justify=CENTER)
self.freqDevOffset["width"] = 5
self.freqDevOffset.grid(row=2, column=0, sticky=W, padx=(372,5), pady=(10,2))
self.freqDevOffset.delete(0, END)
self.freqDevOffset.insert(0, "20")
#SLOPE OF THE FREQUENCY DEVIATION
freqDevSlope_label = "freqDevSlope:"
Label(self.parent, text=freqDevSlope_label).grid(row=3, column=0, sticky=W, padx=(5,5), pady=(10,2))
self.freqDevSlope = Entry(self.parent, justify=CENTER)
self.freqDevSlope["width"] = 5
self.freqDevSlope.grid(row=3, column=0, sticky=W, padx=(98,5), pady=(10,2))
self.freqDevSlope.delete(0, END)
self.freqDevSlope.insert(0, "0.02")
#BUTTON TO DO THE ANALYSIS OF THE SOUND
self.compute = Button(self.parent, text="Analysis/Synthesis", command=self.analysis, bg="dark red", fg="white")
self.compute.grid(row=4, column=0, padx=5, pady=(10,5), sticky=W)
#BUTTON TO PLAY ANALYSIS/SYNTHESIS OUTPUT
self.output = Button(self.parent, text=">", command=lambda:UF.wavplay('output_sounds/' + os.path.basename(self.filelocation.get())[:-4] + '_sineModel.wav'), bg="gray30", fg="white")
self.output.grid(row=4, column=0, padx=(145,5), pady=(10,5), sticky=W)
###
#SEPARATION LINE
Frame(self.parent,height=1,width=50,bg="black").grid(row=5, pady=5, sticky=W+E)
###
#FREQUENCY SCALING FACTORS
freqScaling_label = "Frequency scaling factors (time, value pairs):"
Label(self.parent, text=freqScaling_label).grid(row=6, column=0, sticky=W, padx=5, pady=(5,2))
self.freqScaling = Entry(self.parent, justify=CENTER)
self.freqScaling["width"] = 35
self.freqScaling.grid(row=7, column=0, sticky=W+E, padx=5, pady=(0,2))
self.freqScaling.delete(0, END)
self.freqScaling.insert(0, "[0, 2.0, 1, .3]")
#TIME SCALING FACTORS
timeScaling_label = "Time scaling factors (in time, value pairs):"
Label(self.parent, text=timeScaling_label).grid(row=8, column=0, sticky=W, padx=5, pady=(5,2))
self.timeScaling = Entry(self.parent, justify=CENTER)
self.timeScaling["width"] = 35
self.timeScaling.grid(row=9, column=0, sticky=W+E, padx=5, pady=(0,2))
self.timeScaling.delete(0, END)
self.timeScaling.insert(0, "[0, .0, .671, .671, 1.978, 1.978+1.0]")
#BUTTON TO DO THE SYNTHESIS
self.compute = Button(self.parent, text="Apply Transformation", command=self.transformation_synthesis, bg="dark green", fg="white")
self.compute.grid(row=13, column=0, padx=5, pady=(10,15), sticky=W)
#BUTTON TO PLAY TRANSFORMATION SYNTHESIS OUTPUT
self.transf_output = Button(self.parent, text=">", command=lambda:UF.wavplay('output_sounds/' + os.path.basename(self.filelocation.get())[:-4] + '_sineModelTransformation.wav'), bg="gray30", fg="white")
self.transf_output.grid(row=13, column=0, padx=(165,5), pady=(10,15), sticky=W)
# define options for opening file
self.file_opt = options = {}
options['defaultextension'] = '.wav'
options['filetypes'] = [('All files', '.*'), ('Wav files', '.wav')]
options['initialdir'] = '../../sounds/'
options['title'] = 'Open a mono audio file .wav with sample frequency 44100 Hz'
def browse_file(self):
self.filename = tkFileDialog.askopenfilename(**self.file_opt)
#set the text of the self.filelocation
self.filelocation.delete(0, END)
self.filelocation.insert(0,self.filename)
def analysis(self):
try:
inputFile = self.filelocation.get()
window = self.w_type.get()
M = int(self.M.get())
N = int(self.N.get())
t = int(self.t.get())
minSineDur = float(self.minSineDur.get())
maxnSines = int(self.maxnSines.get())
freqDevOffset = int(self.freqDevOffset.get())
freqDevSlope = float(self.freqDevSlope.get())
self.inputFile, self.fs, self.tfreq, self.tmag = sT.analysis(inputFile, window, M, N, t, minSineDur, maxnSines, freqDevOffset, freqDevSlope)
except ValueError:
tkMessageBox.showerror("Input values error", "Some parameters are incorrect")
def transformation_synthesis(self):
try:
inputFile = self.inputFile
fs = self.fs
tfreq = self.tfreq
tmag = self.tmag
freqScaling = np.array(eval(self.freqScaling.get()))
timeScaling = np.array(eval(self.timeScaling.get()))
sT.transformation_synthesis(inputFile, fs, tfreq, tmag, freqScaling, timeScaling)
except ValueError as errorMessage:
tkMessageBox.showerror("Input values error", errorMessage)
except AttributeError:
tkMessageBox.showerror("Analysis not computed", "First you must analyse the sound!")
|
nilq/baby-python
|
python
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Mar 1 18:44:04 2018
@author: JavaWizards
"""
import numpy as np
file = "/Users/nuno_chicoria/Downloads/b_should_be_easy.in"
handle = open(file)
R, C, F, N, B, T = handle.readline().split()
rides = []
index = []
for i in range(int(N)):
index.append(i)
for line in handle:
rides.append(line.split())
rides_np = np.asarray(rides)
rides_np = np.column_stack([rides_np, index])
rides_np = rides_np.astype(int)
rides_np = rides_np[rides_np[:,5].argsort()]
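# Vehicle bookkeeping (reading of the state codes below, inferred from the code):
#   "A" = available, "C" = en route to the ride's start cell,
#   "D" = en route to the ride's finish cell.
# Each entry: [state, current position, pickup cell, dropoff cell, assigned ride ids]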
vehicles = {}
for i in range(int(F)):
    vehicles[i] = ["A", [0, 0], [0, 0], [0, 0], []]
for i in range(int(T)):
rides_np = rides_np[rides_np[:,5] > i]
for item in range(len(vehicles)):
if vehicles[item][0] == "A":
if rides_np.size != 0:
if abs(vehicles[item][1][0] - rides_np[0, 0]) + abs(vehicles[item][1][1] - rides_np[0, 1]) + i >= rides_np[0, 4]:
if abs(vehicles[item][1][0] - rides_np[0, 0]) + abs(vehicles[item][1][1] - rides_np[0, 1]) + i + abs(rides_np[0,0] - rides_np[0,2]) + abs(rides_np[0,1] - rides_np[0,3]) <= rides_np[0, 5]:
vehicles[item][0] = "C"
vehicles[item][2] = [rides_np[0, 0], rides_np[0, 1]]
vehicles[item][3] = [rides_np[0, 2], rides_np[0, 3]]
vehicles[item][4].append(rides_np[0, 6])
rides_np = np.delete(rides_np, (0), axis=0)
else:
rides_np = np.delete(rides_np, (0), axis=0)
for item in range(len(vehicles)):
if vehicles[item][0] == "C":
if vehicles[item][1][0] < vehicles[item][2][0]:
vehicles[item][1][0] = vehicles[item][1][0] + 1
elif vehicles[item][1][0] > vehicles[item][2][0]:
vehicles[item][1][0] = vehicles[item][1][0] - 1
elif vehicles[item][1][0] == vehicles[item][2][0]:
if vehicles[item][1][1] < vehicles[item][2][1]:
vehicles[item][1][1] = vehicles[item][1][1] + 1
elif vehicles[item][1][1] > vehicles[item][2][1]:
vehicles[item][1][1] = vehicles[item][1][1] - 1
else:
vehicles[item][0] = "D"
for item in range(len(vehicles)):
if vehicles[item][0] == "D":
if vehicles[item][1][0] < vehicles[item][3][0]:
vehicles[item][1][0] += 1
elif vehicles[item][1][0] > vehicles[item][3][0]:
vehicles[item][1][0] -= 1
elif vehicles[item][1][0] == vehicles[item][3][0]:
if vehicles[item][1][1] < vehicles[item][3][1]:
vehicles[item][1][1] += 1
elif vehicles[item][1][1] > vehicles[item][3][1]:
vehicles[item][1][1] -= 1
else:
vehicles[item][0] = "A"
vehicles[item][2] = None
vehicles[item][3] = None
results = open("ghc2018.txt", "w+")
for item in range(len(vehicles)):
    if len(vehicles[item][4]) != 0:
results.write(str(len(vehicles[item][4])))
for ride in vehicles[item][4]:
results.write(" ")
results.write(str(ride))
results.write("\n")
results.close()
|
nilq/baby-python
|
python
|
""""
Animation code source:
https://gist.github.com/DanielTakeshi/fec9a5cd957eb05b04b6d06a16cc88ae
"""
import argparse
import time
import imageio
from PIL import Image
import numpy as np
import torch as T
import gym
import rl.environments
def evaluate(agent, env, EE, max_el, exp_name, gif=False):
print('[ Evaluation ]')
EZ = [] # Evaluation episodic return
ES = [] # Evaluation episodic score
    EL = []  # Evaluation episodic length
if gif: GifObs = []
for ee in range(1, EE+1):
print(f' [ Episode {ee} Agent Evaluation ] ')
o, d, Z, S, el = env.reset(), False, 0, 0, 0
while not(d or (el == max_el)):
print(f' [ Step {el} Agent Simulation ] ', end='\r')
if gif:
gifobs = env.render(mode='rgb_array', width=400, height=400)
GifObs.append(gifobs)
# Take deterministic actions at evaluation time
pi, _ = agent(o, deterministic=True)
a = pi.cpu().numpy()
o, r, d, info = env.step(a)
Z += r
            S = 0  # env exposes no score here; would otherwise be S += info['score']
el += 1
EZ.append(Z)
ES.append(S/el)
EL.append(el)
env.close()
    if gif:
        print('\nlen(GifObs): ', len(GifObs))
        print(' [ Saving a gif for evaluation ] ')
exp_path = f'./gifs/{exp_name}.gif'
with imageio.get_writer(exp_path, mode='I', duration=0.01) as writer:
for obs_np in GifObs:
writer.append_data(obs_np)
# print(' [ Saving a jpg for evaluation ] ')
# im = Image.fromarray(GifObs[50])
# im.save(f'./jpgs/{exp_name}.jpeg')
return EZ, ES, EL
def main(agent, env, alg, seed=0, epoch=0, metric='return', EE=10, gif=False):
print('\n')
print('=' * 50)
print(f'Starting a new evaluation')
print(f"\t Algorithm: {alg}")
print(f"\t Environment: {env}")
print(f"\t Random seed: {seed}")
print(f"\t Epoch: {epoch}")
print(f"\t Metric: {metric}")
print('=' * 50)
exp_name = f'{env}-{alg}-seed:{seed}'
eval_env = gym.make(env)
# eval_env.seed(seed)
# eval_env.action_space.seed(seed)
# eval_env.observation_space.seed(seed)
max_el = eval_env.env.spec.max_episode_steps
logs = dict()
agent.eval()
eval_start_real = time.time()
EZ, ES, EL = evaluate(agent, eval_env, EE, max_el, exp_name, gif)
logs['time/evaluation'] = time.time() - eval_start_real
if metric == 'score':
logs['evaluation/episodic_score_mean'] = np.mean(ES)
logs['evaluation/episodic_score_std'] = np.std(ES)
else:
logs['evaluation/episodic_return_mean'] = np.mean(EZ)
logs['evaluation/episodic_return_std'] = np.std(EZ)
logs['evaluation/episodic_length_mean'] = np.mean(EL)
for k, v in logs.items():
print(f'{k}: {round(v, 2)}')
print('\n')
print('End of the evaluation')
print('=' * 50)
if __name__ == "__main__":
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('-env', type=str)
parser.add_argument('-alg', type=str)
parser.add_argument('-seed', type=int)
parser.add_argument('-epoch', type=int)
parser.add_argument('-EE', type=int)
parser.add_argument('-metric', type=str)
parser.add_argument('-gif', nargs='?', const=True, type=bool)
args = parser.parse_args()
agent_path = f'./saved_agents/{args.env}-{args.alg}-seed:{args.seed}-epoch:{args.epoch}' + '.pth.tar'
agent = T.load(agent_path)
    kwargs = vars(args)
    main(agent, **kwargs)
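# Hedged invocation sketch (script name and environment/algorithm names are illustrative):
#   python evaluate.py -env Hopper-v2 -alg SAC -seed 0 -epoch 100 -EE 10 -metric return -gif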
|
nilq/baby-python
|
python
|
import numpy as np
from .Classifier import Classifier
class NearestNeighbourClassifier(Classifier):
def __init__(self) -> None:
self.x = np.array([])
self.y = np.array([])
def fit(self, x: np.ndarray, y: np.ndarray) -> None:
""" Fit the training data to the classifier.
Args:
x (np.ndarray): Instances, numpy array with shape (N,K)
y (np.ndarray): Class labels, numpy array with shape (N,)
"""
self.x = x
self.y = y
    def predict(self, x: np.ndarray) -> np.ndarray:
""" Perform prediction given some examples.
Args:
x (np.ndarray): Instances, numpy array with shape (N,K)
Returns:
y (np.ndarray): Predicted class labels, numpy array with shape (N,)
"""
min_elem_indices = np.empty(len(x), dtype=int)
for i, e in enumerate(x):
distances = np.empty(len(self.x))
for j, v in enumerate(self.x):
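                # Skipping j == i gives leave-one-out behaviour; this only makes
                # sense when predicting on the training set itself (for unseen
                # data the index match is coincidental).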
if j == i:
distances[j] = np.inf
else:
distances[j] = np.linalg.norm(e - v)
min_elem_indices[i] = np.argmin(distances)
return self.y[min_elem_indices]
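# Hedged usage sketch with a toy 1-D dataset:
#   clf = NearestNeighbourClassifier()
#   clf.fit(np.array([[0.0], [1.0], [10.0]]), np.array([0, 0, 1]))
#   clf.predict(np.array([[9.0], [0.5]]))  # -> array([1, 0])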
|
nilq/baby-python
|
python
|
from sys import platform
import sys
import os
# GLOG_minloglevel must be set before caffe is first imported for it to take effect
os.environ["GLOG_minloglevel"] = "1"
try:
    import caffe
except ImportError:
    print("This sample can only be run if Python Caffe is available on your system")
    print("Currently OpenPose does not compile Python Caffe. This may be supported in the future")
    sys.exit(-1)
import cv2
import numpy as np
import sys
import time
dir_path = os.path.dirname(os.path.realpath(__file__))
sys.path.append('../../python')
try:
from openpose import OpenPose
except:
raise Exception('Error: OpenPose library could not be found. Did you enable `BUILD_PYTHON` in CMake and have this Python script in the right folder?')
# Params for change
defRes = 736
scales = [1,0.5]
class Param:
caffemodel = dir_path + "/../../../models/pose/body_25/pose_iter_584000.caffemodel"
prototxt = dir_path + "/../../../models/pose/body_25/pose_deploy.prototxt"
# Load OpenPose object and Caffe Nets
params = dict()
params["logging_level"] = 3
params["output_resolution"] = "-1x-1"
params["net_resolution"] = "-1x"+str(defRes)
params["model_pose"] = "BODY_25"
params["alpha_pose"] = 0.6
params["scale_gap"] = 0.5
params["scale_number"] = len(scales)
params["render_threshold"] = 0.05
params["num_gpu_start"] = 0
params["disable_blending"] = False
params["default_model_folder"] = dir_path + "/../../../models/"
openpose = OpenPose(params)
caffe.set_mode_gpu()
caffe.set_device(0)
nets = []
for scale in scales:
nets.append(caffe.Net(Param.prototxt, Param.caffemodel, caffe.TEST))
print("Net loaded")
# Test Function
first_run = True
def func(frame):
# Get image processed for network, and scaled image
imagesForNet, imagesOrig = OpenPose.process_frames(frame, defRes, scales)
# Reshape
global first_run
if first_run:
for i in range(0, len(scales)):
net = nets[i]
imageForNet = imagesForNet[i]
in_shape = net.blobs['image'].data.shape
in_shape = (1, 3, imageForNet.shape[1], imageForNet.shape[2])
net.blobs['image'].reshape(*in_shape)
net.reshape()
first_run = False
print("Reshaped")
# Forward pass to get heatmaps
heatmaps = []
for i in range(0, len(scales)):
net = nets[i]
imageForNet = imagesForNet[i]
net.blobs['image'].data[0,:,:,:] = imageForNet
net.forward()
heatmaps.append(net.blobs['net_output'].data[:,:,:,:])
# Pose from HM Test
array, frame = openpose.poseFromHM(frame, heatmaps, scales)
# Draw Heatmaps instead
#hm = heatmaps[0][:,0:18,:,:]; frame = OpenPose.draw_all(imagesOrig[0], hm, -1, 1, True)
#paf = heatmaps[0][:,20:,:,:]; frame = OpenPose.draw_all(imagesOrig[0], paf, -1, 4, False)
return frame
img = cv2.imread(dir_path + "/../../../examples/media/COCO_val2014_000000000192.jpg")
frame = func(img)
while 1:
cv2.imshow("output", frame)
cv2.waitKey(15)
|
nilq/baby-python
|
python
|