content
stringlengths
0
1.05M
origin
stringclasses
2 values
type
stringclasses
2 values
import re

from fsrtools.simulation_tools._manager_utils import integer_filter


def product_combination_generator(iterate_dict):
    """Return the cartesian product of the value lists in *iterate_dict*.

    Each element of the returned list is a dict mapping every key of
    *iterate_dict* to one of its values, so the list enumerates all
    combinations.  Returns an empty list when *iterate_dict* is empty.
    """
    total_length = 1
    length_dict = {}
    combination_list = []
    if iterate_dict:  # empty dict is falsy (was len(iterate_dict.keys()))
        for key in iterate_dict:
            length_dict[key] = len(iterate_dict[key])
            total_length = total_length * len(iterate_dict[key])
        combination_list = [{} for _ in range(total_length)]
        # Keys with longer value lists vary slowest: walk keys by
        # descending list length, shrinking the repeat block each time.
        repeat_length = total_length
        previous_length = total_length
        for key, length in sorted(length_dict.items(), key=lambda x: -x[1]):
            repeat_length //= length
            for i in range(total_length):
                combination_list[i][key] = iterate_dict[key][
                    (i % previous_length) // repeat_length
                ]
            previous_length = repeat_length
    return combination_list


def set_total_combinations(simulate_params, logger):
    """Resolve parameter expressions and expand list-valued parameters.

    List values are collected for combination expansion; string values
    that reference only scalar sibling parameters are evaluated as
    Python expressions.  Returns ``(resolved_params, combinations)``
    where *combinations* is the cartesian product of all list values.
    """
    simulate_params_temp = simulate_params.copy()
    iterate_dict = {}
    for key in simulate_params:
        if isinstance(simulate_params[key], list):
            iterate_dict[key] = simulate_params[key]
            logger('[list input : {0} : {1}]'.format(key, simulate_params[key]))
        elif isinstance(simulate_params[key], str):
            counter = 0
            local_variables = {}
            for key_t in simulate_params:
                if key_t != key and \
                        re.search(r'\b' + key_t + r'\b', simulate_params[key]):
                    counter += 1
                    # Only scalar (non-list, non-str) siblings can be bound
                    # as variables for evaluation.
                    if not isinstance(simulate_params[key_t], list) and \
                            not isinstance(simulate_params[key_t], str):
                        local_variables[key_t] = simulate_params[key_t]
            if len(local_variables) == counter:
                try:
                    # SECURITY: eval() on a config-supplied string -- only
                    # safe if simulation parameter files are trusted input.
                    calculated_value = \
                        eval(simulate_params[key], globals(), local_variables)
                    simulate_params_temp[key] = \
                        integer_filter(calculated_value)
                except NameError as err:
                    # BUG FIX: log message typo "paremter" -> "parameter".
                    logger('[{} as parameter : "{}" is input as "{}"]'
                           .format(err, key, simulate_params[key]))
                    simulate_params_temp[key] = simulate_params[key]
                logger('[{0} : {1}]'.format(key, simulate_params_temp[key]))
            else:
                # NOTE(review): this logs the scalar dependencies, not the
                # list-valued ones that caused the deferral -- confirm intent.
                for key_t in local_variables:
                    logger('{0} is as command: depend on changing {1}'
                           .format(key, key_t))
    total_combinations = product_combination_generator(iterate_dict)
    return simulate_params_temp, total_combinations
nilq/baby-python
python
############### usha/ssd_distplot_seaborn.ipynb
import csv
import os.path
import math

import matplotlib
matplotlib.use('Agg')  # BUG FIX: backend must be set BEFORE pyplot is imported
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns

# Resolve input/output locations relative to the project root (two dirs up).
my_path = os.path.abspath(os.path.join(os.path.dirname(__file__), '..', '..'))
path = os.path.join(my_path, 'documents', 'Leadss.csv')
fpath = os.path.join(my_path, 'static', 'images', 'distplot')

data = path  # 'C:\Users\Usha\Downloads\Ecommerce Purchases.csv'


def histo(data):
    """Plot a seaborn distribution for every numeric column of the CSV at
    *data* and save each figure under ``fpath`` as ``image<i>.png``.
    """
    print(data)
    # BUG FIX: the original called pd.read_csv(data, header, sep), which
    # passed the string 'None' positionally as the separator.  Use keywords,
    # and the real None (not the string 'None') for a header-less file.
    df = pd.read_csv(data, sep=',', header=None)
    # Keep only the numeric columns.
    # NOTE(review): _get_numeric_data() is a private pandas API; the public
    # equivalent is df.select_dtypes(include='number').
    pp = df._get_numeric_data()
    print(pp.head(5))
    first_col = list(pp)  # column labels
    print(first_col)
    n = pp.shape[0]  # number of rows in the file
    for d, num in enumerate(first_col):
        # Rice rule for the histogram bin count.
        bins = round(2 * n ** (1 / 3))
        print(bins)
        # BUG FIX: 'bins' was computed but never passed to distplot.
        distplot = sns.distplot(pp[num], bins=bins)
        print(distplot)
        fig = distplot.get_figure()
        # Portable path join instead of hand-built '\\' separators.
        fig.savefig(os.path.join(fpath, 'image' + str(d) + '.png'))
        plt.close()

#histo(data)
nilq/baby-python
python
{ "includes": [ "../common.gypi" ], "targets": [ { "target_name": "libgdal_jpeg_frmt", "type": "static_library", "sources": [ "../gdal/frmts/jpeg/jpgdataset.cpp", "../gdal/frmts/jpeg/libjpeg/jcapimin.c", "../gdal/frmts/jpeg/libjpeg/jcapistd.c", "../gdal/frmts/jpeg/libjpeg/jccoefct.c", "../gdal/frmts/jpeg/libjpeg/jccolor.c", "../gdal/frmts/jpeg/libjpeg/jcdctmgr.c", "../gdal/frmts/jpeg/libjpeg/jchuff.c", "../gdal/frmts/jpeg/libjpeg/jcinit.c", "../gdal/frmts/jpeg/libjpeg/jcmainct.c", "../gdal/frmts/jpeg/libjpeg/jcmarker.c", "../gdal/frmts/jpeg/libjpeg/jcmaster.c", "../gdal/frmts/jpeg/libjpeg/jcomapi.c", "../gdal/frmts/jpeg/libjpeg/jcparam.c", "../gdal/frmts/jpeg/libjpeg/jcphuff.c", "../gdal/frmts/jpeg/libjpeg/jcprepct.c", "../gdal/frmts/jpeg/libjpeg/jcsample.c", "../gdal/frmts/jpeg/libjpeg/jctrans.c", "../gdal/frmts/jpeg/libjpeg/jdapimin.c", "../gdal/frmts/jpeg/libjpeg/jdapistd.c", "../gdal/frmts/jpeg/libjpeg/jdatadst.c", "../gdal/frmts/jpeg/libjpeg/jdatasrc.c", "../gdal/frmts/jpeg/libjpeg/jdcoefct.c", "../gdal/frmts/jpeg/libjpeg/jdcolor.c", "../gdal/frmts/jpeg/libjpeg/jddctmgr.c", "../gdal/frmts/jpeg/libjpeg/jdhuff.c", "../gdal/frmts/jpeg/libjpeg/jdinput.c", "../gdal/frmts/jpeg/libjpeg/jdmainct.c", "../gdal/frmts/jpeg/libjpeg/jdmarker.c", "../gdal/frmts/jpeg/libjpeg/jdmaster.c", "../gdal/frmts/jpeg/libjpeg/jdmerge.c", "../gdal/frmts/jpeg/libjpeg/jdphuff.c", "../gdal/frmts/jpeg/libjpeg/jdpostct.c", "../gdal/frmts/jpeg/libjpeg/jdsample.c", "../gdal/frmts/jpeg/libjpeg/jdtrans.c", "../gdal/frmts/jpeg/libjpeg/jerror.c", "../gdal/frmts/jpeg/libjpeg/jfdctflt.c", "../gdal/frmts/jpeg/libjpeg/jfdctfst.c", "../gdal/frmts/jpeg/libjpeg/jfdctint.c", "../gdal/frmts/jpeg/libjpeg/jidctflt.c", "../gdal/frmts/jpeg/libjpeg/jidctfst.c", "../gdal/frmts/jpeg/libjpeg/jidctint.c", "../gdal/frmts/jpeg/libjpeg/jidctred.c", "../gdal/frmts/jpeg/libjpeg/jmemansi.c", "../gdal/frmts/jpeg/libjpeg/jmemmgr.c", "../gdal/frmts/jpeg/libjpeg/jquant1.c", "../gdal/frmts/jpeg/libjpeg/jquant2.c", 
"../gdal/frmts/jpeg/libjpeg/jutils.c", "../gdal/frmts/jpeg/vsidataio.cpp" ], "include_dirs": [ "../gdal/frmts/jpeg", "../gdal/frmts/jpeg/libjpeg" ] } ] }
nilq/baby-python
python
import datetime
import numpy as np
import cv2
import pickle
import face_recognition

# -------------------------------------------------------------------
# Parameters
# -------------------------------------------------------------------
CONF_THRESHOLD = 0.5   # minimum detection confidence kept by post_process
NMS_THRESHOLD = 0.4    # IoU threshold for non-maximum suppression
IMG_WIDTH = 416
IMG_HEIGHT = 416

# Default colors (BGR tuples, as OpenCV expects)
COLOR_BLUE = (255, 0, 0)
COLOR_GREEN = (0, 255, 0)
COLOR_RED = (0, 0, 255)
COLOR_WHITE = (255, 255, 255)
COLOR_YELLOW = (0, 255, 255)

# face encoding data -- populated by load_encodings()
data = None

# -------------------------------------------------------------------
# Help functions
# -------------------------------------------------------------------

# Get the names of the output layers
def get_outputs_names(net):
    """Return the names of the network's output (unconnected) layers."""
    # Get the names of all the layers in the network
    layers_names = net.getLayerNames()
    # Get the names of the output layers, i.e. the layers with unconnected
    # outputs
    # NOTE(review): indexing i[0] assumes getUnconnectedOutLayers() returns
    # nested arrays (OpenCV < 4.5.4); newer OpenCV returns a flat array and
    # this would raise -- confirm the cv2 version pinned for this project.
    return [layers_names[i[0] - 1] for i in net.getUnconnectedOutLayers()]


# Draw the predicted bounding box
def draw_predict(frame, conf, left, top, right, bottom):
    """Draw a yellow bounding box on *frame* labelled with *conf* (0-1)."""
    # Draw a bounding box.
    cv2.rectangle(frame, (left, top), (right, bottom), COLOR_YELLOW, 2)

    text = '{:.2f}'.format(conf)

    # Display the label at the top of the bounding box
    label_size, base_line = cv2.getTextSize(text, cv2.FONT_HERSHEY_SIMPLEX,
                                            0.5, 1)
    top = max(top, label_size[1])
    cv2.putText(frame, text, (left, top - 4), cv2.FONT_HERSHEY_SIMPLEX, 0.4,
                COLOR_WHITE, 1)


def post_process(frame, outs, conf_threshold, nms_threshold):
    """Filter raw YOLO outputs *outs* and return the surviving boxes.

    Returns a list of ``[left, top, width, height]`` boxes in pixel
    coordinates after confidence filtering and non-maximum suppression.
    """
    frame_height = frame.shape[0]
    frame_width = frame.shape[1]

    # Scan through all the bounding boxes output from the network and keep only
    # the ones with high confidence scores. Assign the box's class label as the
    # class with the highest score.
    confidences = []
    boxes = []
    final_boxes = []
    for out in outs:
        for detection in out:
            scores = detection[5:]
            class_id = np.argmax(scores)
            confidence = scores[class_id]
            if confidence > conf_threshold:
                # Detections are center/size fractions of the frame; convert
                # to a top-left pixel box.
                center_x = int(detection[0] * frame_width)
                center_y = int(detection[1] * frame_height)
                width = int(detection[2] * frame_width)
                height = int(detection[3] * frame_height)
                left = int(center_x - width / 2)
                top = int(center_y - height / 2)
                confidences.append(float(confidence))
                boxes.append([left, top, width, height])

    # Perform non maximum suppression to eliminate redundant
    # overlapping boxes with lower confidences.
    indices = cv2.dnn.NMSBoxes(boxes, confidences, conf_threshold,
                               nms_threshold)
    for i in indices:
        # NOTE(review): i[0] assumes NMSBoxes returns nested arrays
        # (OpenCV < 4.5.4); newer OpenCV returns a flat array.
        i = i[0]
        box = boxes[i]
        left = box[0]
        top = box[1]
        width = box[2]
        height = box[3]
        final_boxes.append(box)
        #left, top, right, bottom = refined_box(left, top, width, height)
        # draw_predict(frame, confidences[i], left, top, left + width,
        #              top + height)
        #draw_predict(frame, confidences[i], left, top, right, bottom)
    return final_boxes


class FPS:
    """Simple wall-clock frames-per-second meter."""

    def __init__(self):
        # store the start time, end time, and total number of frames
        # that were examined between the start and end intervals
        self._start = None
        self._end = None
        self._num_frames = 0

    def start(self):
        """Record the start time; returns self for chaining."""
        self._start = datetime.datetime.now()
        return self

    def stop(self):
        """Record the end time."""
        self._end = datetime.datetime.now()

    def update(self):
        # increment the total number of frames examined during the
        # start and end intervals
        self._num_frames += 1

    def elapsed(self):
        # return the total number of seconds between the start and
        # end interval
        return (self._end - self._start).total_seconds()

    def fps(self):
        # compute the (approximate) frames per second
        return self._num_frames / self.elapsed()


def refined_box(left, top, width, height):
    """Shrink a detection box vertically (15% off the top, 5% off the
    bottom) and recenter it horizontally to keep an even width."""
    right = left + width
    bottom = top + height

    original_vert_height = bottom - top
    top = int(top + original_vert_height * 0.15)
    bottom = int(bottom - original_vert_height * 0.05)

    margin = ((bottom - top) - (right - left)) // 2
    left = left - margin if (bottom - top - right + left) % 2 == 0 else left - margin - 1
    right = right + margin
    return left, top, right, bottom


#dlib face recognition
def load_encodings(fname):
    """Load pickled face encodings from *fname* into the module-level
    ``data`` dict (expects keys "encodings" and "names")."""
    global data
    # SECURITY NOTE: pickle.loads on a file -- only load trusted encodings.
    data = pickle.loads(open(fname, "rb").read())


def recognize_face(frame, boxes):
    """Match faces in *frame* (BGR) against the loaded encodings and draw
    each recognized name on the frame in place.

    *boxes* are ``[left, top, width, height]`` detections from post_process.
    """
    #Converting boxes according to face_recognition
    # face_recognition wants (top, right, bottom, left) tuples.
    reboxes = []
    for j in boxes:
        reboxes.append([j[1], j[0] + j[2], j[1] + j[3], j[0]])
    #Convert BGR to RGB
    rgb = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)

    # compute the facial embeddings for each face bounding box
    encodings = face_recognition.face_encodings(rgb, reboxes)
    names = []

    # loop over the facial embeddings
    for encoding in encodings:
        # attempt to match each face in the input image to our known
        # encodings
        matches = face_recognition.compare_faces(data["encodings"],
                                                 encoding, 0.6)
        name = "Unknown"

        # check to see if we have found a match
        if True in matches:
            # find the indexes of all matched faces then initialize a
            # dictionary to count the total number of times each face
            # was matched
            matchedIdxs = [i for (i, b) in enumerate(matches) if b]
            counts = {}

            # loop over the matched indexes and maintain a count for
            # each recognized face
            for i in matchedIdxs:
                name = data["names"][i]
                counts[name] = counts.get(name, 0) + 1

            # determine the recognized face with the largest number
            # of votes (note: in the event of an unlikely tie Python
            # will select first entry in the dictionary)
            name = max(counts, key=counts.get)

        # update the list of names
        names.append(name)

    # loop over the recognized faces
    for ((x, y, w, h), name) in zip(boxes, names):
        # draw the predicted face name on the image
        cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 255, 0), 2)
        s = y - 15 if y - 15 > 15 else y + 15
        cv2.putText(frame, name, (x, s), cv2.FONT_HERSHEY_SIMPLEX,
                    0.75, (0, 255, 0), 2)
    print("Faces recognized:", names)
nilq/baby-python
python
#!/usr/bin/python3 # encoding: utf-8 """ @author: m1n9yu3 @license: (C) Copyright 2021-2023, Node Supply Chain Manager Corporation Limited. @file: web_server.py @time: 2021/4/27 13:41 @desc: """ from flask import * from huluxiaThirdflood_api import get_random_imageurl import conf app = Flask(__name__) @app.route('/') def hello_world(): # 爬取 12 个帖子 image_list = get_random_imageurl(conf.display_num) # print(image_list) display_image = [] for i in range(0, len(image_list), 3): try: display_image.append([["#imageModal%d" % i, image_list[i]], ["#imageModal%d" % (i+1), image_list[i+1]], ["#imageModal%d" % (i+2), image_list[i+2]]]) except Exception as e: # 报错说明, 爬取到的图片不足3 的倍数 pass large_image = [] for image in display_image: large_image += [[i[0].replace('#', ""), i[1]] for i in image] # print(large_image) return render_template('index.html', imagelist=display_image, large_image=large_image) @app.route('/favicon.ico') def favicon(): return app.send_static_file('images/favicon.ico') @app.route('/<num>') def displayImage(num): # 这里数量达到 100 时,会发生 index out of range 异常, 正在想办法整改 num = int(num) image_list = get_random_imageurl(num) # print(image_list) display_image = [] for i in range(0, len(image_list), 3): try: display_image.append([["#imageModal%d" % i, image_list[i]], ["#imageModal%d" % (i+1), image_list[i+1]], ["#imageModal%d" % (i+2), image_list[i+2]]]) except Exception as e: # 报错说明, 爬取到的图片不足3 的倍数 pass large_image = [] for image in display_image: large_image += [[i[0].replace('#', ""), i[1]] for i in image] # print(large_image) return render_template('index.html', imagelist=display_image, large_image=large_image) if __name__ == '__main__': app.run(debug=True, host="0.0.0.0", port=8999)
nilq/baby-python
python
import UDPComms
import numpy as np
import time
from pupper.Config import Configuration
from src.State import BehaviorState, State, ArmState
from src.Command import Command
from src.Utilities import deadband, clipped_first_order_filter


class JoystickInterface:
    """Translates PS4 joystick messages received over UDP into robot
    Commands, tracking button edges so each press fires one event."""

    def __init__(
        self,
        config: Configuration,
        udp_port=8830,
        udp_publisher_port=8840,
    ):
        self.config = config
        # Previous toggle values, kept so a held button only triggers an
        # event on the rising edge.
        self.previous_gait_toggle = 0
        self.previous_state = BehaviorState.REST
        self.previous_hop_toggle = 0
        self.previous_activate_toggle = 0
        self.previous_record_toggle = 0
        self.previous_arm_toggle = 0
        self.previous_grab_toggle = 0

        self.message_rate = 50
        self.udp_handle = UDPComms.Subscriber(udp_port, timeout=0.3)
        self.udp_publisher = UDPComms.Publisher(udp_publisher_port)

    def get_command(self, state, do_print=False):
        """Build a Command from the latest joystick message.

        Returns a default Command on UDP timeout.  Button meanings differ
        depending on whether the arm is active (ArmState.DEACTIVATED means
        locomotion controls; otherwise sticks drive the arm).
        """
        try:
            msg = self.udp_handle.get()
            command = Command()

            ####### Handle discrete commands ########
            # Check if requesting a state transition to trotting, or from trotting to resting
            # R2 is an analog trigger, hence the 0.99 threshold edge detect.
            arm_toggle = msg["R2"]
            command.arm_event = arm_toggle > 0.99 and self.previous_arm_toggle <= 0.99

            # Check if requesting recording
            record_toggle = msg["L2"]
            command.record_event = record_toggle == 1 and self.previous_record_toggle == 0

            if state.arm_state is ArmState.DEACTIVATED:
                activate_toggle = msg["L1"]
                command.activate_event = activate_toggle == 1 and self.previous_activate_toggle == 0

                gait_toggle = msg["R1"]
                command.trot_event = gait_toggle == 1 and self.previous_gait_toggle == 0

                # Check if requesting a state transition to hopping, from trotting or resting
                hop_toggle = msg["x"]
                command.hop_event = hop_toggle == 1 and self.previous_hop_toggle == 0

                grab_toggle = 0
            else:
                # Arm mode: L1/R1/x are repurposed, so zero the locomotion
                # toggles and use "x" for the gripper instead.
                gait_toggle = 0
                activate_toggle = 0
                hop_toggle = 0
                grab_toggle = msg["x"]
                command.gripper_event = grab_toggle == 1 and self.previous_grab_toggle == 0

            # Update previous values for toggles and state
            self.previous_arm_toggle = arm_toggle
            self.previous_gait_toggle = gait_toggle
            self.previous_hop_toggle = hop_toggle
            self.previous_grab_toggle = grab_toggle
            self.previous_activate_toggle = activate_toggle

            if state.arm_state is ArmState.DEACTIVATED:
                ####### Handle continuous commands ########
                x_vel = msg["ly"] * self.config.max_x_velocity
                y_vel = msg["lx"] * -self.config.max_y_velocity
                command.horizontal_velocity = np.array([x_vel, y_vel])
                command.yaw_rate = msg["rx"] * -self.config.max_yaw_rate
            else:
                command.arm_x_diff = msg["lx"] * self.config.arm_x_factor
                command.arm_y_diff = msg["ly"] * self.config.arm_y_factor
                # NOTE(review): += / -= assumes Command() initializes
                # arm_z_diff to a numeric value -- confirm in src.Command.
                command.arm_z_diff += msg["R1"] * self.config.arm_z_factor
                command.arm_z_diff -= msg["L1"] * self.config.arm_z_factor

            message_rate = msg["message_rate"]
            message_dt = 1.0 / message_rate

            # Pitch is rate-limited through a deadband and a clipped
            # first-order filter, then integrated over the message period.
            pitch = msg["ry"] * self.config.max_pitch
            deadbanded_pitch = deadband(pitch, self.config.pitch_deadband)
            pitch_rate = clipped_first_order_filter(
                state.pitch,
                deadbanded_pitch,
                self.config.max_pitch_rate,
                self.config.pitch_time_constant,
            )
            command.pitch = state.pitch + message_dt * pitch_rate

            # D-pad moves body height (up/down) and roll (left/right).
            height_movement = msg["dpady"]
            command.height = state.height - message_dt * self.config.z_speed * height_movement

            roll_movement = -msg["dpadx"]
            command.roll = state.roll + message_dt * self.config.roll_speed * roll_movement

            return command

        except UDPComms.timeout:
            if do_print:
                print("UDP Timed out")
            return Command()

    def set_color(self, color):
        """Publish a PS4 LED color request back to the controller."""
        joystick_msg = {"ps4_color": color}
        self.udp_publisher.send(joystick_msg)
nilq/baby-python
python
import cv2
from je_open_cv import template_detection

# Locate the template image inside the test image; returns
# [found_flag, box, annotated_image] per je_open_cv's API.
image_data_array = template_detection.find_object("../test.png", "../test_template.png", detect_threshold=0.9, draw_image=True)
print(image_data_array)
if image_data_array[0] is True:
    # Box is indexed [0..3]; height/width derived from opposite corners.
    height = image_data_array[1][2] - image_data_array[1][0]
    width = image_data_array[1][3] - image_data_array[1][1]
    # NOTE(review): this is the half-size of the box, not its center in
    # image coordinates (the box origin is not added back) -- confirm
    # whether the caller wants size/2 or the absolute center.
    center = [int(height / 2), int(width / 2)]
    print(center)
    cv2.imshow("test", image_data_array[2])
    cv2.waitKey(0)
    cv2.destroyAllWindows()
nilq/baby-python
python
from __future__ import division
# NOTE(review): izip exists only in Python 2's itertools; this script will
# not import under Python 3 (where zip is already lazy).
from itertools import izip, count
import matplotlib.pyplot as plt
from numpy import linspace, loadtxt, ones, convolve
import numpy as np
import pandas as pd
import collections
from random import randint
from matplotlib import style
style.use('fivethirtyeight')

#tab csv
# Monthly sunspot counts, whitespace-separated: month index, count.
data = loadtxt("dataset/sunspots.txt", float)

# 2. View the data as a table
data_as_frame = pd.DataFrame(data, columns=['Months', 'SunSpots'])
data_as_frame.head()
nilq/baby-python
python
# -*- coding: utf-8 -*-

from __future__ import unicode_literals

import mock
import pytest

from h.interfaces import IGroupService
from h.services.annotation_json_presentation import AnnotationJSONPresentationService
from h.services.annotation_json_presentation import annotation_json_presentation_service_factory


@pytest.mark.usefixtures('presenters')
class TestAnnotationJSONPresentationService(object):
    """Unit tests for AnnotationJSONPresentationService: formatter wiring,
    single-annotation presentation, and batch present_all behaviour."""

    def test_initializes_flag_formatter(self, formatters):
        AnnotationJSONPresentationService(session=mock.sentinel.session,
                                          user=mock.sentinel.user,
                                          group_svc=mock.sentinel.group_svc,
                                          links_svc=mock.sentinel.links_svc,
                                          flag_svc=mock.sentinel.flag_svc)

        formatters.AnnotationFlagFormatter.assert_called_once_with(mock.sentinel.flag_svc,
                                                                   mock.sentinel.user)

    def test_it_configures_flag_formatter(self, formatters):
        svc = AnnotationJSONPresentationService(session=mock.sentinel.session,
                                                user=mock.sentinel.user,
                                                group_svc=mock.sentinel.group_svc,
                                                links_svc=mock.sentinel.links_svc,
                                                flag_svc=mock.sentinel.flag_svc)

        assert formatters.AnnotationFlagFormatter.return_value in svc.formatters

    def test_present_inits_presenter(self, svc, presenters, annotation_resource):
        svc.present(annotation_resource)

        presenters.AnnotationJSONPresenter.assert_called_once_with(annotation_resource)

    def test_present_adds_formatters(self, svc, annotation_resource, presenters):
        formatters = [mock.Mock(), mock.Mock()]
        svc.formatters = formatters
        presenter = presenters.AnnotationJSONPresenter.return_value

        svc.present(annotation_resource)

        assert presenter.add_formatter.mock_calls == [mock.call(f) for f in formatters]

    def test_present_returns_presenter_dict(self, svc, presenters):
        presenter = presenters.AnnotationJSONPresenter.return_value

        result = svc.present(mock.Mock())

        assert result == presenter.asdict.return_value

    def test_present_all_loads_annotations_from_db(self, svc, storage):
        svc.present_all(['id-1', 'id-2'])

        storage.fetch_ordered_annotations.assert_called_once_with(
            svc.session, ['id-1', 'id-2'], query_processor=mock.ANY)

    def test_present_all_initialises_annotation_resources(self, svc, storage, resources):
        ann = mock.Mock()
        storage.fetch_ordered_annotations.return_value = [ann]

        svc.present_all(['ann-1'])

        resources.AnnotationResource.assert_called_once_with(ann, svc.group_svc, svc.links_svc)

    def test_present_all_presents_annotation_resources(self, svc, storage, resources, present):
        storage.fetch_ordered_annotations.return_value = [mock.Mock()]
        resource = resources.AnnotationResource.return_value

        svc.present_all(['ann-1'])

        present.assert_called_once_with(svc, resource)

    def test_present_all_preloads_formatters(self, svc, storage):
        formatter = mock.Mock(spec_set=['preload'])
        svc.formatters = [formatter]

        svc.present_all(['ann-1', 'ann-2'])

        formatter.preload.assert_called_once_with(['ann-1', 'ann-2'])

    def test_returns_presented_annotations(self, svc, storage, present):
        storage.fetch_ordered_annotations.return_value = [mock.Mock()]

        result = svc.present_all(['ann-1'])

        assert result == [present.return_value]

    # ----- fixtures -----------------------------------------------------

    @pytest.fixture
    def svc(self, db_session):
        group_svc = mock.Mock()
        links_svc = mock.Mock()
        flag_svc = mock.Mock()
        return AnnotationJSONPresentationService(session=db_session,
                                                 user=None,
                                                 group_svc=group_svc,
                                                 links_svc=links_svc,
                                                 flag_svc=flag_svc)

    @pytest.fixture
    def annotation_resource(self):
        return mock.Mock(spec_set=['annotation'], annotation=mock.Mock())

    @pytest.fixture
    def presenters(self, patch):
        return patch('h.services.annotation_json_presentation.presenters')

    @pytest.fixture
    def storage(self, patch):
        return patch('h.services.annotation_json_presentation.storage')

    @pytest.fixture
    def resources(self, patch):
        return patch('h.services.annotation_json_presentation.resources')

    @pytest.fixture
    def present(self, patch):
        return patch('h.services.annotation_json_presentation.AnnotationJSONPresentationService.present')

    @pytest.fixture
    def formatters(self, patch):
        return patch('h.services.annotation_json_presentation.formatters')
@pytest.mark.usefixtures('group_svc', 'links_svc', 'flag_svc')
class TestAnnotationJSONPresentationServiceFactory(object):
    """Tests that the factory wires each registered Pyramid service into
    the corresponding AnnotationJSONPresentationService kwarg."""

    def test_returns_service(self, pyramid_request):
        svc = annotation_json_presentation_service_factory(None, pyramid_request)
        assert isinstance(svc, AnnotationJSONPresentationService)

    def test_provides_session(self, pyramid_request, service_class):
        annotation_json_presentation_service_factory(None, pyramid_request)

        _, kwargs = service_class.call_args
        assert kwargs['session'] == pyramid_request.db

    def test_provides_user(self, pyramid_request, service_class):
        annotation_json_presentation_service_factory(None, pyramid_request)

        _, kwargs = service_class.call_args
        assert kwargs['user'] == pyramid_request.user

    def test_provides_group_service(self, pyramid_request, service_class, group_svc):
        annotation_json_presentation_service_factory(None, pyramid_request)

        _, kwargs = service_class.call_args
        assert kwargs['group_svc'] == group_svc

    def test_provides_links_service(self, pyramid_request, service_class, links_svc):
        annotation_json_presentation_service_factory(None, pyramid_request)

        _, kwargs = service_class.call_args
        assert kwargs['links_svc'] == links_svc

    def test_provides_flag_service(self, pyramid_request, service_class, flag_svc):
        annotation_json_presentation_service_factory(None, pyramid_request)

        _, kwargs = service_class.call_args
        assert kwargs['flag_svc'] == flag_svc

    # ----- fixtures -----------------------------------------------------

    @pytest.fixture
    def service_class(self, patch):
        return patch('h.services.annotation_json_presentation.AnnotationJSONPresentationService')

    @pytest.fixture
    def group_svc(self, pyramid_config):
        svc = mock.Mock()
        pyramid_config.register_service(svc, iface=IGroupService)
        return svc

    @pytest.fixture
    def links_svc(self, pyramid_config):
        svc = mock.Mock()
        pyramid_config.register_service(svc, name='links')
        return svc

    @pytest.fixture
    def flag_svc(self, pyramid_config):
        svc = mock.Mock()
        pyramid_config.register_service(svc, name='flag')
        return svc

    @pytest.fixture
    def pyramid_request(self, pyramid_request):
        pyramid_request.user = mock.Mock()
        return pyramid_request
nilq/baby-python
python
from flask import Flask
from flask_restful import Resource, Api
from flask_cors import CORS
from api.db_utils import *
from api.Culprit_api import *

app = Flask(__name__)  # create Flask instance
CORS(app)  # allow cross-origin requests from the browser client
api = Api(app)  # api router

# REST endpoint registration: one resource class per route.
api.add_resource(CaseSetup, '/case-setup')
api.add_resource(Session, '/key')
api.add_resource(PlayerName, '/name')
api.add_resource(Tokens, '/token')
api.add_resource(TokenRemove, '/token-remove')
api.add_resource(Poll, '/poll')
api.add_resource(PollExclude, '/poll-exclude')
api.add_resource(Accuse, '/accuse')
api.add_resource(CheckEndGame, '/check-end')
api.add_resource(EndGameData, '/end')
api.add_resource(LoadGame, '/load')

if __name__ == '__main__':
    print("Building Database")
    build_tables()
    print("Loading Data")
    load_game_data()
    print("Starting Flask")
    # BUG FIX: the original line was `app.run(debug=True),` -- the stray
    # trailing comma made the statement a one-element tuple expression.
    app.run(debug=True)  # starts Flask
nilq/baby-python
python
from ._sw import Controller
nilq/baby-python
python
# -*- coding: utf-8 -*-

from __future__ import (
    absolute_import, division, print_function, unicode_literals)

import json

try:
    from urllib import parse as urlparse  # Python 3
except ImportError:
    import urlparse  # Python 2

from operator import itemgetter

import yaml
from flask import jsonify, request, Blueprint
from builtins import *  # noqa  # pylint: disable=unused-import

# Map internal column type names to Swagger primitive/format names.
SWAGGER_TYPES = {
    'bool': 'bool',
    'int': 'integer',
    'dec': 'number',
    'float': 'float',
    'str': 'string',
    'date': 'date',
    'datetime': 'date-time',
}

# Swagger format names that are not themselves JSON types, mapped to the
# JSON type they serialize as (the format is kept separately).
JSON_TYPES = {
    'bool': 'boolean',
    'float': 'number',
    'binary': 'string',
    'date': 'string',
    'date-time': 'string',
}


def get_column_defn(column):
    """Return the Swagger property definition for a column dict.

    *column* must have a 'type' key naming one of SWAGGER_TYPES.
    """
    stype = SWAGGER_TYPES[column['type']]

    if stype in JSON_TYPES:
        # e.g. 'date' -> {'type': 'string', 'format': 'date'}
        column_defn = {'type': JSON_TYPES[stype], 'format': stype}
    else:
        column_defn = {'type': stype}

    return column_defn


class Swaggerify(object):
    """Flask extension that builds a Swagger 2.0 document from table
    descriptions and serves it at /swagger.json."""

    # NOTE(review): this dict is a CLASS attribute, so every Swaggerify
    # instance in the process mutates the same document -- confirm that
    # only one instance per process is intended.
    swagger = {
        'swagger': '2.0',
        'info': {},
        'tags': [],
        'schemes': ['https', 'http'],
        'basePath': '/',
        'consumes': ['application/json'],
        'produces': ['application/json'],
        'paths': {},
        'definitions': {}
    }

    def __init__(self, app=None, **kwargs):
        self.app = None

        if app is not None:
            self.init_app(app, **kwargs)

    def to_json(self, **kwargs):
        """Serialize the Swagger document as JSON."""
        return json.dumps(self.swagger, **kwargs)

    def to_yaml(self, **kwargs):
        """Serialize the Swagger document as YAML."""
        return yaml.dump(self.swagger, **kwargs)

    def __str__(self):
        return self.to_json(indent=4)

    @property
    def tags(self):
        # Tag names only, for fast membership tests in add_path().
        return set(tag['name'] for tag in self.swagger['tags'])

    @tags.setter
    def tags(self, value):
        self.swagger['tags'] = value

    @property
    def version(self):
        if 'version' in self.swagger['info']:
            return self.swagger['info']['version']

        return None

    @version.setter
    def version(self, value):
        self.swagger['info']['version'] = value

    @property
    def title(self):
        if 'title' in self.swagger['info']:
            return self.swagger['info']['title']

        return None

    @title.setter
    def title(self, value):
        self.swagger['info']['title'] = value

    @property
    def description(self):
        if 'description' in self.swagger['info']:
            return self.swagger['info']['description']

        return None

    @description.setter
    def description(self, value):
        self.swagger['info']['description'] = value

    def add_path(self, table, **kwargs):
        """Add a GET path entry for *table* to the Swagger document.

        Columns of kind 'param'/'type' become path parameters (appended to
        the URL); 'keyword'/'kwtype' become query parameters.
        """
        path = '{0}/{name}'.format(kwargs.get('url_prefix', ''), **table)
        parameters = []

        for column in table['columns']:
            if column['kind'] in {'param', 'type'}:
                param = {'in': 'path', 'required': True}
                path = '{0}/{{{name}}}'.format(path, **column)
            elif column['kind'] in {'keyword', 'kwtype'}:
                param = {'in': 'query'}

            if column['kind'] in {'param', 'keyword', 'type', 'kwtype'}:
                param.update(
                    {'name': column['name'], 'description': column['desc']})
                param.update(get_column_defn(column))
                parameters.append(param)

        self.swagger['paths'][path] = {}
        ref = '#/definitions/{rtype}'.format(**table)

        if table.get('desc'):
            self.swagger['paths'][path]['description'] = table['desc']

        if table.get('list'):
            # List endpoints wrap the items in an 'objects' array.
            _schema = {'type': 'array', 'items': {'$ref': ref}}
            schema = {'type': 'object', 'properties': {'objects': _schema}}
        else:
            schema = {'$ref': ref}

        self.swagger['paths'][path]['get'] = {
            'summary': table.get('desc', 'get {name}'.format(**table)),
            'tags': [table['tag']] if table.get('tag') else [],
            'parameters': parameters,
            'responses': {
                200: {
                    'description': '{name} result'.format(**table),
                    'schema': schema}}}

        if table.get('tag') and table['tag'] not in self.tags:
            tag = {
                'name': table['tag'],
                'description': '{tag} operations'.format(**table)}

            self.swagger['tags'].append(tag)

    def add_defn(self, table):
        """Add a definitions entry for *table*, skipping excluded columns
        and the implicit 'id' column."""
        def_value = {'type': 'object', 'properties': {}}

        for column in sorted(table['columns'], key=itemgetter('name')):
            excluded = column['name'] in self.exclude_columns

            if excluded or column['name'] == 'id':
                continue

            column_defn = get_column_defn(column)

            if column.get('desc'):
                column_defn['description'] = column['desc']

            def_value['properties'][column['name']] = column_defn

        self.swagger['definitions'][table['name']] = def_value

    def init_app(self, app, **kwargs):
        """Register the /swagger.json blueprint on *app* and apply the
        optional name/version/description keyword settings."""
        self.app = app
        swagger = Blueprint('swagger', __name__)

        if kwargs.get('name'):
            self.title = kwargs['name']

        if kwargs.get('version'):
            self.version = kwargs['version']

        if kwargs.get('description'):
            self.description = kwargs['description']

        @swagger.route('/swagger.json')
        def swagger_json():
            # Must have a request context
            self.swagger['host'] = urlparse.urlparse(request.url_root).netloc
            return jsonify(self.swagger)

        app.register_blueprint(swagger)

    def create_docs(self, table, **kwargs):
        """Generate both the definition and the path docs for *table*
        (either can be skipped via skip_defn/skip_path kwargs)."""
        self.exclude_columns = set(kwargs.get('exclude_columns', []))

        if not kwargs.get('skip_defn'):
            self.add_defn(table)

        if not kwargs.get('skip_path'):
            self.add_path(table, **kwargs)
nilq/baby-python
python
import yaml
import rclpy
import std_msgs.msg as std
import geometry_msgs.msg as geom

from msg_printer.std_yaml import YamlHeader


class _YamlMsgMixin:
    """Shared plumbing for the Yaml* message wrappers below.

    Every wrapper builds a plain-dict view of one geometry_msgs type in
    its ``__init__`` (stored in ``self._dict``) and declares a
    ``yaml_tag``.  This mixin supplies the common ``dict`` accessor and
    the PyYAML representer once instead of repeating both in every
    class.  ``yaml.YAMLObject``'s metaclass registers ``to_yaml`` as the
    representer automatically for each subclass that defines a
    ``yaml_tag``.
    """

    @property
    def dict(self):
        """Plain-dict representation of the wrapped message."""
        return self._dict

    @classmethod
    def to_yaml(cls, dumper, data):
        return dumper.represent_mapping(cls.yaml_tag, data.dict)


class YamlAccel(_YamlMsgMixin, yaml.YAMLObject):
    yaml_tag = u"!Accel"

    def __init__(self, a: geom.Accel):
        self._dict = {
            "linear": YamlVector3(a.linear).dict,
            "angular": YamlVector3(a.angular).dict,
        }


class YamlAccelStamped(_YamlMsgMixin, yaml.YAMLObject):
    yaml_tag = u"!AccelStamped"

    def __init__(self, a: geom.AccelStamped):
        self._dict = {
            "header": YamlHeader(a.header).dict,
            "accel": YamlAccel(a.accel).dict,
        }


class YamlAccelWithCovariance(_YamlMsgMixin, yaml.YAMLObject):
    yaml_tag = u"!AccelWithCovariance"

    def __init__(self, a: geom.AccelWithCovariance):
        self._dict = {
            "accel": YamlAccel(a.accel).dict,
            # Covariance is a fixed-size numeric array; coerce to plain
            # floats so PyYAML emits scalars rather than numpy objects.
            "covariance": [float(elem) for elem in a.covariance],
        }


class YamlAccelWithCovarianceStamped(_YamlMsgMixin, yaml.YAMLObject):
    yaml_tag = u"!AccelWithCovarianceStamped"

    def __init__(self, a: geom.AccelWithCovarianceStamped):
        self._dict = {
            "header": YamlHeader(a.header).dict,
            "accel": YamlAccelWithCovariance(a.accel).dict,
        }


class YamlInertia(_YamlMsgMixin, yaml.YAMLObject):
    yaml_tag = u"!Inertia"

    def __init__(self, i: geom.Inertia):
        self._dict = {
            "m": i.m,
            "com": YamlVector3(i.com).dict,
            "ixx": i.ixx,
            "ixy": i.ixy,
            "ixz": i.ixz,
            "iyy": i.iyy,
            "iyz": i.iyz,
            "izz": i.izz,
        }


class YamlInertiaStamped(_YamlMsgMixin, yaml.YAMLObject):
    yaml_tag = u"!InertiaStamped"

    def __init__(self, i: geom.InertiaStamped):
        self._dict = {
            "header": YamlHeader(i.header).dict,
            "inertia": YamlInertia(i.inertia).dict,
        }


class YamlPoint(_YamlMsgMixin, yaml.YAMLObject):
    yaml_tag = u"!Point"

    def __init__(self, p: geom.Point):
        self._dict = {"x": p.x, "y": p.y, "z": p.z}


class YamlPointStamped(_YamlMsgMixin, yaml.YAMLObject):
    yaml_tag = u"!PointStamped"

    def __init__(self, p: geom.PointStamped):
        self._dict = {
            "header": YamlHeader(p.header).dict,
            "point": YamlPoint(p.point).dict,
        }


class YamlPoint32(_YamlMsgMixin, yaml.YAMLObject):
    yaml_tag = u"!Point32"

    def __init__(self, p: geom.Point32):
        self._dict = {"x": p.x, "y": p.y, "z": p.z}


class YamlPolygon(_YamlMsgMixin, yaml.YAMLObject):
    yaml_tag = u"!Polygon"

    def __init__(self, p: geom.Polygon):
        self._dict = {"points": [YamlPoint32(point).dict for point in p.points]}


class YamlPolygonStamped(_YamlMsgMixin, yaml.YAMLObject):
    yaml_tag = u"!PolygonStamped"

    def __init__(self, p: geom.PolygonStamped):
        self._dict = {
            "header": YamlHeader(p.header).dict,
            "polygon": YamlPolygon(p.polygon).dict,
        }


class YamlPose(_YamlMsgMixin, yaml.YAMLObject):
    yaml_tag = u"!Pose"

    def __init__(self, p: geom.Pose):
        self._dict = {
            "position": YamlPoint(p.position).dict,
            "orientation": YamlQuaternion(p.orientation).dict,
        }


class YamlPose2D(_YamlMsgMixin, yaml.YAMLObject):
    yaml_tag = u"!Pose2D"

    def __init__(self, p: geom.Pose2D):
        self._dict = {"x": p.x, "y": p.y, "theta": p.theta}


class YamlPoseArray(_YamlMsgMixin, yaml.YAMLObject):
    yaml_tag = u"!PoseArray"

    def __init__(self, p: geom.PoseArray):
        self._dict = {
            "header": YamlHeader(p.header).dict,
            "poses": [YamlPose(pose).dict for pose in p.poses],
        }


class YamlPoseStamped(_YamlMsgMixin, yaml.YAMLObject):
    yaml_tag = u"!PoseStamped"

    def __init__(self, p: geom.PoseStamped):
        self._dict = {
            "header": YamlHeader(p.header).dict,
            "pose": YamlPose(p.pose).dict,
        }


class YamlPoseWithCovariance(_YamlMsgMixin, yaml.YAMLObject):
    yaml_tag = u"!PoseWithCovariance"

    def __init__(self, p: geom.PoseWithCovariance):
        self._dict = {
            "pose": YamlPose(p.pose).dict,
            "covariance": [float(elem) for elem in p.covariance],
        }


class YamlPoseWithCovarianceStamped(_YamlMsgMixin, yaml.YAMLObject):
    yaml_tag = u"!PoseWithCovarianceStamped"

    def __init__(self, p: geom.PoseWithCovarianceStamped):
        self._dict = {
            "header": YamlHeader(p.header).dict,
            "pose": YamlPoseWithCovariance(p.pose).dict,
        }


class YamlQuaternion(_YamlMsgMixin, yaml.YAMLObject):
    yaml_tag = u"!Quaternion"

    def __init__(self, q: geom.Quaternion):
        self._dict = {"x": q.x, "y": q.y, "z": q.z, "w": q.w}


class YamlQuaternionStamped(_YamlMsgMixin, yaml.YAMLObject):
    yaml_tag = u"!QuaternionStamped"

    def __init__(self, q: geom.QuaternionStamped):
        self._dict = {
            "header": YamlHeader(q.header).dict,
            "quaternion": YamlQuaternion(q.quaternion).dict,
        }


class YamlTransform(_YamlMsgMixin, yaml.YAMLObject):
    yaml_tag = u"!Transform"

    def __init__(self, t: geom.Transform):
        self._dict = {
            "translation": YamlVector3(t.translation).dict,
            "rotation": YamlQuaternion(t.rotation).dict,
        }


class YamlTransformStamped(_YamlMsgMixin, yaml.YAMLObject):
    yaml_tag = u"!TransformStamped"

    def __init__(self, t: geom.TransformStamped):
        self._dict = {
            "header": YamlHeader(t.header).dict,
            "child_frame_id": t.child_frame_id,
            "transform": YamlTransform(t.transform).dict,
        }


class YamlTwist(_YamlMsgMixin, yaml.YAMLObject):
    yaml_tag = u"!Twist"

    def __init__(self, t: geom.Twist):
        self._dict = {
            "linear": YamlVector3(t.linear).dict,
            "angular": YamlVector3(t.angular).dict,
        }


class YamlTwistStamped(_YamlMsgMixin, yaml.YAMLObject):
    yaml_tag = u"!TwistStamped"

    def __init__(self, t: geom.TwistStamped):
        self._dict = {
            "header": YamlHeader(t.header).dict,
            "twist": YamlTwist(t.twist).dict,
        }


class YamlTwistWithCovariance(_YamlMsgMixin, yaml.YAMLObject):
    # Bug fix: the tag was misspelled "!TwistWithCovariace" (missing the
    # "n").  Any consumer that parsed the old misspelled tag must be
    # updated in lockstep.
    yaml_tag = u"!TwistWithCovariance"

    def __init__(self, t: geom.TwistWithCovariance):
        self._dict = {
            "twist": YamlTwist(t.twist).dict,
            "covariance": [float(elem) for elem in t.covariance],
        }


class YamlTwistWithCovarianceStamped(_YamlMsgMixin, yaml.YAMLObject):
    yaml_tag = u"!TwistWithCovarianceStamped"

    def __init__(self, t: geom.TwistWithCovarianceStamped):
        self._dict = {
            "header": YamlHeader(t.header).dict,
            "twist": YamlTwistWithCovariance(t.twist).dict,
        }


class YamlVector3(_YamlMsgMixin, yaml.YAMLObject):
    yaml_tag = u"!Vector3"

    def __init__(self, v: geom.Vector3):
        self._dict = {"x": v.x, "y": v.y, "z": v.z}


class YamlVector3Stamped(_YamlMsgMixin, yaml.YAMLObject):
    yaml_tag = u"!Vector3Stamped"

    def __init__(self, v: geom.Vector3Stamped):
        self._dict = {
            "header": YamlHeader(v.header).dict,
            "vector": YamlVector3(v.vector).dict,
        }


class YamlWrench(_YamlMsgMixin, yaml.YAMLObject):
    yaml_tag = u"!Wrench"

    def __init__(self, w: geom.Wrench):
        self._dict = {
            "force": YamlVector3(w.force).dict,
            "torque": YamlVector3(w.torque).dict,
        }


class YamlWrenchStamped(_YamlMsgMixin, yaml.YAMLObject):
    yaml_tag = u"!WrenchStamped"

    def __init__(self, w: geom.WrenchStamped):
        self._dict = {
            "header": YamlHeader(w.header).dict,
            "wrench": YamlWrench(w.wrench).dict,
        }


def main(args=None):
    """Smoke test: dump a default-constructed WrenchStamped to the log."""
    rclpy.init(args=args)
    node = rclpy.create_node("test")
    test_msg = geom.WrenchStamped()
    node.get_logger().info(yaml.dump(YamlWrenchStamped(test_msg), sort_keys=False))
    node.destroy_node()
    rclpy.shutdown()


if __name__ == "__main__":
    main()
nilq/baby-python
python
import os import sys import time import argparse from naoqi import ALProxy import robot_behavior_pb2 from os.path import dirname from os.path import abspath def register_motions(name,parameterServerAddress,motions): behaviorModule = robot_behavior_pb2.RobotBehaviorModule() behaviorModule.name = name for motion in motions: desc = behaviorModule.behaviors.add() desc.name = motion desc.type = robot_behavior_pb2.BehaviorDescription.Blocking desc.state = robot_behavior_pb2.BehaviorDescription.Idle print behaviorModule register_motions("hello","",["move","sit","stand"]) currdir = dirname(__file__) parent = abspath(os.path.join(currdir,os.pardir)) i = 0 bin = os.environ["DEV_SDK_ROOT"] print bin parent = currdir while i<4: #parent = abspath(os.path.join(parent,os.pardir)) if not "scripts" in parent: i=i+1 continue else: break i=i+1 print currdir print parent for dirname, dirnames, filenames in os.walk(parent): # print path to all subdirectories first. for subdirname in dirnames: print os.path.join(dirname, subdirname) sys.path.append(currdir) sys.path.append(parent)
nilq/baby-python
python
#!/usr/bin/env python domain_name = os.environ['DOMAIN_NAME'] admin_server_listen_address = os.environ['ADMIN_SERVER_LISTEN_ADDRESS'] admin_server_listen_port = os.environ['ADMIN_SERVER_LISTEN_PORT'] admin_username = os.environ['ADMIN_USERNAME'] admin_password = os.environ['ADMIN_PASSWORD'] ###################################################################### def set_domain_web_app(_domain_name): cd('/WebAppContainer/' + _domain_name) # cmo.setReloginEnabled(false) # cmo.setAllowAllRoles(false) # cmo.setFilterDispatchedRequestsEnabled(false) # cmo.setOverloadProtectionEnabled(false) cmo.setXPoweredByHeaderLevel('NONE') # cmo.setMimeMappingFile('./config/mimemappings.properties') # cmo.setOptimisticSerialization(false) # cmo.setRtexprvalueJspParamName(false) # cmo.setClientCertProxyEnabled(false) # cmo.setHttpTraceSupportEnabled(false) # cmo.setWeblogicPluginEnabled(false) # cmo.setAuthCookieEnabled(true) # cmo.setChangeSessionIDOnAuthentication(true) # cmo.setWAPEnabled(false) # cmo.setPostTimeoutSecs(30) # cmo.setMaxPostTimeSecs(-1) # cmo.setMaxPostSize(-1) # cmo.setWorkContextPropagationEnabled(true) # cmo.setP3PHeaderValue('') # cmo.setJSPCompilerBackwardsCompatible(false) # cmo.setShowArchivedRealPathEnabled(false) ###################################################################### admin_server_url = 't3://' + admin_server_listen_address + ':' + admin_server_listen_port connect(admin_username, admin_password, admin_server_url) edit() startEdit() set_domain_web_app(domain_name) save() activate() exit()
nilq/baby-python
python
from django.db import models import json # Create your models here. class weather(models.Model): ''' 溫度、最高溫、最低溫、露點溫度、相對濕度、最小相對濕度、降雨量、最大十分鐘降雨量、最大六十分鐘降雨量 ''' date = models.DateTimeField() temperature = models.FloatField() relativeHumidity = models.FloatField() rainfall = models.FloatField() maxTenMinuteRainFall = models.FloatField() maxSixtyMinuteRainFall = models.FloatField() class hole(models.Model): ''' 區、經度、緯度、時間、原因、住址、淹水潛勢 ''' town = models.CharField(max_length=5) positionLon = models.FloatField() positionLat = models.FloatField() occurTime = models.DateTimeField() reason = models.CharField(max_length=50,blank=True) address = models.CharField(max_length=100,blank=True) flood = models.IntegerField() class examination(models.Model): ''' 巡視結果 ''' positionLon = models.FloatField() positionLat = models.FloatField() examinationTime = models.DateTimeField(auto_now=True) photoURL = models.CharField(max_length=100,blank=True) class modelResult(models.Model): ''' 儲存model, Astar預測結果 暫時忽略polyList ''' date = models.DateTimeField() route = models.CharField(max_length=200) def set_route(self,data): self.route = json.dumps(data) def get_route(self): return json.loads(self.route)
nilq/baby-python
python
class Solution: #c1 is always opening type def counter_part(self, c1: str, c2:str)->bool: if c1 == '(' and c2 == ')': return True if c1 == '{' and c2 == '}': return True if c1 == '[' and c2 == ']': return True return False def isValid(self, s: str) -> bool: stack=[] opening_type=['(', '{', '['] closing_type=[')', '}', ']'] for x in s: if x in opening_type: print("here") stack.append(x) if x in closing_type: try: c=stack.pop() if self.counter_part(c, x): #we good, continue with next char in i/p string print("here2", c, x) continue else: #we dont have a matched print("here3",x, c) return False except IndexError:#we found a closing type, but nothing left in stack return False try: stack.pop() return False except IndexError: return True
nilq/baby-python
python
from __future__ import unicode_literals __VERSION__ = '0.1.1'
nilq/baby-python
python
import os ############################################################################### def create_dir(path): if not os.path.isdir(path): os.makedirs(path) ############################################################################### # SAVE A DICTIONARY dict_ = {} import json with open(os.path.join('name.json'), 'w') as fp: json.dump(dict_, fp) fp.close() ############################################################################### # GET LINES OF A TEXT FILE def readText(file_path): with open(file_path) as f: lines = f.readlines() f.close() return lines
nilq/baby-python
python
# KicadModTree is free software: you can redistribute it and/or
# modify it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# KicadModTree is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with kicad-footprint-generator. If not, see < http://www.gnu.org/licenses/ >.
#
# (C) 2016-2018 by Thomas Pointhuber, <thomas.pointhuber@gmx.at>
# (C) 2018 by Rene Poeschl, github @poeschlr

import warnings

from KicadModTree.Vector import Vector2D
from KicadModTree.nodes.Node import Node


class PolygonPoints(object):
    r"""Representation of multiple points for creating polygons

    :Keyword Arguments:
        * *nodes* (``list(Point)``) --
          2D points describing the "polygon"
        * *polygone* (``list(Point)``) --
          alternative naming for the nodes parameter for backwards compatibility.
        * *x_mirror* (``[int, float](mirror offset)``) --
          mirror x direction around offset "point"
        * *y_mirror* (``[int, float](mirror offset)``) --
          mirror y direction around offset "point"

    :Example:
    >>> from KicadModTree import *
    >>> PolyPoint([(0, 0),(1, 0)])
    >>> PolyPoint([{'x': 0, 'y':0}, {'x': 1, 'y':0}])
    """

    def __init__(self, **kwargs):
        self._initMirror(**kwargs)
        self._initNodes(**kwargs)

    def _initNodes(self, **kwargs):
        # Accept either 'nodes' (preferred) or the deprecated 'polygone'
        # spelling, but never both at once.
        self.nodes = []
        if 'nodes' in kwargs:
            for n in kwargs['nodes']:
                self.nodes.append(Vector2D(n))
            if 'polygone' in kwargs:
                raise KeyError('Use of "nodes" and "polygone" parameter at the same time is not supported.')
        elif 'polygone' in kwargs:
            warnings.warn(
                "polygone argument is deprecated, use nodes instead",
                DeprecationWarning
            )
            for n in kwargs['polygone']:
                self.nodes.append(Vector2D(n))
        else:
            raise KeyError('Either "nodes" or "polygone" parameter is required for creating a PolyPoint instance.')

        # Apply configured mirroring: reflect each point around the
        # mirror offset (p' = 2*offset - p).
        for point in self.nodes:
            if self.mirror[0] is not None:
                point.x = 2 * self.mirror[0] - point.x
            if self.mirror[1] is not None:
                point.y = 2 * self.mirror[1] - point.y

    def _initMirror(self, **kwargs):
        # mirror[0]/mirror[1] hold the x/y mirror offsets (None = off).
        self.mirror = [None, None]
        if 'x_mirror' in kwargs and type(kwargs['x_mirror']) in [float, int]:
            self.mirror[0] = kwargs['x_mirror']
        if 'y_mirror' in kwargs and type(kwargs['y_mirror']) in [float, int]:
            self.mirror[1] = kwargs['y_mirror']

    def calculateBoundingBox(self):
        r""" Calculate the axis-aligned bounding box of the points

        :return: dict with 'min' and 'max' Vector2D corners.
        """
        # Bug fix: the previous implementation bound both "min" and
        # "max" to the SAME Vector2D object, shadowing the builtins of
        # the same names and then calling them as functions (TypeError).
        # It also called self.getRealPosition(), which this class does
        # not define, and invoked Node.calculateBoundingBox unbound on a
        # dict.  Compute the box directly from the nodes instead.
        bb_min = Vector2D(self.nodes[0])
        bb_max = Vector2D(self.nodes[0])
        for n in self.nodes:
            bb_min.x = min(bb_min.x, n.x)
            bb_min.y = min(bb_min.y, n.y)
            bb_max.x = max(bb_max.x, n.x)
            bb_max.y = max(bb_max.y, n.y)
        return {'min': bb_min, 'max': bb_max}

    def findNearestPoints(self, other):
        r""" Find the nearest points for two polygons

        Find the two points for both polygons that are nearest to each other.

        :param other: the polygon points of the other polygon
        :return: a tuple with the indexes of the two points
            (point in self, point in other)
        """
        min_distance = self[0].distance_to(other[0])
        pi = 0
        pj = 0
        for i in range(len(self)):
            for j in range(len(other)):
                d = self[i].distance_to(other[j])
                if d < min_distance:
                    pi = i
                    pj = j
                    min_distance = d

        return (pi, pj)

    def getPoints(self):
        r""" get the points contained within self

        :return: the array of points contained within this instance
        """
        return self.nodes

    def cut(self, other):
        r""" Cut other polygon points from self

        As kicad has no native support for cuting one polygon from the other,
        the cut is done by connecting the nearest points of the two polygons
        with two lines on top of each other.

        This function assumes that the other polygon is fully within this one.
        It also assumes that connecting the two nearest points creates a valid
        polygon. (There are no geometry checks)

        :param other: the polygon points that are cut from this polygon
        """
        warnings.warn(
            "No geometry checks are implement for cutting polygons.\n"
            "Make sure the second polygon is fully inside the main polygon\n"
            "Check resulting polygon carefully.",
            Warning
        )
        # Splice the other polygon's point ring into this one at the
        # nearest-point pair, duplicating the junction points so the
        # connecting edge is traversed in both directions.
        idx_self, idx_other = self.findNearestPoints(other)
        self.nodes.insert(idx_self+1, self[idx_self])
        for i in range(len(other)):
            self.nodes.insert(idx_self+1, other[(i+idx_other) % len(other)])
        self.nodes.insert(idx_self+1, other[idx_other])

    def rotate(self, angle, origin=(0, 0), use_degrees=True):
        r""" Rotate points around given origin

        :params:
            * *angle* (``float``)
                rotation angle
            * *origin* (``Vector2D``)
                origin point for the rotation. default: (0, 0)
            * *use_degrees* (``boolean``)
                rotation angle is given in degrees. default:True
        """
        for p in self.nodes:
            p.rotate(angle=angle, origin=origin, use_degrees=use_degrees)
        return self

    def translate(self, distance_vector):
        r""" Translate points

        :params:
            * *distance_vector* (``Vector2D``)
                2D vector defining by how much and in what direction to translate.
        """
        for p in self.nodes:
            p += distance_vector
        return self

    def __copy__(self):
        # Note: shallow -- the copy shares the Vector2D node objects.
        return PolygonPoints(nodes=self.nodes)

    def __iter__(self):
        for n in self.nodes:
            yield n

    def __getitem__(self, idx):
        return self.nodes[idx]

    def __len__(self):
        return len(self.nodes)
nilq/baby-python
python
import sys from antlr4 import * import graphGenerator from MyVisitor import MyVisitor from PythonLexer import PythonLexer from PythonParser import PythonParser import os def testefunc(graph, code, function): file = open("testfile.txt", "w") file.write(code) file.close() input=FileStream("testfile.txt") if os.path.isfile("textfile.txt"): os.remove("testfile.txt") lexer = PythonLexer(input) stream = CommonTokenStream(lexer) parser = PythonParser(stream) tree = parser.root() visitor = MyVisitor() visitor.visit(tree) src = "" if graph == "FCG": src = graphGenerator.callGraph(visitor.getCall()) elif graph == "CFG": src = graphGenerator.controlGraph(visitor.getControl(), function) elif graph == "DFG": src = graphGenerator.dataGraph(visitor.getData(), function) return src def getFunctions(code): file = open("testfile.txt", "w") file.write(code) file.close() input=FileStream("testfile.txt") os.remove("testfile.txt") lexer = PythonLexer(input) stream = CommonTokenStream(lexer) parser = PythonParser(stream) tree = parser.root() visitor = MyVisitor() visitor.visit(tree) return visitor.getListFunctions()
nilq/baby-python
python
#!/usr/bin/python3 # Modifies the assembly output of compilation for control_mem_dtlb_store # to provide necessary pattern for successful TLB/store attack on gem5 # See parse_tlb_logs.py for more details # generated with gcc version 9.3.0 (Ubuntu 9.3.0-17ubuntu1~20.04) # command: gcc src/control_mem_dtlb_store -S -o bin/control_mem_dtlb_store_pre_mod.s pre_transmit = ['movl %eax, %eax\n', '\tcmpq %rax, -2104(%rbp)\n', ] pre_transmit_mod = ['movl %eax, %eax\n', '\tcmpq %rax, -2104(%rbp)\n', '\tmovq -2104(%rbp), %rax\n', ] post_transmit = ['leaq array1(%rip), %rdx\n', '\tmovq -2104(%rbp), %rax\n', ] post_transmit_mod = ['leaq array1(%rip), %rdx\n',] with open('bin/control_mem_dtlb_store_pre_mod.s', 'r') as asm: data = asm.read().replace(''.join(pre_transmit), ''.join(pre_transmit_mod)).replace( ''.join(post_transmit), ''.join(post_transmit_mod) ) asm.close() with open('bin/control_mem_dtlb_store_post_mod.s', 'w') as asm: asm.write(data) asm.close()
nilq/baby-python
python
# coding=utf-8 # -------------------------------------------------------------------------- # Copyright (c) Microsoft Corporation. All rights reserved. # Licensed under the MIT License. See License.txt in the project root for # license information. # # Code generated by Microsoft (R) AutoRest Code Generator. # Changes may cause incorrect behavior and will be lost if the code is # regenerated. # -------------------------------------------------------------------------- from .subscriptions_operations import SubscriptionsOperations from .tenants_operations import TenantsOperations from .subscription_operations import SubscriptionOperations from .operations import Operations from .alias_operations import AliasOperations from .subscription_policy_operations import SubscriptionPolicyOperations from .billing_account_operations import BillingAccountOperations __all__ = [ 'SubscriptionsOperations', 'TenantsOperations', 'SubscriptionOperations', 'Operations', 'AliasOperations', 'SubscriptionPolicyOperations', 'BillingAccountOperations', ]
nilq/baby-python
python
#-*- encoding: utf-8 -*- import redis r=redis.Redis(host='localhost',db=0) p=r.pubsub() p.subscribe('test') for message in p.listen(): print(message)
nilq/baby-python
python
# Holds permission data for a private race room def get_permission_info(server, race_private_info): permission_info = PermissionInfo() for admin_name in race_private_info.admin_names: for role in server.roles: if role.name.lower() == admin_name.lower(): permission_info.admin_roles.append(role) for member in server.members: if member.name.lower() == admin_name.lower(): permission_info.admins.append(member) for racer_name in race_private_info.racer_names: for member in server.members: if member.name.lower() == racer_name.lower(): permission_info.racers.append(member) return permission_info class PermissionInfo(object): def __init__(self): self.admins = [] self.admin_roles = [] self.racers = [] def is_admin(self, member): for role in member.roles: if role in self.admin_roles: return True return member in self.admins
nilq/baby-python
python
""" loader module provides actual implementation of the file savers. .. warning:: This is an internal implementation. API may change without notice in the future, so you should use :class:`word_embedding_loader.word_embedding.WordEmbedding` """ __all__ = ["glove", "word2vec_bin", "word2vec_text"] from word_embedding_loader.saver import glove, word2vec_bin, word2vec_text
nilq/baby-python
python
# Stimulator class # Imports #from StimulationSignal import StimulationSignal import crccheck.checksum import numpy as np import serial import time import struct # channel_stim: list of active channels # freq: main stimulation frequency in Hz (NOTE: this overrides ts1) # ts1: main stimulation period in ms (1-1024.5 ms in 0.5 steps) # ts2: inter-pulse time in ms (1.5-17 ms in 0.5 steps) # Notes: # - Revoir les principes d'orienté objet (encapsulation) # - Indentation : 4 espaces class Stimulator: # Class variables VERSION = 0x01 INIT_REPETITION_TIME = 0.5 START_BYTE = 0xF0 STOP_BYTE = 0x0F STUFFING_BYTE = 0x81 STUFFING_KEY = 0x55 MAX_PACKET_BYTES = 69 BAUD_RATE = 460800 TYPES = {'Init': 0x01, 'InitAck': 0x02, 'UnknownCommand': 0x03, 'Watchdog': 0x04, 'GetStimulationMode': 0x0A, 'GetStimulationModeAck': 0x0B, 'InitChannelListMode': 0x1E, 'InitChannelListModeAck': 0x1F, 'StartChannelListMode': 0x20, 'StartChannelListModeAck': 0x21, 'StopChannelListMode': 0x22, 'StopChannelListModeAck': 0x23, 'SinglePulse': 0x24, 'SinglePulseAck': 0x25, 'StimulationError': 0x26} # Constuctor def __init__(self, StimulationSignal, port_path): #Changer ts1 pour 1/StimulationSignal.frequency # ---- StimulationSignal = Contient les infos d'amplitude, de fréquence, de durée d'impulsion et le nom du muscle pour chaque électrode ---- # # ---- ts1 = Main stimulation interval ---- # # ---- ts2 = Inter pulse interval (use only if use duplet or triplet) ---- # # ---- Mode = Single pulse, duplet or triplet ---- # # ---- port = open port from port_path ---- # # ---- packet_count = initialise the packet count ---- # self.matrice = StimulationSignal ''' self.electrode_number = 0 idx = [] print(StimulationSignal) for i in range(0,8): if StimulationSignal[0][i]==0: idx.append(i) else: self.electrode_number += (2)**(i) StimulationSignal = np.delete(StimulationSignal, idx, 1) self.idx = idx ''' ''' self.amplitude = [] self.ts1 = [] self.frequency = [] self.pulse_width = [] self.muscle = [] self.ts2 = [] 
for i in range (8-len(idx)): self.amplitude.append(StimulationSignal[0][i]) self.ts1.append(int((1000/StimulationSignal[1][i] - 1)/0.5)) #à vérifier si bon indice pour fréquence self.frequency.append(StimulationSignal[1][i]) self.pulse_width.append(StimulationSignal[2][i]) #à vérifier si bon indice self.muscle.append(StimulationSignal[3][i]) self.ts2 = ts2 ''' # self.set_StimulationSignal(StimulationSignal) self.port = serial.Serial(port_path, self.BAUD_RATE, bytesize=serial.EIGHTBITS, parity=serial.PARITY_EVEN, stopbits=serial.STOPBITS_ONE, timeout=0.1) self.packet_count = 0 #self.initialise_connection() #self.stimulation_220_10() """ while True: received_packet= self.read_packets() self.init_ACK(received_packet) time.sleep(self.INIT_REPETITION_TIME) return""" def initialise_connection(self): while (1): if (self.port.in_waiting>0): self.calling_ACK() break def stimulation_220_10(self): self.set_stim_biceps_DeltPost() self.set_StimulationSignal(self.StimulationSignal) #starttime = time.time() #timer = 0 self.send_packet('InitChannelListMode', self.packet_count) #À MODIFIER POUR AVOIR ANGLES À LA PLACE ### while (1) ### self.send_packet('StartChannelListMode', self.packet_count) ### AJOUTER BREAK DANS ERGOCYCLE #while timer < 5.00: #timer = round(time.time()-starttime,2) self.send_packet('StartChannelListMode', self.packet_count) #time.sleep(1/self.frequency[0]) #if timer >=(5.00-(1/self.frequency[0])): # break def stimulation_20_180(self): self.set_stim_triceps_DeltAnt() self.set_StimulationSignal(self.StimulationSignal) starttime = time.time() timer = 0 self.send_packet('InitChannelListMode', self.packet_count) #À MODIFIER POUR AVOIR ANGLES À LA PLACE ### while (1) ### self.send_packet('StartChannelListMode', self.packet_count) ### AJOUTER BREAK DANS ERGOCYCLE self.send_packet('StartChannelListMode', self.packet_count) def set_matrice(self, Signal): self.matrice = Signal # Function to modify the stimulation's parameters def 
set_StimulationSignal(self,StimulationSignal): self.amplitude = [] self.ts1 = [] self.frequency = [] self.pulse_width = [] self.muscle = [] for i in range (8-len(self.idx)): self.amplitude.append(StimulationSignal[0][i]) self.ts1.append(int((1000/StimulationSignal[1][i] - 1)/0.5)) #à vérifier si bon indice pour fréquence self.frequency.append(StimulationSignal[1][i]) self.pulse_width.append(StimulationSignal[2][i]) #à vérifier si bon indice self.muscle.append(StimulationSignal[3][i]) def set_stim_biceps_DeltPost(self): idx = [] self.electrode_number = 0 biceps_DeltPost = np.copy(self.matrice) for j in range(np.shape(self.matrice)[1]): if(self.matrice[3][j] == 2 or self.matrice[3][j]== 4): biceps_DeltPost[:,j]=0 for i in range(0,8): if biceps_DeltPost[0][i]==0: idx.append(i) else: self.electrode_number += (2)**(i) biceps_DeltPost = np.delete(biceps_DeltPost, idx, 1) print(self.electrode_number) self.StimulationSignal = biceps_DeltPost self.idx = idx def set_stim_triceps_DeltAnt(self): idx = [] triceps_DeltAnt = np.copy(self.matrice) self.electrode_number = 0 for j in range(np.shape(self.matrice)[1]): if(self.matrice[3][j] == 1 or self.matrice[3][j]== 3): triceps_DeltAnt[:,j]=0 for i in range(0,8): if triceps_DeltAnt[0][i]==0: idx.append(i) else: self.electrode_number += (2)**(i) triceps_DeltAnt = np.delete(triceps_DeltAnt, idx, 1) self.StimulationSignal = triceps_DeltAnt self.idx = idx # Function to modify the time between pulses if doublet or triplet are chose def set_t2(self,t2): self.t2 = t2 # "byte stuffing", i.e, xoring with STUFFING_KEY def stuff_byte(self,byte): return ((byte & ~Stimulator.STUFFING_KEY) | (~byte & Stimulator.STUFFING_KEY)) #return bytes(a ^ b for (a, b) in zip(byte, bitarray(self.STUFFING_KEY))) # Construction of each packet def packet_construction(self,packet_count, packet_type, *packet_data): start_byte = self.START_BYTE stop_byte = self.STOP_BYTE self.packet_type = packet_type packet_command = self.TYPES[packet_type] packet_payload = 
[packet_count, packet_command] data_length = 0 if packet_data!= None: packet_data = list(packet_data) for i in range (0, len(packet_data)): if packet_data[i] == 240 or packet_data[i] == 15: packet_data[i] = self.stuff_byte(packet_data[i]) packet_payload += packet_data checksum = crccheck.crc.Crc8.calc(packet_payload) checksum = self.stuff_byte(checksum) data_length = self.stuff_byte(len(packet_payload)) packet_lead = [start_byte, self.STUFFING_BYTE, checksum, self.STUFFING_BYTE, data_length] packet_end = [stop_byte] packet = packet_lead + packet_payload + packet_end return b''.join([byte.to_bytes(1, 'little') for byte in packet]) # Closes port def close_port(self): self.port.close() # Send packets def send_packet(self, cmd, packet_number): if cmd == 'InitAck': self.port.write(self.init_ACK(packet_number)) elif cmd == 'Watchdog': self.port.write(self.watchdog()) elif cmd == 'GetStimulationMode': self.port.write(self.getMode()) elif cmd == 'InitChannelListMode': self.port.write(self.init_stimulation()) #quoi faire avec channel_execution elif cmd == 'StartChannelListMode': self.port.write(self.start_stimulation()) elif cmd == 'StopChannelListMode': self.port.write(self.stop_stimulation()) # Update packet count self.packet_count = (self.packet_count + 1) % 256 # Receives packet # Read the received packet def read_packets(self): # Read port stream packet = self.port.readline() # If it is a start byte, collect packet if packet[0] == self.START_BYTE: # Collect header bytes ''' for i in range(4): packet += self.port.read() # Collect data bytes datalength = packet[-1] for i in range(datalength): packet += self.port.read() # Collect stop byte packet += self.port.read() # Call the right ACK function ''' return packet else: # Return empty string to avoid hanging return b'' # Creates packet for every command part of dictionary TYPES def calling_ACK(self): #Call the Ack function packet = self.read_packets() if (len(packet)>1): if(int(packet[6]) == Stimulator.TYPES['Init'] and 
int(packet[7]) == self.VERSION): return Stimulator.send_packet(self, 'InitAck', int(packet[5])) elif(str(packet[6]) == Stimulator.TYPES['UnknownCommand']): return Stimulator.unknown_cmd() elif(str(packet[6]) == Stimulator.TYPES['GetStimulationModeAck']): return Stimulator.getmodeACK(packet) elif(str(packet[6]) == Stimulator.TYPES['InitChannelListModeAck']): return Stimulator.init_stimulation_ACK(packet) elif(str(packet[6]) == Stimulator.TYPES['StartChannelListMode']): return Stimulator.start_stimulation_ACK(packet) elif(str(packet[6]) == Stimulator.TYPES['StopChannelListModeAck']): return Stimulator.stop_stimulation_ACK(packet) elif(str(packet[6]) == Stimulator.TYPES['StartChannelListModeAck']): return Stimulator.error_stimulation_ACK(packet) # Establishes connexion acknowlege def init(self, packet_count): packet = self.packet_construction(packet_count,'Init', self.VERSION ) return packet # Establishes connexion acknowlege def init_ACK(self, packet_count): packet = self.packet_construction(packet_count, 'InitAck', 0) return packet # Sends message for unknown command def unknown_cmd(self, packet): return str(packet[6]) # Error signal (inactivity ends connexion) VERIFY IF IT HAVE TO BE SEND EVERY <1200MS OR SEND IF ONLY NOTHING SEND AFTER 120MS def watchdog(self): packet = self.packet_construction(self.packet_count,'Watchdog') return packet # Asking to know which mode has been chosen def getMode(self): packet = self.packet_construction(self.packet_count, 'GetStimulationMode') return packet # Sent by RehaStim2 in response to getMode def getModeACK(self, packet): if(str(packet[6] == '0')): if(str(packet[7]) == '0'): return 'Start Mode' elif(str(packet[7]) == '1'): return 'Stimulation initialized' elif(str(packet[7]) == '2'): return 'Stimulation started' elif(str(packet[6]) == '-1'): return 'Transfer error' elif(str(packet[6]) == '-8'): return 'Busy error' #add a timer # Initialises stimulation def init_stimulation(self): MSB, LSB = self.MSB_LSB_main_stim() packet = 
self.packet_construction(self.packet_count,'InitChannelListMode', 0, self.electrode_number, 0, 2, MSB, LSB, 0 ) # Channel est 1,2,4,8,16,32,64,128 pour chaque et l'addition donne l'activation de plusieurs channels return packet # Sent by RehaStim2 in response to init_stimulation def init_stimulation_ACK(self, packet): if(str(packet[6]) == '0'): return 'Stimulation initialized' elif(str(packet[6]) == '-1'): return 'Transfer error' elif(str(packet[6]) == '-2'): return 'Parameter error' #Change for please change parameters? elif(str(packet[6]) == '-3'): return 'Wrong mode error' elif(str(packet[6]) == '-8'): return 'Busy error' # Add a timer? # Starts stimulation and modifies it def start_stimulation(self): #VA PROBABLEMENT CHANGER PULSE_WIDTH ET AMPLITUDE SELON COMMENT RÉCUPÈRE DONNÉES #if len(self.pulse_width) == 1: MSB_matrix =[] LSB_matrix =[] for i in range (len(self.amplitude)): MSB, LSB = self.MSB_LSB_pulse_stim(self.pulse_width[i]) MSB_matrix.append(MSB) LSB_matrix.append(LSB) if (len(self.amplitude)) ==1: packet = self.packet_construction(self.packet_count,'StartChannelListMode', 0, int(MSB_matrix[0]), int(LSB_matrix[0]), int(self.amplitude[0])) if len(self.amplitude) == 2: packet = self.packet_construction(self.packet_count,'StartChannelListMode', 0, int(MSB_matrix[0]), int(LSB_matrix[0]), int(self.amplitude[0]), 0, int(MSB_matrix[1]), int(LSB_matrix[1]), int(self.amplitude[1])) elif len(self.amplitude) == 3: packet = self.packet_construction(self.packet_count,'StartChannelListMode', 0, int(MSB_matrix[0]), int(LSB_matrix[0]), int(self.amplitude[0]), 0, int(MSB_matrix[1]), int(LSB_matrix[1]), int(self.amplitude[1]), 0, int(MSB_matrix[2]), int(LSB_matrix[2]), int(self.amplitude[2])) elif len(self.amplitude) == 4: packet = self.packet_construction(self.packet_count,'StartChannelListMode', 0, int(MSB_matrix[0]), int(LSB_matrix[0]), int(self.amplitude[0]), 0, int(MSB_matrix[1]), int(LSB_matrix[1]), int(self.amplitude[1]), 0, int(MSB_matrix[2]), 
int(LSB_matrix[2]), int(self.amplitude[2]), 0, int(MSB_matrix[3]), int(LSB_matrix[3]), int(self.amplitude[3])) elif len(self.amplitude) == 5: packet = self.packet_construction(self.packet_count,'StartChannelListMode', 0, int(MSB_matrix[0]), int(LSB_matrix[0]), int(self.amplitude[0]), 0, int(MSB_matrix[1]), int(LSB_matrix[1]), int(self.amplitude[1]), 0, int(MSB_matrix[2]), int(LSB_matrix[2]), int(self.amplitude[2]), 0, int(MSB_matrix[3]), int(LSB_matrix[3]), int(self.amplitude[3]), 0, int(MSB_matrix[4]), int(LSB_matrix[4]), int(self.amplitude[4])) elif len(self.amplitude) == 6: packet = self.packet_construction(self.packet_count,'StartChannelListMode', 0, int(MSB_matrix[0]), int(LSB_matrix[0]), int(self.amplitude[0]), 0, int(MSB_matrix[1]), int(LSB_matrix[1]), int(self.amplitude[1]), 0, int(MSB_matrix[2]), int(LSB_matrix[2]), int(self.amplitude[2]), 0, int(MSB_matrix[3]), int(LSB_matrix[3]), int(self.amplitude[3]), 0, int(MSB_matrix[4]), int(LSB_matrix[4]), int(self.amplitude[4]), 0, int(MSB_matrix[5]), int(LSB_matrix[5]), int(self.amplitude[5])) elif len(self.amplitude) == 7: packet = self.packet_construction(self.packet_count,'StartChannelListMode', 0, int(MSB_matrix[0]), int(LSB_matrix[0]), int(self.amplitude[0]), 0, int(MSB_matrix[1]), int(LSB_matrix[1]), int(self.amplitude[1]), 0, int(MSB_matrix[2]), int(LSB_matrix[2]), int(self.amplitude[2]), 0, int(MSB_matrix[3]), int(LSB_matrix[3]), int(self.amplitude[3]), 0, int(MSB_matrix[4]), int(LSB_matrix[4]), int(self.amplitude[4]), 0, int(MSB_matrix[5]), int(LSB_matrix[5]), int(self.amplitude[5]), 0, int(MSB_matrix[6]), int(LSB_matrix[6]), int(self.amplitude[6])) elif len(self.amplitude) == 8: packet = self.packet_construction(self.packet_count,'StartChannelListMode', 0, int(MSB_matrix[0]), int(LSB_matrix[0]), int(self.amplitude[0]), 0, int(MSB_matrix[1]), int(LSB_matrix[1]), int(self.amplitude[1]), 0, int(MSB_matrix[2]), int(LSB_matrix[2]), int(self.amplitude[2]), 0, int(MSB_matrix[3]), int(LSB_matrix[3]), 
int(self.amplitude[3]), 0, int(MSB_matrix[4]), int(LSB_matrix[4]), int(self.amplitude[4]), 0, int(MSB_matrix[5]), int(LSB_matrix[5]), int(self.amplitude[5]), 0, int(MSB_matrix[6]), int(LSB_matrix[6]), int(self.amplitude[6]), 0, int(MSB_matrix[7]), int(LSB_matrix[7]), int(self.amplitude[7])) return packet # Sent by RehaStim2 in response to start_stimulation def start_stimulation_ACK(self, packet): if(str(packet[6]) == '0'): return ' Stimulation started' if(str(packet[6]) == '-1'): return ' Transfer error' if(str(packet[6]) == '-2'): return ' Parameter error' if(str(packet[6]) == '-3'): return ' Wrong mode error' if(str(packet[6]) == '-8'): return ' Busy error' # Stops stimulation def stop_stimulation(self): packet = self.packet_construction(self.packet_count,'StopChannelListMode') return packet # Sent by RehaStim2 in response to stop_stimulation def stop_stimulation_ACK(self, packet): if(str(packet[6]) == '0'): return ' Stimulation stopped' elif(str(packet[6]) == '-1'): return ' Transfer error' def stimulation_error(self, packet): if(str(packet[6]) == '-1'): return ' Emergency switch activated/not connected' #mettre fonction qui affiche message sur interface elif(str(packet[6]) == '-2'): return ' Electrode error' elif(str(packet[6]) == '-3'): return 'Stimulation module error' # Function to command the stimulator with pre-defined commands def throw_command(self, command): print("(Stimulator) TODO : call the '" + command + "' command") #if command type == hexadécimal of certain command, throw associated function. #fonction qui lit le paquet reçu par rehastim et qui l'associe à une commande. 
#command = {'Init':0x01}

def MSB_LSB_main_stim(self):
    """Split the main-stimulation interval self.ts1[0] into (MSB, LSB).

    The original elif ladder hard-coded the 0..2048 range in 256-wide
    steps; that is exactly base-256 decomposition, so divmod replaces it
    and also generalizes to any non-negative tick count (the ladder
    raised UnboundLocalError outside 0..2048).
    """
    MSB, LSB = divmod(self.ts1[0], 256)
    return MSB, int(LSB)


def MSB_LSB_pulse_stim(self, pulse_width):
    """Split a pulse width (device limit 500 us) into (MSB, LSB).

    Same base-256 decomposition as MSB_LSB_main_stim; the original only
    handled 0..500 and failed with UnboundLocalError beyond that.
    """
    MSB, LSB = divmod(pulse_width, 256)
    return MSB, LSB
nilq/baby-python
python
class Order(object):
    """Simple value object holding one customer order.

    Attributes mirror the constructor arguments verbatim; no validation
    or normalization is performed here.
    """

    def __init__(self, name, address, comments):
        # name: customer name; address: delivery address;
        # comments: free-form order notes.
        self.name = name
        self.address = address
        self.comments = comments

    def __repr__(self):
        # Debug-friendly representation (the original class had none).
        return "Order(name={!r}, address={!r}, comments={!r})".format(
            self.name, self.address, self.comments)
nilq/baby-python
python
"""Helper functions for all Solar Forecast Arbiter /sites/* endpoints. """ import time from flask import current_app as app from requests.exceptions import ChunkedEncodingError, ConnectionError from sentry_sdk import capture_exception from sfa_dash import oauth_request_session from sfa_dash.api_interface.util import handle_response from sfa_dash.errors import DataRequestException def get_request(path, **kwargs): """Make a get request to a path at SFA api. Parameters ---------- path: str The api endpoint to query including leading slash. Returns ------- requests.Response The api response. """ # may need to handle errors if oauth_request_session does not exist somehow # definitely need to handle errors here retries = kwargs.pop('failure_retries', 2) errors = None try: req = oauth_request_session.get( f'{app.config["SFA_API_URL"]}{path}', **kwargs) except ChunkedEncodingError as e: errors = e except ConnectionError as e: errors = e if errors is not None: if retries > 0: kwargs['failure_retries'] = retries - 1 time.sleep((3 - retries) * 0.1) return get_request(path, **kwargs) else: # API timed out or dropped the connection, send the error to # sentry for tracking and return a message to the user. capture_exception(errors) raise DataRequestException(503, { 'Error': 'API connection failed. Please try again.' }) else: return handle_response(req) def post_request(path, payload, json=True): """Post payload to a path at the SFA api. Parameters ---------- path: str The api endpoint to post to including leading slash. payload: str or dict Payload to send to the api either a string or JSON dict. json: boolean A flag for setting the content type of the request, if True, posts json to the api, otherwise sends the payload as text/csv. Returns ------- requests.Response The api response. 
""" if json: kwargs = {'json': payload} else: kwargs = {'headers': {'Content-type': 'text/csv'}, 'data': payload} return handle_response(oauth_request_session.post( f'{app.config["SFA_API_URL"]}{path}', **kwargs)) def delete_request(path, **kwargs): """Make a delete request. Parameters ---------- path: str The api endpoint to post to including leading slash. Returns ------- requests.Response The api response. """ return handle_response(oauth_request_session.delete( f'{app.config["SFA_API_URL"]}{path}', **kwargs))
nilq/baby-python
python
from django.urls import path

from app.pages.views import (
    display_customers_data_page,
    display_customer_by_id_page,
    display_home_page,
)

# URL routing for the pages app: the site root plus two customer views.
# The home page route is unnamed; the customer routes can be reversed via
# their `name` arguments.
urlpatterns = [
    path('', display_home_page),
    path('customers/', display_customers_data_page, name="customers"),
    path('customers_by_id/', display_customer_by_id_page, name="customers_by_id"),
]
nilq/baby-python
python
import pytest

from orders.models import Order

# All tests in this module touch the database.
pytestmark = [pytest.mark.django_db]


# `call_purchase`, `tinkoff_bank`, `tinkoff_credit`, `api` and
# `default_gift_data` are fixtures defined elsewhere (conftest):
# the bank fixtures are mocks of the respective payment integrations.

def test_tinkoff_bank_is_called_by_default(call_purchase, tinkoff_bank, tinkoff_credit):
    # No desired_bank given: the plain bank integration is used.
    call_purchase()

    tinkoff_bank.assert_called_once()
    tinkoff_credit.assert_not_called()


def test_tinkoff_bank(call_purchase, tinkoff_bank, tinkoff_credit):
    # Explicitly requesting tinkoff_bank routes to the bank integration.
    call_purchase(desired_bank='tinkoff_bank')

    tinkoff_bank.assert_called_once()
    tinkoff_credit.assert_not_called()


def test_tinkoff_credit(call_purchase, tinkoff_bank, tinkoff_credit):
    # Requesting tinkoff_credit routes to the credit integration instead.
    call_purchase(desired_bank='tinkoff_credit')

    tinkoff_bank.assert_not_called()
    tinkoff_credit.assert_called_once()


def test_desired_bank_is_saved(call_purchase):
    # The requested bank is persisted on the created Order.
    call_purchase(desired_bank='tinkoff_credit')

    order = Order.objects.last()

    assert order.desired_bank == 'tinkoff_credit'


def test_by_default_desired_bank_is_empty_string(call_purchase):
    # Absent a request, the model default (empty string) is stored.
    call_purchase()

    order = Order.objects.last()

    assert order.desired_bank == ''


def test_desired_bank_is_stored_during_gift(api, default_gift_data):
    # The gift flow also persists desired_bank; 302 is the expected
    # redirect to the payment provider.
    api.post(
        '/api/v2/courses/ruloning-oboev/gift/', {
            **default_gift_data,
            'desired_bank': 'tinkoff_credit',
        },
        format='multipart', expected_status_code=302)

    order = Order.objects.last()

    assert order.desired_bank == 'tinkoff_credit'
nilq/baby-python
python
""" This file contains the core methods for the Batch-command- and Batch-code-processors respectively. In short, these are two different ways to build a game world using a normal text-editor without having to do so 'on the fly' in-game. They also serve as an automatic backup so you can quickly recreate a world also after a server reset. The functions in this module is meant to form the backbone of a system called and accessed through game commands. The Batch-command processor is the simplest. It simply runs a list of in-game commands in sequence by reading them from a text file. The advantage of this is that the builder only need to remember the normal in-game commands. They are also executing with full permission checks etc, making it relatively safe for builders to use. The drawback is that in-game there is really a builder-character walking around building things, and it can be important to create rooms and objects in the right order, so the character can move between them. Also objects that affects players (such as mobs, dark rooms etc) will affect the building character too, requiring extra care to turn off/on. The Batch-code processor is a more advanced system that accepts full Python code, executing in chunks. The advantage of this is much more power; practically anything imaginable can be coded and handled using the batch-code processor. There is no in-game character that moves and that can be affected by what is being built - the database is populated on the fly. The drawback is safety and entry threshold - the code is executed as would any server code, without mud-specific permission checks and you have full access to modifying objects etc. You also need to know Python and Evennia's API. Hence it's recommended that the batch-code processor is limited only to superusers or highly trusted staff. 
======================================================================= Batch-command processor file syntax The batch-command processor accepts 'batchcommand files' e.g 'batch.ev', containing a sequence of valid evennia commands in a simple format. The engine runs each command in sequence, as if they had been run at the game prompt. Each evennia command must be delimited by a line comment to mark its end. #INSERT path.batchcmdfile - this as the first entry on a line will import and run a batch.ev file in this position, as if it was written in this file. This way entire game worlds can be created and planned offline; it is especially useful in order to create long room descriptions where a real offline text editor is often much better than any online text editor or prompt. Example of batch.ev file: ---------------------------- # batch file # all lines starting with # are comments; they also indicate # that a command definition is over. @create box # this comment ends the @create command. @set box/desc = A large box. Inside are some scattered piles of clothing. It seems the bottom of the box is a bit loose. # Again, this comment indicates the @set command is over. Note how # the description could be freely added. Excess whitespace on a line # is ignored. An empty line in the command definition is parsed as a \n # (so two empty lines becomes a new paragraph). @teleport #221 # (Assuming #221 is a warehouse or something.) # (remember, this comment ends the @teleport command! Don'f forget it) # Example of importing another file at this point. #IMPORT examples.batch @drop box # Done, the box is in the warehouse! (this last comment is not necessary to # close the @drop command since it's the end of the file) ------------------------- An example batch file is game/gamesrc/commands/examples/batch_example.ev. ========================================================================== Batch-code processor file syntax The Batch-code processor accepts full python modules (e.g. 
"batch.py") that looks identical to normal Python files with a few exceptions that allows them to the executed in blocks. This way of working assures a sequential execution of the file and allows for features like stepping from block to block (without executing those coming before), as well as automatic deletion of created objects etc. You can however also run a batch-code python file directly using Python (and can also be de). Code blocks are separated by python comments starting with special code words. #HEADER - this denotes commands global to the entire file, such as import statements and global variables. They will automatically be pasted at the top of all code blocks. Observe that changes to these variables made in one block is not preserved between blocks! #CODE #CODE (info) #CODE (info) objname1, objname1, ... - This designates a code block that will be executed like a stand-alone piece of code together with any #HEADER defined. (info) text is used by the interactive mode to display info about the node to run. <objname>s mark the (variable-)names of objects created in the code, and which may be auto-deleted by the processor if desired (such as when debugging the script). E.g., if the code contains the command myobj = create.create_object(...), you could put 'myobj' in the #CODE header regardless of what the created object is actually called in-game. #INSERT path.filename - This imports another batch_code.py file and runs it in the given position. paths are given as python path. The inserted file will retain its own HEADERs which will not be mixed with the HEADERs of the file importing this file. 
The following variables are automatically made available for the script: caller - the object executing the script Example batch.py file ----------------------------------- #HEADER import traceback from django.config import settings from src.utils import create from game.gamesrc.typeclasses import basetypes GOLD = 10 #CODE obj, obj2 obj = create.create_object(basetypes.Object) obj2 = create.create_object(basetypes.Object) obj.location = caller.location obj.db.gold = GOLD caller.msg("The object was created!") #INSERT another_batch_file #CODE script = create.create_script() """ import re import codecs import traceback, sys from traceback import format_exc from django.conf import settings from django.core.management import setup_environ from src.utils import logger from src.utils import utils from game import settings as settings_module ENCODINGS = settings.ENCODINGS CODE_INFO_HEADER = re.compile(r"\(.*?\)") #------------------------------------------------------------ # Helper function #------------------------------------------------------------ def read_batchfile(pythonpath, file_ending='.py'): """ This reads the contents of a batch-file. Filename is considered to be the name of the batch file relative the directory specified in settings.py. file_ending specify which batchfile ending should be assumed (.ev or .py). """ # open the file if pythonpath and not (pythonpath.startswith('src.') or pythonpath.startswith('game.') or pythonpath.startswith('contrib.')): abspaths = [] for basepath in settings.BASE_BATCHPROCESS_PATHS: abspaths.append(utils.pypath_to_realpath("%s.%s" % (basepath, pythonpath), file_ending)) else: abspaths = [utils.pypath_to_realpath(pythonpath, file_ending)] fobj, lines, err = None, [], None for file_encoding in ENCODINGS: # try different encodings, in order load_errors = [] for abspath in abspaths: # try different paths, until we get a match try: # we read the file directly into unicode. 
fobj = codecs.open(abspath, 'r', encoding=file_encoding) except IOError: load_errors.append("Could not open batchfile '%s'." % abspath) continue break if not fobj: continue load_errors = [] err =None # We have successfully found and opened the file. Now actually # try to decode it using the given protocol. try: lines = fobj.readlines() except UnicodeDecodeError: # give the line of failure fobj.seek(0) try: lnum = 0 for lnum, line in enumerate(fobj): pass except UnicodeDecodeError, err: # lnum starts from 0, so we add +1 line, # besides the faulty line is never read # so we add another 1 (thus +2) to get # the actual line number seen in an editor. err.linenum = lnum + 2 fobj.close() # possibly try another encoding continue # if we get here, the encoding worked. Stop iteration. break if load_errors: logger.log_errmsg("\n".join(load_errors)) if err: return err else: return lines #------------------------------------------------------------ # # Batch-command processor # #------------------------------------------------------------ class BatchCommandProcessor(object): """ This class implements a batch-command processor. """ def parse_file(self, pythonpath): """ This parses the lines of a batchfile according to the following rules: 1) # at the beginning of a line marks the end of the command before it. It is also a comment and any number of # can exist on subsequent lines (but not inside comments). 2) #INSERT at the beginning of a line imports another batch-cmd file file and pastes it into the batch file as if it was written there. 3) Commands are placed alone at the beginning of a line and their arguments are considered to be everything following (on any number of lines) until the next comment line beginning with #. 4) Newlines are ignored in command definitions 5) A completely empty line in a command line definition is condered a newline (so two empty lines is a paragraph). 6) Excess spaces and indents inside arguments are stripped. 
""" #helper function def identify_line(line): """ Identifies the line type (comment, commanddef or empty) """ try: if line.strip().startswith("#INSERT"): return "insert" elif line.strip()[0] == '#': return "comment" else: return "commanddef" except IndexError: return "empty" #read the indata, if possible. lines = read_batchfile(pythonpath, file_ending='.ev') #line = utils.to_unicode(line) if not lines: return None commands = [] curr_cmd = "" #purge all superfluous whitespace and newlines from lines reg1 = re.compile(r"\s+") lines = [reg1.sub(" ", l) for l in lines] #parse all command definitions into a list. for line in lines: typ = identify_line(line) if typ == "commanddef": curr_cmd += line elif typ == "empty" and curr_cmd: curr_cmd += "\r\n" elif typ == "insert": # note that we are not safeguarding for # cyclic imports here! if curr_cmd: commands.append(curr_cmd.strip()) curr_cmd = "" filename = line.lstrip("#INSERT").strip() insert_commands = self.parse_file(filename) if insert_commands == None: insert_commands = ["{rINSERT ERROR: %s{n" % filename] commands.extend(insert_commands) else: #comment if curr_cmd: commands.append(curr_cmd.strip()) curr_cmd = "" if curr_cmd: commands.append(curr_cmd.strip()) #second round to clean up now merged line edges etc. 
reg2 = re.compile(r"[ \t\f\v]+") commands = [reg2.sub(" ", c) for c in commands] #remove eventual newline at the end of commands commands = [c.strip('\r\n') for c in commands] return commands #------------------------------------------------------------ # # Batch-code processor # #------------------------------------------------------------ def tb_filename(tb): "Helper to get filename from traceback" return tb.tb_frame.f_code.co_filename def tb_iter(tb): while tb is not None: yield tb tb = tb.tb_next class BatchCodeProcessor(object): """ This implements a batch-code processor """ def parse_file(self, pythonpath): """ This parses the lines of a batchfile according to the following rules: 1) Lines starting with #HEADER starts a header block (ends other blocks) 2) Lines starting with #CODE begins a code block (ends other blocks) 3) #CODE headers may be of the following form: #CODE (info) objname, objname2, ... 4) Lines starting with #INSERT are on form #INSERT filename. 3) All lines outside blocks are stripped. 4) All excess whitespace beginning/ending a block is stripped. """ # helper function def parse_line(line): """ Identifies the line type: block command, comment, empty or normal code. """ parseline = line.strip() if parseline.startswith("#HEADER"): return ("header", "", "") if parseline.startswith("#INSERT"): filename = line.lstrip("#INSERT").strip() if filename: return ('insert', "", filename) else: return ('comment', "", "{r#INSERT <None>{n") elif parseline.startswith("#CODE"): # parse code command line = line.lstrip("#CODE").strip() info = CODE_INFO_HEADER.findall(line) or "" if info: info = info[0] line = line.replace(info, "") objs = [o.strip() for o in line.split(",") if o.strip()] return ("codeheader", info, objs) elif parseline.startswith('#'): return ('comment', "", "%s" % line) else: #normal line - return it with a line break. 
return ('line', "", "%s" % line) # read indata lines = read_batchfile(pythonpath, file_ending='.py') if not lines: return None # parse file into blocks header = "" codes = [] in_header = False in_code = False for line in lines: # parse line mode, info, line = parse_line(line) # try: # print "::", in_header, in_code, mode, line.strip() # except: # print "::", in_header, in_code, mode, line if mode == 'insert': # recursive load of inserted code files - note that we # are not checking for cyclic imports! in_header = False in_code = False inserted_codes = self.parse_file(line) or [{'objs':"", 'info':line, 'code':""}] for codedict in inserted_codes: codedict["inserted"] = True codes.extend(inserted_codes) elif mode == 'header': in_header = True in_code = False elif mode == 'codeheader': in_header = False in_code = True # the line is a list of object variable names # (or an empty list) at this point. codedict = {'objs':line, 'info':info, 'code':""} codes.append(codedict) elif mode == 'comment' and in_header: continue else: # another type of line (empty, comment or code) if line and in_header: header += line elif line and in_code: codes[-1]['code'] += line else: # not in a block (e.g. first in file). Ignore. continue # last, we merge the headers with all codes. for codedict in codes: #print "codedict:", codedict if codedict and "inserted" in codedict: # we don't need to merge code+header in this case # since that was already added in the recursion. We # just check for errors. 
if not codedict['code']: codedict['code'] = "{r#INSERT ERROR: %s{n" % codedict['info'] else: objs = ", ".join(codedict["objs"]) if objs: objs = "[%s]" % objs codedict["code"] = "#CODE %s %s \n%s\n\n%s" % (codedict['info'], objs, header.strip(), codedict["code"].strip()) return codes def code_exec(self, codedict, extra_environ=None, debug=False): """ Execute a single code block, including imports and appending global vars extra_environ - dict with environment variables """ # define the execution environment environ = "setup_environ(settings_module)" environdict = {"setup_environ":setup_environ, "settings_module":settings_module} if extra_environ: for key, value in extra_environ.items(): environdict[key] = value # merge all into one block code = "%s # auto-added by Evennia\n%s" % (environ, codedict['code']) if debug: # try to delete marked objects for obj in codedict['objs']: code += "\ntry: %s.delete()\nexcept: pass" % obj # execute the block try: exec(code, environdict) except Exception: etype, value, tb = sys.exc_info() fname = tb_filename(tb) for tb in tb_iter(tb): if fname != tb_filename(tb): break lineno = tb.tb_lineno - 1 err = "" for iline, line in enumerate(code.split("\n")): if iline == lineno: err += "\n{w%02i{n: %s" % (iline + 1, line) elif lineno - 5 < iline < lineno + 5: err += "\n%02i: %s" % (iline + 1, line) err += "\n".join(traceback.format_exception(etype, value, tb)) #errlist = format_exc().split('\n') #if len(errlist) > 4: # errlist = errlist[4:] #err = "\n".join(" %s" % line for line in errlist if line) if debug: # try to delete objects again. try: for obj in codedict['objs']: eval("%s.delete()" % obj, environdict) except Exception: pass return err return None BATCHCMD = BatchCommandProcessor() BATCHCODE = BatchCodeProcessor()
nilq/baby-python
python
from typing import Optional, Union

from .set_config import _get_config


class _Engine(object):
    """Indicates the database engine that is currently in use."""
    ENGINE = 0
    MYSQL = 1
    SQLITE = 3

    # Indicates whether the connection to the database has been created.
    _created = False

    @classmethod
    def set_engine(cls, this_engine: str):
        # Map 'mysql'/'sqlite' (case-insensitive) to the class constant.
        cls.ENGINE = getattr(cls, this_engine.upper())

    @classmethod
    def is_mysql(cls):
        return cls.ENGINE == cls.MYSQL

    @classmethod
    def is_sqlite(cls):
        return cls.ENGINE == cls.SQLITE


try:
    _engine = _get_config().pop('engine').lower()
except KeyError:
    # No 'engine' key in the config: default to sqlite.
    _engine = 'sqlite'
except ModuleNotFoundError:
    # NOTE(review): presumably raised when the config module itself is
    # missing; leaves the module without any engine bound -- confirm.
    _engine = ''

# Bind the engine-specific implementations as module globals.  The names
# imported here are re-exported through the wrappers below.
if _engine == 'mysql':
    _Engine.set_engine('mysql')
    from .engine.mysql import __create_connection, _close_db_connection, _select, _execute, _Connection, _Transaction
elif _engine == 'sqlite':
    _Engine.set_engine('sqlite')
    from .engine.sqlite import __create_connection, _close_db_connection, _select, _execute, _Connection, _Transaction


def _set_engine(new_engine: str):
    """Set an engine and import related modules; used in setconf.set_config."""
    global __create_connection, _close_db_connection, _select, _execute, _Connection, _Transaction
    engine = new_engine.lower()
    if engine == 'mysql':
        _Engine.set_engine('mysql')
        from .engine.mysql import __create_connection, _close_db_connection, _select, _execute, _Connection, \
            _Transaction
    elif engine == 'sqlite':
        _Engine.set_engine('sqlite')
        from .engine.sqlite import __create_connection, _close_db_connection, _select, _execute, _Connection, \
            _Transaction


def _create_connection(echo: bool = False, debug: bool = False, **kwargs):
    """
    Create a connection to the database.

    Args:
        See setconf's __doc__ .
    """
    return __create_connection(echo=echo, debug=debug, **kwargs)


async def close_db_connection():
    """Close the connection with the database. You may sometimes need it."""
    return await _close_db_connection()


def Connection():
    """
    An async context manager to run a custom sql statement.

    Creates a new connection. Returns a Connection instance.

    You can also use this connection in ORM by specifying the conn parameter.

    If you have not set autocommit=True, you should commit manually by using
    ``conn.commit()``.
    """
    return _Connection()


def Transaction():
    """
    Get a connection to do an atomic transaction.

    This is a subclass of Connection and they have the same usage, and on
    exit, this connection will automatically commit or roll back on error.

    You can also use this connection in ORM by specifying the conn parameter.

    Example::

        async with connection.Transaction() as conn:
            await Table(tl1='abc',tl2=123).save(conn=conn)
    """
    return _Transaction()


def select(sql: str, args: Optional[Union[list, tuple]] = (), conn: Optional[Connection] = None) -> list:
    """
    Execute a select query, and return a list of results. You can use this
    method when you encounter a query that ORM cannot complete.

    Args:
        sql(str): a sql statement, use ? as placeholder.
        args(list or tuple): argument in placeholder.
        conn: use this parameter to specify a custom connection.

    Return:
        (list) a list of results.
    """
    return _select(sql, args, conn)


def execute(sql: str, args: Optional[Union[list, tuple]] = (), conn: Optional[Connection] = None) -> int:
    """
    Execute an insert, update or delete query, and return the number of
    affected rows. You can use this method when you encounter a query that
    ORM cannot complete.

    Args:
        sql(str): a sql statement, use ? as placeholder.
        args(list or tuple): argument in placeholder.
        conn: use this parameter to specify a custom connection.

    Return:
        (int) affected rows.
    """
    return _execute(sql, args, conn)
nilq/baby-python
python
# Cycle the identify (locator) LED through every node in a noderange, one node
# at a time, so a technician can visually walk a rack ("snake" the light).
import argparse
import confluent.client as cli
import sys
import time

c = cli.Command()
nodes = []

ap = argparse.ArgumentParser(description='Snake identify light through nodes')
ap.add_argument('noderange', help='Noderange to iterate through')
ap.add_argument('-d', '--duration', type=float, help='How long to have each system illuminated')
args = ap.parse_args()


def runit(itera):
    """Drain a confluent response iterator, echoing any error entries to stderr."""
    for rsp in itera:
        if 'error' in rsp:
            sys.stderr.write('{0}\n'.format(repr(rsp)))


# Expand the noderange into a concrete node list via the confluent API.
for ret in c.read('/noderange/{0}/nodes/'.format(args.noderange)):
    node = ret.get('item', {}).get('href', None)
    if node:
        # hrefs look like 'nodename/'; strip the slash to get the bare name.
        node = node.replace('/', '')
        nodes.append(node)
    else:
        # Unexpected payload (e.g. an error record) -- surface it for debugging.
        print(repr(ret))
if not nodes:
    sys.exit(1)
lastnode = None
# Each node gets two sleeps per cycle (on, then previous-off), so halve the
# requested duration; default to 0.25s per half-step when none was given.
interval = args.duration
if interval:
    interval = interval / 2
else:
    interval = 0.25
# Loop forever: light the current node, then extinguish the previous one,
# producing a single moving light. Stop with Ctrl-C.
while True:
    for node in nodes:
        print('Lighting {0}'.format(node))
        runit(c.update('/nodes/{0}/identify'.format(node), {'identify': 'on'}))
        time.sleep(interval)
        if lastnode:
            runit(c.update('/nodes/{0}/identify'.format(lastnode), {'identify': 'off'}))
        lastnode = node
        time.sleep(interval)
nilq/baby-python
python
# Generated by Django 3.1.7 on 2021-03-12 13:53 from django.db import migrations, models class Migration(migrations.Migration): dependencies = [ ('core', '0010_genre_desc'), ] operations = [ migrations.AlterField( model_name='genre', name='desc', field=models.TextField(blank=True, max_length=255, null=True), ), ]
nilq/baby-python
python
# nox configuration for auditwheel: linting, unit/integration tests, coverage,
# and building/testing sdist+wheel distributions across Python versions.
import os
import sys
from pathlib import Path
from typing import List

import nox

nox.options.sessions = ["lint", "test-dist"]

PYTHON_ALL_VERSIONS = ["3.6", "3.7", "3.8", "3.9", "3.10"]

RUNNING_CI = "TRAVIS" in os.environ or "GITHUB_ACTIONS" in os.environ


@nox.session(python=["3.6"], reuse_venv=True)
def lint(session: nox.Session) -> None:
    """
    Run linters on the codebase.
    """
    session.install("pre-commit")
    session.run("pre-commit", "run", "--all-files")


@nox.session()
def coverage(session: nox.Session) -> None:
    """
    Run coverage using unit tests.
    """
    session.install(".[coverage]")
    session.run(
        "python",
        "-m",
        "pytest",
        "tests/unit",
        "--cov=auditwheel",
        "--cov-report=term-missing",
    )


def _docker_images(session: nox.Session) -> List[str]:
    """Return the manylinux docker image names used by the integration tests.

    Runs a throwaway script in a subprocess because test_manylinux imports
    heavyweight test dependencies we don't want in this process.
    """
    tmp_dir = Path(session.create_tmp())
    script = tmp_dir / "list_images.py"
    images_file = tmp_dir / "images.lst"
    # fr-string: {images_file} is interpolated here, while the \n stays a
    # literal escape inside the generated script.
    script.write_text(
        fr"""
import sys
from pathlib import Path

sys.path.append("./tests/integration")

from test_manylinux import MANYLINUX_IMAGES

images = "\n".join(MANYLINUX_IMAGES.values())
Path(r"{images_file}").write_text(images)
"""
    )
    session.run("python", str(script), silent=True)
    return images_file.read_text().splitlines()


@nox.session(python=PYTHON_ALL_VERSIONS)
def tests(session: nox.Session) -> None:
    """
    Run tests.
    """
    posargs = session.posargs
    extras = "coverage" if RUNNING_CI else "test"
    session.install("-e", f".[{extras}]")
    if RUNNING_CI:
        session.install("codecov")
        posargs.extend(["--cov", "auditwheel", "--cov-branch"])
        # pull manylinux images that will be used.
        # this helps passing tests which would otherwise timeout.
        for image in _docker_images(session):
            session.run("docker", "pull", image, external=True)
    session.run("pytest", "-s", *posargs)
    if RUNNING_CI:
        session.run("auditwheel", "lddtree", sys.executable)
        try:
            session.run("codecov")
        except nox.command.CommandFailed:
            pass  # Ignore failures from codecov tool


def _build(session: nox.Session, dist: Path) -> None:
    """Build sdist and wheel into a temp dir, then move them into *dist*."""
    session.install("build")
    tmp_dir = Path(session.create_tmp()) / "build-output"
    session.run("python", "-m", "build", "--outdir", str(tmp_dir))
    # Exactly one wheel and one sdist are expected; unpacking enforces that.
    (wheel_path,) = tmp_dir.glob("*.whl")
    (sdist_path,) = tmp_dir.glob("*.tar.gz")
    dist.mkdir(exist_ok=True)
    wheel_path.rename(dist / wheel_path.name)
    sdist_path.rename(dist / sdist_path.name)


@nox.session(name="test-dist")
def test_dist(session: nox.Session) -> None:
    """
    Builds SDist & Wheels then run unit tests on those.
    """
    tmp_dir = Path(session.create_tmp())
    dist = tmp_dir / "dist"
    _build(session, dist)
    python_versions = session.posargs or PYTHON_ALL_VERSIONS
    for version in python_versions:
        # Fan out to the private per-version sessions below.
        session.notify(f"_test_sdist-{version}", [str(dist)])
        session.notify(f"_test_wheel-{version}", [str(dist)])


def _test_dist(session: nox.Session, path: str, pattern: str) -> None:
    """Install the single artifact matching *pattern* from *path* and run unit tests."""
    (dist_path,) = Path(path).glob(pattern)
    session.install(f"{str(dist_path)}[test]")
    session.run("pytest", "tests/unit")


@nox.session(python=PYTHON_ALL_VERSIONS)
def _test_sdist(session: nox.Session) -> None:
    """
    Do not run explicitly.
    """
    _test_dist(session, session.posargs[0], "*.tar.gz")


@nox.session(python=PYTHON_ALL_VERSIONS)
def _test_wheel(session: nox.Session) -> None:
    """
    Do not run explicitly.
    """
    _test_dist(session, session.posargs[0], "*.whl")


@nox.session
def build(session: nox.Session) -> None:
    """
    Make an SDist and a wheel.
    """
    _build(session, Path("dist"))


@nox.session(python=PYTHON_ALL_VERSIONS, reuse_venv=True)
def develop(session: nox.Session) -> None:
    """Set up an editable development install for this interpreter."""
    session.run("python", "-m", "pip", "install", "--upgrade", "pip", "setuptools")
    session.install("-e", ".[develop]")
nilq/baby-python
python
import pandas as pd


def search_scdb_cases_by_name(word, all_scdb_case_data):
    """Return all SCDB rows whose 'caseName' contains *word* (case-sensitive)."""
    name_has_word = all_scdb_case_data['caseName'].str.contains(word, na=False)
    return all_scdb_case_data[name_has_word]


def format_case_name(case_name):
    """Normalise a case name into CourtListener's URL slug style.

    CL opinion names come from URLs: lowercased, punctuation dropped,
    'versus'/'vs.' collapsed to 'v', and spaces replaced with hyphens.
    """
    slug = str(case_name).lower().lstrip().rstrip()
    for old, new in ((',', ''), ("'", ''), ('versus', 'v'),
                     ('vs.', 'v.'), ('.', ''), (' ', '-')):
        slug = slug.replace(old, new)
    return slug


def find_scdb_case(cl_opin_id, all_scdb_case_data):
    """Find the SCDB row for a CourtListener opinion id.

    Ids have the form "U.S. citation|lower-case-v-name".  The U.S. citation
    ("xxx U.S. xxx") is tried first; if nothing matches, fall back to
    comparing normalised case names.  Returns an empty DataFrame on no match.
    """
    us_citation = cl_opin_id.split('|')[0]
    by_citation = all_scdb_case_data[all_scdb_case_data['usCite'] == us_citation]
    if not by_citation.empty:
        return by_citation
    # Citation lookup failed -- compare slug-normalised case names instead.
    wanted = format_case_name(cl_opin_id.split('|')[1])
    for candidate in all_scdb_case_data['caseName']:
        if format_case_name(candidate) == wanted:
            return all_scdb_case_data[all_scdb_case_data['caseName'] == candidate]
    # No match at all: return the empty DataFrame.
    return pd.DataFrame()
nilq/baby-python
python
# MIT License
#
# Copyright (c) 2020 University of Oxford
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
"""
Command line interface for tsdate.
"""
import argparse
import logging
import sys

import tskit

import tsdate

logger = logging.getLogger(__name__)
log_format = '%(asctime)s %(levelname)s %(message)s'


# NOTE(review): this shadows the builtin exit() within this module -- harmless
# here, but renaming (e.g. to _die) would be clearer.
def exit(message):
    """
    Exit with the specified error message, setting error status.
    """
    sys.exit("{}: {}".format(sys.argv[0], message))


def setup_logging(args):
    """Configure root logging verbosity from the parsed -v count."""
    log_level = "WARN"
    if args.verbosity > 0:
        log_level = "INFO"
    if args.verbosity > 1:
        log_level = "DEBUG"
    logging.basicConfig(level=log_level, format=log_format)


def tsdate_cli_parser():
    """Build and return the argparse parser for the tsdate CLI."""
    parser = argparse.ArgumentParser(
        description="Set up base data, generate inferred datasets,\
                     and process datasets.")
    parser.add_argument(
        "-V", "--version", action='version',
        version='%(prog)s {}'.format(tsdate.__version__))
    parser.add_argument('ts',
                        help="The path and name of the input tree sequence from which \
                        we estimate node ages.")
    parser.add_argument('output',
                        help="The path and name of output file where the dated tree \
                        sequence will saved.")
    parser.add_argument('Ne', type=float,
                        help="estimated effective (diploid) population size.")
    parser.add_argument('-m', '--mutation-rate', type=float, default=None,
                        help="The estimated mutation rate per unit of genome per \
                        generation. If provided, the dating algorithm will use a \
                        mutation rate clock to help estimate node dates.")
    parser.add_argument('-r', '--recombination-rate', type=float,
                        default=None, help="The estimated recombination rate per unit \
                        of genome per generation. If provided, the dating algorithm \
                        will use a recombination rate clock to help estimate node \
                        dates.")
    parser.add_argument('-e', '--epsilon', type=float, default=1e-6,
                        help="Specify minimum distance separating time points. Also \
                        specifies the error factor in time difference calculations.")
    parser.add_argument('-t', '--num-threads', type=int, default=None,
                        help="The number of threads to use. A simpler unthreaded \
                        algorithm is used unless this is >= 1 (default: None).")
    parser.add_argument('--probability-space', type=str, default='logarithmic',
                        help="Should the internal algorithm save probabilities in \
                        'logarithmic' (slower, less liable to to overflow) or 'linear' \
                        space (faster, may overflow).")
    parser.add_argument('--method', type=str, default='inside_outside',
                        help="Specify which estimation method to use: can be \
                        'inside_outside' (empirically better, theoretically \
                        problematic) or 'maximization' (worse empirically, especially \
                        with a gamma approximated prior, but theoretically robust). \
                        Default: 'inside_outside.'")
    parser.add_argument('-p', '--progress', action='store_true',
                        help="Show progress bar.")
    parser.add_argument('-v', '--verbosity', type=int, default=0,
                        help="How much verbosity to output.")
    return parser


def run_date(args):
    """Load the tree sequence, run tsdate.date with the CLI options, dump the result."""
    try:
        ts = tskit.load(args.ts)
    except tskit.FileFormatError as ffe:
        exit("Error loading '{}: {}".format(args.ts, ffe))
    dated_ts = tsdate.date(
        ts, args.Ne, mutation_rate=args.mutation_rate,
        recombination_rate=args.recombination_rate,
        probability_space=args.probability_space, method=args.method,
        eps=args.epsilon, num_threads=args.num_threads,
        progress=args.progress)
    dated_ts.dump(args.output)


def tsdate_main(arg_list=None):
    """CLI entry point: parse arguments, set up logging, and run the dating step."""
    parser = tsdate_cli_parser()
    args = parser.parse_args(arg_list)
    setup_logging(args)
    run_date(args)
nilq/baby-python
python
""" [summary] [extended_summary] """ # region [Imports] # * Standard Library Imports ----------------------------------------------------------------------------> import os import traceback from datetime import datetime from typing import Tuple import re # * Third Party Imports ---------------------------------------------------------------------------------> from discord import Embed, ChannelType from fuzzywuzzy import fuzz from fuzzywuzzy import process as fuzzprocess from discord.ext import commands import discord # * Gid Imports -----------------------------------------------------------------------------------------> import gidlogger as glog # * Local Imports ---------------------------------------------------------------------------------------> from antipetros_discordbot.utility.misc import async_seconds_to_pretty_normal, async_split_camel_case_string from antipetros_discordbot.utility.exceptions import MissingAttachmentError, NotNecessaryRole, IsNotTextChannelError, NotNecessaryDmId, NotAllowedChannelError, NotNecessaryRole from antipetros_discordbot.utility.gidtools_functions import loadjson from antipetros_discordbot.abstracts.subsupport_abstract import SubSupportBase from antipetros_discordbot.init_userdata.user_data_setup import ParaStorageKeeper from antipetros_discordbot.utility.discord_markdown_helper.special_characters import ZERO_WIDTH from antipetros_discordbot.bot_support.sub_support.sub_support_helper.cooldown_dict import CoolDownDict # endregion[Imports] # region [TODO] # TODO: rebuild whole error handling system # TODO: make it so that creating the embed also sends it, with more optional args # endregion [TODO] # region [AppUserData] # endregion [AppUserData] # region [Logging] log = glog.aux_logger(__name__) # endregion[Logging] # region [Constants] APPDATA = ParaStorageKeeper.get_appdata() BASE_CONFIG = ParaStorageKeeper.get_config('base_config') THIS_FILE_DIR = os.path.abspath(os.path.dirname(__file__)) EMBED_SYMBOLS = 
loadjson(APPDATA["embed_symbols.json"]) # endregion[Constants] class ErrorHandler(SubSupportBase): char_to_replace = "'" config_name = 'error_handling' error_thumbnail = "https://upload.wikimedia.org/wikipedia/commons/thumb/9/97/Dialog-error-round.svg/1200px-Dialog-error-round.svg.png" def __init__(self, bot, support): self.bot = bot self.support = support self.loop = self.bot.loop self.is_debug = self.bot.is_debug self.emphasis_regex = re.compile(r"'.*?'") self.error_handle_table = {commands.MaxConcurrencyReached: self._handle_max_concurrency, commands.CommandOnCooldown: self._handle_command_on_cooldown, commands.errors.BadArgument: self._handle_bad_argument, MissingAttachmentError: self._handle_missing_attachment, commands.CheckFailure: self._handle_check_failure, IsNotTextChannelError: self._handle_not_text_channel, NotNecessaryDmId: self._handle_not_necessary_dm_id, NotAllowedChannelError: self._handle_not_allowed_channel, NotNecessaryRole: self._handle_not_necessary_role} self.cooldown_data = CoolDownDict() glog.class_init_notification(log, self) @property def delete_invoking_messages(self): return BASE_CONFIG.retrieve(self.config_name, 'delete_invoking_messages', typus=bool, direct_fallback=False) @property def delete_reply_after(self): _out = BASE_CONFIG.retrieve(self.config_name, 'delete_reply_after', typus=int, direct_fallback=120) if _out == 0 or _out <= 0: return None return _out @property def emphasis_chars(self): format_lut = {'bold': '**', 'underlined': '__', 'italic': '*', 'strikethrough': '~'} format_keywords = BASE_CONFIG.retrieve(self.config_name, 'msg_keyword_format', typus=Tuple[str], direct_fallback=[], mod_func=lambda x: x.casefold()) return (''.join(map(lambda x: format_lut.get(x, ''), format_keywords)), ''.join(map(lambda x: format_lut.get(x, ''), reversed(format_keywords)))) async def transform_error_msg(self, error_msg): before_emphasis, after_emphasis = self.emphasis_chars _msg = error_msg for orig_word in 
self.emphasis_regex.findall(error_msg): cleaned_word = orig_word.strip("'").strip() mod_word = f"{before_emphasis}{cleaned_word.upper()}{after_emphasis}" _msg = _msg.replace(orig_word, mod_word) return _msg async def handle_errors(self, ctx, error): error_traceback = '\n'.join(traceback.format_exception(error, value=error, tb=None)) await self.error_handle_table.get(type(error), self._default_handle_error)(ctx, error, error_traceback) if ctx.channel.type is ChannelType.text: log.error("Error '%s' was caused by '%s' on the command '%s' with args '%s' and traceback --> %s", error.__class__.__name__, ctx.author.name, ctx.command.name, ctx.args, error_traceback) if self.delete_invoking_messages is True: await ctx.message.delete() async def _default_handle_error(self, ctx: commands.Context, error, error_traceback): log.error('Ignoring exception in command {}:'.format(ctx.command)) log.exception(error, exc_info=True, stack_info=False) if ctx.channel.type is ChannelType.text: await ctx.reply(f'The command had an unspecified __**ERROR**__\n please send {self.bot.creator.member_object.mention} a DM of what exactly you did when the error occured.', delete_after=120, allowed_mentions=discord.AllowedMentions.none()) await self.bot.message_creator(embed=await self.error_reply_embed(ctx, error, 'Error With No Special Handling Occured', msg=str(error), error_traceback=error_traceback)) async def _handle_not_necessary_role(self, ctx, error, error_traceback): embed_data = await self.bot.make_generic_embed(footer='default_footer', title='Missing Role', thumbnail=self.error_thumbnail, description=await self.transform_error_msg(error.msg), field=[self.bot.field_item(name='Your Roles:', value='\n'.join(role.name for role in ctx.author.roles))]) await ctx.reply(delete_after=self.delete_reply_after, **embed_data) async def _handle_not_allowed_channel(self, ctx, error, error_traceback): embed_data = await self.bot.make_generic_embed(footer='default_footer', title='Wrong Channel', 
thumbnail=self.error_thumbnail, description=await self.transform_error_msg(error.msg)) await ctx.reply(delete_after=self.delete_reply_after, **embed_data) async def _handle_not_necessary_dm_id(self, ctx, error, error_traceback): embed_data = await self.bot.make_generic_embed(footer='default_footer', title='Missing Permission', thumbnail=self.error_thumbnail, description=await self.transform_error_msg(error.msg)) await ctx.reply(**embed_data) async def _handle_not_text_channel(self, ctx, error, error_traceback): embed_data = await self.bot.make_generic_embed(footer='default_footer', title='Only allowed in Text Channels', thumbnail=self.error_thumbnail, description=await self.transform_error_msg(error.msg)) await ctx.reply(**embed_data) async def _handle_check_failure(self, ctx, error, error_traceback): if self.bot.is_blacklisted(ctx.author) is False: await ctx.channel.send(delete_after=self.delete_reply_after, embed=await self.error_reply_embed(ctx, error, 'Missing Permission', f'{ctx.author.mention}\n{ZERO_WIDTH}\n **You dont_have Permission to call this Command**\n{ZERO_WIDTH}')) async def _handle_missing_attachment(self, ctx, error, error_traceback): await ctx.channel.send(delete_after=self.delete_reply_after, embed=await self.error_reply_embed(ctx, error, 'Missing Attachments', f'{ctx.author.mention}\n{ZERO_WIDTH}\n **{str(error)}**\n{ZERO_WIDTH}')) async def _handle_bad_argument(self, ctx, error, error_traceback): await ctx.channel.send(delete_after=self.delete_reply_after, embed=await self.error_reply_embed(ctx, error, 'Wrong Argument', f'{ctx.author.mention}\n{ZERO_WIDTH}\n **You tried to invoke `{ctx.command.name}` with an wrong argument**\n{ZERO_WIDTH}\n```shell\n{ctx.command.name} {ctx.command.signature}\n```', error_traceback=None)) async def _handle_max_concurrency(self, ctx, error, error_traceback): await ctx.channel.send(embed=await self.error_reply_embed(ctx, error, 'STOP SPAMMING!', f'{ctx.author.mention}\n{ZERO_WIDTH}\n **Your mother was a hamster 
and your father smelled of elderberries!**', error_traceback=error_traceback), delete_after=self.delete_reply_after) await ctx.message.delete() async def _handle_command_on_cooldown(self, ctx, error, error_traceback): # TODO: get normal sentence from BucketType, with dynamical stuff (user_name, channel_name,...) msg = await self.transform_error_msg(f"Command '{ctx.command.name}' is on cooldown for '{error.cooldown.type.name.upper()}'. \n{ZERO_WIDTH}\nYou can try again in '{await async_seconds_to_pretty_normal(int(round(error.retry_after, 0)))}'\n{ZERO_WIDTH}") if self.cooldown_data.in_data(ctx, error) is True: await ctx.message.delete() await ctx.author.send(msg) return await self.cooldown_data.add(ctx, error) embed_data = await self.bot.make_generic_embed(title=f'Command is on Cooldown for the scope of {error.cooldown.type.name.upper()}', thumbnail="https://www.seekpng.com/png/full/896-8968896_cooldown-cooldown-car-air-conditioning-icon.png", description=msg) await ctx.reply(**embed_data, delete_after=error.retry_after) await ctx.message.delete() async def error_reply_embed(self, ctx, error, title, msg, error_traceback=None): embed = Embed(title=title, description=f"{ZERO_WIDTH}\n{msg}\n{ZERO_WIDTH}", color=self.support.color('red').int, timestamp=datetime.utcnow()) embed.set_thumbnail(url=EMBED_SYMBOLS.get('warning')) embed.set_author(name=ctx.author.name, icon_url=ctx.author.avatar_url) if error_traceback is not None: embed.add_field(name='Traceback', value=str(error_traceback)[0:500]) if ctx.command is not None: embed.set_footer(text=f"Command: `{ctx.command.name}`\n{ZERO_WIDTH}\n By User: `{ctx.author.name}`\n{ZERO_WIDTH}\n Error: `{await async_split_camel_case_string(error.__class__.__name__)}`\n{ZERO_WIDTH}\n{ZERO_WIDTH}") else: embed.set_footer(text=f"text: {ctx.message.content}\n{ZERO_WIDTH}\n By User: `{ctx.author.name}`\n{ZERO_WIDTH}\n Error: `{await async_split_camel_case_string(error.__class__.__name__)}`\n{ZERO_WIDTH}\n{ZERO_WIDTH}") return embed 
async def error_message_embed(self, ctx, error, msg=ZERO_WIDTH): embed = Embed(title='ERROR', color=self.support.color('orange').int, timestamp=datetime.utcnow(), description=ZERO_WIDTH + '\n' + msg + '\n' + ZERO_WIDTH) embed.set_thumbnail(url=EMBED_SYMBOLS.get('warning')) try: embed.add_field(name=await async_split_camel_case_string(error.__class__.__name__), value=f"error occured with command: {ctx.command.name} and arguments: {str(ctx.args)}") except AttributeError: embed.add_field(name=await async_split_camel_case_string(error.__class__.__name__), value="command not found\n" + ZERO_WIDTH + '\n', inline=False) corrections = fuzzprocess.extract(ctx.message.content.split(' ')[1], [command.name for command in self.bot.commands], scorer=fuzz.token_set_ratio, limit=3) if corrections is not None: embed.add_field(name='did you mean:', value=ZERO_WIDTH + '\n' + f'\n{ZERO_WIDTH}\n'.join(correction[0] for correction in corrections), inline=False) embed.set_footer(text=f'to get a list of all commands use:\n@AntiPetros {self.bot.help_invocation}\n{ZERO_WIDTH}\n{ZERO_WIDTH}') return embed async def if_ready(self): log.debug("'%s' sub_support is READY", str(self)) async def update(self, typus): return log.debug("'%s' sub_support was UPDATED", str(self)) def retire(self): log.debug("'%s' sub_support was RETIRED", str(self)) def get_class(): return ErrorHandler # region[Main_Exec] if __name__ == '__main__': pass # endregion[Main_Exec]
nilq/baby-python
python
# Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""GKE master version available for new clusters.

The GKE master version should be a version that is available
for new clusters. If a version is not available it could mean
that it is deprecated, or possibly retired due to issues with
it.
"""

from gcpdiag import lint, models
from gcpdiag.queries import gke


def run_rule(context: models.Context, report: lint.LintReportRuleInterface):
  """Check each cluster's master version against the valid-versions list."""
  clusters = gke.get_clusters(context)
  if not clusters:
    report.add_skipped(None, 'no clusters found')
  # Iterate deterministically by cluster key.
  for _, cluster in sorted(clusters.items()):
    # Clusters on a release channel are upgraded automatically; skip them.
    if cluster.release_channel:
      report.add_skipped(cluster, 'release channel: ' + cluster.release_channel)
      continue
    valid_versions = gke.get_valid_master_versions(cluster.project_id,
                                                   cluster.location)
    if cluster.master_version in valid_versions:
      report.add_ok(cluster, cluster.master_version)
    else:
      report.add_failed(cluster, 'valid versions: ' + ', '.join(valid_versions),
                        cluster.master_version)
nilq/baby-python
python
# Copyright (c) 2012-2018 SoftBank Robotics. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the COPYING file. import os def test_simple(qidoc_action): world_proj = qidoc_action.add_test_project("world") build_dir = os.path.join(world_proj.path, "build-doc") assert not os.path.exists(build_dir) qidoc_action("build", "world") assert os.path.exists(build_dir) qidoc_action("clean", "world") assert os.path.exists(build_dir) qidoc_action("clean", "world", "--force") assert not os.path.exists(build_dir)
nilq/baby-python
python
from io import BytesIO

from .utils import Tools, Status, Network
from .config import highQuality
from .config import RESOURCES_BASE_PATH
from PIL import Image, ImageDraw, ImageFilter, ImageFont


class User():
    """Holds the per-user text that will be drawn on the sign-in card."""

    def __init__(self, nickname, favorability, days, hitokoto):
        self._userNickname = nickname
        self._userFavorability = favorability
        self._userSignInDays = days
        self._userHitokoto = hitokoto
        # Runtime strings rendered onto the card (Chinese UI text):
        # '签 到 成 功' = "sign-in successful";
        # the next line = "sign-in days ... favorability ...".
        self._userInfo = '签 到 成 功'
        self._userInfoIntegration = f'签到天数 {self._userSignInDays} 好感度 {self._userFavorability}'


class SignIn(User):
    """Renders a QQ sign-in card: blurred avatar backdrop, magic circle, avatar, text."""

    FONT_REEJI = 'REEJI-HonghuangLiGB-SemiBold.ttf'
    FONT_ZHANKU = 'zhanku.ttf'

    def __init__(self, userQQ, nickname, favorability, days, hitokoto, avatarUrl, basemapSize = 640, avatarSize = 256):
        super().__init__(nickname, favorability, days, hitokoto)
        self._userQQ = userQQ
        self._basemapSize = basemapSize
        self._avatarSize = avatarSize
        self._avatarUrl = avatarUrl
        # Image slots start as FAILURE sentinels and are filled by the create* steps.
        self._img = Status.FAILURE
        self._roundImg = Status.FAILURE
        self._canvas = Status.FAILURE
        self._magicCircle = Status.FAILURE
        self._textBaseMap = Status.FAILURE
        # Layout constants (pixels).
        self._magicCirclePlus = 30
        self._avatarVerticalOffset = 50
        self._textBaseMapSize = (540, 160)
        self._topPositionOfTextBaseMap = 425
        self._textBaseMapLeftPosition = int((self._basemapSize - self._textBaseMapSize[0]) / 2)
        # Font shrink step and lower bound used when a line is too wide.
        self._fontAttenuation = 2
        self._minimumFontLimit = 10
        self._infoCoordinatesY = Tools.dictToObj({
            'nickname': self._topPositionOfTextBaseMap + 26,
            'info': self._topPositionOfTextBaseMap + 64,
            'integration': self._topPositionOfTextBaseMap + 102,
            'hitokoto': self._topPositionOfTextBaseMap + 137
        })
        self._infoFontSize = Tools.dictToObj({
            'nickname': 28,
            'info': 28,
            'integration': 25,
            'hitokoto': 25
        })
        self._infoFontName = Tools.dictToObj({
            'nickname': self.FONT_REEJI,
            'info': self.FONT_REEJI,
            'integration': self.FONT_REEJI,
            'hitokoto': self.FONT_ZHANKU
        })

    @staticmethod
    async def getPictures(url):
        """Download raw image bytes from *url*."""
        img = await Network.getBytes(url)
        return img

    async def createAvatar(self):
        """Fetch the avatar and scale it to the full basemap size (backdrop source)."""
        size = self._basemapSize
        avatarImgUrl = self._avatarUrl
        res = await self.getPictures(avatarImgUrl)
        self._img = self.resize(Image.open(BytesIO(res)).convert('RGBA'), (size, size))
        return self

    @staticmethod
    def resize(img, size):
        return img.copy().resize(size, Image.ANTIALIAS)

    @staticmethod
    def gaussianBlur(img, radius = 7):
        return img.copy().filter(ImageFilter.GaussianBlur(radius = radius))

    @staticmethod
    def imageRadiusProcessing(img, centralA, radius = 30):
        """Round the image's four corners.

        :centralA: A-channel value of the central area; the corners taper to 0
        (presumably 255 keeps the centre fully shown -- see original note).
        """
        # Draw one filled circle and split it into four quarter-round masks.
        circle = Image.new('L', (radius * 2, radius * 2), 0)
        draw = ImageDraw.Draw(circle)
        draw.ellipse((0, 0, radius * 2, radius * 2), fill = centralA)
        w, h = img.size
        alpha = Image.new('L', img.size, centralA)
        upperLeft, lowerLeft = circle.crop((0, 0, radius, radius)), circle.crop((0, radius, radius, radius * 2))
        upperRight, lowerRight = circle.crop((radius, 0, radius * 2, radius)), circle.crop((radius, radius, radius * 2, radius * 2))
        alpha.paste(upperLeft, (0, 0))
        alpha.paste(upperRight, (w - radius, 0))
        alpha.paste(lowerRight, (w - radius, h - radius))
        alpha.paste(lowerLeft, (0, h - radius))
        img.putalpha(alpha)
        return img

    def createRoundImg(self):
        """Cut the avatar into a circle of _avatarSize."""
        img = self._img
        size = self._avatarSize
        mask = Image.new('L', (size, size), 0)
        draw = ImageDraw.Draw(mask)
        draw.ellipse((0, 0, size, size), fill = 255)
        self._roundImg = self.resize(img, (size, size))
        self._roundImg.putalpha(mask)
        return self

    def createCanvas(self):
        """Create the square canvas with the blurred avatar as backdrop."""
        size = self._basemapSize
        self._canvas = Image.new('RGBA', (size, size), (0, 0, 0, 0))
        self._canvas.paste(self.gaussianBlur(self._img))
        return self

    def createAMagicCircle(self):
        """Load the magic-circle overlay, slightly larger than the avatar."""
        size = self._magicCirclePlus + self._avatarSize
        magicCircle = Image.open(f'{RESOURCES_BASE_PATH}/magic-circle.png').convert('L')
        magicCircle = self.resize(magicCircle, (size, size))
        self._magicCircle = Image.new('RGBA', (size, size), (0, 0, 0, 0))
        self._magicCircle.putalpha(magicCircle)
        return self

    def createTextBasemap(self, transparency = 190):
        """Create the semi-transparent rounded rectangle behind the text."""
        self._textBaseMap = Image.new('RGBA', self._textBaseMapSize, (0, 0, 0, transparency))
        self._textBaseMap = self.imageRadiusProcessing(self._textBaseMap, transparency)
        return self

    def additionalMagicCircle(self):
        """Composite the magic circle centred behind the avatar position."""
        magicCircle = self._magicCircle
        x = int((self._basemapSize - self._avatarSize - self._magicCirclePlus) / 2)
        y = x - self._avatarVerticalOffset
        self._canvas.paste(magicCircle, (x, y), magicCircle)
        return self

    def additionalAvatar(self):
        """Composite the round avatar onto the canvas."""
        avatar = self._roundImg
        x = int((self._basemapSize - self._avatarSize) / 2)
        y = x - self._avatarVerticalOffset
        self._canvas.paste(avatar, (x, y), avatar)
        return self

    def additionalTextBaseMap(self):
        """Composite the text background panel onto the canvas."""
        textBaseMap = self._textBaseMap
        x = int((self._basemapSize - self._textBaseMapSize[0]) / 2)
        y = self._topPositionOfTextBaseMap
        self._canvas.paste(textBaseMap, (x, y), textBaseMap)
        return self

    def writePicture(self, img, text, position, fontName, fontSize, color = (255, 255, 255)):
        """Draw *text* centred at *position*, shrinking the font until it fits.

        Returns Status.FAILURE when the text cannot fit even at the minimum size.
        """
        font = ImageFont.truetype(f'{RESOURCES_BASE_PATH}/font/{fontName}', fontSize)
        draw = ImageDraw.Draw(img)
        textSize = font.getsize(text)
        attenuation = self._fontAttenuation
        x = int(position[0] - textSize[0] / 2)
        limit = self._minimumFontLimit
        while x <= self._textBaseMapLeftPosition:
            fontSize -= attenuation
            if fontSize <= limit:
                return Status.FAILURE
            font = ImageFont.truetype(f'{RESOURCES_BASE_PATH}/font/{fontName}', fontSize)
            textSize = font.getsize(text)
            x = int(position[0] - textSize[0] / 2)
        y = int(position[1] - textSize[1] / 2)
        draw.text((x, y), text, color, font = font)
        return Status.SUCCESS

    def additionalSignInInformation(self):
        """Draw nickname, success message, integration line and hitokoto quote."""
        fontSize = self._infoFontSize
        coordinateY = self._infoCoordinatesY
        font = self._infoFontName
        x = int(self._basemapSize / 2)
        # Add user nickname
        result = self.writePicture(img = self._canvas, text = self._userNickname,
                                   position = (x, coordinateY.nickname),
                                   fontName = font.nickname, fontSize = fontSize.nickname)
        if result == Status.FAILURE:
            return Status.FAILURE
        # Add success message
        result = self.writePicture(img = self._canvas, text = self._userInfo,
                                   position = (x, coordinateY.info),
                                   fontName = font.info, fontSize = fontSize.info)
        if result == Status.FAILURE:
            return Status.FAILURE
        # Add integration information
        result = self.writePicture(img = self._canvas, text = self._userInfoIntegration,
                                   position = (x, coordinateY.integration),
                                   fontName = font.integration, fontSize = fontSize.integration)
        if result == Status.FAILURE:
            return Status.FAILURE
        # Addition hitokoto
        result = self.writePicture(img = self._canvas, text = self._userHitokoto,
                                   position = (x, coordinateY.hitokoto),
                                   fontName = font.hitokoto, fontSize = fontSize.hitokoto)
        if result == Status.FAILURE:
            return Status.FAILURE
        return self

    def save(self):
        """Write the finished card to the cache dir (PNG or JPEG per config)."""
        dir = f'{RESOURCES_BASE_PATH}/cache'
        Tools.checkFolder(dir)
        if highQuality:
            path = f'{RESOURCES_BASE_PATH}/cache/{self._userQQ}.png'
            self._canvas.save(path)
        else:
            path = f'{RESOURCES_BASE_PATH}/cache/{self._userQQ}.jpg'
            self._canvas.convert('RGB').save(path)

    async def drawing(self):
        """Run the full pipeline; returns Status.SUCCESS or Status.FAILURE."""
        # Start generating
        result = await self.createAvatar()
        result = (result.createRoundImg()
                  .createCanvas()
                  .createAMagicCircle()
                  .createTextBasemap()
                  # Start processing
                  .additionalMagicCircle()
                  .additionalAvatar()
                  .additionalTextBaseMap()
                  # Must be the last step
                  .additionalSignInInformation())
        if result == Status.FAILURE:
            return result
        # Save
        result.save()
        return Status.SUCCESS
nilq/baby-python
python
"""BP-STDP training script for a spiking network on MNIST (n3ml models)."""
import time
import argparse

import numpy as np
import matplotlib.pyplot as plt
import torch
import torchvision
import torchvision.transforms as transforms

import n3ml.model
import n3ml.encoder
import n3ml.optimizer

# Print tensors in full when debugging spike trains.
np.set_printoptions(threshold=np.inf, linewidth=np.nan)


class Plot:
    """Live dual-axis plot: accuracy (green, left) and loss (blue, right)."""
    def __init__(self):
        plt.ion()  # interactive mode so update() can redraw without blocking
        self.fig, self.ax = plt.subplots(figsize=(10, 10))
        self.ax2 = self.ax.twinx()
        plt.title('BP-STDP')

    def update(self, y1, y2):
        """Append/redraw curves; x is scaled by 30 because the caller logs
        every 30 images.

        :param y1: accuracy history
        :param y2: loss history
        """
        x = torch.arange(y1.shape[0]) * 30
        ax1 = self.ax
        ax2 = self.ax2
        ax1.plot(x, y1, 'g')
        ax2.plot(x, y2, 'b')
        ax1.set_xlabel('number of images')
        ax1.set_ylabel('accuracy', color='g')
        ax2.set_ylabel('loss', color='b')
        self.fig.canvas.draw()
        self.fig.canvas.flush_events()


def accuracy(r: torch.Tensor, label: int) -> torch.Tensor:
    """1.0 if the neuron with the most spikes over time matches `label`.

    :param r: (time interval, # classes) the spike trains of output neurons in T ms
    :param label:
    :return: 0./1. float scalar tensor
    """
    return (torch.argmax(torch.sum(r, dim=0)) == label).float()


def mse(r: torch.Tensor, z: torch.Tensor, label: int, epsilon: int = 4) -> torch.Tensor:
    """Squared mean of the BP-STDP error signal over the output spike trains.

    At each desired-spike time, the error is +1 if the target neuron was
    silent in the last `epsilon` ms and -1 for any non-target neuron that
    fired in that window.

    :param r: (time interval, # classes) the spike trains of output neurons in T ms
    :param z: (time interval, # classes) the desired spike trains in T ms
    :return: scalar loss tensor
    """
    e = torch.zeros_like(r)
    for t in range(e.size(0)):
        if z[t, label] > 0.5:  # a desired spike occurs at time t
            tt = t-epsilon if t-epsilon > 0 else 0  # clamp window start to 0
            for i in range(e.size(1)):
                if i == label:
                    if torch.sum(r[tt:t, i]) < 0.5:
                        e[t, i] = 1
                else:
                    if torch.sum(r[tt:t, i]) > 0.5:
                        e[t, i] = -1
    T = r.size(0)
    return (torch.sum(e, dim=[0, 1])/T)**2


def label_encoder(label, beta, num_classes, time_interval):
    """Encode a class label as a Poisson-like desired spike train.

    How many spikes can occur within one second?  That depends on whether we
    work in a continuous or a discrete time domain: in continuous time the
    maximum number of spikes is unbounded, whereas in discrete time it is
    limited by the time step.  The current implementation assumes a 1 ms time
    step when generating the spike train; generating a train appropriate for
    an arbitrary time step is left as future work.
    """
    r = torch.zeros((time_interval, num_classes))
    # Bernoulli draw per time step with rate beta Hz (time step = 1 ms).
    r[:, label] = torch.rand(time_interval) <= (beta/1000)
    return r


def validate(loader, model, encoder, criterion, opt):
    """Run the model over the loader; return (mean loss, accuracy).

    NOTE(review): inputs are moved to CUDA here while train() keeps them on
    CPU -- confirm the model's device before relying on this path.
    """
    num_images = 0
    total_loss = 0.0
    num_corrects = 0
    for image, label in loader:
        # Batch size is 1; drop the batch dimension.
        image = image.squeeze(dim=0).cuda()
        label = label.squeeze().cuda()

        spiked_image = encoder(image)
        spiked_image = spiked_image.view(spiked_image.size(0), -1)
        spiked_label = label_encoder(label, opt.beta, opt.num_classes, opt.time_interval)

        loss_buffer = []
        for t in range(opt.time_interval):
            model(spiked_image[t])
            loss_buffer.append(model.fc2.o.clone())

        # Reset neuron state (not weights) between images.
        model.reset_variables(w=False)

        num_images += 1
        num_corrects += accuracy(r=torch.stack(loss_buffer), label=label)
        total_loss += criterion(r=torch.stack(loss_buffer), z=spiked_label, label=label, epsilon=opt.epsilon)

    return total_loss/num_images, float(num_corrects)/num_images


def train(loader, model, encoder, optimizer, criterion, opt) -> None:
    """One epoch of BP-STDP training with live plotting every 30 images."""
    plotter = Plot()

    num_images = 0
    total_loss = 0.0
    num_corrects = 0

    list_loss = []
    list_acc = []

    for image, label in loader:
        # Squeeze batch dimension
        # Now, batch processing isn't supported
        image = image.squeeze(dim=0)
        label = label.squeeze()

        spiked_image = encoder(image)
        spiked_image = spiked_image.view(spiked_image.size(0), -1)
        spiked_label = label_encoder(label, opt.beta, opt.num_classes, opt.time_interval)

        # print(label)
        # print(spiked_label)
        # exit(0)

        # np_spiked_image = spiked_image.numpy()

        # Sliding window of recent spikes per layer, consumed by the
        # BP-STDP optimizer step.
        spike_buffer = {'inp': [], 'fc1': [], 'fc2': []}
        loss_buffer = []

        print()
        print("label: {}".format(label))

        for t in range(opt.time_interval):
            # print(np_spiked_image[t])
            model(spiked_image[t])

            spike_buffer['inp'].append(spiked_image[t].clone())
            spike_buffer['fc1'].append(model.fc1.o.clone())
            spike_buffer['fc2'].append(model.fc2.o.clone())
            loss_buffer.append(model.fc2.o.clone())

            # Keep only the most recent spikes in each buffer.
            for l in spike_buffer.values():
                if len(l) > 5:  # TODO: express 5 in terms of epsilon
                    l.pop(0)

            # print(model.fc1.u.numpy())
            # print(model.fc1.o.numpy())
            # print(model.fc2.u.numpy())
            print(model.fc2.o.numpy())
            # time.sleep(1)

            optimizer.step(spike_buffer, spiked_label[t], label)

        # Reset neuron state (not weights) between images.
        model.reset_variables(w=False)

        num_images += 1
        num_corrects += accuracy(r=torch.stack(loss_buffer), label=label)
        total_loss += criterion(r=torch.stack(loss_buffer), z=spiked_label, label=label, epsilon=opt.epsilon)

        if num_images > 0 and num_images % 30 == 0:
            list_loss.append(total_loss / num_images)
            list_acc.append(float(num_corrects) / num_images)
            plotter.update(y1=np.array(list_acc), y2=np.array(list_loss))
            # print("loss: {} - accuracy: {}".format(total_loss/num_images, float(num_corrects)/num_images))

    # return total_loss/num_images, float(num_corrects)/num_images


def app(opt):
    """Build loaders/model/encoder/optimizer from `opt` and run training."""
    print(opt)

    # Load MNIST
    train_loader = torch.utils.data.DataLoader(
        torchvision.datasets.MNIST(
            opt.data,
            train=True,
            transform=torchvision.transforms.Compose([transforms.ToTensor()])),
        batch_size=opt.batch_size,
        shuffle=True)
    val_loader = torch.utils.data.DataLoader(
        torchvision.datasets.MNIST(
            opt.data,
            train=False,
            transform=torchvision.transforms.Compose([transforms.ToTensor()])),
        batch_size=opt.batch_size,
        shuffle=False)

    # Make a model
    model = n3ml.model.TravanaeiAndMaida2017(opt.num_classes, hidden_neurons=opt.hidden_neurons)
    model.reset_variables()

    # Make an encoder
    encoder = n3ml.encoder.Simple(time_interval=opt.time_interval)

    # Make an optimizer
    optimizer = n3ml.optimizer.TavanaeiAndMaida(model, lr=opt.lr)

    # Define a loss
    criterion = mse

    for epoch in range(opt.num_epochs):
        # loss, acc = train(train_loader, model, encoder, optimizer, criterion, opt)
        # print("epoch: {} - loss: {} - accuracy: {}".format(epoch, loss, acc))
        train(train_loader, model, encoder, optimizer, criterion, opt)

        loss, acc = validate(val_loader, model, encoder, criterion, opt)
        print("In test, loss: {} - accuracy: {}".format(loss, acc))


if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument('--data', default='data')
    parser.add_argument('--num_classes', default=10, type=int)
    parser.add_argument('--num_epochs', default=120, type=int)
    parser.add_argument('--batch_size', default=1, type=int)
    parser.add_argument('--time_interval', default=20, type=int)
    parser.add_argument('--beta', default=250, type=float)  # 250 Hz
    parser.add_argument('--lr', default=0.0005, type=float)
    parser.add_argument('--hidden_neurons', default=500, type=int)
    parser.add_argument('--epsilon', default=4, type=int)
    app(parser.parse_args())
nilq/baby-python
python
import numpy as np

from c4.tables import rev_segments, all_segments

# Cell/outcome constants. DRAW doubles as "game over with no winner".
PLAYER1 = 1
PLAYER2 = 2
DRAW = 0
COMPUTE = -1


class WrongMoveError(Exception):
    """Raised when a disc is dropped into a full column."""
    pass


class Board(object):
    """Immutable Connect-Four position.

    The grid is stored column-major: ``pos[col][row]`` with row 0 at the
    bottom.  ``move`` returns a *new* Board rather than mutating in place.
    """

    def __init__(self, pos=None, stm=PLAYER1, end=COMPUTE, cols=7, rows=6):
        """Create a position.

        :param pos: cols x rows int array (0 = empty), or None for empty board
        :param stm: side to move (PLAYER1 or PLAYER2)
        :param end: PLAYER1/PLAYER2/DRAW/None, or COMPUTE to derive from pos
        """
        if pos is None:
            pos = np.zeros((cols, rows), dtype=int)
        self._pos = pos
        self._stm = stm
        if end == COMPUTE:
            self._end = self._check_end(pos)
        else:
            self._end = end

    @property
    def end(self):
        """Game result: a winner id, DRAW, or None while still in progress."""
        return self._end

    @property
    def stm(self):
        """Side to move."""
        return self._stm

    @property
    def other(self):
        """Side *not* to move."""
        return PLAYER1 if self._stm != PLAYER1 else PLAYER2

    @classmethod
    def _check_end(cls, pos):
        """Full scan of every 4-cell segment; full board with no win = DRAW."""
        for seg in cls.segments(pos):
            c = np.bincount(seg)
            if c[0]:
                continue  # segment contains an empty cell, cannot be a win
            if c[PLAYER1] == 4:
                return PLAYER1
            elif c[PLAYER2] == 4:
                return PLAYER2
        if pos.all():
            return DRAW
        else:
            return None

    @classmethod
    def _check_end_around(cls, pos, r, c, side):
        """Cheap end check: only the segments through (r, c), for ``side``
        (sufficient after a single move by ``side``)."""
        if (cls.segments_around(pos, r, c) == side).all(1).any():
            return side
        if pos.all():
            return DRAW
        else:
            return None

    @classmethod
    def segments(cls, pos):
        """All 4-cell segments of ``pos`` as a 2-D array of cell values."""
        if isinstance(pos, Board):
            return cls.segments(pos._pos)
        else:
            pos = pos.flatten()
            return pos[all_segments]

    @classmethod
    def segments_around(cls, pos, r, c):
        """Only the 4-cell segments that pass through cell (r, c)."""
        if isinstance(pos, Board):
            return cls.segments_around(pos._pos, r, c)
        else:
            idx = c * pos.shape[1] + r
            pos = pos.flatten()
            return pos[rev_segments[idx]]

    def __str__(self):
        """ASCII rendering with row letters, column numbers and a status line."""
        disc = {
            0: ' ',
            1: 'X',
            2: 'O'
        }
        s = []
        for row in reversed(self._pos.transpose()):
            s.append(' | '.join(disc[x] for x in row))
        s.append(' | '.join('-'*7))
        s.append(' | '.join(map(str, range(1, 8))))
        s = ['| ' + x + ' |' for x in s]
        s = [i + ' ' + x for i, x in zip('ABCDEFG ', s)]
        s = '\n'.join(s)
        if self._end == DRAW:
            # BUG FIX: this used to be "'\n<<< Game over: draw' % [self._end]".
            # The template has no conversion specifier, so printing any drawn
            # position raised "TypeError: not all arguments converted during
            # string formatting".  (Also switched the fragile `is DRAW`
            # small-int identity test to `==`.)
            s += '\n<<< Game over: draw'
        elif self._end is not None:
            s += '\n<<< Game over: %s win' % disc[self._end]
        else:
            s += '\n<<< Move to %s' % disc[self._stm]
        return s

    def move(self, m):
        """Drop a disc of the side to move into column ``m`` (0-6).

        :return: the resulting Board (with end status updated)
        :raises ValueError: if the column index is out of range
        :raises WrongMoveError: if the column is already full
        """
        if not (0 <= m < 7):
            raise ValueError(m)

        pos = self._pos.copy()
        r = pos[m].argmin()  # lowest empty row in that column
        if pos[m][r] != 0:
            raise WrongMoveError('Full Column')
        pos[m][r] = self._stm
        end = self._check_end_around(pos, r, m, self._stm)
        stm = self.other
        return Board(pos, stm, end)

    def freerow(self, m):
        """Lowest empty row index of column ``m``, or None if the column is full."""
        r = self._pos[m].argmin()
        if self._pos[m][r] != 0:
            return None
        return r

    def moves(self):
        """Indices of playable (non-full) columns."""
        return np.flatnonzero(self._pos[:, -1] == 0)

    def hashkey(self):
        """Generates an hashkey (base-3 encoding of the cells).

        Returns a tuple (key, flip)
        flip is True if it returned the key of the symmetric (mirrored) Board.
        The smaller of the two keys is returned so mirror positions share
        one transposition-table entry.
        """
        k1 = 0
        k2 = 0

        for x in self._pos.flat:
            k1 *= 3
            k1 += int(x)
        assert k1 >= 0

        for x in self._pos[::-1].flat:  # columns mirrored left-right
            k2 *= 3
            k2 += int(x)
        assert k2 >= 0

        if k2 < k1:
            return k2, True
        else:
            return k1, False
nilq/baby-python
python
from math import sqrt

__author__ = "Samvid Mistry"

import time

from MAnimations.MAnimate import MAnimate
from PySide.QtGui import QApplication, QPainterPath
from PySide.QtCore import QPoint, QPointF


class MCircularReveal(MAnimate):
    """
    Can be used to perform circular reveal or circular hide animation
    on an MShape object. Requires self.target to be either
    'show[_circle]' or 'hide[_circle]'
    """
    def __init__(self):
        MAnimate.__init__(self)
        # Scratch clip path attribute; per-frame paths are built fresh in
        # animate(), so this is effectively unused state.
        self.__clip = QPainterPath()

    def animate(self, shapes):
        """Drive the reveal/hide by growing/shrinking each shape's clip
        ellipse once per frame until every shape reaches its target radius.

        Blocks the calling thread for the duration of the animation and
        emits the MAnimate start/pause/resume/cancel/end signals.
        """
        self.start_signal.emit()
        time.sleep(self.start_delay)
        self.running = True
        self.ended = False
        target_radius = []
        original_clips = []
        centers = []
        animating_radius = []
        rate_of_change = []
        # Per-shape setup: target radius, starting radius and per-frame delta.
        for s in shapes:
            if self.target.startswith("show"):
                # Setting max of width or height as radius, ergo "circular" reveal,
                # not "oval" reveal
                side = max(s.width, s.height)
                side_square = side * side
                # Applying pythagoras theorem
                target = sqrt(side_square + side_square)
                # Starting from the zero reaching the max
                animating_radius.append(0)
                # delta per frame = (distance / fps) * (1000 / duration_ms)
                rate_of_change.append((target / self.fps) * (1000 / self.duration))
            elif self.target.startswith("hide"):
                # You know why...
                target = 0
                # Starting from the max reaching the 0
                # (negative rate: radius shrinks each frame)
                animating_radius.append(max(s.width, s.height))
                rate_of_change.append(((target - max(s.width, s.height)) / self.fps) * (1000 / self.duration))
            else:
                # NOTE(review): message says 'reveal'/'hide' but the code
                # checks for 'show*'/'hide*' -- message is misleading.
                raise ValueError("Target should be either 'reveal' or 'hide'")
            target_radius.append(target)
            # Getting the original masks; Used in case of cancellation
            original_clips.append(s.clip)
            # Center of the shape, considering margin
            # NOTE(review): `centers` is filled but never read below.
            centers.append(QPoint((s.width / 2) + s.margin_left, (s.height / 2) + s.margin_top))
            # Calculating the increase rate using the good ol' formula

        # Frame loop / animation state machine.
        while self.running or self.paused:
            if self.canceled and not self.paused:
                # Cancel: restore every shape's original clip mask.
                for i, s in enumerate(shapes):
                    s.clip = original_clips[i]
                self.cancel_signal.emit()
                return
            elif self.ended:
                self.end_signal.emit()
                return
            elif self.paused:
                # Handling the pause
                self.pause_signal.emit()
                while self.paused:
                    # If you want the current state, pause the
                    # animation and then cancel it
                    # (note: this path intentionally does NOT restore clips)
                    if self.canceled:
                        self.ended = True
                        self.started = False
                        self.cancel_signal.emit()
                        return
                self.resume_signal.emit()
            else:
                # Setting FPS from the animator
                # NOTE(review): under Python 2 `1 / self.fps` is integer
                # division (0 for fps > 1) -- confirm intended interpreter.
                time.sleep(1 / self.fps)
                completed = False
                for i, s in enumerate(shapes):
                    # Sign of the rate tells us whether we grow or shrink.
                    if rate_of_change[i] > 0:
                        if not animating_radius[i] < target_radius[i]:
                            completed = True
                    else:
                        # TODO: leaves 1 pixel visible in hiding the check,
                        # added 1 to overall radius checking for now, look into this issue
                        if not animating_radius[i] > target_radius[i] + 1:
                            completed = True
                    if not completed:
                        animating_radius[i] += rate_of_change[i]
                        path = QPainterPath()
                        if self.target.endswith("circle"):
                            # "*_circle" variants use the radius as a diameter.
                            path.addEllipse(
                                QPointF((s.width / 2) + s.margin_left,
                                        (s.height / 2) + s.margin_top),
                                animating_radius[i] / 2,
                                animating_radius[i] / 2
                            )
                        else:
                            path.addEllipse(
                                QPointF((s.width / 2) + s.margin_left,
                                        (s.height / 2) + s.margin_top),
                                animating_radius[i],
                                animating_radius[i]
                            )
                        s.clip = path
                        s.update()
                        QApplication.processEvents()
                # No need to check on every iteration, duration is same so
                # all objects are gonna end at the same time
                if completed:
                    self.end_signal.emit()
                    self.started = False
                    self.ended = True
                    return
nilq/baby-python
python
import tensorflow as tf import numpy as np import sys, os import getopt import random import datetime import traceback import pandas as pd import cfr.cfr_net as cfr from cfr.util import * ''' Define parameter flags ''' FLAGS = tf.app.flags.FLAGS tf.app.flags.DEFINE_string('loss', 'l2', """Which loss function to use (l1/l2/log)""") tf.app.flags.DEFINE_integer('n_in', 2, """Number of representation layers. """) tf.app.flags.DEFINE_integer('n_out', 2, """Number of regression layers. """) tf.app.flags.DEFINE_float('p_alpha', 1e-4, """Imbalance regularization param. """) tf.app.flags.DEFINE_float('p_lambda', 0.0, """Weight decay regularization parameter. """) tf.app.flags.DEFINE_integer('rep_weight_decay', 1, """Whether to penalize representation layers with weight decay""") tf.app.flags.DEFINE_float('dropout_in', 0.9, """Input layers dropout keep rate. """) tf.app.flags.DEFINE_float('dropout_out', 0.9, """Output layers dropout keep rate. """) tf.app.flags.DEFINE_string('nonlin', 'relu', """Kind of non-linearity. Default relu. """) tf.app.flags.DEFINE_float('lrate', 0.05, """Learning rate. """) tf.app.flags.DEFINE_float('decay', 0.5, """RMSProp decay. """) tf.app.flags.DEFINE_integer('batch_size', 100, """Batch size. """) tf.app.flags.DEFINE_integer('dim_in', 100, """Pre-representation layer dimensions. """) tf.app.flags.DEFINE_integer('dim_out', 100, """Post-representation layer dimensions. """) tf.app.flags.DEFINE_integer('batch_norm', 0, """Whether to use batch normalization. """) tf.app.flags.DEFINE_string('normalization', 'none', """How to normalize representation (after batch norm). none/bn_fixed/divide/project """) tf.app.flags.DEFINE_float('rbf_sigma', 0.1, """RBF MMD sigma """) tf.app.flags.DEFINE_integer('experiments', 1, """Number of experiments. """) tf.app.flags.DEFINE_integer('iterations', 2000, """Number of iterations. """) tf.app.flags.DEFINE_float('weight_init', 0.01, """Weight initialization scale. 
""") tf.app.flags.DEFINE_float('lrate_decay', 0.95, """Decay of learning rate every 100 iterations """) tf.app.flags.DEFINE_integer('wass_iterations', 20, """Number of iterations in Wasserstein computation. """) tf.app.flags.DEFINE_float('wass_lambda', 1, """Wasserstein lambda. """) tf.app.flags.DEFINE_integer('wass_bpt', 0, """Backprop through T matrix? """) tf.app.flags.DEFINE_integer('varsel', 0, """Whether the first layer performs variable selection. """) tf.app.flags.DEFINE_string('outdir', '../results/tfnet_topic/alpha_sweep_22_d100/', """Output directory. """) tf.app.flags.DEFINE_string('datadir', '../data/topic/csv/', """Data directory. """) tf.app.flags.DEFINE_string('dataform', 'topic_dmean_seed_%d.csv', """Training data filename form. """) tf.app.flags.DEFINE_string('data_test', '', """Test data filename form. """) tf.app.flags.DEFINE_integer('sparse', 0, """Whether data is stored in sparse format (.x, .y). """) tf.app.flags.DEFINE_integer('seed', 1, """Seed. """) tf.app.flags.DEFINE_integer('repetitions', 1, """Repetitions with different seed.""") tf.app.flags.DEFINE_integer('use_p_correction', 1, """Whether to use population size p(t) in mmd/disc/wass.""") tf.app.flags.DEFINE_string('optimizer', 'RMSProp', """Which optimizer to use. (RMSProp/Adagrad/GradientDescent/Adam)""") tf.app.flags.DEFINE_string('imb_fun', 'mmd_lin', """Which imbalance penalty to use (mmd_lin/mmd_rbf/mmd2_lin/mmd2_rbf/lindisc/wass). """) tf.app.flags.DEFINE_integer('output_csv',0,"""Whether to save a CSV file with the results""") tf.app.flags.DEFINE_integer('output_delay', 100, """Number of iterations between log/loss outputs. """) tf.app.flags.DEFINE_integer('pred_output_delay', -1, """Number of iterations between prediction outputs. (-1 gives no intermediate output). """) tf.app.flags.DEFINE_integer('debug', 0, """Debug mode. """) tf.app.flags.DEFINE_integer('save_rep', 0, """Save representations after training. """) tf.app.flags.DEFINE_float('val_part', 0, """Validation part. 
""") tf.app.flags.DEFINE_boolean('split_output', 0, """Whether to split output layers between treated and control. """) tf.app.flags.DEFINE_boolean('reweight_sample', 1, """Whether to reweight sample for prediction loss with average treatment probability. """) tf.app.flags.DEFINE_boolean('residual_block', 1, """Whether to use residual block for the output layers. """) tf.app.flags.DEFINE_boolean('embeddings', 0, """Whether to use embeddings as student features. """) tf.app.flags.DEFINE_string('rname', '../LSTM-autoencoder/result.pkl', """The file contains student representations. """) tf.app.flags.DEFINE_boolean('rnn', 0, """Whether to use rnn to extract features from student logs. """) tf.app.flags.DEFINE_string('ps', '', """The problem set id""") tf.app.flags.DEFINE_integer('hidden_num', 50, """The size of hidden layer in rnn""") tf.app.flags.DEFINE_boolean('trainable_embed', 0, """when rnn = 1, whether to use embeddings to represent problem sets""") FLAGS.dim_out = FLAGS.dim_in if FLAGS.sparse: import scipy.sparse as sparse NUM_ITERATIONS_PER_DECAY = 100 __DEBUG__ = False if FLAGS.debug: __DEBUG__ = True def train(CFR, sess, train_step, D, I_valid, D_test, logfile, i_exp, user_ids=None, test_user_ids=None, x_dict=None, len_dict=None, p_input=None, seq_len=None, ps_dict=None, sq_embed_idx=None): """ Trains a CFR model on supplied data """ ''' Train/validation split ''' n = D['x'].shape[0] I = range(n) I_train = list(set(I)-set(I_valid)) n_train = len(I_train) ''' Compute treatment probability''' p_treated = np.mean(D['t'][I_train,:]) ''' Set up loss feed_dicts''' if FLAGS.rnn: # load all data l = [] train_all_len = [] train_all_embed = [] for ite in user_ids: l.append(x_dict[ite]) train_all_len.append(len_dict[ite]) if FLAGS.trainable_embed: train_all_embed.append(ps_dict[ite]) train_all_x = np.stack(l, axis=0) if FLAGS.trainable_embed: train_all_embed = np.stack(train_all_embed, axis=0) l = [] test_all_len = [] test_all_embed = [] for ite in test_user_ids: 
l.append(x_dict[ite]) test_all_len.append(len_dict[ite]) if FLAGS.trainable_embed: test_all_embed.append(ps_dict[ite]) test_all_x = np.stack(l, axis=0) if FLAGS.trainable_embed: test_all_embed = np.stack(test_all_embed, axis=0) l = [] train_len = [] train_embed = [] for ite in user_ids[I_train]: l.append(x_dict[ite]) train_len.append(len_dict[ite]) if FLAGS.trainable_embed: train_embed.append(ps_dict[ite]) train_x = np.stack(l, axis=0) if FLAGS.trainable_embed: train_embed = np.stack(train_embed, axis=0) if FLAGS.trainable_embed: dict_factual = {p_input: train_x, seq_len: train_len, sq_embed_idx:train_embed, CFR.t: D['t'][I_train,:], CFR.y_: D['yf'][I_train,:], CFR.do_in: 1.0, CFR.do_out: 1.0, CFR.r_alpha: FLAGS.p_alpha, CFR.r_lambda: FLAGS.p_lambda, CFR.p_t: p_treated} else: dict_factual = {p_input: train_x, seq_len: train_len, CFR.t: D['t'][I_train,:], CFR.y_: D['yf'][I_train,:], CFR.do_in: 1.0, CFR.do_out: 1.0, CFR.r_alpha: FLAGS.p_alpha, CFR.r_lambda: FLAGS.p_lambda, CFR.p_t: p_treated} else: dict_factual = {CFR.x: D['x'][I_train,:], CFR.t: D['t'][I_train,:], CFR.y_: D['yf'][I_train,:], CFR.do_in: 1.0, CFR.do_out: 1.0, CFR.r_alpha: FLAGS.p_alpha, CFR.r_lambda: FLAGS.p_lambda, CFR.p_t: p_treated} if FLAGS.val_part > 0: if FLAGS.rnn: l = [] valid_len = [] valid_embed = [] for ite in user_ids[I_valid]: l.append(x_dict[ite]) valid_len.append(len_dict[ite]) if FLAGS.trainable_embed: valid_embed.append(ps_dict[ite]) valid_x = np.stack(l, axis=0) if FLAGS.trainable_embed: dict_valid = {p_input: valid_x, seq_len: valid_len, sq_embed_idx: valid_embed, CFR.t: D['t'][I_valid,:], CFR.y_: D['yf'][I_valid,:], CFR.do_in: 1.0, CFR.do_out: 1.0, CFR.r_alpha: FLAGS.p_alpha, CFR.r_lambda: FLAGS.p_lambda, CFR.p_t: p_treated} else: dict_valid = {p_input: valid_x, seq_len: valid_len, CFR.t: D['t'][I_valid,:], CFR.y_: D['yf'][I_valid,:], CFR.do_in: 1.0, CFR.do_out: 1.0, CFR.r_alpha: FLAGS.p_alpha, CFR.r_lambda: FLAGS.p_lambda, CFR.p_t: p_treated} else: dict_valid = {CFR.x: 
D['x'][I_valid,:], CFR.t: D['t'][I_valid,:], CFR.y_: D['yf'][I_valid,:], CFR.do_in: 1.0, CFR.do_out: 1.0, CFR.r_alpha: FLAGS.p_alpha, CFR.r_lambda: FLAGS.p_lambda, CFR.p_t: p_treated} ''' Initialize TensorFlow variables ''' sess.run(tf.global_variables_initializer()) ''' Set up for storing predictions ''' preds_train = [] preds_test = [] ''' Compute losses ''' losses = [] obj_loss, f_error, imb_err = sess.run([CFR.tot_loss, CFR.pred_loss, CFR.imb_dist], feed_dict=dict_factual) cf_error = np.nan if D['HAVE_TRUTH']: cf_error = sess.run(CFR.pred_loss, feed_dict=dict_cfactual) valid_obj = np.nan; valid_imb = np.nan; valid_f_error = np.nan; if FLAGS.val_part > 0: valid_obj, valid_f_error, valid_imb = sess.run([CFR.tot_loss, CFR.pred_loss, CFR.imb_dist], feed_dict=dict_valid) losses.append([obj_loss, f_error, cf_error, imb_err, valid_f_error, valid_imb, valid_obj]) objnan = False reps = [] reps_test = [] ''' Train for multiple iterations ''' for i in range(FLAGS.iterations): ''' Fetch sample ''' I = random.sample(range(0, n_train), FLAGS.batch_size) x_batch = D['x'][I_train,:][I,:] t_batch = D['t'][I_train,:][I] y_batch = D['yf'][I_train,:][I] if FLAGS.rnn: user_batch = user_ids[I_train][I] l = [] batch_len = [] batch_embed = [] for ite in user_batch: l.append(x_dict[ite]) batch_len.append(len_dict[ite]) if FLAGS.trainable_embed: batch_embed.append(ps_dict[ite]) x_batch = np.stack(l, axis=0) ''' Do one step of gradient descent ''' if not objnan: if FLAGS.rnn: if FLAGS.trainable_embed: sess.run(train_step, feed_dict={p_input: x_batch, seq_len: batch_len, sq_embed_idx: batch_embed, CFR.t: t_batch, CFR.y_: y_batch, CFR.do_in: FLAGS.dropout_in, CFR.do_out: FLAGS.dropout_out, CFR.r_alpha: FLAGS.p_alpha, CFR.r_lambda: FLAGS.p_lambda, CFR.p_t: p_treated}) else: sess.run(train_step, feed_dict={p_input: x_batch, seq_len: batch_len, CFR.t: t_batch, CFR.y_: y_batch, CFR.do_in: FLAGS.dropout_in, CFR.do_out: FLAGS.dropout_out, CFR.r_alpha: FLAGS.p_alpha, CFR.r_lambda: FLAGS.p_lambda, 
CFR.p_t: p_treated}) else: sess.run(train_step, feed_dict={CFR.x: x_batch, CFR.t: t_batch, \ CFR.y_: y_batch, CFR.do_in: FLAGS.dropout_in, CFR.do_out: FLAGS.dropout_out, \ CFR.r_alpha: FLAGS.p_alpha, CFR.r_lambda: FLAGS.p_lambda, CFR.p_t: p_treated}) ''' Project variable selection weights ''' if FLAGS.varsel: wip = simplex_project(sess.run(CFR.weights_in[0]), 1) sess.run(CFR.projection, feed_dict={CFR.w_proj: wip}) ''' Compute loss every N iterations ''' if i % FLAGS.output_delay == 0 or i==FLAGS.iterations-1: obj_loss,f_error,imb_err = sess.run([CFR.tot_loss, CFR.pred_loss, CFR.imb_dist], feed_dict=dict_factual) #rep = sess.run(CFR.h_rep_norm, feed_dict={CFR.x: D['x'], CFR.do_in: 1.0}) #rep_norm = np.mean(np.sqrt(np.sum(np.square(rep), 1))) cf_error = np.nan if D['HAVE_TRUTH']: cf_error = sess.run(CFR.pred_loss, feed_dict=dict_cfactual) valid_obj = np.nan; valid_imb = np.nan; valid_f_error = np.nan; if FLAGS.val_part > 0: valid_obj, valid_f_error, valid_imb = sess.run([CFR.tot_loss, CFR.pred_loss, CFR.imb_dist], feed_dict=dict_valid) losses.append([obj_loss, f_error, cf_error, imb_err, valid_f_error, valid_imb, valid_obj]) loss_str = str(i) + '\tObj: %.3f,\tF: %.3f,\tCf: %.3f,\tImb: %.2g,\tVal: %.3f,\tValImb: %.2g,\tValObj: %.2f' \ % (obj_loss, f_error, cf_error, imb_err, valid_f_error, valid_imb, valid_obj) if FLAGS.loss == 'log': if FLAGS.rnn: if FLAGS.trainable_embed: y_pred = sess.run(CFR.output, feed_dict={p_input: x_batch, seq_len: batch_len, sq_embed_idx: batch_embed, CFR.t: t_batch, CFR.do_in: 1.0, CFR.do_out: 1.0}) else: y_pred = sess.run(CFR.output, feed_dict={p_input: x_batch, seq_len: batch_len, CFR.t: t_batch, CFR.do_in: 1.0, CFR.do_out: 1.0}) else: y_pred = sess.run(CFR.output, feed_dict={CFR.x: x_batch, CFR.t: t_batch, CFR.do_in: 1.0, CFR.do_out: 1.0}) y_pred = 1.0*(y_pred > 0.5) acc = 100*(1 - np.mean(np.abs(y_batch - y_pred))) loss_str += ',\tAcc: %.2f%%' % acc log(logfile, loss_str) if np.isnan(obj_loss): log(logfile,'Experiment %d: Objective is 
NaN. Skipping.' % i_exp) objnan = True ''' Compute predictions every M iterations ''' if (FLAGS.pred_output_delay > 0 and i % FLAGS.pred_output_delay == 0) or i==FLAGS.iterations-1: if FLAGS.rnn: if FLAGS.trainable_embed: y_pred_f = sess.run(CFR.output, feed_dict={p_input: train_all_x, seq_len: train_all_len,sq_embed_idx: train_all_embed, CFR.t: D['t'], CFR.do_in: 1.0, CFR.do_out: 1.0}) y_pred_cf = sess.run(CFR.output, feed_dict={p_input: train_all_x, seq_len: train_all_len,sq_embed_idx: train_all_embed, CFR.t: 1-D['t'], CFR.do_in: 1.0, CFR.do_out: 1.0}) else: y_pred_f = sess.run(CFR.output, feed_dict={p_input: train_all_x, seq_len: train_all_len, CFR.t: D['t'], CFR.do_in: 1.0, CFR.do_out: 1.0}) y_pred_cf = sess.run(CFR.output, feed_dict={p_input: train_all_x, seq_len: train_all_len, CFR.t: 1-D['t'], CFR.do_in: 1.0, CFR.do_out: 1.0}) else: y_pred_f = sess.run(CFR.output, feed_dict={CFR.x: D['x'], \ CFR.t: D['t'], CFR.do_in: 1.0, CFR.do_out: 1.0}) y_pred_cf = sess.run(CFR.output, feed_dict={CFR.x: D['x'], \ CFR.t: 1-D['t'], CFR.do_in: 1.0, CFR.do_out: 1.0}) preds_train.append(np.concatenate((y_pred_f, y_pred_cf),axis=1)) if D_test is not None: if FLAGS.rnn: if FLAGS.trainable_embed: y_pred_f_test = sess.run(CFR.output, feed_dict={p_input: test_all_x, seq_len: test_all_len, sq_embed_idx: test_all_embed, CFR.t: D_test['t'], CFR.do_in: 1.0, CFR.do_out: 1.0}) y_pred_cf_test = sess.run(CFR.output, feed_dict={p_input: test_all_x, seq_len: test_all_len, sq_embed_idx: test_all_embed, CFR.t: 1-D_test['t'], CFR.do_in: 1.0, CFR.do_out: 1.0}) else: y_pred_f_test = sess.run(CFR.output, feed_dict={p_input: test_all_x, seq_len: test_all_len, CFR.t: D_test['t'], CFR.do_in: 1.0, CFR.do_out: 1.0}) y_pred_cf_test = sess.run(CFR.output, feed_dict={p_input: test_all_x, seq_len: test_all_len, CFR.t: 1-D_test['t'], CFR.do_in: 1.0, CFR.do_out: 1.0}) else: y_pred_f_test = sess.run(CFR.output, feed_dict={CFR.x: D_test['x'], \ CFR.t: D_test['t'], CFR.do_in: 1.0, CFR.do_out: 1.0}) 
y_pred_cf_test = sess.run(CFR.output, feed_dict={CFR.x: D_test['x'], \ CFR.t: 1-D_test['t'], CFR.do_in: 1.0, CFR.do_out: 1.0}) preds_test.append(np.concatenate((y_pred_f_test, y_pred_cf_test),axis=1)) return losses, preds_train, preds_test, reps, reps_test def run(outdir): """ Runs an experiment and stores result in outdir """ ''' Set up paths and start log ''' npzfile = outdir+'result' npzfile_test = outdir+'result.test' repfile = outdir+'reps' repfile_test = outdir+'reps.test' outform = outdir+'y_pred' outform_test = outdir+'y_pred.test' lossform = outdir+'loss' logfile = outdir+'log.txt' f = open(logfile,'w') f.close() dataform = FLAGS.datadir + FLAGS.dataform has_test = False if not FLAGS.data_test == '': # if test set supplied has_test = True dataform_test = FLAGS.datadir + FLAGS.data_test ''' Set random seeds ''' random.seed(FLAGS.seed) tf.set_random_seed(FLAGS.seed) np.random.seed(FLAGS.seed) ''' Save parameters ''' save_config(outdir+'config.txt') log(logfile, 'Training with hyperparameters: alpha=%.2g, lambda=%.2g' % (FLAGS.p_alpha,FLAGS.p_lambda)) ''' Load Data ''' npz_input = False if dataform[-3:] == 'npz': npz_input = True if npz_input: datapath = dataform if has_test: datapath_test = dataform_test else: datapath = dataform if has_test: datapath_test = dataform_test log(logfile, 'Training data: ' + datapath) if has_test: log(logfile, 'Test data: ' + datapath_test) #D = load_data(datapath) D = load_assistments_data(datapath, rname=FLAGS.rname, embeddings=FLAGS.embeddings) D_test = None if has_test: D_test = load_assistments_data(datapath_test, rname=FLAGS.rname, embeddings=FLAGS.embeddings) log(logfile, 'Loaded data with shape [%d,%d]' % (D['n'], D['dim'])) ''' Start Session ''' sess = tf.Session() ''' Parameter placeholders ''' r_alpha = tf.placeholder("float", name='r_alpha') r_lambda = tf.placeholder("float", name='r_lambda') do_in = tf.placeholder("float", name='dropout_in') do_out = tf.placeholder("float", name='dropout_out') p = 
tf.placeholder("float", name='p_treated') ''' Initialize input placeholders ''' if FLAGS.rnn: problem_set = FLAGS.ps trainable_embed = FLAGS.trainable_embed if trainable_embed: file_path = '../lstm-autoencoder/'+str(problem_set)+'_sq_train_data.csv' else: file_path = '../lstm-autoencoder/'+str(problem_set)+'_pl_train_data.csv' hidden_num = FLAGS.hidden_num pl_df = pd.read_csv(file_path) # the number of features d_num = 3 if trainable_embed else 2 elem_num = len(pl_df.columns)-d_num # group by students pl_df.set_index('id', inplace=True) pl_g = pl_df.groupby('user_id') cnt_list = [] for name,group in pl_g: cnt = len(group) cnt_list.append(cnt) max_len = max(cnt_list) avg_len = sum(cnt_list)/len(cnt_list) max_max_len = int(np.percentile(cnt_list, 70)) print 'max len {}'.format(max_len) print 'avg len {}'.format(avg_len) print 'max max len {}'.format(max_max_len) max_len = min(max_len, max_max_len) if trainable_embed: # load ps list if FLAGS.rnn: ps_file = '../lstm-autoencoder/'+str(problem_set)+'_ps_index' else: ps_file = '../lstm-autoencoder/2016_ps_index' ps_list = [] with open(ps_file) as f: for line in f: ps_list.append(int(line)) sq_embed_idx = tf.placeholder(tf.int32, [None, max_len]) #max_len = 1000 for i in range(len(cnt_list)): if cnt_list[i] > max_len: cnt_list[i] = max_len # get user id list user_list = pl_df['user_id'].unique().tolist() x_dict = {} len_dict = {} if trainable_embed: ps_dict = {} for ite in user_list: m = pl_g.get_group(ite).iloc[:, :-1*(d_num-1)].as_matrix() if trainable_embed: seq_ids = pl_g.get_group(ite)['sequence_id'].tolist() embed_ids = [] for seq_id in seq_ids: if seq_id in ps_list: tmp_idx = ps_list.index(seq_id) embed_ids.append(tmp_idx) else: embed_ids.append(len(ps_list)) if max_len >= m.shape[0]: len_dict[ite] = m.shape[0] diff = max_len - m.shape[0] x_dict[ite] = np.pad(m, ((0,diff), (0,0)), mode='constant', constant_values=0) if trainable_embed: ps_dict[ite] = embed_ids + [0]*diff else: len_dict[ite] = max_len x_dict[ite] = 
m[-1*max_len:, :] if trainable_embed: ps_dict[ite] = embed_ids[-1*max_len:] # load user ids from exp data data = np.loadtxt(open(dataform,"rb"),delimiter=",") user_ids = data[:, 1] test_data = np.loadtxt(open(dataform_test,"rb"),delimiter=",") test_user_ids = test_data[:, 1] p_input = tf.placeholder(tf.float32, [None, max_len, elem_num]) if FLAGS.trainable_embed: embedding_size = 10 # look up embeddings W = tf.get_variable('W', shape=[len(ps_list)+1, embedding_size], initializer=tf.contrib.layers.xavier_initializer()) sq_embed = tf.nn.embedding_lookup(W, sq_embed_idx) cell_input = tf.reshape(tf.expand_dims(sq_embed, -2) * tf.expand_dims(p_input, -1), [-1, max_len, embedding_size*elem_num]) else: cell_input = p_input cell = tf.nn.rnn_cell.GRUCell(hidden_num) cell = tf.nn.rnn_cell.DropoutWrapper(cell, output_keep_prob=do_in) seq_len = tf.placeholder(tf.int32, [None]) z_codes, enc_state = tf.nn.dynamic_rnn(cell, cell_input, seq_len, dtype=tf.float32) x = enc_state dims = [hidden_num, FLAGS.dim_in, FLAGS.dim_out] else: x = tf.placeholder("float", shape=[None, D['dim']], name='x') # Features dims = [D['dim'], FLAGS.dim_in, FLAGS.dim_out] t = tf.placeholder("float", shape=[None, 1], name='t') # Treatent y_ = tf.placeholder("float", shape=[None, 1], name='y_') # Outcome ''' Define model graph ''' log(logfile, 'Defining graph...\n') CFR = cfr.cfr_net(x, t, y_, p, FLAGS, r_alpha, r_lambda, do_in, do_out, dims) ''' Set up optimizer ''' global_step = tf.Variable(0, trainable=False) lr = tf.train.exponential_decay(FLAGS.lrate, global_step, \ NUM_ITERATIONS_PER_DECAY, FLAGS.lrate_decay, staircase=True) opt = None if FLAGS.optimizer == 'Adagrad': opt = tf.train.AdagradOptimizer(lr) elif FLAGS.optimizer == 'GradientDescent': opt = tf.train.GradientDescentOptimizer(lr) elif FLAGS.optimizer == 'Adam': opt = tf.train.AdamOptimizer(lr) else: opt = tf.train.RMSPropOptimizer(lr, FLAGS.decay) ''' Unused gradient clipping ''' #gvs = opt.compute_gradients(CFR.tot_loss) #capped_gvs = 
[(tf.clip_by_value(grad, -1.0, 1.0), var) for grad, var in gvs] #train_step = opt.apply_gradients(capped_gvs, global_step=global_step) train_step = opt.minimize(CFR.tot_loss,global_step=global_step) ''' Set up for saving variables ''' all_losses = [] all_preds_train = [] all_preds_test = [] all_valid = [] if FLAGS.varsel: all_weights = None all_beta = None all_preds_test = [] ''' Handle repetitions ''' n_experiments = FLAGS.experiments if FLAGS.repetitions>1: if FLAGS.experiments>1: log(logfile, 'ERROR: Use of both repetitions and multiple experiments is currently not supported.') sys.exit(1) n_experiments = FLAGS.repetitions ''' Run for all repeated experiments ''' for i_exp in range(1,n_experiments+1): if FLAGS.repetitions>1: log(logfile, 'Training on repeated initialization %d/%d...' % (i_exp, FLAGS.repetitions)) else: log(logfile, 'Training on experiment %d/%d...' % (i_exp, n_experiments)) ''' Load Data (if multiple repetitions, reuse first set)''' if i_exp==1 or FLAGS.experiments>1: D_exp_test = None if npz_input: D_exp = {} D_exp['x'] = D['x'][:,:,i_exp-1] D_exp['t'] = D['t'][:,i_exp-1:i_exp] D_exp['yf'] = D['yf'][:,i_exp-1:i_exp] if D['HAVE_TRUTH']: D_exp['ycf'] = D['ycf'][:,i_exp-1:i_exp] else: D_exp['ycf'] = None if has_test: D_exp_test = {} D_exp_test['x'] = D_test['x'][:,:,i_exp-1] D_exp_test['t'] = D_test['t'][:,i_exp-1:i_exp] D_exp_test['yf'] = D_test['yf'][:,i_exp-1:i_exp] if D_test['HAVE_TRUTH']: D_exp_test['ycf'] = D_test['ycf'][:,i_exp-1:i_exp] else: D_exp_test['ycf'] = None else: datapath = dataform D_exp = load_assistments_data(datapath, rname=FLAGS.rname, embeddings=FLAGS.embeddings) if has_test: datapath_test = dataform_test D_exp_test = load_assistments_data(datapath_test, rname=FLAGS.rname, embeddings=FLAGS.embeddings) D_exp['HAVE_TRUTH'] = D['HAVE_TRUTH'] if has_test: D_exp_test['HAVE_TRUTH'] = D_test['HAVE_TRUTH'] ''' Split into training and validation sets ''' I_train, I_valid = validation_split(D_exp, FLAGS.val_part) ''' Run training loop 
''' # pass more parameters: p_input, seq_len, rnn if FLAGS.rnn: if FLAGS.trainable_embed: losses, preds_train, preds_test, reps, reps_test = train(CFR, sess, train_step, D_exp, I_valid, D_exp_test, logfile, i_exp, user_ids, test_user_ids, x_dict, len_dict, p_input, seq_len, ps_dict, sq_embed_idx) else: losses, preds_train, preds_test, reps, reps_test = train(CFR, sess, train_step, D_exp, I_valid, D_exp_test, logfile, i_exp, user_ids, test_user_ids, x_dict, len_dict, p_input, seq_len) else: losses, preds_train, preds_test, reps, reps_test = train(CFR, sess, train_step, D_exp, I_valid, D_exp_test, logfile, i_exp) ''' Collect all reps ''' all_preds_train.append(preds_train) all_preds_test.append(preds_test) all_losses.append(losses) ''' Fix shape for output (n_units, dim, n_reps, n_outputs) ''' out_preds_train = np.swapaxes(np.swapaxes(all_preds_train,1,3),0,2) if has_test: out_preds_test = np.swapaxes(np.swapaxes(all_preds_test,1,3),0,2) out_losses = np.swapaxes(np.swapaxes(all_losses,0,2),0,1) ''' Store predictions ''' log(logfile, 'Saving result to %s...\n' % outdir) if FLAGS.output_csv: np.savetxt('%s_%d.csv' % (outform,i_exp), preds_train[-1], delimiter=',') np.savetxt('%s_%d.csv' % (outform_test,i_exp), preds_test[-1], delimiter=',') np.savetxt('%s_%d.csv' % (lossform,i_exp), losses, delimiter=',') ''' Compute weights if doing variable selection ''' if FLAGS.varsel: if i_exp == 1: all_weights = sess.run(CFR.weights_in[0]) all_beta = sess.run(CFR.weights_pred) else: all_weights = np.dstack((all_weights, sess.run(CFR.weights_in[0]))) all_beta = np.dstack((all_beta, sess.run(CFR.weights_pred))) ''' Save results and predictions ''' all_valid.append(I_valid) if FLAGS.varsel: np.savez(npzfile, pred=out_preds_train, loss=out_losses, w=all_weights, beta=all_beta, val=np.array(all_valid)) else: np.savez(npzfile, pred=out_preds_train, loss=out_losses, val=np.array(all_valid)) if has_test: np.savez(npzfile_test, pred=out_preds_test) ''' Save representations ''' if 
FLAGS.save_rep and i_exp == 1: np.savez(repfile, rep=reps) if has_test: np.savez(repfile_test, rep=reps_test) def main(argv=None): # pylint: disable=unused-argument """ Main entry point """ timestamp = datetime.datetime.now().strftime("%Y%m%d_%H%M%S-%f") outdir = FLAGS.outdir+'/results_'+timestamp+'/' os.mkdir(outdir) try: run(outdir) except Exception as e: with open(outdir+'error.txt','w') as errfile: errfile.write(''.join(traceback.format_exception(*sys.exc_info()))) raise if __name__ == '__main__': tf.app.run()
nilq/baby-python
python
from compecon.tools import Options_Container, qzordered
import numpy as np
import pandas as pd
from compecon.tools import jacobian, hessian, gridmake, indices

__author__ = 'Randall'


class LQlabels(Options_Container):
    """ Container for labels of the LQmodel variables

    Attributes:
        s  labels for continuous state variables
        x  labels for continuous action variables
    """
    description = "Labels for LQmodel variables"

    def __init__(self, s, x):
        self.s = list(s)
        self.x = list(x)

    @property
    def snext(self):
        # Labels for next-period states, derived from the state labels.
        return [k + '_next' for k in self.s]

    @property
    def p(self):
        # Labels for the shadow prices (one per state variable).
        return ['value_' + k for k in self.s]


class LQmodel(object):
    """
        A Linear-Quadratic dynamic programming model class

        Solves discrete time linear quadratic control model using Ricatti equation methods

        Uses QZ decomposition to solve the Ricatti equation of a deterministic stationary
        infinite-horizon linear-quadratic dynamic optimization model

            max_x f0 + fs*s + fx*x + 0.5*s'*fss*s + s'*fsx*x + 0.5*x'*fxx*x
            s.t. s' = g0 + gs*s + gx*x

        The optimal policy function is
            x(s) = xstar + X*(s-sstar)

        The shadow price function is
            p(s) = pstar + P*(s-sstar)

        The value function is
            V(s) = vstar + pstar*(s-sstar) + 0.5*(s-sstar)'*P*(s-sstar)

        The controlled state process is
            snext = sstar + G*(s-sstar)
    """
    def __init__(self, f0, fs, fx, fss, fsx, fxx, g0, gs, gx, delta,
                 slabels=None, xlabels=None):
        """
        Args:
            f0: 1.1 objective function parameter
            fs: 1.ds objective function parameter
            fx: 1.dx objective function parameter
            fss: ds.ds objective function parameter
            fsx: ds.dx objective function parameter
            fxx: dx.dx objective function parameter
            g0: ds.1 state transition function parameter
            gs: ds.ds state transition function parameter
            gx: ds.dx state transition function parameter
            delta: discount factor
            slabels: optional list of ds state-variable names
            xlabels: optional list of dx action-variable names
        """
        fs, fx, fss, fsx, fxx, g0, gs, gx = np.atleast_2d(fs, fx, fss, fsx, fxx, g0, gs, gx)

        # Determine dimensions of state and action variables
        ds = fs.size
        dx = fx.size
        fs.shape = 1, ds
        fx.shape = 1, dx

        # Check conformability of all matrix parameters
        assert fss.shape == (ds, ds), f'error in LQmodel: fss must be a {ds} by {ds} matrix'
        assert fsx.shape == (ds, dx), f'error in LQmodel: fsx must be a {ds} by {dx} matrix'
        assert fxx.shape == (dx, dx), f'error in LQmodel: fxx must be a {dx} by {dx} matrix'
        assert g0.shape == (ds, 1), f'error in LQmodel: g0 must be a {ds} by 1 matrix'
        assert gs.shape == (ds, ds), f'error in LQmodel: gs must be a {ds} by {ds} matrix'
        assert gx.shape == (ds, dx), f'error in LQmodel: gx must be a {ds} by {dx} matrix'

        self.f0 = f0
        self.fs = fs
        self.fx = fx
        self.fss = fss
        self.fsx = fsx
        self.fxx = fxx
        self.g0 = g0
        self.gs = gs
        self.gx = gx
        self.delta = delta
        self.dims = {'ds': ds, 'dx': dx}

        # Placeholders; filled in by solve() below.
        self.X = np.nan
        self.P = np.nan
        self.G = np.nan

        '''MAKE THE LABELS'''
        if slabels is None:
            slabels = ['s'] if ds == 1 else [f's{i}' for i in range(ds)]
        if xlabels is None:
            xlabels = ['x'] if dx == 1 else [f'x{i}' for i in range(dx)]
        self.labels = LQlabels(slabels, xlabels)

        '''SOLVE THE MODEL'''
        self.solve()

    ''' <<<<<<<<<<<<<<<<<<<             END OF CONSTRUCTOR        >>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>'''

    def __repr__(self):
        # FIX: the original body was a `pass` followed by dead code copied from
        # another model class; that code executed after the no-op `pass` and
        # crashed (it read `self.dims.dx` although `dims` is a dict, and
        # `self.Value.n` although `Value` is a method). Replaced with a
        # working summary built from this model's own attributes.
        ds, dx = self.dims['ds'], self.dims['dx']
        txt = 'A linear-quadratic dynamic programming model.\n'.upper()
        txt += '\n\t* Continuous states:\n'
        for k, lab in enumerate(self.labels.s):
            txt += '\t\t{:<2d}: {:s}\n'.format(k, lab)
        txt += '\n\t* Continuous actions:\n'
        for k, lab in enumerate(self.labels.x):
            txt += '\t\t{:<2d}: {:s}\n'.format(k, lab)
        txt += '\n\t* discount factor = {:g}\n'.format(self.delta)
        return txt

    @property
    def steady_state(self):
        """Steady-state (s, x, p, v) tuple computed by solve()."""
        return self.steady['s'], self.steady['x'], self.steady['p'], self.steady['v']

    def Value(self, ss):
        '''
        Args:
            ss: state evaluation points, ds.ns

        Returns:
            value function at ss

        Note:
            The value function is
                V(s) = vstar + pstar' * (s-sstar) + 0.5*(s-sstar)'*P*(s-sstar)
        '''
        sstar, xstar, pstar, vstar = self.steady_state
        ss0 = ss - sstar  # ds.ns deviations from steady state
        Pss0 = self.P @ ss0
        # Quadratic form evaluated column-by-column: (s-s*)' P (s-s*)
        ss0Pss0 = [k.dot(h) for k, h in zip(ss0.T, Pss0.T)]
        return vstar + pstar.T @ ss0 + 0.5 * np.array(ss0Pss0)

    def Policy(self, ss):
        '''
        Args:
            ss: state evaluation points

        Returns:
            policy function at ss

        Notes:
            The optimal policy function is x(s) = xstar + X * (s - sstar)
        '''
        sstar, xstar, pstar, vstar = self.steady_state
        return xstar + self.X @ (ss - sstar)

    def Shadow(self, ss):
        '''
        Args:
            ss: state evaluation points

        Returns:
            shadow price function at ss

        Notes:
            The shadow price function is p(s) = pstar + P * (s - sstar)
        '''
        sstar, xstar, pstar, vstar = self.steady_state
        return pstar + self.P @ (ss - sstar)

    def Next(self, ss):
        '''
        Args:
            ss: state evaluation points

        Returns:
            controlled state process at ss

        Notes:
            The controlled state process is snext(s) = sstar + G * (s - sstar)
        '''
        sstar, xstar, pstar, vstar = self.steady_state
        return sstar + self.G @ (ss - sstar)

    def solution(self, ss):
        """
        Computes solution over a refined grid

        s:  -- array >> compute solution over provided values

        Returns a DataFrame with states, optimal actions, value, shadow
        prices, and next-period states, one row per grid point.
        """
        ds, dx = self.dims['ds'], self.dims['dx']

        '''GET THE DATA'''
        ss = np.atleast_2d(ss)
        assert ss.shape[0] == ds, 'provided s grid must have {} rows'.format(ds)
        xr = self.Policy(ss)
        vr = self.Value(ss)
        pr = self.Shadow(ss)
        snext = self.Next(ss)

        ''' MAKE DATABASE'''
        DATA = pd.DataFrame(
            np.r_[ss, xr, vr, pr, snext].T,
            columns=self.labels.s + self.labels.x + ['value'] + self.labels.p + self.labels.snext)

        '''SET INDEX FOR DATA'''
        if ds == 1:
            # Single-state models are indexed by the state for convenience.
            DATA.index = DATA[self.labels.s[0]]

        return DATA

    def simulate(self, nper, sinit, seed=None):
        # Simulate the model
        #
        #   S = self.simulate(nper, sinit, seed)
        #
        # nper = number of periods to simulate (scalar)
        # sinit = initial continuous state (ds x nrep), where nrep is number of repetitions
        #
        # S = simulation results (table), with variables:
        #   time, time period
        #   _rep, repetition number (only when nrep > 1)
        #   s, continuous state
        #   x, optimal continuous action

        # ****** 1: Preparation***********************************************************
        # TODO: ADD THE STOCHASTIC COMPONENT
        ds, dx = self.dims['ds'], self.dims['dx']

        if seed:
            np.random.seed(seed)

        sinit = np.atleast_2d(sinit).astype(float)
        ds2, nrep = sinit.shape
        assert ds == ds2, 'initial continous state must have {} rows'.format(ds)

        ### Allocate memory to output arrays (one extra row for the transition)
        ssim = np.empty((nper + 1, ds, nrep))
        xsim = np.empty((nper, dx, nrep))

        ### Set initial states
        ssim[0] = sinit

        # ***** *2: Simulate the model ***************************************************
        for t in range(nper):
            xsim[t] = self.Policy(ssim[t])
            ssim[t + 1] = self.Next(ssim[t])

        ### Trim the last observation so states and actions align period-by-period
        ssim = ssim[:nper]

        # ****** 3: Make a table with the simulated data *********************************
        # FIX: use the model's own labels (which honor user-supplied
        # slabels/xlabels) instead of regenerating the defaults; the original
        # silently ignored custom labels here.
        slabels = self.labels.s
        xlabels = self.labels.x

        '''MAKE DATA MATRICES'''
        sdata = ssim.swapaxes(0, 1).reshape((ds, -1))
        xdata = xsim.swapaxes(0, 1).reshape((dx, -1))

        ### Add variables tsim and rsim to identify the time period and the
        # repetition number of each observation
        tsim, rsim = gridmake(np.arange(nper), np.arange(nrep))

        # Make the table.
        DATA = pd.DataFrame()
        DATA['time'] = tsim
        if nrep > 1:
            DATA['_rep'] = rsim
        for k, slab in enumerate(slabels):
            DATA[slab] = sdata[k]
        for k, xlab in enumerate(xlabels):
            DATA[xlab] = xdata[k]

        return DATA

    def solve(self):
        """Solve the Riccati equation and compute the steady state.

        Fills in self.X (policy slope), self.P (value curvature /
        shadow-price slope), self.G (controlled transition slope), and
        self.steady (steady-state s, x, p, v).
        """
        # Unpack data
        ds = self.dims['ds']
        dx = self.dims['dx']
        delta = self.delta
        f0, fx, fxx = self.f0, self.fx, self.fxx
        g0, gx = self.g0, self.gx
        fs, fsx, fss, gs = self.fs, self.fsx, self.fss, self.gs

        ''' Solve Riccati equation using QZ decomposition '''
        dx2ds = dx + 2 * ds
        A = np.zeros((dx2ds, dx2ds))
        A[:ds, :ds] = np.identity(ds)
        A[ds:-ds, -ds:] = -delta * gx.T
        A[-ds:, -ds:] = delta * gs.T

        B = np.zeros_like(A)
        B[:ds, :-ds] = np.c_[gs, gx]
        B[ds:-ds, :-ds] = np.c_[fsx.T, fxx]
        B[-ds:] = np.c_[-fss, -fsx, np.identity(ds)]

        S, T, Q, Z = qzordered(A, B)
        C = np.real(np.linalg.solve(Z[:ds, :ds].T, Z[ds:, :ds].T)).T
        X = C[:dx]        # policy slope
        P = C[dx:, :]     # value curvature
        G = gs + gx @ X   # controlled transition slope
        self.X = X
        self.P = P
        self.G = G

        ''' Compute steady-state state, action, and shadow price'''
        # Linear system stacking the first-order, envelope, and transition
        # conditions evaluated at the steady state.
        t0 = np.r_[np.c_[fsx.T, fxx, delta * gx.T],
                   np.c_[fss, fsx, delta * gs.T - np.eye(ds)],
                   np.c_[gs - np.eye(ds), gx, np.zeros((ds, ds))]]
        t1 = np.r_[-fx.T, -fs.T, -g0]
        t = np.linalg.solve(t0, t1)
        sstar, xstar, pstar = np.split(t, [ds, ds + dx])
        # Steady-state value: per-period payoff capitalized at rate delta.
        vstar = (f0 + fs @ sstar + fx @ xstar +
                 0.5 * sstar.T @ fss @ sstar +
                 sstar.T @ fsx @ xstar +
                 0.5 * xstar.T @ fxx @ xstar) / (1 - delta)

        self.steady = {'s': sstar, 'x': xstar, 'p': pstar, 'v': vstar}
nilq/baby-python
python
h1, m1, h2, m2 = map(int, input().split()) minutos = m2 - m1 horas = h2 - h1 if horas <= 0: horas += 24 if minutos < 0: horas -= 1 minutos = 60 + minutos print("O JOGO DUROU {} HORA(S) E {} MINUTO(S)".format(horas, minutos))
nilq/baby-python
python
# grad.py """ Created on Fri May 25 19:10:00 2018 @author: Wentao Huang """ from torch.autograd import Function class Grad(Function): r"""Records operation history and defines formulas for differentiating ops. Each function object is meant to be used only once (in the forward pass). Attributes: requires_grad: Boolean indicating whether the :func:`backward` will ever need to be called. """ @staticmethod def forward(ctx, input, C, bias=None, beta=1.0, isorth=True, eps=1e-6, *args, **kwargs): r"""Performs the operation. This function is to be overridden by all subclasses. It must accept a context ctx as the first argument, followed by any number of arguments (tensors or other types). The context can be used to store tensors that can be then retrieved during the backward pass. """ return NotImplementedError @staticmethod def backward(ctx, grad_output=None): r"""Defines a formula for differentiating the operation. It must accept a context ctx as the first argument, followed by as many outputs did :func:`forward` return, and it should return as many tensors, as there were inputs to :func:`forward`. Each argument is the gradient w.r.t the given output, and each returned value should be the gradient w.r.t. the corresponding input. The context can be used to retrieve tensors saved during the forward pass. """ # d_input=d_C=d_b=d_beta=d_isorth=d_eps=None dC, db, argnum = ctx.saved_variables output = [None]*int(argnum) if ctx.needs_input_grad[1]: output[1] = dC #d_C = dC if ctx.needs_input_grad[2]: output[2] = db #d_b = db if grad_output is not None: output[1] = grad_output * output[1] if output[2] is not None: output[2] = grad_output * output[2] return tuple(output) # d_input, d_C, d_b, d_beta, d_isorth, d_eps
nilq/baby-python
python
# !/usr/bin/env python # -*- coding: utf-8 -*- """ @author: Wang Zhiyi @file: logger.py @time: 6/20/2021 @version: 1.0 """ import os from pyspark.ml.pipeline import PipelineModel from spark_manager import get_spark_session _spark_session = get_spark_session() _spark_context = _spark_session.sparkContext def load_model_from_file(path): """ Reconstruct a models from a file persisted with models.dump. Args: path (string): The source path stores a models. Returns: ModelObject: Model object. """ # model = RandomForestClassificationModel.load(path=path) # model = RandomForestClassifier.load(path=path) # model = PipelineModel.load(path) model = PipelineModel.load(path) os.system('echo -e "\033[31m\033[1m{}\033[0m"'.format(str(model))) return model def load_model_to_file(model, path): """ Persist an models object into one file. Args: model (ModelObject): The model object. path (string): The target path stores a models. Returns: NoneType: None """ # dump(model, path) model.write().overwrite().save(path=path) return None
nilq/baby-python
python
import tensorflow as tf import numpy as np from PIL import Image import os def maybe_download(directory, filename, url): print('Try to dwnloaded', url) if not tf.gfile.Exists(directory): tf.gfile.MakeDirs(directory) filepath = os.path.join(directory, filename) if not tf.gfile.Exists(filepath): filepath, _ = urllib.request.urlretrieve(url, filepath) with tf.gfile.GFile(filepath) as f: size = f.size() print('Successfully downloaded', filename, size, 'bytes.') return filepath def load_pretrained(filepath): return np.load(filepath, encoding='bytes').item() def get_epoch(): epoch_step = tf.Variable(0, name='epoch_step', trainable=False) epoch_update = epoch_step.assign(epoch_step + 1) return epoch_step, epoch_update def load_imgs(train_img_dir, filelist): def load_img(path): _img = Image.open(path) img = np.array(_img) _img.close() return img _imgs = [os.path.join(train_img_dir, filename + ".png") for filename in filelist] imgs = [load_img(_img) for _img in _imgs] return imgs def load_annots(train_annot_dir, filelist): def load_annot(path): #print(path) annot = np.load(path, encoding='bytes') #print("original dims: {}x{}".format(annot[0,0], annot[0,1])) return annot _annots = [os.path.join(train_annot_dir, filename + ".npy") for filename in filelist] annots = [load_annot(_annot) for _annot in _annots] return annots def tf_Print(on, x, summarize=50, message=""): if on: x = tf.Print(x, [x, tf.shape(x)], summarize=summarize, message=message) return x def debug_print(on, *x): if on: print(x) return x
nilq/baby-python
python
# -*- coding: utf-8 -*- import scrapy import json from locations.items import GeojsonPointItem from locations.hours import OpeningHours class SprintSpider(scrapy.Spider): name = "sprint" allowed_domains = ["sprint.com"] start_urls = ( 'https://www.sprint.com/locations/', ) def parse_hours(self, store_hours): opening_hours = OpeningHours() for store_day in store_hours: day, open_close = store_day.split(' ') open_time, close_time = open_close.split('-') opening_hours.add_range(day=day, open_time=open_time, close_time=close_time, time_format='%H:%M' ) return opening_hours.as_opening_hours() def parse(self, response): state_urls = response.xpath('//a[@class="lm-homepage__state"]/@href').extract() for state_url in state_urls: yield scrapy.Request(response.urljoin(state_url), callback=self.parse_state) def parse_state(self, response): city_urls = response.xpath('//a[@class="lm-state__store"]/@href').extract() for city_url in city_urls: yield scrapy.Request(response.urljoin(city_url), callback=self.parse_store) def parse_store(self, response): data = json.loads(response.xpath( '//script[@type="application/ld+json" and contains(text(), "streetAddress")]/text()').extract_first()) properties = { 'name': data["name"], 'ref': data["branchCode"], 'addr_full': data["address"]["streetAddress"], 'city': data["address"]["addressLocality"], 'state': data["address"]["addressRegion"], 'postcode': data["address"]["postalCode"], 'country': data["address"]["addressCountry"], 'phone': data.get("telephone"), 'website': data.get("url") or response.url, 'lat': float(data["geo"]["latitude"]), 'lon': float(data["geo"]["longitude"]), } hours = self.parse_hours(data.get("openingHoursSpecification", [])) if hours: properties["opening_hours"] = hours yield GeojsonPointItem(**properties)
nilq/baby-python
python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2019 John J. Rofrano <rofrano@gmail.com>
# All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
ZenHub Workspace

Based on ZenHub API @ https://github.com/ZenHubIO/API
"""
import json

from .board import Board


class Workspace:
    """ ZenHub Workspace for a repository

    Example JSON:
        [
            {
                "name": null,
                "description": null,
                "id": "57e2f42c86e6ae285942419d",
                "repositories": [
                    68837948
                ]
            }
        ]
    """

    def __init__(self, data, repo):
        # data: parsed Workspace JSON (see class docstring); repo: the
        # owning Repository object, used for identity and API calls.
        self.data = data
        self.repo = repo

    def __repr__(self):
        return '<%s %r>' % (type(self).__name__, self.repo.id)

    def __str__(self):
        return '<%s %r>\n' % (type(self).__name__, self.repo.id) + json.dumps(self.data, indent=4)

    # NOTE: the accessors below used try/except KeyError boilerplate; dict.get
    # expresses the same "missing key -> default" contract directly.

    @property
    def name(self):
        """
        :type: string
        """
        return self.data.get('name')

    @property
    def description(self):
        """
        :type: string
        """
        return self.data.get('description')

    @property
    def id(self):
        """
        :type: int
        """
        return self.data.get('id')

    @property
    def repositories(self):
        """
        :type: list
        """
        return self.data.get('repositories', [])

    def board(self):
        """
        Get ZenHub Board data for a repository (repo_id) within the Workspace (workspace_id)

        :calls: `GET /p2/workspaces/:workspace_id/repositories/:repo_id/board <https://github.com/ZenHubIO/API#get-a-zenhub-board-for-a-repository>`_
        :return: :class:`Board <Board>` object
        :rtype: :class:`zenhub.Board`
        """
        data = self.repo.zenhub.get(f'/p2/workspaces/{self.id}/repositories/{self.repo.id}/board')
        if data:
            return Board(data, self.repo)
        return None
nilq/baby-python
python
#!/usr/bin/python
'''
Created on Apr 19, 2016

@author: Rohan Achar
'''
# Interactive admin console for a spacetime FrameServer. The server runs in a
# background thread; the console reads commands from stdin (Python 2 syntax).
import sys
import argparse
from spacetime.store_server import FrameServer
import cmd
import shlex
from flask import request  # NOTE(review): appears unused here — confirm before removing
from threading import Thread as Parallel

class SpacetimeConsole(cmd.Cmd):
    # All commands read the module-level global `fs` (the FrameServer) that is
    # assigned in the __main__ block below.
    prompt = 'Spacetime> '
    # NOTE(review): this string sits *after* the `prompt` assignment, so it is
    # a stray statement, not the class docstring (Grad.__doc__ is unaffected).
    """Command console interpreter for frame."""

    def do_quit(self, line):
        """ quit
        Exits all applications by calling their shutdown methods.
        """
        shutdown()

    def do_exit(self, line):
        """ exit
        Exits all applications by calling their shutdown methods.
        """
        shutdown()

    def do_findobjs(self, line):
        """ findobjs
        Looks for objects where a given dimension matches a given value for a given set.
        """
        tokens = shlex.split(line)
        if len(tokens) == 3:
            type_text = tokens[0]
            dim = tokens[1]
            val = tokens[2]
            if type_text in fs.name2class:
                tp = fs.name2class[type_text]
                if hasattr(tp, dim):
                    objs = fs.Store.get(tp)
                    for obj in objs:
                        # Some dimensions may be unreadable on a given object;
                        # skip those rather than aborting the scan.
                        try:
                            v = getattr(obj, dim)
                        except Exception:
                            continue
                        # String-compare so the user can type any value as-is.
                        if str(v) == val:
                            for d in obj:
                                print "%s: %s" % (d, obj[d])
                else:
                    print "type %s does not have dimension %s" % (type_text, dim)
            else:
                print "could not find type %s" % type_text
        else:
            print "usage: findobjs <type> <dimension> <value>"

    def do_descobj(self, line):
        """ descobj <type> <id>
        Given a type and an id, prints all the dimensions and values. Has auto-complete.
        """
        tokens = shlex.split(line)
        if len(tokens) == 2:
            type_text = tokens[0]
            oid = tokens[1]
            if type_text in fs.name2class:
                obj = {}
                try:
                    obj = fs.Store.get_object_state(fs.name2class[type_text], oid)
                except:
                    print "could not find object with id %s" % oid
                for dim in obj:
                    print "%s: %s" % (dim, obj[dim])
            else:
                print "could not find type %s" % type_text

    def complete_descobj(self, text, line, begidx, endidx):
        # Tab-completion: first the type name, then an object id of that type.
        tokens = shlex.split(line)
        if len(tokens) == 1:
            completions = [t.__realname__ for t in fs.DATAMODEL_TYPES]
        elif len(tokens) == 2 and text:
            completions = [t.__realname__ for t in fs.DATAMODEL_TYPES if t.__realname__.startswith(text)]
        else:
            # NOTE(review): if tokens[1] is a known type but neither branch
            # below matches, `completions` is never bound and the final
            # `return` raises UnboundLocalError — confirm and initialize.
            if tokens[1] in fs.name2class:
                if len(tokens) == 2 and not text:
                    completions = [oid for oid in fs.Store.get_ids(fs.name2class[tokens[1]])]
                elif len(tokens) == 3 and text:
                    completions = [oid for oid in fs.Store.get_ids(fs.name2class[tokens[1]]) if oid.startswith(text)]
            else:
                print "\n%s is not a valid type." % tokens[1]
        return completions

    def do_objsin(self, type_text):
        """ objsin <type>
        Prints the primary key of all objects of a type (accepts auto-complete)
        """
        if type_text in fs.name2class:
            objs = fs.Store.get(fs.name2class[type_text])
            if objs:
                print "{0:20s}".format("ids")
                print "============="
                for oid in objs:
                    print "{0:20s}".format(oid)
                print ""
        else:
            print "could not find type %s" % type_text

    def complete_objsin(self, text, line, begidx, endidx):
        # Tab-completion over the registered datamodel type names.
        if not text:
            completions = [t.__realname__ for t in fs.DATAMODEL_TYPES]
        else:
            completions = [t.__realname__ for t in fs.DATAMODEL_TYPES if t.__realname__.startswith(text)]
        return completions

    def do_countobjsin(self, type_text):
        """ objsin <type>
        Prints the primary key of all objects of a type (accepts auto-complete)
        """
        # NOTE(review): docstring says "objsin" but this prints only a count.
        if type_text in fs.name2class:
            objs = fs.Store.get(fs.name2class[type_text])
            if objs:
                print "============="
                print "Number of objects in %s is %d" % (type_text, len(objs))
                print ""
        else:
            print "could not find type %s" % type_text

    def complete_countobjsin(self, text, line, begidx, endidx):
        # Same completion source as complete_objsin.
        if not text:
            completions = [t.__realname__ for t in fs.DATAMODEL_TYPES]
        else:
            completions = [t.__realname__ for t in fs.DATAMODEL_TYPES if t.__realname__.startswith(text)]
        return completions

    def complete_list(self, text, line, begidx, endidx):
        # `list` takes exactly one of these two literal arguments.
        return ['sets','apps']

    def do_list(self, line):
        """ list ['sets','apps']
        list accepts one of two arguments:
        * 'sets' prints all pcc sets tracked by the server
        * 'apps' prints the name of all applications registered with the server
        """
        if line == "sets":
            for t in fs.DATAMODEL_TYPES:
                print "{0:60s}{1:s}".format(t.__realname__, t.__module__)
        elif line == "apps":
            all_apps = fs.Store.get_app_list()
            for app in all_apps:
                print app
        else:
            # Unrecognized argument: echo it back rather than erroring.
            print line

    def do_clear(self, type_text):
        """ clear [<type>, '!all']
        Deletes all objects of the type passed. If '!all' is passed, all objects of all types are cleared.
        """
        if type_text:
            if type_text == "!all":
                fs.Store.clear()
                print "cleared all objects in store..."
            else:
                try:
                    fs.Store.clear(fs.name2class[type_text])
                    print "cleared all objects of type %s" % type_text
                except:
                    print "could not clear objects of type %s" % type_text

    def emptyline(self):
        # Override cmd.Cmd default (repeat last command) with a no-op.
        pass

    def do_EOF(self, line):
        # Ctrl-D behaves like quit/exit.
        shutdown()

    # TODO: do_pause. Will require telling the applications to pause, to avoid issues.

def shutdown():
    # Stop the FrameServer thread and terminate the whole process.
    print "Shutting down ..."
    global fs
    fs.shutdown()
    sys.exit(0)

if __name__== "__main__":
    parser = argparse.ArgumentParser()
    # NOTE(review): help text says default 12000 but the actual default is 10000.
    parser.add_argument('-p', '--port', type=int, default=10000, help='Port where the server will listen (default: 12000)')
    parser.add_argument('-P', '--profile', action='store_true', help='Enable profiling on store server.')
    parser.add_argument('-d', '--debug', action='store_true', help='Debug on')
    parser.add_argument('-e', '--external', action='store_true', help='Make this server externally accessible')
    parser.add_argument('-w', '--watchdog', action='store_true', help='Starts the server with thes slack/github watchdog')
    parser.add_argument('-t', '--timeout', type=int, default=0, help='Timeout in seconds for the server to consider a client disconnected.')
    parser.add_argument('-c', '--clearempty', action='store_true', default=False, help='Clears the dataframes when all simulations leave.')
    args = parser.parse_args()
    global fs
    # The server runs in a daemon thread; the console owns the main thread.
    fs = FrameServer(args.port, args.debug, args.external, args.timeout, args.clearempty)
    p = Parallel(target = fs.run, args = (args.profile,))
    p.daemon = True
    p.start()

    if args.watchdog:
        try:
            from slack_watchdog import start_watchdog
            start_watchdog(fs)
        except:
            print "error starting watchdog."
            raise

    SpacetimeConsole().cmdloop()
nilq/baby-python
python
import dydra

##
# Represents a Dydra.com RDF repository.
#
# @see http://docs.dydra.com/sdk/python
class Repository(dydra.Resource):
    """Represents a Dydra.com RDF repository."""

    ##
    # (Attribute) The repository name.
    name = None

    ##
    # @param  name A valid repository name.
    def __init__(self, name, **kwargs):
        self.name = str(name)
        super(Repository, self).__init__(self.name, **kwargs)

    ##
    # @return A string representation of this object.
    def __repr__(self):
        return "dydra.Repository('%s')" % (self.name)

    ##
    # @return The number of statements in this repository.
    def __len__(self):
        return self.count()

    ##
    # Creates this repository on Dydra.com.
    #
    # @return A pending operation.
    def create(self):
        """Creates this repository on Dydra.com."""
        return dydra.Operation(self.client.call('repository.create', self.name), client=self.client)

    ##
    # Destroys this repository from Dydra.com.
    #
    # @return A pending operation.
    def destroy(self):
        """Destroys this repository from Dydra.com."""
        return dydra.Operation(self.client.call('repository.destroy', self.name), client=self.client)

    ##
    # Returns the number of RDF statements in this repository.
    #
    # @return A positive integer.
    def count(self):
        """Returns the number of RDF statements in this repository."""
        return self.client.call('repository.count', self.name)

    ##
    # Deletes all data in this repository.
    #
    # @return A pending operation.
    def clear(self):
        """Deletes all data in this repository."""
        return dydra.Operation(self.client.call('repository.clear', self.name), client=self.client)

    ##
    # Imports data from the given URL into this repository.
    #
    # @param  url      A valid URL string.
    # @param  context  Optional named-graph context (keyword argument).
    # @param  base_uri Optional base URI for parsing (keyword argument).
    # @return A pending operation.
    def import_from_url(self, url, **kwargs):
        """Imports data from the given URL into this repository."""
        url, context, base_uri = str(url), '', ''
        # FIX: dict.has_key was removed in Python 3; kwargs.get(key) is the
        # exact equivalent of `has_key(key) and kwargs[key]` truthiness on
        # both Python 2 and 3.
        if kwargs.get('context'):
            context = str(kwargs['context'])
        if kwargs.get('base_uri'):
            base_uri = str(kwargs['base_uri'])
        return dydra.Operation(self.client.call('repository.import', self.name, url, context, base_uri), client=self.client)
nilq/baby-python
python
import cv2 import numpy as np src = cv2.imread('data/src/lena.jpg') mask = np.zeros_like(src) print(mask.shape) # (225, 400, 3) print(mask.dtype) # uint8 cv2.rectangle(mask, (50, 50), (100, 200), (255, 255, 255), thickness=-1) cv2.circle(mask, (200, 100), 50, (255, 255, 255), thickness=-1) cv2.fillConvexPoly(mask, np.array([[330, 50], [300, 200], [360, 150]]), (255, 255, 255)) cv2.imwrite('data/dst/opencv_draw_mask.jpg', mask) # True # ![](data/dst/opencv_draw_mask.jpg) mask_blur = cv2.GaussianBlur(mask, (51, 51), 0) cv2.imwrite('data/dst/opencv_draw_mask_blur.jpg', mask_blur) # True # ![](data/dst/opencv_draw_mask_blur.jpg) dst = src * (mask_blur / 255) cv2.imwrite('data/dst/opencv_draw_mask_blur_result.jpg', dst) # True # ![](data/dst/opencv_draw_mask_blur_result.jpg)
nilq/baby-python
python
"""Base test class for DNS authenticators.""" import configobj import josepy as jose import mock import six from acme import challenges from certbot import achallenges from certbot.compat import filesystem from certbot.tests import acme_util from certbot.tests import util as test_util DOMAIN = 'example.com' KEY = jose.JWKRSA.load(test_util.load_vector("rsa512_key.pem")) class BaseAuthenticatorTest(object): """ A base test class to reduce duplication between test code for DNS Authenticator Plugins. Assumes: * That subclasses also subclass unittest.TestCase * That the authenticator is stored as self.auth """ achall = achallenges.KeyAuthorizationAnnotatedChallenge( challb=acme_util.DNS01, domain=DOMAIN, account_key=KEY) def test_more_info(self): self.assertTrue(isinstance(self.auth.more_info(), six.string_types)) # pylint: disable=no-member def test_get_chall_pref(self): self.assertEqual(self.auth.get_chall_pref(None), [challenges.DNS01]) # pylint: disable=no-member def test_parser_arguments(self): m = mock.MagicMock() self.auth.add_parser_arguments(m) # pylint: disable=no-member m.assert_any_call('propagation-seconds', type=int, default=mock.ANY, help=mock.ANY) def write(values, path): """Write the specified values to a config file. :param dict values: A map of values to write. :param str path: Where to write the values. """ config = configobj.ConfigObj() for key in values: config[key] = values[key] with open(path, "wb") as f: config.write(outfile=f) filesystem.chmod(path, 0o600)
nilq/baby-python
python
# Evaluate a trained speech-enhancement model on the noisy test set:
# reconstruct waveforms from predicted spectrograms, write clean / estimated /
# noisy wavs to disk, and average composite quality metrics over the set.
import os
os.environ["CUDA_VISIBLE_DEVICES"] = "0"
from torch import optim
from torch.utils.data import DataLoader
import torch
from dataloader import *
from model import *
from metric import *
import soundfile as sf
from semetrics import *

test_clean = "data/test_clean/"
test_noisy = "data/test_noisy/"
# Tag of the trained model / output directories to evaluate.
data_augment = 'Specmix'

if __name__ == "__main__" :
    testset = SignalDataset(test_clean, test_noisy, training=False)
    # batch_size=1 so each file keeps its own length through collate_fn.
    testloader = DataLoader(testset,batch_size=1, shuffle=False, num_workers=0, collate_fn=collate_fn)
    model = Network()
    model.to('cuda')
    criterion = nn.MSELoss().to('cuda')
    state = torch.load("model_{}/model.pth".format(data_augment))
    model.load_state_dict(state['model'])

    # Running sums of the composite metrics, averaged at the end.
    epoch_loss = 0.
    epoch_pesq = 0.
    epoch_csig = 0.
    epoch_cbak = 0.
    epoch_covl = 0.
    epoch_ssnr = 0.
    epoch_pesq_noisy = 0.
    print("Evaluate start")
    model.eval()
    idx = 0
    with torch.no_grad() :
        # NOTE(review): `iter` shadows the builtin; harmless here but worth
        # renaming if this loop is ever refactored.
        for iter, (clean, noisy, clean_spec, noisy_spec, length) in enumerate(testloader) :
            mask, output = model(noisy_spec)
            #plot_train(clean_spec[0], output[0,:,:,:], noisy_spec[0])
            # Move the channel axis last before waveform reconstruction.
            clean = clean_spec.permute(0, 2, 3, 1)
            output = output.permute(0, 2, 3, 1)
            noisy = noisy_spec.permute(0, 2, 3, 1)

            # Invert the spectrograms to time-domain signals, trimmed to the
            # original length; a leading batch axis of 1 is added back.
            gt = get_wav(clean.squeeze(0).cpu().numpy(), length=length[0])[np.newaxis, :]
            pred = get_wav(output.squeeze(0).cpu().numpy(), length=length[0])[np.newaxis, :]
            noisy_gt = get_wav(noisy.squeeze(0).cpu().numpy(), length=length[0])[np.newaxis, :]

            # Create the output directories on first use.
            if not os.path.isdir("eval/test_{}/clean/".format(data_augment)) :
                os.makedirs("eval/test_{}/clean/".format(data_augment))
            if not os.path.isdir("eval/test_{}/estimated/".format(data_augment)) :
                os.makedirs("eval/test_{}/estimated/".format(data_augment))
            if not os.path.isdir("eval/test_{}/noisy/".format(data_augment)):
                os.makedirs("eval/test_{}/noisy/".format(data_augment))

            for i in range(len(gt)) :
                # Clip to the valid wav amplitude range before writing.
                gt[i] = np.clip(gt[i], -1, 1)
                pred[i] = np.clip(pred[i], -1, 1)
                noisy_gt[i] = np.clip(noisy_gt[i], -1, 1)
                sf.write("eval/test_{}/clean/{}.wav".format(data_augment, idx), gt[i], 16000)
                sf.write("eval/test_{}/estimated/{}.wav".format(data_augment, idx), pred[i], 16000)
                sf.write("eval/test_{}/noisy/{}.wav".format(data_augment, idx), noisy_gt[i], 16000)
                # composite() compares the written files on disk.
                pesq, csig, cbak, covl, ssnr = composite("eval/test_{}/clean/{}.wav".format(data_augment,idx),
                                                         "eval/test_{}/estimated/{}.wav".format(data_augment,idx))
                #pesq_noisy, csig_noisy, cbak_noisy, covl_noisy, ssnr_noisy = composite("eval/clean/{}.wav".format(idx),
                #                                                                      "eval/noisy/{}.wav".format(idx))
                print(idx)
                print('estimated : ', pesq, csig, cbak, covl, ssnr)
                #print('noisy : ',pesq_noisy, csig_noisy, cbak_noisy, covl_noisy, ssnr_noisy)
                epoch_pesq += pesq
                epoch_csig += csig
                epoch_cbak += cbak
                epoch_covl += covl
                epoch_ssnr += ssnr
                idx += 1
                #plot_data(clean[i], mask[i], noisy[i])

    # Average each metric over the number of evaluated files.
    epoch_pesq /= idx
    epoch_csig /= idx
    epoch_cbak /= idx
    epoch_covl /= idx
    epoch_ssnr /= idx
    print("test epoch pesq : %f csig : %f cbak : %f covl : %f ssnr : %f"%(epoch_pesq, epoch_csig, epoch_cbak,epoch_covl, epoch_ssnr))
nilq/baby-python
python
#!venv/bin/python3
"""Entry point: configure logging, then run a one-shot Mongo collection sync."""
import logging
from logging.handlers import RotatingFileHandler

import sync_mongo
from sync_mongo import SyncMongo

__level = logging.DEBUG
logger = logging.getLogger(__name__)

uri = "repl1/localhost:27017,localhost:27018,localhost:27019"

def setup_logging():
    """Send log records both to the console and to a rotating file."""
    fmt = '%(asctime)s %(levelname)s:%(message)s'
    logging.basicConfig(format=fmt, level=__level)

    # Attach a rotating file handler (4 backups of 2 MiB) to the root logger
    # so every module's records also land in main.log.
    file_handler = RotatingFileHandler('main.log', maxBytes=2*1024*1024, backupCount=4)
    file_handler.setLevel(__level)
    file_handler.setFormatter(logging.Formatter(fmt))
    logging.getLogger().addHandler(file_handler)

setup_logging()
logger.info("Starting")
SyncMongo(uri, "superheroesdb", "superheroes", "superclone" ).sync()
logger.info("completed")
nilq/baby-python
python
""" Bilby ===== Bilby: a user-friendly Bayesian inference library. The aim of bilby is to provide a user-friendly interface to perform parameter estimation. It is primarily designed and built for inference of compact binary coalescence events in interferometric data, but it can also be used for more general problems. The code, and many examples are hosted at https://git.ligo.org/lscsoft/bilby. For installation instructions see https://lscsoft.docs.ligo.org/bilby/installation.html. """ import sys from . import core, gw, hyper from .core import utils, likelihood, prior, result, sampler from .core.sampler import run_sampler from .core.likelihood import Likelihood __version__ = utils.get_version_information() if sys.version_info < (3,): raise ImportError( """You are running bilby >= 0.6.4 on Python 2 Bilby 0.6.4 and above are no longer compatible with Python 2, and you still ended up with this version installed. That's unfortunate; sorry about that. It should not have happened. Make sure you have pip >= 9.0 to avoid this kind of issue, as well as setuptools >= 24.2: $ pip install pip setuptools --upgrade Your choices: - Upgrade to Python 3. - Install an older version of bilby: $ pip install 'bilby<0.6.4' """)
nilq/baby-python
python
from discord import Color, Embed, Member, Object
from discord.ext import commands
from discord.ext.commands import Context as CommandContext

# Role ids on the support guild: the premium role granted by the bot, and the
# role Patreon assigns to active patrons.
PREMIUM_RULESBOT = 488367350274326528
ACTIVE_PATREON = 488774886043680769


class PremiumCog(object):
    """Cog that advertises premium and grants the premium role to patrons.

    NOTE(review): old-style cog (plain object, event method by name) — this
    predates the `commands.Cog` base class; confirm the discord.py version
    before modernizing.
    """

    def __init__(self, bot: commands.Bot):
        self.bot: commands.Bot = bot

    async def on_member_update(self, before: Member, after: Member):
        """Give the premium role to members who just became active patrons."""
        # Only act on the support guild.
        if after.guild.id != 385848724628439062:
            return

        # Active patron without the premium role yet -> grant it.
        if any(role.id == ACTIVE_PATREON for role in after.roles) and not any(
                role.id == PREMIUM_RULESBOT for role in after.roles):
            await after.add_roles(Object(id=PREMIUM_RULESBOT), reason="Patreon")

    @commands.command()
    async def premium(self, ctx: CommandContext):
        """Send an embed describing the premium perks and where to buy."""
        await ctx.send(
            embed=Embed(
                color=Color.green(),
                description="Hey, cool that you think about to go premium! If you go premium you would support the "
                            "developer and the moderators. Also you would help us to cover our costs ;) But what "
                            "would you get?\n__**What will you get?**__\n• change footer text\n• change embed "
                            "color\n• you can sign up to an beta from the bot to test new features\n• faster "
                            "support\n• exclusive textchannels\n__**Where to buy?**__\nYou could buy it on Patreon ["
                            "here](https://www.patreon.com/TheBotDev), but other then normally with patreon this is "
                            "an **one time payment** so you dont need to pay monthly for staying premium!"))


def setup(bot: commands.Bot):
    """discord.py extension entry point."""
    bot.add_cog(PremiumCog(bot))
nilq/baby-python
python
"""Downloads the Forge MDK""" import os import zipfile import shutil import requests from globals import * import logger import modfile def download(): """Downloads and extracts MDK""" prefix = 'https://files.minecraftforge.net/maven/net/minecraftforge/forge/' version = modfile.get('minecraft') + '-' + modfile.get('forge') url = prefix + version + '/forge-' + version + '-mdk.zip' zip_output = 'output.zip' failmsg = f'Failed to download Forge MDK version "{version}". ' # Download MDK req = None logger.log(f'Downloading Forge MDK version {version}...') try: req = requests.get(url, allow_redirects=True) except ConnectionError as err: logger.error(err, suppress=True) logger.log(failmsg + 'Please try again.') if b"404 Not Found" in req.content: error = failmsg + 'This is most likely due to invalid configuration. Please try again.' logger.error(error, suppress=True) raise FileNotFoundError(error) with open(zip_output, 'wb') as file: file.write(req.content) # Extract MDK logger.log('Extracting downloaded MDK') shutil.rmtree(OUTPUT_FOLDER, ignore_errors=True) with zipfile.ZipFile(zip_output, 'r') as file: file.extractall(OUTPUT_FOLDER) os.remove(zip_output) try: cleanup() except: print("Folder cleanup failed.") def cleanup(): """Cleans up output folder""" # Configure gitfiles # with open(OUTPUT_FOLDER + '.gitattributes', 'w') as file: # file.write('src/generated/**/*.json text eol=lf') os.remove(OUTPUT_FOLDER + '.gitattributes') with open(OUTPUT_FOLDER + '.gitignore', 'w') as file: file.write('.gradle/\nbuild/\nlogs/\nmdk_info/\n') # Move Forge metainfo into folder mdk_info = ['changelog.txt', 'CREDITS.txt', 'README.txt', 'LICENSE.txt'] shutil.rmtree(OUTPUT_FOLDER + 'mdk_info', ignore_errors=True) os.mkdir(OUTPUT_FOLDER + 'mdk_info') for file in mdk_info: os.rename(OUTPUT_FOLDER + file, OUTPUT_FOLDER + 'mdk_info/' + file) os.rename(OUTPUT_FOLDER + 'src/main/java/com/example/examplemod/ExampleMod.java', OUTPUT_FOLDER + 'mdk_info/ExampleMod.java') shutil.rmtree(OUTPUT_FOLDER 
+ 'src/main/java/com') # make setup if __name__ == '__main__': modfile.read() download()
nilq/baby-python
python
# -*- coding: utf-8 -*- from django.db import migrations, models import api.models class Migration(migrations.Migration): dependencies = [ ('api', '0001_initial'), ] operations = [ migrations.AlterField( model_name='app', name='id', field=models.SlugField(max_length=24, unique=True, null=True, validators=[api.models.validate_app_id, api.models.validate_reserved_names]), ), migrations.AlterField( model_name='app', name='uuid', field=models.UUIDField(serialize=False, verbose_name='UUID', primary_key=True), ), migrations.AlterField( model_name='build', name='uuid', field=models.UUIDField(serialize=False, verbose_name='UUID', primary_key=True), ), migrations.AlterField( model_name='config', name='uuid', field=models.UUIDField(serialize=False, verbose_name='UUID', primary_key=True), ), migrations.AlterField( model_name='container', name='uuid', field=models.UUIDField(serialize=False, verbose_name='UUID', primary_key=True), ), migrations.AlterField( model_name='key', name='uuid', field=models.UUIDField(serialize=False, verbose_name='UUID', primary_key=True), ), migrations.AlterField( model_name='push', name='uuid', field=models.UUIDField(serialize=False, verbose_name='UUID', primary_key=True), ), migrations.AlterField( model_name='release', name='uuid', field=models.UUIDField(serialize=False, verbose_name='UUID', primary_key=True), ), ]
nilq/baby-python
python
''' Date: 2021-08-12 12:24:42 LastEditors: Liuliang LastEditTime: 2021-08-12 18:28:35 Description: ''' from typing import Optional class Node(): def __init__(self, data) -> None: self.data = data self._next = None # class LinkList(): # def __init__(self) -> None: # self._head = None # link = LinkList() # link._head = Node(1) # link._head.next = Node(2) # print(link._head.next.val) n_1 = Node(1) n_2 = Node(2) n_3 = Node(3) n_4 = Node(4) n_5 = Node(5) n_1._next = n_2 n_2._next = n_3 n_3._next = n_4 n_4._next = n_5 def reverse(head: Node) -> Optional[Node]: reversed_head = None current = head while current: # current, reversed_head, reversed_head._next, = current._next, current, reversed_head # current, reversed_head._next, reversed_head, = current._next, reversed_head, current #这个不行 reversed_head, reversed_head._next, current = current, reversed_head,current._next return reversed_head def reverse_cur(head:Node): if head == None or head._next == None: return head else: newhead = reverse_cur(head._next) head._next._next = head head._next = None return newhead def test(head:Node): slow, fast = head, head while fast and fast._next: slow = slow._next fast = fast._next._next if slow == fast: return True return False def merge(l1:Node,l2:Node): if l1 and l2: p1, p2 = l1, l2 fake_head = Node(None) current = fake_head while p1 and p2: if p1.data <= p2.data: current._next = p1 p1 = p1._next else: current._next = p2 p2 = p2._next current = current._next current._next = p1 if p1 else p2 return fake_head._next return l1 or l2 def del_n(head:Node, n:int): current = head count = 0 while current is not None: count += 1 current = current._next count -= n+1 current = head while count>0: count -= 1 current = current._next current._next = current._next._next return head #nums = count - n def del_n_2(head:Node, n:int): fast = head count = 0 while fast and count < n: fast = fast._next count += 1 if not fast and count < n: return head if not fast and count == n: return head._next slow = 
head while fast._next: fast, slow = fast._next, slow._next slow._next = slow._next._next return head return 0 def print_all(head:Node): nums = [] current = head while current: nums.append(current.data) current = current._next print('->'.join(str(num) for num in nums)) # def find_mid(head:Node): print_all(n_1) m = reverse(n_1) print_all(m) print(test(m)) print(test(n_1)) nums = del_n_2(m,3) print_all(m) # print(n_1.data) # print(n_1._next.data)
nilq/baby-python
python
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements.  See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License.  You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from ducktape.utils.util import wait_until


class JmxMixin(object):
    """This mixin helps existing service subclasses start JmxTool on their worker nodes and collect jmx stats.

    A couple things worth noting:
    - this is not a service in its own right.
    - we assume the service using JmxMixin also uses KafkaPathResolverMixin

    NOTE(review): this module uses Python 2 idioms (`xrange`, generator
    `.next()`); it predates a Python 3 port.
    """
    def __init__(self, num_nodes, jmx_object_names=None, jmx_attributes=None):
        # MBean object names / attributes JmxTool should sample; when
        # jmx_object_names is None, start_jmx_tool is a no-op.
        self.jmx_object_names = jmx_object_names
        self.jmx_attributes = jmx_attributes or []
        self.jmx_port = 9192

        # Per-node state, indexed by (node idx - 1).
        self.started = [False] * num_nodes
        self.jmx_stats = [{} for x in range(num_nodes)]
        self.maximum_jmx_value = {}  # map from object_attribute_name to maximum value observed over time
        self.average_jmx_value = {}  # map from object_attribute_name to average value observed over time

    def clean_node(self, node):
        """Kill any running JmxTool on `node` and remove its log file."""
        node.account.kill_process("jmx", clean_shutdown=False, allow_fail=True)
        node.account.ssh("rm -rf /mnt/jmx_tool.log", allow_fail=False)

    def start_jmx_tool(self, idx, node):
        """Launch JmxTool on `node` (1-based index `idx`), sampling the
        configured MBeans every second into /mnt/jmx_tool.log."""
        if self.started[idx-1] or self.jmx_object_names is None:
            return

        # JmxTool is not particularly robust to slow-starting processes. In order to ensure JmxTool doesn't fail if the
        # process we're trying to monitor takes awhile before listening on the JMX port, wait until we can see that port
        # listening before even launching JmxTool
        def check_jmx_port_listening():
            return 0 == node.account.ssh("nc -z 127.0.0.1 %d" % self.jmx_port, allow_fail=True)

        wait_until(check_jmx_port_listening, timeout_sec=30, backoff_sec=.1,
                   err_msg="%s: Never saw JMX port for %s start listening" % (node.account, self))

        cmd = "%s kafka.tools.JmxTool " % self.path.script("kafka-run-class.sh", node)
        cmd += "--reporting-interval 1000 --jmx-url service:jmx:rmi:///jndi/rmi://127.0.0.1:%d/jmxrmi" % self.jmx_port
        for jmx_object_name in self.jmx_object_names:
            cmd += " --object-name %s" % jmx_object_name
        for jmx_attribute in self.jmx_attributes:
            cmd += " --attributes %s" % jmx_attribute
        cmd += " | tee -a /mnt/jmx_tool.log"

        self.logger.debug("Start JmxTool %d command: %s", idx, cmd)
        jmx_output = node.account.ssh_capture(cmd, allow_fail=False)
        # Pull the first line so we know the tool actually started (py2 API).
        jmx_output.next()

        self.started[idx-1] = True

    def read_jmx_output(self, idx, node):
        """Parse /mnt/jmx_tool.log on `node` into per-second stats; once all
        nodes have been read, compute per-attribute averages and maxima
        aggregated (summed) across nodes."""
        if self.started[idx-1] == False:
            return

        object_attribute_names = []

        cmd = "cat /mnt/jmx_tool.log"
        self.logger.debug("Read jmx output %d command: %s", idx, cmd)
        for line in node.account.ssh_capture(cmd, allow_fail=False):
            if "time" in line:
                # CSV header row: quoted column names; drop the time column.
                object_attribute_names = line.strip()[1:-1].split("\",\"")[1:]
                continue
            stats = [float(field) for field in line.split(',')]
            time_sec = int(stats[0]/1000)
            self.jmx_stats[idx-1][time_sec] = {name : stats[i+1] for i, name in enumerate(object_attribute_names)}

        # do not calculate average and maximum of jmx stats until we have read output from all nodes
        if any(len(time_to_stats) == 0 for time_to_stats in self.jmx_stats):
            return

        start_time_sec = min([min(time_to_stats.keys()) for time_to_stats in self.jmx_stats])
        end_time_sec = max([max(time_to_stats.keys()) for time_to_stats in self.jmx_stats])

        for name in object_attribute_names:
            aggregates_per_time = []
            for time_sec in xrange(start_time_sec, end_time_sec + 1):
                # assume that value is 0 if it is not read by jmx tool at the given time. This is appropriate for metrics such as bandwidth
                values_per_node = [time_to_stats.get(time_sec, {}).get(name, 0) for time_to_stats in self.jmx_stats]
                # assume that value is aggregated across nodes by sum. This is appropriate for metrics such as bandwidth
                aggregates_per_time.append(sum(values_per_node))
            self.average_jmx_value[name] = sum(aggregates_per_time) / len(aggregates_per_time)
            self.maximum_jmx_value[name] = max(aggregates_per_time)

    def read_jmx_output_all_nodes(self):
        """Collect and aggregate JmxTool output from every node."""
        for node in self.nodes:
            self.read_jmx_output(self.idx(node), node)
nilq/baby-python
python
class Solution:
    """
    @param arr: an array of integers
    @return: the length of the shortest possible subsequence of integers that are unordered
    """
    def shortestUnorderedArray(self, arr):
        """Return 0 when `arr` is already ordered (entirely non-decreasing or
        entirely non-increasing); otherwise 3 — the shortest unordered
        subsequence is always a 3-element "peak" or "valley".
        """
        # Compare each adjacent pair once; with 0 or 1 elements there are no
        # pairs, so both checks are vacuously True and the result is 0.
        pairs = list(zip(arr, arr[1:]))
        non_decreasing = all(a <= b for a, b in pairs)
        non_increasing = all(a >= b for a, b in pairs)
        return 0 if non_decreasing or non_increasing else 3
nilq/baby-python
python
# -*- coding: utf-8 -*-
# Copyright (c) 2018-2020 Niko Sandschneider

"""Module implementing Signals for communicating with the GUI."""

from functools import wraps
import logging

from PyQt5.QtCore import QObject, pyqtSignal


class Signals(QObject):
    """Class for signal communication between worker classes and GUI."""

    # Qt signals: progress reporting, abort handling and credential exchange.
    update_progress_bar = pyqtSignal()
    add_progress_text = pyqtSignal(str, bool)
    abort_signal = pyqtSignal()
    get_credentials = pyqtSignal(str)
    send_credentials = pyqtSignal(str, str)

    def __init__(self):
        super().__init__()
        # abort is flipped by abort_evaluation() and checked by the
        # update_progress decorator before each wrapped call.
        self.abort = False
        self.abort_signal.connect(self.abort_evaluation)
        # connected tracks whether connect_signals() has been called.
        self.connected = False
        self.logger = logging.getLogger('easyp2p.p2p_signals.Signals')
        self.logger.debug('Created Signals instance.')

    def update_progress(self, func):
        """Decorator for updating progress text and progress bar.

        RuntimeError aborts the platform (re-raised as PlatformFailedError);
        RuntimeWarning is reported but evaluation continues with result None.
        The progress bar is advanced in either case.
        """
        @wraps(func)
        def wrapper(*args, **kwargs):
            try:
                if self.abort:
                    raise RuntimeError('Abort by user')
                result = func(*args, **kwargs)
            except RuntimeError as err:
                self.logger.exception('RuntimeError in update_progress')
                self.add_progress_text.emit(str(err), True)
                raise PlatformFailedError from err
            except RuntimeWarning as err:
                self.logger.warning(
                    'RuntimeWarning in update_progress', exc_info=True)
                self.add_progress_text.emit(str(err), True)
                result = None
            finally:
                self.update_progress_bar.emit()
            return result
        return wrapper

    def watch_errors(self, func):
        """Decorator for emitting error messages to the progress window.

        Same error mapping as update_progress, but without touching the
        progress bar.
        """
        @wraps(func)
        def wrapper(*args, **kwargs):
            try:
                result = func(*args, **kwargs)
            except RuntimeError as err:
                self.logger.exception('RuntimeError in watch_errors.')
                self.add_progress_text.emit(str(err), True)
                raise PlatformFailedError from err
            except RuntimeWarning as err:
                self.logger.warning(str(err))
                self.add_progress_text.emit(str(err), True)
                result = None
            return result
        return wrapper

    def connect_signals(self, other: 'Signals') -> None:
        """
        Helper method for connecting signals of different classes.

        Args:
            other: Signals instance of another class.

        """
        self.logger.debug('Connecting signals.')
        self.update_progress_bar.connect(other.update_progress_bar)
        self.add_progress_text.connect(other.add_progress_text)
        self.get_credentials.connect(other.get_credentials)
        # Credentials flow in the opposite direction: other -> self.
        other.send_credentials.connect(self.send_credentials)
        self.connected = True
        self.logger.debug('Connecting signals successful.')

    def disconnect_signals(self) -> None:
        """
        Disconnect signals. Ignore error if they were not connected or if
        disconnecting fails.

        """
        if not self.connected:
            return

        self.logger.debug('Disconnecting signals.')
        for signal in [
                self.add_progress_text, self.get_credentials,
                self.update_progress_bar]:
            try:
                signal.disconnect()
            except TypeError:
                # Qt raises TypeError when the signal has no connections.
                self.logger.exception(
                    'Disconnecting signal %s failed.', str(signal))
            else:
                self.logger.debug('Signal %s disconnected.', str(signal))
        self.connected = False

    def abort_evaluation(self):
        """Set the abort flag to True."""
        self.logger.debug('Aborting evaluation.')
        self.abort = True


class PlatformFailedError(Exception):
    """Will be raised if evaluation of a P2P platform fails."""
nilq/baby-python
python
import csv

from django.contrib import admin
from django.http import HttpResponse
from django.utils.html import format_html

from main.models import StudentRegisterationForm


def show_email(obj):
    """Render the student's email as a clickable, HTML-escaped mailto link."""
    # format_html escapes the value and marks the result safe, replacing the
    # deprecated allow_tags=True / manual %-interpolation (which did not
    # escape user-supplied data).
    return format_html('<a href="mailto:{0}" target="_blank">{0}</a>', obj.email)


show_email.short_description = 'Email'


def show_github_url(obj):
    """Render the student's GitHub URL as a clickable, HTML-escaped link."""
    return format_html('<a href="{0}" target="_blank">{0}</a>', obj.github_url)


show_github_url.short_description = 'GitHub URL'


class registerAdmin(admin.ModelAdmin):
    """Admin for student registrations with a CSV-export bulk action."""

    list_display = ('name', show_email, 'branch', 'year', show_github_url)
    actions = ['export_csv']

    def export_csv(modeladmin, request, queryset):
        """Return the selected rows as a CSV attachment."""
        # Create the HttpResponse object with the appropriate CSV header.
        response = HttpResponse(content_type='text/csv')
        response['Content-Disposition'] = 'attachment; filename="css_export.csv"'
        writer = csv.writer(response)
        for obj in queryset:
            # Fix: _meta.fields yields Field objects, so getattr needs the
            # field *name* — the original getattr(obj, f) raised TypeError.
            writer.writerow([getattr(obj, f.name) for f in modeladmin.model._meta.fields])
        return response

    export_csv.short_description = "Export to CSV"


admin.site.register(StudentRegisterationForm, registerAdmin)
nilq/baby-python
python
############################################################################
# Copyright ESIEE Paris (2018)                                             #
#                                                                          #
# Contributor(s) : Benjamin Perret                                         #
#                                                                          #
# Distributed under the terms of the CECILL-B License.                     #
#                                                                          #
# The full license is in the file LICENSE, distributed with this software. #
############################################################################

import higra as hg
import numpy as np


@hg.argument_helper(hg.CptGridGraph)
def graph_4_adjacency_2_khalimsky(graph, edge_weights, shape, add_extra_border=False):
    """
    Create a contour image in the Khalimsky grid from a 4 adjacency edge-weighted graph.

    :param graph: must be a 4 adjacency 2d graph (Concept :class:`~higra.CptGridGraph`)
    :param edge_weights: edge weights of the graph
    :param shape: shape of the graph (deduced from :class:`~higra.CptGridGraph`)
    :param add_extra_border: if False result size is 2 * shape - 1 and 2 * shape + 1 otherwise
    :return: a 2d array
    """
    shape = hg.normalize_shape(shape)
    return hg.cpp._graph_4_adjacency_2_khalimsky(graph, shape, edge_weights, add_extra_border)


def khalimsky_2_graph_4_adjacency(khalimsky, extra_border=False):
    """
    Create a 4 adjacency edge-weighted graph from a contour image in the Khalimsky grid.

    :param khalimsky: a 2d array
    :param extra_border: if False the shape of the Khalimsky image is 2 * shape - 1 and 2 * shape + 1 otherwise, where shape is the shape of the resulting grid graph
    :return: a graph (Concept :class:`~higra.CptGridGraph`) and its edge weights
    """
    graph, embedding, edge_weights = hg.cpp._khalimsky_2_graph_4_adjacency(khalimsky, extra_border)
    # Attach the grid-graph concept and the interior vertex degree so that
    # downstream algorithms can treat this graph like the other factories'.
    hg.CptGridGraph.link(graph, hg.normalize_shape(embedding.shape()))
    hg.set_attribute(graph, "no_border_vertex_out_degree", 4)

    return graph, edge_weights


def get_4_adjacency_graph(shape):
    """
    Create an explicit undirected 4 adjacency graph of the given shape.

    :param shape: a pair (height, width)
    :return: a graph (Concept :class:`~higra.CptGridGraph`)
    """
    shape = hg.normalize_shape(shape)
    graph = hg.cpp._get_4_adjacency_graph(shape)
    hg.CptGridGraph.link(graph, shape)
    hg.set_attribute(graph, "no_border_vertex_out_degree", 4)
    return graph


def get_8_adjacency_graph(shape):
    """
    Create an explicit undirected 8 adjacency graph of the given shape.

    :param shape: a pair (height, width)
    :return: a graph (Concept :class:`~higra.CptGridGraph`)
    """
    shape = hg.normalize_shape(shape)
    graph = hg.cpp._get_8_adjacency_graph(shape)
    hg.CptGridGraph.link(graph, shape)
    hg.set_attribute(graph, "no_border_vertex_out_degree", 8)
    return graph


def get_4_adjacency_implicit_graph(shape):
    """
    Create an implicit undirected 4 adjacency graph of the given shape (edges are not stored).

    :param shape: a pair (height, width)
    :return: a graph (Concept :class:`~higra.CptGridGraph`)
    """
    shape = hg.normalize_shape(shape)
    graph = hg.cpp._get_4_adjacency_implicit_graph(shape)
    hg.CptGridGraph.link(graph, shape)
    hg.set_attribute(graph, "no_border_vertex_out_degree", 4)
    return graph


def get_8_adjacency_implicit_graph(shape):
    """
    Create an implicit undirected 8 adjacency graph of the given shape (edges are not stored).

    :param shape: a pair (height, width)
    :return: a graph (Concept :class:`~higra.CptGridGraph`)
    """
    shape = hg.normalize_shape(shape)
    graph = hg.cpp._get_8_adjacency_implicit_graph(shape)
    hg.CptGridGraph.link(graph, shape)
    hg.set_attribute(graph, "no_border_vertex_out_degree", 8)
    return graph
nilq/baby-python
python
# Work-in-progress sweep over 2D points with a numba-jitted event queue.
# The intersection-event side of the queue is still a TODO.
from collections import defaultdict, namedtuple
from numba import njit, jitclass
from numba import types
import numba
import matplotlib.pyplot as plt
import matplotlib.patches as patches
import numpy as np
from matplotlib.ticker import (AutoMinorLocator, MultipleLocator)
import argparse

parser = argparse.ArgumentParser()
# NOTE(review): flag name and help text appear contradictory (--no-random is
# described as choosing points at random) — confirm the intended meaning.
parser.add_argument("--no-random", action='store_true',
                    help="Chose 30 points at random instead of manual input")
args = parser.parse_args()

# A line in slope/intercept form (currently unused below).
Line = namedtuple('line', ['slope', 'intercept'])

# numba type spec for the jitclass fields.
spec = [
    ('points', numba.float32[:, :]),
    ('next_point', numba.int32),
]


@jitclass(spec)
class Queue(object):
    """Event queue over a fixed array of points, consumed front to back."""

    def __init__(self, points):
        self.points = points
        self.next_point = 0  # index of the next unconsumed point

    @property
    def length(self):
        points_left = len(self.points) - self.next_point
        # TODO intersection queue
        intersections_left = 0
        return points_left + intersections_left

    @property
    def is_next_point(self):
        #TODO check if an intersection is closer
        return True

    def pop_point(self):
        # Return the next point and advance the cursor.
        current_point = self.points[self.next_point]
        self.next_point += 1
        return current_point


def gather_points_manually():
    """Collect points interactively with matplotlib's ginput (click to add)."""
    f = plt.figure()
    points = plt.ginput(-1)
    plt.close(f)
    return np.array(points)


def gather_points_random():
    """Return 30 points drawn uniformly from [-1, 1]^2."""
    return np.random.uniform(-1, 1, size=(30, 2))


def dist(points):
    """Distance between the first two points; inf for a single point."""
    if points.shape[0] == 1:
        return np.inf
    return np.sqrt(((points[0] - points[1])**2).sum())


def sort_points_numpy(points):
    """Sort points lexicographically (last coordinate is the primary key)."""
    return points[np.lexsort(points.T)]


@njit
def solve(queue):
    """Drain the queue, printing each point (sweep logic not implemented yet)."""
    while queue.length > 0:
        if queue.is_next_point:
            point = queue.pop_point()
            print(point)


if not args.no_random:
    points = gather_points_random()
else:
    points = gather_points_manually()
print(points.shape)

# NOTE(review): this hard-coded test fixture overrides whatever was gathered
# above — presumably left in for debugging.
points = np.array([[5, 1], [0, 1], [-1, 1] ,[0, 0], [5, 0],[-1, 0]])
points = sort_points_numpy(points)

queue = Queue(points.astype('float32'))
solve(queue)
nilq/baby-python
python
""" Quickly load ROOT symbols without triggering PyROOT's finalSetup(). The main principle is that appropriate dictionaries first need to be loaded. """ from __future__ import absolute_import import ROOT from .. import log; log = log[__name__] from .module_facade import Facade __all__ = [] root_module = ROOT.module._root if hasattr(root_module, 'LookupCppEntity'): # pragma: no cover lookup_func = 'LookupCppEntity' else: # pragma: no cover lookup_func = 'LookupRootEntity' # Quick's __name__ needs to be the ROOT module for this to be transparent. # The below is one way of obtaining such a function # First determine the ROOT version without triggering PyROOT's finalSetup() Quick = eval('lambda symbol: module._root.{0}(symbol)'.format(lookup_func), ROOT.__dict__) _gSystem = Quick("gSystem") Load = _gSystem.Load # It is not vital to list _all_ symbols in here, just enough that a library # will be loaded by the time it is needed. SYMBOLS = dict( Hist='TH1 TGraph TGraphAsymmErrors', Tree='TCut TTree', Gui='TPad TCanvas', Graf='TLegend TLine TEllipse', Physics='TVector2 TVector3 TLorentzVector TRotation TLorentzRotation', Matrix='TMatrixT', RooStats='RooStats RooMsgService', RooFit='RooFit RooWorkspace', ) # Mapping of symbols to libraries which need to be loaded SYMBOLS_TO_LIB = dict( (sym, lib) for lib, syms in SYMBOLS.items() for sym in syms.split()) # If you encounter problems with particular symbols, add them to this set. 
SLOW = set("".split()) @Facade(__name__, expose_internal=False) class QuickROOT(object): def __getattr__(self, symbol): if symbol in SLOW: # pragma: no cover log.warning( "Tried to quickly load {0} which is always slow".format(symbol)) lib = SYMBOLS_TO_LIB.get(symbol, None) if lib: # Load() doesn't cost anything if the library is already loaded libname = "lib{0}".format(lib) if libname not in _gSystem.GetLibraries(): regex = "^duplicate entry .* for level 0; ignored$" with log["/ROOT.TEnvRec.ChangeValue"].ignore(regex): if Load(libname) == 0: log.debug("Loaded {0} (required by {1})".format( libname, symbol)) elif lib == 'Gui': # Possibly no X11 forwarding log.debug("Unable to load {0} (required by {1}). " "Putting ROOT in batch mode.".format( libname, symbol)) ROOT.gROOT.SetBatch(True) else: # pragma: no cover raise RuntimeError( "Unable to load {0} (required by {1})".format( libname, symbol)) try: thing = Quick(symbol) except NameError: # pragma: no cover # NameError: global name 'module' is not defined # Python must be exiting... return None if isinstance(thing, root_module.PropertyProxy): # descriptor setattr(self.__class__, symbol, thing) return getattr(self, symbol) # normal member return thing
nilq/baby-python
python
from contextlib import contextmanager from flask_login import UserMixin import markdown from markdown.extensions.toc import TocExtension from sqlalchemy import create_engine from sqlalchemy import Column, Integer, String, DateTime from sqlalchemy.orm import sessionmaker from sqlalchemy.ext.declarative import declarative_base _Base = declarative_base() _engine = create_engine("sqlite:///blog.db") _Session = sessionmaker(bind=_engine, expire_on_commit=False) class Post(_Base): __tablename__ = "posts" rowid = Column(Integer, primary_key=True) title = Column(String) text = Column(String) published = Column(DateTime) last_modified = Column(DateTime) @property def html(self): md_converter = markdown.Markdown(extensions=[ TocExtension(baselevel=2, marker=""), "markdown.extensions.fenced_code", "markdown.extensions.codehilite" ]) return md_converter.convert(self.text) @property def toc(self): md_converter = markdown.Markdown(extensions=[ TocExtension(baselevel=2, marker=""), "markdown.extensions.fenced_code", "markdown.extensions.codehilite" ]) md_converter.convert(self.text) return md_converter.toc def __repr__(self): return "<Post(title='{0}', published='{1}', ...)>".format( self.title, self.published) class Tag(_Base): __tablename__ = "tags" rowid = Column(Integer, primary_key=True) tag = Column(String) def __repr__(self): return "<Tag(tag='{0}')>".format(self.tag) class Post2Tag(_Base): __tablename__ = "posts2tags" rowid = Column(Integer, primary_key=True) post_id = Column(Integer) tag_id = Column(Integer) def __repr__(self): return "<Post2Tag(post_id='{0}', tag_id='{1}')>".format( self.post_id, self.tag_id) class User(_Base, UserMixin): __tablename__ = "users" rowid = Column(Integer, primary_key=True) username = Column(String) pw_hash = Column(String) is_admin = Column(Integer) fullname = Column(String) @property def id(self): return str(self.rowid) def __repr__(self): return "<User(username='{0}', is_admin='{1}')>".format( self.username, self.is_admin) 
@contextmanager
def session_context():
    """Provide a transactional scope around a series of ORM operations.

    Commits on success, rolls back and re-raises on any error, and always
    closes the session.
    """
    session = _Session()
    try:
        yield session
        session.commit()
    except BaseException:
        # Explicit BaseException (instead of a bare `except:`) so the intent
        # is clear: roll back on *anything*, including KeyboardInterrupt,
        # then re-raise to the caller.
        session.rollback()
        raise
    finally:
        session.close()
nilq/baby-python
python
__author__ = 'DafniAntotsiou'

import numpy as np
from gym import spaces


# universal prefix for the different networks
def get_il_prefix():
    """Variable-name prefix for the imitation-learning network."""
    return 'il_'


def get_semi_prefix():
    """Variable-name prefix for the semi-supervised network."""
    return 'semi_'


# add auxiliary actions to env observation space for semi network
def semi_ob_space(env, semi_size):
    """Extend the env observation space by `semi_size` auxiliary dimensions.

    Returns env.observation_space unchanged when semi_size <= 0.
    """
    if semi_size > 0:
        semi_obs_dim = env.observation_space.shape[0] + semi_size
        semi_high = np.inf * np.ones(semi_obs_dim)
        semi_low = -semi_high
        return spaces.Box(semi_low, semi_high, dtype=np.float64)
    else:
        return env.observation_space


def reset_envs(semi_dataset, envs, traj_id=-1, random_init=True, add_noise=False):
    """
    resets the environments to a frame in the semi-supervised dataset
    :param semi_dataset: the dataset
    :param envs: list of environments to be reset
    :param traj_id: the id number of the trajectory to initialise the environment to. Is random if < 0.
    :param random_init: Initialise at the beginning of the trajectory if False, random trajectory frame if True.
    :param add_noise: add noise to the dataset trajectories during reset
    :return: the (full_ob of the semi-supervised network, the environment ob, the environments) tuple
    """
    # reset the first env in the list, then copy its state to the rest
    full_ob, ob, set_env = reset_env(semi_dataset, envs[0], traj_id=traj_id,
                                     random_init=random_init, add_noise=add_noise)
    qpos = set_env.env.env.sim.get_state().qpos.copy()
    qvel = set_env.env.env.sim.get_state().qvel.copy()
    # semi_dict stays None when the env exposes no "gs" (get state) hook;
    # the "ss" branch below is only expected for envs that also have "gs".
    semi_dict = None
    if hasattr(set_env.env.env, "gs"):
        semi_dict = set_env.env.env.gs()
    for env in envs:
        if env == set_env:
            continue
        env.reset()
        if hasattr(env.env.env, "ss"):
            if add_noise:
                env.env.env.ss(semi_dict, add_noise=add_noise)  # written like this for debug. TODO: refactor
            else:
                env.env.env.ss(semi_dict)
        elif hasattr(env.env.env, "reset_model_pos"):
            env.env.env.reset_model_pos(qpos=qpos, qvel=qvel)
        elif hasattr(env.env.env, "set_state"):
            env.env.env.set_state(qpos=qpos, qvel=qvel)
        else:
            print("Incompatible environment for semi supervision...")
            exit(1)
    return full_ob, ob, envs


def reset_env(semi_dataset, env, traj_id=-1, random_init=True, add_noise=False):
    """
    resets the environment to a frame in the semi-supervised dataset
    :param semi_dataset: the dataset
    :param env: the environment to be reset
    :param traj_id: the id number of the trajectory to initialise the environment to. Is random if < 0.
    :param random_init: Initialise at the beginning of the trajectory if False, random trajectory frame if True.
    :param add_noise: add noise to the semi_dataset trajectories during reset
    :return: the (full_ob of the semi-supervised network, the environment ob, the environment) tuple
    """
    ob = env.reset()
    if semi_dataset:
        # is the retargeting network - reset the env with semi-labels
        semi_dict = semi_dataset.init_traj_labels(traj_id=traj_id, random_init=random_init)  # random initialisation
        if not semi_dict:
            print("No available expert semi-labels fam!")
            exit(1)

        # reset the environment with the observations from the dataset
        if hasattr(env.env.env, "ss"):
            if add_noise:
                env.env.env.ss(semi_dict, add_noise=add_noise)  # written like this for debug. TODO: refactor
            else:
                env.env.env.ss(semi_dict)
        elif hasattr(env.env.env, "reset_model_pos"):
            env.env.env.reset_model_pos(qpos=semi_dict['qpos'], qvel=semi_dict['qvel'])
        elif hasattr(env.env.env, "set_state"):
            env.env.env.set_state(qpos=semi_dict['qpos'], qvel=semi_dict['qvel'])
        else:
            print("Incompatible environment for retargeting...")
            exit(1)

        full_ob = semi_dict['full_ob']
        ob = semi_dict['ob']
    else:
        full_ob = ob
    return full_ob, ob, env


def semi_loss_func(ac, full_ob, semi_dataset, is_relative_actions=False):
    """
    get the L2 loss between generated actions and semi supervised actions
    :param ac: the semi supervised actions
    :param full_ob: the full observations of the semi supervised dataset
    :param semi_dataset: the semi supervised dataset
    :param is_relative_actions: if True, `ac` is already the deviation to penalise
    :return: the L2 loss if semi_dataset exists, 0 otherwise
    """
    # Bug fix: the original computed
    #   diff = ac - semi_dataset.full_ob_2_acs(full_ob)
    # *before* checking `semi_dataset is not None`, raising AttributeError
    # for a None dataset instead of returning 0. Guard first.
    if semi_dataset is None:
        return 0
    diff = ac if is_relative_actions else ac - semi_dataset.full_ob_2_acs(full_ob)
    return (diff ** 2).mean()


def relative_2_absolute_action(ac, full_ob, semi_dataset, ac_space):
    """
    get absolute action by adding the relative action to the original from the semi dataset,
    given environmental action bounds
    :param ac: the relative actions from the policy
    :param full_ob: the full set of observations from semi_dataset that produced ac
    :param semi_dataset: the semi dataset that produced the full_ob
    :param ac_space: the action space of the environment, to set the action bounds
    :return: the absolute value of the actions to apply to the environment
    """
    orig_ac = semi_dataset.full_ob_2_acs(full_ob)
    sigma_ratio = 0.4  # ratio of sigma ac can move orig_ac in both directions
    sigma = (ac_space.high - ac_space.low) * sigma_ratio
    ac = np.clip(ac, -sigma, sigma)
    return np.clip(ac + orig_ac, ac_space.low, ac_space.high)
nilq/baby-python
python
#
# Copyright (c) 2018 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#

import torch
import torch.nn as nn


class EltwiseAdd(nn.Module):
    """Module wrapper for element-wise addition of any number of tensors."""

    def __init__(self, inplace=False):
        super().__init__()
        # When True, accumulate into the first tensor instead of allocating.
        self.inplace = inplace

    def forward(self, *inputs):
        acc = inputs[0]
        for tensor in inputs[1:]:
            if self.inplace:
                acc += tensor
            else:
                acc = acc + tensor
        return acc


class EltwiseSub(nn.Module):
    """Module wrapper for element-wise subtraction of any number of tensors."""

    def __init__(self, inplace=False):
        super().__init__()
        # When True, subtract into the first tensor instead of allocating.
        self.inplace = inplace

    def forward(self, *inputs):
        acc = inputs[0]
        for tensor in inputs[1:]:
            if self.inplace:
                acc -= tensor
            else:
                acc = acc - tensor
        return acc


class EltwiseMult(nn.Module):
    """Module wrapper for element-wise multiplication of any number of tensors."""

    def __init__(self, inplace=False):
        super().__init__()
        # When True, multiply into the first tensor instead of allocating.
        self.inplace = inplace

    def forward(self, *inputs):
        acc = inputs[0]
        for tensor in inputs[1:]:
            if self.inplace:
                acc *= tensor
            else:
                acc = acc * tensor
        return acc


class EltwiseDiv(nn.Module):
    """Module wrapper for element-wise division of a tensor by a tensor/scalar."""

    def __init__(self, inplace=False):
        super().__init__()
        # When True, divide in place on the numerator tensor.
        self.inplace = inplace

    def forward(self, x: torch.Tensor, y):
        return x.div_(y) if self.inplace else x.div(y)
nilq/baby-python
python
from .annotation import Sentence
from nltk.tokenize.punkt import PunktParameters
from nltk.tokenize.punkt import PunktSentenceTokenizer
from .ru.processor_tokenizer_ru import _ru_abbrevs
from .en.processor_tokenizer_nltk_en import _en_abbrevs
from itertools import combinations


class ProcessorSentenceSplitter:
    """Performs sentence splitting using simple rules.

    Simple wrapper around NLTK component. Suitable for european languages.
    """

    def __init__(self, delay_init=False):
        self.sent_tokeniser_ = None
        if not delay_init:
            self.init()

    def init(self):
        """Build the underlying Punkt tokenizer (idempotent)."""
        if self.sent_tokeniser_ is None:
            punkt_param = PunktParameters()
            punkt_param.abbrev_types = self.compile_abbreviations()
            self.sent_tokeniser_ = PunktSentenceTokenizer(punkt_param)

    def __call__(self, tokens):
        """Split a token sequence into Sentence spans over token indices."""
        assert self.sent_tokeniser_
        sents = self.sent_tokeniser_.sentences_from_tokens(
            (tok.text for tok in tokens))
        result = list()
        offset = 0
        for sent in sents:
            end = offset + len(sent)
            result.append(Sentence(offset, end))
            offset = end
        return result

    def compile_abbreviations(self):
        """Collect Punkt abbreviation types from letter pairs and regex lists."""
        def dotted_pairs(alphabet):
            # every two-letter combination joined by a dot, e.g. "a.b"
            return ['.'.join(pair) for pair in combinations(alphabet, 2)]

        def cleaned(regexps):
            # strip regex syntax from abbreviation patterns and lowercase them
            result = []
            for pattern in regexps:
                stem = ''.join(pattern.lower().split('.')[:-1])
                stem = stem.replace('\\', '').replace(u'\xad', '')
                stem = stem.replace(' ', '.').replace('?', ' ')
                result.append(stem.lower())
            return result

        abbrevs = dotted_pairs('цукенгшзхфвапролджэячсмитбю')
        abbrevs += cleaned(_ru_abbrevs)
        abbrevs += dotted_pairs('qwertyuiopasdfghjklzxcvbnm')
        abbrevs += cleaned(_en_abbrevs)
        return list(set(abbrevs))
nilq/baby-python
python
import datetime

from django.db import IntegrityError
from django.db.models import Min, Max
from django.utils.timezone import make_aware

from zabiegi import models


def integruj_jednostki():
    # Create a Jednostka (performing unit) row for every distinct unit code
    # present in the imported worksheet rows.
    for w in (
        models.WykazStrona1.objects.all()
        .exclude(dane_operacji_jednostka_wykonująca_kod=None)
        .values_list("dane_operacji_jednostka_wykonująca_kod", flat=True)
        .distinct()
    ):
        models.Jednostka.objects.get_or_create(kod=w)


def integruj_procedury():
    # Create a Procedura row for every distinct (code, name) pair.
    # A code that already exists under a *different* name violates the unique
    # constraint; surface that as a descriptive ValueError instead.
    for kod, nazwa in (
        models.WykazStrona1.objects.all()
        .exclude(procedury_medyczne_kod_procedury=None)
        .values_list("procedury_medyczne_kod_procedury", "procedury_medyczne_nazwa")
        .distinct()
    ):
        try:
            models.Procedura.objects.get_or_create(kod=kod, nazwa=nazwa)
        except IntegrityError:
            n = models.Procedura.objects.get(kod=kod)
            raise ValueError(
                f"Procedura juz istnieje {kod}, probowano nazwy {nazwa}, jest {n.nazwa}"
            )


def integruj_lekarzy():
    # Create a Lekarz (physician) row for every distinct participating-staff
    # (first names, surname, code) triple.
    for (
        personel_uczestniczący_imiona,
        personel_uczestniczący_nazwisko,
        personel_uczestniczący_kod,
    ) in (
        models.WykazStrona1.objects.exclude(personel_uczestniczący_kod=None)
        .values_list(
            "personel_uczestniczący_imiona",
            "personel_uczestniczący_nazwisko",
            "personel_uczestniczący_kod",
        )
        .distinct()
    ):
        models.Lekarz.objects.get_or_create(
            kod=personel_uczestniczący_kod,
            nazwisko=personel_uczestniczący_nazwisko,
            imiona=personel_uczestniczący_imiona,
        )


def integruj_pacjentow():
    # Create a Pacjent (patient) row per distinct (MIP identifier, birth date).
    for (dane_pacjenta_identyfikator_pacjenta_mip, dane_pacjenta_data_urodzenia,) in (
        models.WykazStrona1.objects.exclude(
            dane_pacjenta_identyfikator_pacjenta_mip=None
        )
        .values_list(
            "dane_pacjenta_identyfikator_pacjenta_mip", "dane_pacjenta_data_urodzenia"
        )
        .distinct()
    ):
        models.Pacjent.objects.get_or_create(
            mip=dane_pacjenta_identyfikator_pacjenta_mip,
            data_urodzenia=dane_pacjenta_data_urodzenia,
        )


def integruj_znieczulenia():
    # Build Znieczulenie (anaesthesia) rows from worksheet rows, linking the
    # previously-integrated unit, patient, physician and procedure records.
    # Requires integruj_jednostki/procedury/lekarzy/pacjentow to have run.
    for w in models.WykazStrona1.objects.exclude(l_p=None):
        # Start timestamp: the worksheet stores date and time in two fields.
        poczatek = datetime.datetime.combine(
            w.element_operacji_data_wykonania.date(),
            w.element_operacji_czas_wykonania.time(),
        )
        poczatek = make_aware(poczatek)
        koniec = None
        if w.element_operacji_czas_zakończenia is not None:
            koniec = datetime.datetime.combine(
                w.element_operacji_data_wykonania.date(),
                w.element_operacji_czas_zakończenia.time(),
            )
            koniec = make_aware(koniec)
            # End time earlier than start means the procedure crossed
            # midnight — shift the end to the next day.
            if koniec < poczatek:
                koniec += datetime.timedelta(days=1)
        jednostka = models.Jednostka.objects.get(
            kod=w.dane_operacji_jednostka_wykonująca_kod
        )
        pacjent = models.Pacjent.objects.get(
            mip=w.dane_pacjenta_identyfikator_pacjenta_mip
        )
        z, created = models.Znieczulenie.objects.get_or_create(
            nr=w.dane_operacji_księga_nr,
            poczatek=poczatek,
            koniec=koniec,
            czas_trwania=w.element_operacji_czas_trwania_w_minutach,
            jednostka=jednostka,
            pacjent=pacjent,
        )
        # Attach the physician and procedure (many-to-many) if not present.
        lekarz = models.Lekarz.objects.get(kod=w.personel_uczestniczący_kod)
        if lekarz not in z.lekarze.all():
            z.lekarze.add(lekarz)
        procedura = models.Procedura.objects.get(kod=w.procedury_medyczne_kod_procedury)
        if procedura not in z.procedury.all():
            z.procedury.add(procedura)
        z.save()


def pokaz_statystyki():
    # Print CSV-ish summary tables of the integrated anaesthesia data.
    z = models.Znieczulenie.objects.all().aggregate(
        min=Min("poczatek"), max=Max("poczatek")
    )
    print(f"Analizowany okres od: {z['min'].date()} do {z['max'].date()}")
    print(f"Liczba znieczuleń ogółem: {models.Znieczulenie.objects.count()}")
    print(f"Znieczulenia wg procedury: ")
    for p in models.Procedura.objects.all():
        print(
            f"{p.nazwa},{models.Znieczulenie.objects.filter(procedury__kod=p.kod).count()}"
        )
    print("Znieczulenia wg jednostek i procedur:")
    print(",".join([p.nazwa for p in models.Procedura.objects.all()]))
    for j in models.Jednostka.objects.all():
        row = []
        for p in models.Procedura.objects.all():
            row.append(
                models.Znieczulenie.objects.filter(jednostka=j, procedury=p).count()
            )
        print(f"{j.kod}," + ",".join([str(x) for x in row]))
    print("Znieczulenia wg miesiąca i jednostki")
    print(",".join(["lip", "sie", "wrz", "paź", "lis", "gru"]))
    # NOTE(review): the header lists July–December only, matching the
    # range(7, 13) below — presumably the data covers one half-year.
    for j in models.Jednostka.objects.all():
        row = []
        for miesiac in range(7, 13):
            row.append(
                models.Znieczulenie.objects.filter(
                    jednostka=j,
                    poczatek__month=miesiac
                ).count()
            )
        print(f"{j.kod}," + ",".join([str(x) for x in row]))


def integruj_wszystko():
    # Run the full integration pipeline in dependency order.
    integruj_jednostki()
    integruj_procedury()
    integruj_lekarzy()
    integruj_pacjentow()
    integruj_znieczulenia()
nilq/baby-python
python
#!/usr/local/bin/python # # Copyright (c) 2009-2014 Sippy Software, Inc. All rights reserved. # # All rights reserved. # # Redistribution and use in source and binary forms, with or without modification, # are permitted provided that the following conditions are met: # # 1. Redistributions of source code must retain the above copyright notice, this # list of conditions and the following disclaimer. # # 2. Redistributions in binary form must reproduce the above copyright notice, # this list of conditions and the following disclaimer in the documentation and/or # other materials provided with the distribution. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND # ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED # WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE # DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR # ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES # (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; # LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON # ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS # SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
import socket, sys, getopt
from functools import reduce


def cli_client(address, argv, tcp = False):
    """Send a space-joined command to the rtp_cluster control socket and
    stream the reply to stdout.

    :param address: unix socket path, or (host, port) tuple when tcp=True
    :param argv: command words to join and send
    :param tcp: use TCP instead of a unix-domain socket
    """
    if not tcp:
        s = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
    else:
        s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    try:
        s.connect(address)
        command = ' '.join(argv)
        # sendall() instead of send(): send() may write only part of the
        # buffer, silently truncating the command.
        s.sendall(command.encode('ascii') + b'\nquit\n')
        while True:
            data = s.recv(1024)
            if len(data) == 0:
                break
            sys.stdout.write(data.decode('ascii'))
    finally:
        # Always release the socket, even if connect/recv raises.
        s.close()

def parse_tcp_address(spec):
    """Parse 'host' or 'host:port' into a (host, port) tuple.

    The port defaults to 12345 when omitted.
    """
    parts = spec.split(':', 1)
    if len(parts) == 1:
        return (parts[0], 12345)
    return (parts[0], int(parts[1]))

def usage():
    """Print usage information and exit with status 1."""
    print('usage: rtp_cluster_client.py [-s cmdfile]')
    sys.exit(1)

if __name__ == '__main__':
    try:
        opts, args = getopt.getopt(sys.argv[1:], 's:')
    except getopt.GetoptError:
        usage()
    if len(args) == 0:
        usage()
    cmdfile = 'unix:/var/run/rtp_cluster.sock'
    for o, a in opts:
        if o == '-s':
            cmdfile = a.strip()
            continue
    if cmdfile.startswith('tcp:'):
        cli_client(parse_tcp_address(cmdfile[4:]), args, tcp = True)
    else:
        if cmdfile.startswith('unix:'):
            cmdfile = cmdfile[5:]
        cli_client(cmdfile, args)
nilq/baby-python
python
# Fine-tune LayoutLMv2 for token classification on the CORD receipts dataset.
# NOTE(review): this file appears to be an exported notebook — the bare `#`
# lines below are cell separators from the original; reconstruct with care.
from datasets import load_dataset

cord = load_dataset("katanaml/cord")

#
labels = cord['train'].features['ner_tags'].feature.names

#
# enumerate() yields (index, label), so these map index->label and label->index
id2label = {v: k for v, k in enumerate(labels)}
label2id = {k: v for v, k in enumerate(labels)}

#
from PIL import Image
from transformers import LayoutLMv2Processor
from datasets import Features, Sequence, ClassLabel, Value, Array2D, Array3D

# "no_ocr" revision: words/boxes are supplied by the dataset, not Tesseract
processor = LayoutLMv2Processor.from_pretrained("microsoft/layoutlmv2-base-uncased", revision="no_ocr")

# we need to define custom features
features = Features({
    'image': Array3D(dtype="int64", shape=(3, 224, 224)),
    'input_ids': Sequence(feature=Value(dtype='int64')),
    'attention_mask': Sequence(Value(dtype='int64')),
    'token_type_ids': Sequence(Value(dtype='int64')),
    'bbox': Array2D(dtype="int64", shape=(512, 4)),
    'labels': Sequence(ClassLabel(names=labels)),
})


def preprocess_data(examples):
    # Batched map function: encode images + pre-tokenised words/boxes/labels
    # into fixed-length model inputs.
    images = [Image.open(path).convert("RGB") for path in examples['image_path']]
    words = examples['words']
    boxes = examples['bboxes']
    word_labels = examples['ner_tags']

    encoded_inputs = processor(images, words, boxes=boxes, word_labels=word_labels,
                               padding="max_length", truncation=True)

    return encoded_inputs


train_dataset = cord['train'].map(preprocess_data, batched=True,
                                  remove_columns=cord['train'].column_names,
                                  features=features)
test_dataset = cord['test'].map(preprocess_data, batched=True,
                                remove_columns=cord['test'].column_names,
                                features=features)

#
train_dataset.set_format(type="torch")
test_dataset.set_format(type="torch")

#
from torch.utils.data import DataLoader

train_dataloader = DataLoader(train_dataset, batch_size=4, shuffle=True)
test_dataloader = DataLoader(test_dataset, batch_size=1)

#
# sanity-check one batch's tensor shapes
batch = next(iter(train_dataloader))

for k, v in batch.items():
    print(k, v.shape)

#
from transformers import LayoutLMv2ForTokenClassification, TrainingArguments, Trainer
from datasets import load_metric
import numpy as np

model = LayoutLMv2ForTokenClassification.from_pretrained('microsoft/layoutlmv2-base-uncased',
                                                         num_labels=len(label2id))

# Set id2label and label2id
model.config.id2label = id2label
model.config.label2id = label2id

# Metrics
metric = load_metric("seqeval")
return_entity_level_metrics = True


def compute_metrics(p):
    # p is an (predictions, labels) pair of (batch, seq, num_labels) logits
    # and (batch, seq) gold label ids.
    predictions, labels = p
    predictions = np.argmax(predictions, axis=2)

    # Remove ignored index (special tokens)
    true_predictions = [
        [id2label[p] for (p, l) in zip(prediction, label) if l != -100]
        for prediction, label in zip(predictions, labels)
    ]
    true_labels = [
        [id2label[l] for (p, l) in zip(prediction, label) if l != -100]
        for prediction, label in zip(predictions, labels)
    ]

    results = metric.compute(predictions=true_predictions, references=true_labels)
    if return_entity_level_metrics:
        # Unpack nested dictionaries
        final_results = {}
        for key, value in results.items():
            if isinstance(value, dict):
                for n, v in value.items():
                    final_results[f"{key}_{n}"] = v
            else:
                final_results[key] = value
        return final_results
    else:
        return {
            "precision": results["overall_precision"],
            "recall": results["overall_recall"],
            "f1": results["overall_f1"],
            "accuracy": results["overall_accuracy"],
        }


class CordTrainer(Trainer):
    # Override the dataloaders so the custom ones built above are used.
    def get_train_dataloader(self):
        return train_dataloader

    def get_test_dataloader(self, test_dataset):
        return test_dataloader


args = TrainingArguments(
    output_dir="layoutlmv2-finetuned-cord",  # name of directory to store the checkpoints
    max_steps=10,  # we train for a maximum of 1,000 batches
    warmup_ratio=0.1,  # we warmup a bit
    fp16=False,  # we use mixed precision (less memory consumption), False when on CPU
    # push_to_hub=False,  # after training, we'd like to push our model to the hub
    # push_to_hub_model_id=f"layoutlmv2-finetuned-cord",  # this is the name we'll use for our model on the hub
)

# Initialize our Trainer
trainer = CordTrainer(
    model=model,
    args=args,
    compute_metrics=compute_metrics,
)

#
trainer.train()

#
predictions, labels, metrics = trainer.predict(test_dataset)

#
print(metrics)


def process_document(image):
    # NOTE(review): placeholder inference hook — currently a no-op passthrough.
    print('PROCESS DOCUMENT')
    return image
nilq/baby-python
python
# Generated by Django 3.2.6 on 2021-09-18 04:13 from django.db import migrations class Migration(migrations.Migration): dependencies = [ ('registration', '0011_auto_20210917_0032'), ] operations = [ migrations.DeleteModel( name='Links', ), migrations.RemoveField( model_name='data', name='state', ), ]
nilq/baby-python
python
# coding: utf-8
# Build DGL entity graphs for QAngaroo (WikiHop) samples: exact-match entity
# mentions plus AllenNLP coreference clusters become nodes; DOC-BASED / MATCH /
# COREF / COMPLETE relations become typed, probability-weighted edges.
import scipy
import json
import re
import traceback
import allennlp
from allennlp.predictors.predictor import Predictor
import sys
from allennlp.commands.elmo import ElmoEmbedder
from spacy.lang.en import English
import numpy as np
# import tensorflow as tf
import torch
from hyperpara import *
import dgl
from tqdm import tqdm

# class Logger(object):
#     def __init__(self, filename='default.log', stream=sys.stdout):
#         self.terminal = stream
#         self.log = open(filename, 'w')
#
#     def write(self, message):
#         self.terminal.write(message)
#         self.log.write(message)
#
#     def flush(self):
#         pass
#
# sys.stdout = Logger(args.graph_gen_out_file+'_generation.log', sys.stdout)
# sys.stderr = Logger(args.graph_gen_out_file+'_generation_err.log', sys.stderr)

# Setting for Elmo Embedder - CHANGE THE PATH
# options_file = args.project_address+'mlp_project/src/elmo_2x4096_512_2048cnn_2xhighway_options.json'
# weight_file = args.project_address+'mlp_project/src/elmo_2x4096_512_2048cnn_2xhighway_weights'
options_file = '/home/watsonzhouanda/multihop/src/elmo_2x4096_512_2048cnn_2xhighway_options.json'
weight_file = '/home/watsonzhouanda/multihop/src/elmo_2x4096_512_2048cnn_2xhighway_weights'

# Initialization for each module
nlp = English()
ee = ElmoEmbedder(
    options_file=options_file,
    weight_file=weight_file)
# predictor = Predictor.from_path(args.project_address+'mlp_project/src/coref-model-2018.02.05.tar.gz')
predictor = Predictor.from_path('/home/watsonzhouanda/multihop/src/coref-model-2018.02.05.tar.gz')
print('Pre-trained modules init', flush=True)


def compute_coref(s):
    # Run AllenNLP coreference on a raw document string; on failure fall back
    # to no clusters and a plain spaCy tokenisation.
    try:
        '''
        {
        "document": [tokenised document text]
        "clusters": [
          [
            [start_index, end_index],
            [start_index, end_index]
          ],
          [
            [start_index, end_index],
            [start_index, end_index],
            [start_index, end_index],
          ],
          ....
        ]
        }
        '''
        ret = predictor.predict(s)
        return ret['clusters'], ret['document']
    except RuntimeError:
        return [], [str(w) for w in nlp(s)]


MODE = args.graph_gen_mode
num_data = args.graph_gen_size
# DATA_ADD = args.project_address+"mlp_project/dataset/qangaroo_v1.1/"+args.dataset+"/"
# in_file = DATA_ADD+MODE+".json"
# GRAPH_ADD = args.project_address+"mlp_project/graph/"
DATA_ADD = '/home/watsonzhouanda/multihop/dataset/qangaroo_v1.1/'+args.dataset+"/"
in_file = DATA_ADD+MODE+".json"
GRAPH_ADD = '/home/watsonzhouanda/multihop/graph/'

with open(in_file, 'r') as f:
    data = json.load(f)
# print('Dataset loaded! with size:', len(data[30000:43738]), flush=True)
print('Dataset loaded! with size:', len(data), flush=True)


def regex(text):
    # Normalise a support document: expand NBSP, pad punctuation with spaces,
    # collapse whitespace runs and strip newlines.
    text = text.replace(u'\xa0', ' ')
    text = text.translate(str.maketrans({key: ' {0} '.format(key) for key in '"!&()*+,/:;<=>?[]^`{|}~'}))
    text = re.sub('\s{2,}', ' ', text).replace('\n', '')
    return text


def check(s, wi, c):
    # True when candidate token list `c` matches tokens of `s` starting at `wi`
    # (case-insensitive, exact length).
    return sum([s[wi + j].lower() == c_ for j, c_ in enumerate(c) if wi + j < len(s)]) == len(c)


# c_i, c: entity in entitise set {s} U C_q (entites in canddiate answers and query)
# s_i, s: tokenized support document in supports
# wi, w: word in document s
# Turns (tokenized docu, word_i in original doc, candidate i)
def ind(si, wi, ci, c):
    return [[si, wi + i, ci] for i in range(len(c))]


graph_set = []
if num_data == -1:
    num_data = len(data)
    print("Note: Now you are generating the full graph dataset! in "+args.dataset, flush=True)
else:
    print("Note: Now you are generating tiny graph dataset! in "+args.dataset, flush=True)

rm_list = []  # (index, sample id) of every discarded sample
# for i_d, d in enumerate(tqdm(data[30000:43738])):
for i_d, d in enumerate(tqdm(data)):
    # # # Test mode
    # if i_d != 2:
    #     continue
    g = dgl.DGLGraph()
    try:
        # Processing the query and candidate entities, find C_q U {s}
        d['candidates_orig'] = list(d['candidates'])  # record the original candidate
        d['candidates'] = [c for c in d['candidates'] if c not in nlp.Defaults.stop_words]
        d['candidates'] = [[str(w) for w in c] for c in nlp.pipe(d['candidates'])]
        # drop the first query token — presumably the relation marker; confirm
        d['query'] = [str(w) for w in nlp.tokenizer(d['query'])][1:]

        # discard the sample accroding to De Cao
        if (len(d['query']) > max_query_size) or (len(d['candidates']) > max_candidates):
            rm_list.append((i_d, d['id']))
            print("Discard sample because query or candidates length over limitation, ID:", (i_d, d['id']), flush=True)
            graph_set.append(dgl.DGLGraph())
            continue

        entities_set = d['candidates'] + [d['query']]  # C_q U {s}

        # Document level coreference prediction
        # First preprocess the document
        d['supports'] = [regex(s) for s in d['supports']]
        coref_temp = [compute_coref(support_doc) for support_doc in d['supports']]
        entities_span_in_docs = [e for _, e in coref_temp]  # [tokenised document text for each document], entities span S_q
        coref_cluster_in_docs = [e for e, _ in coref_temp]  # [corefernt spans for each cluster in each document]
        d['coref'] = [[[[f, []] for f in e] for e in s] for s in coref_cluster_in_docs]  # [support_doc_id, cluster_id, span_id]

        # c_i, c: entity in entitise set {s} U C_q (entites in canddiate answers and query)
        # s_i, s: tokenized support document in supports
        # wi, w: word in document s
        # shape: [num_supports, i in entities set, tuple]
        # tuple: (#doc, position in doc, id of c in entities set)
        exact_match_doc2entity_set = [[ind(si, wi, ci, c) for wi, w in enumerate(s)
                                       for ci, c in enumerate(entities_set) if check(s, wi, c)]
                                      for si, s in enumerate(entities_span_in_docs)]

        exact_match_entity_spans = []  # [cid, start, end, doc_id]
        for support_doc_id in range(len(exact_match_doc2entity_set)):
            if len(exact_match_doc2entity_set[support_doc_id]) == 0:
                continue
            for c_i, exact_matched_entities in enumerate(exact_match_doc2entity_set[support_doc_id]):
                for loc_i, loc in enumerate(exact_matched_entities):
                    # print(loc)
                    doc_id = loc[0]
                    doc_ent_loc = loc[1]
                    id_in_entities = loc[2]
                    # span.append(d['supports'][doc_id][doc_ent_loc])
                # entity_in_supdoc_id = torch.Tensor(exact_matched_entities[0][0])
                doc_id = torch.tensor(exact_matched_entities[0][0], dtype=torch.int32).unsqueeze(0)
                entities_id = exact_matched_entities[0][-1]
                # print([entities_id, exact_matched_entities[0][1],exact_matched_entities[-1][1],support_doc_id])
                exact_match_entity_spans.append([entities_id, exact_matched_entities[0][1], exact_matched_entities[-1][1], support_doc_id])

        # Compute coreference
        # Find the nodes that entities in entities_set has corefrent in coreference prediction
        coref_nodes = []
        for sc, sm in zip(d['coref'], exact_match_doc2entity_set):  # overloop (entity id, loc, doc_id)
            u = []  # doc
            for ni, n in enumerate(sm):  # overloop each match entities (entity id, loc, doc_id)
                k = []
                for cli, cl in enumerate(sc):  # overloop coref clusters
                    coref_loc = [[co[0], co[1]] for co, cll in cl]
                    # overlap test between the matched span and each coref span
                    x = [(n[0][1] <= co[0] <= n[-1][1]) or (co[0] <= n[0][1] <= co[1]) for co, cll in cl]
                    # i: entity id
                    for i, v in filter(lambda y: y[1], enumerate(x)):
                        k.append((cli, i))
                        # De cao's : cluster - entities - loc start - loc end
                        # cl[i][1].append(ni)
                        # NOTE(review): the de-duplication passes below call
                        # d['coref'][...][1].index(ni) / cl[eli][1], which only
                        # ever gets populated by the append commented out above
                        # — confirm whether disabling it was intentional (a
                        # resulting ValueError is swallowed by the outer except
                        # and discards the sample).
                u.append(k)
            coref_nodes.append(u)

        # remove one entity with multiple coref
        for sli, sl in enumerate(coref_nodes):  # loop sup document
            for ni, n in enumerate(sl):  # loop entities to coref
                if len(n) > 1:
                    for e0, e1 in n:
                        i = d['coref'][sli][e0][e1][1].index(ni)
                        del d['coref'][sli][e0][e1][1][i]
                    sl[ni] = []

        # remove one coref with multiple entity
        for ms, cs in zip(coref_nodes, d['coref']):
            for cli, cl in enumerate(cs):
                for eli, (el, li) in enumerate(cl):
                    if len(li) > 1:
                        for e in li:
                            i = ms[e].index((cli, eli))
                            del ms[e][i]
                        cl[eli][1] = []

        ## Check here
        d['edges_coref'] = []
        for si, (ms, cs) in enumerate(zip(exact_match_doc2entity_set, d['coref'])):
            tmp = []
            for cl in cs:
                # candidate ids touched by this cluster's matched spans
                cand = {ms[n[0]][0][-1] for p, n in cl if n}
                if len(cand) == 1:
                    cl_ = []
                    for (p0, p1), _ in cl:
                        if not _:
                            # unmatched coref span: materialise it as a new node
                            cl_.append(len(ms))
                            ms.append([[si, i, list(cand)[0]] for i in range(p0, p1 + 1)])
                        else:
                            cl_.append(_[0])
                    tmp.append(cl_)
            d['edges_coref'].append(tmp)

        # print("coref_nodes:", coref_nodes)
        # assign a global node id to every (doc, entity) mention
        nodes_id_name = []
        c = 0
        for e in [[[x[-1] for x in c][0] for c in s] for s in exact_match_doc2entity_set]:
            u = []
            for f in e:
                u.append((c, f))
                c += 1
            nodes_id_name.append(u)

        # per-node list of (doc_id, token position) pairs
        mask_ = [[x[:-1] for x in f] for e in exact_match_doc2entity_set for f in e]
        # print("len mask", len(mask_))
        # print(mask_)

        record_of_loc_span = []
        for node_i, node in enumerate(mask_):
            node_span = []
            loc_span = []
            doc_id = -1
            for i, unit in enumerate(node):
                doc_id, loc = unit[0], unit[1]
                node_span.append(entities_span_in_docs[doc_id][loc])
                loc_span.append(loc)
            item = (doc_id, loc_span, node_span)
            record_of_loc_span.append(item)

        candidates, _ = ee.batch_to_embeddings(entities_span_in_docs)
        # select out the words (entities) we want
        d['nodes_elmo'] = [(candidates.transpose(2, 1)[torch.tensor(m, dtype=torch.float).T.tolist()]) for m in mask_]
        # change second and first dimension
        for e in d['nodes_elmo']:
            t0, t1 = e[:, 2, 512:].clone(), e[:, 1, 512:].clone()
            e[:, 1, 512:], e[:, 2, 512:] = t0, t1
        # per-node summary embedding: mean of layer 0, first token of layer 1,
        # last token of layer 2
        filt = lambda c: torch.stack([c.mean(0)[0], c[0][1], c[-1][2]])
        nodes_embed = torch.stack([filt(a) for a in d['nodes_elmo']])

        # print("nodes_id_name: ", nodes_id_name)  # [[(node id, entity id)] for all docu]
        # Now we initalize the node in the graph
        wid = 0
        for doc_id, nodes_in_doc in enumerate(nodes_id_name):
            if nodes_in_doc == []:
                continue
            for node_id, e_id in nodes_in_doc:
                doc_id, loc_span, word_span = record_of_loc_span[wid]
                loc_start = torch.tensor([loc_span[0]], dtype=torch.int)
                loc_end = torch.tensor([loc_span[-1]], dtype=torch.int)
                # print("Add node now:", doc_id, loc_start, loc_end)
                doc_id = torch.tensor([doc_id], dtype=torch.int32)
                e_id = torch.tensor([e_id], dtype=torch.int32)
                # embed_entities = torch.tensor([nodes_embed[wid]])
                embed_entities = nodes_embed[wid].unsqueeze(0)
                wid += 1
                g.add_nodes(1, {"n_embed": embed_entities, "d_id": doc_id,
                                "loc_start": loc_start, "loc_end": loc_end, "e_id": e_id})

        d['nodes_candidates_id'] = [[x[-1] for x in f][0] for e in exact_match_doc2entity_set for f in e]

        # discard the sample according to De Cao
        if len(d['nodes_candidates_id']) > max_nodes or len(d['nodes_candidates_id']) <= 0:
            rm_list.append((i_d, d['id']))
            print("Discard sample because num of nodes is zero or larger than limid. ID:", (i_d, d['id']), flush=True)
            graph_set.append(dgl.DGLGraph())
            continue

        # edge construction: DOC-BASED (same doc) and MATCH (same surface form)
        edges_in, edges_out = [], []
        for e0 in nodes_id_name:
            for f0, w0 in e0:
                for f1, w1 in e0:
                    if f0 != f1:
                        # DOC-BASED
                        edges_in.append((f0, f1))
                for e1 in nodes_id_name:
                    for f1, w1 in e1:
                        # Exact match
                        if e0 != e1 and w0 == w1:
                            edges_out.append((f0, f1))

        edges_coref = []
        for nins, cs in zip(nodes_id_name, d['edges_coref']):
            for cl in cs:
                for e0 in cl:
                    for e1 in cl:
                        if e0 != e1:
                            edges_coref.append((nins[e0][0], nins[e1][0]))

        d['edges_DOC_BASED'] = edges_in
        d['edges_MATCH'] = edges_out
        d['edges_COREF'] = edges_coref
        d['edges_n_COMPLETE'] = d['edges_DOC_BASED'] + d['edges_MATCH'] + d['edges_COREF']
        # print("existing: ", d['edges_n_COMPLETE'])

        # COMPLETE relation: every ordered node pair not already connected
        d['edges_COMPLETE'] = []
        # nodes_id_list = [i for i in g.nodes().data.cpu().numpy()]
        nodes_id_list = np.arange(len(record_of_loc_span))
        for i in nodes_id_list:
            for j in nodes_id_list:
                if i == j:
                    # ignore same node, no self-loopo
                    continue
                if (i, j) not in d['edges_n_COMPLETE']:
                    d['edges_COMPLETE'].append((i, j))
        # print(d['edges_COMPLETE'])

        all_edges = [d['edges_DOC_BASED']] + [d['edges_MATCH']] + [d['edges_COREF']] + [d['edges_COMPLETE']]

        # Calculate probability weight
        # each edge of relation r from node v gets weight 1/out_degree_r(v)
        edge_prob_record = []
        for graph_i, subgraph_edges in enumerate(all_edges):
            edge_prob_in_graph = {}
            for start_node in nodes_id_list:
                out_count = len([a for a in subgraph_edges if a[0] == start_node])
                if out_count:
                    edge_prob_in_graph[start_node] = 1/out_count
            edge_prob_record.append(edge_prob_in_graph)

        for i, rel_graph in enumerate(all_edges):
            for (src, tgt) in rel_graph:
                edge_type = torch.tensor([i], dtype=torch.int)
                p_weight = edge_prob_record[i][src]
                edge_weight = torch.tensor([p_weight], dtype=torch.float16)
                g.add_edges(src, tgt, data={'rel_type': edge_type, 'e_weight': edge_weight})

        graph_set.append(g)
        if i_d == 0:
            dgl.save_graphs(GRAPH_ADD+args.graph_gen_out_file+'.dgl', graph_set)
        # NOTE(review): `i_d+1 % 500 == 0` parses as `i_d + (1 % 500 == 0)`
        # comparison — i.e. `(i_d + 1 % 500) == 0`, which is `i_d + 1 == 0`
        # and never true; the intended periodic checkpoint needs
        # `(i_d + 1) % 500 == 0`. Left untouched here (comment-only pass).
        if i_d+1 % 500 == 0:
            dgl.save_graphs(GRAPH_ADD+args.graph_gen_out_file+'.dgl', graph_set)
            # file = open('removed_samples_id_30000_43738.txt','w')
            file = open('dev_removed_samples_id.txt','w')
            file.write(str(rm_list))
            file.close()
    except:
        # Any per-sample failure is logged and replaced by an empty graph so
        # graph indices stay aligned with the dataset indices.
        traceback.print_exc()
        print("Discard sample because of error: ", (i_d, d['id']), "; add an empty graph to graph set.", flush=True)
        graph_set.append(dgl.DGLGraph())
        rm_list.append((i_d, d['id']))
        continue

# print(graph_set)
dgl.save_graphs(GRAPH_ADD+args.graph_gen_out_file+'.dgl', graph_set)
# file = open('removed_samples_id_30000_43738.txt','w')
file = open('dev_removed_samples_id.txt','w')
file.write(str(rm_list))
file.close()
nilq/baby-python
python
"""Demo: Brownian dynamics of charged spheres in a harmonic trap (stoked)."""
import stoked
import numpy as np
from functools import partial
import matplotlib.pyplot as plt


def harmonic_force(time, position, orientation, stiffness):
    """Restoring force of a harmonic trap: F = -k * r."""
    return position * (-stiffness)


# unit shorthands
nm = 1e-9
us = 1e-6

# physical parameters
stiffness = 2e-6
radius = 25 * nm
N = 15
Q = 1e-18

# random starting positions inside a 600 nm square (2D)
initial = np.random.uniform(-300 * nm, 300 * nm, size=(N, 2))

# build the simulation pieces separately, then assemble
sphere_drag = stoked.drag_sphere(radius=radius, viscosity=8e-4)
trap_force = partial(harmonic_force, stiffness=stiffness)
coulomb = stoked.electrostatics(Q)

bd = stoked.brownian_dynamics(
    position=initial,
    temperature=300,
    drag=sphere_drag,
    dt=.2 * us,
    force=trap_force,
    interactions=coulomb,
)
trajectory = bd.run(10000).position

# plot the xy trajectories in nanometers
fig, ax = plt.subplots()
ax.plot(trajectory[..., 0] / nm, trajectory[..., 1] / nm, lw=.5)
ax.set(aspect='equal', xlabel='x (nm)', ylabel='y (nm)')
plt.show()
nilq/baby-python
python
# Test the unicode support! 👋 ᚴ=2 assert ᚴ*8 == 16 ᚴ="👋" c = ᚴ*3 assert c == '👋👋👋' import unicodedata assert unicodedata.category('a') == 'Ll' assert unicodedata.category('A') == 'Lu' assert unicodedata.name('a') == 'LATIN SMALL LETTER A' assert unicodedata.lookup('LATIN SMALL LETTER A') == 'a' assert unicodedata.bidirectional('a') == 'L' assert unicodedata.normalize('NFC', 'bla') == 'bla' # testing unicodedata.ucd_3_2_0 for idna assert "abcСĤ".encode("idna") == b'xn--abc-7sa390b' # TODO: fix: assert "abc䄣IJ".encode("idna") == b'xn--abcij-zb5f' # from CPython tests assert "python.org".encode("idna") == b"python.org" assert "python.org.".encode("idna") == b"python.org." assert "pyth\xf6n.org".encode("idna") == b"xn--pythn-mua.org" assert "pyth\xf6n.org.".encode("idna") == b"xn--pythn-mua.org." assert b"python.org".decode("idna") == "python.org" assert b"python.org.".decode("idna") == "python.org." assert b"xn--pythn-mua.org".decode("idna") == "pyth\xf6n.org" assert b"xn--pythn-mua.org.".decode("idna") == "pyth\xf6n.org." # TODO: add east_asian_width and mirrored # assert unicodedata.ucd_3_2_0.east_asian_width('\u231a') == 'N' # assert not unicodedata.ucd_3_2_0.mirrored("\u0f3a")
nilq/baby-python
python
from pathlib import Path
from typing import Union

import click
import matplotlib.pyplot as plt

from ertk.dataset import read_features


@click.command()
@click.argument("input", type=click.Path(exists=True, dir_okay=False, path_type=Path))
@click.argument("instance", type=str, default="2")
def main(input: Path, instance: str):
    """Displays plot of INSTANCE in INPUT.

    INSTANCE can either be a numeric index, a range of indices using
    numpy slice notation or a named instance.
    """
    data = read_features(input)
    if instance.isdigit():
        idx: Union[int, slice] = int(instance)
    else:
        _i = instance.find(":")
        if _i != -1:
            # Numpy-style slice; empty bounds ("2:", ":5", ":") are allowed
            # and mean "from the start" / "to the end".
            start = int(instance[:_i]) if instance[:_i] else None
            end = int(instance[_i + 1 :]) if instance[_i + 1 :] else None
            idx = slice(start, end)
        else:
            # Fall back to a named-instance lookup; raises ValueError if absent.
            idx = data.names.index(instance)
    arr = data.features[idx]
    names = data.names[idx]
    print(names)
    plt.figure()
    plt.imshow(arr, aspect="equal", origin="upper", interpolation="nearest")
    plt.xlabel("Features")
    # Fix: the original checked len(names) > 1, which measures *string* length
    # when a single instance is selected (names is then one name, not a list),
    # mislabelling the axis. A slice selects multiple instances (one row each);
    # a single instance is plotted with its time frames on the y axis.
    plt.ylabel("Instance" if isinstance(idx, slice) else "Time")
    plt.show()


if __name__ == "__main__":
    main()
nilq/baby-python
python
import nbconvert, git, yaml, inspect
from pathlib import Path


class FrontMatters(nbconvert.exporters.MarkdownExporter):
    """Markdown exporter that prepends a Jekyll YAML front-matter block."""

    def from_notebook_node(self, nb, resources=None, **kw):
        """Export *nb* to markdown, prefixed with author/layout front matter."""
        nb, resources = super().from_notebook_node(nb, resources, **kw)
        md = dict(resources['metadata'])
        # Attribute the post to whoever last committed the notebook.
        md['author'] = author_from_repo(Path(md['path'], f"{md['name']}.ipynb"))
        md['layout'] = 'post'
        # '' + sep + yaml + sep + body  ==  "---\n<yaml>---\n<body>",
        # i.e. a standard Jekyll front-matter header.
        return '---\n'.join((
            '',
            yaml.safe_dump(md, default_flow_style=False),
            nb)), resources


def author_from_repo(file, dir='.'):
    """Return the author name of the first git-blame entry for *file* at HEAD."""
    repo = git.Repo(dir)
    return repo.blame('HEAD~0', file)[0][0].author.name


try:
    # `c` is the traitlets config object injected when nbconvert loads this
    # module as a config file; it does not exist on a plain import, which is
    # the only expected failure here (hence NameError, not a bare except).
    c.NbConvertApp.export_format = "jupyter_nbconvert_config.FrontMatters"
    c.FilesWriter.build_directory = "_posts"
except NameError:
    pass
nilq/baby-python
python
import random

from arcade import Sprite, load_texture, check_for_collision_with_list

from activities import explore, backtrack, follow_the_food, find_the_food
from path import Path


class Ant(Sprite):
    """A single ant sprite driven by a small state machine of activities.

    The current activity is one of the imported activity functions
    (explore / backtrack / follow_the_food / find_the_food); move() advances
    the state machine by one step per frame.
    """

    def __init__(self, x, y, arena, colony, scale=1, activity="wander"):
        # NOTE(review): the `activity` argument is accepted but never used;
        # the ant always starts in `explore`.
        super().__init__(center_x=x, center_y=y, scale=scale)
        self.arena = arena    # provides wall_list and food_list sprite lists
        self.colony = colony  # home base: receives food, hands out paths
        self.speed = 1
        self.textures = {
            "black": load_texture("graphics/ant_black.png"),
            "green": load_texture("graphics/ant_green.png"),
            "red": load_texture("graphics/ant_red.png"),
            "blue": load_texture("graphics/ant_blue.png"),
            "black_green": load_texture("graphics/ant_black_green.png"),
        }
        self.set_activity(explore)
        # Trail of visited coordinates used to retrace steps to the colony.
        self.back_track_path = Path((x, y))
        self.food_search_timer = 0  # Used to get a limited number of turns to find food at end of promising path

    def move(self):
        """Advance the ant one step according to its current activity."""
        if self.activity in (explore, find_the_food):
            # Ant is exploring the environment in search of food
            explore(self)
            if check_for_collision_with_list(self, self.arena.wall_list):
                # Hit a wall, backup
                backtrack(self)
            food_list = check_for_collision_with_list(self, self.arena.food_list)
            if food_list:
                # Food found! Take it and back to the colony
                self.arena.food_list.remove(food_list[0])
                # assert self.back_track_path.is_valid()
                self.colony.found_food(self.back_track_path)
                self.set_activity(backtrack)
                self.food_search_timer = 0
            elif self.food_search_timer:
                # Ant followed the path to food and is now at the end of it. Where is it?
                self.food_search_timer -= 1
                if not self.food_search_timer:
                    # Searched at the end of the path but no food in sight. Report and continue exploring
                    # assert self.path_to_food.is_valid()
                    self.colony.no_food_at(self.path_to_food)
                    self.set_activity(explore)
            elif random.random() < 0.001:
                # Rarely: give up exploring and head home empty-handed.
                self.set_activity(backtrack)
                self.texture = self.textures["black_green"]
        elif self.activity == backtrack:
            # Ant has found food and is tracing back it's steps to the colony
            if not backtrack(self):
                # No more backtracking left. We're back at the colony.
                self.colony.deliver_food()
                self.path_to_food = self.colony.get_path_to_follow()
                if self.path_to_food:
                    # assert self.path_to_food.is_valid()
                    # Colony has instructed this ant to follow a path to food
                    self.set_activity(follow_the_food)
                else:
                    # Colony has instructed this ant to go and find food
                    self.set_activity(explore)
        elif self.activity == follow_the_food:
            # Ant is following a path to where food should be
            if not follow_the_food(self):
                # End of the path, explore and get 10 turns to find the food
                self.back_track_path = self.path_to_food.reverse()
                # assert self.back_track_path.is_valid()
                # assert self.back_track_path.is_valid()
                self.food_search_timer = 10
                self.set_activity(explore)
                self.texture = self.textures["blue"]
        self.update()

    def set_activity(self, activity):
        """Switch to *activity* and show its associated texture color."""
        self.activity = activity
        # Each activity function carries a `color` attribute naming a texture.
        self.texture = self.textures[self.activity.color]
        # if activity == explore:
        #     self.texture = self.textures['black']
        # else:
        #     self.texture = self.textures['green']

    def move_to(self, coo):
        """Jump to coordinate *coo*, updating facing angle and speed."""
        dx = coo[0] - self.center_x
        dy = coo[1] - self.center_y
        # Face the direction of travel (sprite angle in degrees; horizontal
        # movement takes precedence over vertical).
        if dx < 0:
            self.angle = 90
        elif dx > 0:
            self.angle = 270
        elif dy > 0:
            self.angle = 0
        else:
            self.angle = 180
        self.speed = abs(dx) + abs(dy)
        self.center_x = coo[0]
        self.center_y = coo[1]
nilq/baby-python
python
# -*- coding: utf-8 -*-
"""
Created on Fri Dec 29 13:52:01 2017

@author: User
"""
import re

# Demo: word and sentence counting with str.split and re.split.
aPara = "this is a list of words. and here is another"
aList = []

words = aPara.split()
print(words)       # every whitespace-separated word
print(len(words))  # total word count

# Split the paragraph into sentences on '.' and record words per sentence.
for sentence in re.split('[.]', aPara):
    print(sentence)
    aList.append(len(sentence.split()))

print(aList)
print(re.split('[.]', aPara))
print(len(re.split('[.]', aPara)))  # number of sentences
nilq/baby-python
python
## Copyright 2015-2019 Ilgar Lunin, Pedro Cabrera

## Licensed under the Apache License, Version 2.0 (the "License");
## you may not use this file except in compliance with the License.
## You may obtain a copy of the License at

##     http://www.apache.org/licenses/LICENSE-2.0

## Unless required by applicable law or agreed to in writing, software
## distributed under the License is distributed on an "AS IS" BASIS,
## WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
## See the License for the specific language governing permissions and
## limitations under the License.


from Qt.QtWidgets import *
from Qt import QtCore, QtGui

from PyFlow.Input import InputActionType
from PyFlow.UI.Widgets.KeyboardModifiersCapture import KeyboardModifiersCaptureWidget
from PyFlow.UI.Widgets.KeyCapture import KeyCaptureWidget
from PyFlow.UI.Widgets.MouseButtonCapture import MouseButtonCaptureWidget


class InputActionWidget(QWidget):
    """Editor row for a single input action.

    Always shows a keyboard-modifiers capture widget; depending on the
    action's type, also shows either a key capture widget (keyboard actions)
    or a mouse-button capture widget (mouse actions). Captured values are
    written straight back into the wrapped action object.
    """

    def __init__(self, parent=None, inputActionRef=None):
        super(InputActionWidget, self).__init__(parent)
        # The action object being edited; mutated in place by the callbacks.
        self.currentActionRef = inputActionRef
        self.layout = QHBoxLayout(self)
        self.layout.setContentsMargins(0, 0, 0, 0)

        # Modifier capture is common to both action types.
        modifiersLabel = QLabel()
        modifiersLabel.setSizePolicy(QSizePolicy.Maximum, QSizePolicy.Preferred)
        modifiersLabel.setPixmap(QtGui.QPixmap(":/shift-32.png"))
        self.modifiersWidget = KeyboardModifiersCaptureWidget()
        self.modifiersWidget.captured.connect(self.updateActionModifiers)
        self.layout.addWidget(modifiersLabel)
        self.layout.addWidget(self.modifiersWidget)

        if self.actionType == InputActionType.Keyboard:
            keyLabel = QLabel()
            keyLabel.setSizePolicy(QSizePolicy.Maximum, QSizePolicy.Preferred)
            keyLabel.setPixmap(QtGui.QPixmap(":/keyboard-32.png"))
            self.keyCapture = KeyCaptureWidget()
            self.keyCapture.captured.connect(self.updateActionKey)
            self.layout.addWidget(keyLabel)
            self.layout.addWidget(self.keyCapture)

        if self.actionType == InputActionType.Mouse:
            # NOTE(review): the "Mouse:" text is immediately superseded by
            # the pixmap set two lines below.
            mouseLabel = QLabel("Mouse:")
            mouseLabel.setSizePolicy(QSizePolicy.Maximum, QSizePolicy.Preferred)
            mouseLabel.setPixmap(QtGui.QPixmap(":/mouse-32.png"))
            self.mouseCapture = MouseButtonCaptureWidget()
            self.mouseCapture.captured.connect(self.updateActionMouse)
            self.layout.addWidget(mouseLabel)
            self.layout.addWidget(self.mouseCapture)

    def updateActionMouse(self, value):
        """Store a captured mouse button on the wrapped action."""
        if self.currentActionRef is not None:
            self.currentActionRef.setMouseButton(value)

    def updateActionKey(self, value):
        """Store a captured key on the wrapped action."""
        if self.currentActionRef is not None:
            self.currentActionRef.setKey(value)

    def updateActionModifiers(self, value):
        """Store captured keyboard modifiers on the wrapped action."""
        if self.currentActionRef is not None:
            self.currentActionRef.setModifiers(value)

    def setAction(self, inputAction):
        """Load *inputAction*'s current bindings into the capture widgets."""
        self.modifiersWidget.currentModifiers = inputAction.getModifiers()
        # Only one of keyCapture/mouseCapture exists (depends on action type);
        # the bare excepts swallow the AttributeError for the missing one.
        try:
            self.keyCapture.currentKey = inputAction.getKey()
        except:
            pass
        try:
            self.mouseCapture.currentButton = inputAction.getMouseButton()
        except:
            pass

    def getModifiers(self):
        """Return the currently captured keyboard modifiers."""
        return self.modifiersWidget.currentModifiers

    def getKey(self):
        # Returns None for mouse actions (no key capture widget exists).
        try:
            return self.keyCapture.currentKey
        except:
            return None

    def getMouseButton(self):
        # Returns None for keyboard actions (no mouse capture widget exists).
        try:
            return self.mouseCapture.currentButton
        except:
            return None

    @property
    def actionType(self):
        """InputActionType of the wrapped action (keyboard or mouse)."""
        return self.currentActionRef.actionType
nilq/baby-python
python
import argparse
from typing import Optional
from typing import Sequence

# Byte patterns that must never appear in committed files.
BLACKLIST = [
    b'\x64\x6e\x63',  # "dnc" (do-not-commit marker)
]


def _contains_blacklisted(filename: str) -> bool:
    """Return True if *filename* contains any blacklisted byte pattern."""
    with open(filename, 'rb') as f:
        content = f.read()
    return any(pattern in content for pattern in BLACKLIST)


def main(argv: Optional[Sequence[str]] = None) -> int:
    """Pre-commit hook: report files containing do-not-commit markers.

    Returns 1 (failure) if any file matches, 0 otherwise.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('filenames', nargs='*', help='Filenames to check')
    args = parser.parse_args(argv)

    bad_files = [name for name in args.filenames if _contains_blacklisted(name)]

    if not bad_files:
        return 0
    for bad_file in bad_files:
        print(f'do not commit tag found: {bad_file}')
    return 1


if __name__ == '__main__':
    exit(main())
nilq/baby-python
python
# coding=utf-8
"""update_xml_verses.py

Merge corrected verse groups (<edit> records) back into a boesp.xml file.
"""
from __future__ import print_function
import sys, re, codecs
sys.path.append('../step3e')  # the Entry object
from transcode import xml_header, read_entries


class Edit(object):
    """One <edit> record: a headword key (L) plus replacement line groups."""

    def __init__(self, lines):
        self.lines = lines
        assert lines[0] == '<edit>'
        assert lines[-1] == '</edit>'
        # The second line carries the identifying metadata for this edit.
        m = re.search(r'^<info L="(.*?)" page="(.*?)" gtypes="(.*?)"/>$', lines[1])
        assert m != None
        self.L = m.group(1)
        self.page = m.group(2)
        #self.gtypestr = m.group(3)
        #self.gtypes = self.gtypestr.split(',')
        self.parse_edit_groups()

    def parse_edit_groups(self):
        """Split self.lines into tagged groups (<HS>..</HS>, <S>..</S>, ...)."""
        # based on Entry.parse_groups
        groupelts = 'HS,S,D,F,V1,V2,V3,V4,V5'.split(',')
        groupbegs = ['<%s' % groupelt for groupelt in groupelts]
        groupends = ['</%s>' % groupelt for groupelt in groupelts]
        groups = []
        ngroup = -1
        groupelt = None
        gtypes = []  # edit gtypes
        for iline, line in enumerate(self.lines):
            if groupelt == None:
                # Not inside a group: look for an opening tag.
                for i, groupbeg in enumerate(groupbegs):
                    if line.startswith(groupbeg):
                        groupelt = groupelts[i]
                        groupend = groupends[i]
                        group = [line]
                        break
            elif line.startswith(groupend):
                # Closing tag: finish the current group.
                group.append(line)
                groups.append(group)
                ngroup = ngroup + 1
                gtypes.append(groupelt)
                groupelt = None
                group = []
            else:
                group.append(line)
        self.groups = groups
        self.gtypes = gtypes


def generate_edits(lines):
    """Yield an Edit for each <edit>...</edit> block in *lines*."""
    group = None
    for iline, line in enumerate(lines):
        line = line.strip()
        if line == '<edit>':
            group = [line]
        elif line == '</edit>':
            group.append(line)
            entry = Edit(group)
            yield entry
            group = None
            L = None
        elif group != None:
            group.append(line)
        else:
            pass  # outside of a group, e.g. ;---------- lines


def get_edits(filein):
    """Read *filein* and return the list of Edit records it contains."""
    with codecs.open(filein, "r", "utf-8") as f:
        lines = [line.rstrip('\r\n') for line in f]
    edits = list(generate_edits(lines))
    print(len(edits), "edits from", filein)
    return edits


def entry_dict(entries):
    """Map headword L -> Entry; warns on duplicates (last one wins)."""
    d = {}
    for entry in entries:
        L = entry.L
        if L in d:
            print('entry_dict: unexpected duplicate', entry.infoline)
        d[L] = entry
    return d


def edit_entry(entry, edit):
    """Replace entry's leading groups, position by position, with the edit's.

    Sets entry.newgroups; groups beyond the edit's count keep the originals.
    """
    newgroups = []
    found = False
    info = entry.info
    assert info == edit.lines[1]
    gtypes = entry.gtypes
    groups = entry.groups
    egroups = edit.groups
    egtypes = edit.gtypes
    for igtype, gtype in enumerate(gtypes):
        group = groups[igtype]
        if not (igtype < len(egtypes)):
            # Edit provides no replacement for this group; keep the original.
            newgroups.append(group)
            continue
        egtype = egtypes[igtype]
        assert egtype == gtype
        egroup = egroups[igtype]
        newgroups.append(egroup)
    entry.newgroups = newgroups


def compute_entry_lines(entry, newgroups):
    """Rebuild the <entry> line list from *newgroups*; exits on length drift."""
    newlines = []
    newlines.append('<entry>')
    # assume no change needed in info
    newlines.append(entry.info)
    for newgroup in newgroups:
        for newline in newgroup:
            newlines.append(newline)
    newlines.append('</entry>')
    if len(entry.lines) != len(newlines):
        # Edits are expected to be line-for-line replacements; anything else
        # indicates a malformed edit record, so dump both versions and stop.
        print('newlines anomaly', entry.info)
        print('entry.lines:')
        for line in entry.lines:
            print(' ', line)
        print('entry.newlines:')
        for line in newlines:
            print(' ', line)
        exit(1)
    return newlines


def write_entries(entries, xmlroot, version, fileout):
    """Write all entries (edited lines where present) to *fileout* as XML."""
    head = xml_header(xmlroot, version)
    head.append('')
    body = []
    for entry in entries:
        groups = entry.groups
        newgroups = entry.newgroups
        if newgroups == None:
            # Entry was never edited; emit its original lines untouched.
            newlines = entry.lines
        else:
            newlines = compute_entry_lines(entry, newgroups)
        #lines = entry.lines
        for line in newlines:
            body.append(line)
        body.append('')
    tail = ['</%s>' % xmlroot]
    linesout = head + body + tail
    with codecs.open(fileout, "w", "utf-8") as f:
        for line in linesout:
            f.write(line + '\n')
    print(len(linesout), "lines written to", fileout)


if __name__ == "__main__":
    filein = sys.argv[1]  # old boesp.xml
    filein1 = sys.argv[2]  # corrected verses
    fileout = sys.argv[3]  # new boesp.xml
    xmlroot = 'boesp'
    version = "1.4"  # this must agree with step0/boesp.dtd
    entries = read_entries(filein)
    edits = get_edits(filein1)
    d = entry_dict(entries)
    # also, add 'newgroups' attribute to each entry
    # so we can tell which entries have been edited.
    for entry in entries:
        entry.newgroups = None
    for iedit, edit in enumerate(edits):
        L = edit.L
        if L not in d:
            print('edit entry not found', L)
        else:
            entry = d[L]
            edit_entry(entry, edit)
        # if True:
        if iedit == 0:
            # Debug aid: show the first edit in full.
            print(edit.L)
            for line in edit.lines:
                print(line)
    write_entries(entries, xmlroot, version, fileout)
nilq/baby-python
python
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#

"""Tests for filesystemio."""

from __future__ import absolute_import

import io
import logging
import multiprocessing
import os
import sys
import threading
import unittest
from builtins import range

from apache_beam.io import filesystemio


class FakeDownloader(filesystemio.Downloader):
  """In-memory Downloader that records the size of the most recent read."""

  def __init__(self, data):
    self._data = data
    self.last_read_size = -1  # size of the last get_range request, -1 = none

  @property
  def size(self):
    return len(self._data)

  def get_range(self, start, end):
    self.last_read_size = end - start
    return self._data[start:end]


class FakeUploader(filesystemio.Uploader):
  """In-memory Uploader that records the size of the most recent write."""

  def __init__(self):
    self.data = ''
    self.last_write_size = -1  # size of the last put(), -1 = none yet
    self.finished = False

  def last_error(self):
    return None

  def put(self, data):
    # Writing after finish() would be a bug in the stream under test.
    assert not self.finished
    self.data += data.tobytes()
    self.last_write_size = len(data)

  def finish(self):
    self.finished = True


class TestDownloaderStream(unittest.TestCase):
  """Tests for filesystemio.DownloaderStream."""

  def test_file_attributes(self):
    downloader = FakeDownloader(data=None)
    stream = filesystemio.DownloaderStream(downloader)
    self.assertEqual(stream.mode, 'r')
    self.assertTrue(stream.readable())
    self.assertFalse(stream.writable())
    self.assertTrue(stream.seekable())

  def test_read_empty(self):
    downloader = FakeDownloader(data=b'')
    stream = filesystemio.DownloaderStream(downloader)
    self.assertEqual(stream.read(), b'')

  def test_read(self):
    data = 'abcde'
    downloader = FakeDownloader(data)
    stream = filesystemio.DownloaderStream(downloader)

    # Read size is exactly what was passed to read() (unbuffered).
    self.assertEqual(stream.read(1), data[0])
    self.assertEqual(downloader.last_read_size, 1)
    self.assertEqual(stream.read(), data[1:])
    self.assertEqual(downloader.last_read_size, len(data) - 1)

  @unittest.skipIf(sys.version_info[0] == 3 and
                   os.environ.get('RUN_SKIPPED_PY3_TESTS') != '1',
                   'This test still needs to be fixed on Python 3'
                   'TODO: BEAM-5627')
  def test_read_buffered(self):
    data = 'abcde'
    downloader = FakeDownloader(data)
    buffer_size = 2
    stream = io.BufferedReader(filesystemio.DownloaderStream(downloader),
                               buffer_size)

    # Verify that buffering works and is reading ahead.
    self.assertEqual(stream.read(1), data[0])
    self.assertEqual(downloader.last_read_size, buffer_size)
    self.assertEqual(stream.read(), data[1:])


@unittest.skipIf(sys.version_info[0] == 3 and
                 os.environ.get('RUN_SKIPPED_PY3_TESTS') != '1',
                 'This test still needs to be fixed on Python 3'
                 'TODO: BEAM-5627')
class TestUploaderStream(unittest.TestCase):
  """Tests for filesystemio.UploaderStream."""

  def test_file_attributes(self):
    uploader = FakeUploader()
    stream = filesystemio.UploaderStream(uploader)
    self.assertEqual(stream.mode, 'w')
    self.assertFalse(stream.readable())
    self.assertTrue(stream.writable())
    self.assertFalse(stream.seekable())

  def test_write_empty(self):
    uploader = FakeUploader()
    stream = filesystemio.UploaderStream(uploader)
    data = ''
    stream.write(memoryview(data))
    self.assertEqual(uploader.data, data)

  def test_write(self):
    data = 'abcde'
    uploader = FakeUploader()
    stream = filesystemio.UploaderStream(uploader)

    # Unbuffered writes.
    stream.write(memoryview(data[0]))
    self.assertEqual(uploader.data[0], data[0])
    self.assertEqual(uploader.last_write_size, 1)
    stream.write(memoryview(data[1:]))
    self.assertEqual(uploader.data, data)
    self.assertEqual(uploader.last_write_size, len(data) - 1)

  def test_write_buffered(self):
    data = 'abcde'
    uploader = FakeUploader()
    buffer_size = 2
    stream = io.BufferedWriter(filesystemio.UploaderStream(uploader),
                               buffer_size)

    # Verify that buffering works: doesn't write to uploader until buffer is
    # filled.
    stream.write(data[0])
    self.assertEqual(-1, uploader.last_write_size)
    stream.write(data[1:])
    stream.close()
    self.assertEqual(data, uploader.data)


class TestPipeStream(unittest.TestCase):
  """Tests for filesystemio.PipeStream."""

  def _read_and_verify(self, stream, expected, buffer_size):
    # Runs in a child thread: drains *stream* and checks it yields *expected*.
    data_list = []
    bytes_read = 0
    seen_last_block = False
    while True:
      data = stream.read(buffer_size)
      self.assertLessEqual(len(data), buffer_size)
      if len(data) < buffer_size:
        # Test the constraint that the pipe stream returns less than the buffer
        # size only when at the end of the stream.
        if data:
          self.assertFalse(seen_last_block)
          seen_last_block = True
      if not data:
        break
      data_list.append(data)
      bytes_read += len(data)
      self.assertEqual(stream.tell(), bytes_read)
    self.assertEqual(b''.join(data_list), expected)

  def test_pipe_stream(self):
    # Exponentially growing random blocks exercise many size boundaries.
    block_sizes = list(4**i for i in range(0, 12))
    data_blocks = list(os.urandom(size) for size in block_sizes)
    expected = b''.join(data_blocks)

    buffer_sizes = [100001, 512 * 1024, 1024 * 1024]

    for buffer_size in buffer_sizes:
      parent_conn, child_conn = multiprocessing.Pipe()
      stream = filesystemio.PipeStream(child_conn)
      child_thread = threading.Thread(
          target=self._read_and_verify, args=(stream, expected, buffer_size))
      child_thread.start()
      for data in data_blocks:
        parent_conn.send_bytes(data)
      parent_conn.close()
      child_thread.join()


if __name__ == '__main__':
  logging.getLogger().setLevel(logging.INFO)
  unittest.main()
nilq/baby-python
python
from subprocess import call

from core import kde
from core.action import has_dependency


class IconTheme4(kde.KDE4Action):
    """Change KDE's icon theme."""

    def arguments(self):
        """Describe the single accepted argument."""
        return [('theme', 'Icon theme name.')]

    def execute(self, theme):
        """Write the icon theme name into kdeglobals (KDE 4)."""
        kde.writeconfig("Icons", "Theme", theme, file="kdeglobals")
        return True


class IconTheme5(kde.KDE5Action):
    """Change KDE's icon theme."""

    def arguments(self):
        """Describe the single accepted argument."""
        return [('theme', 'Icon theme name.')]

    def execute(self, theme):
        """Write the icon theme and broadcast change notifications (KDE 5)."""
        # Keep the KDE 4 config in sync when the legacy tool is available.
        if has_dependency("kwriteconfig"):
            kde.writeconfig4("Icons", "Theme", theme, file="kdeglobals")
        kde.writeconfig("Icons", "Theme", theme, file="kdeglobals")

        # Handy for debugging the notifications emitted below:
        # clear&&dbus-monitor "type=signal,interface='org.kde.KGlobalSettings'"
        # clear&&dbus-monitor "type=signal,path=/KIconLoader"
        for icon_group in range(6):
            call("dbus-send --session --type=signal /KIconLoader org.kde.KIconLoader.iconChanged int32:%d" % icon_group, shell=True)
            call("dbus-send --session --type=signal /KGlobalSettings org.kde.KGlobalSettings.notifyChange int32:4 int32:%d" % icon_group, shell=True)
        call("dbus-send --session --type=signal /KWin org.kde.KWin.reloadConfig", shell=True)
        return True
nilq/baby-python
python
import logging

import progressbar
from django.core.management.base import BaseCommand

from contentcuration.models import Channel
from contentcuration.utils.nodes import generate_diff
from contentcuration.utils.nodes import get_diff

logging.basicConfig()
logger = logging.getLogger('command')


class Command(BaseCommand):
    """Generate staged-vs-main tree diffs for every channel with a staging tree."""

    def add_arguments(self, parser):
        # --force regenerates a diff even when a cached one already exists.
        parser.add_argument("--force", action="store_true", dest="force", default=False)

    def handle(self, *args, **options):
        logger.info("\n\n********** GENERATING STAGED DIFFS **********")
        staged_channels = Channel.objects.exclude(staging_tree=None)
        progress = progressbar.ProgressBar(max_value=staged_channels.count())
        for index, channel in enumerate(staged_channels):
            missing_or_forced = options["force"] or not get_diff(channel.staging_tree, channel.main_tree)
            if missing_or_forced:
                generate_diff(channel.staging_tree.pk, channel.main_tree.pk)
            progress.update(index)
        logger.info("\n\nDONE")
nilq/baby-python
python
# Data-augmentation script: populates NL/SQL templates against synthetic
# databases built from a table corpus, producing (question, SQL,
# column-label) training examples for a RoBERTa-based column classifier.
import os
import re
import json
import random
import codecs
import argparse
from template_config import *
from nltk import word_tokenize
from collections import defaultdict
from transformers.tokenization_roberta import RobertaTokenizer

# Probabilities controlling synthetic schema construction.
ADD_INDEX_ID = 0.7      # chance of injecting an index column into a table
ADD_INDEX_NAME = 0.3    # chance that the injected index column is "name" not "id"
OP_VAL_EQUAL = 0.4      # chance a comparison operator slot becomes "="
USE_TABLE_1 = 0.5       # chance of drawing columns from the second table
USE_1_FOR_INTEGER = 0.5  # chance a LIMIT-style integer value is 1
SEP_TOKEN = "</s>"
tokenizer = RobertaTokenizer.from_pretrained("roberta-base")
MAX_TOKEN_LEN = 150     # examples longer than this (in subword tokens) are dropped
MAX_COL_NUM = 25        # cap on total columns per synthetic database
OPS = ["=", ">", "<", ">=", "<=", "!=", "LIKE"]


# read NL-SQL templates
def read_NL_SQL_template(nlsql_templates_file):
    """Parse the template file into a list of dicts with keys
    'SQL pattern', 'SQL constraints', 'count', and 'questions'
    (a list of (question, question-constraints) pairs)."""
    templates = []
    with open(nlsql_templates_file) as fp:
        lines = fp.readlines()
    template_one = {}
    for line in lines:
        # A blank line terminates the current template record.
        if "\n" == line:
            templates.append(template_one)
        elif "SQL Pattern:" in line:
            template_one = {}
            # "|||" separates the pattern from its optional constraints.
            sps = line.strip().replace("SQL Pattern: ", "").split("|||")
            template_one["questions"] = []
            if len(sps) == 1:
                template_one["SQL pattern"] = sps[0]
                template_one["SQL constraints"] = []
            elif len(sps) == 2:
                template_one["SQL pattern"] = sps[0]
                template_one["SQL constraints"] = [x.strip() for x in sps[1].split("|") if x != " "]
            else:
                print("\n======Error warning!!!!")
        elif "count: " in line:
            sql_count = int(line.strip().replace("count: ", ""))
            template_one["count"] = sql_count
        elif "question: " in line:
            sps = line.strip().replace("question: ", "").split("|||")
            question = sps[0]
            if len(sps) == 2:
                q_constraints = [x.strip() for x in sps[1].split("|") if x != " "]
            else:
                q_constraints = []
            template_one["questions"].append((question, q_constraints))
    return templates


# Sieve through the templates and get valid single-table questions
def get_templates_for_one_table(templates):
    """Return the subset of templates usable with a single-table database:
    no constraint referencing "id"/"T1" and no question mentioning TABLE1."""
    templates_one_table = []
    for template in templates:
        sql_constraints = template['SQL constraints']
        sql_pattern = template["SQL pattern"]
        questions = template["questions"]
        skip = False
        for constraint in sql_constraints:
            if "id" in constraint or "T1" in constraint:
                skip = True
        questions_after = []
        if not skip:
            for q, qc in questions:
                if "TABLE1" not in q:
                    questions_after.append((q, qc))
        if len(questions_after) > 0:
            template_one = {}
            template_one['SQL constraints'] = sql_constraints
            template_one['SQL pattern'] = sql_pattern
            template_one["questions"] = questions_after
            templates_one_table.append(template_one)
    return templates_one_table


# Read json file
def read_json(file):
    """Load and return the JSON contents of `file`."""
    with open(file) as json_file:
        res = json.load(json_file)
    return res


# Unify and combine tables as databases
def create_dbs(tables):
    """Group consecutive tables into small synthetic databases (1-2 tables,
    at most MAX_COL_NUM columns), randomly injecting an "id"/"name" index
    column, and return the list of databases."""
    dbs = []
    cur_cols = []
    db_one = []
    ahd_cols = []
    for i, tab in enumerate(tables):
        # if i % 100000 == 0:
        #     print("processed: ", i)
        if len(db_one) <= random.choice([0, 1]) and len(ahd_cols) < MAX_COL_NUM:
            db_one.append(tab)
            # Track fully-qualified column names to detect duplicates later.
            cur_cols.extend([col+"."+tab["name"] for col in tab["columns"]])
            if i+1 < len(tables):
                # Look-ahead column count if the next table were added too.
                ahd_cols = cur_cols + [col+"."+tables[i+1]["name"] for col in tables[i+1]["columns"]]
            else:
                break
        else:
            # Only keep the database if all qualified column names are unique.
            if len(cur_cols) == len(list(set(cur_cols))):
                if len(db_one) > 1:
                    db_one_new = []
                    for tab in db_one:
                        # Drop a bare leading "id" column, then possibly
                        # re-inject a table-qualified index column.
                        if tab["columns"][0] == "id":
                            tab["columns"] = tab["columns"][1:]
                            tab["column_types"] = tab["column_types"][1:]
                            tab["columns_original"] = tab["columns_original"][1:]
                            tab["values"] = tab["values"][1:]
                        if random.random() < ADD_INDEX_ID:
                            index_col = "id"
                            if random.random() < ADD_INDEX_NAME:
                                index_col = "name"
                            if index_col not in tab["columns"]:
                                tabn_str = "_".join(tab["name"].split(" "))
                                tab["columns"] = [tab["columns"][0]] + [tabn_str +" "+ index_col] + tab["columns"][1:]
                                val_add = 1
                                if index_col == "name":
                                    val_add = "value"
                                tab["values"] = [tab["values"][0]] + [val_add] + tab["values"][1:]
                                tab["column_types"] = [tab["column_types"][0]] + ["text"] + tab["column_types"][1:]
                                tab["columns_original"] = [tab["columns_original"][0]] + [index_col] + tab["columns_original"][1:]
                        db_one_new.append(tab)
                    dbs.append(db_one_new)
                else:
                    dbs.append(db_one)
            # NOTE(review): the table that triggered this branch is not added
            # to any database — it is discarded when the accumulator resets.
            db_one = []
            cur_cols = []
            ahd_cols = []
    return dbs


def get_sql_slots(sql_pattern):
    """Scan a SQL template for its placeholder slots.

    Returns (slots, columns, ops, values, aggs, dasc) where `columns` maps
    COLUMN slots to accumulated type constraints, `ops` maps OP slots to the
    column (and optional VALUE slot) they compare, `values` maps free VALUE
    slots to "number"/"integer", `aggs` maps AGG slots to their column, and
    `dasc` flags an ORDER BY direction slot.
    """
    sql_tokens = sql_pattern.split(" ")
    columns = {}
    ops = {}
    values = {}
    aggs = {}
    dasc = False
    slots = []
    val_pros = []  # VALUE slots already claimed by an OP
    for i, tok in enumerate(sql_tokens):
        if "{" in tok and "}" in tok and "FROM" not in tok:
            if tok not in slots:
                slots.append(tok)
            if "AGG" in tok:
                # Expect the shape: {AGGk} ( {COLUMNj} )
                if i + 2 < len(sql_tokens) and "(" == sql_tokens[i+1]:
                    if "COLUMN" in sql_tokens[i+2]:
                        # Aggregated columns must be numeric.
                        if sql_tokens[i+2] not in columns.keys():
                            columns[sql_tokens[i+2]] = ["number"]
                        else:
                            columns[sql_tokens[i+2]].append("number")
                        aggs[tok] = sql_tokens[i+2]
                else:
                    print("\nTemplate Error: AGG format is wrong!!!")
                    print(sql_pattern)
            elif "COLUMN" in tok:
                if tok not in columns.keys():
                    columns[tok] = []
            elif "OP" in tok:
                # An OP either follows its column directly, or follows the
                # ")" of an aggregate / COUNT(*) expression.
                if i - 1 >= 0 and "COLUMN" in sql_tokens[i-1]:
                    ops[tok] = [sql_tokens[i-1]]
                    if i + 1 < len(sql_tokens) and "VALUE" in sql_tokens[i+1]:
                        ops[tok].append(sql_tokens[i+1])
                        val_pros.append(sql_tokens[i+1])
                elif i - 2 >= 0 and ")" == sql_tokens[i-1] and ("COLUMN" in sql_tokens[i-2] or "*" == sql_tokens[i-2]):
                    ops[tok] = [sql_tokens[i-2]]
                    if i + 1 < len(sql_tokens) and "VALUE" in sql_tokens[i+1]:
                        ops[tok].append(sql_tokens[i+1])
                        val_pros.append(sql_tokens[i+1])
                else:
                    print("\nTemplate Error: OP format is wrong!!!")
                    print(sql_pattern)
            elif "VALUE" in tok and tok not in val_pros:
                """
                OP} {VALUE0}
                LIMIT {VALUE0}
                {COLUMN1} BETWEEN {VALUE0} AND {VALUE1}
                HAVING COUNT ( * ) {OP1} {VALUE1}
                = {VALUE1}
                """
                if i - 2 >= 0 and ("BETWEEN" == sql_tokens[i-1] or "AND" == sql_tokens[i-1]):
                    values[tok] = "number"
                    if "BETWEEN" == sql_tokens[i-1]:
                        columns[sql_tokens[i-2]].append("number")
                elif i - 1 >= 0 and "LIMIT" == sql_tokens[i-1]:
                    values[tok] = "integer"
                elif i - 1 >= 0 and "=" == sql_tokens[i-1]:
                    assert "COLUMN" in sql_tokens[i-2]
                    # Tie the VALUE slot to its column so the populated value
                    # is drawn from that column's example cell.
                    columns[sql_tokens[i-2]].append(tok)
                else:
                    print("\nTemplate Error: VALUE format is wrong!!!")
                    print(sql_pattern)
            elif "DASC" in tok:
                dasc = True
    return (list(set(slots)), columns, ops, values, aggs, dasc)


def get_q_slots(question):
    """Return the distinct placeholder tokens in a question template."""
    q_toks = question.strip().split(" ")
    q_slots = list(set([tok for tok in q_toks if "TABLE" in tok or "SC" in tok or ("{" in tok and "}" in tok)]))
    return q_slots


def process_constraints(constraints, columns, slots):
    """Apply template constraints: pin OP/AGG slot values, tag columns that
    must be ids or come from a specific table, and flag templates that
    require a multi-table database.

    Returns (slot_values, columns, skip_db_with_one_table).
    """
    slot_values = {}
    skip_db_with_one_table = False
    for constraint in constraints:
        if "P0==" == constraint:
            assert "{OP0}" in slots
            slot_values["{OP0}"] = "="
        elif "P1==" == constraint:
            assert "{OP1}" in slots
            slot_values["{OP1}"] = "="
        elif "P0=P1==" == constraint:
            assert "{OP0}" in slots and "{OP1}" in slots
            slot_values["{OP0}"] = "="
            slot_values["{OP1}"] = "="
        elif "P0=P1=P2==" == constraint:
            assert "{OP0}" in slots and "{OP1}" in slots and "{OP2}" in slots
            slot_values["{OP0}"] = "="
            slot_values["{OP1}"] = "="
            slot_values["{OP2}"] = "="
        elif "P0=>" == constraint:
            assert "{OP0}" in slots
            slot_values["{OP0}"] = ">"
        elif "P0=<" == constraint:
            assert "{OP0}" in slots
            slot_values["{OP0}"] = "<"
        elif "{AGG0}=MIN" == constraint:
            assert "{AGG0}" in slots
            slot_values["{AGG0}"] = "MIN"
        elif "{AGG0}=MAX" == constraint:
            assert "{AGG0}" in slots
            slot_values["{AGG0}"] = "MAX"
        elif "C0-id" == constraint:
            skip_db_with_one_table = True
            assert "{COLUMN0}" in slots and "{COLUMN0}" in columns.keys()
            columns["{COLUMN0}"].append("id")
        elif "C1-id" == constraint:
            skip_db_with_one_table = True
            assert "{COLUMN1}" in slots and "{COLUMN1}" in columns.keys()
            columns["{COLUMN1}"].append("id")
        elif "C2-id" == constraint:
            skip_db_with_one_table = True
            assert "{COLUMN2}" in slots and "{COLUMN2}" in columns.keys()
            columns["{COLUMN2}"].append("id")
        elif "C3-T1" == constraint:
            skip_db_with_one_table = True
            assert "{COLUMN3}" in slots and "{COLUMN3}" in columns.keys()
            columns["{COLUMN3}"].append("T1")
        elif "T0-T1-JOIN" == constraint or 'T0-T1-NO-JOIN' == constraint:
            skip_db_with_one_table = True
            columns["{COLUMN0}"].append("T0")
            if "{COLUMN1}" in columns.keys():
                columns["{COLUMN1}"].append("T1")
    return (slot_values, columns, skip_db_with_one_table)


# helper function
def gen_col_info(col_str, columns, columns_inf):
    """Pick a concrete column for slot `col_str` from `columns_inf`
    (a list of (name, type, example-value) triples), honoring the slot's
    accumulated "id"/"number" constraints, and return (column_name,
    [(value_slot, example_value), ...] or None)."""
    col_conds = columns[col_str]
    value_slot = [cc for cc in col_conds if "VALUE" in cc]
    col = ""
    value_val = None
    if "id" in col_conds:
        has_id = False
        for c, t, v in columns_inf:
            # NOTE(review): this tests `col` (still "") rather than `c`, so
            # the loop can never match — looks like a latent bug; confirm.
            if "id" in col or "name" in col:
                has_id = True
                col, ctype, values = c, t, v
                break
        if not has_id:
            col, ctype, value = columns_inf[0]
    elif "number" in col_conds:
        # Prefer a real-typed column for numeric comparisons.
        for colinfo in columns_inf[1:]:
            if colinfo[1] == "real":
                col, ctype, value = colinfo
    if col == "":
        col, ctype, value = random.choice(columns_inf[1:])
    if len(value_slot) > 0:
        assert len(value_slot) < 3
        # NOTE(review): the id-branch break assigns `values`, not `value`,
        # so `value` may be unbound here — confirm against original source.
        if len(value_slot) == 1:
            value_val = [(value_slot[0], value)]
        else:
            value_val = [(value_slot[0], value), (value_slot[1], value)]
    return (col, value_val)


def replace_dict(inp, dicts):
    """Return `inp` with every dict key replaced by its (stringified) value."""
    for rep_in, rep_out in dicts.items():
        inp = inp.replace(rep_in, str(rep_out))
    return inp


def get_labels(sql_pattern):
    """Derive, for each COLUMN slot (and "*"), a space-joined string
    describing every clause context it appears in (e.g. "SELECT",
    "WHERE OP", "ORDER_BY ASC LIMIT"), used as the column's label."""
    STRUCT_KEYWORDS = ["WHERE", "GROUP_BY", "HAVING", "ORDER_BY", "SELECT"]
    EXTRA_OPS = ["NOT_IN", "IN", "BETWEEN", "="]
    COUNT = "COUNT"
    OTHER_KEYWORDS = ["LIMIT"]
    #AGG, OP, DASC, OR, =
    NEST_KEYWORDS = ["EXCEPT", "UNION", "INTERSECT"]
    # Collapse two-word keywords so simple token scanning works.
    sql_tokens = sql_pattern.replace("GROUP BY", "GROUP_BY").replace("ORDER BY", "ORDER_BY").replace("NOT IN", "NOT_IN").split(" ")
    columns = {}
    cur_nest = ""    # current set-op / subquery context prefix
    cur_struct = ""  # current clause (SELECT/WHERE/...)
    cur_len = len(sql_tokens)
    select_count = 0
    for i, tok in enumerate(sql_tokens):
        if tok in NEST_KEYWORDS:
            if cur_nest == "" or cur_nest == "OP_SEL":
                cur_nest = tok
            else:
                cur_nest = cur_nest + " " + tok
        elif tok in STRUCT_KEYWORDS:
            cur_struct = tok
            if tok == "SELECT":
                select_count += 1
                # A second SELECT without a set operator is a subquery.
                if select_count > 1 and cur_nest == "":
                    cur_nest = "OP_SEL"
        elif "COLUMN" in tok or "*" == tok:
            if tok not in columns.keys():
                columns[tok] = []
            # SELECT {COLUMN0}
            # SELECT {COLUMN0} , {COLUMN1}
            # SELECT {AGG0} ( {COLUMN0} )
            # SELECT {COLUMN0} {FROM} WHERE {COLUMN1} {OP} ( SELECT {AGG0} ( {COLUMN1} ) {FROM} ) AND {COLUMN2} {OP0} {VALUE0}
            if cur_struct == "SELECT":
                if "," == sql_tokens[i-1] or "SELECT" == sql_tokens[i-1]:
                    columns[tok].append(cur_nest + " " + cur_struct)
                elif "(" == sql_tokens[i-1]:
                    # Aggregated column: include the AGG token in the label.
                    columns[tok].append(cur_nest + " " + cur_struct + " " + sql_tokens[i-2])
                else:
                    print("\nWarning: unexcepted SELECT format")
                    print(sql_pattern)
            # WHERE {COLUMN} {OP}
            # WHERE {COLUMN2} {OP0}
            # WHERE OR {COLUMN2} {OP0}
            # WHERE {COLUMN2} BETWEEN
            elif cur_struct == "WHERE":
                assert "OP" in sql_tokens[i+1] or sql_tokens[i+1] in EXTRA_OPS
                last_tok = sql_tokens[i-1]
                if "OR" == last_tok or (i+3 < cur_len and "OR" == sql_tokens[i+3]):
                    columns[tok].append(cur_nest + " " + cur_struct + " OR " + sql_tokens[i+1])
                elif "WHERE" == last_tok or "AND" == last_tok:
                    columns[tok].append(cur_nest + " " + cur_struct + " " + sql_tokens[i+1])
                else:
                    print("\nWarning: unexcepted WHERE format")
            # GROUP BY {COLUMN0} , {COLUMN0}
            elif cur_struct == "GROUP_BY":
                columns[tok].append(cur_nest + " " + cur_struct)
            # HAVING COUNT ( * ) {OP0}
            # HAVING {AGG0} ( {COLUMN2} ) {OP0}
            elif cur_struct == "HAVING":
                last_tok = sql_tokens[i-1]
                if last_tok != "(" and not ("AGG" in sql_tokens[i-2] or COUNT == sql_tokens[i-2]):
                    print("\nWarning: unexcepted HAVING format")
                columns[tok].append(cur_nest + " " + cur_struct + " " + sql_tokens[i-2] + " " + sql_tokens[i+2])
            # ORDER BY COUNT ( * ) {DASC} LIMIT
            # ORDER BY COUNT ( * ) {DASC}
            # ORDER BY {COLUMN1} {DASC} LIMIT
            # ORDER BY {COLUMN1} LIMIT
            # ORDER BY {COLUMN1} , {COLUMN1} {DASC} LIMIT
            # ORDER BY {COLUMN1} {DASC}
            # if no DASC then is ASC
            elif cur_struct == "ORDER_BY":
                last_tok = sql_tokens[i-1]
                if last_tok == "(":
                    dasc_tok = "{DASC}"
                    limit_tok = ""
                    if sql_tokens[i+2] != "{DASC}":
                        dasc_tok = "ASC"
                        if sql_tokens[i+2] == "LIMIT":
                            limit_tok = "LIMIT"
                    elif i+3 < cur_len and sql_tokens[i+3] == "LIMIT":
                        limit_tok = "LIMIT"
                    columns[tok].append(cur_nest + " " + cur_struct + " " + sql_tokens[i-2] + " " + dasc_tok + " " + limit_tok)
                elif last_tok == "ORDER_BY" or last_tok == ",":
                    dasc_tok = "ASC"
                    limit_tok = ""
                    # small dirty pass
                    if i+1 < cur_len and sql_tokens[i+1] == "{DASC}":
                        dasc_tok = "{DASC}"
                        if i+2 < cur_len and sql_tokens[i+2] == "LIMIT":
                            limit_tok = "LIMIT"
                    elif i+1 < cur_len and sql_tokens[i+1] == "LIMIT":
                        limit_tok = "LIMIT"
                    columns[tok].append(cur_nest + " " + cur_struct + " " + dasc_tok + " " + limit_tok)
                else:
                    print("\n------------Warning: unexcepted COLUMN label format")
    column_labels = {}
    for col, labels in columns.items():
        label_str = " ".join([l.strip() for l in labels])
        column_labels[col] = label_str
    return column_labels


def populate_one(db, templates, templates_one, sql_components):
    """
    'P0=P1==', 'P0=P1=P2==', 'P0==', 'P1==', 'P0=>', 'P0=<',
    '{AGG0}=MAX', '{AGG0}=MIN'
    'T0-T1-JOIN', 'T0-T1-NO-JOIN', 'C0-id',, 'C2-id', , 'C1-id', 'C3-T1'
    """
    # Instantiate one (SQL, question, column-labels) example for `db` by
    # sampling a template and filling every slot with concrete values.
    if len(db) > 1:
        template = random.choice(templates)
    else:
        template = random.choice(templates_one)
    sql_constraints = template['SQL constraints']
    sql_pattern = template["SQL pattern"]
    question, q_constraints = random.choice(template["questions"])
    constraints = list(set(sql_constraints + q_constraints))
    slots, columns, ops, vals, aggs, dasc = get_sql_slots(sql_pattern)
    slot_values, columns, skip_db_with_one_table = process_constraints(constraints, columns, slots)
    q_slots = get_q_slots(question)
    q_slot_values = {}
    # 1 process ops - update columns and values constraints
    for op, colv in ops.items():
        if colv[0] == "*":
            # COUNT(*)-style comparison: pick an operator and small integer.
            if op not in slot_values.keys():
                op_val = random.choice([">", "<", ">=", "<=", "="])
                slot_values[op] = op_val
            if len(colv) == 2:
                slot_values[colv[1]] = random.randint(1, 10)
        else:
            if colv[0] not in columns.keys():
                print("\n-----colv[0] not in columns.keys(): ")
                print(columns.keys())
                print(ops)
            assert colv[0] in columns.keys()
            if op not in slot_values.keys():
                if random.random() < OP_VAL_EQUAL:
                    op_val = "="
                else:
                    op_val = random.choice(OPS)
                slot_values[op] = op_val
                # Ordering comparisons force the column to be numeric.
                if op_val in [">", "<", ">=", "<="]:
                    columns[colv[0]].append("number")
            if len(colv) == 2:
                columns[colv[0]].append(colv[1])
    # 2 process columns
    random.shuffle(db)
    table_0, table_1 = None, None
    table_label_0 = ""
    table_label_1 = ""
    use_table_1 = False
    if "{COLUMN0}" in columns.keys() or "{TABLE0}" in q_slots:
        table_label_0 = "SELECT"
    if len(db) >= 2:
        table_0, table_1 = db[:2]
        if "{TABLE1}" in q_slots:
            table_label_1 = "SELECT"
            if "{TABLE0}" in q_slots:
                # p<0.5 from T0, T1 AND to SELECT T1 *
                # otherwise all from T0 AND to SELECT T1 *
                if random.random() < USE_TABLE_1:
                    use_table_1 = True
            else:
                # p<0.4 all from T0
                # AND to SELECT T1 *
                if random.random() < 0.6:
                    use_table_1 = True
                    if "{COLUMN1}" in columns.keys():
                        table_label_1 = "SELECT"
        else:
            # p<0.5 from T0, T1 AND to SELECT T1 *
            # otherwise all from T0, NOT to SELECT T1 *
            if random.random() < USE_TABLE_1:
                use_table_1 = True
                if "{COLUMN1}" in columns.keys():
                    table_label_1 = "SELECT"
    else:
        # Single-table database: both roles map to the same table.
        table_0, table_1 = db[0], db[0]
    T0 = table_0["name"]
    T1 = table_1["name"]
    # (name, type, example value) triples, skipping the leading "*" column.
    columns_inf_0 = list(zip(table_0["columns"], table_0["column_types"], table_0["values"]))[1:]
    if use_table_1:
        columns_inf_1 = list(zip(table_1["columns"], table_1["column_types"], table_1["values"]))[1:]
    if "{COLUMN0}" in columns.keys():
        col_0, value_0 = gen_col_info("{COLUMN0}", columns, columns_inf_0)
        slot_values["{COLUMN0}"] = col_0
        if value_0 is not None:
            for k, v in value_0:
                slot_values[k] = v
        # Remove the chosen column to avoid duplicates (if enough remain).
        if len(columns_inf_0) > 2:
            columns_inf_0 = [(col, ctype, val) for col, ctype, val in columns_inf_0 if col != col_0]
    if use_table_1:
        columns_input = columns_inf_1
    else:
        columns_input = columns_inf_0
    if "{COLUMN1}" in columns.keys():
        col_1, value_1 = gen_col_info("{COLUMN1}", columns, columns_input)
        slot_values["{COLUMN1}"] = col_1
        if value_1 is not None:
            for k, v in value_1:
                slot_values[k] = v
        columns_input_org = columns_input
        if len(columns_input) > 3:
            columns_input = [(col, ctype, val) for col, ctype, val in columns_input if col != col_1]
        if len(columns_input) < 2:
            columns_input = columns_input_org
    if "{COLUMN2}" in columns.keys():
        col_2, value_2 = gen_col_info("{COLUMN2}", columns, columns_input)
        slot_values["{COLUMN2}"] = col_2
        if value_2 is not None:
            for k, v in value_2:
                slot_values[k] = v
        columns_input_org = columns_input
        if len(columns_input) > 2:
            columns_input = [(col, ctype, val) for col, ctype, val in columns_input if col != col_2]
        if len(columns_input) < 2:
            columns_input = columns_input_org
    if "{COLUMN3}" in columns.keys():
        col_3, value_3 = gen_col_info("{COLUMN3}", columns, columns_input)
        slot_values["{COLUMN3}"] = col_3
        if value_3 is not None:
            for k, v in value_3:
                slot_values[k] = v
    # 3 aggs
    for agg in aggs.keys():
        if agg not in slot_values.keys():
            slot_values[agg] = random.choice(["MAX", "MIN", "SUM", "AVG"])
    # 4 values
    NUM = 1
    for val, cond in vals.items():
        assert val not in slot_values.keys()
        if cond == "integer":
            if random.random() < USE_1_FOR_INTEGER:
                slot_values[val] = 1
            else:
                NUM = random.randint(2, 10)
                slot_values[val] = NUM
        else:
            slot_values[val] = random.randint(0, 100)
    # 5 dasc - true
    if dasc == True:
        slot_values["{DASC}"] = random.choice(["ASC", "DESC"])
    # 6 check if all sql slot values are done
    if len(slots) != len(slot_values):
        print("\nlen(slots) != len(slot_values)")
        print("sql_pattern: ", sql_pattern)
        print("slots: ", slots)
        print("slot_values: ", slot_values.keys())
    assert len(slots) == len(slot_values)
    # 7 for the questions slots:
    for qs in q_slots:
        if qs == "{TABLE0}":
            q_slot_values["{TABLE0}"] = T0
        elif qs == "{TABLE1}":
            q_slot_values["{TABLE1}"] = T1
        elif "SC" in qs:
            # Sort-direction phrasings keyed by the chosen ASC/DESC value.
            sc = slot_values["{DASC}"]
            if "SC" == qs:
                q_slot_values[qs] = random.choice(sql_components["SC"][sc])
            elif "SC_COL_LIMIT" == qs:
                if NUM > 1:
                    sc = sc + "_NUM"
                    q_slot_values[qs] = random.choice(sql_components["SC_COL_LIMIT"][sc]).replace("[NUM]", str(NUM))
                else:
                    q_slot_values[qs] = random.choice(sql_components["SC_COL_LIMIT"][sc])
            elif "SC_COL_COUNT_LIMIT" in qs:
                sc_type = qs.replace("SC_COL_COUNT_LIMIT", "")
                if NUM > 1:
                    sc = sc + "_NUM" + sc_type
                    q_slot_values[qs] = random.choice(sql_components["SC_COL_COUNT_LIMIT"][sc]).replace("[NUM]", str(NUM))
                else:
                    sc = sc + sc_type
                    q_slot_values[qs] = random.choice(sql_components["SC_COL_COUNT_LIMIT"][sc])
            else:
                if "-" not in qs:
                    print("qs wrong", qs)
                assert "-" in qs
                if "C1" in qs:
                    sc_col = slot_values["{COLUMN1}"]
                elif "C2" in qs:
                    sc_col = slot_values["{COLUMN2}"]
                q_slot_values[qs] = random.choice(sql_components["SC_COL"][sc]).replace("[COL]", sc_col)
        else:
            if qs not in slot_values.keys():
                print("qs not in sv: ", qs)
                print("sql_pattern: ", sql_pattern)
                print("slot_values: ", slot_values)
            assert qs in slot_values.keys()
            if "OP" in qs:
                q_slot_values[qs] = random.choice(sql_components["OP"][slot_values[qs]])
            elif "AGG" in qs:
                q_slot_values[qs] = random.choice(sql_components["AGG"][slot_values[qs]])
            elif "COLUMN" in qs:
                # Drop the table-name prefix; keep at most 5 words.
                q_slot_values[qs] = " ".join(slot_values[qs].split(" ")[1:6])
            elif "VALUE" in qs:
                q_slot_values[qs] = " ".join(str(slot_values[qs]).split(" ")[:5])
            else:
                print("\nWarning: some q slot type not considered!")
                print(qs)
    # 8 check if all question slots are processed
    assert len(q_slots) == len(q_slot_values)
    # 9 generate final SQL-question pair
    question_gen = replace_dict(question, q_slot_values)
    # 10 generate column labels
    slot_values_new = {}
    for sl, vl in slot_values.items():
        if "COLUMN" in sl:
            # Join multi-word column names so SQL stays one token per column.
            slot_values_new[sl] = "_=_".join(vl.split(" "))
        else:
            slot_values_new[sl] = vl
    column_labels = get_labels(sql_pattern)
    column_lables_real = {}
    for col, label in column_labels.items():
        if col != "*":
            col = slot_values[col]
        for slot, value in slot_values.items():
            label = label.replace(slot, str(value))
        column_lables_real[col] = label
    # also add labels for table column *
    if table_label_0 != "":
        column_lables_real[table_0["columns"][0]] = table_label_0
    if table_label_1 != "":
        column_lables_real[table_1["columns"][0]] = table_label_1
    sql_gen = replace_dict(sql_pattern.replace(" {FROM}", ""), slot_values_new)
    return (sql_gen, question_gen, column_lables_real)


# augmentation for one db
def augment_db(db, templates, templates_one_table, sql_components, aug_limit):
    """Generate ~`aug_limit` examples for one database; the fractional part
    of `aug_limit` is honored probabilistically."""
    count = 1
    augment_pairs = []
    while count < aug_limit or (count == int(aug_limit)+1 and random.random()<aug_limit+1-count):
        sql_gen, question_gen, column_lables = populate_one(db, templates, templates_one_table, sql_components)
        augment_pairs.append((question_gen, sql_gen, column_lables))
        count += 1
    return augment_pairs


def augment_all_dbs(dbs, templates, templates_one_table, sql_components, aug_limit):
    """Map each database's serialized schema string ("cols |-| values") to
    its list of generated examples."""
    augment_data = {}
    for idx, db in enumerate(dbs):
        # if idx % 10000 == 0:
        #     print("processed: ", idx)
        db_cols = ["*"]
        db_values = [""]
        for tab in db:
            db_cols.extend(tab["columns"])
            db_values.extend(tab["values"])
        assert len(db_cols) == len(db_values)
        schema_str = " </s> ".join(db_cols)
        values_str = " </s> ".join([str(k) for k in db_values])
        schema_str = schema_str + " |-| " + values_str
        augment_pairs = augment_db(db, templates, templates_one_table, sql_components, aug_limit)
        augment_data[schema_str] = augment_pairs
    return augment_data


# Return the mapping of all the labels to an integer
def get_label_map(data):
    """Assign integer ids (1-based) to label strings, most frequent first."""
    label_dict = defaultdict(int)
    for schema_str, example_list in data.items():
        for example in example_list:
            (question, sql, col_labels) = example
            for val in col_labels.values():
                label_dict[val] += 1
    label_list = sorted(label_dict.items(), key=lambda kv: kv[1], reverse=True)
    label_map = {}
    count = 1
    for label, _ in label_list:
        label_map[label] = count
        count += 1
    return label_map


def map_labels(data, label_map, is_dev=False):
    """Attach integer label dicts to each example, dropping examples whose
    labels are missing from `label_map`."""
    data_new = {}
    skip_count = 0
    count = 0
    for schema_str, exs in data.items():
        count += 1
        # if count % 100000 == 0:
        #     print("processed: ", count)
        data_new[schema_str] = []
        for ex in exs:
            skip = False
            label_dict = ex[2]
            label_dict_new = {}
            for col, label in label_dict.items():
                if label in label_map.keys():
                    label_dict_new[col] = label_map[label]
                else:
                    skip = True
                    skip_count += 1
                    #else just skip
            if not skip:
                data_new[schema_str].append((ex[0], ex[1], ex[2], label_dict_new))
    # print("skip_count: ", skip_count)
    return data_new


def write_final_file(augment_data):
    """Flatten the augmented data into the final JSON record list
    (question, columns, one row of example values, integer column labels),
    dropping examples whose tokenized length exceeds MAX_TOKEN_LEN."""
    data_json = []
    skip_count = 0
    line_count = 0
    dup_count = 0
    pro_count = 0
    for schema_str, exs in augment_data.items():
        for ex in exs:
            line_count += 1
            # if line_count % 100000 == 0:
            #     print("processed: ", line_count)
            question, sql, label_strs, label_ints = ex
            col_str, val_str = schema_str.split(" |-| ")
            colns = col_str.split(" </s> ")
            values = val_str.split(" </s> ")
            assert len(colns) == len(values)
            cols = []
            label_num = len(label_ints)
            label_count = 0
            for idx, coln in enumerate(colns):
                col = {}
                col["name"] = coln
                col["value"] = values[idx]
                if coln != "*":
                    # Strip the table-name prefix from the column name.
                    col["name"] = " ".join(coln.split(" ")[1:])
                col["label_int"] = 0
                if coln in label_ints.keys():
                    col["label_int"] = label_ints[coln]
                    label_count += 1
                cols.append(col)
            assert label_count >= label_num
            if label_count > label_num:
                dup_count += 1
            col_list = []
            label_list = []
            value_list = []
            col_count = 0
            for i, col in enumerate(cols):
                # Cap unlabeled columns at 40; always keep labeled ones.
                if col_count > 40 and col["label_int"] == 0:
                    continue
                col_list.append(col["name"])
                value_list.append(col["value"])
                col_count += 1
                label_list.append(int(col["label_int"]))
            assert len(col_list) == len(value_list)
            label_str = " ".join([str(k) for k in label_list])
            q_col_str = "<s> " + question.lower() + " </s> " + " </s> ".join(col_list).strip() + " </s> "
            caption = q_col_str + " ||| " + label_str
            tokens = tokenizer.tokenize(q_col_str)
            if len(tokens) > MAX_TOKEN_LEN:
                continue
            data_json.append({"question": question.lower(),
                              "columns": col_list,
                              "rows": [value_list],
                              "column_labels": label_list
                              })
            pro_count += 1
    print("total line: ", line_count)
    print("skiped line: ", skip_count)
    print("dup line: ", dup_count)
    print("pro line: ", pro_count)
    return data_json


if __name__=="__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("table_file", help="Please provide a processed table file")
    parser.add_argument("nlsql_templates_file", help="Please provide a template file")
    parser.add_argument("sql_components_file", help="Please provide the SQL component file")
    parser.add_argument("output", help="Please provide the output path")
    parser.add_argument("size", type=int, help="Please provide the output path")
    args = parser.parse_args()
    # read input files
    table_file = args.table_file
    nlsql_templates_file = args.nlsql_templates_file
    sql_components_file = args.sql_components_file
    templates = read_NL_SQL_template(nlsql_templates_file)
    sql_components = read_json(sql_components_file)
    all_tables = read_json(table_file)
    table_dbs = create_dbs(all_tables)
    single_table_templates = get_templates_for_one_table(templates)
    # Average number of examples to generate per database.
    sample_size_per_db = 1.0 * args.size / len(table_dbs)
    augment_data = augment_all_dbs(table_dbs, templates, single_table_templates, sql_components, sample_size_per_db)
    label_map = get_label_map(augment_data)
    augment_data = map_labels(augment_data, label_map)
    json_data = write_final_file(augment_data)
    with open(args.output, "w") as f:
        json.dump(json_data, f)
nilq/baby-python
python
""" Tests for exact diffuse initialization Notes ----- These tests are against four sources: - Koopman (1997) - The R package KFAS (v1.3.1): test_exact_diffuse_filtering.R - Stata: test_exact_diffuse_filtering_stata.do - Statsmodels state space models using approximate diffuse filtering Koopman (1997) provides analytic results for a few cases that we can test against. More comprehensive tests are available against the R package KFAS, which also uses the Durbin and Koopman (2012) univariate diffuse filtering method. However, there are apparently some bugs in the KFAS output (see notes below), so some tests are run against Stata. KFAS v1.3.1 appears to have the following bugs: - Incorrect filtered covariance matrix (in their syntax, kf$Ptt). These matrices are not even symmetric, so they are clearly wrong. - Loglikelihood computation appears to be incorrect for the diffuse part of the state. See the section with "Note: Apparent loglikelihood discrepancy" in the R file. It appears that KFAS does not include the constant term (-0.5 * log(2 pi)) for the diffuse observations, whereas the loglikelihood function as given in e.g. section 7.2.5 of Durbin and Koopman (2012) shows that it should be included. To confirm this, we also check against the loglikelihood value computed by Stata. Stata uses the DeJong diffuse filtering method, which gives almost identical results but does imply some numerical differences for output at the 6th or 7th decimal place. Finally, we have tests against the same model using approximate (rather than exact) diffuse filtering. These will by definition have some discrepancies in the diffuse observations. 
Author: Chad Fulton License: Simplified-BSD """ from __future__ import division, absolute_import, print_function import numpy as np import pandas as pd import pytest import os from statsmodels.tools.tools import Bunch from statsmodels import datasets from statsmodels.tsa.statespace.initialization import Initialization from statsmodels.tsa.statespace.kalman_smoother import KalmanSmoother from statsmodels.tsa.statespace.mlemodel import MLEModel from statsmodels.tsa.statespace.varmax import VARMAX from statsmodels.tsa.statespace.dynamic_factor import DynamicFactor from statsmodels.tsa.statespace.structural import UnobservedComponents from numpy.testing import assert_equal, assert_allclose import pytest from . import kfas_helpers current_path = os.path.dirname(os.path.abspath(__file__)) macrodata = datasets.macrodata.load_pandas().data macrodata.index = pd.PeriodIndex(start='1959Q1', end='2009Q3', freq='Q') # - Model definitions -------------------------------------------------------- def model_local_level(endog=None, params=None, direct=False): if endog is None: y1 = 10.2394 endog = np.r_[y1, [1] * 9] if params is None: params = [1.993, 8.253] sigma2_y, sigma2_mu = params if direct: mod = None # Construct the basic representation ssm = KalmanSmoother(k_endog=1, k_states=1, k_posdef=1) ssm.bind(endog) init = Initialization(ssm.k_states, initialization_type='diffuse') ssm.initialize(init) # ssm.filter_univariate = True # should not be required # Fill in the system matrices for a local level model ssm['design', :] = 1 ssm['obs_cov', :] = sigma2_y ssm['transition', :] = 1 ssm['selection', :] = 1 ssm['state_cov', :] = sigma2_mu else: mod = UnobservedComponents(endog, 'llevel') mod.update(params) ssm = mod.ssm ssm.initialize(Initialization(ssm.k_states, 'diffuse')) return mod, ssm def model_local_linear_trend(endog=None, params=None, direct=False): if endog is None: y1 = 10.2394 y2 = 4.2039 y3 = 6.123123 endog = np.r_[y1, y2, y3, [1] * 7] if params is None: params = [1.993, 
8.253, 2.334] sigma2_y, sigma2_mu, sigma2_beta = params if direct: mod = None # Construct the basic representation ssm = KalmanSmoother(k_endog=1, k_states=2, k_posdef=2) ssm.bind(endog) init = Initialization(ssm.k_states, initialization_type='diffuse') ssm.initialize(init) # ssm.filter_univariate = True # should not be required # Fill in the system matrices for a local level model ssm['design', 0, 0] = 1 ssm['obs_cov', 0, 0] = sigma2_y ssm['transition'] = np.array([[1, 1], [0, 1]]) ssm['selection'] = np.eye(2) ssm['state_cov'] = np.diag([sigma2_mu, sigma2_beta]) else: mod = UnobservedComponents(endog, 'lltrend') mod.update(params) ssm = mod.ssm ssm.initialize(Initialization(ssm.k_states, 'diffuse')) return mod, ssm def model_common_level(endog=None, params=None, restricted=False): if endog is None: y11 = 10.2394 y21 = 8.2304 endog = np.column_stack([np.r_[y11, [1] * 9], np.r_[y21, [1] * 9]]) if params is None: params = [0.1111, 3.2324] theta, sigma2_mu = params # sigma2_1 = 1 # sigma_12 = 0 # sigma2_2 = 1 if not restricted: # Construct the basic representation ssm = KalmanSmoother(k_endog=2, k_states=2, k_posdef=1) ssm.bind(endog.T) init = Initialization(ssm.k_states, initialization_type='diffuse') ssm.initialize(init) # ssm.filter_univariate = True # should not be required # Fill in the system matrices for a common trend model ssm['design'] = np.array([[1, 0], [theta, 1]]) ssm['obs_cov'] = np.eye(2) ssm['transition'] = np.eye(2) ssm['selection', 0, 0] = 1 ssm['state_cov', 0, 0] = sigma2_mu else: # Construct the basic representation ssm = KalmanSmoother(k_endog=2, k_states=1, k_posdef=1) ssm.bind(endog.T) init = Initialization(ssm.k_states, initialization_type='diffuse') ssm.initialize(init) # ssm.filter_univariate = True # should not be required # Fill in the system matrices for a local level model ssm['design'] = np.array([[1, theta]]).T ssm['obs_cov'] = np.eye(2) ssm['transition', :] = 1 ssm['selection', :] = 1 ssm['state_cov', :] = sigma2_mu return ssm def 
model_var1(endog=None, params=None, measurement_error=False, init=None): if endog is None: endog = (np.log( macrodata[['realgdp','realcons']]).iloc[:21].diff().iloc[1:] * 400) if params is None: params = np.r_[0.5, 0.3, 0.2, 0.4, 2**0.5, 0, 3**0.5] if measurement_error: params = np.r_[params, 4, 5] # Model mod = VARMAX(endog, order=(1, 0), trend='nc', measurement_error=measurement_error) mod.update(params) ssm = mod.ssm if init is None: init = Initialization(ssm.k_states, 'diffuse') ssm.initialize(init) return mod, ssm def model_dfm(endog=None, params=None, factor_order=2): if endog is None: endog = (np.log( macrodata[['realgdp','realcons']]).iloc[:21].diff().iloc[1:] * 400) if params is None: params = np.r_[0.5, 1., 1.5, 2., 0.9, 0.1] # Model mod = DynamicFactor(endog, k_factors=1, factor_order=factor_order) mod.update(params) ssm = mod.ssm ssm.filter_univariate = True init = Initialization(ssm.k_states, 'diffuse') ssm.initialize(init) return mod, ssm # - Analytic tests (Koopman, 1997) ------------------------------------------- class TestLocalLevelAnalytic(object): @classmethod def setup_class(cls, **kwargs): cls.mod, cls.ssm = model_local_level(**kwargs) cls.res = cls.ssm.smooth() def test_results(self): ssm = self.ssm res = self.res y1 = ssm.endog[0, 0] sigma2_y = ssm['obs_cov', 0, 0] sigma2_mu = ssm['state_cov', 0, 0] # Basic initialization variables assert_allclose(res.predicted_state_cov[0, 0, 0], 0) assert_allclose(res.predicted_diffuse_state_cov[0, 0, 0], 1) # Output of the exact diffuse initialization, see Koopman (1997) assert_allclose(res.forecasts_error[0, 0], y1) assert_allclose(res.forecasts_error_cov[0, 0, 0], sigma2_y) assert_allclose(res.forecasts_error_diffuse_cov[0, 0, 0], 1) assert_allclose(res.kalman_gain[0, 0, 0], 1) assert_allclose(res.predicted_state[0, 1], y1) assert_allclose(res.predicted_state_cov[0, 0, 1], sigma2_y + sigma2_mu) assert_allclose(res.predicted_diffuse_state_cov[0, 0, 1], 0) # Miscellaneous assert_equal(res.nobs_diffuse, 1) 
class TestLocalLevelAnalyticDirect(TestLocalLevelAnalytic):
    @classmethod
    def setup_class(cls):
        super(TestLocalLevelAnalyticDirect, cls).setup_class(direct=True)


class TestLocalLinearTrendAnalytic(object):
    # Check the exact diffuse filter output for the local linear trend model
    # against the closed-form expressions in Koopman (1997).
    @classmethod
    def setup_class(cls, **kwargs):
        cls.mod, cls.ssm = model_local_linear_trend(**kwargs)
        cls.res = cls.ssm.smooth()

    def test_results(self):
        ssm = self.ssm
        res = self.res

        y1, y2, y3 = ssm.endog[0, :3]
        sigma2_y = ssm['obs_cov', 0, 0]
        sigma2_mu, sigma2_beta = np.diagonal(ssm['state_cov'])

        # Basic initialization variables
        assert_allclose(res.predicted_state_cov[..., 0], np.zeros((2, 2)))
        assert_allclose(res.predicted_diffuse_state_cov[..., 0], np.eye(2))

        # Output of the exact diffuse initialization, see Koopman (1997)
        q_mu = sigma2_mu / sigma2_y
        q_beta = sigma2_beta / sigma2_y

        assert_allclose(res.forecasts_error[0, 0], y1)
        assert_allclose(res.kalman_gain[:, 0, 0], [1, 0])
        assert_allclose(res.predicted_state[:, 1], [y1, 0])
        P2 = sigma2_y * np.array([[1 + q_mu, 0],
                                  [0, q_beta]])
        assert_allclose(res.predicted_state_cov[:, :, 1], P2)
        # Fixed: previously indexed a single element ([0, 0, 1]) while
        # comparing against a (2, 2) matrix, so broadcasting checked only that
        # one element; the analytic P_inf,2 here is the (2, 2) matrix of ones.
        assert_allclose(res.predicted_diffuse_state_cov[:, :, 1],
                        np.ones((2, 2)))

        # assert_allclose(res.kalman_gain[:, 0, 1], [2, 1])
        assert_allclose(res.predicted_state[:, 2], [2 * y2 - y1, y2 - y1])
        P3 = sigma2_y * np.array([[5 + 2 * q_mu + q_beta, 3 + q_mu + q_beta],
                                  [3 + q_mu + q_beta, 2 + q_mu + 2 * q_beta]])
        assert_allclose(res.predicted_state_cov[:, :, 2], P3)
        assert_allclose(res.predicted_diffuse_state_cov[:, :, 2],
                        np.zeros((2, 2)))

        # Miscellaneous
        assert_equal(res.nobs_diffuse, 2)


class TestLocalLinearTrendAnalyticDirect(TestLocalLinearTrendAnalytic):
    @classmethod
    def setup_class(cls):
        super(TestLocalLinearTrendAnalyticDirect, cls).setup_class(direct=True)


class TestLocalLinearTrendAnalyticMissing(TestLocalLinearTrendAnalytic):
    # As TestLocalLinearTrendAnalytic, but with the second observation
    # missing, which extends the diffuse period to three observations.
    @classmethod
    def setup_class(cls):
        y1 = 10.2394
        y2 = np.nan
        y3 = 6.123123
        endog = np.r_[y1, y2, y3, [1] * 7]
        super(TestLocalLinearTrendAnalyticMissing, cls).setup_class(
            endog=endog)

    def test_results(self):
        ssm = self.ssm
        res = self.res

        y1, y2, y3 = ssm.endog[0, :3]
        sigma2_y = ssm['obs_cov', 0, 0]
        sigma2_mu, sigma2_beta = np.diagonal(ssm['state_cov'])

        # Test output
        q_mu = sigma2_mu / sigma2_y
        q_beta = sigma2_beta / sigma2_y

        a4 = [1.5 * y3 - 0.5 * y1, 0.5 * y3 - 0.5 * y1]
        assert_allclose(res.predicted_state[:, 3], a4)
        P4 = sigma2_y * np.array([
            [2.5 + 1.5 * q_mu + 1.25 * q_beta,
             1 + 0.5 * q_mu + 1.25 * q_beta],
            [1 + 0.5 * q_mu + 1.25 * q_beta,
             0.5 + 0.5 * q_mu + 2.25 * q_beta]])
        assert_allclose(res.predicted_state_cov[:, :, 3], P4)

        # Miscellaneous: one extra diffuse period due to the missing y2
        assert_equal(res.nobs_diffuse, 3)


def test_common_level_analytic():
    # Analytic test using results from Koopman (1997), section 5.3
    mod = model_common_level()
    y11, y21 = mod.endog[:, 0]
    theta = mod['design', 1, 0]
    sigma2_mu = mod['state_cov', 0, 0]

    # Perform filtering
    res = mod.smooth()

    # Basic initialization variables
    assert_allclose(res.predicted_state_cov[..., 0], np.zeros((2, 2)))
    assert_allclose(res.predicted_diffuse_state_cov[..., 0], np.eye(2))

    # Output of the exact diffuse initialization, see Koopman (1997)
    # Note: since Koopman (1997) did not apply the univariate method,
    # forecast errors and covariances, and the Kalman gain won't match
    # assert_allclose(res.forecasts_error[:, 0], [y11, y21])
    # assert_allclose(res.forecasts_error_cov[:, :, 0], np.eye(2))
    # F_inf1 = np.array([[1, theta],
    #                    [theta, 1 + theta**2]])
    # assert_allclose(res.forecasts_error_diffuse_cov[:, :, 0], F_inf1)
    # K0 = np.array([[1, 0],
    #                [-theta, 1]])
    # assert_allclose(res.kalman_gain[..., 0], K0)
    assert_allclose(res.predicted_state[:, 1], [y11, y21 - theta * y11])
    P2 = np.array([[1 + sigma2_mu, -theta],
                   [-theta, 1 + theta**2]])
    assert_allclose(res.predicted_state_cov[..., 1], P2)
    assert_allclose(res.predicted_diffuse_state_cov[..., 1], np.zeros((2, 2)))

    # Miscellaneous
    assert_equal(res.nobs_diffuse, 1)


def test_common_level_restricted_analytic():
    # Analytic test using results from Koopman (1997), section 5.3,
    # with the restriction mu_bar = 0
    mod = model_common_level(restricted=True)
    y11, y21 = mod.endog[:, 0]
    theta = mod['design', 1, 0]
    sigma2_mu = mod['state_cov', 0, 0]

    # Perform filtering
    res = mod.smooth()

    # Basic initialization variables
    assert_allclose(res.predicted_state_cov[..., 0], 0)
    assert_allclose(res.predicted_diffuse_state_cov[..., 0], 1)

    # Output of the exact diffuse initialization, see Koopman (1997)
    phi = 1 / (1 + theta**2)
    # Note: since Koopman (1997) did not apply the univariate method,
    # forecast errors and covariances, and the Kalman gain won't match
    # assert_allclose(res.forecasts_error[:, 0], [y11, y21])
    # assert_allclose(res.forecasts_error_cov[0, 0, 0], np.eye(2))
    # F_inf1 = np.array([[1, theta],
    #                    [theta, theta**2]])
    # assert_allclose(res.forecasts_error_diffuse_cov[0, 0, 0], F_inf1)
    # assert_allclose(res.kalman_gain[..., 0], phi * np.array([1, theta]))
    assert_allclose(res.predicted_state[:, 1], phi * (y11 + theta * y21))
    # Note: Koopman (1997) actually has phi + sigma2_mu**0.5, but that appears
    # to be a typo
    assert_allclose(res.predicted_state_cov[..., 1], phi + sigma2_mu)
    assert_allclose(res.predicted_diffuse_state_cov[..., 1], 0)

    # Miscellaneous
    assert_equal(res.nobs_diffuse, 1)


class CheckSSMResults(object):
    # Compare two sets of state space results attribute-by-attribute:
    # ``results_a`` (exact diffuse initialization) against ``results_b``
    # (the comparison results, e.g. approximate diffuse or KFAS output).
    # Subclasses must set ``results_a``, ``results_b``, and ``d`` (the number
    # of diffuse observations).
    atol = 1e-14
    rtol = 1e-07
    atol_diffuse = 1e-7
    rtol_diffuse = None

    def check_object(self, actual, desired, rtol_diffuse):
        # Short-circuit the test if desired is set to None (which allows us to
        # skip testing some objects where appropriate)
        if actual is None or desired is None:
            return
        # Optionally apply a different relative tolerance to the periods in
        # the diffuse observations.
        # This is especially useful when testing against approximate diffuse
        # initialization. By definition, the first few observations will be
        # quite different between the exact and approximate approach for many
        # quantities.
        # Note that the absolute tolerance is also pretty low (1e-7), mostly
        # for comparison against zero values in the approximate case
        d = None
        if rtol_diffuse is None:
            rtol_diffuse = self.rtol_diffuse
        if rtol_diffuse is not None:
            d = self.d
            # rtol_diffuse == np.inf means "skip the diffuse periods entirely"
            if rtol_diffuse != np.inf:
                assert_allclose(actual.T[:d], desired.T[:d],
                                rtol=rtol_diffuse, atol=self.atol_diffuse)
        assert_allclose(actual.T[d:], desired.T[d:], rtol=self.rtol,
                        atol=self.atol)

    # - Filtered results tests -----------------------------------------------

    def test_forecasts(self, rtol_diffuse=None):
        actual = self.results_a.forecasts
        # Fixed: previously compared results_a against itself, which made
        # this test vacuous; compare against results_b like the other tests.
        desired = self.results_b.forecasts
        self.check_object(actual, desired, rtol_diffuse)

    def test_forecasts_error(self, rtol_diffuse=None):
        actual = self.results_a.forecasts_error
        # Fixed: previously compared results_a against itself (vacuous test).
        desired = self.results_b.forecasts_error
        self.check_object(actual, desired, rtol_diffuse)

    def test_forecasts_error_cov(self, rtol_diffuse=None):
        actual = self.results_a.forecasts_error_cov
        desired = self.results_b.forecasts_error_cov
        self.check_object(actual, desired, rtol_diffuse)

    def test_filtered_state(self, rtol_diffuse=1e-5):
        # Note: we do want to check the diffuse values here, with a reduced
        # tolerance. See the note before the smoothed values for additional
        # details.
        actual = self.results_a.filtered_state
        desired = self.results_b.filtered_state
        self.check_object(actual, desired, rtol_diffuse)

    def test_filtered_state_cov(self, rtol_diffuse=None):
        actual = self.results_a.filtered_state_cov
        desired = self.results_b.filtered_state_cov
        self.check_object(actual, desired, rtol_diffuse)

    def test_predicted_state(self, rtol_diffuse=None):
        actual = self.results_a.predicted_state
        desired = self.results_b.predicted_state
        self.check_object(actual, desired, rtol_diffuse)

    def test_predicted_state_cov(self, rtol_diffuse=None):
        actual = self.results_a.predicted_state_cov
        desired = self.results_b.predicted_state_cov
        self.check_object(actual, desired, rtol_diffuse)

    def test_kalman_gain(self, rtol_diffuse=None):
        actual = self.results_a.kalman_gain
        desired = self.results_b.kalman_gain
        self.check_object(actual, desired, rtol_diffuse)

    def test_loglike(self, rtol_diffuse=None):
        # results_b may store either a scalar loglikelihood or per-observation
        # values; compare accordingly.
        if np.isscalar(self.results_b.llf_obs):
            actual = np.sum(self.results_a.llf_obs)
            desired = self.results_b.llf_obs
            assert_allclose(actual, desired)
        else:
            actual = self.results_a.llf_obs
            desired = self.results_b.llf_obs
            self.check_object(actual, desired, rtol_diffuse)

    # - Smoothed output tests ------------------------------------------------
    # Note: for smoothed states, we do want to check some of the diffuse
    # values even in the approximate case, but with reduced precision. Note
    # also that there are cases that demonstrate the numerical error
    # associated with the approximate method, and so some specific tests are
    # overridden in certain cases, since they would not pass.

    def test_smoothed_state(self, rtol_diffuse=1e-5):
        actual = self.results_a.smoothed_state
        desired = self.results_b.smoothed_state
        self.check_object(actual, desired, rtol_diffuse)

    def test_smoothed_state_cov(self, rtol_diffuse=1e-5):
        actual = self.results_a.smoothed_state_cov
        desired = self.results_b.smoothed_state_cov
        self.check_object(actual, desired, rtol_diffuse)

    def test_smoothed_state_autocov(self, rtol_diffuse=None):
        actual = self.results_a.smoothed_state_autocov
        desired = self.results_b.smoothed_state_autocov
        self.check_object(actual, desired, rtol_diffuse)

    def test_smoothed_measurement_disturbance(self, rtol_diffuse=1e-5):
        actual = self.results_a.smoothed_measurement_disturbance
        desired = self.results_b.smoothed_measurement_disturbance
        self.check_object(actual, desired, rtol_diffuse)

    def test_smoothed_measurement_disturbance_cov(self, rtol_diffuse=1e-5):
        actual = self.results_a.smoothed_measurement_disturbance_cov
        desired = self.results_b.smoothed_measurement_disturbance_cov
        self.check_object(actual, desired, rtol_diffuse)

    def test_smoothed_state_disturbance(self, rtol_diffuse=1e-5):
        actual = self.results_a.smoothed_state_disturbance
        desired = self.results_b.smoothed_state_disturbance
        self.check_object(actual, desired, rtol_diffuse)

    def test_smoothed_state_disturbance_cov(self, rtol_diffuse=1e-5):
        actual = self.results_a.smoothed_state_disturbance_cov
        desired = self.results_b.smoothed_state_disturbance_cov
        self.check_object(actual, desired, rtol_diffuse)

    # - Smoothed intermediate tests ------------------------------------------
    # This isn't computed in the univariate method or by KFAS
    # def test_smoothing_error(self, rtol_diffuse=None):
    #     actual = self.results_a.smoothing_error
    #     desired = self.results_b.smoothing_error
    #     self.check_object(actual, desired, rtol_diffuse)

    def test_scaled_smoothed_estimator(self, rtol_diffuse=1e-5):
        actual = self.results_a.scaled_smoothed_estimator
        desired = self.results_b.scaled_smoothed_estimator
        self.check_object(actual, desired, rtol_diffuse)

    def test_scaled_smoothed_estimator_cov(self, rtol_diffuse=1e-5):
        actual = self.results_a.scaled_smoothed_estimator_cov
        desired = self.results_b.scaled_smoothed_estimator_cov
        self.check_object(actual, desired, rtol_diffuse)

    # - Diffuse objects tests ------------------------------------------------
    # Note: these can't be checked against the approximate diffuse method.

    def test_forecasts_error_diffuse_cov(self, rtol_diffuse=None):
        actual = self.results_a.forecasts_error_diffuse_cov
        desired = self.results_b.forecasts_error_diffuse_cov
        self.check_object(actual, desired, rtol_diffuse)

    def test_predicted_diffuse_state_cov(self, rtol_diffuse=None):
        actual = self.results_a.predicted_diffuse_state_cov
        desired = self.results_b.predicted_diffuse_state_cov
        self.check_object(actual, desired, rtol_diffuse)

    # We don't currently store this array
    # def test_kalman_gain_diffuse(self, rtol_diffuse=None):
    #     actual = self.results_a.
    #     desired = self.results_b.
    #     self.check_object(actual, desired, rtol_diffuse)

    def test_scaled_smoothed_diffuse_estimator(self, rtol_diffuse=None):
        actual = self.results_a.scaled_smoothed_diffuse_estimator
        desired = self.results_b.scaled_smoothed_diffuse_estimator
        self.check_object(actual, desired, rtol_diffuse)

    def test_scaled_smoothed_diffuse1_estimator_cov(self, rtol_diffuse=None):
        actual = self.results_a.scaled_smoothed_diffuse1_estimator_cov
        desired = self.results_b.scaled_smoothed_diffuse1_estimator_cov
        self.check_object(actual, desired, rtol_diffuse)

    def test_scaled_smoothed_diffuse2_estimator_cov(self, rtol_diffuse=None):
        actual = self.results_a.scaled_smoothed_diffuse2_estimator_cov
        desired = self.results_b.scaled_smoothed_diffuse2_estimator_cov
        self.check_object(actual, desired, rtol_diffuse)

    # - Simulation smoother results tests ------------------------------------
    # def test_simulation_smoothed_state(self):
    #     assert_allclose(
    #         self.sim_a.simulated_state,
    #         self.sim_a.simulated_state)

    # def test_simulation_smoothed_measurement_disturbance(self):
    #     assert_allclose(
    #         self.sim_a.simulated_measurement_disturbance,
    #         self.sim_a.simulated_measurement_disturbance)

    # def test_simulation_smoothed_state_disturbance(self):
    #     assert_allclose(
    #         self.sim_a.simulated_state_disturbance,
    #         self.sim_a.simulated_state_disturbance)


class CheckApproximateDiffuseMixin(object):
    """
    Test the exact diffuse initialization against the approximate diffuse
    initialization. By definition, the first few observations will be quite
    different between the exact and approximate approach for many quantities,
    so we do not test them here.
    """
    approximate_diffuse_variance = 1e6

    @classmethod
    def setup_class(cls, *args, **kwargs):
        init_approx = kwargs.pop('init_approx', None)
        super(CheckApproximateDiffuseMixin, cls).setup_class(*args, **kwargs)

        # Get the approximate diffuse results
        kappa = cls.approximate_diffuse_variance
        if init_approx is None:
            init_approx = Initialization(
                cls.ssm.k_states, 'approximate_diffuse',
                approximate_diffuse_variance=kappa)
        cls.ssm.initialize(init_approx)
        cls.results_b = cls.ssm.smooth()

        # Instruct the tests not to test against the first d values
        cls.rtol_diffuse = np.inf

    def test_initialization_approx(self):
        kappa = self.approximate_diffuse_variance
        assert_allclose(self.results_b.initial_state_cov,
                        np.eye(self.ssm.k_states) * kappa)
        assert_equal(self.results_b.initial_diffuse_state_cov, None)


class CheckKFASMixin(object):
    """
    Test against values from KFAS
    """
    @classmethod
    def setup_class(cls, *args, **kwargs):
        kwargs.setdefault('filter_univariate', True)
        super(CheckKFASMixin, cls).setup_class(*args, **kwargs)

        # Get the KFAS results objects
        cls.results_b = kfas_helpers.parse(cls.results_path, cls.ssm)

        # Set some attributes that KFAS does not compute
        cls.results_b.smoothed_state_autocov = None

        # Remove the Kalman gain matrix since KFAS computes it using the
        # non-univariate method
        cls.results_b.kalman_gain = None

        # Remove the filtered_state_cov since KFAS v1.3.1 has a bug for these
        # matrices (they are not even symmetric)
        cls.results_b.filtered_state_cov = None

        # KFAS v1.3.1 seems to compute the loglikelihood incorrectly, so we
        # correct for it here
        # (we need to add back in the constant term for all of the non-missing
        # diffuse observations for which Finf is nonsingular)
        Finf = cls.results_b.forecasts_error_diffuse_cov.T
        Finf_nonsingular_obs = np.c_[[np.diag(Finf_t) for Finf_t in Finf]] > 0
        nonmissing = ~np.isnan(cls.ssm.endog).T
        constant = (-0.5 * np.log(2 * np.pi) *
                    (Finf_nonsingular_obs * nonmissing).sum(axis=1))
        cls.results_b.llf_obs += constant[:cls.results_a.nobs_diffuse].sum()


# - VAR(1) -------------------------------------------------------------------

class CheckVAR1(CheckSSMResults):
    @classmethod
    def setup_class(cls, **kwargs):
        filter_univariate = kwargs.pop('filter_univariate', False)
        cls.mod, cls.ssm = model_var1(**kwargs)
        if filter_univariate:
            cls.ssm.filter_univariate = True
        cls.results_a = cls.ssm.smooth()
        cls.d = cls.results_a.nobs_diffuse

    def test_nobs_diffuse(self):
        assert_allclose(self.d, 1)

    def test_initialization(self):
        assert_allclose(self.results_a.initial_state_cov, 0)
        assert_allclose(self.results_a.initial_diffuse_state_cov, np.eye(2))


class TestVAR1_Approx(CheckApproximateDiffuseMixin, CheckVAR1):
    pass


class TestVAR1_KFAS(CheckKFASMixin, CheckVAR1):
    results_path = os.path.join(
        current_path, 'results', 'results_exact_initial_var1_R.csv')


# - VAR(1) + Measurement error -----------------------------------------------

class CheckVAR1MeasurementError(CheckVAR1):
    @classmethod
    def setup_class(cls, **kwargs):
        kwargs['measurement_error'] = True
        super(CheckVAR1MeasurementError, cls).setup_class(**kwargs)


class TestVAR1MeasurementError_Approx(CheckApproximateDiffuseMixin,
                                      CheckVAR1MeasurementError):
    # Note: somewhat fragile, we need to increase the approximate variance to
    # 1e9 for the tests to pass at the appropriate level of precision, but
    # we can't increase too much more than this because then we start to get
    # numerical errors (e.g. 1e10 is fine but 1e11 doesn't pass)
    approximate_diffuse_variance = 1e9

    def test_smoothed_measurement_disturbance_cov(self, rtol_diffuse=None):
        # Note: this test would fail here with most rtol, because
        # this is an example where the numerical errors associated with the
        # approximate method result in noticeable errors
        # term: (x is the exact method, y is the approximate method)
        # x: array([[[3.355072, 0.      ],
        #            [0.      , 4.221227]]])
        # y: array([[[ 3.355072, -0.600856],
        #            [-0.600856,  4.221227]]])
        super(TestVAR1MeasurementError_Approx,
              self).test_smoothed_measurement_disturbance_cov(
            rtol_diffuse=rtol_diffuse)


class TestVAR1MeasurementError_KFAS(CheckKFASMixin,
                                    CheckVAR1MeasurementError):
    results_path = os.path.join(
        current_path, 'results',
        'results_exact_initial_var1_measurement_error_R.csv')


# - VAR(1) + Missing data ----------------------------------------------------

class CheckVAR1Missing(CheckVAR1):
    @classmethod
    def setup_class(cls, **kwargs):
        endog = (np.log(
            macrodata[['realgdp','realcons']]).iloc[:21].diff().iloc[1:] * 400)
        endog.iloc[0:5, 0] = np.nan
        endog.iloc[8:12, :] = np.nan
        kwargs['endog'] = endog
        super(CheckVAR1Missing, cls).setup_class(**kwargs)

    def test_nobs_diffuse(self):
        assert_allclose(self.d, 2)


class TestVAR1Missing_Approx(CheckApproximateDiffuseMixin, CheckVAR1Missing):
    # Note: somewhat fragile, we need to increase the approximate variance to
    # 1e10 for the tests to pass at the appropriate level of precision, but
    # we can't increase it any more than this because then we start to get
    # numerical errors (e.g. 1e11 doesn't pass)
    approximate_diffuse_variance = 1e10

    def test_smoothed_state_cov(self, rtol_diffuse=None):
        # Note: this test would fail here with essentially any rtol, because
        # this is an example where the numerical errors associated with the
        # approximate method result in extreme errors: here a negative
        # variance term: (x is the exact method, y is the approximate method)
        # x: array([[[ 5.601218e+01,  0.000000e+00],
        #            [ 0.000000e+00,  0.000000e+00]],
        # ...
        # y: array([[[-12.083676,   0.      ],
        #            [  0.      ,   0.      ]],
        super(TestVAR1Missing_Approx, self).test_smoothed_state_cov(
            rtol_diffuse=rtol_diffuse)


class TestVAR1Missing_KFAS(CheckKFASMixin, CheckVAR1Missing):
    results_path = os.path.join(
        current_path, 'results', 'results_exact_initial_var1_missing_R.csv')

    def test_forecasts_error_cov(self):
        # TODO: fails for the general version of forecasts_error_cov because
        # (1) the routines in kalman_filter.py fill in values for all
        # variables regardless of missing status and also it uses the
        # multivariate approach rather than the univariate approach, and (2)
        # KFAS fills in values for all variables regardless of missing status
        # (but does use the univariate method).
        # Here we remove the off-diagonal elements so that the test passes
        # (but note that this is **not** a general solution since it depends
        # on which variables are missing).
        # Fixed: previously saved a *view* (``[:]``), so the in-place zeroing
        # below also mutated the backup and the restore was a no-op; use an
        # actual copy so the original values are restored for other tests.
        bak = self.results_a.forecasts_error_cov.copy()
        self.results_a.forecasts_error_cov[0, 1, :] = 0
        self.results_a.forecasts_error_cov[1, 0, :] = 0
        super(TestVAR1Missing_KFAS, self).test_forecasts_error_cov()
        self.results_a.forecasts_error_cov = bak


# - VAR(1) + Mixed stationary / diffuse initialization -----------------------

class CheckVAR1Mixed(CheckVAR1):
    @classmethod
    def setup_class(cls, **kwargs):
        k_states = 2

        # First state diffuse, second state stationary
        init = Initialization(k_states)
        init.set(0, 'diffuse')
        init.set(1, 'stationary')

        if kwargs.pop('approx', False):
            init_approx = Initialization(k_states)
            init_approx.set(0, 'approximate_diffuse')
            init_approx.set(1, 'stationary')
            kwargs['init_approx'] = init_approx

        super(CheckVAR1Mixed, cls).setup_class(init=init, **kwargs)

    def test_nobs_diffuse(self):
        assert_allclose(self.d, 1)

    def test_initialization(self):
        stationary_init = 3.5714285714285716
        assert_allclose(self.results_a.initial_state_cov,
                        np.diag([0, stationary_init]))
        assert_allclose(self.results_a.initial_diffuse_state_cov,
                        np.diag([1, 0]))


class TestVAR1Mixed_Approx(CheckVAR1Mixed, CheckApproximateDiffuseMixin,
                           CheckVAR1):
    @classmethod
    def setup_class(cls, **kwargs):
        kwargs['approx'] = True
        super(TestVAR1Mixed_Approx, cls).setup_class(**kwargs)

    def test_initialization_approx(self):
        stationary_init = 3.5714285714285716
        kappa = self.approximate_diffuse_variance
        assert_allclose(self.results_b.initial_state_cov,
                        np.diag([kappa, stationary_init]))
        assert_equal(self.results_b.initial_diffuse_state_cov, None)


class TestVAR1Mixed_KFAS(CheckVAR1Mixed, CheckKFASMixin, CheckVAR1):
    # TODO: fails
    results_path = os.path.join(
        current_path, 'results', 'results_exact_initial_var1_mixed_R.csv')

    # TODO: KFAS disagrees for the diffuse observations for all of these
    # states, but it appears that they have a bug (e.g. since the approximate
    # diffuse case agrees with us), so we should double-check against a third
    # package (RATS?)
    def test_predicted_state(self):
        super(TestVAR1Mixed_KFAS, self).test_predicted_state(
            rtol_diffuse=np.inf)

    def test_filtered_state(self):
        super(TestVAR1Mixed_KFAS, self).test_filtered_state(
            rtol_diffuse=np.inf)

    def test_smoothed_state(self):
        super(TestVAR1Mixed_KFAS, self).test_smoothed_state(
            rtol_diffuse=np.inf)


# - DFM ----------------------------------------------------------------------

class CheckDFM(CheckSSMResults):
    @classmethod
    def setup_class(cls, **kwargs):
        filter_univariate = kwargs.pop('filter_univariate', False)
        cls.mod, cls.ssm = model_dfm(**kwargs)
        if filter_univariate:
            cls.ssm.filter_univariate = True
        cls.results_a = cls.ssm.smooth()
        cls.d = cls.results_a.nobs_diffuse

    def test_nobs_diffuse(self):
        assert_allclose(self.d, 2)

    def test_initialization(self):
        assert_allclose(self.results_a.initial_state_cov, 0)
        assert_allclose(self.results_a.initial_diffuse_state_cov, np.eye(2))


class TestDFM_Approx(CheckApproximateDiffuseMixin, CheckDFM):
    # Note: somewhat fragile, we need to increase the approximate variance to
    # 5e10 for the tests to pass at the appropriate level of precision, but
    # we can't increase it too much more than this because then we start to
    # get numerical errors (e.g. 1e11 works but 1e12 doesn't pass)
    approximate_diffuse_variance = 5e10


class TestDFM_KFAS(CheckKFASMixin, CheckDFM):
    results_path = os.path.join(
        current_path, 'results', 'results_exact_initial_dfm_R.csv')

    # TODO: KFAS disagrees for the diffuse observations for all of these
    # states, but it appears that they have a bug (e.g. since the approximate
    # diffuse case agrees with us), so we should double-check against a third
    # package (RATS?)
    def test_predicted_state(self):
        super(TestDFM_KFAS, self).test_predicted_state(rtol_diffuse=np.inf)

    def test_filtered_state(self):
        super(TestDFM_KFAS, self).test_filtered_state(rtol_diffuse=np.inf)

    def test_smoothed_state(self):
        super(TestDFM_KFAS, self).test_smoothed_state(rtol_diffuse=np.inf)


# - DFM + Collapsed ----------------------------------------------------------

class CheckDFMCollapsed(CheckSSMResults):
    @classmethod
    def setup_class(cls, **kwargs):
        filter_univariate = kwargs.pop('filter_univariate', True)
        cls.mod, cls.ssm = model_dfm(factor_order=1, **kwargs)
        if filter_univariate:
            cls.ssm.filter_univariate = True
        cls.ssm.filter_collapsed = True
        cls.results_a = cls.ssm.smooth()
        cls.d = cls.results_a.nobs_diffuse

    def test_nobs_diffuse(self):
        assert_allclose(self.d, 1)

    def test_initialization(self):
        assert_allclose(self.results_a.initial_state_cov, 0)
        assert_allclose(self.results_a.initial_diffuse_state_cov, np.eye(1))


class TestDFMCollapsed_Approx(CheckApproximateDiffuseMixin, CheckDFMCollapsed):
    # Note: somewhat fragile, we need to increase the approximate variance to
    # 1e9 for the tests to pass at the appropriate level of precision, but
    # we can't increase it too much more than this because then we start to
    # get numerical errors (e.g. 1e10 doesn't pass)
    approximate_diffuse_variance = 1e9

# Note: we cannot test against KFAS, since it doesn't support collapsed
# filtering
# class TestDFMCollapsed_KFAS(CheckKFASMixin, TestDFMCollapsed):
#     results_path = os.path.join(
#         current_path, 'results', '')


# - TODO: additional tests ---------------------------------------------------
# - Local level model, above
# - Local linear trend model, above
# - Common level model, above
# - multivariate test with non-diagonal observation covariance matrix
# - simulation smoother


@pytest.mark.xfail
def test_irrelevant_state():
    # This test records a case in which exact diffuse initialization leads to
    # numerical problems, because the existence of an irrelevant state
    # initialized as diffuse means that there is never a transition to the
    # usual Kalman filter.
    endog = macrodata.infl

    spec = {
        'freq_seasonal': [{'period': 8, 'harmonics': 6},
                          {'period': 36, 'harmonics': 6}]
    }

    # Approximate diffuse version
    mod = UnobservedComponents(endog, 'llevel', **spec)
    mod.ssm.initialization = Initialization(mod.k_states,
                                            'approximate_diffuse')
    res = mod.smooth([3.4, 7.2, 0.01, 0.01])

    # Exact diffuse version
    mod2 = UnobservedComponents(endog, 'llevel', **spec)
    mod2.ssm.filter_univariate = True
    mod2.ssm.initialization = Initialization(mod2.k_states, 'diffuse')
    res2 = mod2.smooth([3.4, 7.2, 0.01, 0.01])

    # Check that e.g. the filtered state for the level is equal
    assert_allclose(res.filtered_state[0, 25:],
                    res2.filtered_state[0, 25:], atol=1e-5)
nilq/baby-python
python
# Generated from astLogic/propositional.g4 by ANTLR 4.7.2
# encoding: utf-8
# NOTE(review): this file is machine-generated by the ANTLR tool.  Do not
# edit it by hand; change the grammar (propositional.g4) and regenerate.
from antlr4 import *
from io import StringIO
# NOTE(review): typing.io is deprecated and removed in newer Python versions;
# regenerating with a newer ANTLR release fixes this import.
from typing.io import TextIO
import sys


def serializedATN():
    # Serialized ATN (augmented transition network) for the grammar.  The
    # escape sequences below are ANTLR's opaque wire format: never modify
    # them by hand.
    with StringIO() as buf:
        buf.write("\3\u608b\ua72a\u8133\ub9ed\u417c\u3be7\u7786\u5964\3\f")
        buf.write("B\4\2\t\2\3\2\3\2\3\2\3\2\3\2\3\2\3\2\3\2\3\2\5\2\16\n")
        buf.write("\2\3\2\7\2\21\n\2\f\2\16\2\24\13\2\3\2\3\2\3\2\3\2\3\2")
        buf.write("\5\2\33\n\2\3\2\7\2\36\n\2\f\2\16\2!\13\2\3\2\3\2\3\2")
        buf.write("\5\2&\n\2\3\2\7\2)\n\2\f\2\16\2,\13\2\3\2\3\2\3\2\5\2")
        buf.write("\61\n\2\3\2\7\2\64\n\2\f\2\16\2\67\13\2\3\2\5\2:\n\2\3")
        buf.write("\2\3\2\3\2\3\2\5\2@\n\2\3\2\2\2\3\2\2\3\3\2\5\t\2N\2?")
        buf.write("\3\2\2\2\4\5\7\3\2\2\5\6\5\2\2\2\6\7\7\4\2\2\7@\3\2\2")
        buf.write("\2\b\t\7\3\2\2\t\n\5\2\2\2\n\22\7\4\2\2\13\r\t\2\2\2\f")
        buf.write("\16\7\n\2\2\r\f\3\2\2\2\r\16\3\2\2\2\16\17\3\2\2\2\17")
        buf.write("\21\5\2\2\2\20\13\3\2\2\2\21\24\3\2\2\2\22\20\3\2\2\2")
        buf.write("\22\23\3\2\2\2\23@\3\2\2\2\24\22\3\2\2\2\25\26\7\3\2\2")
        buf.write("\26\27\5\2\2\2\27\37\7\4\2\2\30\32\t\2\2\2\31\33\7\n\2")
        buf.write("\2\32\31\3\2\2\2\32\33\3\2\2\2\33\34\3\2\2\2\34\36\7\13")
        buf.write("\2\2\35\30\3\2\2\2\36!\3\2\2\2\37\35\3\2\2\2\37 \3\2\2")
        buf.write("\2 @\3\2\2\2!\37\3\2\2\2\"*\7\13\2\2#%\t\2\2\2$&\7\n\2")
        buf.write("\2%$\3\2\2\2%&\3\2\2\2&\'\3\2\2\2\')\5\2\2\2(#\3\2\2\2")
        buf.write("),\3\2\2\2*(\3\2\2\2*+\3\2\2\2+@\3\2\2\2,*\3\2\2\2-\65")
        buf.write("\7\13\2\2.\60\t\2\2\2/\61\7\n\2\2\60/\3\2\2\2\60\61\3")
        buf.write("\2\2\2\61\62\3\2\2\2\62\64\7\13\2\2\63.\3\2\2\2\64\67")
        buf.write("\3\2\2\2\65\63\3\2\2\2\65\66\3\2\2\2\66@\3\2\2\2\67\65")
        buf.write("\3\2\2\28:\7\n\2\298\3\2\2\29:\3\2\2\2:;\3\2\2\2;<\7\3")
        buf.write("\2\2<=\5\2\2\2=>\7\4\2\2>@\3\2\2\2?\4\3\2\2\2?\b\3\2\2")
        buf.write("\2?\25\3\2\2\2?\"\3\2\2\2?-\3\2\2\2?9\3\2\2\2@\3\3\2\2")
        buf.write("\2\f\r\22\32\37%*\60\659?")
        return buf.getvalue()


class propositionalParser ( Parser ):
    """ANTLR-generated recursive-descent parser for propositional formulas."""

    grammarFileName = "propositional.g4"

    atn = ATNDeserializer().deserialize(serializedATN())

    decisionsToDFA = [ DFA(ds, i) for i, ds in enumerate(atn.decisionToState) ]

    sharedContextCache = PredictionContextCache()

    # Display names for tokens that have a fixed literal form.
    literalNames = [ "<INVALID>", "'('", "')'", "'IMPLIES'", "'REQUIRES'",
                     "'EXCLUDES'", "'AND'", "'OR'", "'NOT'" ]

    symbolicNames = [ "<INVALID>", "<INVALID>", "<INVALID>", "IMPLIES",
                      "REQUIRES", "EXCLUDES", "AND", "OR", "NOT", "FEATURE",
                      "WS" ]

    RULE_formula = 0

    ruleNames = [ "formula" ]

    # Token type constants (must match the lexer).
    EOF = Token.EOF
    T__0=1
    T__1=2
    IMPLIES=3
    REQUIRES=4
    EXCLUDES=5
    AND=6
    OR=7
    NOT=8
    FEATURE=9
    WS=10

    def __init__(self, input:TokenStream, output:TextIO = sys.stdout):
        super().__init__(input, output)
        self.checkVersion("4.7.2")
        self._interp = ParserATNSimulator(self, self.atn, self.decisionsToDFA, self.sharedContextCache)
        self._predicates = None

    class FormulaContext(ParserRuleContext):
        """Parse-tree context for the `formula` rule."""

        def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
            super().__init__(parent, invokingState)
            self.parser = parser

        def formula(self, i:int=None):
            # With no index, return all nested formula subtrees; otherwise
            # return the i-th one.
            if i is None:
                return self.getTypedRuleContexts(propositionalParser.FormulaContext)
            else:
                return self.getTypedRuleContext(propositionalParser.FormulaContext,i)

        def IMPLIES(self, i:int=None):
            if i is None:
                return self.getTokens(propositionalParser.IMPLIES)
            else:
                return self.getToken(propositionalParser.IMPLIES, i)

        def REQUIRES(self, i:int=None):
            if i is None:
                return self.getTokens(propositionalParser.REQUIRES)
            else:
                return self.getToken(propositionalParser.REQUIRES, i)

        def EXCLUDES(self, i:int=None):
            if i is None:
                return self.getTokens(propositionalParser.EXCLUDES)
            else:
                return self.getToken(propositionalParser.EXCLUDES, i)

        def AND(self, i:int=None):
            if i is None:
                return self.getTokens(propositionalParser.AND)
            else:
                return self.getToken(propositionalParser.AND, i)

        def OR(self, i:int=None):
            if i is None:
                return self.getTokens(propositionalParser.OR)
            else:
                return self.getToken(propositionalParser.OR, i)

        def NOT(self, i:int=None):
            if i is None:
                return self.getTokens(propositionalParser.NOT)
            else:
                return self.getToken(propositionalParser.NOT, i)

        def FEATURE(self, i:int=None):
            if i is None:
                return self.getTokens(propositionalParser.FEATURE)
            else:
                return self.getToken(propositionalParser.FEATURE, i)

        def getRuleIndex(self):
            return propositionalParser.RULE_formula

        def enterRule(self, listener:ParseTreeListener):
            if hasattr( listener, "enterFormula" ):
                listener.enterFormula(self)

        def exitRule(self, listener:ParseTreeListener):
            if hasattr( listener, "exitFormula" ):
                listener.exitFormula(self)

        def accept(self, visitor:ParseTreeVisitor):
            if hasattr( visitor, "visitFormula" ):
                return visitor.visitFormula(self)
            else:
                return visitor.visitChildren(self)

    def formula(self):
        """Parse one `formula` (ANTLR-generated; six prediction alternatives)."""
        localctx = propositionalParser.FormulaContext(self, self._ctx, self.state)
        self.enterRule(localctx, 0, self.RULE_formula)
        self._la = 0 # Token type
        try:
            self.state = 61
            self._errHandler.sync(self)
            la_ = self._interp.adaptivePredict(self._input,9,self._ctx)
            if la_ == 1:
                # '(' formula ')'
                self.enterOuterAlt(localctx, 1)
                self.state = 2
                self.match(propositionalParser.T__0)
                self.state = 3
                self.formula()
                self.state = 4
                self.match(propositionalParser.T__1)
                pass

            elif la_ == 2:
                # '(' formula ')' followed by (operator NOT? formula)*
                self.enterOuterAlt(localctx, 2)
                self.state = 6
                self.match(propositionalParser.T__0)
                self.state = 7
                self.formula()
                self.state = 8
                self.match(propositionalParser.T__1)
                self.state = 16
                self._errHandler.sync(self)
                _alt = self._interp.adaptivePredict(self._input,1,self._ctx)
                while _alt!=2 and _alt!=ATN.INVALID_ALT_NUMBER:
                    if _alt==1:
                        self.state = 9
                        _la = self._input.LA(1)
                        if not((((_la) & ~0x3f) == 0 and ((1 << _la) & ((1 << propositionalParser.IMPLIES) | (1 << propositionalParser.REQUIRES) | (1 << propositionalParser.EXCLUDES) | (1 << propositionalParser.AND) | (1 << propositionalParser.OR))) != 0)):
                            self._errHandler.recoverInline(self)
                        else:
                            self._errHandler.reportMatch(self)
                            self.consume()
                        self.state = 11
                        self._errHandler.sync(self)
                        la_ = self._interp.adaptivePredict(self._input,0,self._ctx)
                        if la_ == 1:
                            self.state = 10
                            self.match(propositionalParser.NOT)

                        self.state = 13
                        self.formula()
                    self.state = 18
                    self._errHandler.sync(self)
                    _alt = self._interp.adaptivePredict(self._input,1,self._ctx)

                pass

            elif la_ == 3:
                # '(' formula ')' followed by (operator NOT? FEATURE)*
                self.enterOuterAlt(localctx, 3)
                self.state = 19
                self.match(propositionalParser.T__0)
                self.state = 20
                self.formula()
                self.state = 21
                self.match(propositionalParser.T__1)
                self.state = 29
                self._errHandler.sync(self)
                _alt = self._interp.adaptivePredict(self._input,3,self._ctx)
                while _alt!=2 and _alt!=ATN.INVALID_ALT_NUMBER:
                    if _alt==1:
                        self.state = 22
                        _la = self._input.LA(1)
                        if not((((_la) & ~0x3f) == 0 and ((1 << _la) & ((1 << propositionalParser.IMPLIES) | (1 << propositionalParser.REQUIRES) | (1 << propositionalParser.EXCLUDES) | (1 << propositionalParser.AND) | (1 << propositionalParser.OR))) != 0)):
                            self._errHandler.recoverInline(self)
                        else:
                            self._errHandler.reportMatch(self)
                            self.consume()
                        self.state = 24
                        self._errHandler.sync(self)
                        _la = self._input.LA(1)
                        if _la==propositionalParser.NOT:
                            self.state = 23
                            self.match(propositionalParser.NOT)

                        self.state = 26
                        self.match(propositionalParser.FEATURE)
                    self.state = 31
                    self._errHandler.sync(self)
                    _alt = self._interp.adaptivePredict(self._input,3,self._ctx)

                pass

            elif la_ == 4:
                # FEATURE followed by (operator NOT? formula)*
                self.enterOuterAlt(localctx, 4)
                self.state = 32
                self.match(propositionalParser.FEATURE)
                self.state = 40
                self._errHandler.sync(self)
                _alt = self._interp.adaptivePredict(self._input,5,self._ctx)
                while _alt!=2 and _alt!=ATN.INVALID_ALT_NUMBER:
                    if _alt==1:
                        self.state = 33
                        _la = self._input.LA(1)
                        if not((((_la) & ~0x3f) == 0 and ((1 << _la) & ((1 << propositionalParser.IMPLIES) | (1 << propositionalParser.REQUIRES) | (1 << propositionalParser.EXCLUDES) | (1 << propositionalParser.AND) | (1 << propositionalParser.OR))) != 0)):
                            self._errHandler.recoverInline(self)
                        else:
                            self._errHandler.reportMatch(self)
                            self.consume()
                        self.state = 35
                        self._errHandler.sync(self)
                        la_ = self._interp.adaptivePredict(self._input,4,self._ctx)
                        if la_ == 1:
                            self.state = 34
                            self.match(propositionalParser.NOT)

                        self.state = 37
                        self.formula()
                    self.state = 42
                    self._errHandler.sync(self)
                    _alt = self._interp.adaptivePredict(self._input,5,self._ctx)

                pass

            elif la_ == 5:
                # FEATURE followed by (operator NOT? FEATURE)*
                self.enterOuterAlt(localctx, 5)
                self.state = 43
                self.match(propositionalParser.FEATURE)
                self.state = 51
                self._errHandler.sync(self)
                _alt = self._interp.adaptivePredict(self._input,7,self._ctx)
                while _alt!=2 and _alt!=ATN.INVALID_ALT_NUMBER:
                    if _alt==1:
                        self.state = 44
                        _la = self._input.LA(1)
                        if not((((_la) & ~0x3f) == 0 and ((1 << _la) & ((1 << propositionalParser.IMPLIES) | (1 << propositionalParser.REQUIRES) | (1 << propositionalParser.EXCLUDES) | (1 << propositionalParser.AND) | (1 << propositionalParser.OR))) != 0)):
                            self._errHandler.recoverInline(self)
                        else:
                            self._errHandler.reportMatch(self)
                            self.consume()
                        self.state = 46
                        self._errHandler.sync(self)
                        _la = self._input.LA(1)
                        if _la==propositionalParser.NOT:
                            self.state = 45
                            self.match(propositionalParser.NOT)

                        self.state = 48
                        self.match(propositionalParser.FEATURE)
                    self.state = 53
                    self._errHandler.sync(self)
                    _alt = self._interp.adaptivePredict(self._input,7,self._ctx)

                pass

            elif la_ == 6:
                # NOT? '(' formula ')'
                self.enterOuterAlt(localctx, 6)
                self.state = 55
                self._errHandler.sync(self)
                _la = self._input.LA(1)
                if _la==propositionalParser.NOT:
                    self.state = 54
                    self.match(propositionalParser.NOT)

                self.state = 57
                self.match(propositionalParser.T__0)
                self.state = 58
                self.formula()
                self.state = 59
                self.match(propositionalParser.T__1)
                pass

        except RecognitionException as re:
            localctx.exception = re
            self._errHandler.reportError(self, re)
            self._errHandler.recover(self, re)
        finally:
            self.exitRule()
        return localctx
nilq/baby-python
python
# String-method examples (comments translated to English from Portuguese).
frase = " Amo Física, é a mãe de todas as ciências asd "  # assign a string to a variable

# print(frase[2])
# print(frase[0:4])  # goes from character 0 to 4, excluding character 4
# print(frase[0:10])
# print(frase[0:15:2])  # goes from char 0 to char 15, stepping 2 by 2
# print(frase[:13])  # starts at the first character and stops at character 13
# print(frase[13:0:-2])  # goes from char 13 backwards; the -2 reverses direction
# print(frase[2::10])  # starts at char 2 and goes to the end, jumping 10 by 10
# --------------------------------------------------------------------------------------------
# print(len(frase))
# print(frase.count('i', 0, 10))  # counts occurrences up to character 10
# print(frase.find('mãe'))  # at which position the string 'mãe' STARTS
# print(frase.find("banana"))  # -1 means the substring does not exist
# --------------------------------------------------------------------------------------------
# print("Física" in frase)  # membership test: is the substring present or not
# print(frase.replace("Física", "Matemática"))
# print(frase.upper())
# print(frase.lower())
# print(frase.capitalize())  # upper-cases only the first char and lower-cases the rest
# print(frase.title())  # upper-cases the first letter of every word
# print(frase.strip())  # removes the surrounding whitespace
# print(frase.rstrip())  # removes trailing whitespace on the right; same idea as lstrip
# --------------------------------------------------------------------------------------------
# print(frase.split())  # builds a list, splitting the string on whitespace; passing an
# argument to () sets a custom separator
# print(frase.split()[1])
# print(frase.split()[1][3])  # shows char number 3 of element 1 of the list
# print("=".join(frase))  # surrounds every character with the separator
nilq/baby-python
python
"""Syntax checks

These checks verify syntax (schema), in particular for the ``extra``
section that is otherwise free-form.
"""

from . import LintCheck, ERROR, WARNING, INFO

# NOTE(review): the class docstrings below appear to double as the
# user-facing lint messages (LintCheck framework convention) — confirm
# before rewording them.


class extra_identifiers_not_list(LintCheck):
    """The extra/identifiers section must be a list

    Example::

       extra:
         identifiers:
            - doi:123

    """
    def check_recipe(self, recipe):
        # Only flag when the key exists but has the wrong type; a missing
        # section is not an error for this check.
        identifiers = recipe.get('extra/identifiers', None)
        if identifiers and not isinstance(identifiers, list):
            self.message(section='extra/identifiers')


class extra_identifiers_not_string(LintCheck):
    """Each item in the extra/identifiers section must be a string

    Example::

       extra:
         identifiers:
            - doi:123

    Note that there is no space around the colon

    """
    # Runs only after the list-type check has passed.
    requires = [extra_identifiers_not_list]

    def check_recipe(self, recipe):
        identifiers = recipe.get('extra/identifiers', [])
        for n, identifier in enumerate(identifiers):
            if not isinstance(identifier, str):
                # Point the message at the offending list element.
                self.message(section=f'extra/identifiers/{n}')


class extra_identifiers_missing_colon(LintCheck):
    """Each item in the extra/identifiers section must be of form ``type:value``

    Example::

       extra:
         identifiers:
            - doi:123

    """
    # Runs only after every identifier is known to be a string.
    requires = [extra_identifiers_not_string]

    def check_recipe(self, recipe):
        identifiers = recipe.get('extra/identifiers', [])
        for n, identifier in enumerate(identifiers):
            if ':' not in identifier:
                self.message(section=f'extra/identifiers/{n}')


class extra_skip_lints_not_list(LintCheck):
    """The extra/skip-lints section must contain a list

    Example::

       extra:
         skip-lints:
            - should_use_compilers

    """
    def check_recipe(self, recipe):
        if not isinstance(recipe.get('extra/skip-lints', []), list):
            self.message(section='extra/skip-lints')
nilq/baby-python
python
#
# All or portions of this file Copyright (c) Amazon.com, Inc. or its affiliates or
# its licensors.
#
# For complete copyright and license terms please see the LICENSE at the root of this
# distribution (the "License"). All use of this software is governed by the License,
# or, if provided, by the license below or the license accompanying this file. Do not
# remove or modify any license notices. This file is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#
# $Revision: #1 $

from resource_manager_common import resource_type_info


class ResourceTypeContext(object):
    """Lazily loads and caches resource type definitions, keyed by stack id."""

    def __init__(self, context):
        self.__context = context
        # Previously loaded mappings; avoids repeated S3 round trips.
        self.__mappings = {}

    def get_type_definitions_for_stack_id(self, stack_id, s3_client=None):
        """Return the resource type mapping for ``stack_id``.

        Loads the definitions for the stack and its ancestors from the
        configuration bucket on first use and caches them for later calls.

        :param stack_id: identifier of the CloudFormation stack to look up.
        :param s3_client: optional S3 client; a default one is created from
            the context when omitted.
        """
        cached = self.__mappings.get(stack_id, None)
        if cached:
            return cached

        # Cache miss: fetch the type definitions from the configuration bucket.
        session = self.__context.aws.session
        if s3_client is None:
            s3_client = self.__context.aws.client('s3')
        stack = self.__context.stack_info.manager.get_stack_info(stack_id, session)
        mapping = resource_type_info.load_resource_type_mapping(
            self.__context.config.configuration_bucket_name,
            stack,
            s3_client
        )

        self.__mappings[stack_id] = mapping
        return mapping
nilq/baby-python
python
# Minimal web.py application exposing tldextract over HTTP (Python 2:
# cStringIO and the django simplejson fallback date this file).
from cStringIO import StringIO

import tldextract
import web

try:
    import json
except ImportError:
    # Older environments without the stdlib json module.
    from django.utils import simplejson as json


# web.py URL map: path pattern followed by the handler class name.
urls = (
    '/api/extract', 'Extract',
    '/api/re', 'TLDSet',
    '/test', 'Test',
)

class Extract:
    # GET /api/extract?url=... -> JSON with subdomain/domain/suffix parts.
    def GET(self):
        url = web.input(url='').url
        if not url:
            # Missing ?url= parameter -> HTTP 400.
            return web.webapi.badrequest()

        ext = tldextract.extract(url)._asdict()
        web.header('Content-Type', 'application/json')
        return json.dumps(ext) + '\n'

class TLDSet:
    # GET /api/re -> the full known-TLD list as an HTML page.
    def GET(self):
        # NOTE(review): relies on tldextract's private _get_tld_extractor —
        # may break across tldextract versions.
        extractor = tldextract.tldextract._get_tld_extractor()
        web.header('Content-Type', 'text/html; charset=utf-8')
        return '<br/>'.join(sorted(extractor.tlds))

class Test:
    # GET /test -> runs tldextract's self-tests and returns their output.
    def GET(self):
        stream = StringIO()
        tldextract.tldextract.run_tests(stream)
        return stream.getvalue()

app = web.application(urls, globals())
main = app.cgirun()  # CGI entry point used by the hosting environment
nilq/baby-python
python
#!/usr/bin/env python3
"""Log Raspberry Pi stats (CPU temperature, river water level, free disk)
to a Google Sheet via the Sheets API."""
from __future__ import print_function
import pickle
import os.path
import io
import subprocess
# BUGFIX: `import urllib` alone does not import the `urllib.request`
# submodule, so `urllib.request.urlopen` raised AttributeError.
import urllib.request
import json
from googleapiclient.discovery import build
from google_auth_oauthlib.flow import InstalledAppFlow
from google.auth.transport.requests import Request
import datetime

# My Spreadsheet ID ... See google documentation on how to derive this
MY_SPREADSHEET_ID = '193rLLMTHkEk1ER17QpqMCCHqwGSACW-.........'

def update_sheet(sheetname, temperature, waterlevel, var1):
    """Append one row (timestamp, temperature, waterlevel, var1) to the
    given sheet of MY_SPREADSHEET_ID.

    :param sheetname: name of the sheet (tab) to append to.
    :param temperature: CPU temperature in degrees Celsius.
    :param waterlevel: water level reading in cm.
    :param var1: extra value (here: free disk space in MB).
    """
    # If modifying these scopes, delete the file token.pickle.
    SCOPES = ['https://www.googleapis.com/auth/spreadsheets']
    creds = None
    # The file token.pickle stores the user's access and refresh tokens, and
    # is created automatically when the authorization flow completes for the
    # first time.
    if os.path.exists('token.pickle'):
        with open('token.pickle', 'rb') as token:
            creds = pickle.load(token)
    # If there are no (valid) credentials available, let the user log in.
    if not creds or not creds.valid:
        if creds and creds.expired and creds.refresh_token:
            # Refresh silently using the stored refresh token.
            creds.refresh(Request())
        else:
            # Interactive OAuth flow (opens a browser on first run).
            flow = InstalledAppFlow.from_client_secrets_file(
                'logbuchpi_googleauth.json', SCOPES)
            creds = flow.run_local_server(port=0)
        # Save the credentials for the next run
        with open('token.pickle', 'wb') as token:
            pickle.dump(creds, token)

    service = build('sheets', 'v4', credentials=creds)

    # values is the array of rows we are appending; here a single row.
    values = [
        [
            str(datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')),
            temperature,
            waterlevel,
            var1
        ]
    ]
    body = {
        'values': values
    }
    # Call the append API to perform the operation; INSERT_ROWS adds the row
    # after the last non-empty row of the A:D range.
    service.spreadsheets().values().append(
        spreadsheetId=MY_SPREADSHEET_ID,
        range=sheetname + '!A1:D1',
        valueInputOption='USER_ENTERED',
        insertDataOption='INSERT_ROWS',
        body=body).execute()

def main():
    """Read the sensors/services, then append the readings to the sheet."""
    # CPU temperature is exposed in millidegrees Celsius by the kernel.
    with open("/sys/class/thermal/thermal_zone0/temp", "r") as f:
        tempC = float(f.readline()) / 1000

    # Water level for Linz from the public endpoint; the response is a JSON
    # array whose element 2 holds the level in cm.
    url = 'http://nichtzuhaben.at/level/index.php?l=1'
    with urllib.request.urlopen(url) as response:
        data = json.loads(response.read())
    waterlevel = int(data[2])

    # Free disk space of the root filesystem in MB.  shell=True is needed
    # for the pipeline; the command string is fixed, not user input.
    freedisk_cmd = "df -h -BM | grep root | cut -d 'M' -f3"
    freedisk_str = int(subprocess.Popen(
        freedisk_cmd, shell=True,
        stdout=subprocess.PIPE).stdout.read().strip())

    print('CPU Temperature: %f °C' % tempC)
    print('Waterlevel Linz: %i cm' % waterlevel)
    print('Free Disk Space: %i MByte' % freedisk_str)

    update_sheet("Logbuchpi_Log", tempC, waterlevel, freedisk_str)

if __name__ == '__main__':
    main()
nilq/baby-python
python