text stringlengths 38 1.54M |
|---|
import shutil
import pathlib as pl
import pytest
import click.testing
from click.testing import CliRunner
from .context import make_gtfs, DATA_DIR
from make_gtfs import *
from make_gtfs.cli import *
runner = CliRunner()
def rm_paths(*paths):
    """
    Delete each of the given file or directory paths, if it exists.
    """
    for raw in paths:
        path = pl.Path(raw)
        if not path.exists():
            continue
        if path.is_file():
            path.unlink()
        else:
            shutil.rmtree(str(path))
@pytest.mark.slow
def test_make_gtfs():
    """CLI smoke test: building to a .zip path yields a file, building to a
    bare path yields a directory; clean up before and after."""
    src = DATA_DIR / "auckland"
    zip_target = DATA_DIR / "bingo.zip"
    dir_target = DATA_DIR / "bingo"
    rm_paths(zip_target, dir_target)
    for target, type_check in ((zip_target, pl.Path.is_file),
                               (dir_target, pl.Path.is_dir)):
        outcome = runner.invoke(make_gtfs, [str(src), str(target)])
        assert outcome.exit_code == 0
        assert target.exists()
        assert type_check(target)
    rm_paths(zip_target, dir_target)
|
import sys


def first_unique_words(words):
    """Return the words that occur exactly once, in first-appearance order.

    Args:
        words: an iterable of strings.

    Returns:
        list of words whose count is exactly 1, ordered by first appearance.
    """
    counts = {}
    order = []
    for word in words:
        if word in counts:
            counts[word] += 1
        else:
            counts[word] = 1
            order.append(word)
    return [w for w in order if counts[w] == 1]


if __name__ == '__main__':
    # BUG FIX: the original iterated over the characters of the whole input
    # (`for word in line:`) while treating each character as a "word"; split
    # the input on whitespace so the loop really sees words.
    for unique in first_unique_words(sys.stdin.read().split()):
        print(unique)
|
import argparse
import ftrace
from ftrace import Ftrace
from pandas import DataFrame
from ftrace.task import TaskState
from parse_process import ProcessInfoParse
import time
import numpy as np
# CPU-id bitmasks for the two clusters; assumes a big.LITTLE SoC with CPUs
# 0-3 in the little cluster and 4-7 in the big cluster -- TODO confirm.
LITTLE_CLUSTER_MASK = 0x0F
BIG_CLUSTER_MASK = 0xF0
LITTLE_CPUS = ftrace.common.unpack_bitmap(LITTLE_CLUSTER_MASK)
BIG_CPUS = ftrace.common.unpack_bitmap(BIG_CLUSTER_MASK)
ALL_CPUS = LITTLE_CPUS.union(BIG_CPUS)
# Distinct CPU frequencies observed in the trace (filled in further below).
FREQ_ALL_CORES = []
print 'parse argument start'
parser = argparse.ArgumentParser(description='Per-core frequencies')
parser.add_argument('-f', '--file', dest='file',
                    help='File to parse')
parser.add_argument('-pf', '--process file', dest='process_file',
                    help='Process file to parse')
parser.add_argument('-pid', '--process pid', dest='process_pid',
                    help='Process pid')
parser.add_argument('-pn', '--process name', dest='process_name',
                    help='Process name')
args = parser.parse_args()
print 'parse argument end'
# Parse the ftrace capture and the auxiliary process-info dump.
print 'parse trace start'
trace = Ftrace(args.file)
print 'parse trace end'
print 'parse process info start'
process_info = ProcessInfoParse()
process_info.parse_process_info(args.process_file)
print 'parse process info end'
# Per-task scheduling intervals for the whole trace; reused by every pass.
print 'cpu.task_intervals start'
task_interval = trace.cpu.task_intervals()
# task_tid_list = process_info.get_all_task_tid()
print 'cpu.task_intervals end'
print 'main parse start'
# First pass: accumulate RUNNING time for every task in the trace, grouped
# by owning process.
for busy_interval_item in task_interval:
    if busy_interval_item.state == TaskState.RUNNING:
        task_tid = busy_interval_item.task.pid
        # tid 0 is presumably the idle task -- skipped; confirm against the
        # ftrace library's task model.
        if task_tid != 0:
            task_pid = process_info.get_pid(task_tid)
            task_name = process_info.get_taskname_by_tid(task_tid)
            process_name = process_info.get_taskname_by_pid(task_pid)
            process_info.save_running_info_to_pid_list(
                task_pid,
                task_tid,
                process_name,
                task_name,
                busy_interval_item.interval.duration)
total_time = process_info.get_total_running_time()
print 'Total running time = ' + str(total_time)
# Convert per-process totals into percentages of the overall running time.
process_info.cal_percentage(total_time)
process_info.print_result_sort_by_pid()
# process_info.print_result_sort_by_process()
print 'main parse end'
# Optional second pass: break down running time for one process chosen by pid.
if args.process_pid is not None:
    print 'target process parse by process pid start'
    # NOTE(review): argparse delivers args.process_pid as a str; confirm
    # process_info.get_pid() also returns str, otherwise the == comparison
    # below never matches.
    target_pid = args.process_pid  # input your target process id
    process_name = None
    for busy_interval_item in task_interval:
        # print busy_interval_item
        if busy_interval_item.state == TaskState.RUNNING:
            task_tid = busy_interval_item.task.pid
            if task_tid != 0:
                task_pid = process_info.get_pid(task_tid)
                if task_pid == target_pid:
                    task_name = process_info.get_taskname_by_tid(task_tid)
                    # Resolve the display name once, on the first match.
                    if process_name is None:
                        process_name = process_info.get_taskname_by_pid(task_pid)
                    process_info.get_process_running_info(
                        task_pid,
                        task_tid,
                        process_name,
                        task_name,
                        busy_interval_item.interval.duration)
    process_info.print_result_sort_by_process()
    print 'target process parse by process pid end'
if args.process_name is not None:
print 'target process parse by process name start'
target_process_name = args.process_name
target_pid = process_info.get_pid_by_taskname(target_process_name)
print 'target_pid : ' + str(target_pid)
if target_pid is not None:
for busy_interval_item in task_interval:
# print busy_interval_item
if busy_interval_item.state == TaskState.RUNNING:
task_tid = busy_interval_item.task.pid
if task_tid != 0:
task_pid = process_info.get_pid(task_tid)
if task_pid == target_pid:
task_name = process_info.get_taskname_by_tid(task_tid)
if process_name is None:
process_name = process_info.get_taskname_by_pid(task_pid)
process_info.get_process_running_info(
task_pid,
task_tid,
process_name,
task_name,
busy_interval_item.interval.duration)
process_info.print_result_sort_by_process()
print 'target process parse by process name end'
# print "Total trace time = " + str(trace.duration)
# process_info.print_result_sort_by_all()
# process_info.print_result_sort_by_tid()
print 'freq level parse start'
# Pass 1: collect the distinct frequencies seen while any CPU was busy.
for cpu in ALL_CPUS:
    for busy_interval in trace.cpu.busy_intervals(cpu=cpu):
        for freq in trace.cpu.frequency_intervals(cpu=cpu, interval=busy_interval.interval):
            if freq.frequency not in FREQ_ALL_CORES:
                FREQ_ALL_CORES.append(freq.frequency)
FREQ_ALL_CORES.sort()
print FREQ_ALL_CORES
print 'freq level parse end'
print 'freq parse start'
# Pass 2: accumulate, per CPU, the busy time spent at each frequency.
# Rows are CPU ids, columns are the frequencies found above.
df_freq = DataFrame(index = ALL_CPUS, columns=FREQ_ALL_CORES)
df_freq.fillna(0, inplace=True)
for cpu in ALL_CPUS:
    for busy_interval in trace.cpu.busy_intervals(cpu=cpu):
        # print busy_interval
        for freq in trace.cpu.frequency_intervals(cpu=cpu, interval= busy_interval.interval):
            # print freq
            df_freq.loc[cpu, freq.frequency] += freq.interval.duration
print df_freq.to_string()
# Write a timestamped CSV; NOTE(review): index=False drops the CPU-id
# column from the file -- confirm that is intended.
now = time.strftime("%Y-%m-%d-%H_%M_%S",time.localtime(time.time()))
fname = "./" + now + r"_cpu_frequency_intervals.csv"
df_freq.to_csv(fname, index=False)
# df_freq.to_csv("lock_to_launcher_2020_3_23_freq.csv")
print 'freq parse end'
print 'All done'
|
# def solution(A):
# minimum = 0;
# record = 0
# my_sum = sum(A);
# differences = []
# # print(my_sum)
# for val in A:
# record += val
# dif = abs(my_sum-record*2)
# differences.append(dif)
# minimum = min(differences)
# # print(minimum, A.index(minimum))
# ret = A.index(minimum)
# return ret
# solution([3,1,2,4,3])
def solution(A):
    """Return the minimal |sum(left) - sum(right)| over all splits of A.

    The tape is split at positions p = 1..len(A)-1; the left part is A[:p]
    and the right part is A[p:]. Runs in O(n) time, O(1) extra space.

    Args:
        A: non-empty list of integers with at least two elements.

    Returns:
        The minimal absolute difference between the two parts' sums.
    """
    left = A[0]
    right = sum(A) - A[0]
    best = abs(right - left)
    # Slide the split point one element at a time, updating both sums.
    # BUG FIX: the original printed the running difference on every
    # iteration (leftover debug output); only the result is reported now.
    for i in range(1, len(A) - 1):
        left += A[i]
        right -= A[i]
        best = min(best, abs(left - right))
    return best


print(solution([3, 1, 2, 4, 3]))
|
# Repeatedly rewrites the input string: whenever a '>' is found, the digits
# immediately after it are summed into an "explosion strength", and that many
# following non-'>' characters are collected and removed from the string.
# NOTE(review): structure reconstructed from a whitespace-mangled source --
# confirm loop nesting against the original before relying on it.
text = input()
index = 0           # scan position in the (shrinking) text
index2 = 0          # lookahead position used to read the digits after '>'
substring = ''      # characters scheduled for removal on the next pass
explode_number = 0  # remaining explosion strength, carried across passes
while True:
    # Remove the previously collected characters (first occurrence only).
    text = text.replace(substring, '', 1)
    substring = ''
    if index == len(text):
        break
    char = text[index]
    if text[index] == '>':
        # Sum the run of digits that follows the '>'.
        index2 = index
        while True:
            index2 += 1
            char1 = text[index2]
            if text[index2].isdigit():
                explode_number += int(text[index2])
            else:
                break
        # Collect up to explode_number characters after the '>', stopping
        # early at the next '>' (which starts its own explosion).
        for i in range(1, explode_number + 1):
            if text[index + i] != '>':
                substring += text[index + i]
                explode_number -= 1
            else:
                break
    index += 1
print(text)
print(text) |
import numpy as np
# Predicted class label per test image, one label per line.
prediction_labels = [line.rstrip('\n') for line in open('predictions.txt')]
# Predicted windows, one "x,y,width,height" line per image (see the
# unpacking in the localization section below).
window_lines = [line.rstrip('\n') for line in open('windows.txt')]
optimum_windows = [[int(float(line.split(',')[0])),int(float(line.split(',')[1])),int(float(line.split(',')[2])),int(float(line.split(',')[3]))] for line in window_lines]
# Ground truth, one "label,x1,y1,x2,y2" line per image.
lines = [line.rstrip('\n') for line in open('test/bounding_box.txt')]
test_labels = [line.split(',')[0] for line in lines]
x1_test = [int(line.split(',')[1]) for line in lines]
y1_test = [int(line.split(',')[2]) for line in lines]
x2_test = [int(line.split(',')[3]) for line in lines]
y2_test = [int(line.split(',')[4]) for line in lines]
#6.1 Classification accuracy:
# Synset-id -> dense class index 0-9. Built once here; the original
# re-created this dict on every loop iteration.
inverse_label_dict = {'n01615121':0, 'n02099601':1, 'n02123159':2, 'n02129604':3, 'n02317335':4, 'n02391049':5, 'n02410509':6, 'n02422699':7, 'n02481823':8, 'n02504458':9}
class_test_size = np.zeros(10)           # ground-truth sample count per class
true_class_prediction = np.zeros(10)     # true positives per class
false_class_prediction = np.zeros(10)    # false positives per class
total_prediction_num = 0
total_true_num = 0
for prediction, label in zip(prediction_labels, test_labels):
    prediction = inverse_label_dict[prediction]
    label = inverse_label_dict[label]
    if prediction == label:
        total_true_num = total_true_num + 1
        true_class_prediction[prediction] = true_class_prediction[prediction] + 1
    else:
        false_class_prediction[prediction] = false_class_prediction[prediction] + 1
    class_test_size[label] = class_test_size[label] + 1
    total_prediction_num = total_prediction_num + 1
label_dictionary = {0:'n01615121', 1:'n02099601', 2:'n02123159',3:'n02129604', 4:'n02317335', 5:'n02391049', 6:'n02410509', 7:'n02422699', 8:'n02481823',9:'n02504458'}
# Per-class confusion-matrix report.
for key in label_dictionary:
    label = label_dictionary[key]
    print("For the class " + label)
    print("Confusion matrix :")
    print("True Positives: " + str(true_class_prediction[key]))
    print("False Positive: " + str(false_class_prediction[key]))
    print("True Negative: " + str((total_prediction_num - (true_class_prediction[key]+false_class_prediction[key])) - (class_test_size[key]-true_class_prediction[key])))
    print("False Negative: " + str( class_test_size[key]-true_class_prediction[key]))
    # NOTE(review): precision divides by TP+FP and warns/NaNs for a class
    # that is never predicted -- confirm that cannot happen for this set.
    print("Precision : " + str(true_class_prediction[key]/(true_class_prediction[key]+false_class_prediction[key])))
    print("Recall : " + str(true_class_prediction[key]/class_test_size[key]))
    print ("-----------------------------------------------")
print("Overall accuracy: " + str(total_true_num/total_prediction_num))
#6.2 Localization accuracy
# A prediction is counted as localized when the IoU (intersection over
# union) of the predicted and ground-truth boxes exceeds 0.5.
true_localized_num = 0
for window_predicted, x1t, y1t, x2t, y2t, prediction, label in zip(optimum_windows, x1_test, y1_test, x2_test, y2_test, prediction_labels, test_labels):
    x1p, y1p, width, height = window_predicted
    x2p = x1p + width
    y2p = y1p + height
    predicted_window_area = (y2p-y1p) * (x2p-x1p)
    test_window_area = (y2t-y1t) * (x2t - x1t)
    # BUG FIX: when the boxes do not overlap, both (min-max) deltas are
    # negative and their product was a *positive* fake intersection area.
    # Clamp each dimension at zero so disjoint boxes score IoU 0.
    inter_w = max(0.0, min(x2p, x2t) - max(x1p, x1t))
    inter_h = max(0.0, min(y2p, y2t) - max(y1p, y1t))
    intersection_area = inter_w * inter_h
    portion = (intersection_area / (float(predicted_window_area+test_window_area)-intersection_area))
    if portion > 0.50:
        true_localized_num = true_localized_num + 1
print("Localization accuracy: " + str(true_localized_num/total_prediction_num))
|
# -*- coding: utf-8 -*-
from django.conf import settings
from django.http import HttpResponse
from rest_framework import viewsets, status
from rest_framework.response import Response
import itchat
import threading
import time
import hashlib
import xml.etree.ElementTree as EleTree
from bot.serializers.serializers import *
from bot.utils import baidu_api, request_api, bot
class LoginThread(threading.Thread):
    """Background thread that runs the blocking itchat login/message loop."""

    def __init__(self, thread_id, name, counter):
        threading.Thread.__init__(self)
        self.threadID = thread_id
        self.name = name
        self.counter = counter

    def run(self):
        """Log in via QR code (bot.qr_complete receives it), then serve."""
        print("Starting " + self.name)
        itchat.auto_login(picDir=settings.QR_FILE, qrCallback=bot.qr_complete)
        itchat.run()
        print("Exiting " + self.name)
class BotLoginViewSet(viewsets.ModelViewSet):
    """
    API endpoint that starts the WeChat (itchat) login flow and returns
    the login QR code image.
    """
    queryset = Group.objects.all()
    serializer_class = GroupSerializer

    def list(self, request, *args, **kwargs):
        """Kick off a background login thread and respond with the QR PNG.

        Polls once per second for up to ``settings.QR_TIMEOUT`` seconds
        while waiting for the QR callback to populate ``settings.qr_code``.
        """
        # BUG FIX: the original used "is ''" / "is not ''", which compares
        # object identity, not string contents (and is a SyntaxWarning on
        # Python 3.8+); equality is what is meant here.
        if not hasattr(settings, 'access_token') or settings.access_token == '':
            settings.access_token = baidu_api.get_token(settings.INTER_API_KEY, settings.INTER_SECRET_KEY)
        if not hasattr(settings, 'session_id'):
            settings.session_id = ''
        settings.qr_code = ''
        itchat.logout()
        child_thread = LoginThread(1, "Thread-1", 1)
        child_thread.start()
        time_out = 0
        # (dropped the redundant "True and" from the loop condition)
        while time_out <= settings.QR_TIMEOUT:
            time.sleep(1)
            if settings.qr_code != '':
                break
            time_out += 1
        return HttpResponse(settings.qr_code, content_type="image/png")

    def create(self, request, *args, **kwargs):
        """Creating through this endpoint is not supported."""
        return Response('', status=status.HTTP_400_BAD_REQUEST)
class PublicVerifyViewSet(viewsets.ModelViewSet):
    """
    API endpoint implementing the WeChat public-platform server
    verification (GET) and message callback (POST).
    """
    queryset = User.objects.all()
    serializer_class = GroupSerializer

    def list(self, request, *args, **kwargs):
        """Answer the WeChat signature challenge.

        The signature is SHA-1 over the lexicographically sorted
        token/timestamp/nonce triple; on a match, echo back ``echostr``.
        """
        params = request.query_params
        request_api.log(params)
        token = 'xiaobei777'
        # Sort token/timestamp/nonce lexicographically, per the WeChat spec.
        data = [token, params['timestamp'], params['nonce']]
        data.sort()
        # SHA-1 over the concatenation of the sorted values.
        sha1 = hashlib.sha1()
        sha1.update(data[0].encode("utf-8"))
        sha1.update(data[1].encode("utf-8"))
        sha1.update(data[2].encode("utf-8"))
        signature = sha1.hexdigest()
        if signature == params['signature']:
            return HttpResponse(params['echostr'])
        else:
            return HttpResponse('')

    def create(self, request, *args, **kwargs):
        """Handle an incoming WeChat message and reply via the bot."""
        utf8_parser = EleTree.XMLParser(encoding='utf-8')
        xml = EleTree.fromstring(request.body, utf8_parser)
        to_user = xml.find('ToUserName').text
        from_user = xml.find('FromUserName').text
        msg_type = xml.find("MsgType").text
        # create_time = xml.find("CreateTime")
        # Only text messages are answered; everything else gets a hint.
        if msg_type == "text":
            content = xml.find('Content').text
            # BUG FIX: "is ''" compared identity, not equality; use ==.
            if not hasattr(settings, 'access_token') or settings.access_token == '':
                settings.access_token = baidu_api.get_token(settings.INTER_API_KEY, settings.INTER_SECRET_KEY)
            if not hasattr(settings, 'session_id'):
                settings.session_id = ''
            return HttpResponse(bot.reply_text(from_user, to_user, baidu_api.get_response(content, from_user)))
        else:
            return HttpResponse('请输入文字')
|
from rest_framework import serializers
from account.models import CustomUser
from ..models import Note, NoteCategory
class NoteCategorySerializer(serializers.ModelSerializer):
    """Serializes NoteCategory objects, resolving the posted user value to
    a CustomUser on creation."""

    class Meta:
        model = NoteCategory
        fields = "__all__"

    def create(self, validated_data):
        """Create a NoteCategory owned by the user named in validated_data."""
        username = validated_data.pop('user')
        owner = CustomUser.objects.get(username=username)
        return NoteCategory.objects.create(user=owner, **validated_data)
class NoteSerializer(serializers.ModelSerializer):
    """Serializes Note objects, resolving the posted user and category on
    creation."""

    class Meta:
        model = Note
        fields = "__all__"

    def create(self, validated_data):
        """Create a Note for the given user and category.

        ``validated_data['category']`` already arrives as a NoteCategory
        instance (the original accessed ``category.id`` on it), so the
        redundant ``NoteCategory.objects.get(id=category.id)`` re-fetch --
        an extra DB query returning the same row -- has been removed.
        """
        user = validated_data.pop('user')
        category = validated_data.pop("category")
        custom_user = CustomUser.objects.get(username=user)
        note = Note.objects.create(user=custom_user, category=category, **validated_data)
        return note
#!/usr/bin/python
'''
Date: 18-06-2019
Created By: TusharM
11) Define a function generate_n_chars() that takes an integer n and a character c and returns a string, n characters long, consisting only of c:s. For example, generate_n_chars(5,"x") should return the string "xxxxx". (Python is unusual in that you can actually write an expression 5 * "x" that will evaluate to "xxxxx". For the sake of the exercise you should ignore that the problem can be solved in this manner.)
'''
def generate_n_chars(count, char):
    '''Return a string ``count`` characters long consisting only of ``char``.

    Built with an explicit loop plus ``str.join`` (the exercise forbids the
    trivial ``count * char`` form). Runs in O(count).

    Args:
        count: number of repetitions; 0 yields the empty string.
        char: the string to repeat.

    Raises:
        Exception: if ``char`` is not a string (message kept for callers).
    '''
    # isinstance is the idiomatic type check (handles str subclasses too,
    # unlike the original ``type(char) == str``).
    if not isinstance(char, str):
        raise Exception('Expected count and character as input')
    pieces = []
    for _ in range(count):
        pieces.append(char)
    return ''.join(pieces)
if __name__ == '__main__':
    # Demonstrate generate_n_chars on the exercise's sample inputs;
    # expected output lines: xxxxx, !!!!!!!!!!, +, and an empty line.
    for length, symbol in ((5, 'x'), (10, '!'), (1, '+'), (0, 'A')):
        print(generate_n_chars(length, symbol))
|
import numpy as np
import random
np.random.seed(7883)
random.seed(7883)
def generate_random_binomial_(row, col):
    """Return a (row, col) uint8 matrix of fair coin flips (0s and 1s)."""
    flips = np.random.binomial(1, 0.5, (row, col))
    return flips.astype(np.uint8)
def generate_weightings(row, col):
    """Build initial write/read weighting matrices.

    Starting at row ``3*row//4`` and moving upward, the write weighting
    lights one cell per column in the first half of the columns, and the
    read weighting lights the mirrored cell in the second half.

    # Arguments
        row: number of memory rows.
        col: number of weighting columns.
    # Returns
        (write_weightings, read_weightings): two (row, col) float32 arrays.
    """
    write_weightings = np.zeros((row, col), dtype=np.float32)
    read_weightings = np.zeros((row, col), dtype=np.float32)
    # BUG FIX: under Python 3, '/' is true division and produced float
    # indices here ((row * 3) / 4 and i + col / 2), which NumPy rejects.
    # Floor division restores the original Python 2 integer semantics.
    r = (row * 3) // 4
    half = col // 2
    for i in range(half):
        write_weightings[r][i] = 1
        read_weightings[r][i + half] = 1
        r -= 1
    return write_weightings, read_weightings
def generate_copy_sample(dimension, sequence_length):
    """Generate one sample of copy algorithm.
    # Arguments
        dimension: the dimension of each input output tokens.
        sequence_length: the length of input sequence, i.e. the number of
            input tokens.
    # Returns
        input_sequence: the input sequence of a sample.
        output_sequence: the output sequence of a sample.
    """
    # produce random sequence
    sequence = np.random.binomial(
        1, 0.5, (sequence_length, dimension - 1)).astype(np.uint8)
    # allocate space for input sequence and output sequence
    # BUG FIX: np.bool (an alias of the builtin) was deprecated in NumPy
    # 1.20 and removed in 1.24 -- use the builtin bool dtype directly.
    input_sequence = np.zeros(
        (sequence_length * 2 + 1, dimension), dtype=bool)
    output_sequence = np.zeros(
        (sequence_length * 2 + 1, dimension), dtype=bool)
    # set value of input sequence
    input_sequence[:sequence_length, :-1] = sequence
    # "1": A special flag which indicate the end of the input
    input_sequence[sequence_length, -1] = 1
    # set value of output sequence
    output_sequence[sequence_length + 1:, :-1] = sequence
    # "1": A special flag which indicate the begin of the output
    output_sequence[sequence_length, -1] = 1
    # return the sample
    return input_sequence, output_sequence
def generate_copy_data_set(
        dimension,
        max_length_of_original_sequence,
        data_set_size):
    """Generate samples for learning copy algorithm.
    # Arguments
        dimension: the dimension of each input output tokens.
        max_length_of_original_sequence: the max length of original sequence.
        data_set_size: the size of total samples.
    # Returns
        input_sequences: the input sequences of total samples.
        output_sequences: the output sequences of total samples.
    """
    # get random sequence lengths from uniform distribution e.g. [1, 20]
    sequence_lengths = np.random.randint(
        1, max_length_of_original_sequence + 1, data_set_size)
    # allocate space for input sequences and output sequences, where the
    # "1" is a special flag which indicate the end of the input or output
    # BUG FIX: np.bool was deprecated in NumPy 1.20 and removed in 1.24;
    # the builtin bool is the correct dtype.
    input_sequences = np.zeros(
        (data_set_size, max_length_of_original_sequence * 2 + 1, dimension),
        dtype=bool)
    output_sequences = np.zeros(
        (data_set_size, max_length_of_original_sequence * 2 + 1, dimension),
        dtype=bool)
    # set the value for input sequences and output sequences
    for i in range(data_set_size):
        input_sequence, output_sequence = \
            generate_copy_sample(dimension, sequence_lengths[i])
        input_sequences[i, :sequence_lengths[i]*2+1] = input_sequence
        output_sequences[i, :sequence_lengths[i]*2+1] = output_sequence
    # return the total samples
    return input_sequences, output_sequences
def generate_repeat_copy_sample(dimension, sequence_length, repeat_times):
    """Generate one sample of repeat copy algorithm.
    # Arguments
        dimension: the dimension of each input output tokens.
        sequence_length: the length of input sequence, i.e. the number of
            input tokens.
        repeat_times: repeat times of output.
    # Returns
        input_sequence: the input sequence of a sample.
        output_sequence: the output sequence of a sample.
    """
    # produce random sequence
    sequence = np.random.binomial(
        1, 0.5, (sequence_length, dimension - 1)).astype(np.uint8)
    # allocate space for input sequence and output sequence
    # BUG FIX: np.bool was deprecated in NumPy 1.20 and removed in 1.24;
    # the builtin bool is the correct dtype.
    total_steps = sequence_length + 1 + sequence_length * repeat_times
    input_sequence = np.zeros((total_steps, dimension), dtype=bool)
    output_sequence = np.zeros((total_steps, dimension), dtype=bool)
    # set value of input sequence
    input_sequence[:sequence_length, :-1] = sequence
    # "1": A special flag which indicate the end of the input
    input_sequence[sequence_length, -1] = 1
    # set value of output sequence: the input repeated repeat_times times
    output_sequence[sequence_length+1:, :-1] = \
        np.tile(sequence, (repeat_times, 1))
    # return the sample
    return input_sequence, output_sequence
def generate_repeat_copy_data_set(
        dimension,
        max_length_of_original_sequence,
        max_repeat_times,
        data_set_size):
    """Generate samples for learning repeat copy algorithm.
    # Arguments
        dimension: the dimension of each input output tokens.
        max_length_of_original_sequence: the max length of original sequence.
        max_repeat_times: the maximum repeat times.
        data_set_size: the size of total samples.
    # Returns
        input_sequences: the input sequences of total samples.
        output_sequences: the output sequences of total samples.
        repeat_times: the repeat times of each output sequence of total
            samples.
    """
    # produce random sequence lengths from uniform distribution
    # [1, max_length]
    sequence_lengths = np.random.randint(
        1, max_length_of_original_sequence + 1, data_set_size)
    # produce random repeat times from uniform distribution
    # [1, max_repeat_times]
    repeat_times = np.random.randint(1, max_repeat_times + 1, data_set_size)
    # BUG FIX: np.bool was deprecated in NumPy 1.20 and removed in 1.24;
    # the builtin bool is the correct dtype.
    padded_length = \
        max_length_of_original_sequence * (max_repeat_times + 1) + 1
    input_sequences = np.zeros(
        (data_set_size, padded_length, dimension), dtype=bool)
    output_sequences = np.zeros(
        (data_set_size, padded_length, dimension), dtype=bool)
    # set the value for input sequences and output sequences
    for i in range(data_set_size):
        input_sequence, output_sequence = generate_repeat_copy_sample(
            dimension, sequence_lengths[i], repeat_times[i])
        span = sequence_lengths[i] * (repeat_times[i] + 1) + 1
        input_sequences[i, :span] = input_sequence
        output_sequences[i, :span] = output_sequence
    # return total samples
    return input_sequences, output_sequences, repeat_times
def _generate_associative_recall_items(dimension, item_size, episode_size):
    """Generate items of associative recall algorithm.
    # Arguments
        dimension: the dimension of input output sequences.
        item_size: the size of items.
        episode_size: the size of one episode.
    # Returns
        items: the generated items, one separator row before each item.
    """
    total_rows = (item_size + 1) * episode_size
    # Random payload bits for every row; the two extra trailing channels
    # are reserved for the separator and query flags.
    payload = np.random.binomial(
        1, 0.5, (total_rows, dimension)).astype(np.uint8)
    items = np.zeros((total_rows, dimension + 2), dtype=np.uint8)
    items[:, :-2] = payload
    # Separator row: all zeros except the separator flag (channel -2).
    delimiter = np.zeros((1, dimension + 2), dtype=np.uint8)
    delimiter[0][-2] = 1
    # Overwrite the first row of every (item_size + 1)-row group.
    items[0:total_rows:item_size + 1] = delimiter[0]
    return items
def generate_associative_recall_sample(
        dimension, item_size, episode_size, max_episode_size):
    """Generate one sample of associative recall algorithm.
    Arguments
        dimension: the dimension of input output sequences.
        item_size: the size of one item.
        episode_size: the size of one episode.
        max_episode_size: the maximum episode size.
    Returns
        input_sequence: the input sequence of a sample.
        output_sequence: the output sequence of a sample.
    """
    # Padded timeline: room for max_episode_size items plus the query and
    # the answer (each item occupies item_size + 1 rows incl. separator).
    sequence_length = (item_size+1) * (max_episode_size+2)
    input_sequence = np.zeros(
        (sequence_length, dimension + 2), dtype=np.uint8)
    # input_sequence = np.zeros(
    #     (sequence_length, dimension + 2), dtype=np.bool)
    input_sequence[:(item_size + 1) * episode_size] = \
        _generate_associative_recall_items(
            dimension, item_size, episode_size)
    separator = np.zeros((1, dimension + 2), dtype=np.uint8)
    # separator = np.zeros((1, dimension + 2), dtype=np.bool)
    separator[0][-2] = 1
    # Pick a random item to query; randint's exclusive upper bound means the
    # last item is never queried (its successor would fall off the episode).
    query_index = np.random.randint(0, episode_size-1)
    # Append a copy of the queried item right after the episode...
    input_sequence[(item_size+1)*episode_size:(item_size+1)*(episode_size+1)] = \
        input_sequence[(item_size+1)*query_index:(item_size+1)*(query_index+1)]
    # ...and mark it with the query flag (channel -1) instead of the
    # separator flag (channel -2).
    input_sequence[(item_size+1)*episode_size][-2] = 0
    input_sequence[(item_size+1)*episode_size][-1] = 1
    input_sequence[(item_size+1)*(episode_size+1)][-1] = 1
    output_sequence = np.zeros(
        (sequence_length, dimension + 2), dtype=np.uint8)
    # output_sequence = np.zeros(
    #     (sequence_length, dimension + 2), dtype=np.bool)
    # The target is the item that followed the queried item in the episode.
    output_sequence[(item_size+1)*(episode_size+1):(item_size+1)*(episode_size+2)] = \
        input_sequence[(item_size+1)*(query_index+1):(item_size+1)*(query_index+2)]
    # Clear the separator flag copied over with the target item.
    output_sequence[(item_size+1)*(episode_size+1)][-2] = 0
    # return one sample for associative recall
    return input_sequence, output_sequence
def generate_associative_recall_data_set(
        dimension, item_size, max_episode_size, data_set_size):
    """Generate samples for learning associative recall algorithm.
    Arguments
        dimension: the dimension of input output sequences.
        item_size: the size of one item.
        max_episode_size: the maximum episode size.
        data_set_size: the size of one episode.
    Returns
        input_sequences: the input sequences of total samples.
        output_sequences: the output sequences of total samples.
    """
    # One random episode size per sample, in [2, max_episode_size].
    episode_sizes = np.random.randint(2, max_episode_size + 1, data_set_size)
    sequence_length = (item_size + 1) * (max_episode_size + 2)
    batch_shape = (data_set_size, sequence_length, dimension + 2)
    input_sequences = np.zeros(batch_shape, dtype=np.uint8)
    output_sequences = np.zeros(batch_shape, dtype=np.uint8)
    for sample_index in range(data_set_size):
        sample_in, sample_out = generate_associative_recall_sample(
            dimension, item_size, episode_sizes[sample_index],
            max_episode_size)
        input_sequences[sample_index] = sample_in
        output_sequences[sample_index] = sample_out
    # return the total samples
    return input_sequences, output_sequences
# def generate_probability_of_n_gram_by_beta(a, b, n):
# return np.random.beta(a, b, np.power(2, n-1))
def get_index(n_1_bits, n):
    """Interpret the (n-1)-bit history as an integer index, LSB first."""
    return n_1_bits[0] + sum(
        np.power(2, position) * n_1_bits[position]
        for position in range(1, n - 1))
def generate_dynamical_n_gram_sample(look_up_table, n, sequence_length):
    """Generate a batch of 100 examples for the dynamical n-gram task.

    Each bit is drawn with the probability stored in ``look_up_table`` for
    the preceding (n-1)-bit history; the second half of the timeline holds
    the prediction targets. Channel 1 carries the complement of channel 0,
    channel -1 the end-of-input flag.
    """
    example_number = 100
    input_size = 1
    input_sequence = np.zeros((example_number, sequence_length*2-n+2, input_size+2), dtype=np.uint8)
    output_sequence = np.zeros((example_number, sequence_length*2-n+2, input_size+2), dtype=np.uint8)
    # Scratch buffers from an earlier single-example version; only
    # input_sequence_ is still read below (as the bit history).
    input_sequence_ = np.zeros((sequence_length*2-n+2, input_size+2), dtype=np.uint8)
    output_sequence_ = np.zeros((sequence_length*2-n+2, input_size+2), dtype=np.uint8)
    # Seed the first n-1 history bits at random.
    input_sequence_[0:n-1, 0] = np.random.binomial(1, 0.5, (1, n-1)).astype(np.uint8)
    # for i in range(n-1, sequence_length):
    #     n_1_bits = input_sequence[i-n+1: i]
    #     index = get_index(n_1_bits, n)
    #     input_sequence[i] = np.random.binomial(1, look_up_table[index], 1)
    # output_sequence[n-1: -1] = input_sequence[n-1: -1]
    for i in range(n-1, sequence_length):
        # NOTE(review): the history is read from input_sequence_, but newly
        # drawn bits are written only to input_sequence, so for i >= n the
        # history seen here is mostly zeros -- confirm this is intended.
        n_1_bits = input_sequence_[i-n+1: i, 0]
        index = get_index(n_1_bits, n)
        # input_sequence_[i][0] = np.random.binomial(1, look_up_table[index], 1)
        # output_sequence_[sequence_length+i-n+2][0] = np.random.binomial(1, look_up_table[index], 1)
        input_sequence[:, i, 0] = np.random.binomial(1, look_up_table[index], 1)
        # output_sequence_[sequence_length+i-n+2][0] = np.random.binomial(1, look_up_table[index], 1)
        output_sequence[:, sequence_length+i-n+2, 0] = np.random.binomial(
            1, look_up_table[index], example_number)
    # Mark the end of the input phase on channel -1.
    input_sequence[:, sequence_length, -1] = 1
    # Channel 1 = complement of channel 0 over the input phase...
    input_ones = np.ones((example_number, sequence_length))
    input_sequence[:, 0:sequence_length, 1] = \
        input_ones - input_sequence[:, 0:sequence_length, 0]
    # ...and likewise over the target phase.
    output_ones = np.ones((example_number, sequence_length-n+1))
    output_sequence[:, sequence_length+1:sequence_length*2-n+2, 1] = \
        output_ones - output_sequence[:, sequence_length+1:sequence_length*2-n+2, 0]
    # print(input_sequence_.shape)
    # input_sequence_[0:sequence_length, 0] = input_sequence
    # input_sequence_[sequence_length, -1] = 1
    # output_sequence_[1, sequence_length+1:sequence_length*2-n+2] = input_sequence
    # print(input_sequence)
    # print(output_sequence)
    return input_sequence, output_sequence
def generate_dynamical_n_gram_data_set(
        look_up_table, n, sequence_length, example_size):
    """Generate a data set for the dynamical n-gram task.

    ``generate_dynamical_n_gram_sample`` produces examples in batches of
    100, so ``example_size`` should be a multiple of 100 (any remainder
    rows stay zero).
    """
    input_size = 1
    input_sequences = np.zeros(
        (example_size, sequence_length*2-n+2, input_size+2), dtype=np.uint8)
    output_sequences = np.zeros(
        (example_size, sequence_length*2-n+2, input_size+2), dtype=np.uint8)
    # BUG FIX: '/' is true division in Python 3 and range() rejects floats;
    # floor division restores the intended batch count.
    for i in range(example_size // 100):
        input_sequence, output_sequence = generate_dynamical_n_gram_sample(
            look_up_table, n, sequence_length)
        input_sequences[i*100:(i+1)*100] = input_sequence
        output_sequences[i*100:(i+1)*100] = output_sequence
    return input_sequences, output_sequences
def generate_priority_sort_sample(
        dimension,
        input_sequence_length,
        output_sequence_length,
        priority_lower_bound,
        priority_upper_bound):
    """Generate one sample of priority sort algorithm.
    Arguments
        dimension: the dimension of input output sequences.
        input_sequence_length: the length of input sequence.
        output_sequence_length: the length of output sequence.
        priority_lower_bound: the lower bound of priority.
        priority_upper_bound: the upper bound of priority.
    Returns
        input_sequence: the input sequence of a sample.
        output_sequence: the output sequence of a sample.
    """
    # Total timeline: inputs, one delimiter step, then outputs.
    sequence = input_sequence_length + output_sequence_length + 1
    input_sequence = np.random.binomial(
        1, 0.5, (input_sequence_length, dimension + 1)).astype(np.uint8)
    output_sequence = np.zeros(
        (output_sequence_length, dimension + 1), dtype=np.uint8)
    # One scalar priority per input token, drawn uniformly.
    input_priority = np.random.uniform(priority_lower_bound,
                                       priority_upper_bound,
                                       (input_sequence_length, 1))
    # Target priorities: the highest ones, in descending order.
    output_priority = sorted(
        input_priority, reverse=True)[:output_sequence_length]
    # Sort the input tokens by priority (descending); the target output is
    # the top output_sequence_length tokens.
    pair = [(input_sequence[i], input_priority[i])
            for i in range(input_sequence_length)]
    sorted_input_sequence = sorted(
        pair, key=lambda prior: prior[1], reverse=True)
    for i in range(output_sequence_length):
        output_sequence[i] = sorted_input_sequence[i][0]
    # Embed everything in fixed-size (sequence, dimension + 2) arrays; the
    # last channel is the end-of-input delimiter flag. Channel -2 is left
    # zero here -- the data-set builder fills it with the priorities.
    input_sequence_ = np.zeros((sequence, dimension + 2), dtype=np.float32)
    input_priority_ = np.zeros((sequence, 1), dtype=np.float32)
    output_sequence_ = np.zeros((sequence, dimension + 2), dtype=np.float32)
    output_priority_ = np.zeros((sequence, 1), dtype=np.float32)
    input_sequence_[:input_sequence_length, :-1] = input_sequence
    input_sequence_[input_sequence_length][-1] = 1
    input_priority_[:input_sequence_length] = input_priority
    output_sequence_[input_sequence_length+1:sequence, :-1] = output_sequence
    output_priority_[input_sequence_length+1:sequence] = output_priority
    # return input sequence, priority of each input, output sequence, priority
    # of each output
    return input_sequence_, input_priority_, output_sequence_, output_priority_
def generate_priority_sort_data_set(
        dimension,
        input_sequence_length,
        output_sequence_length,
        priority_lower_bound,
        priority_upper_bound,
        data_set_size):
    """Generate samples for learning priority sort algorithm.
    Arguments
        dimension: the dimension of input output sequences.
        input_sequence_length: the length of input sequence.
        output_sequence_length: the length of output sequence.
        priority_lower_bound: the lower bound of priority.
        priority_upper_bound: the upper bound of priority.
        data_set_size: the size of one episode.
    Returns
        input_sequence: the input sequence of a sample.
        output_sequence: the output sequence of a sample.
    """
    sequence_length = input_sequence_length + output_sequence_length
    input_sequences = np.zeros(
        (data_set_size, sequence_length + 1, dimension + 2), dtype=np.float32)
    output_sequences = np.zeros(
        (data_set_size, sequence_length + 1, dimension + 2), dtype=np.float32)
    for i in range(data_set_size):
        input_sequence, input_priority, output_sequence, output_priority = \
            generate_priority_sort_sample(
                dimension,
                input_sequence_length,
                output_sequence_length,
                priority_lower_bound,
                priority_upper_bound)
        input_sequences[i] = input_sequence
        output_sequences[i] = output_sequence
        # Fill channel -2 with the per-step priorities (the sample builder
        # leaves that channel zero).
        input_sequences[i][:, -2] = input_priority.transpose()
        output_sequences[i][:, -2] = output_priority.transpose()
    # return the total samples
    return input_sequences, output_sequences
|
def classify_number(number):
    """Return the range message for ``number``; the valid range is 1-20.

    Args:
        number: the integer to classify.

    Returns:
        One of three messages: too high (> 20), too low (< 1), or in range.
    """
    if number > 20:
        return "Number not within range and is too high."
    if number < 1:
        return "Number not within range and is too low."
    # BUG FIX: the original final branch tested 'number > 1 < 20', which is
    # 'number > 1 and 1 < 20', so number == 1 printed nothing at all.
    # Anything not caught by the guards above lies in 1..20 inclusive.
    return "Your number is in range."


if __name__ == '__main__':
    number = int(input("Please enter a number: "))
    print(classify_number(number))
|
def subsets(self, nums: List[int]) -> List[List[int]]:
    """Return the power set of *nums*.

    Each element doubles the collection: every already-collected subset is
    extended with the new element and appended after the existing ones.
    """
    collected = [[]]
    for value in nums:
        extended = []
        for existing in collected:
            extended.append(existing + [value])
        collected.extend(extended)
    return collected
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
    """Schema migration: drops `fecha_aviso` from `ot` and re-creates it
    on `factura` (no data is copied between the two models)."""
    dependencies = [
        ('adm', '0102_auto_20160922_1121'),
    ]
    operations = [
        # Remove the old column from `ot`.
        migrations.RemoveField(
            model_name='ot',
            name='fecha_aviso',
        ),
        # Add the column to `factura`; nullable/blank so existing rows stay valid.
        migrations.AddField(
            model_name='factura',
            name='fecha_aviso',
            field=models.DateField(null=True, verbose_name=b'Aviso de Trabajo Realizado', blank=True),
        ),
    ]
|
from django.apps import AppConfig
# This file gives our app a unique name within this project.
class LogConfig(AppConfig):
    """AppConfig registering this Django app under the label 'log'."""
    name = 'log'
|
#
# from sanic import Sanic
# from sanic.response import json
# from sanic.request import Request
# from sanic_jwt_extended import (
# JWTManager,
# jwt_required,
# create_access_token,
# create_refresh_token,
# )
# import uuid
# from sanic_jwt_extended.tokens import Token
#
# app = Sanic("New Sanic App")
#
# # Setup the Sanic-JWT-Extended extension
# app.config["JWT_SECRET_KEY"] = "some-secret"
# JWTManager(app)
#
#
#
# @app.route("/login", methods=["POST"])
# async def login(request: Request):
# if not request.json:
# return json({"msg": "Missing JSON in request"}, status=400)
#
# username = request.json.get("username", None)
# password = request.json.get("password", None)
# if not username:
# return json({"msg": "Missing username parameter"}, status=400)
# if not password:
# return json({"msg": "Missing password parameter"}, status=400)
#
# if username != "test" or password != "test":
# return json({"msg": "Bad username or password"}, status=403)
#
# # Identity can be any data that is json serializable
# access_token = await create_access_token(identity=username, app=request.app)
# refresh_token = await create_refresh_token(
# identity=str(uuid.uuid4()), app=request.app
# )
# return json(
# dict(access_token=access_token, refresh_token=refresh_token), status=200
# )
#
#
#
#
# if __name__ == "__main__":
# app.run() |
# coding:utf-8
import requests
import uuid
from PIL import Image
import os
# Fetch the CSDN captcha endpoint 100 times, saving each image under a
# random name and discarding any image whose size is exactly 48x20
# (presumably the "blocked/placeholder" response -- TODO confirm).
url = "http://download.csdn.net/index.php/rest/tools/validcode/source_ip_validate/10.5711163911089325"
for i in range(100):
    resp = requests.get(url)
    # Random UUID filename so repeated downloads never collide.
    filename = "./captchas/" + str(uuid.uuid4()) + ".png"
    with open(filename, 'wb') as f:
        for chunk in resp.iter_content(chunk_size=1024):
            if chunk: # filter out keep-alive new chunks
                f.write(chunk)
        f.flush()
        f.close()
    im = Image.open(filename)
    if im.size != (48, 20):
        os.remove(filename)
    else:
        print filename
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import absolute_import
import mongoengine
from goodjob.config import config
def connect():
    """Open the process-wide MongoDB connection used by goodjob."""
    database_name = config.DB_NAME
    mongo_host = config.MONGO_URL
    mongoengine.connect(database_name, host=mongo_host)
|
# -*- coding: utf-8 -*-
from django.conf.urls import patterns, url
# URL routes for the accounting_core app. NOTE(review): uses the
# string-view `patterns(prefix, ...)` helper, which was removed in
# Django 1.10 -- this file assumes an older Django.
urlpatterns = patterns(
    'accounting_core.views',
    # The copy URL accepts a comma-separated list of pks ([0-9,]+);
    # the remaining accountingyear URLs take a single numeric pk.
    url(r'^accountingyear/(?P<pk>[0-9,]+)/copy$', 'copy_accounting_year'),
    url(r'^accountingyear/(?P<pk>[0-9]+)/cost_centers$', 'pdf_list_cost_centers'),
    url(r'^accountingyear/(?P<pk>[0-9]+)/accounts$', 'pdf_list_accounts'),
    url(r'^accountingyear/(?P<ypk>[0-9]+)/get_leaves_cat$', 'leaves_cat_by_year'),
    url(r'^accountingyear/(?P<ypk>[0-9]+)/get_parents_cat$', 'parents_cat_by_year'),
    url(r'^accountingyear/(?P<ypk>[0-9]+)/get_accounts$', 'accounts_by_year'),
    url(r'^costcenter/available_list$', 'costcenter_available_list'),
    url(r'^account/available_list$', 'account_available_list'),
    url(r'^tva/available_list$', 'tva_available_list'),
    url(r'^unit/(?P<upk>[0-9]+)/users_available_list$', 'users_available_list_by_unit'),
)
|
from sys import argv, exit
from json import loads
import transaction_verifier
from collections import defaultdict
from datetime import datetime
def count(file='blocks.txt'):
    """Replay a block file, verify every transaction and tally balances.

    Args:
        file: path to a file with one JSON object per line; each object
            carries a 'tx' string ("from;to;coin;sig") and a 'time'
            epoch timestamp.

    Exits the process with status 1 on the first transaction that fails
    verification.
    """
    bank = defaultdict(int)
    print('### TRANSACTIONS ###')
    with open(file) as f:
        # Stream line by line instead of materialising readlines();
        # same order, lower memory.
        for l in f:
            json_data = loads(l)
            tx = json_data['tx']
            time = datetime.utcfromtimestamp(json_data['time'])
            verify = transaction_verifier.verify_transaction(tx)
            if not verify:
                print('!!! this block chain is blocken !!!')
                exit(1)
            frm, to, coin, sig = tx.split(';')
            bank[frm] -= int(coin)
            bank[to] += int(coin)
            # Addresses are shortened only for display, after balances update.
            frm = frm[:15] + '...'
            to = to[:15] + '...'
            print(f'[{time}]')
            print(f'{frm} ----- {coin} coins -----> {to}: ({"verified" if verify else "ERROR"})')
            print()
    print('### CURRENT COIN ###')
    # Iterate items() instead of keys() + lookup (same output).
    for user, coins in bank.items():
        print(f'{user} have {coins} coins')
if __name__ == '__main__':
    # Expects the block-file path as the first CLI argument.
    print('block file:', argv[1])
    count(argv[1])
|
#!/usr/bin/env python
import argparse
import logging
import subprocess
import sys
from typing import IO, Union
from dcos_internal_utils import utils
"""Use this node's internal IP address to reach the local CockroachDB instance
and backup the IAM database.
This program is expected to be executed manually before invasive procedures
such as master replacement or cluster upgrade.
"""
log = logging.getLogger(__name__)
logging.basicConfig(format='[%(levelname)s] %(message)s', level='INFO')
def dump_database(my_internal_ip: str, out: Union[IO[bytes], IO[str]]) -> None:
    """
    Use `cockroach dump` to dump the IAM database.
    It is expected that the operator will redirect the output to
    a file or consume it from a backup automation program.
    Args:
        my_internal_ip: The internal IP of the current host.
        out: Stream receiving the dump (an open file object or stdout).
    """
    cockroach_binary = '/opt/mesosphere/active/cockroach/bin/cockroach'
    command = [
        cockroach_binary,
        'dump',
        '--insecure',
        '--host={}'.format(my_internal_ip),
        'iam',
    ]
    log.info('Dump iam database via command `%s`', ' '.join(command))
    try:
        subprocess.run(command, check=True, stdout=out)
    except subprocess.CalledProcessError:
        # The stderr output of the underlying cockroach command will be printed
        # to stderr independently.
        log.error('Failed to dump database.')
        # We know the caller isn't doing any cleanup so just exit.
        sys.exit(1)
    else:
        log.info('Database successfully dumped.')
def _parse_args() -> argparse.Namespace:
parser = argparse.ArgumentParser(
description='Dump the IAM database to a file.')
parser.add_argument(
'backup_file_path', type=str, nargs='?',
help='the path to a file to which the database backup must be written (stdout if omitted)')
return parser.parse_args()
def main() -> None:
    """Entry point: resolve this node's IP and dump the IAM database."""
    # Determine the internal IP address of this node.
    my_internal_ip = utils.detect_ip()
    log.info('My internal IP address is `{}`'.format(my_internal_ip))
    args = _parse_args()
    destination = args.backup_file_path
    if destination:
        log.info('Write backup to: {}'.format(destination))
        with open(destination, 'wb') as backup_file:
            dump_database(my_internal_ip=my_internal_ip, out=backup_file)
    else:
        log.info('Write backup to: STDOUT')
        dump_database(my_internal_ip=my_internal_ip, out=sys.stdout)
if __name__ == '__main__':
    # Manual backup tool: run before master replacement / cluster upgrade.
    main()
|
from numpy import*
# Read a square matrix and print the minimum of its strictly-lower-
# triangular elements.
# SECURITY: eval() on user input can execute arbitrary code; acceptable
# only for interactive classroom use, never on untrusted input.
mt=array(eval(input("insira a matriz quadrada: ")))
q=shape(mt)[0]
# Count of strictly-lower-triangular entries of a q x q matrix.
r=(q**2-q)//2
v=zeros(r,dtype=float)
s=0
for i in range(q):
    for j in range(q):
        if i>j:
            v[s]=mt[i,j]
            s=s+1
# NOTE(review): for a 1x1 matrix r == 0, so min() of an empty array raises.
print(min(v))
import os
import numpy as np
import jieba
from utils import singleton
from client.tfs_client import BaseClient
from protos.tensorflow.core.framework import types_pb2
_get_module_path = lambda path: os.path.normpath(os.path.join(os.getcwd(),os.path.dirname(__file__), path))
# from TFS_client import inference_sync
@singleton
class Content2Vec():
    """Maps text / word lists onto fixed-length word-id sequences using
    jieba's vocabulary (optionally extended by a user dictionary)."""
    def __init__(self, num_label=2, num_step=1000, dict_path=_get_module_path('../dic/ccd.txt')):
        # num_step is the fixed output length; longer inputs are truncated.
        self.dict_path = dict_path
        self.num_label = num_label
        self.num_step = num_step
        self.word_dict = self.load_dict()
        self.num_words = len(self.word_dict)
    def load_dict(self):
        """Build a word -> id map from jieba's frequency table."""
        if self.dict_path:
            jieba.load_userdict(self.dict_path)
        wid = 0
        wd = {}
        for w, freq in jieba.dt.FREQ.items():
            # Entries with zero frequency are placeholders; skip them.
            if freq > 0:
                wd[w] = wid
                wid += 1
        return wd
    def process_text(self, content):
        """Segment `content` and return its id sequence of length num_step."""
        # Segment the text first (full mode), truncated to num_step words.
        words = list(jieba.cut(content, cut_all=True))[:self.num_step]
        return self.process_word_list(words)
    def process_word_list(self, words):
        """Map a word list onto a zero-padded id sequence of length num_step.

        Words absent from the vocabulary are left as 0.
        """
        # Initialise the zero-padded sequence.
        data = [0 for i in range(self.num_step)]
        # Fix: truncate to num_step so direct callers passing longer lists
        # no longer hit an IndexError on data[i] (process_text already
        # truncated, but this method is also called directly).
        for i, w in enumerate(words[:self.num_step]):
            if w in self.word_dict:
                data[i] = self.word_dict[w]
        return data
def do_commom_class(wordlist):
    """Classify the words in `wordlist` into one of 14 THUCNews categories
    via the remote "only_attention" TensorFlow Serving model.

    Args:
        wordlist: nested structure where wordlist[i][j][0] is a word
            (only the first element of each innermost item is used).
    Returns:
        The predicted category name (a Chinese label string).
    """
    # Flatten the nested word list.
    all_word = []
    for w0 in wordlist:
        for w1 in w0:
            all_word.append(w1[0])
    lf = Content2Vec()
    # Only the first 1000 words feed the model (matches num_step=1000).
    da = lf.process_word_list(all_word[:1000])
    d=np.array([da])
    data = np.expand_dims(d, axis=0).astype(np.int32).reshape((-1,1000))
    # bc = BaseClient()
    # result = bc.inference("only_attention", "serving_default", "text", data, tf.int32)
    # The result comes back as protobuf, so the probability vector is
    # extracted from it as below.
    model_name = "only_attention"
    signature_name = "serving_default"
    input_data = {
        "text":{
            "data": data,
            "dtype": types_pb2.DT_INT32
        }
    }
    output_list = ["class_prob"]
    tfs_serving = BaseClient()
    out = tfs_serving.inference_sync(model_name,signature_name,input_data,output_list)
    # print(out)
    val=np.array(out['class_prob']['value'])
    # train_data_dir=_get_module_path('../tfnlp/data/THUCNews')
    # label_list = os.listdir(train_data_dir)
    label_list=['体育', '娱乐', '家居', '彩票', '房产', '教育', '时尚', '时政', '星座', '游戏', '社会', '科技', '股票', '财经']
    final_prob=np.array(val)
    # The class with the highest probability wins.
    label_id = np.argmax(final_prob)
    label_name = label_list[label_id]
    return label_name
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import requests
import hashlib
import random
def make_get_request(url, payload=None, headers=None, is_json=True):
    """Issue a GET request and return the decoded body, or None on failure.

    Args:
        url: target URL.
        payload: optional dict of query-string parameters.
        headers: optional dict of HTTP headers.
        is_json: decode the response as JSON when True, raw bytes otherwise.
    Returns:
        Parsed JSON / raw content for 2xx responses, None for anything else.
    """
    req = requests.get(
        url,
        params=payload,
        headers=headers and headers or {}
    )
    # Fix: the original `200 >= req.status_code <= 299` chained comparison
    # accepted any status <= 200 and rejected 201-299; the intent is the
    # standard 2xx success range.
    if 200 <= req.status_code <= 299:
        if is_json:
            return req.json()
        else:
            return req.content
    else:
        return None
def random_ua():
    """Return a randomly chosen browser User-Agent string."""
    ua = [
        'Mozilla/5.0 (Windows NT 6.1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/41.0.2228.0 Safari/537.36',
        'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/41.0.2227.0 Safari/537.36',
        'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_9_2) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/36.0.1944.0 Safari/537.36',
        'Mozilla/5.0 (Windows NT 6.1; WOW64; rv:40.0) Gecko/20100101 Firefox/40.1',
        'Mozilla/5.0 (Windows NT 6.3; rv:36.0) Gecko/20100101 Firefox/36.0',
        'Mozilla/5.0 (Windows NT 6.1; WOW64; Trident/7.0; AS; rv:11.0) like Gecko',
        # Fix: a missing trailing comma here used to merge the MSIE 10 and
        # Edge entries into one bogus UA via implicit string concatenation.
        'Mozilla/5.0 (compatible; MSIE 10.0; Windows NT 6.1; WOW64; Trident/6.0)',
        'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/42.0.2311.135 Safari/537.36 Edge/12.246',
        'Mozilla/5.0 (Linux; U; Android 4.0.3; ko-kr; LG-L160L Build/IML74K) AppleWebkit/534.30 (KHTML, like Gecko) Version/4.0 Mobile Safari/534.30'
    ]
    return random.choice(ua)
def make_sha1(text):
    """Return the hexadecimal SHA-1 digest of *text* (a bytes object)."""
    digest = hashlib.sha1(text)
    return digest.hexdigest()
|
import json
from django.contrib.auth import authenticate, login, logout
from django.db import IntegrityError
from django.http import HttpResponse, HttpResponseRedirect
from django.shortcuts import render
from django.urls import reverse
from django.forms import ModelForm
from django.http import JsonResponse
from django.core.paginator import Paginator, EmptyPage, PageNotAnInteger
from django.template.defaulttags import register
from django.views.decorators.csrf import csrf_exempt
from django.views.generic import ListView
from .models import User, Post, Like
class PostForm(ModelForm):
    """Form for composing a post; only the body text is user-editable."""
    class Meta:
        model = Post
        fields = ['body']
        # Empty label hides the field caption in templates.
        labels = {
            'body': ''
        }
@register.filter
def get_range(value):
    """Template filter: 1-based inclusive range, e.g. 3 -> 1, 2, 3."""
    return range(1, value + 1)
@register.filter
def likes(user, likes):
    """Template filter: True when `user` appears among the given likes."""
    return any(like.user == user for like in likes)
def index(request):
    """Render the main feed: all posts, newest first, 10 per page."""
    all_posts = Post.objects.all().order_by("-timestamp")
    paginator = Paginator(all_posts, 10)  # 10 posts per page
    requested_page = request.GET.get('page')
    try:
        page = paginator.page(requested_page)
    except PageNotAnInteger:
        # Missing or non-numeric page -> first page.
        page = paginator.page(1)
    except EmptyPage:
        # Out-of-range page -> last page.
        page = paginator.page(paginator.num_pages)
    return render(request, "network/index.html", {
        "posts": page
    })
def getposts(request, posts):
    """JSON feed of posts; `posts` selects "all" or "following".

    Returns 400 for any other selector. Results are newest-first,
    paginated 10 per page via the `page_number` query parameter.
    """
    if posts == "all":
        posts = Post.objects.all()
    elif posts == "following":
        posts = Post.objects.filter(user__in=request.user.following.all())
    else:
        return JsonResponse({"error": "Invalid choice."}, status=400)
    posts = posts.order_by("-timestamp").all()
    paginator = Paginator(posts, 10)
    page_number = request.GET.get('page_number')
    # Return posts in reverse chronological order.
    try:
        page = paginator.page(page_number)
    except PageNotAnInteger:
        page = paginator.page(1)
    except EmptyPage:
        page = paginator.page(paginator.num_pages)
    # Fix: the original referenced an undefined name `page_obj`, raising
    # NameError on every request; serialize the page we actually built.
    return JsonResponse({"posts": [post.serialize() for post in page.object_list]
                         }, safe=False)
@csrf_exempt
def addpost(request):
    """Create a new post for the logged-in user from a JSON body."""
    # Only POST may create content.
    if request.method != "POST":
        return JsonResponse({"error": "POST request required."}, status=400)
    # Extract the post text from the JSON payload.
    payload = json.loads(request.body)
    new_post = Post(
        user=request.user,
        body=payload.get("body", ""),
        likeCount=0
    )
    new_post.save()
    return JsonResponse({"message": "Post added successfully."}, status=201)
def login_view(request):
    """Sign a user in; re-render the login form with a message on failure."""
    if request.method != "POST":
        return render(request, "network/login.html")
    # Attempt to sign user in.
    user = authenticate(
        request,
        username=request.POST["username"],
        password=request.POST["password"],
    )
    # Check if authentication successful.
    if user is None:
        return render(request, "network/login.html", {
            "message": "Invalid username and/or password."
        })
    login(request, user)
    return HttpResponseRedirect(reverse("index"))
def logout_view(request):
    """Sign the current user out and send them back to the feed."""
    logout(request)
    index_url = reverse("index")
    return HttpResponseRedirect(index_url)
def register(request):
    """Create a new account; on success the user is logged in directly."""
    if request.method != "POST":
        return render(request, "network/register.html")
    username = request.POST["username"]
    email = request.POST["email"]
    password = request.POST["password"]
    # Ensure password matches confirmation.
    if password != request.POST["confirmation"]:
        return render(request, "network/register.html", {
            "message": "Passwords must match."
        })
    # Attempt to create new user; a duplicate username violates the
    # unique constraint and raises IntegrityError.
    try:
        user = User.objects.create_user(username, email, password)
        user.save()
    except IntegrityError:
        return render(request, "network/register.html", {
            "message": "Username already taken."
        })
    login(request, user)
    return HttpResponseRedirect(reverse("index"))
def profile(request, profile_id):
    """Render a user's profile with their posts and follow counts."""
    profileuser = User.objects.get(pk=profile_id)
    # Debug output, preserved as-is.
    print(profileuser, profileuser.following.all())
    user_data = {
        "user": profileuser,
        "following": profileuser.following.count(),
        "followers": profileuser.followers.count(),
    }
    return render(request, "network/profile.html", {
        "posts": profileuser.posts.all().order_by("-timestamp"),
        "user_data": user_data,
    })
def follow(request, profile_id):
    """Follow (PUT) or unfollow (DELETE) a user via the `_method` override
    form field, then bounce back to that user's profile page."""
    follower = request.user
    if request.method == "POST":
        override = request.POST["_method"]
        if override == "PUT":
            follower.following.add(User.objects.get(id=profile_id))
        elif override == "DELETE":
            follower.following.remove(User.objects.get(id=profile_id))
    return HttpResponseRedirect(reverse("profile", kwargs={"profile_id": profile_id}))
def following(request):
    """Feed of posts from followed users; a valid POST also creates a post."""
    if request.method == "POST":
        submitted = PostForm(request.POST)
        if submitted.is_valid():
            # Persist the new post, then fall through to render the feed.
            new_post = Post(user=request.user, body=submitted.cleaned_data["body"])
            new_post.save()
    followed_posts = Post.objects.filter(
        user__in=request.user.following.all()).order_by("-timestamp")
    paginator = Paginator(followed_posts, 10)  # 10 posts per page
    try:
        page = paginator.page(request.GET.get('page'))
    except PageNotAnInteger:
        page = paginator.page(1)
    except EmptyPage:
        page = paginator.page(paginator.num_pages)
    return render(request, "network/index.html", {
        "postForm": PostForm(),
        "posts": page
    })
@csrf_exempt
def edit(request, post_id):
    """Update the body of one of the requester's own posts (PUT, JSON)."""
    # The lookup is scoped to request.user, so a user can only edit
    # their own posts; anything else is reported as not found.
    try:
        post = Post.objects.get(user=request.user, pk=post_id)
    except Post.DoesNotExist:
        return JsonResponse( {"error": "Post not found."}, status=404)
    if request.method != "PUT":
        return JsonResponse({
            "error": "PUT request required."
        }, status=400)
    payload = json.loads(request.body)
    if payload.get("body") is not None:
        post.body = payload["body"]
        post.save()
    return HttpResponse(status=204)
@csrf_exempt
def like(request, post_id):
    """Like (PUT) or unlike (DELETE) a post, keeping likeCount in sync.

    PUT creates a Like and returns its id (201). DELETE expects a JSON
    body {"like": <like_id>}, removes that Like and returns 204.
    """
    # NOTE(review): the lookup is scoped to request.user (only the post
    # author's own posts match) -- preserved from the original; confirm
    # this is intended for liking other users' posts.
    try:
        post = Post.objects.get(user=request.user, pk=post_id)
    except Post.DoesNotExist:
        return JsonResponse( {"error": "Post not found."}, status=404)
    if request.method == "PUT":
        like = Like(
            user=request.user,
            post=post)
        like.save()
        post.likeCount = post.likeCount + 1
        post.save()
        return JsonResponse(
            {
                "like_id": like.id
            }, status=201)
    elif request.method == "DELETE":
        data = json.loads(request.body)
        if data.get("like") is not None:
            Like.objects.get(pk=data.get("like")).delete()
        # NOTE(review): the count is decremented even when no like id was
        # supplied -- behavior preserved from the original; confirm.
        post.likeCount = post.likeCount - 1
        post.save()
        return HttpResponse(status=204)
    else:
        # Fix: the old error message claimed only PUT was accepted even
        # though DELETE is handled above.
        return JsonResponse({
            "error": "PUT or DELETE request required."
        }, status=400)
def post(request, post_id):
    """Return a single post as JSON (GET only)."""
    # Query for requested post.
    try:
        post = Post.objects.get(pk=post_id)
    except Post.DoesNotExist:
        return JsonResponse({"error": "Post not found."}, status=404)
    # Return post contents.
    if request.method == "GET":
        return JsonResponse(post.serialize())
    # Fix: the original fell through and implicitly returned None for
    # non-GET methods, which Django rejects at runtime; respond with an
    # explicit 400 instead, matching the sibling views.
    return JsonResponse({"error": "GET request required."}, status=400)
|
""" Generate connected waxman topologies.
Author: Srinivas Narayana (narayana@cs.princeton.edu)
Run as:
python gen-waxman.py <node-count> <alpha> <beta>
To generate topologies over a range of node counts, you could use a bash loop,
like
alpha=0.4; beta=0.2; for i in `echo 20 40 60 80 100 120 140 160 180 200`; do mkdir -p pyretic/evaluations/Tests/waxman-${alpha}-${beta}/$i ; python pyretic/evaluations/gen-waxman.py $i $alpha $beta > pyretic/evaluations/Tests/waxman-${alpha}-${beta}/${i}/original-topo.txt ; done | grep FAIL
alpha=0.4; beta=0.2; for i in `echo 20 40 60 80 100 120 140 160 180 200`; do python pyretic/evaluations/rw_sertopo.py pyretic/evaluations/Tests/waxman-${alpha}-${beta}/${i}/original-topo.txt > pyretic/evaluations/Tests/waxman-${alpha}-${beta}/${i}/topo.txt ; done
"""
import fnss
import networkx as nx
import time
import sys
n=int(sys.argv[1])
alpha=float(sys.argv[2])
beta=float(sys.argv[3])
standard_bw = 100 # dummy bandwidth value to go on topo files
# Keep sampling Waxman graphs until a connected one appears, giving up
# (and reporting FAIL on stderr) after 300 attempts.
disconnected = True
tries = 0
while disconnected:
    topo = fnss.waxman_1_topology(n, alpha=alpha, beta=beta)
    disconnected = not nx.is_connected(topo)
    tries += 1
    if tries == 300:
        sys.stderr.write("%d FAIL\n" % n)
        break
if not disconnected:
    print "edges"
    nodes = list(topo.nodes())
    num_nodes = len(nodes)
    # The first 70% of switches each get an attached host named "p<n>".
    for n in nodes[:int(0.7*num_nodes)]:
        print n, 'p%d' % n
    print "links"
    for (src,dst) in topo.edges():
        print src, dst, standard_bw
|
import RPi.GPIO as GPIO
from time import sleep
GPIO.setwarnings(False)
GPIO.setmode(GPIO.BCM)  # use Broadcom pin numbering
GPIO.setup(17,GPIO.OUT)  # NOTE(review): redundant -- pin 17 is set up again below
#############################
pins=[17,27,22,5,6,13,19]
for i in range(0,7):
    GPIO.setup(pins[i],GPIO.OUT)
for i in range(0,7):
    GPIO.output(pins[i],0)  # start with every pin driven low
###############################
# Three 1 kHz PWM channels, all starting at 10% duty cycle.
x = GPIO.PWM(17,1000) #1 khz
x.start(10)
y = GPIO.PWM(27,1000) #1 khz
y.start(10)
z = GPIO.PWM(22,1000) #1 khz
z.start(10)
# Endless fade loop: ramp the duty cycle up, then back down, forever.
while 1:
    for i in range(0,100,2):
        print "pwm dutycycle is %d"%i
        sleep(0.2)
        x.ChangeDutyCycle(i)
        y.ChangeDutyCycle(i)
        z.ChangeDutyCycle(i)
    for i in range(99,0,-2):
        print "pwm dutycycle is %d"%i
        sleep(0.2)
        x.ChangeDutyCycle(i)
        y.ChangeDutyCycle(i)
        z.ChangeDutyCycle(i)
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import urllib2
# We are just fetching the raw page here, no Javascript or
# other client-side languages will execute.
body = urllib2.urlopen("http://www.google.com") # Returns a file-like object that allows us to read back the body of what the remote web server returns
print(body.read())
"""
In most cases, however, you are going to want more finely grained
control over how you make these requests, including being able to define
specific headers, handle cookies, and create POST requests. urllib2 exposes
a Request class that gives you this level of control. Below is an example of
how to create the same GET request using the Request class and defining a
custom User-Agent HTTP header
"""
# The construction of a Request object is slightly different than our previous example
url = "http://www.duckduckgo.com"
# To create custom headers, we define a headers dictionary, which allows to
# then set the header key and value that we want to use (in this case, Duckbot)
headers = {}
headers['User-agent'] = "Duckbot"
# We then create our Request object and pass the url and the headers dictionary
request = urllib2.Request(url, headers=headers)
# And then pass the Request object to the urlopen function call.
# This returns a normal file-like object that we can use to read in the data
# from the remote website.
response = urllib2.urlopen(request)
print(response.read())
# Close the response explicitly to free the underlying connection.
response.close()
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from tornado.web import RequestHandler, HTTPError
from jinja2.exceptions import TemplateNotFound
class BaseHandler(RequestHandler):
    """Tornado handler base that renders Jinja2 templates from the
    application-wide environment instead of Tornado's own templating."""
    @property
    def env(self):
        # Shared Jinja2 environment owned by the Application object.
        return self.application.env
    @property
    def db(self):
        # Application-wide database handle.
        return self.application.db
    def get_error_html(self, status_code, **kwargs):
        """Render error/<code>.html, falling back to 50x.html, then plain text."""
        try:
            self.render('error/{}.html'.format(status_code))
        except (TemplateNotFound, HTTPError):
            try:
                self.render('error/50x.html', status_code=status_code)
            except (TemplateNotFound, HTTPError):
                self.write("You aren't supposed to see this")
    def render(self, template, **kwargs):
        """Render a Jinja2 template by name; a missing template is a 404."""
        try:
            template = self.env.get_template(template)
        except TemplateNotFound:
            raise HTTPError(404)
        # Expose Tornado's static_url helper to every template.
        self.env.globals['static_url'] = self.static_url
        self.write(template.render(kwargs))
class NoDestinationHandler(RequestHandler):
    """Catch-all handler: any unmatched route yields a 404."""
    def get(self):
        raise HTTPError(404)
class IndexHandler(BaseHandler):
    """Landing page; POSTs simply bounce back to '/'."""
    def get(self):
        self.render('index.html')
    def post(self):
        return self.redirect('/')
|
"""
CartoDB Spatial Analysis Python Library
See:
https://github.com/CartoDB/crankshaft
"""
from setuptools import setup, find_packages
setup(
    name='crankshaft',
    version='0.2.0',
    description='CartoDB Spatial Analysis Python Library',
    url='https://github.com/CartoDB/crankshaft',
    author='Data Services Team - CartoDB',
    author_email='dataservices@cartodb.com',
    license='MIT',
    classifiers=[
        'Development Status :: 3 - Alpha',
        # NOTE(review): 'Intended Audience :: Mapping comunity' is not a
        # valid PyPI trove classifier (and 'comunity' is misspelled);
        # left unchanged since package metadata may be consumed as-is.
        'Intended Audience :: Mapping comunity',
        'Topic :: Maps :: Mapping Tools',
        'License :: OSI Approved :: MIT License',
        'Programming Language :: Python :: 2.7',
    ],
    keywords='maps mapping tools spatial analysis geostatistics',
    packages=find_packages(exclude=['contrib', 'docs', 'tests']),
    extras_require={
        'dev': ['unittest'],
        'test': ['unittest', 'nose', 'mock'],
    },
    # The choice of component versions is dictated by what's
    # provisioned in the production servers.
    # IMPORTANT NOTE: please don't change this line. Instead issue a ticket to systems for evaluation.
    install_requires=['joblib==0.8.3', 'numpy==1.6.1', 'scipy==0.14.0', 'pysal==1.11.2', 'scikit-learn==0.14.1'],
    requires=['pysal', 'numpy', 'sklearn'],
    test_suite='test'
)
|
"""Subcommand for deploying pod."""
import os
import click
from grow.commands import shared
from grow.common import bulk_errors
from grow.common import rc_config
from grow.common import utils
from grow.deployments import stats
from grow.deployments.destinations import base
from grow.extensions import hooks
from grow.performance import docs_loader
from grow.pods import pods
from grow.rendering import renderer
from grow import storage
CFG = rc_config.RC_CONFIG.prefixed('grow.deploy')
@click.command()
@click.argument('deployment_name', default='default')
@shared.pod_path_argument
@click.option('--confirm/--noconfirm', '-c/-f', default=CFG.get('force', True), is_flag=True,
              help='Whether to confirm prior to deployment.')
@click.option('--test/--notest', default=CFG.get('test', True), is_flag=True,
              help='Whether to run deployment tests.')
@click.option('--test_only', default=False, is_flag=True,
              help='Only run the deployment tests.')
@click.option('--auth',
              help='(deprecated) --auth must now be specified'
              ' before deploy. Usage: grow --auth=user@example.com deploy')
@shared.force_untranslated_option(CFG)
@shared.preprocess_option(CFG)
@shared.threaded_option(CFG)
@shared.shards_option
@shared.shard_option
@shared.work_dir_option
@shared.routes_file_option()
@click.pass_context
def deploy(context, deployment_name, pod_path, preprocess, confirm, test,
           test_only, auth, force_untranslated, threaded, shards, shard,
           work_dir, routes_file):
    """Deploys a pod to a destination.

    Pipeline: load pod -> resolve deployment + env -> optional login /
    preprocess -> build routes (optionally filtered/sharded) -> render
    and deploy -> persist the pod cache.
    """
    # --auth after `deploy` is deprecated; it must precede the subcommand.
    if auth:
        text = ('--auth must now be specified before deploy. Usage:'
                ' grow --auth=user@example.com deploy')
        raise click.ClickException(text)
    auth = context.parent.params.get('auth')
    root = os.path.abspath(os.path.join(os.getcwd(), pod_path))
    try:
        pod = pods.Pod(root, storage=storage.FileStorage)
        with pod.profile.timer('grow_deploy'):
            # Always clear the cache when building.
            pod.podcache.reset()
            deployment = pod.get_deployment(deployment_name)
            # use the deployment's environment for preprocessing and later
            # steps.
            if deployment.config.env:
                pod.set_env(deployment.config.env)
            # Untranslated content blocks deployment unless explicitly forced.
            localization = pod.podspec.localization
            require_translations = localization and localization.get(
                'require_translations', False)
            require_translations = require_translations and not force_untranslated
            if auth:
                deployment.login(auth)
            if preprocess:
                pod.preprocess()
            if test_only:
                deployment.test()
                return
            repo = utils.get_git_repo(pod.root)
            pod.router.use_simple()
            # Routes either come from a pre-computed file or are rebuilt.
            if routes_file:
                pod.router.from_data(pod.read_json(routes_file))
            else:
                pod.router.add_all()
            is_partial = False
            # Filter routes based on deployment config.
            for build_filter in deployment.filters:
                is_partial = True
                pod.router.filter(
                    build_filter.type, collection_paths=build_filter.collections,
                    paths=build_filter.paths, locales=build_filter.locales)
            # Shard the routes when using sharding.
            if shards and shard:
                is_partial = True
                pod.router.shard(shards, shard)
            if not work_dir:
                # Preload the documents used by the paths after filtering.
                docs_loader.DocsLoader.load_from_routes(pod, pod.router.routes)
            paths = pod.router.routes.paths
            stats_obj = stats.Stats(pod, paths=paths)
            content_generator = deployment.dump(
                pod, source_dir=work_dir, use_threading=threaded)
            # Give extensions a chance to intercept content before deploy.
            content_generator = hooks.generator_wrapper(
                pod, 'pre_deploy', content_generator, 'deploy')
            deployment.deploy(
                content_generator, stats=stats_obj, repo=repo, confirm=confirm,
                test=test, require_translations=require_translations,
                is_partial=is_partial)
            pod.podcache.write()
    except bulk_errors.BulkErrors as err:
        # Write the podcache files even when there are rendering errors.
        pod.podcache.write()
        bulk_errors.display_bulk_errors(err)
        raise click.Abort()
    except base.Error as err:
        raise click.ClickException(str(err))
    except pods.Error as err:
        raise click.ClickException(str(err))
    return pod
|
#!/usr/bin/env python
"""Mixin class to be used in tests for BlobStore implementations."""
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
import abc
import time
from future.utils import with_metaclass
import mock
from grr_response_core.lib import rdfvalue
from grr_response_server import blob_store
from grr_response_server.databases import mysql_blobs
from grr_response_server.rdfvalues import objects as rdf_objects
from grr.test_lib import stats_test_lib
from grr.test_lib import test_lib
POSITIONAL_ARGS = 0
class BlobStoreTestMixin(
with_metaclass(abc.ABCMeta, stats_test_lib.StatsTestMixin)):
"""Mixin providing tests shared by all blob store tests implementations."""
@abc.abstractmethod
def CreateBlobStore(self):
"""Create a test blob store.
Returns:
A tuple (blob_store, cleanup), where blob_store is an instance of
blob_store.BlobStore to be tested and cleanup is a function which
destroys blob_store, releasing any resources held by it.
"""
def setUp(self):
super(BlobStoreTestMixin, self).setUp()
bs, cleanup = self.CreateBlobStore()
if cleanup is not None:
self.addCleanup(cleanup)
self.blob_store = blob_store.BlobStoreValidationWrapper(bs)
def testCheckBlobsExistOnEmptyListReturnsEmptyDict(self):
self.assertEqual(self.blob_store.CheckBlobsExist([]), {})
def testReadBlobsOnEmptyListReturnsEmptyDict(self):
self.assertEqual(self.blob_store.ReadBlobs([]), {})
def testReadingNonExistentBlobReturnsNone(self):
blob_id = rdf_objects.BlobID(b"01234567" * 4)
result = self.blob_store.ReadBlob(blob_id)
self.assertIsNone(result)
def testReadingNonExistentBlobsReturnsNone(self):
blob_id = rdf_objects.BlobID(b"01234567" * 4)
result = self.blob_store.ReadBlobs([blob_id])
self.assertEqual(result, {blob_id: None})
def testSingleBlobCanBeWrittenAndThenRead(self):
blob_id = rdf_objects.BlobID(b"01234567" * 4)
blob_data = b"abcdef"
self.blob_store.WriteBlobs({blob_id: blob_data})
result = self.blob_store.ReadBlob(blob_id)
self.assertEqual(result, blob_data)
def testMultipleBlobsCanBeWrittenAndThenRead(self):
blob_ids = [rdf_objects.BlobID((b"%d1234567" % i) * 4) for i in range(10)]
blob_data = [b"a" * i for i in range(10)]
self.blob_store.WriteBlobs(dict(zip(blob_ids, blob_data)))
result = self.blob_store.ReadBlobs(blob_ids)
self.assertEqual(result, dict(zip(blob_ids, blob_data)))
def testWriting80MbOfBlobsWithSingleCallWorks(self):
num_blobs = 80
blob_ids = [
rdf_objects.BlobID((b"%02d234567" % i) * 4) for i in range(num_blobs)
]
blob_data = [b"a" * 1024 * 1024] * num_blobs
self.blob_store.WriteBlobs(dict(zip(blob_ids, blob_data)))
result = self.blob_store.ReadBlobs(blob_ids)
self.assertEqual(result, dict(zip(blob_ids, blob_data)))
def testCheckBlobExistsReturnsFalseForMissing(self):
blob_id = rdf_objects.BlobID(b"11111111" * 4)
self.assertFalse(self.blob_store.CheckBlobExists(blob_id))
def testCheckBlobExistsReturnsTrueForExisting(self):
blob_id = rdf_objects.BlobID(b"01234567" * 4)
blob_data = b"abcdef"
self.blob_store.WriteBlobs({blob_id: blob_data})
self.assertTrue(self.blob_store.CheckBlobExists(blob_id))
def testCheckBlobsExistCorrectlyReportsPresentAndMissingBlobs(self):
blob_id = rdf_objects.BlobID(b"01234567" * 4)
blob_data = b"abcdef"
self.blob_store.WriteBlobs({blob_id: blob_data})
other_blob_id = rdf_objects.BlobID(b"abcdefgh" * 4)
result = self.blob_store.CheckBlobsExist([blob_id, other_blob_id])
self.assertEqual(result, {blob_id: True, other_blob_id: False})
@mock.patch.object(mysql_blobs, "BLOB_CHUNK_SIZE", 1)
def testLargeBlobsAreReassembledInCorrectOrder(self):
blob_data = b"0123456789"
blob_id = rdf_objects.BlobID(b"00234567" * 4)
self.blob_store.WriteBlobs({blob_id: blob_data})
result = self.blob_store.ReadBlobs([blob_id])
self.assertEqual({blob_id: blob_data}, result)
@mock.patch.object(mysql_blobs, "BLOB_CHUNK_SIZE", 3)
def testNotEvenlyDivisibleBlobsAreReassembledCorrectly(self):
blob_data = b"0123456789"
blob_id = rdf_objects.BlobID(b"00234567" * 4)
self.blob_store.WriteBlobs({blob_id: blob_data})
result = self.blob_store.ReadBlobs([blob_id])
self.assertEqual({blob_id: blob_data}, result)
def testOverwritingExistingBlobDoesNotRaise(self):
blob_id = rdf_objects.BlobID(b"01234567" * 4)
blob_data = b"abcdef"
for _ in range(2):
self.blob_store.WriteBlobs({blob_id: blob_data})
@mock.patch.object(time, "sleep")
def testReadAndWaitForBlobsWorksWithImmediateResults(self, sleep_mock):
a_id = rdf_objects.BlobID(b"0" * 32)
b_id = rdf_objects.BlobID(b"1" * 32)
blobs = {a_id: b"aa", b_id: b"bb"}
with mock.patch.object(
self.blob_store, "ReadBlobs", return_value=blobs) as read_mock:
results = self.blob_store.ReadAndWaitForBlobs(
[a_id, b_id], timeout=rdfvalue.Duration.From(10, rdfvalue.SECONDS))
sleep_mock.assert_not_called()
read_mock.assert_called_once()
self.assertCountEqual(read_mock.call_args[POSITIONAL_ARGS][0], [a_id, b_id])
self.assertEqual({a_id: b"aa", b_id: b"bb"}, results)
@mock.patch.object(time, "sleep")
def testReadAndWaitForBlobsPollsUntilResultsAreAvailable(self, sleep_mock):
a_id = rdf_objects.BlobID(b"0" * 32)
b_id = rdf_objects.BlobID(b"1" * 32)
effect = [{
a_id: None,
b_id: None
}, {
a_id: b"aa",
b_id: None
}, {
b_id: None
}, {
b_id: b"bb"
}]
with test_lib.FakeTime(rdfvalue.RDFDatetime.FromSecondsSinceEpoch(10)):
with mock.patch.object(
self.blob_store, "ReadBlobs", side_effect=effect) as read_mock:
results = self.blob_store.ReadAndWaitForBlobs(
[a_id, b_id], timeout=rdfvalue.Duration.From(10, rdfvalue.SECONDS))
self.assertEqual({a_id: b"aa", b_id: b"bb"}, results)
self.assertEqual(read_mock.call_count, 4)
self.assertCountEqual(read_mock.call_args_list[0][POSITIONAL_ARGS][0],
[a_id, b_id])
self.assertCountEqual(read_mock.call_args_list[1][POSITIONAL_ARGS][0],
[a_id, b_id])
self.assertCountEqual(read_mock.call_args_list[2][POSITIONAL_ARGS][0],
[b_id])
self.assertCountEqual(read_mock.call_args_list[3][POSITIONAL_ARGS][0],
[b_id])
self.assertEqual(sleep_mock.call_count, 3)
  def testReadAndWaitForBlobsStopsAfterTimeout(self):
    a_id = rdf_objects.BlobID(b"0" * 32)
    b_id = rdf_objects.BlobID(b"1" * 32)
    # b_id is never found: every poll after the first keeps returning None.
    effect = [{a_id: b"aa", b_id: None}] + [{b_id: None}] * 3
    time_mock = test_lib.FakeTime(10)
    sleep_call_count = [0]
    # Fake sleep advances the fake clock, so the 3-second timeout expires
    # without any real waiting.
    def sleep(secs):
      time_mock.time += secs
      sleep_call_count[0] += 1
    with time_mock, mock.patch.object(time, "sleep", sleep):
      with mock.patch.object(
          self.blob_store, "ReadBlobs", side_effect=effect) as read_mock:
        results = self.blob_store.ReadAndWaitForBlobs(
            [a_id, b_id], timeout=rdfvalue.Duration.From(3, rdfvalue.SECONDS))
    # On timeout the missing blob is reported as None, not raised.
    self.assertEqual({a_id: b"aa", b_id: None}, results)
    self.assertGreaterEqual(read_mock.call_count, 3)
    self.assertCountEqual(read_mock.call_args_list[0][POSITIONAL_ARGS][0],
                          [a_id, b_id])
    # Every poll after the first should request only the missing id.
    for i in range(1, read_mock.call_count):
      self.assertCountEqual(read_mock.call_args_list[i][POSITIONAL_ARGS][0],
                            [b_id])
    self.assertEqual(read_mock.call_count, sleep_call_count[0] + 1)
@mock.patch.object(time, "sleep")
def testReadAndWaitForBlobsPopulatesStats(self, sleep_mock):
a_id = rdf_objects.BlobID(b"0" * 32)
b_id = rdf_objects.BlobID(b"1" * 32)
blobs = {a_id: b"aa", b_id: b"bb"}
with mock.patch.object(self.blob_store, "ReadBlobs", return_value=blobs):
with self.assertStatsCounterDelta(2,
blob_store.BLOB_STORE_POLL_HIT_LATENCY):
with self.assertStatsCounterDelta(
2, blob_store.BLOB_STORE_POLL_HIT_ITERATION):
self.blob_store.ReadAndWaitForBlobs([a_id, b_id],
timeout=rdfvalue.Duration.From(
10, rdfvalue.SECONDS))
|
from datetime import datetime
from datetime import timedelta
from kafka import KafkaConsumer
import settings
import json
import logging
import paho.mqtt.client as mqtt
import sys
# Route all root-logger output to stdout at the level configured in settings,
# with a timestamped "<time> - <logger> - <level> - <message>" format.
root = logging.getLogger()
root.setLevel(settings.LOGLEVEL)
ch = logging.StreamHandler(sys.stdout)
ch.setLevel(settings.LOGLEVEL)
formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
ch.setFormatter(formatter)
root.addHandler(ch)
class Consumer():
    """Bridges tweets from a Kafka topic onto an MQTT topic."""
    def __init__(self):
        import socket
        # Resolve the broker hostname once so the Kafka client connects by IP.
        self.remote_ip = \
            socket.gethostbyname(settings.KAFKA_HOST)
        print(self.remote_ip)
        self.remote_port = settings.KAFKA_PORT
        self.__init_mqtt()
        self.__init_kafka()
    def __init_kafka(self):
        # Reads from the beginning of the topic; offsets are committed
        # manually (enable_auto_commit=False) via commit() below, driven
        # by a downstream acknowledgement.
        self.consumer = KafkaConsumer(
            bootstrap_servers=f'{self.remote_ip}:{self.remote_port}',
            api_version=settings.KAFKA_API_VERSION,
            auto_offset_reset='earliest',
            enable_auto_commit=False,
            group_id="tweet_consumer_for_mqtt"
        )
        self.consumer.subscribe([settings.KAFKA_TOPIC_TWEETS])
    def __init_mqtt(self):
        self.mqtt_client = mqtt.Client()
        try:
            self.mqtt_client.username_pw_set(settings.MQTT_USER_SUBSCRIBER, settings.MQTT_PASS_SUBSCRIBER)
        # NOTE(review): bare except silently skips authentication when the
        # settings attributes are missing; consider catching AttributeError.
        except:
            pass
        # NOTE(review): "SUBSCRIVER" looks like a typo replicated in the
        # settings module — renaming must be coordinated with that module.
        self.mqtt_client.connect(settings.MQTT_HOST_SUBSCRIBER, settings.MQTT_PORT_SUBSCRIVER, 60)
        self.is_running = False
    def run(self):
        # Public entry point; guarantees is_running is reset even if the
        # polling loop raises.
        self.is_running = True
        try:
            self.__run()
        finally:
            self.is_running = False
    def __run(self):
        # Poll Kafka for up to 30 seconds, forwarding each record to MQTT
        # together with the offset metadata a consumer needs to commit it.
        self.deadline = datetime.now() + timedelta(seconds=30)
        try:
            while datetime.now() < self.deadline:
                partitions_with_messages = self.consumer.poll(timeout_ms=1000, max_records=1)
                if len(partitions_with_messages) == 0:
                    continue
                for messages in partitions_with_messages.values():
                    for message in messages:
                        decoded_message = str(message.value.decode('utf-8'))
                        mqtt_message = json.dumps({
                            # "action": "COMMIT",
                            "kafka_commit_offset": {
                                "topic": message.topic,
                                "partition": message.partition,
                                "offset": message.offset,
                                "metadata": ''
                            },
                            "message": decoded_message
                        })
                        self.mqtt_client.publish("hypebot/tweet_stream", mqtt_message, qos=0)
        except Exception as ex:
            logging.error(ex)
            pass
    def commit(self, offsets):
        # Commit the given Kafka offsets; restart the polling loop if it
        # is not currently running.
        try:
            self.consumer.commit(offsets)
            if not self.is_running:
                self.run()
        except Exception as ex:
            logging.error(ex)
            pass
import json
# Load the saved record and print the stored ID number ("cedula").
with open("files/monitoria.json", "r") as source:
    registro = json.load(source)
print(registro["cedula"])
from __future__ import annotations
import random
from typing import List, Tuple
from functools import reduce
MINIMUM_INITIAL_PILE_SIZE = 1

# An action is a (pile_index, objects_to_take) pair.
ACTION = Tuple[int, int]


class Nim:
    """State machine for the game of Nim.

    `piles` holds the object count of each pile; the two players (0 and 1)
    alternate removing 1..pile objects from a single pile. The player who
    takes the last object wins: `act` returns +1 when player 0 wins,
    -1 when player 1 wins, and 0 while the game continues.
    """

    def __init__(self, piles: int = 3, objects: int = 20, seed: int | None = None) -> None:
        """Create a game of `piles` piles sharing `objects` objects.

        Every pile starts with MINIMUM_INITIAL_PILE_SIZE objects; the
        remainder is distributed uniformly at random (deterministic for a
        given `seed`). Note: `int or None` in the original annotation was
        a bug — the proper spelling is `int | None`.
        """
        random.seed(seed)
        assert objects >= piles * MINIMUM_INITIAL_PILE_SIZE
        objects -= piles * MINIMUM_INITIAL_PILE_SIZE
        self.piles: List[int] = [
            MINIMUM_INITIAL_PILE_SIZE for _ in range(piles)]
        self.current_player: int = 0
        self.done: bool = False
        # Distribute the remaining objects randomly over the piles.
        for _ in range(objects):
            self.piles[random.randint(0, piles - 1)] += 1

    def possible_actions(self, player=None) -> List[ACTION]:
        """Return every legal (pile, amount) action for the current state.

        `player` is accepted for interface compatibility but unused: the
        legal moves do not depend on whose turn it is. A comprehension
        replaces the original reduce(), which raised TypeError on an
        empty pile list.
        """
        return [(pile_idx, take)
                for pile_idx, pile in enumerate(self.piles)
                for take in range(1, pile + 1)]

    def act(self, action: ACTION) -> int:
        """Apply `action` for the current player.

        Returns 0 while the game continues, +1 if player 0 just won, and
        -1 if player 1 just won; on a win `done` holds that same nonzero
        score (truthy) rather than True. (Annotation corrected from
        `-> bool`: the method has always returned an int score.)
        """
        pile, objects_taken = action
        # The chosen pile must contain enough objects for the move.
        assert self.piles[pile] >= objects_taken
        self.piles[pile] -= objects_taken
        self.done = sum(self.piles) == 0
        if not self.done:
            self.current_player = 1 - self.current_player
            return 0
        self.done = 1 if self.current_player == 0 else -1
        return self.done

    def act_random(self) -> int:
        """Play one uniformly random legal action; returns act()'s score."""
        return self.act(random.choice(self.possible_actions()))

    def copy(self) -> Nim:
        """Return an independent copy of this game state.

        Built with __new__ so that copying does NOT re-seed the global
        random generator — the previous `Nim(0, 0, 0)` construction
        called random.seed(0) as a side effect, perturbing every
        subsequent act_random() in the program.
        """
        _copy = Nim.__new__(Nim)
        _copy.piles = self.piles[:]
        _copy.current_player = self.current_player
        _copy.done = self.done
        return _copy

    def get_state(self):
        """Return the pile sizes as a new list (callers may mutate it)."""
        return self.piles.copy()

    def get_state_name(self):
        """Return a compact '(pile,count)(pile,count)...' state string."""
        return ''.join(f'({pile},{objects})'
                       for pile, objects in enumerate(self.piles))

    def set_state(self, state, done, current_player):
        """Overwrite piles/done/current_player; the state list is copied."""
        self.done = done
        self.current_player = current_player
        self.piles = state.copy()
|
from django.urls import path
from . import views
# Route table for the tattoo app: home page, a detail view addressed by
# numeric id, and a search endpoint.
urlpatterns = [
    path('', views.tattoshome, name='index'),
    # <int:tatto_id> is passed to the view as the `tatto_id` kwarg.
    path('<int:tatto_id>', views.ver_tatto, name='ver_tatto'),
    path('busca/', views.busca, name='busca')
]
from django.contrib import admin
from .models import Addresses, Gymnasts, Meets, University, University_Meet_Participation
# Expose each model in the Django admin site with the default ModelAdmin.
admin.site.register(Addresses)
admin.site.register(Gymnasts)
admin.site.register(Meets)
admin.site.register(University)
admin.site.register(University_Meet_Participation)
import os.path
from commonroad.common.solution import Solution, CommonRoadSolutionWriter
# Load planner module
from iterator import scenario_iterator_interactive, scenario_iterator_non_interactive, _search_interactive_scenarios
from main_interactive_CRplanner import motion_planner_interactive
def save_solution(solution: Solution, path: str) -> None:
    """Write `solution` to `path` as a pretty-printed CommonRoad solution
    file, overwriting any existing file."""
    writer = CommonRoadSolutionWriter(solution)
    return writer.write_to_file(
        output_path=path,
        overwrite=True,
        pretty=True
    )
# Run Main Process
if __name__ == "__main__":
    scenario_dir = "/commonroad/scenarios"
    solution_dir = "/commonroad/solutions"
    # Solving all non-interactive scenarios is currently disabled:
    # for scenario, planning_problem_set in scenario_iterator_non_interactive(scenario_dir):
    #     print(f"Processing scenario {str(scenario.scenario_id)} ...")
    #     solution = motion_planner(scenario)
    #     save_solution(solution, solution_dir)
    # Solve the second half of the interactive scenarios.
    interactive_paths = _search_interactive_scenarios(scenario_dir)
    n_interactive_scenarios = len(interactive_paths)
    last_half_scenario_path = interactive_paths[n_interactive_scenarios // 2:]
    for scenario_path in last_half_scenario_path:
        print(f"Processing scenario {os.path.basename(scenario_path)} ...")
        try:
            solution = motion_planner_interactive(scenario_path)
            save_solution(solution, solution_dir)
        # Fix: catch Exception instead of a bare `except:` so Ctrl-C
        # (KeyboardInterrupt) and SystemExit still abort the whole run.
        except Exception:
            print('-'*20, 'cannot solve this scenario', scenario_path, '-'*20)
        else:
            print('-'*20, scenario_path, 'solved already', '-'*20)
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Date : 2020-03-14 18:04:48
# @Author : mutudeh (josephmathone@gmail.com)
# @Link : ${link}
# @Version : $Id$
import os
class Solution(object):
    def letterCasePermutation(self, S):
        """Return all strings obtainable from S by toggling letter case.

        :type S: str
        :rtype: List[str]
        """
        if not S:
            return []
        self.fin_list = []
        self.dfs(S, [])
        return self.fin_list

    def dfs(self, S, tem_path):
        """DFS over the remaining suffix S, accumulating chars in tem_path."""
        if not S:
            # All characters consumed: record one complete permutation.
            self.fin_list.append("".join(tem_path))
            return
        if not S[0].isalpha():
            # Fix: non-letters have no case, so emit them exactly once.
            # The original only special-cased "0"-"9", which produced
            # duplicate results for inputs containing '-', '#', etc.
            tem_path.append(S[0])
            self.dfs(S[1:], tem_path)
            tem_path.pop()
        else:
            # Branch on both the lower- and upper-case variants.
            tem_path.append(S[0].lower())
            self.dfs(S[1:], tem_path)
            tem_path.pop()
            tem_path.append(S[0].upper())
            self.dfs(S[1:], tem_path)
            tem_path.pop()
        return
# Quick manual check: digits only, so exactly one permutation is expected.
solver = Solution()
print(solver.letterCasePermutation("12345"))
#
# oci-email-send-python version 1.0.
#
# Copyright (c) 2020 Oracle, Inc.
# Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl.
#
import io, os, json, smtplib, email.utils, random, glob
from email.mime.multipart import MIMEMultipart
from email.mime.base import MIMEBase
from email.mime.text import MIMEText
from email import encoders
import datetime as DT
from cryptography.fernet import Fernet
def write_to_file(prefix, content):
    """Append `content` to a uniquely named, date-stamped log file.

    The file name is "<prefix>_<YYYY-MM-DD>_<random>.txt"; the random
    component makes each call write its own file. The content line is
    prefixed with today's date.
    """
    n = str(random.random())
    today_date = str(DT.date.today())
    # Context manager guarantees the handle is closed even if write() fails
    # (the original used explicit open()/close()).
    with open(prefix + "_" + today_date + "_" + n + ".txt", "a") as f:
        f.write(today_date + ": " + content)
def send_email(mail_contents, unique_mail_farm_dict):
    """Send one weekly crop-health report email per recipient.

    mail_contents: iterable of recipient email addresses.
    unique_mail_farm_dict: maps each recipient to the list of farm names
        they monitor; used to select graph attachments and build links.

    Returns False when SMTP credentials cannot be loaded; otherwise a
    list of {recipient: "SUCCESS" | "FAILED"} dicts, one per recipient.
    """
    try:
        smtp_username, smtp_password = get_cred()
    except Exception as ex:
        print("ERROR in getting SMTP details: ", ex, flush=True)
        write_to_file("ERROR-SMTP", "ERROR in getting SMTP details: " + str(ex))
        return False
    smtp_host = "smtp.us-ashburn-1.oraclecloud.com"
    smtp_port = 587
    sender_email = "Connect@DeepVisionTech.AI"
    sender_name = "Crop Health (DeepVisionTech.AI)"
    STATUS = []
    for recipient in mail_contents:
        farms_list = unique_mail_farm_dict.get(recipient)
        # --- 1. Build the subject line and body header. ---
        try:
            fromDate = DT.date.today()
            toDate = fromDate - DT.timedelta(days=7)
            subject = "Crop Health for the week " + str(toDate) + " to " + str(fromDate)
            body = "Hello there, <br><br>We hope you are doing well.<br>This email is regarding your weekly Crop Health for the week " \
                + str(toDate) + " to " + str(fromDate) + "."
        except Exception as ex:
            print("ERROR in generating email headers: ", ex, flush=True)
            STATUS.append({recipient: "FAILED"})
            continue
        # --- 2. Assemble the MIME message with per-farm graph attachments. ---
        try:
            msg = MIMEMultipart('alternative')
            msg['Subject'] = subject
            msg['From'] = email.utils.formataddr((sender_name, sender_email))
            msg['To'] = recipient
            # Graph images are produced elsewhere named "<recipient>_<farm>.png".
            files_for_email = glob.glob(recipient + '*.png')
            print(">>> Got " + str(len(files_for_email)) + " image files for email " + recipient)
            if len(files_for_email) > 0:
                # Track which farms still lack an attached graph.
                farms_list_copy = farms_list.copy()
                for img_file in files_for_email:
                    ffile = os.path.basename(img_file)
                    print("FILE: " + ffile)
                    frm_in_ffile = ffile[ffile.find('_') + 1:ffile.find('.png')]
                    print("Farm name in file: " + frm_in_ffile)
                    if frm_in_ffile in farms_list_copy:
                        # Fix: close the attachment file via a context manager
                        # (the original leaked one handle per attachment).
                        with open("./" + ffile, "rb") as attachment:
                            p = MIMEBase('application', 'octet-stream')
                            p.set_payload(attachment.read())
                        encoders.encode_base64(p)
                        p.add_header('Content-Disposition', "attachment; filename=" + ffile)
                        msg.attach(p)
                        farms_list_copy.remove(frm_in_ffile)
                body = body + "<br><br>Attached graph(s) show health status of all the monitored farms for current and previous 2 months."
                print(len(farms_list_copy))
                if len(farms_list_copy) > 0:
                    body = body + "<br>NOTE: Satellite images for few farms are unavailable or unusable. So, health graphs are not attached for these farms: "
                    fff = ""
                    for kk in farms_list_copy:
                        print("## one or more farms did not have images...adding their name: " + str(kk))
                        fff = fff + str(kk) + ", "
                    body = body + fff
            else:
                print("No images for this email...so, not attaching anything.")
                body = body + '<br><br>NOTE: Satellite images are unavailable or unusable. So, we are unable to show health graph.'
            print("len(farms_list): " + str(len(farms_list)))
            body = getBodyContent(body, recipient, farms_list)
            msg.attach(MIMEText(body, 'html'))
        except Exception as ex:
            print("ERROR in attaching images to email: ", ex, flush=True)
            STATUS.append({recipient: "FAILED"})
            continue
        # --- 3. Deliver over SMTP with STARTTLS. ---
        try:
            server = smtplib.SMTP(smtp_host, smtp_port)
            server.ehlo()
            server.starttls()
            server.ehlo()
            server.login(smtp_username, smtp_password)
            server.sendmail(sender_email, recipient, msg.as_string())
            server.close()
        except Exception as ex:
            print("ERROR in sending email: ", ex, flush=True)
            # BUG FIX: the original tested `(success_flag1 == True) and
            # (success_flag2 == True)` AFTER the send attempt, which is
            # always True at that point, so every successfully delivered
            # email was recorded as FAILED. Status now depends on the
            # actual send outcome.
            STATUS.append({recipient: "FAILED"})
            continue
        print("INFO: Email successfully sent!", flush=True)
        STATUS.append({recipient: "SUCCESS"})
    return STATUS
def getBodyContent(body, mail, farms_list):
    """Append the standard report footer and unsubscribe links to `body`.

    Adds the fixed explanatory text, an "All farms" unsubscribe link for
    `mail`, and one per-farm unsubscribe link for each name in
    `farms_list`. Returns the extended body.
    """
    URL_TO_INVOKE_FN = "https://ackamrxc4anoos3auilnhwzt2q.apigateway.us-ashburn-1.oci.customer-oci.com/delsub/delsub"
    footer = (
        "<br>NOTE: In case Satellite images for any farm is unavailable or unusable, our system 'predicts' the health of farm using previous 8 months' health data. "
        "<br>Health abnormalities are categorized as below: "
        "<br>     1. 'mild' - variation upto 'Sensitivity lower threshold' chosen for the farm"
        "<br>     2. 'medium' - variation upto 10% of 'Sensitivity lower threshold' chosen for the farm"
        "<br>     3. 'severe' - variation beyond 10% of 'Sensitivity lower threshold' chosen for the farm"
        "<br><br>Thanks & Regards,<br>Team <a href='https://DeepVisionTech.AI'>DeepVisionTech Pvt. Ltd.</a>"
        "<br><br>Click here to unsubscribe from email notifications: "
        f"<a href='{URL_TO_INVOKE_FN}?mail={mail}&farm=all'>All farms</a>"
    )
    per_farm_links = "".join(
        f" | <a href='{URL_TO_INVOKE_FN}?mail={mail}&farm={frm}'>{frm}</a>"
        for frm in farms_list
    )
    return body + footer + per_farm_links
def get_cred():
    """Load the SMTP username and Fernet-decrypted password.

    Reads the symmetric key from key.key and "Name=Value" credential
    lines from CredFile.ini; only the Password value is encrypted.

    Returns a (username, password) tuple.
    """
    cred_filename = 'CredFile.ini'
    key_file = 'key.key'
    # Fix: open via the key_file variable (the original defined it but
    # re-hardcoded the 'key.key' literal) and use context managers.
    with open(key_file, 'r') as key_in:
        key = key_in.read().encode()
    # If you want the Cred file to be of one
    # time use uncomment the below line
    # os.remove(key_file)
    f = Fernet(key)
    with open(cred_filename, 'r') as cred_in:
        lines = cred_in.readlines()
    config = {}
    for line in lines:
        # split('=', 1) keeps any '=' characters inside the value intact.
        tuples = line.rstrip('\n').split('=', 1)
        if tuples[0] in ('Username', 'Password'):
            config[tuples[0]] = tuples[1]
    username = config['Username']
    passwd = f.decrypt(config['Password'].encode()).decode()
    return username, passwd
if __name__ == '__main__':
    mail_contents = ["jay.test.test1@gmail.com"]  # NOTE: update with email ids to test
    # Fix: send_email() requires a recipient -> farm-list mapping as its
    # second argument; the original call omitted it and raised TypeError
    # before any email could be sent.
    unique_mail_farm_dict = {mail: [] for mail in mail_contents}
    status = send_email(mail_contents, unique_mail_farm_dict)
    if status == False:
        print('UNABLE TO SEND EMAILs...could not retrieve SMTP server details')
        write_to_file('ERROR-EMAIL', 'UNABLE TO SEND EMAILs...could not retrieve SMTP server details')
    else:
        print(status)
        write_to_file('STATUS-EMAIL', str(status))
from setuptools import setup, find_packages
# Package metadata for installation; non-source directories (tests, data,
# training runs, saved models) are excluded from package discovery.
setup(
    name='deepcovidnet',
    version='0.1',
    packages=find_packages(exclude=['tests', 'data', 'runs', 'models'])
    #can add entrypoints here
)
|
# -*- coding: utf-8 -*-
"""
########## Term Project ############
# #
# owner: @author Sterling Engle #
# #
# Due Jun 24, 2021 at 11:59 PM PDT #
# Finished: Jun 19, 2021 #
#----------------------------------#
# CSULB CECS 343 Intro to S/W Engr #
# Professor Phuong Nguyen #
####################################
"""
# Tenant provides an object type that stores an apartment number and tenant
# name in private memory, and public methods used with this object.
class Tenant:
    """Stores an apartment number and tenant name in private attributes.

    Instances compare (== and <) by tenant name only, ignoring the
    apartment number. The no-op __del__ of the original was removed;
    __hash__ was added so instances remain usable in sets and as dict
    keys (defining __eq__ without __hash__ makes a class unhashable).
    """

    # __init__ is the class constructor
    # @param aptNum is the integer apartment number
    # @param tenantName is the name of our tenant
    def __init__(self, aptNum, tenantName):
        self.__aptNumber = aptNum
        self.__name = tenantName

    # getApt returns the apartment number
    # @return __aptNumber
    def getApt(self):
        return self.__aptNumber

    # getTenant returns the tenant name
    # @return __name
    def getTenant(self):
        return self.__name

    # getAptTenant returns both fields as a tuple
    # @return __aptNumber, __name
    def getAptTenant(self):
        return self.__aptNumber, self.__name

    # aptOccupied reports whether a non-empty tenant name is present
    def aptOccupied(self):
        return self.__name is not None and self.__name != ""

    # __lt__ orders tenants alphabetically by name
    # @param other
    def __lt__(self, other):
        return self.__name < other.getTenant()

    # __eq__ compares tenants by name only
    # @param other
    def __eq__(self, other):
        return self.__name == other.getTenant()

    # __hash__ hashes by name, staying consistent with __eq__
    def __hash__(self):
        return hash(self.__name)

    # __str__ returns the apartment (right-aligned, 5 columns) and tenant
    def __str__(self):
        return f"{self.getApt():5d} {self.getTenant()}"
|
from __future__ import division
try:
    # Python 2 names.
    from itertools import izip_longest as zip_longest
    base_value = long
except (ImportError, NameError):
    # Fix: catch the specific exceptions instead of a bare `except:` —
    # Python 3 raises ImportError for izip_longest and would raise
    # NameError for `long`.
    from itertools import zip_longest
    base_value = int
from random import randint


class FieldValue(base_value):
    """
    Class for operating on finite fields with overloaded operators.
    The field modulus is replicated on each field value.
    **MOD MUST BE PRIME FOR CORRECT DIVISION**
    """
    def __new__(cls, value, mod):
        # Reduce into the field before the immutable int value is fixed.
        return base_value.__new__(cls, base_value(value % mod))

    def __init__(self, value, mod):
        # Only the modulus needs storing; the numeric value was already set
        # in __new__. (The original also called base_value.__init__ on a
        # detached temporary int — a confusing no-op, removed.)
        self.mod = mod

    def __bool__(self):
        return base_value(self) != 0

    def __add__(self, other):
        return FieldValue(base_value.__add__(self, base_value(other)), self.mod)

    def __radd__(self, other):
        # Addition is commutative in the field.
        return FieldValue(base_value.__add__(self, base_value(other)), self.mod)

    def __sub__(self, other):
        result = base_value.__sub__(self, base_value(other))
        # Normalize negative differences back into [0, mod).
        while result < 0:
            result += self.mod
        return FieldValue(result, self.mod)

    def __rsub__(self, other):
        result = base_value.__sub__(base_value(other), self)
        while result < 0:
            result += self.mod
        return FieldValue(result, self.mod)

    def __mul__(self, other):
        return FieldValue(base_value(self) * base_value(other % self.mod), self.mod)

    def __rmul__(self, other):
        return FieldValue(base_value(other % self.mod) * base_value(self), self.mod)

    def __div__(self, other):
        # Field division: multiply by the multiplicative inverse.
        return self * FieldValue(other, self.mod).inverse()

    def __rdiv__(self, other):
        return other * self.inverse()

    def __truediv__(self, other):
        return self.__div__(other)

    def __rtruediv__(self, other):
        return self.__rdiv__(other)

    def __pow__(self, other):
        # Three-argument pow() performs fast modular exponentiation.
        return FieldValue(pow(base_value(self), base_value(other), self.mod), self.mod)

    def __eq__(self, other):
        if other is None: return False
        return base_value(self) == base_value(other) % self.mod

    def inverse(self):
        """
        Returns the multiplicative inverse such that `a.inverse() * a == 1`.
        The modulus must be a prime for this to work.
        """
        # Extended Euclidean algorithm over (self, mod).
        a, n = base_value(self), self.mod
        t = 0
        newt = 1
        r = n
        newr = a
        while newr != 0:
            quotient = r // newr
            t, newt = newt, t - quotient * newt
            r, newr = newr, r - quotient * newr
        if r > 1: raise ValueError('Not invertible {}'.format(self))
        if t < 0: t += n
        return FieldValue(t, self.mod)

    def __repr__(self):
        # Hexadecimal value followed by the modulus, e.g. '3(%7)'.
        return '{:x}(%{:x})'.format(int(self), self.mod)

    def __str__(self):
        return repr(self)

    def __hash__(self):
        return base_value(self)
|
# pylint: disable=missing-docstring, invalid-name, too-many-public-methods, line-too-long
"""test indexedlist"""
from __future__ import absolute_import
import unittest
from dicetables.tools import indexedvalues as iv
class TestIndexedValues(unittest.TestCase):
    """Tests for dicetables.tools.indexedvalues: an events table stored as a
    dense list of occurrences plus the index of its first event."""
    # Shared helper: verify both the start index and the backing list.
    def assert_indexed_values(self, indexed_values, start_index, values):
        self.assertEqual(indexed_values.start_index, start_index)
        self.assertEqual(indexed_values.raw_values, values)
    def test_make_start_index_and_list_start_zero(self):
        start_index, lst = iv.make_start_index_and_list(sorted({0: 1, 1: 1}.items()))
        self.assertEqual(start_index, 0)
        self.assertEqual(lst, [1, 1])
    def test_make_start_index_and_list_start_other(self):
        start_index, lst = iv.make_start_index_and_list(sorted({-2: 1, -1: 1}.items()))
        self.assertEqual(start_index, -2)
        self.assertEqual(lst, [1, 1])
    def test_make_start_index_and_list_gap_in_values(self):
        # Missing keys between events become explicit zero occurrences.
        start_index, lst = iv.make_start_index_and_list(sorted({-2: 1, 1: 1}.items()))
        self.assertEqual(start_index, -2)
        self.assertEqual(lst, [1, 0, 0, 1])
    def test_generated_indexed_values(self):
        test = iv.generate_indexed_values([(1, 1), (2, 2)])
        self.assert_indexed_values(test, 1, [1, 2])
    def test_generated_indexed_values_gap_in_list(self):
        test = iv.generate_indexed_values([(1, 1), (3, 2), (5, 5)])
        self.assert_indexed_values(test, 1, [1, 0, 2, 0, 5])
    def test_generate_indexed_values_from_dict(self):
        test = iv.generate_indexed_values_from_dict({1: 1})
        self.assert_indexed_values(test, 1, [1])
    def test_generate_indexed_values_from_dict_gap_in_values(self):
        test = iv.generate_indexed_values_from_dict({5: 1, 3: 3, 1: 1})
        self.assert_indexed_values(test, 1, [1, 0, 3, 0, 1])
    def test_IndexedValues_init(self):
        test = iv.IndexedValues(1, [1, 3])
        self.assert_indexed_values(test, 1, [1, 3])
    def test_IndexedValues_init_is_mutated_list_safe(self):
        # The constructor must copy its input list, not alias it.
        the_list_to_mutate = [1, 2, 3]
        test = iv.IndexedValues(0, the_list_to_mutate)
        the_list_to_mutate[0] = 5
        self.assertEqual(test.raw_values, [1, 2, 3])
    def test_IndexedValues_start_index(self):
        test = iv.IndexedValues(3, [1])
        self.assertEqual(test.start_index, 3)
    def test_IndexedValues_values(self):
        test = iv.IndexedValues(3, [1, 2])
        self.assertEqual(test.raw_values, [1, 2])
    def test_indexedValues_range(self):
        # index_range is inclusive of both the first and last index.
        test = iv.IndexedValues(3, [1, 2, 3])
        self.assertEqual(test.index_range, (3, 5))
    def test_IndexedValues_get_dict_single_value(self):
        test = iv.IndexedValues(5, [2])
        self.assertEqual(test.get_dict(), {5: 2})
    def test_IndexedValues_get_dict_does_not_include_zeroes(self):
        test = iv.IndexedValues(1, [2, 0, 3])
        self.assertEqual(test.get_dict(), {1: 2, 3: 3})
    def test_indexedValues_get_value_at_key_within_range(self):
        test = iv.IndexedValues(3, [1, 2, 3])
        self.assertEqual(test.get_value_at_key(4), 2)
    def test_indexedValues_get_value_at_key_out_of_range(self):
        # Keys outside the stored range report zero occurrences.
        test = iv.IndexedValues(3, [1, 2, 3])
        self.assertEqual(test.get_value_at_key(2), 0)
        self.assertEqual(test.get_value_at_key(6), 0)
    def test_IndexedValues_change_list_len_with_zeroes_max_size_offset_zero(self):
        self.assertEqual(iv.change_list_len_with_zeroes([1, 2, 3], 3, 0), [1, 2, 3])
    def test_change_list_len_with_zeroes_other_case_offset_zero(self):
        self.assertEqual(iv.change_list_len_with_zeroes([1, 2, 3], 5, 0), [1, 2, 3, 0, 0])
    def test_change_list_len_with_zeroes_no_adds(self):
        self.assertEqual(iv.change_list_len_with_zeroes([1, 2, 3], 3, 0), [1, 2, 3])
    def test_change_list_len_with_zeroes_offset_and_zeros_is_total_size(self):
        self.assertEqual(iv.change_list_len_with_zeroes([1, 2, 3], 5, 2), [0, 0, 1, 2, 3])
    def test_change_list_len_with_zeroes_offset_and_zeros_lt_total_size(self):
        self.assertEqual(iv.change_list_len_with_zeroes([1, 2, 3], 10, 2), [0, 0, 1, 2, 3, 0, 0, 0, 0, 0])
    def test_equalize_len_demonstration(self):
        # Shows how two lists with different start indices are padded to a
        # common length before element-wise combination.
        lower = [1, 2, 3, 4, 5, 6]
        higher = [1, 2, 3]
        diff_in_start_indices = 2
        total_size = 6
        self.assertEqual(iv.change_list_len_with_zeroes(lower, total_size, 0),
                         [1, 2, 3, 4, 5, 6])
        self.assertEqual(iv.change_list_len_with_zeroes(higher, total_size, diff_in_start_indices),
                         [0, 0, 1, 2, 3, 0])
    def test_add_many_empty(self):
        self.assertEqual(iv.add_many(), 0)
    def test_add_many_non_empty(self):
        self.assertEqual(iv.add_many(-1, 0, 1, 2, 3), 5)
    def test_get_events_list_normal_case(self):
        # Each value is scaled by `occurrences` and shifted by `offset`.
        occurrences = 3
        new_size = 6
        offset = 2
        event_list = iv.get_events_list([1, 2, 3], occurrences, new_size, offset)
        self.assertEqual(event_list, [0, 0, 3, 6, 9, 0])
    def test_get_events_list_empty(self):
        occurrences = 3
        new_size = 6
        offset = 2
        event_list = iv.get_events_list([], occurrences, new_size, offset)
        self.assertEqual(event_list, [0, 0, 0, 0, 0, 0])
    def test_get_events_list_occurrences_are_one(self):
        occurrences = 1
        new_size = 6
        offset = 2
        event_list = iv.get_events_list([1, 2, 3], occurrences, new_size, offset)
        self.assertEqual(event_list, [0, 0, 1, 2, 3, 0])
    def test_IndexedValues_combine_with_events_list_normal_case(self):
        test = iv.IndexedValues(5, [1, 2, 1])
        input_dict = {1: 2, 2: 1}
        """
        [2, 4, 2, 0] +
        [0, 1, 2, 1]
        """
        self.assert_indexed_values(test.combine_with_dictionary(input_dict), 6, [2, 5, 4, 1])
    def test_IndexedValues_combine_with_events_list_gaps_in_list(self):
        test = iv.IndexedValues(5, [1, 2, 1])
        input_dict = {1: 2, 3: 1}
        """
        [2, 4, 2, 0, 0] +
        [0, 0, 1, 2, 1]
        """
        self.assert_indexed_values(test.combine_with_dictionary(input_dict), 6, [2, 4, 3, 2, 1])
    def test_IndexedValues_combine_with_events_list_negative_events(self):
        test = iv.IndexedValues(5, [1, 2, 1])
        input_dict = {-1: 2, 0: 1}
        """
        [2, 4, 2, 0] +
        [0, 1, 2, 1]
        """
        self.assert_indexed_values(test.combine_with_dictionary(input_dict), 4, [2, 5, 4, 1])
# Run the test suite when executed directly.
if __name__ == '__main__':
    unittest.main()
|
import ijson
import numpy
import math
import pandas as pd
import json
from glom import glom
from math import radians, sin, cos, sqrt, asin
def parse_float(x):
    """Coerce x to float; return 0 when it cannot be parsed.

    Narrowed from a blanket `except Exception` to the two exceptions
    float() actually raises for bad input, so unrelated bugs are not
    silently swallowed.
    """
    try:
        return float(x)
    except (TypeError, ValueError):
        return 0
def div_prevent_zero(a, b):
    """Return a / b, or 0 whenever the divisor is not strictly positive.

    Note this guards against negative divisors as well as zero.
    """
    return a / b if b > 0 else 0
# from rosetta code
def haversine(lat1, lon1, lat2, lon2):
    """Great-circle distance between two (lat, lon) points, in kilometers."""
    R = 6372.8  # Earth radius in kilometers
    phi1 = radians(lat1)
    phi2 = radians(lat2)
    half_dlat = radians(lat2 - lat1) / 2
    half_dlon = radians(lon2 - lon1) / 2
    h = sin(half_dlat) ** 2 + cos(phi1) * cos(phi2) * sin(half_dlon) ** 2
    c = 2 * asin(sqrt(h))
    return R * c
def subgroup_distance(subgroup, long_col, lat_col, distance_col_name):
    """Add a column of row-to-row haversine distances (in metres).

    The first row gets 0; each subsequent row gets the distance from the
    previous row's position. The dataframe is mutated in place and also
    returned. (Cleanup: removed the dead `ling1`/pre-initialized locals
    and the manual index counter of the original.)
    """
    distances = []
    previous = None
    for _, row in subgroup.iterrows():
        current = [row[lat_col], row[long_col]]
        if previous is None:
            distances.append(0)
        else:
            diff_km = haversine(current[0], current[1], previous[0], previous[1])
            distances.append(diff_km * 1000)  # km -> m
        previous = current
    subgroup[distance_col_name] = distances
    return subgroup
def subgroup_timedelta(subgroup, date_col, timediff_col):
    """Add a column of row-to-row time differences in seconds.

    The first row gets 0; later rows get the gap to the previous row.
    NOTE: uses Timedelta.seconds, which ignores whole days — gaps of a
    day or more wrap around. The dataframe is mutated in place and
    returned.
    """
    deltas = []
    previous = None
    for _, row in subgroup.iterrows():
        current = row[date_col]
        if previous is None:
            deltas.append(0)
        else:
            deltas.append(pd.Timedelta(current - previous).seconds)
        previous = current
    subgroup[timediff_col] = deltas
    return subgroup
def compute_speed_kn(subgroup, dist_col, time_col, speed_col):
    """Add a speed column in knots from distance (m) and time (s) columns.

    The first row gets 0.0; later rows get dist/time converted to knots.

    BUG FIX: metres/second convert to knots by MULTIPLYING by ~1.9438
    (1 m/s = 1.943844 kn). The original divided by 1.94, which converts
    in the wrong direction and produced values ~3.8x too small.
    """
    MS_TO_KNOTS = 1.943844
    speeds = []
    first = True
    for _, row in subgroup.iterrows():
        if first:
            speeds.append(0.0)
            first = False
        else:
            metres_per_second = div_prevent_zero(row[dist_col], row[time_col])
            speeds.append(metres_per_second * MS_TO_KNOTS)
    subgroup[speed_col] = speeds
    return subgroup
def parseInputFile(filename, messageTypes=(1, 2, 3, 18, 19, 27)):
    """Stream AIS position reports from a newline-delimited JSON file.

    Keeps only records whose Message.MessageID is in `messageTypes`
    (default: the positional report types) and returns a list of flat
    dicts with MessageID, UserID, Longitude, Latitude and UTCTimeStamp.

    Cleanup: default changed from a mutable list to a tuple, the unused
    `good_columns` local removed, and the file handled with a context
    manager.
    """
    data = []
    with open(filename, 'r') as input_file:
        # ijson streams one JSON object at a time, so arbitrarily large
        # files are handled without loading them fully into memory.
        jsonobj = ijson.items(input_file, '', multiple_values=True)
        for row in (o for o in jsonobj if o['Message']['MessageID'] in messageTypes):
            message = row['Message']
            data.append({
                "MessageID": message['MessageID'],
                "UserID": message['UserID'],
                "Longitude": message['Longitude'],
                "Latitude": message['Latitude'],
                "UTCTimeStamp": row['UTCTimeStamp'],
            })
    return data
def generate_geojson_feature(lat, lon, vessel_ID, time):
    """Build a GeoJSON Point feature for one vessel position report.

    Note GeoJSON coordinate order: [longitude, latitude].
    """
    return {
        "type": "Feature",
        "geometry": {"type": "Point", "coordinates": [lon, lat]},
        "properties": {"name": vessel_ID, "time": time},
    }
def geojson_list(feat_list):
    """Wrap a list of features in a GeoJSON FeatureCollection object."""
    return {"type": "FeatureCollection", "features": feat_list}
def dataframe_to_geojson_feature_list(subgroup, all_zero_speed_geojson_list):
    """Append one GeoJSON point feature per dataframe row to the given
    list (the list is mutated in place)."""
    all_zero_speed_geojson_list.extend(
        generate_geojson_feature(row['Latitude'], row['Longitude'],
                                 row['UserID'], str(row['time']))
        for _, row in subgroup.iterrows()
    )
def write_geojson_file(features, filename, chunksize = 5000):
    """Serialize `features` to `filename` as JSON.

    NOTE(review): `chunksize` is kept for interface compatibility but is
    currently unused — the document is written in one call.
    """
    with open(filename, 'w') as outfile:
        outfile.write(json.dumps(features))
# parse positional reports
def extract_json(filename, messageTypes=[1, 2, 3, 18, 19, 27], chunksize = 10):
    # Exploratory/debug loader: prints the parsed frame, then re-reads the
    # file in chunks. NOTE(review): `messageTypes` is a mutable default
    # argument, and `good_columns` is never used below.
    good_columns = [
        "MessageID",
        "UserID",
        "Longitude",
        "Latitude",
        "UTCTimeStamp"
    ]
    # with open(filename, 'r') as input_file:
    #     jsonobj = ijson.items(input_file, 'Message', multiple_values=True)
    #     jsons = (o for o in jsonobj if o['MessageID'] in messageTypes)
    #     for j in jsons:
    #         print(j)
    df = pd.read_json(filename, lines=True)
    # NOTE(review): df is already a DataFrame; this copy adds nothing.
    df2 = pd.DataFrame( df)
    # df2 = pd.json_normalize(df)
    # df2['UserID'] = df.Message.UserID
    # df2['Longitude'] = df.Message.Longitude
    # df2['UTCTimeStamp'] = df.UTCTimeStamp
    print(df2.to_json(orient="records", lines=True))
    # pd.json_normalize(df, 'Message', ['UTCTimeStamp'])
    print(df2.head())
    #filtering the data set to the minimun excluding message types which are unwanted and
    # columns.
    result = None
    with pd.read_json(filename, lines=True, chunksize=chunksize) as reader:
        for chunk in reader:
            utc = chunk["UTCTimeStamp"]
            msgs = chunk["Message"]
            print(msgs)
            for u in utc:
                print(u)
                # NOTE(review): result is None here, so .add() raises
                # AttributeError on the first item; and even as a set,
                # set.add returns None, which would clobber `result`.
                # This branch cannot have been exercised — confirm intent
                # before relying on this function.
                result = result.add(u)
            for u in msgs:
                print(u)
|
# -*- coding: utf-8 -*-
'''Contains the `VowsDefaultReporter` class, which handles output after tests
have been run.
'''
# pyvows testing engine
# https://github.com/heynemann/pyvows
# Licensed under the MIT license:
# http://www.opensource.org/licenses/mit-license
# Copyright (c) 2011 Bernardo Heynemann heynemann@gmail.com
from __future__ import division, print_function
import re
import traceback
import sys
from pyvows.color import yellow, green, red, bold
# Public API of this module.
__all__ = [
    'PROGRESS_SIZE',
    'V_EXTRA_VERBOSE',
    'V_VERBOSE',
    'V_NORMAL',
    'V_SILENT',
    'ensure_encoded',
    'VowsReporter',
]
# Width (in characters) of the progress bar rendered by reporters.
PROGRESS_SIZE = 50
# verbosity levels (higher prints more detail)
V_EXTRA_VERBOSE = 4
V_VERBOSE = 3
V_NORMAL = 2
V_SILENT = 1
def ensure_encoded(thing, encoding='utf-8'):
    '''Ensures proper encoding for unicode characters.

    Text strings are encoded to bytes with `encoding`; bytes and
    non-string values pass through unchanged. Currently used only for
    characters `✓` and `✗`.
    '''
    if isinstance(thing, str) and not isinstance(thing, bytes):
        return thing.encode(encoding)
    return thing
class VowsReporter(object):
    '''Base class for other Reporters to extend. Contains common attributes
    and methods.
    '''
    # Should *only* contain attributes and methods that aren't specific
    # to a particular type of report.
    # Status glyphs used when printing individual vow results.
    HONORED = green('✓')
    BROKEN = red('✗')
    SKIPPED = '?'
    TAB = '  '
    def __init__(self, result, verbosity):
        # result: the collected test results; verbosity: one of the V_*
        # levels defined at module scope.
        self.result = result
        self.verbosity = verbosity
        self.indent = 1
    #-------------------------------------------------------------------------
    #   String Formatting
    #-------------------------------------------------------------------------
    def camel_split(self, string):
        '''Splits camel-case `string` into separate words.
        Example:
            self.camel_split('SomeCamelCaseString')
        Returns:
            'Some camel case string'
        '''
        return re.sub('((?=[A-Z][a-z])|(?<=[a-z])(?=[A-Z])|(?=[0-9]\b))', ' ', string).strip()
    def under_split(self, string):
        '''Replaces all underscores in `string` with spaces.'''
        return ' '.join(string.split('_'))
    def format_traceback(self, traceback_list):
        '''Adds the current level of indentation to a traceback (so it matches
        the current context's indentation).
        '''
        # TODO:
        # ...Is this a decorator? If so, please add a comment or docstring
        # to make it explicit.
        def _indent(msg):
            # Only "File ..." lines are indented; message lines keep their
            # original alignment.
            if msg.strip().startswith('File'):
                return self.indent_msg(msg)
            return msg
        tb_list = [_indent(tb) for tb in traceback_list]
        return ''.join(tb_list)
    def format_python_constants(self, msg):
        '''Fixes capitalization of Python constants.
        Since developers are used to reading `True`, `False`, and `None`
        as capitalized words, it makes sense to match that capitalization
        in reports.
        '''
        msg = msg.replace('true', 'True')
        msg = msg.replace('false', 'False')
        msg = msg.replace('none', 'None')
        return msg
    def header(self, msg, ruler_character='='):
        '''Returns the string `msg` with a text "ruler". Also colorizes as
        bright green (when color is available).
        '''
        ruler = ' {0}'.format(len(msg) * ruler_character)
        msg = ' {0}'.format(msg)
        # Surround the message with rulers and blank lines.
        msg = '{0}{ruler}{0}{msg}{0}{ruler}{0}'.format(
            '\n',
            ruler=ruler,
            msg=msg)
        msg = green(bold(msg))
        return msg
    def indent_msg(self, msg, indentation=None):
        '''Returns `msg` with the indentation specified by `indentation`.
        '''
        # Falls back to the reporter's current indent level when no explicit
        # indentation is given.
        if indentation is not None:
            indent = self.TAB * indentation
        else:
            indent = self.TAB * self.indent
        return '{indent}{msg}'.format(
            indent=indent,
            msg=msg
        )
    #-------------------------------------------------------------------------
    #   Printing Methods
    #-------------------------------------------------------------------------
    def humanized_print(self, msg, indentation=None, file=sys.stdout):
        '''Passes `msg` through multiple text filters to make the output
        appear more like normal text, then prints it (indented by
        `indentation`).
        '''
        msg = self.under_split(msg)
        msg = self.camel_split(msg)
        # NOTE(review): as written this replace is a no-op (same argument
        # twice); upstream pyvows collapses doubled spaces here with
        # replace('  ', ' ') — verify which is intended.
        msg = msg.replace(' ', ' ')  # normalize spaces if inserted by
        # both of the above
        msg = msg.capitalize()
        msg = self.format_python_constants(msg)
        print(self.indent_msg(msg, indentation), file=file)
    def print_traceback(self, err_type, err_obj, err_traceback, file=sys.stdout):
        '''Prints a color-formatted traceback with appropriate indentation.'''
        if isinstance(err_obj, AssertionError):
            error_msg = err_obj
        elif isinstance(err_obj, bytes):
            error_msg = err_obj.decode('utf8')
        else:
            error_msg = err_obj
        print(self.indent_msg(red(error_msg)), file=file)
        # Full tracebacks only at normal verbosity or higher.
        if self.verbosity >= V_NORMAL:
            traceback_msg = traceback.format_exception(err_type, err_obj, err_traceback)
            traceback_msg = self.format_traceback(traceback_msg)
            traceback_msg = '\n{traceback}'.format(traceback=traceback_msg)
            traceback_msg = self.indent_msg(yellow(traceback_msg))
            print(traceback_msg, file=file)
|
import copy
import time
import random
from Cost import Cost
from Car import Car
from State import State
class Great_deluge:
    '''Great Deluge metaheuristic for the car-sharing assignment problem.

    Operates almost entirely on the module-level State / Cost / Car
    singletons; the instance only holds a reference to a solver that can
    reset the shared data between iterations.
    '''
    def __init__(self, solver):
        # solver provides freeData(); all other state lives in State/Cost/Car.
        self.solver = solver

    def staydry(self, up):
        '''Run the great-deluge loop.

        `up` is the tolerance ("water level" allowance): a new solution is
        accepted when its cost is at most best-cost minus `up`; otherwise
        the tolerance shrinks by 10% each round until it reaches 0.
        Returns the best (reservation list, cars) pair recorded in State.
        '''
        print(up)
        b = 2  # iteration counter; loop runs until it reaches 120
        State.backup(Cost.getCost(State.rlist))
        #solver2=copy.deepcopy(self.solver)
        c = 0  # NOTE(review): never used afterwards -- leftover?
        a = State.getBest()  # best (lowest) cost seen so far
        while(b<120):
            print('wet -------------')
            # Reset shared data, rebuild a candidate, then locally improve it.
            self.solver.freeData()
            self.tempSolution()
            self.localSearch()
            cost = Cost.getCost(State.rlist)
            print(cost)
            b+=1
            if(cost<=a-up):
                # Candidate beats the current level: record it as the new best.
                print('improvement')
                State.setBestResult(Cost.getCost(State.rlist))
                a=State.getBest()
                #self.solver=copy.deepcopy(solver2)
                #State.restore()
            else:
                # No improvement: lower the tolerance (raise the water level).
                up-=(up/10)
                if(up<=0):
                    up=0
            print('up:',up)
            print(a)
        print(' we are dry')
        return State.resultRlist , State.resultCars
        print(a)  # unreachable: follows the return -- leftover debug output

    def tempSolution(self):
        '''Greedily build a candidate assignment in three passes over a
        shuffled reservation list (NOTE: shuffles State.rlist in place).'''
        l = State.rlist
        random.shuffle(l)
        # Pass 1: place each unassigned reservation on a non-overlapping
        # car already in the right zone.
        for r in l:
            if(r.notAssigned):
                for cid in State.options[r.id]:
                    c = State.cars[cid]
                    if(not(c.overlap(r.start,r.end)) and (c.inZone(r))):
                        c.addR(r)
                        break
        # Pass 2: for leftovers, claim an empty car and set its zone.
        for r in l:
            if(r.notAssigned):#could not be assigned to any car
                for cid in State.options[r.id]:
                    c = State.cars[cid]
                    if(len(c.res)==0):#No other reservations so no problem
                        c.setZone(r.zone)
                        c.addR(r)
                        break
        # Pass 3: reservations placed in an adjacent zone get a second
        # chance at an exact-zone car (or an empty car with the zone set).
        for r in l:
            if(r.adjZone):#could not be assigned to any car
                for cid in State.options[r.id]:
                    c = State.cars[cid]
                    if(not(c.overlap(r.start,r.end)) and (c.zone ==r.zone)):
                        c.addR(r)
                        break
            if(r.adjZone):#could not be assigned to any car
                for cid in State.options[r.id]:
                    c = State.cars[cid]
                    if(len(c.res)==0):#No other reservations so no problem
                        c.setZone(r.zone)
                        c.addR(r)
                        break

    def hill_climbing(self):
        '''Return the first cost-improving move found, as a
        (car, zone-or-None, reservation-or-None) triple; (None, None, None)
        when no improving move exists.'''
        best = 0 #verbetering >0
        #All possible 'assigned car' swaps
        for r in State.rlist:
            for cid in State.options[r.id]:
                c = State.cars[cid]
                if(r.zone == c.zone or r.zone in Car.zoneIDtoADJ[c.zone]):
                    cost = Cost.costToAddR(c,r)
                    if(cost>best):
                        return c,None,r
        #All sensible 'car zone' swaps
        for c in State.cars:
            for rid in c.res:
                r = State.rlist[rid]
                cost = Cost.costToSetZone(c,r.zone)
                if(cost>best):
                    return c,r.zone,None
        return None,None,None

    def localSearch(self):
        '''Apply hill-climbing moves until no improving move remains.'''
        while(1):
            bestc,bestz,bestr = self.hill_climbing()
            if(bestz is not None):
                bestc.setZone(bestz)
            elif(bestr is not None):
                if(bestr.getCar()):#if currently assigned to a car, remove from list
                    print(bestr.getCar().res)
                    print(bestr.id)
                    bestr.getCar().res.remove(bestr.id)
                #assign to new car
                bestc.addR(bestr)
            else:
                #reached peak
                return
|
from pydash import find
from unittest import TestCase
from scraper_feed.scraper_configs import get_field_mapping
class TestGetFieldMap(TestCase):
    '''Tests for scraper_feed.scraper_configs.get_field_mapping.'''

    def test_basic_with_default_config(self):
        '''The default mapping is a list that contains a dict for "url".'''
        mapping = get_field_mapping()
        self.assertIsInstance(mapping, list)
        url_entry = find(mapping, lambda entry: entry["source"] == "url")
        self.assertIsInstance(url_entry, dict)

    def test_get_field_map(self):
        '''A custom mapping entry survives the merge untouched.'''
        custom = [
            {
                "replace_type": "key",
                "source": "Manufacturer Number",
                "destination": "mpn",
            }
        ]
        merged = get_field_mapping(custom)
        mpn_entry = find(merged, lambda entry: entry["destination"] == "mpn")
        self.assertDictEqual(mpn_entry, custom[0])
|
# hanoi
# To move the n-th disk to peg z, first move the top n-1 disks from peg x to peg y.
# While moving those n-1 disks onto peg y, peg z plays the role of the spare peg.
# Moving n-1 disks to peg y in turn requires moving n-2 disks from peg x to peg z,
# and moving n-2 disks to peg z requires moving n-3 disks from peg x to peg y.
# .........
# Moving 2 disks to the target peg (the third one) needs 1 disk moved from peg x to the spare peg.
# Moving a single disk to the target peg (the third one) is just x -> target.
# Finally peg y acts as the source, x as the spare, and z as the target.
def hanio(n, x, y, z):
    """Print the moves transferring n disks from peg x to peg z via spare peg y."""
    if n == 1:
        print("%c -> %c" % (x, z))
        return
    hanio(n - 1, x, z, y)        # park the n-1 smaller disks on the spare peg
    print("%c -> %c" % (x, z))   # move the largest disk to the target peg
    hanio(n - 1, y, x, z)        # stack the smaller disks back on top of it
if __name__ == "__main__":
    # Entry-point guard: prompt for input only when run as a script, so
    # importing this module for its hanio() function does not block on stdin.
    piles = int(input("请输入汉诺塔的层数:"))
    hanio(piles, 'X', 'Y', 'Z')
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.response.AlipayResponse import AlipayResponse
class DatadigitalAnttechQqqCccQueryResponse(AlipayResponse):
    """Response model for the datadigital.anttech.qqq.ccc.query API.

    Auto-generated SDK boilerplate: plain property wrappers around the
    fields returned by the gateway, filled in by parse_response_content.
    """

    def __init__(self):
        super(DatadigitalAnttechQqqCccQueryResponse, self).__init__()
        self._cert_no = None
        self._city_code_open_id = None
        self._city_cppp_open_id = None
        self._province_code_open_id = None

    @property
    def cert_no(self):
        return self._cert_no

    @cert_no.setter
    def cert_no(self, value):
        self._cert_no = value

    @property
    def city_code_open_id(self):
        return self._city_code_open_id

    @city_code_open_id.setter
    def city_code_open_id(self, value):
        # Element-wise copy; note that non-list values are silently ignored
        # (the attribute keeps its previous value in that case).
        if isinstance(value, list):
            self._city_code_open_id = list()
            for i in value:
                self._city_code_open_id.append(i)

    @property
    def city_cppp_open_id(self):
        return self._city_cppp_open_id

    @city_cppp_open_id.setter
    def city_cppp_open_id(self, value):
        self._city_cppp_open_id = value

    @property
    def province_code_open_id(self):
        return self._province_code_open_id

    @province_code_open_id.setter
    def province_code_open_id(self, value):
        self._province_code_open_id = value

    def parse_response_content(self, response_content):
        # Let the base class parse the envelope, then copy the fields this
        # API returns (each assignment goes through the property setters).
        response = super(DatadigitalAnttechQqqCccQueryResponse, self).parse_response_content(response_content)
        if 'cert_no' in response:
            self.cert_no = response['cert_no']
        if 'city_code_open_id' in response:
            self.city_code_open_id = response['city_code_open_id']
        if 'city_cppp_open_id' in response:
            self.city_cppp_open_id = response['city_cppp_open_id']
        if 'province_code_open_id' in response:
            self.province_code_open_id = response['province_code_open_id']
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from pathlib import Path
import griffe # type: ignore
import mkdocs_gen_files # type: ignore
# Generate one mkdocstrings reference page per pyiceberg module, plus a
# literate-nav SUMMARY.md, at mkdocs build time.
nav = mkdocs_gen_files.Nav()

root = Path(__file__).parent.parent
src_root = root.joinpath("pyiceberg")

# Parsed API tree; used below to skip modules without any docstrings.
data = griffe.load(src_root)

for path in sorted(src_root.glob("**/*.py")):
    module_path = path.relative_to(root).with_suffix("")
    doc_path = path.relative_to(root).with_suffix(".md")
    full_doc_path = Path("reference", doc_path)
    parts = tuple(module_path.parts)
    if parts[-1] == "__init__":
        # A package's __init__ becomes the package index page.
        parts = parts[:-1]
        doc_path = doc_path.with_name("index.md")
        full_doc_path = full_doc_path.with_name("index.md")
    elif parts[-1].startswith("_"):
        # Skip private modules entirely.
        continue
    # Skip modules griffe knows about that carry no docstrings at all.
    if module_path.parts[1:] in data.members and not data[module_path.parts[1:]].has_docstrings:
        continue
    nav[parts] = doc_path.as_posix()
    with mkdocs_gen_files.open(full_doc_path, "w") as fd:
        ident = ".".join(parts)
        fd.write(f"::: {ident}")  # mkdocstrings directive rendering the module
    # Make the "edit this page" link point at the real source file.
    mkdocs_gen_files.set_edit_path(full_doc_path, Path("../") / path)

# Emit the navigation summary consumed by the literate-nav plugin.
with mkdocs_gen_files.open("reference/SUMMARY.md", "w") as nav_file:
    nav_file.writelines(nav.build_literate_nav())
|
# IPython log file
from collections import defaultdict
from copy import copy, deepcopy
#FUDO| check everywhere whether to deepcopy...
class Graph(object):
    '''Undirected weighted graph backed by adjacency lists.

    Attributes:
        verts -- set of vertices
        edges -- edges[v] is the list of v's neighbours
        dists -- parallel lists: dists[v][i] is the length of edge (v, edges[v][i])

    Removal operations return modified deep copies instead of mutating self.
    Shortest paths delegate to the module-level Dijkstra() helper.
    '''
    def __init__(self, vertices=None, edges=None, dists=None, check_connectivity=True):
        self.verts = set()
        #sets are nice for this because no duplicates, but they are unordered
        #could use collections.orderedict if we really need the O(1) access
        self.edges = defaultdict(list)
        self.dists = defaultdict(list)
        #self.paths = defaultdict(list)
        #self.pdist = defaultdict(list)
        for vertex in vertices:
            self.add_vertex(vertex)
        if dists is None:
            # default every edge length to 1
            dists = len(edges)*[1,]
        for edge, dist in zip(edges, dists):
            self.add_edge(*edge, dist=dist)
        #if dists is None:
        #    for vertex in self.verts:
        #        self.dists[vertex] = defaultdict(list)
        #        for edge in self[vertex]:
        #            dists[vertex][edge].append(1)
        if check_connectivity:
            self.check_connectivity()

    def check_connectivity(self):
        '''Raise RuntimeError when the graph contains disconnected vertices.'''
        if not self.connected() and len(self.verts) > 1:
            raise RuntimeError("There are disconnected vertices")

    def connected(self):
        '''Return True when every vertex is reachable from the first vertex.'''
        nedge = 0
        for vertex in self.verts:
            if len(self.edges[vertex]) == 1:
                nedge += 1
            elif len(self.edges[vertex]) == 0:
            #elif not vertex in self.edges:
                return False
        #FUX| guessing, double-check
        #if nedge > len(self) - 2:
        #    return False
        #    #raise RuntimeError("There are disconnected vertices")
        # at least exclude the ones already check, only check if from one end can get to all other ends
        for i, vx1 in enumerate(self.verts):
            if i > 0:
                break
            for vx2 in self.verts:
                if vx1 == vx2:
                    continue
                dist, path = self.shortest_path(vx1, vx2)
                if dist == -1:
                    return False
        return True

    def add_vertex(self, vertex):
        '''Add a vertex (no-op when already present).'''
        #don't need to check double-counting, because verts is set
        self.verts.add(vertex)

    def add_edge(self, from_vert, to_vert, dist=1):
        '''Add an undirected edge of length `dist`; raises when it exists.'''
        #if not to_vert in self.edges[from_vert] and not from_vert in self.edges[to_vert]:
        #    self.edges[from_vert].append(to_vert)
        #    self.dists[from_vert].append(dist)
        #    self.edges[to_vert].append(from_vert)
        #    self.dists[to_vert].append(dist)
        #else:
        #    raise RuntimeError('Edge already exists')
        if not to_vert in self.edges[from_vert]:
            self.edges[from_vert].append(to_vert)
            self.dists[from_vert].append(dist)
        else:
            raise RuntimeError('Edge already exists')
        if not from_vert in self.edges[to_vert]:
            self.edges[to_vert].append(from_vert)
            self.dists[to_vert].append(dist)
        else:
            raise RuntimeError('Edge already exists')

    def remove_vertex(self, remove, discard_disconnected=False, split_graph=False, check_connectivity=True):
        '''Return a copy of the graph with `remove` (and its edges) deleted.

        With split_graph=True a list of connected components is returned
        instead; with discard_disconnected=True isolated vertices are dropped.
        '''
        #FUDO| create copy of self remove vertex and associated edges
        #FUDO| check connectivity and raise corresponding error if graph disconnected
        graph = deepcopy(self)
        #remove the vertex
        if remove in graph.verts:
            graph.verts.remove(remove)
        #remove associated edges that contain remove
        for vertex in graph:
            if remove in graph[vertex]:
                graph[vertex].remove(remove)
        #if RuntimeError is raised, nothing happened to self
        if discard_disconnected:
            graph.remove_disconnected_vertices()
        #if not graph.connected() and split_graph:
        #FUX| I don't really care if they're disconnected or not, should I?
        #FUX| if it's one graph, it'll return a list of one graph
        if split_graph:
            return graph.split_graph()
        else:
            if check_connectivity:
                graph.check_connectivity()
            #FUDO| or do we want to return the result?
            return graph
        #self = deepcopy(graph)

    def split_graph(self):
        '''Return the connected components of the graph as a list of Graphs.'''
        graphs = []
        vertices = None
        edges = None
        dists = None
        #print 'new split graph'
        for vx1 in self.verts:
            if any([vx1 in graph.verts for graph in graphs]):
                #print 'already contained'
                continue
            vertices = [vx1]
            edges = [[vx1, e] for e in self[vx1]]
            dists = [d for d in self.dists[vx1]]
            for vx2 in self.verts:
                if vx1 == vx2:
                    continue
                dist, path = self.shortest_path(vx1, vx2)
                if dist == -1:
                    continue
                else:
                    vertices.append(vx2)
                    new_edges = [sorted([vx2, e]) for e in self[vx2]]
                    new_dists = [d for d in self.dists[vx2]]
                    #avoid duplicates, ordering doesn't matter because edge is added to both vertices
                    for dst, edg in zip(new_dists, new_edges):
                        if not edg in edges:
                            edges.append(edg)
                            dists.append(dst)
            #print 'VERTICES: ', vertices
            #print 'EDGES: ', edges
            g = Graph(vertices=vertices, edges=edges, dists=dists, check_connectivity=False)
            graphs.append(g)
        #for g in graphs:
        #    print g.verts
        #print 'in total ', len(graphs), ' new graphs'
        return graphs

    def remove_edge(self, from_vertex, to_vertex, discard_disconnected=False, check_connectivity=True):
        '''Return a copy of the graph with one edge removed.'''
        graph = deepcopy(self)
        graph.edges[from_vertex].remove(to_vertex)
        graph.edges[to_vertex].remove(from_vertex)
        if discard_disconnected:
            graph.remove_disconnected_vertices()
        if check_connectivity:
            graph.check_connectivity()
        return graph

    def remove_disconnected_vertices(self):
        '''Drop vertices without edges from self (iterates over a copy so
        the vertex set is not mutated while being traversed).'''
        graph = deepcopy(self)
        for vertex in graph:
            #print vertex, self[vertex]
            if len(graph[vertex]) == 0:
                self.verts.remove(vertex)

    def detect_rings(self):
        '''Return the vertex sets of cycles (vertices connected by more
        than one distinct path).'''
        rings = []
        for vx1 in self:
            edges = self[vx1]
            nedge = len(edges)
            if nedge < 2:
                continue
            for vx2 in self:
                #exclude if the same
                if vx1 == vx2:
                    continue
                paths = self.find_all_paths(vx1, vx2)
                rng = []
                if len(paths) > 1:
                    for p in paths:
                        rng.extend(p)
                    rng = set(rng)
                    if rng in rings:
                        continue
                    else:
                        rings.append(rng)
        return rings

    def find_all_paths(self, from_vertex, to_vertex):
        '''Return all (edge/vertex-disjoint) paths found between two
        vertices by repeatedly removing the current shortest path.'''
        if from_vertex == to_vertex:
            raise RuntimeError('Finding paths between the same vertices does not work right now')
        graph = deepcopy(self)
        allpaths = []
        while len(graph):
            #I don't think we need this
            #if not from_vertex in graph.verts or not to_vertex in graph.verts:
            #    break
            dist, path = graph.shortest_path(from_vertex, to_vertex)
            allpaths.append(path)
            #if it's an edge
            if len(path) == 2:
                graph = graph.remove_edge(path[0], path[1], discard_disconnected=True, check_connectivity=False)
            else:
                for vertex in path[1:-1]:
                    graph = graph.remove_vertex(vertex, discard_disconnected=True, check_connectivity=False)
            if not from_vertex in graph.verts or not to_vertex in graph.verts:
                break
            dist1, path = graph.shortest_path(from_vertex, to_vertex)
            dist2, path = graph.shortest_path(to_vertex, from_vertex)
            if all([d < 0 for d in [dist1, dist2]]):
                break
            else:
                graphs = graph.split_graph()
                for g in graphs:
                    if to_vertex in g.verts and from_vertex in g.verts:
                        graph = g
        return allpaths

    def __getitem__(self, key):
        '''Return the adjacency list of `key`.'''
        if not key in self.edges:
            raise IndexError("Vertex not in graph")
        return self.edges[key]#, self.dists[key]

    def __len__(self):
        return len(self.verts)

    def __add__(self, graph):
        '''Merge two graphs (union of vertices and edges).

        NOTE(review): copy(self) is a *shallow* copy, so newgraph shares the
        edges/dists dicts with self and add_edge below mutates the left
        operand as well -- the commented deepcopy would avoid that. Confirm
        intended semantics before changing.
        '''
        newgraph = copy(self)
        #newgraph = deepcopy(self)
        #first, add all vertices
        for vertex in graph.verts:
            #print vertex
            if not vertex in newgraph.verts:
                newgraph.add_vertex(vertex)
        #second, copy all edges
        for vertex in graph.verts:
            #because we don't know if all vertices have edges? (they should have at least on edges, otherwise disconnected
            #this shouldn't happen, because we check connectivity every time we initialize graph
            if not vertex in graph.edges:
                continue
            for edge in graph.edges[vertex]:
                if not edge in newgraph.edges[vertex]:
                    newgraph.add_edge(vertex, edge)
        return newgraph

    def __iter__(self):
        return iter(self.verts)

    def __eq__(self, other):
        '''Structural equality: same vertices, neighbours and distances.

        NOTE(review): sorts the adjacency/distance lists of *both* operands
        in place as a side effect of the comparison.
        '''
        if not self.verts == other.verts:
            return False
        else:
            for vertex in self:
                l1 = self[vertex]
                l2 = other[vertex]
                l1.sort()
                l2.sort()
                if not l1 == l2:
                    return False
                l1 = self.dists[vertex]
                l2 = other.dists[vertex]
                l1.sort()
                l2.sort()
                if not l1 == l2:
                    return False
        return True

    def __ne__(self, other):
        '''Negation of __eq__.'''
        iseq = self.__eq__(other)
        return not iseq

    def distance(self, from_vert, to_vert):
        '''Return the shortest-path distance between two vertices
        (-1 when no path exists).'''
        #if all distances were one:
        #if self.paths[from_vert] is None:
        dist, prvs = self.shortest_path(from_vert, to_vert)
        #for prev in prvs:
        #    path = spell_out_path(prvs, from_vert, to_vert)
        # BUG FIX: shortest_path called with an explicit destination already
        # returns the scalar distance (plus the path); the old
        # `return dist[to_vert]` tried to index that scalar and raised
        # TypeError. Return the scalar directly.
        return dist

    def build_path_matrix(self):
        '''Return a dict-of-dicts mapping (v1, v2) to the shortest path list.'''
        pathmat = defaultdict(list)
        for vx1 in self:
            dists, prvs = self.shortest_path(vx1)
            pathmat[vx1] = defaultdict(list)
            for vx2 in self:
                pathmat[vx1][vx2] = self.spell_out_path(prvs, vx1, vx2)
        #self.distmat = copy(distmat)
        return pathmat

    def build_distance_matrix(self):
        '''Return a dict-of-dicts mapping (v1, v2) to shortest-path length.'''
        distmat = defaultdict(list)
        for vx1 in self:
            dists, prvs = self.shortest_path(vx1)
            distmat[vx1] = defaultdict() #could just use {}
            for vx2 in self:
                distmat[vx1][vx2] = dists[vx2]
        #self.distmat = copy(distmat)
        return distmat

    def edge_length(self, from_vert, to_vert):
        '''Return the stored length of the direct edge between two vertices.'''
        edges = self.edges[from_vert]
        ndx = edges.index(to_vert)
        return self.dists[from_vert][ndx]

    def shortest_path(self, from_vert, to_vert=None):
        '''Dijkstra wrapper: with a destination returns (distance, path);
        without one returns the full (dist, prev) maps.'''
        return Dijkstra(self, from_vert, to_vert)

    @staticmethod
    def spell_out_path(prev, from_vert, to_vert):
        '''Expand a Dijkstra predecessor map into an explicit path list.'''
        return Dijkstra_path(prev, from_vert, to_vert)
def Dijkstra(graph, source, destination=None):
    '''Single-source shortest paths via Dijkstra's algorithm.

    With `destination` given, returns (distance, path); otherwise returns
    the full (dist, prev) maps. Returns (-1, []) when the remaining
    vertices are unreachable.
    '''
    pending = set(graph.verts)
    inf = float('inf')
    dist = {vertex: inf for vertex in pending}
    prev = {vertex: -1 for vertex in pending}
    dist[source] = 0
    while pending:
        # Pick the unvisited vertex with the smallest tentative distance.
        current = min(pending, key=lambda v: dist[v])
        if dist[current] == inf:
            # Everything still pending is unreachable.
            return -1, []
        pending.remove(current)
        if destination is not None and current == destination:
            break
        # Relax all edges leaving the current vertex.
        for neighbor in graph[current]:
            candidate = dist[current] + graph.edge_length(current, neighbor)
            if candidate < dist[neighbor]:
                dist[neighbor] = candidate
                prev[neighbor] = current
    if destination is None:
        return dist, prev
    return dist[destination], Dijkstra_path(prev, source, destination)
def Dijkstra_path(prev, source, target):
    '''Walk the predecessor map `prev` from `target` back to `source` and
    return the path as a list ordered source -> target.'''
    nodes = [target]
    current = target
    while current != source:
        current = prev[current]
        nodes.append(current)
    return nodes[::-1]
|
# -*- coding: utf-8 -*-
"""
Created on Thu Dec 24 01:33:23 2020
@author: liece
"""
import sys
import csv
import random as rd
def getCityNames(cityNames, indexList):
    """Return the city names corresponding to the given indexes."""
    names = []
    for idx in indexList:
        names.append(cityNames[idx])
    return names
def subTable(indexList, cityDist):
    """Extract the sub-matrix of distances restricted to the given indexes
    (used for part 2.2)."""
    return [[cityDist[row][col] for col in indexList] for row in indexList]
def generate_random_p(pimax, size):
    """Generate `size` random loads in a balanced way: each component is
    drawn uniformly from [0, pimax // size], so no single component can
    dominate. Sufficient for our tests."""
    bound = int(pimax / size)
    return [rd.randint(0, bound) for _ in range(size)]
def generate_random_p_biaised(pimax, size):
    """Like generate_random_p, but biased: each slot may grab most of the
    remaining budget, so large components (> 100) are far more likely."""
    loads = [0] * size
    for i in range(size):
        remaining = int(pimax - sum(loads))
        loads[i] = rd.randrange(0, remaining)
    return loads
def calculate_equilibrium_p(p, size):
    """Return a balanced redistribution of p: every slot gets sum(p) // size,
    and the first sum(p) % size slots receive one extra unit so the total is
    preserved even when the division is not exact."""
    base, remainder = divmod(sum(p), size)
    balanced = [base] * size
    for slot in range(remainder):
        balanced[slot] += 1
    return balanced
# Reads a csv file whose format matches the one given in the assignment.
# The delimiter can be changed to accommodate a new format.
def read_csv(filename):
    """Read a csv file in the assignment's format and return
    (number of cities, distance matrix, population list, city-name list).

    Layout: the header row carries the city names from column 3 on; each
    data row i holds the city's population in column 1 and the distances
    to cities 0..i in columns 3..3+i (the matrix is symmetric).
    """
    with open(filename) as handle:
        rows = csv.reader(handle, delimiter=';')
        header = next(rows)
        nb_city = len(header) - 2
        city_names = header[2:]
        city_pop = [0] * nb_city
        city_dist = [[0] * nb_city for _ in range(nb_city)]
        for i, row in enumerate(rows):
            city_pop[i] = int(row[0])
            # Lower triangle is stored in the file; mirror it as we go.
            for j in range(i + 1):
                value = int(row[j + 2])
                city_dist[i][j] = value
                city_dist[j][i] = value
    return nb_city, city_dist, city_pop, city_names
|
from cx_Freeze import *
import sys
import psutil
import signal
import threading
import time
import os
import tkinter
import os.path
# cx_Freeze build script for the Network_monitor tkinter app.
# Point Tcl/Tk at the interpreter's bundled copies so the frozen exe finds them.
PYTHON_INSTALL_DIR = os.path.dirname(os.path.dirname(os.__file__))
os.environ['TCL_LIBRARY'] = os.path.join(PYTHON_INSTALL_DIR, 'tcl', 'tcl8.6')
os.environ['TK_LIBRARY'] = os.path.join(PYTHON_INSTALL_DIR, 'tcl', 'tk8.6')

# Use the Win32GUI base on Windows so no console window is shown.
base = None
if sys.platform == 'win32':
    base = "Win32GUI"

# MSI Shortcut table row: creates a desktop shortcut for the installed exe.
shortcut_table = [
    ("DesktopShortcut",              # Shortcut
     "DesktopFolder",                # Directory_
     "Network_monitor_shortcut",     # Name
     "TARGETDIR",                    # Component_
     "[TARGETDIR]Network_monitor.exe",# Target
     None,                           # Arguments
     None,                           # Description
     None,                           # Hotkey
     None,                           # Icon
     None,                           # IconIndex
     None,                           # ShowCmd
     'TARGETDIR'                     # WkDir
     )
    ]

msi_data = {"Shortcut": shortcut_table}
bdist_msi_options = {'data': msi_data}

executables = [Executable("Network_monitor.py",
                          base=base,
                          icon="speedometer3.ico")]

# NOTE(review): the tcl86t/tk86t DLL paths below are hardcoded to one
# developer's machine -- they must be adjusted per build environment.
setup(
    name = "Network login",
    options = {"build_exe": {"packages":["tkinter","psutil","os","signal","threading","time","sys"], "include_files":[
        "speedometer3.ico","C:/Users/yashr/AppData/Local/Programs/Python/Python36/DLLs/tcl86t.dll",
        "C:/Users/yashr/AppData/Local/Programs/Python/Python36/DLLs/tk86t.dll","Supportfiles"],"includes": ["tkinter"]},"bdist_msi": bdist_msi_options},
    version = "1.0",
    description = "helps user to login into there University portal as well as tell them how much data they have used by Wi-Fi",
    executables = executables
    )
|
#coding: utf-8
'''
Author: Javen
Created:
'''
from Models.Abstract import Model_Abstract
class Model_Scrape(Model_Abstract):
    """Model for a single scrape-task record."""
    # Field names exposed by this model -- presumably the columns of the
    # backing scrape table (verify against Model_Abstract's usage of `keys`).
    keys = ['id', 'download_queue_id', 'method', 'url', 'proxy_id', 'begin_time', 'end_time', 'status', 'comment', 'time']
#!/usr/bin/env python
# encoding: utf-8
"""
Euler Project Problem 206 - Concealed Square
Created by Ehud Tamir on 2013-04-13.
Copyright (c) 2013 Ehud Tamir. All rights reserved.
"""
import sys
import os
def extract_odd_digits(n):
    """Return the number formed by n's digits at odd positions, counting
    from the least-significant digit (position 1).

    Example: 12345 -> 135 (digits 5, 3, 1 reassembled).
    """
    sgn = 1      # parity flag: take the digit when odd
    m = 0
    mul = 1
    while n:
        if sgn & 1:
            m += mul * (n % 10)
            mul *= 10
        sgn ^= 1
        # Floor division: the original `n /= 10` relied on Python-2 integer
        # division; under Python 3 it would produce a float and never reach 0.
        n //= 10
    return m
def interleave2nums(n, m):
    """Interleave the decimal digits of n and m, with each digit of m placed
    one position above the corresponding digit of n.

    Example: interleave2nums(12, 34) -> 3142.
    """
    mul = 1
    result = 0
    while n or m:
        result += mul * (n % 10) + 10 * mul * (m % 10)
        mul *= 100
        # Floor division (was Python-2 `/=`, which floats under Python 3
        # and would loop forever).
        n //= 10
        m //= 10
    # Both operands are 0 here, so this adds nothing; kept from the original.
    result += mul * (n + m)
    return result
import math
def main():
    """Brute-force search for Project Euler 206: find i such that the number
    built by interleaving 1234567890 with i is a perfect square.
    """
    # Count down from 10**9 in steps of 10. A while-loop replaces the
    # Python-2-only xrange (and avoids materializing a billion-entry list
    # were this run under Python 2's range).
    i = 1000000000
    while i > 0:
        whole = interleave2nums(1234567890, i)
        sqrt_w = int(math.sqrt(whole))
        # float sqrt loses precision at ~19 digits; nudge to the exact
        # integer floor before testing for a perfect square.
        while sqrt_w * sqrt_w > whole:
            sqrt_w -= 1
        while (sqrt_w + 1) * (sqrt_w + 1) <= whole:
            sqrt_w += 1
        if not i % 10000000:
            print(i)  # progress marker every 10**7 steps
        if sqrt_w * sqrt_w == whole:
            print(sqrt_w)
            break
        i -= 10

if __name__ == '__main__':
    main()
'''
target = 1234567890
for i in xrange(1000000000, 10000000000, 10):
if extract_odd_digits(i*i) == target:
print i
break
''' |
from model_car.vis import *
from model_car.model.model import *
import h5py
import sys, traceback
def plot_performance(steer, motor, loss1000):
    '''Refresh the two live monitoring figures: the smoothed loss curve and a
    target-vs-predicted steering scatter plot.

    Relies on pylab-style names (figure/clf/plot/array/...) and helpers
    (time_str, plt_square, dp) pulled in by the model_car star imports, and
    on the module-level `solver_file_path`.
    '''
    figure('loss1000')
    clf()
    plot(loss1000, 'o')
    #plt.ylim(0.045,0.06)
    plt.title(time_str('Pretty'))
    plt.xlabel(solver_file_path)

    figure('steer')
    clf()
    # Keep at most the last 10000 (target, prediction) steering pairs.
    s1000 = steer[-(min(len(steer),10000)):]
    s = array(s1000)
    plot(s[:,0],s[:,1],'o')
    plt.xlim(0,1.0)
    plt.ylim(0,1.0)
    # NOTE(review): presumably meant to draw the identity line, e.g.
    # plot([-1.5,1.5],[-1.5,1.5],'r') -- confirm the intended endpoints.
    plot([-1,5,1.5],[-1,5,1.5],'r')
    plt_square()
    plt.title(time_str('Pretty'))
    plt.xlabel(solver_file_path)
    # y-label shows the target/prediction correlation, rounded to 2 decimals.
    plt.ylabel(dp(np.corrcoef(s[:,0],s[:,1])[0,1],2))
################## Setup Keras ####################################
from keras import backend as K
from keras import optimizers
#solver_file_path = opjh("model_car/net_training/z2_color/solver.prototxt")
# Model/version identifiers; solver_file_path is used in plot labels and
# as the prefix of saved snapshot filenames.
version = 'version_1b'
solver_file_path = 'z2_color_' + version
#weights_file_mode = 'most recent' #'this one' #None #'most recent' #'this one' #None #'most recent'
#weights_file_path = opjD('/home/bdd/git/model_car/model_car/model/z2_color_tf.npy') #opjD('z2_color_long_train_21_Jan2017') #None #opjh('kzpy3/caf6/z2_color/z2_color.caffemodel') #None #'/home/karlzipser/Desktop/z2_color' # None #opjD('z2_color')
# NOTE(review): hardcoded to one machine -- adjust per environment.
weights_file_path = opjD('/home/bdd/Desktop/tmp/z2_color_version_1b_final_train2.hdf5')
# Build the training-phase model, load weights, and compile with SGD + MSE.
model = get_model(version, phase='train')
model = load_model_weight(model, weights_file_path)
model.compile(loss = 'mean_squared_error',
              optimizer = optimizers.SGD(lr = 0.01, momentum = 0.001, decay = 0.000001, nesterov = True),
              metrics=['accuracy'])
model.summary()
def get_layer_output(model, layer_index, model_input, training_flag = True):
    '''Evaluate the model up to `layer_index` and return that layer's output
    for one (ZED image, meta) input pair; `training_flag` selects the Keras
    learning phase.'''
    fetch = K.function(
        [model.layers[0].input, model.layers[9].input, K.learning_phase()],
        [model.layers[layer_index].output])
    return fetch([model_input[0], model_input[1], training_flag])[0]
##############################################################
# Gather the HDF5 run files and index every data entry by key.
runs_folder = sys.argv[1]#runs_folder = '~/Desktop/tmp/hdf5/runs'
run_names = sorted(gg(opj(runs_folder,'*.hdf5')),key=natural_keys)
solver_inputs_dic = {}
keys = {}   # maps each data key -> the hdf5 file that contains it
k_ctr = 0
for hdf5_filename in run_names:
    try:
        solver_inputs_dic[hdf5_filename] = h5py.File(hdf5_filename,'r')
        print hdf5_filename
        kk = solver_inputs_dic[hdf5_filename].keys()
        for k in kk:
            keys[k] = hdf5_filename
            k_ctr += 1
    except Exception as e:
        # Unreadable file: report and continue with the rest.
        cprint("********** Exception ***********************",'red')
        traceback.print_exc(file=sys.stdout)
        #print(e.message, e.args)
ks = keys.keys()
cprint(d2s('Using',len(ks),'data entries'),'red','on_yellow')

# Rolling metrics and timers for plotting / snapshotting.
ctr = 0
loss = []       # per-batch losses since the last 1000-batch average
loss1000 = []   # mean loss per 1000 batches
steer = []      # (target, prediction) pairs for steering
motor = []      # (target, prediction) pairs for motor
T = 6
timer = Timer(T)          # controls plot refresh
id_timer = Timer(3*T)     # controls periodic id/progress printout
iteration = 10            # NOTE(review): unused afterwards
i_time = 1                # batch counter used for snapshot cadence
try:
    while True: # Training
        # One "epoch": visit every data entry in a fresh random order.
        random.shuffle(ks)
        print('metrics: {}'.format(model.metrics_names))
        for k in ks:
            #print('--{}--'.format(k))
            hdf5_filename = keys[k]
            solver_inputs = solver_inputs_dic[hdf5_filename]
            x_train = {}
            y_train = {}
            # Normalize camera input to roughly [-0.5, 0.5].
            x_train['ZED_input'] = solver_inputs[k]['ZED_input'][:]/255.-0.5
            x_train['meta_input'] = solver_inputs[k]['meta_input'][:]
            y_train['steer_motor_target_data'] = solver_inputs[k]['steer_motor_target_data'][:]
            step_loss = model.train_on_batch({'ZED_input':x_train['ZED_input'], 'meta_input':x_train['meta_input']}, {'ip2': y_train['steer_motor_target_data']})
            # Read layer 20's output to compare predictions against targets
            # (index 9 = steer, index 19 = motor, per the target layout).
            steer_motor_out = get_layer_output(model, 20, [x_train['ZED_input'], x_train['meta_input']])
            steer_out = steer_motor_out[0,9]
            motor_out = steer_motor_out[0,19]
            loss.append(step_loss[0])
            #print('steer_motor_out: {}'.format(steer_motor_out[0,19]))
            #print('steer_motor_target:({},{}), steer_motor_out:({},{})'.format(y_train['steer_motor_target_data'][0,9], y_train['steer_motor_target_data'][0,19], steer_out, motor_out))
            steer.append([y_train['steer_motor_target_data'][0,9],steer_motor_out[0,9]])
            motor.append([y_train['steer_motor_target_data'][0,19],steer_motor_out[0,19]])
            # Fold every 1000 batch losses into one averaged point.
            if len(loss) >= 1000:
                loss1000.append(array(loss[-1000:]).mean())
                loss = []
            if len(steer) > 1000:
                steer = steer[-1000:]
                motor = motor[-1000:]
            ctr += 1
            if timer.check():
                #print('Check performace loss1000:{}\n'.format(len(loss1000)))
                if len(loss1000) > 0:
                    plot_performance(steer,motor,loss1000)
                timer.reset()
            #print('-------------------------------------{}-----------------------------------------------'.format(ctr))
            if id_timer.check():
                print('----------------------{} {}---------------------------------'.format(solver_file_path, ctr))
                #cprint(solver_file_path,'blue','on_yellow')
                id_timer.reset()
            #print('++++++++++++++++++++++++++++++++++++++++++++++++++++')
            if i_time % 10000 == 0:
                # save snapshot model
                model.save(opj(runs_folder, solver_file_path+'_'+str(i_time)+'.hdf5'))
            i_time = i_time + 1
        # End of epoch: persist the current weights.
        cprint('saving model.....','blue','on_yellow')
        model.save(opj(runs_folder, solver_file_path+'_final.hdf5'))
        #print('++++++++++++++++++++++++++++++++++++++++++++++++++++')
except KeyboardInterrupt:
    # Ctrl-C: save a final snapshot before exiting.
    cprint("********** Exception ***********************",'red')
    traceback.print_exc(file=sys.stdout)
    cprint('saving model.....','blue','on_yellow')
    model.save(opj(runs_folder, solver_file_path+'_final.hdf5'))
    pass
"""
figure('loss')
hist(loss)
print np.median(loss)
save_obj(loss,opjD('z2_color/0.056_direct_local_sidewalk_test_data_01Nov16_14h59m31s_Mr_Orange.loss'))
"""
if False: # Testing
loss = []
ctr = 0
for k in ks:
hdf5_filename = keys[k]
solver_inputs = solver_inputs_dic[hdf5_filename]
x_train = {}
y_train = {}
x_train['ZED_input'] = solver_inputs[k]['ZED_input'][:]/255.-0.5
x_train['meta_input'] = solver_inputs[k]['meta_input'][:]
y_train['steer_motor_target_data'] = solver_inputs[k]['steer_motor_target_data'][:]
step_loss = model.train_on_batch({'ZED_input':x_train['ZED_input'], 'meta_input':x_train['meta_input']}, {'ip2': y_train['steer_motor_target_data']})
steer_motor_out = get_layer_output(model, 20, [x_train['ZED_input'], x_train['meta_input']])
loss.append(step_loss[0])
steer.append([y_train['steer_motor_target_data'][0,9],steer_motor_out[0,9]])
motor.append([y_train['steer_motor_target_data'][0,19],steer_motor_out[0,19]])
if len(loss) >= 1000:
loss1000.append(array(loss[-1000:]).mean())
loss = []
ctr += 1
if timer.check():
plot_performance(steer,motor,loss1000)
timer.reset()
print ctr
if id_timer.check():
cprint(solver_file_path,'blue','on_yellow')
id_timer.reset()
|
#!/usr/bin/env pyformex
# $Id$
##
## This file is part of pyFormex 0.7.1 Release Sat May 24 13:26:21 2008
## pyFormex is a Python implementation of Formex algebra
## Website: http://pyformex.berlios.de/
## Copyright (C) Benedict Verhegghe (benedict.verhegghe@ugent.be)
##
## This program is distributed under the GNU General Public License
## version 2 or later (see file COPYING for details)
##
## (C) Benedict Verhegghe
##
## All physical quantities are N,mm
##
import simple,utils
from plugins.surface import TriSurface, compactElems
from plugins.properties import *
from plugins.fe_abq import *
from gui.colorscale import ColorScale,ColorLegend
import gui.decors
import time
# Derive the project name and working directory from the currently open file.
filename = GD.cfg['curfile']
dirname,basename = os.path.split(filename)
project = os.path.splitext(basename)[0]
formexfile = '%s.formex' % project
os.chdir(dirname)

# Viewport setup (names come from the pyFormex GUI star imports):
# presumably smooth rendering with lighting disabled -- confirm.
smooth()
lights(False)
def howto():
    '''Show a step-by-step usage guide for this menu in a GUI info dialog.'''
    showInfo(""" How to use this menu?

1. If you want to save your work, start by opening a new project (File menu).

2. Create the geometry: it will be put in a Formex named 'F'.

3. Add (or read) properties to be used for the snow loading: enter a property number, then select the corresponding facets. Save the properties if you want to use them again later.

4. Create a Finite Element model : either shell or frame.

5. Perform the calculation:

   For a shell model:

   5a. Create an Abaqus input file

   5b. Send the job to the cluster (job menu)

   5c. Get the results back when finished (job menu)

   For a frame model:

   5a. Directly calculate using the CALPY module

6. Postprocess:

   For an Abaqus job: use the postproc menu

   For a Calpy job: use the hesperia menu
""")
def createGeometry():
    """Construct the geodesic dome geometry and export it as Formex 'F'.

    Builds one subdivided icosahedron facet, projects it on the
    circumscribing sphere, cuts it at the base plane, replicates it
    into a full dome (rosette of 5), cuts out a door opening and
    scales everything to real size (mm).  The result is stored in the
    global Formex ``F`` and exported under the name 'F'.
    Intermediate stages are drawn in the pyFormex canvas.
    """
    global F
    # Construct a triangle of an icosahedron oriented with a vertex in
    # the y-direction, and divide its edges in n parts
    n = 6
    # Add a few extra rows to close the gap after projection
    nplus = n+3
    clear()
    # Start with an equilateral triangle in the x-y-plane
    A = simple.triangle()
    A.setProp(1)
    draw(A)
    # Modular size
    a,b,c = A.sizes()
    GD.message("Cell width: %s; height: %s" % (a,b))
    # Create a mirrored triangle
    B = A.reflect(1)
    B.setProp(2)
    draw(B)
    # Replicate nplus times in 2 directions to create triangular pattern
    F = A.replic2(1,nplus,a,-b,0,1,bias=-a/2,taper=1)
    G = B.replic2(1,nplus-1,a,-b,0,1,bias=-a/2,taper=1)
    clear()
    F += G
    draw(F)
    # Get the top vertex and make it the origin
    P = F[0,-1]
    draw(Formex([P]),bbox=None)
    F = F.translate(-P)
    draw(F)
    # Now rotate around the x axis over an angle so that the projection on the
    # x-y plane is an isosceles triangle with top angle = 360/5 = 72 degrees.
    # The base angles thus are (180-72)/2 = 54 degrees.
    # Ratio of the height of the isosceles triangle over the icosaeder edge length.
    c = 0.5*tand(54.)
    angle = arccos(tand(54.)/sqrt(3.))
    GD.message("Rotation Ratio: %s; Angle: %s, %s" % (c,angle,angle/rad))
    F = F.rotate(angle/rad,0)
    clear()
    draw(F,colormap=['black','magenta','yellow','black'])
    # Project it on the circumscribing sphere
    # The sphere has radius ru
    golden_ratio = 0.5 * (1. + sqrt(5.))
    ru = 0.5 * a * sqrt(golden_ratio * sqrt(5.))
    GD.message("Radius of circumscribed sphere: %s" % ru)
    # Scale the radius to the subdivided (n-times larger) triangle.
    ru *= n
    C = [0.,0.,-ru]
    F = F.projectOnSphere(ru,center=C)
    draw(F)
    hx,hy,h = F.sizes()
    GD.message("Height of the dome: %s" % h)
    # The base circle goes through bottom corner of n-th row,
    # which will be the first point of the first triangle of the n-th row.
    # Draw the point to check it.
    i = (n-1)*n/2
    P = F[i][0]
    draw(Formex([P]),marksize=10,bbox=None)
    # Get the radius of the base circle from the point's coordinates
    x,y,z = P
    rb = sqrt(x*x+y*y)
    # Give the base points a z-coordinate 0
    F = F.translate([0.,0.,-z])
    clear()
    draw(F)
    # Draw the base circle
    H = simple.circle().scale(rb)
    draw(H)
    # Determine intersections with base plane
    P = [0.,0.,0.]
    N = [0.,0.,1.]
    # Property numbers assigned to elements created by the cut; the meaning
    # of the individual values comes from Formex.cutAtPlane — TODO confirm.
    newprops = [ 5,6,6,None,4,None,None ]
    F = F.cutAtPlane(P,N,newprops=newprops,side='+',atol=0.0001)
    clear()
    draw(F)
    # Finally, create a rosette to make the circle complete
    # and rotate 90 degrees to orient it like in the paper
    clear()
    F = F.rosette(5,72.).rotate(90)
    def cutOut(F,c,r):
        """Remove all elements of F contained in a sphere (c,r)"""
        d = distanceFromPoint(F.f,c)
        return F.select((d < r).any(axis=-1) == False)
    # Cut out the door: remove all members having a point less than
    # edge-length a away from the base point
    p1 = [rb,0.,0.]
    F = cutOut(F,p1,1.1*a*n/6) # a was a good size with n = 6
    # Scale to the real geometry
    scale = 7000. / F.sizes()[2]
    GD.message("Applying scale factor %s " % scale)
    print F.bbox()
    F = F.scale(scale)
    print F.bbox()
    clear()
    draw(F,alpha=0.4)
    export({'F':F})
def assignProperties():
    """Assign property numbers to the structure's facets interactively.

    Repeatedly asks the user for a property number, lets the user pick
    elements in the canvas, and stores the number in ``F.p`` for the
    picked elements.  The loop ends when the dialog is cancelled.
    """
    # make sure we have only one actor
    clear()
    FA = draw(F)
    #drawNumbers(F)
    p = 0
    while True:
        res = askItems([('Property',p)])
        if not res:
            break
        p = res['Property']
        sel = pickElements()
        # BUG FIX: dict.has_key() is deprecated (removed in Python 3);
        # use the 'in' membership test instead.
        if 0 in sel:
            GD.debug("PICKED NUMBERS:%s" % sel)
            F.p[sel[0]] = p
            # Redraw so the new property colors become visible.
            undraw(FA)
            FA = draw(F,view=None,bbox=None)
def exportProperties():
    """Save the current properties under a name"""
    res = askItems([('Property Name','p')])
    if not res:
        return
    name = res['Property Name']
    # Ensure the exported name carries the 'prop:' prefix.
    if not name.startswith('prop:'):
        name = "prop:%s" % name
    export({name:F.p})
def selectProperties():
    """Select one of the saved properties and apply it to F.

    Asks the user for a name; if a variable with that name exists in the
    pyFormex project database (GD.PF), its value is set as F's properties.
    """
    res = askItems([('Property Name','p')])
    if res:
        p = res['Property Name']
        # BUG FIX: dict.has_key() is deprecated (removed in Python 3);
        # use the 'in' membership test instead.
        if p in GD.PF:
            F.setProp(GD.PF[p])
def saveProperties(fn = None):
    """Save the current properties to a file.

    If no file name is given, the user is asked for one; a cancelled
    dialog leaves everything untouched.
    """
    target = fn
    if not target:
        target = askFilename(dirname,filter="Property files (*.prop)")
    if not target:
        return
    F.p.tofile(target,sep=',')
def readProperties(fn = None):
    """Read properties from file, apply them to F and redraw.

    If no file name is given, the user is asked to pick an existing one.
    """
    target = fn
    if not target:
        target = askFilename(dirname,filter="Property files (*.prop)",exist=True)
    if not target:
        return
    values = fromfile(target,sep=',')
    F.setProp(values)
    clear()
    draw(F)
def connections(elems):
"""Create lists of connections to lower entities.
Elems is an array giving the numbers of lower entities.
The result is a sequence of maxnr+1 lists, where maxnr is the
highest lower entity number. Each (possibly empty) list contains
the numbers of the rows of elems that contain (at least) one value
equal to the index of the list.
"""
return [ (i,list(where(elems==i)[0])) for i in unique(elems.flat) ]
#####################################################################

def createFrameModel():
    """Create the Finite Element Model.

    It is supposed here that the Geometry has been created and is available
    as a global variable F.

    Builds a 3D frame (beam) model from the edges of F: removes the
    internal edges of quadrilateral faces, defines tube sections,
    assembles the user-selected nodal load cases (steel/glass/snow
    weight), adds short vertical support beams under the base nodes,
    and exports everything as the dict 'fe_model'.
    """
    wireframe()
    lights(False)
    # Turn the Formex structure into a TriSurface
    # This guarantees that element i of the Formex is element i of the TriSurface
    S = TriSurface(F)
    nodes = S.coords
    elems = S.elems  # the triangles
    # Create edges and faces from edges
    print "The structure has %s nodes, %s edges and %s faces" % (S.ncoords(),S.nedges(),S.nfaces())
    # Create the steel structure
    E = Formex(nodes[S.edges])
    clear()
    draw(E)
    # Get the tri elements that are part of a quadrilateral:
    # property value 6 marks triangles belonging to quad faces (see the
    # newprops used in createGeometry).
    prop = F.p
    quadtri = S.faces[prop==6]
    nquadtri = quadtri.shape[0]
    print "%s triangles are part of quadrilateral faces" % nquadtri
    if nquadtri > 0:
        # Create triangle definitions of the quadtri faces
        tri = compactElems(S.edges,quadtri)
        D = Formex(nodes[tri])
        clear()
        flatwire()
        draw(D,color='yellow')
    conn = connections(quadtri)
    print conn
    # Filter out the single connection edges
    internal = [ c[0] for c in conn if len(c[1]) > 1 ]
    print "Internal edges in quadrilaterals: %s" % internal
    E = Formex(nodes[S.edges],1)
    E.p[internal] = 6
    wireframe()
    clear()
    draw(E)
    # Remove internal edges
    tubes = S.edges[E.p != 6]
    print "Number of tube elements after removing %s internals: %s" % (len(internal),tubes.shape[0])
    D = Formex(nodes[tubes],1)
    clear()
    draw(D)
    # Beam section and material properties (rectangular hollow section,
    # outer dimensions b x h, wall thickness t; all lengths in mm)
    b = 60
    h = 100
    t = 4
    b1 = b-2*t
    h1 = h-2*t
    A = b*h - b1*h1
    print b*h**3
    I1 = (b*h**3 - b1*h1**3) / 12
    I2 = (h*b**3 - h1*b1**3) / 12
    I12 = 0
    # Torsion constant of a thin-walled closed section (Bredt's formula)
    J = 4 * A**2 / (2*(b+h)/t)
    tube = {
        'name':'tube',
        'cross_section': A,
        'moment_inertia_11': I1,
        'moment_inertia_22': I2,
        'moment_inertia_12': I12,
        'torsional_rigidity': J
        }
    steel = {
        'name':'steel',
        'young_modulus' : 206000,
        'shear_modulus' : 81500,
        'density' : 7.85e-9,
        }
    print tube
    print steel
    tubesection = ElemSection(section=tube,material=steel)
    # Calculate the nodal loads
    # Area of triangles
    area,normals = S.areaNormals()
    print "Area:\n%s" % area
    # compute bar lengths
    bars = nodes[tubes]
    barV = bars[:,1,:] - bars[:,0,:]
    barL = sqrt((barV*barV).sum(axis=-1))
    print "Member length:\n%s" % barL
    ### DEFINE LOAD CASE (ask user) ###
    res = askItems([('Steel',True),
                    ('Glass',True),
                    ('Snow',False),
                    ('Solver',None,'select',['Calpy','Abaqus']),
                    ])
    if not res:
        return
    # Count how many load cases were selected.
    nlc = 0
    for lc in [ 'Steel','Glass','Snow' ]:
        if res[lc]:
            nlc += 1
    # One (ncoords,3) nodal load table per load case.
    NODLoad = zeros((nlc,S.ncoords(),3))
    nlc = 0
    if res['Steel']:
        # the STEEL weight
        lwgt = steel['density'] * tube['cross_section'] * 9810 # mm/s**2
        print "Weight per length %s" % lwgt
        # assemble steel weight load: half of each bar's weight to each end
        for e,L in zip(tubes,barL):
            NODLoad[nlc,e] += [ 0., 0., - L * lwgt / 2 ]
        nlc += 1
    if res['Glass']:
        # the GLASS weight
        wgt = 450e-6 # N/mm**2
        # assemble uniform glass load: a third of each face load per vertex
        for e,a in zip(S.elems,area):
            NODLoad[nlc,e] += [ 0., 0., - a * wgt / 3 ]
        nlc += 1
    if res['Snow']:
        # NON UNIFORM SNOW
        fn = 'hesperia-nieve.prop'
        snowp = fromfile(fn,sep=',')
        snow_uniform = 320e-6 # N/mm**2
        snow_non_uniform = { 1:333e-6, 2:133e-6, 3:133e-6, 4:266e-6, 5:266e-6, 6:667e-6 }
        # assemble non-uniform snow load, keyed by each face's property value
        for e,a,p in zip(S.elems,area,snowp):
            NODLoad[nlc,e] += [ 0., 0., - a * snow_non_uniform[p] / 3]
        nlc += 1
    # For Abaqus: put the nodal loads in the properties database
    print NODLoad
    PDB = PropertyDB()
    for lc in range(nlc):
        for i,P in enumerate(NODLoad[lc]):
            PDB.nodeProp(tag=lc,set=i,cload=[P[0],P[1],P[2],0.,0.,0.])
    # Get support nodes (all nodes at z == 0)
    botnodes = where(isClose(nodes[:,2], 0.0))[0]
    bot = nodes[botnodes]
    GD.message("There are %s support nodes." % bot.shape[0])
    # Upper structure
    nnodes = nodes.shape[0] # node number offset
    ntubes = tubes.shape[0] # element number offset
    PDB.elemProp(set=arange(ntubes),section=tubesection,eltype='FRAME3D')
    # Create support systems (vertical beams)
    bot2 = bot + [ 0.,0.,-200.] # new nodes 200mm below bot
    botnodes2 = arange(botnodes.shape[0]) + nnodes # node numbers
    nodes = concatenate([nodes,bot2])
    supports = column_stack([botnodes,botnodes2])
    elems = concatenate([tubes,supports])
    ## !!!
    ## THIS SHOULD BE FIXED !!!
    # NOTE(review): the support section reuses the tube's A/I/J values;
    # the original authors flagged this as provisional.
    supportsection = ElemSection(material=steel,section={
        'name':'support',
        'cross_section': A,
        'moment_inertia_11': I1,
        'moment_inertia_22': I2,
        'moment_inertia_12': I12,
        'torsional_rigidity': J
        })
    PDB.elemProp(set=arange(ntubes,elems.shape[0]),section=supportsection,eltype='FRAME3D')
    # Finally, the botnodes2 get the support conditions
    botnodes = botnodes2
    ## # Radial movement only
    ## np_fixed = NodeProperty(1,bound=[0,1,1,0,0,0],coords='cylindrical',coordset=[0,0,0,0,0,1])
    ## # No movement, since we left out the ring beam
    ## for i in botnodes:
    ##     NodeProperty(i,bound=[1,1,1,0,0,0],coords='cylindrical',coordset=[0,0,0,0,0,1])
    ## np_central_loaded = NodeProperty(3, displacement=[[1,radial_displacement]],coords='cylindrical',coordset=[0,0,0,0,0,1])
    ## #np_transf = NodeProperty(0,coords='cylindrical',coordset=[0,0,0,0,0,1])
    # Draw the supports
    S = connect([Formex(bot),Formex(bot2)])
    draw(S,color='black')
    if res['Solver'] == 'Calpy':
        fe_model = Dict(dict(solver='Calpy',nodes=nodes,elems=elems,prop=PDB,loads=NODLoad,botnodes=botnodes,nsteps=nlc))
    else:
        fe_model = Dict(dict(solver='Abaqus',nodes=nodes,elems=elems,prop=PDB,botnodes=botnodes,nsteps=nlc))
    export({'fe_model':fe_model})
    print "FE model created and exported as 'fe_model'"
#################### SHELL MODEL ########################################

def createShellModel():
    """Create the Finite Element Model.

    It is supposed here that the Geometry has been created and is available
    as a global variable F.

    Builds a shell (STRI3) model from the faces of F: assigns a glass
    plate section to all elements, assembles the user-selected nodal
    load steps (glass/snow weight), fixes the base nodes and exports
    everything as the dict 'fe_model'.
    """
    # Turn the Formex structure into a TriSurface
    # This guarantees that element i of the Formex is element i of the TriSurface
    S = TriSurface(F)
    print "The structure has %s nodes, %s edges and %s faces" % (S.ncoords(),S.nedges(),S.nfaces())
    nodes = S.coords
    elems = S.elems  # the triangles
    clear()
    draw(F)
    # Shell section and material properties
    # VALUES SHOULD BE SET CORRECTLY
    glass_plate = {
        'name': 'glass_plate',
        'sectiontype': 'shell',
        'thickness': 18,
        'material': 'glass',
        }
    glass = {
        'name': 'glass',
        'young_modulus': 72000,
        'shear_modulus': 26200,
        'density': 2.5e-9, # T/mm**3
        }
    print glass_plate
    print glass
    glasssection = ElemSection(section=glass_plate,material=glass)
    PDB = PropertyDB()
    # All elements have same property:
    PDB.elemProp(set=arange(len(elems)),section=glasssection,eltype='STRI3')
    # Calculate the nodal loads
    # Area of triangles
    area,normals = S.areaNormals()
    print "Area:\n%s" % area
    ### DEFINE LOAD CASE (ask user) ###
    res = askItems([('Glass',True),('Snow',False)])
    if not res:
        return
    step = 0
    if res['Glass']:
        step += 1
        NODLoad = zeros((S.ncoords(),3))
        # add the GLASS weight
        wgt = 450e-6 # N/mm**2
        # Or, calculate weight from density:
        # wgt = glass_plate['thickness'] * glass['density'] * 9810
        # assemble uniform glass load: a third of each face load per vertex
        for e,a in zip(S.elems,area):
            NODLoad[e] += [ 0., 0., - a * wgt / 3 ]
        # Put the nodal loads in the properties database
        for i,P in enumerate(NODLoad):
            PDB.nodeProp(tag=step,set=i,cload=[P[0],P[1],P[2],0.,0.,0.])
    if res['Snow']:
        step += 1
        NODLoad = zeros((S.ncoords(),3))
        # add NON UNIFORM SNOW
        fn = 'hesperia-nieve.prop'
        snowp = fromfile(fn,sep=',')
        snow_uniform = 320e-6 # N/mm**2
        snow_non_uniform = { 1:333e-6, 2:133e-6, 3:133e-6, 4:266e-6, 5:266e-6, 6:667e-6 }
        # assemble non-uniform snow load, keyed by each face's property value
        for e,a,p in zip(S.elems,area,snowp):
            NODLoad[e] += [ 0., 0., - a * snow_non_uniform[p] / 3]
        # Put the nodal loads in the properties database
        for i,P in enumerate(NODLoad):
            PDB.nodeProp(tag=step,set=[i],cload=[P[0],P[1],P[2],0.,0.,0.])
    # Get support nodes (all nodes at z == 0)
    botnodes = where(isClose(nodes[:,2], 0.0))[0]
    bot = nodes[botnodes].reshape((-1,1,3))
    GD.message("There are %s support nodes." % bot.shape[0])
    botofs = bot + [ 0.,0.,-0.2]
    bbot2 = concatenate([bot,botofs],axis=1)
    print bbot2.shape
    S = Formex(bbot2)
    draw(S)
    ## np_central_loaded = NodeProperty(3, displacement=[[1,radial_displacement]],coords='cylindrical',coordset=[0,0,0,0,0,1])
    ## #np_transf = NodeProperty(0,coords='cylindrical',coordset=[0,0,0,0,0,1])
    ## # Radial movement only
    ## np_fixed = NodeProperty(1,bound=[0,1,1,0,0,0],coords='cylindrical',coordset=[0,0,0,0,0,1])
    # Since we left out the ring beam, we enforce no movement at the botnodes
    bc = PDB.nodeProp(set=botnodes,bound=[1,1,1,0,0,0],csys=CoordSystem('C',[0,0,0,0,0,1]))
    # And we record the name of the bottom nodes set
    botnodeset = Nset(bc.nr)
    fe_model = Dict(dict(nodes=nodes,elems=elems,prop=PDB,botnodeset=botnodeset,nsteps=step))
    export({'fe_model':fe_model})
    # Restore the rendering mode.
    # NOTE(review): the flattened source makes it ambiguous whether these
    # two calls are the tail of this function or top-level statements —
    # verify against the original file.
    smooth()
    lights(False)
#####################################################################
#### Analyze the structure using Abaqus ####

def createAbaqusInput():
    """Write the Abaqus input file.

    It is supposed that the Finite Element model has been created and
    exported under the name 'fe_model'.

    Asks the user for a job name; an empty name makes AbqData write to
    sys.stdout instead of a file.  Two analysis steps are written, one
    per load-case tag.
    """
    try:
        FE = named('fe_model')
        nodes = FE.nodes
        elems = FE.elems
        prop = FE.prop
        nsteps = FE.nsteps
    except:
        # NOTE(review): a bare 'except' also hides unrelated errors
        # (typos, KeyboardInterrupt); catching a specific exception
        # would be safer.
        warning("I could not find the finite element model.\nMaybe you should try to create it first?")
        return
    # ask job name from user
    res = askItems([('JobName','hesperia_shell')])
    if not res:
        return
    jobname = res['JobName']
    if not jobname:
        print "No Job Name: writing to sys.stdout"
        jobname = None
    # Requested output databases and result records.
    out = [ Output(type='history'),
            Output(type='field'),
            ]
    res = [ Result(kind='NODE',keys=['U','COORD']),
            Result(kind='ELEMENT',keys=['S'],pos='AVERAGED AT NODES'),
            Result(kind='ELEMENT',keys=['SINV'],pos='AVERAGED AT NODES'),
            Result(kind='ELEMENT',keys=['SF'],pos='AVERAGED AT NODES'),
            ]
    # One linear (nlgeom='no') step per load-case tag.
    step1 = Step(time=[1.,1.,0.01,1.],nlgeom='no',tags=[1])
    step2 = Step(time=[1.,1.,0.01,1.],nlgeom='no',tags=[2])
    model = Model(nodes,elems)
    AbqData(model,prop,[step1,step2],out=out,res=res).write(jobname)
#############################################################################
#### perform analysis with the calpy module ####

def runCalpyAnalysis():
    """Create data for Calpy analysis module and run Calpy on the data.

    While we could write an analysis file in the Calpy format and then
    run the Calpy program on it (like we did with Abaqus), we can (and do)
    take another road here: Calpy has a Python/numpy interface, allowing
    us to directly present the numerical data in arrays to the analysis
    module.

    It is supposed that the Finite Element model has been created and
    exported under the name 'fe_model'.
    """
    ############################
    # Load the needed calpy modules
    from plugins import calpy_itf
    calpy_itf.check()
    import calpy
    calpy.options.optimize=True
    from calpy import fe_util,beam3d
    ############################
    try:
        FE = named('fe_model')
        ## print FE.keys()
        ## nodes = FE.nodes
        ## elems = FE.elems
        ## prop = FE.prop
        ## nodloads = FE.loads
        ## botnodes = FE.botnodes
        ## nsteps = FE.nsteps
    except:
        warning("I could not find the finite element model.\nMaybe you should try to create it first?")
        return
    # ask job name from user
    res = askItems([('JobName','hesperia_frame'),('Verbose Mode',False)])
    if not res:
        return
    jobname = res['JobName']
    if not jobname:
        print "No Job Name: bailing out"
        return
    verbose = res['Verbose Mode']
    nnod = FE.nodes.shape[0]
    nel = FE.elems.shape[0]
    print "Number of nodes: %s" % nnod
    print "Number of elements: %s" % nel
    # Create an extra node for beam orientations
    #
    # !!! This is ok for the support beams, but for the structural beams
    # !!! this should be changed to the center of the sphere !!!
    extra_node = array([[0.0,0.0,0.0]])
    coords = concatenate([FE.nodes,extra_node])
    nnod = coords.shape[0]
    print "Adding a node for orientation: %s" % nnod
    # We extract the materials/sections from the property database
    matprops = FE.prop.getProp(kind='e',attr=['section'])
    # Beam Properties in Calpy consist of 7 values:
    # E, G, rho, A, Izz, Iyy, J
    # The beam y-axis lies in the plane of the 3 nodes i,j,k.
    mats = array([[mat.young_modulus,
                   mat.shear_modulus,
                   mat.density,
                   mat.cross_section,
                   mat.moment_inertia_11,
                   mat.moment_inertia_22,
                   mat.moment_inertia_12,
                   ] for mat in matprops])
    if verbose:
        print "Calpy.materials"
        print mats
    # Create element definitions:
    # In calpy, each beam element is represented by 4 integer numbers:
    # i j k matnr,
    # where i,j are the node numbers,
    # k is an extra node for specifying orientation of beam (around its axis),
    # matnr refers to the material/section properties (i.e. the row nr in mats)
    # Also notice that Calpy numbering starts at 1, not at 0 as customary
    # in pyFormex; therefore we add 1 to elems.
    # The third node for all beams is the last (extra) node, numbered nnod.
    # We need to reshape tubeprops to allow concatenation
    matnr = zeros(nel,dtype=int32)
    for i,mat in enumerate(matprops): # proces in same order as above!
        matnr[mat.set] = i+1
    elements = concatenate([FE.elems + 1, # the normal node numbers
                            nnod * ones(shape=(nel,1),dtype=int), # extra node
                            matnr.reshape((-1,1))], # mat number
                           axis=1)
    if verbose:
        print "Calpy.elements"
        print elements
    # Boundary conditions
    # While we could get the boundary conditions from the node properties
    # database, we will formulate them directly from the numbers
    # of the supported nodes (botnodes).
    # Calpy (currently) only accepts boundary conditions in global
    # (cartesian) coordinates. However, as we only use fully fixed
    # (though hinged) support nodes, that presents no problem here.
    # For each supported node, a list of 6 codes can (should)be given,
    # corresponding to the six Degrees Of Freedom (DOFs): ux,uy,uz,rx,ry,rz.
    # The code has value 1 if the DOF is fixed (=0.0) and 0 if it is free.
    # The easiest way to set the correct boundary conditions array for Calpy
    # is to put these codes in a text field and have them read with
    # ReadBoundary.
    s = ""
    for n in FE.botnodes + 1: # again, the +1 is to comply with Calpy numbering!
        s += " %d 1 1 1 1 1 1\n" % n # a fixed hinge
    # Also clamp the fake extra node
    s += " %d 1 1 1 1 1 1\n" % nnod
    if verbose:
        print "Specified boundary conditions"
        print s
    bcon = fe_util.ReadBoundary(nnod,6,s)
    fe_util.NumberEquations(bcon)
    if verbose:
        print "Calpy.DOF numbering"
        print bcon # all DOFs are numbered from 1 to ndof
    # The number of free DOFs remaining
    ndof = bcon.max()
    print "Number of DOF's: %s" % ndof
    # Create load vectors
    # Calpy allows for multiple load cases in a single analysis.
    # However, our script currently puts all loads together in a single
    # load case. So the processing hereafter is rather simple, especially
    # since Calpy provides a function to assemble a single concentrated
    # load into the load vector. We initialize the load vector to zeros
    # and then add all the concentrated loads from the properties database.
    # A single concentrated load consists of 6 components, corresponding
    # to the 6 DOFs of a node.
    #
    # AssembleVector takes 3 arguments: the global vector in which to
    # assemble a nodal vector (length ndof), the nodal vector values
    # (length 6), and a list of indices specifying the positions of the
    # nodal DOFs in the global vector.
    # Beware: The function does not change the global vector, but merely
    # returns the value after assembling.
    # Also notice that the indexing inside the bcon array uses numpy
    # convention (starting at 0), thus no adding 1 is needed!
    print "Assembling Concentrated Loads"
    nlc = 1
    loads = zeros((ndof,nlc),float)
    for p in FE.prop.getProp('n',attr=['cload']):
        loads[:,0] = fe_util.AssembleVector(loads[:,0],p.cload,bcon[p.set,:])
    if verbose:
        print "Calpy.Loads"
        print loads
    # Perform analysis
    # OK, that is really everything there is to it. Now just run the
    # analysis, and hope for the best ;)
    # Enabling the Echo will print out the data.
    # The result consists of nodal displacements and stress resultants.
    print "Starting the Calpy analysis module --- this might take some time"
    GD.app.processEvents()
    # NOTE(review): time.clock() is deprecated in Python 3.3+ and removed
    # in 3.8 (use time.perf_counter() there); fine for this Python 2 code.
    starttime = time.clock()
    displ,frc = beam3d.static(coords,bcon,mats,elements,loads,Echo=True)
    print "Calpy analysis has finished --- Runtime was %s seconds." % (time.clock()-starttime)
    # Export the results, but throw way these for the extra (last) node
    export({'calpy_results':(displ[:-1],frc)})
def postCalpy():
    """Show results from the Calpy analysis.

    Asks the user which force component / load case to display and with
    which deformation scale and animation settings, then hands the data
    to the postproc plugin's showResults.
    """
    from plugins.postproc import niceNumber,frameScale
    from plugins.postproc_menu import showResults
    try:
        FE = named('fe_model')
        displ,frc = named('calpy_results')
    except:
        warning("I could not find the finite element model and/or the calpy results. Maybe you should try to first create them?")
        raise
        # NOTE(review): this 'return' is unreachable — the bare 'raise'
        # above re-raises the exception first.  Either the raise or the
        # return should probably be removed.
        return
    # The frc array returns element forces and has shape
    # (nelems,nforcevalues,nloadcases)
    # nforcevalues = 8 (Nx,Vy,Vz,Mx,My1,Mz1,My2,Mz2)
    # Describe the nforcevalues element results in frc.
    # For each result we give a short and a long description:
    frc_contents = [('Nx','Normal force'),
                    ('Vy','Shear force in local y-direction'),
                    ('Vz','Shear force in local z-direction'),
                    ('Mx','Torsional moment'),
                    ('My','Bending moment around local y-axis'),
                    ('Mz','Bending moment around local z-axis'),
                    ('None','No results'),
                    ]
    # split in two lists
    frc_keys = [ c[0] for c in frc_contents ]
    frc_desc = [ c[1] for c in frc_contents ]
    # Ask the user which results he wants
    res = askItems([('Type of result',None,'select',frc_desc),
                    ('Load case',0),
                    ('Autocalculate deformation scale',True),
                    ('Deformation scale',100.),
                    ('Show undeformed configuration',False),
                    ('Animate results',False),
                    ('Amplitude shape','linear','select',['linear','sine']),
                    ('Animation cycle','updown','select',['up','updown','revert']),
                    ('Number of cycles',5),
                    ('Number of frames',10),
                    ('Animation sleeptime',0.1),
                    ])
    if res:
        frcindex = frc_desc.index(res['Type of result'])
        loadcase = res['Load case']
        autoscale = res['Autocalculate deformation scale']
        dscale = res['Deformation scale']
        showref = res['Show undeformed configuration']
        animate = res['Animate results']
        shape = res['Amplitude shape']
        cycle = res['Animation cycle']
        count = res['Number of cycles']
        nframes = res['Number of frames']
        sleeptime = res['Animation sleeptime']
        # Translational displacements of the selected load case.
        dis = displ[:,0:3,loadcase]
        if autoscale:
            siz0 = Coords(FE.nodes).sizes()
            siz1 = Coords(dis).sizes()
            print siz0
            print siz1
            dscale = niceNumber(1./(siz1/siz0).max())
        if animate:
            dscale = dscale * frameScale(nframes,cycle=cycle,shape=shape)
        # Get the scalar element result values from the frc array.
        val = val1 = txt = None
        if frcindex <= 5:
            val = frc[:,frcindex,loadcase]
            txt = frc_desc[frcindex]
            if frcindex > 3:
                # bending moment values at second node
                val1 = frc[:,frcindex+2,loadcase]
        showResults(FE.nodes,FE.elems,dis,txt,val,showref,dscale,count,sleeptime)
#############################################################################
######### Create a menu with interactive tasks #############

def create_menu():
    """Build and return the Hesperia menu, attached before the Help menu."""
    separator = ("---",None)
    items = []
    items.append(("&How To Use",howto))
    items.append(separator)
    items.append(("&Create Geometry",createGeometry))
    items.append(("&Assign Properties",assignProperties))
    items.append(("&Export Properties",exportProperties))
    items.append(("&Select Properties",selectProperties))
    items.append(("&Save Properties",saveProperties))
    items.append(("&Read Properties",readProperties))
    items.append(separator)
    items.append(("&Create Frame Model",createFrameModel))
    items.append(("&Create Shell Model",createShellModel))
    items.append(separator)
    items.append(("&Write Abaqus input file",createAbaqusInput))
    items.append(("&Run Calpy Analysis",runCalpyAnalysis))
    items.append(("&Show Calpy Results",postCalpy))
    items.append(separator)
    items.append(("&Close Menu",close_menu))
    return widgets.Menu('Hesperia',items=items,parent=GD.gui.menu,before='help')
def show_menu():
    """Show the Hesperia menu, creating it if it does not exist yet."""
    if GD.gui.menu.item('Hesperia'):
        return
    create_menu()
def close_menu():
    """Remove the Hesperia menu from the GUI, if it is present."""
    existing = GD.gui.menu.item('Hesperia')
    if existing:
        existing.remove()
def reload_menu():
    """Reload the menu.

    Removes the existing menu (if any) and rebuilds it, so that changes
    made to the menu actions while pyFormex is running take effect.
    """
    close_menu()
    show_menu()
####################################################################
######### What to do when the script is executed ###################

# pyFormex executes scripts with __name__ set to "draw" (not "__main__"),
# so this is the pyFormex equivalent of a script entry guard.
if __name__ == "draw":
    # The sole intent of running this script is to create a top level
    # menu 'Hesperia'. The typical action then might be 'show_menu()'.
    # However, during development, you might want to change the menu's
    # actions while pyFormex is running, so a 'reload' action seems
    # more appropriate.
    reload_menu()

# End
|
#!/usr/bin/env python
# Google Code Jam 2016 Qualification, problem A ("Counting Sheep"):
# Bleatrix counts the multiples of N (N, 2N, 3N, ...) and falls asleep
# once she has seen every digit 0-9 at least once.


def solve(num):
    """Return the last number counted before falling asleep, or "INSOMNIA".

    num -- the starting number N (non-negative int).  For num == 0 every
    multiple is 0, so all ten digits are never seen and "INSOMNIA" is
    returned; otherwise the answer is the smallest multiple of num after
    which all digits 0-9 have appeared.
    """
    # BUG FIX: the original compared the raw input() string to the int 0
    # (never true on Python 3) and used '/=' (float division on Python 3),
    # which broke the digit extraction.  Work on ints throughout.
    if num == 0:
        return "INSOMNIA"
    seen = [False] * 10
    multi = 0
    while True:
        multi += 1
        value = num * multi
        # Mark every decimal digit of this multiple as seen.
        while value:
            seen[value % 10] = True
            value //= 10  # integer division (was '/=', broken on Python 3)
        if all(seen):
            return num * multi


if __name__ == "__main__":
    # Standard Code Jam driver: first line is the number of test cases.
    t = int(input())
    for i in range(1, t + 1):
        num = int(input())  # BUG FIX: convert the input line to int
        print("Case #{}: {}".format(i, solve(num)))
# check out .format's specification for more formatting options
|
'''
MIT LICENSE
Copyright 2014 Inertial Sense, LLC - http://inertialsense.com
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
'''
import argparse
import sys
from time import sleep
from pySDK.inertialsensemodule import InertialSense, pyUpdateFlashConfig
from pySDK.display import cInertialSenseDisplay
from pySDK.logger import cISLogger
import pySDK.isutilities as util
# TODO: Make Defaults, Definitions and Member Variables available directly from C++ or put them in another python module
# Default serial baud rate for the uINS device.
IS_COM_BAUDRATE_DEFAULT = 3000000
# Default directory where data logs are written.
DEFAULT_LOGS_DIRECTORY = "IS_logs"
# Display modes for cInertialSenseDisplay (see main() flag handling).
DMODE_PRETTY = 0
DMODE_SCROLL = 1
DMODE_STATS = 2
# NOTE: "QUITE" is a typo for "QUIET" in the original SDK; the name is
# kept because other code refers to it.
DMODE_QUITE = 3
def main():
parser = argparse.ArgumentParser(description="Embeedded Test Analytics")
parser.add_argument('-baud', '--baudRate', type=int, help='Baud Rate', default=IS_COM_BAUDRATE_DEFAULT)
parser.add_argument('-b', '--bootloaderFileName', type=str, help='Path to Boot Loader File', default=None)
parser.add_argument('-c', '--comPort', type=str, help='COM Port Number (eg. com4)', default=None)
parser.add_argument('-lms', '--maxLogSpaceMB', type=int, help='Max Space Alloted to Logger', default=1024.0)
parser.add_argument('-lmf', '--maxLogFileSize', type=int, help='Max size of Log File', default= 1024 * 1024 * 5)
parser.add_argument('-lmm', '--maxLogMemory', type=int, help='Max Log Memory', default=131072)
parser.add_argument('-lts', '--useLogTimestampSubFolder', type=bool, help='Notification Email', default=True)
parser.add_argument('-lp', '--logPath', type=str, help='Log Path', default=DEFAULT_LOGS_DIRECTORY)
parser.add_argument('-lt', '--logType', type=str, help='Log Type (SDAT, DAT, CSV, KML)', default="DAT")
parser.add_argument('-loff', '--enableLogging', type=bool, help='Enable Logging', default=True)
parser.add_argument('-q', '--qlite', type=bool, help='Display: Qlite Mode', default=False)
parser.add_argument('-rp', '--replayDataLogPath', type=bool, help='Replay Data (True), Note: use logPath -lp for the directory location.', default=False)
parser.add_argument('-rs', '--replayDataSpeed', type=int, help='Data log replay speed', default=1.0)
parser.add_argument('-r', '--replayDataLog', type=bool, help='Config Json file', default=False)
parser.add_argument('-sINS1', '--sINS1', type=bool, help='Stream INL1 msg', default=False)
parser.add_argument('-sINS2', '--sINS2', type=bool, help='Stream INL2 msg', default=False)
parser.add_argument('-sDualIMU', '--sDualIMU', type=bool, help='Stream Dual IMU msg', default=False)
parser.add_argument('-sIMU1', '--sIMU1', type=bool, help='Stream IMU1 msg', default=False)
parser.add_argument('-sIMU2', '--sIMU2', type=bool, help='Stream IMU2 msg', default=False)
parser.add_argument('-sGPS', '--sGPS', type=bool, help='Stream GPS msg', default=False)
parser.add_argument('-sRawGPS', '--sRawGPS', type=bool, help='Stream GPS msg', default=False)
parser.add_argument('-sMag1', '--sMag1', type=bool, help='Stream Magnetometer1 msg', default=False)
parser.add_argument('-sBaro', '--sBaro', type=bool, help='Stream Barometer msg', default=False)
parser.add_argument('-sSol', '--sSol', type=bool, help='Stream Sol', default=False)
parser.add_argument('-sSensors', '--sSensors', type=bool, help='Stream Sensors', default=False)
parser.add_argument('-sDThetaVel', '--sDThetaVel', type=bool, help='Stream dThetaVel', default=False)
parser.add_argument('-scroll', '--scroll', type=bool, help='Display: Scroll Mode', default=False)
parser.add_argument('-stats', '--stats', type=bool, help='Display: Stats Mode', default=False)
parser.add_argument('-svr', '--serverConnection', type=str, help='Server Connection', default=None)
parser.add_argument('-host', '--host', type=str, help='Host', default=None)
parser.add_argument('-flashConfig', '--flashConfig', type=str, help='Flash Config', default=None)
# TODO: More Detailed Help (Same as C++ SDK)
opts, extra = parser.parse_known_args()
# TODO: Check for extra Args and throw error
if opts.qlite is True: opts.displayMode = DMODE_QUITE
elif opts.scroll is True: opts.displayMode = DMODE_SCROLL
elif opts.stats is True: opts.displayMode = DMODE_STATS
else: opts.displayMode = DMODE_PRETTY
if opts.replayDataLogPath is True: opts.replayDataLog = True
if opts.replayDataSpeed is not 1.0: opts.replayDataLog = True
# Check for proper usage
if opts.comPort is None and not opts.replayDataLog:
print "ERROR - incompataible commnad line arguments"
return False
if opts.replayDataLog:
if 'dat' in opts.logType:
opts.logType = cISLogger.eLogType.LOGTYPE_DAT
elif 'sdat' in opts.logType:
opts.logType = cISLogger.eLogType.LOGTYPE_SDAT
elif 'csv' in opts.logType:
opts.logType = cISLogger.eLogType.LOGTYPE_CSV
elif 'kml' in opts.logType:
opts.logType = cISLogger.eLogType.LOGTYPE_KML
else:
print "ERROR: Invalid Log type"
return
print str(opts.logType)
opts.logSolution = 2 # INL2
print "Inertial Sense - Python interface to CommandLine Tool"
print "====================================================="
CLTool(opts)
class CLTool(object):
    def __init__(self, opts):
        """Store the parsed options, create the display and run the tool.

        opts -- the argparse Namespace produced by main().
        Note: the constructor immediately enters cltool_main(), so
        instantiating CLTool starts the whole tool.
        """
        self.opts = opts
        self.display = cInertialSenseDisplay()
        self.cltool_main()
    def cltool_dataCallback(self,data):
        """Data callback invoked for every message received from the device.

        data -- a dict with a 'header' (containing the data id under 'id')
        and a 'data' dict with the message fields.  The bare attribute
        accesses below have no effect; they only demonstrate which fields
        each message type provides.
        """
        #print "Callback 2 - Received msg: %d" % data['header']['id']
        # Example for data access
        # Data ids (DIDs) of the messages handled below.
        # NOTE(review): DID_INS_2 and DID_GPS are both 5 here, so the
        # DID_GPS branch can never be reached — verify these values
        # against the SDK's data_sets definitions.
        DID_INS_2 = 5
        DID_DUAL_IMU = 58
        DID_DELTA_THETA_VEL = 3
        DID_GPS = 5
        DID_MAGNETOMETER_1 = 52
        DID_MAGNETOMETER_2 = 55
        DID_BAROMETER = 53
        DID_RAW_DATA = 60
        if data['header']['id'] == DID_INS_2:
            #print "qn2b Data Legth %d" % len(data['data']['qn2b']) # quaternion attitude
            #print "Quaternions: %f, %f, %f, %f" % (data['data']['qn2b'][0],data['data']['qn2b'][1],data['data']['qn2b'][2],data['data']['qn2b'][3])
            data['data']['qn2b'] # quaternion attitude
            data['data']['uvw'] # body velocities
            data['data']['lla'] # latitude, longitude, altitude
        elif data['header']['id'] == DID_DUAL_IMU:
            data['data']['time'];
            data['data']['imu1acc']
            data['data']['imu1pqr']
            data['data']['imu2acc']
            data['data']['imu2pqr']
        elif data['header']['id'] == DID_DELTA_THETA_VEL:
            data['data']['time']
            data['data']['theta']
            data['data']['uvw']
            data['data']['dt']
        elif data['header']['id'] == DID_GPS:
            data['data']['tOffset']
            data['data']['gpsPos']
            data['data']['gpsVel']
            data['data']['rxps']
        elif data['header']['id'] == DID_MAGNETOMETER_1:
            data['data']['time']
            data['data']['mag']
        #elif data['header']['id'] == DID_MAGNETOMETER_2: # See Mag 1
        elif data['header']['id'] == DID_BAROMETER:
            data['data']['time']
            data['data']['bar']
            data['data']['barTemp']
            data['data']['humidity']
            data['data']['mslBar']
        elif data['header']['id'] == DID_RAW_DATA:
            print "Received the Raw message in Python!!"
            data['data']['receiverIndex']
            data['data']['type'] # Indicates the message type
            data['data']['count']
            data['data']['reserved']
            data['data']['buf'] # 1020 byte buffer
            # Handle the different GPS Raw messages here
        # elif ... (add other messages here)
def cltool_main(self):
#clear display
self.display.SetDisplayMode(self.opts.displayMode);
self.display.Clear();
# if replay data log specified on command line, do that now and return
if self.opts.replayDataLog:
return not self.__cltool_replayDataLog()
# if bootloader was specified on the command line, do that now and return out
elif self.opts.bootloaderFileName is not None:
return self.__cltool_runBootloader()
# if host was specified on the command line, create a tcp server
elif self.opts.host is not None:
return self.__cltool_createHost();
# open the device, start streaming data and logging
else:
# [COMM INSTRUCTION] 1.) Create InertialSense object and open serial port. if reading/writing flash config, don't bother with data callback
self.inertialSenseInterface = InertialSense()
self.inertialSenseInterface.SetPyCallback(self.cltool_dataCallback)
self.inertialSenseInterface.SetPyDisplay(self.display)
if not self.inertialSenseInterface.Open(self.opts.comPort, self.opts.baudRate):
print "Failed to open serial port at %s - %d" % (self.opts.comPort, self.opts.baudRate)
return -1; # Failed to open serial port
# [COMM INSTRUCTION] 2.) Enable data broadcasting from uINS
if self.__cltool_setupCommunications():
# [LOGGER INSTRUCTION] Setup and start data logger
self.__cltool_setupLogger();
try:
# Main loop. Could be in separate thread if desired.
while True:
# [COMM INSTRUCTION] 3.) Process data and messages
self.inertialSenseInterface.Update();
if self.inertialSenseInterface.GetTcpByteCount() != 0:
self.display.GoToRow(1);
print "Tcp bytes read: %d" % self.inertialSenseInterface.GetTcpByteCount()
# Specify the minimum time between read/write updates.
sleep(.001);
except Exception as e:
print "Unknown exception, %s" % str(e)
except:
# Catch System Exit or Keyboard Interupt
pass
print "Shutting down..."
# close the interface cleanly, this ensures serial port and any logging are shutdown properly
self.inertialSenseInterface.Close();
return 0;
def __cltool_runBootloader(self):
# [BOOTLOADER INSTRUCTIONS] Update firmware
print "Bootloading file at %s" % self.opts.bootloaderFileName
uIns = InertialSense()
success = uIns.PyBootloadFile(self.opts.comPort, self.opts.bootloaderFileName)
if not success:
print "Error bootloading file %s, error: %s" % (self.opts.bootloaderFileName)
return success
def __cltool_replayDataLog(self):
if self.opts.logPath is None:
print "Please specify the replay log path!"
return False
print str(self.opts.logType)
print type(self.opts.logType)
logger = cISLogger();
if not logger.LoadFromDirectory(self.opts.logPath,self.opts.logType):
print "Failed to load log files: %s" % self.opts.logPath
return False;
print "Replaying log files: %s" % self.opts.logPath
# TODO: There are a lot of Debug statements stemming from SDK
# 1. Consider creating a python specific Read method that calls the display (will need to set display)
while True:
data = logger.PyReadData(0)
if 'header' not in data:
break
# Call Data Callback here to handle the data the same way as from the uINS
print "Done replaying log files: %s" % self.opts.logPath
self.display.Goodbye();
return True;
def __cltool_setupCommunications(self):
periodMs = 50;
self.inertialSenseInterface.StopBroadcasts(); # Stop streaming any prior messages
# ask for device info every 2 seconds
DID_DEV_INFO = 1
self.inertialSenseInterface.PyBroadcastBinaryData(DID_DEV_INFO, 2000);
# depending on command line options. stream various data sets
if self.opts.sSol:
self.inertialSenseInterface.SetBroadcastSolutionEnabled(true);
if self.opts.sINS1:
DID_INS_1 = 4
self.inertialSenseInterface.PyBroadcastBinaryData(DID_INS_1, periodMs);
if self.opts.sINS2:
DID_INS_2 = 5
self.inertialSenseInterface.PyBroadcastBinaryData(DID_INS_2, periodMs);
if self.opts.sSensors:
DID_SYS_SENSORS = 11
self.inertialSenseInterface.PyBroadcastBinaryData(DID_SYS_SENSORS, 100);
if self.opts.sDualIMU:
DID_DUAL_IMU = 58
self.inertialSenseInterface.PyBroadcastBinaryData(DID_DUAL_IMU, periodMs);
if self.opts.sIMU1:
DID_IMU_1 = 2
self.inertialSenseInterface.PyBroadcastBinaryData(DID_IMU_1, periodMs);
if self.opts.sIMU2:
DID_IMU_2 = 54
self.inertialSenseInterface.PyBroadcastBinaryData(DID_IMU_2, periodMs);
if self.opts.sDThetaVel:
DID_DELTA_THETA_VEL = 3
self.inertialSenseInterface.PyBroadcastBinaryData(DID_DELTA_THETA_VEL, periodMs);
if self.opts.sGPS:
DID_GPS = 6
self.inertialSenseInterface.PyBroadcastBinaryData(DID_GPS, 200);
if self.opts.sMag1:
DID_MAGNETOMETER_1 = 52
self.inertialSenseInterface.PyBroadcastBinaryData(DID_MAGNETOMETER_1, periodMs);
if self.opts.sBaro:
DID_BAROMETER = 53
self.inertialSenseInterface.PyBroadcastBinaryData(DID_BAROMETER, periodMs);
if self.opts.sRawGPS:
DID_RAW_DATA = 60
self.inertialSenseInterface.PyBroadcastBinaryData(DID_RAW_DATA, periodMs);
if self.opts.serverConnection is not None:
if self.opts.serverConnection.find("RTCM3:") == 0:
if not self.inertialSenseInterface.OpenServerConnectionRTCM3(self.opts.serverConnection.substr(6)):
print "Failed to connect to server."
elif self.opts.serverConnection.find("IS:") == 0:
if not self.inertialSenseInterface.OpenServerConnectionInertialSense(self.opts.serverConnection.substr(3)):
print "Failed to connect to server."
elif self.opts.serverConnection.find("UBLOX:") == 0:
if not self.inertialSenseInterface.OpenServerConnectionUblox(self.opts.serverConnection.substr(6)):
print "Failed to connect to server."
else:
print "Invalid server connection, must prefix with RTCM3: or IS:, %s" % self.opts.serverConnection
return alse;
if self.opts.flashConfig is not None:
return cltool_updateFlashConfig(self.opts.flashConfig)
return True
def __cltool_setupLogger(self):
# Enable logging in continuous background mode
self.inertialSenseInterface.SetLoggerEnabled(
self.opts.enableLogging, # enable logger
self.opts.logPath, # path to log to, if empty defaults to DEFAULT_LOGS_DIRECTORY
self.opts.logSolution, # solution logging options
self.opts.maxLogSpaceMB, # max space in mb to use, 0 for unlimited - only MAX_PERCENT_OF_FREE_SPACE_TO_USE_FOR_IS_LOGS% of free space will ever be allocated
self.opts.maxLogFileSize, # each log file will be no larger than this in bytes
self.opts.maxLogMemory, # logger will try and keep under this amount of memory
self.opts.useLogTimestampSubFolder # whether to place log files in a new sub-folder with the current timestamp as the folder name
)
def __cltool_updateFlashConfig(self):
return pyUpdateFlashConfig(self.inertialSenseInterface,self.opts.flashConfig)
def __cltool_createHost(self):
self.inertialSenseInterface = InertialSense()
if not self.inertialSenseInterface.Open(self.opts.comPort, self.opts.baudRate):
print "Failed to open serial port at %s" % self.opts.comPort
return -1 # Failed to open serial port
elif self.opts.flashConfig is not None and not self.__cltool_updateFlashConfig(self.inertialSenseInterface):
return -1
elif not self.inertialSenseInterface.CreateHost(self.opts.host):
print "Failed to create host at %s" % self.opts.host
return -1; # Failed to open host
try:
while True:
self.inertialSenseInterface.Update();
self.display.Home();
print "Tcp bytes sent: %d" % self.inertialSenseInterface.GetTcpByteCount()
sleep(.001);
except Exception as e:
print "Unknown exception, %s" % str(e)
except:
# Catch System Exit or Keyboard Interupt
pass
print "Shutting down..."
# close the interface cleanly, this ensures serial port and any logging are shutdown properly
self.inertialSenseInterface.Close();
return 0;
# Script entry point: parse the command line and launch the tool.
if __name__ == "__main__":
    main()
from hello import *
def test_success():
    """Sanity-check the arithmetic helpers exported by ``hello``."""
    for left, right, expected in ((2, 3, 5), (100, -10, 90)):
        assert summ(left, right) == expected
    assert mul(2, 5) == 10
|
import logging
import asyncio
import json
from autobahn.asyncio.websocket import WebSocketClientProtocol
from .slack import fetch_from_slack, SlackError
logger = logging.getLogger('app')
class SlackClientProtocol(WebSocketClientProtocol):
    """Slack RTM websocket client that reacts to slash-commands posted
    as file comments and DMs the commenting user an acknowledgement.
    """
    def onConnect(self, response):
        # Called by autobahn when the transport-level connection is up.
        logger.info("Server connected: {0}".format(response.peer))
    @asyncio.coroutine
    def onOpen(self):
        """Refresh the user-id -> DM-channel-id map every 30 minutes.

        Runs for the lifetime of the websocket; on a SlackError the old
        map (if any) is kept and the refresh is retried next cycle.
        """
        logger.info("WebSocket connection open.")
        while True:
            try:
                resp = yield from fetch_from_slack('im.list')
            except SlackError as error:
                logger.error(error)
            else:
                # NOTE(review): first successful fetch creates self.dm_user;
                # onMessage assumes it exists -- confirm a message cannot
                # arrive before this completes.
                self.dm_user = {
                    im.get('user'): im.get('id')
                    for im in resp.get('ims')
                }
                logger.info('Updated direct message user data')
            # Repeat this every 30 minutes
            yield from asyncio.sleep(1800)
    def onMessage(self, payload, isBinary):
        """Handle an RTM event; act only on 'file_comment_added' events
        whose comment text starts with '/'.
        """
        # The payload comes as bytes, decode before loading the JSON
        payload = json.loads(payload.decode('utf8'))
        # Only interested in the file comments
        if payload.get('type') != 'file_comment_added':
            return
        # Saves a reference to these two items
        file = payload.get('file', {})
        comment = payload.get('comment', {})
        # Grab the mimetype to easily distinguish type of file
        mime_type = file.get('mimetype', 'fish/sticks')
        # Grabs the comment from the payload
        comment_text = comment.get('comment', '')
        # If the comment is empty or the first character isn't a /, move on.
        if not comment_text or not comment_text[0].startswith('/'):
            return
        # Splits the string on the spaces and slices the array to separate
        # the command from the rest.
        split_comment = comment_text.split()
        # Replaces the /
        # NOTE(review): .replace removes EVERY '/' in the first token,
        # not just the leading one -- confirm commands never contain '/'.
        command = split_comment[:1][0].replace('/', '')
        modifiers = split_comment[1:]
        # Checks what kind of file this is (text, image, video, application)
        # so we can check the command against available command for that type.
        file_type = mime_type.split('/')[0]
        # Gets the user id to use later in an @message to notify success
        # or to offer feedback/help in the case of an incorrect command
        user_id = comment.get('user')
        file_id = file.get('id')
        test_message = {
            "type": "message",
            "channel": self.dm_user.get(user_id),
            "text": 'File received a command: {}'.format(command)
        }
        self.sendMessage(json.dumps(test_message).encode('utf-8'))
        logger.info('File received a command: {}'.format(command))
        logger.info(payload)
    def onClose(self, wasClean, code, reason):
        # Called by autobahn when the websocket closes (cleanly or not).
        logger.info("WebSocket connection closed: {0}".format(reason))
|
class Solution(object):
    def findMin(self, nums):
        """
        Return the minimum element of a rotated sorted array with no
        duplicates (e.g. [3,4,5,1,2] -> 1) via binary search, O(log n).

        :type nums: List[int]
        :rtype: int
        """
        li, hi = 0, len(nums) - 1
        while li <= hi:
            # BUG FIX: '/' is true division on Python 3 and would make
            # mid a float (TypeError on indexing); use floor division.
            mid = (li + hi) // 2
            if nums[li] <= nums[hi]:
                # Current slice is sorted; its first element is the min.
                return nums[li]
            elif nums[li] > nums[mid]:
                # Rotation point lies in the left half (mid included).
                hi = mid
            else:
                # Left half is sorted; the minimum is right of mid.
                li = mid + 1
from Tkinter import *
from parserLastFM import *
from class_Interface import *
from class_Node import *
import time
# Christian Jose Soler: Estructura de Datos
# Grupo F: Lunes
# Practica 4: Tkinter y Colas
def main():
    """Load the LastFM data into the global queue and start the GUI."""
    global mainQueue, inter
    # Principal queue of users, parsed from the small data file (the
    # big file is known to contain malformed records).
    mainQueue = parser("LastFM_small.dat")
    # Widget-factory helper shared with the application window.
    inter = Interface()
    window = Tk()
    window.title("Users LastFM")
    application = LastFMApp(window)  # build and run the application
    window.mainloop()
class LastFMApp():
    """Tkinter application that lets the user enqueue LastFM users,
    filter them by relevance, and page through the results.
    """
    def __init__(self, root):
        # NOTE(review): this __init__ calls root.mainloop() at the end,
        # so construction blocks until the window closes; main() calls
        # mainloop() again afterwards -- confirm this is intended.
        self.frame = Frame(root)
        self.frame.grid()
        self.appQueue = Queue() # Queue of the application
        self.searchQueue = Queue() # Queue of the search
        self.currentNode = Node("No user selected") # The current user shown
        #Widgets by column made with the class Interface()
        #1st column
        inter.button(self.frame,"ADD",self.addUsers,1,1,"left",1,3)
        self.displayUser, userLabel = inter.labelvar(self.frame,"Users will be shown here",1,4)
        #2nd column
        inter.button(self.frame,"SEARCH", self.searchUsers,2,1)
        inter.label(self.frame,"From relevance",2,2)
        self.endEntry = inter.entry(self.frame,3,3)
        inter.label(self.frame,"To relevance",2,3)
        self.beginEntry = inter.entry(self.frame,3,2)
        #3rd column
        self.displayArtist, artistLabel = inter.labelvar(self.frame,"Artists will be shown here",2,4,"center",2,1)
        displayNextButton = inter.button(self.frame,"DISPLAY NEXT", self.displayNext,2,5,"center",2,1)
        #4th column
        inter.label(self.frame,"ADDING COST: ",4,1)
        self.addCost, addLabel = inter.labelvar(self.frame,"XXX",4,2)
        inter.label(self.frame,"SEARCHING COST: ",4,3)
        self.searchCost, searchLabel = inter.labelvar(self.frame,"XXX",4,4)
        root.mainloop()
    # Method to add 1000 Users to the Queue of the application
    def addUsers(self):
        """Move 1000 users from the global mainQueue into appQueue and
        display the elapsed time in the ADDING COST label.
        """
        t1 = time.clock() # Timer start
        for a in xrange(1000):
            self.appQueue.enqueue(mainQueue.dequeue())
            # dequeue 1000 times from MainQueue and add the returned element to appQueue
        t2 = time.clock() # Timer end
        self.addCost.set("%0.3f s" %(t2-t1)) # Set the timer difference as addCost
        self.currentNode = self.appQueue.peek() # The node of the application is the head of appQueue
        # NOTE(review): calling mainloop() from an event handler nests
        # Tk event loops -- confirm this is deliberate.
        self.frame.mainloop()
    # Method to search between users by their relevance
    def searchUsers(self):
        """Filter appQueue by the relevance range typed in the entries,
        store the matches in searchQueue, and show the elapsed time.
        """
        # Get begin and end from the current entries
        begin = self.beginEntry.get()
        end = self.endEntry.get()
        # If end is the empty String, end=100, same for begin=0
        if end == "": end = 100
        if begin == "": begin = 0
        # Since .get() return a String, it must be converted to float
        begin = float(begin)
        end = float(end)
        t3 = time.clock() # Timer start
        # Definition of temporal Queue and Node
        tmpQueue = Queue()
        tmpNode = self.appQueue.peek()
        # NOTE(review): NoneType is not a builtin name -- presumably it
        # comes from one of the star imports; verify.
        while not isinstance(self.appQueue.peekAfter(tmpNode),NoneType):
            # While the appQueue is not finished, check if the current element meets the requirements
            tmpData = self.appQueue.peekAfter(tmpNode).data
            if tmpData.searchRelevance(begin,end):
                tmpQueue.enqueue(tmpData) # In case it meets, add it to tmpQueue
            tmpNode = self.appQueue.peekAfter(tmpNode) # Advance node
        self.searchQueue = tmpQueue # Set the search result, tmpQueue, to self.searchQueue
        t4 = time.clock() # Timer end
        self.searchCost.set("%0.3f s" %(t4-t3)) # Set the timer difference as searchCost
        if self.searchQueue.isEmpty(): # If there are no matches, the currentNode will show Nothing
            self.currentNode = Node("Nothing")
        else: # In the rest of cases, the head of the Queue will become the current node
            self.currentNode = self.searchQueue.peek()
        self.frame.mainloop()
    # Method to display the next element of the current list
    def displayNext(self):
        """Show the current node in both labels and advance to the next."""
        # If the Node has a String in it ("Nothing"), just show it
        if isinstance(self.currentNode.data,str):
            self.displayUser.set(str(self.currentNode.data))
            self.displayArtist.set(str(self.currentNode.data))
        else: # In the rest of cases, show its content, in the right way
            self.displayUser.set(str(self.currentNode.data))
            self.displayArtist.set("Artist: %s \n Relevance: %s" %(str(self.currentNode.data.getMost()), str(self.currentNode.data.getRelevance())))
        self.currentNode = self.currentNode.after # Advance node in the list
        self.frame.mainloop()
# Launch the application when the module is executed.
main()
from gevent import monkey
monkey.patch_all()
import time
import heapq
import random
import sys
import app
from threading import Thread
from flask import Flask, render_template, session, request,jsonify,request
from flask.ext.socketio import SocketIO, emit, join_room, leave_room
def run(port):
    """Start the Flask/SocketIO application (app.runApp) on *port*."""
    app.runApp(port)
#get the number of servers we want
if len(sys.argv) < 2:
    # No port-offset argument was supplied.
    # NOTE(review): exits with status 0 even though this is a usage
    # error -- confirm a non-zero exit code isn't expected.
    sys.exit(0)
num = int(sys.argv[1])
print num
# Run a single server on port 10000 + offset (blocks here).
run(num+10000)
#for x in range(0,num):
#    print x+10000
#    Thread(target=run(x+10000)).start()
# -*- coding: utf-8 -*-
# Generated by Django 1.9.5 on 2016-07-19 14:59
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated initial migration: creates the Alumno (student)
    and Materia (course) tables. Do not edit by hand.
    """
    initial = True
    dependencies = [
    ]
    operations = [
        migrations.CreateModel(
            name='Alumno',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('nombre', models.CharField(max_length=60)),
                ('apellido', models.CharField(max_length=60)),
                ('cedula', models.CharField(max_length=10, unique=True)),
            ],
        ),
        migrations.CreateModel(
            name='Materia',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('nombre', models.CharField(max_length=60)),
                ('codigo', models.CharField(max_length=10, unique=True)),
                ('cupos', models.IntegerField(default=0)),
            ],
        ),
    ]
|
#!/usr/bin/env python
'''
Filename : VCFscreen_listcarriers.py
Author : Matt Halvorsen
Email : mhalvors1@gmail.com
Date created : 12/17/2019
Date last modified : 12/17/2019
Python version : 2.7
'''
import sys
import os
import argparse
import cyvcf2
from collections import defaultdict
import pandas as pd
import numpy as np
import vcfscreen.screens as screens
import vcfscreen.misc as misc
from vcfscreen.cyvcf2_variant import Cyvcf2Vcf,Cyvcf2Variant
from vcfscreen.samples import Samples
from vcfscreen.vcf_cnds import VcfCnds
from vcfscreen.annot import AnnotTxs
def main():
    """Print one "<variant ID>\\t<sample IID>" line per carrier genotype.

    Reads the fam file and genotype VCF named on the command line,
    optionally restricts to a variant list, and reports every sample
    whose genotype matches --gts-include.
    """
    # read user-provided args
    args = parse_args()
    # read samples from fam file
    samples_i = Samples(args.in_fam)
    # load fam file into df
    # NOTE(review): df is built but never used below -- kept for parity
    # with the original script; confirm before removing entirely.
    df = pd.read_table(args.in_fam, header=None, sep=" ")
    df = df.rename({0: "FID", 1: "IID", 2: "PID", 3: "MID", 4: "SEX", 5: "PHENO"},
                   axis='columns')
    df = df.set_index("IID", drop=False)
    # if varlist file defined, load it into a set for O(1) membership tests
    if args.varlist is not None:
        with open(args.varlist, "r") as varlist_fh:
            args.varlist = set(line.rstrip() for line in varlist_fh)
    # init cyvcf2 VCF obj (gts012=True per cyvcf2 docs: gt_types are
    # 0=hom-ref, 1=het, 2=hom-alt, 3=unknown)
    vcf = cyvcf2.VCF(args.in_gt_vcf, strict_gt=True, gts012=True)
    # map each sample to its column index in the VCF
    samples_i.get_vcf_idx(vcf.samples)
    # reverse map: VCF column index -> sample IID
    idxs = dict()
    for x in samples_i.samples:
        idxs[samples_i.samples[x].idx] = x
    # main loop for pulling out genotypes
    for vcf_variant in vcf:
        if args.varlist is not None and vcf_variant.ID not in args.varlist:
            continue
        # genotypes at the variant site
        gt = vcf_variant.gt_types
        # indices of the genotype classes requested via --gts-include
        if args.gts_include == "1":
            idxs_i = np.where(gt == 1)[0]
        elif args.gts_include == "2":
            idxs_i = np.where(gt == 2)[0]
        else:
            idxs_i = np.where((gt == 1) | (gt == 2))[0]
        # emit one line per carrier of this variant
        for idx_i in idxs_i:
            print("\t".join([vcf_variant.ID, idxs[idx_i]]))
    vcf.close()
    return
def parse_args():
    """Build the command-line interface and parse sys.argv."""
    parser = argparse.ArgumentParser()
    parser.add_argument("--varlist", action="store", type=str, default=None,
                        help="file with list of variants that qualify for inclusion.")
    parser.add_argument("--gts-include", action="store", type=str,
                        default="12", choices=["1", "2", "12"],
                        help="which genotypes to include.")
    parser.add_argument("in_fam",
                        help="input fam with with sample IDs.")
    parser.add_argument("in_gt_vcf",
                        help="input genotype vcf")
    return parser.parse_args()
# Run only when executed as a script.
if __name__ == "__main__":
    main()
|
import random
import cv2
from notifier.utils import read_info
import numpy as np
from PIL import ImageFont, ImageDraw, Image
import os
def generate_image(images_info,
                   image,
                   message,
                   font_path="OpenSans-Semibold.ttf"):
    """Render *message* inside a speech bubble drawn on *image*.

    Bubble geometry and text colour come from the *images_info* row for
    this image.  The bubble is drawn with OpenCV, the text is sized to
    fit and drawn with PIL, and the composite is written to 'tmp.png',
    whose path is returned.
    """
    # Geometry and RGB colour configured for this image.
    (left, top, right, bottom,
     anchor_x, anchor_y, r, g, b) = images_info.loc[image].to_numpy()
    white = (255, 255, 255)
    bubble_w = right - left
    bubble_h = bottom - top
    center = (left + bubble_w // 2, top + bubble_h // 2)
    canvas = cv2.imread(os.path.join('notifier/images', image))
    # Main bubble: filled ellipse spanning the configured box.
    cv2.ellipse(canvas,
                center,
                (bubble_w // 2, bubble_h // 2),
                0,
                0,
                360,
                white,
                -1)
    # Speech-bubble tail pointing at the anchor position.
    tail = np.array([center, (center[0], top), (anchor_x, anchor_y)])
    cv2.drawContours(canvas, [tail], 0, white, -1)
    r, g, b = int(r), int(g), int(b)
    # Text pass: step font sizes down from 100 until the rendered
    # message fits within 90% of the bubble in both dimensions.
    pil_img = Image.fromarray(canvas)
    draw = ImageDraw.Draw(pil_img)
    w = h = 0
    font = ImageFont.truetype(font_path, 0)
    for size in range(100, 10, -5):
        font = ImageFont.truetype(font_path, size)
        w, h = draw.textsize(message, font=font)
        if w < bubble_w - bubble_w * 0.1 and \
                h < bubble_h - bubble_h * 0.1:
            break
    # Centre the text in the bubble (nudged up by h/6); PIL wants BGR
    # order here because the canvas came from OpenCV.
    draw.text((left + (bubble_w - w) / 2,
               top + (bubble_h - h) / 2 - h / 6),
              message,
              font=font,
              fill=(b, g, r, 255))
    cv2.imwrite('tmp.png', np.array(pil_img))
    return 'tmp.png'
if __name__ == '__main__':
    # Manual smoke test: render a sample message onto the last known
    # image, display it, and save a copy.
    images_info = read_info(file_name='notifier/resources/images_info.csv')
    img = generate_image(images_info,
                         images_info.index.values[-1],
                         'ТестТестТестТестТестТест',
                         font_path='notifier/resources/OpenSans-Semibold.ttf')
    img = cv2.imread(img)
    cv2.imshow('sfs', img)
    cv2.imwrite('resources/tmp.png', img)
    cv2.waitKey()
|
import argparse
import models
import trackers
import experiments
from configs import cfg
parser = argparse.ArgumentParser(description='Benchmark SiamBroadcastRPN on a dataset.')
parser.add_argument("--checkpoint")
# NOTE(review): type=bool is an argparse pitfall -- any non-empty string
# (including "False") parses as True; confirm callers only ever pass
# the flag with a truthy intent.
parser.add_argument("--visualize", type=bool, default=False)
parser.add_argument("--sequences", nargs='+', default=[])
# NOTE(review): default is the int 2015 but a command-line value arrives
# as a string -- downstream code must accept both; verify.
parser.add_argument("--version", default=2015)
parser.add_argument(
    "--config-file",
    default="",
    metavar="FILE",
    help="path to config file",
    type=str,
)
parser.add_argument(
    "opts",
    help="Modify config options using the command-line",
    default=None,
    nargs=argparse.REMAINDER,
)
args = parser.parse_args()
# Overlay the YAML config file and any KEY VALUE overrides, then freeze.
if args.config_file:
    cfg.merge_from_file(args.config_file)
cfg.merge_from_list(args.opts)
cfg.freeze()
# An empty --sequences list means "run every sequence".
if len(args.sequences) == 0:
    args.sequences = None
# Build the network, wrap it in a tracker, and run the OTB experiment.
net = models.load_net(cfg.MODEL.NET, cfg)
tracker = trackers.load_tracker(net, args.checkpoint, cfg)
experiment = experiments.ExperimentOTB(cfg, version=args.version, sequences=args.sequences)
experiment.run(tracker, visualize=args.visualize)
experiment.report([tracker.name], args=args)
|
import os
from PIL import Image, ImageDraw, ImageFont
os.makedirs('withLogo', exist_ok=True)

# Load the watermark font once, outside the loop (the .ttf must be
# available in the current directory).
font = ImageFont.truetype("DroidSans.ttf", 12)

# Loop over all files in the working directory.
for filename in os.listdir('.'):
    if not (filename.endswith('.png') or filename.endswith('.jpg')):
        continue  # skip non-image files
    print ("Adding watermark to %s" % (filename))
    # Use the image as a context manager so the file handle is closed.
    with Image.open(filename) as im:
        draw = ImageDraw.Draw(im)
        # BUG FIX: the loaded font was never passed to draw.text(), so
        # the watermark silently used PIL's default bitmap font.
        draw.text((50, 20), "All Rights Reserved",
                  fill=(128, 255, 255), font=font)
        # Save changes.
        im.save(os.path.join('withLogo', filename))
|
# from matplotlib import pyplot as plt
# import pandas as pd
import os
def getFileName(old_file_name, dt):
    """Return *old_file_name* with its embedded date/time fields
    replaced by those of datetime *dt*.

    File names are underscore-separated '.mat' paths ending either in
    '..._YYYYMMDD_HHMMSS_mmm_trace3C.mat' or '..._YYYYMMDD_HHMMSS_mmm.mat';
    the date, time, and millisecond fields are rewritten in place.

    :param old_file_name: original '.mat' file name
    :param dt: datetime.datetime supplying the new timestamp
    :raises ValueError: if the extension is not '.mat'
    """
    filename, file_ext = os.path.splitext(old_file_name)
    if file_ext != '.mat':
        raise ValueError('Not a mat file')
    filename_parts = filename.split('_')
    if filename_parts[-1] == 'trace3C':
        # BUG FIX: '/' true-divides on Python 3; use '//' so the
        # millisecond field is a clean integer on both 2 and 3.
        filename_parts[-2] = '%03d' % (dt.microsecond // 1000)
        filename_parts[-3] = '%02d%02d%02d' % (dt.hour, dt.minute, dt.second)
        filename_parts[-4] = '%04d%02d%02d' % (dt.year, dt.month, dt.day)
    else:
        # TODO support other file name formats explicitly; for now the
        # timestamp is assumed to occupy the last three fields.
        filename_parts[-1] = '%03d' % (dt.microsecond // 1000)
        filename_parts[-2] = '%02d%02d%02d' % (dt.hour, dt.minute, dt.second)
        filename_parts[-3] = '%04d%02d%02d' % (dt.year, dt.month, dt.day)
    filename = '_'.join(filename_parts)
    return filename + file_ext
#
# # for plotting
# def rolling_std(timeseries, window=1000):
# # Determing rolling statistics
# rolmean = pd.rolling_mean(timeseries, window=window)
# rolstd = pd.rolling_std(timeseries, window=window)
#
# # Plot rolling statistics:
# orig = plt.plot(timeseries, color='blue', label='Original')
# mean = plt.plot(rolmean, color='black', label='Rolling Mean')
# std = plt.plot(rolstd, color='red', label='Rolling Std')
# plt.legend(loc='best')
# plt.title('Rolling Standard Deviation')
# plt.show(block=False)
#
# # for plotting
# def plot3C(timeseries, i_rec):
# plt.plot(timeseries['traceData'][:, i_rec * 3], color='red')
# plt.plot(timeseries['traceData'][:, i_rec * 3 + 1], color='gray')
# plt.plot(timeseries['traceData'][:, i_rec * 3 + 2], color='blue')
|
from django.contrib import admin
# Register your models here.
from .models import *
# Expose the course-management models in the admin with the default
# ModelAdmin options.
admin.site.register(Person)
admin.site.register(Course)
admin.site.register(Response)
admin.site.register(Assignment)
admin.site.register(Question)
admin.site.register(Enrollment)
# coding: utf-8
"""
DataDomain Rest API Documentation
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) # noqa: E501
OpenAPI spec version: 1.0
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
from dd_sdk_1_0.configuration import Configuration
class FileReplicationList(object):
    """NOTE: This class is auto generated by the swagger code generator program.
    Do not edit the class manually.
    """
    """
    Attributes:
      swagger_types (dict): The key is attribute name
                            and the value is attribute type.
      attribute_map (dict): The key is attribute name
                            and the value is json key in definition.
    """
    swagger_types = {
        'start': 'int',
        'end': 'int',
        'interval': 'FileReplicationIntervalQuery',
        'context': 'list[FileReplicationDetails]',
        'paging_info': 'Paging'
    }
    attribute_map = {
        'start': 'start',
        'end': 'end',
        'interval': 'interval',
        'context': 'context',
        'paging_info': 'paging_info'
    }
    def __init__(self, start=None, end=None, interval=None, context=None, paging_info=None, _configuration=None):  # noqa: E501
        """FileReplicationList - a model defined in Swagger"""  # noqa: E501
        if _configuration is None:
            _configuration = Configuration()
        self._configuration = _configuration
        self._start = None
        self._end = None
        self._interval = None
        self._context = None
        self._paging_info = None
        self.discriminator = None
        # start/end/interval are required; their setters validate them.
        self.start = start
        self.end = end
        self.interval = interval
        if context is not None:
            self.context = context
        if paging_info is not None:
            self.paging_info = paging_info
    @property
    def start(self):
        """Gets the start of this FileReplicationList.  # noqa: E501

        :return: The start of this FileReplicationList.  # noqa: E501
        :rtype: int
        """
        return self._start
    @start.setter
    def start(self, start):
        """Sets the start of this FileReplicationList.

        :param start: The start of this FileReplicationList.  # noqa: E501
        :type: int
        """
        if self._configuration.client_side_validation and start is None:
            raise ValueError("Invalid value for `start`, must not be `None`")  # noqa: E501
        if (self._configuration.client_side_validation and
                start is not None and start < 0):  # noqa: E501
            raise ValueError("Invalid value for `start`, must be a value greater than or equal to `0`")  # noqa: E501
        self._start = start
    @property
    def end(self):
        """Gets the end of this FileReplicationList.  # noqa: E501

        :return: The end of this FileReplicationList.  # noqa: E501
        :rtype: int
        """
        return self._end
    @end.setter
    def end(self, end):
        """Sets the end of this FileReplicationList.

        :param end: The end of this FileReplicationList.  # noqa: E501
        :type: int
        """
        if self._configuration.client_side_validation and end is None:
            raise ValueError("Invalid value for `end`, must not be `None`")  # noqa: E501
        if (self._configuration.client_side_validation and
                end is not None and end < 0):  # noqa: E501
            raise ValueError("Invalid value for `end`, must be a value greater than or equal to `0`")  # noqa: E501
        self._end = end
    @property
    def interval(self):
        """Gets the interval of this FileReplicationList.  # noqa: E501

        :return: The interval of this FileReplicationList.  # noqa: E501
        :rtype: FileReplicationIntervalQuery
        """
        return self._interval
    @interval.setter
    def interval(self, interval):
        """Sets the interval of this FileReplicationList.

        :param interval: The interval of this FileReplicationList.  # noqa: E501
        :type: FileReplicationIntervalQuery
        """
        if self._configuration.client_side_validation and interval is None:
            raise ValueError("Invalid value for `interval`, must not be `None`")  # noqa: E501
        self._interval = interval
    @property
    def context(self):
        """Gets the context of this FileReplicationList.  # noqa: E501

        :return: The context of this FileReplicationList.  # noqa: E501
        :rtype: list[FileReplicationDetails]
        """
        return self._context
    @context.setter
    def context(self, context):
        """Sets the context of this FileReplicationList.

        :param context: The context of this FileReplicationList.  # noqa: E501
        :type: list[FileReplicationDetails]
        """
        self._context = context
    @property
    def paging_info(self):
        """Gets the paging_info of this FileReplicationList.  # noqa: E501

        :return: The paging_info of this FileReplicationList.  # noqa: E501
        :rtype: Paging
        """
        return self._paging_info
    @paging_info.setter
    def paging_info(self, paging_info):
        """Sets the paging_info of this FileReplicationList.

        :param paging_info: The paging_info of this FileReplicationList.  # noqa: E501
        :type: Paging
        """
        self._paging_info = paging_info
    def to_dict(self):
        """Returns the model properties as a dict"""
        result = {}
        # Recursively convert nested models (and lists/dicts of models).
        for attr, _ in six.iteritems(self.swagger_types):
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = list(map(
                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                    value
                ))
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = dict(map(
                    lambda item: (item[0], item[1].to_dict())
                    if hasattr(item[1], "to_dict") else item,
                    value.items()
                ))
            else:
                result[attr] = value
        if issubclass(FileReplicationList, dict):
            for key, value in self.items():
                result[key] = value
        return result
    def to_str(self):
        """Returns the string representation of the model"""
        return pprint.pformat(self.to_dict())
    def __repr__(self):
        """For `print` and `pprint`"""
        return self.to_str()
    def __eq__(self, other):
        """Returns true if both objects are equal"""
        if not isinstance(other, FileReplicationList):
            return False
        return self.to_dict() == other.to_dict()
    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        if not isinstance(other, FileReplicationList):
            return True
        return self.to_dict() != other.to_dict()
|
from PiControl.models import Pin, Schedule
import rollbar
import sys
import RPi.GPIO as RPIO
class PinController(object):
    """Wrapper around RPi.GPIO that configures every Pin model row.

    Pins whose GPIO setup fails are reported to rollbar and excluded
    from the working set.
    """
    def __init__(self):
        RPIO.setmode(RPIO.BCM)
        self.my_pins = None
        self.set_all_pins()
    def get_thermometers(self):
        """Return the queryset of pins flagged as thermometers."""
        return self.my_pins.filter(is_thermometer=True)
    def set_all_pins(self):
        """(Re)configure every pin from the database on the GPIO bus."""
        RPIO.cleanup()
        self.my_pins = Pin.objects.all()
        for pin in self.my_pins:
            try:
                RPIO.setup(pin.pin_number, pin.get_direction())
            except Exception:  # narrowed from a bare except
                rollbar.report_exc_info(sys.exc_info())
                # BUG FIX: QuerySet.exclude() returns a NEW queryset;
                # the original discarded the result, so failing pins
                # were never actually removed from self.my_pins.
                self.my_pins = self.my_pins.exclude(id=pin.id)
    def get_next_schedules(self, amount=3):
        """Return the next scheduled events.

        NOTE(review): currently a stub -- it queries Schedule but always
        returns None and ignores *amount*; confirm intended behaviour
        before relying on it.
        """
        Schedule.objects.all()
        return None
    def get_dashboard_data(self):
        """Return the pins, thermometers, and schedules for the dashboard."""
        return {
            'pins': self.my_pins,
            'thermometers': self.get_thermometers(),
            'schedules': self.get_next_schedules()
        }
    def get_all_pins(self):
        """Return all configured pins, keyed for template use."""
        return {'pins': self.my_pins}
|
# coding: utf-8
import os
import requests
import bot_define
from errbot import BotPlugin, botcmd
from qbittorrent import Client
class Torrent(BotPlugin):
    """
    Torrent Job only oNsemy
    """
    @botcmd # flags a command
    def torrent(self, msg, args): # a command callable with !tryme
        """
        url or magnet add (one by one)
        """
        # Reply to the chat the message came from (sender when it was a DM).
        send_id = msg.to
        if msg.to == self.bot_identifier:
            send_id = msg.frm
        if msg.frm != self.build_identifier(bot_define.BOT_ADMIN_ID):
            # deny! -- only the configured admin may queue torrents
            stream = self.send_stream_request(send_id, open(os.getcwd() + '/resources/deny_new.jpg', 'rb'), name = 'deny_new.jpg', stream_type = 'photo')
            return
        self.log.info('args: ' + args)
        # The argument must look like a URL, magnet, or BitComet link.
        validations = ['http://', 'magnet:', 'https://', 'bc://bt/']
        if all(not (val in args) for val in validations):
            stream = self.send_stream_request(send_id, open(os.getcwd() + '/resources/nooo.gif', 'rb'), name = 'nooo.gif', stream_type = 'document')
            return
        qb = Client(bot_define.TORRENT_URL)
        yield "Request Login"
        # NOTE(review): the qbittorrent client appears to return a truthy
        # value on FAILURE here (falsy on success) -- confirm against the
        # python-qbittorrent API before changing.
        res = qb.login(bot_define.TORRENT_USER_NAME, bot_define.TORRENT_PASSWORD)
        if res:
            yield "Failed to Login"
            return
        yield "Request Torrent Job!"
        res = qb.download_from_link(args)
        if res:
            yield "Something has wrong!"
            return
        stream = self.send_stream_request(send_id, open(os.getcwd() + '/resources/sloth.gif', 'rb'), name = 'sloth.gif', stream_type = 'document')
        yield "Request Done."
        # NOTE(review): logout is skipped on the early-return paths above.
        qb.logout()
|
from django.contrib import admin
from joomlacontent.models import JosContent,JosSections,JosCategories
#from bookbinders.models import Image, Bookbinder
class JosContentAdmin(admin.ModelAdmin):
    """Admin options for Joomla content articles."""
    # columns shown in the change-list view
    list_display = ('title', 'modified', 'state',)
    # auto-fill the URL alias from the title while typing in the admin form
    prepopulated_fields = { 'alias': ('title',)}
class JosCategoriesAdmin(admin.ModelAdmin):
    """Admin options for Joomla categories."""
    # columns shown in the change-list view
    list_display = ('title', 'name', 'section', )
# Register the imported Joomla models with the admin site.
admin.site.register(JosCategories, JosCategoriesAdmin)
admin.site.register(JosContent, JosContentAdmin)
admin.site.register(JosSections)  # default ModelAdmin options
# class ImageAdmin(admin.ModelAdmin):
# date_hierarchy = 'upload_date'
# list_display =('title', 'upload_date',)
# save_as = True
# save_on_top = True
#
#
# admin.site.register(Image, ImageAdmin)
#
#
# class BookbinderAdmin(admin.ModelAdmin):
#
# list_display = ('name','location','modified','created')
# list_filter = ('modified',)
# search_fields = ['name','location']
#
# filter_horizontal = ('image',)
# readonly_fields = ('jostext','joscontent','joscreated')
# save_as = True
# save_on_top = True
#
# admin.site.register(Bookbinder, BookbinderAdmin) |
import Config.websites_config as cfg
from Networking import Http
from HTMLUtil import LinkExtractor, Identifiers
from CommonUtil import StringUtil
from termcolor import colored, cprint
def crawl(search_string=None):
    """Crawl every configured website, follow its links and optionally search them.

    Parameters
    ----------
    search_string : str, None
        When given, each fetched route is scanned for this substring.

    Returns
    -------
    list
        Routes whose content contained ``search_string`` (empty when no
        search string was given or nothing matched).
    """
    search_results = []
    crawl_sites = cfg.websites
    for website in crawl_sites:
        cprint("Accessing website : " + website + " for links ...", "blue")
        print("")
        website_stream = Http.make_request(website)
        if website_stream is None:
            continue
        website_links = LinkExtractor.extract_anchor_links(website_stream)
        # BUG FIX: the link lists are now rebuilt per website; previously they
        # accumulated across sites, re-crawling old links with the wrong base.
        website_page_links = [LinkExtractor.extract_attribute(link, 'href')
                              for link in website_links]
        filtered_website_links = StringUtil.filter_links(website_page_links)
        for link in filtered_website_links:
            if Identifiers.is_internal_route(link):
                route = website + link
                cprint("Accessing sub route : " + route, "blue")
            elif Identifiers.is_external_route(link):
                route = link
                cprint("Accessing route : " + route, "blue")
            else:
                # BUG FIX: previously `route` kept a stale (or unbound) value
                # here and was still fetched below.
                continue
            print("")
            route_stream = Http.make_request(route)
            if search_string is not None and search_string in str(route_stream):
                cprint(" '" + search_string + "' found in " + route, "green")
                print("")
                search_results.append(route)
    # BUG FIX: results were collected but never returned.
    return search_results
|
import numpy as np
import pytest
from piecewise.environment import EnvironmentStepTypes, make_discrete_mux_env
from piecewise.environment.supervised.multiplexer.multiplexer_util import \
calc_total_bits
from piecewise.error.environment_error import OutOfDataError
class TestClassificationEnvironmentViaDiscreteMultiplexer:
    """Exercise the classification environment through a discrete multiplexer."""

    _DUMMY_ACTION = 0

    def _setup_short_epoch(self):
        # 1 address bit -> smallest mux; dataset is every bitstring once
        address_bits = 1
        dataset_size = 2**calc_total_bits(address_bits)
        env = make_discrete_mux_env(num_address_bits=address_bits,
                                    shuffle_dataset=False)
        return env, dataset_size

    def test_step_type(self):
        env = make_discrete_mux_env()
        assert env.step_type == EnvironmentStepTypes.single_step

    def test_observe_order_no_shuffle(self):
        env = make_discrete_mux_env(num_address_bits=1, shuffle_dataset=False)
        expected_seq = [[0, 0, 0], [0, 0, 1], [0, 1, 0], [0, 1, 1],
                        [1, 0, 0], [1, 0, 1], [1, 1, 0], [1, 1, 1]]
        step = 0
        while not env.is_terminal():
            assert np.array_equal(env.observe(), expected_seq[step])
            env.act(self._DUMMY_ACTION)
            step += 1

    def test_act_all_correct(self):
        env = make_discrete_mux_env(num_address_bits=1, shuffle_dataset=False)
        correct_actions = [0, 0, 1, 1, 0, 1, 0, 1]
        step = 0
        while not env.is_terminal():
            assert env.act(correct_actions[step]).was_correct_action
            step += 1

    def test_act_all_incorrect(self):
        env = make_discrete_mux_env(num_address_bits=1, shuffle_dataset=False)
        wrong_actions = [1, 1, 0, 0, 1, 0, 1, 0]
        step = 0
        while not env.is_terminal():
            assert not env.act(wrong_actions[step]).was_correct_action
            step += 1

    def test_act_changes_next_obs(self):
        env, dataset_size = self._setup_short_epoch()
        previous = None
        for _ in range(dataset_size):
            current = env.observe()
            if previous is not None:
                assert not np.array_equal(previous, current)
            env.act(self._DUMMY_ACTION)
            previous = current

    def test_same_obs_on_repeated_observe(self):
        env, dataset_size = self._setup_short_epoch()
        previous = None
        for _ in range(dataset_size):
            current = env.observe()
            if previous is not None:
                assert np.array_equal(previous, current)
            previous = current

    def test_is_terminal_act_only_epoch(self):
        env, dataset_size = self._setup_short_epoch()
        for _ in range(dataset_size):
            env.act(self._DUMMY_ACTION)
        assert env.is_terminal()

    def test_is_not_terminal_observe_only_epoch(self):
        env, dataset_size = self._setup_short_epoch()
        for _ in range(dataset_size):
            env.observe()
        assert not env.is_terminal()

    def test_is_terminal_act_and_observe_epoch(self):
        env, dataset_size = self._setup_short_epoch()
        for _ in range(dataset_size):
            env.observe()
            env.act(self._DUMMY_ACTION)
        assert env.is_terminal()

    def test_out_of_data_on_extra_act(self):
        env, dataset_size = self._setup_short_epoch()
        for _ in range(dataset_size):
            env.observe()
            env.act(self._DUMMY_ACTION)
        with pytest.raises(OutOfDataError):
            env.act(self._DUMMY_ACTION)

    def test_out_of_data_on_extra_observe(self):
        env, dataset_size = self._setup_short_epoch()
        for _ in range(dataset_size):
            env.observe()
            env.act(self._DUMMY_ACTION)
        with pytest.raises(OutOfDataError):
            env.observe()

    def test_reset_with_two_epochs_no_shuffle(self):
        env = make_discrete_mux_env(num_address_bits=1,
                                    shuffle_dataset=False)

        def run_epoch():
            obs_seq, reward_seq = [], []
            while not env.is_terminal():
                obs_seq.append(env.observe())
                reward_seq.append(env.act(self._DUMMY_ACTION).reward)
            return obs_seq, reward_seq

        first_obs, first_rewards = run_epoch()
        env.reset()
        second_obs, second_rewards = run_epoch()
        assert np.array_equal(first_obs, second_obs)
        assert np.array_equal(first_rewards, second_rewards)
|
hashValues = dict()
for number in range(1,100000000):
x = number/100000000
newHash = hash(x) % (2**32)
if newHash in hashValues:
print("----------------")
print(repr(x))
print(repr(newHash))
print(repr(hashValues[newHash]))
print("----------------")
else:
hashValues[newHash] = x
|
import os
today = '22-04-19_'
def parse_args():
    """Parse the command-line options for the rho-statistics run.

    Returns
    -------
    argparse.Namespace
        The parsed options.

    NOTE(review): ``--use_reserved`` and ``--mod`` combine
    ``action='store_const', const=True`` with ``default=True`` -- passing the
    flag is a no-op and the option can never be switched off from the CLI.
    Only ``--obs`` (default ``False``) actually toggles anything.
    """
    import argparse
    parser = argparse.ArgumentParser(description='Correlation of reserved stars')
    parser.add_argument('--piff_cat',
                        default='/home2/dfa/sobreira/alsina/catalogs/y3a1-v29',
                        help='Full Path to the Only stars Piff catalog')
    parser.add_argument('--exps_file',
                        default='/home/dfa/sobreira/alsina/DESWL/psf/ally3.grizY',
                        #default='/home/dfa/sobreira/alsina/DESWL/psf/testexp',
                        help='list of exposures (in lieu of separate exps)')
    parser.add_argument('--bands', default='riz', type=str,
                        help='Limit to the given bands')
    parser.add_argument('--use_reserved', default=True,
                        action='store_const', const=True,
                        help='just use the objects with the RESERVED flag')
    parser.add_argument('--frac', default=1., type=float,
                        help='Choose a random fraction of the input stars')
    parser.add_argument('--mod', default=True,
                        action='store_const', const=True,
                        help='If true it substracts the mean to each field before calculate correlations')
    parser.add_argument('--obs', default=False,
                        action='store_const', const=True,
                        help='Use e_obs instead of e_piff to calculate modified rho stats')
    parser.add_argument('--outpath', default='/home/dfa/sobreira/alsina/alpha-beta-gamma/code/correlations/',
                        help='location of the output of the files')
    args = parser.parse_args()
    return args
def measure_rho(data, max_sep=300, sep_units='arcmin', tag=None, prefix='piff', mod=True, obs=False ):
    """Compute the rho statistics.

    Parameters
    ----------
    data : structured array
        Must provide 'ra', 'dec', 'obs_e1/e2/T' and '<prefix>_e1/e2/T' columns.
    max_sep : NOTE(review) -- currently UNUSED; the binning below hard-codes
        ``max_sep = 250``.
    sep_units : str
        Angular units for the correlation bins.
    tag : str, None
        Optional label prepended to the catalog names (logging only).
    prefix : str
        Column prefix for the model (PSF) quantities.
    mod : bool
        If True, subtract the mean from each field before correlating.
    obs : bool
        If True, correlate observed ellipticities instead of model ones.

    Returns
    -------
    list
        Six ``treecorr.GGCorrelation`` objects, in the order rho0..rho5
        (matching the unpacking done in ``main``).
    """
    import treecorr
    import numpy as np
    e1 = data['obs_e1']
    e2 = data['obs_e2']
    p_e1 = data[prefix+'_e1']
    p_e2 = data[prefix+'_e2']
    T = data['obs_T']
    p_T = data[prefix+'_T']
    # residuals: ellipticity differences and fractional size error
    de1 = e1-p_e1
    de2 = e2-p_e2
    dt = (T-p_T)/T
    # size-error-weighted ellipticities (model and observed variants)
    w1 = p_e1*dt
    w2 = p_e2*dt
    w1obs = e1*dt
    w2obs = e2*dt
    #Modified ellipticities
    if(mod):
        e1 = e1 - np.array(np.mean(e1))
        e2 = e2 - np.array(np.mean(e2))
        p_e1 = p_e1 - np.array(np.mean(p_e1))
        p_e2 = p_e2 - np.array(np.mean(p_e2))
        de1 = de1 - np.array(np.mean(de1))
        de2 = de2 - np.array(np.mean(de2))
        w1 = w1 - np.array(np.mean(w1))
        w2 = w2 - np.array(np.mean(w2))
        w1obs = w1obs - np.array(np.mean(w1obs))
        w2obs = w2obs - np.array(np.mean(w2obs))
    ra = data['ra']
    dec = data['dec']
    print('ra = ',ra)
    print('dec = ',dec)
    if(obs):
        ecat = treecorr.Catalog(ra=ra, dec=dec, ra_units='deg', dec_units='deg', g1=e1, g2=e2)
        decat = treecorr.Catalog(ra=ra, dec=dec, ra_units='deg', dec_units='deg', g1=de1, g2=de2)
        wcat = treecorr.Catalog(ra=ra, dec=dec, ra_units='deg', dec_units='deg', g1=w1obs, g2=w2obs)
    else:
        ecat = treecorr.Catalog(ra=ra, dec=dec, ra_units='deg', dec_units='deg', g1=p_e1, g2=p_e2)
        decat = treecorr.Catalog(ra=ra, dec=dec, ra_units='deg', dec_units='deg', g1=de1, g2=de2)
        # NOTE(review): even in this (model) branch wcat uses w1obs/w2obs,
        # so w1/w2 end up unused when obs=False -- confirm this is intended.
        #wcat = treecorr.Catalog(ra=ra, dec=dec, ra_units='deg', dec_units='deg', g1=w1, g2=w2)
        wcat = treecorr.Catalog(ra=ra, dec=dec, ra_units='deg', dec_units='deg', g1=w1obs, g2=w2obs)
    ecat.name = 'ecat'
    decat.name = 'decat'
    wcat.name = 'wcat'
    if tag is not None:
        for cat in [ ecat, decat, wcat ]:
            cat.name = tag + ":" + cat.name
    # hard-coded binning (note: ignores the max_sep function argument)
    bin_config = dict(
        sep_units = sep_units,
        nbins = 20,
        min_sep = 2.5,
        max_sep = 250,)
    #sep_units = 'degrees',
    '''
    bin_slop = 0.1,
    min_sep = 0.5,
    max_sep = max_sep,
    bin_size = 0.2,
    '''
    results = []
    # pair order defines rho0..rho5: (e,e), (de,de), (de,e), (w,w), (de,w), (e,w)
    for (cat1, cat2) in [(ecat, ecat),
                         (decat, decat),
                         (decat, ecat),
                         (wcat, wcat),
                         (decat, wcat),
                         (ecat, wcat) ]:
        print('Doing correlation of %s vs %s'%(cat1.name, cat2.name))
        rho = treecorr.GGCorrelation(bin_config, verbose=2)
        if cat1 is cat2:
            # auto-correlation
            rho.process(cat1)
        else:
            # cross-correlation
            rho.process(cat1, cat2)
        print('mean xi+ = ',rho.xip.mean())
        print('mean xi- = ',rho.xim.mean())
        results.append(rho)
    return results
def main():
    """Run the rho-statistics pipeline and write one FITS file per statistic."""
    import sys
    sys.path.insert(0, '/home/dfa/sobreira/alsina/alpha-beta-gamma/code/src')
    #sys.path.insert(0, '/global/cscratch1/sd/alsina/alpha-beta-gamma/code/src')
    import numpy as np
    from read_psf_cats import read_data, toList
    from astropy.io import fits
    args = parse_args()
    #Make directory where the ouput data will be
    outpath = os.path.expanduser(args.outpath)
    try:
        if not os.path.exists(outpath):
            os.makedirs(outpath)
    except OSError:
        # only re-raise when the directory really could not be created
        # (tolerates a concurrent mkdir race)
        if not os.path.exists(outpath): raise
    #STATISTIC USING ONLY RESERVED STARS
    keys = ['ra', 'dec','obs_e1', 'obs_e2', 'obs_T',
            'piff_e1', 'piff_e2', 'piff_T', 'mag']
    exps = toList(args.exps_file)
    data, bands, tilings = read_data(exps, args.piff_cat , keys,
                                     limit_bands=args.bands,
                                     use_reserved=args.use_reserved)
    print("Objects", len(data))
    # bright-star cut
    data = data[data['mag']<20]
    print("Objects with magnitude <20", len(data))
    # layout of the per-statistic output table
    names=['BIN1', 'BIN2','ANGBIN', 'VALUE', 'ANG']
    forms = ['i4', 'i4', 'i4', 'f8', 'f8']
    dtype = dict(names = names, formats=forms)
    nrows = 20
    outdata = np.recarray((nrows, ), dtype=dtype)
    # one FITS extension name per rho statistic, xi+ first, then xi-
    namesout=['RHO0P','RHO1P', 'RHO2P', 'RHO3P', 'RHO4P', 'RHO5P',
              'RHO0M','RHO1M', 'RHO2M', 'RHO3M', 'RHO4M', 'RHO5M']
    rho0, rho1, rho2, rho3, rho4, rho5 = measure_rho(data, mod=args.mod, obs=args.obs)
    # NOTE(review): angarr and thetaarr are computed but never used below.
    angarr = np.arange(nrows)
    thetaarr = np.exp(rho0.meanlogr)
    rho0parr = rho0.xip; rho1parr = rho1.xip; rho2parr = rho2.xip
    rho3parr = rho3.xip; rho4parr = rho4.xip; rho5parr = rho5.xip
    rho0marr = rho0.xim; rho1marr = rho1.xim; rho2marr = rho2.xim
    rho3marr = rho3.xim; rho4marr = rho4.xim; rho5marr = rho5.xim
    # variances are doubled here -- presumably a deliberate inflation for
    # the shared xi+/xi- covariance; confirm before changing
    varrho0arr = 2*rho0.varxi; varrho1arr = 2*rho1.varxi; varrho2arr = 2*rho2.varxi;
    varrho3arr = 2*rho3.varxi; varrho4arr = 2*rho4.varxi; varrho5arr = 2*rho5.varxi;
    rhos = [rho0parr, rho1parr, rho2parr, rho3parr, rho4parr,
            rho5parr, rho0marr, rho1marr, rho2marr, rho3marr, rho4marr,
            rho5marr]
    vares = [varrho0arr, varrho1arr, varrho2arr, varrho3arr,
             varrho4arr, varrho5arr, varrho0arr, varrho1arr, varrho2arr,
             varrho3arr, varrho4arr, varrho5arr]
    for i, nam in enumerate(namesout):
        # diagonal covariance built from the (doubled) treecorr variances
        covmat = np.diag(vares[i])
        hdu = fits.PrimaryHDU()
        hdul = fits.HDUList([hdu])
        covmathdu = fits.ImageHDU(covmat, name='COVMAT')
        hdul.insert(1, covmathdu)
        angarray = np.exp(rho0.meanlogr)
        valuearray = np.array(rhos[i])
        # BIN1/BIN2 are placeholders (-999) since there is a single tomographic bin
        bin1array = np.array([ -999]*nrows)
        bin2array = np.array([ -999]*nrows)
        angbinarray = np.arange(nrows)
        array_list = [bin1array, bin2array, angbinarray, valuearray, angarray ]
        for array, name in zip(array_list, names): outdata[name] = array
        corrhdu = fits.BinTableHDU(outdata, name=nam)
        hdul.insert(2, corrhdu)
        hdul.writeto(outpath + nam + '.fits', overwrite=True)


if __name__ == "__main__":
    main()
|
# %n%d %% I l%o%ve te%ach%ing.
# T%he%re i%s n%o%th%ing as r%ewarding a%s e%duc%at%i%ng a%n%d e%m%p%ow%er%ing p%e%o%ple.
# I fo%und te%a%ching m%ore i%n%t%er%%es%ting t%h%an any other %jobs.
# D%o%es thi%s m%ot%iv%a%te %y%o%u to b%e a t%e%a%cher?'''
# matches = re.sub('%', '', txt)
# print(matches) |
"""Communicate with an Android TV or Amazon Fire TV device via ADB over a network.
ADB Debugging must be enabled.
"""
import logging
import re
from socket import error as socket_error
import sys
import threading
from adb import adb_commands
from adb.sign_pythonrsa import PythonRSASigner
from adb_messenger.client import Client as AdbClient
from . import constants
# `threading.Lock.acquire(timeout=...)` exists since Python 3.2; older
# interpreters (including Python 2) must call acquire() without kwargs.
# BUG FIX: the original test `sys.version_info[0] > 2 and sys.version_info[1] > 1`
# wrongly rejects any hypothetical X.0 / X.1 release with X > 3; the
# idiomatic tuple comparison handles all versions correctly.
if sys.version_info >= (3, 2):
    LOCK_KWARGS = {'timeout': 3}
else:
    LOCK_KWARGS = {}
    FileNotFoundError = IOError  # pylint: disable=redefined-builtin
class BaseTV(object):
"""Base class for representing an Android TV / Fire TV device."""
def __init__(self, host, adbkey='', adb_server_ip='', adb_server_port=5037):
    """Initialize a ``BaseTV`` object.

    Parameters
    ----------
    host : str
        The address of the device in the format ``<ip address>:<port>``
    adbkey : str
        The path to the ``adbkey`` file for ADB authentication; the file ``adbkey.pub`` must be in the same directory
    adb_server_ip : str
        The IP address of the ADB server
    adb_server_port : int
        The port for the ADB server
    """
    self.host = host
    self.adbkey = adbkey
    self.adb_server_ip = adb_server_ip
    self.adb_server_port = adb_server_port
    # the max volume level (determined when first getting the volume level)
    self.max_volume = None
    # keep track of whether the ADB connection is intact
    self._available = False
    # use a lock to make sure that ADB commands don't overlap
    self._adb_lock = threading.Lock()
    # the attributes used for sending ADB commands; filled in in `self.connect()`
    self._adb = None  # python-adb
    self._adb_client = None  # pure-python-adb
    self._adb_device = None  # pure-python-adb
    # the method used for sending ADB commands: direct connection when no
    # server address is configured, otherwise via an ADB server
    if not self.adb_server_ip:
        # python-adb
        self.adb_shell = self._adb_shell_python_adb
    else:
        # pure-python-adb
        self.adb_shell = self._adb_shell_pure_python_adb
    # establish the ADB connection
    self.connect()
    # get device properties
    self.device_properties = self.get_device_properties()
# ======================================================================= #
# #
# ADB methods #
# #
# ======================================================================= #
def _adb_shell_python_adb(self, cmd):
    """Send an ADB command using the Python ADB implementation.

    Parameters
    ----------
    cmd : str
        The ADB command to be sent

    Returns
    -------
    str, None
        The response from the device, if there is a response
    """
    if not self.available:
        return None
    acquired = self._adb_lock.acquire(**LOCK_KWARGS)  # pylint: disable=unexpected-keyword-arg
    if not acquired:
        return None
    try:
        return self._adb.Shell(cmd)
    finally:
        self._adb_lock.release()
def _adb_shell_pure_python_adb(self, cmd):
    """Send an ADB command using an ADB server.

    Parameters
    ----------
    cmd : str
        The ADB command to be sent

    Returns
    -------
    str, None
        The response from the device, if there is a response
    """
    if not self._available:
        return None
    acquired = self._adb_lock.acquire(**LOCK_KWARGS)  # pylint: disable=unexpected-keyword-arg
    if not acquired:
        return None
    try:
        return self._adb_device.shell(cmd)
    finally:
        self._adb_lock.release()
def _key(self, key):
    """Send a key event to device.

    Parameters
    ----------
    key : str, int
        The Key constant
    """
    # `input keyevent <key>` is the stock Android shell way to inject key presses
    self.adb_shell('input keyevent {0}'.format(key))
def connect(self, always_log_errors=True):
    """Connect to an Android TV / Fire TV device.

    Parameters
    ----------
    always_log_errors : bool
        If True, errors will always be logged; otherwise, errors will only be logged on the first failed reconnect attempt

    Returns
    -------
    bool
        Whether or not the connection was successfully established and the device is available
    """
    self._adb_lock.acquire(**LOCK_KWARGS)  # pylint: disable=unexpected-keyword-arg
    try:
        if not self.adb_server_ip:
            # python-adb (direct connection)
            try:
                if self.adbkey:
                    # private key
                    with open(self.adbkey) as f:
                        priv = f.read()
                    # public key
                    try:
                        with open(self.adbkey + '.pub') as f:
                            pub = f.read()
                    except FileNotFoundError:
                        # the .pub file is optional
                        pub = ''
                    signer = PythonRSASigner(pub, priv)
                    # Connect to the device
                    self._adb = adb_commands.AdbCommands().ConnectDevice(serial=self.host, rsa_keys=[signer], default_timeout_ms=9000)
                else:
                    self._adb = adb_commands.AdbCommands().ConnectDevice(serial=self.host, default_timeout_ms=9000)
                # ADB connection successfully established
                self._available = True
            except socket_error as serr:
                if self._available or always_log_errors:
                    if serr.strerror is None:
                        serr.strerror = "Timed out trying to connect to ADB device."
                    logging.warning("Couldn't connect to host: %s, error: %s", self.host, serr.strerror)
                # ADB connection attempt failed
                self._adb = None
                self._available = False
            finally:
                # NOTE(review): a `return` inside `finally` swallows any
                # in-flight exception other than `socket_error`; confirm this
                # is intentional before restructuring.
                return self._available
        else:
            # pure-python-adb (via an ADB server)
            try:
                self._adb_client = AdbClient(host=self.adb_server_ip, port=self.adb_server_port)
                self._adb_device = self._adb_client.device(self.host)
                self._available = bool(self._adb_device)
            except:  # noqa pylint: disable=bare-except
                self._available = False
            finally:
                # NOTE(review): same `return`-in-`finally` caveat as above
                return self._available
    finally:
        self._adb_lock.release()
# ======================================================================= #
# #
# Home Assistant device info #
# #
# ======================================================================= #
def get_device_properties(self):
    """Return a dictionary of device properties.

    Runs all six property queries in a single ADB shell invocation and
    parses the six output lines.

    Returns
    -------
    props : dict
        A dictionary with keys ``'wifimac'``, ``'ethmac'``, ``'serialno'``, ``'manufacturer'``, ``'model'``, and ``'sw_version'``
    """
    combined_cmd = " && ".join([constants.CMD_MANUFACTURER,
                                constants.CMD_MODEL,
                                constants.CMD_SERIALNO,
                                constants.CMD_VERSION,
                                constants.CMD_MAC_WLAN0,
                                constants.CMD_MAC_ETH0])
    output = self.adb_shell(combined_cmd)
    if not output:
        return {}
    lines = output.strip().splitlines()
    if len(lines) != 6:
        # a command failed somewhere; don't guess at partial output
        return {}
    manufacturer, model, serialno, version, wlan0_line, eth0_line = lines
    wifi_matches = re.findall(constants.MAC_REGEX_PATTERN, wlan0_line)
    wifimac = wifi_matches[0] if wifi_matches else None
    eth_matches = re.findall(constants.MAC_REGEX_PATTERN, eth0_line)
    ethmac = eth_matches[0] if eth_matches else None
    return {'manufacturer': manufacturer,
            'model': model,
            'serialno': serialno,
            'sw_version': version,
            'wifimac': wifimac,
            'ethmac': ethmac}
# ======================================================================= #
# #
# Properties #
# #
# ======================================================================= #
@property
def audio_state(self):
    """Check if audio is playing, paused, or idle.

    Returns
    -------
    str, None
        The audio state, as determined from the ADB shell command ``dumpsys audio``, or ``None`` if it could not be determined
    """
    output = self.adb_shell(constants.CMD_AUDIO_STATE)
    if output is None:
        return None
    # '1' -> paused, '2' -> playing, anything else -> idle
    state_map = {'1': constants.STATE_PAUSED, '2': constants.STATE_PLAYING}
    return state_map.get(output, constants.STATE_IDLE)
@property
def available(self):
    """Check whether the ADB connection is intact.

    Returns
    -------
    bool
        Whether or not the ADB connection is intact
    """
    if not self.adb_server_ip:
        # python-adb: availability is simply "do we hold a connection object"
        return bool(self._adb)
    # pure-python-adb: ask the ADB server; `_available` tracks state so
    # transitions are only logged once
    try:
        # make sure the server is available
        adb_devices = self._adb_client.devices()
        # make sure the device is available
        try:
            # case 1: the device is currently available
            if any([self.host in dev.get_serial_no() for dev in adb_devices]):
                if not self._available:
                    self._available = True
                return True
            # case 2: the device is not currently available
            if self._available:
                logging.error('ADB server is not connected to the device.')
                self._available = False
            return False
        except RuntimeError:
            if self._available:
                logging.error('ADB device is unavailable; encountered an error when searching for device.')
                self._available = False
            return False
    except RuntimeError:
        if self._available:
            logging.error('ADB server is unavailable.')
            self._available = False
        return False
@property
def awake(self):
    """Check if the device is awake (screensaver is not running).

    Returns
    -------
    bool
        Whether or not the device is awake (screensaver is not running)
    """
    # the appended shell snippet echoes '1' on success, '0' on failure
    return self.adb_shell(constants.CMD_AWAKE + constants.CMD_SUCCESS1_FAILURE0) == '1'

@property
def current_app(self):
    """Return the current app.

    Returns
    -------
    str, None
        The ID of the current app, or ``None`` if it could not be determined
    """
    current_app = self.adb_shell(constants.CMD_CURRENT_APP_FULL)
    if current_app:
        return current_app
    # empty / falsy shell output is normalized to None
    return None

@property
def device(self):
    """Get the current playback device.

    Returns
    -------
    str, None
        The current playback device, or ``None`` if it could not be determined
    """
    # fetch the STREAM_MUSIC dumpsys block and delegate parsing to the helper
    stream_music = self._get_stream_music()
    return self._device(stream_music)

@property
def is_volume_muted(self):
    """Whether or not the volume is muted.

    Returns
    -------
    bool, None
        Whether or not the volume is muted, or ``None`` if it could not be determined
    """
    stream_music = self._get_stream_music()
    return self._is_volume_muted(stream_music)

@property
def media_session_state(self):
    """Get the state from the output of ``dumpsys media_session``.

    Returns
    -------
    int, None
        The state from the output of the ADB shell command ``dumpsys media_session``, or ``None`` if it could not be determined
    """
    media_session = self.adb_shell(constants.CMD_MEDIA_SESSION_STATE_FULL)
    return self._media_session_state(media_session)

@property
def running_apps(self):
    """Return a list of running user applications.

    Returns
    -------
    list
        A list of the running apps
    """
    ps = self.adb_shell(constants.CMD_RUNNING_APPS)
    return self._running_apps(ps)

@property
def screen_on(self):
    """Check if the screen is on.

    Returns
    -------
    bool
        Whether or not the device is on
    """
    return self.adb_shell(constants.CMD_SCREEN_ON + constants.CMD_SUCCESS1_FAILURE0) == '1'

@property
def volume(self):
    """Get the absolute volume level.

    Returns
    -------
    int, None
        The absolute volume level, or ``None`` if it could not be determined
    """
    stream_music = self._get_stream_music()
    # the playback device is needed to locate its volume entry in the block
    device = self._device(stream_music)
    return self._volume(stream_music, device)

@property
def volume_level(self):
    """Get the relative volume level.

    Returns
    -------
    float, None
        The volume level (between 0 and 1), or ``None`` if it could not be determined
    """
    volume = self.volume
    return self._volume_level(volume)

@property
def wake_lock_size(self):
    """Get the size of the current wake lock.

    Returns
    -------
    int, None
        The size of the current wake lock, or ``None`` if it could not be determined
    """
    locks_size = self.adb_shell(constants.CMD_WAKE_LOCK_SIZE)
    return self._wake_lock_size(locks_size)
# ======================================================================= #
# #
# Parse properties #
# #
# ======================================================================= #
@staticmethod
def _audio_state(dumpsys_audio):
    """Parse the ``audio_state`` property from the output of ``adb shell dumpsys audio``.

    Parameters
    ----------
    dumpsys_audio : str, None
        The output of ``adb shell dumpsys audio``

    Returns
    -------
    str, None
        The audio state, or ``None`` if it could not be determined
    """
    if not dumpsys_audio:
        return None
    for entry in dumpsys_audio.splitlines():
        # skip this line, which can cause false positives for some apps (e.g. VRV)
        if 'OpenSL ES AudioPlayer (Buffer Queue)' in entry:
            continue
        if 'started' in entry:
            return constants.STATE_PLAYING
        if 'paused' in entry:
            return constants.STATE_PAUSED
    return constants.STATE_IDLE
@staticmethod
def _device(stream_music):
    """Get the current playback device from the ``STREAM_MUSIC`` block from ``adb shell dumpsys audio``.

    Parameters
    ----------
    stream_music : str, None
        The ``STREAM_MUSIC`` block from ``adb shell dumpsys audio``

    Returns
    -------
    str, None
        The current playback device, or ``None`` if it could not be determined
    """
    if not stream_music:
        return None
    found = re.findall(constants.DEVICE_REGEX_PATTERN, stream_music, re.DOTALL | re.MULTILINE)
    return found[0] if found else None
def _get_stream_music(self, dumpsys_audio=None):
    """Get the ``STREAM_MUSIC`` block from ``adb shell dumpsys audio``.

    Parameters
    ----------
    dumpsys_audio : str, None
        The output of ``adb shell dumpsys audio``; fetched over ADB when
        not supplied by the caller

    Returns
    -------
    str, None
        The ``STREAM_MUSIC`` block from ``adb shell dumpsys audio``, or ``None`` if it could not be determined
    """
    dump = dumpsys_audio or self.adb_shell("dumpsys audio")
    if not dump:
        return None
    found = re.findall(constants.STREAM_MUSIC_REGEX_PATTERN, dump, re.DOTALL | re.MULTILINE)
    return found[0] if found else None
@staticmethod
def _is_volume_muted(stream_music):
    """Determine whether or not the volume is muted from the ``STREAM_MUSIC`` block from ``adb shell dumpsys audio``.

    Parameters
    ----------
    stream_music : str, None
        The ``STREAM_MUSIC`` block from ``adb shell dumpsys audio``

    Returns
    -------
    bool, None
        Whether or not the volume is muted, or ``None`` if it could not be determined
    """
    if not stream_music:
        return None
    found = re.findall(constants.MUTED_REGEX_PATTERN, stream_music, re.DOTALL | re.MULTILINE)
    if not found:
        return None
    return found[0] == 'true'
@staticmethod
def _media_session_state(media_session):
    """Get the state from the output of ``adb shell dumpsys media_session | grep -m 1 'state=PlaybackState {'``.

    Parameters
    ----------
    media_session : str, None
        The output of ``adb shell dumpsys media_session | grep -m 1 'state=PlaybackState {'``

    Returns
    -------
    int, None
        The state from the output of the ADB shell command ``dumpsys media_session``, or ``None`` if it could not be determined
    """
    if not media_session:
        return None
    match = constants.REGEX_MEDIA_SESSION_STATE.search(media_session)
    return int(match.group('state')) if match else None
@staticmethod
def _running_apps(ps):
"""Get the running apps from the output of ``ps | grep u0_a``.
Parameters
----------
ps : str, None
The output of ``adb shell ps | grep u0_a``
Returns
-------
list, None
A list of the running apps, or ``None`` if it could not be determined
"""
if ps:
if isinstance(ps, list):
return [line.strip().rsplit(' ', 1)[-1] for line in ps if line.strip()]
return [line.strip().rsplit(' ', 1)[-1] for line in ps.splitlines() if line.strip()]
return None
def _volume(self, stream_music, device):
    """Get the absolute volume level from the ``STREAM_MUSIC`` block from ``adb shell dumpsys audio``.

    Parameters
    ----------
    stream_music : str, None
        The ``STREAM_MUSIC`` block from ``adb shell dumpsys audio``
    device : str, None
        The current playback device

    Returns
    -------
    int, None
        The absolute volume level, or ``None`` if it could not be determined
    """
    if not stream_music:
        return None
    # lazily determine (and cache on the instance) the maximum volume
    if not self.max_volume:
        max_volume_matches = re.findall(constants.MAX_VOLUME_REGEX_PATTERN, stream_music, re.DOTALL | re.MULTILINE)
        if max_volume_matches:
            self.max_volume = float(max_volume_matches[0])
        else:
            # fall back to 15 steps when the pattern is absent
            self.max_volume = 15.
    if not device:
        return None
    # NOTE(review): `device` is interpolated into a regex unescaped; confirm
    # device names never contain regex metacharacters.
    volume_matches = re.findall(device + constants.VOLUME_REGEX_PATTERN, stream_music, re.DOTALL | re.MULTILINE)
    if volume_matches:
        return int(volume_matches[0])
    return None
def _volume_level(self, volume):
"""Get the relative volume level from the absolute volume level.
Parameters
-------
volume: int, None
The absolute volume level
Returns
-------
float, None
The volume level (between 0 and 1), or ``None`` if it could not be determined
"""
if volume is not None and self.max_volume:
return volume / self.max_volume
return None
@staticmethod
def _wake_lock_size(locks_size):
"""Get the size of the current wake lock from the output of ``adb shell dumpsys power | grep Locks | grep 'size='``.
Parameters
----------
locks_size : str, None
The output of ``adb shell dumpsys power | grep Locks | grep 'size='``.
Returns
-------
int, None
The size of the current wake lock, or ``None`` if it could not be determined
"""
if locks_size:
return int(locks_size.split("=")[1].strip())
return None
# ======================================================================= #
# #
# "key" methods: basic commands #
# #
# ======================================================================= #
# Each method below sends a single Android key event via `_key`.
def power(self):
    """Send power action."""
    self._key(constants.KEY_POWER)

def sleep(self):
    """Send sleep action."""
    self._key(constants.KEY_SLEEP)

def home(self):
    """Send home action."""
    self._key(constants.KEY_HOME)

def up(self):
    """Send up action."""
    self._key(constants.KEY_UP)

def down(self):
    """Send down action."""
    self._key(constants.KEY_DOWN)

def left(self):
    """Send left action."""
    self._key(constants.KEY_LEFT)

def right(self):
    """Send right action."""
    self._key(constants.KEY_RIGHT)

def enter(self):
    """Send enter action."""
    self._key(constants.KEY_ENTER)

def back(self):
    """Send back action."""
    self._key(constants.KEY_BACK)

def menu(self):
    """Send menu action."""
    self._key(constants.KEY_MENU)

def mute_volume(self):
    """Mute the volume."""
    self._key(constants.KEY_MUTE)
# ======================================================================= #
# #
# "key" methods: media commands #
# #
# ======================================================================= #
# Media-transport key events, each delegating to `_key`.
def media_play(self):
    """Send media play action."""
    self._key(constants.KEY_PLAY)

def media_pause(self):
    """Send media pause action."""
    self._key(constants.KEY_PAUSE)

def media_play_pause(self):
    """Send media play/pause action."""
    self._key(constants.KEY_PLAY_PAUSE)

def media_stop(self):
    """Send media stop action."""
    self._key(constants.KEY_STOP)

def media_next_track(self):
    """Send media next action (results in fast-forward)."""
    self._key(constants.KEY_NEXT)

def media_previous_track(self):
    """Send media previous action (results in rewind)."""
    self._key(constants.KEY_PREVIOUS)
# ======================================================================= #
# #
# "key" methods: alphanumeric commands #
# #
# ======================================================================= #
# Alphanumeric key events (space, 0-9, a-z), each delegating to `_key`.
def space(self):
    """Send space keypress."""
    self._key(constants.KEY_SPACE)

def key_0(self):
    """Send 0 keypress."""
    self._key(constants.KEY_0)

def key_1(self):
    """Send 1 keypress."""
    self._key(constants.KEY_1)

def key_2(self):
    """Send 2 keypress."""
    self._key(constants.KEY_2)

def key_3(self):
    """Send 3 keypress."""
    self._key(constants.KEY_3)

def key_4(self):
    """Send 4 keypress."""
    self._key(constants.KEY_4)

def key_5(self):
    """Send 5 keypress."""
    self._key(constants.KEY_5)

def key_6(self):
    """Send 6 keypress."""
    self._key(constants.KEY_6)

def key_7(self):
    """Send 7 keypress."""
    self._key(constants.KEY_7)

def key_8(self):
    """Send 8 keypress."""
    self._key(constants.KEY_8)

def key_9(self):
    """Send 9 keypress."""
    self._key(constants.KEY_9)

def key_a(self):
    """Send a keypress."""
    self._key(constants.KEY_A)

def key_b(self):
    """Send b keypress."""
    self._key(constants.KEY_B)

def key_c(self):
    """Send c keypress."""
    self._key(constants.KEY_C)

def key_d(self):
    """Send d keypress."""
    self._key(constants.KEY_D)

def key_e(self):
    """Send e keypress."""
    self._key(constants.KEY_E)

def key_f(self):
    """Send f keypress."""
    self._key(constants.KEY_F)

def key_g(self):
    """Send g keypress."""
    self._key(constants.KEY_G)

def key_h(self):
    """Send h keypress."""
    self._key(constants.KEY_H)

def key_i(self):
    """Send i keypress."""
    self._key(constants.KEY_I)

def key_j(self):
    """Send j keypress."""
    self._key(constants.KEY_J)

def key_k(self):
    """Send k keypress."""
    self._key(constants.KEY_K)

def key_l(self):
    """Send l keypress."""
    self._key(constants.KEY_L)

def key_m(self):
    """Send m keypress."""
    self._key(constants.KEY_M)

def key_n(self):
    """Send n keypress."""
    self._key(constants.KEY_N)

def key_o(self):
    """Send o keypress."""
    self._key(constants.KEY_O)

def key_p(self):
    """Send p keypress."""
    self._key(constants.KEY_P)

def key_q(self):
    """Send q keypress."""
    self._key(constants.KEY_Q)

def key_r(self):
    """Send r keypress."""
    self._key(constants.KEY_R)

def key_s(self):
    """Send s keypress."""
    self._key(constants.KEY_S)

def key_t(self):
    """Send t keypress."""
    self._key(constants.KEY_T)

def key_u(self):
    """Send u keypress."""
    self._key(constants.KEY_U)

def key_v(self):
    """Send v keypress."""
    self._key(constants.KEY_V)

def key_w(self):
    """Send w keypress."""
    self._key(constants.KEY_W)

def key_x(self):
    """Send x keypress."""
    self._key(constants.KEY_X)

def key_y(self):
    """Send y keypress."""
    self._key(constants.KEY_Y)

def key_z(self):
    """Send z keypress."""
    self._key(constants.KEY_Z)
# ======================================================================= #
# #
# volume methods #
# #
# ======================================================================= #
def set_volume_level(self, volume_level, current_volume_level=None):
    """Set the volume to the desired level.

    .. note::

       This method works by sending volume up/down commands with a 1 second pause in between. Without this pause,
       the device will do a quick power cycle. This is the most robust solution I've found so far.

    Parameters
    ----------
    volume_level : float
        The new volume level (between 0 and 1)
    current_volume_level : float, None
        The current volume level (between 0 and 1); if it is not provided, it will be determined

    Returns
    -------
    float, None
        The new volume level (between 0 and 1), or ``None`` if ``self.max_volume`` could not be determined
    """
    # if necessary, determine the current volume and/or the max volume
    if current_volume_level is None or not self.max_volume:
        # NOTE(review): `self.volume` appears to query the device — confirm it
        # also refreshes `self.max_volume` as the check below assumes.
        current_volume = self.volume
    else:
        # convert the 0..1 fraction to an absolute volume, clamped to [0, max_volume]
        current_volume = min(max(round(self.max_volume * current_volume_level), 0.), self.max_volume)
    # if `self.max_volume` or `current_volume` could not be determined, do not proceed
    if not self.max_volume or current_volume is None:
        return None
    new_volume = min(max(round(self.max_volume * volume_level), 0.), self.max_volume)
    # Case 1: the new volume is the same as the current volume
    if new_volume == current_volume:
        return new_volume / self.max_volume
    # Case 2: the new volume is less than the current volume
    if new_volume < current_volume:
        # one `input keyevent VOLUME_DOWN` per step, joined with 1 s sleeps and
        # run in a backgrounded subshell so the ADB call returns immediately
        cmd = "(" + " && sleep 1 && ".join(["input keyevent {0}".format(constants.KEY_VOLUME_DOWN)] * int(current_volume - new_volume)) + ") &"
    # Case 3: the new volume is greater than the current volume
    else:
        cmd = "(" + " && sleep 1 && ".join(["input keyevent {0}".format(constants.KEY_VOLUME_UP)] * int(new_volume - current_volume)) + ") &"
    # send the volume down/up commands
    self.adb_shell(cmd)
    # return the new volume level
    return new_volume / self.max_volume
def volume_up(self, current_volume_level=None):
    """Send volume up action.

    Parameters
    ----------
    current_volume_level : float, None
        The current volume level (between 0 and 1); if it is not provided, it will be determined

    Returns
    -------
    float, None
        The new volume level (between 0 and 1), or ``None`` if ``self.max_volume`` could not be determined
    """
    # Work out the absolute current volume before pressing the key.
    if current_volume_level is not None and self.max_volume:
        current_volume = round(self.max_volume * current_volume_level)
    else:
        current_volume = self.volume
    # The key is sent regardless of whether the level could be determined.
    self._key(constants.KEY_VOLUME_UP)
    # Without a known maximum or current volume there is no level to report.
    if not self.max_volume or current_volume is None:
        return None
    # Clamp at the maximum and convert back to a 0..1 fraction.
    return min(current_volume + 1, self.max_volume) / self.max_volume
def volume_down(self, current_volume_level=None):
    """Send volume down action.

    Parameters
    ----------
    current_volume_level : float, None
        The current volume level (between 0 and 1); if it is not provided, it will be determined

    Returns
    -------
    float, None
        The new volume level (between 0 and 1), or ``None`` if ``self.max_volume`` could not be determined
    """
    # Work out the absolute current volume before pressing the key.
    if current_volume_level is not None and self.max_volume:
        current_volume = round(self.max_volume * current_volume_level)
    else:
        current_volume = self.volume
    # The key is sent regardless of whether the level could be determined.
    self._key(constants.KEY_VOLUME_DOWN)
    # Without a known maximum or current volume there is no level to report.
    if not self.max_volume or current_volume is None:
        return None
    # Clamp at zero and convert back to a 0..1 fraction.
    return max(current_volume - 1, 0.) / self.max_volume
|
from pilco.policies.policy import Policy
import tensorflow as tf
class TransformedPolicy(Policy):
    """Policy wrapper that pushes a base policy's actions through a transform
    (e.g. a sine squashing, per the default name) for both sampled actions
    and moment matching."""

    def __init__(self,
                 policy,
                 transform,
                 name="sine_bounded_action_policy",
                 **kwargs):
        # Mirror the wrapped policy's dimensions and dtype.
        super().__init__(state_dim=policy.state_dim,
                         action_dim=policy.action_dim,
                         name=name,
                         dtype=policy.dtype,
                         **kwargs)
        self.policy = policy
        self.transform = transform

    @property
    def parameters(self):
        # The wrapper adds no trainable parameters of its own.
        return self.policy.parameters

    @property
    def action_indices(self):
        # Indices of the action entries inside the concatenated [state, action] vector.
        return tf.range(self.state_dim, self.state_dim + self.action_dim)

    def reset(self):
        # Delegate any internal-state reset to the wrapped policy.
        self.policy.reset()

    def match_moments(self, state_loc, state_cov, joint_result=True):
        # We first match the moments through the base policy
        # NOTE(review): `joint_result` is accepted but never forwarded —
        # confirm whether it should be passed to self.policy.match_moments.
        loc, cov = self.policy.match_moments(state_loc, state_cov)
        # ...then through the transform, applied only to the action indices.
        loc, cov = self.transform.match_moments(loc=loc,
                                                cov=cov,
                                                indices=self.action_indices)
        return loc, cov

    def call(self, state):
        # Concatenate state and the (list-wrapped) action, transform the
        # action slice in place, and return only the transformed action part.
        full_vec = tf.concat([state, [self.policy(state)]], axis=0)
        return self.transform(full_vec, indices=self.action_indices)[self.state_dim:]
|
# -*- coding: utf-8 -*-
"""
@version: ??
@author: xlliu
@contact: liu.xuelong@163.com
@site: https://github.com/xlliu
@software: PyCharm
@file: scheduletask.py
@time: 2016/4/26 9:39
"""
import os
import subprocess
import threading
# import sys
#
# reload(sys)
# sys.setdefaultencoding('utf-8')
class myThread(threading.Thread):
    """Worker thread that launches the catch-up shell job in a subprocess.

    NOTE: this is Python 2 code (print statements below).
    """
    # Directory containing this script (and init_catch_up.sh).
    HOTPLAY_CATCHUP_DIR = os.path.dirname(__file__)

    def __init__(self):
        threading.Thread.__init__(self)

    def run(self):
        # Thread entry point.
        self.do_init_catchup()

    def do_init_catchup(self, log_name="celery_info"):
        """Run init_catch_up.sh, appending its output to logs/<log_name>.log,
        and report the exit status."""
        print 'start to init catch output info log name is %s' % (log_name)
        job_args = 'source %s/init_catch_up.sh >> logs/%s.log' % (self.HOTPLAY_CATCHUP_DIR, log_name)
        # job_args = 'celery -A mongo2mysql.celery worker --loglevel=info > logs/%s.log' % (log_name)
        # job_args = 'celery -A mongo2mysql.celery worker --loglevel=info'
        print 'job_args:', job_args
        # shell=True is required for `source`; the command is built from
        # module-level values only (no external input).
        P = subprocess.Popen(job_args, shell=True)
        rt_code = P.wait()
        if rt_code == 0:
            print 'job success...'
        else:
            print 'job error:%d' % (rt_code)
|
def solution(s):
    """Return "<min> <max>" for the space-separated integers in *s*.

    Parameters
    ----------
    s : str
        Space-separated (possibly negative) integers, e.g. "-1 2 3".

    Returns
    -------
    str
        The minimum and maximum values joined by a single space.
    """
    # str.split + int handle sign and digit accumulation, replacing the
    # original's manual character-by-character parser.
    numbers = [int(token) for token in s.split()]
    return "{} {}".format(min(numbers), max(numbers))
solution("1 2 3 4") |
"""
@details: Ingresar por teclado los nombres de 5 personas y
almacenarlos en una lista. Mostrar el nombre de
persona menor en orden alfabético.
"""
######
#
# Funciones
#
######
def imprimirLista(lista):
    """Print every element of *lista*, one per line, prefixed with 'valor: '."""
    # Iterate directly over the elements instead of indexing via range(len(...)).
    for elemento in lista:
        print('valor: ', elemento)
def buscoPalabraMenor(lista):
    """Return the alphabetically smallest element of *lista*.

    The original hard-coded ``range(1, 5)`` and therefore only worked for
    5-element lists; ``min`` generalizes to any non-empty list while giving
    identical results for the intended 5-name case.
    """
    return min(lista)
# Read five names from the keyboard and report the alphabetically smallest.
listaDeNombres = []
for i in range(5):
    # NOTE(review): prompt "Igrese" looks like a typo for "Ingrese"
    # (user-facing string left unchanged here).
    nombre = input("Igrese un nombre: ")
    listaDeNombres.append(nombre)
print('la palabra menor es: ', buscoPalabraMenor(listaDeNombres))
|
from __future__ import print_function, division
import os
import glob
import torch
import pandas as pd
import numpy as np
from torch.utils.data import Dataset, DataLoader
from torchvision import transforms, utils
import cv2
import torchvision.models as models
from torchvision import transforms, utils
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
import torchvision
from torch.autograd import Variable
from tqdm import tqdm
from torch.optim import lr_scheduler
import copy
import PIL
import argparse
import random
import Augmentor
import torch
from torch.autograd import Variable
import numpy as np
import torch.nn.functional as F
import torchvision
from torchvision import transforms
import torch.optim as optim
from torch import nn
import matplotlib.pyplot as plt
def imshow(img):
    """Display a channels-first (C, H, W) torch tensor with matplotlib."""
    # plt.imshow expects channels-last, so reorder the axes to (H, W, C).
    plt.imshow(np.transpose(img.numpy(), (1, 2, 0)))
class KittiDataset(Dataset):
    """Dataset of KITTI road images; labels are derived from filename prefixes."""

    def __init__(self, directory, augment = False, transform=True):
        # Collect every PNG directly inside *directory*.
        directory = directory + "/*.png"
        self.img_names = list(glob.glob(directory))
        self.transform = transform
        # NOTE(review): `augment` is stored but never used below (a commented-out
        # Augmentor pipeline with distortion/flips was removed here).
        self.augment = augment

    def __len__(self):
        # Number of images found at construction time.
        return len(self.img_names)

    def __getitem__(self,idx):
        """Return {'image': float array, 'label': float, 'path': str} for *idx*."""
        path = self.img_names[idx]
        image = cv2.imread(path)
        newHeight = 1200
        newWidth = 300
        dim = (newHeight, newWidth)
        # NOTE(review): cv2.resize takes dsize as (width, height), so this yields a
        # 300-row x 1200-column image — the variable names appear swapped; confirm intent.
        image = cv2.resize(image, dim,3, interpolation = cv2.INTER_AREA)
        image_label = 0
        # Label from the KITTI filename prefix; 'umm_' is tested before 'um_'
        # because every 'umm_' name also contains 'um_'.
        if 'uu_' in path:
            image_label = 0
        elif 'umm_' in path:
            image_label = 1
        elif 'um_' in path:
            image_label = 2
        else:
            print (" error in label")
            image_label = 2
        # NOTE: a large commented-out augmentation section (rotation, Gaussian blur,
        # watershed segmentation, speckle noise, shear) was removed here; restore
        # from version control if augmentation is resurrected.
        if self.transform:
            # NOTE(review): this *replaces* the boolean flag with a Compose on first
            # use; it works (Compose is truthy) but rebuilds the pipeline per item.
            self.transform = transforms.Compose(
                [transforms.Resize((224,224)),
                 transforms.ToTensor(),
                ])
            image = self.transform(PIL.Image.fromarray(image))
        dictionary ={}
        dictionary["image"] = np.array(image,dtype = float)
        dictionary["label"] = float(image_label)
        dictionary["path"] = str(path)
        return dictionary
# Hyperparameters shared by the VAE model and its loss.
LATENT_DIM = 5 #size of the latent space in the variational autoencoder
BATCH_SIZE = 128  # used to normalise the KL term in loss_function
class VAE_simple(nn.Module):
    """Fully-connected variational autoencoder over flattened 3x224x224 images."""

    def __init__(self):
        super(VAE_simple, self).__init__()
        # Encoder: 150528 -> 400 -> 100 -> 100 -> (mu, logvar) of size LATENT_DIM.
        self.fc1 = nn.Linear(3 * 224 * 224, 400)
        self.extra_layer = nn.Linear(400, 100)
        self.extra_layer2 = nn.Linear(100, 100)
        self.fc21 = nn.Linear(100, LATENT_DIM)
        self.fc22 = nn.Linear(100, LATENT_DIM)
        # Decoder: LATENT_DIM -> 100 -> 100 -> 400 -> 150528.
        self.fc3 = nn.Linear(LATENT_DIM, 100)
        self.extra_layer_dec = nn.Linear(100, 100)
        self.extra_layer_dec2 = nn.Linear(100, 400)
        self.fc4 = nn.Linear(400, 3 * 224 * 224)
        self.relu = nn.ReLU()
        self.sigmoid = nn.Sigmoid()

    def encode(self, x):
        """Map a flattened image batch to (mu, logvar) of the latent Gaussian."""
        hidden = self.relu(self.fc1(x))
        hidden = self.relu(self.extra_layer(hidden))
        hidden = self.relu(self.extra_layer2(hidden))
        return self.fc21(hidden), self.fc22(hidden)

    def reparameterize(self, mu, logvar):
        """Sample z ~ N(mu, sigma^2) via the reparameterization trick.

        In eval mode the mean is returned deterministically.
        """
        if not self.training:
            return mu
        std = logvar.mul(0.5).exp_()
        eps = Variable(std.data.new(std.size()).normal_())
        return eps.mul(std).add_(mu)

    def decode(self, z):
        """Map a latent sample back to a flattened image with values in (0, 1)."""
        hidden = self.relu(self.fc3(z))
        hidden = self.relu(self.extra_layer_dec(hidden))
        hidden = self.relu(self.extra_layer_dec2(hidden))
        return self.sigmoid(self.fc4(hidden))

    def forward(self, x):
        """Return (reconstruction, mu, logvar) for the input batch *x*."""
        mu, logvar = self.encode(x.view(-1, 3 * 224 * 224))
        return self.decode(self.reparameterize(mu, logvar)), mu, logvar
# In[4]:
def loss_function(reconstruced_x, x, mu, logvar):
    """VAE loss: mean reconstruction BCE plus a normalised KL divergence.

    KL term from Appendix B of Kingma & Welling, "Auto-Encoding Variational
    Bayes" (ICLR 2014, https://arxiv.org/abs/1312.6114):
    0.5 * sum(1 + log(sigma^2) - mu^2 - sigma^2)
    """
    num_pixels = 3 * 224 * 224
    BCE = F.binary_cross_entropy(reconstruced_x.view(-1, num_pixels), x.view(-1, num_pixels))
    KLD = -0.5 * torch.sum(1 + logvar - mu.pow(2) - logvar.exp())
    # Normalise the KL term by the same number of elements as the reconstruction.
    KLD = KLD / (BATCH_SIZE * num_pixels)
    return BCE + KLD
def get_data_train():
    """Load the KITTI training images from data_road/training/image_2.

    Returns
    -------
    tuple
        (labels, images, names) as numpy arrays; labels follow the filename
        prefix convention 'uu_' -> 1, 'umm_' -> 2, 'um_' -> 3.
    """
    train_images = []
    train_image_labels = []
    names = []
    for img in glob.glob("data_road/training/image_2/*.png"):
        names.append(img)
        n= np.array(cv2.resize(cv2.imread(img),(224,224)))
        train_images.append(n)
        # 'umm_' must be tested before 'um_' since every 'umm_' name contains 'um_'.
        if 'uu_' in img:
            train_image_labels.append(1)
        elif 'umm_' in img:
            train_image_labels.append(2)
        elif 'um_' in img:
            train_image_labels.append(3)
        else:
            print ("Noise in data")
    # BUG FIX: the original returned np.array(train_image_labels).shape — the
    # *shape tuple* of the labels — instead of the labels themselves, which
    # contradicts the (commented-out) caller
    # `labels, image_ , names = get_data_train()`.
    return np.array(train_image_labels), np.array(train_images), np.array(names)
def main():
    """Run the trained VAE over the KITTI train and test images, saving each
    reconstruction (and repeatedly overwriting image1.png with the original)."""
    net = VAE_simple()
    # NOTE(review): torch.load replaces `net` with the full pickled model
    # object, so the fresh VAE_simple() above is discarded.
    net = torch.load('saved_model')
    net.eval()
    train_directory ='data_road/training/image_2'
    test_directory = 'data_road/testing/image_2'
    train_set = KittiDataset(directory = train_directory, augment = False)
    correct_count = 0
    train_loader = DataLoader(train_set, batch_size=1, shuffle=False, num_workers=0)
    for i in range(0,len(train_loader)):
        net.eval()
        images = [0,0]
        image =train_loader.dataset[i]
        # Dataset returns a dict; pull out the image and restore CHW layout.
        image = torch.from_numpy(image['image']).view(3,224,224).float()
        if torch.cuda.is_available():
            output = net(Variable(image.unsqueeze(0).cuda()))
        else:
            output = net(Variable(image.unsqueeze(0)))
        images[0] = image #original image
        images[1] = output[0].data.view(3,224,224) # reconstructed image
        # NOTE(review): dataset[i]["path"] includes the source directory, so the
        # destination 'vaedataset/<full source path>' nests directories — confirm
        # those directories exist before running.
        torchvision.utils.save_image(images[1],'vaedataset/' + train_loader.dataset[i]["path"])
        torchvision.utils.save_image(images[0],'image1.png')
    test_set = KittiDataset(directory=test_directory,augment=False)
    test_loader = DataLoader(test_set, batch_size=1, shuffle=False, num_workers=0)
    print (" Train set created ")
    # Same reconstruction loop for the test split.
    for i in range(0,len(test_loader)):
        net.eval()
        images = [0,0]
        image =test_loader.dataset[i]
        image = torch.from_numpy(image['image']).view(3,224,224).float()
        if torch.cuda.is_available():
            output = net(Variable(image.unsqueeze(0).cuda()))
        else:
            output = net(Variable(image.unsqueeze(0)))
        images[0] = image #original image
        images[1] = output[0].data.view(3,224,224) # reconstructed image
        torchvision.utils.save_image(images[1],'vaedataset/' + test_loader.dataset[i]["path"])
        torchvision.utils.save_image(images[0],'image1.png')
    print (" Test set created ")
main() |
import os
import psycopg2
from psycopg2.extensions import ISOLATION_LEVEL_AUTOCOMMIT
# Connect to the database named by TEST_DATABASE_URL (set by the test environment)
# and create the `users` table.
connection = psycopg2.connect(os.environ['TEST_DATABASE_URL'])
# connection = psycopg2.connect('postgresql://postgres:1234@localhost:5432/test_2fa')
# AUTOCOMMIT: the CREATE TABLE below takes effect without an explicit transaction.
connection.set_isolation_level(ISOLATION_LEVEL_AUTOCOMMIT)
cursor = connection.cursor()
# Plain string literal: the original used an f-string with no placeholders,
# which falsely implied interpolation into the SQL.
query = '''CREATE TABLE users(
id SERIAL PRIMARY KEY,
login character varying(128),
password character varying(255),
pass_img character varying(255),
counter integer,
coordinates character varying(128),
banned integer,
false_click_counter integer,
zone character varying(512),
probability character varying(512)
)'''
cursor.execute(query)
# NOTE(review): commit() is presumably a no-op under AUTOCOMMIT — kept for parity.
connection.commit()
#!/usr/bin/python3
"""
Contains number_of_lines function
"""
def number_of_lines(filename=""):
    """Return the number of lines in the text file *filename*.

    BUG FIX: the original never initialised ``count``, so it raised
    UnboundLocalError on every call (including for an empty file).
    """
    with open(filename, encoding='utf8') as f:
        # sum over a generator counts the lines without building a list.
        return sum(1 for _ in f)
|
import os
import logging
from airflow import DAG
from airflow.models import Variable
from airflow import AirflowException
from datetime import datetime, timedelta
from airflow.operators.bash_operator import BashOperator
from airflow.operators.python_operator import PythonOperator
retailers = Variable.get("retailer_config", deserialize_json=True)
HOME_DIR = os.path.expanduser('~')
SRC_HOME = os.path.join(HOME_DIR, 'dev_Sehan/Olaplex-Retail-Dev/retail/main')
def check_retailer_status(retailer_name):
    """Fail the task when an error.log exists for *retailer_name*.

    The log's contents are recorded via logging.error before raising
    AirflowException; otherwise an informational message is logged.
    """
    error_file = os.path.join(SRC_HOME, '_data', retailer_name, 'error.log')
    if not os.path.isfile(error_file):
        logging.info(f"No error found for the retailer {retailer_name}")
        return
    with open(error_file) as f:
        logging.error(f.read())
    raise AirflowException(f"Errors found for the retailer {retailer_name}")
# Default arguments applied to every task in the DAG.
default_arguments = {
    'owner': 'nabin',
    'depends_on_past': False,
    'start_date': datetime(2020, 12, 1),
    'email': ['test@test.com'],
    'email_on_failure': True,
    'email_on_retry': False,
    'retries': 1,
    'retry_delay': timedelta(minutes=1)
}
# Daily DAG; catchup=False skips back-filling runs since start_date.
# NOTE(review): dag_id "olapelx_retailers" looks like a typo for "olaplex" —
# renaming would change the DAG's identity, so it is left untouched here.
dag = DAG(
    dag_id="olapelx_retailers",
    default_args=default_arguments,
    catchup=False,
    schedule_interval="@daily"
)
def createRetailerTask(retailer_name, **args):
    """Build the BashOperator that extracts data for one retailer.

    The task id doubles as the retailer name so failures are easy to spot.
    """
    return BashOperator(
        task_id = retailer_name,
        bash_command=f"cd {SRC_HOME} && python3 main.py extract_retailer_data {retailer_name}" ,
        dag=dag
    )
def createCheckStatusTask(retailer_name, **args):
    """Build the PythonOperator that verifies a retailer run left no error.log."""
    return PythonOperator(
        task_id = f"check_{retailer_name}_status",
        python_callable=check_retailer_status,
        op_kwargs={'retailer_name': retailer_name},
        dag=dag
    )
# Anchor task; convenient for manual triggering.
start = BashOperator(
    dag=dag,
    task_id='start',
    bash_command=f"echo hello"
)
# Pull the latest code before any retailer extraction runs.
sync_git_repos = BashOperator(
    dag=dag,
    task_id='sync_git_repos',
    bash_command=f"cd {SRC_HOME} && git pull origin Sehan_dev"
)
start >> sync_git_repos
# Staging load runs after every retailer branch; 'none_skipped' allows it to
# run as long as no upstream task was skipped.
load_to_stg = BashOperator(
    dag=dag,
    task_id='LoadToStagingTables',
    bash_command=f"cd {SRC_HOME} && python3 main.py load_to_staging_tables",
    trigger_rule='none_skipped'
)
# One extract + status-check branch per retailer enabled in the config Variable.
for k,v in retailers.items():
    if v["enable"] == "yes":
        retailer_task = createRetailerTask(retailer_name=k)
        check_status = createCheckStatusTask(retailer_name=k)
        sync_git_repos >> retailer_task >> check_status >> load_to_stg
# Final load into the warehouse after staging completes.
load_to_dwh = BashOperator(
    dag=dag,
    task_id='LoadToDWH',
    bash_command=f"cd {SRC_HOME} && python3 main.py load_to_final_table"
)
load_to_stg >> load_to_dwh
import numpy as np
import aesara.tensor as at
from aesara import config, function, grad, shared
from aesara.compile.mode import FAST_RUN
from aesara.scan.views import foldl, foldr
from aesara.scan.views import map as at_map
from aesara.scan.views import reduce as at_reduce
from aesara.tensor.type import scalar, vector
from tests import unittest_tools as utt
from tests.scan.test_basic import clone_optimized_graph, grab_scan_node
def test_reduce():
    """`reduce` over addition with a zero seed should equal NumPy's sum."""
    v = vector("v")
    s = scalar("s")
    result, updates = at_reduce(lambda x, y: x + y, v, s)
    f = function([v, s], result, updates=updates, allow_input_downcast=True)
    rng = np.random.default_rng(utt.fetch_seed())
    v_v = rng.uniform(-5.0, 5.0, size=(5,))
    # Loose tolerance: inputs may be downcast to float32.
    assert abs(np.sum(v_v) - f(v_v, 0.0)) < 1e-3
def test_map():
    """`map` of abs over a vector should match elementwise abs."""
    v = vector("v")
    abs_expr, abs_updates = at_map(
        lambda x: abs(x), v, [], truncate_gradient=-1, go_backwards=False
    )
    f = function([v], abs_expr, updates=abs_updates, allow_input_downcast=True)
    rng = np.random.default_rng(utt.fetch_seed())
    vals = rng.uniform(-5.0, 5.0, size=(10,))
    abs_vals = abs(vals)
    aesara_vals = f(vals)
    utt.assert_allclose(abs_vals, aesara_vals)
def test_reduce_memory_consumption():
    """Scan's SaveMem keeps a 2-slot accumulator buffer for `reduce` when
    output preallocation is enabled (1 slot otherwise)."""
    x = shared(np.asarray(np.random.uniform(size=(10,)), dtype=config.floatX))
    o, _ = at_reduce(
        lambda v, acc: acc + v,
        x,
        at.constant(np.asarray(0.0, dtype=config.floatX)),
    )
    mode = FAST_RUN
    mode = mode.excluding("inplace")
    f1 = function([], o, mode=mode)
    inputs, outputs = clone_optimized_graph(f1)
    scan_nodes = grab_scan_node(outputs[0])
    assert scan_nodes is not None
    scan_node = scan_nodes[0]
    # Inspect the buffer Scan allocated for the single accumulator output.
    f1 = function(inputs, scan_node.inputs[2])
    # SaveMem would shrink the buffer to the number of taps (1), but the
    # memory-reuse feature keeps 2 entries to avoid aliasing between the
    # inputs and the outputs.
    if config.scan__allow_output_prealloc:
        assert f1().shape[0] == 2
    else:
        assert f1().shape[0] == 1
    # d(sum)/dx is a vector of ones.
    gx = grad(o, x)
    f2 = function([], gx)
    utt.assert_allclose(f2(), np.ones((10,)))
def test_foldl_memory_consumption():
    """Same buffer-size check as test_reduce_memory_consumption, for foldl."""
    x = shared(np.asarray(np.random.uniform(size=(10,)), dtype=config.floatX))
    o, _ = foldl(
        lambda v, acc: acc + v,
        x,
        at.constant(np.asarray(0.0, dtype=config.floatX)),
    )
    mode = FAST_RUN
    mode = mode.excluding("inplace")
    f0 = function([], o, mode=mode)
    inputs, outputs = clone_optimized_graph(f0)
    scan_nodes = grab_scan_node(outputs[0])
    assert scan_nodes is not None
    scan_node = scan_nodes[0]
    # Inspect the buffer Scan allocated for the accumulator output.
    f1 = function(inputs, scan_node.inputs[2])
    # 2 slots with output preallocation (anti-aliasing), 1 otherwise (SaveMem).
    if config.scan__allow_output_prealloc:
        assert f1().shape[0] == 2
    else:
        assert f1().shape[0] == 1
    # d(sum)/dx is a vector of ones.
    gx = grad(o, x)
    f2 = function([], gx)
    utt.assert_allclose(f2(), np.ones((10,)))
def test_foldr_memory_consumption():
    """Same buffer-size check as test_reduce_memory_consumption, for foldr."""
    x = shared(np.asarray(np.random.uniform(size=(10,)), dtype=config.floatX))
    o, _ = foldr(
        lambda v, acc: acc + v,
        x,
        at.constant(np.asarray(0.0, dtype=config.floatX)),
    )
    mode = FAST_RUN
    mode = mode.excluding("inplace")
    f1 = function([], o, mode=mode)
    inputs, outputs = clone_optimized_graph(f1)
    scan_nodes = grab_scan_node(outputs[0])
    assert scan_nodes is not None
    scan_node = scan_nodes[0]
    # Inspect the buffer Scan allocated for the accumulator output.
    f1 = function(inputs, scan_node.inputs[2])
    # 2 slots with output preallocation (anti-aliasing), 1 otherwise (SaveMem).
    if config.scan__allow_output_prealloc:
        assert f1().shape[0] == 2
    else:
        assert f1().shape[0] == 1
    # d(sum)/dx is a vector of ones.
    gx = grad(o, x)
    f2 = function([], gx)
    utt.assert_allclose(f2(), np.ones((10,)))
|
'''
Metaclasses
A metaclass is to a class what a class is to an instance; that is, a
metaclass is used to create classes, just as classes are used to create
instances. And just as we can ask whether an instance belongs to a
class by using isinstance(), we can ask whether a class object (such as
dict, int, or SortedList) inherits another class using issubclass().
One use of metaclasses is to provide both a promise and a guarantee
about a class’s API. Another use is to modify a class in some way (like
a class decorator does). And of course, metaclasses can be used for
both purposes at the same time.
'''
# Demonstration classes for the metaclass notes above.
# BUG FIX: the original `class A:` had no body (a SyntaxError) and used `B`
# and `b` without ever defining them.  Per the original annotations
# (isinstance(b, A) -> False), B does NOT inherit from A.
class A:
    pass


class B:
    pass


a = A()
b = B()
isinstance(a, A)     # True
isinstance(b, A)     # False (B does not inherit from A)
isinstance(b, B)     # True
# Classes are themselves instances of the metaclass `type`...
isinstance(A, type)  # True
isinstance(B, type)  # True
# ...but ordinary instances are not.
isinstance(a, type)  # False
isinstance(b, type)  # False
# inheritance: further reading on multiple inheritance and the MRO
print("https://www.python-course.eu/python3_multiple_inheritance.php")
print("MRO - method resolution order")
|
import json
import sys
from sklearn.feature_extraction.text import CountVectorizer
from nltk import word_tokenize
from nltk.stem import WordNetLemmatizer
from fractions import Fraction
from sklearn import metrics
import numpy as np
import pickle
from sklearn import svm
text_clf = pickle.load(open('finalized_model.sav', 'rb'))
def check(text):
    """Classify *text* with the loaded model and return the predicted label.

    The parameter was renamed from ``str``, which shadowed the builtin; the
    sole caller passes the argument positionally, so callers are unaffected.
    """
    return text_clf.predict([text])[0]
print(check(sys.argv[1]))
"""
file_object = open(r"test.json","r")
title = file_object.read()
p = json.loads(title)
file_object.close()
t=[]
for i in p:
t.append(i)
b=text_clf.predict(t);
h=[0]*(len(t))
for x in range(888,1001):
h[x]=1;
print("accuracy="+str(np.mean(b == h)))
tp=0
fp=0
tn=0
fn=0
for i in range(0,1000):
if(b[i]==1 and h[i]==1):
tp += 1
elif(b[i]==1 and h[i]==0):
fp += 1
elif(b[i]==0 and h[i]==0):
tn += 1
else:
fn += 1
pre=Fraction(tp,(tp+fp))
rec=Fraction(tp,(tp+fn))
f1=Fraction((pre*rec),(pre+rec))
print("precision="+str(float(pre)))
print("recall="+str(float(rec)))
print("f1 score="+str(float(f1)))
""" |
# Interactive walkthrough of basic dict operations.
students = {"Alice":24, "Bob":18}
students
students["Alice"]
students["Fred"] = 20
students["Fred"]
del students["Fred"]
# The three view objects: keys, values, and (key, value) pairs.
students.keys()
students.values()
students.items()
# Won't work: dict views do not support indexing
#students.keys()[0]
# Instead, materialise the view as a list first
list(students.keys())[0]
|
# Python 3 port of the original Python 2 snippet (which relied on py2
# print statements and the implicitly-eval'ing input()).  Reads two
# integers and prints integer division, modulus, and divmod.
a = int(input())
b = int(input())
print(a // b)
print(a % b)
print(divmod(a, b))
|
# Copyright (c) 2019 Thomas Howe
import os
from queue import Queue
from threading import Thread
import sqlalchemy as sql
from sqlalchemy import MetaData
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import sessionmaker, scoped_session
from sqlalchemy.pool import QueuePool
from problem_sets.environment import Environment
Base = declarative_base()
# Base must be created before these are imported
from problem_sets.static.data.data_manager import DATA_DIR
from problem_sets.static.data.sqlite.static_content_sqlite_repository import StaticContentSQLiteRepository
from problem_sets.static.data.sqlite.static_problem_sqlite_repository import StaticProblemSQLiteRepository
from problem_sets.static.data.sqlite.static_problem_set_sqlite_repository import StaticProblemSetSQLiteRepository
# Default on-disk location of the static SQLite database.
DB_PATH = os.path.join(DATA_DIR, "static.db")
# Module-level repository singletons; populated by initialize()/init_repos().
static_content_repo: StaticContentSQLiteRepository = None
static_problem_set_repo: StaticProblemSetSQLiteRepository = None
static_problem_repo: StaticProblemSQLiteRepository = None
db: sql.engine = None
# NOTE(review): `sql.MetaData or None` is an *evaluated expression*, not a
# Union — it collapses to sql.MetaData; Optional[MetaData] was probably meant.
db_meta: sql.MetaData or None = None
Session: scoped_session = scoped_session(sessionmaker())
def initialize(env: Environment, db_path: str = None):
    """Create the SQLite engine, bind the scoped session, and build repositories.

    Parameters
    ----------
    env : Environment
        Current environment; SQL echo is enabled when it equals Environment.debug.
    db_path : str, optional
        Path of the SQLite file; defaults to DB_PATH under DATA_DIR.
    """
    global db, db_meta, Session
    db_path = db_path if db_path is not None else DB_PATH
    # Make sure the directory that will hold the .db file exists.
    db_parent_dir = os.path.realpath(os.path.join(db_path, os.path.pardir))
    if not os.path.exists(db_parent_dir):
        os.makedirs(db_parent_dir)
    db = sql.create_engine(f"sqlite:///{db_path}")
    # Echo SQL statements only in debug.
    db.echo = env == Environment.debug
    db_meta = MetaData()
    Session.configure(bind=db)
    init_repos(db)
def init_repos(engine: sql.engine.Engine):
    """Create all mapped tables on *engine* and build the repository singletons."""
    global static_content_repo, static_problem_set_repo, static_problem_repo
    Base.metadata.bind = engine
    # Create any missing tables for the declaratively mapped models.
    Base.metadata.create_all(engine)
    static_content_repo = StaticContentSQLiteRepository(Session)
    static_problem_repo = StaticProblemSQLiteRepository(Session)
    static_problem_set_repo = StaticProblemSetSQLiteRepository(Session)
|
# from common import *
import os
import shutil
import builtins
# log ------------------------------------
def remove_comments(lines, token='#'):
    """Strip comments and surrounding whitespace from *lines*.

    Parameters
    ----------
    lines : iterable of str
        Input lines.
    token : str
        Comment marker; everything from the first occurrence onward is dropped.

    Returns
    -------
    list of str
        The non-empty stripped remainders, in order.
    """
    # Comprehension replaces the manual accumulate-and-append loop.
    stripped = (line.split(token, 1)[0].strip() for line in lines)
    return [s for s in stripped if s]
def open(file, mode=None, encoding=None):
    """Wrapper around builtins.open that creates missing parent directories
    when opening for write or append.

    BUG FIX: the original condition ``if 'w' or 'a' in mode`` was always true
    (the literal 'w' is truthy), so parent directories were created even for
    read-only opens.
    """
    if mode is None:
        mode = 'r'
    if '/' in file and ('w' in mode or 'a' in mode):
        parent = os.path.dirname(file)
        if not os.path.isdir(parent):
            os.makedirs(parent)
    return builtins.open(file, mode=mode, encoding=encoding)
def remove(file):
    """Delete *file* if it exists; silently do nothing otherwise."""
    if os.path.exists(file):
        os.remove(file)
def empty(dir):
    """Ensure *dir* exists and is empty.

    BUG FIX: the original removed an existing directory but never recreated
    it, so ``empty(d)`` left *no* directory behind whenever d already
    existed — the opposite of what the name promises.
    """
    if os.path.isdir(dir):
        shutil.rmtree(dir, ignore_errors=True)
    os.makedirs(dir)
def write_list_to_file(strings, list_file):
    """Write each string in *strings* to *list_file*, one per line."""
    with open(list_file, 'w') as f:
        # writelines with a generator avoids one write call per element.
        f.writelines('%s\n' % s for s in strings)
# backup ------------------------------------
#https://stackoverflow.com/questions/1855095/how-to-create-a-zip-archive-of-a-directory
def backup_project_as_zip(project_dir, zip_file):
    """Archive *project_dir* into *zip_file* (a path ending in '.zip').

    shutil.make_archive appends '.zip' itself, so the suffix is stripped
    from the requested name first.
    """
    archive_root = zip_file.replace('.zip', '')
    shutil.make_archive(archive_root, 'zip', project_dir)
# net ------------------------------------
# https://github.com/pytorch/examples/blob/master/imagenet/main.py ###############
def adjust_learning_rate(optimizer, lr):
    """Set the learning rate of every parameter group of *optimizer* to *lr*."""
    for group in optimizer.param_groups:
        group['lr'] = lr
def get_learning_rate(optimizer):
    """Return the learning rates of all parameter groups as a list."""
    return [group['lr'] for group in optimizer.param_groups]
def lr_poly(base_lr, iter, max_iter, power):
    """Polynomial learning-rate decay: base_lr * (1 - iter/max_iter) ** power."""
    decay = (1 - float(iter) / max_iter) ** power
    return base_lr * decay
# Advent of Code day 3: read the tree map; the pattern repeats horizontally,
# one map row per input line.
with open('data.txt', 'r') as f:
    data = f.read()
slope = data.strip().split('\n')
slope_width = len(slope[0])
def check_slope(row_dist, col_dist, grid=None):
    """Count trees ('#') hit descending *grid* moving right *row_dist* and
    down *col_dist* per step; the pattern repeats horizontally.

    Parameters
    ----------
    row_dist : int
        Columns moved right per step.
    col_dist : int
        Rows moved down per step.
    grid : list of str, optional
        Map rows; defaults to the module-level `slope` read from data.txt
        (backward compatible with the original two-argument calls).

    Returns
    -------
    int
        Number of '#' cells encountered.
    """
    if grid is None:
        grid = slope
    width = len(grid[0])
    idx = 0
    tree_count = 0
    for row in grid[::col_dist]:
        # BUG FIX: the original subtracted the width at most once per step,
        # which breaks whenever row_dist exceeds the pattern width; modulo
        # wraps correctly for any step size.
        idx %= width
        if row[idx] == '#':
            tree_count += 1
        idx += row_dist
    return tree_count
# part 1: trees hit on the (right 3, down 1) slope
print(check_slope(3, 1))
# Part 2: product of tree counts over the five prescribed slopes
print("\nPart 2:")
dists = [(1,1), (3,1), (5,1), (7,1), (1,2)]
res = 1
for dist in dists:
    # NOTE(review): each slope is evaluated twice (once for the print, once
    # for the product); harmless but redundant.
    print(check_slope(*dist))
    res *= check_slope(*dist)
print(res)
#-*- coding: UTF-8 -*-
#+++++++++++++++++++++++++++++++++++++++++++++
#Created By CxlDragon 2018.8.30
#用于抓取中国政府采购网(ccgp)的招标公告信息
#+++++++++++++++++++++++++++++++++++++++++++++
#Updted 2018.8.31
#把url中的搜索时间类型做为参数可配置
#修复了一次连续爬取多页时最后一页序列号错误等bug
#启用all_page参数,会爬取结果的总页数,限定起始页和终止页
#在配置文件中保存最后一条记录的时间,用于控制只爬最新记录
#Updated 2018.8.30
#增加了json文件,所有配置从config.json文件中读取
#Updated 2018.8.29
#增加邮件发送功能,把查询的结果以邮件形式发至指定邮箱
import sys
import time
import urllib
import requests #pip3 install requests
import numpy as np #pip3 install numpy
from bs4 import BeautifulSoup #pip3 install beautifulsoup4
from openpyxl import Workbook #pip3 install openpyxl
from openpyxl import load_workbook
from imp import reload
from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText
from email.header import Header
import smtplib
#import os.path
import json
from dojson import JsonConf
from datetime import datetime
reload(sys)
# Global parameters
all_page=0  # total number of result pages
page_begin=1  # first page to crawl
page_end=1  # last page to crawl (i.e. crawl at most up to this page)
each_page_rows=20  # number of records per page
if_send_mail = True  # whether to e-mail the results
last_datetime = ""  # newest record time; empty -> crawl everything, otherwise only records newer than this
search_keywords=""  # search keywords; join multiple keywords with '+'
time_type = "1"  # search time range: 0 today, 1 last 3 days, 2 last week, 3 last month, 4 last 3 months, 5 last 6 months, 6 explicit dates
# Rotating User-Agent headers, cycled per request to reduce the chance of an IP ban
hds=[{'User-Agent':'Mozilla/5.0 (Windows; U; Windows NT 6.1; en-US; rv:1.9.1.6) Gecko/20091201 Firefox/3.5.6'},\
{'User-Agent':'Opera/9.80 (Macintosh; Intel Mac OS X 10.6.8; U; fr) Presto/2.9.168 Version/11.52'}]
# Outgoing mail configuration (placeholder values; fill in real credentials)
mail_info = {
    "from": "xxxx@xxx.cn",
    "to": "xxxx@xxx.cn",
    "hostname": "smtp.xxx.cn",
    "username": "xxxx@xxx.cn",
    "password": "xxxx",
    "mail_subject": "",
    "mail_text": "",
    "mail_encoding": "utf-8",
    "mail_att":""
}
#++++++++++++++++++++
# Crawl the search-result list pages and parse them.
# page_begin: first page to crawl   page_end: last page   zb_tag: search keyword
#++++++++++++++++++++
def lists_spider(page_begin,page_end,zb_tag):
#声明全局变量,否则为只读,一旦修改了则定义为局部变量
global all_page #页面数量
global mail_info #邮件信息
global last_datetime #最后时间
zb_list=[]
zb_list_mail=[] #用于发送邮件
try_times=0
new_time="" #datetime.now().strftime('%Y.%m.%d %H:%M:%S')
out_count=0
#out_count=page_begin*each_page_rows-each_page_rows
while(1):
#最近三天
url='http://search.ccgp.gov.cn/bxsearch?searchtype=1&page_index='+str(page_begin)+'&bidSort=0&buyerName=&projectId=&pinMu=0&bidType=0&dbselect=bidx&kw='+urllib.request.quote(zb_tag)+'&start_time=&end_time=&timeType='+time_type+'&displayZone=&zoneId=&pppStatus=0&agentName='
#print(url)
time.sleep(np.random.rand()*5) #随机时延,避免被禁ip
#采用这种方式爬,ip不容易被禁止,更换浏览代理
try:
req = urllib.request.Request(url, headers=hds[page_begin%len(hds)])
source_code = urllib.request.urlopen(req).read()
plain_text=str(source_code.decode('utf-8')) #指明用utf-8解码
except (urllib.HTTPError, urllib.URLError) :
print("somethings is error...")
continue
soup = BeautifulSoup(plain_text, 'lxml') #对获得的页面用bs4排序,用html.parser lxml 等解析器
#如果是第一次,则取页面总数
if all_page == 0:
try:
page_soup=soup.find('p',{'class':'pager'}) #获取包含页数的soup
if page_soup!=None:
#处理java脚本
pagerstr=page_soup.find('script').get_text()
pagerstr=pagerstr.split(',')[0].split(':')[1].strip()
all_page=int(pagerstr)
print('最多可爬页面总数 %d 页(配置的起始页第 %d 页,结束页是第 %d 页)' %(all_page,page_begin,page_end))
else:
all_page=1
print('获取最多可爬页面总数失败page_soup')
except:
all_page=1
print('获取最多可爬页面总数失败:默认为1,结束页也重置为1')
#break #取不到总页数退出,注释掉则表示取不到总页数,仍然执行,但只会取一页即begin_page
#确保所取的页在总页数范围在(1…all_page)之间
#注意:在此page_begin和page_end 不是全局变量,因此只在函数内有效
if page_begin<=0: page_begin=1
if page_begin>all_page:page_begin=all_page
if page_end<=0: page_end=1
if page_end>all_page:page_end=all_page
if out_count == 0:
out_count=page_begin*each_page_rows-each_page_rows
print('实际爬页面从第 %d 页开始,爬到第 %d 页结束' %(page_begin,page_end))
#获取查询结果列表
try:
#获取查询结果列表
list_soup = soup.find('ul',{'class': 'vT-srch-result-list-bid'})
except:
print('解析查询结果列表失败')
list_soup = None
# 连续5次取不到request的信息,则退出
try_times+=1;
if list_soup==None and try_times<5:
continue
elif list_soup==None or len(list_soup)<=1:
print('给出的url取不到需要的内容')
break
print('开始解析第 %d 页的列表…' % page_begin)
#导出excel的文件名
save_path='zb_list-'+zb_tag+'.xlsx'
#开始循环处理网页
licount=0
for zb_info in list_soup.findAll('li'):
title = str(zb_info.find('a',{'style':'line-height:18px'}).get_text()).strip()
zb_url = zb_info.find('a', {'style':'line-height:18px'}).get('href')
content = str(zb_info.find('p').get_text()).strip()
desc = str(zb_info.find('span').get_text()).strip()
desc_list = desc.split('|')
try:
time_info = '' + str('|'.join(desc_list[0:1])).strip() #时间:
#取最大的时间 new_time为空或者小于当前记录的时间,则赋值
if new_time=="" or (datetime.strptime(new_time,'%Y.%m.%d %H:%M:%S')<datetime.strptime(time_info,'%Y.%m.%d %H:%M:%S')):
new_time=time_info
except:
time_info ='时间:暂无'
try:
zbcg_info = str(desc_list[1]).strip().split(':')[1] #采购人
except:
zbcg_info = '采购人:暂无'
try:
zbdl_info_list = str(desc_list[2]).split("\r\n") #代理机构 + 公告类型 .replace(" ",'')
zbdl_info = zbdl_info_list[0].strip().split(':')[1] #代理机构
zb_type = zbdl_info_list[-4].strip() #公告类型
#zb_type=desc_list[2]
except:
zbdl_info = '代理:暂无'
zb_type = '公告类型:暂无'
try:
zb_city=str(desc_list[3]).strip() #招标区域城市
except:
zb_city='城市:暂无'
try:
zbcg_bd=str(desc_list[4]).strip() #采购标的
except:
zbcg_bd='标的:暂无'
#判断是否是只取最新
if last_datetime !="":
try:
before_time=datetime.strptime(last_datetime,'%Y.%m.%d %H:%M:%S') #上次爬完后保存的时间
#print("上次保存时间:%s 第%d 页第 %d 条记录时间:%s" % (before_time,page_begin,out_count+1,time_info))
if before_time>=datetime.strptime(time_info,'%Y.%m.%d %H:%M:%S'): #只导出最新记录
continue
except:
last_datetime=""
#print("时间=%s 标题=%s 区域=%s 公告类型=%s 标的=%s zbcg=%s zbdl=%s url=%s"
# %(time_info,title,zb_city,zb_type,zbcg_bd,zbcg_info,zbdl_info,zb_url))
#获取url的文本
zb_all=zb_content(zb_url,page_begin%len(hds))
zb_list.append([time_info,title,zb_city,zb_type,zbcg_bd,zbcg_info,zbdl_info,zb_url,content,zb_all])
#是否发送邮件
if if_send_mail:
zb_list_mail.append([time_info,zb_city,zb_type,zbcg_info,title,zb_url])
licount+=1 #本页输出计数
out_count+=1 #总输出计数
try_times=0 #只要成功获取一次数据,则把尝试次数复位为 0
#判断是否有需要导出的内容
print("爬到第 %d 页的记录 %d 条" % (page_begin,licount))
if licount>=1:
#print("成功爬到第 %d 页的记录 %d 条" % (page_begin,licount))
if page_begin==1:
zb_list2excel(zb_list,zb_tag,True,out_count-licount,save_path)
else:
zb_list2excel(zb_list,zb_tag,False,out_count-licount,save_path)
#清空list,准备爬下一页
page_begin+=1
del zb_list[:]
#是否爬到了最后一页
if page_begin>page_end:
#发送邮件标识为“真”且爬到的新记录
if if_send_mail and out_count>(page_begin-1)*each_page_rows-each_page_rows:
#发送邮件
mail_info["mail_subject"]="ccgp公告-"+zb_tag+"-"+time.strftime('%Y-%m-%d %H:%M:%S',time.localtime(time.time()))
mail_text = "<html><body></p>"
ic=1
for bl in zb_list_mail:
mail_text = mail_text+"【"+str(ic)+"】\t"+bl[0]+"\t"+bl[1]+"\t"+bl[2]+"\t"+bl[3]+"\t"+bl[4]+"\t</br><a href="+bl[5]+">"+bl[5]+"</a><p/>"
ic+=1
mail_text+="</body></html>"
#添加正文信息
mail_info["mail_text"]=mail_text
#添加附件信息
mail_info["mail_att"]=save_path
#执行发送邮件
send_mail(mail_info)
last_datetime=new_time #更新最后的记录时间
print("最新一条记录的时间为:"+last_datetime)
break #退出while(1)循环
#循环结束过程返回
return
#++++++++++++++++++++
#把爬到的招标lits信息导出到excel表格中
#bfirst:是否第一次导出 begin_count导出的开始记录序号
#save_path:导出保存的文件名
#++++++++++++++++++++
def zb_list2excel(zb_list,zb_tag,bfirst,begin_count,save_path):
try:
if bfirst: #是否第一次写入文件
wb=Workbook()
ws = wb.active
ws.title=zb_tag
ws.append(['序号','时间','标题','区域','公告类型','标的','采购方','招标代理','url','内容','全文'])
else:
wb=load_workbook(save_path)
ws = wb.active
ws.title=zb_tag
count=1
for bl in zb_list:
ws.append([begin_count+count,bl[0],bl[1],bl[2],bl[3],bl[4],bl[5],bl[6],bl[7],bl[8],bl[9]])
count+=1
wb.save(save_path)
print('导出excel文件成功!本次导出 %d 条,累计导出记录 %d 条' %(count-1,begin_count+count-1))
except:
print('执行导出excel文件出错!')
return
#++++++++++++++++++++
#爬公告的详情页面
#useragent_index:指定模拟的浏览器
#++++++++++++++++++++
def zb_content(url,useragent_index):
result = ''
for i in range(0,3): # 连续3次取不到request的信息,则退出
time.sleep(np.random.rand()*5) #随机时延,避免被禁ip
#采用这种方式爬,ip不容易被禁止,更换浏览代理
try:
req = urllib.request.Request(url, headers=hds[useragent_index])
source_code = urllib.request.urlopen(req).read()
plain_text=str(source_code.decode('utf-8')) #指明用utf-8解码
except (urllib.HTTPError, urllib.URLError) :
print("somethings is error...")
continue
soup = BeautifulSoup(plain_text, 'html.parser') #对获得的页面用bs4排序,用html解析器
main_soup = soup.find('div',{'class': ['vF_detail_main','vT_detail_main']})
if main_soup==None and i<3: #三次取不到就退出
continue
elif main_soup==None or len(main_soup)<=1 or i==4:
print('给出的url取不到需要的内容')
return result
else: #成功取取数据
break
try:
content_soup = main_soup.find('div',{'class':['vF_detail_content','vT_detail_content w760c']}) #vF_detail_content vT_detail_content w760c
if content_soup!=None:
result=str(content_soup.get_text()).strip()
else:
result=''
print('解析url获取的内容为空 %s' %(url))
except:
print('解析url获取的内容出错 %s' %(url))
result=''
#print(result)
return result
#++++++++++++++++++++
#通过ssl发送邮件
#++++++++++++++++++++
def send_mail(mail_info):
#这里使用SMTP_SSL就是默认使用465端口
try:
smtp = smtplib.SMTP_SSL(mail_info["hostname"])
print('登录邮箱…')
#smtp.set_debuglevel(1)#设置调试日志级别
smtp.ehlo(mail_info["hostname"])
smtp.login(mail_info["username"], mail_info["password"])
print('登录成功')
#邮件内容初始化
msg = MIMEMultipart()
#邮件正文
#msg.attach(MIMEText(mail_info["mail_text"], "plain", mail_info["mail_encoding"])) #text文本格式
msg.attach(MIMEText(mail_info["mail_text"], "html", mail_info["mail_encoding"])) #html格式
msg["Subject"] = Header(mail_info["mail_subject"], mail_info["mail_encoding"])
msg["from"] = mail_info["from"]
msg["to"] = mail_info["to"]
#添加附件
if mail_info["mail_att"]!="" :
sendfile=open(mail_info["mail_att"],'rb').read()
text_att = MIMEText(sendfile, 'base64', 'utf-8')
text_att["Content-Type"] = 'application/octet-stream'
text_att.add_header('Content-Disposition', 'attachment', filename=mail_info["mail_att"])
msg.attach(text_att)
print("邮件开始发送…")
smtp.sendmail(mail_info["from"], mail_info["to"], msg.as_string())
smtp.quit()
print("邮件发送成功")
except smtplib.SMTPException as e:
print("邮件发送失败",e)
return
#++++++++++++++++++++
#加载配置文件
#++++++++++++++++++++
def load_cfg():
global page_begin #起始的开爬页
global page_end #结束页
global each_page_rows #每页记录数
global if_send_mail #是否发邮件
global mail_info #邮件信息
global hds #http agent
global last_datetime #最后时间
global search_keywords #搜索关键字
global time_type #搜索的时间类型
cfg=JsonConf.load()
try:
#先加载常量,防止丢失数据
hds=cfg["hds"]
mail_info=cfg["mail_info"]
each_page_rows=int(cfg["each_page_rows"])
if_send_mail=bool(cfg["if_send_mail"])
#再加载变量
page_begin=int(cfg["page_begin"])
page_end=int(cfg["page_end"])
last_datetime=cfg["last_datetime"]
search_keywords=cfg["search_keywords"]
time_type=cfg["time_type"]
print("成功加载配置文件")
except:
print("配置文件加载失败")
return
#++++++++++++++++++++
#保存配置文件
#++++++++++++++++++++
def save_cfg():
global mail_info
cfg={}
cfg["page_begin"]=str(page_begin)
cfg["page_end"]=str(page_end)
cfg["each_page_rows"]=str(each_page_rows)
cfg["if_send_mail"]=bool(if_send_mail)
#邮件主题,内容,附件不保存
mail_info["mail_subject"]=""
mail_info["mail_text"]=""
mail_info["mail_att"]=""
cfg["mail_info"]=mail_info
cfg["hds"]=hds
cfg["last_datetime"]=last_datetime
cfg["search_keywords"]=search_keywords
cfg["time_type"]=time_type
JsonConf.save(cfg)
return
if __name__=='__main__':
#运行参数配置
#加载配置
load_cfg()
#爬的关键字 多个关键字,用 + 号连接
if search_keywords!="":
zb_tag=search_keywords
else:
zb_tag="河长"
#开爬
zb_list=lists_spider(page_begin,page_end,zb_tag)
#保存配置
save_cfg()
|
#! /usr/bin/env python
# -*- encoding: UTF-8 -*-
"""Example: Use ALSpeechRecognition Module"""
import qi
import argparse
import sys
import time
class SpeechR(object):
def __init__(self, app):
super(SpeechR,self).__init__()
app.start()
session = app.session
self.memory = session.service("ALMemory")
self.subscriber = self.memory.subscriber("WordRecognized")
self.subscriber.signal.connect(self.on_word_detected)
self.tts = session.service("ALTextToSpeech")
self.speechRec = session.service("ALSpeechRecognition")
self.speechRec.setLanguage("English")
vocabulary = ["yes", "no", "please", "hello"]
self.speechRec.pause(True)
self.speechRec.setVocabulary(vocabulary, False)
self.speechRec.pause(False)
self.speechRec.subscribe("SpeechR")
print 'Speech recognition engine started'
self.got_word = False
self.tts = session.service("ALTextToSpeech")
# Start the speech recognition engine with user Test_ASR
#asr_service.subscribe("Test_ASR")
#print 'Speech recognition engine started'
#time.sleep(20)
#asr_service.unsubscribe("Test_ASR")
def on_word_detected(self, value):
if value == []:
self.got_word = False
elif not self.got_word:
#self.got_word = True
#for x in value:
# print x
print value
self.speechRec.pause(True)
self.tts.say("You just said " + str(value[0]))
self.speechRec.pause(False)
def run(self):
"""
Loop on, wait for events until manual interruption.
"""
print "Starting SpeechR"
try:
while True:
time.sleep(3)
except KeyboardInterrupt:
print "Interrupted by user, stopping HumanGreeter"
self.speechRec.unsubscribe("SpeechR")
#stop
sys.exit(0)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--ip", type=str, default="192.168.0.106",
help="Robot IP address. On robot or Local Naoqi: use '127.0.0.1'.")
parser.add_argument("--port", type=int, default=9559,
help="Naoqi port number")
args = parser.parse_args()
try:
# Initialize qi framework.
connection_url = "tcp://" + args.ip + ":" + str(args.port)
app = qi.Application(["SpeechR", "--qi-url=" + connection_url])
except RuntimeError:
print ("Can't connect to Naoqi at ip \"" + args.ip + "\" on port " + str(args.port) +".\n"
"Please check your script arguments. Run with -h option for help.")
sys.exit(1)
sp_rec = SpeechR(app)
sp_rec.run()
|
#O problema envolve uma manipulação simples de listas e dicionários, porém adaptando ele a vida real, foi utilizado orientação a objetos e estruturas de dados.
#A orientação a objetos vem com o objetivo de organizar melhor e diminuir a quantidade de linhas para um código robusto e a estrutura de dados para o processamento
#de grandes quantidade de dados, no caso, se o nosso dicionário fosse enorme, teríamos um grande benefício em utilizar as estrutura de dados,
# pois se considerarmos um cenário real, estamos armazenando esses dados para manipula-lo posteriormente e entre a lista encadeada e a lista
#normal, a duplamente encadeada ou simplesmete encadeada possui um uso de processamento (O(n)) menor dependendo do cénario do que a lista que já existe.
#Importando a lista duplamente encadeada, simulando um problema que envolve complixidade.
from DoubleLinked import DoubleList
#Utilização de uma classe, para caso no futuro possa dar mais métodos e objetivos para o dicionário.
class Dict: #Criado a classe dicionário.
#Aqui é inicializada a classe, criando o dicionário que será usadado para manipulação dos dados, caso queira adicionar ou retirar dados
#basta alterar o dicionário abaixo.
def __init__(self):
self.dictionary = { 'João': 21,
'Maria': 30,
'Matheus': 15,
'Ana': 15,
'pera': 50,
'uva': 2,
'maçã': 55,
'abacaxi': 25,
'laranja': 0,
}
'''Este método procura se a entrada do usuário está no dicionário criado acima, retornando False caso não esteja presente a chave e retornando o valor que está ligado
a chave caso a chave exista.'''
def searcyKey(self, key):
if key not in self.dictionary: #Verificando se a chave não está no dicionário, se não estiver retorna False.
return False
else: #Se estiver
return self.dictionary.get(key) #Retorna o valor da que está ligado a tal chave. A variavel key é a que está responsavel pela chave.
#Aqui estamos na main, onde acontecerá a instância das estruturas de dados e da nossa classe que acabou de ser criada.
dicionario = Dict() #Instancia do dicionário.
lista = DoubleList() #Instância da lista duplamente encadeada
#É utilizado uma repetição que verifica todos as chaves que o usuário deseja verificar, retornando se ela está ou não está no nosso dicionário.
exit = False
while exit == False: #Enquanto exit for False, continuará pedindo inputs pro usuário
key = str(input("Digite uma chave em string, caso queira sair digite 0: ")) #Aqui é onde o looping pode acabar, basta que seja digitado zero.
if key == '0': #Se key for 0 em string, o looping finaliza aqui.
break
saida = dicionario.searcyKey(key) #Se o programa não finalizou significa que a saida possui uma saida, sendo ela False ou um número inteiro.
if saida == False: #Se for False, um boolean, o programa avisará ao usuário que a chave digitada não se encontra no dicionário.
print("Chave não encontrada")
else: #Mas se não for False, saida é um número inteiro que será adicionada a uma lista.
if saida not in lista: #Se o valor inteiro saida estiver dentro da lista, não será feito nada. Mas se não houver tal valor, será adicionado a nossa lista.
lista.append(saida)
#Aqui é o retorno da lista para o usuário, utilizando um for simples.
for i in range(len(lista)):
print(lista[i])
'''Tentei simular um programa mais complexo, que utiliza vários dados dentro do dicionário. Para isso implementei uma lista duplamente encadeada, juntamente com
orientação a objeto visando um problema mais complexo que use menos dos nossos processadores. Existem diversas estruturas de dados que podem ser usada, como filas,
pilhas, arvores, listas encadeadas, etc. Cada cenário irá utilizar alguma delas. Utilizei uma lista encadeada como ilustração, mas poderia ser usada até mesmo uma
lista simples. Mas simulei um cenário mais real.
''' |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.