text stringlengths 38 1.54M |
|---|
from django.db import models
class Notice(models.Model):
    """Bulletin-board notice posted by a user.

    All ``verbose_name`` values are Korean admin/UI labels:
    제목 = title, 내용 = content, 글쓴이 = author,
    작성날짜 = created date, 수정날짜 = updated date.
    """
    # Required title, max 255 chars (null=False/blank=False are the CharField
    # defaults, spelled out here explicitly).
    title = models.CharField(verbose_name='제목', max_length=255, null=False, blank=False)
    # Required body text.
    content = models.TextField(verbose_name='내용', null=False, blank=False)
    # FK to accounts.User stored in column ``user_pk``; deleting the user
    # cascades to their notices.
    # NOTE(review): related_name='author' means the reverse accessor is
    # ``user.author`` — misleading (a name like 'notices' would read better),
    # but changing it would break existing reverse lookups; confirm before renaming.
    author = models.ForeignKey('accounts.User', db_column='user_pk', related_name='author', verbose_name='글쓴이', on_delete=models.CASCADE)
    # Set once on INSERT.
    created = models.DateTimeField(verbose_name='작성날짜', auto_now_add=True)
    # Refreshed on every save().
    updated = models.DateTimeField(verbose_name='수정날짜',auto_now=True)
    def __str__(self):
        """Human-readable representation: the notice title."""
        return self.title
import random
def m(a, b):
    """Merge two already-sorted lists into one sorted list (stable: ties
    are taken from `a` first). Returns a new list; inputs are untouched."""
    merged = []
    pos_a = pos_b = 0
    total = len(a) + len(b)
    while len(merged) < total:
        # Take from `a` when `b` is exhausted or a's head is <= b's head;
        # otherwise take from `b` (covers the a-exhausted case too).
        take_a = pos_a < len(a) and (pos_b >= len(b) or a[pos_a] <= b[pos_b])
        if take_a:
            merged.append(a[pos_a])
            pos_a += 1
        else:
            merged.append(b[pos_b])
            pos_b += 1
    return merged
def ms(a):
    """Top-down merge sort. Returns a new sorted list built via the merge
    helper `m`; does not mutate the input."""
    # Floor division is the idiomatic split point (the original int(len(a)/2)
    # computes the same value but via a float round-trip).
    mid = len(a) // 2
    if mid == 0:
        # 0- or 1-element input is already sorted.
        return a
    return m(ms(a[:mid]), ms(a[mid:]))
def randperm(a):
    """Return the elements of `a` as a new, uniformly random permutation."""
    size = len(a)
    return random.sample(a, size)
# Demo: shuffle 0..19, then sort it back with merge sort.
# Fixed: the original used Python 2 print statements ("print A"), a syntax
# error under Python 3; print() calls behave the same on both versions.
A = randperm(range(20))
print(A)
print(ms(A))
# AML details:
#
# here we want to bypass the bound conditions
# a[i], b[j]
# merge(a,b):
# i in a.inds & j in b.inds:
# a[i] <= b[j] ==> c += [a[i]] else c += [b[j]]
#
# declarative paradigm with NLP associative purposes
#
# a[i], b[j]
# merge(a,b):
# i in a.inds & j in b.inds || iterate by 1: // || iterate by step
# a[i] <= b[j] ==> c += [a[i]] else c += [b[j]]
#
# merge(a,b):
# i in a.inds & j in b.inds || iterate together (i,j) by 1 : // || iterate by step
# a[i] <= b[j] ==> c += [a[i]] else c += [b[j]]
#
# Guessed Interpretation Paradigm
#
# merge(a,b):
# i in a.inds: // for by guess
# j in b.inds || iterate together (i,j) by 1 : // || iterate by step
# a[i] <= b[j] ==> c += [a[i]] else c += [b[j]]
#
#
# So in AML we have stable AML and flexible AML with guessing and associative senses
|
import sqlite3
import os
class DB:
    """Facade over the project's per-feature SQLite databases.

    Every database file lives in ``../db`` relative to the current working
    directory. ``__init__`` opens one connection per file and one or more
    cursors per connection. ``create_*`` methods create tables (and commit);
    ``add_*`` methods only execute INSERTs — callers must commit on the
    matching ``conn_*`` themselves. ``__del__`` closes every connection.
    """

    # --- connections (opened in __init__) ---
    conn_aspects = None
    conn_reviews = None
    conn_merged = None
    conn_sentence = None
    conn_aspects_one_word = None
    conn_reviews_one_word = None
    conn_sentences_one_word = None
    conn_pmi_review = None
    conn_pmi_sentence = None
    conn_pmi_ideal_review = None
    conn_pmi_ideal_sentence = None
    conn_path_weight = None
    conn_semantic_distance = None
    conn_semantic_distance_ideal = None
    conn_local_context_prepare = None
    conn_local_context_prepare_ideal = None
    conn_global_context_prepare = None
    conn_global_context_prepare_extra = None
    conn_global_context_prepare_ideal = None
    conn_global_context_prepare_extra_ideal = None
    conn_global_context = None
    conn_local_context = None
    conn_lexical = None
    conn_lexical_ideal = None
    conn_syntactic = None
    conn_syntactic_ideal = None
    conn_tree = None
    conn_local_context_ideal = None
    conn_global_context_ideal = None
    conn_hierarchy = None
    conn_frequent = None
    conn_ideal_full = None
    conn_hierarchy_real = None
    conn_semantic_distance_real = None

    # --- cursors (created in __init__) ---
    cursor_aspects = None
    cursor_aspects2 = None
    cursor_reviews = None
    cursor_article = None
    cursor_merged = None
    cursor_sentence = None
    cursor_aspects_one_word = None
    cursor_reviews_one_word = None
    cursor_reviews_one_word_update = None
    cursor_sentences_one_word = None
    cursor_sentences_one_word_update = None
    cursor_pmi_review = None
    cursor_pmi_sentence = None
    cursor_pmi_ideal_review = None
    cursor_pmi_ideal_sentence = None
    cursor_path_weight = None
    cursor_semantic_distance = None
    cursor_semantic_distance_ideal = None
    cursor_local_context_prepare = None
    cursor_local_context_prepare_ideal = None
    cursor_global_context_prepare = None
    cursor_global_context_prepare_extra = None
    cursor_global_context_prepare_ideal = None
    cursor_global_context_prepare_extra_ideal = None
    cursor_global_context = None
    cursor_local_context_ideal = None
    cursor_global_context_ideal = None
    cursor_local_context = None
    cursor_lexical = None
    cursor_lexical_ideal = None
    cursor_syntactic = None
    cursor_syntactic_ideal = None
    cursor_tree = None
    cursor_hierarchy = None
    cursor_frequent = None
    cursor_ideal_full = None
    cursor_hierarchy_real = None
    cursor_semantic_distance_real = None

    # --- database file names ---
    db_merged_name = 'Merged.db'
    db_aspects_name = 'Aspects.db'
    db_reviews_name = 'Reviews.db'
    db_sentence_name = 'Sentence.db'
    db_aspects_one_word_name = 'Aspects_One_Word.db'
    db_reviews_one_word_name = 'Reviews_One_Word.db'
    db_sentences_one_word_name = 'Sentences_One_Word.db'
    db_pmi_review_name = 'PMI_Review.db'
    db_pmi_sentence_name = 'PMI_Sentence.db'
    db_pmi_ideal_review_name = 'PMI_Ideal_Review.db'
    db_pmi_ideal_sentence_name = 'PMI_Ideal_Sentence.db'
    db_path_weight = "Path_Weight.db"
    db_semantic_distance = "Semantic_Distance.db"
    db_semantic_distance_ideal = "Semantic_Distance_Ideal.db"
    db_local_context_prepare = "Local_Context_Prepare.db"
    db_local_context_prepare_ideal = "Local_Context_Prepare_Ideal.db"
    db_global_context_prepare = "Global_Context_Prepare.db"
    db_global_context_prepare_extra = "Global_Context_Prepare_Extra.db"
    db_global_context_prepare_ideal = "Global_Context_Prepare_Ideal.db"
    db_global_context_prepare_extra_ideal = "Global_Context_Prepare_Extra_Ideal.db"
    db_global_context = "Global_Context.db"
    db_local_context = "Local_Context.db"
    db_lexical = "Lexical.db"
    db_lexical_ideal = "Lexical_Ideal.db"
    db_syntactic = "Syntactic.db"
    db_syntactic_ideal = "Syntactic_Ideal.db"
    db_tree = "Tree.db"
    db_local_context_ideal = "Local_Context_Ideal.db"
    db_global_context_ideal = "Global_Context_Ideal.db"
    db_hierarchy = "Hierarchy.db"
    db_frequent = "Frequent.db"
    db_ideal_full = "Ideal_Full.db"
    db_hierarchy_real = "Hierarchy_Real.db"
    db_semantic_distance_real = "Semantic_Distance_Real.db"

    def __init__(self):
        """Open every database under ../db and create all working cursors."""
        base = os.path.join(os.getcwd(), "..", "db")

        def connect(name):
            # All database files sit side by side in ../db; os.path.join
            # replaces the repeated string concatenation of the original.
            return sqlite3.connect(os.path.join(base, name))

        self.conn_aspects = connect(self.db_aspects_name)
        self.conn_reviews = connect(self.db_reviews_name)
        self.conn_merged = connect(self.db_merged_name)
        self.conn_sentence = connect(self.db_sentence_name)
        self.conn_aspects_one_word = connect(self.db_aspects_one_word_name)
        self.conn_reviews_one_word = connect(self.db_reviews_one_word_name)
        self.conn_sentences_one_word = connect(self.db_sentences_one_word_name)
        self.conn_pmi_review = connect(self.db_pmi_review_name)
        self.conn_pmi_sentence = connect(self.db_pmi_sentence_name)
        self.conn_pmi_ideal_review = connect(self.db_pmi_ideal_review_name)
        self.conn_pmi_ideal_sentence = connect(self.db_pmi_ideal_sentence_name)
        self.conn_path_weight = connect(self.db_path_weight)
        self.conn_semantic_distance = connect(self.db_semantic_distance)
        self.conn_semantic_distance_ideal = connect(self.db_semantic_distance_ideal)
        self.conn_local_context_prepare = connect(self.db_local_context_prepare)
        self.conn_local_context_prepare_ideal = connect(self.db_local_context_prepare_ideal)
        self.conn_global_context_prepare = connect(self.db_global_context_prepare)
        self.conn_global_context_prepare_extra = connect(self.db_global_context_prepare_extra)
        self.conn_global_context_prepare_ideal = connect(self.db_global_context_prepare_ideal)
        self.conn_global_context_prepare_extra_ideal = connect(self.db_global_context_prepare_extra_ideal)
        self.conn_global_context = connect(self.db_global_context)
        self.conn_local_context = connect(self.db_local_context)
        self.conn_lexical = connect(self.db_lexical)
        self.conn_syntactic = connect(self.db_syntactic)
        self.conn_tree = connect(self.db_tree)
        self.conn_lexical_ideal = connect(self.db_lexical_ideal)
        self.conn_syntactic_ideal = connect(self.db_syntactic_ideal)
        self.conn_local_context_ideal = connect(self.db_local_context_ideal)
        self.conn_global_context_ideal = connect(self.db_global_context_ideal)
        self.conn_hierarchy = connect(self.db_hierarchy)
        self.conn_frequent = connect(self.db_frequent)
        self.conn_ideal_full = connect(self.db_ideal_full)
        self.conn_hierarchy_real = connect(self.db_hierarchy_real)
        self.conn_semantic_distance_real = connect(self.db_semantic_distance_real)
        # Cursors. A few connections intentionally carry several cursors so
        # independent read/update passes do not clobber each other's state.
        self.cursor_merged = self.conn_merged.cursor()
        self.cursor_aspects = self.conn_aspects.cursor()
        self.cursor_aspects2 = self.conn_aspects.cursor()
        self.cursor_reviews = self.conn_reviews.cursor()
        self.cursor_article = self.conn_aspects.cursor()
        self.cursor_sentence = self.conn_sentence.cursor()
        self.cursor_aspects_one_word = self.conn_aspects_one_word.cursor()
        self.cursor_reviews_one_word = self.conn_reviews_one_word.cursor()
        self.cursor_reviews_one_word_update = self.conn_reviews_one_word.cursor()
        self.cursor_sentences_one_word = self.conn_sentences_one_word.cursor()
        # Fixed: this attribute was previously assigned a brand-new
        # sqlite3 *connection* instead of a cursor, which both leaked a
        # connection and gave the attribute the wrong type. It is now a
        # second cursor on conn_sentences_one_word, mirroring
        # cursor_reviews_one_word_update above.
        self.cursor_sentences_one_word_update = self.conn_sentences_one_word.cursor()
        self.cursor_pmi_review = self.conn_pmi_review.cursor()
        self.cursor_pmi_sentence = self.conn_pmi_sentence.cursor()
        self.cursor_pmi_ideal_review = self.conn_pmi_ideal_review.cursor()
        self.cursor_pmi_ideal_sentence = self.conn_pmi_ideal_sentence.cursor()
        self.cursor_path_weight = self.conn_path_weight.cursor()
        self.cursor_semantic_distance = self.conn_semantic_distance.cursor()
        self.cursor_semantic_distance_ideal = self.conn_semantic_distance_ideal.cursor()
        self.cursor_local_context_prepare = self.conn_local_context_prepare.cursor()
        self.cursor_local_context_prepare_ideal = self.conn_local_context_prepare_ideal.cursor()
        self.cursor_global_context_prepare = self.conn_global_context_prepare.cursor()
        self.cursor_global_context_prepare_extra = self.conn_global_context_prepare_extra.cursor()
        self.cursor_global_context_prepare_ideal = self.conn_global_context_prepare_ideal.cursor()
        self.cursor_global_context_prepare_extra_ideal = self.conn_global_context_prepare_extra_ideal.cursor()
        self.cursor_global_context = self.conn_global_context.cursor()
        self.cursor_local_context = self.conn_local_context.cursor()
        self.cursor_lexical = self.conn_lexical.cursor()
        self.cursor_lexical_ideal = self.conn_lexical_ideal.cursor()
        self.cursor_syntactic = self.conn_syntactic.cursor()
        self.cursor_tree = self.conn_tree.cursor()
        self.cursor_syntactic_ideal = self.conn_syntactic_ideal.cursor()
        self.cursor_local_context_ideal = self.conn_local_context_ideal.cursor()
        self.cursor_global_context_ideal = self.conn_global_context_ideal.cursor()
        self.cursor_hierarchy = self.conn_hierarchy.cursor()
        self.cursor_frequent = self.conn_frequent.cursor()
        self.cursor_ideal_full = self.conn_ideal_full.cursor()
        self.cursor_hierarchy_real = self.conn_hierarchy_real.cursor()
        self.cursor_semantic_distance_real = self.conn_semantic_distance_real.cursor()

    # --- table-creation helpers (each creates its table if absent, then commits) ---
    def create_semantic_distance_real_db(self):
        self.cursor_semantic_distance_real.execute(
            '''CREATE TABLE IF NOT EXISTS Distance (aspect1 TEXT, aspect2 TEXT, distance FLOAT)''')
        self.conn_semantic_distance_real.commit()

    def create_hierarchy_real_db(self):
        self.cursor_hierarchy_real.execute('''CREATE TABLE IF NOT EXISTS Hierarchy (parent TEXT, child TEXT)''')
        self.conn_hierarchy_real.commit()

    def create_ideal_full_db(self):
        self.cursor_ideal_full.execute('''CREATE TABLE IF NOT EXISTS Ideal (filename TEXT, aspect1 TEXT, aspect2 TEXT, pmi_review FLOAT, pmi_sentence FLOAT, lexical FLOAT, syntactic FLOAT, local_context FLOAT, global_context FLOAT, weight FLOAT)''')
        self.conn_ideal_full.commit()

    def create_frequent_db(self):
        self.cursor_frequent.execute('''CREATE TABLE IF NOT EXISTS Frequent (word TEXT, number INT)''')
        self.conn_frequent.commit()

    def create_hierarchy_db(self):
        self.cursor_hierarchy.execute('''CREATE TABLE IF NOT EXISTS Hierarchy (parent TEXT, child TEXT)''')
        self.conn_hierarchy.commit()

    def create_context_global_ideal_db(self):
        self.cursor_global_context_ideal.execute('''CREATE TABLE IF NOT EXISTS Context (aspect1 TEXT, aspect2 TEXT, kl_divergence DOUBLE)''')
        self.conn_global_context_ideal.commit()

    def create_context_local_ideal_db(self):
        self.cursor_local_context_ideal.execute('''CREATE TABLE IF NOT EXISTS Context (aspect1 TEXT, aspect2 TEXT, kl_divergence DOUBLE)''')
        self.conn_local_context_ideal.commit()

    def create_tree_db(self):
        self.cursor_tree.execute('''CREATE TABLE IF NOT EXISTS Tree (sentence TEXT, tree TEXT)''')
        self.conn_tree.commit()

    def create_syntactic_ideal_db(self):
        self.cursor_syntactic_ideal.execute('''CREATE TABLE IF NOT EXISTS Syntactic (aspect1 TEXT, aspect2 TEXT, syntactic_path INT)''')
        self.conn_syntactic_ideal.commit()

    def create_syntactic_db(self):
        self.cursor_syntactic.execute('''CREATE TABLE IF NOT EXISTS Syntactic (aspect1 TEXT, aspect2 TEXT, syntactic_path INT)''')
        self.conn_syntactic.commit()

    def create_lexical_db(self):
        self.cursor_lexical.execute('''CREATE TABLE IF NOT EXISTS Lexical (aspect1 TEXT, aspect2 TEXT, length_difference INT)''')
        self.conn_lexical.commit()

    def create_lexical_ideal_db(self):
        self.cursor_lexical_ideal.execute('''CREATE TABLE IF NOT EXISTS Lexical (aspect1 TEXT, aspect2 TEXT, length_difference INT)''')
        self.conn_lexical_ideal.commit()

    def create_context_global_db(self):
        self.cursor_global_context.execute('''CREATE TABLE IF NOT EXISTS Context (aspect1 TEXT, aspect2 TEXT, kl_divergence DOUBLE)''')
        self.conn_global_context.commit()

    def create_context_local_db(self):
        self.cursor_local_context.execute('''CREATE TABLE IF NOT EXISTS Context (aspect1 TEXT, aspect2 TEXT, kl_divergence DOUBLE)''')
        self.conn_local_context.commit()

    def create_context_local_prepare_db(self):
        self.cursor_local_context_prepare.execute('''CREATE TABLE IF NOT EXISTS Context (aspect TEXT, context TEXT)''')
        self.conn_local_context_prepare.commit()

    def create_context_local_prepare_ideal_db(self):
        self.cursor_local_context_prepare_ideal.execute('''CREATE TABLE IF NOT EXISTS Context (aspect TEXT, context TEXT)''')
        self.conn_local_context_prepare_ideal.commit()

    def create_context_global_prepare_ideal_db(self):
        self.cursor_global_context_prepare_ideal.execute('''CREATE TABLE IF NOT EXISTS Context (aspect TEXT, review TEXT)''')
        self.conn_global_context_prepare_ideal.commit()

    def create_context_global_prepare_extra_ideal_db(self):
        self.cursor_global_context_prepare_extra_ideal.execute(
            '''CREATE TABLE IF NOT EXISTS Context (aspect TEXT, context TEXT)''')
        self.conn_global_context_prepare_extra_ideal.commit()

    def create_context_global_prepare_db(self):
        self.cursor_global_context_prepare.execute('''CREATE TABLE IF NOT EXISTS Context (aspect TEXT, review TEXT)''')
        self.conn_global_context_prepare.commit()

    def create_context_global_prepare_extra_db(self):
        self.cursor_global_context_prepare_extra.execute('''CREATE TABLE IF NOT EXISTS Context (aspect TEXT, context TEXT)''')
        self.conn_global_context_prepare_extra.commit()

    def create_semantic_distance_db(self):
        self.cursor_semantic_distance.execute('''CREATE TABLE IF NOT EXISTS Distance (aspect1 TEXT, aspect2 TEXT, distance FLOAT)''')
        self.conn_semantic_distance.commit()

    def create_semantic_distance_ideal_db(self):
        self.cursor_semantic_distance_ideal.execute('''CREATE TABLE IF NOT EXISTS Distance (aspect1 TEXT, aspect2 TEXT, distance FLOAT)''')
        self.conn_semantic_distance_ideal.commit()

    def create_path_weight_db(self):
        self.cursor_path_weight.execute('''CREATE TABLE IF NOT EXISTS Weight (filename TEXT, aspect1 TEXT, aspect2 TEXT, weight INT)''')
        self.conn_path_weight.commit()

    def create_pmi_ideal_review_db(self):
        self.cursor_pmi_ideal_review.execute('''CREATE TABLE IF NOT EXISTS PMI
            (aspect1 TEXT, aspect2 TEXT, aspect1Num INT, aspect2Num INT, bothNum INT, pmi DOUBLE)''')
        self.conn_pmi_ideal_review.commit()

    def create_pmi_ideal_sentence_db(self):
        self.cursor_pmi_ideal_sentence.execute('''CREATE TABLE IF NOT EXISTS PMI
            (aspect1 TEXT, aspect2 TEXT, aspect1Num INT, aspect2Num INT, bothNum INT, pmi DOUBLE)''')
        self.conn_pmi_ideal_sentence.commit()

    def create_pmi_review_db(self):
        self.cursor_pmi_review.execute('''CREATE TABLE IF NOT EXISTS PMI
            (aspect1 TEXT, aspect2 TEXT, aspect1Num INT, aspect2Num INT, bothNum INT, pmi DOUBLE)''')
        self.conn_pmi_review.commit()

    def create_pmi_sentence_db(self):
        self.cursor_pmi_sentence.execute('''CREATE TABLE IF NOT EXISTS PMI
            (aspect1 TEXT, aspect2 TEXT, aspect1Num INT, aspect2Num INT, bothNum INT, pmi DOUBLE)''')
        self.conn_pmi_sentence.commit()

    def create_aspects_one_word_db(self):
        self.cursor_aspects_one_word.execute('''CREATE TABLE IF NOT EXISTS Aspects
            (article TEXT, advantageAspects TEXT, disadvantageAspects TEXT, commentAspects TEXT)''')
        self.conn_aspects_one_word.commit()

    def create_reviews_one_word_db(self):
        self.cursor_reviews_one_word.execute('''CREATE TABLE IF NOT EXISTS Reviews
            (article TEXT, advantageAspects TEXT, disadvantageAspects TEXT, commentAspects TEXT)''')
        self.conn_reviews_one_word.commit()

    def create_sentences_one_word_db(self):
        self.cursor_sentences_one_word.execute('''CREATE TABLE IF NOT EXISTS Sentences (article TEXT, sentence TEXT)''')
        self.conn_sentences_one_word.commit()

    def create_aspects_db(self):
        self.cursor_aspects.execute('''CREATE TABLE IF NOT EXISTS Aspects
            (article TEXT, advantageAspects TEXT, disadvantageAspects TEXT, commentAspects TEXT)''')
        self.conn_aspects.commit()

    def create_sentence_db(self):
        self.cursor_sentence.execute('''CREATE TABLE IF NOT EXISTS Sentences (article TEXT, sentence TEXT)''')
        self.conn_sentence.commit()

    # --- row-insertion helpers (no commit: callers commit on the matching conn_*) ---
    def add_semantic_distance_real(self, aspect1, aspect2, distance):
        self.cursor_semantic_distance_real.execute('INSERT INTO Distance (aspect1, aspect2, distance) VALUES (?, ?, ?)',
                                                   (aspect1, aspect2, distance))

    def add_hierarchy_real(self, parent, child):
        self.cursor_hierarchy_real.execute('INSERT INTO Hierarchy (parent, child) VALUES (?, ?)', (parent, child))

    def add_ideal_full(self, filename, aspect1, aspect2, pmi_review, pmi_sentence, lexical, syntactic, local_context, global_context, weight):
        self.cursor_ideal_full.execute('INSERT INTO Ideal (filename, aspect1, aspect2, pmi_review, pmi_sentence, lexical, syntactic, local_context, global_context, weight) VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?)', (filename, aspect1, aspect2, pmi_review, pmi_sentence, lexical, syntactic, local_context, global_context, weight,))

    def add_frequent(self, word, number):
        self.cursor_frequent.execute('INSERT INTO Frequent (word, number) VALUES (?, ?)', (word, number))

    def add_hierarchy(self, parent, child):
        self.cursor_hierarchy.execute('INSERT INTO Hierarchy (parent, child) VALUES (?, ?)', (parent, child))

    def add_tree(self, sentence, tree):
        self.cursor_tree.execute('INSERT INTO Tree (sentence, tree) VALUES (?, ?)', (sentence, tree))

    def add_syntactic_ideal(self, aspect1, aspect2, syntactic_path):
        self.cursor_syntactic_ideal.execute('INSERT INTO Syntactic (aspect1, aspect2, syntactic_path) VALUES (?, ?, ?)',
                                            (aspect1, aspect2, syntactic_path))

    def add_syntactic(self, aspect1, aspect2, syntactic_path):
        self.cursor_syntactic.execute('INSERT INTO Syntactic (aspect1, aspect2, syntactic_path) VALUES (?, ?, ?)',
                                      (aspect1, aspect2, syntactic_path))

    def add_lexical(self, aspect1, aspect2, length_difference):
        self.cursor_lexical.execute('INSERT INTO Lexical (aspect1, aspect2, length_difference) VALUES (?, ?, ?)',
                                    (aspect1, aspect2, length_difference))

    def add_lexical_ideal(self, aspect1, aspect2, length_difference):
        self.cursor_lexical_ideal.execute('INSERT INTO Lexical (aspect1, aspect2, length_difference) VALUES (?, ?, ?)',
                                          (aspect1, aspect2, length_difference))

    def add_context_global_ideal(self, aspect1, aspect2, kl_divergence):
        self.cursor_global_context_ideal.execute('INSERT INTO Context (aspect1, aspect2, kl_divergence) VALUES (?, ?, ?)',
                                                 (aspect1, aspect2, kl_divergence))

    def add_context_local_ideal(self, aspect1, aspect2, kl_divergence):
        self.cursor_local_context_ideal.execute('INSERT INTO Context (aspect1, aspect2, kl_divergence) VALUES (?, ?, ?)',
                                                (aspect1, aspect2, kl_divergence))

    def add_context_global(self, aspect1, aspect2, kl_divergence):
        self.cursor_global_context.execute('INSERT INTO Context (aspect1, aspect2, kl_divergence) VALUES (?, ?, ?)',
                                           (aspect1, aspect2, kl_divergence))

    def add_context_local(self, aspect1, aspect2, kl_divergence):
        self.cursor_local_context.execute('INSERT INTO Context (aspect1, aspect2, kl_divergence) VALUES (?, ?, ?)',
                                          (aspect1, aspect2, kl_divergence))

    def add_context_local_prepare(self, aspect, context):
        self.cursor_local_context_prepare.execute('INSERT INTO Context (aspect, context) VALUES (?, ?)', (aspect, context))

    def add_context_local_prepare_ideal(self, aspect, context):
        self.cursor_local_context_prepare_ideal.execute('INSERT INTO Context (aspect, context) VALUES (?, ?)', (aspect, context))

    def add_context_global_prepare_ideal(self, aspect, review):
        self.cursor_global_context_prepare_ideal.execute('INSERT INTO Context (aspect, review) VALUES (?, ?)',
                                                         (aspect, review))

    def add_context_global_prepare_extra_ideal(self, aspect, context):
        self.cursor_global_context_prepare_extra_ideal.execute('INSERT INTO Context (aspect, context) VALUES (?, ?)',
                                                               (aspect, context))

    def add_context_global_prepare(self, aspect, review):
        self.cursor_global_context_prepare.execute('INSERT INTO Context (aspect, review) VALUES (?, ?)', (aspect, review))

    def add_context_global_prepare_extra(self, aspect, context):
        self.cursor_global_context_prepare_extra.execute('INSERT INTO Context (aspect, context) VALUES (?, ?)',
                                                         (aspect, context))

    def add_semantic_distance_ideal(self, aspect1, aspect2, distance):
        self.cursor_semantic_distance_ideal.execute('INSERT INTO Distance (aspect1, aspect2, distance) VALUES (?, ?, ?)',
                                                    (aspect1, aspect2, distance))

    def add_semantic_distance(self, aspect1, aspect2, distance):
        self.cursor_semantic_distance.execute('INSERT INTO Distance (aspect1, aspect2, distance) VALUES (?, ?, ?)',
                                              (aspect1, aspect2, distance))

    def add_path_weight(self, filename, aspect1, aspect2, weight):
        self.cursor_path_weight.execute('INSERT INTO Weight (filename, aspect1, aspect2, weight) VALUES (?, ?, ?, ?)',
                                        (filename, aspect1, aspect2, weight))

    def add_pmi_ideal_review(self, aspect1, aspect2, num1, num2, both_num, pmi):
        self.cursor_pmi_ideal_review.execute(
            'INSERT INTO PMI (aspect1, aspect2, aspect1Num, aspect2Num, bothNum, pmi) VALUES (?, ?, ?, ?, ?, ?)',
            (aspect1, aspect2, num1, num2, both_num, pmi))

    def add_pmi_ideal_sentence(self, aspect1, aspect2, num1, num2, both_num, pmi):
        self.cursor_pmi_ideal_sentence.execute(
            'INSERT INTO PMI (aspect1, aspect2, aspect1Num, aspect2Num, bothNum, pmi) VALUES (?, ?, ?, ?, ?, ?)',
            (aspect1, aspect2, num1, num2, both_num, pmi))

    def add_pmi_review(self, aspect1, aspect2, num1, num2, both_num, pmi):
        self.cursor_pmi_review.execute(
            'INSERT INTO PMI (aspect1, aspect2, aspect1Num, aspect2Num, bothNum, pmi) VALUES (?, ?, ?, ?, ?, ?)',
            (aspect1, aspect2, num1, num2, both_num, pmi))

    def add_pmi_sentence(self, aspect1, aspect2, num1, num2, both_num, pmi):
        self.cursor_pmi_sentence.execute(
            'INSERT INTO PMI (aspect1, aspect2, aspect1Num, aspect2Num, bothNum, pmi) VALUES (?, ?, ?, ?, ?, ?)',
            (aspect1, aspect2, num1, num2, both_num, pmi))

    def add_sentence(self, article, sentence):
        self.cursor_sentence.execute('INSERT INTO Sentences (article, sentence) VALUES (?, ?)', (article, sentence))

    def add_review(self, article, advantage_aspects, disadvantage_aspects, comment_aspects):
        # Note: despite the name, this inserts into the Aspects database.
        self.cursor_aspects.execute(
            'INSERT INTO Aspects (article, advantageAspects, disadvantageAspects, commentAspects) VALUES (?, ?, ?, ?)',
            (article, advantage_aspects, disadvantage_aspects, comment_aspects))

    def add_one_word_aspects(self, article, advantage_aspects, disadvantage_aspects, comment_aspects):
        # Consistency fix: use the dedicated cursor like every other add_*
        # helper (the original called Connection.execute, which spawns a
        # throwaway cursor per call).
        self.cursor_aspects_one_word.execute(
            'INSERT INTO Aspects (article, advantageAspects, disadvantageAspects, commentAspects) VALUES (?, ?, ?, ?)',
            (article, advantage_aspects, disadvantage_aspects, comment_aspects))

    def add_one_word_review(self, article, advantage_aspects, disadvantage_aspects, comment_aspects):
        # Consistency fix: cursor instead of Connection.execute (see above).
        self.cursor_reviews_one_word.execute(
            'INSERT INTO Reviews (article, advantageAspects, disadvantageAspects, commentAspects) VALUES (?, ?, ?, ?)',
            (article, advantage_aspects, disadvantage_aspects, comment_aspects))

    def add_one_word_sentence(self, article, sentence):
        # Consistency fix: cursor instead of Connection.execute (see above).
        self.cursor_sentences_one_word.execute('INSERT INTO Sentences (article, sentence) VALUES (?, ?)',
                                               (article, sentence))

    # destructor - close connections
    def __del__(self):
        # Fixed: the original closed only conn_aspects and conn_reviews,
        # leaking the other ~32 connections. Close every conn_* attribute,
        # tolerating partially-initialized instances.
        for name, conn in list(vars(self).items()):
            if name.startswith('conn_') and conn is not None:
                try:
                    conn.close()
                except Exception:
                    pass

    def delete_aspects(self, article):
        # Fixed: parameterized query instead of string concatenation, which
        # was injectable and produced invalid SQL for textual article ids.
        self.cursor_aspects.execute('DELETE FROM Aspects WHERE article = ?', (str(article),))
        self.conn_aspects.commit()
|
# -*- coding: utf-8 -*-
# Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: http://doc.scrapy.org/en/latest/topics/item-pipeline.html
#
import pymssql
from scrapy import log
import datetime
from decimal import Decimal
import decimal
class sqlserver(object):
def __init__(self):
self.infoNums = 0
self.updates = 0
self.inserts = 0
self.donones = 0
self.errorNums = 0
# host = "10.1.12.16\HFDATA"
# db = "Haifeng.CrawlerData"
# user = "luopx"
# passwd = "Hf.123"
host = "10.1.18.35"
db = "Haifeng.FundRawData.DB"
user = "sa"
passwd = "8927968"
self.conn = pymssql.connect(host=host, database=db, user=user, password=passwd, charset="utf8")
self.cursor = self.conn.cursor()
self.conn.autocommit(True)
self.conn1 = pymssql.connect(host=host, database=db, user=user, password=passwd, charset="utf8")
self.cursor1 = self.conn1.cursor()
self.conn1.autocommit(True)
self.sql = ""
self.tableName = ""
self.tablekeys = []
self.querykeys = []
self.item = {}
self.olditem = {}
#sql查询
# def printsql(self):
# print(self.sql)
def printsql(func):
def foo(self,*args,**kwargs):
result = func(self,*args,**kwargs)
try:
print(self.sql)
except:
pass
return result
return foo
@printsql
def getQueryResult(self,item,keys, wherekey, tb, isfetchall=1):
wherekv = {}
items = self.changeitem(item)
print(wherekey)
for i in wherekey:
wherekv[i]=items[i]
print(items)
wherekey = dict(wherekv)
for i in wherekey:
wherekey[i] = str(wherekey[i])
q1 = ",".join(keys)
if wherekey:
q2 = "where "+" and ".join(map(lambda x: " = ".join(x) if x[1] != 'Null' else " is ".join(x), wherekey.items()))
else:
q2 = ""
sql = "select %s from %s %s" % (q1, self.tableName, q2)
self.sql = sql
self.cursor.execute(sql)
try:
if isfetchall is True:
result = self.cursor.fetchone()
else:
result = self.cursor.fetchall()
if result:
return result
else:
# print("that is none result for your query")
return [None]
except Exception as e:
log.msg("sql is:%s/n reason is :%s"%(sql,e))
try:
self.conn.commit()
except:
print("commit false")
#将item 非数值 和非空字段 添加''包衣
def changeitem2(self,item, changekey=False, changekeyToNull=[]):
items = dict(item)
for i in items.keys():
if item[i] and item[i] in changekeyToNull and changekey:
item[i] = None
elif items[i] is False and items[i] != 0:
items[i] = None
# print("finshed to change item ready for check")
return item
def changeitem(self,item,changekey=False, changekeyToNull=[]):
items = dict(item)
for i in items.keys():
if type(items[i]) is str and items[i]!="Null":
items[i] = "'%s'" % item[i]
elif items[i] == None or items[i] == "":
items[i] = "Null"
else:
items[i] = str(items[i])
return items
#insert操作
@printsql
def insert(self,item):
items = self.changeitem(item)
q1 = ",".join([str(x[0]) for x in items.items()]) + ",AddTime,Checktime"
q2 = ",".join([str(x[1]) for x in items.items()]) + ",'" + datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S") + "','" + datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S") + "'"
sql = "insert into %s (%s) values (%s)" % (self.tableName,q1, q2)
self.sql = sql
try:
self.cursor.execute(sql)
try:
self.conn.commit()
except:
print("commit false")
self.inserts += 1
print("insert sucess")
except Exception as e:
# print("insert fail ,%s" % e)
log.msg("sql is:%s/n reason is :%s"%(sql,e))
self.errorNums += 1
#update操作
@printsql
def update(self,item,wherekey):
items = self.changeitem(item)
wherekv = {}
for i in self.tablekeys:
wherekv[i] = items[i]
dictlist = list(items.items())
keysword = ",".join(list(
map(lambda x: " = ".join([str(x[0]), str(x[1])]), dictlist))) + " , updatetime = '%s' , checktime = '%s'" % (
datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S"), datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S"))
wherekv = "where "+" and ".join(map(lambda x: " = ".join(x) if x[1] != 'Null' else " is ".join(x), wherekv.items()))
# sql = "update %s set updatetime = '%s' , checktime = '%s' %s" % (self.tableName, datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S"), datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S"), wherekey)
sql = "update %s set %s %s" % (self.tableName, keysword, wherekv)
self.sql = sql
try:
self.cursor.execute(sql)
try:
self.conn.commit()
except:
print("commit false")
print("update sucess")
self.updates += 1
except Exception as e:
# print("update fail ,%s"%e)
log.msg("sql is:%s/n reason is :%s"%(sql,e))
self.errorNums += 1
#无需任何操作 若需更新checktime执行该方法
@printsql
def donone(self,item,wherekey):
items = self.changeitem(item)
wherekv = {}
for i in wherekey:
wherekv[i] = items[i]
wherekey = "where "+" and ".join(map(lambda x: " = ".join(x) if x[1] != 'Null' else " is ".join(x), wherekv.items()))
sql = "update %s set checktime = '%s' %s" % (self.tableName, datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S"), wherekey)
self.sql = sql
try:
self.cursor.execute(sql)
try:
self.conn.commit()
except:
print("commit false")
print("update stime sucess")
self.donones += 1
except Exception as e:
print("update stime fail,%s"%e)
log.msg("sql is:%s/n reason is :%s"%(sql,e))
self.errorNums += 1
return sql
def getid(self,item,wherekey):
items = self.changeitem(item)
wherekv = {}
for i in wherekey:
wherekv[i] = items[i]
wherekey = "where "+" and ".join(map(lambda x: " = ".join(x) if x[1] != 'Null' else " is ".join(x), wherekv.items()))
sql = "select id from %s %s"%(self.tableName, wherekey)
self.sql = sql
try:
self.cursor.execute(sql)
result = self.cursor.fetchone()
return result[0]
except Exception as e:
log.msg("sql is:%s/n reason is :%s"%(sql,e))
try:
self.conn.commit()
except:
print("get id false")
@printsql
def saveold(self,items):
v = self.getid(items,self.tablekeys)
sql = "insert into AMAC.Change_List (ChangeObjectType,ChangeObjectID,ChangePropertyName,ChangeTime,BeforeContent,AfterContent) values ('%s','%s','%s','%s','%s','%s')"
for k in items.keys():
if items[k] != self.olditem[k]:
old = float(self.olditem[k]) if isinstance(self.olditem[k],Decimal) else self.olditem[k]
new = items[k]
sql = sql%(self.tableName,v,k,datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S"),old,new)
self.sql = sql
try:
self.cursor.execute(sql)
try:
self.conn.commit()
except:
print("commit false")
print("saveold sucess")
except Exception as e:
print("update stime fail,%s"%e)
log.msg("sql is:%s/n reason is :%s"%(sql,e))
self.errorNums += 1
def sqlquery(self,item,keys,wherekeys,tb,isfetchall=1):
def catch(listx, listy):
for x, y in zip(listx, listy):
if type(x) is int or type(x) is float or type(y) is float or type(y) is int:
try:
x = float(x)
if x != y:
print(x,y)
return False
except:
y = float(y)
if x != y:
print(x,y)
return False
else:
if x!=y:
print(x,y)
return False
return True
itemlist = [x[1]for x in item.items()]
keys = [x[0] for x in item.items()]
result = self.getQueryResult(item,keys,wherekeys,tb,isfetchall=isfetchall)
result = result[0] if result else None
queryResult2 = []
if result:
for i in result:
# print(type(i))
if type(i) is decimal.Decimal or type(i) is float:
queryResult2.append(float(i))
elif type(i) is int:
queryResult2.append(i)
elif type(i) is str:
queryResult2.append(i.encode('latin1').decode('GBK'))
elif isinstance(i,datetime.datetime):
queryResult2.append(i.strftime("%Y-%m-%d"))
else:
queryResult2.append(i)
result = queryResult2
if catch(queryResult2, itemlist):
return 1 # 无需更新
else:
for k,v in zip(keys,queryResult2):
self.olditem[k] = float(v) if isinstance(v,Decimal) else v
print("old",self.olditem,"new",item)
# print("对比",itemlist,queryResult2)
return 2 # 需要跟新
else:
return 0 # 需要插入
def dbclose(self):
try:
self.cursor.close()
except:
pass
try:
self.conn.close()
except:
pass
    def process_item(self, item, spider):
        """Scrapy pipeline hook (base implementation).

        NOTE(review): the inner main() is defined but never invoked here, and
        it calls self.sqlquery with a single argument while sqlquery requires
        four — so this base version only counts the item and resets olditem.
        Subclasses (e.g. HowbuyMangerPipeline) provide the working override;
        confirm whether this dead inner function can be removed.
        """
        self.infoNums += 1
        self.olditem = {}
        def main(item):
            items = self.changeitem2(item)
            error = self.sqlquery(items)
            print("flag is :")
            print(error)
            if error == 0:
                self.insert(items)
            elif error == 1:
                self.update2(items)
            elif error == 2:
                self.donone(items)
def close_spider(self, spider):
print("爬虫结束,结束时间为%s,\n本次共采集%s条,插入%s,更新%s条,无需操作%s条,其中有%s条错误信息" % (
datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S"), self.infoNums, self.inserts, self.updates, self.donones,
self.errorNums))
log.msg("爬虫结束,结束时间为%s,\n本次共成功解析%s,其中插入%s,更新%s,无需操作%s,其中有%s条错误信息" % (
datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S"), self.infoNums, self.inserts, self.updates, self.donones,
self.errorNums), level=log.CRITICAL)
self.conn.close()
self.cursor.close()
self.conn1.close()
self.cursor1.close()
class HowbuyMangerPipeline(sqlserver):
    """Pipeline persisting howbuy manager items into SQL Server.

    Each item may carry per-item overrides: item['db'] (target table) and
    item['keys'] (where-clause keys); the payload itself lives in
    item['result'].
    """
    def __init__(self):
        sqlserver.__init__(self)
        self.tb = ""
        self.tablekeys = ["manager_id"]       # default lookup key
        self.schema = "dbo"
        self.tableName = "dbo.howbuy_manager"  # default target table

    def process_item(self, item, spider):
        """Insert, update or skip the incoming item based on sqlquery's flag.

        Flag semantics (see sqlserver.sqlquery): 0 = row absent -> insert,
        1 = row identical -> nothing to do, 2 = row differs -> update.
        """
        self.infoNums += 1
        self.olditem = {}
        # Optional per-item overrides shipped by the spider.
        try:
            self.tableName = item['db']
        except KeyError:
            pass
        try:
            self.tablekeys = item['keys']
        except KeyError:
            pass
        payload = item['result']
        items = self.changeitem2(payload)
        flag = self.sqlquery(payload, payload.keys(), self.tablekeys,
                             self.tableName)
        if flag == 0:
            self.insert(items)
        elif flag == 2:
            print("save old比较", self.olditem, items)
            self.update(items, self.tablekeys)
        elif flag == 1:
            self.donone(items, self.tablekeys)
        return payload
import sys,os
import socket
import threading
import elevate
def server_loop(local_host, local_port, remote_host, remote_port, receive_first):
    """Bind to local_host:local_port and hand each incoming client to a
    proxy_handler thread targeting remote_host:remote_port."""
    listener = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    try:
        listener.bind((local_host, local_port))
    except:
        print ("[!!] Erreur d'ecoute sur %s:%d" %(local_host,local_port))
        sys.exit(0)
    print ("[*] Serveur en ecoute sur %s:%d" %(local_host,local_port))
    listener.listen(5)
    while True:
        client_socket, addr = listener.accept()
        print ("[*] Connexion entrante recue de %s:%d" %(addr[0],addr[1]))
        worker = threading.Thread(
            target=proxy_handler,
            args=(client_socket, remote_host, remote_port, receive_first))
        worker.start()
def proxy_handler(client_socket, remote_host, remote_port, receive_first):
    """Shuttle traffic between *client_socket* and remote_host:remote_port,
    passing each direction through request_handler/response_handler.

    :param receive_first: when True, read the remote's banner before reading
                          the client (needed for banner-first protocols).
    """
    remote_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    remote_socket.connect((remote_host, remote_port))
    if receive_first:
        remote_buffer = receive_from(remote_socket)
        hexdump(remote_buffer)
        remote_buffer = response_handler(remote_buffer)
        if len(remote_buffer):
            print ("[<==] Envoie de %d bytes a localhost." %len(remote_buffer))
            client_socket.send(remote_buffer)
    while True :
        local_buffer = receive_from(client_socket)
        if len(local_buffer):
            # Fix: the original format string had no %d placeholder, so this
            # print raised TypeError on the first client payload.
            print("[==>] Reception de %d bytes de localhost." % len(local_buffer))
            hexdump(local_buffer)
            local_buffer = request_handler(local_buffer)
            remote_socket.send(local_buffer)
            print ("[==>] Envoi au remote.")
        remote_buffer = receive_from(remote_socket)
        if len(remote_buffer):
            print("[<==] Envoie de %d bytes a localhost." % len(remote_buffer))
            hexdump(remote_buffer)
            remote_buffer = response_handler(remote_buffer)
            client_socket.send(remote_buffer)
            print("[<==] Envoie a localhost")
        if not len(local_buffer):
            client_socket.close()
            remote_socket.close()
            print("[*] Plus de donnees fermeture de la connexion")
            break
def hexdump(src, length=16):
    """Print a canonical hex+ASCII dump of *src* (str or bytes).

    Fixes over the previous version: `result.appen` typo (AttributeError on
    any call), `"%o*X"` format (octal, not zero-padded hex — should have been
    `"%0*X"`), and the Python-2-only `unicode`/`xrange` names.
    """
    if isinstance(src, str):
        src = src.encode('latin-1', errors='replace')
    result = []
    for i in range(0, len(src), length):
        chunk = src[i:i + length]
        hexa = ' '.join('%02X' % b for b in chunk)
        # Printable ASCII (0x20..0x7e) shown verbatim, everything else as '.'
        text = ''.join(chr(b) if 0x20 <= b < 0x7f else '.' for b in chunk)
        result.append('%04X  %-*s  %s' % (i, length * 3, hexa, text))
    print('\n'.join(result))
def receive_from(connection):
    """Read from *connection* until EOF or a 2-second timeout; return the
    accumulated bytes (b'' if nothing arrived).

    Fix: the timeout was set on `connexion` — a name that does not exist
    (the parameter is `connection`) — so every call raised NameError.
    """
    buffer = b""
    connection.settimeout(2)
    try:
        while True:
            data = connection.recv(4096)
            if not data:
                break
            buffer += data
    except:
        # Timeouts (and any other socket error) simply end the read;
        # whatever was collected so far is returned.
        pass
    return buffer
def request_handler(buffer):
    # Hook for inspecting/mutating client->server traffic before it is
    # forwarded; identity by default.
    return buffer
def response_handler(buffer):
    # Hook for inspecting/mutating server->client traffic before it is
    # forwarded; identity by default.
    return buffer
def main():
    """Parse argv and start the proxy.

    Usage: ./Proxy.py <localhost> <localport> <remotehost> <remoteport>
           <receive_first>
    Exits with status 0 (printing usage) when the argument count is wrong.
    """
    if len(sys.argv[1:]) != 5:
        print ("Erreur dans le lancement du proxy, ./Proxy.py [localhost] [localport] [remotehost] [remoteport] [receive_first]")
        # Fix: usage example previously misspelled the script name ("Porxy").
        print ("Exemple : ./Proxy.py 127.0.0.1 9000 10.12.132.1 9000 True")
        sys.exit(0)
    local_host = sys.argv[1]
    local_port = int(sys.argv[2])
    remote_host = sys.argv[3]
    remote_port = int(sys.argv[4])
    # Any argument containing "True" enables banner-first mode.
    receive_first = "True" in sys.argv[5]
    server_loop(local_host, local_port, remote_host, remote_port, receive_first)
# Only run the proxy when executed as a script, not on import.
if __name__ == "__main__":
    main()
__author__ = 'Alex'
from ._mixin import FilterMultipleObjectMixin, UserFilterMultipleObjectMixin
from ._task import TaskListCreateView, TaskListActiveView, TaskListArchivedView, TaskDetailCompleteView
from django.views.generic import TemplateView
class IndexView(TemplateView):
    """Render the site landing page.

    The previous no-op dispatch() override (it only delegated to super())
    was removed; the inherited TemplateView.dispatch behaves identically.
    """
    template_name = 'index.html'
|
import numpy as np
from scipy import spatial
import matplotlib.pyplot as plt
from matplotlib.ticker import FormatStrFormatter
from SA import SimulatedAnnealingBase
def swap(individual):
    """Mutate *individual* in place by exchanging two randomly chosen
    entries; return the mutated array."""
    i, j = np.random.randint(0, individual.shape[0] - 1, 2)
    if i >= j:
        # Reorder and bump j so that i < j and the last index stays reachable.
        i, j = j, i + 1
    individual[i], individual[j] = individual[j], individual[i]
    return individual
def reverse(individual):
    """2-Opt mutation: flip a randomly chosen slice of the tour in place.

    Removes two random edges and reconnects them crossed.
    Karan Bhatia, "Genetic Algorithms and the Traveling Salesman Problem", 1994
    https://pdfs.semanticscholar.org/c5dd/3d8e97202f07f2e337a791c3bf81cd0bbb13.pdf
    """
    lo, hi = np.random.randint(0, individual.shape[0] - 1, 2)
    if lo >= hi:
        lo, hi = hi, lo + 1
    individual[lo:hi] = individual[lo:hi][::-1]
    return individual
def transpose(individual):
    """Cut the tour at three random points and swap the two middle segments.

    Returns a new array; the input is left unmodified.
    """
    # Draw cut points and shift them so that n1 < n2 < n3 strictly.
    n1, n2, n3 = sorted(np.random.randint(0, individual.shape[0] - 2, 3))
    n2 += 1
    n3 += 2
    return np.concatenate(
        (
            individual[:n1],          # head
            individual[n2:n3 + 1],    # second middle segment, moved forward
            individual[n1:n2],        # first middle segment, moved back
            individual[n3 + 1:],      # tail
        )
    )
class SA_TSP(SimulatedAnnealingBase):
    """Simulated annealing specialised for TSP permutations: logarithmic
    cooling plus three mutation operators (swap / reverse / transpose)."""

    def cool_down(self):
        # Logarithmic schedule: T(k) = T_max / (1 + ln(1 + k)).
        self.T = self.T_max / (1 + np.log(1 + self.iter_cycle))

    def get_new_x(self, x):
        """Return a mutated copy of tour *x* using a randomly picked operator."""
        candidate = x.copy()
        operators = (swap, reverse, transpose)
        return operators[np.random.randint(3)](candidate)
if __name__ == "__main__":
    # Demo: solve a 100-city instance with simulated annealing, then plot the
    # convergence curve and the resulting tour.
    points_coordinate = np.array(  # data set
        [
            [40.00, 50.00],
            [45.00, 68.00],
            [45.00, 70.00],
            [42.00, 66.00],
            [42.00, 68.00],
            [42.00, 65.00],
            [40.00, 69.00],
            [40.00, 66.00],
            [38.00, 68.00],
            [38.00, 70.00],
            [35.00, 66.00],
            [35.00, 69.00],
            [25.00, 85.00],
            [22.00, 75.00],
            [22.00, 85.00],
            [20.00, 80.00],
            [20.00, 85.00],
            [18.00, 75.00],
            [15.00, 75.00],
            [15.00, 80.00],
            [30.00, 50.00],
            [30.00, 52.00],
            [28.00, 52.00],
            [28.00, 55.00],
            [25.00, 50.00],
            [25.00, 52.00],
            [25.00, 55.00],
            [23.00, 52.00],
            [23.00, 55.00],
            [20.00, 50.00],
            [20.00, 55.00],
            [10.00, 35.00],
            [10.00, 40.00],
            [8.00, 40.00],
            [8.00, 45.00],
            [5.00, 35.00],
            [5.00, 45.00],
            [2.00, 40.00],
            [0.00, 40.00],
            [0.00, 45.00],
            [35.00, 30.00],
            [35.00, 32.00],
            [33.00, 32.00],
            [33.00, 35.00],
            [32.00, 30.00],
            [30.00, 30.00],
            [30.00, 32.00],
            [30.00, 35.00],
            [28.00, 30.00],
            [28.00, 35.00],
            [26.00, 32.00],
            [25.00, 30.00],
            [25.00, 35.00],
            [44.00, 5.00],
            [42.00, 10.00],
            [42.00, 15.00],
            [40.00, 5.00],
            [40.00, 15.00],
            [38.00, 5.00],
            [38.00, 15.00],
            [35.00, 5.00],
            [50.00, 30.00],
            [50.00, 35.00],
            [50.00, 40.00],
            [48.00, 30.00],
            [48.00, 40.00],
            [47.00, 35.00],
            [47.00, 40.00],
            [45.00, 30.00],
            [45.00, 35.00],
            [95.00, 30.00],
            [95.00, 35.00],
            [53.00, 30.00],
            [92.00, 30.00],
            [53.00, 35.00],
            [45.00, 65.00],
            [90.00, 35.00],
            [88.00, 30.00],
            [88.00, 35.00],
            [87.00, 30.00],
            [85.00, 25.00],
            [85.00, 35.00],
            [75.00, 55.00],
            [72.00, 55.00],
            [70.00, 58.00],
            [68.00, 60.00],
            [66.00, 55.00],
            [65.00, 55.00],
            [65.00, 60.00],
            [63.00, 58.00],
            [60.00, 55.00],
            [60.00, 60.00],
            [67.00, 85.00],
            [65.00, 85.00],
            [65.00, 82.00],
            [62.00, 80.00],
            [60.00, 80.00],
            [60.00, 85.00],
            [58.00, 75.00],
            [55.00, 80.00],
            [55.00, 85.00],
        ]
    )
    num_points = points_coordinate.shape[0]
    # Pairwise Euclidean distances between all cities.
    distance_matrix = spatial.distance.cdist(
        points_coordinate, points_coordinate, metric="euclidean"
    )
    distance_matrix = distance_matrix * 1000  # 1 degree of lat/lon ~ = 111000m
    def cal_total_distance(routine):
        """The objective function. input routine, return total distance.
        cal_total_distance(np.arange(num_points))
        """
        (num_points,) = routine.shape
        # Closed tour: the modulo wraps the last city back to the first.
        return sum(
            [
                distance_matrix[routine[i % num_points], routine[(i + 1) % num_points]]
                for i in range(num_points)
            ]
        )
    sa_tsp = SA_TSP(
        func=cal_total_distance,
        x0=range(num_points),
        T_max=1000,
        T_min=1e-9,
        L=100,
    )
    best_points, best_distance = sa_tsp.run()
    print(best_points, best_distance, cal_total_distance(best_points))
    fig, ax = plt.subplots(1, 2)
    # Append the start city so the plotted tour closes the loop.
    best_points_ = np.concatenate([best_points, [best_points[0]]])
    best_points_coordinate = points_coordinate[best_points_, :]
    ax[0].plot(sa_tsp.best_y_history)
    ax[0].set_xlabel("Iteration")
    ax[0].set_ylabel("Distance")
    ax[1].plot(
        best_points_coordinate[:, 0],
        best_points_coordinate[:, 1],
        marker="o",
        markerfacecolor="b",
        color="c",
        linestyle="-",
    )
    # ax[1].xaxis.set_major_formatter(FormatStrFormatter("%.3f"))
    # ax[1].yaxis.set_major_formatter(FormatStrFormatter("%.3f"))
    # ax[1].set_xlabel("Longitude")
    # ax[1].set_ylabel("Latitude")
    plt.show()
# pantherine.py
# Author: Quentin Goss
# My personal collection of python methods that I frequently use
import operator # sortclasses, sortdicts
import pickle # load, save
import os # lsdir
import itertools # lncount
import glob # mrf
import xml.etree.ElementTree as ET # readXML
from bisect import bisect_left # binsearch
# Converts a string of characters into a unique number
# @param string s = string of ASCII values
# @return int = concatenation of each character's ordinal value
def ascii2int(s):
    codes = [str(ord(ch)) for ch in s]
    return int(''.join(codes))
# Binary search
# @param [int] lst = Sorted list of integers to look through
# @param int el = Element to find in the list
# @return int = leftmost index of el in lst; raises ValueError if absent
def binsearch(lst,el):
    pos = bisect_left(lst, el)
    if pos < len(lst) and lst[pos] == el:
        return pos
    raise ValueError
# Binary search - Retrieves the range of indexes that match
# @param [int] lst = Sorted list of integers to look through
# @param int el = Element to find in the list
# @return (low,high) = low and high indices of el in the list
#                      or ValueError if not found
def binrangesearch(lst,el):
    # bisect_left already yields the LEFTMOST match, so no low-side scan
    # is needed.
    low = bisect_left(lst, el)
    if low == len(lst) or lst[low] != el:
        raise ValueError
    # Scan right. The previous guard (`high+1 == len(lst)-1`) stopped one
    # element short, truncating any run that ended at the last index.
    high = low
    while high + 1 < len(lst) and lst[high + 1] == el:
        high += 1
    return (low, high)
# Reads an XML file
# @param string _file = filename
# @return root = root element of the parsed tree
def readXML(_file):
    return ET.parse(_file).getroot()
# Reads a specified tag from an XML file
# @param string _file = filename
# @param string tag = name of XML tag
# @return [dict] = attribute dict of every element under root matching tag
def readXMLtag(_file,tag):
    matches = readXML(_file).findall(tag)
    return [m.attrib for m in matches]
# Checks if a given xml object has an attribute (empty values count)
# For use with import xml.etree.ElementTree as ET
# NOTE: name keeps the historical 'atrribute' spelling so callers don't break.
# @param ElementTree-xml-object xml = xml tag
# @param string attrib = attribute to check the existence of
# @return True if exists, False otherwise
def xml_has_atrribute(xml,attrib):
    # Membership test replaces the old len(xml.attrib[attrib]) probe; the
    # bare except is narrowed to the failures a non-element actually raises.
    try:
        return attrib in xml.attrib
    except (AttributeError, TypeError):
        return False
# Prints out the current % progress, overwriting the console line
# @param int n = Current progress
# @param int total = the highest progress achievable
# @param str msg = optional prefix for the update text
def update(n,total,msg=''):
    percent = float(n) / float(total) * 100
    print('%s%6.2f%%' % (msg, percent), end='\r')
# Sort a list of classes by an attribute. Use this for sorting classes
# @param [classes] lst = list to be sorted (sorted in place)
# @param string attr = attribute name to be used in the sort.
# @param bool reverse = Reverse the order of the sorted list
# @return [] = the sorted list (same object as lst)
def sortclasses(lst,attr,reverse=False):
    # Fix: previously returned list.sort()'s None, contradicting the
    # documented "return sorted list". Still sorts in place for old callers.
    lst.sort(key=operator.attrgetter(attr), reverse=reverse)
    return lst
# Sort a list of dictionaries by key
# @param [dict] lst = list to be sorted
# @param string key = key to be sorted by
# @param bool reverse = Reverse the order of the sorted list
# @return [] = new sorted list (input is left untouched)
def sortdicts(lst,key,reverse=False):
    by_key = operator.itemgetter(key)
    return sorted(lst, key=by_key, reverse=reverse)
# Filters a list of dictionaries given a value and key
# @param [dict] lst = list of dictionaries to filter
# @param string key = key to filter by
# @param string or int val = Value to filter by
# @param bool no_sort = If True, skip quantification/sorting (list must
#                       already carry the 'sortID' key from quantifydicts)
# @param bool invert = If True, return everything but the filtered items
# @return [dict] = A list of dictionaries that match the filter
#                  (binrangesearch raises ValueError when nothing matches)
def filterdicts(lst,key,val,no_sort=False,invert=False):
    # Validate Input
    if not objname(val) in {'int','str','float'}:
        raise TypeError
    # Pre-sorted input must already be quantified (have 'sortID').
    if no_sort:
        try:
            type(lst[0]['sortID'])
        except:
            raise KeyError
    # Quantify the value the same way quantifydicts() does, so the binary
    # search compares like with like.
    if not objname(val) in {'int','float'}:
        val = ascii2int(val)
    # Quantify, sort and search
    if not no_sort:
        lst = quantifydicts(lst,key)
    sortIDs = [d['sortID'] for d in lst]
    low,high = binrangesearch(sortIDs,val)
    # Invert: return everything OUTSIDE the matched [low, high] range
    if invert:
        if low == high:
            # Single match: drop just that element.
            if high == 0:
                return lst[1:]
            elif high == len(lst)-1:
                return lst[:-1]
            else:
                before = lst[:high]
                after = lst[high+1:]
                before.extend(after)
                return before
        else:
            # Run of matches: stitch together everything before and after it.
            if low == 0:
                before = []
            else:
                before = lst[:low]
            if high == len(lst)-1:
                after = []
            else:
                after = lst[high+1:]
            before.extend(after)
            return before
    # Return the matching slice
    if low == high:
        return [lst[high]]
    else:
        return lst[low:high+1]
# <!> UNFINISHED <!>
# Performs filterdicts on a list of similar values
# @param [dict] lst = list of dictionaries to filter
# @param string key = key to filter by
# @param [string or int] vals = List of values to filter by
# @param bool no_sort = If True, skip the quantification and sorting
# @return [[dict]] = One result list per value (None where nothing matched)
def batchfilterdicts(lst,key,vals,no_sort=False):
    # Validate
    if not objname(vals) in {'list','tuple','set'}:
        raise TypeError
    if not no_sort:
        lst = quantifydicts(lst,key)
    # Look for the first item; None marks "no match" instead of raising.
    try:
        filtered = [filterdicts(lst,key,vals[0],no_sort=True)]
    except ValueError:
        filtered = [None]
    # If there is 1 item in the list, then return here
    # NOTE(review): this likely intends len(vals) == 1 — as written a
    # single-dict *lst* short-circuits even when more vals remain; confirm.
    if len(lst) == 1:
        return filtered
    # Try the rest of the items
    for val in vals[1:]:
        try:
            filtered.append(filterdicts(lst,key,val,no_sort=True))
        except ValueError:
            filtered.append(None)
            continue
    return filtered
# Quantifies and sorts a list of dictionaries so they may be used in a
# binary search
# @param [dict] lst = list to be quantified and sorted
# @param string key = key to derive the sort value from
# @return [dict] with a new key ['sortID'] (numeric), sorted by it
def quantifydicts(lst,key):
    for entry in lst:
        raw = entry[key]
        if objname(raw) in {'int','float'}:
            entry['sortID'] = raw
        else:
            entry['sortID'] = ascii2int(raw)
    return sortdicts(lst,'sortID')
# Save binary data to a file
# @param string _file = filename
# @param data = blob of data to be pickled
def save(_file,data):
    with open(_file,'wb') as fh:
        pickle.dump(data, fh)
# Load binary data from a file
# <!> Must know what the data looks like to receive it. <!>
# @param string _file = filename
# @return = Blob of unpickled data from the file.
def load(_file):
    with open(_file,'rb') as fh:
        blob = pickle.load(fh)
    return blob
# Get the number of lines in a file (counts b'\n' bytes in 1 MiB raw reads)
# @param _file = filename
# @return int = number of lines in the file
def lncount(_file):
    total = 0
    with open(_file, 'rb') as fh:
        while True:
            chunk = fh.raw.read(1024 * 1024)
            if not chunk:
                break
            total += chunk.count(b'\n')
    return total
# Returns the class name of an object
# @param obj = object to get name of
# @return str name = name of the object's type
def objname(obj):
    cls = type(obj)
    return cls.__name__
# Reads a csv file. Creates a list of dictionaries from the contents
# using the first line as the keys
# @param string _file = filename
# @param bool guess_type = If true, guess what type the data is
# @return [dict] = A list of dictionaries
# NOTE(review): naive split(',') — quoted fields containing commas are not
# honoured; rows with FEWER fields than keys produce short dicts. Confirm
# these are acceptable for the inputs this is used with.
def readCSV(_file,guess_type=False):
    with open(_file,'r') as f:
        ln = 0; keys = []; lst = []
        for line in f:
            ln += 1
            # Parse data from the line
            data = line.strip().split(',')
            # The first line holds the keys of the dictionary objects
            if ln == 1:
                keys = data
                continue
            # Guess the type if true
            if guess_type:
                for i in range(len(data)):
                    # float when a decimal point is present
                    if '.' in data[i]:
                        try: data[i] = float(data[i])
                        except ValueError: pass
                    # int otherwise
                    else:
                        try: data[i] = int(data[i])
                        except ValueError: pass
                    # Otherwise it stays a str
                    continue
            # If the row has more fields than there are keys, the overflow
            # is collapsed into the last value as a list.
            nkeys = len(keys)
            if len(data) > nkeys:
                data[nkeys-1] = data[nkeys-1:]
                data = data[:nkeys]
            # Create the dictionary object and add to a list
            lst.append(list2dict(keys,data))
            continue
        #
        return lst
# Combine two lists to create a dictionary
# @param [str] keys = list of keys
# @param [] vals = list of values
# @return dict pairing keys with vals positionally
def list2dict(keys,vals):
    return {k: v for k, v in zip(keys, vals)}
# Casts every object in the list to specified type
# @param [str or int or float] lst = list of values; a bare scalar is simply
#        cast and returned.
# @param string _type = What the values in the list should be cast to
# @return [] = list (or scalar) cast to the specified type;
#              raises ValueError for unsupported types/inputs
def castlist(lst,_type):
    casters = {'str': str, 'string': str, 'int': int, 'integer': int,
               'float': float}
    caster = casters.get(_type.lower())
    if caster is not None:
        # Containers are cast element-wise; scalars are cast directly.
        if objname(lst) in {'list','tuple','set'}:
            return [caster(item) for item in lst]
        if objname(lst) in {'str','float','int'}:
            return caster(lst)
    raise ValueError
# Casts the values of all the dictionaries in the list that correspond
# to the given key
# @param [dict] lst = list of dictionaries that will be manipulated
# @param string key = key whose values will be cast
# @param string _type = type to cast to
# @return [dict] = the same list with cast values (unchanged when _type is
#                  unrecognised); raises ValueError for non-list input
def castdicts(lst,key,_type):
    casters = {'str': str, 'string': str, 'int': int, 'integer': int,
               'float': float}
    caster = casters.get(_type.lower())
    if objname(lst) in {'list','tuple','set'}:
        if caster is not None:
            for entry in lst:
                entry[key] = caster(entry[key])
        return lst
    raise ValueError
# Retrieves a list of filenames from a specified directory
# @param string _dir = directory path
# @return [str] = filenames directly inside _dir ([] if it doesn't exist)
def lsdir(_dir):
    # os.walk yields nothing for a missing dir; first iteration is top level.
    for _, _, filenames in os.walk(_dir):
        return list(filenames)
    return []
# Retrieves sub-directory names within a directory
# @param string _dir = directory path
# @return [str] = sub-directory names directly inside _dir ([] if missing)
def lssubdir(_dir):
    for _, dirnames, _ in os.walk(_dir):
        return list(dirnames)
    return []
# Delete all files in a directory and remove it
# (fails if the directory still contains sub-directories)
# @param string _dir = directory path; NotADirectoryError if it doesn't exist
def deldir(_dir):
    if not os.path.exists(_dir):
        raise NotADirectoryError
    for name in lsdir(_dir):
        os.remove('%s/%s' % (_dir, name))
    os.removedirs(_dir)
# Most Recent File
# @param string _dir = Directory
# @param regex ext = extension (i.e. r'*.json')
# @param bool lrf = Get the oldest file instead.
# @return string = the name of the most recent file in a directory
def mrf(_dir,ext=r'*.*',lrf=False):
    matches = glob.glob(os.path.join(_dir, ext))
    # Fix: the old code sorted ASCENDING by ctime for lrf=False, so the
    # default returned the OLDEST file; the direction is now inverted so the
    # behaviour matches the name and the lrf docstring.
    matches.sort(key=os.path.getctime, reverse=not lrf)
    return matches[0]
# <!> NOT TESTED <!>
# Attempts to cast a string to a float or an int
# (float when a '.' is present, int otherwise; unchanged on failure)
# @param string _str = A string
# @return _str or float(_str) or int(_str)
def caststr(_str):
    target = float if '.' in _str else int
    try:
        return target(_str)
    except ValueError:
        return _str
|
import pytest
import numpy as np
from gradgpad.foundations.metrics.hter import hter
scores = np.array([0.0, 0.2, 0.2, 0.5, 0.6])
labels = np.array([1, 2, 2, 0, 0])
expected_hter = 0.66
th_eer_dev = 0.25
@pytest.mark.unit
def test_should_throw_an_exception_when_input_is_not_np_array():
    """hter must reject a plain Python list with TypeError."""
    with pytest.raises(TypeError):
        hter(scores.tolist(), labels, th_eer_dev)
@pytest.mark.unit
def test_should_compute_hter_correctly():
    """HTER at the dev-set EER threshold should be ~0.66.

    Fix: pytest.approx's second positional argument is the RELATIVE
    tolerance, so approx(0.66, 0.66) accepted any value within 66% of the
    target — the assertion was nearly vacuous. A tight absolute tolerance
    is used instead, with the conventional `actual == approx(expected)` form.
    """
    hter_value = hter(scores, labels, th_eer_dev)
    assert hter_value == pytest.approx(expected_hter, abs=0.01)
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
'''
Generate a report for a list of project on XNAT
@author: Benjamin Yvernault, Electrical Engineering, Vanderbilt University
'''
from datetime import datetime
import logging
import os
from dax import XnatUtils
from dax.errors import XnatToolsError, XnatToolsUserError
import dax.xnat_tools_utils as utils
__copyright__ = 'Copyright 2013 Vanderbilt University. All Rights Reserved'
__exe__ = os.path.basename(__file__)
__author__ = 'byvernault'
__purpose__ = 'Print a detailed report from XNAT projects.'
__logger__ = utils.setup_info_logger(__exe__)
__description__ = """What is the script doing :
* Create a report about Xnat projects.
Examples:
*Report of a project:
Xnatreport -p PID
*Report with a specific format:
Xnatreport -p PID --format object_type,session_id,session_label,age
*print the format available:
Xnatreport --printformat
*Save report in a csv:
Xnatreport -p PID -c report.csv"""
VARIABLES_LIST = {
'commun': ['object_type'],
'project': ['project_id'],
'subject': ['subject_id', 'subject_label', 'handedness', 'gender', 'yob'],
'session': ['session_id', 'session_type', 'session_label', 'age',
'last_modified', 'last_updated'],
'scan': ['scan_id', 'type', 'series_description', 'quality', 'note',
'frames'],
'assessor': ['assessor_id', 'assessor_label', 'assessor_URI', 'proctype',
'procstatus', 'qcstatus', 'version', 'jobid', 'memused',
'walltimeused', 'jobnode', 'jobstartdate'],
'resource': ['resource']}
QUOTE_FIELDS = ['type', 'series_description', 'quality', 'qcstatus', 'note']
def display(row):
    """
    Validate *row* and emit it as one CSV line through the module logger.

    :param row: list of values to display
    :raises XnatToolsError: when row is not a list or contains None
    """
    if not isinstance(row, list):
        raise XnatToolsError('variable row is not a list instead %s' % type(row))
    if None in row:
        raise XnatToolsError('a value is None in row: %s' % row)
    __logger__.info(','.join(row))
def is_under_sessions(header):
    """
    True when *header* requests scan- or assessor-level columns
    (or is the default header, which includes both).

    :param header: header for report
    """
    scan_cols = [col for col in VARIABLES_LIST['scan'] if col in header]
    assr_cols = [col for col in VARIABLES_LIST['assessor'] if col in header]
    return scan_cols or assr_cols or is_default(header)
def is_default(header):
    """
    Function to return True if *header* is the default csv header
    (utils.CSV_HEADER minus its trailing column).
    :param header: header for report
    """
    return header == utils.CSV_HEADER[:-1]
def quote(value):
    """
    Wrap *value* in double quotes when it contains a comma (CSV safety).

    :param value: the string
    :return: quoted string, or the original when no comma is present
    """
    return '"%s"' % value if ',' in value else value
def report(xnat, projects, rformat=None):
    """
    Main function to report: prints the CSV header line, then dispatches each
    project to the narrowest report level its requested columns require.
    :param xnat: pyxnat interface
    :param projects: list of projects
    :param rformat: comma-separated column names (defaults to CSV_HEADER)
    :return: None
    """
    if not rformat:
        rformat = ','.join(utils.CSV_HEADER[:-1])
    __logger__.info(rformat)
    for project in projects:
        header = rformat.split(',')
        # Most specific level wins: scan/assessor > session > subject > project.
        if is_under_sessions(header):
            report_under_sessions(xnat, project, header, is_default(header))
        elif [x for x in VARIABLES_LIST['session'] if x in header]:
            report_sessions(xnat, project, header)
        elif [x for x in VARIABLES_LIST['subject'] if x in header]:
            report_subjects(xnat, project, header)
        else:
            display(get_row(xnat, {'project_id': project}, header))
def report_subjects(xnat, project, header):
    """
    Print one report row per subject of *project* following *header*.
    :param xnat: pyxnat interface
    :param project: project ID on XNAT
    :param header: header to display
    :return: None
    """
    for subject in xnat.get_subjects(project):
        display(get_row(xnat, subject, header))
def report_sessions(xnat, project, header):
    """
    Print one report row per session of *project*, ordered by session label.
    :param xnat: pyxnat interface
    :param project: project ID on XNAT
    :param header: header to display
    :return: None
    """
    sessions = xnat.get_sessions(project)
    ordered = sorted(sessions, key=lambda s: s['session_label'])
    for session in ordered:
        display(get_row(xnat, session, header))
def report_under_sessions(xnat, project, header, default=False):
    """
    Print report rows for scan- and/or assessor-level objects of *project*,
    ordered by subject label.
    :param xnat: pyxnat interface
    :param project: project ID on XNAT
    :param header: header to display
    :param default: when True (default CSV header) include both scans and
                    assessors; otherwise the header decides which to fetch
    :return: None
    """
    objs_list = list()
    if default:
        scans_list = xnat.get_project_scans(project)
        assrs_list = xnat.list_project_assessors(project)
        objs_list = scans_list + assrs_list
    elif [x for x in VARIABLES_LIST['scan'] if x in header]:
        objs_list = xnat.get_project_scans(project)
    elif [x for x in VARIABLES_LIST['assessor'] if x in header]:
        objs_list = xnat.list_project_assessors(project)
    if not objs_list:
        # Reaching here means the header matched neither level (or the
        # project is empty) — treated as a programming error.
        err = 'objs_list is empty. There is an issue with the header: %s'
        raise XnatToolsError(err % header)
    for obj in sorted(objs_list, key=lambda k: k['subject_label']):
        display(get_row(xnat, obj, header))
def get_row(xnat, obj_dict, header):
    """
    Build the report row (list of values) for *obj_dict* following *header*.

    :param xnat: pyxnat interface
    :param obj_dict: dictionary containing information on object from XNAT
    :param header: ordered list of column names to emit
    :return: list of values, one per header column
    """
    # Hoisted out of the loop: scan-ness is a property of the object, not of
    # each column (the old code re-tested `'scan_id' in list(keys())` per field).
    is_scan = 'scan_id' in obj_dict
    row = list()
    for field in header:
        _field = get_field(field, is_scan)
        if _field == 'object_type':
            row.append(get_object_type(obj_dict))
        elif _field == 'resource':
            row.append(get_resources(xnat, obj_dict))
        elif _field in QUOTE_FIELDS:
            # NOTE(review): quote() raises if the field is absent (None) —
            # presumably upstream guarantees these keys exist; confirm before
            # hardening.
            row.append(quote(obj_dict.get(_field)))
        else:
            row.append(obj_dict.get(_field))
    return row
def get_field(field, is_scan):
    """
    Map a generic 'as_*' header name onto the scan- or assessor-specific
    obj_dict key; anything else passes through unchanged.
    :param field: header field
    :param is_scan: is the field from scan object
    :return: string describing the key in obj_dict
    """
    alias = {
        'as_label': ('scan_id', 'assessor_label'),
        'as_type': ('type', 'proctype'),
        'as_description': ('series_description', 'procstatus'),
        'as_quality': ('quality', 'qcstatus'),
    }
    if field in alias:
        scan_key, assessor_key = alias[field]
        return scan_key if is_scan else assessor_key
    return field
def get_object_type(obj_dict):
    """
    Classify *obj_dict* by the most specific identifying key it carries.
    :param obj_dict: dictionary containing information on object from XNAT
    :return: one of 'scan', 'assessor', 'session', 'subject', 'project'
    """
    markers = (
        ('scan_id', 'scan'),
        ('assessor_label', 'assessor'),
        ('session_label', 'session'),
        ('subject_label', 'subject'),
    )
    for key, kind in markers:
        if key in obj_dict:
            return kind
    return 'project'
def get_resources(xnat, obj_dict):
    """
    Build the '/'-joined resource-label string for the object.
    :param xnat: pyxnat interface
    :param obj_dict: dictionary containing information on object from XNAT
    :return: string describing the resources ('' when none apply)
    """
    _res = ''
    _okeys = list(obj_dict.keys())
    # Scans/assessors already carry their resources; other levels need a query.
    if 'scan_id' in _okeys or 'assessor_label' in _okeys:
        _res = '/'.join(obj_dict['resources'])
    elif 'session_label' in _okeys:
        res_list = xnat.get_session_resources(
            obj_dict['project_id'], obj_dict['subject_label'],
            obj_dict['session_label'])
        _res = '/'.join([r['label'] for r in res_list])
    elif 'subject_label' in _okeys:
        res_list = xnat.get_subject_resources(obj_dict['project_id'],
                                              obj_dict['subject_label'])
        _res = '/'.join([r['label'] for r in res_list])
    elif 'project_id' in _okeys:
        res_list = xnat.get_project_resources(obj_dict['project_id'])
        _res = '/'.join([r['label'] for r in res_list])
    return str(_res)
def print_format():
    """
    Log the variable names available for the --format option.
    :return: None
    """
    __logger__.info('INFO: Printing the variables available: ')
    # Fix: the old code appended to utils.ORDER in place, mutating the shared
    # module-level list — a second call kept growing it with duplicate
    # 'resource' entries. Build a fresh local list instead.
    order_header = list(utils.ORDER) + ['resource']
    for key in order_header:
        __logger__.info('%s variables:' % key)
        for name in VARIABLES_LIST[key]:
            __logger__.info(' * %*s ' % (-30, name))
def run_xnat_report(args):
    """
    Main function for xnat report: validates the CLI arguments, optionally
    routes output to a csv file, then connects to XNAT and runs report().
    :param args: arguments parsed by argparse (see add_to_parser)
    :raises XnatToolsUserError: on invalid --format, missing -p, or a csv
        target directory / project that does not exist
    """
    _format = args.format
    # Tolerate a trailing comma in --format.
    if _format and _format[-1] == ',':
        _format = _format[:-1]
    var_list = [_i for _list in list(VARIABLES_LIST.values()) for _i in _list]
    if _format and [x for x in _format.split(',') if x not in var_list]:
        err = '--format has some variables that does not exist on XNAT. \
Please display the variables available on XNAT by using --printformat.'
        raise XnatToolsUserError(__exe__, err)
    if not args.print_format:
        if not args.projects:
            err = 'argument -p/--project is required.'
            raise XnatToolsUserError(__exe__, err)
        if args.csv_file:
            # The csv file itself may not exist yet, but its folder must.
            folder = os.path.dirname(os.path.abspath(args.csv_file))
            if not os.path.exists(folder):
                err = 'argument -c/--csvfile set with a folder that does not \
exist to create the file: %s not found.' % folder
                raise XnatToolsUserError(__exe__, err)
    if args.csv_file:
        # Mirror all report output into the csv file via a logging handler.
        handler = logging.FileHandler(args.csv_file, 'w')
        __logger__.addHandler(handler)
    utils.print_separators()
    if args.print_format:
        print_format()
        if _format and _format == 'object_type':
            utils.print_separators()
            __logger__.info('object_type')
            __logger__.info('project')
            __logger__.info('subject')
            __logger__.info('session')
            __logger__.info('scan')
            __logger__.info('assessor')
    elif args.projects:
        utils.print_separators()
        projects = args.projects.split(',')
        if _format and _format == 'resource':
            # 'resource' alone is not a usable report; prepend context columns.
            print('WARNING: you gave only the resource to --format \
option. Adding object_type and project_id.')
            _format = 'object_type,project_id,resource'
        if args.host:
            host = args.host
        else:
            host = os.environ['XNAT_HOST']
        user = args.username
        with XnatUtils.get_interface(host=host, user=user) as xnat:
            print('INFO: connection to xnat <%s>:' % (host))
            print("Report for the following project(s):")
            print('------------------------------------')
            for proj in projects:
                # check if the project exists:
                proj_obj = xnat.select('/project/{}'.format(proj))
                if not proj_obj.exists():
                    err = 'Project "%s" given to -p/--project not found on \
XNAT.'
                    raise XnatToolsUserError(__exe__, err % proj)
                print(' - %s' % proj)
            print('------------------------------------')
            print('WARNING: extracting information from XNAT for a full \
project might take some time. Please be patient.\n')
            # Writing report
            report(xnat, projects, _format)
    utils.print_end(__exe__)
def add_to_parser(parser):
    """
    Register Xnatreport's command line options on *parser*.
    :param parser: parser object
    :return: parser object with new arguments
    """
    parser.add_argument("-p", "--project", dest="projects", default=None,
                        help="List of project ID on Xnat separate by a coma")
    parser.add_argument("-c", "--csvfile", dest="csv_file", default=None,
                        help="csv fullpath where to save the report.")
    format_help = ("Header for the csv. format: variables name separated by "
                   "comma.")
    parser.add_argument("--format", dest="format", default=None,
                        help=format_help)
    printformat_help = ("Print available variables names for the option "
                        "--format.")
    parser.add_argument("--printformat", dest="print_format",
                        help=printformat_help, action="store_true")
    return parser
if __name__ == '__main__':
    # Entry point: the shared xnat_tools runner builds the parser via
    # add_to_parser and then invokes run_xnat_report with the parsed args.
    utils.run_tool(__exe__, __description__, add_to_parser, __purpose__,
                   run_xnat_report)
|
import click
import logging
import multiprocessing
import signal
import sys
import yaml
import replicategithub
def set_up_logging(level=logging.WARNING, library_level=logging.WARNING):
    """
    Configure logging to stdout for this application.

    :param level: level for the root logger (application messages)
    :param library_level: level for third-party loggers (github, multiprocessing)
    """
    logging.captureWarnings(True)
    stream_handler = logging.StreamHandler(stream=sys.stdout)
    # Use colored output when colorlog is available; fall back to plain text.
    try:
        import colorlog
    except ImportError:
        stream_handler.setFormatter(
            logging.Formatter('%(name)s[%(processName)s]: %(message)s'))
    else:
        stream_handler.setFormatter(colorlog.ColoredFormatter(
            '%(log_color)s%(name)s[%(processName)s]: %(message)s'))
    root_logger = logging.getLogger()
    root_logger.setLevel(level)
    root_logger.addHandler(stream_handler)
    # Library loggers are kept at their own (usually quieter) level.
    logging.getLogger("github").setLevel(library_level)
    multiprocessing.log_to_stderr().setLevel(library_level)
class Config(dict):
    """Dict-backed configuration that lazily creates one shared MirrorManager."""

    def __init__(self, *args, **kwargs):
        # No manager until get_manager() is first called.
        self.manager = None
        dict.__init__(self, *args, **kwargs)

    def get_manager(self):
        """Return the shared MirrorManager, creating it on first use."""
        if not self.manager:
            self.manager = replicategithub.MirrorManager(
                path=self["mirror_path"],
                user=self["github_user"],
                token=self["github_token"],
                worker_count=self["workers"])
        return self.manager

    def stop(self):
        """Shut down the manager's workers, if a manager was ever created."""
        if self.manager:
            self.manager.stop()
# Decorator that injects the shared Config object into click subcommands.
pass_config = click.make_pass_decorator(Config)
def main():
    """
    Entry point: run the CLI, always stopping worker processes, and map the
    possible failure modes onto process exit codes.
    """
    config = Config()
    try:
        try:
            cli(standalone_mode=False, obj=config)
        finally:
            # Workers must be stopped even when the CLI raised.
            config.stop()
    except click.ClickException as error:
        error.show()
        sys.exit(error.exit_code)
    except click.Abort as error:
        sys.exit(error)
    except KeyboardInterrupt:
        # Click transforms this into click.Abort, but it doesn't get the chance
        # if it's raised outside of cli() (i.e. in config.stop()).
        sys.exit(128 + signal.SIGINT)
    except replicategithub.mirror.MirrorException as error:
        sys.exit("Error: {}".format(error))
@click.group()
@click.option('--workers', '-j', type=int, default=None, metavar="COUNT",
    help="Number of git subprocesses to use (default 1).")
@click.option('--verbose', '-v', default=False, is_flag=True)
@click.option('--debug', '-d', default=False, is_flag=True)
@click.option('--config-file', '-c', type=click.File('rt'),
    default="/etc/replicate-github.yaml")
@click.version_option()
@pass_config
@click.pass_context
def cli(context, config, workers, verbose, debug, config_file):
    """
    Mirror GitHub repositories.
    \b
    * Mirror arbitrary GitHub repositories.
    * Mirror all GitHub repositories under an organization.
    * Serve webhook endpoints to update mirrors automatically.
    """
    # Load configuration from the YAML file and expose it to click as the
    # default map for subcommand options.
    config.update(yaml.safe_load(config_file.read()))
    config_file.close()
    context.default_map = config
    # Command-line --workers overrides the config file; fall back to 1.
    if workers is not None:
        config['workers'] = workers
    if 'workers' not in config:
        config['workers'] = 1
    if debug:
        level = logging.DEBUG
        library_level = logging.INFO
    elif verbose:
        level = logging.INFO
        library_level = logging.WARNING
    else:
        level = logging.WARNING
        library_level = logging.WARNING
    # Bug fix: library_level was computed above but never passed, so library
    # loggers always stayed at the default WARNING even with --debug.
    set_up_logging(level, library_level)
@cli.command()
@click.argument("matches", metavar="ORG/REPO [ORG/REPO ...]", required=True, nargs=-1)
@pass_config
def mirror(config, matches):
    """ Create or update repo mirrors. """
    for match in matches:
        owner_repo = match.split("/")
        # Reject anything that is not exactly "owner/repo" or "owner/*";
        # a leading "*" owner is a likely mistake and gets the same message.
        if len(owner_repo) != 2 or owner_repo[0] == "*":
            raise click.ClickException(
                "'{}' does not match owner/repo or owner/*".format(match))
        if owner_repo[1] == "*":
            config.get_manager().mirror_org(owner_repo[0])
        else:
            config.get_manager().mirror_repo(match)
@cli.command()
@click.option('--older-than', type=int, default=24*60*60, metavar="SECONDS",
    help="How old a mirror has to be before it's updated (default 24*60*60).")
@pass_config
def freshen(config, older_than):
    """ Update oldest repos in mirror. """
    log = logging.getLogger("freshen")
    log.info("Freshening repos older than {} seconds".format(older_than))
    config.get_manager().update_old_repos(older_than)
@cli.command(name="sync-org")
@click.argument("orgs", metavar="ORG [ORG ...]", required=True, nargs=-1)
@pass_config
def sync_org(config, orgs):
"""
Add and delete mirrors to match GitHub.
This does not update mirrors that haven't been added or deleted. Use the
mirror command, or combine this with freshen.
"""
logger = logging.getLogger("sync-org")
for org in orgs:
logger.info("Syncing {} organization".format(org))
config.get_manager().sync_org(org)
@cli.command()
@click.option('--port', '-p', type=int, default=8080, metavar="PORT",
    help="Port to listen on (default 8080).")
@click.option('--address', default="localhost", metavar="ADDRESS",
    help="Address to listen on (default localhost).")
@click.option('--secret', metavar="STRING",
    help="Secret to authenticate Github.")
@click.option('--update-org', metavar="ORG", multiple=True,
    help="Organizations to keep in sync (default none).")
@click.option('--update-older-than', type=int, default=24*60*60, metavar="SECONDS",
    help="Ensure that all mirrors get updated at least this frequently"
    " (default 24*60*60). 0 means to only update on events.")
@click.option('--periodic-interval', type=int, default=15*60, metavar="SECONDS",
    help="How frequently to run periodic tasks (default 15*60).")
@click.option('--payload-log', type=click.File('at'), metavar="FILE",
    help="Log file for webhook payloads for debugging.")
@pass_config
def serve(config, port, address, secret, update_org, update_older_than,
        periodic_interval, payload_log):
    """
    Serve webhook endpoint for GitHub events.
    This will accept any event from GitHub with the specified secret, even if
    the event is for a repo that is not already mirrored. In other words, this
    will mirror any repo that it gets an event for, even if it doesn't already
    know about it.
    There are two options that are used to ensure updates are applied even if
    events are lost for some reason:
    \b
    --update-older-than SECONDS
        Ensure that every mirror is checked for updates at least every SECONDS.
        By default this is set to a day (86400 seconds).
    \b
    --update-org ORG
        Organizations to periodically check for new or deleted repos. May be
        specified multiple times; no organizations are synced by default.
    Neither of these options should be necessary if the webhook is set up for
    all organizations being tracked; they're an extra layer of safety.
    Both these checks run every interval specified by --periodic-interval.
    """
    manager = config.get_manager()
    # Hand everything off to the webhook server; this call blocks until
    # the server shuts down.
    replicategithub.webhook.serve(
        manager,
        secret=secret,
        listen=(address, port),
        periodic_interval=periodic_interval,
        update_orgs=update_org,
        update_older_than=update_older_than,
        payload_log=payload_log)
|
"""
Author: Nathan Lim
EDITING LOG:
May 3 - write incoming data from arduino to text file for matlab interpretation, improved GUI aesthetics, display label for heart rate
May 4 - use Method of Backward Difference (first order) to find the heart rates, can display MOBD data,
"""
from Tkinter import *
#import tkMessageBox
#from multiprocessing import Process, freeze_support #test, to check out
import serial #pySerial, to communicate with Arduino
import serial.tools.list_ports #to list the COM ports
import FileDialog #for pyinstaller
import matplotlib
import matplotlib.pyplot as plt
import matplotlib.animation as animation
import numpy as np
from matplotlib.backends.backend_tkagg import FigureCanvasTkAgg
from matplotlib.figure import Figure
# NOTE(review): matplotlib.use() is called AFTER matplotlib.pyplot has been
# imported above; backend selection normally must happen before the pyplot
# import to take effect — TODO confirm the intended backend is actually used.
matplotlib.use('TkAgg') #for placing matplotlib animation in tkinter
from math import ceil
"""
The HeartMonitor Class communicates with the Arduino.
"""
class HeartMonitor:
    """
    Serial-port link to the Arduino heart-rate hardware.

    Protocol: writing '1' asks the Arduino to start streaming samples,
    writing '0' asks it to stop; samples are read one line at a time.
    """

    def __init__(self, to, baudRate):
        """
        :param to: serial-port timeout in seconds
        :param baudRate: serial baud rate
        """
        self.ser = None            # serial.Serial object once init() succeeds
        self.connection = False    # True after a successful init()
        self.timeout = to
        self.baud = baudRate
        self.transmitting = False  # True while the Arduino streams data

    def __del__(self):
        # Release the serial port when the object is collected.
        self.closeSerial()

    def stopTrans(self):
        """
        Send the stop-transmission signal ('0') to the Arduino.
        @return True if successfully stopped
        @return False if could not be stopped
        """
        if self.ser is not None and self.ser.isOpen():
            self.ser.write('0')
            self.transmitting = False
            return True
        self.transmitting = False  # port missing/closed: nothing is streaming
        return False

    def closeSerial(self):
        """Stop any ongoing transmission and close the serial port."""
        if self.connection:
            self.stopTrans()
            self.ser.close()

    def init(self, comPort):
        """
        Try to create a new serial object at the given COM port.
        @return True if successfully initialized or already open
        @return False if failed to initialize
        """
        if self.ser is not None and self.ser.isOpen():
            return True
        try:
            self.ser = serial.Serial(comPort, self.baud, timeout=self.timeout)
            self.connection = True
            return True
        # fixed: catch Exception instead of a bare except clause, which also
        # swallowed KeyboardInterrupt/SystemExit.
        except Exception:
            self.ser = None
            self.connection = False
            return False

    def beginTrans(self):
        """
        Send the start-transmission signal ('1') to the Arduino.
        @return True if successfully sent a start signal
        @return False if could not send a start signal
        """
        if self.ser is not None and self.ser.isOpen():
            self.ser.write("1")
            self.transmitting = True
            return True
        self.transmitting = False
        return False

    def readSerial(self):
        """
        Read one line from the serial port.
        @return string of data from the Arduino
        @return None if there is no usable serial connection
        @return "" if no data is waiting (timeout)
        """
        if self.ser is None or not self.connection:
            return None
        if self.ser.inWaiting() > 0:
            return self.ser.readline()
        return ""
'''
The GUI for the heart monitor class.
'''
class HeartMonitorGUI:
    """
    Tkinter front-end for the HeartMonitor: control buttons, a COM-port menu,
    a live matplotlib plot of the signal, and a heart-rate readout.
    """

    def __init__(self):
        #create a heart monitor object
        self.hm = HeartMonitor(1.0, 9600)
        #set up GUI
        self.top = Tk()
        self.top.wm_title("Heart Rate Monitor GUI")
        self.top.resizable(width=FALSE, height=FALSE)
        #labelString will store the message to the user on the current status
        self.labelString = StringVar()
        self.labelString.set("\t\t ")
        buttonFrame = Frame(self.top, relief=RIDGE, bd=1, padx=10, pady=10)
        b = Button(buttonFrame, text="Initialize", command=self.initButton)
        b.grid(ipadx=5, ipady=3, row=1, column=0, pady=5)
        beginTransB = Button(buttonFrame, text="Begin Reading", command=self.beginButton)
        beginTransB.grid(ipadx=5, ipady=3, row=2, column=0, pady=5)
        stopTransB = Button(buttonFrame, text="Stop Reading", command=self.stopButton)
        stopTransB.grid(ipadx=5, ipady=3, row=3, column=0, pady=5)
        quitButton = Button(buttonFrame, text="Exit", command=self._quit)
        quitButton.grid(ipadx=5, ipady=3, row=4, column=0, pady=5)
        l = Label(buttonFrame, textvariable=self.labelString)
        l.grid(ipadx=5, ipady=15, row=0, column=0)
        buttonFrame.grid(row=3, column=0, rowspan=10, padx=10)
        heartRateFrame = Frame(self.top, relief=SUNKEN, bd=1, padx=10, pady=10, width=100)
        self.heartRateString = StringVar()
        self.heartRateString.set("BPM")
        #True: show raw signal and BPM; False: show MOBD trace and Hz
        self.bpm = True
        hl = Label(heartRateFrame, textvariable=self.heartRateString)
        hl.bind("<Button-1>", self.labelClicked)
        hl.pack(fill=BOTH)
        heartRateFrame.grid(row=17, column=0, rowspan=3, columnspan=1)
        #set up GUI menubar with one radio button per detected COM port
        menubar = Menu(self.top)
        configMenu = Menu(self.top, tearoff=0)
        ports = list(serial.tools.list_ports.comports())
        #comPortVar stores the COM port selected from the menu as an integer (ie. 3 for COM3)
        self.comPortVar = IntVar()
        for p in ports:
            #p[0] will return COMX, where X is the com port number, p[0][3] will return X
            configMenu.add_radiobutton(label=p, command=self.comPortRadio, variable=self.comPortVar, value=p[0][3])
        menubar.add_cascade(label="COM port", menu=configMenu)
        # display the menu
        self.top.config(menu=menubar)
        #keeps track of if the arduino is sending information or not
        self.transmitting = False
        #stores the current x value in the line to be updated (plot)
        self.currX = 0
        #the frequency that the arduino sends data over the serial line
        self.transmitFreq = 40
        #The length of the plot in seconds
        self.repeatedTime = 15
        #maximum x length of plot
        self.xLength = self.repeatedTime * self.transmitFreq
        #set up the x and y data for the plot
        self.x = np.arange(0, self.repeatedTime, 1.0 / self.transmitFreq)
        self.y = np.zeros(self.xLength)
        #values for the method of backward difference (heart-rate detection)
        self.MOBDX = np.zeros(self.xLength)
        self.MOBDY = np.zeros(self.xLength)
        #counts the number of beats in the repeated time
        self.numBeats = 0
        #cannot have two beats within 250 milliseconds
        self.refractPeriod = ceil(0.250 * self.transmitFreq)
        self.threshhold = 32000
        #set up the plot embedded in the Tk window
        fig = plt.Figure()
        fig.suptitle('Heart Rate Display', fontsize=14, fontweight='bold')
        canvas = FigureCanvasTkAgg(fig, master=self.top)
        canvas.get_tk_widget().grid(row=1, rowspan=20, column=1)
        self.ax = fig.add_subplot(111)
        self.ax.set_ylim([0, 700])
        self.ax.set_xlim([0, self.repeatedTime])
        self.ax.set_xticks([3, 6, 9, 12, 15])
        self.ax.get_yaxis().set_visible(False)
        self.ax.set_xlabel('Seconds')
        self.line, = self.ax.plot(self.x, self.y)
        #start the animation; keep a reference on self so it is not garbage collected
        self.ani = animation.FuncAnimation(fig, self.animate, np.arange(1, 200), interval=25, blit=False)
        #start the GUI
        self.top.mainloop()

    def animate(self, i):
        """
        Animation callback. While the Arduino is transmitting, read one
        "y1 y2" sample pair, push it into the circular buffers, and redraw
        either the raw signal or the MOBD trace.
        @return the line object
        """
        #only animates the plot if communicating with arduino
        if self.transmitting:
            #reads incoming data from arduino; inString can be None, "", or "y1 y2"
            inString = self.hm.readSerial()
            # fixed: compare strings with !=/== — identity tests ("is") against
            # string literals are unreliable and a SyntaxWarning on Python 3.8+
            if inString != "" and inString is not None:
                data = inString.split()
                if len(data) == 2:
                    #do this for both y1 and y2
                    for k in range(0, 2):
                        #I think that this data is upside down from arduino?
                        self.y[self.currX] = int(data[k])
                        self.MOBD()
                        self.currX += 1
                        if self.currX >= self.xLength - 2:
                            self.currX = 0
                    # fixed: log the raw samples only when a full pair actually
                    # arrived; previously the write ran on every frame and
                    # raised NameError on 'data' when no data had been read
                    if self.bpm:
                        self.file.write(data[0] + "\n" + data[1] + "\n")
            #either makes the line according to the incoming data or according to the MOBD
            if self.bpm:
                self.line.set_data(self.x, self.y)
                self.ax.set_ylim([0, 700])
            else:
                self.line.set_data(self.x, self.MOBDY)
                self.ax.set_ylim([0, self.threshhold])
        return self.line,

    def MOBD(self):
        """
        Method Of Backward Difference; called when a new point is added onto
        self.y. Updates the difference buffers at currX, then re-counts the
        beats over the whole window (a beat is a MOBDY value above the
        threshold, at most one per refractory period).
        """
        self.MOBDX[self.currX] = self.y[self.currX] - self.y[(self.currX-1+self.xLength)%self.xLength]
        self.MOBDY[self.currX] = abs(self.MOBDX[self.currX]*self.MOBDX[(self.currX-1+self.xLength)%self.xLength]*self.MOBDX[(self.currX-2+self.xLength)%self.xLength])
        self.numBeats = 0
        refractCount = 0
        for i in range(0, self.xLength):
            refractCount = refractCount - 1 if refractCount > 0 else 0
            # fixed: integer comparison with == instead of identity ("is")
            if self.MOBDY[i] > self.threshhold and refractCount == 0:
                refractCount = self.refractPeriod
                self.numBeats += 1
        self.dispayFreqChange()

    def comPortRadio(self):
        """
        Called by the radio button for the com ports. This will close whatever
        port was open before.
        """
        self.hm.closeSerial()

    def beginButton(self):
        """
        Called by the begin button: open the sample log and tell the heart
        monitor to begin sending data.
        """
        self.file = open("output.txt", "w")
        if self.hm.beginTrans():
            self.transmitting = True
            self.labelString.set("Transmission Ongoing ")
        else:
            self.transmitting = False
            self.labelString.set(" Error Beginning Trans. ")

    def initButton(self):
        """Called by the init button. Calls the heartMonitor init function."""
        #comPortVar is an IntVar; build 'COMX' from its value
        if self.hm.init("COM" + str(self.comPortVar.get())):
            self.labelString.set(" Connected to COM " + str(self.comPortVar.get()) + " ")
        else:
            self.labelString.set(" Error Stopping Trans. " if False else " Connected to COM " + str(self.comPortVar.get()) + " ") if False else self.labelString.set(" Error Connecting ")

    def stopButton(self):
        """
        Called by the stop button. Calls the heartMonitor stop transmission
        function and closes the sample log.
        """
        # fixed: stop the plot updates BEFORE closing the log file; previously
        # the file was closed first, so a failed stopTrans() left animate()
        # writing to a closed file.
        self.transmitting = False
        if self.hm.stopTrans():
            self.labelString.set(" Transmission Stopped ")
        else:
            self.labelString.set(" Error Stopping Trans. ")
        if hasattr(self, 'file'):
            self.file.close()

    def labelClicked(self, event):
        """
        Called by clicking the heart rate label: toggles between BPM and Hz,
        and between the raw signal and MOBD display.
        """
        self.bpm = not self.bpm

    def dispayFreqChange(self):
        """Update the heart rate on the label. (Name typo kept for compatibility.)"""
        if self.bpm:
            self.heartRateString.set(str(self.numBeats*(60/self.repeatedTime)) + " BPM")
        else:
            self.heartRateString.set(str(self.numBeats/float(self.repeatedTime)) + "Hz")

    def _quit(self):
        """Destroy the GUI. Without this, the exe will not actually stop when closed."""
        self.top.quit()
        self.top.destroy()
HeartMonitorGUI() |
import boto3
import os
from dotenv import load_dotenv
load_dotenv()
def set_resource():
    """Build an S3 resource from credentials found in the environment (.env)."""
    return boto3.resource(
        's3',
        aws_access_key_id=os.environ.get('access_key'),
        aws_secret_access_key=os.environ.get('secret_key'),
        region_name=os.environ.get('region'),
    )
def get_s3_buckets():
    """Return the names of all buckets visible to the configured account."""
    return [bucket.name for bucket in set_resource().buckets.all()]
def delete_bucket(bucket, s3=None):
    """
    Delete the named bucket.

    :param bucket: bucket name to delete
    :param s3: optional boto3 S3 resource to reuse; a new one is created
        from the environment when not supplied
    """
    # Clearer than the original "if s3: pass / else:" — create a resource
    # only when the caller did not pass one in.
    if s3 is None:
        s3 = set_resource()
    s3.Bucket(bucket).delete()
# Buckets that must survive the cleanup.
buckets_to_skip = ['sdh-customerbkt-27', 'sdh-customerbkt-17', 'sdh-customerbkt-23', 'sdh-customerbkt-26',
                   'sdh-customerbkt-24', 'sdh-customerbkt-18', 'sdh-customerbkt-25']
# Candidates: every visible bucket whose name contains the customer prefix.
delete_buckets = [bucket for bucket in get_s3_buckets() if 'sdh-customerbkt' in bucket]
# Delete each candidate that is not on the skip list, echoing its name.
for i in delete_buckets:
    if i not in buckets_to_skip:
        print(i)
        delete_bucket(i)
|
class SpaceAge:
    """Convert an age in seconds into years as measured on each planet."""

    # One Earth year, in seconds.
    EARTH_YEAR = 31557600.0
    # Orbital periods of the other planets, relative to Earth's.
    MERCURY_YEAR_RATIO = 0.2408467
    VENUS_YEAR_RATIO = 0.61519726
    MARS_YEAR_RATIO = 1.8808158
    JUPITER_YEAR_RATIO = 11.862615
    SATURN_YEAR_RATIO = 29.447498
    URANUS_YEAR_RATIO = 84.016846
    NEPTUNE_YEAR_RATIO = 164.79132

    def __init__(self, seconds):
        """:param seconds: the age to convert, in seconds."""
        self.seconds = seconds

    def calculate_planet_year(self, secondsPerPlanetYear):
        """Return the age in planet-years, rounded to two decimal places."""
        return round(self.seconds / secondsPerPlanetYear, 2)

    def _on_planet(self, ratio):
        # A planet's year is Earth's year scaled by its orbital ratio.
        return self.calculate_planet_year(self.EARTH_YEAR * ratio)

    def on_earth(self):
        """Age in Earth years."""
        return self.calculate_planet_year(self.EARTH_YEAR)

    def on_mercury(self):
        """Age in Mercury years."""
        return self._on_planet(self.MERCURY_YEAR_RATIO)

    def on_venus(self):
        """Age in Venus years."""
        return self._on_planet(self.VENUS_YEAR_RATIO)

    def on_mars(self):
        """Age in Mars years."""
        return self._on_planet(self.MARS_YEAR_RATIO)

    def on_jupiter(self):
        """Age in Jupiter years."""
        return self._on_planet(self.JUPITER_YEAR_RATIO)

    def on_saturn(self):
        """Age in Saturn years."""
        return self._on_planet(self.SATURN_YEAR_RATIO)

    def on_uranus(self):
        """Age in Uranus years."""
        return self._on_planet(self.URANUS_YEAR_RATIO)

    def on_neptune(self):
        """Age in Neptune years."""
        return self._on_planet(self.NEPTUNE_YEAR_RATIO)
|
import os
from sqlalchemy import create_engine, Column, Text, Integer, ForeignKey
from sqlalchemy.orm import sessionmaker, relationship
from sqlalchemy.ext.declarative import declarative_base
Base = declarative_base()
def new_db(path):
    """Create the full schema at *path* and bind the metadata to that engine."""
    db_engine = create_engine(path)
    Base.metadata.create_all(db_engine)
    # NOTE(review): assigning MetaData.bind is a legacy SQLAlchemy pattern —
    # confirm it is still needed by callers on the installed version.
    Base.metadata.bind = db_engine
def new_session(path):
    """Open and return a new ORM session for the database at *path*."""
    return sessionmaker(bind=create_engine(path))()
def insert_static_data(db):
    """Seed the Label table with the fixed set of meal descriptors."""
    descriptors = ['Breakfast', 'Brunch', 'Lunch', 'Tea', 'Snack', 'Supper', 'Desert']
    for descriptor in descriptors:
        db.add(Label(Descriptor=descriptor))
    db.commit()
class Label(Base):
    # ORM model: a meal-type label that can be attached to a recipe.
    __tablename__ = 'Label'
    LabelID = Column(Integer, primary_key=True)
    Descriptor = Column(Text) # Breakfast, Lunch, Tea, Snack, Brunch, Supper, Desert
    def __str__(self):
        # Human-readable column dump, used by the __main__ smoke test.
        return '<Label[LabelID=%s, Descriptor=%s]>' % (self.LabelID, self.Descriptor)
class Ingredient(Base):
    # ORM model: a single ingredient, referenced from recipes via
    # IngredientQuantityRecepie.
    __tablename__ = 'Ingredient'
    IngredientID = Column(Integer, primary_key=True)
    Descriptor = Column(Text)
    def __str__(self):
        # Human-readable column dump for debugging.
        return '<Ingredient[IngredientID=%s, Descriptor=%s]>' % (self.IngredientID, self.Descriptor)
class IngredientQuantityRecepie(Base):
    # ORM model: association row linking a Recepie to an Ingredient with a
    # quantity (many-to-many with payload).
    __tablename__ = 'IngredientQuantityRecepie'
    IngredientQuantityRecepieID = Column(Integer, primary_key=True)
    RecepieID = Column(Integer, ForeignKey('Recepie.RecepieID'))
    IngredientID = Column(Integer, ForeignKey('Ingredient.IngredientID'))
    Quantity = Column(Integer)
    def __str__(self):
        # Human-readable column dump for debugging.
        return '<IngredientQuantityRecepie[IngredientQuantityRecepieID=%s, RecepieID=%s, IngredientID=%s, Quantity=%s]>' % (self.IngredientQuantityRecepieID, self.RecepieID, self.IngredientID, self.Quantity)
class Recepie(Base):
    # ORM model: a recipe with a serving count, an optional meal Label, and
    # its ingredient association rows.
    # NOTE(review): 'Recepie' (and 'Desert' above) look like spelling slips
    # for 'Recipe'/'Dessert', but they are baked into the schema — renaming
    # would be a migration, so the names are kept as-is.
    __tablename__ = 'Recepie'
    RecepieID = Column(Integer, primary_key=True)
    Descriptor = Column(Text)
    Serves = Column(Integer)
    LabelID = Column(Integer, ForeignKey(Label.LabelID))
    # One-to-many: the association rows carrying ingredient + quantity.
    ingredients = relationship(IngredientQuantityRecepie, primaryjoin='Recepie.RecepieID == IngredientQuantityRecepie.RecepieID')
if __name__ == '__main__':
    # Smoke test: build a throwaway SQLite database, seed it, dump the
    # labels and remove the file afterwards.
    DB_FILE_PATH = 'live.db'
    DB_PATH = 'sqlite:///' + DB_FILE_PATH
    new_db(DB_PATH)
    db = new_session(DB_PATH)
    insert_static_data(db)
    # print() call form works under both Python 2 and Python 3
    # (the original 'print [...]' statement is a SyntaxError on Python 3).
    print([str(l) for l in db.query(Label).all()])
    db.close()
    os.remove(DB_FILE_PATH)
|
import numpy as np
import cv2
img = cv2.imread('../images/typewriter.jpg')
#print(img.shape)

# Class labels: each synset line is "nXXXXXXXX label, label, ..." — keep
# only the text after the first space.
with open('../model/synset_words.txt', 'r') as syn_f:
    all_rows = syn_f.read().strip().split("\n")
    classes = [r[r.find(' ')+1:] for r in all_rows]

net = cv2.dnn.readNetFromCaffe('../model/bvlc_googlenet.prototxt')

# Print the first 100 class labels together with their indices.
for i, c in enumerate(classes):
    if i == 100:
        break
    # fixed: was print(1, c), which printed the constant 1 instead of the
    # enumerate index.
    print(i, c)

cv2.imshow('Image', img)
cv2.waitKey(0)
cv2.destroyAllWindows()
|
#!/usr/bin/env python
import codecs
import optparse
import os
import sys
import hashlib
import os.path
import uuid
# simplejson is included with Python 2.6 and above
# with the name json
if float(sys.version[:3]) >= 2.6:
import json
else:
# python 2.4 or 2.5 can also import simplejson
# as working alternative to the json included.
import simplejson as json
import datetime
from hepixvmitrust import VMListControler
# import below purely to get the version number
# if the command line tool will have different
# version number then the library we can take this
# out
from hepixvmitrust.__version__ import version
import logging, logging.config
# needed for the signing of images.
import M2Crypto.SMIME
import M2Crypto.BIO
# User interface
def pairsNnot(list_a, list_b):
    """
    Pair up two lists by position.

    :param list_a: first list
    :param list_b: second list
    :return: (pairs, leftover_a, leftover_b) where pairs is a list of
        [a_item, b_item] two-element lists for every shared index, and the
        leftovers hold the tail items of the longer list, in order.
    """
    # The original built index sets and relied on the iteration order of
    # int sets (an implementation detail); zip/slicing gives the same
    # result with guaranteed ordering.
    n_pairs = min(len(list_a), len(list_b))
    arepairs = [[a, b] for a, b in zip(list_a, list_b)]
    notpairs_a = list(list_a[n_pairs:])
    notpairs_b = list(list_b[n_pairs:])
    return arepairs, notpairs_a, notpairs_b
def main():
    """Runs program and handles command line options"""
    # Set of action names accumulated from the parsed options; each action
    # is executed in a fixed order further below.
    actions = set([])
    listcontroler = VMListControler()
    p = optparse.OptionParser(version = "%prog " + version)
    p.add_option('-j', '--json', action ='store', help='Path of the json output file', metavar='OUTPUTFILE')
    p.add_option('-t', '--template', action ='store', help='Path of the json template file', metavar='TEMPLATE')
    p.add_option('-m', '--smime-template', action ='store', help='Path of the smime encoded json template file', metavar='TEMPLATE')
    p.add_option('-a', '--add', action ='append', help='adds a VM image to the JSON', metavar='IMAGEMETADATA')
    p.add_option('-d', '--delete', action ='append', help='del a VM image to the JSON', metavar='IMAGEMETADATA')
    p.add_option('-g', '--generate', action ='append', help='generates a VM image metadata for image', metavar='OUTPUTMETADATA')
    p.add_option('-i', '--image', action ='append', help='Sets the image to generates a VM image metadata', metavar='IMAGEFILE')
    p.add_option('-l', '--list', action ='store_true', help='lists VM images in the JSON')
    p.add_option('-k', '--signer_key', action ='store', help='path to signer key', metavar='SIGNERKEY')
    p.add_option('-c', '--signer_certificate', action ='store', help='path to signer certificate', metavar='SIGNERCERT')
    p.add_option('-s', '--sign', action ='store', help='returns verbose output', metavar='SIGNEDOUTPUT')
    p.add_option('-f', '--format', action ='store', help='Set the format valid values are JSON and XML', metavar='FORMAT')
    p.add_option('--imagelist_version', action ='store', help='Over write the version in the message', metavar='FORMAT')
    options, arguments = p.parse_args()
    # Defaults for everything the options may override.
    # NOTE(review): 'format' shadows the builtin, and 'del_image_metadata'
    # is initialized but never used ('del_image_file' below is the one read).
    filename_template = None
    smime_template = None
    json_output = 'imagelist.json'
    generate_list = []
    add_image_file = []
    del_image_metadata = []
    format = None
    signer_key = os.environ['HOME'] + '/.globus/userkey.pem'
    signer_cert = os.environ['HOME'] + '/.globus/usercert.pem'
    signed_message_output = None
    list_images = False
    imagelist = []
    # NOTE(review): this default is never used — the 'setversion' action reads
    # 'imageListVersion' (camelCase), which is only assigned when
    # --imagelist_version is given. The two names look like an accidental split.
    imagelist_version = '0.0.1'
    # Only one template source (plain or S/MIME) may be used per run.
    clashingactions_template = set(['loads_smime_template','load_template'])
    # --- Translate parsed options into actions and settings -----------------
    if options.template:
        filename_template = options.template
        actions.add('load_template')
    if options.smime_template:
        smime_template = options.smime_template
        actions.add('loads_smime_template')
    if options.json:
        json_output = options.json
        actions.add('save')
    if options.add:
        add_image_file = options.add
        actions.add('image_add')
    if options.delete:
        del_image_file = options.delete
        actions.add('image_del')
    if options.list:
        list_images = True
        actions.add('image_list')
    if options.signer_key:
        signer_key = options.signer_key
    if options.signer_certificate:
        signer_cert = options.signer_certificate
    if options.sign:
        actions.add('verify')
        actions.add('sign')
        signed_message_output = options.sign
    if options.generate:
        generate_list = options.generate
        actions.add('generate')
    if options.format:
        format = options.format
        actions.add('format')
    if options.image:
        imagelist = options.image
        actions.add('generate')
    if options.imagelist_version:
        imageListVersion = options.imagelist_version
        actions.add('setversion')
    # --- Load at most one template file -------------------------------------
    temp1 = actions.intersection(clashingactions_template)
    temp1_len = len (temp1)
    if temp1_len > 1:
        print "Cannot support more than one template with this script."
        sys.exit(1)
    if temp1_len == 1:
        filename = filename_template
        if filename == None:
            filename = smime_template
        fp = open(str(filename))
        text = fp.read()
        if actions.__contains__('loads_smime_template'):
            listcontroler.loads_smime(text)
        if actions.__contains__('load_template'):
            listcontroler.loads(text)
    listcontroler.enviroment_default()
    if actions.__contains__('setversion'):
        listcontroler.model.metadata[u'hv:version'] = imageListVersion
    # Now process the actions.
    if actions.__contains__('generate'):
        # Pair each --generate output with the --image at the same position;
        # unpaired outputs are generated alone, unpaired images are an error.
        pairs, extra_gens ,extra_images = pairsNnot(generate_list,imagelist)
        if len(extra_images) > 0:
            print "error images and no target"
        for paired_items in pairs:
            listcontroler.generate(paired_items[0],paired_items[1])
        for gen_it in extra_gens:
            rc = listcontroler.generate(gen_it)
            if not rc:
                print "Files '%s'" % (gen_it)
                sys.exit(1)
    if actions.__contains__('image_add'):
        for item in add_image_file:
            listcontroler.image_add(item)
    if actions.__contains__('image_del'):
        for item in del_image_file:
            success = listcontroler.image_del(item)
            if success == False:
                print "Failed to delete image '%s'" % (item)
                sys.exit(1)
    if actions.__contains__('image_list'):
        listcontroler.images_list()
    if actions.__contains__('verify'):
        success = listcontroler.verify()
        if success == False:
            print "Failed to verify valid meta data for image."
            sys.exit(1)
    if actions.__contains__('sign'):
        # S/MIME-sign the serialized list with the user's grid key/cert and
        # write the signed message to the requested output file.
        content = listcontroler.dumps()
        smime = M2Crypto.SMIME.SMIME()
        smime.load_key(signer_key,signer_cert)
        buf = M2Crypto.BIO.MemoryBuffer(content)
        p7 = smime.sign(buf,M2Crypto.SMIME.PKCS7_DETACHED)
        buf = M2Crypto.BIO.MemoryBuffer(content)
        out = M2Crypto.BIO.MemoryBuffer()
        smime.write(out, p7, buf)
        message_signed = str(out.read())
        f = open(signed_message_output, 'w')
        f.write(message_signed)
    if actions.__contains__('save'):
        listcontroler.save(json_output)
    if actions.__contains__('format'):
        if format == 'xml':
            # do nothing right now
            1
        else:
            print "Output format type '" + format + "' not supported right now."
            print "Currently only supports JSON output RDF XML output may come later"
            sys.exit(1)
    #test_things5()
    #test_things5()
#test_things5()
#test_things5()
if __name__ == "__main__":
logging.basicConfig(level=logging.WARNING)
main()
|
import os
from math import floor
import numpy as np
import dpm
import umne
import umne.util
#=============================================================================
class remapmatrix:
    """
    Remap dissimilarity matrices into new matrices
    """
    #-----------------------------------------------------------------------------
    @staticmethod
    def to_retinotopic(matrix, stimuli):
        """
        Map a dissimilarity matrix into an equivalent-but-larger matrix which should make it easier to visualize
        retinotopic effect. In the new matrix, two stimuli that share a digit in the same location will be adjacent
        """
        # One group per (digit, screen location) combination: 6 locations
        # crossed with the decade digits defined by the project.
        new_groups = [dict(digit=d, location=loc) for loc in range(6) for d in dpm.stimuli.decade_digits]
        # True iff the stimulus shows this group's digit at this group's
        # location. Stimuli are dicts with 'target' (the number shown) and
        # 'location' (position of its leftmost digit).
        def stimulus_matches_group_digit(stimulus, group):
            target = stimulus['target']
            stim_loc = stimulus['location']
            unit = target % 10
            if target < 10:
                # Single-digit target: only the unit digit exists.
                return stim_loc == group['location'] and unit == group['digit']
            else:
                # Two-digit target: the decade digit sits at the stimulus
                # location, the unit digit one position to its right
                # (hence location - 1 when matching on the unit).
                decade = int(np.floor(target / 10))
                return (stim_loc == group['location'] and decade == group['digit']) or \
                       (stim_loc == group['location'] - 1 and unit == group['digit'])
        # Delegate the actual remapping; entries keep their stimulus fields,
        # the group fields, and their index within the group.
        return umne.rsa_old.remap_stimuli(matrix, stimuli, new_groups,
                                          filter_old_by_new_func=stimulus_matches_group_digit,
                                          sort_old_func=lambda a, b: a['target'] - b['target'],
                                          merge_stim_func=lambda s, g, i: merge_dicts([s, g, dict(ind_in_group=i)]))
#------------------------------------------------------------------------------------
def merge_dicts(list_of_dicts):
    """
    Merge several dict's into one.
    :param list_of_dicts: a list of dict objects. Later entries in the list will override preceding ones.
    """
    merged = {}
    for entry in list_of_dicts:
        merged.update(entry)
    return merged
#=============================================================================
class sortmatrix:
    """
    Comparator factories for ordering the stimuli of a dissimilarity matrix.
    """

    @staticmethod
    def bytarget(rev=False):
        """
        Return a comparator that sorts stimuli primarily by target number,
        secondarily by location.
        :param rev: if True, targets will be sorted in reverse order
        """
        def cmp_by_target(a, b):
            key = (a['target'] - b['target']) * 10 + (a['location'] - b['location'])
            return -key if rev else key
        return cmp_by_target

    @staticmethod
    def bylocation(rev=False):
        """
        Return a comparator that sorts stimuli primarily by location,
        secondarily by target number.
        :param rev: if True, locations will be sorted in reverse order
        """
        def cmp_by_location(a, b):
            key = (a['location'] - b['location']) * 100 + (a['target'] - b['target'])
            return -key if rev else key
        return cmp_by_location
#============================================================================================
# Compute observed dissimilarity
#============================================================================================
#-------------------------------------------------------------------------------------------
def gen_dissimilarity_multi_subj(subj_ids, data_files, tmin, tmax, out_dir, out_filename_previx='dissimilarity',
                                 decim=1, meg_channels=True,
                                 event_ids=dpm.stimuli.all_event_ids(ndigits=2), n_pca=30,
                                 riemann=True, corr_metric='spearmanr', riemann_metric='riemann',
                                 sliding_window_size=None, sliding_window_step=None, sliding_window_min_size=None,
                                 zscore=False, include_diagonal=True, averaging_method='square'):
    """
    Compute observed dissimilarity matrices per subject and their across-subject
    average, saving everything under out_dir.

    :param subj_ids: list of subject identifiers
    :param data_files: raw data files, passed to dpm.files.load_raw
    :param tmin: epoch start time (seconds)
    :param tmax: epoch end time (seconds)
    :param out_dir: output directory (created if missing; per-subject results go to a 'persubj' subdir)
    :param out_filename_previx: prefix of the saved .npy files (name kept, typo included, for caller compatibility)
    :param decim: decimation factor applied to the epochs
    :param meg_channels: which MEG channels to use
    :param event_ids: event IDs to include. NOTE(review): the default is evaluated once
           at import time -- acceptable only if all_event_ids() is pure; confirm.
    :param n_pca: number of PCA components
    :param riemann: if True, use the Riemannian dissimilarity path
    :param corr_metric: correlation metric (non-Riemann path)
    :param riemann_metric: metric for the Riemann path
    :param sliding_window_size: window size in samples; None = no sliding window
    :param sliding_window_step: step between windows (samples)
    :param sliding_window_min_size: minimal allowed size of the last window (samples)
    :param zscore: whether to z-score the data
    :param include_diagonal: whether the dissimilarity matrices keep their diagonal
    :param averaging_method: how to average the result matrices across subjects.
           Use 'square' for correlation metrics, 'linear' for distance metrics.
    """
    #todo: very important - save the stimuli in the file

    #-- Time point (ms) associated with each output matrix
    if sliding_window_size is None:
        time_points = range(int(tmin*1000), int(tmax*1000)+1, decim)
    else:
        #-- compute the center of each time window
        sliding_window_duration = sliding_window_size * decim
        sliding_window_step_duration = sliding_window_step * decim
        sliding_window_min_duration = (sliding_window_min_size or sliding_window_size) * decim
        time_points = np.array(range(int(tmin*1000),
                                     int(tmax*1000) - sliding_window_min_duration + 1,
                                     sliding_window_step_duration))
        last_window_time_duration = int(tmax*1000) - time_points[-1]
        #-- Integer division is required here: time_points has an integer dtype,
        #-- and "+= x / 2" (a float) raises a casting error on Python 3 / modern numpy.
        time_points[:-1] += sliding_window_duration // 2
        time_points[-1] += last_window_time_duration // 2

    run_params = dict(meg_channels=meg_channels,
                      tmin=tmin,
                      tmax=tmax,
                      decim=decim,
                      event_ids=event_ids,
                      n_pca=n_pca,
                      riemann=riemann,
                      corr_metric=corr_metric,
                      riemann_metric=riemann_metric,
                      sliding_window_size=sliding_window_size,
                      sliding_window_step=sliding_window_step,
                      sliding_window_min_size=sliding_window_min_size,
                      zscore=zscore,
                      include_diagonal=include_diagonal,
                      time_points_ms=time_points)

    def _save_dissimilarity(filename, sid, dissimilarity_matrices):
        #-- np.save pickles the dict; read it back with np.load(..., allow_pickle=True).item()
        np.save(filename,
                dict(subj_ids=sid, dissim_matrices=dissimilarity_matrices, params=run_params))

    per_subj_dir = out_dir + os.sep + 'persubj'
    if not os.path.exists(out_dir):
        os.makedirs(out_dir)
    if not os.path.exists(per_subj_dir):
        os.mkdir(per_subj_dir)

    dm_all_subjects = []
    for subj_id in subj_ids:
        sdata = dpm.files.load_raw(dpm.subj_path[subj_id], data_files)
        epochs = \
            umne.create_epochs_from_raw(sdata.raw, sdata.stimulus_events, meg_channels=meg_channels, tmin=tmin, tmax=tmax, decim=decim,
                                        reject=dict(grad=4000e-13, mag=4e-12))
        dm = umne.rsa_old.gen_observed_dissimilarity(epochs, event_ids=event_ids, n_pca=n_pca,
                                                     riemann=riemann, corr_metric=corr_metric, riemann_metric=riemann_metric,
                                                     sliding_window_size=sliding_window_size,
                                                     sliding_window_step=sliding_window_step,
                                                     sliding_window_min_size=sliding_window_min_size,
                                                     zscore=zscore, include_diagonal=include_diagonal)
        #-- Save this subject's matrices immediately so partial runs are not lost
        _save_dissimilarity(per_subj_dir + os.sep + out_filename_previx + '_' + subj_id + '.npy',
                            [subj_id], dm)
        dm_all_subjects.append(dm)

    #-- Average over all subjects (matrix i of the result = average of matrix i across subjects)
    average_dm = []
    for i in range(len(dm_all_subjects[0])):
        a = umne.rsa_old.average_matrices([m[i] for m in dm_all_subjects], averaging_method=averaging_method)
        average_dm.append(a)

    _save_dissimilarity(out_dir+os.sep+out_filename_previx+'_all_subj.npy',
                        subj_ids, average_dm)
#============================================================================================
# Regressions
#============================================================================================
#------------------------------------------------------------------------------------------------------
def regress_one_subject(subj_id, subj_data, dissimilarity_predictor_funcs, dissimilarity_predictor_desc,
                        tmin, tmax, decim, meg_channels,
                        target_numbers, target_positions, riemann, window_size, metric,
                        n_pca=30, results_file_name='rsa_results', delete_prev_results=False):
    """
    Regress one subject's observed dissimilarity matrices on a set of predictor
    dissimilarity functions and save the regression results to a .npy file.

    :param subj_id: subject identifier (used in the results file name)
    :param subj_data: object with .raw and .stimulus_events (as returned by the loader)
    :param dissimilarity_predictor_funcs: predictor dissimilarity functions for the regression
    :param dissimilarity_predictor_desc: textual description of the predictors (stored in the result key)
    :param tmin, tmax: epoch time window (seconds)
    :param decim: decimation factor for the epochs
    :param meg_channels: which MEG channels to use
    :param target_numbers, target_positions: stimuli; event id is target*10+position
    :param riemann: if True, run per time window with Riemann metrics; if False, one window, correlation metrics
    :param window_size: time-window size in seconds (ignored when riemann is False)
    :param metric: iterable of metric names to run
    :param n_pca: number of PCA components
    :param results_file_name: suffix of the results file
    :param delete_prev_results: if True, overwrite any existing results file instead of appending
    """
    epochs = \
        umne.create_epochs_from_raw(subj_data.raw, subj_data.stimulus_events, meg_channels=meg_channels, tmin=tmin, tmax=tmax, decim=decim,
                                    reject=dict(grad=4000e-13, mag=4e-12))
    #-- Get results file name, load it if needed
    results_file = dpm.consts.results_dir + 'rsa/' + subj_id + "_" + results_file_name + ".npy"
    append_to_existing_results = os.path.exists(results_file) and not delete_prev_results
    if append_to_existing_results:
        print('Appending to previous results')
    else:
        print('WARNING: any previous results will be deleted')
    event_ids = [target*10+pos for target in target_numbers for pos in target_positions]
    target_pos_pairs = [(target, pos) for target in target_numbers for pos in target_positions]
    new_results = {}
    # In Riemann mode we slide non-overlapping windows over [tmin, tmax];
    # otherwise one pass over the full epoch (start_times has a single dummy entry).
    start_times = np.arange(tmin, tmax-window_size, window_size) if riemann else [0]
    if not riemann:
        # Ignore window size: only one window
        window_size = tmax - tmin
    for start_time in start_times:
        epochs_cropped = epochs.copy().crop(tmin=start_time, tmax=start_time+window_size) if riemann else epochs
        for curr_metric in metric:
            print('Processing time window {:.3f}-{:.3f}, {:}riemann method, metric={:}'.format(
                start_time, start_time + window_size, '' if riemann else 'non-', curr_metric))
            if riemann:
                observed_dissimilarity = umne.rsa_old.gen_observed_dissimilarity(epochs_cropped, event_ids, riemann=True,
                                                                                 n_pca=n_pca, riemann_metric=curr_metric)
            else:
                observed_dissimilarity = umne.rsa_old.gen_observed_dissimilarity(epochs, event_ids, riemann=False,
                                                                                 n_pca=n_pca, corr_metric=curr_metric)
            rr = umne.rsa_old.regress_dissimilarity(observed_dissimilarity, dissimilarity_predictor_funcs, target_pos_pairs)
            rr = np.array(rr)
            # Results are grouped under a key that encodes the full configuration,
            # so different configurations can coexist in one file.
            key = ('//riemann={:}//metric={:}//decim={:}//target_numbers={:}//target_pos={:}//' +
                   'n_pca={:}//predictors={:}//meg_channels={:}//')\
                .format(riemann, curr_metric, decim, target_numbers, target_positions, n_pca, dissimilarity_predictor_desc,
                        meg_channels)
            result_data = dict(subj_id=subj_id,
                               decim=decim,
                               meg_channels=meg_channels,
                               riemann=riemann,
                               metric=curr_metric,
                               target_numbers=target_numbers,
                               target_positions=target_positions,
                               n_pca=n_pca,
                               predictors=dissimilarity_predictor_desc,
                               tmin=start_time,
                               tmax=start_time + window_size,
                               result=rr)
            if key not in new_results:
                new_results[key] = []
            new_results[key].append(result_data)
    #-- Save results
    print('Saving all results')
    if append_to_existing_results:
        # NOTE(review): on numpy >= 1.16.3 this np.load of a pickled dict needs
        # allow_pickle=True -- confirm the numpy version this project pins.
        all_results = dict(np.load(results_file).item())
        for k, v in new_results.items():
            all_results[k] = v
        np.save(results_file, all_results)
    else:
        np.save(results_file, new_results)
    print('The results were saved to {:}'.format(results_file))
|
"""
Tests for the beta poisson module
"""
import unittest
import numpy as np
import sys
import os
sys.path.append(os.path.abspath(f"{os.getcwd()}/."))
from tbk import bp
class TestBetaPoisson(unittest.TestCase):
    """Tests for the beta-poisson samplers in tbk.bp."""

    def test_beta_poisson_equal(self):
        """
        beta_poisson4 with lambda2 = 1.0 must reduce to beta_poisson3.
        """
        for seed in range(10):
            np.random.seed(seed)
            expected = bp.beta_poisson3(2, 3, 1)
            np.random.seed(seed)
            actual = bp.beta_poisson4(2, 3, 1, 1)
            self.assertEqual(expected, actual)


if __name__ == '__main__':
    unittest.main()
"""
Read an arbitrary integer and print its decimal, octal, hexadecimal
(upper-case) and binary string representations.
"""
int_num = int(input('input the integer'))
# binary
print(f'二进制形式为 : {bin(int_num)}')
# octal
print(f'八进制形式为 : {oct(int_num)}')
# decimal
print(f'十进制形式为 : {int_num}')
# hexadecimal (upper-cases the whole string, '0X' prefix included)
print(f'十六进制形式为 : {hex(int_num).upper()}')
import time
def ensure_execution(func):
    """
    Decorator that keeps retrying *func* until it completes without raising,
    sleeping 10 ms between attempts.

    Fixes over the original:
    - catches ``Exception`` instead of a bare ``except``, so
      ``KeyboardInterrupt`` / ``SystemExit`` still propagate;
    - returns the wrapped function's return value (previously discarded);
    - preserves the wrapped function's metadata via functools.wraps.
    """
    import functools

    @functools.wraps(func)
    def inner(*args, **kwargs):
        while True:
            try:
                return func(*args, **kwargs)
            except Exception:
                time.sleep(0.01)
    return inner
from collections import defaultdict
import math
from tqdm import tqdm
from util import load_json_file,norm
import numpy as np
def get_tfidf_weight(passages):
    """
    Compute a TF-IDF weight for every word appearing in *passages*.

    Note: `tf` is computed over the whole corpus (not per document),
    matching the original behaviour.

    :param passages: iterable of tokenized passages (each a list of words)
    :return: dict mapping word -> tf-idf score
    """
    # Corpus-wide term counts.  (Renamed loop vars: the original shadowed
    # the builtin `list`.)
    doc_frequency = defaultdict(int)
    for passage in passages:
        for word in passage:
            doc_frequency[word] += 1

    # calculate tf value -- hoist the total out of the loop; the original
    # recomputed sum(doc_frequency.values()) per word (accidental O(n^2)).
    total_count = sum(doc_frequency.values())
    tf = {word: count / total_count for word, count in doc_frequency.items()}

    # calculate idf value: number of passages containing each word (+1 smoothing)
    doc_num = len(passages)
    doc = defaultdict(int)  # doc number including the word
    for word in doc_frequency:
        for passage in passages:
            if word in passage:
                doc[word] += 1
    idf = {word: math.log(doc_num / (doc[word] + 1)) for word in doc_frequency}

    # calculate tf_idf value
    return {word: tf[word] * idf[word] for word in doc_frequency}
if __name__=="__main__":
    # Load the demo passages and write one normalized tf-idf weight list
    # per passage, one line each, to ./weights/tfidf.txt.
    data=load_json_file(root_path="./data/demo.json")
    psg_list=[dic['psg'] for dic in data]
    with open("./weights/tfidf.txt","w") as f:
        tf_idf=get_tfidf_weight(psg_list)
        # get tfidf weight list of each passage
        for i in tqdm(range(len(psg_list))):
            w=[]
            for j in range(len(psg_list[i])):
                word=psg_list[i][j]
                w.append(tf_idf[word])
            # norm() is the project helper from util; presumably rescales the
            # weights -- verify its contract before changing this output format.
            print(list(norm(w)),file=f)
|
# Demo of the basic Python list operations: indexing, append/insert,
# pop/remove, sort/sorted and reverse.
list1 = ['qkl', 'zgq', 'pcb', 'nice']
print(list1)
print(list1[0])
print(list1[-1])
list1[0] = 'first'
list1.append('new');
list1.insert(0, 'insertFirst');
list1.insert(1, 'insertSecond');
print(list1)
# pop() with no argument removes and returns the last element
lastItem = list1.pop()
print(lastItem)
print(list1)
# pop can also delete by index
index2Item = list1.pop(2)
print(index2Item)
print(list1)
# delete by value -- note remove() returns None, so this prints None
zgqItem = list1.remove('zgq')
print(zgqItem)
print(list1)
# delete by a value held in a variable (also returns None)
pcb = 'pcb'
pcbItem = list1.remove(pcb)
print(pcbItem)
print(list1)
# sort: orders by character code, comparing element by element
list1.sort()
print(list1)
# sort in reverse (descending) order
list1.sort(reverse=True)
print(list1)
# sorted() returns a sorted copy without modifying list1
print(sorted(list1))
print(list1)
# reverse() flips the list in place
list1.reverse()
print(list1)
|
# Generated by Django 3.1.4 on 2021-03-03 23:46
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Auto-generated schema migration: renames ExtraCurricular.user to
    'student', relaxes Course/Lecture char fields, adds a uniqueness
    constraint on ExtraCurricular, and introduces the Guardian model."""

    dependencies = [
        ('users', '0005_auto_20210303_2346'),
        ('school', '0009_auto_20210303_2329'),
    ]
    operations = [
        migrations.RenameField(
            model_name='extracurricular',
            old_name='user',
            new_name='student',
        ),
        migrations.AlterField(
            model_name='course',
            name='course_name',
            field=models.CharField(max_length=255),
        ),
        migrations.AlterField(
            model_name='lecture',
            name='lecture_title',
            field=models.CharField(blank=True, max_length=255),
        ),
        migrations.AlterUniqueTogether(
            name='extracurricular',
            unique_together={('student', 'activity_name')},
        ),
        migrations.CreateModel(
            name='Guardian',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('first_name', models.CharField(max_length=255)),
                ('last_name', models.CharField(max_length=255)),
                ('phone_number', models.CharField(max_length=20)),
                ('address', models.CharField(max_length=255)),
                ('relation', models.CharField(max_length=255)),
                ('student', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='users.student')),
            ],
            options={
                'unique_together': {('student', 'first_name', 'last_name')},
            },
        ),
    ]
|
import datetime
import json
import itertools
import math
import requests
import bz2
import re
import string
import pandas as pd
import nltk
import numpy
from nltk.corpus import stopwords
from elasticsearch import Elasticsearch, helpers
# ----------------------------------------------------------------------------------------------
# Stop-word lists: a full nltk-based list (minus a few words that matter for
# multiple-choice questions) and a minimal short list for answer matching.
# nltk.download()
cachedStopWordsFullList = stopwords.words("english")
# cachedStopWordsFullList = cachedStopWordsFullList + ['->', '-']
cachedStopWordsFullList = cachedStopWordsFullList + ['->']
cachedStopWordsFullList = [word for word in cachedStopWordsFullList if
                           word not in ['have', 'has', 'should', 'would', 'only', 'no', 'yes']]
# cachedStopWordsShortList = ['is', 'are', 'a', 'an', 'the', 'in', 'on', 'of', '->', '-']
cachedStopWordsShortList = ['is', 'are', 'a', 'an', 'the', 'in', 'on', 'of', '->']
# Character class matching the punctuation to strip from questions/answers.
punc_reg = re.compile("[" + "".join(['.', ',', '?', '_']) + "]")
def unique_list(seq):
    """Return the elements of *seq* with duplicates removed, first-seen order kept."""
    seen = set()
    result = []
    for item in seq:
        if item not in seen:
            seen.add(item)
            result.append(item)
    return result
def remove_stop_words(text, shortList):
    """
    Strip punctuation and stop words from *text*.

    :param text: input string
    :param shortList: if True use the short stop-word list (for answers),
                      otherwise the full filtered nltk list (for questions)
    :return: cleaned, single-spaced, stripped string
    """
    text = re.sub(punc_reg, "", text)
    # The original re.sub(" ", " ", text) replaced a space with a space -- a
    # no-op; whitespace is already normalised by split()/join() below.
    stop_words = cachedStopWordsShortList if shortList else cachedStopWordsFullList
    return ' '.join([word for word in text.split() if word not in stop_words]).strip()
# ----------------------------------------------------------------------------------------------
# Connect to the local Elasticsearch instance and load the question sets.
es = Elasticsearch([{'host': 'localhost', 'port': 9200, 'timeout': 30}])
# print(es.ping())
training_set = pd.read_csv('data/training_set.tsv', sep='\t')
# print(training_set[1:10])
validation_set = pd.read_csv('data/validation_set.tsv', sep='\t')
sample_submission_score = pd.read_csv('data/sample_submission.csv')
sample_submission_mult = sample_submission_score.copy()
# First row to process (prompted so long runs can be resumed).
start_index = 0
start_index = int(input('Start index: '))
# Accuracy bookkeeping, split by short vs. long answer texts.
right_answers_by_mult_counter = 0
right_answers_by_score_counter = 0
total_answers_short = 0
right_answers_short = 0
total_answers_long = 0
right_answers_long = 0
# Comma-separated list of ES indexes to search.
indexes = "ai_c12,ai_wiki_full"
# indexes = "ai_c12"
# Main loop: for each question, find a "knowledge domain" (frequent wiki
# categories among the top hits), then score each candidate answer by
# searching question+answer and combining hit counts and scores.
try:
    # for index, row in validation_set[start_index:(validation_set.question.count()-1)].iterrows():
    # for index, row in validation_set[0:100].iterrows():
    for index, row in training_set[start_index:(start_index + 200)].iterrows():
        print('=============================================================================')
        print(row)
        question = row['question'].lower()
        question = remove_stop_words(question, False)
        question = question.strip()
        answers = ' '.join(row[['answerA', 'answerB', 'answerC', 'answerD']])
        # Find a knowledge domain
        # Progressively relax minimum_should_match until enough hits come back.
        for minimum_should_match in range(95, 50, -5):
            res = es.search(
                body={"query":
                    {"match":
                        {'text': {
                            'query': question + ' ' + answers,
                            # 'operator': 'and',
                            'cutoff_frequency': 0.001,
                            'fuzziness': (10 - math.floor(minimum_should_match / 10)),
                            "minimum_should_match": str(minimum_should_match) + '%'
                        }
                        }
                    }
                },
                index=indexes,
                size=10
            )
            if res['hits']['total'] > 5:
                break
        print("Got %d hits for domain with %d%%" % (res['hits']['total'], minimum_should_match))
        if res['hits']['total'] == 0:
            continue
        categories = []
        for hit in res['hits']['hits']:
            source = hit['_source']
            categories = categories + source['categories']
        # Drop date-/film-related categories that are too generic to be useful.
        categories = [cat for cat in categories
                      if re.search('^[0-9]{4}s? ', cat) is None
                      and re.search('^[0-9]{2}th-century ', cat) is None
                      and re.search(' in [0-9]{4}s?', cat) is None
                      and re.search(' films$', cat) is None
                      and re.search(' clips$', cat) is None
                      and re.search(' novels$', cat) is None
                      and re.search('^Film[s]? ', cat) is None
                      and re.search('^Screenplays ', cat) is None
                      ]
        # groupby needs adjacent duplicates, hence the sort first.
        categories.sort()
        categories_freq = [(cat[0], len(list(cat[1]))) for cat in itertools.groupby(categories)]
        categories_freq.sort(key=lambda x: x[1], reverse=True)
        # Create knowledge domain
        knowledge_domain = [x[0] for x in categories_freq[0:20]]
        print(knowledge_domain)
        # Search answers inside knowledge domain
        # { "term": { "categories": categories[0] }}
        es_domain_query = [{"match": {"categories": cat}} for cat in knowledge_domain]
        mean_answers_count = numpy.mean([
            x.split(' ').__len__() for x
            in row[['answerA', 'answerB', 'answerC', 'answerD']]
        ])
        hits = []
        scores = []
        fuzziness_arr = []
        for answer in row[['answerA', 'answerB', 'answerC', 'answerD']]:
            answer = answer.lower()
            answer = remove_stop_words(answer, True)
            answer = answer.strip()
            minimum_should_match_answer = 100
            if mean_answers_count > 3:
                minimum_should_match_answer = 80
            # Search documents
            # NOTE(review): "filtered"/"and" queries are ES 1.x/2.x syntax; also
            # minimum_should_match below reuses the value left over from the
            # domain loop above -- confirm that is intended.
            for fuzziness in [1, 2, 3, 4, 5]:
                res = es.search(
                    body={"query": {"filtered": {
                        "query": {
                            "match": {
                                'text': {
                                    'query': question + ' ' + answer,
                                    'cutoff_frequency': 0.001,
                                    'fuzziness': fuzziness,
                                    'minimum_should_match': str(minimum_should_match - fuzziness*2) + '%'
                                }
                            }
                        },
                        "filter": {
                            "and": [
                                {
                                    'bool': {
                                        'must': {
                                            'match': {'text': {
                                                'query': answer,
                                                'minimum_should_match': str(minimum_should_match_answer) + '%',
                                                'fuzziness': fuzziness - 1
                                            }}
                                        }
                                    }
                                },
                                # {
                                #     "bool": {
                                #         "should": [
                                #             es_domain_query,
                                #         ],
                                #         "minimum_should_match": 1
                                #     }
                                # }
                            ]
                        }
                    }}},
                    index=indexes
                )
                if res['hits']['total'] > 1:
                    break
            fuzziness_arr = fuzziness_arr + [fuzziness]
            hits = hits + [res['hits']['total']]
            if res['hits']['total'] > 1:
                # Score an answer by the mean of its two best document scores.
                hit1 = res['hits']['hits'][0]
                hit2 = res['hits']['hits'][1]
                scores = scores + [round((hit1["_score"] + hit2["_score"])/2, 3)]
            else:
                scores = scores + [0]
        # breakpointstub = 0
        # Two rankings: score*hits product, and raw score.
        mult_array = [a * b for a, b in zip(hits, scores)]
        mult_max_index = mult_array.index(max(mult_array))
        mult_answer = ('A', 'B', 'C', 'D')[mult_max_index]
        score_max_index = scores.index(max(scores))
        score_answer = ('A', 'B', 'C', 'D')[score_max_index]
        print('Hits: ' + str(hits))
        print('Fuzziness: ' + str(fuzziness_arr))
        print('Scores: ' + str(scores))
        print('By score*hits result: ' + mult_answer)
        print('By score result: ' + score_answer)
        if mean_answers_count > 5:
            total_answers_long += 1
        else:
            total_answers_short += 1
        if mult_answer == row['correctAnswer']:
            right_answers_by_mult_counter += 1
        print('Right answers by mult: %d%%' % (100 * right_answers_by_mult_counter / (index - start_index + 1)))
        if score_answer == row['correctAnswer']:
            right_answers_by_score_counter += 1
            if mean_answers_count > 5:
                right_answers_long += 1
            else:
                right_answers_short += 1
        print('Right answers by score: %d%%' % (100 * right_answers_by_score_counter / (index - start_index + 1)))
        print('total_answers_short: ' + str(total_answers_short))
        print('right_answers_short: ' + str(right_answers_short))
        print('total_answers_long: ' + str(total_answers_long))
        print('right_answers_long: ' + str(right_answers_long))
        print()
        print()
        # !!!!!!!
        sample_submission_score.loc[index, 'correctAnswer'] = score_answer
        sample_submission_mult.loc[index, 'correctAnswer'] = mult_answer
        # Checkpoint the submission file every 100 questions.
        if index % 100 == 0:
            sample_submission_score.to_csv(
                'data/impl_3_' + str(index)
                + '_' + datetime.datetime.now().strftime("%Y-%m-%d-%H-%M-%S")
                + '.csv',
                index=False
            )
            # # sample_submission_mult.to_csv(
            # #     'data/result_set_mult_' + str(index)
            # #     + '_' + datetime.datetime.now().strftime("%Y-%m-%d-%H-%M-%S")
            # #     + '.csv',
            # #     index=False
            # # )
except:
    raise
finally:
    # Always persist whatever was answered, even on interruption.
    sample_submission_score.to_csv('data/impl_3_' + datetime.datetime.now().strftime("%Y-%m-%d-%H-%M-%S") + '.csv', index=False)
    # # sample_submission_mult.to_csv('data/result_set_total_mult' + datetime.datetime.now().strftime("%Y-%m-%d-%H-%M-%S") + '.csv', index=False)
|
from utils.listNode import ListNode
class Solution:
    def rotate(self, head, k):
        """Rotate a singly linked list to the right by k places.

        :param head: first node of the list (or None)
        :param k: number of rotations; may exceed the list length
        :return: head node of the rotated list
        """
        if head is None:
            return None

        # Measure the list and locate its tail.
        length = 1
        tail = head
        while tail.next is not None:
            tail = tail.next
            length += 1

        # Close the list into a ring, walk to the node that precedes the
        # new head, then cut the ring there.
        tail.next = head
        steps = length - (k % length)
        node = tail
        for _ in range(steps):
            node = node.next
        new_head = node.next
        node.next = None
        return new_head
# Build the list 1 -> 2 -> 3 -> 4 -> 5 and demonstrate rotation.
one = ListNode(1)
two = ListNode(2)
three = ListNode(3)
four = ListNode(4)
five = ListNode(5)
one.next = two
two.next = three
three.next = four
four.next = five
print(one)
solution = Solution()
# Rotating a 5-node list by 6 is equivalent to rotating by 1.
print(solution.rotate(one, 6))
"""
Exercício Python 27: Faça um programa que leia o nome completo de uma pessoa,
mostrando em seguida o primeiro e o último nome separadamente.
"""
n = str(input('Informe seu nome completo: ')).strip()
nome = n.split()
print(nome)
print(n)
print(f'Seu primeiro nome é: \033[1;34m{nome[0]}\033[m \nSeu último nome é: \033[7;30m{nome[len(nome) - 1]}\033[m')
print()
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.7 on 2018-10-10 11:37
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Auto-generated migration: adds the admin_user foreign key (to
    api.AdminUser) on RemoteDeploymentMachine."""

    dependencies = [
        ('api', '0014_auto_20181009_1247'),
        ('deployment_manager', '0016_deploymentbuildoption'),
    ]
    operations = [
        migrations.AddField(
            model_name='remotedeploymentmachine',
            name='admin_user',
            field=models.ForeignKey(default=None, on_delete=django.db.models.deletion.CASCADE, related_name='remote_deployment_machines', related_query_name='remote_deployment_machine', to='api.AdminUser'),
        ),
    ]
|
# Kyle Jorgensen, CS 271, HW 1, 10/23/14
# This code is built upon https://gist.github.com/tai2/3684493
def marzullo_algorithm(ranges):
    '''
    Clock synchronization algorithm based on:
    http://en.wikipedia.org/wiki/Marzullo%27s_algorithm

    `ranges` is a sequence of (low, high) interval tuples.  Returns the
    interval consistent with the largest number of sources; if no two
    intervals overlap, falls back to the midpoints of the two closest
    intervals.
    '''
    table = []
    for l,r in ranges:
        table.append((l,-1))
        table.append((r,+1))
    # Sort by offset; on ties put interval ends (+1) before starts (-1) so
    # that merely-touching intervals are not counted as overlapping
    # ('pathological overlaps').  A key function replaces the original
    # cmp-based comparator, which only worked on Python 2.
    table.sort(key=lambda entry: (entry[0], -entry[1]))
    best = 0
    cnt = 0
    for i in range(len(table) - 1):
        cnt = cnt - table[i][1]
        if best <= cnt:
            best = cnt
            beststart = table[i][0]
            bestend = table[i+1][0]
    # if we had overlapping intervals, return the Marzullo answer
    if best > 1:
        return (beststart, bestend)
    else:  # Otherwise, find the two intervals that were closest
           # and take the midpoints of those two as the result
        cnt = 0
        diff = float('inf')  # proper sentinel (the old 9999999 could be exceeded)
        for i in range(1, len(table)-1, 2):
            cnt = abs(table[i][0] - table[i+1][0])
            if cnt < diff:
                diff = cnt
                mid1 = (table[i-1][0] + table[i][0])/2
                mid2 = (table[i+1][0] + table[i+2][0])/2
        return (mid1, mid2)
def test(data_list):
    # Run marzullo_algorithm on every case; report and stop at the first
    # mismatch, otherwise report success.  (Python 2 print statements.)
    for data in data_list:
        result = marzullo_algorithm(data['input'])
        if not result == data['expected']:
            print 'test failed input=', data['input'], 'expected=', data['expected'], 'actual=', result
            return
    print 'test suceeded'
if __name__ == "__main__":
test_data_list = (
{'input' : ((8,12),(11,13),(10,12)), 'expected' : (11,12)},
{'input' : ((8,12),(11,13),(14,15)), 'expected' : (11,12)},
{'input' : ((8,9),(8,12),(10,12)), 'expected' : (10,12)},
{'input' : ((8,12),(9,10),(11,13),(10,12)), 'expected' : (11,12)},
{'input' : ((8,12),(9,10),(11,13),(14,15)), 'expected' : (11,12)},
{'input' : ((11,15),(8,15),(9,11),(10,14),(11,14),(9,10),(9,13),(12,15),(8,11),(14,15)), 'expected' : (12,13)},
{'input' : ((-13.0,-11.0),(-6.2,-6.0),(-2.4,-2.2),(4.8,5.0),(6.0,6.4)), 'expected' : (4.9, 6.2)},
{'input' : ((8.0,9.0),(10.0,11.0),(12.0,13.0)), 'expected' : (8.5,10.5) },
{'input' : ((1.0,2.0),(2.0,3.0),(4.0,5.0)), 'expected' : (1.5,2.5) })
test(test_data_list)
|
#! /usr/bin/env python
import os
import time
import pickle
import pandas as pd
import numpy as np
import tensorflow as tf
from keras.models import load_model
import cv2
import scipy.ndimage as ndimage
import seaborn as sns
import matplotlib.pyplot as plt
from skimage.draw import circle
from skimage.feature import peak_local_max
import rospy
from cv_bridge import CvBridge
from rospy.numpy_msg import numpy_msg
from rospy_tutorials.msg import Floats
import math
# some message type
from geometry_msgs.msg import PoseStamped
from sensor_msgs.msg import Image, CameraInfo
from std_msgs.msg import Float32MultiArray
# Pin the node to GPU 0 and load the grasp-prediction network.
os.environ["CUDA_DEVICE_ORDER"]="PCI_BUS_ID"
os.environ["CUDA_VISIBLE_DEVICES"]="0"
bridge = CvBridge()
# Load the Network.
print("Tensorflow Version: %s" % str(tf.__version__))
# comment by Aaron
# MODEL_FILE = '/home/aarons/catkin_kinect/src/yumi_grasp/src/epoch_10_model.hdf5' # UG-Net
# MODEL_FILE = '/home/aarons/catkin_kinect/src/yumi_grasp/src/epoch_97_model.hdf5' #ggcnn
# MODEL_FILE = '/media/aarons/hdd_2/ggcnn_model/networks/181210_2003__u-net_9_5_3__32_16_8/epoch_10_model.hdf5'
# MODEL_FILE = '/media/aarons/hdd_2/ggcnn_model/networks/181210_1437__u-net_9_5_3__32_16_8/epoch_10_model.hdf5' # muqian zuihao
# MODEL_FILE = '/media/aarons/hdd_2/ggcnn_model/networks/181215_1409__u-net_9_5_3__32_16_8/epoch_10_model.hdf5'
# MODEL_FILE = '/media/aarons/hdd_2/ggcnn_model/networks/181216_1527__u-net_9_5_3__32_16_8/epoch_10_model.hdf5' #zhengcheng le
# MODEL_FILE = '/media/aarons/hdd_2/ggcnn_model/networks/181219_2252__UG-Net/epoch_15_model.hdf5'
# MODEL_FILE = '/media/aarons/hdd_2/ggcnn_model/networks/181220_1737__UG-Net/epoch_10_model.hdf5' # hao
# MODEL_FILE = '/media/aarons/hdd_2/ggcnn_model/networks/181223_2227__UG-Net/epoch_10_model.hdf5'
# MODEL_FILE = '/media/aarons/hdd_2/ggcnn_model/networks/181225_1902__UG-Net/epoch_05_model.hdf5'
# MODEL_FILE = '/media/aarons/hdd_2/ggcnn_model/networks/181227_2156__UG-Net/epoch_05_model.hdf5'
# MODEL_FILE = '/media/aarons/hdd_2/ggcnn_model/networks/181228_1136__UG-Net/epoch_12_model.hdf5'
# MODEL_FILE = '/media/aarons/hdd_2/ggcnn_model/networks/181228_2042__UG-Net/epoch_12_model.hdf5'
# MODEL_FILE = '/media/aarons/hdd_2/ggcnn_model/networks/181229_0024__UG-Net/epoch_08_model.hdf5'
MODEL_FILE = '/media/aarons/hdd_2/ggcnn_model/networks/181229_1049__UG-Net/epoch_08_model.hdf5'
# MODEL_FILE = '/media/aarons/hdd_2/ggcnn_model/networks/181229_1237__UG-Net/epoch_10_model.hdf5'
# MODEL_FILE = '/media/aarons/hdd_2/ggcnn_model/networks/181229_1542__UG-Net_lightv2/epoch_08_model.hdf5'
# MODEL_FILE = '/media/aarons/hdd_2/ggcnn_model/networks/181229_2044__UG-Net/epoch_06_model.hdf5' #0104
# MODEL_FILE = '/media/aarons/hdd_2/ggcnn_model/networks/181230_1053__UG-Net_lightv2/epoch_08_model.hdf5'
# MODEL_FILE = '/media/aarons/hdd_2/ggcnn_model/networks/181230_2032__UG-Net_lightv2/epoch_06_model.hdf5'
model = load_model(MODEL_FILE)
print(MODEL_FILE)
rospy.init_node('ggcnn_detection')
# Output publishers.
grasp_pub = rospy.Publisher('ggcnn/img/grasp', Image, queue_size=1)
ang_pub_heatmap = rospy.Publisher('ggcnn/img/angheatmap', Image, queue_size=1)
width_pub = rospy.Publisher('ggcnn/img/width', Image, queue_size=1)
grasp_plain_pub = rospy.Publisher('ggcnn/img/grasp_plain', Image, queue_size=1)
depth_pub = rospy.Publisher('ggcnn/img/depth', Image, queue_size=1)
depth_raw_pub = rospy.Publisher('ggcnn/img/depth_raw', Image, queue_size=1)
ang_pub = rospy.Publisher('ggcnn/img/ang', Image, queue_size=1)
cmd_pub = rospy.Publisher('ggcnn/out/command', Float32MultiArray, queue_size=1)
state_pub = rospy.Publisher('ggcnn/out/state', Float32MultiArray, queue_size=1)
pointout_pub = rospy.Publisher('ggcnn/out/points_out', Image, queue_size=1)
# pointout_pub = rospy.Publisher('ggcnn/out/points_out', numpy_msg(Floats), queue_size=1)
# Initialise some globals.
# prev_mp: previously chosen grasp pixel; ROBOT_Z: latest robot height;
# Input_Res/crop_size: network input resolution and depth-crop size (pixels).
prev_mp = np.array([150, 150])
ROBOT_Z = 0
Input_Res = 304
crop_size = 304 #400 330
VISUALISE = True
# Tensorflow graph to allow use in callback.
graph = tf.get_default_graph()
# Get the camera parameters
# camera_info_msg = rospy.wait_for_message('/kinect2/qhd/camera_info', CameraInfo) #0321
# camera_info_msg = rospy.wait_for_message('/kinect2/sd/camera_info', CameraInfo)
# K = camera_info_msg.K
# Camera intrinsics are stubbed to 1 (the real values came from CameraInfo above).
fx = 1
cx = 1
fy = 1
cy = 1
# print(fx)
# print(cx)
# print(fy)
# print(cy)
# Execution Timing
class TimeIt:
    """Context manager that prints how long its body took to execute."""
    def __init__(self, s):
        # s: label shown in the timing printout
        self.s = s
        self.t0 = None
        self.t1 = None
        self.print_output = True
    def __enter__(self):
        self.t0 = time.time()
    def __exit__(self, t, value, traceback):
        self.t1 = time.time()
        if self.print_output:
            print('%s time: %s' % (self.s, self.t1 - self.t0))
def visu_heatmap(data):
    """
    Render *data* as a jet-colormap heatmap image via matplotlib/seaborn.

    :param data: 2-D array of values to visualise
    :return: Input_Res x Input_Res uint8 image (numpy array)
    """
    global Input_Res
    out_img = np.zeros((Input_Res, Input_Res, 3), dtype=np.uint8)
    fig, ax = plt.subplots()
    ax = sns.heatmap(data, cmap='jet', xticklabels=False, yticklabels=False, cbar=False)
    fig.add_axes(ax)
    fig.canvas.draw()
    # np.fromstring is deprecated (and rejects binary input in recent numpy);
    # np.frombuffer is the supported zero-copy equivalent.
    data_heatmap = np.frombuffer(fig.canvas.tostring_rgb(), dtype=np.uint8)
    data_heatmap = data_heatmap.reshape(fig.canvas.get_width_height()[::-1] + (3,))
    # Crop the same square region the depth pipeline uses, then resize.
    data_crop = cv2.resize(data_heatmap[(480-crop_size)//2:(480-crop_size)//2+crop_size, \
                            (640-crop_size)//2:(640-crop_size)//2+crop_size,:], (Input_Res, Input_Res))
    # NOTE(review): tostring_rgb yields RGB, so this channel swap produces a
    # BGR-ordered image despite the R/G/B comments -- confirm intended.
    out_img[:, :, 0] = data_crop[:,:,2] # R
    out_img[:, :, 1] = data_crop[:,:,1] # G
    out_img[:, :, 2] = data_crop[:,:,0] # B
    return out_img
def robot_pos_callback(data):
    # Cache the robot's current z (height) from the incoming PoseStamped;
    # read elsewhere to switch between global/local grasp-max tracking.
    global ROBOT_Z
    ROBOT_Z = data.pose.position.z
def depth_callback(depth_message):
global model
global graph
global prev_mp
global ROBOT_Z
global fx, cx, fy, cy
global crop_size
with TimeIt('prediction'):
# with TimeIt('Crop'):
depthImg = bridge.imgmsg_to_cv2(depth_message)
rospy.loginfo(depthImg.shape)
near = 0.01
far = 0.24
depth = far * near / (far - (far - near) * depthImg)
# Crop a square out of the middle of the depth and resize it to 300*300
depth_crop = cv2.resize(depth[(304-crop_size)//2:(304-crop_size)//2+crop_size, (304-crop_size)//2:(304-crop_size)//2+crop_size], (304, 304))
# print((424-crop_size)//2, (424-crop_size)//2+crop_size)
# print((512-crop_size)//2, (512-crop_size)//2+crop_size)
# exit()
depth_crop = cv2.resize(depth, (Input_Res, Input_Res))
# Replace nan with 0 for inpainting.
depth_crop = depth_crop.copy()
depth_nan = np.isnan(depth_crop).copy()
# print(depth_nan)
depth_crop[depth_nan] = 0
# np.save("/home/aarons/catkin_kinect/src/yumi_grasp/src/depth_raw_pub2.npy", depth_crop)
# with TimeIt('Inpaint'):
# open cv inpainting does weird things at the border.
depth_crop = cv2.copyMakeBorder(depth_crop, 1, 1, 1, 1, cv2.BORDER_DEFAULT)
mask = (depth_crop == 0).astype(np.uint8)
# Scale to keep as float, but has to be in bounds -1:1 to keep opencv happy.
depth_scale = np.abs(depth_crop).max()
depth_crop = depth_crop.astype(np.float32)/(depth_scale) # Has to be float32, 64 not supported.
depth_crop = cv2.inpaint(depth_crop, mask, 1, cv2.INPAINT_NS)
# Back to original size and value range.
depth_crop = depth_crop[1:-1, 1:-1]
depth_crop = depth_crop * depth_scale / 1.0 # kinect output unit is millemeter, but realsense output unit is meter
# kernel=cv2.getStructuringElement(cv2.MORPH_RECT,(5,5))
# depth_crop=cv2.morphologyEx((depth_crop * 1000),cv2.MORPH_OPEN,kernel)
# depth_crop = depth_crop /1000.0
# depth_crop = cv2.bilateralFilter(depth_crop,7,31,31)
# with TimeIt('Calculate Depth'):
# Figure out roughly the depth in mm of the part between the grippers for collision avoidance.
depth_crop_neighbor = depth_crop.copy()
# depth_center = depth_crop[100:141, 130:171].flatten()
depth_center = depth_crop.flatten()
depth_center.sort()
# depth_center = depth_center[:10].mean() * 1000.0
depth_center = depth_center.mean() * 1000.0
# print('depth_center')
# print(depth_center)
# if depth_center > 785:
# return
# with TimeIt('Inference'):
# Run it through the network.
# np.save("/home/aarons/catkin_kinect/src/yumi_grasp/src/depth_raw_pub1.npy", depth_crop)
depth_raw_pub.publish(bridge.cv2_to_imgmsg(depth_crop))
# print(depth_crop.max())
# print(depth_crop.mean())
depth_crop = np.clip((depth_crop - depth_crop.mean()), -1, 1.1) #1224
# depth_crop = np.clip((depth_crop - depth_crop.max()), -1, 1.1) #1224
# depth_crop = np.clip(np.true_divide((depth_crop - depth_crop.mean()), depth_crop.std()), -1, 1.1)
# depth_crop = np.clip((depth_crop - aaa), -1, 1.1)
# print(depth_crop.shape)
# depth_crop = np.clip((depth_crop), -1, 1)
with TimeIt('Inference'):
with graph.as_default():
# print("begin prediction")
pred_out = model.predict(depth_crop.reshape((1, Input_Res, Input_Res, 1)))
# print("end prediction")
# print(np.shape(pred_out))
points_out = pred_out[0].squeeze()
# print(np.shape(points_out))
# print(depth_nan.shape)
# print(depth_nan)
# Replace nan with 0 for inpainting.
points_out[depth_nan] = 0
# with TimeIt('Trig'):
# Calculate the angle map.
cos_out = pred_out[1].squeeze()
sin_out = pred_out[2].squeeze()
ang_out = np.arctan2(sin_out, cos_out)/2.0
width_out = pred_out[3].squeeze() * 150.0 # Scaled 0-150:0-1
# with TimeIt('Filter'):
# Filter the outputs.
points_out = ndimage.filters.gaussian_filter(points_out, 5.0) # 3.0 5.0 aaron
ang_out = ndimage.filters.gaussian_filter(ang_out, 2.0)
# with TimeIt('Control'):
# Calculate the best pose from the camera intrinsics.
maxes = None
ALWAYS_MAX = False # Use ALWAYS_MAX = True for the open-loop solution.
if ROBOT_Z > 0.34 or ALWAYS_MAX: # > 0.34 initialises the max tracking when the robot is reset.
# Track the global max.
max_pixel = np.array(np.unravel_index(np.argmax(points_out), points_out.shape))
prev_mp = max_pixel.astype(np.int)
else:
# Calculate a set of local maxes. Choose the one that is closes to the previous one.
# maxes = peak_local_max(points_out, min_distance=20, threshold_abs=0.1, num_peaks=20) #min_distance=10, threshold_abs=0.1, num_peaks=3 15 0.1 20
maxes = peak_local_max(points_out, min_distance=5, threshold_abs=0.1, num_peaks=1) #min_distance=10, threshold_abs=0.1, num_peaks=3 15 0.1 20
if maxes.shape[0]:
max_pixel = maxes[np.argmin(np.linalg.norm(maxes - prev_mp, axis=1))]
# max_pixel = np.array(np.unravel_index(np.argmax(points_out), points_out.shape))
visual_max_pixel = max_pixel.copy()
# Keep a global copy for next iteration.
# prev_mp = (max_pixel * 0.25 + prev_mp * 0.75).astype(np.int)
grasp_quality = points_out[max_pixel[0],max_pixel[1]]
else:
rospy.loginfo("no lacal maxes! ")
# grasp_quality = 0
# cmd_msg = Float32MultiArray()
# x = 0
# y = 0
# z = 0
# ang = 0
# width = 0
# depth_grasp_neighbor = 0
# grasp_quality = 0
# cmd_msg.data = [x, y, z, ang, width, depth_grasp_neighbor, grasp_quality]
# cmd_pub.publish(cmd_msg)
# rospy.loginfo(cmd_msg)
state_msg = Float32MultiArray()
state_msg.data = [True]
rospy.loginfo(state_msg)
state_pub.publish(state_msg)
return
# max_pixel = maxes[np.argmin(np.linalg.norm(maxes - prev_mp, axis=1))]
# visual_max_pixel = max_pixel.copy()
# # Keep a global copy for next iteration.
# prev_mp = (max_pixel * 0.25 + prev_mp * 0.75).astype(np.int)
# # print(max_pixel)
# grasp_quality = points_out[max_pixel[0],max_pixel[1]]
if max_pixel[0]>=10 and max_pixel[0]<=394 and max_pixel[1]>=10 and max_pixel[1]<=394:
# print('bound exists! ')
depth_grasp_neighbor = depth_crop_neighbor[max_pixel[0]-10:max_pixel[0]+10, max_pixel[1]-10:max_pixel[1]+10].flatten()
depth_grasp_neighbor.sort()
depth_grasp_neighbor = depth_grasp_neighbor[:50].mean() * 1000.0
# print(depth_grasp_neighbor)
else:
depth_grasp_neighbor = depth_center
ang = ang_out[max_pixel[0], max_pixel[1]]
width = width_out[max_pixel[0], max_pixel[1]]
if (depth_center - depth_grasp_neighbor) < 5:
rospy.loginfo('task space is empty!')
print(depth_center - depth_grasp_neighbor)
grasp_quality = 0
# Convert max_pixel back to uncropped/resized image coordinates in order to do the camera transform.
# max_pixel = ((np.array(max_pixel) / 300.0 * crop_size) + np.array([(424 - crop_size)//2, (512 - crop_size) // 2]))
max_pixel = ((np.array(max_pixel) / 304.0 * crop_size) + np.array([(304 - crop_size)//2, (304 - crop_size) // 2])) #[2,1]
# depth_crop = cv2.resize(depth[(540-crop_size)//2:(540-crop_size)//2+crop_size, (960-crop_size)//2:(960-crop_size)//2+crop_size], (300, 300))
# where is orign point
max_pixel = np.round(max_pixel).astype(np.int)
point_depth = depthImg[max_pixel[0], max_pixel[1]]
# print('point_depth')
# print(point_depth)
view_matrix = np.array([[0.0, 1.0, -0.0, 0.0],[-1.0, 0.0, -0.0, 0.0],[0.0, 0.0, 1.0, 0.0], [-0.0, -0.6499999761581421, -1.2400000095367432, 1.0]])
proj_matrix = np.array([[4.510708808898926, 0.0, 0.0, 0.0], [0.0, 4.510708808898926, 0.0, 0.0],[ 0.0, 0.0, -1.0020020008087158, -1.0], [0.0, 0.0, -0.0200200192630291, 0.0] ])
inter_gl = np.dot(view_matrix, proj_matrix)
# inter_gl = np.dot(proj_matrix, view_matrix)
px = 2.0*(max_pixel[1] - 0)/304.0 - 1.0
py = 1.0 - (2.0*max_pixel[0])/304.0
# py = 2.0*(max_pixel[0] - 0)/304.0 - 1.0
pz = 2.0*point_depth - 1.0
PP3D = np.array([px, py, pz, 1.0])
PP_world = np.dot(PP3D, np.linalg.inv(inter_gl))
# PP_world = np.dot( np.linalg.inv(inter_gl), PP3D)
rospy.loginfo("PP_world")
print(PP3D)
# print(PP_world)
print(PP_world/PP_world[3])
x = PP_world[0]/PP_world[3]
y = PP_world[1]/PP_world[3]
z = PP_world[2]/PP_world[3]
# print(depth[max_pixel[0], max_pixel[1]])
# These magic numbers are my camera intrinsic parameters.
# fov = 25.0
# fx = 1/math.tan(math.radians(fov/2.0))
# theta_x = math.radians(-90) + math.atan2(max_pixel[1],fx)
# X = point_depth * math.tan(theta_x)
# Z = point_depth
# fy = 1/math.tan(math.radians(fov/2.0))
# theta_y = math.atan2(max_pixel[0],fy)
# Y = math.tan(theta_y) * point_depth/ math.cos(theta_x)
# print([X, Y, Z])
# x = (max_pixel[1] - cx)/(fx) * point_depth
# y = (max_pixel[0] - cy)/(fy) * point_depth
# z = point_depth
# if np.isnan(z):
# print("depth is nan!")
# return
# print("predict: ")
# print(x, y, z, ang, width, depth_center)
# with TimeIt('Draw'):
# Draw grasp markers on the points_out and publish it. (for visualisation)
grasp_img = np.zeros((Input_Res, Input_Res, 3), dtype=np.uint8)
# with open('/home/aarons/catkin_kinect/src/yumi_grasp/src/heatmap.pkl', 'w') as f:
# pickle.dump(points_out, f)
# print(points_out.shape)
# np.save("/home/aarons/catkin_kinect/src/yumi_grasp/src/points_out_1229_householdclutter.npy", points_out)
# np.savetxt("/home/aarons/catkin_kinect/src/yumi_grasp/src/light_txt.npy", points_out)
# exit()
# pd_pointout = pd.DataFrame(points_out)
# pd_pointout.to_csv('/home/aarons/catkin_kinect/src/yumi_grasp/src/points_out.csv')
# heatmap test code
# fig, ax = plt.subplots()
# ax = sns.heatmap(ang_out, cmap='jet', xticklabels=False, yticklabels=False, cbar=False)
# fig.add_axes(ax)
# fig.canvas.draw()
# data_heatmap = np.fromstring(fig.canvas.tostring_rgb(), dtype=np.uint8, sep='')
# data_heatmap = data_heatmap.reshape(fig.canvas.get_width_height()[::-1] + (3,))
# data_crop = cv2.resize(data_heatmap[(480-crop_size)//2:(480-crop_size)//2+crop_size, (640-crop_size)//2:(640-crop_size)//2+crop_size,:], (Input_Res, Input_Res))
# print(data_crop.shape)
if VISUALISE :
# pointout_visu = points_out.flatten().tolist()
# print(len(pointout_visu))
# bridge.cv2_to_imgmsg(points_out)
pointout_pub.publish(bridge.cv2_to_imgmsg(points_out)) # for visualization module
# pose_heatmap = visu_heatmap(points_out)
# grasp_img = pose_heatmap
# ang_heatmap = visu_heatmap(ang_out)
# ang_img = ang_heatmap
# width_heatmap = visu_heatmap(width_out)
# width_img = width_heatmap
# grasp_img[:,:,2] = (points_out * 255.0)
grasp_img_plain = grasp_img.copy()
grasp_img[:,:,2] = (points_out * 255.0)
# rr, cc = circle(prev_mp[0], prev_mp[1], 5)
rr, cc = circle(visual_max_pixel[0], visual_max_pixel[1], 5)
# depth_crop[rr, cc] = 200
grasp_img[rr, cc, 0] = 0 # R
grasp_img[rr, cc, 1] = 255 # G
grasp_img[rr, cc, 2] = 0 # B
# with TimeIt('Publish'):
# Publish the output images (not used for control, only visualisation)
# grasp_img = bridge.cv2_to_imgmsg(grasp_img, 'bgr8')
grasp_img = bridge.cv2_to_imgmsg(grasp_img, 'bgr8')
grasp_img.header = depth_message.header
grasp_pub.publish(grasp_img)
# ang_img = bridge.cv2_to_imgmsg(ang_img, 'bgr8')
# ang_img.header = depth_message.header
# ang_pub_heatmap.publish(ang_img)
# width_heatmap = bridge.cv2_to_imgmsg(width_heatmap, 'bgr8')
# width_heatmap.header = depth_message.header
# width_pub.publish(width_heatmap)
grasp_img_plain = bridge.cv2_to_imgmsg(grasp_img_plain, 'bgr8')
grasp_img_plain.header = depth_message.header
grasp_plain_pub.publish(grasp_img_plain)
depth_pub.publish(bridge.cv2_to_imgmsg(depth_crop))
ang_pub.publish(bridge.cv2_to_imgmsg(ang_out))
# Output the best grasp pose relative to camera.
cmd_msg = Float32MultiArray()
cmd_msg.data = [x, y, z, ang, width, depth_grasp_neighbor, grasp_quality, depth_center, visual_max_pixel[0], visual_max_pixel[1] ]
rospy.loginfo(cmd_msg)
cmd_pub.publish(cmd_msg)
state_msg = Float32MultiArray()
state_msg.data = [False]
rospy.loginfo(state_msg)
state_pub.publish(state_msg)
# Wire the simulated depth-image topic to the grasp-prediction callback.
# queue_size=1 drops stale frames so the callback always sees the latest image.
depth_sub = rospy.Subscriber('pybullet/img/depth_raw', Image, depth_callback, queue_size=1)
## comment by Aaron
# robot_pos_sub = rospy.Subscriber('/m1n6s200_driver/out/tool_pose', PoseStamped, robot_pos_callback, queue_size=1)
# Block until shutdown. rospy.spin() itself only returns on shutdown, so this
# loop body runs at most once; kept as-is for byte-identical behavior.
while not rospy.is_shutdown():
    rospy.spin()
|
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""Diffuse emission spectra.
"""
from __future__ import absolute_import, division, print_function, unicode_literals
from astropy.units import Quantity
__all__ = ['diffuse_gamma_ray_flux']
def _power_law(E, N, k):
    """Evaluate a power-law flux N * (E / 1 TeV) ** (-k).

    Inputs are coerced to astropy Quantities; the returned flux carries
    units of ``m^-2 s^-1 TeV^-1 sr^-1``.
    """
    energy = Quantity(E, 'TeV')
    reference_energy = Quantity(1, 'TeV')
    amplitude = Quantity(N, 'm^-2 s^-1 TeV^-1 sr^-1')
    return amplitude * (energy / reference_energy) ** (-k)
def diffuse_gamma_ray_flux(energy, component='isotropic'):
    """Diffuse gamma ray flux.

    TODO: describe available spectra.

    References:

    * 'isotropic': http://adsabs.harvard.edu/abs/2010PhRvL.104j1101A

    Parameters
    ----------
    energy : `~astropy.units.Quantity`
        Gamma-ray energy
    component : {'isotropic', 'bubble', 'galactic_fermi2', 'galactic_fermi4'}
        Diffuse model component

    Returns
    -------
    flux : `~astropy.units.Quantity`
        Gamma-ray flux in unit ``m^-2 s^-1 TeV^-1 sr^-1``
    """
    if component == 'isotropic':
        # Reference: abstract from this Fermi paper:
        # http://adsabs.harvard.edu/abs/2010PhRvL.104j1101A
        # Spectral index of the isotropic diffuse emission measured by Fermi.
        gamma = 2.41
        # BUG FIX: the index was previously hard-coded as 2 while `gamma`
        # (2.41) sat unused; pass the measured index.
        # NOTE(review): the amplitude is still the placeholder 1. It should be
        # derived from the measured integral flux
        #   integral_flux = Quantity(1.03e-5, 'cm^-2 s^-1 sr^-1')
        # which requires the integration threshold from the paper — confirm
        # before relying on absolute flux values.
        return _power_law(energy, 1, gamma)
    elif component == 'bubble':
        raise NotImplementedError
    elif component == 'galactic_fermi2':
        raise NotImplementedError
    else:
        raise ValueError('Invalid argument for component: {0}'.format(component))
|
from django.contrib.auth import get_user_model
from rest_framework import serializers
from notes.models import Note
user_model = get_user_model()
class NoteSerializer(serializers.ModelSerializer):
    """Serializer for the Note model.

    `owner` is exposed as the read-only username of the related user, so
    ownership is assigned by the view rather than taken from client input.
    """
    owner = serializers.ReadOnlyField(source='owner.username')

    class Meta:
        model = Note
        fields = (
            'id',
            'title',
            'subject',
            'text',
            'owner',
        )
|
from telethon.errors.rpcerrorlist import PeerFloodError
import sys
import random
import utils
import config
import asyncio
import time
# Input file with the target users is passed as the first CLI argument.
input_file = sys.argv[1]
users = utils.load_user(input_file)
print(len(users))
# Only the first configured account is used as a sender.
accounts = config.TELEGRAM_ACCOUNTS[:1]
clients = utils.login(accounts)
# client = clients[0]
async def worker(client, queue):
    """Drain (user, message) pairs from *queue* and send each via *client*.

    Runs forever; main() cancels the task after `queue.join()` returns.
    `queue.task_done()` is now guaranteed for every dequeued item so that
    join() cannot hang.
    """
    while True:
        # print("Waiting {} seconds".format(config.SLEEP_TIME))
        # await asyncio.sleep(config.SLEEP_TIME)
        (user, message) = await queue.get()
        try:
            # BUG FIX: get_user() used to run outside the try block; a lookup
            # failure skipped task_done() and deadlocked queue.join() forever.
            receiver = await utils.get_user(user, client)
            # FIXME: skipped if this user was sent
            print("Sending Message to: ", user["name"])
            await client.send_message(receiver, message.format(user["name"]))
        except PeerFloodError:
            # FIXME: too much requests at the same time
            print("Getting Flood Error from telegram. Script is stopping now. Please try again after some time.")
            # client.disconnect()
            # sys.exit()
        except Exception as e:
            print("Error:", e)
            print("Trying to continue...")
        finally:
            # Always ack the item so main()'s queue.join() can complete.
            queue.task_done()
async def main():
    """Fan the user list out to one worker task per logged-in client."""
    queue = asyncio.Queue()
    # Pair every user with a randomly chosen message template.
    for user in users:
        queue.put_nowait((user, random.choice(config.MESSAGES_TEMPLATE)))

    workers = [asyncio.create_task(worker(client, queue)) for client in clients]

    # Wait until every queued item has been processed, timing the wait.
    started_at = time.monotonic()
    await queue.join()
    total_slept_for = time.monotonic() - started_at

    # Workers loop forever, so cancel them explicitly; return_exceptions
    # swallows the resulting CancelledError values.
    for w in workers:
        w.cancel()
    await asyncio.gather(*workers, return_exceptions=True)

    print("Cancelling all client.")
    for client in clients:
        await client.disconnect()
    print(f"Workers slept in parallel for {total_slept_for:.2f} seconds")
# Drive the async pipeline to completion on the default event loop.
loop = asyncio.get_event_loop()
loop.run_until_complete(main())
print("Done. Message sent to all users.")
|
import ast
import math
import types

import torch
import torch.optim
class FairseqOptimizer(object):
    """Thin wrapper around a torch.optim.Optimizer.

    Subclasses construct the underlying optimizer and store it in
    ``self._optimizer``; this base class layers LR access, gradient
    utilities, and (de)serialization helpers on top of it.
    """

    def __init__(self, args, params):
        super().__init__()
        self.args = args
        # Materialize the parameter iterable once so the gradient utilities
        # (multiply_grads, clip_grad_norm) can traverse it repeatedly.
        self.params = list(params)

    @property
    def optimizer(self):
        """Return a torch.optim.optimizer.Optimizer instance."""
        if not hasattr(self, '_optimizer'):
            raise NotImplementedError
        if not isinstance(self._optimizer, torch.optim.Optimizer):
            raise ValueError('_optimizer must be an instance of torch.optim.Optimizer')
        return self._optimizer

    @property
    def optimizer_config(self):
        """
        Return a kwarg dictionary that will be used to override optimizer
        args stored in checkpoints. This allows us to load a checkpoint and
        resume training using a different set of optimizer args, e.g., with a
        different learning rate.
        """
        raise NotImplementedError

    def __getstate__(self):
        # Pickle the wrapped optimizer's state, not the wrapper itself.
        return self._optimizer.__getstate__()

    def get_lr(self):
        """Return the current learning rate."""
        # Reads group 0 only; set_lr below writes the same LR to all groups.
        return self.optimizer.param_groups[0]['lr']

    def set_lr(self, lr):
        """Set the learning rate."""
        for param_group in self.optimizer.param_groups:
            param_group['lr'] = lr

    def state_dict(self):
        """Return the optimizer's state dict."""
        return self.optimizer.state_dict()

    def load_state_dict(self, state_dict, optimizer_overrides=None):
        """Load an optimizer state dict.

        In general we should prefer the configuration of the existing optimizer
        instance (e.g., learning rate) over that found in the state_dict. This
        allows us to resume training from a checkpoint using a new set of
        optimizer args.

        :param optimizer_overrides: dict of param-group keys (lr, momentum,
            ...) applied on top of the loaded state.
        """
        self.optimizer.load_state_dict(state_dict)
        if optimizer_overrides is not None and len(optimizer_overrides) > 0:
            # override learning rate, momentum, etc. with latest values
            for group in self.optimizer.param_groups:
                group.update(optimizer_overrides)

    def backward(self, loss):
        """Computes the sum of gradients of the given tensor w.r.t. graph leaves."""
        loss.backward()

    def multiply_grads(self, c):
        """Multiplies grads by a constant *c*."""
        for p in self.params:
            if p.grad is not None:
                p.grad.data.mul_(c)

    def clip_grad_norm(self, max_norm):
        """Clips gradient norm."""
        if max_norm > 0:
            return torch.nn.utils.clip_grad_norm_(self.params, max_norm)
        else:
            # max_norm <= 0 disables clipping; just report the total norm.
            return math.sqrt(sum(p.grad.data.norm()**2 for p in self.params if p.grad is not None))

    def step(self, closure=None):
        """Performs a single optimization step."""
        self.optimizer.step(closure)

    def zero_grad(self):
        """Clears the gradients of all optimized parameters."""
        # Drop grad tensors entirely (frees memory) before the regular call.
        for group in self.optimizer.param_groups:
            for p in group['params']:
                p.grad = None
        self.optimizer.zero_grad()

    @property
    def supports_memory_efficient_fp16(self):
        # Delegate to the wrapped optimizer when it declares support.
        if hasattr(self.optimizer, 'supports_memory_efficient_fp16'):
            return self.optimizer.supports_memory_efficient_fp16
        return False
class Adam(torch.optim.Optimizer):
    """Implements Adam algorithm.

    This implementation is modified from torch.optim.Adam based on:
    `Fixed Weight Decay Regularization in Adam`
    (see https://arxiv.org/abs/1711.05101)

    It has been proposed in `Adam: A Method for Stochastic Optimization`_.

    Arguments:
        params (iterable): iterable of parameters to optimize or dicts defining
            parameter groups
        lr (float, optional): learning rate (default: 1e-3)
        betas (Tuple[float, float], optional): coefficients used for computing
            running averages of gradient and its square (default: (0.9, 0.999))
        eps (float, optional): term added to the denominator to improve
            numerical stability (default: 1e-8)
        weight_decay (float, optional): weight decay (L2 penalty) (default: 0)
        amsgrad (boolean, optional): whether to use the AMSGrad variant of this
            algorithm from the paper `On the Convergence of Adam and Beyond`_

    .. _Adam\\: A Method for Stochastic Optimization:
        https://arxiv.org/abs/1412.6980
    .. _On the Convergence of Adam and Beyond:
        https://openreview.net/forum?id=ryQu7f-RZ
    """

    def __init__(self, params, lr=1e-3, betas=(0.9, 0.999), eps=1e-8,
                 weight_decay=0, amsgrad=False):
        defaults = dict(lr=lr, betas=betas, eps=eps,
                        weight_decay=weight_decay, amsgrad=amsgrad)
        super(Adam, self).__init__(params, defaults)

    @property
    def supports_memory_efficient_fp16(self):
        return True

    def step(self, closure=None):
        """Performs a single optimization step.

        Arguments:
            closure (callable, optional): A closure that reevaluates the model
                and returns the loss.

        All math is done in fp32 (grads/params are upcast, then copied back),
        which is what makes this implementation safe for fp16 parameters.
        """
        loss = None
        if closure is not None:
            loss = closure()

        for group in self.param_groups:
            for p in group['params']:
                if p.grad is None:
                    continue
                grad = p.grad.data.float()
                if grad.is_sparse:
                    raise RuntimeError('Adam does not support sparse gradients, please consider SparseAdam instead')
                amsgrad = group['amsgrad']

                p_data_fp32 = p.data.float()

                state = self.state[p]

                # State initialization
                if len(state) == 0:
                    state['step'] = 0
                    # Exponential moving average of gradient values
                    state['exp_avg'] = torch.zeros_like(p_data_fp32)
                    # Exponential moving average of squared gradient values
                    state['exp_avg_sq'] = torch.zeros_like(p_data_fp32)
                    if amsgrad:
                        # Maintains max of all exp. moving avg. of sq. grad. values
                        state['max_exp_avg_sq'] = torch.zeros_like(p_data_fp32)
                else:
                    state['exp_avg'] = state['exp_avg'].type_as(p_data_fp32)
                    state['exp_avg_sq'] = state['exp_avg_sq'].type_as(p_data_fp32)
                    if amsgrad:
                        state['max_exp_avg_sq'] = state['max_exp_avg_sq'].type_as(p_data_fp32)

                exp_avg, exp_avg_sq = state['exp_avg'], state['exp_avg_sq']
                if amsgrad:
                    max_exp_avg_sq = state['max_exp_avg_sq']
                beta1, beta2 = group['betas']

                state['step'] += 1

                # Decay the first and second moment running average coefficient.
                # BUG FIX: the original used the deprecated positional
                # overloads add_(scalar, tensor), addcmul_(scalar, t1, t2) and
                # addcdiv_(scalar, t1, t2), which warn on PyTorch >= 1.5 and
                # fail on current releases; the keyword forms are equivalent.
                exp_avg.mul_(beta1).add_(grad, alpha=1 - beta1)
                exp_avg_sq.mul_(beta2).addcmul_(grad, grad, value=1 - beta2)
                if amsgrad:
                    # Maintains the maximum of all 2nd moment running avg. till now
                    torch.max(max_exp_avg_sq, exp_avg_sq, out=max_exp_avg_sq)
                    # Use the max. for normalizing running avg. of gradient
                    denom = max_exp_avg_sq.sqrt().add_(group['eps'])
                else:
                    denom = exp_avg_sq.sqrt().add_(group['eps'])

                bias_correction1 = 1 - beta1 ** state['step']
                bias_correction2 = 1 - beta2 ** state['step']
                step_size = group['lr'] * math.sqrt(bias_correction2) / bias_correction1

                if group['weight_decay'] != 0:
                    # L2 term applied directly to the fp32 copy of the params.
                    p_data_fp32.add_(p_data_fp32, alpha=-group['weight_decay'] * group['lr'])

                p_data_fp32.addcdiv_(exp_avg, denom, value=-step_size)

                p.data.copy_(p_data_fp32)

        return loss
class FairseqAdam(FairseqOptimizer):
    """Adam wired into the FairseqOptimizer interface."""

    def __init__(self, args, params):
        super().__init__(args, params)
        self._optimizer = Adam(params, **self.optimizer_config)

    @property
    def optimizer_config(self):
        """
        Return a kwarg dictionary that will be used to override optimizer
        args stored in checkpoints. This allows us to load a checkpoint and
        resume training using a different set of optimizer args, e.g., with a
        different learning rate.
        """
        return {
            'lr': self.args.lr,
            # SECURITY/robustness fix: parse strings like "(0.9, 0.999)" with
            # ast.literal_eval instead of eval() — identical result for
            # literal tuples, but arbitrary code in the config string can no
            # longer execute.
            'betas': ast.literal_eval(self.args.adam_betas),
            'eps': self.args.adam_eps,
            'weight_decay': self.args.weight_decay,
        }
class FairseqLRScheduler(object):
    """Base class for LR schedulers; tracks the best validation loss seen."""

    def __init__(self, args, optimizer):
        super().__init__()
        self.args = args
        self.optimizer = optimizer
        self.best = None

    def state_dict(self):
        """Return the LR scheduler state dict."""
        return {'best': self.best}

    def load_state_dict(self, state_dict):
        """Load an LR scheduler state dict."""
        self.best = state_dict['best']

    def step(self, epoch, val_loss=None):
        """Update the learning rate at the end of the given epoch."""
        if val_loss is None:
            return
        # Track the minimum validation loss observed so far.
        self.best = val_loss if self.best is None else min(self.best, val_loss)

    def step_update(self, num_updates):
        """Update the learning rate after each update."""
        return self.optimizer.get_lr()
class InverseSquareRootSchedule(FairseqLRScheduler):
    """Decay the LR based on the inverse square root of the update number.

    We also support a warmup phase where we linearly increase the learning rate
    from some initial learning rate (``--warmup-init-lr``) until the configured
    learning rate (``--lr``). Thereafter we decay proportional to the number of
    updates, with a decay factor set to align with the configured learning rate.

    During warmup::

      lrs = torch.linspace(args.warmup_init_lr, args.lr, args.warmup_updates)
      lr = lrs[update_num]

    After warmup::

      decay_factor = args.lr * sqrt(args.warmup_updates)
      lr = decay_factor / sqrt(update_num)
    """

    def __init__(self, args, optimizer):
        super().__init__(args, optimizer)
        peak_lr = args.lr
        if args.warmup_init_lr < 0:
            args.warmup_init_lr = peak_lr
        # Linear warmup slope covering the first args.warmup_updates steps.
        self.lr_step = (peak_lr - args.warmup_init_lr) / args.warmup_updates
        # Afterwards the LR decays proportionally to 1/sqrt(num_updates).
        self.decay_factor = peak_lr * args.warmup_updates ** 0.5
        # Start at the warmup-initial learning rate.
        self.lr = args.warmup_init_lr
        self.optimizer.set_lr(self.lr)

    def step(self, epoch, val_loss=None):
        """Update the learning rate at the end of the given epoch."""
        super().step(epoch, val_loss)
        # Epoch boundaries do not change the LR for this schedule.
        return self.optimizer.get_lr()

    def step_update(self, num_updates):
        """Update the learning rate after each update."""
        if num_updates < self.args.warmup_updates:
            self.lr = self.args.warmup_init_lr + num_updates * self.lr_step
        else:
            self.lr = self.decay_factor * num_updates ** -0.5
        self.optimizer.set_lr(self.lr)
        return self.lr
# BUG FIX: this file contained an unresolved git merge conflict
# (<<<<<<< HEAD / ======= markers, with the closing >>>>>>> marker
# truncated), which is a SyntaxError in Python. Resolved by keeping the
# statements from both branches in order.
print("File main.")
#vian
print("Virus Corona Heboh")
print("wish you were here")
print("ExR")
print("Faisal was here")
print("coba ubah nanang")
print("wish you were here")
print("ExR")
print("Faisal was here")
print("adit")
print("wish you were here")
print("ExR")
print("Harris was here")
#alianto
print('alianto')
print("wish you were here")
print("ExR")
print("Faisal was here")
print("adit")
print("Faisal was here")
|
from random import randint, random, uniform
class Individ:
    """An individual in a toy genetic simulation.

    Attributes:
        intel:    intelligence score.
        attract:  attractiveness score.
        age:      current age.
        MAX_AGE:  maximum age.
    """

    def __init__(self, intel=0, attract=0, age=0, MAX_AGE=100):
        self.intel = intel
        self.attract = attract
        self.age = age
        self.MAX_AGE = MAX_AGE

    # Mutation
    def mutate(self, probability=20):
        """With *probability* percent chance, nudge one random trait by ±0.5.

        BUG FIX: the original called random(-0.5, 0.5), but random.random()
        takes no arguments and raises TypeError; uniform(-0.5, 0.5) is the
        intended uniform draw over that interval.
        """
        if randint(0, 99) < probability:
            if randint(0, 1):
                self.intel += uniform(-0.5, 0.5)
            else:
                self.attract += uniform(-0.5, 0.5)

    # Updates individ intelligenst
    # def study(self):
|
# PART I
# Create a Python class called MathDojo that has the methods add and subtract. Have these 2 functions take at least 1 parameter.
#
class MathDojo(object):
    """Chainable accumulator: add/subtract numbers, print via result()."""

    def __init__(self):
        print ("The answer is: ")
        self.summed = 0

    def add(self, *numbers):
        """Add every argument to the running total; returns self for chaining."""
        self.summed += sum(numbers)
        return self

    def subtract(self, *numbers):
        """Subtract every argument from the running total; returns self."""
        self.summed -= sum(numbers)
        return self

    def result(self):
        """Print the running total."""
        print(self.summed)
# Demo chain: 0 + 2 + 2 + 5 - 3 - 2 = 4
md = MathDojo()
md.add(2).add(2, 5).subtract(3, 2).result()
# PART II
# Modify MathDojo to take at least one integer(s) and/or list(s) as a parameter with as many value passed in the list.
#
class MathDojo1(object):
def __init__(self):
print ("The answer is: ")
self.summed = 0
def add(self, *args, **kwargs):
for x in args:
if type(x) is list:
for i in x:
self.summed = self.summed + i
for x in args:
if type(x) is int:
self.summed = self.summed + x
return self
def subtract(self, *args, **kwargs):
for x in args:
if type(x) is list:
for i in x:
self.summed = self.summed - i
for x in args:
if type(x) is int:
self.summed = self.summed - x
return self
def result(self):
print(self.summed)
md1 = MathDojo1()
md1.add([1], 3, 4).add([3,5,7,8], [2, 4.3, 1.25]).subtract(2, [2, 3], [1.1, 2.3]).result()
# PART III
# Make any needed changes in MathDojo in order to support tuples of values in addition to lists and singletons.
class MathDojo2(object):
    """Accumulator accepting int singletons, lists, and tuples.

    add()/subtract() return self so calls can be chained; result() prints
    the running total.
    """

    def __init__(self):
        print ("The answer is: ")
        self.summed = 0

    def add(self, *args, **kwargs):
        """Add list elements, then int singletons, then tuple elements."""
        for x in args:
            if type(x) is list:
                for i in x:
                    self.summed = self.summed + i
        for x in args:
            if type(x) is int:
                self.summed = self.summed + x
        for x in args:
            if type(x) is tuple:
                for i in x:
                    # BUG FIX: the original evaluated `self.summed + i` and
                    # discarded the result, so tuples never changed the total.
                    self.summed = self.summed + i
        return self

    def subtract(self, *args, **kwargs):
        """Subtract list elements, then int singletons, then tuple elements."""
        for x in args:
            if type(x) is list:
                for i in x:
                    self.summed = self.summed - i
        for x in args:
            if type(x) is int:
                self.summed = self.summed - x
        for x in args:
            if type(x) is tuple:
                for i in x:
                    # BUG FIX: same missing assignment as in add().
                    self.summed = self.summed - i
        return self

    def result(self):
        """Print the running total."""
        print(self.summed)
md2 = MathDojo2()
tup = (2, 4, 7)
# Demo chain mixing ints, floats inside lists, and a tuple.
md2.add(1, [3, 4, 2.5], 5, 11).add(tup, 3, 4, 2, [6, 6, 7.5]).subtract(4, tup, [5, 6.2]).result()
|
# Copyright (c) 2019 kamyu. All rights reserved.
#
# Google Code Jam 2019 World Finals - Problem C. Won't sum? Must now
# https://codingcompetitions.withgoogle.com/codejam/round/0000000000051708/000000000016c77e
#
# Time: O(2^(D/2) * D), D is the number of digits of S
# Space: O(D)
#
from itertools import imap
def to_int(x): # list of ints to int
    """Concatenate a list of digit ints into a single int."""
    return int("".join(str(d) for d in x))
def to_list(X): # int to list of ints
    """Split an int into the list of its decimal digits (Python 2: map
    returns a list, so a list comprehension is the equivalent form)."""
    return [int(c) for c in str(X)]
def gen_palindromes(S):
    """Yield every palindromic integer <= S in increasing order (Python 2).

    Palindromes are built length by length: `left` enumerates the left
    half, `mid` the optional middle digit for odd total lengths.
    """
    # at most 208 times because the smallest palindrome of triples
    # is at most 10801 (208-th smallest palindrome) in this problem
    l, n = 1, None  # l: current total length; n: smallest left-half value
    while True:
        # Length 1 has an empty left half; otherwise left halves are the
        # values in [n, 10*n), i.e. all (l//2)-digit numbers.
        lefts = [""] if n is None else imap(str, xrange(n, 10*n))
        for left in lefts:
            # Odd lengths insert a middle digit 0-9; even lengths do not.
            mids = [""] if l%2 == 0 else imap(str, xrange(10))
            for mid in mids:
                P = int(left + mid + left[::-1])
                if P > S:
                    # Generated in increasing order, so stop at the first
                    # palindrome exceeding S.
                    return
                yield P
        if l%2 == 1:
            # The left half only grows when moving from odd to even length.
            n = 1 if n is None else n*10
        l += 1
def set_digits(x, o, start):
    """Write digits `o` into x at `start`, mirrored at the right end.

    Returns False on a conflict with an already-fixed digit. Note that
    digits written before the conflict remain set — callers that check the
    return value follow a failure with clear_digits over the same span.
    """
    for i in xrange(len(o)):
        if x[start+i] is not None and x[start+i] != o[i]:
            return False
        x[start+i], x[-1-(start+i)] = o[i], o[i]
    return True
def clear_digits(x, o, start):
    """Erase len(o) digits of x starting at `start`, plus their mirrors."""
    for i in xrange(len(o)):
        x[start+i], x[-1-(start+i)] = None, None
def find_pair_with_same_length(s, x, y, start, left_carry, right_carry):
    """Fill equal-length palindrome digit lists x, y so that x + y = s.

    Digits are fixed from both ends toward the middle; `left_carry` /
    `right_carry` are the pending addition carries at the high / low end.
    Returns True with x and y filled in place on success.
    """
    def gen_X_Y():
        # Enumerate digit pairs (X, Y) with X + Y == target and X >= Y.
        for Y in xrange(max(target-9, 0), min(target+1, 10)): # make X >= Y
            X = target-Y
            if start == 0 and (X == 0 or Y == 0): # leading digit can't be 0
                continue
            yield X, Y
    if len(x)-start*2 <= 0:
        # Every position fixed: consistent iff the two end carries agree.
        return left_carry == right_carry
    for new_left_carry in xrange(2):
        # Digit sum needed at this position, given the carry guesses.
        target = s[len(x)-1-start] + left_carry*10 - new_left_carry
        if s[start] != (target+right_carry)%10:
            continue
        new_right_carry = right_carry if len(x)-start*2 == 1 else (target+right_carry)//10
        for X, Y in gen_X_Y(): # it doesn't matter which of options we take except for making a leading 0
            set_digits(x, [X], start), set_digits(y, [Y], start)
            if find_pair_with_same_length(s, x, y, start+1, new_left_carry, new_right_carry):
                return True
            clear_digits(y, [Y], start), clear_digits(x, [X], start)
            break # if an option fails, other options also fail
    return False
def find_pair_with_overhang_length(s, x, y, start, left_carry, right_carry, left_Y):
    """Like find_pair_with_same_length, but for len(x) > len(y).

    `overhang` is the number of x positions per step with no matching y
    digit; `left_Y` carries the portion of y overlapping the next chunk.
    Fills x and y in place and returns True on success.
    """
    def find_left_x():
        # Solve for the overhang chunk of x from the target digits and carries.
        left_X = to_int(s[len(x)-1-(start+overhang-1):len(x)-start][::-1]) + \
                 left_carry*(10**overhang) - new_left_carry - left_Y
        if not (0 <= left_X < 10**overhang):
            return None
        left_x = to_list(left_X)
        left_x = [0]*(overhang-len(left_x)) + left_x # 0-padding
        if start == 0 and left_x[0] == 0: # leading digit can't be 0
            return None
        if not set_digits(x, left_x, start):
            clear_digits(x, left_x, start)
            return None
        return left_x
    def find_left_y():
        # Derive the corresponding chunk of y from the low-order side of the sum.
        if len(y)-start*2 <= 0:
            return [], right_carry # pass current right carry if y is not updated
        right_y_len = min(len(y)-start*2, overhang)
        right_S, right_X = map(to_int, [s[start:start+right_y_len][::-1], left_x[:right_y_len][::-1]])
        new_right_carry, right_Y = map(abs, divmod(right_S-right_X-right_carry, 10**right_y_len))
        right_y = to_list(right_Y)
        right_y = [0]*(right_y_len-len(right_y)) + right_y # 0-padding
        left_y = right_y[::-1]
        if start == 0 and left_y[0] == 0: # leading digit can't be 0
            clear_digits(x, left_x, start)
            return None, None
        if not set_digits(y, left_y, start):
            clear_digits(y, left_y, start), clear_digits(x, left_x, start)
            return None, None
        return left_y, new_right_carry
    if len(x)-start*2 <= 0:
        # All positions fixed: consistent iff both end carries agree.
        return left_carry == right_carry
    overhang = min(len(x)-2*start, len(x)-len(y))
    for new_left_carry in xrange(2):
        left_x = find_left_x()
        if left_x is None:
            continue
        left_y, new_right_carry = find_left_y()
        if left_y is None or new_right_carry is None:
            continue
        # Portion of left_y that overlaps the next recursion's chunk.
        new_left_Y = 0 if len(y)-start*2 <= overhang else to_int(left_y[:(len(y)-start*2)-overhang])
        if find_pair_with_overhang_length(s, x, y, start+overhang,
                                          new_left_carry, new_right_carry, new_left_Y):
            return True
        clear_digits(y, left_y, start), clear_digits(x, left_x, start)
    return False
def find_pair(s, i, j, left_carry):
    """Search for palindromes X (i digits) and Y (j digits) with X + Y = s.

    `s` is the reversed digit list of the target sum; returns (X, Y) as
    ints, or (None, None) if no such pair exists.
    """
    x, y = [None]*i, [None]*j
    result = find_pair_with_same_length(s, x, y, 0, left_carry, 0) if i == j else \
             find_pair_with_overhang_length(s, x, y, 0, left_carry, 0, 0)
    if not result:
        return None, None
    # Digits were filled in reversed order; restore most-significant-first.
    x.reverse(), y.reverse()
    return to_int(x), to_int(y)
def wont_sum_must_now():
    """Solve one case: express S as a sum of at most three palindromes.

    Reads S from stdin (Python 2 input() evaluates the line) and returns
    the answer as a space-separated string. Tries each palindrome P <= S
    and searches for palindromes X >= Y with X + Y = S - P.
    """
    S = input()
    s = to_list(S)
    if s == s[::-1]:
        # S itself is a palindrome: a single term suffices.
        return S
    for P in gen_palindromes(S):
        s = to_list(S-P)
        s.reverse()
        # NOTE(review): when the most significant digit of S-P is 1, that 1
        # can presumably be produced by the final carry, allowing X to have
        # one digit fewer — confirm against the algorithm write-up.
        carry = int(s[-1] == 1)
        for i in reversed(xrange(len(s)-carry, len(s)+1)): # prefer larger X
            left_carry = len(s)-i
            for j in xrange(1, i+1):
                X, Y = find_pair(s, i, j, left_carry)
                if X is None or Y is None:
                    continue
                assert(X >= Y >= P)
                result = [X, Y]
                if P != 0:
                    result.append(P)
                return " ".join(map(str, result))
    assert(False)
# Python 2 entry point: first line is the number of cases.
for case in xrange(input()):
    print 'Case #%d: %s' % (case+1, wont_sum_must_now())
|
# Reads n and a list a of n ints; for each i prints the XOR of all elements
# except a[i] (a[i] ^ (XOR of everything) cancels a[i] out).
n = int(input())
a = list (map (int, input().strip().split()))
x = 0
for i in range(n):
    x^=a[i]
for i in range(n):
    print(a[i]^x,end=" ")
|
import numpy as np
import scipy.stats as st
from statistics import mean, median
import os
import sys
# Pick the benchmark family from argv[1]; default to "kseqtest".
# NOTE(review): sys.argv[1] raises IndexError when run with no argument.
if sys.argv[1] in ["seq","kseq","chain","kchain"]:
    NAME = sys.argv[1]+"test"
else:
    NAME="kseqtest"
#NAME="seqtest"
# Collect per-size CSV result files, skipping previously generated outputs.
files = set()
for fn in os.listdir("."):#["chaintest-8388608.csv"]:#["chaintest-results.csv","seqtest-results.csv"]:
    if not fn.startswith(NAME) or not fn.endswith(".csv") or fn == NAME+"-totals.csv" or fn == NAME+"-results.csv":
        continue
    files.add(fn)
out = open(NAME+"-totals.csv","w")
out.write(";".join(["Size","Min","Max","Mean","Median","Conf95start","Conf95end"])+"\n")
for fn in sorted(files):
    #print(fn)
    transfer = {}
    with open(fn) as f:
        for l in f:
            # Each row: size, then 10 nanosecond timestamps delimiting
            # 5 measured steps; convert each step to milliseconds.
            size,ts = l.split(',',1)
            t = ts.split(',')
            times = transfer.setdefault(size,[[],[],[],[],[]])
            times[0].append( (int(t[1])-int(t[0])) / 1000000.0 )
            times[1].append( (int(t[3])-int(t[2])) / 1000000.0 )
            times[2].append( (int(t[5])-int(t[4])) / 1000000.0 )
            times[3].append( (int(t[7])-int(t[6])) / 1000000.0 )
            times[4].append( (int(t[9])-int(t[8])) / 1000000.0 )
    for s,times in transfer.items():
        print("Size %9s:" % s)
        print("STEP MIN MAX MEAN MEDIAN ConfInt .95")
        # Totals over all steps, with a Student-t 95% confidence interval.
        t = [item for sublist in times for item in sublist]
        a = np.array(t)
        ci95 = st.t.interval(0.95, len(a)-1, loc=np.mean(a), scale=st.sem(a))
        print("Total: %8.3f %8.3f %8.3f %8.3f %8.3f-%8.3f" % (min(t),max(t),mean(t),median(t),ci95[0],ci95[1]))
        out.write(";".join([str(x) for x in [s,min(t),max(t),mean(t),median(t),ci95[0],ci95[1]]])+"\n")
        # Per-step breakdown (step i -> i+1).
        i=0
        for ts in times:
            a = np.array(ts)
            ci95 = st.t.interval(0.95, len(a)-1, loc=np.mean(a), scale=st.sem(a))
            print(" %1d->%1d: %8.3f %8.3f %8.3f %8.3f %8.3f-%8.3f" % (i, i+1, min(ts),max(ts),mean(ts),median(ts),ci95[0],ci95[1]))
            i+=1
out.close()
|
import telebot

# SECURITY(review): bot token is hardcoded in source (and thus in VCS) —
# rotate this token and load it from an environment variable instead.
TOKEN = "412526464:AAGPBU6liOdo8CsMmWJssFR08KPPZpJZaWg"
bot = telebot.TeleBot(TOKEN)
@bot.message_handler(commands=["Привет"])
def answer(message):
    """Reply to the greeting command.

    BUG FIX: the original called bot.send_massage (typo), which raises
    AttributeError at runtime; the telebot API method is send_message.
    """
    bot.send_message(message.chat.id, "Валейкум")
@bot.message_handler(func=lambda message: True)
def say_smth(message):
    """Catch-all text handler with canned replies for two greetings."""
    text = message.text
    if text == "Здрасте":
        bot.send_message(message.chat.id, "Животное, пошел вон!")
    elif text == "Здровеньке булы":
        bot.reply_to(message, "Оооооо, братуха")
# Start long-polling for updates (blocks until interrupted).
if __name__=="__main__":
    bot.polling()
import json
import praw
import boto3
import base64
from botocore.exceptions import ClientError
def get_secret():
    """Fetch the Reddit API credentials from AWS Secrets Manager.

    Returns the secret parsed as a dict, handling both string and binary
    secret payloads. Any ClientError from Secrets Manager propagates.
    """
    secret_name = "API_KEY"
    region_name = "us-east-1"

    # Create a Secrets Manager client
    session = boto3.session.Session()
    client = session.client(
        service_name='secretsmanager',
        region_name=region_name
    )

    try:
        get_secret_value_response = client.get_secret_value(
            SecretId=secret_name
        )
    except ClientError as e:
        # Every failure mode (DecryptionFailureException,
        # InternalServiceErrorException, InvalidParameterException,
        # InvalidRequestException, ResourceNotFoundException, ...) is fatal
        # here, so always propagate to the caller.
        # BUG FIX: the original if/elif chain re-raised only five known error
        # codes; any other code fell through silently and the function
        # returned None, crashing the caller later with a confusing error.
        raise e
    else:
        # Decrypts secret using the associated KMS CMK.
        # Depending on whether the secret is a string or binary, one of these fields will be populated.
        if 'SecretString' in get_secret_value_response:
            secret = get_secret_value_response['SecretString']
            return json.loads(secret)
        else:
            decoded_binary_secret = base64.b64decode(get_secret_value_response['SecretBinary'])
            return json.loads(decoded_binary_secret)
def put_id(uid, dynamodb=None):
    """Record a submission id, failing if it was already recorded.

    Raises botocore's ConditionalCheckFailedException (wrapped in
    ClientError) when `uid` already exists; the caller relies on that to
    skip duplicate submissions.
    """
    if not dynamodb:
        dynamodb = boto3.resource('dynamodb')

    table = dynamodb.Table('watchexchange_uid_table')
    # BUG FIX: the condition previously checked attribute_not_exists(id), but
    # the stored items only have a `uid` attribute — the condition was always
    # true, so duplicates were silently re-written (and re-notified) instead
    # of raising.
    response = table.put_item(
        Item={'uid': uid},
        ConditionExpression="attribute_not_exists(uid)",
    )
    return response
def lambda_handler(event, context):
    """AWS Lambda entry point: scan r/watchexchange for tracked brands.

    New (previously unseen) submissions whose titles mention a keyword are
    collected, with asking prices scraped from the seller's comments, and
    published to an SNS topic. `event`/`context` are the standard Lambda
    arguments (unused here).
    """
    new_item_found = False
    client = boto3.client('sns')
    reddit_api_keys = get_secret()
    reddit = praw.Reddit(client_id=reddit_api_keys['REDDIT_CLIENT_ID'],
                        client_secret=reddit_api_keys['REDDIT_CLIENT_SECRET'],
                        user_agent="watchexchange_notification_system:v1 (by u/Gandor)")
    # Accumulators for the SNS message payload.
    posts = {"Title" : [],
            "Link" : [],
            "Price" : []}
    keywords = ['rolex', 'omega', 'patek', 'audemars', 'lange', 'vacheron']
    for submission in reddit.subreddit("watchexchange").new(limit=15):
        prices_found = []
        if(any(substring in submission.title.lower() for substring in keywords)):
            uid = submission.id
            try:
                # put_id's conditional write raises when the uid was already
                # recorded — that is how duplicates are detected.
                put_id(uid)
            except:
                # NOTE(review): bare except also masks real DynamoDB errors
                # (throttling, auth) as "already exists" — consider catching
                # the ConditionalCheckFailedException specifically.
                print("{} ID Already Exists in Database".format(uid))
                continue
            print("NEW ENTRY {}".format(uid))
            new_item_found = True
            title = submission.title
            link = "reddit.com{}".format(submission.permalink)
            seller = submission.author
            comments = submission.comments
            # Scrape asking prices from the seller's own comments: any
            # whitespace-separated token containing "$".
            for comment in comments:
                if(comment.author == seller):
                    detail_substrings = comment.body.split(" ")
                    for substring in detail_substrings:
                        if("$" in substring):
                            prices_found.append(substring)
            posts['Title'].append(title)
            posts['Link'].append(link)
            posts['Price'].append(prices_found)
    message = {"Title": posts['Title'],
               "Link" : posts['Link'],
               "Price": posts['Price']}
    # Publish only when at least one new submission was found.
    if(new_item_found == True):
        response = client.publish(TargetArn=reddit_api_keys['REDDIT_SNS_ARN'],
                        Message=json.dumps({'default': json.dumps(message)}),
                        MessageStructure='json')
    return {
        'statusCode': 200,
    }
|
import sys
from quantum_espresso_tools.superconductivity.plots import plot_tc_vs_smearing
# CLI wrapper: forwards argv (script name + arguments) to the plotting helper.
plot_tc_vs_smearing(sys.argv)
|
# -*- coding: utf-8 -*-
"""
Created on Sun Oct 5 09:08:33 2014
@author: Lanfear
"""
import urllib2
import getopt
import BeautifulSoup
import collections
import json
import csv
import re
base_url = 'http://www.jinni.com/movies/'
file_url = '/Users/Lanfear/Desktop/Research/CLuuData/CLuuScriptsGeneData/Holdoutgenes.txt'
movieinfopath = '/Users/Lanfear/Desktop/Research/CLuuData/CLuuResults/FinalHoldoutSet.csv'
genedict = collections.OrderedDict()
class GetGenes:
    """Scrape jinni.com 'genome' tags for movies.

    Results accumulate in the module-level ``genedict`` OrderedDict,
    keyed by movie name.  Python 2 code (urllib2 / BeautifulSoup 3).
    """
    def __init__(self):
        pass
    def Parse_Genes(self, html):
        """Return the trailing '=value' portion of every anchor href in html."""
        soup = BeautifulSoup.BeautifulSoup(html)
        genelist = []
        for link in soup.findAll("a"):
            href = link.get("href")
            genelist.append(str(href[href.rfind('=') + 1:]))
        return genelist
    def Get_Genes(self, mName):
        """Fetch the jinni.com page for movie mName and record its genes in genedict."""
        # Normalise the movie name into jinni's URL slug form:
        # strip odd characters, turn dots into spaces, spaces into dashes.
        cleaned = re.sub(r'[^a-zA-Z0-9\.-]', '', mName)
        cleaned = cleaned.replace('.', ' ').rstrip(' ')
        url = base_url + cleaned.replace(' ', '-')
        html = urllib2.urlopen(url).read()
        # Keep only the genome section of the page.
        start = html.find('right_genomeGroup')
        end = html.lower().find('more online info')
        html = html[start:end]
        if html.find('right_genomeTitleColor Audience'):
            # Remove extraneous genome elements such as Audience by
            # stripping that part of the HTML content.
            audience_at = html.find('right_genomeTitleColor Audience')
            tail = html[audience_at:]
            tail = tail[tail.find('right_genomeGroup'):]
            html = html[:audience_at] + tail
        key = mName.replace('-', ' ')
        genedict[key] = {}
        genedict[key]['Genes'] = self.Parse_Genes(html)
def main(argv):
    """CLI entry: ``python GetGenes.py -m 'movieName' [-f genefilename]``.

    Looks up the named movie's genome on jinni.com.  Returns None.
    """
    if not len(argv) == 2 and not len(argv) == 4:
        print ("Usage: python GetGenes.py -m 'movieName'",
               " -f genefilename (optional)")
        return
    myopts, args = getopt.getopt(argv, 'm:f:')
    mName = ''
    gFName = ''
    for o, a in myopts:
        if o == '-m':
            mName = a
        elif o == '-f':
            gFName = a
    mName = mName.replace(' ', '-')
    # Bug fix: the original called a non-existent free function
    # Get_Genes(url, mName).  Get_Genes is a method of GetGenes and builds
    # the URL from the movie name itself.
    GetGenes().Get_Genes(mName)
if __name__ == '__main__':
    #main(sys.argv[1:])
    # Batch mode: read the holdout movie list (CSV), scrape genes for each
    # movie, and attach Year/Genre/ROI from the CSV columns.
    movierawdata = list(csv.reader(open(movieinfopath, 'rU'),
                        dialect=csv.excel_tab, delimiter=','))[1:]
    movierawdata = [row[0:7] for row in movierawdata]
    G = GetGenes()
    for row in movierawdata:
        mName = row[0].replace(' ', '-')
        print mName
        try:
            G.Get_Genes(mName)
            genedict[row[0]]['Year'] = row[1]
            genedict[row[0]]['Genre'] = row[2]
            genedict[row[0]]['ROI'] = row[6]
        except:
            # NOTE(review): bare except hides every scrape failure (network,
            # parse, missing key); consider catching Exception and logging.
            print 'YOU MAD BRO? SHOULD NEVER SEE THIS'
    # Persist the collected genome dict as JSON.
    json.dump(genedict, open(file_url, 'w'))
    print genedict
|
# %%spark
from pyspark import SparkContext
from pyspark.sql import SparkSession
from pyspark.sql import functions as sf
from pyspark.sql.types import *
from pyspark import SQLContext
from datetime import date,datetime,timedelta
from pytz import timezone
from pyspark.sql.functions import *
from pyspark.sql.window import Window
import boto3
import sys
import uuid
##########################################################################################################
def get_recent_dir (prefix_input):
    """Descend through S3 'directory' prefixes, always taking the
    lexicographically greatest CommonPrefix, until a leaf is reached.

    Relies on the module-level ``client`` (boto3 S3) and ``bucket`` globals.
    Returns the deepest prefix found (the most recent directory, given
    sortable date-style prefix names).
    """
    current = prefix_input
    while True:
        listing = client.list_objects(Bucket=bucket, Prefix=current, Delimiter='/', MaxKeys=1500)
        common = listing.get('CommonPrefixes')
        if common is None:
            # No deeper prefixes: current is the leaf directory.
            return current
        # max() over strings == sorted(...)[-1]: the latest sub-prefix.
        current = max(p.get('Prefix') for p in common)
##########################################################################################################
def blank_as_null(x):
    """Return a Column expression mapping empty-string values of column x to null."""
    column = sf.col(x)
    return when(column != "", column).otherwise(None)
# Row-level helper: join a list of (possibly null) column values with '~',
# substituting the empty string for nulls.
concat_udf = sf.udf(lambda cols: "~".join([x if x is not None else "" for x in cols]), StringType())
##########################################################################################################
# bucket = ""
# access_key = ""
# secret_key = ""
# vendor = ""
# date = "1900-01-01"
# Positional arguments supplied by the spark-submit wrapper: S3 bucket,
# AWS credentials, vendor name, run date (YYYY-MM-DD), enriched-data prefix.
bucket = sys.argv[1]
access_key = sys.argv[2]
secret_key = sys.argv[3]
vendor = sys.argv[4]
# NOTE(review): rebinding 'date' shadows datetime.date imported above.
date = sys.argv[5]
enriched_path = sys.argv[6]
appNameSuffix = vendor + "Spark_DataModels"
# Partition components derived from the YYYY-MM-DD run date.
year = date.split('-',1)[0]
month = date.split('-',2)[1]
day = date.split('-',3)[2]
# If you run in pyspark, ignore sc = SparkContext(). Else if you run via spark-submit, uncomment this.
sc = SparkContext()
# Inject AWS credentials and a unique scratch dir into the Hadoop config.
sc._jsc.hadoopConfiguration().set("fs.s3.awsAccessKeyId", "" + access_key + "")
sc._jsc.hadoopConfiguration().set("fs.s3.awsSecretAccessKey", ""+ secret_key + "")
sc._jsc.hadoopConfiguration().set("hadoop.tmp.dir", "/mnt/var/lib/hadoop/tmp/"+str(uuid.uuid4()))
# Spark session tuned for small shuffles and dynamic partition overwrite
# (only partitions written in this run get replaced).
sparkSession = (SparkSession
.builder
.appName('SparkApp_' + appNameSuffix)
# .config("spark.hadoop.fs.s3.enableServerSideEncryption", "true")
# .config("spark.hadoop.fs.s3.serverSideEncryptionAlgorithm", "aws:kms")
# .config("spark.hadoop.fs.s3.impl", "org.apache.hadoop.fs.s3native.NativeS3FileSystem")
.config("spark.sql.parquet.filterPushdown", "true")
.config("spark.sql.parquet.mergeSchema", "true")
.config("spark.sql.caseSensitive","true")
.config("spark.sql.shuffle.partitions","5")
.config("spark.sql.sources.partitionOverwriteMode","dynamic")
.getOrCreate())
client = boto3.client('s3',aws_access_key_id=access_key, aws_secret_access_key=secret_key, region_name="us-west-2")
# Per-day enriched parquet sources and DataModels targets on S3.
srcfilePathTrans = "s3://" + bucket + "/" + enriched_path + vendor + "/Parquet/transactions/year=" + year + "/month=" + month + "/day=" + day +""
srcfilePathSummary = "s3://" + bucket + "/" + enriched_path + vendor + "/Parquet/summary/year=" + year + "/month=" + month + "/day=" + day +""
srcfilePathStreams = "s3://" + bucket + "/" + enriched_path + vendor + "/Parquet/streams/year=" + year + "/month=" + month + "/day=" + day +""
tgtfilePathTrans = "s3://" + bucket + "/" + enriched_path + vendor + "/DataModels/transactions/"
tgtfilePathSummary = "s3://" + bucket + "/" + enriched_path + vendor + "/DataModels/summary/"
tgtfilePathStreams = "s3://" + bucket + "/" + enriched_path + vendor + "/DataModels/streams/"
tgtfilePathMapTbl = "s3://" + bucket + "/" + enriched_path + vendor + "/DataModels/mappingTable/"
dfparquetSrcTrans = sparkSession.read.format("parquet").load(srcfilePathTrans)
dfparquetSrcSummary = sparkSession.read.format("parquet").load(srcfilePathSummary)
dfparquetSrcStreams = sparkSession.read.format("parquet").load(srcfilePathStreams)
# Derive year/month/day partition columns from the ISO createdDate string;
# the nested splits strip any trailing 'T'/space-separated time portion.
dfTrans = dfparquetSrcTrans.withColumn("year",sf.split("createdDate","\-")[0]) \
.withColumn("month",sf.split("createdDate","\-")[1]) \
.withColumn("day",sf.split((sf.split((sf.split("createdDate","\-")[2]),"T")[0])," ")[0])
dfSummary = dfparquetSrcSummary.withColumn("year",sf.split("createdDate","\-")[0]) \
.withColumn("month",sf.split("createdDate","\-")[1]) \
.withColumn("day",sf.split((sf.split((sf.split("createdDate","\-")[2]),"T")[0])," ")[0])
dfStreams = dfparquetSrcStreams.withColumn("year",sf.split("createdDate","\-")[0]) \
.withColumn("month",sf.split("createdDate","\-")[1]) \
.withColumn("day",sf.split((sf.split((sf.split("createdDate","\-")[2]),"T")[0])," ")[0])
dfbaseDataTrans = dfTrans.select([col for col in dfTrans.columns])
dfbaseDataSummary = dfSummary.select([col for col in dfSummary.columns])
# Streams model drops the flattened per-transaction columns; those feed the
# mapping table below instead.
dfbaseDataStreams = dfStreams.select([col for col in dfStreams.columns if not col.startswith("streams_transactions_")])
# Numeric-suffixed vs purely alphabetic streams_transactions_* columns are
# handled separately, then unioned into one stream->transaction mapping.
dfbaseDataMapTblInt = dfStreams.select(sf.col("id"),sf.col("year"),sf.col("month"),sf.col("day"),sf.col("streams_id"),dfStreams.colRegex("`streams_transactions_[0-9_]+_id`"))
dfbaseDataMapTblStr = dfStreams.select(sf.col("id"),sf.col("year"),sf.col("month"),sf.col("day"),sf.col("streams_id"),dfStreams.colRegex("`streams_transactions_[a-zA-Z_]*`"))
dfbaseDataMapTblInt_1 = dfbaseDataMapTblInt.withColumn("streams_transactions_id", concat_udf(sf.sort_array(sf.array([col for col in dfbaseDataMapTblInt.columns if col.startswith("streams_transactions_")]))))
dfbaseDataMapTblInt_2 = dfbaseDataMapTblInt_1.select(sf.col("id"),sf.col("year"),sf.col("month"),sf.col("day"),sf.col("streams_id"),sf.col("streams_transactions_id"))
# Explode the '~'-joined id string into one row per transaction id.
dfbaseDataMapTblInt_2 = dfbaseDataMapTblInt_2.withColumn("transactions_id",sf.explode(sf.split(sf.trim(sf.regexp_replace("streams_transactions_id","~"," "))," ")))
# .withColumn("replace",sf.trim(sf.regexp_replace("streams_transactions_id","~"," "))) \
# dfbaseDataMapTblInt_2.show(10,False)
dfbaseDataTransFinal = dfbaseDataTrans.select(sf.col("id").alias("mongoId"),sf.col("year"),sf.col("month"),sf.col("day"),sf.col("transactions_id").alias("tran_transactions_id"),sf.col("transactions_value").alias("tran_transactions_value"),sf.col("transactions_date").alias("tran_transactions_date")).distinct()
dfbaseDataSummaryFinal = dfbaseDataSummary.select(sf.col("id").alias("mongoId"),sf.col("year"),sf.col("month"),sf.col("day"),sf.col("summary_end_date"),sf.col("summary_num_transactions"),sf.col("summary_start_date"),sf.col("summary_total_irregular_income"),sf.col("summary_total_regular_income"))
# dfbaseDataStreamsFinal = dfbaseDataStreams.select(sf.col("id"),sf.col("year"),sf.col("month"),sf.col("day"),sf.col("applicantId"),sf.col("applicationSource") \
# ,sf.col("clientID"),sf.col("createdDate"),sf.col("loanApplicationId"),sf.col("mvpApplicantId") \
# ,sf.col("noHit"),sf.col("successful"),sf.col("timestamp"),sf.col("updatedAt"),sf.col("createdDatePT") \
# ,sf.col("transactions_id").alias("tran_transactions_id"),sf.col("transactions_value").alias("tran_transactions_value"),sf.col("transactions_date").alias("tran_transactions_date")).distinct()
dfbaseDataMapTblInt_3 = dfbaseDataMapTblInt_2.select(sf.col("id").alias("mongoId"),sf.col("year"),sf.col("month"),sf.col("day"),sf.col("streams_id").alias("mptbl_streams_id"),sf.col("transactions_id").alias("mptbl_transactions_id"))
dfbaseDataMapTblStr_1 = dfbaseDataMapTblStr.select(sf.col("id").alias("mongoId"),sf.col("year"),sf.col("month"),sf.col("day"),sf.col("streams_id").alias("mptbl_streams_id"),sf.col("streams_transactions_id").alias("mptbl_transactions_id"))
dfbaseDataMapTblFinal = dfbaseDataMapTblInt_3.union(dfbaseDataMapTblStr_1)
# Write each model partitioned by year/month/day; with
# partitionOverwriteMode=dynamic (set above) only this run's partitions
# are replaced.
dfbaseDataTransFinal.repartition(sf.col("year"),sf.col("month"),sf.col("day")) \
.write.format("parquet") \
.partitionBy("year","month","day") \
.mode("overwrite") \
.save(tgtfilePathTrans)
dfbaseDataSummaryFinal.repartition(sf.col("year"),sf.col("month"),sf.col("day")) \
.write.format("parquet") \
.partitionBy("year","month","day") \
.mode("overwrite") \
.save(tgtfilePathSummary)
dfbaseDataStreams.repartition(sf.col("year"),sf.col("month"),sf.col("day")) \
.write.format("parquet") \
.partitionBy("year","month","day") \
.mode("overwrite") \
.save(tgtfilePathStreams)
dfbaseDataMapTblFinal.repartition(sf.col("year"),sf.col("month"),sf.col("day")) \
.write.format("parquet") \
.partitionBy("year","month","day") \
.mode("overwrite") \
.save(tgtfilePathMapTbl)
import argparse
import os
import random
import subprocess
# Cap math-library thread pools before numpy/cv2 initialise so each pool
# worker process stays single-threaded.
os.environ["MKL_NUM_THREADS"] = "1"
os.environ["NUMEXPR_NUM_THREADS"] = "1"
os.environ["OMP_NUM_THREADS"] = "1"
from functools import partial
from glob import glob
from multiprocessing.pool import Pool
from os import cpu_count
import cv2
# Disable OpenCL and OpenCV-internal threading for the same reason.
cv2.ocl.setUseOpenCL(False)
cv2.setNumThreads(0)
from tqdm import tqdm
def compress_video(video, root_dir):
    """Re-encode *video* with libx264 at a randomly chosen CRF (23/28/32),
    writing the result to <root_dir>/compressed/<parent_dir>/<name>.

    Failures are logged and swallowed so one bad video does not kill the
    worker pool.
    """
    parent_dir = video.split("/")[-2]
    out_dir = os.path.join(root_dir, "compressed", parent_dir)
    os.makedirs(out_dir, exist_ok=True)
    video_name = video.split("/")[-1]
    out_path = os.path.join(out_dir, video_name)
    lvl = random.choice([23, 28, 32])
    # Pass the command as an argument list with shell=False: robust against
    # spaces and shell metacharacters in file names (the original built a
    # shell string, which breaks on such paths).
    command = ["ffmpeg", "-i", video, "-c:v", "libx264", "-crf", str(lvl),
               "-threads", "1", out_path]
    try:
        subprocess.check_output(command, stderr=subprocess.STDOUT)
    except Exception as e:
        # Typo fixed ("vide"); include the failing path for debuggability.
        print("Could not process video {}: {}".format(video, str(e)))
if __name__ == '__main__':
    parser = argparse.ArgumentParser(
        description="Extracts jpegs from video")
    parser.add_argument("--root-dir", help="root directory", default="/mnt/sota/datasets/deepfake")
    args = parser.parse_args()
    # All mp4 files exactly one directory level below the root.
    videos = [video_path for video_path in glob(os.path.join(args.root_dir, "*/*.mp4"))]
    # Leave two cores free; tqdm tracks completion of the unordered map.
    with Pool(processes=cpu_count() - 2) as p:
        with tqdm(total=len(videos)) as pbar:
            for v in p.imap_unordered(partial(compress_video, root_dir=args.root_dir), videos):
                pbar.update()
|
from mord import LogisticAT
from sklearn.ensemble import RandomForestClassifier
from sklearn.linear_model import LogisticRegression
from sklearn.linear_model import RidgeClassifier
from xgboost import XGBClassifier
import numpy as np
def get_estimator(settings):
    """Return an estimator instance as specified in the settings dict.

    settings: dict with keys "name" (an alias or dotted class path) and
    "params" (kwargs forwarded to the estimator constructor).

    Raises NotImplementedError for unrecognised names.
    """
    name = settings["name"]
    params = settings["params"]
    if name in ["Ordinal", "mord.LogisticAT"]:
        return LogisticAT(**params)
    elif name in ["Multiclass_Linear",
                  "sklearn.linear_model.LogisticRegression"]:
        return LogisticRegression(**params)
    elif name in ["Multiclass_Nonlinear",
                  "sklearn.ensemble.RandomForestClassifier"]:
        return RandomForestClassifier(**params)
    elif name in ["xgboost.XGBClassifier"]:
        return XGBClassifier(**params)
    elif name in ["sklearn.linear_model.RidgeClassifier"]:
        # Subclass defined below that adds a predict_proba-like interface.
        return RidgeClassifierBTR(**params)
    else:
        # Include the offending name so misconfigurations are diagnosable.
        raise NotImplementedError("Unknown estimator name: {!r}".format(name))
class RidgeClassifierBTR(RidgeClassifier):
    """RidgeClassifier extended with a predict_proba-style method for LPOCV."""
    def __init__(self, *args, **kwargs):
        super(RidgeClassifierBTR, self).__init__(*args, **kwargs)
    def predict_proba(self, X_test):
        """Return hyperplane distances shaped like predict_proba output.

        Not probabilities, but the values are rankable, which is all LPOCV
        needs.  predict_proba conventionally yields one row per sample,
        [p_class_A, p_class_B] with p_class_A = 1 - p_class_B; here each row
        is [dist_from_hyperplane, -dist_from_hyperplane], e.g.::

            array([[ 0.5608, -0.5608],
                   [ 0.5626, -0.5626]])
        """
        distances = self.decision_function(X_test)
        paired = np.array([distances, -distances])
        return paired.T
from dao.parking_spot_pay_information import ParkingSpotPayInformation
from dao.parking_spot_information import ParkingSpotInformation
from dao.resident_information import ResidentInformation
from dao.pr import PR
from typing import *
from global_var import db
def get_parking_spot_pay_all_from_pid(pid: int):
    """Return all pay records for parking spot *pid* as a list of dicts.

    Each dict carries id, billing date, pay date ("未支付" when unpaid),
    amount, and the paying resident's username ("未支付" when unpaid).
    """
    # Annotation fix: the query yields (ParkingSpotPayInformation, username)
    # tuples; the original List[X, Y.attr] annotation was malformed.
    parking_spot_pay_infos: List[Tuple[
        ParkingSpotPayInformation,
        str
    ]] = db.session.query(ParkingSpotPayInformation, ResidentInformation.username).filter(
        ParkingSpotInformation.id == pid,
        ParkingSpotInformation.id == ParkingSpotPayInformation.pid
    ).all()
    return [{
        'id': parking_spot_pay_info[0].id,
        # Bug fix: %M formats minutes; %m is the month.
        'date': parking_spot_pay_info[0].date.strftime('%Y-%m-%d'),
        'pay_date': parking_spot_pay_info[0].pay_date.strftime('%Y-%m-%d') if parking_spot_pay_info[0].ispay else "未支付",
        'pay_amount': parking_spot_pay_info[0].pay_amount,
        'pay_username': parking_spot_pay_info[1] if parking_spot_pay_info[0].ispay else "未支付"
    } for parking_spot_pay_info in parking_spot_pay_infos]
def get_parking_spot_pay_all_from_username(username: str):
    """Return every pay record for all parking spots rented by *username*.

    Joins resident -> PR (rental link) -> pay records; unpaid entries show
    "未缴费" for the date/handler fields.
    """
    parking_spot_pays: List[Tuple[
        ParkingSpotPayInformation,
        str
    ]] = db.session.query(
        ParkingSpotPayInformation,
        ResidentInformation.username
    ).filter(
        ResidentInformation.username == username,
        PR.rid == ResidentInformation.id,
        ParkingSpotPayInformation.pid == PR.pid
    ).all()
    return [
        {
            "id": parking_spot_pay[0].id,
            # Bug fix: %M formats minutes; %m is the month.
            "date": parking_spot_pay[0].date.strftime('%Y-%m-%d'),
            "pay_date": parking_spot_pay[0].pay_date.strftime('%Y-%m-%d') if parking_spot_pay[0].ispay else "未缴费",
            "pay_amount": parking_spot_pay[0].pay_amount,
            "handle_id": parking_spot_pay[1] if parking_spot_pay[0].ispay else "未缴费",
            "is_pay": "已缴费" if parking_spot_pay[0].ispay else "未缴费"
        }
        for parking_spot_pay in parking_spot_pays]
|
import logging
import pytest
from remote_command_executor import RemoteCommandExecutionError, RemoteCommandExecutor
# timeout in seconds
OPENFOAM_INSTALLATION_TIMEOUT = 300
OPENFOAM_JOB_TIMEOUT = 5400 # Takes long time because during the first time, it's not only execute the job but also
# builds and installs many things
TASK_VCPUS = 36 # vCPUs are cut in a half because multithreading is disabled
BASELINE_CLUSTER_SIZE_ELAPSED_SECONDS = {8: 742, 16: 376, 32: 185}
PERF_TEST_DIFFERENCE_TOLERANCE = 5
def perf_test_difference(perf_test_result, number_of_nodes):
    """Percentage difference of *perf_test_result* against the recorded
    baseline for a cluster of *number_of_nodes* nodes (positive == slower)."""
    baseline = BASELINE_CLUSTER_SIZE_ELAPSED_SECONDS[number_of_nodes]
    return 100 * (perf_test_result - baseline) / baseline
def openfoam_installed(headnode):
    """Return True iff the shared SubspaceBenchmarks directory already exists
    on *headnode* (probed via a remote shell test)."""
    probe = '[ -d "/shared/ec2-user/SubspaceBenchmarks" ]'
    try:
        headnode.run_remote_command(probe)
    except RemoteCommandExecutionError:
        return False
    return True
@pytest.mark.parametrize(
    "number_of_nodes",
    [[8, 16, 32]],
)
def test_openfoam(
    vpc_stack,
    instance,
    os,
    region,
    scheduler,
    pcluster_config_reader,
    clusters_factory,
    number_of_nodes,
    test_datadir,
):
    """Run the OpenFOAM benchmark at several cluster sizes and fail if any
    size is more than PERF_TEST_DIFFERENCE_TOLERANCE percent slower than its
    recorded baseline."""
    # Size the cluster for the largest run; smaller runs reuse it.
    cluster_config = pcluster_config_reader(number_of_nodes=max(number_of_nodes))
    cluster = clusters_factory(cluster_config)
    logging.info("Cluster Created")
    remote_command_executor = RemoteCommandExecutor(cluster)
    if not openfoam_installed(remote_command_executor):
        logging.info("Installing OpenFOAM")
        remote_command_executor.run_remote_script(
            str(test_datadir / "openfoam.install.sh"), timeout=OPENFOAM_INSTALLATION_TIMEOUT, hide=False
        )
    logging.info("OpenFOAM Installed")
    performance_degradation = {}
    subspace_benchmarks_dir = "/shared/ec2-user/SubspaceBenchmarks"
    for node in number_of_nodes:
        logging.info(f"Submitting OpenFOAM job with {node} nodes")
        remote_command_executor.run_remote_command(
            f'bash openfoam.slurm.sh "{subspace_benchmarks_dir}" "{node}" 2>&1',
            additional_files=[str(test_datadir / "openfoam.slurm.sh")],
            timeout=OPENFOAM_JOB_TIMEOUT,
        )
        perf_test_result = remote_command_executor.run_remote_script(
            (str(test_datadir / "openfoam.results.sh")), hide=False
        )
        # The results script prints elapsed seconds as its last output line.
        output = perf_test_result.stdout.strip()
        elapsed_time = output.split("\n")[-1].strip()
        baseline_value = BASELINE_CLUSTER_SIZE_ELAPSED_SECONDS[node]
        logging.info(f"The elapsed time for {node} nodes is {elapsed_time} seconds")
        percentage_difference = perf_test_difference(int(elapsed_time), node)
        if percentage_difference < 0:
            outcome = "improvement"
        else:
            outcome = "degradation"
        logging.info(
            f"Nodes: {node}, Baseline: {baseline_value} seconds, Observed: {elapsed_time} seconds, "
            f"Percentage difference: {percentage_difference}%, Outcome: {outcome}"
        )
        if percentage_difference > PERF_TEST_DIFFERENCE_TOLERANCE:
            performance_degradation[node] = perf_test_result.stdout
    if performance_degradation:
        degraded_nodes = list(performance_degradation.keys())
        pytest.fail(f"Performance test results show performance degradation for the following nodes: {degraded_nodes}")
    else:
        logging.info("Performance test results show no performance degradation")
|
# coding: utf-8
# from config.views.config_pub import *
# from config.forms import *
import time
from django.http import HttpResponseRedirect
from django.core.urlresolvers import reverse
from django.contrib.auth.decorators import login_required, permission_required
import json
from django.http import HttpResponse
from django.shortcuts import render_to_response
from django.template import RequestContext
from common.const import AuthCodeName, get_nav_text, CmsModule, CheckOpType, CONFIG_ITEMS, \
CheckStatu, open_type
from common.views import get_check_status_str, filter_none, make_timestamp, timestamp2str
from config.forms import CmsNativeActivityForm, ActivitiesForm
from config.views.config_pub import CMS_CHECK_ON, CmsCheck, new_associate, exchange_obj
from main.models import CmsChannels, get_valid_time, get_city_str, CmsNativeActivity, get_scene_name, \
timestamp2str_space, getCVT, CmsViewNativeActivity
from main.views.main_pub import add_main_var, get_scenes, get_city_list, get_city_group, format_form, \
get_actions_select
@login_required
@add_main_var
def native_activities(request, template_name):
    """Render the native-activities page for the channel identified by the
    t (type), v (app version) and c (channel number) query parameters.
    t defaults to "1" when missing or empty."""
    t = request.GET.get("t") or "1"
    v = request.GET.get("v")
    c = request.GET.get("c")
    channel = CmsChannels.objects.get(channel_no=c, app_version__app_version=v, app_version__type_id=t).id
    context = {
        "text": get_nav_text(t),
        "t": t,
        "v": v,
        "c": c,
        "channel": channel,
    }
    return render_to_response(template_name, context,
                              context_instance=RequestContext(request))
@login_required
def search_activities(request):
    """Return (as a JSON list) display rows for every native activity
    attached to the channel given in the query string.

    Each row: [scene name, sort, title, title_color, subtitle, image_url,
    start, end, action_id, valid_time, cities, status text, status int, id].
    Rows are sorted by (scene name, sort) and normalised by filter_none.
    """
    channel_id = request.GET.get('channel')
    objs = CmsNativeActivity.objects.filter(cmsviewnativeactivity__channel_id=channel_id)
    result = []
    for obj in objs:
        try:
            status_str, status_int = get_check_status_str("CmsNativeActivity", obj.id)
            result.append([
                get_scene_name(obj.scene_id),
                obj.sort,
                obj.title,
                obj.title_color,
                obj.subtitle,
                obj.image_url,
                timestamp2str_space(obj.start_time),
                timestamp2str_space(obj.end_time),
                obj.action_id,
                get_valid_time(obj.valid_time),
                get_city_str(obj.city),
                status_str,
                status_int,
                obj.id])
        except Exception:
            # Narrowed from a bare except: still skip activities whose
            # related data is broken, without swallowing SystemExit etc.
            continue
    result.sort(key=lambda o: (o[0], o[1]))
    filter_none(result)
    return HttpResponse(json.dumps(result))
@login_required
@permission_required(u'%s.%s' % (AuthCodeName.APP_LABEL, AuthCodeName.CONFIG), raise_exception=True)
def exchange_activities(request):
    """Swap the sort order of two native activities (POST: id1, id2, channel)."""
    first_id = request.POST.get("id1")
    second_id = request.POST.get("id2")
    channel_id = request.POST.get("channel")
    exchange_obj(CmsNativeActivity, first_id, CmsNativeActivity, second_id,
                 channel_id, CmsModule.CONFIG_NATIVE_ACTIVITY, request,
                 "sort", "sort")
    return HttpResponse(0)
@login_required
@permission_required(u'%s.%s' % (AuthCodeName.APP_LABEL, AuthCodeName.CONFIG), raise_exception=True)
@add_main_var
def new_activity(request, template_name):
    """
    Config library: create a native (local) activity.
    url: {% url 'new_native_activity' %}?channel={{ channel }}
    GET
      params: channel
      returns: fields, errors, scenes (scene list), open_type (categories),
               citygroups, cities
    POST
      params: the CmsNativeActivityForm fields; on success redirects back to
              the activities list, creating audit CmsCheck rows when review
              mode (CMS_CHECK_ON) is enabled.
    """
    channel_id = request.GET.get('channel')
    c, v, t = getCVT(channel_id)
    channel = CmsChannels.objects.get(id=channel_id)
    # Resolve the nav display text from the channel type.
    text = get_nav_text(str(t))
    times = ['start_time', 'end_time', 'open_time', 'close_time']
    if request.method == 'POST':
        form = CmsNativeActivityForm(request.POST)
        # Convert submitted date strings to unix timestamps in place.
        for time1 in times:
            if form.data[time1]:
                form.data[time1] = make_timestamp(form.data[time1])
        if form.is_valid():
            activity = form.save()
            # Local import: the GET-branch loop below rebinds the name
            # 'time', so the module is imported where it is needed.
            import time
            if CMS_CHECK_ON:
                check = CmsCheck(
                    channel_id=channel_id,
                    module=CmsModule.CONFIG_NATIVE_ACTIVITY,
                    table_name="CmsNativeActivity",
                    data_id=activity.id,
                    op_type=CheckOpType.NEW,
                    status=CheckStatu.WAIT_SUBMIT,
                    is_show=1,
                    alter_person=request.user.username,
                    alter_date=time.strftime("%Y-%m-%d %X", time.localtime()))
                check.save()
            new_associate(channel_id, activity.id, CONFIG_ITEMS.NATIVE_ACTIVITY, request)
            # Attach the activity to the channel via the view (join) row.
            oCmsViewNativeActivity = CmsViewNativeActivity(nactivity=activity, channel=channel)
            oCmsViewNativeActivity.save()
            if CMS_CHECK_ON:
                check = CmsCheck(
                    channel_id=channel_id,
                    module=CmsModule.CONFIG_NATIVE_ACTIVITY,
                    table_name="CmsViewNativeActivity",
                    data_id=oCmsViewNativeActivity.id,
                    op_type=CheckOpType.NEW,
                    status=CheckStatu.WAIT_SUBMIT,
                    is_show=0,
                    alter_person=request.user.username,
                    alter_date=time.strftime("%Y-%m-%d %X", time.localtime()))
                check.save()
            return HttpResponseRedirect(reverse("native_activities") + "?t=%d&c=%s&v=%s" % (t, c, v))
    else:
        # NOTE(review): the GET branch renders ActivitiesForm while POST
        # validates CmsNativeActivityForm — looks intentional for rendering,
        # but confirm the two forms' fields line up.
        form = ActivitiesForm()
    actions = get_actions_select()
    scenes = get_scenes()
    cities = get_city_list()
    citygroups = get_city_group()
    errors, fields = format_form(form)
    # Render stored timestamps back to display strings.  NOTE(review): the
    # loop variable 'time' shadows the stdlib module within this scope.
    for time in times:
        if time in fields.keys() and json.loads(fields[time]):
            fields[time] = json.dumps(timestamp2str(fields[time]))
    return render_to_response(template_name, {
        "scenes": scenes,
        "actions": actions,
        "fields": fields,
        "errors": errors,
        "cities": cities,
        "citygroups": citygroups,
        "open_type": open_type,
        "t": t,
        "c": c,
        "v": v,
        "text": text,
        "channel": channel_id
    }, context_instance=RequestContext(request))
@login_required
@permission_required(u'%s.%s' % (AuthCodeName.APP_LABEL, AuthCodeName.CONFIG), raise_exception=True)
@add_main_var
def edit_activity(request, template_name):
    """
    Config library: edit an existing native (local) activity.
    url: {% url 'edit_native_activity' %}?channel={{ channel }}&id={{ id }}
    GET
      params: channel, id
      returns: fields, errors, scenes (scene list), open_type (categories),
               citygroups, cities, actions
    POST
      params: the CmsNativeActivityForm fields; on success redirects back to
              the activities list, creating an audit CmsCheck row when
              review mode (CMS_CHECK_ON) is enabled.
    """
    channel_id = request.GET.get('channel')
    c, v, t = getCVT(channel_id)
    id = request.GET.get("id")
    oCmsNativeActivity = CmsNativeActivity.objects.get(id=id)
    # Resolve the nav display text from the channel type.
    text = get_nav_text(str(t))
    times = ['start_time', 'end_time', 'open_time', 'close_time']
    if request.method == 'POST':
        form = CmsNativeActivityForm(request.POST, instance=oCmsNativeActivity)
        # Convert submitted date strings to unix timestamps in place.
        # NOTE(review): the loop variable 'time' shadows the stdlib module.
        for time in times:
            if form.data[time]:
                form.data[time] = make_timestamp(form.data[time])
        if form.is_valid():
            activity = form.save()
            # Local import needed because 'time' was rebound above.
            import time
            if CMS_CHECK_ON:
                check = CmsCheck(
                    channel_id=channel_id,
                    module=CmsModule.CONFIG_NATIVE_ACTIVITY,
                    table_name="CmsNativeActivity",
                    data_id=activity.id,
                    op_type=CheckOpType.EDIT,
                    status=CheckStatu.WAIT_SUBMIT,
                    is_show=1,
                    alter_person=request.user.username,
                    alter_date=time.strftime("%Y-%m-%d %X", time.localtime()))
                check.save()
            return HttpResponseRedirect(reverse("native_activities") + "?t=%d&c=%s&v=%s" % (t, c, v))
    else:
        # NOTE(review): GET renders ActivitiesForm while POST validates
        # CmsNativeActivityForm — confirm the field sets match.
        form = ActivitiesForm(instance=oCmsNativeActivity)
    scenes = get_scenes()
    cities = get_city_list()
    citygroups = get_city_group()
    actions = get_actions_select()
    errors, fields = format_form(form)
    # Render stored timestamps back to display strings.
    for time in times:
        if time in fields.keys() and json.loads(fields[time]):
            fields[time] = json.dumps(timestamp2str(fields[time]))
    return render_to_response(template_name, {
        "scenes": scenes,
        "fields": fields,
        "errors": errors,
        "cities": cities,
        "actions": actions,
        "citygroups": citygroups,
        "open_type": open_type,
        "t": t,
        "c": c,
        "v": v,
        "text": text,
        "channel": channel_id,
        "id": id
    }, context_instance=RequestContext(request))
@login_required
@permission_required(u'%s.%s' % (AuthCodeName.APP_LABEL, AuthCodeName.CONFIG), raise_exception=True)
def delete_activity(request):
    """Delete a native activity and its per-channel view rows.

    POST params: id (the activity), channel.
    When review mode (CMS_CHECK_ON) is enabled, an audit CmsCheck row is
    written for every view row and for the activity itself.
    """
    id = request.POST.get("id")
    channel_id = request.POST.get('channel')
    views = CmsViewNativeActivity.objects.filter(nactivity_id=id)
    if CMS_CHECK_ON:
        # Record an audit row per channel-view association before deleting.
        for view in views:
            check = CmsCheck(
                channel_id=channel_id,
                module=CmsModule.CONFIG_NATIVE_ACTIVITY,
                table_name="CmsViewNativeActivity",
                data_id=view.id,
                op_type=CheckOpType.DELETE,
                status=CheckStatu.WAIT_SUBMIT,
                is_show=0,
                alter_person=request.user.username,
                alter_date=time.strftime("%Y-%m-%d %X", time.localtime()))
            check.save()
    views.delete()
    CmsNativeActivity.objects.filter(id=id).delete()
    if CMS_CHECK_ON:
        check = CmsCheck(
            channel_id=channel_id,
            module=CmsModule.CONFIG_NATIVE_ACTIVITY,
            table_name="CmsNativeActivity",
            data_id=id,
            op_type=CheckOpType.DELETE,
            status=CheckStatu.WAIT_SUBMIT,
            is_show=1,
            alter_person=request.user.username,
            alter_date=time.strftime("%Y-%m-%d %X", time.localtime()))
        check.save()
    return HttpResponse(0)
|
# This script contains useful functions
import sys
import os
import socket
import subprocess
from . import logger
install_data_dir = [
os.path.join(os.environ['HOME'], '.pdusim'),
os.path.join(sys.prefix, 'pdusim'),
os.path.join(sys.prefix, 'share', 'pdusim'),
os.path.join(os.path.split(__file__)[0], 'pdusim'),
os.path.dirname(os.path.abspath(__file__))
]
def run_command(cmd="", shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE):
    """
    Run *cmd* via subprocess and return its output, raising on failure.

    :param cmd: the command should run
    :param shell: if the type of cmd is string, shell should be set as True, otherwise, False
    :param stdout: reference subprocess module
    :param stderr: reference subprocess module
    :return: tuple (return code, output)
    :raises Exception: on non-zero exit, carrying (message, stdout)
    """
    child = subprocess.Popen(cmd, shell=shell, stdout=stdout, stderr=stderr)
    cmd_result = child.communicate()
    cmd_return_code = child.returncode
    if cmd_return_code != 0:
        result = ""
        if cmd_result[1] is not None:
            # NOTE(review): communicate() yields bytes on Python 3, so this
            # str + bytes concatenation would raise TypeError there — this
            # path presumably targets Python 2; confirm.
            result = cmd + ":" + cmd_result[1]
        else:
            result = cmd
        logger.error(result)
        # Failure is reported as an exception carrying captured stdout.
        raise Exception(result, cmd_result[0])
    return 0, cmd_result[0]
def get_install_dir():
    """Return the first candidate install directory that contains
    conf/host.conf, or None when no candidate qualifies.

    The original kept a 'configdir_found' flag that was never set to True
    (dead code); the loop either returns a directory or falls through.
    """
    for candidate in install_data_dir:
        if os.path.exists(os.path.join(candidate, 'conf', 'host.conf')):
            return candidate
    return None
def add_third_party_to_path():
    """Prepend every bundled third-party package directory to sys.path."""
    for base in install_data_dir:
        third_party = os.path.join(base, 'third-party')
        if not os.path.exists(third_party):
            continue
        for entry in os.listdir(third_party):
            sys.path.insert(0, os.path.join(third_party, entry))
def get_pid_from_pid_file(file):
    """Return the PID recorded in *file* (first line, stripped) as a string."""
    with open(file, "r+") as handle:
        return handle.readline().strip()
def check_if_port_in_use(address, port):
    """
    True if port in use, false if not in use
    """
    probe = socket.socket()
    try:
        probe.connect((address, port))
    except socket.error:
        probe.close()
        return False
    probe.close()
    return True
import json, ast
from optparse import OptionParser
from datetime import datetime
from elasticsearch import Elasticsearch
def get_options():
    """Parse the CLI options; exit via parser.error() when a required
    option is missing (see required_args)."""
    parser = OptionParser()
    parser.add_option('-s', '--es-host', dest='es_host', help='ES host name')
    parser.add_option('-o', dest='es_output', help='ES Output file name')
    options, _args = parser.parse_args()
    for name in required_args():
        if getattr(options, name) is None:
            parser.error('Missing required argument %s' % name)
    return options
def required_args():
    """Option destinations that must be supplied on the command line."""
    return list(('es_host',))
if __name__ == '__main__':
    # Record the start time, then parse and validate the CLI options.
    start_time = datetime.now()
    options = get_options()
|
from torch.utils.data.dataset import Dataset
import numpy as np
from PIL import Image
from torchvision import transforms
# Preprocessing pipeline: resize to 60x60, convert to tensor, identity
# normalisation (zero shift, unit scale).  NOTE(review): this assignment
# shadows the imported torchvision.transforms module name.
transforms = transforms.Compose([
    transforms.Resize((60, 60)),
    transforms.ToTensor(),
    transforms.Normalize([0.0, 0.0, 0.0], [1.0, 1.0, 1.0])
])
def load_one_image(image_path, transforms):
    """Open *image_path*, apply *transforms*, and return the result as a
    float tensor with a leading batch dimension of 1."""
    img = Image.open(image_path)
    tensor = transforms(img).float()
    return tensor.unsqueeze(0)
class TrainValTestDataset(Dataset):
    """Deterministic train/validate/test split view over an indexable dataset.

    The indices are shuffled once with the given random_state, then
    partitioned into [0, train_size), [train_size, train_size + val_size)
    and the remainder; *mode* selects which partition this view exposes.
    """
    def __init__(self, dataset, train_size=0.6, val_size=0.2, mode="train", random_state=42):
        assert mode in ["train", "validate", "test"]
        assert train_size + val_size <= 1.0
        assert train_size <= 1.0
        assert val_size <= 1.0
        self.dataset = dataset
        # Seed NumPy so the shuffle (and hence the split) is reproducible.
        np.random.seed(random_state)
        shuffled = np.arange(len(dataset))
        np.random.shuffle(shuffled)
        total = len(shuffled)
        n_train = int(train_size * total)
        n_train_val = int((train_size + val_size) * total)
        partitions = {
            "train": shuffled[:n_train],
            "validate": shuffled[n_train:n_train_val],
            "test": shuffled[n_train_val:],
        }
        self.index = partitions[mode]
    def __getitem__(self, item: int) -> tuple:
        return self.dataset[self.index[item]]
    def __len__(self) -> int:
        return len(self.index)
|
from ebonite.core.objects.requirements import InstallableRequirement, Requirements, resolve_requirements
def test_resolve_requirements_arg():
    """A Requirements instance passes through resolve_requirements unchanged."""
    reqs = Requirements([InstallableRequirement('dumb', '0.4.1'), InstallableRequirement('art', '4.0')])
    assert resolve_requirements(reqs) == reqs
def test_resolve_requirement_arg():
    """A single requirement is wrapped into Requirements.installable."""
    single = InstallableRequirement('dumb', '0.4.1')
    resolved = resolve_requirements(single)
    assert resolved.installable[0] == single
def test_resolve_requirement_list_arg():
    """A list of requirement objects resolves to an equal installable list."""
    reqs = [InstallableRequirement('dumb', '0.4.1'), InstallableRequirement('art', '4.0')]
    resolved = resolve_requirements(reqs)
    assert len(resolved.installable) == 2
    assert resolved.installable == reqs
def test_resolve_str_arg():
    """A pip-style string resolves to one installable requirement."""
    spec = "dumb==0.4.1"
    resolved = resolve_requirements(spec)
    assert resolved.installable[0].to_str() == spec
def test_resolve_str_list_arg():
    """A list of pip-style strings round-trips through resolution."""
    specs = ["dumb==0.4.1", "art==4.0"]
    resolved = resolve_requirements(specs)
    assert len(resolved.installable) == 2
    assert specs == [r.to_str() for r in resolved.installable]
|
# Read space-separated ints from stdin and print each value repeated by its
# frequency, ordered by (frequency, index of first appearance).
values = list(map(int, input().split()))
counts = {}
for v in values:
    counts[v] = counts.get(v, 0) + 1
# Bug fix: the sort key referenced an undefined name 'arr' (NameError at
# runtime); the original input list must be kept so ties on frequency are
# broken by first-appearance order.
for v in sorted(counts, key=lambda k: (counts[k], values.index(k))):
    for _ in range(counts[v]):
        print(v, end=" ")
|
#Given an array nums with n integers, your task is to check if it could become non-decreasing by modifying at most one element.
#We define an array is non-decreasing if nums[i] <= nums[i + 1] holds for every i (0-based) such that (0 <= i <= n - 2).
class Solution(object):
    def checkPossibility(self, nums):
        """
        :type nums: List[int]
        :rtype: bool

        The array can be made non-decreasing with at most one modification
        iff either the forward running-max scan or the backward running-min
        scan encounters at most one out-of-order element.
        """
        drops = 0
        running_max = float('-inf')
        for value in nums:
            if running_max > value:
                drops += 1
            else:
                running_max = value
        rises = 0
        running_min = float('inf')
        for value in reversed(nums):
            if running_min < value:
                rises += 1
            else:
                running_min = value
        return drops <= 1 or rises <= 1
|
def compare(number1, number2):
    """Return an English sentence describing how the two numbers compare."""
    if number1 == number2:
        result = 'They are equal'
    elif number1 > number2:
        result = '{} is greater than {}'.format(number1, number2)
    elif number1 < number2:
        result = '{} is greater than {}'.format(number2, number1)
    else:
        # Unorderable inputs (e.g. NaN) fall through to None, matching the
        # original's implicit return.
        result = None
    return result
# Import-time side effect: announce that the module was loaded.
print('You imported compare_two_numbers.py')
|
#! /usr/bin/env python
# Convert a Bruker 2D FID ("bruker_2d") into NMRPipe format ("2d_pipe.fid").
import nmrglue as ng
# read in the Bruker data
dic,data = ng.bruker.read("bruker_2d")
# Set the spectral parameters (start from nmrglue's guess, then override)
u = ng.bruker.guess_udic(dic, data)
# u[1] = direct dimension (1H)   ; u[0] = indirect dimension (13C)
u[1]['size'] = 768 ; u[0]['size'] = 600
u[1]['complex'] = True ; u[0]['complex'] = True
u[1]['encoding'] = 'direct' ; u[0]['encoding'] = 'states'
u[1]['sw'] = 11061.947 ; u[0]['sw'] = 4000.000
u[1]['obs'] = 800.134 ; u[0]['obs'] = 201.204
# carrier frequency expressed as ppm * observe frequency
u[1]['car'] = 4.773 * 800.134 ; u[0]['car'] = 58.742 * 201.204
u[1]['label'] = '1H' ; u[0]['label'] = '13C'
# create the converter object and initialize with Bruker data
C = ng.convert.converter()
C.from_bruker(dic, data, u)
# create NMRPipe data and then write it out
ng.pipe.write("2d_pipe.fid", *C.to_pipe(), overwrite=True)
|
from api import *
class Pheromone_A(Chemical) :
    # Diffusing, slowly decaying chemical; emitted by Start agents
    # (presumably the first slot of the reaction tuples — confirm in api).
    diffusion = 1.
    decay = 0.01
class Pheromone_B(Chemical) :
    # Diffusing, slowly decaying chemical; emitted by Finish agents
    # (presumably the second slot of the reaction tuples — confirm in api).
    diffusion = 1.
    decay = 0.01
class Material(Chemical) :
    # Deposited by Workers; decays 10x slower than the pheromones and,
    # unlike them, declares no diffusion rate.
    decay = 0.001
class Start(Agent) :
    # Stationary source at (10, 50); constantly emits the first chemical
    # (reaction tuple order appears to follow Space.chemicals — confirm in api).
    clock = 1
    pos = 10,50
    reaction = lambda agent, conc : (1.0, 0.0, 0.0)
class Finish(Agent) :
    # Stationary source at (90, 50); constantly emits the second chemical.
    clock = 1
    pos = 90,50
    reaction = lambda agent, conc : (0.0, 1.0, 0.0)
class Worker(Agent) :
    # Mobile agent that switches between exploring and laying Material,
    # driven by proximity to the Start/Finish pheromones.
    states = 'free', 'ascending'
    clock = 0.1
    # Boolean sensors: true when the given chemical exceeds the threshold.
    sensors = {
        'near_start' : Sensor(Pheromone_A, 0.5),
        'near_finish' : Sensor(Pheromone_B, 0.5),
    }
    # Actuators active only in the named state.
    actuators = {
        'explore' : Actuator('free'),
        'generate' : Actuator('ascending'),
    }
    # State machine keyed by (state, sensor, sensor); '^' marks a negated
    # sensor reading (assumed — TODO confirm against the api module).
    transitions = {
        ('free', 'near_start', 'near_finish' ) : 'free',
        ('free', 'near_start', '^near_finish') : 'ascending',
        ('free', '^near_start', 'near_finish' ) : 'free',
        ('free', '^near_start', '^near_finish') : 'free',
        ('ascending', 'near_start', 'near_finish' ) : 'ascending',
        ('ascending', 'near_start', '^near_finish') : 'ascending',
        ('ascending', '^near_start', 'near_finish' ) : 'free',
        ('ascending', '^near_start', '^near_finish') : 'ascending',
    }
    # Gradient-climbing weights per chemical, gated by the active actuator.
    ascent = {
        Pheromone_A : Agent.actuated('explore', 1.0, 0.0),
        Pheromone_B : Agent.actuated('generate', 1.0, 0.0),
    }
    displacement = 0.5
    # Material emission rate: 0.1 while 'generate' is active, else 0.
    r = Agent.actuated('generate', 0.1, 0.0)
    reaction = lambda agent, conc : (0.0, 0.0, agent.r)
class Space(Space) :
    # NOTE(review): this subclass intentionally shadows the imported Space name.
    # 100x100 arena at unit resolution with one Start, one Finish, 48 Workers.
    size_x = 0,100
    size_y = 0,100
    resolution = 1.
    chemicals = [Pheromone_A, Pheromone_B, Material]
    swarms = [Start] + [Finish] + 48*[Worker]
|
# Read a square's side length and report its perimeter and area.
x = input("Digite o valor correspondente ao lado de um quadrado:")
x = int(x)
Perímetro = 4 * x
Área = x ** 2
print("perímetro:",Perímetro,"- área:",Área)
|
# Program to print a random integer between 50 and 100 (inclusive)
# (original comment wrongly said "between 0 and 9")
# importing the random module
import random
print(random.randint(50, 100))
|
class Foo:
    # Class attribute; read below via Foo.a.
    a = 10
    def test(self):
        # Local variable only — does NOT touch the class attribute Foo.a.
        a = 54
        return a
    def metaclass(self):
        # Ordinary method; the name "metaclass" has no special meaning here.
        return "metaclass!"
    def __dict__(self):
        # NOTE(review): defining a method named __dict__ shadows the default
        # instance-dict machinery, so f.__dict__() below calls this method.
        return "__dict__!"
# Demo: class attribute vs. method-local variable vs. shadowed dunder.
print(Foo.a)
f = Foo()
print(f.test())
print(f.test())
print(f.metaclass())
print(f.__dict__())
|
#!/usr/bin/env python3
# Dense optical-flow viewer: plays the video given as argv[1] and displays
# Farneback flow magnitude as colour saturation over the grey frame.
# Press 'q' to quit.
import numpy
import cv2
import sys
cap = cv2.VideoCapture(sys.argv[1])
OVERLAY_HUE = 0 # red
prev = None
hsv = None
while cap.isOpened():
    ret, rawFrame = cap.read()
    if ret:
        frame = cv2.cvtColor(rawFrame, cv2.COLOR_BGR2GRAY)
        if hsv is None:
            # Lazily allocate the HSV overlay once the frame size is known.
            hsv = numpy.zeros_like(rawFrame)
            hsv[...,0] = OVERLAY_HUE
        if prev is not None:
            flow = cv2.calcOpticalFlowFarneback(prev, frame, None, 0.5, 10, 5, 5, 5, 1.1, 0)
            mag, ang = cv2.cartToPolar(flow[...,0], flow[...,1])
            # Saturation encodes flow magnitude; value carries the grey frame.
            hsv[...,1] = cv2.normalize(mag, None, 0, 255, cv2.NORM_MINMAX)
            hsv[...,2] = frame
            flowBgr = cv2.cvtColor(hsv, cv2.COLOR_HSV2BGR)
            cv2.imshow('Flow', flowBgr)
            if cv2.waitKey(10) & 0xFF == ord('q'):
                break
        prev = frame
    else:
        break
cap.release()
cv2.destroyAllWindows()
|
import click
from fandogh_cli.fandogh_client.namespace_client import details_namespace
from .presenter import present
from .base_commands import FandoghCommand
from .utils import format_text, TextStyle
# Click command group: `... namespace <subcommand>`; the docstring is the help text.
@click.group("namespace")
def namespace():
    """Namespace management commands"""
@click.command("status", cls=FandoghCommand)
def status():
    """show namespace details: quota and current resource usage"""
    # FIX: the help docstring previously read "list secrets filtered by type",
    # copy-pasted from an unrelated command; click shows it in --help output.
    result = details_namespace()

    def print_value(name, current, total):
        # Render one "<name>: <current> of <total>" line with CLI colouring.
        click.echo("{}: {} of {}".format(
            format_text(name, TextStyle.HEADER),
            format_text(current, TextStyle.OKGREEN),
            format_text(total, TextStyle.OKGREEN),
        ))
    click.echo('Namespace: {}'.format(result['name']))
    print_value('Service count', result['current_used_resources'].get('service_count'),
                result['quota'].get('service_limit',
                                    'N/A'))
    print_value(
        name='Memory',
        current="{} MB".format(result['current_used_resources'].get('memory_usage')),
        total="{} MB".format(result['quota'].get('memory_limit', 'N/A'))
    )
    # print_value('CPU', result['current_used_resources'].get('cpu_usage'), result['quota'].get('cpu_limit', 'N/A'))
namespace.add_command(status)
|
import numpy as np
# Hyper-parameters for a gradient-descent optimisation demo
# (comments translated from Russian).
config = {
    # Earlier single-expression form of the objective, kept for reference:
    # 'function': lambda x: -1 / ((x[0] - 1)**2 + (x[1] - 1.5)**2 + 1)
    # * np.cos(2 * (x[0] - 1)**2 + 2 * (x[1] - 1.5)**2),
    'function': lambda x: (
        -1 / ((x[0] - 1)**2 + (x[1] - 1.5)**2 + 1)
        * np.cos(2 * (x[0] - 1)**2 + 2 * (x[1] - 1.5)**2)
    ),  # objective to optimise; x is an array of the two variables
    'epsilon': 10 ** -4,  # step for numerical partial derivatives
    'alpha': 0.01,  # descent step multiplier (learning rate)
    'max_loop': 10 ** 4,  # maximum number of iterations
    'accuracy': 10 ** -2,  # tolerance for declaring the minimum found
    'dots_count': 7,  # number of start points, drawn uniformly from [low, high]
    'fps': 10,  # animation frames per second
    'save_frequency': 100,  # how often frames are saved
    'low': -3,  # lower bound of the search interval on both axes
    'high': 3,  # upper bound of the search interval on both axes
    'c_map': 'jet',  # colour map for the surface plot
    'plot_alpha': .4,  # surface transparency (so the points stay visible)
    'markers': ['D', '*', 'X', 's', 'o', 'H', 'p', '>', '<', '^', 'v'],  # point markers
    'line_style': '--',  # style of the lines between points
    'marker_size': 15,  # marker size on the plot
    'shown_dots_start': 0,  # first point (index) shown in the animation
    'shown_dots_end': 7  # last point shown in the animation (exclusive)
}
|
import win32com.client
speaker = win32com.client.Dispatch("SAPI.SpVoice")
'''
while True:
print("Enter the word you want to speak it out by computer")
s = input()
speaker.Speak(s)
'''
def speak(string):
    """Speak the given text aloud through the Windows SAPI voice."""
    voice = win32com.client.Dispatch("SAPI.SpVoice")
    voice.Speak(string)
|
#!/usr/bin/python3
from os.path import isfile, isdir
from os import mkdir, makedirs, listdir, remove
import errno
import time
from zkstate import ZKState
import json
import threading
import subprocess
import multiprocessing
import requests
from db import DataBase
from abr_hls_dash import GetABRCommand
import shutil
# Filesystem layout for the ad-insertion service.
adinsert_archive_root="/var/www/adinsert"
segment_dash_root="/var/www/adinsert/segment/dash"
segment_hls_root="/var/www/adinsert/segment/hls"
dash_root="/var/www/adinsert/dash"
hls_root="/var/www/adinsert/hls"
# Pre-rendered "skipped ad" files used as a fallback.
fallback_root="/var/www/skipped"
# Companion services.
ad_decision_server="http://ad-decision-service:8080/metadata"
ad_content_server="http://ad-content-service:8080"
# HTTP timeout (seconds) for all outbound requests.
timeout = 30
# Example of the Kafka message shape parsed by KafkaMsgParser below.
# NOTE(review): the parser reads user_info["keywords"] and ad_config["segment"],
# which differ from this template ("keyword", no "segment") — verify upstream.
request_template={
    "meta-db" : {
        "stream" : "",
        "time_range" : [],
        "time_field" : "time"
    },
    "ad_config": {
        "codec": "AVC",
        "resolution": {
            "width": 1280,
            "height": 720,
        },
        "bandwidth": 10000,
        "streaming_type": "hls"
    },
    "destination": {
        "adpath": "/var/www/adinsert/hls/xxx",
    },
    "user_info": {
        "name": "guest",
        "keyword": ["sport","car"]
    },
    "audio_fade": {
        "fade_in": "http://content-provider:8080/hls/7QDJL9c9qTI.mp4/360p_024.ts",
        "fade_out": "http://content-provider:8080/hls/7QDJL9c9qTI.mp4/360p_025.ts",
        "target_path": "/var/www/adinsert/hls/xxx"
    }
}
def ADPrefetch(ad_uri):
    """Fetch the ad clip at ad_uri into adinsert_archive_root.

    Returns the local target path on a successful HTTP download, otherwise
    None (including when ad_uri is an existing local file — NOTE(review):
    that path also falls through to None; confirm callers expect this).
    """
    target = adinsert_archive_root + "/" + ad_uri.split("/")[-1]
    if ad_uri.find("http://") != -1:
        try:
            r = requests.get(ad_uri, timeout=timeout)
            if r.status_code == 200:
                with open(target, "wb") as f:
                    f.write(r.content)
                return target
        except requests.exceptions.RequestException as e:
            print("Error sending status request in ADPrefetch()" + str(e), flush=True)
    elif not isfile(ad_uri):
        # FIX: was '"The ad content uri %s is not a file!"+ad_uri' which left
        # the %s placeholder unfilled and glued the uri to the end.
        print("The ad content uri %s is not a file!" % ad_uri)
    return None
def ADClipDecision(msg, db):
    """Pick an ad clip URI for the slot described by `msg`.

    Polls the metadata DB up to 10 times; on a hit (or in bench mode) asks
    the ad-decision service once and returns the chosen clip URI, or None.
    """
    duration = msg.time_range[1]-msg.time_range[0]
    query_times = 10
    for t in range(query_times):
        print("query db with time range: "+str(msg.time_range[0])+"-"+str(msg.time_range[1]))
        metaData = db.query(msg.content, msg.time_range, msg.time_field)
        if metaData or msg.bench_mode:
            try:
                jdata = json.dumps({
                    "metadata":metaData,
                    "user":{
                        "name":msg.user_name,
                        "keywords":msg.user_keywords
                    },
                    "bench_mode":msg.bench_mode
                })
                r = requests.post(ad_decision_server, data=jdata, timeout=timeout)
                if r.status_code == 200:
                    ad_info = r.json()
                    print(ad_info,flush=True)
                    return ad_info[0]["source"]["uri"]
            except requests.exceptions.RequestException as e:
                print("Error in ADClipDecision() " + str(e), flush=True)
            # NOTE(review): any decision failure (non-200 or request error)
            # gives up immediately — the retry loop only covers empty metadata.
            return None
        time.sleep(1)
        if t == query_times - 2:
            # On the penultimate retry, widen the window backwards by half
            # its length to improve the chance of a metadata hit.
            msg.time_range[0]=msg.time_range[0]-duration/2
    return None
class KafkaMsgParser(object):
    """Parse one ad-insertion Kafka message (JSON) into flat attributes."""
    def __init__(self, kafkamsg):
        self.msg = json.loads(kafkamsg)
        self.streaming_type = self.msg["ad_config"]["streaming_type"]  # "hls" or "dash"
        self.target = self.msg["destination"]["adpath"]
        # Split adpath into its directory and final component.
        self.target_path = self.target[0:self.target.rfind("/")]
        self.target_name = self.target.split("/")[-1]
        # use mp4 stream name as the index
        self.content = self.msg["meta-db"]["stream"]
        self.time_range = self.msg["meta-db"]["time_range"]
        self.time_field = self.msg["meta-db"]["time_field"]
        self.user_name = self.msg["user_info"]["name"]
        # NOTE(review): request_template above uses "keyword" (singular);
        # verify which key producers actually send.
        self.user_keywords = self.msg["user_info"]["keywords"]
        self.segment_duration=self.msg["ad_config"]["segment"]
        self.height = self.msg["ad_config"]["resolution"]["height"]
        self.width = self.msg["ad_config"]["resolution"]["width"]
        self.bitrate = self.msg["ad_config"]["bandwidth"]
        self.bench_mode = self.msg["bench_mode"]
        # Pre-rendered segment cache root, per streaming type.
        self.segment_path = segment_hls_root
        if self.streaming_type=="dash":
            self.segment_path = segment_dash_root
    def GetRedition(self):
        # Single rendition tuple: (width, height, video bitrate, audio bitrate).
        redition = ([self.width, self.height, self.bitrate, 128000],)
        return redition
# this will
def CopyAD(msg,height,height_list=[2160, 1440, 1080, 720, 480, 360]):
    """Duplicate the ad files rendered at `height` to every other height.

    Files in msg.target_path whose names start with "<height>p" and carry
    the streaming type's extensions (dash: .m4s/.mpd, hls: .ts/.m3u8) are
    copied under names with the height substring swapped for each entry of
    height_list (except `height` itself).
    """
    folder = msg.target_path
    prefix = str(height) + "p"
    candidates = [n for n in listdir(folder) if n.startswith(prefix)]
    if msg.streaming_type == "dash":
        extensions = (".m4s", ".mpd")
    elif msg.streaming_type == "hls":
        extensions = (".ts", ".m3u8")
    else:
        return
    for name in candidates:
        if not name.endswith(extensions):
            continue
        src = folder + "/" + name
        for other in height_list:
            if other != height:
                dst = folder + "/" + name.replace(str(height), str(other))
                shutil.copyfile(src, dst)
def CopyADStatic(msg, prefix="na"):
    """Populate msg.target_path with the fallback ("skipped") ad files.

    Copies every fallback file matching the streaming type, then writes
    ".complete" markers for the playlists/manifests. Existing files are
    kept unless their name starts with `prefix`. The copy and signal
    phases are deliberately separate so a playlist is never marked
    complete before its media files exist.
    """
    # first copy all streams
    all_files=list(listdir(fallback_root))
    for name in all_files:
        target_file=msg.target_path+"/"+name
        if msg.streaming_type=="dash" and (name.endswith(".m4s") or name.endswith(".mpd")):
            if not isfile(target_file) or name.startswith(prefix):
                shutil.copyfile(fallback_root+"/"+name,target_file)
        if msg.streaming_type=="hls" and (name.endswith(".ts") or name.endswith(".m3u8")):
            if not isfile(target_file) or name.startswith(prefix):
                shutil.copyfile(fallback_root+"/"+name,target_file)
    # then signal complete for all streams.
    for name in all_files:
        target_file=msg.target_path+"/"+name
        complete_file=msg.target_path+"/"+name+".complete"
        if msg.streaming_type=="dash" and name.endswith(".mpd"):
            if not isfile(complete_file) or name.startswith(prefix):
                SignalCompletion(target_file)
        if msg.streaming_type=="hls" and name.endswith(".m3u8"):
            if not isfile(complete_file) or name.startswith(prefix):
                SignalCompletion(target_file)
def CopyADSegment(msg, stream, prefix="na"):
    """Copy a pre-rendered ad segment folder into msg.target_path.

    Unlike CopyADStatic, media files are copied unconditionally
    (overwriting); ".complete" markers are then written for the
    playlists/manifests, again after all media is in place.
    """
    segment_folder = msg.segment_path + "/" + stream.split("/")[-1]
    # first copy all streams
    all_files=list(listdir(segment_folder))
    for name in all_files:
        target_file=msg.target_path+"/"+name
        if msg.streaming_type=="dash" and (name.endswith(".m4s") or name.endswith(".mpd")):
            shutil.copyfile(segment_folder+"/"+name,target_file)
        if msg.streaming_type=="hls" and (name.endswith(".ts") or name.endswith(".m3u8")):
            shutil.copyfile(segment_folder+"/"+name,target_file)
    # then signal complete for all streams.
    for name in all_files:
        target_file=msg.target_path+"/"+name
        complete_file=msg.target_path+"/"+name+".complete"
        if msg.streaming_type=="dash" and name.endswith(".mpd"):
            if not isfile(complete_file) or name.startswith(prefix):
                SignalCompletion(target_file)
        if msg.streaming_type=="hls" and name.endswith(".m3u8"):
            if not isfile(complete_file) or name.startswith(prefix):
                SignalCompletion(target_file)
def SignalCompletion(name):
    """Mark `name` as fully written by creating an empty `<name>.complete` file."""
    open(name + ".complete", "w").close()
def SignalIncompletion(name):
    """Remove the `<name>.complete` marker; a missing marker is not an error.

    FIX: the bare `except:` also swallowed KeyboardInterrupt/SystemExit;
    only filesystem errors (OSError) are expected here.
    """
    try:
        remove(name + ".complete")
    except OSError:
        pass
def ADTranscode(kafkamsg,db):
    """Produce the ad segment described by a Kafka message.

    Guards the work with a ZooKeeper state (one per target clip): skip if
    already processed; otherwise place fallback files, pick a clip, then
    either copy a cached segment or transcode one, and mark completion.
    """
    zk = None
    msg=KafkaMsgParser(kafkamsg)
    # add zk state for each resolution file if we generate the ad clip each time for one solution
    zk=ZKState(msg.target_path, msg.target_name)
    if zk.processed():
        print("AD transcoding finish the clip :",msg.target, flush=True)
        zk.close()
        return
    if zk.process_start():
        try:
            print("mkdir -p "+msg.target_path, flush=True)
            makedirs(msg.target_path)
        except OSError as exc: # Python >2.5 (except OSError, exc: for Python <2.5)
            # Pre-existing directory is fine; anything else is fatal.
            if exc.errno == errno.EEXIST and isdir(msg.target_path):
                pass
            else: raise
        # copy static ADs to fill all resolutions
        CopyADStatic(msg)
        stream = ADClipDecision(msg,db)
        if not stream:
            print("Query AD clip failed and fall back to skipped ad clip!", flush=True)
            # mark zk as incomplete (so that the valid one can be generated next time)
            zk.process_abort()
            zk.close()
            return
        # try to re-generate resolution specific AD
        SignalIncompletion(msg.target)
        try:
            stream_folder = msg.segment_path + "/" + stream.split("/")[-1]
            if isdir(stream_folder):
                print("Prefetch the AD segment {} \n".format(stream_folder),flush=True)
                CopyADSegment(msg,stream)
            else:
                print("Transcoding the AD segment {} \n".format(stream),flush=True)
                # only generate one resolution for ad segment, if not generated, ad will fall back to skipped ad.
                cmd = GetABRCommand(stream, msg.target_path, msg.streaming_type, msg.GetRedition(), duration=msg.segment_duration, fade_type="audio", content_type="ad")
                process_id = subprocess.Popen(cmd,stdout=subprocess.PIPE)
                # the `multiprocessing.Process` process will wait until
                # the call to the `subprocess.Popen` object is completed
                process_id.wait()
            SignalCompletion(msg.target)
            zk.process_end()
        except Exception as e:
            # On any failure, restore the fallback files and release the lock.
            print(str(e),flush=True)
            CopyADStatic(msg)
            zk.process_abort()
    zk.close()
class Process(object):
    """Callable that runs ADTranscode for one Kafka message in a child
    process (via `multiprocessing.Process`) and invokes `callback` when
    the transcode returns."""
    def __init__(self, type):
        # NOTE(review): `type` is accepted but unused here.
        self.db = DataBase()
    def __call__(self, kafkamsg):
        # store the arguments for later retrieval
        self._kafkamsg = kafkamsg
        # define the target function to be called by
        # `multiprocessing.Process`
        def target():
            ADTranscode(self._kafkamsg,self.db)
            # upon completion, call `callback`
            return self.callback()
        mp_process = multiprocessing.Process(target=target)
        # this call issues the call to `target`, but returns immediately
        mp_process.start()
        return mp_process
    def callback(self):
        print("finished ad transcoding command ")
|
from torch.utils.data.sampler import SubsetRandomSampler
import torch
import torch.utils.data
import torch.nn as nn
import torch.optim as optim
from torchvision import datasets, transforms
import numpy as np
import matplotlib.pyplot as plt
class MNIST_CNN_Encoder(nn.Module):
    """Convolutional encoder: (N, 1, 28, 28) image -> (N, 8, 2, 2) code.

    Layer list kept identical (and in the same Sequential slot names) so
    state_dicts remain interchangeable with the original definition.
    """
    def __init__(self):
        super().__init__()
        layers = [
            nn.Conv2d(1, 16, 3, stride=3, padding=1),
            nn.ReLU(True),
            nn.MaxPool2d(2, stride=2),
            nn.Conv2d(16, 8, 3, stride=2, padding=1),
            nn.ReLU(True),
            nn.MaxPool2d(2, stride=1),
        ]
        self.encoder = nn.Sequential(*layers)

    def forward(self, x):
        """Return the latent feature map for batch x."""
        return self.encoder(x)
class MNIST_CNN_Decoder(nn.Module):
    """Transposed-convolution decoder: (N, 8, 2, 2) code -> (N, 1, 28, 28)
    image in [-1, 1] (Tanh output)."""
    def __init__(self):
        super().__init__()
        layers = [
            nn.ConvTranspose2d(8, 16, 3, stride=2),
            nn.ReLU(True),
            nn.ConvTranspose2d(16, 8, 5, stride=3, padding=1),
            nn.ReLU(True),
            nn.ConvTranspose2d(8, 1, 2, stride=2, padding=1),
            nn.Tanh(),
        ]
        self.decoder = nn.Sequential(*layers)

    def forward(self, z):
        """Reconstruct an image batch from latent z."""
        return self.decoder(z)
def one_hot_embedding(labels, num_classes):
    """Convert integer label(s) to one-hot vectors via identity-matrix lookup."""
    identity = torch.eye(num_classes)
    return identity[labels]
def softmax_to_one_hot(tensor):
    """Binarise row-wise: 1 at each row's argmax, 0 elsewhere.

    Used to turn softmax outputs into hard predictions when computing
    accuracy; the result lives on the same device family as the input.
    """
    winners = torch.argmax(tensor, 1, keepdim=True)
    result = torch.zeros(tensor.shape)
    if tensor.is_cuda:
        result = result.cuda()
    result.scatter_(1, winners, 1)
    return result
def weight_init(m):
    """DCGAN-style initialiser, applied via `module.apply(weight_init)`.

    Conv* weights ~ N(0, 0.02); BatchNorm* weights ~ N(1, 0.02) with zero
    bias; Linear weights ~ N(0, 0.02) with zero bias. Other modules are
    left untouched.
    """
    kind = m.__class__.__name__
    if 'Conv' in kind:
        m.weight.data.normal_(0.0, 0.02)
    elif 'BatchNorm' in kind:
        m.weight.data.normal_(1.0, 0.02)
        m.bias.data.fill_(0)
    elif 'Linear' in kind:
        m.weight.data.normal_(0.0, 0.02)
        m.bias.data.fill_(0)
epochs = 10
learning_rate = 0.01
batch_size = 100
loss_function = nn.MSELoss()
# load the dataset
dataset = datasets.MNIST('../data', train=True,
download=True, transform=transforms.Compose([
transforms.ToTensor()
, transforms.Normalize((0.5,), (0.5,))
]))
num_train = len(dataset)
valid_size = 500
indices = list(range(num_train))
split = num_train - valid_size
np.random.shuffle(indices)
train_idx, valid_idx = indices[:split], indices[split:]
train_sampler = SubsetRandomSampler(train_idx)
valid_sampler = SubsetRandomSampler(valid_idx)
train_loader = torch.utils.data.DataLoader(dataset,
batch_size=batch_size, sampler=train_sampler)
valid_loader = torch.utils.data.DataLoader(dataset,
batch_size=batch_size, sampler=valid_sampler)
test_loader = torch.utils.data.DataLoader(
datasets.MNIST('../data', train=False, download=True,
transform=transforms.Compose([
transforms.ToTensor()
, transforms.Normalize((0.5,), (0.5,))
])),
batch_size=batch_size, shuffle=True)
encoder = MNIST_CNN_Encoder().cuda()
encoder.apply(weight_init)
decoder = MNIST_CNN_Decoder().cuda()
decoder.apply(weight_init)
net_params = list(encoder.parameters())+list(decoder.parameters())
optimizer = optim.Adam(net_params, betas=(0.5, 0.999),lr=learning_rate)
train_loss_list = []
val_loss_list = []
encoder.train()
decoder.train()
for epoch in range(epochs):
for i, (X, _) in enumerate(train_loader):
X = X.cuda()
z = encoder(X)
recon_X = decoder(z)
loss = loss_function(recon_X, X)
optimizer.zero_grad()
loss.backward()
optimizer.step()
# validation loss 계산.
if i % 100 == 0:
with torch.no_grad():
val_100_loss = []
for (X, _) in valid_loader:
X = X.cuda()
z = encoder(X)
recon_X = decoder(z)
loss = loss_function(recon_X, X)
val_100_loss.append(loss)
train_loss_list.append(loss)
val_loss_list.append(np.asarray(val_100_loss).sum() / len(valid_loader))
print("[%d/%d][%d/%d] loss : %f" % (i, len(train_loader), epoch, epochs, loss))
print("testing")
encoder.eval()
decoder.eval()
correct = 0
with torch.no_grad():
for i, (X, _) in enumerate(test_loader):
X = X.cuda()
z = encoder(X)
recon_X = decoder(z)
print("테스트 결과")
for i in range(5):
plt.imshow(X[i].cpu().reshape(28, 28))
plt.gray()
plt.show()
plt.imshow(recon_X[i].cpu().reshape(28, 28))
plt.gray()
plt.show()
break
plt.plot(np.column_stack((train_loss_list, val_loss_list)))
#################################
# NOTE(review): byte-identical re-definition of MNIST_CNN_Encoder above
# (notebook-style cell boundary); kept unchanged because
# load_state_dict below relies on the exact same Sequential layout.
class MNIST_CNN_Encoder(nn.Module):
    def __init__(self):
        super().__init__()
        self.encoder = nn.Sequential(
            nn.Conv2d(1, 16, 3, stride=3, padding=1),
            nn.ReLU(True),
            nn.MaxPool2d(2, stride=2),
            nn.Conv2d(16, 8, 3, stride=2, padding=1),
            nn.ReLU(True),
            nn.MaxPool2d(2, stride=1)
        )
    def forward(self, x):
        z = self.encoder(x)
        return z
class MNIST_FCN(nn.Module):
    """Two-layer softmax classifier over the 32-dim flattened encoder code."""
    def __init__(self, class_num):
        super().__init__()
        self.class_num = class_num
        self.fc_net = nn.Sequential(
            nn.Linear(32, 50),
            nn.ReLU(),
            nn.Linear(50, self.class_num),
            nn.Softmax()
        )

    def forward(self, x):
        """Flatten to (N, 32) and return class probabilities."""
        flat = x.view(-1, 32)
        return self.fc_net(flat)
# Classifier fine-tuning on top of the pretrained encoder
# (comments translated from Korean).
fcn = MNIST_FCN(class_num=10).cuda()
fcn.apply(weight_init)
pretrained_encoder = MNIST_CNN_Encoder().cuda()
# Transfer the autoencoder's learned encoder weights.
saved_weights = encoder.state_dict()
pretrained_encoder.load_state_dict(saved_weights)
#pretrained_encoder.apply(weight_init)
epochs = 5
learning_rate = 0.01
batch_size = 100
loss_function = nn.BCELoss()
# Fine-tune encoder and classifier jointly.
optimizer = optim.Adam(list(fcn.parameters())+list(pretrained_encoder.parameters()), betas=(0.5, 0.999), lr=learning_rate)
#optimizer = optim.Adam(fcn.parameters(), betas=(0.5, 0.999), lr=learning_rate)  # switched to Adam with betas=(0.5, 0.999)
train_loss_list = []
fcn.train()
for epoch in range(epochs):
    for i, (X, t) in enumerate(train_loader):
        X = X.cuda()
        t = one_hot_embedding(t, 10).cuda()
        z = pretrained_encoder(X)
        Y = fcn(z)
        loss = loss_function(Y, t)
        # NOTE(review): appends the live loss tensor; loss.item() would
        # avoid retaining the autograd graph.
        train_loss_list.append(loss)
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        print("[%d/%d][%d/%d] loss : %f"%(i,len(train_loader),epoch,epochs, loss))
print("calculating accuracy...")
fcn.eval()
correct = 0
# Accuracy: count rows where the hard prediction matches the one-hot target.
with torch.no_grad():
    for i, (X, t) in enumerate(test_loader):
        X = X.cuda()
        t = one_hot_embedding(t, 10).cuda()
        z = pretrained_encoder(X)
        Y = fcn(z)
        onehot_y= softmax_to_one_hot(Y)
        correct += int(torch.sum(onehot_y * t))
print("Accuracy : %f" % (100. * correct / len(test_loader.dataset)))
plt.plot(train_loss_list)
import requests
from bs4 import BeautifulSoup as bs
def is_holiday():
    """Return whether today is a stock-market holiday (weekend or a date
    listed on the Economic Times holiday-calendar page).

    Scrapes the calendar over HTTP; network/parse errors propagate.
    FIXES: removed the dead no-op `trading_holiday_list[:2]`, hoisted the
    mid-body import to the top of the function, dropped the unused
    `datetime` import.
    """
    from datetime import date

    url = 'https://economictimes.indiatimes.com/markets/stocks/stock-market-holiday-calendar'
    headers = {'User-Agent':'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/92.0.4515.131 Safari/537.36'}
    r = requests.get(url, headers = headers)
    soup = bs(r.text, 'html.parser')
    # The third cell of each row holds the holiday date; the last row is a footer.
    rows = soup.find('tbody').find_all('tr')
    trading_holiday_list = [row.find_all("td")[2].string for row in rows[:-1]]
    # Today's date formatted like the site's dates, e.g. "January 26, 2021".
    today_str = date.today().strftime("%B %d, %Y")
    # Day of week as "0".."6" with Sunday being "0" and Saturday "6".
    today_day = date.today().strftime("%w")
    if (today_day == "0") or (today_day == "6") or (today_str in trading_holiday_list):
        return 'Today is a Holiday'
    else:
        return 'Today is not a Holiday'
print(is_holiday()) |
# Python 2 script (print statements): run the order-trade MRJob locally and
# dump its output as CSV.
import sys
sys.path.append('/home/lujin/script')
from job.mr_service_order import MROrderTrade
mr_job = MROrderTrade(args=['-r', 'local', 'data/ods_service_order.mini.txt'])
# CSV header column order (NOTE(review): "COLUMES" looks like a typo for
# COLUMNS; kept as-is since this Python 2 code cannot be re-verified here).
COLUMES = ['predict_origin_amount',
           'predict_amount',
           'total_amount',
           'compute_amount',
           'pay_amount',
           # status
           'status',
           'is_asap',
           'is_auto_dispatch',
           'is_night',
           'abnormal_mark',
           # coparator
           'user_type',
           'corporate_id',
           # time
           'create_time',
           'confirm_time',
           'expect_start_time',
           'arrival_time',
           'start_time',
           'time_length',
           'actual_time_length',
           # cancle
           'reason_id',
           # extend
           'deadhead_distance',
           'system_distance',
           'product_type_id'
           ]
with mr_job.make_runner() as runner:
    runner.run()
    print ','.join(COLUMES)
    for line in runner.stream_output():
        # Use the job's specified protocol to read the output
        key, value = mr_job.parse_output_line(line)
        print key + "," + value
from ._ComputeControl import *
from ._MPC_ACC import *
from ._MPC_CC import *
from ._MPC_LK import *
|
# -*- coding: utf-8 -*-
"""
Created on Tue May 28 18:51:03 2019
@author: gille
"""
import pandas as pd  # FIX: `pd` was used below without ever being imported

# Load columns B and C of the raster spreadsheet as one-column DataFrames.
file = pd.read_excel(io='D:/Analyses/Fig E.1/Figure/35_Raster.xlsx')
data0 = file[['B']]
data1 = file[['C']]
|
import numpy as np
import pandas as pd
import datetime
from pymongo import MongoClient
def get_data_glucose(date1, date2):
    """Build a synthetic hourly glucose series between date1 and date2.

    Generates 366 days x 24 Poisson(140) samples, pairs them positionally
    with the hourly timestamps in [date1, date2], and drops the unmatched
    surplus. Returns a DataFrame with columns ["Glucose", "Date"].
    """
    # One DataFrame of 24 simulated readings per day.
    data = []
    for _ in range(1,367):
        s = pd.DataFrame(np.random.poisson(140, 24))
        data.append(s)
    glucose = pd.concat(data).reset_index()
    glucose.drop(["index"], inplace = True, axis = 1)
    # Hourly timestamps spanning the requested interval.
    mydates = pd.date_range(date1, date2, freq='H').tolist()
    df2 = pd.DataFrame(mydates)
    # Pair samples with timestamps; rows without a timestamp are dropped.
    final = pd.concat([glucose, df2], axis = 1)
    final.columns = ["Glucose", "Date"]
    final = final.dropna()
    # FIX: removed dead `final.set_index("Date")` — set_index is not
    # in-place and its result was discarded; downstream code (clean_date)
    # expects "Date" as a column, so the no-op call was deleted, not made
    # effective.
    return final
def get_data_hemoglobine(date1, date2):
    """Build a synthetic hourly haemoglobin series between date1 and date2.

    Same construction as get_data_glucose but with Poisson(3) samples.
    Returns a DataFrame with columns ["Hb", "Date"].
    """
    # One DataFrame of 24 simulated readings per day.
    data = []
    for _ in range(1,367):
        s = pd.DataFrame(np.random.poisson(3, 24))
        data.append(s)
    hb = pd.concat(data).reset_index()
    hb.drop(["index"], inplace = True, axis = 1)
    # Hourly timestamps spanning the requested interval.
    mydates = pd.date_range(date1, date2, freq='H').tolist()
    df2 = pd.DataFrame(mydates)
    # Pair samples with timestamps; rows without a timestamp are dropped.
    final = pd.concat([hb, df2], axis = 1)
    final.columns = ["Hb", "Date"]
    final = final.dropna()
    # FIX: removed dead `final.set_index("Date")` (result was discarded;
    # see get_data_glucose).
    return final
def clean_date(df, col):
    """Expand a timestamp column into year/month/day/hour columns.

    NOTE(review): Dates/Time are derived from `col` but immediately
    dropped, and the year/month/day/hour fields are read from a hard-coded
    'Date' column via the .dt accessor — so df must carry a datetime64
    'Date' column and `col` is effectively ignored beyond the two dropped
    helpers. Confirm whether `col` was meant to be used throughout.
    Mutates df in place and also returns it.
    """
    df['Dates'] = pd.to_datetime(df[col]).dt.date
    df['Time'] = pd.to_datetime(df[col]).dt.time
    df['year']= df['Date'].dt.year
    df['month']= df['Date'].dt.month
    df['day']= df['Date'].dt.day
    df['hour']= df['Date'].dt.hour
    df.drop(["Dates", "Date", "Time"], axis = 1, inplace = True )
    return df
def hb(col):
    """Bucket an integer value into an Hb code 4..12.

    Membership is tested against integer ranges, so non-integers (and any
    value outside 0..269, including negatives) fall through to 12 — this
    mirrors the original behaviour exactly.
    """
    buckets = (
        (range(0, 71), 4),
        (range(71, 101), 5),
        (range(101, 127), 6),
        (range(127, 153), 7),
        (range(153, 184), 8),
        (range(184, 213), 9),
        (range(213, 241), 10),
        (range(241, 270), 11),
    )
    for span, code in buckets:
        if col in span:
            return code
    return 12
def insert_data (df, nombre):
    """Insert a DataFrame's rows into MongoDB collection `nombre`
    (database "Diabetes" on the default local client).

    NOTE(review): create_collection raises CollectionInvalid when the
    collection already exists — confirm this is only run on fresh
    collections.
    """
    client = MongoClient()
    db = client.Diabetes
    collection = db.create_collection(name = f"{nombre}")
    collection = db[f"{nombre}"]
    # One document per DataFrame row.
    data = df.to_dict(orient='records')
    #print(data)
    collection.insert_many(data)
    return "Done"
from multiprocessing import Queue,Process
# queue and process primitives
import time,random
# list1 is the data source: write() pushes its values into the queue and read() drains them
list1 = ["java","Python","JavaScript"]
def write(queue):
    """Producer: push every element of the global list1 into the queue."""
    for value in list1:
        print(f'正在向队列中添加数据-->{value}')
        # non-blocking put (raises queue.Full if the queue is full)
        queue.put_nowait(value)
        time.sleep(random.random())
def read(queue):
    """Consumer: drain the queue, stopping as soon as it is empty."""
    while True:
        # keep reading while the queue is non-empty
        if not queue.empty():
            # non-blocking get
            value = queue.get_nowait()
            print(f'从队列中取到的数据为-->{value}')
            time.sleep(random.random())
        else:
            break
# create the shared queue instance
queue = Queue()
# create two processes: one writer, one reader
write_data = Process(target=write,args=(queue,))
# why (queue,)? args= expects a tuple, so a one-element tuple is required
read_data = Process(target=read,args=(queue,))
write_data.start()
# wait for the writer to finish before starting the reader,
# so read() never sees a transiently empty queue mid-write
write_data.join()
read_data.start()
# wait for the reader
read_data.join()
print('ok')
import pprint
from gdascore.gdaAttack import gdaAttack
from gdascore.gdaScore import gdaScores
from gdascore.gdaTools import setupGdaAttackParameters
def showResult(x,result,uidCol=None):
    """Print a summary of one prior-knowledge query and clean up the attack.

    NOTE(review): assumes len(result) >= 5 (the preview loop below indexes
    result[0..4] unconditionally) — confirm for small result sets.
    """
    print(f"Received result length {len(result)}")
    if uidCol is not None:
        # Count distinct user IDs found in the given column.
        uids = []
        for thing in result:
            uids.append(thing[uidCol])
        print(f"Distinct UIDs: {len(set(uids))}")
    # Preview the first five rows.
    for i in range(5):
        print(f"   {result[i]}")
    attackResult = x.getResults()
    sc = gdaScores(attackResult)
    score = sc.getScores()
    print(f"knowledgeCells = {score['base']['knowledgeCells']}")
    x.cleanUp(doExit=False)
pp = pprint.PrettyPrinter(indent=4)
doCache = True
# Attack configuration: un-anonymised banking transactions table.
config = {
    'anonTypes': [ ['no_anon'] ],
    'tables': [ ['banking','transactions'] ]
}
paramsList = setupGdaAttackParameters(config)
params = paramsList[0]
pp.pprint(params)
# Test bad inputs (disabled block: exercises getPriorKnowledge argument validation)
if False:
    x = gdaAttack(params)
    result = x.getPriorKnowledge(['frequency'],'users',selectColumn='bad',values=[1])
    result = x.getPriorKnowledge(['bad'],'rows',count=20)
    result = x.getPriorKnowledge(['frequency'],'users',selectColumn='uid',values='bad')
    result = x.getPriorKnowledge(['frequency'],'users',selectColumn='uid',colRange='bad')
    result = x.getPriorKnowledge(['frequency'],'users',selectColumn='uid')
    result = x.getPriorKnowledge(['frequency'],'rows',count=20,selectColumn='uid')
    result = x.getPriorKnowledge(['frequency'],'rows',fraction=0.55,count=20)
    result = x.getPriorKnowledge(['frequency'],'rows',count=3.55)
    result = x.getPriorKnowledge(['frequency'],'rows',fraction=20)
    result = x.getPriorKnowledge(['frequency'],'rows',count='bad')
    result = x.getPriorKnowledge(['frequency'],'rows',fraction='bad')
    result = x.getPriorKnowledge(['frequency'],'rows')
    result = x.getPriorKnowledge(['frequency'],'boo')
    result = x.getPriorKnowledge('uid','rows')
    x.cleanUp(doExit=False)
# Each section below runs one getPriorKnowledge variant on a fresh attack
# object and summarises the result via showResult.
print("------------------------------------------------")
x = gdaAttack(params)
print("x.getPriorKnowledge(['uid','lastname'],'users',count=500")
result = x.getPriorKnowledge(['uid','lastname'],'users',count=500)
print("x.getPriorKnowledge(['uid','lastname'],'rows',count=500")
result = x.getPriorKnowledge(['uid','lastname'],'rows',count=500)
showResult(x,result,uidCol=0)
print("------------------------------------------------")
x = gdaAttack(params)
print("x.getPriorKnowledge(['uid','lastname'],'users',count=500")
result = x.getPriorKnowledge(['uid','lastname'],'users',count=500)
showResult(x,result,uidCol=0)
print("------------------------------------------------")
x = gdaAttack(params)
print("x.getPriorKnowledge(['uid','lastname'],'rows',count=500")
result = x.getPriorKnowledge(['uid','lastname'],'rows',count=500)
showResult(x,result,uidCol=0)
print("------------------------------------------------")
x = gdaAttack(params)
print("x.getPriorKnowledge(['uid','lastname'],'rows',selectColumn='acct_district_id',values=[1,2,3,4])")
result = x.getPriorKnowledge(['uid','lastname'],'rows',selectColumn='acct_district_id',values=[1,2,3,4])
showResult(x,result,uidCol=0)
print("------------------------------------------------")
x = gdaAttack(params)
print("x.getPriorKnowledge(['uid','lastname'],'rows',selectColumn='acct_district_id',colRange=[0,20])")
result = x.getPriorKnowledge(['uid','lastname'],'rows',selectColumn='acct_district_id',colRange=[0,20])
showResult(x,result,uidCol=0)
print("------------------------------------------------")
x = gdaAttack(params)
print("x.getPriorKnowledge(['uid','lastname'],'rows',selectColumn='lastname',colRange=['A','C'])")
result = x.getPriorKnowledge(['uid','lastname'],'rows',selectColumn='lastname',colRange=['A','C'])
showResult(x,result,uidCol=0)
print("------------------------------------------------")
x = gdaAttack(params)
print("x.getPriorKnowledge(['uid','frequency'],'users',fraction=0.05)")
result = x.getPriorKnowledge(['uid','frequency'],'users',fraction=0.05)
showResult(x,result,uidCol=0)
print("------------------------------------------------")
x = gdaAttack(params)
print("x.getPriorKnowledge(['uid','frequency'],'rows',fraction=0.05)")
result = x.getPriorKnowledge(['uid','frequency'],'rows',fraction=0.05)
showResult(x,result,uidCol=0)
|
import sqlite3
from sqlite3 import Error
def create_connection(db_file):
    """Open a SQLite connection to db_file; return it, or None on failure
    (the error is printed, not raised)."""
    try:
        return sqlite3.connect(db_file)
    except Error as e:
        print(e)
        return None
def create_table(conn, create_table_sql):
    """Execute a DDL statement on an open connection.

    Errors are printed rather than raised (the caller also feeds DROP and
    ALTER statements through here).

    :param conn: Connection object
    :param create_table_sql: a CREATE TABLE (or other DDL) statement
    :return: None
    """
    try:
        conn.cursor().execute(create_table_sql)
    except Error as e:
        print(e)
def execute_command(db_file, command):
    """Open db_file, execute a single statement, commit, and close.

    Errors are printed rather than raised. Always returns None.
    FIX: `conn` is now initialised before the try block — previously, if
    sqlite3.connect() itself raised, the finally clause hit an unbound
    `conn` and raised NameError.
    """
    conn = None
    try:
        conn = sqlite3.connect(db_file)
        cur = conn.cursor()
        cur.execute(command)
        conn.commit()
    except Error as e:
        print(e)
    finally:
        if conn is not None:
            conn.close()
    return None
# Schema (re)creation for zapfen.db. DROP/ALTER statements are routed
# through create_table too: when a table/column is absent the resulting
# error is printed and ignored, making the script roughly idempotent.
sql_delete_drinks_table = """DROP TABLE drinks;"""
sql_create_drinks_table = """ CREATE TABLE drinks (
                                        id integer PRIMARY KEY,
                                        name text NOT NULL,
                                        vol number NOT NULL
                                    ); """
sql_delete_users_table = """DROP TABLE users;"""
sql_create_users_table = """ CREATE TABLE IF NOT EXISTS users (
                                        id integer PRIMARY KEY,
                                        name text NOT NULL,
                                        weight number,
                                        height number,
                                        is_female boolean
                                    ); """
sql_delete_consumptions_table = """DROP TABLE consumptions;"""
sql_create_consumptions_table = """CREATE TABLE IF NOT EXISTS consumptions (
                                    id integer PRIMARY KEY autoincrement,
                                    user_id integer NOT NULL,
                                    drink_id integer NOT NULL,
                                    amount number NOT NULL,
                                    ts timestamp NOT NULL,
                                    command text NOT NULL,
                                    precision number NOT NULL,
                                    deleted boolean NOT NULL,
                                    FOREIGN KEY (user_id) REFERENCES users (id),
                                    FOREIGN KEY (drink_id) REFERENCES drinks (id)
                                ); """
# Migration for older databases whose users table lacks the height column.
sql_add_height_column = """ALTER TABLE users ADD height number;"""
db_file = "zapfen.db"
conn = create_connection(db_file)
create_table(conn, sql_delete_drinks_table)
create_table(conn, sql_create_drinks_table)
#create_table(conn, sql_delete_users_table)
create_table(conn, sql_create_users_table)
create_table(conn, sql_add_height_column)
create_table(conn, sql_delete_consumptions_table)
create_table(conn, sql_create_consumptions_table)
# Seed the drinks catalogue (drinks table was just recreated, so ids are fresh).
execute_command(db_file, "INSERT INTO drinks (id,name,vol) VALUES (0, 'Bier',5);")
execute_command(db_file, "INSERT INTO drinks (id,name,vol) VALUES (1, 'Cocktail',10);")
execute_command(db_file, "INSERT INTO drinks (id,name,vol) VALUES (2, 'Shot',35);")
execute_command(db_file, "INSERT INTO drinks (id,name,vol) VALUES (3, 'Wine',15);")
#! /usr/bin/env python
import pygame, sys, re, random, copy
from pygame.locals import *
class Square:
    """One board cell: its image, (x, y) board coordinates, neighbour
    links (a None link means a wall in that direction), and the robot
    and/or target symbol currently occupying it."""
    def __init__ (self, img, coords,
                  left = None, right = None, up = None, down = None,
                  robot = None, symbol = None):
        self.img, self.coords = img, coords
        self.left, self.right = left, right
        self.up, self.down = up, down
        self.robot, self.symbol = robot, symbol
        # 40x40 px screen rectangle, used for hit-testing and the
        # selection highlight.
        x, y = coords
        self.box = pygame.Rect (x * 40, y * 40, 40, 40)
class Robot:
    """A coloured playing piece; *square* is its current board location
    (None while unplaced)."""
    def __init__ (self, img, color, square = None):
        self.img, self.color, self.square = img, color, square
class Symbol:
    """A coloured target symbol of a given *type* (bio/hex/tar/tri),
    optionally placed on a board square."""
    def __init__ (self, img, color, type, square = None):
        self.img, self.color = img, color
        self.type, self.square = type, square
def draw ():
    """Redraw the whole frame: board tiles, placed symbols, the current
    target, the four robots, walls (red lines on missing neighbour
    links), and the yellow selection outline."""
    screen.fill ((0xff, 0xff, 0xff))
    # board tiles
    for row in grid:
        for square in row:
            screen.blit (square.img, (square.coords[0] * 40,
                                      square.coords[1] * 40))
    # every placed target symbol
    for color in symbols:
        for form in symbols[color]:
            if symbols[color][form].square:
                screen.blit (symbols[color][form].img,
                             (symbols[color][form].square.coords[0] * 40,
                              symbols[color][form].square.coords[1] * 40))
    # current target near the board centre
    # NOTE(review): this passes two .center *tuples* as the blit position
    # ((cx, cy), (cx2, cy2)) — looks suspect, but kept unchanged here.
    if target:
        screen.blit (target.img, (grid[7][7].box.center,
                                  grid[8][8].box.center))
    # the four robots, if placed
    if red.square:
        screen.blit (red.img, (red.square.coords[0] * 40,
                               red.square.coords[1] * 40))
    if blue.square:
        screen.blit (blue.img, (blue.square.coords[0] * 40,
                                blue.square.coords[1] * 40))
    if green.square:
        screen.blit (green.img, (green.square.coords[0] * 40,
                                 green.square.coords[1] * 40))
    if yellow.square:
        screen.blit (yellow.img, (yellow.square.coords[0] * 40,
                                  yellow.square.coords[1] * 40))
    # walls: a missing link in a direction means a wall on that edge
    for row in grid:
        for square in row:
            if not square.right:
                pygame.draw.line (screen, (0xff, 0x0, 0x0),
                                  (square.coords[0] * 40 + 40 - 1,
                                   square.coords[1] * 40),
                                  (square.coords[0] * 40 + 40 - 1,
                                   square.coords[1] * 40 + 40),
                                  5)
            if not square.left:
                pygame.draw.line (screen, (0xff, 0x0, 0x0),
                                  (square.coords[0] * 40,
                                   square.coords[1] * 40),
                                  (square.coords[0] * 40,
                                   square.coords[1] * 40 + 40),
                                  5)
            if not square.up:
                pygame.draw.line (screen, (0xff, 0x0, 0x0),
                                  (square.coords[0] * 40,
                                   square.coords[1] * 40),
                                  (square.coords[0] * 40 + 40 - 1,
                                   square.coords[1] * 40),
                                  5)
            if not square.down:
                pygame.draw.line (screen, (0xff, 0x0, 0x0),
                                  (square.coords[0] * 40,
                                   square.coords[1] * 40 + 40),
                                  (square.coords[0] * 40 + 40,
                                   square.coords[1] * 40 + 40),
                                  5)
    # yellow outline around the currently selected square
    if selected:
        pygame.draw.rect (screen, (0xff, 0xff, 0x0), selected.box, 1)
def gengrid ():
    """Build the 16x16 board: create all squares, wire up the neighbour
    links, then apply walls and piece placements from the map file via
    loadmap(). Returns the finished grid (list of columns)."""
    grid = [[Square (squareimg, (x, y)) for y in range (16)]
            for x in range (16)]
    # Link each square to its existing neighbours; edges keep None,
    # which the rest of the game treats as a wall.
    for x in range (16):
        for y in range (16):
            sq = grid[x][y]
            if x > 0:
                sq.left = grid[x - 1][y]
            if x < 15:
                sq.right = grid[x + 1][y]
            if y > 0:
                sq.up = grid[x][y - 1]
            if y < 15:
                sq.down = grid[x][y + 1]
    return loadmap (grid)
def loadmap (grid):
    """Apply the map file "first.map" to *grid*: knock out wall links,
    place the four robots, and place the sixteen target symbols.

    File line format: (x, y) left right up down redbot bluebot greenbot
    yellowbot, then 16 symbol flags in (red, blue, green, yellow) x
    (bio, hex, tar, tri) order.
    Example: (14, 10) 1 0 1 0 0 0 0 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
    """
    # Fixed orders for data[6:10] (robots) and data[10:26] (symbols);
    # replaces 20 copy-pasted if-branches from the original.
    robots = (red, blue, green, yellow)
    symbol_slots = [(c, t)
                    for c in ('red', 'blue', 'green', 'yellow')
                    for t in ('bio', 'hex', 'tar', 'tri')]
    # Bug fix: the file handle was previously never closed (and the
    # variable shadowed the `file` builtin).
    with open ("first.map") as mapfile:
        for line in mapfile:
            data = [int (num) for num in re.findall (r'[0-9]+', line)]
            if not data:
                continue  # skip blank lines instead of crashing
            cursquare = grid[data[0]][data[1]]
            # Flags 2..5: erect a wall by severing the link on both sides.
            if data[2]:
                if cursquare.left:
                    cursquare.left.right = None
                cursquare.left = None
            if data[3]:
                if cursquare.right:
                    cursquare.right.left = None
                cursquare.right = None
            if data[4]:
                if cursquare.up:
                    cursquare.up.down = None
                cursquare.up = None
            if data[5]:
                if cursquare.down:
                    cursquare.down.up = None
                cursquare.down = None
            # Flags 6..9: robot starting positions.
            for flag, robot in zip (data[6:10], robots):
                if flag:
                    robot.square = cursquare
                    cursquare.robot = robot
            # Flags 10..25: symbol placements.
            for flag, (color, form) in zip (data[10:26], symbol_slots):
                if flag:
                    symbols[color][form].square = cursquare
                    cursquare.symbol = symbols[color][form]
    return grid
def check_target (target):
    """Check whether the robot matching *target*'s colour sits on the
    target's square. If so, record the find, end the game once all 16
    symbols are found, and otherwise roll a fresh target that has not
    been found yet. Returns the (possibly new) target.

    Replaces four identical copy-pasted colour branches with a single
    colour -> robot lookup.
    """
    robots = {'red': red, 'blue': blue, 'green': green, 'yellow': yellow}
    robot = robots.get (target.color)
    if robot is not None and target.square == robot.square:
        foundsymbols.append (target)
        if len (foundsymbols) == 16:
            end_game ()
        # Re-roll until we land on a symbol not already found.
        # (list() around .keys() keeps the py2 behaviour and is py3-safe.)
        while foundsymbols.count (target):
            symcolor = random.choice (list (symbols.keys ()))
            symtype = random.choice (list (symbols[symcolor].keys ()))
            target = symbols[symcolor][symtype]
    return target
def end_game ():
    """Announce completion and terminate the program via SystemExit."""
    print ("done")
    exit ()
# --- one-time setup ------------------------------------------------------
pygame.init ()
# 640x640 window: a 16x16 board of 40px squares.
screen = pygame.display.set_mode ((640, 640))
pygame.display.set_caption ('Ricochet Robots')
squareimg = pygame.image.load ("./square.png")
# The four robots, one per colour.
red = Robot (pygame.image.load ("./redbot.png"), "red")
blue = Robot (pygame.image.load ("./bluebot.png"), "blue")
green = Robot (pygame.image.load ("./greenbot.png"), "green")
yellow = Robot (pygame.image.load ("./yellowbot.png"), "yellow")
# 4 colours x 4 forms = 16 target symbols, keyed as symbols[color][form].
symbols = {'red': {}, 'blue': {}, 'green': {}, 'yellow': {}}
for color in symbols:
    symbols[color]['bio'] = Symbol (pygame.image.load
                                    ("./" + color + "bio.png"), color, 'bio')
    symbols[color]['hex'] = Symbol (pygame.image.load
                                    ("./" + color + "hex.png"), color, 'hex')
    symbols[color]['tar'] = Symbol (pygame.image.load
                                    ("./" + color + "tar.png"), color, 'tar')
    symbols[color]['tri'] = Symbol (pygame.image.load
                                    ("./" + color + "tri.png"), color, 'tri')
grid = gengrid ()
mousepos = pygame.mouse.get_pos ()
selected = None  # currently selected Square (one holding a robot)
# Pick an initial target. The random roll is computed but currently
# overridden by the hard-wired red "tar" symbol below.
symcolor = random.choice (symbols.keys ())
symtype = random.choice (symbols[symcolor].keys ())
#target = symbols[symcolor][symtype]
target = symbols['red']['tar']
foundsymbols = []  # symbols already reached this game (16 ends the game)
# --- main loop: ~10 fps event pump, game logic, redraw -------------------
while True:
    for event in pygame.event.get ():
        if event.type == QUIT:
            sys.exit ()
        elif event.type == KEYDOWN:
            if event.key == K_ESCAPE:
                sys.exit ()
            # Arrow keys slide the selected robot square-by-square until
            # it hits a wall (missing neighbour link) or another robot.
            elif event.key == K_LEFT:
                if selected:
                    while selected.left and not selected.left.robot:
                        selected.left.robot = selected.robot
                        selected.robot.square = selected.left
                        selected.robot = None
                        selected = selected.left
            elif event.key == K_RIGHT:
                if selected:
                    while selected.right and not selected.right.robot:
                        selected.right.robot = selected.robot
                        selected.robot.square = selected.right
                        selected.robot = None
                        selected = selected.right
            elif event.key == K_UP:
                if selected:
                    while selected.up and not selected.up.robot:
                        selected.up.robot = selected.robot
                        selected.robot.square = selected.up
                        selected.robot = None
                        selected = selected.up
            elif event.key == K_DOWN:
                if selected:
                    while selected.down and not selected.down.robot:
                        selected.down.robot = selected.robot
                        selected.robot.square = selected.down
                        selected.robot = None
                        selected = selected.down
            # r/b/g/y keys select the robot of that colour.
            elif event.unicode == 'r':
                selected = red.square
            elif event.unicode == 'b':
                selected = blue.square
            elif event.unicode == 'g':
                selected = green.square
            elif event.unicode == 'y':
                selected = yellow.square
        elif event.type == MOUSEMOTION:
            mousepos = event.pos
        elif event.type == MOUSEBUTTONDOWN:
            # Clicking a square that holds a robot selects it.
            for row in grid:
                for square in row:
                    if square.box.collidepoint (mousepos) and square.robot:
                        selected = square
    target = check_target (target)
    draw ()
    pygame.display.flip ()
    pygame.time.wait (100)
|
#!/usr/bin/env python
#-*- coding: utf-8 -*-
"""
Webhook automated Dota 2 match parsing service for Discord
"""
from json import load as jload, dump as jdump
from time import sleep
from pathlib import Path
from Bot import WebHookBot
from requests import get, post
class Dotabot(WebHookBot):
    """Discord webhook bot that polls OpenDota for each tracked player's
    latest match and posts an embed summarising any new result."""

    DELAY_GAP = 30  # seconds to pause between posting two matches
    OPENDOTA_API = "https://api.opendota.com/api"
    OPENDOTA_URL = "https://opendota.com/matches"
    HERO_FILE = "heroes.json"

    def __init__(self, name):
        super(Dotabot, self).__init__(name)
        self.SLEEP_TIME = 60 * 20  # main-loop poll interval: 20 minutes
        self.filegen = self._create_filegen("shared")
        self.heroes = list()  # hero metadata fetched from OpenDota
        self.herojson = self.filegen(self.HERO_FILE)

    def _load_hero_data(self):
        """Populate self.heroes from the cached JSON file, fetching it
        from the OpenDota API (and caching it) on first run.

        Raises IOError when the API fetch fails.
        """
        if self.herojson.is_file():
            with open(self.herojson, 'r') as f:
                self.heroes = jload(f)
        else:
            r = get(f"{self.OPENDOTA_API}/heroes")
            if r.status_code != 200:
                raise IOError("Failed to prefetch Hero JSON data from OpenDota")
            with open(self.herojson, 'w') as f:
                jdump(r.json(), f)
            self.heroes = r.json()
        return

    def _hero_name(self, hero_id):
        """Resolve an OpenDota hero id to its display name.

        Falls back to "Unknown Hero" when the id is not in the cache.
        """
        for hero in self.heroes:
            if hero.get("id") == hero_id:
                return hero.get("localized_name", "Unknown Hero")
        return "Unknown Hero"

    def get_last_match(self, player_path):
        """
        Get the last match from a given file
        Yield a pair of strings (Match ID * Dota ID) when the match
        differs from the cache
        If not newer, return (None * None)
        """
        if not player_path.is_file():
            raise IOError("Invalid Path() given for get_last_match()")
        with open(player_path, 'r') as f:
            dota_id = f.read().strip("\n")
        resp = get(f"{self.OPENDOTA_API}/players/{dota_id}/matches?limit=1")
        if resp.status_code != 200:
            self.logger(f"Couldn't load matches for {dota_id}")
            return (None, None)
        data = resp.json()
        if not data:
            return (None, None)  # no matches played
        match_id = data[0]["match_id"]
        # Compare against the cached last-seen match id, if a cache exists.
        cache = Path(f"{player_path}.cache")
        cache_v = 0
        if cache.is_file():
            with open(cache, 'r') as f:
                cache_v = int(f.read())
        # New match: persist it to the cache and report it.
        if cache_v != match_id:
            with open(cache, 'w') as f:
                f.write(str(match_id))
            return (match_id, dota_id)
        return (None, None)

    def get_payload(self, match_id, dota_id):
        """
        Craft a payload of Dota 2 match info.
        The nested JSON is per the Discord webhook Embed format.
        Returns False when the player cannot be found in the match data.
        """
        resp = get(f"{self.OPENDOTA_API}/matches/{match_id}")
        if resp.status_code != 200:
            raise IOError("Failed to get data from OpenDota API")
        jsonblob = resp.json()
        data = dict()
        embs = list()
        radiant_win = jsonblob["radiant_win"]
        radiant_score = jsonblob["radiant_score"]
        dire_score = jsonblob["dire_score"]
        duration = jsonblob["duration"]
        minutes = str(duration // 60)
        seconds = str(duration % 60).zfill(2)
        player = None
        for p in jsonblob["players"]:
            if str(p["account_id"]) == dota_id:
                self.logger("Found the player")
                player = p
        if player is None:
            self.logger("Failed to find the player")
            return False
        # Player variable declarations for use later
        player_team = player["isRadiant"]
        # Bug fix: the hero name was a hard-coded "LOL" placeholder even
        # though hero data is loaded at startup — resolve it properly.
        hero_name = self._hero_name(player.get("hero_id"))
        # Score of game
        embs.append({
            "name": "Final Score",
            "value": f"Radiant **{radiant_score}** - **{dire_score}** Dire",
            "inline": True
        })
        # Player Stats field; guard against a 0-kill team score to avoid
        # ZeroDivisionError.
        team_score = radiant_score if player_team else dire_score
        if team_score:
            percent_of_team = round(
                ((player["kills"] + player["assists"]) / float(team_score)) * 100.0, 2)
        else:
            percent_of_team = 0.0
        embs.append({
            "name": "Stats (KDA)",
            "value": f"{player['kills']}/{player['deaths']}/{player['assists']} ({percent_of_team}% involvement)",
            "inline": True
        })
        # GPM / XPM
        embs.append({
            "name": "GPM / XPM",
            "value": f"{player['gold_per_min']} / {player['xp_per_min']}",
            "inline": True
        })
        # Replay-sensitive data follows. OpenDota takes time to parse each
        # replay; chat, pings, runes and damage instances only appear once
        # parsing is done, so they are added to the Embed only if present.
        if 'pings' in player:
            total_pings = sum(
                p["pings"] for p in jsonblob["players"]
                if p["isRadiant"] == player_team)
            # Guard: a parsed replay can still report zero team pings.
            if total_pings:
                pingpc = round((float(player["pings"]) / total_pings) * 100.0, 2)
                embs.append({
                    "name": "Total Pings",
                    "value": f"{player['pings']} ({pingpc}% of team)",
                    "inline": True
                })
        # TODO: rune details (player['runes']) are not rendered yet.
        # craft the main embed
        winstatus = "won" if player_team == radiant_win else "lost"
        data["embeds"] = [{
            "title": f"Results for Match #{match_id}",
            "description": f"{player['personaname']} {winstatus} as {hero_name} ({minutes}:{seconds})",
            "url": f"{self.OPENDOTA_URL}/{match_id}",
            "color": int(match_id % 0xffffff),
            "fields": embs,
            "footer": {
                "text": "Provided by OpenDota API"
            }
        }]
        return data

    def post_payload(self, data=None):
        """
        Send a dictionary of data to the Discord endpoint.
        Returns the Response, or False when there is nothing to send.
        """
        # Bug fix: the default used to be the mutable literal {}.
        if not data:
            return False
        return post(self.endpoint, json=data,
                    headers={'content-type': 'application/json'})

    def main(self):
        """One polling pass: refresh hero data, then for every "*dota"
        key file post any newly finished match to Discord."""
        self._load_hero_data()
        self.logger(f"Heroes loaded: {len(self.heroes)}")
        files = [f for f in self.filegen().iterdir() if f"{f}".endswith("dota")]
        self.logger(f"Keys: {files}")
        for keypath in files:
            last_match, dota_id = self.get_last_match(keypath)
            if last_match is None:
                continue
            print("Posting match")
            payload = self.get_payload(last_match, dota_id)
            if not payload:
                # Bug fix: previously execution fell through here, called
                # post_payload(False) and crashed on r.status_code.
                self.logger("Failed to craft a payload")
                continue
            r = self.post_payload(payload)
            print(r.status_code)
            sleep(self.DELAY_GAP)
if __name__ == "__main__":
bot = Dotabot("dotabot")
bot.run()
# end
|
#importing necessary libraries for math calculations and image generation
import math
from PIL import Image
#defines a function to generate a mandelbrot image
def mandelbrot(size, accuracy):
    """Render a size x size Mandelbrot escape-time image over the complex
    square [-2, 2] x [-2, 2] and save it as "demo_image.png".

    :param size: image width and height in pixels
    :param accuracy: maximum iterations, i.e. how many shades are used
    """
    img = Image.new("RGB", (size, size))
    # escape-time value (0..255) for every pixel
    coords = [[0] * size for _ in range(size)]
    step = 4 / (size - 1)  # distance between neighbouring sample points
    for x in range(0, size):
        for y in range(0, size):
            # c is the sampled complex point; z starts at c, which equals
            # one iteration of z = z^2 + c from z0 = 0.
            cx = -2 + step * x
            cy = -2 + step * y
            zx, zy = cx, cy
            counter = 0
            # Iterate z <- z^2 + c until |z| >= 2 (compared squared, so no
            # sqrt per step) or the iteration budget runs out.
            while zx * zx + zy * zy < 4 and counter < accuracy:
                # Bug fix: the previous update (zx**3 - zy**3, 3*zx*zy)
                # matched neither z^2 nor z^3, although the comments said
                # "squares Z"; this is the true complex square.
                zx, zy = zx * zx - zy * zy + cx, 2 * zx * zy + cy
                counter += 1
            # counter == accuracy (never escaped) maps to full red, just
            # as in the original.
            coords[x][y] = int(counter / accuracy * 255)
    for x in range(0, size):
        for y in range(0, size):
            color = coords[x][y]
            # NOTE(review): channel formula kept from the original; the
            # values can go negative / exceed 255, relying on PIL's
            # out-of-range handling — confirm this styling is intended.
            img.putpixel((x, y), (color - 50, color % 2 * x - 50,
                                  color % 256 * y - 50))
    img.save("demo_image.png", "PNG")
# Render a 512x512 image using up to 256 iterations (shades of red).
mandelbrot(512, 256)
|
from itunes_to_universal_scrobbler import parse
import nose.tools as tools
def test_parse():
    """parse() should convert an iTunes playlist dump (one track per
    line: title, m:ss duration, artist, album, genre, play count) into
    Universal Scrobbler CSV lines of the form
    "artist", "title", "album", "", "", "<duration in seconds>".
    """
    args = {
        "<playlist>": """Porcelain 3:13 Hundredth RARE B-Sides - Single Rock 0
Bound 3:13 Hail the Sun Secret Wars - EP Rock 0
Guillotinas 3:15 Viva Belgrado Guillotinas - Single Electronic 0
(Telebrothy) 2:11 Demons Embrace Wolf Alternative 2 1
Hope Is Lost 4:34 Open Hand Hope Is Lost - Single Alternative 0
Ghosts of Former Lives 3:54 Icarus The Owl Rearm Circuits Alternative 0
Swans 3:12 Heal No Love / No Light Rock 2
The Ponytail Parade (Reimagined) 3:54 Emery Revival: Emery Classic Reimagined Rock 0
Doubt Mines 2:38 Terrible Love Doubt Mines - Single Alternative 0
Deserted Dunes Welcome Weary Feet 3:34 King Gizzard & The Lizard Wizard Polygondwanaland Alternative 2
Northern Skin 4:17 Actor Observer One Another - Single Heavy Metal 2
To Venomous Depths / Where No Light Shines 7:53 Cloak To Venomous Depths Heavy Metal 2
The Grave 3:27 Tracy Bryant A Place for Nothing and Everything in Its Place Indie Rock 0
To Be Given a Body 8:04 TORRES Three Futures Alternative 0
Asktell (Audiotree Live Version) 2:56 Lina Tullgren Lina Tullgren on Audiotree Live - EP Singer/Songwriter 0
Whispers From the Surface of a Lake 2:43 Hior Chronik Out of the Dust Modern Era 0
Old Anew 7:21 Ensemble, Et Al. The Slow Reveal Electronic 0
Luxury 2:17 Martyn Heyne Electric Intervals Contemporary Era 0
Secret Life of Waves 3:14 Robert Haigh Creatures of the Deep Ambient 0
Blue Eyes Reflection 5:03 Vanity Productions Only the Grains of Love Remain Electronic 0
One With You 2:29 Backtrack Bad To My World Punk 0
Coming To 2:27 Hangman A Vile Decree - EP Rock 0
Shattering 2:28 Sincere Engineer Rhombithian EMO 0
Moon Curser 8:27 Dead Quiet Grand Rites Heavy Metal 0
Freedom 3:01 Bib Moshpit - EP Alternative 0
"""
    }
    expected = """
"Hundredth", "Porcelain", "RARE B-Sides - Single", "", "", "193"
"Hail the Sun", "Bound", "Secret Wars - EP", "", "", "193"
"Viva Belgrado", "Guillotinas", "Guillotinas - Single", "", "", "195"
"Demons", "(Telebrothy)", "Embrace Wolf", "", "", "131"
"Open Hand", "Hope Is Lost", "Hope Is Lost - Single", "", "", "274"
"Icarus The Owl", "Ghosts of Former Lives", "Rearm Circuits", "", "", "234"
"Heal", "Swans", "No Love / No Light", "", "", "192"
"Emery", "The Ponytail Parade (Reimagined)", "Revival: Emery Classic Reimagined", "", "", "234"
"Terrible Love", "Doubt Mines", "Doubt Mines - Single", "", "", "158"
"King Gizzard & The Lizard Wizard", "Deserted Dunes Welcome Weary Feet", "Polygondwanaland", "", "", "214"
"Actor Observer", "Northern Skin", "One Another - Single", "", "", "257"
"Cloak", "To Venomous Depths / Where No Light Shines", "To Venomous Depths", "", "", "473"
"Tracy Bryant", "The Grave", "A Place for Nothing and Everything in Its Place", "", "", "207"
"TORRES", "To Be Given a Body", "Three Futures", "", "", "484"
"Lina Tullgren", "Asktell (Audiotree Live Version)", "Lina Tullgren on Audiotree Live - EP", "", "", "176"
"Hior Chronik", "Whispers From the Surface of a Lake", "Out of the Dust", "", "", "163"
"Ensemble, Et Al.", "Old Anew", "The Slow Reveal", "", "", "441"
"Martyn Heyne", "Luxury", "Electric Intervals", "", "", "137"
"Robert Haigh", "Secret Life of Waves", "Creatures of the Deep", "", "", "194"
"Vanity Productions", "Blue Eyes Reflection", "Only the Grains of Love Remain", "", "", "303"
"Backtrack", "One With You", "Bad To My World", "", "", "149"
"Hangman", "Coming To", "A Vile Decree - EP", "", "", "147"
"Sincere Engineer", "Shattering", "Rhombithian", "", "", "148"
"Dead Quiet", "Moon Curser", "Grand Rites", "", "", "507"
"Bib", "Freedom", "Moshpit - EP", "", "", "181"
""".strip()
    tools.eq_(expected, "\n".join(line for line in parse(args)))
|
from eppy.doc import EppDoc
class EppInfoLaunch(EppDoc):
    """EPP <info> command for the launch-phase extension; always asks the
    server to include mark data."""
    _path = ('launch: info', )

    def __init__(self, phase: str, applicationid: str):
        body = {
            '@includeMark': 'true',
            'phase': phase,
            'applicationID': applicationid,
        }
        dct = {'launch:info': body}
        super(EppInfoLaunch, self).__init__(dct=self.annotate(dct))
|
# Project: CS426 Spring 2018, Team #23: SkyWarden Senior Project, Aerial Drone Notification System (ADNS)
# Team: Rony Calderon, Bryan Kline, Jia Li, Robert Watkins
# Subsystem: Ground Base Unit
# File name: ROSNodeManager.py
# Description: ROSNodeManager class implementation (Headless ANDS)
# ROSNodeManager class creates two ROS nodes, one for the voltage, one for the
# proximity, and then continually publishes and subscribes to the data coming into
# the ground unit
import geometry_msgs.msg
import std_msgs.msg
import rospy
from visualization_msgs.msg import Marker
from time import sleep
class ROSNodeManager:
    # Sensor-value prefixes: 'v' marks a voltage reading; 'a'..'n'
    # identify one of the proximity (time-of-flight) sensors on-board.
    __VOLTAGE_CHAR = 'v'
    __PROXIMITY_CHAR_START = 'a'
    __PROXIMITY_CHAR_END = 'n'
    # voltage and proximity topic publisher nodes
    __voltagePublisher = None
    __proximityPublisher = None

    # Name: Default constructor
    # Description: ROSNodeManager class default constructor which initializes the node and then creates a number
    #              of other nodes, both publishers and subscribers, by calling the various initializer methods
    # Parameters: None
    # Return: None
    def __init__(self):
        rospy.init_node('headlessGroundUnitNode')
        self.initializeVoltageNode()
        self.initializeProximityNode()

    # Name: initializeVoltageNode
    # Description: ROSNodeManager class method which initializes the voltage publisher node which
    #              continually publishes the voltage value
    # Parameters: None
    # Return: None
    def initializeVoltageNode(self):
        self.__voltagePublisher = rospy.Publisher('headlessVoltagePublish', std_msgs.msg.String, queue_size=1)

    # Name: initializeProximityNode
    # Description: ROSNodeManager class method which initializes the proximity publisher node which
    #              continually publishes the proximity values
    # Parameters: None
    # Return: None
    def initializeProximityNode(self):
        # for publishing the proximity as a string
        #self.__proximityPublisher = rospy.Publisher('proximityPublish', std_msgs.msg.String, queue_size=1)
        # for publishing the proximity as a Marker object
        self.__proximityPublisher = rospy.Publisher('headlessProximityPublish', Marker, queue_size=10)

    # Name: convertSensorDesc
    # Description: ROSNodeManager class method which takes in a sensor identifier as a char, converts
    #              it to an ordinal and appends it to the end of the quaternion label, which is returned
    #              as a string so that the correct sensor on-board the drone can be referenced for the
    #              quaternion launch file builder
    # Parameters: Takes in an identifier to convert to an ordinal
    # Return: Returns a string which is a quaternion label ("/quat/tof_tf_1" for 'a', and so on)
    def convertSensorDesc(self, identifier):
        return "/quat/tof_tf_" + str(ord(identifier) - ord('a') + 1)

    # Name: publishSensorValue
    # Description: ROSNodeManager class method which takes in a value, checks whether it is a
    #              voltage or a proximity reading, and publishes the value either as a voltage
    #              topic or a proximity topic; a proximity reading is converted into a Marker
    #              message tied to the quaternion frame of the corresponding on-board sensor
    # Parameters: Takes in a sensor value string, either a voltage or a proximity reading
    # Return: None
    def publishSensorValue(self, sensorValue):
        # fix: compare against None with `is not` instead of `!=`
        if sensorValue is not None and len(sensorValue) > 1:
            prefix = sensorValue[0]
            if prefix == self.__VOLTAGE_CHAR:
                self.__voltagePublisher.publish(str(sensorValue))
            elif self.__PROXIMITY_CHAR_START <= prefix <= self.__PROXIMITY_CHAR_END:
                # publish the proximity reading as a Marker object
                marker = Marker()
                marker.header.frame_id = self.convertSensorDesc(prefix)
                marker.header.stamp = rospy.Time()
                marker.ns = "tof_system"
                marker.id = ord(prefix)
                marker.type = 0    # 0 = ARROW in visualization_msgs/Marker
                marker.action = 0  # 0 = ADD/MODIFY
                marker.pose.position.x = 0
                marker.pose.position.y = 0
                marker.pose.position.z = 0
                marker.pose.orientation.x = 0.0
                marker.pose.orientation.y = 0.0
                marker.pose.orientation.z = 0.0
                marker.pose.orientation.w = 1.0
                # NOTE(review): only the single character sensorValue[1] is
                # parsed here (presumably mm scaled to metres); if readings
                # are multi-digit this should likely be sensorValue[1:] —
                # confirm the wire format before changing.
                marker.scale.x = int(sensorValue[1]) * 0.001
                marker.scale.y = 0.1
                marker.scale.z = 0.1
                marker.color.a = 1.0
                marker.color.r = 0.0
                marker.color.g = 1.0
                marker.color.b = 0.0
                self.__proximityPublisher.publish(marker)
                # for publishing the proximity as a string
                #self.__proximityPublisher.publish(str(sensorValue))
|
'''
Program that manages a soccer player's performance record. It reads the
player's name and how many matches he played, then the number of goals
scored in each match. Everything is stored in a dictionary, including
the total number of goals scored in the championship.
'''
nome = str(input('Nome do Jogador: '))
qtde_par = int(input(f'Quantas partidas {nome} jogou? '))
tot_gols = qtde_gols = 0
gols = list()
partidas = list()
aproveitamento = dict()
# Read the goal count for every match (matches are numbered from 0).
for i in range(0, qtde_par):
    qtde_gols = (int(input(f'Quantos gols na partida {i}? ')))
    tot_gols += qtde_gols
    gols.append(qtde_gols)
# Summary dict: player name, goals-per-match list, and the grand total.
aproveitamento['nome'] = nome
aproveitamento['gols'] = gols
aproveitamento['total'] = tot_gols
partidas = aproveitamento['gols']  # alias of the goals-per-match list
print('-=' * 30)
print(aproveitamento)
print('-=' * 30)
# Dump every key/value pair of the summary dict.
for k, v in aproveitamento.items():
    print(f'O campo {k} tem o valor {v}')
print('-=' * 30)
print(f'O jogador {nome} jogou {qtde_par} partidas.')
# Per-match report, again numbered from 0.
for k, i in enumerate(partidas):
    print(f'=> Na partida {k}, fez {i} gols.')
print(f'Foi um total de {tot_gols} gols')
|
#!/usr/bin/python
# Python 2 script: report the length of each word in a user-supplied list.
# NOTE(review): Python 2 input() eval()s whatever the user types, so items
# must be entered quoted, e.g. 'a','bb','ccc'; raw_input would be safer.
print "Number of words length"
list1 = input("Enter any words of list separated by commas :")
print list1
# Length of the string form of every entered element.
newlist = [len(str(element)) for element in list1]
print newlist
# Assumes at least three items were entered; IndexError otherwise.
print list1[2]
|
from drf_yasg.utils import swagger_auto_schema
from rest_framework import generics, status
from rest_framework.permissions import IsAuthenticated
from rest_framework.response import Response
from apps.account.custom_openapi import (
AuthRetrieveAPIView, AuthUpdateAPIView, auth_param,
)
from apps.account.models import City
from apps.account.serializers import (
PhoneAuthSerializer, LoginConfirmationCodeSerializer, CitySerializer,
UserUpdateSerializer, LoginConfirmAPIViewResponseSerializer,
UserAvatarUpdateSerializer, AuthErrorSerializer, ConfirmationCodeSerializer,
ChageOldPhoneSerializer, UserRetrieveSerializer,
)
from apps.account.service import (
UserAuthService, PhoneConfirmationService, ChangeOldPhoneService
)
from apps.integration.service import User1cUpdateService
class AuthAPIView(generics.GenericAPIView):
    """Endpoint that logs in (or creates) a user by phone and sends an SMS."""
    serializer_class = PhoneAuthSerializer

    @swagger_auto_schema(
        responses={
            200: '{"message": "Сообщение отправлено"}',
            201: '{"message": "User создан! Сообщение отправлено"}',
            400: "It will return error type",
            429: '{"message": "Вы слишком часто отправляете сообщение."}',
        }
    )
    def post(self, request, *args, **kwargs):
        # Validate the phone payload, then let the service build the response.
        ser = self.serializer_class(data=request.data)
        ser.is_valid(raise_exception=True)
        return UserAuthService.get_response(ser)
class LoginConfirmAPIView(generics.GenericAPIView):
    """Endpoint that confirms the phone number and authorises the user."""
    serializer_class = LoginConfirmationCodeSerializer

    @swagger_auto_schema(
        responses={
            200: LoginConfirmAPIViewResponseSerializer(),
            400: 'It will return error type',
            403: '{"message": "Неверный код"}',
            404: '{"detail": "user not found"}',
        }
    )
    def post(self, request, *args, **kwargs):
        # Validate the confirmation code, then delegate to the service layer.
        ser = self.serializer_class(data=request.data)
        ser.is_valid(raise_exception=True)
        return PhoneConfirmationService.get_response(ser)
class SendSmsToOldPhoneAPIView(generics.GenericAPIView):
    """Endpoint that sends an SMS to the user's old phone number."""
    permission_classes = (IsAuthenticated,)

    @swagger_auto_schema(
        manual_parameters=[auth_param],
        responses={
            200: '{"message": "Сообщение отправлено"}',
            400: "It will return error type",
            401: AuthErrorSerializer(),
            429: '{"message": "Вы слишком часто отправляете сообщение."}',
        }
    )
    def get(self, request, *args, **kwargs):
        # The SMS goes to the authenticated user's currently registered phone.
        return UserAuthService.send_to_old_phone(request.user)
class OldPhoneConfirmAPIView(generics.GenericAPIView):
    """Endpoint that confirms the user's old phone number."""
    permission_classes = (IsAuthenticated,)
    serializer_class = ConfirmationCodeSerializer

    @swagger_auto_schema(
        manual_parameters=[auth_param],
        responses={
            200: '{"message": "Old phone is confirmed"}',
            400: 'It will return error type',
            401: AuthErrorSerializer(),
            403: '{"message": "Неверный код"}',
        }
    )
    def post(self, request, *args, **kwargs):
        # Validate the code; confirmation is tied to the calling user.
        ser = self.serializer_class(data=request.data)
        ser.is_valid(raise_exception=True)
        return PhoneConfirmationService.get_response_for_old_phone_confirmation(
            request.user, ser
        )
class ChangeOldPhoneAPIView(generics.GenericAPIView):
    """Endpoint that changes the user's old phone number."""
    permission_classes = (IsAuthenticated,)
    serializer_class = ChageOldPhoneSerializer

    @swagger_auto_schema(
        # Consistency fix: every other IsAuthenticated endpoint in this
        # module documents the auth header and the 401 response; this one
        # was missing both.
        manual_parameters=[auth_param],
        responses={
            200: '{"message": "Сообщение отправлено"}',
            400: "It will return error type",
            401: AuthErrorSerializer(),
            406: "{'message': 'Такой номер телефона уже существует'}",
            429: '{"message": "Вы слишком часто отправляете сообщение."}',
        }
    )
    def post(self, request, *args, **kwargs):
        # The serializer needs the request in its context to identify the user.
        serializer = self.serializer_class(
            data=request.data, context={'request': request}
        )
        serializer.is_valid(raise_exception=True)
        return ChangeOldPhoneService.get_response(serializer)
class NewPhoneConfirmAPIView(generics.GenericAPIView):
    """Endpoint that confirms the user's new phone number."""
    permission_classes = (IsAuthenticated,)
    serializer_class = ConfirmationCodeSerializer

    @swagger_auto_schema(
        manual_parameters=[auth_param],
        responses={
            200: '{"message": "New phone is confirmed"}',
            400: 'It will return error type',
            401: AuthErrorSerializer(),
            403: '{"message": "Неверный код"}',
        }
    )
    def post(self, request, *args, **kwargs):
        # Validate the code; confirmation is tied to the calling user.
        ser = self.serializer_class(data=request.data)
        ser.is_valid(raise_exception=True)
        return ChangeOldPhoneService.get_response_for_new_phone_confirmation(
            request.user, ser
        )
class CityListAPIView(generics.ListAPIView):
    """Read-only endpoint returning the full list of cities."""
    queryset = City.objects.all()
    serializer_class = CitySerializer
class UserUpdateAPIView(AuthUpdateAPIView):
    """Endpoint for updating the currently authenticated user."""
    serializer_class = UserUpdateSerializer
    def get_object(self):
        # Always operate on the requesting user, never on a URL-addressed one.
        return self.request.user
    def perform_update(self, serializer):
        user = serializer.save()
        # Keep the external 1C system in sync after every profile update.
        User1cUpdateService.update_1c_user_id(user)
class UserRetrieveAPIView(AuthRetrieveAPIView):
    """Endpoint for retrieving the currently authenticated user."""
    serializer_class = UserRetrieveSerializer
    def get_object(self):
        # Always return the requesting user.
        return self.request.user
class UserAvatarRetrieveUpdateAPIView(AuthRetrieveAPIView, AuthUpdateAPIView,
                                      generics.DestroyAPIView):
    """Retrieve, update or clear the current user's avatar image."""
    serializer_class = UserAvatarUpdateSerializer
    def get_object(self):
        # The avatar always belongs to the requesting user.
        return self.request.user
    def destroy(self, request, *args, **kwargs):
        # "Deleting" the avatar just nulls the field rather than deleting
        # the user record.
        user = self.get_object()
        user.avatar = None
        user.save(update_fields=['avatar'])
        return Response(status=status.HTTP_204_NO_CONTENT)
|
# Function naming rules:
# SayHello
# def 8isay(name):          # invalid: an identifier cannot start with a digit
#     print("你好:" + name)
# 1. Must start with an upper/lowercase letter or an underscore
# 2. May contain upper/lowercase letters, underscores, and digits
#
def _isay(name):
print("你好:" + name) |
import pprint
import time
import argparse
from interface import input_terms, input_N_topN, input_choose_algo
from algorithms import NaiveAlgorithm, FaginsThreshold_Algorithm, FaginsThreshold_WithEpsilon_Algorithm, FaginAlgorithmW
from indexing import InvertedFileBuilder
from htmlwriter import HtmlWriter
from processing import Tokenization, idf
from settings import DATAFOLDER, TEST_DATAFOLDER, PL_FILE, STEMMING, BATCH_SIZE, EPSILON
def operation_file(datafolder, filename, map):
    """Build, merge and persist the inverted-file index.

    Returns the ``InvertedFileBuilder`` so callers can query it.
    """
    builder = InvertedFileBuilder(datafolder, filename, map, BATCH_SIZE, STEMMING)
    builder.build_partial()
    builder.merge()
    builder.save()
    return builder
def calculate(algo_op, N, terms, algoF, algoN, algoFT, algoFTE, epsilon=EPSILON):
    """Run the search algorithm selected by ``algo_op``.

    algo_op: 0 = naive, 1 = Fagin, 2 = Fagin threshold,
             3 = Fagin threshold with epsilon approximation.

    Raises:
        ValueError: for any other ``algo_op`` (the original code raised
        an obscure UnboundLocalError on ``ans`` instead).
    """
    if algo_op == 0:
        return algoN.search(N, terms)
    if algo_op == 1:
        return algoF.search(N, terms)
    if algo_op == 2:
        return algoFT.search(N, terms)
    if algo_op == 3:
        return algoFTE.search(N, terms, epsilon)
    raise ValueError('Unknown algorithm option: %r' % (algo_op,))
def init(inverted_file):
    """Instantiate the four ranking algorithms over ``inverted_file``.

    Returns them as ``[algoF, algoN, algoFT, algoFTE]``.
    """
    return [
        FaginAlgorithmW(inverted_file),
        NaiveAlgorithm(inverted_file),
        FaginsThreshold_Algorithm(inverted_file),
        FaginsThreshold_WithEpsilon_Algorithm(inverted_file),
    ]
def op_arg_parser():
    """Parse CLI options, falling back to defaults from ``settings``.

    Returns ``[arg_parser, args, datafolder, filename, map]``.
    """
    # BUG FIX: the original assigned STEMMING / BATCHSIZE / EPSILON as
    # locals, so the CLI overrides were silently discarded (and BATCHSIZE
    # was a typo for BATCH_SIZE). Rebind the module-level names instead.
    global STEMMING, BATCH_SIZE, EPSILON
    arg_parser = argparse.ArgumentParser("Better than Google and Duckduckgo")
    arg_parser.add_argument('-d', '--datafolder', help='Choose datafolder', type=str)
    arg_parser.add_argument('-n', '--name', help='Choose filename', type=str)
    arg_parser.add_argument('-m', '--map', help='Map id term, set to load an index', type=str)
    arg_parser.add_argument('-s', '--stemming', help='Do you want stemming ? (yes) -take a lit of time ==', type=str)
    arg_parser.add_argument('-b', '--batchsize', help='Choose your batch size - default=1000', type=int)
    arg_parser.add_argument('-e', '--epsilon', help='Epsilon for Fagins', type=int)
    datafolder = DATAFOLDER
    filename = PL_FILE
    map = ''
    args = arg_parser.parse_args()
    if args.datafolder is not None:
        datafolder = args.datafolder
        # BUG FIX: was ``datafolder is 't'`` — identity comparison on
        # strings is unreliable; compare by value.
        if datafolder == 't':
            datafolder = TEST_DATAFOLDER
    if args.name is not None:
        filename = args.name
    if args.map is not None:
        map = args.map
    if args.stemming is not None:
        STEMMING = True
    if args.batchsize is not None:
        BATCH_SIZE = args.batchsize
    if args.epsilon is not None:
        EPSILON = args.epsilon
    return [arg_parser, args, datafolder, filename, map]
def main():
    """Interactive search loop: build the index once, then answer queries."""
    arg_parser, args, datafolder, filename, map = op_arg_parser()
    inverted_file = operation_file(datafolder, filename, map)
    algoF, algoN, algoFT, algoFTE = init(inverted_file)
    html = HtmlWriter(datafolder)
    while not input('Enter Q (or q) for quit, otherwise continue ...\n') in ['Q', 'q']:
        algo_op = input_choose_algo()
        N = input_N_topN(algo_op)
        terms = input_terms()
        # Re-join the terms so the tokenizer can strip stop words from the
        # whole query at once.
        tokenize = Tokenization()
        full_term = ' '.join(terms)
        terms = tokenize.__remove_stopwords__(full_term)
        terms = [x.lower() for x in terms.split(' ')]
        if STEMMING:
            # BUG FIX: the stemmed list was previously built and thrown
            # away; keep the result. nltk is imported lazily so the script
            # still starts when stemming is disabled.
            import nltk
            porter = nltk.PorterStemmer()
            terms = [porter.stem(t) for t in terms]
        print(terms)
        t1 = time.time()
        ans = calculate(algo_op, N, terms, algoF, algoN, algoFT, algoFTE)
        t2 = time.time()
        print("-------------ans--------------")
        # import pdb; pdb.set_trace()
        # html.writeHTMLresponse(str(terms), ans)
        pprint.pprint(ans)
        print("-------------ans--------------")
        print('Found in ' + str(t2 - t1))
# Run the interactive loop only when executed as a script.
if __name__ == '__main__':
    main()
|
import time
import curses
from maximize_console import maximize_console
def some(n, character):
    """Draw a diagonal of ``character`` shifted ``n`` columns right.

    Column 0 is labelled with the row numbers. Always returns True.
    """
    screen = curses.initscr()
    for col in range(25):
        for row in range(35):
            if col == 0:
                # Label each row once, while scanning the first column.
                screen.addstr(row, 0, str(row))
            if col == row:
                screen.addstr(row, col + n, character)
            screen.refresh()
            # time.sleep(0.025)
    return True
if __name__ == "__main__":
maximize_console()
time.sleep(0.1)
for x in range(35):
some(x, character='X')
time.sleep(0.025)
# stdscr.clear()
for x in range(35):
some(x, character='.')
time.sleep(0.01)
for x in range(35):
some(x, character=' ')
time.sleep(0.01)
# curses.reset_shell_mode() # return to shell mode
# print(42)
|
from rest_framework import filters
from .models import Project
class ProjectPermissionFilter(filters.BaseFilterBackend):
    """DRF filter backend that narrows a queryset to the projects the
    requesting user is permitted to see.

    ``distinct()`` guards against duplicate rows introduced by joins.
    """
    def filter_queryset(self, request, queryset, view):
        # NOTE(review): assumes Project.objects.permitted_query(user)
        # returns something filter() accepts (e.g. a Q object) — confirm.
        return queryset.filter(Project.objects.permitted_query(request.user)).distinct()
|
#!/usr/bin/env python
import os
import string
import re
import struct
# Python 2 script: parses a mixed text/binary '.eu' results file.
# Layout: header line, names section, frequencies section, then per-name
# blocks of packed 4-byte floats delimited by '*VALUEB' markers.
os.getcwd()
os.listdir('.')
f = open('c005r9951.eu', 'rb')
# First line is a key; second is the header with location/cell/date fields.
hdrKey = f.readline()
hdrStr = f.readline()
q1, q2, q3, loc, cell, dateStr, q4, q5, q6 = hdrStr.split()
# NOTE(review): q6 is parsed but never printed — confirm it is irrelevant.
print q1, q2, q3, loc, cell, dateStr, q4, q5
# Names section header: "<key> (<count>) (<n>A...)" — split on spaces,
# tabs, parentheses and newlines.
names_str = f.readline()
delimiters = ' \t()\n'
reg_ex = re.compile("[" + re.escape(delimiters) + "]*")
names_token = reg_ex.split(names_str)
names_key = names_token[0]
num_names = string.atoi(names_token[1])
name_format = names_token[2]
# The digits before 'A' in the Fortran-style format give names per line.
loc_a = string.find(name_format, 'A')
names_per_line = string.atoi(name_format[:loc_a])
num_name_lines = num_names / names_per_line
if num_names % names_per_line != 0:
    num_name_lines = num_name_lines + 1
names = []
[names.extend([nm for nm in f.readline().split()]) for i in range(num_name_lines)]
print names
# Frequencies section: same structure, but an 'F' (float) format code.
frequencies_str = f.readline()
delimiters = ' \t()\n'
reg_ex = re.compile("[" + re.escape(delimiters) + "]*")
frequencies_token = reg_ex.split(frequencies_str)
frequenciesKey = frequencies_token[0]
num_frequencies = string.atoi(frequencies_token[1])
frequencies_format = frequencies_token[2]
locF = string.find(frequencies_format, 'F')
frequenciesPerLine = string.atoi(frequencies_format[:locF])
numFreqLines = num_frequencies / frequenciesPerLine
if num_frequencies % frequenciesPerLine != 0:
    numFreqLines = numFreqLines + 1
frequencies = []
[frequencies.extend([string.atof(fs) for fs in f.readline().split()]) for i in range(numFreqLines)]
print frequencies
# Data blocks: everything before the next '*VALUEB' marker is packed
# little 4-byte groups, each unpacked as a single float.
par_str_next = f.readline().split()
data = []
for j in range(num_names):
    data.append([])
    par_str = par_str_next
    data_str = f.readline()
    loc_next = string.find(data_str, '*VALUEB')
    par_str_next = data_str[loc_next:]
    data_str = data_str[:loc_next]
    num_data = len(data_str) / 4
    i_data = []
    for i in range(num_data):
        temp = data_str[4 * i:4 * i + 4]
        packed = struct.pack('4c', temp[0], temp[1], temp[2], temp[3])
        # NOTE(review): unpack('f', ...) returns a 1-tuple, so data holds
        # tuples, not floats — confirm downstream consumers expect that.
        data[j].append(struct.unpack('f', packed))
# Dump a CSV-ish view of the parsed table.
print 'size data=', len(data)
for k in range(len(data)):
    print len(data[k]),
print ''
for j in range(len(names)):
    print names[j], ',',
print ''
for i in range(num_data):
    for j in range(len(names)):
        print data[j][i], ',',
    print ''
f.close()
|
from django.shortcuts import render
import requests
import json
import os
from fpdf import FPDF
from json import dumps
from zipfile import ZipFile
from urllib.request import urlopen
from json import load
from django.http import HttpResponse
from hops.models import OngoingJobs
from django.http import JsonResponse
from django.views.decorators.csrf import csrf_exempt
# Create your views here.
# SECURITY: VPN credentials were previously hard-coded here in plain text.
# They have been removed — rotate that password, and load credentials from
# environment variables or a secrets manager instead of source control.
# Page dimensions used for image placement — presumably millimetres
# (FPDF's default unit); confirm against the FPDF() constructor defaults.
WIDTH = 200
HEIGHT = 297
# Placeholder report date used until real study metadata is wired in.
TEST_DATE = "10/20/20"
class PDF(FPDF):
    """Report PDF with a logo header and page-number footer on every page."""
    def header(self):
        # Logo on the left, then a centred bold title.
        self.image("hops_logo.png", 10, 8, 33)
        self.set_font('Arial', 'B', 15)
        self.cell(85)
        self.cell(10, 9, 'Hops Report', 0, 0, 'C')
        self.ln(10)
    def footer(self):
        # 15 units above the bottom edge: grey, centred page number.
        self.set_y(-15)
        self.set_font('Arial', 'I', 8)
        self.set_text_color(128)
        self.cell(0, 10, 'Page ' + str(self.page_no()), 0, 0, 'C')
def create_title(day, pdf):
    """Write the report header (date line plus a 2-column info table).

    Args:
        day: report date string, rendered in the "Date :" line. The
            original ignored this parameter and hard-coded '08/06/2021'.
        pdf: FPDF instance to draw on.
    """
    pdf.ln(20)
    epw = pdf.w - 2*pdf.l_margin  # effective page width
    col_width = epw/2
    # Placeholder patient data — TODO: populate from study metadata.
    data = [['Name','Rahul Jain'],
            ['Age','21'],
            ['Num of lung slices','400'],[
            'Num of positive slices','200'],
            ['ResultType','Abnormal'],
            ['GlobalDiagnosis','Something']
            ]
    th = pdf.font_size
    pdf.set_font('Times','B',13.0)
    # BUG FIX: use the ``day`` argument instead of a hard-coded date.
    pdf.cell(epw, 0.0, 'Date : ' + str(day), align='L')
    pdf.set_font('Times','',12.0)
    pdf.ln(2*th)
    for row in data:
        for datum in row:
            pdf.cell(col_width, 2*th, str(datum), border=1)
        pdf.ln(2*th)
    pdf.ln(2.5*th)
def home_page(request):
    """Render the landing page."""
    return render(request,'base.html')
def report(request):
    """Render the report page for the (hard-coded) demo study.

    Reads the pipeline's JSON summary out of the result zip, extracts the
    archive into MEDIA on first use, and passes both to the template.
    """
    studyid = '1.2.826.0.1.3680043.8.1678.101.10637297040685652766.532213_result'
    dirname = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
    media = os.path.join(dirname, 'media')
    outpath = os.path.join(media, studyid)
    # BUG FIX: ``outpath`` is a directory, so os.path.isfile() was always
    # False and the archive was re-extracted on every request.
    already_extracted = os.path.isdir(outpath)
    with ZipFile('1.2.826.0.1.3680043.8.1678.101.10637297040685652766.532213_result.zip', "r") as archive:
        arr = archive.namelist()
        # The 4th-from-last entry is the JSON summary produced by the pipeline.
        with archive.open(arr[len(arr)-4], "r") as json_file:
            output = load(json_file)
        if not already_extracted:
            archive.extractall(outpath)
    dicti = {
        'studyid': studyid,
        'list': arr,
        'out': output
    }
    dataJSON = dumps(dicti)
    return render(request, 'report.html', {'data': dataJSON, 'output': output})
def faqs(request):
    """Render the FAQ page."""
    return render(request,'faqs.html')
def login(request):
    """Render the login page (no authentication logic here)."""
    return render(request,'login.html')
def register(request):
    """Render the registration page (no account-creation logic here)."""
    return render(request,'register.html')
def view_2d_images(request):
    """Render the 2-D image viewer for the (hard-coded) demo study.

    Extracts the result zip into MEDIA on first use and hands the file
    list to the template as JSON.
    """
    studyid = '1.2.826.0.1.3680043.8.1678.101.10637297040685652766.532213_result'
    dirname = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
    media = os.path.join(dirname, 'media')
    outpath = os.path.join(media, studyid)
    # BUG FIX: ``outpath`` is a directory, so os.path.isfile() was always
    # False and the archive was re-extracted on every request.
    already_extracted = os.path.isdir(outpath)
    with ZipFile('1.2.826.0.1.3680043.8.1678.101.10637297040685652766.532213_result.zip', "r") as archive:
        if not already_extracted:
            archive.extractall(outpath)
        dicti = {
            'studyid': studyid,
            'list': archive.namelist()
        }
    dataJSON = dumps(dicti)
    return render(request, 'twodimages.html', {'data': dataJSON})
def files(request):
    """Show the upload form, or echo the submitted comment fields back."""
    post = request.POST
    if not post:
        # GET (or empty POST): just render the form.
        return render(request, 'files.html')
    payload = {
        'type': post['type'],
        'reason': post['reason'],
        'description': post['description'],
        'review': post['review']
    }
    return render(request, 'comments.html', {'data': dumps(payload)})
def comments(request):
    """Render the comments page."""
    return render(request,'comments.html')
def vtkviewer(request):
    """Render the VTK volume-viewer page."""
    return render(request,'vtkviewer.html')
def jobs(request):
    """Render the job-status table.

    Collects distinct study ids, asks the processing server for each
    job's completion percentage, and builds the rows for ``jobs.html``.
    """
    study_ids = OngoingJobs.objects.all().values_list('studyid', flat=True)
    study_ids = list(set(study_ids))
    # NOTE(review): hard-coded internal service address — move to settings.
    ids_csv = ','.join(study_ids)
    post_data = {'study_instance_ids': str(ids_csv)}
    percent_completed = requests.post('http://192.168.1.196:5000/get_progress_percents', data=post_data)
    percent_completed = json.loads(percent_completed.text)['status']
    percent_completed = percent_completed.split(",")
    status = []
    for i in percent_completed:
        if i == "100":
            status.append('Completed')
        else:
            status.append('Ongoing')
    headers = ['Name','Status','Percent Completed']
    rows = []
    for i in range(len(study_ids)):
        row = []
        row.append(study_ids[i])
        row.append(status[i])
        row.append(percent_completed[i])
        # BUG FIX: the original ``row.append(row.append(...))`` appended
        # the name and then a spurious trailing None (append returns None).
        row.append(OngoingJobs.objects.filter(studyid=study_ids[i])[0].name)
        rows.append(row)
    data_dict = {'headers': headers, 'rows': rows}
    return render(request,'jobs.html',{'data_dict' : data_dict,'study_ids' : study_ids,'percent_completed': percent_completed})
@csrf_exempt
def save_to_jobs(request):
    """Persist a new OngoingJobs row from POSTed studyid/name and echo it back."""
    # NOTE(review): csrf_exempt and no input validation — callers are trusted.
    id = request.POST.get("studyid")
    patient_name = request.POST.get("name")
    jobs = OngoingJobs(studyid=id,name=patient_name)
    jobs.save()
    return JsonResponse({'result' : "Successful",'Name' : patient_name,"Study ID" : id})
def download_report(request, studyid):
    """Return the result archive for ``studyid`` as a zip download.

    Currently serves a fixed local file; the remote fetch is kept below
    for reference.
    """
    # post_data = {'study_instance_id': str(studyid)}
    # response_path = requests.post('http://192.168.1.196:5000/get_report', data=post_data)
    # path_to_file = response_path.text
    path_to_file = r"1.2.826.0.1.3680043.8.1678.101.10637297040685652766.532213_result.zip"
    # BUG FIX: the original created an empty HttpResponse and immediately
    # discarded it; build the response once, from the file contents.
    with open(path_to_file, 'rb') as fh:
        response = HttpResponse(fh.read(), content_type='application/zip')
    response['Content-Disposition'] = 'inline; filename=' + studyid + ".zip"
    return response
def download_vti(request):
    """Extract (if needed) and stream the .vti volume for the demo study."""
    studyid = '1.2.826.0.1.3680043.8.1678.101.10637297040685652766.532213_result'
    dirname = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
    media = os.path.join(dirname, 'media')
    outpath = os.path.join(media, studyid)
    # BUG FIX: ``outpath`` is a directory, so os.path.isfile() was always
    # False and the archive was re-extracted on every request.
    already_extracted = os.path.isdir(outpath)
    with ZipFile('1.2.826.0.1.3680043.8.1678.101.10637297040685652766.532213_result.zip', "r") as archive:
        arr = archive.namelist()
        if not already_extracted:
            archive.extractall(outpath)
    # The last archive entry is the .vti volume.
    path = os.path.join(outpath, arr[len(arr)-1])
    # BUG FIX: dropped the discarded first HttpResponse.
    with open(path, 'rb') as fh:
        response = HttpResponse(fh.read(), content_type='application/vti')
    response['Content-Disposition'] = 'inline; filename=' + studyid + ".vti"
    return response
def get_report(request):
    """Build the PDF report for the demo study and return it inline."""
    day = TEST_DATE
    # NOTE(review): ``filename`` is unused; output actually goes to
    # 'filename.pdf' below.
    filename = 'report.pdf'
    pdf = PDF()
    pdf.add_page()
    create_title(day, pdf)
    pdf.set_draw_color(0, 80, 180)
    pdf.set_fill_color(230, 230, 0)
    pdf.set_text_color(220, 50, 50)
    #zipfile.extractall()
    #print(zipfile.namelist())
    #resp = urlopen('https://drive.google.com/uc?export=download&id=1dWUye322dBFsER-O_2yni3z9vBjhcIHh')
    #zipfile = ZipFile(BytesIO(resp.read()))
    #print(zipfile.namelist())
    studyid = '1.2.826.0.1.3680043.8.1678.101.10637297040685652766.532213_result'
    dirname = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
    media =os.path.join(dirname, 'media')
    outpath = os.path.join(media,studyid)
    # NOTE(review): outpath is a directory, so isfile() is always False and
    # the archive is re-extracted on every request — likely should be isdir().
    exis = os.path.isfile(outpath)
    #print(outpath)
    # for name in zipfile.namelist():
    #     zipfile.extract(name, outpath)
    with ZipFile('1.2.826.0.1.3680043.8.1678.101.10637297040685652766.532213_result.zip', "r") as zipfile:
        arr = zipfile.namelist()
        if not exis:
            zipfile.extractall(outpath)
        # Place the four projection images in a 2x2 grid with captions.
        pdf.image(os.path.join(outpath,arr[len(arr)-8]), 5, 120, WIDTH/2-10)
        pdf.cell(32)
        pdf.cell(20, 10, 'front', 1, 0 , 'C')
        pdf.image(os.path.join(outpath,arr[len(arr)-6]), WIDTH/2, 120, WIDTH/2-10)
        pdf.cell(77)
        pdf.cell(20, 10, 'lateral', 1, 0, 'C')
        pdf.image(os.path.join(outpath,arr[len(arr)-5]), 5, 200, WIDTH/2-10)
        pdf.ln(77)
        pdf.cell(32)
        pdf.cell(20, 10, 'natural', 1, 0 , 'C')
        pdf.image(os.path.join(outpath,arr[len(arr)-2]), WIDTH/2, 200, WIDTH/2-10)
        pdf.cell(77)
        pdf.cell(20, 10, 'top', 1, 0, 'C')
    # Write the PDF to disk, then stream it back to the client.
    result = pdf.output(name = 'filename.pdf', dest = 'F')
    with open('filename.pdf', 'rb') as pdf2:
        response = HttpResponse(pdf2,content_type='application/pdf')
    response['Content-Disposition'] = 'filename=some_file.pdf'
    return response
import urllib
import logging
import json
import heapq
from datetime import datetime
from datetime import time as ti
from spyne import Application, srpc, ServiceBase, Integer, String
from spyne import Iterable
from spyne.protocol.http import HttpRpc
from spyne.protocol.json import JsonDocument
from spyne.server.wsgi import WsgiApplication
class CrimeReport(ServiceBase):
    """Spyne service (Python 2) summarising SpotCrime incidents near a point."""
    @srpc(String, String, String, _returns=Iterable(String))
    def checkcrime(lat, lon, radius):
        """Fetch crimes around (lat, lon) within ``radius`` and yield a
        summary dict: total count, per-type counts, 3-hour time buckets
        and the three streets with the most incidents."""
        res = urllib.urlopen('https://api.spotcrime.com/crimes.json?lat='+lat+'&lon='+lon+'&radius='+radius+'&key=.')
        #res = urlib.urlopen('https://api.spotcrime.com/crimes.json?lat=37.334164&lon=-121.884301&radius=0.02&key=.')
        crimedata = json.loads(res.read())
        # street name -> number of incidents observed on that street
        address_dict = {}
        # NOTE: ``dict`` shadows the builtin for the rest of this method.
        dict = {
            "total_crime" : 0,
            "the_most_dangerous_streets" : 0,
            "crime_type_count" : {
                "Assault" : 0,
                "Arrest" : 0,
                "Burglary" : 0,
                "Robbery" : 0,
                "Theft" : 0,
                "Other" : 0
            },
            "event_time_count" : {
                "12:01am-3am" : 0,
                "3:01am-6am" : 0,
                "6:01am-9am" : 0,
                "9:01am-12noon" : 0,
                "12:01pm-3pm" : 0,
                "3:01pm-6pm" : 0,
                "6:01pm-9pm" : 0,
                "9:01pm-12midnight" : 0
            }
        }
        dict["total_crime"] = len(crimedata['crimes'])
        for each in crimedata['crimes']:
            # Tally by crime type (unknown types are simply not counted).
            if each.get('type') == "Assault":
                dict["crime_type_count"]["Assault"]+=1
            elif each.get('type') == "Arrest":
                dict["crime_type_count"]["Arrest"]+=1
            elif each.get('type') == "Burglary":
                dict["crime_type_count"]["Burglary"]+=1
            elif each.get('type') == "Robbery":
                dict["crime_type_count"]["Robbery"]+=1
            elif each.get('type') == "Theft":
                dict["crime_type_count"]["Theft"]+=1
            elif each.get('type') == "Other":
                dict["crime_type_count"]["Other"]+=1
            # Bucket the event time into 3-hour windows. The date field's
            # tail (from position 9) holds a "%I:%M %p" clock time.
            # NOTE: leading-zero literals like 03 are Python 2 octal syntax.
            timedate = each.get('date')[9:]
            t1 = datetime.strptime(timedate,"%I:%M %p")
            crimet = t1.time()
            if crimet >= ti(00,01,00) and crimet <= ti(03,00,00):
                dict["event_time_count"]["12:01am-3am"]+=1
            elif crimet >= ti(03,01,00) and crimet <= ti(06,00,00):
                dict["event_time_count"]["3:01am-6am"]+=1
            elif crimet >= ti(06,01,00) and crimet <= ti(9,00,00):
                dict["event_time_count"]["6:01am-9am"]+=1
            elif crimet >= ti(9,01,00) and crimet <= ti(12,00,00):
                dict["event_time_count"]["9:01am-12noon"]+=1
            elif crimet >= ti(12,01,00) and crimet <= ti(15,00,00):
                dict["event_time_count"]["12:01pm-3pm"]+=1
            elif crimet >= ti(15,01,00) and crimet <= ti(18,00,00):
                dict["event_time_count"]["3:01pm-6pm"]+=1
            elif crimet >= ti(18,01,00) and crimet <= ti(21,00,00):
                dict["event_time_count"]["6:01pm-9pm"]+=1
            elif crimet >= ti(21,01,00) and crimet < ti(00,00,00) or crimet == ti(00,00,00):
                dict["event_time_count"]["9:01pm-12midnight"]+=1
            # Extract street names from addresses of the form
            # "<n> OF <street>" or "<street> & <street>" and count them.
            address = each.get('address')
            ad = ''
            if 'OF' in address:
                ad = address.split('OF')
                for obj in ad:
                    obj=obj.strip()
                    if 'ST' in obj:
                        if address_dict.has_key(obj):
                            address_dict[obj]+=1
                        else:
                            address_dict.update({obj:1})
                    if 'RD' in obj:
                        if address_dict.has_key(obj):
                            address_dict[obj]+=1
                        else:
                            address_dict.update({obj:1})
                    if 'AV' in obj:
                        if address_dict.has_key(obj):
                            address_dict[obj]+=1
                        else:
                            address_dict.update({obj:1})
            if '&' in address:
                ad = address.split('&')
                for obj in ad:
                    obj=obj.strip()
                    if 'ST' in obj:
                        if address_dict.has_key(obj):
                            address_dict[obj]+= 1
                        else:
                            address_dict.update({obj:1})
                    elif 'AV' in obj:
                        if address_dict.has_key(obj):
                            address_dict[obj]+= 1
                        else:
                            address_dict.update({obj:1})
        # Top three streets by incident count.
        dict["the_most_dangerous_streets"] = heapq.nlargest(3,address_dict, key=address_dict.get)
        yield dict
if __name__ == '__main__':
    # Stand-alone server: expose CrimeReport over plain HTTP-RPC in,
    # JSON out, on localhost:8000.
    from wsgiref.simple_server import make_server
    logging.basicConfig(level=logging.DEBUG)
    application = Application([CrimeReport], 'checkcrime.',
        in_protocol=HttpRpc(validator='soft'),
        out_protocol=JsonDocument(ignore_wrappers=True)
        )
    # Now that we have our application, we must wrap it inside a transport.
    # In this case, we use Spyne's standard Wsgi wrapper. Spyne supports
    # popular Http wrappers like Twisted, Django, Pyramid, etc. as well as
    # a ZeroMQ (REQ/REP) wrapper.
    wsgi_application = WsgiApplication(application)
    # More daemon boilerplate
    server = make_server('127.0.0.1', 8000, wsgi_application)
    logging.info("listening to http://127.0.0.1:8000")
    logging.info("wsdl is at: http://localhost:8000/?wsdl")
    server.serve_forever()
|
import zmq
# Python 2 ZeroMQ client: subscribes to a remote publisher and registers
# itself via a separate REQ socket, then busy-polls for messages.
context =zmq.Context()
#From user to server
frontend = context.socket(zmq.SUB)
frontend.connect("tcp://nhkim91.ddns.net:2224")
#From server to user
frontreq = context.socket(zmq.REQ)
frontreq.connect("tcp://nhkim91.ddns.net:2225")
frontreq.send_string("t_STYROR_0xFF")
# NOTE(review): no SUBSCRIBE option is ever set (the line below is
# commented out); a SUB socket without a subscription receives nothing —
# confirm this is intentional.
# frontend.setsockopt_string(zmq.SUBSCRIBE, '10001'.decode('ascii'))
while True:
    try:
        # Non-blocking receive; zmq.Again just means "nothing yet".
        message = frontend.recv_string(zmq.DONTWAIT)
        print message
    except zmq.Again:
        pass
from django.contrib import admin
from apps.tema.models import Tema
# Expose Tema in the Django admin with the default ModelAdmin options.
admin.site.register(Tema)
# Double every element of a list (helper used with map() below).
def double(n):
    """Return twice ``n``."""
    return n + n
x = [1, 2, 3, 4]
# map() is lazy in Python 3; materialise it with list() to print the values.
y = map(double, x)
print(list(y))
|
'''
Methods ---> (Functions)
-> Represent the behaviours of an object, i.e. the actions the object can
perform in the system.
Methods fall into 2 groups:
    - Instance methods
    - Class methods
# The __init__ method is a special method called the 'constructor'. Its job
is to build the object from the class ---> NOTE: read as 'dunder init'.
NOTE: 'dunder' methods in Python are called magic methods.
NOTE: you can create your own 'dunder' method, but it is not advisable.
'''
print()
# >>>>>>>>>> Instance methods <<<<<<<<<
# A class may have as many methods as needed.
# These methods are executed by the object (instance) created from the class.
class Lampada:
    """A lamp with colour, voltage and luminosity; created switched off."""
    def __init__(self, cor, voltagem, luminosidade):
        self.cor = cor
        # Name-mangled ("private") attributes.
        self.__voltagem = voltagem
        self.__luminosidade = luminosidade
        self.__ligada = False
class Produtos:
    """Product with an auto-incremented id and tax applied to the price."""
    # Class-level tax multiplier (5%) and shared id counter.
    imposto = 1.05
    contador = 0
    def __init__(self, nome, descricao, valor):
        # Each instance takes the next id and bumps the shared counter.
        self.__id = Produtos.contador + 1
        self.__nome = nome
        self.__descricao = descricao
        self.__valor = (Produtos.imposto * valor)
        Produtos.contador = self.__id
    def desconto(self, porcentagem):
        "Return the old price and the price with the given % discount applied."
        return f'Valor anterior: {self.__valor}\nNovo valor: {(self.__valor * (100 - porcentagem))/100}'
from passlib.hash import pbkdf2_sha256 as cryp
class Usuario:
    """User with a pbkdf2-sha256 hashed password and basic validators."""
    def __init__(self, nome, sobrenome, email, senha):
        self.__nome = nome
        self.__sobrenome = sobrenome
        self.__email = email
        # Store only the salted hash, never the plaintext password.
        self.__senha = cryp.hash(senha, rounds=200000, salt_size=16)
    def nomeCompleto(self):
        "Return the user's full name."
        return f'{self.__nome} {self.__sobrenome}'
    def checaEmail(self, email):
        """Normalise ``email`` (drop all spaces) and check it contains '@'.

        Returns True when plausible, False otherwise.
        """
        # BUG FIX: the original assigned email.strip() and immediately
        # overwrote it — replace(' ', '') already removes every space.
        self.__email = email.replace(' ', '')
        try:
            assert('@' in self.__email)
        except AssertionError:
            # BUG FIX: removed the unreachable exit(1) that followed
            # this return in the original.
            return False
        return True
    def checaSenha(self, senha):
        "Return True when ``senha`` matches the stored hash."
        if cryp.verify(senha, self.__senha):
            return True
        return False
    # It is not advisable to create your own 'dunder' method:
    # def __deslog__(self, verif, senha):
    #     print(f'Verificando: {verif}, Senha: {senha}')
#===============================================================================================
# Demo: products and users (instance methods in action).
prod1 = Produtos('XBox One S', 'Console - Microsoft', 2000)
print(prod1.desconto(20)) # 20% discount
user1 = Usuario('Felipe', 'Ribeiro', 'email@gmail.com', '123456789'); print()
print(user1.nomeCompleto())
user2 = Usuario('Pessoa', 'Qualquer', 'mail1@gmail.com', '543211234')
print(user2.nomeCompleto())
print(Usuario.nomeCompleto(user1)) # -> Another way to call the method
print(f'Senha User1: {user1._Usuario__senha}') # Poor style: reaching the mangled name, even though it works
print()
novoUser = Usuario('User', 'sobrenomeUser', 'user @gmail. com', '23344466666')
#print(novoUser.checaEmail('user @gmail. com'))
print()
outroUser = Usuario('Outro', 'Usuário', 'outro usergm a il .com', 'umdoistresquatro')
#print(outroUser.checaEmail('outro usergm a il .com'))
#print(outroUser.checaSenha('1234'))
print(outroUser.checaSenha('umdoistresquatro'))
print()
#===============================================================================================
# Interactive demo: create a user from stdin and validate the credentials.
nome = input('Nome: ')
sobrenome = input('Sobrenome: ')
email = input('Email: ')
senha = input('Senha: ')
confirmSenha = input('Confirme sua senha: '); print()
if senha == confirmSenha:
    newUser = Usuario(nome, sobrenome, email, senha)
else:
    print('Senha não confere.')
    exit(1)
if newUser.checaEmail(email):
    pass
else:
    print('Acesso negado!')
    exit(1)
if newUser.checaSenha(senha):
    print('Acesso permitido!')
    print(f'Senha criptografada: {newUser._Usuario__senha}')
else:
    print('Acesso negado!')
print()
#===============================================================================================
# >>>>>>>>>> Class Methods <<<<<<<<<
# Use decorators
'''
Class methods do not access instance attributes (which use self);
they access the class itself.
'''
# Should be accessed through the class
# These methods are executed by the class
class Usuario:
    """User (v2): adds a class-level counter and a class method."""
    contador = 0
    @classmethod # Decorator
    def countUser(cls): # cls --> refers to the class -> requires @classmethod
        print(f'Temos {cls.contador} usuário(s) no sistema')
    def __init__(self, nome, sobrenome, email, senha):
        # Each instance takes the next id and bumps the shared counter.
        self.id = Usuario.contador + 1
        self.__nome = nome
        self.__sobrenome = sobrenome
        self.__email = email
        self.__senha = cryp.hash(senha, rounds=200000, salt_size=16)
        Usuario.contador = self.id
        print(f'Usuário criado: {self.__geraUsuario()}')
    def nomeCompleto(self):
        "Return the user's full name."
        return f'{self.__nome} {self.__sobrenome}'
    def checaEmail(self, email):
        # NOTE(review): the strip() result is immediately overwritten —
        # harmless here, since replace(' ', '') removes all spaces anyway.
        self.__email = email.strip()
        self.__email = email.replace(' ', '')
        try:
            assert('@' in self.__email)
        except AssertionError:
            return False
        return True
    def checaSenha(self, senha):
        "Return True when ``senha`` matches the stored hash."
        if cryp.verify(senha, self.__senha):
            return True
        return False
    def __geraUsuario(self): # Private method (because of the double underscore '__')
        return self.__email.split('@')[0]
newUser = Usuario('Felipe', 'Ribeiro', 'felipe02@gmail.com', '123456789'); print()
Usuario.countUser() # Correct form: accessed through the class
newUser.countUser() # Possible, but discouraged: accessed through the instance
print(newUser._Usuario__geraUsuario()) # Poor style: reaching the mangled private name
print()
#===============================================================================================
# >>>>>>>>>> Static Methods <<<<<<<<<
# Do not receive instance (self) or class (cls) arguments
# Use another decorator ---> @staticmethod
class Usuario:
    """User (v3): adds a @staticmethod alongside the class method."""
    contador = 0
    @classmethod # Decorator
    def countUser(cls): # cls --> refers to the class -> requires @classmethod
        print(f'Temos {cls.contador} usuário(s) no sistema')
    @staticmethod
    def definicao(): # Receives no instance or class parameter
        return '1234'
    def __init__(self, nome, sobrenome, email, senha):
        # Each instance takes the next id and bumps the shared counter.
        self.id = Usuario.contador + 1
        self.__nome = nome
        self.__sobrenome = sobrenome
        self.__email = email
        self.__senha = cryp.hash(senha, rounds=200000, salt_size=16)
        Usuario.contador = self.id
        print(f'Usuário criado: {self.__geraUsuario()}')
    def nomeCompleto(self):
        "Return the user's full name."
        return f'{self.__nome} {self.__sobrenome}'
    def checaEmail(self, email):
        # NOTE(review): strip() result is immediately overwritten — harmless,
        # since replace(' ', '') removes all spaces anyway.
        self.__email = email.strip()
        self.__email = email.replace(' ', '')
        try:
            assert('@' in self.__email)
        except AssertionError:
            return False
        return True
    def checaSenha(self, senha):
        "Return True when ``senha`` matches the stored hash."
        if cryp.verify(senha, self.__senha):
            return True
        return False
    def __geraUsuario(self): # Private method (because of the double underscore '__')
        return self.__email.split('@')[0]
# Demo: static methods can be called on any instance (or on the class).
novoUsuario = Usuario('Felipe', 'Ribeiro', 'felipemail@gmail.com', '123456789')
print(novoUsuario.definicao())
print(novoUsuario.contador)
print()
outroUsuario = Usuario('Alguem', 'Qualquer', 'qualqueremail@gmail.com', '987654321')
print(outroUsuario.definicao())
print(outroUsuario.contador)
#===============================================================================================
|
import numpy as np
def dft(x):
    """Direct O(N^2) discrete Fourier transform of a 1-D array."""
    size = x.shape[0]
    indices = np.arange(size)
    freqs = indices.reshape((size, 1))
    # DFT matrix: exp(-2*pi*i * k * n / N).
    matrix = np.exp(-2j * np.pi * freqs * indices / size)
    return matrix @ x
def fft(x):
    """Recursive Cooley-Tukey FFT of a 1-D array.

    Falls back to the direct O(n^2) DFT for small or odd-length inputs,
    so every length is handled correctly. The original split on any
    n > 32, which broke for lengths whose halving reaches an odd number
    greater than 32 (the even/odd halves then differ in size).
    """
    n = x.shape[0]
    if n <= 32 or n % 2:
        # Direct DFT base case (same formula as dft()); required when n is odd.
        k = np.arange(n).reshape((n, 1))
        m = np.exp(-2j * np.pi * k * np.arange(n) / n)
        return np.dot(m, x)
    x0 = fft(x[::2])
    x1 = fft(x[1::2])
    # Twiddle factors combine the half-size transforms.
    factor = np.exp(-2j * np.pi * np.arange(n) / n)
    return np.concatenate([x0 + factor[:n // 2] * x1,
                           x0 + factor[n // 2:] * x1])
|
import re
import itertools
import textwrap
import functools
try:
from importlib.resources import files # type: ignore
except ImportError: # pragma: nocover
from pkg_resources.extern.importlib_resources import files # type: ignore
from pkg_resources.extern.jaraco.functools import compose, method_cache
from pkg_resources.extern.jaraco.context import ExceptionTrap
def substitution(old, new):
    """
    Return a function that will perform a substitution on a string
    """
    def replace_in(text):
        return text.replace(old, new)
    return replace_in
def multi_substitution(*substitutions):
    """
    Take a sequence of pairs specifying substitutions, and create
    a function that performs those substitutions.

    >>> multi_substitution(('foo', 'bar'), ('bar', 'baz'))('foo')
    'baz'
    """
    # compose applies the last function first, so build the individual
    # substitution functions and hand them over in reverse order.
    funcs = [substitution(old, new) for old, new in substitutions]
    return compose(*funcs[::-1])
class FoldedCase(str):
    """
    A case insensitive string class; behaves just like str
    except compares equal when the only variation is case.

    >>> s = FoldedCase('hello world')

    >>> s == 'Hello World'
    True

    >>> 'Hello World' == s
    True

    >>> s != 'Hello World'
    False

    >>> s.index('O')
    4

    >>> s.split('O')
    ['hell', ' w', 'rld']

    >>> sorted(map(FoldedCase, ['GAMMA', 'alpha', 'Beta']))
    ['alpha', 'Beta', 'GAMMA']

    Sequence membership is straightforward.

    >>> "Hello World" in [s]
    True
    >>> s in ["Hello World"]
    True

    You may test for set inclusion, but candidate and elements
    must both be folded.

    >>> FoldedCase("Hello World") in {s}
    True
    >>> s in {FoldedCase("Hello World")}
    True

    String inclusion works as long as the FoldedCase object
    is on the right.

    >>> "hello" in FoldedCase("Hello World")
    True

    But not if the FoldedCase object is on the left:

    >>> FoldedCase('hello') in 'Hello World'
    False

    In that case, use ``in_``:

    >>> FoldedCase('hello').in_('Hello World')
    True

    >>> FoldedCase('hello') > FoldedCase('Hello')
    False
    """
    # All comparisons delegate to the lowercased values.
    def __lt__(self, other):
        return self.lower() < other.lower()
    def __gt__(self, other):
        return self.lower() > other.lower()
    def __eq__(self, other):
        return self.lower() == other.lower()
    def __ne__(self, other):
        return self.lower() != other.lower()
    def __hash__(self):
        # Case-insensitive equality demands a case-insensitive hash.
        return hash(self.lower())
    def __contains__(self, other):
        return super().lower().__contains__(other.lower())
    def in_(self, other):
        "Does self appear in other?"
        return self in FoldedCase(other)
    # cache lower since it's likely to be called frequently.
    @method_cache
    def lower(self):
        return super().lower()
    def index(self, sub):
        # Case-insensitive index of ``sub``.
        return self.lower().index(sub.lower())
    def split(self, splitter=' ', maxsplit=0):
        # Case-insensitive split on ``splitter`` (regex-escaped).
        pattern = re.compile(re.escape(splitter), re.I)
        return pattern.split(self, maxsplit)
# Python 3.8 compatibility
# Trap that converts a raised UnicodeDecodeError into a False return.
_unicode_trap = ExceptionTrap(UnicodeDecodeError)
@_unicode_trap.passes
def is_decodable(value):
    r"""
    Return True if the supplied value is decodable (using the default
    encoding).

    >>> is_decodable(b'\xff')
    False
    >>> is_decodable(b'\x32')
    True
    """
    # The decorator turns "decode() raised nothing" into True.
    value.decode()
def is_binary(value):
    r"""
    Return True if the value appears to be binary (that is, it's a byte
    string and isn't decodable).

    >>> is_binary(b'\xff')
    True
    >>> is_binary('\xff')
    False
    """
    # Only byte strings can be binary; text is never binary.
    if not isinstance(value, bytes):
        return False
    return not is_decodable(value)
def trim(s):
    r"""
    Trim something like a docstring to remove the whitespace that
    is common due to indentation and formatting.

    >>> trim("\n\tfoo = bar\n\t\tbar = baz\n")
    'foo = bar\n\tbar = baz'
    """
    dedented = textwrap.dedent(s)
    return dedented.strip()
def wrap(s):
    """
    Wrap lines of text, retaining existing newlines as
    paragraph markers.

    Each input line is re-wrapped independently (textwrap default width),
    and the wrapped paragraphs are joined by blank lines.
    """
    rewrapped = []
    for paragraph in s.splitlines():
        rewrapped.append('\n'.join(textwrap.wrap(paragraph)))
    return '\n\n'.join(rewrapped)
def unwrap(s):
    r"""
    Undo :func:`wrap`: collapse each blank-line-separated paragraph of
    ``s`` onto a single line, one line per paragraph.
    """
    # A run of one or more blank lines separates paragraphs; inside a
    # paragraph, newlines become plain spaces.
    paragraphs = re.split(r'\n\n+', s)
    flattened = [paragraph.replace('\n', ' ') for paragraph in paragraphs]
    return '\n'.join(flattened)
class Splitter(object):
    """
    Callable that splits every string it is applied to with a fixed set
    of ``str.split`` arguments.

    >>> s = Splitter(',')
    >>> s('hello, world, this is your, master calling')
    ['hello', ' world', ' this is your', ' master calling']
    """

    def __init__(self, *args):
        # Arguments are forwarded verbatim to ``str.split`` on each call.
        self.args = args

    def __call__(self, s):
        return s.split(*self.args)
def indent(string, prefix=' ' * 4):
    """
    Return ``string`` with ``prefix`` (four spaces by default) prepended.

    >>> indent('foo')
    '    foo'
    """
    return '{}{}'.format(prefix, string)
class WordSet(tuple):
    """
    The sequence of words making up an identifier, whatever its
    convention (camel case, underscores, dashes, spaces).

    >>> WordSet.parse("camelCase")
    ('camel', 'Case')
    >>> WordSet.parse("under_sep")
    ('under', 'sep')

    Acronyms are kept intact, even adjacent to another word's initial cap:

    >>> WordSet.parse("firstSNL")
    ('first', 'SNL')
    >>> WordSet.parse("myABCClass")
    ('my', 'ABC', 'Class')

    The result is a WordSet, so any output form is available:

    >>> WordSet.parse("myABCClass").underscore_separated()
    'my_ABC_Class'
    >>> WordSet.parse('a-command').camel_case()
    'ACommand'
    >>> WordSet.parse('someIdentifier').lowered().space_separated()
    'some identifier'

    Slicing yields another WordSet:

    >>> WordSet.parse('taken-out-of-context')[1:].underscore_separated()
    'out_of_context'

    >>> example = WordSet.parse('figured it out')
    >>> example.headless_camel_case()
    'figuredItOut'
    >>> example.dash_separated()
    'figured-it-out'
    """

    # Either a capitalized-or-lowercase word, or a run of caps not
    # followed by a lowercase letter (an acronym).
    _pattern = re.compile('([A-Z]?[a-z]+)|([A-Z]+(?![a-z]))')

    def capitalized(self):
        return WordSet(map(str.capitalize, self))

    def lowered(self):
        return WordSet(map(str.lower, self))

    def camel_case(self):
        return ''.join(self.capitalized())

    def headless_camel_case(self):
        word_iter = iter(self)
        head = next(word_iter).lower()
        tail = WordSet(word_iter).camel_case()
        return head + tail

    def underscore_separated(self):
        return '_'.join(self)

    def dash_separated(self):
        return '-'.join(self)

    def space_separated(self):
        return ' '.join(self)

    def trim_right(self, item):
        """
        Remove ``item`` from the end of the set, if present.

        >>> WordSet.parse('foo bar').trim_right('bar')
        ('foo',)
        >>> WordSet.parse('').trim_right('bar')
        ()
        """
        if self and self[-1] == item:
            return self[:-1]
        return self

    def trim_left(self, item):
        """
        Remove ``item`` from the beginning of the set, if present.

        >>> WordSet.parse('foo bar').trim_left('foo')
        ('bar',)
        >>> WordSet.parse('').trim_left('bar')
        ()
        """
        if self and self[0] == item:
            return self[1:]
        return self

    def trim(self, item):
        """
        Remove ``item`` from both ends of the set.

        >>> WordSet.parse('foo bar').trim('foo')
        ('bar',)
        """
        return self.trim_left(item).trim_right(item)

    def __getitem__(self, item):
        # Keep the WordSet type through slicing.
        result = super(WordSet, self).__getitem__(item)
        return WordSet(result) if isinstance(item, slice) else result

    @classmethod
    def parse(cls, identifier):
        return WordSet(
            match.group(0) for match in cls._pattern.finditer(identifier)
        )

    @classmethod
    def from_class_name(cls, subject):
        return cls.parse(subject.__class__.__name__)
# For backward compatibility: older callers imported a module-level
# ``words`` function rather than ``WordSet.parse``.
words = WordSet.parse
def simple_html_strip(s):
    r"""
    Crudely remove HTML tags and comments from ``s``, keeping only the
    text between them.

    >>> str(simple_html_strip(''))
    ''
    >>> print(simple_html_strip('A <bold>stormy</bold> day in paradise'))
    A stormy day in paradise
    >>> print(simple_html_strip('Somebody <!-- do not --> tell the truth.'))
    Somebody  tell the truth.
    >>> print(simple_html_strip('What about<br/>\nmultiple lines?'))
    What about
    multiple lines?
    """
    # Group 1: comments; group 2: tags; group 3: plain text (kept).
    html_stripper = re.compile('(<!--.*?-->)|(<[^>]*>)|([^<]+)', re.DOTALL)
    pieces = [match.group(3) or '' for match in html_stripper.finditer(s)]
    return ''.join(pieces)
class SeparatedValues(str):
    """
    A string of separator-delimited values; iterating yields the values.

    >>> list(SeparatedValues('a,b,c'))
    ['a', 'b', 'c']

    Whitespace is stripped and empty values are discarded.

    >>> list(SeparatedValues(' a, b , c, '))
    ['a', 'b', 'c']
    """

    separator = ','

    def __iter__(self):
        for part in self.split(self.separator):
            stripped = part.strip()
            if stripped:
                yield stripped
class Stripper:
    r"""
    Given a series of lines, find the common prefix and strip it from them.

    >>> lines = [
    ...     'abcdefg\n',
    ...     'abc\n',
    ...     'abcde\n',
    ... ]
    >>> res = Stripper.strip_prefix(lines)
    >>> res.prefix
    'abc'
    >>> list(res.lines)
    ['defg\n', '\n', 'de\n']

    If no prefix is common, nothing should be stripped.

    >>> lines = [
    ...     'abcd\n',
    ...     '1234\n',
    ... ]
    >>> res = Stripper.strip_prefix(lines)
    >>> res.prefix
    ''
    >>> list(res.lines)
    ['abcd\n', '1234\n']
    """

    # NOTE(fix): the second doctest previously read ``res.prefix = ''``,
    # an assignment that masked the check instead of asserting it.

    def __init__(self, prefix, lines):
        self.prefix = prefix
        # Strip lazily: each line loses the prefix as it is consumed.
        self.lines = map(self, lines)

    @classmethod
    def strip_prefix(cls, lines):
        # Duplicate the iterator: one copy to compute the common prefix
        # eagerly, the other to yield the (lazily) stripped lines.
        prefix_lines, lines = itertools.tee(lines)
        prefix = functools.reduce(cls.common_prefix, prefix_lines)
        return cls(prefix, lines)

    def __call__(self, line):
        if not self.prefix:
            return line
        null, prefix, rest = line.partition(self.prefix)
        return rest

    @staticmethod
    def common_prefix(s1, s2):
        """
        Return the common prefix of two lines.
        """
        # Shrink the candidate length until both prefixes agree.
        index = min(len(s1), len(s2))
        while s1[:index] != s2[:index]:
            index -= 1
        return s1[:index]
def remove_prefix(text, prefix):
    """
    Remove ``prefix`` from the beginning of ``text``, if present.

    >>> remove_prefix('underwhelming performance', 'underwhelming ')
    'performance'
    >>> remove_prefix('something special', 'sample')
    'something special'

    A prefix that also occurs later in the text is only removed once,
    from the front:

    >>> remove_prefix('salsals', 'sal')
    'sals'
    """
    # Previous implementation used ``text.rpartition(prefix)``, which
    # splits at the *last* occurrence — a repeated prefix deleted
    # everything up to its final appearance.
    if prefix and text.startswith(prefix):
        return text[len(prefix):]
    return text
def remove_suffix(text, suffix):
    """
    Remove ``suffix`` from the end of ``text``, if present.

    >>> remove_suffix('name.git', '.git')
    'name'
    >>> remove_suffix('something special', 'sample')
    'something special'

    A suffix that also occurs earlier in the text is only removed once,
    from the end:

    >>> remove_suffix('name.git.git', '.git')
    'name.git'
    """
    # Previous implementation used ``text.partition(suffix)``, which
    # splits at the *first* occurrence — a repeated suffix truncated the
    # text at its earliest appearance.
    if suffix and text.endswith(suffix):
        return text[:-len(suffix)]
    return text
def normalize_newlines(text):
    r"""
    Replace every alternate newline form in ``text`` with ``'\n'``.

    >>> normalize_newlines('Lorem Ipsum\u2029')
    'Lorem Ipsum\n'
    >>> normalize_newlines('Lorem Ipsum\r\n')
    'Lorem Ipsum\n'
    >>> normalize_newlines('Lorem Ipsum\x85')
    'Lorem Ipsum\n'
    """
    # '\r\n' must come before '\r' and '\n' so CRLF collapses to a
    # single newline rather than two.
    pattern = re.compile('\r\n|\r|\n|\u0085|\u2028|\u2029')
    return pattern.sub('\n', text)
def _nonblank(str):
return str and not str.startswith('#')
@functools.singledispatch
def yield_lines(iterable):
r"""
Yield valid lines of a string or iterable.
>>> list(yield_lines(''))
[]
>>> list(yield_lines(['foo', 'bar']))
['foo', 'bar']
>>> list(yield_lines('foo\nbar'))
['foo', 'bar']
>>> list(yield_lines('\nfoo\n#bar\nbaz #comment'))
['foo', 'baz #comment']
>>> list(yield_lines(['foo\nbar', 'baz', 'bing\n\n\n']))
['foo', 'bar', 'baz', 'bing']
"""
return itertools.chain.from_iterable(map(yield_lines, iterable))
@yield_lines.register(str)
def _(text):
return filter(_nonblank, map(str.strip, text.splitlines()))
def drop_comment(line):
    """
    Remove a trailing shell-style comment from ``line``.

    >>> drop_comment('foo # bar')
    'foo'

    A hash without a space may be in a URL.

    >>> drop_comment('http://example.com/foo#bar')
    'http://example.com/foo#bar'
    """
    # Only ' #' (space then hash) starts a comment, so URL fragments
    # survive.
    before, _sep, _comment = line.partition(' #')
    return before
def join_continuation(lines):
    r"""
    Join lines continued by a trailing backslash.

    >>> list(join_continuation(['foo \\', 'bar', 'baz']))
    ['foobar', 'baz']
    >>> list(join_continuation(['foo \\', 'bar \\', 'baz']))
    ['foobarbaz']
    >>> list(join_continuation(['goo\\', 'dly']))
    ['goodly']

    A terrible idea, but...
    If no line is available to continue, suppress the lines.

    >>> list(join_continuation(['foo', 'bar\\', 'baz\\']))
    ['foo']
    """
    lines = iter(lines)
    for item in lines:
        while item.endswith('\\'):
            try:
                # Drop only the backslash itself; the previous
                # implementation sliced off two characters ([:-2]),
                # eliding the character before the backslash as well
                # (e.g. 'goo\' + 'dly' became 'godly').  The strip()
                # still collapses 'foo \' + 'bar' to 'foobar'.
                item = item[:-1].strip() + next(lines)
            except StopIteration:
                # Dangling continuation: suppress the partial line.
                return
        yield item
|
from Crypto.Cipher import AES
from itertools import *
import os,sys
import random
# Candidate plaintexts, base64-encoded; Encrypt() selects one at random.
# Each decodes to a 6-digit index followed by a line of song lyrics.
l=["MDAwMDAwTm93IHRoYXQgdGhlIHBhcnR5IGlzIGp1bXBpbmc=",
"MDAwMDAxV2l0aCB0aGUgYmFzcyBraWNrZWQgaW4gYW5kIHRoZSBWZWdhJ3MgYXJlIHB1bXBpbic=",
"MDAwMDAyUXVpY2sgdG8gdGhlIHBvaW50LCB0byB0aGUgcG9pbnQsIG5vIGZha2luZw==",
"MDAwMDAzQ29va2luZyBNQydzIGxpa2UgYSBwb3VuZCBvZiBiYWNvbg==",
"MDAwMDA0QnVybmluZyAnZW0sIGlmIHlvdSBhaW4ndCBxdWljayBhbmQgbmltYmxl",
"MDAwMDA1SSBnbyBjcmF6eSB3aGVuIEkgaGVhciBhIGN5bWJhbA==",
"MDAwMDA2QW5kIGEgaGlnaCBoYXQgd2l0aCBhIHNvdXBlZCB1cCB0ZW1wbw==",
"MDAwMDA3SSdtIG9uIGEgcm9sbCwgaXQncyB0aW1lIHRvIGdvIHNvbG8=",
"MDAwMDA4b2xsaW4nIGluIG15IGZpdmUgcG9pbnQgb2g=",
"MDAwMDA5aXRoIG15IHJhZy10b3AgZG93biBzbyBteSBoYWlyIGNhbiBibG93"]
# Random 16-byte AES key, fixed for the lifetime of the process and
# shared by Encrypt()/Decrypt().
key=os.urandom(16)
def xor(a,b) :
    # XOR byte-string ``a`` against ``b``, cycling ``b`` to cover the
    # full length of ``a``.  Python 2 only: operates on ``str`` via
    # ord/chr and relies on ``itertools.izip``.
    return ''.join(chr(ord(i)^ord(j)) for i,j in izip(a,cycle(b)))
def pad(m):
    # PKCS#7-pad ``m`` up to the next multiple of the 16-byte AES block
    # size; a message already on a boundary gains a full padding block.
    padlen = 16 - (len(m) % 16)
    return m + padlen * chr(padlen)
def unpad(m):
    # Strip PKCS#7 padding from ``m`` after validating it; on invalid
    # padding, propagate checkpad()'s error code (-1).
    #
    # Previous implementation abused ``assert`` for control flow inside
    # a bare ``except`` that swallowed every error (including ones the
    # assert never anticipated); an explicit check is equivalent and
    # does not hide unrelated exceptions.
    result = checkpad(m)
    if result != 0:
        return result
    return m[:-ord(m[-1])]
def checkpad(m):
    # Validate PKCS#7 padding on ``m``: return 0 when correct, -1 in
    # every other case.
    last = ord(m[-1])
    # A valid pad byte is between 1 and 16 inclusive.
    if last < 1 or last > 16:
        return -1
    expected = chr(last)
    # The final ``last`` bytes must all equal the pad byte.
    for offset in range(1, last + 1):
        if m[-offset] != expected:
            return -1
    return 0
def Encrypt():
    # Pick a random plaintext from ``l`` and encrypt it under the
    # module-level ``key`` in AES-CBC with a fresh random IV; the IV is
    # prepended to the returned ciphertext.  (Python 2: str.decode('base64').)
    choice = random.randint(0, 9)
    plaintext = l[choice].decode('base64')
    iv = os.urandom(16)
    cipher = AES.new(key, AES.MODE_CBC, iv)
    return iv + cipher.encrypt(pad(plaintext))
def Decrypt(m):
    # Split off the leading 16-byte IV, decrypt the remainder in
    # AES-CBC under the module-level ``key``, and return the unpadded
    # plaintext (or -1 when the padding is invalid) — this is the
    # padding oracle exploited by the main loop.
    iv = m[:16]
    body = m[16:]
    cipher = AES.new(key, AES.MODE_CBC, iv)
    return unpad(cipher.decrypt(body))
if __name__ == "__main__" :
c=Encrypt()
b=len(c)/16
Df=""
#------loop to decrypt the entire block----------------
for k in range((b-2)*16,-1,-16) :
D=""
#----loop to decrypt each block------------------------
for i in range(16) :
#----loop to decrypt each character---------------------
for j in range(256) :
mc='\x00'*(16-i-1) + chr(j) + xor(D,chr(i+1)) #---------mc is the crafter cipher text---------------
print("xor.. : " ,xor(D,chr(i+1))
#assert len(mc) == 16
C=c[:k]+mc+c[k+16:k+32]
#---------mc is being passed as the second last block-
if Decrypt(C) != -1 :
#---------enters the if condition only if padding is correct
D=xor(chr(j),chr(i+1)) + D
break
Df=D+Df
print xor(Df,c[:-16])
|
# Station metadata for this XBee rain-gauge node.
name = '"Nightmare-fuel"'
location = 'Furi'
note = 'XBee rain gauge. Firmware rain0.1, hardware v0.2.'
#latitude = 21.3237992
#longitude = -157.8311465
# Channel configuration: one dict per reported variable.
# Keys: 'dbtag' (database column tag), 'unit', 'description',
# 'lb'/'ub' (plausible lower/upper bounds for the value),
# 'interval' (seconds between samples; hourly for every channel).
conf = [
    {
        'dbtag':'ts',
        'description':'Sample time',
        'interval':60*60,
    },
    {
        'dbtag':'mm',
        'unit':'mm/hr',
        'description':'Hourly rain fall',
        'lb':0,
        'ub':433,  # annual average
        'interval':60*60,
    },
    {
        'dbtag':'Vb',
        'unit':'V',
        'description':'Battery voltage',
        'lb':3.7,
        'ub':5.5,
        'interval':60*60,
    },
    {
        'dbtag':'Vs',
        'unit':'V',
        'description':'Solar panel voltage',
        'lb':0,
        'ub':7.0,
        'interval':60*60,
    },
    {
        'dbtag':'tc',
        'unit':'-',
        'description':'Tip count (debug; all-time cumulative)',
        'lb':0,
        'interval':60*60,
    },
    {
        'dbtag':'idx',
        'unit':'-',
        'description':'Sample index',
        'lb':2*24,
        'interval':60*60,
    },
]
if '__main__' == __name__:
    # Dump each configured channel, then (re)create the backing table.
    for entry in conf:
        print('- - -')
        for field, value in entry.items():
            print(field, ':', value)
    import sys
    sys.path.append('../..')
    from os.path import basename
    from storage.storage2 import create_table
    # Table name: this file's stem with underscores turned into dashes.
    table_name = basename(__file__).split('.')[0].replace('_', '-')
    create_table(conf, table_name)
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.