index int64 0 1,000k | blob_id stringlengths 40 40 | code stringlengths 7 10.4M |
|---|---|---|
17,200 | bcbb73af7fbd1eac8e6c7b5841d998c4b0720038 |
from gensim import utils
from gensim.models.doc2vec import LabeledSentence
from gensim.models import Doc2Vec
from gensim.models.doc2vec import LabeledSentence
# numpy
import numpy
from sklearn.metrics.pairwise import cosine_similarity
# random
from random import shuffle
import string
import nltk
from nltk.tokenize import RegexpTokenizer
from nltk.corpus import stopwords
import re
import pandas as pd
nltk.download('punkt')
import pandas as pd
import numpy as np
import nltk
from nltk.corpus import stopwords
from nltk.stem import SnowballStemmer
import re
from gensim import utils
from gensim.models.doc2vec import LabeledSentence
from gensim.models import Doc2Vec
from sklearn.metrics.pairwise import cosine_similarity
from sklearn.metrics import accuracy_score
from sklearn.metrics import confusion_matrix
from sklearn.metrics import classification_report
def preprocess(model, sentence):
    """Lower-case *sentence*, keep only word tokens, drop English stop words.

    ``model`` is currently unused (a vocabulary filter was disabled) but is
    kept in the signature for interface compatibility with callers.

    :param model: loaded Doc2Vec model (unused).
    :param sentence: raw question text.
    :return: filtered tokens re-joined into a single space-separated string.
    """
    sentence = sentence.lower()
    tokenizer = RegexpTokenizer(r'\w+')
    tokens = tokenizer.tokenize(sentence)
    stop_words = set(stopwords.words('english'))
    # Idiom fix: `w not in` instead of `not w in`.
    filtered_words = [w for w in tokens if w not in stop_words]
    return " ".join(filtered_words)
def review_to_wordlist(review, remove_stopwords=True):
    """Clean a review string and return its stemmed, space-joined tokens.

    Lower-cases, optionally removes English stop words, normalizes common
    contractions and punctuation with regexes, then Snowball-stems each token.

    :param review: raw review text.
    :param remove_stopwords: drop NLTK English stop words first (default True).
    :return: a single space-separated string of stemmed tokens.
    """
    # Clean the text, with the option to remove stopwords.
    # Convert words to lower case and split them
    words = review.lower().split()
    # Optionally remove stop words (true by default)
    if remove_stopwords:
        stops = set(stopwords.words("english"))
        words = [w for w in words if not w in stops]
    review_text = " ".join(words)
    # Clean the text: anything outside this whitelist becomes a space.
    review_text = re.sub(r"[^A-Za-z0-9(),!.?\'\`]", " ", review_text)
    # Split contractions off as their own tokens.
    review_text = re.sub(r"\'s", " 's ", review_text)
    review_text = re.sub(r"\'ve", " 've ", review_text)
    # NOTE(review): this maps "n't" to " 't ", dropping the leading "n";
    # similar cleaners usually emit " n't " — confirm this is intended.
    review_text = re.sub(r"n\'t", " 't ", review_text)
    review_text = re.sub(r"\'re", " 're ", review_text)
    review_text = re.sub(r"\'d", " 'd ", review_text)
    review_text = re.sub(r"\'ll", " 'll ", review_text)
    # Remove most punctuation; keep parentheses as separate tokens.
    review_text = re.sub(r",", " ", review_text)
    review_text = re.sub(r"\.", " ", review_text)
    review_text = re.sub(r"!", " ", review_text)
    review_text = re.sub(r"\(", " ( ", review_text)
    review_text = re.sub(r"\)", " ) ", review_text)
    review_text = re.sub(r"\?", " ", review_text)
    # Collapse runs of whitespace introduced above.
    review_text = re.sub(r"\s{2,}", " ", review_text)
    words = review_text.split()
    # Shorten words to their stems
    stemmer = SnowballStemmer('english')
    stemmed_words = [stemmer.stem(word) for word in words]
    review_text = " ".join(stemmed_words)
    # Return a list of words
    return(review_text)
def process_questions(model, question_list, questions, question_list_name):
    """Preprocess every question in *questions*, appending to *question_list*.

    Prints a progress line every 10,000 processed questions.

    :param model: Doc2Vec model forwarded to preprocess().
    :param question_list: output list, mutated in place.
    :param questions: iterable of raw question strings (e.g. a pandas Series).
    :param question_list_name: label used in progress messages.
    """
    # BUG FIX: progress used to divide by `len(df)`, a name that only exists
    # inside main() — a NameError whenever the progress branch fired. Use the
    # size of the input being processed instead.
    total = len(questions)
    for question in questions:
        question_list.append(preprocess(model, question))
        if len(question_list) % 10000 == 0:
            progress = len(question_list) / total * 100
            print("{} is {}% complete.".format(question_list_name, round(progress, 1)))
def performance_report(value, df, score_list):
    """Report duplicate-detection quality for a similarity threshold.

    The value (0-1) is the cosine similarity score used to decide whether a
    pair of questions has the same meaning; scores >= value predict 1.

    :param value: similarity threshold in [0, 1].
    :param df: DataFrame with the ground-truth `is_duplicate` column.
    :param score_list: cosine similarity score per question pair.
    """
    scores = [1 if score >= value else 0 for score in score_list]
    X = df.is_duplicate
    accuracy = accuracy_score(X, scores) * 100
    # BUG FIX: was `round(accuracy),1` — the 1 was passed to .format() as an
    # ignored extra argument, so the accuracy was rounded to an integer
    # instead of one decimal place.
    print("Accuracy score is {}%.".format(round(accuracy, 1)))
    print()
    print("Confusion Matrix:")
    print(confusion_matrix(X, scores))
    print()
    print("Classification Report:")
    print(classification_report(X, scores))
def main():
    """End-to-end demo: score Quora question pairs with a trained Doc2Vec
    model and report duplicate-detection quality at a 0.92 threshold."""
    # Pre-trained model and the Quora question-pairs CSV must exist locally.
    model = Doc2Vec.load('./doc2vec.bin')
    df = pd.read_csv("questions.csv")
    # NOTE(review): only rows 15-19 are processed — looks like a leftover
    # debugging slice; confirm before running on the full dataset.
    df = df[15:20]
    questions1 = []
    process_questions(model, questions1, df.question1, "questions1")
    print()
    questions2 = []
    process_questions(model, questions2, df.question2, "questions2")
    # Split questions for computing similarity and determining the lengths of the questions.
    questions1_split = []
    for question in questions1:
        questions1_split.append(question.split())
    questions2_split = []
    for question in questions2:
        questions2_split.append(question.split())
    # Determine the length of questions to select more optimal parameters.
    lengths = []
    for i in range(len(questions1_split)):
        lengths.append(len(questions1_split[i]))
        lengths.append(len(questions2_split[i]))
    lengths = pd.DataFrame(lengths, columns=["count"])
    doc2vec_scores = []
    for i in range(len(questions1_split)):
        # n_similarity computes the cosine similarity in Doc2Vec
        score = model.n_similarity(questions1_split[i], questions2_split[i])
        doc2vec_scores.append(score)
        if i % 10000 == 0:
            progress = i / len(questions1_split) * 100
            print("{}% complete.".format(round(progress, 2)))
    performance_report(0.92, df, doc2vec_scores)


if __name__ == "__main__":
    main()
|
17,201 | d81f338c73e51c1013a6d6570061e7ae73251705 | #! /usr/bin/python
import time
import RPi.GPIO as GPIO
import threading
class TrafficLight:
    """A sequence of Led objects cycled one at a time in insertion order."""

    def __init__(self):
        GPIO.setmode(GPIO.BCM)
        # Leds in the order they should light up.
        self.ledList = []

    def addLed(self, ledname, gpiono, delay_secs, status_blink=False):
        """Append a new Led and return the resulting number of leds."""
        self.ledList.append(Led(ledname, gpiono, delay_secs, status_blink))
        return len(self.ledList)

    def startTrafficLight(self):
        """Run one full cycle: force everything off, then light each led in
        turn, polling once per second until its auto-off timer completes."""
        for led in self.ledList:
            print('startTrafficLight %s led off status : %d' % (led.ledName, led.ledStatus))
            led.poweredTurnOffLed()
        for led in self.ledList:
            led.turnOnLedAndAutoOff()
            # Busy-wait until the led reports LED_STATUS_OFF (0).
            while led.ledStatus != 0:
                print('startTrafficLight %s led run status : %d' % (led.ledName, led.ledStatus))
                time.sleep(1)
class Led:
    """A single GPIO-driven LED with a timed auto-off.

    turnOnLed() lights the LED and arms a threading.Timer that calls
    turnOffLed() after DELAY_SECS. When STATUS_BLINK is set, turning off
    first blinks the LED five times (this blocks the timer thread ~7s).
    """

    def __init__(self, ledname, gpiono, delay_secs, status_blink=False):
        """Configure the GPIO pin and start with the LED forced off.

        :param ledname: human-readable label used in log prints.
        :param gpiono: RaspberryPi GPIO BCM pin number.
        :param delay_secs: seconds the LED stays on before auto-off.
        :param status_blink: blink five times when switching off.
        """
        self.ledName = ledname
        # LED state constants.
        self.LED_STATUS_OFF = 0
        self.LED_STATUS_ON = 1
        self.LED_STATUS_BLINK = 2
        self.ledStatus = self.LED_STATUS_OFF
        # This LED's RaspberryPi GPIO BCM No. (removed a dead `-1` pre-assignment).
        self.GPIONO = gpiono
        self.DELAY_SECS = delay_secs
        self.STATUS_BLINK = status_blink
        GPIO.setmode(GPIO.BCM)
        GPIO.setup(self.GPIONO, GPIO.OUT)
        self.setDelaySecs(self.DELAY_SECS)
        self.poweredTurnOffLed()

    def setDelaySecs(self, delay_secs):
        """Set how long the LED stays on before auto-off."""
        self.DELAY_SECS = delay_secs

    def setStatusBlink(self, status_blink):
        """Enable/disable the blink sequence when switching off."""
        self.STATUS_BLINK = status_blink

    def turnOnLed(self):
        """Light the LED and (re)arm the auto-off timer."""
        GPIO.output(self.GPIONO, True)
        if self.ledStatus == self.LED_STATUS_ON:
            # BUG FIX: was `turnoff_timmer.cancel()` (unqualified name ->
            # NameError). Cancel the previously armed instance timer before
            # re-arming so two timers never race.
            self.turnoff_timmer.cancel()
        self.ledStatus = self.LED_STATUS_ON
        self.turnoff_timmer = threading.Timer(self.DELAY_SECS, self.turnOffLed)
        self.turnoff_timmer.start()

    def turnOffLed(self):
        """Switch the LED off, blinking first when configured."""
        try:
            if self.STATUS_BLINK:
                self.ledStatus = self.LED_STATUS_BLINK
                # Blink 5 times; blocks the calling (timer) thread ~7 seconds.
                for i in range(5):
                    GPIO.output(self.GPIONO, True)
                    time.sleep(0.7)
                    GPIO.output(self.GPIONO, False)
                    time.sleep(0.7)
            else:
                GPIO.output(self.GPIONO, False)
            self.ledStatus = self.LED_STATUS_OFF
        except Exception as Err:
            print(Err)

    def poweredTurnOffLed(self):
        """Force the LED off immediately, cancelling any pending auto-off."""
        GPIO.output(self.GPIONO, False)
        if self.ledStatus == self.LED_STATUS_ON:
            self.turnoff_timmer.cancel()
        self.ledStatus = self.LED_STATUS_OFF

    def turnOnLedAndAutoOff(self):
        """Turn the LED on; it switches itself off after DELAY_SECS."""
        try:
            self.turnOnLed()
        except Exception as Err:
            print(Err)
if __name__ == "__main__":
    # First traffic light: red 10s, green 5s, yellow 2s on BCM pins 10/11/9.
    trafficlight = TrafficLight()
    # Adding RedLed
    trafficlight.addLed('Red', 10, 10, status_blink=False)
    # Adding GreenLed
    trafficlight.addLed('Green', 11, 5, status_blink=False)
    # Adding YellowLed
    trafficlight.addLed('Yellow', 9, 2, status_blink=False)
    # Second traffic light on a separate set of pins.
    trafficlight2 = TrafficLight()
    # Adding RedLed
    trafficlight2.addLed('Red', 25, 10, status_blink=False)
    # Adding GreenLed
    trafficlight2.addLed('Green', 7, 5, status_blink=False)
    # Adding GreenLed
    trafficlight2.addLed('Green2', 24, 5, status_blink=False)
    # Adding YellowLed
    trafficlight2.addLed('Yellow', 8, 2, status_blink=False)
    # Alternate the two lights forever.
    # NOTE(review): no GPIO.cleanup() on exit — pins stay configured; confirm
    # this is acceptable for the deployment.
    while True:
        trafficlight.startTrafficLight()
        trafficlight2.startTrafficLight()
        #trafficlight.test()
|
17,202 | bddfe9c0856bd6101b41c5b444b6c35e258c607e | DIR = 'data/'
# LINEA: presumably lines per page/section for output — confirm with consumers.
LINEA = 30
# bMovil: mobile-mode flag (Spanish "móvil") — confirm usage.
bMovil = False
# MySQL-style connection settings.
HOST = 'localhost'
USUARIO = 'ipaspudo'
# SECURITY NOTE(review): database credentials are hard-coded in source;
# move them to environment variables or a secrets store.
PASSWD = 'qazwsxedc'
BDEDATOS = 'ipaspudo'
17,203 | a08301ce1f89ebb8847c5ad01962cb441f443c6f | target = 'VPatch'
# Plugin source files.
files = Split("""
apply_patch.c
checksum.c
md5.c
vpatchdll.c
""")
# Windows system libraries to link against.
libs = Split("""
kernel32
user32
""")
# BuildPlugin helper and env are exported by the parent SConscript.
Import('BuildPlugin env')
# Build the plugin DLL with checksum support compiled in.
BuildPlugin(target, files, libs, defines = ['DLL_CHECKSUMS'])
|
17,204 | 1cc92383d4adc1fddaa7b8ed5f67533f99509000 | from testsuits.base_testcase import BaseTestCase
from pageobjects.znbwl_add_menu import ZnbwlAddMenu
import unittest
class Znbwladdmenu(BaseTestCase):
    """UI test: add two menu entries, then search for one of them."""

    def test_znbwl_add_menu(self):
        # Page object for the "add menu" screen, driven by the webdriver
        # that BaseTestCase sets up.
        zam = ZnbwlAddMenu(self.driver)
        zam.first_add_menu("today is Thursday")
        zam.second_add_menu("a fine day")
        zam.search("a")


if __name__ == "__main__":
    unittest.main()
17,205 | 6d2b0ee790f67028499dca784fa13f55250ae082 | # Generated by Django 2.1.5 on 2020-08-30 17:48
from django.db import migrations
class Migration(migrations.Migration):
    """Auto-generated: remove the 'members' and 'songs' fields from Rocker."""

    dependencies = [
        ('rockermind', '0002_auto_20200830_1147'),
    ]

    operations = [
        migrations.RemoveField(
            model_name='rocker',
            name='members',
        ),
        migrations.RemoveField(
            model_name='rocker',
            name='songs',
        ),
    ]
|
17,206 | cfc72e4a19de490d18d0df56e8da1d02721c8949 | """
For the time being, the most important heuristic for determining article priority will be based on the presence of
authors
"""
from sklearn import svm
from sklearn.feature_extraction.text import CountVectorizer, TfidfTransformer
from warden.models import AuthorRating, ArticleRating
class AuthorRank:
    """
    Given a member and a set of articles, return articles rated on author ratings.
    The more authors per article that have a good rating, the higher the article rating.
    """

    @classmethod
    def rank(cls, member, articles):
        """Return {article: summed rating of its authors} for *member*."""
        member_ratings = AuthorRating.objects.filter(member=member)
        rating_by_author = {r.author: r.rating for r in member_ratings}
        scores = {}
        for article in articles:
            # Unrated authors contribute 0 to the article's score.
            scores[article] = sum(
                rating_by_author.get(author, 0)
                for author in article.authors.all()
            )
        return scores
class CompositeRank:
    """
    Given a member and a set of articles, return articles rated on a set of article criteria:
    Basically everything, including a composite author rating. The weights will be learned.
    For now, only the abstracts of articles will be used to reduce the dimensionality of the problem space.
    TODO:
    1. autoencoder feature learning using pytorch into collaborative filtering
    2. bayesian something something
    3. category rating learning
    """

    def rank(self, member, articles):
        """Return {article: (author_score, text_prediction)} for *articles*.

        Trains an SVR on the member's previously rated article abstracts and
        predicts a rating for each unlabeled article; the author score sums
        the member's historical ratings over the article's authors. Falls
        back to (0, 0) for every article when the member has no ratings yet.
        """
        ranks = {}
        labeled = list(ArticleRating.objects.filter(member=member))
        if len(labeled) > 0:
            labeled_text = [rating.article.abstract for rating in labeled]
            labeled_authors = [rating.article.authors.all() for rating in labeled]
            labels = [rating.rating for rating in labeled]
            unlabeled_text = [article.abstract for article in articles]
            model, count_vect, tfidf_transformer = self.train_model(labeled_text, labels)
            predictions = self.predict(model, count_vect, tfidf_transformer, unlabeled_text)
            # Accumulate a per-author score from the member's labeled articles.
            author_rating = {}
            for label, l_authors in zip(labels, labeled_authors):
                for author in l_authors:
                    if author in author_rating:
                        author_rating[author] += label
                    else:
                        author_rating[author] = label
            # BUG FIX: the result list and the zip loop variable were both
            # named `author_pred`, so the loop rebound (shadowed) the list on
            # every iteration — renamed to keep them distinct.
            author_preds = [
                sum(author_rating.get(author, 0) for author in article.authors.all())
                for article in articles
            ]
            for article, author_score, prediction in zip(articles, author_preds, predictions):
                ranks[article] = (author_score, prediction)
        else:
            ranks = {article: (0, 0) for article in articles}
        return ranks

    def predict(self, model, count_vect, tfidf_transformer, text):
        """Vectorize *text* with the fitted transformers and predict ratings."""
        counts = count_vect.transform(text)
        tfidf = tfidf_transformer.transform(counts)
        return model.predict(tfidf)

    def train_model(self, text, labels):
        """
        This is a SVM that uses tfidf vectors as features. In the future, we want to use a more sophisticated
        model for recommendation, but this should suffice on naive examples (there's no basis for this assumption).
        :param text: list of training abstracts.
        :param labels: rating per abstract.
        :return: (fitted SVR, fitted CountVectorizer, fitted TfidfTransformer)
        """
        clf = svm.SVR()
        count_vect = CountVectorizer()
        tfidf_transformer = TfidfTransformer()
        counts = count_vect.fit_transform(text)
        tfidf = tfidf_transformer.fit_transform(counts)
        clf.fit(tfidf, labels)
        return clf, count_vect, tfidf_transformer
|
17,207 | f22d2bd2db9d0b106dcbc0bc4c6b222bb2bee502 | day = "sunday"
# BUG FIX: `sunday` and `monday` were undefined names (NameError) — compare
# against string literals. Also use a print() call (works on Python 2 and 3).
if day == "sunday" or day == "monday":
    print("it's the weekend")
17,208 | f63041ce95281c34f97b569f32a00d7ce148f1c7 | # Let d(n) be defined as the sum of proper divisors of n (numbers less than
# n which divide evenly into n).
# If d(a) = b and d(b) = a, where a ≠ b, then a and b are an amicable pair
# and each of a and b are called amicable numbers.
# For example, the proper divisors of 220 are 1, 2, 4, 5, 10, 11, 20, 22,
# 44, 55 and 110; therefore d(220) = 284. The proper divisors of 284 are
# 1, 2, 4, 71 and 142; so d(284) = 220.
# Evaluate the sum of all the amicable numbers under 10000.
#---------------------------------------------------------------------------
from utils import factorize_rec, product
def aliquot_sum(n):
    """Return d(n), the sum of the proper divisors of n (Python 2 code).

    Uses sigma(n) = prod over prime factors p^i of (1 + p + ... + p^i),
    then subtracts n itself to leave only proper divisors.
    """
    fzn = factorize_rec(n)  # {prime: exponent} — provided by project utils
    return product((sum((p**j for j in xrange(i+1))) for p, i in fzn.items())) - n
# Sum all amicable numbers below N (Project Euler problem 21).
N = 10000
total = 0
for n in xrange(1, N):
    dn = aliquot_sum(n)
    # n is amicable iff d(n) = m, d(m) = n, and n != m.
    if aliquot_sum(dn) == n and dn != n:
        total += n
print total
|
17,209 | 524dd00cd878c3f59a01909b29fb35e83f1ceb9c | #!/usr/bin/env python
################################################################################
# Mauricio Esguerra
# mauricio.esguerra@gmail.com
# March 6, 2017
#
# Pymol Script for CGO display of an xyz axis centered on the center of mass.
#
# This script can be invoked without
# X11 display with:
# >pymol -qc scriptname.pml >& scriptoutput.log &
################################################################################
fetch 2kpo
orient (2kpo and chain A)
# After orient, get_position() reports the current view center (taken here
# as the center of mass for drawing the axes).
com1=cmd.get_position()
print cmd.get_position()
hide everything, 2kpo
show cartoon, 2kpo
# create the axes object, draw axes with cylinders coloured red, green,
#blue for X, Y and Z
python
# axes.py
from pymol.cgo import *
from pymol import cmd
from pymol.vfont import plain
# create the axes object, draw axes with cylinders coloured red, green,
#blue for X, Y and Z
# Each CYLINDER entry: start xyz, end xyz, radius, start RGB, end RGB.
obj = [
CYLINDER, com1[0], com1[1], com1[2], com1[0]+10, com1[1]+10, com1[2], 0.3, 1.0, 0.0, 0.0, 1.0, 0.0, 0.0,
CYLINDER, com1[0], com1[1], com1[2], com1[0]-10, com1[1]-10, com1[2], 0.3, 1.0, 0.0, 0.0, 1.0, 0.0, 0.0,
CYLINDER, com1[0], com1[1], com1[2], com1[0]-10, com1[1]+10, com1[2], 0.3, 0.0, 1.0, 0.0, 0.0, 1.0, 0.0,
CYLINDER, com1[0], com1[1], com1[2], com1[0]+10, com1[1]-10, com1[2], 0.3, 0.0, 1.0, 0.0, 0.0, 1.0, 0.0,
CYLINDER, com1[0], com1[1], com1[2], com1[0], com1[1], com1[2]+10, 0.3, 0.0, 0.0, 1.0, 0.0, 0.0, 1.0,
CYLINDER, com1[0], com1[1], com1[2], com1[0], com1[1], com1[2]-10, 0.3, 0.0, 0.0, 1.0, 0.0, 0.0, 1.0,
]
cmd.load_cgo(obj,'axes')
python end
#png axisincom.png, width=1200, height=1200, dpi=600, ray=1
# Rainbow-color the CA atoms along the chain, then clear the selection.
spectrum count, rainbow, all and name CA
deselect
|
17,210 | 42c6473e3ce8159732e8279b8d30767a061d0c54 | import random
def countNumbers(table,
                 parzyste=0,
                 nieparzyste=0,
                 output=None):
    """Return [percent even, percent odd] (rounded to 2 dp) of *table*.

    :param table: non-empty sequence of integers.
    :param parzyste: starting even count (kept for interface compatibility).
    :param nieparzyste: starting odd count (kept for interface compatibility).
    :param output: optional 2-element list to fill in place.
    """
    # BUG FIX: the default was a mutable list (`output=[0, 0]`) shared across
    # calls, so a later call silently overwrote earlier results.
    if output is None:
        output = [0, 0]
    for x in table:
        if x % 2 == 0:
            parzyste += 1
        else:
            nieparzyste += 1
    output[0] = round((parzyste / len(table)) * 100, 2)
    output[1] = round((nieparzyste / len(table)) * 100, 2)
    return output
# Build the random sample and report the even/odd split.
# BUG FIX: range(0, 1001) generated 1001 numbers while the printed message
# says 1000 ("Dla 1000 liczb") — use exactly 1000 draws from <1, 50>.
numbers = [random.randrange(1, 51) for _ in range(1000)]
result = countNumbers(numbers)
print('Dla 1000 liczb losowych z przedziału <1,50>:')
print(f"Liczby parzyste: {result[0]}%")
print(f"Liczby nieparzyste: {result[1]}%")
|
17,211 | 40609dc090658234b7c10650ceea2a3acc54af5d | BUSINESS_ROLES = [('LANDLORD', 'landlord'),
('TENANT', 'tenant'),
('THIRD_PARTY', 'third party')]
# (stored value, human-readable label) pairs — presumably Django-style
# field choices, like BUSINESS_ROLES above; confirm with consumers.
CASE_TYPES = [('TRIBUNAL', 'tribunal'),
              ('ADJUDICATION', 'adjudication')]
|
17,212 | d5ba60f2b4d8b45da9048cdd0a316a18e7b28a6a | # -*- coding: utf-8 -*-
"""
Copyright (c) Microsoft Open Technologies (Shanghai) Co. Ltd. All rights reserved.
The MIT License (MIT)
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
import sys
sys.path.append('..')
from hackathon.database.models import (
AzureKey,
HackathonAzureKey,
Hackathon,
)
from hackathon import RequiredFeature, Component
import os
import commands
class AzureCertManagement(Component):
    """Create, list and delete Azure management certificates per hackathon.

    Certificates are generated with openssl under CERT_BASE on disk and
    mirrored to Azure blob storage; AzureKey / HackathonAzureKey rows record
    the associations.
    NOTE(review): uses the Python-2-only `commands` module, and builds shell
    commands via %-interpolation — subscription ids must be trusted input.
    """

    def __init__(self):
        self.CERT_BASE = self.util.get_config('azure.cert_base')
        self.CONTAINER_NAME = self.util.get_config('azure.container_name')
        self.file_service = RequiredFeature("file_service")

    def create_certificate(self, subscription_id, management_host, hackathon_name):
        """
        1. check certificate dir
        2. generate pem file
        3. generate cert file
        4. add azure key to db
        5. add hackathon azure key to db
        :param subscription_id:
        :param management_host:
        :param hackathon_name:
        :return: the blob-storage URL of the uploaded .cer file
        """
        # make sure certificate dir exists
        if not os.path.isdir(self.CERT_BASE):
            self.log.debug('certificate dir not exists')
            os.mkdir(self.CERT_BASE)
        base_url = '%s/%s' % (self.CERT_BASE, subscription_id)
        pem_url = base_url + '.pem'
        # avoid duplicate pem generation
        if not os.path.isfile(pem_url):
            # Self-signed key+cert written into the same .pem file.
            pem_command = 'openssl req -x509 -nodes -days 365 -newkey rsa:1024 -keyout %s -out %s -batch' % \
                          (pem_url, pem_url)
            commands.getstatusoutput(pem_command)
        else:
            self.log.debug('%s exists' % pem_url)
        cert_url = base_url + '.cer'
        # avoid duplicate cert generation
        if not os.path.isfile(cert_url):
            # Convert the PEM into DER (.cer) form for Azure upload.
            cert_command = 'openssl x509 -inform pem -in %s -outform der -out %s' % (pem_url, cert_url)
            commands.getstatusoutput(cert_command)
        else:
            self.log.debug('%s exists' % cert_url)
        azure_key = self.db.find_first_object_by(AzureKey,
                                                 cert_url=cert_url,
                                                 pem_url=pem_url,
                                                 subscription_id=subscription_id,
                                                 management_host=management_host)
        # avoid duplicate azure key
        if azure_key is None:
            azure_key = self.db.add_object_kwargs(AzureKey,
                                                  cert_url=cert_url,
                                                  pem_url=pem_url,
                                                  subscription_id=subscription_id,
                                                  management_host=management_host)
            self.db.commit()
        else:
            self.log.debug('azure key exists')
        hackathon_id = self.db.find_first_object_by(Hackathon, name=hackathon_name).id
        hackathon_azure_key = self.db.find_first_object_by(HackathonAzureKey,
                                                           hackathon_id=hackathon_id,
                                                           azure_key_id=azure_key.id)
        # avoid duplicate hackathon azure key
        if hackathon_azure_key is None:
            self.db.add_object_kwargs(HackathonAzureKey,
                                      hackathon_id=hackathon_id,
                                      azure_key_id=azure_key.id)
            self.db.commit()
        else:
            self.log.debug('hackathon azure key exists')
        # Upload the .cer to blob storage and record its public URL on the key.
        azure_cert_url = self.file_service.upload_file_to_azure_from_path(cert_url, self.CONTAINER_NAME,
                                                                          subscription_id + '.cer')
        azure_key.cert_url = azure_cert_url
        self.db.commit()
        return azure_cert_url

    def get_certificates(self, hackathon_name):
        """Return a list of certificate dicts for the hackathon, or None
        when it has no associated azure keys."""
        hackathon_id = self.db.find_first_object_by(Hackathon, name=hackathon_name).id
        hackathon_azure_keys = self.db.find_all_objects_by(HackathonAzureKey, hackathon_id=hackathon_id)
        if hackathon_azure_keys is None:
            self.log.error('hackathon [%s] has no certificates' % hackathon_id)
            return None
        certificates = []
        for hackathon_azure_key in hackathon_azure_keys:
            dic = self.db.get_object(AzureKey, hackathon_azure_key.azure_key_id).dic()
            certificates.append(dic)
        return certificates

    def delete_certificate(self, certificate_id, hackathon_name):
        """Detach certificate *certificate_id* from the hackathon and delete
        its AzureKey row. Returns True on success, False when the hackathon
        does not own that certificate."""
        certificate_id = int(certificate_id)
        hackathon_id = self.db.find_first_object_by(Hackathon, name=hackathon_name).id
        hackathon_azure_keys = self.db.find_all_objects_by(HackathonAzureKey, hackathon_id=hackathon_id)
        if hackathon_azure_keys is None:
            self.log.error('hackathon [%d] has no certificates' % hackathon_id)
            return False
        azure_key_ids = map(lambda x: x.azure_key_id, hackathon_azure_keys)
        if certificate_id not in azure_key_ids:
            self.log.error('hackathon [%d] has no certificate [%d]' % (hackathon_id, certificate_id))
            return False
        self.db.delete_all_objects_by(HackathonAzureKey, hackathon_id=hackathon_id, azure_key_id=certificate_id)
        certificate = self.db.get_object(AzureKey, certificate_id)
        self.db.delete_object(certificate)
        self.db.commit()
        return True
# if __name__ == '__main__':
# azure_management = AzureManagement()
# cert_url = azure_management.create_certificate('guhr34nfj', 'fhdufew3', 'open-xml-sdk')
# print cert_url |
17,213 | bedb4db485aae537c806a8c7099ca7679e4d34e8 | #Made by: Kouah Mohammed Aymen
#Computer science student at "National Computer science Engineering School, Algiers (ESI)"
#E-mail: jm_kouah@esi.dz
#Github: https://github.com/aymenkouah
#Requires installing "filetype"
#https://pypi.org/project/filetype/
# Modules
import os
import filetype
import random
import shutil
# Classes
class file():
    """Wrap a file path with its detected type information.

    Attributes: path, name (basename), type (filetype guess result),
    extension and mime (both "Other" when the type cannot be detected).
    """

    def __init__(self, path):
        self.path = path
        self.name = os.path.basename(self.path)
        # filetype.guess returns None when the file type is unrecognized.
        self.type = filetype.guess(path)
        # BUG FIX: replaced a bare `except:` (which hid every error, including
        # KeyboardInterrupt) with an explicit None check on the guess result.
        if self.type is not None:
            self.extension = self.type.extension
            self.mime = self.type.mime
        else:
            self.extension = "Other"
            self.mime = "Other"

    def __repr__(self):
        return os.path.basename(self.path)
class folder():
    """Wrap a directory path with helpers to reorganize its files."""

    def __init__(self, path):
        self.path = path
        self.name = os.path.basename(path)
        # Direct (non-recursive) file entries of this directory.
        self.files_entries = [entry for entry in os.scandir(
            self.path) if entry.is_file()]
        self.files_paths = [entry.path for entry in self.files_entries]
        self.files_names = [entry.name for entry in self.files_entries]

    def __repr__(self):
        return os.path.basename(self.path)

    def organize_by_type(self):
        # Move each direct file into a subfolder named after its MIME type,
        # creating the subfolder on first use.
        for entry in self.files_paths:
            new_file = file(entry)
            dest = self.path + '/' + new_file.mime
            if not os.path.exists(dest):
                os.makedirs(dest)
            os.rename(entry, dest + '/' + new_file.name)

    def just_files(self, dest):
        # Recursively move every file under this folder directly into `dest`,
        # renaming on name collisions.
        # NOTE(review): the '\\' separator is Windows-specific — confirm the
        # intended target platform.
        for entry in os.scandir(self.path):
            if entry.is_dir():
                new_fol = folder(entry.path)
                new_fol.just_files(dest)
            else:
                new = dest + '\\' + entry.name
                new = self.new_name(new)
                os.rename(entry.path, new)

    def new_name(self, new):
        # While `new` exists, insert "<random>-" just before the last '.'
        # (or at the very end when there is no dot) until the path is unique.
        while os.path.exists(new) and os.path.isfile(new):
            i = len(new)-1
            while (new[i] != '.') and i > 0:
                i = i - 1
            if i == 0:
                i = len(new)
            new = new[0:i] + str(random.randint(0, 10000)) + '-' + new[i:]
        return new
# Variables
# Directory to organize, supplied interactively.
Path = input("Enter the path of the directory: ")
# NOTE(review): r"%s" % Path is equivalent to Path itself — the r-prefix has
# no effect on interpolated values, only on literal escape sequences.
target = folder(
    r"%s" % Path)
# Functions
def organize(fol):
    """Flatten all files under *fol* into a temp sibling dir, group them by
    MIME type, then replace the original folder with the organized copy.

    DESTRUCTIVE: the original directory tree is deleted (shutil.rmtree)
    before the organized temp dir is renamed into its place.
    """
    to_organize = fol.path + '/../to_organize'
    new_fol = fol.name
    os.mkdir(to_organize)
    # Move every file (recursively) into the flat temp dir, then sort it.
    fol.just_files(to_organize)
    org = folder(to_organize)
    org.organize_by_type()
    # Work from the parent so the final rename uses relative names.
    os.chdir(fol.path+'/..')
    shutil.rmtree(fol.path)
    os.rename(os.path.basename(to_organize), new_fol)
def backup(path):
    """Copy directory *path* to a sibling 'Backup_temp' directory.

    Raises FileExistsError if 'Backup_temp' already exists (copytree
    semantics), e.g. when re-running after an aborted organize.
    """
    # BUG FIX: was `r"%s\\..\\Backup_temp" % path` — a raw string, so the
    # doubled backslashes stayed literal and the result was not a valid
    # parent-relative path on non-Windows systems. Build it portably.
    back = os.path.join(path, '..', 'Backup_temp')
    shutil.copytree(path, back)
# Main Code
if __name__ == '__main__':
    # Back up the target directory first, then reorganize it in place.
    backup(target.path)
    organize(target)
    input("Done")
|
17,214 | ccc5fbc1a2665f389f32b7fef500dc8a4add9fe2 | from urllib.parse import urlparse
from base import process_external_url_queue
import url_queue
import csv
from bs4 import BeautifulSoup
from selenium import webdriver
HOST = 'localhost'
PASSWORD = 'python'
USER = 'python'
DB = 'cpa'
PORT = 3306
def main():
    """Crawl cpacanada.ca's firm directory, scrape each firm listing for
    contact details, then crawl each firm's own website for e-mail addresses.

    Results are accumulated in memory; the CSV dumps are currently disabled.
    """
    # 1. instantiate stuff
    canada_queue = url_queue.URLSearchQueue()
    list_of_external_queues = []
    set_of_external_base_urls = set([])
    email_set = set([])
    list_of_firms = []
    driver = webdriver.PhantomJS()
    url = 'https://www.cpacanada.ca/en/the-cpa-profession/cpas-and-what-we-do/find-an-accounting-firm'
    url_start = '/en/the-cpa-profession/cpas-and-what-we-do/find-an-accounting-firm'
    # 2. get internal firm links from base page
    driver.get(url)
    tree = BeautifulSoup(driver.page_source, 'lxml')
    for url in extract_firm_page_urls(tree, url_start):
        canada_queue.enqueue(url)
    # 3. grab relevant info from each individual firm listing
    n = 0
    while canada_queue.queue_len() > 0:
        n += 1
        if n % 100 == 0:
            print('processed %s cpacanada pages' % n)
        curr_url = canada_queue.dequeue()
        firm_name, firm_details, email_list, web_list = scrape_for_firm_info(
            curr_url, driver
        )
        # Queue each firm website for the later e-mail crawl, skipping
        # social-network profile links and normalizing missing schemes.
        if len(web_list) > 0:
            for site in web_list:
                if site is not None and 'linkedin' not in site \
                        and 'facebook' not in site:
                    if site[:4] != 'http':
                        site = 'http://' + site
                    update_external_queue(list_of_external_queues,
                                          set_of_external_base_urls,
                                          site)
        if len(email_list) > 0:
            email_set.update(email_list)
        list_of_firms.append({'firm_name': firm_name,
                              'firm_details': firm_details})
    # (Firm-list CSV export kept for reference, currently disabled.)
    # with open('canada_firm_list.csv', 'w') as csvfile:
    #     fieldnames = ['firm_name', 'firm_details']
    #     writer = csv.DictWriter(csvfile, fieldnames=fieldnames)
    #     writer.writeheader()
    #     for firm in list_of_firms:
    #         writer.writerow(firm)
    # 4. crawl each firm site for emails
    while len(list_of_external_queues) > 0:
        active_queue = list_of_external_queues.pop()
        email_set.update(process_external_url_queue(active_queue, driver))
    # (E-mail CSV export kept for reference, currently disabled.)
    # with open('canada_email_list.csv', 'w') as csvfile:
    #     fieldnames = ['email']
    #     writer = csv.DictWriter(csvfile, fieldnames=fieldnames)
    #     writer.writeheader()
    #     for email in email_set:
    #         writer.writerow(email)
def update_external_queue(list_of_queues, set_of_urls, new_url):
    """Register *new_url* for crawling if its site has not been seen yet.

    Creates a new queue seeded with the site's base URL (and the full URL
    when they differ), appends it to *list_of_queues*, and records both in
    *set_of_urls*. Does nothing when the base URL was already seen.
    """
    parsed_url = urlparse(new_url)
    base_url = '{uri.scheme}://{uri.netloc}/'.format(uri=parsed_url)
    if base_url in set_of_urls:
        return  # site already tracked — nothing to queue
    # FIX: the queue used to be constructed before the membership test and
    # silently discarded for already-seen sites; build it only when needed.
    new_queue = url_queue.URLSearchQueue()
    new_queue.enqueue(base_url)
    set_of_urls.add(base_url)
    if new_url != base_url:
        new_queue.enqueue(new_url)
        set_of_urls.add(new_url)
    list_of_queues.append(new_queue)
def scrape_for_firm_info(url, driver=None):
    """
    Extracts firm name, detail text, any emails and web site from page
    :param url: firm listing page URL.
    :param driver: optional shared webdriver; a PhantomJS instance is created
        (and quit afterwards) when omitted.
    :return: (firm_name, firm_details, email_list, web_list)
    """
    driver_created = False
    if not driver:
        driver = webdriver.PhantomJS()
        driver_created = True
    try:
        driver.get(url)
        firm_elements = driver.find_elements_by_id(
            'lockedcontent_0_leftcolumn_0_pnlFirmNames'
        )
        firm_name = firm_elements[0].text if len(firm_elements) > 0 else ''
        email_elements = driver.find_elements_by_id(
            'lockedcontent_0_leftcolumn_0_lEmailValue'
        )
        email_list = [element.text for element in email_elements]
        web_elements = driver.find_elements_by_id(
            'lockedcontent_0_leftcolumn_0_lWebsiteValue'
        )
        web_list = [element.text for element in web_elements]
        detail_elements = driver.find_elements_by_id(
            'lockedcontent_0_leftcolumn_0_pnlEnhancedLayout'
        )
        firm_details = detail_elements[0].text if len(detail_elements) > 0 else ''
    finally:
        # BUG FIX: a driver created inside this function was never quit,
        # leaking a PhantomJS process per call (driver_created was unused).
        if driver_created:
            driver.quit()
    return firm_name, firm_details, email_list, web_list
def extract_firm_page_urls(soup_item, test_start=''):
    """Collect absolute cpacanada.ca URLs for every anchor in *soup_item*
    whose href begins with *test_start* (empty prefix matches all)."""
    prefix_len = len(test_start)
    hrefs = (anchor.get('href') for anchor in soup_item.find_all('a'))
    return [
        'https://www.cpacanada.ca' + href
        for href in hrefs
        if href is not None and href[:prefix_len] == test_start
    ]
if __name__ == '__main__':
main()
|
17,215 | 7d654f30d60eed2359d789ef7b9edac44bbb6164 | import logging
from bernard import (
layers as lyr,
)
from bernard.i18n import (
translate as t,
)
from bernard.platforms.telegram import layers as tll
from src.did_the_rocket_launch_yet.utils.urls_rocket import get_url_rocket_frame
logger = logging.getLogger('bisect')
def get_response_rocket_did_launch(mid):
    """Build the message layers for one bisection step.

    Shows video frame *mid* and asks the player whether the rocket has
    launched yet; the yes/no inline buttons carry the current frame index
    in their callback payload so the next state can continue the bisection.
    """
    return [
        lyr.Text(get_url_rocket_frame(mid)),
        lyr.Text(t.LAUNCH),
        tll.InlineKeyboard([
            [tll.InlineKeyboardCallbackButton(
                text=t.YES_LAUNCH,
                payload={
                    'action': 'choose_option_player',
                    'current': mid,
                    'option': 'yes'
                },
            )],
            [tll.InlineKeyboardCallbackButton(
                text=t.NO_LAUNCH,
                payload={
                    'action': 'choose_option_player',
                    'current': mid,
                    'option': 'no'
                },
            )]
        ])
    ]
17,216 | 212086623cfd8693e3ffb1db509fdd2eb43eada0 | # Add an array to ``point_data`` to a DataSet and then clear the
# point_data.
#
import pyvista
mesh = pyvista.Cube()
# Start from a mesh with no attached arrays.
mesh.clear_data()
# Attach one point array (one value per mesh point).
mesh.point_data['my_data'] = range(mesh.n_points)
len(mesh.point_data)
# Expected:
## 1
# Clearing point_data removes the array again.
mesh.point_data.clear()
len(mesh.point_data)
# Expected:
## 0
|
17,217 | a0c8bb77c3158c4c9e4284a679cf950f1cab1fca | from app.models.models import *
from django.db.models import Avg
from django.shortcuts import render
import datetime
import difflib
import html
## A singleton class for caching successful responses
class ResponseCache:
class __ResponseCache:
def __init__(self, most_recent_response):
self.most_recent_response = most_recent_response
instance = None
def __init__(self, most_recent_response):
if not ResponseCache.instance:
ResponseCache.instance = ResponseCache.__ResponseCache(most_recent_response)
else:
print('CACHING RESPONSE: ' + most_recent_response['movie'].Title)
ResponseCache.instance.most_recent_response = most_recent_response
def get_most_recent(self):
return ResponseCache.instance.most_recent_response
# Module-level singleton, created empty at import time.
cache = ResponseCache(None)
# Known movie titles, loaded once at import time.
# FIX: use a context manager so the file handle is closed promptly instead
# of being left to the garbage collector.
with open('static/titles.txt', 'r') as _titles_file:
    titles = [title.strip() for title in _titles_file.readlines()]
## =============================================================================
## Handles sending an error message to the user when their request has failed
def error_response(request, message):
    """Render an error for the user, reusing the last successful result.

    If nothing has been cached yet, a bare error page is returned; otherwise
    the previous result page is re-rendered with the error message attached.
    """
    previous_result = cache.get_most_recent()
    if not previous_result:
        print('RETURNING ONLY ERROR RESPONSE')
        return render(request, 'error.html', {'error_message' : message})
    previous_result['error_message'] = message
    print('RETURNING ERROR RESPONSE WITH CACHED CONTENT')
    return render(request, 'data.html', previous_result)
## =============================================================================
## Autocorrect poorly formed search terms based on current titles in db
def autocorrect_search_term(search_term):
    """Normalize a raw search term: decode HTML entities, then title-case it."""
    decoded = html.unescape(search_term)
    return decoded.title()
## =============================================================================
## Processes clean_tweets and returns dictionary of data to render
def prepare_movie_data_for_render(request, clean_tweets, movie):
    """Turn analyzed tweets for *movie* into the context dict for data.html.

    Computes sentiment aggregates and chart datasets, persists the new
    sentiment score, and caches the result for reuse by error_response().
    """
    ## Chart sentiment scores of tweets
    overall_score = get_overall_sentiment_score(clean_tweets)
    polarity = get_polarity(clean_tweets)
    negative_data, positive_data, neutral_count = create_chart_datasets(clean_tweets)
    positive_avgs, negative_avgs = get_daily_avgs(clean_tweets)
    tweets_to_display = get_tweets_to_display(clean_tweets)
    ## Save our new sentiment data to the db
    save_new_sentiment(overall_score, movie)
    ## Prepare data to render on results page
    data_to_render = { 'form' : QueryForm(request.POST),
                       'tweets' : tweets_to_display,
                       'movie' : movie,
                       'overall_score' : overall_score,
                       'polarity' : polarity,
                       'new_form' : QueryForm(),
                       # Begin chart data
                       'negative_data' : negative_data,
                       'positive_data' : positive_data,
                       'negative_count': len(negative_data),
                       'positive_count': len(positive_data),
                       'neutral_count' : neutral_count,
                       'positive_avgs' : positive_avgs,
                       'negative_avgs' : negative_avgs,
                       }
    # Updates the module-wide ResponseCache singleton as a side effect;
    # the local binding itself is intentionally unused.
    cache = ResponseCache(data_to_render)
    return data_to_render
## =============================================================================
## Processes overview data and returns a summary of data to render
def prepare_overview_data_for_render(request):
    """Collect site-wide summary stats for the overview page.

    Returns a dict with best/worst scored movies, total tweet and movie
    counts, and the average sentiment score (2-decimal string). Any value
    that cannot be computed yet (e.g. empty tables) is None.
    """
    # earliest()/latest() order by the given field and raise when the
    # Sentiment table is empty.
    try:
        worst_movie = Movie.objects.get(imdbID = Sentiment.objects.earliest('sentimentScore').imdbID)
        best_movie = Movie.objects.get(imdbID = Sentiment.objects.latest('sentimentScore').imdbID)
    except Exception:  # narrowed from bare except: don't trap SystemExit/KeyboardInterrupt
        worst_movie = best_movie = None
    try:
        worst_score = Sentiment.objects.earliest('sentimentScore')
        best_score = Sentiment.objects.latest('sentimentScore')
    except Exception:
        best_score = worst_score = None
    num_tweets = Tweet.objects.count()
    num_movies = Movie.objects.count()
    try:
        avg_sentiment_num = Sentiment.objects.all().aggregate(Avg('sentimentScore'))['sentimentScore__avg']
        # aggregate() yields None on an empty table, which makes round() raise.
        avg_sentiment = str(round(avg_sentiment_num, 2))
    except Exception:
        avg_sentiment = avg_sentiment_num = None
    ## Prepare data to render on results page
    data_to_render = { 'worst_movie' : worst_movie,
                       'best_movie' : best_movie,
                       'num_tweets' : num_tweets,
                       'num_movies' : num_movies,
                       'avg_sentiment' : avg_sentiment,
                       'best_score' : best_score,
                       'worst_score' : worst_score,
                       }
    return data_to_render
## =============================================================================
## Create datasets for chart that displays the sentiment scores for each tweet
def create_chart_datasets(clean_tweets):
    """Split tweets into negative and positive chart points and count the
    neutral ones.

    Each point maps sentiment [-1, 1] onto a 0-10 y-axis, uses the tweet
    timestamp as x, and sizes the bubble (r) by engagement, capped at 15.
    Returns (negative_points, positive_points, neutral_count).
    """
    negative_points = []
    positive_points = []
    neutral_total = 0
    for tw in clean_tweets:
        score = tw.sentiment_score
        if score == 0:
            neutral_total += 1
            continue
        # Engagement-based bubble radius, clamped so huge tweets don't dominate.
        engagement = (tw.favorite_count / 2 + tw.retweet_count) / 10
        point = {
            'y': round((score + 1) * 5, 1),
            'x': str(tw.created_at),
            'r': 5 + min(engagement, 10),
            'tweet': tw.text,
        }
        (negative_points if score < 0 else positive_points).append(point)
    return negative_points, positive_points, neutral_total
## =============================================================================
## Returns a tuple of positive daily averages and daily negative averages of
## tweet sentiment over time
def get_daily_avgs(clean_tweets):
    """Average positive and negative sentiment per calendar day.

    Returns (positive_avgs, negative_avgs), each a list of chart points
    {'x': ISO-date, 'y': scaled average} sorted by date. Positive averages
    are scaled by 10, negative averages by -10 (so both y values are >= 0).
    Neutral tweets (score 0) are ignored.
    """
    positive_data = {}
    negative_data = {}
    for tweet in clean_tweets:
        day = datetime.datetime.date(tweet.created_at).isoformat()
        score = tweet.sentiment_score
        # setdefault replaces the original bare `except:` grouping idiom,
        # which could swallow unrelated errors (even KeyboardInterrupt).
        if score < 0:
            negative_data.setdefault(day, []).append(score)
        elif score > 0:
            positive_data.setdefault(day, []).append(score)
    positive_avgs = [{'x': day, 'y': round((sum(scores) / len(scores)) * 10, 1)}
                     for day, scores in positive_data.items()]
    negative_avgs = [{'x': day, 'y': round((sum(scores) / len(scores)) * (-10), 1)}
                     for day, scores in negative_data.items()]
    # ISO dates sort chronologically as strings.
    positive_avgs.sort(key=lambda entry: entry['x'])
    negative_avgs.sort(key=lambda entry: entry['x'])
    return positive_avgs, negative_avgs
## =============================================================================
## Returns a list of Tweet objects created from the given list of twitter.Status objects
def get_clean_tweets(raw_tweets, movie_title):
    """Convert twitter.Status objects to Tweet model objects, dropping any
    that fail to convert (fillWithStatusObject returning a falsy value)."""
    converted = (Tweet().fillWithStatusObject(status, movie_title)
                 for status in raw_tweets)
    return [tweet for tweet in converted if tweet]
## =============================================================================
## Returns a list of at most 20 Tweet objects whose sentiment scores are not 0
## (the previous comment said 10, but the loop has always capped the list at 20)
def get_tweets_to_display(clean_tweets):
    """Select up to 20 non-neutral tweets and rescale their scores to 0-10.

    NOTE: mutates sentiment_score in place on the selected Tweet objects.
    """
    selected = []
    for tweet in clean_tweets:
        if len(selected) >= 20:
            break
        if tweet.sentiment_score != 0:
            tweet.sentiment_score = round((tweet.sentiment_score + 1) * 5, 1)
            selected.append(tweet)
    return selected
## =============================================================================
## Returns average sentiment score on a scale of 0 to 10, rounded to one decimal place
def get_overall_sentiment_score(clean_tweets):
    """Engagement-weighted average sentiment on a 0-10 scale.

    Each non-neutral tweet counts with weight
    1 + int(retweets/4) + int(favorites/8), exactly as the original
    duplicate-the-score loops did, but in O(1) per tweet instead of
    O(retweets + favorites). Returns neutral 5.0 when no tweet has a
    non-zero score.
    """
    total_weight = 0
    weighted_sum = 0
    for tweet in clean_tweets:
        score = tweet.sentiment_score
        if score == 0:
            continue
        weight = 1 + int(tweet.retweet_count / 4) + int(tweet.favorite_count / 8)
        total_weight += weight
        weighted_sum += score * weight
    if total_weight == 0:
        return 5.0
    # Map the weighted mean from [-1, 1] onto [0, 10].
    return round((weighted_sum / total_weight + 1) * 5, 1)
## =============================================================================
## Returns the spread ("polarity") of sentiment scores as a percentage-like value
def get_polarity(clean_tweets):
    """Distance between the mean positive score and the mean negative score,
    scaled by 50. Returns 0 unless both positive and negative tweets exist."""
    positives = [t.sentiment_score for t in clean_tweets if t.sentiment_score > 0]
    negatives = [t.sentiment_score for t in clean_tweets if t.sentiment_score < 0]
    if not positives or not negatives:
        return 0
    spread = sum(positives) / len(positives) - sum(negatives) / len(negatives)
    return round(spread * 50, 1)
## =============================================================================
## Saves the given sentiment data to the db if it is not redundant
def save_new_sentiment(overall_score, movie):
    """Persist a Sentiment row for *movie* with *overall_score*, unless a
    matching row already exists."""
    ## Find objects that are of the same movie and were made today
    # NOTE(review): this filters on the *exact* timezone.now() value. That only
    # de-duplicates "per day" if sentimentDate is a DateField — verify the
    # model; for a DateTimeField this would essentially never match.
    duplicate = Sentiment.objects.filter(imdbID = movie.imdbID, sentimentDate = timezone.now())
    ## If there aren't duplicates create a new sentiment object for this movie
    if not duplicate:
        ## Populate the sentiment table with data for each movie with their scores at this time
        MovieSentiment = Sentiment(Title = movie.Title, imdbID = movie.imdbID, sentimentDate = timezone.now() , sentimentScore = overall_score)
        try:
            ## Movie doesn't exist yet (in the db) with this date so save it into the db
            MovieSentiment.save()
        except:
            # NOTE(review): bare except hides *why* the save failed; consider
            # catching the specific database error and logging it.
            print ("ERROR: couldn't save to sentiment table")
|
17,218 | 2963f875dda3f28760626de207b24897ccecfaa4 | class Complex:
    def __init__(self,a, b) -> None:
        """Store the two components of the number."""
        self.a = a
        self.b = b
    def __add__(self, obj):
        """Component-wise addition; returns a new Complex instance."""
        return Complex(self.a + obj.a, self.b + obj.b)
c1 = Complex(1, 4)
c2 = Complex(11, 3)
c3 = c1+c2
print(c3.a, c3.b) |
17,219 | 0392dff19065054415e92f75734032eda4314923 | """ This module contains functions for handling the command line interface
to Spotlight.
"""
import argparse
try:
from spotlight import version
version_action = version.VersionAction
except ImportError:
from spotlight import _version
version_action = _version.NoVersionAction
class Driver:
    """ Driver to execute a pipeline from command line interface.

    Attributes
    ----------
    opts : argparse.Namespace
        The command line arguments.
    """

    def __init__(self):
        self.opts = None

    def parse_command_line(self, opts_list=None):
        """ Generates the command line options for the driver.

        Parameters
        ----------
        opts_list : list
            A ``list`` of ``str`` that can be passed as options to parser.

        Returns
        -------
        opts : argparse.Namespace
            The namespace from argparse returned. This is returned
            from ``argparse.parse_args``.
        """
        parser = argparse.ArgumentParser(description="Executes a minimization")
        parser.add_argument("--config-files", nargs="+", required=True)
        parser.add_argument("--config-overrides", nargs="+")
        parser.add_argument("--tmp-dir", default="tmp")
        parser.add_argument("--verbose", action="store_true")
        # BUG FIX: use the module-level ``version_action`` selected by the
        # try/except import at the top of this module. Referencing
        # ``version.VersionAction`` directly raised NameError whenever the
        # ``spotlight.version`` import failed and the fallback
        # ``_version.NoVersionAction`` had been chosen.
        parser.add_argument("--version", action=version_action)
        self.opts = parser.parse_args(opts_list)
        return self.opts

    def minimize(self, opts=None):
        # Placeholder: subclasses/later revisions implement the minimization.
        pass
|
17,220 | d55a964ca6573474517c089c05ec8ab2b1042ba9 | import tkinter.ttk as ttk
import time
from tkinter import *
root = Tk()
root.title("HJ GUI")
root.geometry("640x640")
'''progressbar = ttk.Progressbar(root, maximum=100, mode = "determinate") # mode = "indeterminate" 이면 좌-우로 왔다갔다 무한반복, determinate 이면 게이지가 좌->우로 차는거 반복
progressbar.start(10) # 10ms 마다 출력
progressbar.pack()
def btncmd():
progressbar.stop()
btn = Button(root, text = "중지", command = btncmd)
btn.pack()'''
pvar = DoubleVar()
progressbar2 = ttk.Progressbar(root, maximum=100, length = 200, variable = pvar)
progressbar2.pack()
def btncmd2():
    """Animate the progress bar from 1 to 100, one step per 10 ms."""
    for i in range(1,101):
        time.sleep(0.01) # wait 0.01 s between steps
        pvar.set(i) # set the value shown by progressbar2
        progressbar2.update() # force a redraw so the gauge visibly advances
        print(pvar.get())
btn = Button(root, text="시작", command=btncmd2)
btn.pack()
root.mainloop() |
17,221 | 496657c2318de434a8426675097acdf8cf210a50 | import cv2
from scipy import ndimage
import os, sys
import numpy as np
dev = int(sys.argv[1])
cap = cv2.VideoCapture(dev)
print(cap.isOpened())
os.system("v4l2-ctl -d %i -c focus_auto=0" % dev)
i = 0
max_focus = 255
step_size = 1
fine_tuning_iterations = 5
fine_tuning_batch_size = 10
# [x, y, h, w]
rect = [None, None, None, None]
is_drawing=False
# Mouse callback for the "Laplacian" window: lets the user drag a rectangle
# (region of interest) used for the focus measurements. Updates the
# module-level ``rect`` ([x, y, w, h]) continuously while dragging and
# finalizes it on button release.
def setCaptureRect(event, x, y, flags, param):
    global rect, is_drawing
    if event == cv2.EVENT_LBUTTONDOWN:
        # Anchor corner of the rectangle; w/h unknown until the drag moves.
        is_drawing = True
        rect = [x, y, None, None]
    elif event == cv2.EVENT_MOUSEMOVE:
        if is_drawing:
            rect = _normalized_rect(x, y)
    elif event == cv2.EVENT_LBUTTONUP:
        is_drawing = False
        rect = _normalized_rect(x, y)
        print(rect[2], rect[3])

def _normalized_rect(x, y):
    """Return [ox, oy, w, h] spanning the drag anchor (rect[0], rect[1]) and
    the current mouse position (x, y), normalized so w, h >= 0.

    Consolidates the min/max arithmetic that was duplicated (with stale
    commented-out variants) in both the move and release branches.
    """
    ox, oy = min(x, rect[0]), min(y, rect[1])
    w = max(x, rect[0]) - ox
    h = max(y, rect[1]) - oy
    return [ox, oy, w, h]
# Second-derivative kernels for the modified-Laplacian focus measure:
# x_kernel responds to intensity changes along the vertical axis,
# y_kernel along the horizontal axis.
x_kernel = np.array([[0, -1, 0], [0, 2, 0], [0, -1, 0]])
y_kernel = np.array([[0, 0, 0], [-1, 2, -1], [0, 0, 0]])

def modified_laplace(image):
    """Return the modified-Laplacian sharpness map of ``image``.

    Sum of absolute second derivatives in both directions; larger values
    indicate sharper (better focused) regions. ``mode="constant"`` zero-pads
    the borders. (Dead commented-out cv2.filter2D variants removed; the
    ``global`` declaration was unnecessary for read-only access.)
    """
    dx = ndimage.convolve(image, x_kernel, mode="constant")
    dy = ndimage.convolve(image, y_kernel, mode="constant")
    return np.abs(dx) + np.abs(dy)
cv2.namedWindow("Laplacian")
cv2.setMouseCallback("Laplacian", setCaptureRect)
# State machine: pressing 'm' starts a full focus sweep (is_sweeping). When
# the sweep wraps past max_focus, the best candidate positions are
# re-measured fine_tuning_iterations times each (is_fine_tuning) and the
# mean focus position of the top ten measurements is printed.
measurements = []
is_sweeping = False
is_fine_tuning = False
ft_wb_i = 0
ft_measurements = []
focus = 0
while True:
    if is_sweeping:
        i = (i+1) % max_focus
        if i == 0:
            # Sweep finished: switch to fine tuning over the best positions.
            is_sweeping = False
            is_fine_tuning = True
            ft_measurements = []
            ft_wb_i = 0
            # Sort sweep measurements by sharpness, best first.
            measurements = np.array(measurements)
            measurements = measurements[(-1*measurements[:,0]).argsort()]
    if is_fine_tuning:
        ft_wb_i = ft_wb_i+1
        if ft_wb_i >= fine_tuning_batch_size*fine_tuning_iterations:
            is_fine_tuning=False
            # Sort ft_measurements and print top ten results
            ft_measurements = np.array(ft_measurements)
            ft_measurements = ft_measurements[(-1*ft_measurements[:,0]).argsort()]
            print(np.mean(ft_measurements[0:10, 1]))
    if is_fine_tuning:
        # Revisit each of the best sweep positions fine_tuning_iterations times.
        focus = measurements[ft_wb_i // fine_tuning_iterations, 1]
    else:
        focus = i
    if i % step_size == 0:
        # Push the focus position to the camera driver.
        os.system("v4l2-ctl -d %i -c focus_absolute=%i" % (dev, focus))
    ret, frame = cap.read()
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    lpa = modified_laplace(gray)
    # Calc ROI focus:
    if not None in rect:
        x, y = rect[0], rect[1]
        w, h = rect[2], rect[3]
        cv2.rectangle(lpa, (x, y), (x+w, y+h),
            (255, 255, 255), 5)
        # Record mean sharpness inside the ROI for the current focus position.
        if is_fine_tuning:
            ft_measurements.append([np.mean(lpa[y:y+h, x:x+w]), focus])
            print(np.mean(lpa[y:y+h, x:x+w]), focus)
        elif is_sweeping:
            measurements.append([np.mean(lpa[y:y+h, x:x+w]), focus])
            print(np.mean(lpa[y:y+h, x:x+w]), focus)
    cv2.imshow("Laplacian", lpa)
    # Keyboard controls: q=quit, j/k=step focus, m=start a sweep from 0.
    k = chr(cv2.waitKey(2) & 0xFF)
    if k == "q":
        break
    if k == "j":
        i = (i+1) % max_focus
    if k == "k":
        i = (i-1) % max_focus
    if k == "m":
        i = 0
        measurements = []
        is_sweeping = True
cv2.destroyAllWindows()
|
17,222 | 7464dc543581f2b103855d20679f2c090b8f5f22 | #!/usr/bin/python2
import sys, re
rc = re.compile
test = """amavis[5691]: (05691-06) TIMING [total 3257 ms] - SMTP EHLO: 3 (0%), SMTP pre-MAIL: 1 (0%), SMTP pre-DATA-flush: 4 (0%), SMTP DATA: 80 (2%), body hash: 1 (0%), mime_decode: 20 (1%), get-file-type: 30 (1%), decompose_part: 10 (0%), parts: 0 (0%), AV-scan-1: 12 (0%), SA msg read: 4 (0%), SA parse: 2 (0%), SA check: 2844 (87%), fwd-connect: 10 (0%), fwd-mail-from: 2 (0%), fwd-rcpt-to: 1 (0%), write-header: 18 (1%), fwd-data: 9 (0%), fwd-data-end: 197 (6%), fwd-rundown: 2 (0%), unlink-1-files: 5 (0%), rundown: 0 (0%)"""
test2 = """amavis[5691]: (05691-06) FWD via SMTP: [127.0.0.1:10025] <root@mail.northwesthomecare.com> -> <wcooley@nakedape.cc>"""
#test_re = rc(r'^.*total (\d+) .*$')
#m = test_re.search(test)
#print "Time: %s" % m.group(1)
#test2_re = rc(r'-> \<(.*)\>')
# Capture localpart and domain from a "-> <user@domain>" recipient token
# in an amavis FWD log line.
email_parts_re = rc(r'-> \<([^@]+)@([^@]+)\>')
#m = test2_re.search(test2)
#print "Found e-mail recipient: %s" % m.group(1)
m2 = email_parts_re.search(test2)
if m2 is not None:
    # Python 2 print statements (this is a /usr/bin/python2 script).
    print "Address parts: %s '@' %s" % (m2.group(1), m2.group(2))
else:
    print "Couldn't decompose e-mail address"
|
17,223 | 6ab6fdf0ca5254fde1c10236f5a34892866870b1 | #!/usr/bin/env python
import rospy
from rospy_tutorials.srv import AddTwoInts, AddTwoIntsResponse, AddTwoIntsRequest # Messages used in the node must be imported.
rospy.init_node('service_client_node_py',anonymous=True) #initialzing the node with name "service_client_node_py". The second arugument "anonymous" is optional. If anonymous is set to true, then you can run this node in more than one terminal simultanously.
rate = rospy.Rate(1) # 1 hz
x=0
y=1
element=1
rospy.wait_for_service('add_py') # wait_for_service(service_name) will wait until the serivce node is ready and running.
'''
A service client is created in the below line.
First argument is the name of the service to be regitered
Second argument is the message type of the service
'''
add=rospy.ServiceProxy('add_py', AddTwoInts)
rospy.loginfo("Fibanocci series element 0: 0")
# Generate the Fibonacci sequence by repeatedly asking the AddTwoInts
# service to add the two previous terms; stop after element 80 or when
# the node is shut down.
while not rospy.is_shutdown() and element < 81:
    '''
    In the below line, client is requesting for service.
    The argument order is the same as the order of the fields in the Message, and you must provide a value for all of the fields. In this case rospy_tutorials.srv.AddTwoIntsRequest has two integer fields, hence add(x,y).
    "resp" will be assigned with the response message.
    '''
    resp = add(x,y)
    # NOTE(review): "Fibanocci" typo lives in the runtime log string; left
    # unchanged here because it is emitted output, not a comment.
    rospy.loginfo("Fibanocci series element %d: %d", element, resp.sum)
    x=y
    y=resp.sum
    element+=1
    rate.sleep() # This makes the loop to iterate at 1 Hz i.e., once in 1 sec.
|
17,224 | 21b38967f9768d5c19991a2f2d7239fc4a16d313 | import os
import sys
import subprocess
from datetime import datetime
import logging
from threading import Thread
import json
import re
from time import sleep
from ImageDB import ImageDB
from astropy.visualization.scripts.fits2bitmap import fits2bitmap
path = os.path
log = logging.getLogger(__name__)
class Executor(object):
    """ Run CCDD processes and keep track of status.

    Wraps the CCDDrone command-line executables in subprocesses, captures
    their output to a log file, and exposes a status dict for a web UI.
    """
    def __init__(self, config=None, **kwargs):
        """
        Args:
            config (dict): dictionary of config settings. will be merged with
                any other provided kwargs (kwargs take precedence).
                valid keys are:
                CCDDRONEPATH (str): path to top-level of CCDDrone installation
                CCDDCONFIGFILE (str): path (under CCDDrone path) to store config
                CCDDMETADATAFILE (str): path (under CCDDrone path) to store metadata
                EXECUTOR_LOGFILE (str): where to put logs from CCDD executables
                DATAPATH (str): path to save images
                LASTIMGPATH (str): path to save png of last image taken
        """
        # kwargs override the config dict for any shared key.
        def getkey(key, default=None):
            return kwargs.get(key, config.get(key, default))
        self.logfilename = getkey('EXECUTOR_LOGFILE', 'logs/Executor.log')
        self.logfile = None
        self.process = None
        self.current_exposure = None
        self.max_exposures = None
        self.exposethread = None
        self.lastfile=None
        self.lastimgpath = getkey('LASTIMGPATH', 'static/lastimg.png')
        self.datapath = getkey("DATAPATH", 'data')
        self.ccddpath = getkey('CCDDRONEPATH')
        CCDDConfigFile = getkey('CCDDCONFIGFILE','config/Config_GUI.ini')
        CCDDMetaFile = getkey('CCDDMETADATAFILE', 'config/Metadata_GUI.json')
        self.imagedb_uri = getkey("IMAGEDB_URI", ImageDB.default_uri)
        self.imagedb_collection = getkey("IMAGEDB_COLLECTION",
                                         ImageDB.default_collection)
        # make sure the datapath exists
        if not os.path.isdir(self.datapath):
            try:
                os.mkdir(self.datapath)
            except FileNotFoundError:
                # NOTE(review): the implicit string concatenation below yields
                # "...does not existand can't be created" — missing space.
                raise ValueError(f"DATAPATH '{self.datapath}' does not exist"
                                 "and can't be created")
        # make sure ccdd path is real
        if not os.path.isdir(self.ccddpath):
            raise ValueError(f"CCDDRONEPATH '{self.ccddpath}' doesn't exist")
        # make sure it is on PATH so the ./CCDD* executables can be found
        if self.ccddpath not in os.getenv('PATH'):
            os.environ['PATH'] = os.pathsep.join([self.ccddpath,
                                                  os.getenv('PATH')])
        self.outputConfig = path.abspath(path.join(self.ccddpath,
                                                   CCDDConfigFile))
        self.outputMetadata = path.join(self.ccddpath, CCDDMetaFile)
        log.debug("New executor created, config=%s, meta=%s, imagedb=%s/%s",
                  self.outputConfig, self.outputMetadata,
                  self.imagedb_uri, self.imagedb_collection)
    def readconfig(self):
        """ Get the current config file and return as string.

        Reads whichever candidate config file was modified most recently;
        returns None if none of them exists.
        """
        files = [path.join(self.ccddpath, 'do_not_touch', 'LastSettings.ini'),
                 path.join(self.ccddpath, 'config', 'Config.ini'),
                 self.outputConfig]
        last = sorted(files, reverse=True,
                      key=lambda f: path.getmtime(f) if path.isfile(f) else 0)
        log.debug("Reading config settings from %s", last[0])
        try:
            with open(last[0]) as f:
                return f.read()
        except FileNotFoundError:
            return None
    def saveconfig(self, newconf, apply=True):
        """ Save the config settings in `newconf` to file.
        Args:
            newconf (str): contents of ini config file as string
            apply (bool): if True, call CCDDApplyNewSettings after
        """
        with open(self.outputConfig, 'w') as f:
            f.write(newconf)
        if apply:
            self.ApplyNewSettings()
    def savemetadata(self, newmeta):
        """ Save the metadata to file as JSON.
        Args:
            newmeta (dict): new metadata
        """
        with open(self.outputMetadata, 'w') as f:
            json.dump(newmeta, f)
    def getstate(self):
        """ Return 'idle', 'running', or 'error' based on the last process
        and any ongoing exposure loop. """
        state = 'idle'
        if self.process:
            if self.process.poll() is None:
                # poll() is None while the subprocess is still alive.
                state = 'running'
            elif self.process.returncode != 0:
                state = 'error'
        if self.current_exposure is not None:
            # An exposure loop counts as running even between subprocesses.
            state = 'running'
        return state
    def getstatus(self):
        """ Get our current status as a dict """
        status = dict(state=self.getstate(), runningcmd=None,
                      current_exposure=self.current_exposure,
                      max_exposures=self.max_exposures,
                      statustime=str(datetime.now())[:-7],
                      lastfile=self.lastfile)
        if self.process:
            status['lastcmd'] = self.process.args[0]
            status['lastreturn'] = self.process.poll()
            if status['state'] == 'running':
                status['runningcmd'] = path.basename(self.process.args[0])
        try:
            with open(self.logfilename, newline='') as logfile:
                ts = datetime.fromtimestamp(path.getmtime(self.logfilename))
                status['cmdoutput'] = f"Last output: {str(ts)[:-7]}\n"
                status['cmdoutput'] += '#'*80+'\n'
                lines = logfile.readlines()
                # Lines ending in a bare '\r' are in-place progress output;
                # skip them, but keep the final (possibly partial) line.
                if lines and lines[-1][-1] == '\r':
                    lines[-1] = lines[-1][:-1]
                for line in lines:
                    if not line.endswith('\r'):
                        status['cmdoutput'] += line
        except FileNotFoundError:
            status['cmdoutput'] = ""
        # info for the lastimg to update
        status['lastimg'] = self.lastimgpath
        try:
            status['lastimg_timestamp'] = path.getmtime(self.lastimgpath)
        except FileNotFoundError:
            status['lastimg_timestamp'] = 0
        return status
    def endexposureloop(self):
        """ Stop an ongoing exposure loop """
        # Setting max == current makes _do_expose_loop exit after this exposure.
        self.max_exposures = self.current_exposure
    def abort(self, kill=False):
        """ abort a currently running process """
        log.warning("Received abort request")
        # Clearing current_exposure also terminates any exposure loop.
        self.current_exposure = None
        if self.getstate() == 'running':
            if kill:
                self.process.kill()
            else:
                self.process.terminate()
            with open(self.logfilename, 'a') as f:
                print("!!!!!! process killed by user !!!!!!!", file=f)
    # methods to run exectuables
    def _run(self, args, cwd=None, env=None, logmode='wb'):
        """ Run the commands in `args` in a subprocess, logging output to
        self.logfilename. Raises RuntimeError if one is already running. """
        args = tuple(str(arg) for arg in args)
        if self.process and self.process.poll() is None:
            raise RuntimeError("A process is already running")
        if self.logfile:
            self.logfile.close()
        # Unbuffered binary log so partial output (progress bars) is visible.
        self.logfile = open(self.logfilename, logmode, buffering=0)
        if env is not None:
            env = dict(os.environ, **env,
                       PYTHONPATH=os.pathsep.join(sys.path))
        self.process = subprocess.Popen(args, cwd=cwd, stdout=self.logfile,
                                        stderr=subprocess.STDOUT, env=env)
    def StartupAndErase(self):
        # Run the CCDDrone startup-and-erase executable with our config.
        return self._run(['./CCDDStartupAndErase', path.abspath(self.outputConfig)],
                         cwd=self.ccddpath)
    def PerformEraseProcedure(self):
        # Run only the erase procedure with our config.
        return self._run(['./CCDDPerformEraseProcedure', path.abspath(self.outputConfig)],
                         cwd=self.ccddpath)
    def ApplyNewSettings(self, newconf=None):
        # Optionally persist `newconf` first, then apply it to the controller.
        if newconf:
            self.saveconfig(newconf, apply=False)
        return self._run(['./CCDDApplyNewSettings',
                          path.abspath(self.outputConfig)],
                         cwd=self.ccddpath)
    def Expose(self, fitsfile, seconds=5):
        """ Expose the CCD and read a new image to `fitsfile` """
        # make sure the file has good name
        if not fitsfile.endswith('.fits'):
            fitsfile += '.fits'
        # Append (or refresh) a "_yymmdd-HHMM" timestamp before the ".fits"
        # suffix; the slice widths 5 and 17 are len(".fits") and
        # len("_yymmdd-HHMM.fits") respectively.
        tstamp = datetime.now().strftime('_%y%m%d-%H%M')
        match = re.match(r'.*(_\d\d\d\d\d\d-\d\d\d\d)\.fits', fitsfile)
        if not match:
            fitsfile = fitsfile[:-5] + tstamp + '.fits'
        elif match.group(1) != tstamp:
            fitsfile = fitsfile[:-17] + tstamp + '.fits'
        fitsfile = path.join(self.datapath, fitsfile)
        self.lastfile = fitsfile
        log.info("Starting new exposure, filename=%s",
                 path.basename(self.lastfile))
        args = ['./CCDDExposeDB.py', str(seconds), fitsfile,
                self.outputMetadata]
        if self.lastimgpath:
            args.append(self.lastimgpath)
        # Pass the ImageDB location to the exposure script via environment.
        return self._run(args,
                         env=dict(IMAGEDB_URI=self.imagedb_uri,
                                  IMAGEDB_COLLECTION=self.imagedb_collection)
                         )
    def _do_expose_loop(self, fitsfile, seconds):
        """ private method to perform expose loop. Do not call directly! """
        log.debug(f"Starting expose loop with {self.max_exposures} exposures")
        while (self.current_exposure is not None and
               self.current_exposure < self.max_exposures):
            self.current_exposure += 1
            self.Expose(fitsfile, seconds)
            # Poll until the exposure subprocess finishes.
            while self.process and self.process.poll() is None:
                sleep(5)
            if not self.process or self.process.returncode != 0:
                # Stop the loop on failure or external abort().
                break
        self.current_exposure = None
        self.max_exposures = None
    def ExposeLoop(self, nexposures, fitsfile, seconds=5):
        """ Take multiple exposures in a loop (runs in a background thread). """
        if self.process and self.process.poll() is None:
            raise RuntimeError("A process is already running")
        if self.exposethread and self.exposethread.is_alive():
            raise RuntimeError("An exposure loop is already running")
        self.current_exposure = 0
        self.max_exposures = nexposures
        self.exposethread = Thread(target=self._do_expose_loop,
                                   args=(fitsfile, seconds))
        self.exposethread.start()
    def ToggleBias(self, value):
        """ Toggle the bias on or off """
        return self._run(['./CCDDToggleBias', value],
                         cwd=self.ccddpath)
|
17,225 | d3b9e2d4323a2b187161328dbfe0264c12372d45 |
from .category import *
from .trivia import *
from .premios import *
from .sorteos import *
from .rules import *
|
17,226 | c7ef4fd226ee95e7cf00a364127606f891a860c8 | #ex4 a, b
numeFisierAsn = "afr_green_monkey.asn1"
numeFisierFasta = "conversie_fasta.fasta"
numeFisier1Fasta = "Multifasta.fasta"
print ("ex. ASN - a - conversie secventa ADN")
def Mapping(char):
    """Expand one hex digit of an ncbi2na-packed sequence into its
    two-nucleotide string; newlines pass through unchanged."""
    ncbi2na_digit_to_bases = {
        "0": "AA", "1": "AC", "2": "AG", "3": "AT",
        "4": "CA", "5": "CC", "6": "CG", "7": "CT",
        "8": "GA", "9": "GC", "A": "GG", "B": "GT",
        "C": "TA", "D": "TC", "E": "TG", "F": "TT",
        "\n": "\n",
    }
    return ncbi2na_digit_to_bases[char]
# Stream the ASN.1 text file, locate the first "ncbi2na '...'" packed
# sequence region, expand every hex digit to two bases via Mapping() and
# write the decoded sequence to the FASTA output file.
f = open(numeFisierAsn,'r')
g = open(numeFisierFasta,'w')
start = "ncbi2na '"
end = "'"
write = False
foundFirstSequence = False
for line in f:
    if end in line:
        # Closing quote: stop copying; quit after the first region is done.
        write = False
        if foundFirstSequence:
            break
    if write:
        sec = map(Mapping, line)
        g.write(''.join(sec))
        foundFirstSequence = True
    if start in line:
        write = True
        # Decode the part of this line that follows the opening marker.
        index = line.find(start) + len(start)
        sec = map(Mapping, line[index:])
        g.write(''.join(sec))
f.close()
g.close()
print ("ASN - b")
def conv(char):
    """Decode a single ncbi2na hex digit to its two-base expansion
    ('\\n' maps to itself). Same table as Mapping() above."""
    hex_to_dinucleotide = {
        "0": "AA", "1": "AC", "2": "AG", "3": "AT",
        "4": "CA", "5": "CC", "6": "CG", "7": "CT",
        "8": "GA", "9": "GC", "A": "GG", "B": "GT",
        "C": "TA", "D": "TC", "E": "TG", "F": "TT",
        "\n": "\n",
    }
    return hex_to_dinucleotide[char]
def DNAFromASN1(asn1, fasta):
    """Extract every "ncbi2na '...'" packed region from *asn1* and write each
    one to *fasta* as a ">Regiune N" record of decoded bases."""
    f = open(asn1,'r')
    g = open(fasta,'w')
    regiune = 0
    start = "ncbi2na '"
    end = "'"
    write = False
    for line in f:
        if end in line:
            # NOTE(review): checked before the start marker — if a region's
            # opening and closing quotes share one line, the leading chars of
            # that line are skipped; verify against the real ASN.1 layout.
            write = False
        if write:
            for char in line:
                sec = conv(char)
                g.write(sec)
        if start in line:
            regiune = regiune + 1
            # FASTA-style header for this packed region.
            g.write("\n>Regiune " + str(regiune) + "\n")
            write = True
            index = line.find(start) + len(start)
            for char in line[index:]:
                sec = conv(char)
                g.write(sec)
    f.close()
    g.close()
DNAFromASN1(numeFisierAsn,numeFisier1Fasta)
|
17,227 | 2900eb0cd6ef832c803e5118c94aa8931bc97941 | from bs4 import BeautifulSoup
import urllib2
import re
import smtplib
from email.mime.text import MIMEText
from email.mime.multipart import MIMEMultipart
sender = 'indrajeet.lyfsgr8@gmail.com'
receiver = 'sen15recess@gmail.com'
# SECURITY(review): real-looking credentials are hard-coded in source control.
# Move them to environment variables or an untracked config file and rotate
# the password.
GMAIL_USERNAME = "sentest15@gmail.com"
GMAIL_PASSWORD = "123@Kiit"
# Fetch the internship listing page (Python 2 / urllib2 script).
pageFile = urllib2.urlopen("http://internshala.com/internships/computer%20science-internship")
pageHtml = pageFile.read()
soup = BeautifulSoup("".join(pageHtml),"html5lib")
# Output HTML report (Windows path) and the attachment filename for the mail.
f = open("G:/checkthisout.html","w+")
filename = "tosend.txt"
a = []
f.write("<html>")
f.write("<head>")
f.write("<h1>Internshala</h1>")
#get all links from the listing's button containers
for hit in soup.findAll(attrs={'class' : 'button_container'}):
    for link in hit.find_all('a'):
        a.append(link.get('href'))
#adding domain name to extended (relative) addresses
# NOTE(review): only items 1..9 are processed; index 0 is skipped and an
# IndexError is possible if fewer than 10 links were found — verify intent.
for i in range(1,10):
    a[i] = "http://internshala.com" + a[i]
#writing internship details to file
for i in range(1,10):
    f.write("<br><br>")
    f.write("<p>")
    # Fetch each internship's detail page and extract the text blocks.
    pageFile = urllib2.urlopen(str(a[i]))
    pageHtml = pageFile.read()
    soup = BeautifulSoup("".join(pageHtml),"html5lib")
    for hit in soup.findAll(attrs={'class' : 'individual_internship_details'}):
        k = (re.sub(' +',' ',hit.text))
        f.write(k)#date
    for hit in soup.findAll(attrs={'class' : 'freetext-container'}):
        l = (re.sub(' +',' ',hit.text))
        f.write(l.encode('utf-8').strip())#info
    # Visual separator between internships in the report.
    m = "-----------------------------------------------------------------------------------------------------------------------------------------------------------------------"
    f.write(m)
    f.write("</p>")
    f.write("<br><br><br>")
#attaching text file to email
attachment = MIMEText(f.read())
msg = MIMEMultipart('alternative')
attachment.add_header('Content-Disposition', 'attachment', filename=filename)
msg.attach(attachment)
#sending mail
smtpObj = smtplib.SMTP('smtp.gmail.com:587')
smtpObj.ehlo()
smtpObj.starttls()
smtpObj.login(GMAIL_USERNAME, GMAIL_PASSWORD)
smtpObj.sendmail(GMAIL_USERNAME, receiver, msg.as_string())
print "Successfully sent email"
f.close()
|
17,228 | 1cd9322a78f94aa2137745ab8104245d305e3fd8 | # SMS spam optimised
# The pipeline you built earlier for the SMS spam model used the default parameters for all of the elements in the pipeline. It's very unlikely that these parameters will give a particularly good model though.
# In this exercise you'll set up a parameter grid which can be used with cross validation to choose a good set of parameters for the SMS spam classifier.
# The following are already defined:
# hasher — a HashingTF object and
# logistic — a LogisticRegression object.
# Instructions
# 100 XP
# Create a parameter grid builder object.
# Add grid points for numFeatures and binary parameters to the HashingTF object, giving values 1024, 4096 and 16384, and True and False, respectively.
# Add grid points for regParam and elasticNetParam parameters to the LogisticRegression object, giving values of 0.01, 0.1, 1.0 and 10.0, and 0.0, 0.5, and 1.0 respectively.
# Build the parameter grid.
# Create parameter grid
params = ParamGridBuilder()
# Add grid for hashing trick parameters
params = params.addGrid(hasher.numFeatures, [1024, 4096, 16384]) \
    .addGrid(hasher.binary, [True, False])
# Add grid for logistic regression parameters
params = params.addGrid(logistic.regParam, [0.01, 0.1, 1.0, 10.0]) \
    .addGrid(logistic.elasticNetParam, [0.0, 0.5, 1.0])
# Build parameter grid: 3 x 2 x 4 x 3 = 72 parameter combinations to evaluate.
params = params.build()
|
17,229 | 6e6d00337e18408f04b0bd17708de3b971c2d4b7 | #!/usr/bin/env python
import webbrowser
from globus_sdk import AccessTokenAuthorizer, NativeAppAuthClient, TransferClient
from utils import is_remote_session, start_local_server
CLIENT_ID = "1b0dc9d3-0a2b-4000-8bd6-90fb6a79be86"
REDIRECT_URI = "http://localhost:8000"
SCOPES = "openid email profile " "urn:globus:auth:scope:transfer.api.globus.org:all"
TUTORIAL_ENDPOINT_ID = "ddb59aef-6d04-11e5-ba46-22000b92c6ec"
SERVER_ADDRESS = ("127.0.0.1", 8000)
def do_native_app_authentication(client_id, redirect_uri, requested_scopes=None):
    """
    Does a Native App authentication flow and returns a
    dict of tokens keyed by service name.

    Args:
        client_id: Globus Auth native-app client id.
        redirect_uri: local redirect URI the auth code is delivered to.
        requested_scopes: scopes string to request; defaults to the
            module-level SCOPES when None.
    """
    client = NativeAppAuthClient(client_id=client_id)
    # BUG FIX: honor the ``requested_scopes`` argument — it was previously
    # ignored in favor of the module-level SCOPES constant.
    client.oauth2_start_flow(requested_scopes=requested_scopes or SCOPES,
                             redirect_uri=redirect_uri)
    url = client.oauth2_get_authorize_url()

    # Run a local HTTP server to receive the auth code from the redirect,
    # opening a browser only for local (non-remote) sessions.
    server = start_local_server(listen=SERVER_ADDRESS)
    if not is_remote_session():
        webbrowser.open(url, new=1)
    auth_code = server.wait_for_code()
    token_response = client.oauth2_exchange_code_for_tokens(auth_code)
    server.shutdown()
    # return a set of tokens, organized by resource server name
    return token_response.by_resource_server
def main():
    """Authenticate, then print a directory listing from the tutorial endpoint."""
    # start the Native App authentication process
    tokens = do_native_app_authentication(CLIENT_ID, REDIRECT_URI)
    # Only the Transfer API token is needed for the operations below.
    transfer_token = tokens["transfer.api.globus.org"]["access_token"]
    authorizer = AccessTokenAuthorizer(access_token=transfer_token)
    transfer = TransferClient(authorizer=authorizer)
    # print out a directory listing from an endpoint
    transfer.endpoint_autoactivate(TUTORIAL_ENDPOINT_ID)
    for entry in transfer.operation_ls(TUTORIAL_ENDPOINT_ID, path="/~/"):
        # Append "/" to directory names, ls-style.
        print(entry["name"] + ("/" if entry["type"] == "dir" else ""))
if __name__ == "__main__":
if not is_remote_session():
main()
else:
print("This example does not work on a remote session.")
|
17,230 | 3b95692741d015679e1ad86a8471dec78fa011c7 | __author__ = 'ysekky'
from learner_base import LearnerBase
from sklearn.svm import SVC
class SimpleSVM(LearnerBase):
    """RBF-kernel SVC plugged into the LearnerBase train/predict workflow."""
    def learning(self, training_data, feature_names, **kwargs):
        """Fit an SVC on the encoded features/labels of *training_data*."""
        x = self.create_feature(training_data, feature_names)
        y = self.encode_training_class(training_data)
        # NOTE(review): C=0.025 is a very strong regularizer for an RBF SVC;
        # presumably intentional for this setup — verify against results.
        self.learner = SVC(kernel="rbf", C=0.025)
        self.learner.fit(x, y)
    def predict(self, test_data, feature_names):
        """Predict labels for *test_data* and submit them with 1-based ranks."""
        x = self.create_feature(test_data, feature_names)
        result = [y for y in self.learner.predict(x)]
        # Ranks 1..N in input order (a range object; fine if submit() iterates).
        rank_order = range(1, len(result)+1)
        self.submit(test_data, result, rank_order)
17,231 | 1ae03e0105cbfbf0fd09fe3f4f6ca0f4f20508ac | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import sh
import os
import re
from os.path import dirname
from docktree import Docktree
try:
from StringIO import StringIO
except:
from io import StringIO
def setup_module():
    # Run all tests from this file's directory so empty.tar etc. resolve.
    os.chdir(dirname(__file__))
def teardown_function(function):
    # Best-effort cleanup: remove the test images after each test; ignore
    # failure when the images were never created.
    try:
        sh.docker('rmi', '-f', 'footest', 'bartest')
    except:
        pass
def test_single_image():
    # Import an empty tarball as image "footest" and check the tree renders
    # it as a single root node (only 5 hex digits of the id are shown).
    sh.docker(sh.cat('empty.tar'), 'import', '-', 'footest')
    f = StringIO()
    Docktree(restrict='footest', file=f).draw_tree()
    assert re.match(u'└─ sha256:[a-f0-9]{5} footest:latest\n', f.getvalue())
def test_two_images():
    # Build "bartest" on top of "footest" and check that the tree shows the
    # child indented beneath its parent.
    sh.docker(sh.cat('empty.tar'), 'import', '-', 'footest')
    sh.docker('build', '-t', 'bartest', '.')
    f = StringIO()
    Docktree(restrict='footest', file=f).draw_tree()
    assert re.match(
        u'└─ sha256:[a-f0-9]{5} footest:latest\n' +
        u'   └─ sha256:[a-f0-9]{5} bartest:latest\n', f.getvalue())
|
17,232 | 8c9d1ab26c94b2bf4983fadf7240218e77382215 | # Generated by Django 2.2.7 on 2019-11-25 11:45
from django.db import migrations, models
import django.db.models.deletion
# NOTE: auto-generated by Django's makemigrations; avoid editing after this
# migration has been applied to any shared database.
class Migration(migrations.Migration):
    # Must run after 0009_auto_20191125_1107 in the livecoding app.
    dependencies = [
        ('livecoding', '0009_auto_20191125_1107'),
    ]
    operations = [
        # Re-declare Exercice.code as a cascading FK to Code with the reverse
        # accessor named "livecoding".
        migrations.AlterField(
            model_name='exercice',
            name='code',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='livecoding', to='livecoding.Code'),
        ),
    ]
|
17,233 | 996a7d42b1992b218d64e6fe8b7f0811dfdc1d88 | # class inheritance
"""
>>> x = OldBenz()
>>> print(x.number_of_wheels)
4
>>> print(x.get_speed())
70
>>> my = VW()
>>> print(my.number_of_wheels)
4
>>> print(my.diesel)
False
>>> print(my.star_logo)
Traceback (most recent call last):
...
AttributeError: 'VW' object has no attribute 'star_logo'
"""
class Vehicle(object):
    """Abstract base for the vehicle hierarchy in the doctest above."""
    # Subclasses override with their actual wheel count.
    number_of_wheels = None
    def get_speed(self):
        """Return the maximum speed."""
        raise NotImplementedError
class Bike(Vehicle):
    """Two-wheeled vehicle with a 20 km/h top speed."""
    number_of_wheels = 2
    def get_speed(self):
        return 20
class Car(Vehicle):
    """Generic car: four wheels, 90 top speed, brand set by subclasses."""
    brand = None
    number_of_wheels = 4
    def get_speed(self):
        return 90
class Benz(Car):
    """Mercedes-branded car carrying the star logo."""
    star_logo = True
    brand = "Mercedes"
class OldBenz(Benz):
    # Older models lack the star logo and are 20 slower than the base Car
    # speed inherited via Benz (90 - 20 = 70, per the doctest).
    star_logo = False
    def get_speed(self):
        return super(OldBenz, self).get_speed() - 20
        # super_object = super(OldBenz, self)
        # print(type(super_object))
        # meth = super_object.get_speed
        # speed = meth()
        # return speed - 20
class VW(Car):
    """Volkswagen-branded car."""
    brand = "Volkswagen"
# Rebinds the name VW to a subclass of the previous VW, effectively adding
# the `diesel` attribute to the class (demonstrated in the doctest; note
# that VW has no `star_logo`).
class VW(VW):
    diesel = False
if __name__ == '__main__':
import doctest
doctest.testmod()
|
17,234 | 1f8f8d11f0f79caa797de0ca4ccdae43f3dc3167 | # Copyright (c) 2014 Kontron Europe GmbH
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
from .msgs.constants import cc_err_desc
class DecodingError(Exception):
    """Error on message decoding."""
    pass


class EncodingError(Exception):
    """Error on message encoding."""
    pass


class TimeoutError(Exception):
    """Timeout occurred."""
    # NOTE(review): shadows the builtin TimeoutError -- callers importing
    # this module's name get this class, not the builtin.
    pass


class CompletionCodeError(Exception):
    """IPMI completion code not OK."""

    def __init__(self, cc):
        # Store the raw code and its human-readable description.
        self.cc = cc
        self.cc_desc = self.find_cc_desc(cc)

    def __str__(self):
        return "%s cc=0x%02x desc=%s" % (self.__class__.__name__, self.cc, self.cc_desc)

    def find_cc_desc(self, error_cc):
        """Look up error_cc in the (code, description) table cc_err_desc."""
        for cc in cc_err_desc:
            if error_cc == cc[0]:
                return cc[1]
        return "Unknown error description"


class NotSupportedError(Exception):
    """Not supported yet."""
    pass


class DescriptionError(Exception):
    """Message description incorrect."""
    pass


class RetryError(Exception):
    """Maximum number of retries exceeded."""
    pass


class DataNotFound(Exception):
    """Requested data not found."""
    pass


class HpmError(Exception):
    """HPM.1 error"""
    pass
|
17,235 | 760b8d1f51b5fbbbbe9d13c8743ae67bb276b4ca | #!/usr/local/bin/python
# Copyright 2015 The Switch Authors. All rights reserved.
# Licensed under the Apache License, Version 2, which is in the LICENSE file.
"""
Does specifying a . in an input file result in the parameter retaining
its default value, mimicking the behavior of AMPL?
The answer is yes, it does.
"""
import os
from pyomo.environ import *
# NOTE(review): Python 2 print statements and the deprecated
# AbstractModel.create() API -- this script targets an old Pyomo release.
m = AbstractModel()
m.S = Set(initialize=[1, 2, 3])
m.p = Param(m.S, default={1: 'a', 2: 'b', 3: 'c'})
i_d = m.create()
print "Values from initial defaults should be a, b, c:\n"
print [i_d.p[s] for s in i_d.S]
# Write a test data file.
# Overwrite default value for index 1
# Specify default value for index 2 with .
# Don't specify anything for 3, which should yield the default value.
path = 'foo.tab'
with open(path, 'w') as f:
    f.write("S\tp\n")
    f.write("1\t10\n")
    f.write("2\t.\n")
dp = DataPortal(model=m)
dp.load(filename=path, param=(m.p))
i_f = m.create(dp)
print "Values after reading from file should be 10, b, c:\n"
print [i_f.p[s] for s in i_f.S]
os.remove(path)
|
17,236 | fe929d8c81d56c2cf85842fde0cbf1e2976f8c8f | # solution to the HackerRank problem "Largest Rectangle"
# https://www.hackerrank.com/challenges/largest-rectangle/problem
# Real Estate Developers is planning to demolish a number of old buildings and construct a shopping mall
# Task is to find the largest solid area in which the mall can be constructed.
# Ahmetcan Ozturk
def largestRectangle(h):
    """Return the area of the largest rectangle fitting under histogram h.

    Replaces the original O(n^2) per-bar left/right scan with the classic
    monotonic-stack algorithm, which is O(n): each bar is pushed and popped
    at most once.  Behavior is unchanged, including h == [] -> 0.

    :param h: list of non-negative bar heights
    :return: maximum area (int)
    """
    best = 0
    # Stack of (start_index, height), heights strictly increasing.
    stack = []
    for i, height in enumerate(h):
        start = i
        # Bars taller than the current one can never extend past i;
        # settle their areas now and let the current bar inherit their start.
        while stack and stack[-1][1] > height:
            idx, prev_h = stack.pop()
            best = max(best, prev_h * (i - idx))
            start = idx
        stack.append((start, height))
    # Remaining bars extend to the right edge of the histogram.
    n = len(h)
    for idx, prev_h in stack:
        best = max(best, prev_h * (n - idx))
    return best
if __name__ == "__main__":
#h = [1,2,3,4,5]
#h = [8979, 4570, 6436, 5083, 7780, 3269, 5400, 7579, 2324, 2116]
n = int(input())
h = list(map(int, input().rstrip().split()))
result = largestRectangle(h)
print(result) |
17,237 | eee87b28b94537d136984d557363d015e85e1c6d | import os
import pyttsx3
engine = pyttsx3.init()
def speed_audio(speed):
    """Speak the current speed aloud via the module-level pyttsx3 engine."""
    global engine
    engine.say("speed is {0}.".format(speed))
    engine.runAndWait()


def heel_strike_audio():
    """Play the heel-striking cue (requires mpg321 and the mp3 in cwd)."""
    os.system('mpg321 heel_striking.mp3')


def increase_tilt_audio():
    """Play the increase-tilt cue."""
    os.system('mpg321 increase_tilt.mp3')


def decrease_tilt_audio():
    """Play the decrease-tilt cue."""
    os.system('mpg321 decrease_tilt.mp3')


def cadence_audio():
    """Play the cadence cue."""
    os.system('mpg321 cadence.mp3')


def overstriding_audio():
    """Play the overstriding cue."""
    os.system('mpg321 overstriding2.mp3')
17,238 | ca4c91d3392cd705923c12d97d3318b55280f3c8 | import pandas as pd
# Two frames sharing the composite key columns (key1, key2).
data1={'key1':['A0','A0','A1','A2'],
      'key2':['A0','A1','A0','A1'],
      'B':['B0','B1','B2','B3'],
      'C':['C0','C1','C2','C3'],
      'D':['D0','D1','D2','D3']}
df1=pd.DataFrame(data1)
data2={'key1':['A0','A1','A1','A2'],
      'key2':['A0','A0','A1','A1'],
      'B':['B4','B5','B6','B7'],
      'C':['C4','C5','C6','C7'],
      'D':['D4','D5','D6','D7']
      }
df2=pd.DataFrame(data2)
# Inner join on both keys: only (key1, key2) pairs present in BOTH frames
# survive; overlapping non-key columns get _x/_y suffixes.
dcon1=pd.merge(df1,df2,on=['key1','key2'])
print(dcon1)
17,239 | a4a404a9a7dce4b91233c6b9b263c98919e14096 | import pandas
#from nltk.classify import NaiveBayesClassifier as NBC
from nltk.tokenize import word_tokenize
import nltk
import json
#deals with decoding errors
import sys
reload(sys)
sys.setdefaultencoding('utf8')
input = "sad.csv"
#dictionary mapping the number of positive tweets that appear in data and the number of negative tweets
#eg. {positive: 4992000, negative: 423482340203}
class_probs ={}
#dictionary mapping the size (i.e. how many words (not unique) per positive and negative)
#eg. {positive: 209348209348129348, negative: 2039482093481234}
cateogry_size = {}
#dictionary mapping the frequency of the word given the sentiment
#eg {positive: {hey: 4, how: 5, is: 6, your_mom: 80000, negative: {lolz: 234}}
word_probs = {}
count = 0;
real_count = 0
#1). Loop through training data to get tweets and their sentiments
for df in pandas.read_csv(input, chunksize=1):
count += 1
print count
if(count < 100000):
continue
else:
real_count += 1
print real_count
sent_int = int(df.iloc[0]['Sentiment'])#0 is negative, 1 is postive
if sent_int == 0:
sent = 'neg'
else:
sent = 'pos'
class_probs[sent] = class_probs.get(sent, 0) + 1#add to class frequency dictionary
text = str(df.iloc[0]['SentimentText']) #grabbing actual twitter text from the SentimentSource column
try:
tokens = word_tokenize(text) #tokenize the tweet text
except UnicodeDecodeError: #catch decode errors
continue
cateogry_size[sent] = cateogry_size.get(sent, 0) + len(tokens) #add to total word count dictionary
#for each token in your token list, add frequency to dictionary of word probs for that category
for t in tokens:
lower_t = t.lower()
word_probs[sent] = word_probs.get(sent, {})
word_probs[sent][t] = word_probs[sent].get(t, 0) + 1
dictionaries = []
dictionaries.append(class_probs)
dictionaries.append(cateogry_size)
dictionaries.append(word_probs)
with open("training_information1000.json", 'w') as train:
json.dump(dictionaries, train)
|
17,240 | 605d2d380d7917e6960ddd2f24038dee88b3b720 | import numpy as np
from numpy.testing import assert_allclose
from pytest import raises as assert_raises
from scipy.optimize import nnls
class TestNNLS:
    """Tests for scipy.optimize.nnls (non-negative least squares)."""

    def setup_method(self):
        # Fixed seed keeps the random problems reproducible across runs.
        self.rng = np.random.default_rng(1685225766635251)

    def test_nnls(self):
        # Consistent square-ish system: residual norm should be ~0.
        a = np.arange(25.0).reshape(-1, 5)
        x = np.arange(5.0)
        y = a @ x
        x, res = nnls(a, y)
        assert res < 1e-7
        assert np.linalg.norm((a @ x) - y) < 1e-7

    def test_nnls_tall(self):
        # Overdetermined system with a known sparse non-negative solution.
        a = self.rng.uniform(low=-10, high=10, size=[50, 10])
        x = np.abs(self.rng.uniform(low=-2, high=2, size=[10]))
        x[::2] = 0
        b = a @ x
        xact, rnorm = nnls(a, b, atol=500*np.linalg.norm(a, 1)*np.spacing(1.))
        assert_allclose(xact, x, rtol=0., atol=1e-10)
        assert rnorm < 1e-12

    def test_nnls_wide(self):
        # If too wide then problem becomes too ill-conditioned and starts
        # emitting warnings, hence small m, n difference.
        a = self.rng.uniform(low=-10, high=10, size=[100, 120])
        x = np.abs(self.rng.uniform(low=-2, high=2, size=[120]))
        x[::2] = 0
        b = a @ x
        xact, rnorm = nnls(a, b, atol=500*np.linalg.norm(a, 1)*np.spacing(1.))
        assert_allclose(xact, x, rtol=0., atol=1e-10)
        assert rnorm < 1e-12

    def test_maxiter(self):
        # test that maxiter argument does stop iterations
        a = self.rng.uniform(size=(5, 10))
        b = self.rng.uniform(size=5)
        with assert_raises(RuntimeError):
            nnls(a, b, maxiter=1)
|
17,241 | edcf750f07556fc80f7b2be0a7ab76e1458ba1c5 | # Test Logging in Python
import logging
import logging.config
logging.config.fileConfig(fname='log.conf', disable_existing_loggers=False)
# Get the logger specified in the file
logger = logging.getLogger(__name__)
def main():
    """Emit one message at each severity to exercise the handlers from log.conf."""
    logger.debug('This is a debug message')
    logger.info('This is an info message')
    logger.warning('This is a warning message')
    logger.error('This is an error message')
    logger.critical('This is a critical message')


if __name__ == "__main__":
    main()
|
17,242 | 875f2e9cf5632e3fae3333d2c7ebed21fd8ea0f0 | from django.shortcuts import render
from django.shortcuts import get_object_or_404
from django.http import HttpResponse
from .models import Post,Category
import markdown
from comments.forms import CommentForm
from django.views.generic import ListView,DetailView
# Create your views here.
# def index(request):
# # return HttpResponse('欢迎访问我的博客首页')
# #return render(request,'blog/index.html',
# # context={'title':'我的博客首页','welcome':'欢迎访问我的博客首页'})
# post_list = Post.objects.all().order_by('-created_time')
# return render(request,'blog/index.html',context={'post_list':post_list})
# def detail(request, pk):
# post = get_object_or_404(Post,pk=pk)
# post.increase_views()
# post.body = markdown.markdown(post.body,
# extensions=['markdown.extensions.extra',
# 'markdown.extensions.codehilite',
# 'markdown.extensions.toc',])
# form = CommentForm()
# comment_list = post.comment_set.all()
# context = {'post':post,'form':form,'comment_list':comment_list}
# return render(request, 'blog/detail.html', context=context)
# def archives(request, year, month):
# post_list = Post.objects.filter(created_time__year=year,created_time__month = month).order_by('-created_time')
# return render(request, 'blog/index.html', context={'post_list':post_list})
# def category(request, pk):
# cate = get_object_or_404(Category, pk=pk)
# post_list = Post.objects.filter(category = cate).order_by('-created_time')
# return render(request, 'blog/index.html', context={'post_list':post_list})
class IndexView(ListView):
    """Paginated post list for the blog front page (model's default ordering)."""
    model = Post
    template_name = 'blog/index.html'
    context_object_name = 'post_list'
    paginate_by = 10
class CategoryView(IndexView):
    """Index page restricted to a single category (pk taken from the URL)."""

    def get_queryset(self):
        """404 if the category does not exist, otherwise filter the index queryset."""
        cate = get_object_or_404(Category, pk=self.kwargs.get('pk'))
        # Zero-argument super(): Django 2.2 requires Python 3, so the
        # explicit super(CategoryView, self) form is redundant.
        return super().get_queryset().filter(category=cate)
class ArchivesView(IndexView):
    """Index page restricted to posts created in a given year/month (from URL)."""

    def get_queryset(self):
        year = self.kwargs.get('year')
        month = self.kwargs.get('month')
        # Zero-argument super(): Python-3-only codebase (Django 2.2).
        return super().get_queryset().filter(created_time__year=year,
                                             created_time__month=month)
class PostDetailView(DetailView):
    """Render a single post, bumping its view counter and attaching comments."""
    model = Post
    template_name = 'blog/detail.html'
    context_object_name = 'post'

    def get(self, request, *args, **kwargs):
        # Bump the counter only after the post was fetched successfully
        # (a 404 in super().get() skips the increment).
        response = super().get(request, *args, **kwargs)
        self.object.increase_views()
        return response

    def get_object(self, queryset=None):
        # Pass the caller's queryset through -- the original hard-coded
        # queryset=None, silently discarding any queryset Django supplied.
        post = super().get_object(queryset=queryset)
        # Render the stored Markdown body to HTML for the template.
        post.body = markdown.markdown(post.body,
                                      extensions=[
                                          'markdown.extensions.extra',
                                          'markdown.extensions.codehilite',
                                          'markdown.extensions.toc',
                                      ])
        return post

    def get_context_data(self, **kwargs):
        """Add the comment form and this post's comments to the template context."""
        context = super().get_context_data(**kwargs)
        form = CommentForm()
        comment_list = self.object.comment_set.all()
        context.update({
            'form': form,
            'comment_list': comment_list
        })
        return context
|
17,243 | c244ac19603e5425c63754a797db8aa21cf57e3a | import phe
import torch
import numpy as np
class KeyMaster():
    """Wrap a Paillier keypair for encrypting/decrypting scalars and tensors."""

    def __init__(self):
        # 256-bit modulus: fast but cryptographically weak -- experiments only.
        self.public, self.private = phe.generate_paillier_keypair(n_length=256)

    def public_key(self):
        """Return the public half of the keypair."""
        return self.public

    def encrypt(self, plain):
        """Encrypt a single number (coerced to float) with the public key."""
        return self.public.encrypt(float(plain))

    def decrypt(self, enc):
        """Decrypt a single encrypted number with the private key."""
        return self.private.decrypt(enc)

    def encrypt_tensor_to_numpy(self, tensor):
        """Encrypt a torch tensor element-wise; returns a numpy object array."""
        arr = tensor.cpu().detach().numpy()
        vfunc = np.vectorize(self.encrypt)
        return vfunc(arr)

    def decrypt_nparray(self, enc):
        """Decrypt an array of Paillier ciphertexts element-wise."""
        vfunc = np.vectorize(self.decrypt)
        return vfunc(enc)
|
17,244 | f125c381644bc46c5b7bdc9ca83b6a86f79a9a20 | # -*- coding: utf-8 -*-
from bs4 import BeautifulSoup
import re
from scrapy.http import Request
from feedback.items import FeedbackItem
from scrapy.spiders import CrawlSpider, Rule
from scrapy.linkextractors import LinkExtractor
from scrapy.selector import Selector
class IreSpider(CrawlSpider):
    """Crawl irecommend.ru review pages, yielding posts and their comments."""
    name = "test"
    allowed_domains = ["irecommend.ru"]
    start_urls = [
        'http://irecommend.ru/taxonomy/term/393347%20930502/reviews',
    ]
    # Follow review links found in the teaser column of the listing page.
    rules = [
        Rule(LinkExtractor(
            restrict_xpaths=('//nobr[@class="views-field-teaser"]/a'),
            allow=['/content/']),
            callback='parse_comments',
            follow=True)
    ]

    def parse_comments(self, response):
        """Yield one 'post' item for the review, then one 'comment' item per reply."""
        item = FeedbackItem()
        item['date'] = response.selector.xpath('//span[@class="dtreviewed"]/meta/@content').extract()
        item['user'] = response.selector.xpath('//strong[@class="reviewer"]/a/text()').extract()
        item['title'] = response.selector.xpath('//h2[@class="summary"]/a/text()').extract()
        item['url'] = response.url
        item['site'] = IreSpider.allowed_domains
        item['text'] = response.selector.xpath('//div[@class="views-field-teaser"]//*').extract()
        item['type'] = 'post'
        yield item
        # Comments: each <li> under ul.list is one reply; parsed with
        # BeautifulSoup since the structure is easier to pick apart as HTML.
        sel = Selector(response)
        title = response.selector.xpath('//h2[@class="summary"]/a/text()').extract()
        sites = sel.xpath('//ul[@class="list"]/li').extract()
        items = []
        for site in sites:
            item = FeedbackItem()
            item['url'] = response.url
            item['site'] = IreSpider.allowed_domains
            item['title'] = title
            #item['user'] = site.xpath('div/a/').extract()
            #item['date'] = site.xpath('div/span/@title').extract()
            #txt = site.xpath('/div[@class="txt"]').extract()
            sitej = "".join(site)
            sp = BeautifulSoup(sitej)
            # Second <a> in the header div is the commenter's profile link.
            item['user'] = sp.div.find_all('a')[1].contents
            item['date'] = sp.div.span['title']
            soup = sp.find("div", class_="txt")
            strngs = []
            # Replace inline images with their alt text so smilies survive
            # in the plain-text extraction below.
            for img in soup.find_all("img"):
                img.replace_with(img['alt'])
            item['text'] = soup.get_text()
            item['type'] = 'comment'
            items.append(item)
        for item in items:
            yield item
|
17,245 | bfa65331c0cd219a70e4a195e78d7a28af6af273 | # -*- coding: utf-8 -*-
"""
Manages user's configuration
"""
import os
import configparser
from pathlib import Path
def rmfriend_dir():
    """Directory that holds rmfriend's configuration and cache (~/.rmfriend)."""
    home = Path.home()
    return home / '.rmfriend'
def notebook_cache_director():
    """Return the path to the .rmfriend/notebooks in the user's home."""
    notebooks = rmfriend_dir() / 'notebooks'
    return str(notebooks)
def config_file_and_path():
    """Return the path to the .rmfriend/config.cfg file in the user's home."""
    config_path = rmfriend_dir() / 'config.cfg'
    return str(config_path)
def recover_or_create():
    """Recover or create the configuration if it's not present.

    This will set up the configuration file and cache directory if not present.

    :returns: A configparser.ConfigParser instance.
        This will have the 'rmfriend' section with the configuration fields
        - address
        - port
        - username
        - cache_dir
    """
    cache_dir = notebook_cache_director()
    # exist_ok avoids the check-then-create race of the original
    # `if not exists: makedirs` sequence.
    os.makedirs(cache_dir, exist_ok=True)
    config_file = config_file_and_path()
    config = configparser.ConfigParser()
    if os.path.isfile(config_file):
        config.read(config_file)
    else:
        # First run: write the defaults (reMarkable's USB address/SSH login).
        config['rmfriend'] = {
            'address': '10.11.99.1',
            'port': '22',
            'username': 'root',
            'cache_dir': cache_dir,
            'remote_dir': '/home/root/.local/share/remarkable/xochitl'
        }
        with open(config_file, 'w') as fd:
            config.write(fd)
    return config
|
17,246 | a5a6bc67e50ff9a463b4298b71e606e27844c3d8 | #!/usr/bin/env python
import numpy as np
import numpy.linalg as la
import pycuda.driver as cuda
import pycuda.autoinit
def vecadd(a, b, c):
    """CPU reference implementation: element-wise c = a + b, written in place."""
    c[:] = a + b
# prepare the kernel: one thread per element, guarded against overrun
from pycuda.compiler import SourceModule
mod = SourceModule("""
__global__ void vecadd_gpu(int nx, float *a_gpu, float *b_gpu, float *c_gpu) {
int i = blockIdx.x*blockDim.x + threadIdx.x;
if(i<nx) c_gpu[i] = a_gpu[i] + b_gpu[i];
}
""")
vecadd_gpu = mod.get_function("vecadd_gpu")
# allocate arrays with initialize
nx = 1000;
a = np.random.randn(nx).astype(np.float32)
b = np.random.randn(nx).astype(np.float32)
c = np.zeros(nx, 'f')
c2 = np.zeros_like(c)
# allocate device arrays with memcpy
a_gpu = cuda.to_device(a)
b_gpu = cuda.to_device(b)
c_gpu = cuda.mem_alloc(c.nbytes)
# exec: CPU reference first, then the GPU kernel
vecadd(a, b, c)
# NOTE(review): nx/256+1 is float division under Python 3 and pycuda's
# grid argument needs ints -- this looks written for Python 2; confirm.
vecadd_gpu(np.int32(nx), a_gpu, b_gpu, c_gpu, block=(256,1,1), grid=(nx/256+1,1))
# copy result and compare: float32 adds are deterministic, so exact match
cuda.memcpy_dtoh(c2, c_gpu)
assert la.norm(c2-c) == 0
|
17,247 | ce13d01291a80f727cae898a6d50be6909dae3d3 | #
# See https://github.com/dials/dials/wiki/pytest for documentation on how to
# write and run pytest tests, and an overview of the available features.
#
from __future__ import absolute_import, division, print_function
import libtbx.load_env
import pytest
@pytest.fixture
def dials_regression():
    '''Return the absolute path to the dials_regression module as a string.
    Skip the test if dials_regression is not installed.'''
    try:
        return libtbx.env.dist_path('dials_regression')
    except KeyError:
        # dist_path raises KeyError when the module is not configured.
        pytest.skip("dials_regression required for this test")


# Let libtbx discover and collect its own test files through pytest.
from libtbx.test_utils.pytest import libtbx_collector
pytest_collect_file = libtbx_collector()
|
17,248 | 3b428a9036c5f6c63ba4f6aded19a701ab7dd21e | MOCK_USER = {
'username': 'testuser',
'real_name': 'Test User',
'registration_number': 123,
'roles': ['ACCOUNTS', 'EQUIPMENT'],
'email': 'test@user.com',
}
# Second regular user with the same role set as MOCK_USER.
MOCK_USER_2 = {
    'username': 'testuser_2',
    'real_name': 'Test User 2',
    'registration_number': 124,
    'roles': ['ACCOUNTS', 'EQUIPMENT'],
    'email': 'test2@user.com',
}
# Credentials matching MOCK_USER.
MOCK_LOGIN = {'username': 'testuser', 'password': 'testpass'}
# User with no permissions, for authorization-failure tests.
MOCK_USER_NO_PERM = {
    'username': 'testusernoperm',
    'real_name': 'Test User No Perm',
    'registration_number': 134,
    'roles': ['NONE'],
    'email': 'testnoperm@user.com',
}
MOCK_LOGIN_NO_PERM = {
    'username': 'testusernoperm',
    'password': 'testusernoperm'
}
# Equipment fixtures: both reference type_id 1 (see MOCK_EQUIPMENT_TYPE).
MOCK_EQUIPMENT = {
    'tag': 'TAG0001',
    'type_id': 1,
    'brand': 'TestBrand',
    'model': 'TestModel',
    'series': 'TestSeries'
}
MOCK_EQUIPMENT_2 = {
    'tag': 'TAG0002',
    'type_id': 1,
    'brand': 'TestBrand',
    'model': 'TestModel',
    'series': 'TestSeries'
}
MOCK_EQUIPMENT_TYPE = {'description': 'Balance'}
MOCK_EQUIPMENT_TYPE_2 = {'description': 'Printer'}
17,249 | 9ba20f0afb2d016a133f84af148e55a978239906 |
import math
import time
t = time.time()
lim = 1000000
def isPrime(num):
    '''isPrime(int) --> bool

    Trial division over odd candidates up to sqrt(num).  Negative inputs
    are tested by absolute value, which was the evident intent of the
    original ``num *= -1`` branch; that branch misclassified -1 (True)
    and -2 (False) because the sign flip skipped the small-number checks.
    math.isqrt avoids the float round-off risk of floor(sqrt(num)).
    '''
    num = abs(num)
    if num < 2:
        return False
    if num == 2:
        return True
    if num % 2 == 0:
        return False
    for i in range(3, math.isqrt(num) + 1, 2):
        if num % i == 0:
            return False
    return True
def primeFactors(n):
    """Return the set of distinct prime factors of n.

    Relies on the module-level ``primeList`` (all primes below 1,000,000
    in ascending order).  After dividing out every listed prime smaller
    than the remaining cofactor, that cofactor is either 1 or itself
    prime, so the original unreachable second fallback (``if n != 1``
    after an unconditional return) has been folded into a single check,
    which also drops the redundant isPrime() call.
    """
    i = 0
    factSet = set()
    while primeList[i] < n:
        factor = primeList[i]
        while n % factor == 0:
            n //= factor
            factSet.add(factor)
        i += 1
    # Remaining cofactor is 1 or a prime larger than all divided-out primes.
    if n != 1:
        factSet.add(n)
    return factSet
def phi(n):
    """Euler's totient via the product formula: n * prod(1 - 1/p) over prime p | n."""
    for h in primeFactors(n):
        # Divide by the prime first so the arithmetic stays exact in ints.
        n = n//h * (h -1)
    return n
primeList = [] #primeList = [2]
compList = []
# Partition 2..1,000,000 into primes and composites by trial division.
for i in range(2, 1000001):#): for i in range(2, 1000000):
    if isPrime(i):
        primeList.append(i)
    else:
        compList.append(i)
print("done with easy part")
t = time.time()
tot = 0
# Sum phi(n) over composites; primes contribute phi(p) = p - 1, which is
# folded in through the precomputed constant below.
for num in compList:
    tot += phi(num)
# NOTE(review): 37550323525 is presumably sum(p - 1 for p in primeList);
# confirm before trusting the final total.
tot += 37550323525
print(tot)
#302143220713
#subtract 1
print(time.time() - t)
17,250 | dc4e76fbfe6bfeeddb55a5a6ae3eace178b5b28e | """
Class Equipement
"""
class Equipement:
    """A sports-facility equipment record tied to an installation."""

    def __init__(self, obj):
        """
        Create an Equipement object from a record dict.

        nature_libelle > nature of the equipement
        ins_nom > name of the equipement
        ins_numero_install > foreign key of the installation
        equipement_id > primary key of the equipement
        """
        self.nature_libelle = obj['NatureLibelle']
        self.ins_nom = obj['InsNom']
        self.ins_numero_install = obj['InsNumeroInstall']
        self.equipement_id = obj['EquipementId']

    def add_db_equipement(self, c):
        """
        Insert this object (self) into the database via cursor ``c``.
        Uses a parameterized query, so values are escaped by the driver.
        """
        insertQuery = "INSERT INTO Equipement(NumeroEquipement,NumeroInstallation,NatureLibelle,InsNom) VALUES (?,?,?,?)"
        c.execute(insertQuery, (self.equipement_id,self.ins_numero_install,self.nature_libelle,self.ins_nom))
|
17,251 | 1431acd7eaddeaf9bf6caf2cdad81a0e784c5e4b | print(6 + 2)
# Basic arithmetic demo: subtraction, multiplication, division.
print(10 - 2)
print(2 * 4)
print(int(48 / 6))  # int() truncates the float produced by true division
17,252 | 1672c76bb622bf001cebac2be61190868f66b253 | arr1 = (67, 28, 63, 77, 80)
arr2 = (62, 35, 70, 75, 63)
# input(arr1,arr2)
count = len(arr1)
i = 0
stList = []
while i < count:
if arr1[i] > 60:
if arr2[i] > 60:
stList.append("S"+str(i+1))
i += 1
print(stList)
|
17,253 | fa81d11c1ce753f5be8ec7125b25fcf768e2d1f7 | class Matrix:
"""General matrix class for Betsy Cannon"""
    def __init__(self, file = None):
        """Optionally load rows of whitespace-separated floats from *file*.

        NOTE(review): Python 2 code -- under Python 3 map() would append a
        lazy map object instead of a list of floats.
        """
        self.matrix = []
        if file:
            for row in file:
                self.matrix.append(map(float, row.split()))
    def __str__(self):
        """Render the raw nested row list."""
        return str(self.matrix)
    def write_to_file(self, file):
        """Write one space-separated line per matrix row to *file*."""
        for row in self.matrix:
            file.write(' '.join(map(str, row)) + '\n')
    def band_matrix_elimination(self, n, m):
        # Elimination with no row exchanges
        # m is size of band; matrix is n x (n+1) augmented with b in column n.
        # Returns True on success, False on a zero pivot (Python 2 print).
        # iterate through each row, except the last one
        for k in range(n-1):
            # Check if pivot is zero
            if self.matrix[k][k] == 0:
                print "Pivot may not be zero."
                return False
            # Only need to update (m-1)/2 rows below, unless of course it's at the end of the matrix.
            # After that it's zero.
            if (k + (m-1)/2 + 1 < n):
                max = k + (m-1)/2 + 1
            else:
                max = n
            for i in range(k+1, max):
                # only need to update m values per row max
                if (k + m - 1 < n):
                    j_max = k + m - 1
                else:
                    j_max = n
                for j in range(k+1, j_max):
                    self.matrix[i][j] = self.matrix[i][j] - (self.matrix[i][k]/self.matrix[k][k]) * self.matrix[k][j]
                #also update b
                self.matrix[i][n] = self.matrix[i][n] - (self.matrix[i][k]/self.matrix[k][k]) * self.matrix[k][n]
        return True
    def back_substitution(self, n, m):
        # To be applied after elimination of a band matrix
        # m is size of band; returns the solution vector of length n.
        soln = n*[0]
        # Work upward from the last row, using already-solved entries.
        for k in range(n-1, -1, -1):
            soln[k] = self.matrix[k][n]
            # Only need to consider (m-1)/2 values after the pivot, unless of course it's at the end of the row.
            # After that it's zero.
            if (k + (m-1)/2 + 1 < n):
                max = k + (m-1)/2 + 1
            else:
                max = n
            for i in range(k+1, max):
                soln[k] = soln[k] - self.matrix[k][i] * soln[i]
            soln[k] = soln[k]/self.matrix[k][k]
        return soln
|
17,254 | 9fc30a0e0056b38a6ec208e887e52efcd3a714b4 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Oct 19 22:14:19 2017
@author: t
"""
import numpy as np
import cv2
#cv2.copyMakeBorder(src, top, bottom, left, right, borderType[, dst[, value]]) → dst
# `os` was used below without ever being imported -- import it so the
# script actually runs.
import os

file_path = os.path.join('/home', 't', 'git', 'Snapsolve',
                         'TrainingSetGeneration', 'generatedpics',
                         'opencvtest.png')
img = cv2.imread(file_path)
# The original call was incomplete (`copyMakeBorder(img, 300, )` raises
# TypeError).  Pad 300px on every side with a constant black border.
# TODO(review): only the top value (300) survived in the original --
# confirm the intended border sizes and type.
img = cv2.copyMakeBorder(img, 300, 300, 300, 300,
                         cv2.BORDER_CONSTANT, value=[0, 0, 0])
out_path = '/home/t/git/Snapsolve/TrainingSetGeneration/generatedpics/out1.png'
cv2.imwrite(out_path, img)
|
17,255 | 8cd2155975dcb66ac89ac7ac3b26ba600cd5afd4 | import sys
from setuptools import find_packages, setup
# Base runtime dependencies; extended below for very old interpreters.
install_requires = [
    'numpy>=1.11.1', 'pyyaml', 'six', 'addict', 'requests', 'opencv-python'
]
# Backports needed only on Python < 3.3 / < 3.4 respectively.
if sys.version_info < (3, 3):
    install_requires.append('backports.shutil_get_terminal_size')
if sys.version_info < (3, 4):
    install_requires.extend(['enum34', 'pathlib'])
def readme():
    """Read and return README.rst, used as the package's long description."""
    with open('README.rst') as f:
        return f.read()
def get_version():
    """Extract __version__ from mmcv/version.py without importing the package."""
    version_file = 'mmcv/version.py'
    with open(version_file, 'r') as f:
        # exec of our own version file (trusted input) defines __version__
        # in this function's local namespace.
        exec(compile(f.read(), version_file, 'exec'))
    return locals()['__version__']
# Package metadata; version and long description are read from the source
# tree at build time (see get_version() / readme() above).
setup(
    name='mmcv',
    version=get_version(),
    description='Open MMLab Computer Vision Foundation',
    long_description=readme(),
    keywords='computer vision',
    packages=find_packages(),
    classifiers=[
        'Development Status :: 4 - Beta',
        'License :: OSI Approved :: GNU General Public License v3 (GPLv3)',
        'Operating System :: OS Independent',
        'Programming Language :: Python :: 2',
        'Programming Language :: Python :: 2.7',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.4',
        'Programming Language :: Python :: 3.5',
        'Programming Language :: Python :: 3.6',
        'Topic :: Utilities',
    ],
    url='https://github.com/open-mmlab/mmcv',
    author='Kai Chen',
    author_email='chenkaidev@gmail.com',
    license='GPLv3',
    setup_requires=['pytest-runner'],
    tests_require=['pytest'],
    install_requires=install_requires,
    zip_safe=False)
17,256 | d332cf90acc3a73bdf6f2d643e02787976159f98 | import os
import numpy as np
import librosa
import matplotlib.pyplot as plt
import pickle
def onset_times(sound, sampling_rate):
    '''
    input :
        sound - 1D array song
        sampling_rate - sampling_rate
    return :
        1D array of detected onset times, in seconds
    '''
    # librosa detects onsets in frame units; convert frames to seconds.
    return librosa.frames_to_time(librosa.onset.onset_detect(y=sound, sr=sampling_rate), sr=sampling_rate)
def time_axies(array, sampling_rate):
    '''
    input :
        array - 1D array
        sampling_rate - int
    return :
        a time axis (seconds) matching array under the given sampling rate
    '''
    num_samples = len(array)
    duration = (num_samples - 1) / sampling_rate
    return np.linspace(0, duration, num_samples)
def onset_plot(sound, sampling_rate):
    '''
    input :
        sound - 1D array
        sampling_rate - int
    return :
        draws the waveform with detected onset times marked as dashed lines
    '''
    onset_time = onset_times(sound, sampling_rate)
    time_axis = time_axies(sound, sampling_rate)
    plt.plot(time_axis, sound)
    # Vertical dashed red lines span the full amplitude range at each onset.
    plt.vlines(onset_time, np.min(sound), np.max(sound), color='r', alpha=0.7,
               linestyle='--', label='Onsets')
    plt.show()
    return
def DTFS(sound, islog = False, compressed_ratio = 100):
    '''
    input :
        sound : 1D array (one period of the signal)
        islog : boolean
        compressed_ratio : int
    return :
        perform DTFS(Discrete time fourier series)
        if islog == True : normalize(log(1 + compressed_ratio*DTFS))
        else : normalize(DTFS)

    Only coefficients k < len(sound)//2 are computed (the rest stay 0),
    matching the original O(n^2) double loop, but computed with an FFT
    in O(n log n).  Normalization is inlined (sum of magnitudes == 1).
    '''
    period = len(sound)
    a = np.zeros(period, dtype=np.complex64)
    # DTFS coefficients a_k = (1/N) * sum_n x[n] e^{-j 2pi k n / N}
    # are exactly the DFT divided by N.
    a[:period // 2] = np.fft.fft(sound)[:period // 2] / period
    temp = np.array(abs(a), np.float32)
    if islog:
        temp = np.log(1 + compressed_ratio * temp)
    return temp / np.sum(temp)
def clip_by_value(x, v_max=1, v_min=0):
    """Clamp x into the closed interval [v_min, v_max]; in-range values pass through."""
    result = x
    if x > v_max:
        result = v_max
    elif x < v_min:
        result = v_min
    return result
def pickle_load(path):
    """Load and return a pickled object from *path*.

    Uses a context manager so the file handle is closed even if
    unpickling raises (the original leaked the handle on error).
    NOTE: pickle is unsafe on untrusted files -- only load your own data.
    """
    with open(path, 'rb') as f:
        return pickle.load(f)
def pickle_store(content, path):
    """Pickle *content* to *path*.

    Uses a context manager so the file handle is closed even if
    pickling raises partway (the original leaked the handle on error).
    """
    with open(path, 'wb') as f:
        pickle.dump(content, f)
def normalize(x):
    '''
    input :
        x - numpy 1D array
    return :
        the array scaled so its elements sum to 1
    '''
    return x / np.sum(x)
# NOTE(review): exact duplicate of the DTFS defined earlier in this module;
# this second definition silently shadows the first.  One of the two should
# be deleted.
def DTFS(sound, islog = False, compressed_ratio = 100):
    '''
    input :
        sound : 1D array
        islog : boolean
        compressed_ratio : int
    return :
        perform DTFS(Discrete time fourier series)
        if islog == True : normalize(log(1 + compressed_ratio*DTFS))
        else : normalize(DTFS)
    '''
    period = len(sound)
    a = np.zeros(period, dtype = np.complex64)
    # O(n^2) direct evaluation of the first period/2 Fourier coefficients.
    for k in range(int(period/2)):
        for n in range(period):
            a[k] += sound[n]*np.exp(-1j*2*np.pi/period*k*n)
        a[k]/= period
    temp = np.array(abs(a), np.float32)
    if islog:
        return normalize(np.log(1 + compressed_ratio*temp))
    else :
        return normalize(temp)
|
17,257 | a85712134472c9375c850239feb05514983912ea | import numpy as np
from scipy.signal import convolve2d
import matplotlib.pyplot as plt
# Generate original image: 16x16 black canvas with a white 8x8 square.
img = np.zeros([16, 16])
img[4:12, 4:12] = 255
# Define filters (various directional / cross / edge-ish kernels).
filters = [ np.array([[1, 1, 1], [0, 1, 1], [0, 0, 1]]),
            np.array([[1, 1, 1], [1, 1, 0], [1, 0, 0]]),
            np.array([[1, 0, 1], [0, 0, 0], [1, 0, 1]]),
            np.array([[0, 1, 0], [1, 1, 1], [0, 1, 0]]),
            np.array([[1, 1, 1], [0, 0, 0], [0, 0, 0]]),
            np.array([[0, 0, 0], [0, 0, 0], [1, 0, 1]])]
# Normalize filters so each kernel sums to 1.
filters = [fil / np.sum(fil) for fil in filters]
# Apply filters
filtered_imgs = []
for fil in filters:
    # Filter image (same-size output, zero padding at the border).
    fil_img = convolve2d(img, fil, 'same')
    # Threshold back to a binary image.
    fil_img[fil_img > 0] = 255
    filtered_imgs.append(fil_img)
# Plot filtered images in a 2x3 grid.
fig = plt.figure(1)
for i in range(2):
    for j in range(3):
        ind = 3 * i + j
        # Direct (rows, cols, index) call replaces the original
        # int(f'23{ind+1}') string hack, which only works for grids
        # whose dimensions are single digits.
        plt.subplot(2, 3, ind + 1)
        plt.imshow(filtered_imgs[ind], cmap='gray', vmin=0, vmax=255)
plt.show()
|
17,258 | 38e24bb2d36ccc1a4722146c985681a919f57412 | '''
Sudoku is a logic-based, combinatorial number-placement puzzle.
The objective is to fill a 9×9 grid with digits so that
each column, each row, and each of the nine 3×3 subgrids that compose the grid
contains all of the digits from 1 to 9.
Complete the check_sudoku function to check if the given grid
satisfies all the sudoku rules given in the statement above.
'''
def check_sudoku(sudoku):
    '''
    Return True if the 9x9 grid satisfies all sudoku rules, else False.

    Each row, each column, and each of the nine 3x3 subgrids must contain
    the digits 1..9 exactly once.  Cells may arrive as strings (main()
    reads them from the console without converting), so they are coerced
    to int first.

    The original body contained only this docstring and always returned
    None; this implements the documented contract.
    '''
    digits = set(range(1, 10))
    try:
        grid = [[int(cell) for cell in row] for row in sudoku]
    except (TypeError, ValueError):
        return False
    # Shape check: exactly 9 rows of 9 cells.
    if len(grid) != 9 or any(len(row) != 9 for row in grid):
        return False
    for i in range(9):
        if set(grid[i]) != digits:
            return False  # row i broken
        if set(grid[x][i] for x in range(9)) != digits:
            return False  # column i broken
    # 3x3 subgrids, anchored at multiples of 3.
    for bx in range(0, 9, 3):
        for by in range(0, 9, 3):
            box = {grid[x][y] for x in range(bx, bx + 3) for y in range(by, by + 3)}
            if box != digits:
                return False
    return True
def findNextCellToFill(grid, i, j):
    """Locate the next empty (zero) cell, scanning from (i, j) first.

    Returns (row, col) of an empty cell, or (-1, -1) when the grid is full.
    """
    # Pass 1: resume from the suggested position (rows i.., cols j..).
    for row in range(i, 9):
        for col in range(j, 9):
            if grid[row][col] == 0:
                return row, col
    # Pass 2: fall back to a full scan from the top-left corner.
    for row in range(9):
        for col in range(9):
            if grid[row][col] == 0:
                return row, col
    return -1, -1
def isValid(grid, i, j, e):
    """Return True when value e may be placed at grid[i][j] without conflict."""
    if any(grid[i][col] == e for col in range(9)):
        return False  # e already in this row
    if any(grid[row][j] == e for row in range(9)):
        return False  # e already in this column
    # Top-left corner of the 3x3 section containing (i, j)
    # (floored division anchors the section boundary).
    top_x, top_y = 3 * (i // 3), 3 * (j // 3)
    for row in range(top_x, top_x + 3):
        for col in range(top_y, top_y + 3):
            if grid[row][col] == e:
                return False  # e already in this 3x3 section
    return True
def solveSudoku(grid, i=0, j=0):
    """Backtracking solver: fills zeros in grid in-place; True once solved."""
    i,j = findNextCellToFill(grid, i, j)
    if i == -1:
        # No empty cell left: the grid is complete.
        return True
    for e in range(1,10):
        if isValid(grid,i,j,e):
            grid[i][j] = e
            if solveSudoku(grid, i, j):
                return True
            # Undo the current cell for backtracking
            grid[i][j] = 0
    return False
def main():
    '''
    main function to read input sudoku from console
    call check_sudoku function and print the result to console
    '''
    # initialize empty list
    sudoku = []
    # loop to read 9 lines of input from console
    for i in range(9):
        # read a line, split it on SPACE and append row to list
        # (cells remain strings here -- check_sudoku is expected to coerce)
        row = input().split(' ')
        sudoku.append(row)
    # call solution function and print result to console
    print(check_sudoku(sudoku))
if __name__ == '__main__':
main() |
17,259 | e5f23bc2c9355afa3df737ce2b2c922917e0745c | # Generated by Django 2.2.7 on 2020-09-26 16:12
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    # Auto-generated by Django's makemigrations (mostly verbose_name /
    # choices translations plus the new Tabs model); do not edit by hand.

    dependencies = [
        ('blog', '0011_auto_20200813_0902'),
    ]

    operations = [
        migrations.CreateModel(
            name='Tabs',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('root', models.CharField(max_length=400, verbose_name='url')),
            ],
        ),
        migrations.AlterField(
            model_name='category',
            name='html',
            field=models.CharField(default='category.html', max_length=200, verbose_name='Category File Name'),
        ),
        migrations.AlterField(
            model_name='category',
            name='htmlpost',
            field=models.CharField(default='post.html', max_length=200, verbose_name='File Name Read More of This Ccategory'),
        ),
        migrations.AlterField(
            model_name='category',
            name='pcategory',
            field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='Category', to='blog.Category', verbose_name='Mother Category'),
        ),
        migrations.AlterField(
            model_name='category',
            name='pcount',
            field=models.IntegerField(default=0, verbose_name='Number of Posts'),
        ),
        migrations.AlterField(
            model_name='comment',
            name='status',
            field=models.IntegerField(choices=[(1, 'Confirmed'), (0, 'Not confirmed')], default=0, verbose_name='Status'),
        ),
        migrations.AlterField(
            model_name='copen',
            name='copeni',
            field=models.IntegerField(default=0, verbose_name='Percentage of Coupons'),
        ),
        migrations.AlterField(
            model_name='field',
            name='type',
            field=models.CharField(choices=[('email', 'Email'), ('number', 'Number'), ('password', 'Password'), ('text', 'Text')], default='text', max_length=10, verbose_name='Type of Field'),
        ),
        migrations.AlterField(
            model_name='portable',
            name='sex',
            field=models.IntegerField(choices=[(0, 'Female'), (1, 'Male')], default=0, verbose_name='Sex'),
        ),
        migrations.AlterField(
            model_name='portable',
            name='user',
            field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='Userportable', to=settings.AUTH_USER_MODEL, verbose_name='Username'),
        ),
        migrations.AlterField(
            model_name='post',
            name='com',
            field=models.BooleanField(default=0, verbose_name='Users Can Comment'),
        ),
        migrations.AlterField(
            model_name='safahat',
            name='html',
            field=models.CharField(default='page.html', max_length=200, verbose_name='Format File Name'),
        ),
        migrations.AlterField(
            model_name='setting',
            name='introduction',
            field=models.TextField(verbose_name='Short Introduction'),
        ),
        migrations.AlterField(
            model_name='setting',
            name='moshtari',
            field=models.BooleanField(default=1, verbose_name='Customers Club'),
        ),
        migrations.AlterField(
            model_name='submenu',
            name='status',
            field=models.IntegerField(choices=[(2, 'Page'), (1, 'Address'), (0, 'Category')], default=0, verbose_name='Status'),
        ),
        migrations.AlterField(
            model_name='task_manegar',
            name='status',
            field=models.IntegerField(choices=[(1, 'Completing'), (0, 'Incomplete!'), (3, 'Complete!')], default=0, verbose_name='Status'),
        ),
        migrations.AlterField(
            model_name='theme',
            name='name',
            field=models.CharField(max_length=200, verbose_name='Format Name'),
        ),
        migrations.AlterField(
            model_name='time',
            name='in_date',
            field=models.DateField(verbose_name='Login Date'),
        ),
        migrations.AlterField(
            model_name='time',
            name='out_date',
            field=models.DateField(null=True, verbose_name='Logout Date'),
        ),
    ]
|
17,260 | 882bed3135750bf8e6995dd1c6c0c9621dc3f02a | import torch
import numpy as np
from sklearn.decomposition import KernelPCA
from sklearn.manifold import TSNE
from sklearn.cluster import MiniBatchKMeans
from preprocess import *
from data import *
from model import *
from torch.utils.data import DataLoader
def inference(X, model, batch_size=256):
    """
    Run the trained encoder over X and collect the latent vectors.
    :param X: raw images (run through `preprocess` before batching)
    :param model: trained autoencoder; forward returns (latent, reconstruction)
    :param batch_size: mini-batch size
    :return: latents - 2-D numpy array, one flattened latent vector per image;
        fed to `predict` for clustering
    """
    X = preprocess(X)
    dataset = ImageDataset(X)
    dataloader = DataLoader(dataset, batch_size=batch_size, shuffle=False)
    latents = []
    for i, x in enumerate(dataloader):
        x = torch.FloatTensor(x)
        vec, img = model(x.cuda())
        if i == 0:
            # detach() cuts the autograd graph so no gradients are tracked
            latents = vec.view(img.size()[0], -1).cpu().detach().numpy()
        else:
            latents = np.concatenate((latents, vec.view(img.size()[0], -1).cpu().detach().numpy()), axis=0)
    print('Latents Shape:', latents.shape)
    return latents
def invert(pred):
    """Flip 0/1 labels elementwise (|pred - 1| == |1 - pred|).

    KMeans cluster ids are arbitrary, so the flipped labelling is written
    out as well and the better-scoring orientation can be kept.
    """
    return np.abs(pred - 1)
def predict(latents):
    """
    Reduce the latent vectors and cluster them into two classes.
    :param latents: output of `inference` (n_samples x latent_dim)
    :return: pred: numpy array with one 0/1 label per sample
    :return: X_embedded: the 2-D t-SNE embedding used for clustering
    """
    # First Dimension Reduction
    transformer = KernelPCA(n_components=200, kernel='rbf', n_jobs=-1)
    kpca = transformer.fit_transform(latents)
    print('First Reduction Shape:', kpca.shape)
    # Second Dimension Reduction
    X_embedded = TSNE(n_components=2).fit_transform(kpca)
    print('Second Reduction Shape:', X_embedded.shape)
    # Clustering
    pred = MiniBatchKMeans(n_clusters=2, random_state=0).fit(X_embedded)
    pred = [int(i) for i in pred.labels_]
    pred = np.array(pred)
    return pred, X_embedded
def save_prediction(pred, out_csv="prediction.csv"):
    """
    Write predictions to a two-column CSV with header ``id,label``.
    :param pred: iterable of labels; the row id is the iteration index
    :param out_csv: destination path
    :return: None
    """
    rows = ['id,label\n']
    rows.extend(f'{i},{p}\n' for i, p in enumerate(pred))
    with open(out_csv, 'w') as fh:
        fh.writelines(rows)
    print(f'Save prediction to {out_csv}.')
def test(trainX, save=True):
    """Load the trained autoencoder, embed trainX and cluster it.

    When *save* is True writes prediction.csv and a label-flipped twin
    (cluster ids from KMeans are arbitrary, so both orientations are kept).
    :return: (pred, X_embedded) as produced by `predict`
    """
    # load model
    model = AE().cuda()
    model.load_state_dict(torch.load('./checkpoints/last_checkpoint.pth'))
    model.eval()
    latents = inference(X=trainX, model=model)
    pred, X_embedded = predict(latents)
    if save is True:
        save_prediction(pred, 'prediction.csv')
        save_prediction(invert(pred), 'prediction_invert.csv')
    return pred, X_embedded
|
17,261 | f1604a263824bdd88309e096b69dc37a5d8610f7 | from bs4 import BeautifulSoup
from django import forms
import phonenumbers
def validate_mixed_content(value):
    """
    Validate HTML content to avoid mixed content warnings.

    Scans *value* for tags that load sub-resources and raises a
    ``forms.ValidationError`` listing every tag whose URL uses the
    insecure ``http://`` scheme.
    """
    targets = (
        {"tag": "img", "attr": "src"},
        {"tag": "script", "attr": "src"},
        {"tag": "iframe", "attr": "src"},
        # <link> references its resource via href, not src; with "src" the
        # original never found a URL on <link> tags, so they went unchecked.
        {"tag": "link", "attr": "href"},
    )
    soup = BeautifulSoup(value, 'html.parser')
    errors = []
    for target in targets:
        for item in soup.find_all(target["tag"]):
            url = item.get(target["attr"], '')
            # Only the scheme matters: a literal "http://" later in the URL
            # (e.g. inside a query string) is not mixed content.
            if url.startswith("http://"):
                if not errors:
                    errors.append("These tags must use https protocol:")
                errors.append("<{}>: {}".format(target["tag"], url))
    if errors:
        raise forms.ValidationError(errors)
def validate_phone_number(value):
    """Uses the phonenumbers library to try and parse the phone number and
    check for it's validity. """
    invalid = forms.ValidationError("Enter a valid phone number.")
    try:
        parsed = phonenumbers.parse(value, None)
    except phonenumbers.NumberParseException:
        raise invalid
    if not phonenumbers.is_valid_number(parsed):
        raise invalid
|
17,262 | 41d8b87315ed6cd8388127dc4a7261cd18261362 | # -*- coding: utf-8 -*-
"""
Using SQLAlchemy and Flask get db record.(GET)
"""
from flask import Flask, render_template, abort
from flaski.models import WikiContent
app = Flask(__name__)
app.config['DEBUG'] = True
# Behaviour when "/" on the running server is accessed.
# With @app.route("/hoge") you would instead define the behaviour for http://127.0.0.1:5000/hoge.
@app.route("/")
def hello():
    """Render the index page listing every WikiContent row."""
    contents = WikiContent.query.all()
    return render_template("index.html", contents=contents)
# Requesting /<title> renders show_content.html for the content whose
# title matches <title>.
@app.route("/<title>", methods=["GET"])
def show_content(title):
    """
    :param title: query string used to look up the WikiContent model
    :return: rendered page for the matching content, or 404
    """
    # Filter the wikicontent table by title (i.e. WHERE title=...); first()
    # fetches a single row, while all() would return a list of results.
    content = WikiContent.query.filter_by(title=title).first()
    if content is None:
        abort(404)
    return render_template("show_content.html", content=content)
if __name__ == "__main__":
    # Start the development server.
    app.run()
|
17,263 | bdf0b92d5b8ff762f8af4cbbebf0296a9c92dc62 | class Solution(object):
count = 0
def uniquePaths(self, m, n):
"""
:type m: int
:type n: int
:rtype: int
"""
# using dictionary to represent matrix
d = {}
# initialize the first row to 1
for i in range(m):
d[(i,0)] = 1
# initialize the first col to 1
for j in range(n):
d[(0,j)] = 1
# the no of path to each cell is addition of top and left cell
for i in range(1,m):
for j in range(1,n):
d[(i,j)] = d[(i-1,j)] + d[(i,j-1)]
return d[(m-1,n-1)]
|
17,264 | 9f57ceb5cc7a0fb6af243f0c01b4b9cea9686e3b | # for i in range(12): #rows
# for j in range(0,i): #star
# print("*",end=" ")
# print() #nextrowil pokaan
# for i in range(12,0 ,-1):
# for j in range(0,i):
# print("*", end=" ")
# print()
#1
#2 3
#4 5 6
#7 8 9 10
# def pattern(n):
# count=1
# for i in range(n):
# for j in range(0,i):
# print(count,end=" ")
# count=count+1
# print()
# pattern(5)
# 1
# 12
# 123
# 1234
# 12345
# def pattern(n):
# for i in range(n):
# count=1
# for j in range(i):
# print(count,end=" ")
# count+=1
# print()
# pattern(5)
# * * * * *
# * * * * *
# * * * * *
# * * * * *
# def pattern(n):
# for i in range(n):
# for j in range(n):
# print("*",end=" ")
# print()
# pattern(6)
# *
# * *
# * * *
# * * * *
# def pattern(n):
# space=n-1
# for i in range(n):
# for j in range(0,space):
# print(end=" ")
# space=space-1
# for j in range(0,i+1):
# print("*",end=" ")
# print()
# pattern(7)
#
# * * * * *
# * * * * *
# * * * * *
# * * * * *
# * * * * *
def pattern(n):
    """Print n rows of n stars ('* ' each); the first row is indented by
    n spaces and every following row by one additional space."""
    for row in range(n):
        print(" " * (n + row), end="")
        print("* " * n)
pattern(6) |
17,265 | fef3d36020e9c9a01e24e45885ba0cbd02345fc1 | from django.test import TestCase
from .models import HealthData,Person
from django.contrib.auth.models import User
from django.urls import reverse
class UserTestCase(TestCase):
    """Exercises the login/logout views with a plain Django auth user."""

    def create_user(self, username="guest", password="1DS8ylMMP"):
        # NOTE(review): User.objects.create() stores the raw password
        # (no hashing); acceptable here because the tests use force_login,
        # but User.objects.create_user() would be needed for real
        # credential checks - confirm before adding password-based tests.
        u = User.objects.create(username=username, password=password)
        u.save()
        return u

    def test_create_user(self):
        u = self.create_user()
        self.assertTrue(isinstance(u, User))

    def test_login_user(self):
        u = self.create_user()
        url = reverse('login')
        resp = self.client.get(url)
        # check response code
        self.assertEqual(resp.status_code, 200)
        # check 'login' in response
        self.assertIn('Login'.encode(), resp.content)
        # log the user in
        self.user = u
        self.client.force_login(self.user)
        # check response code
        resp = self.client.get(url)
        self.assertEqual(resp.status_code, 200)
        # check logout in response
        url = reverse('home')
        self.assertIn('Logout'.encode(), resp.content)

    def test_logout_user(self):
        u = self.create_user()
        # log the user in
        self.user = u
        self.client.force_login(self.user)
        # check response code
        url = reverse('login')
        resp = self.client.get(url)
        self.assertEqual(resp.status_code, 200)
        self.assertIn('Logout'.encode(), resp.content)
        # Log out
        url = reverse('logout')
        self.client.logout()
        # Check response code (assertEquals is a deprecated alias)
        resp = self.client.get(url)
        self.assertEqual(resp.status_code, 200)
        # Check login in response
        url = reverse('home')
        self.assertIn('Login'.encode(), resp.content)
class PersonTestCase(TestCase):
    """View and update tests for the Person profile model."""

    def create_person(self, name="guest", age=10, gender="Male",
                      personalheight=160, personalweight=50):
        """Create a logged-in user plus an attached Person row."""
        u = User.objects.create(username="guest", password="1DS8ylMMP")
        u.save()
        self.user = u
        self.client.force_login(self.user)
        return Person.objects.create(user=u, name=name, age=age, gender=gender,
                                     personalheight=personalheight,
                                     personalweight=personalweight)

    def test_create_person(self):
        p = self.create_person()
        self.assertTrue(isinstance(p, Person))

    def test_person_list_view(self):
        p = self.create_person()
        url = reverse('index', kwargs={'id': p.id})
        resp = self.client.get(url)
        # Check if response gives right p.id
        self.assertEqual(reverse('index', kwargs={'id': p.id}), p.get_absolute_url())
        self.assertEqual(resp.status_code, 200)
        # Check if response gives right p.name
        self.assertIn(p.name.encode(), resp.content)

    def test_person_update(self):
        p = self.create_person()
        url = reverse('update')
        resp = self.client.post(url, {'name': 'guest', 'age': 20, 'gender': 'Female',
                                      'personalheight': 162, 'personalweight': 48},
                                follow=True)
        # Check if p has been modified.  The previous assertion
        # `assertIn(p.age, resp.content)` tested whether the *byte value*
        # p.age occurred anywhere in the body (int-in-bytes semantics),
        # which passes vacuously; reload the row and compare instead.
        p.refresh_from_db()
        self.assertEqual(p.age, 20)
        # Check if redirected to the desired url
        self.assertRedirects(resp, '/ehealth/user/')

    def test_person_profile_view(self):
        p = self.create_person()
        url = reverse('user')
        resp = self.client.get(url)
        # Check Response code
        self.assertEqual(resp.status_code, 200)
        # Check if response gives right p.name
        self.assertIn(p.name.encode(), resp.content)

    def test_person_history_view(self):
        p = self.create_person()
        url = reverse('history')
        resp = self.client.get(url)
        # Check Response code
        self.assertEqual(resp.status_code, 200)
        # Check if response gives right p.name
        self.assertIn(p.name.encode(), resp.content)
class HealthDataTestCase(TestCase):
    """Creation and insertion-view tests for HealthData readings."""

    def create_healthdata(self, originalEMG=['150', '150'], frequencyEMG=['30', '30'],
                          mediafreq=130, temperature=37, spO2=90, pulse=100, fati=0):
        # NOTE(review): mutable default arguments are shared between calls;
        # harmless here because they are never mutated.
        u = User.objects.create(username="guest", password="1DS8ylMMP")
        u.save()
        p = Person.objects.create(user=u, name="guest", age="10", gender="Male",
                                  personalheight="160", personalweight="50")
        p.save()
        return HealthData.objects.create(person=p, originalEMG=originalEMG,
                                         frequencyEMG=frequencyEMG, mediafreq=mediafreq,
                                         temperature=temperature, spO2=spO2,
                                         pulse=pulse, fati=fati)

    def test_create_healthdata(self):
        h = self.create_healthdata()
        self.assertTrue(isinstance(h, HealthData))

    def test_insert_healthdata(self):
        h = self.create_healthdata()
        url = reverse('insertion')
        resp = self.client.get(url, {'originalEMG': ['150', '150'], 'frequencyEMG': ['30', '30'],
                                     'mediafreq': 130, 'temperature': 37, 'spO2': 90,
                                     'pulse': 100, 'fati': 0})
        # Check Response code
        self.assertEqual(resp.status_code, 200)
        # Check if the request stimulated by the response contains element query_string
        self.assertIn('QUERY_STRING', resp.request)
|
17,266 | 279bf0fb43c134902f5c9ef050f5654422ca3f8f | """
Loops
for and while
Range
"""
# print(list(range(2,10, 1)))
def fizz_buzz(i):
    """Return the FizzBuzz word for *i*: multiples of both 3 and 5 map to
    "FIZZ_BUZZ", of 3 to "FIZZ", of 5 to "BUZZ", anything else to i itself."""
    if i % 15 == 0:
        return "FIZZ_BUZZ"
    if i % 3 == 0:
        return "FIZZ"
    if i % 5 == 0:
        # was "FUZZ": typo, inconsistent with this file's own "FIZZ_BUZZ"
        return "BUZZ"
    return i

for i in range(1, 31):
    print(fizz_buzz(i))

# Not printing a new line after print command
print("hello")
print("hi")
print("hello", end="")
print("hi", end ="&&") |
17,267 | 52f6b6c5d2b12af2496705c675c8e35d3b66d594 | # -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
from odoo import fields, models
class ProductTemplate(models.Model):
    """Extension point for product.template (Agora sync field kept disabled)."""
    _inherit = 'product.template'

    # agora_sync = fields.Boolean(string='Synchronize with Agora')
17,268 | 797b2ac7331755e582e110b037f11f8ba4a9c3d9 | # -*- coding: utf-8 -*-
# Generated by Django 1.11 on 2018-03-14 20:29
from __future__ import unicode_literals
from django.db import migrations, models
import django.utils.timezone
class Migration(migrations.Migration):
    """Adds Event.created and Place.zoom; relaxes several Place fields."""

    dependencies = [
        ('gatheros_event', '0008_auto_20180306_1024'),
    ]

    operations = [
        migrations.AddField(
            model_name='event',
            name='created',
            # The default is only used to backfill existing rows
            # (preserve_default=False drops it afterwards).
            field=models.DateTimeField(auto_now_add=True, default=django.utils.timezone.now, verbose_name='criado em'),
            preserve_default=False,
        ),
        migrations.AddField(
            model_name='place',
            name='zoom',
            # NOTE(review): integer default on a CharField - Django coerces it,
            # but default='18' would match the field type; confirm before changing.
            field=models.CharField(blank=True, default=18, max_length=4, null=True, verbose_name='zoom do mapa'),
        ),
        migrations.AlterField(
            model_name='place',
            name='lat',
            field=models.CharField(blank=True, max_length=50, null=True, verbose_name='latitude'),
        ),
        migrations.AlterField(
            model_name='place',
            name='long',
            field=models.CharField(blank=True, max_length=50, null=True, verbose_name='longitude'),
        ),
        migrations.AlterField(
            model_name='place',
            name='name',
            field=models.CharField(blank=True, max_length=255, null=True, verbose_name='nome do local'),
        ),
        migrations.AlterField(
            model_name='place',
            name='show_location',
            field=models.BooleanField(default=False, help_text='Marque para mostrar a localização do evento no site.', verbose_name='ativar localização no site'),
        ),
    ]
|
17,269 | 8229f74670bf13b908e78908395b0659925cc11f | import numpy as np
from sklearn.ensemble import RandomForestClassifier as SKRandomForestClassifier
from sklearn.feature_selection import SelectFromModel as SkSelect
from skopt.space import Real
from .feature_selector import FeatureSelector
class RFClassifierSelectFromModel(FeatureSelector):
    """Selects top features based on importance weights using a Random Forest classifier."""

    name = "RF Classifier Select From Model"
    # Search space for hyperparameter tuning (skopt).
    hyperparameter_ranges = {
        "percent_features": Real(0.01, 1),
        "threshold": ["mean", -np.inf],
    }

    def __init__(
        self,
        number_features=None,
        n_estimators=10,
        max_depth=None,
        percent_features=0.5,
        threshold=-np.inf,
        n_jobs=-1,
        random_seed=0,
        **kwargs
    ):
        """
        :param number_features: total feature count; with percent_features it
            caps how many features the selector may keep (None = no cap)
        :param n_estimators: trees in the importance-scoring forest
        :param max_depth: maximum tree depth (None = unbounded)
        :param percent_features: fraction of number_features to keep
        :param threshold: importance cutoff passed to SelectFromModel
        :param n_jobs: parallelism for the forest fit
        :param random_seed: seed for the forest
        """
        parameters = {
            "number_features": number_features,
            "n_estimators": n_estimators,
            "max_depth": max_depth,
            "percent_features": percent_features,
            "threshold": threshold,
            "n_jobs": n_jobs,
        }
        parameters.update(kwargs)

        estimator = SKRandomForestClassifier(
            random_state=random_seed,
            n_estimators=n_estimators,
            max_depth=max_depth,
            n_jobs=n_jobs,
        )
        # At least one feature is always kept when number_features is given.
        max_features = (
            max(1, int(percent_features * number_features)) if number_features else None
        )
        # NOTE(review): **kwargs is recorded in `parameters` AND forwarded to
        # SelectFromModel - unknown keys would raise there; confirm intended.
        feature_selection = SkSelect(
            estimator=estimator,
            max_features=max_features,
            threshold=threshold,
            **kwargs
        )
        super().__init__(
            parameters=parameters,
            component_obj=feature_selection,
            random_seed=random_seed,
        )
|
17,270 | 028cc099b91cba75bd8a43e4237258e3a1ac614b | from flask_wtf import FlaskForm
from wtforms import StringField, PasswordField, BooleanField, SubmitField, TextAreaField
from wtforms.validators import DataRequired, Length
class LoginForm(FlaskForm):
    """Sign-in form: credentials plus a remember-me flag."""
    username = StringField('Username', validators=[DataRequired(), Length(5, 20)])
    password = PasswordField('Password', validators=[DataRequired(), Length(5, 20)])
    remember = BooleanField('Remember me')
    submit = SubmitField('Log in')
class RegisterForm(FlaskForm):
    """New-account form; same length limits as LoginForm."""
    username = StringField('Username', validators=[DataRequired(), Length(5, 20)])
    password = PasswordField('Password', validators=[DataRequired(), Length(5, 20)])
    submit = SubmitField('Register')
class CreateForm(FlaskForm):
    """Form for creating a post: title plus free-text body."""
    title = StringField('Title', validators=[DataRequired(), Length(5, 70)])
    body = TextAreaField('Body', validators=[DataRequired()])
    submit = SubmitField('Create')
class UpdateForm(FlaskForm):
    """Form for editing a post; mirrors CreateForm with an Update button."""
    title = StringField('Title', validators=[DataRequired(), Length(5, 70)])
    body = TextAreaField('Body', validators=[DataRequired()])
    submit = SubmitField('Update')
class DeleteForm(FlaskForm):
    """Confirmation-only form (CSRF token plus a Delete button)."""
    submit = SubmitField('Delete')
|
17,271 | 2d57b3c5645c84dcd4d1fdb88efb7e55abc34230 | from skbio.draw import boxplots
# Render side-by-side box plots of the two samples; returns the figure.
fig = boxplots([[2, 2, 1, 3, 4, 4.2, 7], [0, -1, 4, 5, 6, 7]])
|
17,272 | bc380ee3a39b04dd558a71920f61cc2ef9cf8b3b | # Make a list that includes at least three people you
# would like to invite to dinner.
# Then use your list to print a message to each person,
# inviting them to dinner.
# NOTE(review): the invitation strings below contain "sense" where "since"
# is meant; they are runtime output and are left untouched here.
idol_guest_list = ["arnold schwarzenegger", "babe ruth",
                   "sean connery"]
invitation = f"Mr. {idol_guest_list[0].title()}, " \
             f"Mr. {idol_guest_list[1].title()}, and " \
             f"Mr. {idol_guest_list[2].title()}, I would like " \
             f"to invite you to dinner because " \
             f"\nyou are idols of mine and I have admired " \
             f"\nyou and looked up to you sense I was a " \
             f"little boy."
print(invitation)
# You just heard that one of your guests can't make the
# dinner, so you need to send out a new set of invitations.
# You will have to think of someone else to invite.
# Start with your program from Exercise 3 - 4.
# Add a print() call at the end of your program stating
# the name of the guest who can not make it.
message = f"I am sorry to inform you but " \
          f"{idol_guest_list[1].title()} will not be able to " \
          f"make it for dinner."
print(message)
# Modify your list, replacing the name of the guest who
# can't make it with the name of the new person you
# are inviting.
idol_guest_list[1] = "keanu reeves"
print(idol_guest_list)
# Print a second set of invitation messages,
# one for each person who is still in your list.
invitation1 = f"Mr. {idol_guest_list[0].title()}, I would " \
              f"like to invite you to dinner because " \
              f"\nyou have been an idol of mine and I have " \
              f"\nadmired you and looked up to you sense I " \
              f"was a little boy."
invitation2 = f"Mr. {idol_guest_list[1].title()}, I would " \
              f"like to invite you to dinner because " \
              f"\nyou have been an idol of mine and I have " \
              f"\nadmired you and looked up to you sense I " \
              f"was a little boy."
invitation3 = f"Mr. {idol_guest_list[2].title()}, I would " \
              f"like to invite you to dinner because " \
              f"\nyou have been an idol of mine and I have " \
              f"\nadmired you and looked up to you sense I " \
              f"was a little boy."
print(invitation1)
print(invitation2)
print(invitation3)
# You just found a bigger dinner table, so now more
# space is available. Think of three more guests to invite
# to dinner.
# Start with your program from Exercise 3 - 4 or 3 - 5.
# Add a print() call to the end of your program informing
# people that you found a bigger dinner table.
invitation4 = f"Mr. {idol_guest_list[0].title()}, " \
              f"Mr. {idol_guest_list[1].title()}," \
              f"\nand Mr. {idol_guest_list[2]}, I would like " \
              f"\nto inform you that I have found a bigger " \
              f"dinner table and can now seat three more " \
              f"guests for dinner."
print(invitation4)
# Use the insert() function to add one new guest to the
# beginning of your list.
idol_guest_list.insert(0, "Harrison Ford")
print(idol_guest_list)
# Use the insert() function to add one new guest to the
# middle of your list.
idol_guest_list.insert(2, "Sylvester Stallone")
print(idol_guest_list)
# Use append to add one new guest to the end of your
# list.
idol_guest_list.append("Jean-Claude Van Damme")
print(idol_guest_list)
# Print a new set of invitation messages,
# one for each person.
invitation5 = f"Mr. {idol_guest_list[0].title()}, I would " \
              f"like to invite you to dinner because " \
              f"\nyou have been an idol of mine and I have " \
              f"\nadmired you and looked up to you sense I " \
              f"was a little boy."
invitation6 = f"Mr. {idol_guest_list[1].title()}, I would " \
              f"like to invite you to dinner because " \
              f"\nyou have been an idol of mine and I have " \
              f"\nadmired you and looked up to you sense I " \
              f"was a little boy."
invitation7 = f"Mr. {idol_guest_list[2].title()}, I would " \
              f"like to invite you to dinner because " \
              f"\nyou have been an idol of mine and I have " \
              f"\nadmired you and looked up to you sense I " \
              f"was a little boy."
invitation8 = f"Mr. {idol_guest_list[3].title()}, I would " \
              f"like to invite you to dinner because " \
              f"\nyou have been an idol of mine and I have " \
              f"\nadmired you and looked up to you sense I " \
              f"was a little boy."
invitation9 = f"Mr. {idol_guest_list[4].title()}, I would " \
              f"like to invite you to dinner because " \
              f"\nyou have been an idol of mine and I have " \
              f"\nadmired you and looked up to you sense I " \
              f"was a little boy."
invitation10 = f"Mr. {idol_guest_list[5].title()}, I would " \
               f"like to invite you to dinner because " \
               f"\nyou have been an idol of mine and I have " \
               f"\nadmired you and looked up to you sense I " \
               f"was a little boy."
print(invitation5)
print(invitation6)
print(invitation7)
print(invitation8)
print(invitation9)
print(invitation10)
# You just found out that your new dinner table won't
# arrive in time for the dinner, and you have space for
# only two guests.
# Start with your program from Exercise 3 - 6.
# Add a new line that prints a message saying that you
# can invite only two people for dinner.
declined_invitations = f"Mr. {idol_guest_list[0].title()}, " \
                       f"Mr. {idol_guest_list[1].title()}," \
                       f"\nMr. {idol_guest_list[2].title()}, " \
                       f"Mr. {idol_guest_list[3].title()}," \
                       f"\nMr. {idol_guest_list[4].title()}, " \
                       f"Mr. {idol_guest_list[5].title()}," \
                       f"\nI am sorry to inform you but the " \
                       f"\ndinner table will not arrive in time " \
                       f"\nfor the dinner, and I will not be able " \
                       f"\nto accommodate all six of your for " \
                       f"dinner."
print(declined_invitations)
# Use pop() to remove guests from your list one at a
# time until only two names remain in your list. Each
# time you pop a name from your list, print a message
# to that person letting them know you can't invite them
# to dinner.
idol_guest_declined1 = idol_guest_list.pop(0)
print(idol_guest_list)
print(f"Mr. {idol_guest_declined1.title()}, I am sorry "
      f"\nto inform you but the dinner table will not "
      f"\narrive in time for dinner, and I will no be able "
      f"\nto accommodate you for dinner.")
idol_guest_declined2 = idol_guest_list.pop(1)
print(idol_guest_list)
print(f"Mr. {idol_guest_declined2.title()}, I am sorry "
      f"\nto inform you but the dinner table will not "
      f"\narrive in time for dinner, and I will no be able "
      f"\nto accommodate you for dinner.")
idol_guest_declined3 = idol_guest_list.pop(1)
print(idol_guest_list)
print(f"Mr. {idol_guest_declined3.title()}, I am sorry "
      f"\nto inform you but the dinner table will not "
      f"\narrive in time for dinner, and I will no be able "
      f"\nto accommodate you for dinner.")
idol_guest_declined4 = idol_guest_list.pop(2)
print(idol_guest_list)
print(f"Mr. {idol_guest_declined4.title()}, I am sorry "
      f"\nto inform you but the dinner table will not "
      f"\narrive in time for dinner, and I will no be "
      f"\nable to accommodate you for dinner.")
# Print a message to the two people still on your list,
# letting them know they're still invited
extinguished_guests = f"Mr. {idol_guest_list[0].title()} " \
                      f"and Mr. {idol_guest_list[1].title()}," \
                      f"\nI would like to inform you that you " \
                      f"\nare still invited to the dinner."
print(extinguished_guests)
# Use del to remove the last two items from your list,
# so that you have an empty list. Print your list to make
# sure you actually have an empty list at the end of
# your program.
del idol_guest_list[0]
print(idol_guest_list)
del idol_guest_list[0]
print(idol_guest_list)
# Working with one of the programs from Exercises 3 -
# 4 through 3 - 7 (page 42), use len() to print a message
# indicating the number of people you are inviting to
# dinner.
idol_guest_list.append("arnold schwarzenegger")
print(idol_guest_list)
idol_guest_list.append("sean connery")
print(idol_guest_list)
message = f"Mr. {idol_guest_list[0].title()} and Mr. " \
          f"{idol_guest_list[1].title()},I would like to " \
          f"\nextend a dinner invitation to you because you " \
          f"\nare idols of mine, and because I have " \
          f"\nadmired you and looked up to you sense I " \
          f"\nwas a little boy."
print(message)
print("The length of my idol guest list is ", len(idol_guest_list))
|
17,273 | 771f80a8e54aa80f60754bd1a4ff13091794f664 | # -*- coding: utf-8 -*-
# Generated by Django 1.9.1 on 2016-02-05 10:15
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Makes Entry.slug and Tag.slug non-editable (auto-generated) fields."""

    dependencies = [
        ('blog', '0009_tag_description'),
    ]

    operations = [
        migrations.AlterField(
            model_name='entry',
            name='slug',
            field=models.SlugField(default='', editable=False, max_length=60),
        ),
        migrations.AlterField(
            model_name='tag',
            name='slug',
            field=models.SlugField(editable=False, max_length=20),
        ),
    ]
|
17,274 | 1ed9280f9b103cc2f126aa287c633b5300f0e193 | # python3
class BuildHeap:
    """Builds a binary min-heap in place, recording every swap performed.

    I/O format (reading/printing): first input line is n, the second the
    n array elements; the answer printed is the number of swaps followed
    by one "i j" pair per swap, in execution order.
    """

    def __init__(self):
        self.swaps = []   # (parent, child) index pairs, in order performed
        self.data = []    # the array being heapified
        self.size = 0     # index of the last element that is inside the heap

    def reading(self):
        """Read n and the array from stdin."""
        n = int(input())
        self.data = list(map(int, input().split()))
        self.size = len(self.data) - 1
        assert len(self.data) == n

    def printing(self):
        """Print the swap count and each recorded swap pair."""
        print(len(self.swaps))
        for parent, child in self.swaps:
            print(parent, child)

    def left_child(self, i):
        return 2 * i + 1

    def right_child(self, i):
        return 2 * i + 2

    def min_heapify(self, i):
        """Sift data[i] down until the min-heap property holds below i.

        Only indices <= self.size are considered part of the heap.  (The
        previous bound, len(self.data) - 1, ignored the shrinking heap,
        so heap_sort re-heapified elements already in final position.)
        """
        left = self.left_child(i)
        right = self.right_child(i)
        smallest = i
        if left <= self.size and self.data[left] < self.data[smallest]:
            smallest = left
        if right <= self.size and self.data[right] < self.data[smallest]:
            smallest = right
        if smallest != i:
            self.swaps.append((i, smallest))
            self.data[i], self.data[smallest] = self.data[smallest], self.data[i]
            self.min_heapify(smallest)

    def build_heap(self):
        """Heapify the whole array bottom-up in O(n)."""
        # Keep size in sync even when data was assigned directly.
        self.size = len(self.data) - 1
        # (len - 2) // 2 is the last index that has a child.
        for i in range((len(self.data) - 2) // 2, -1, -1):
            self.min_heapify(i)

    def heap_sort(self):
        """Heap-sort after calculation(): repeatedly swap the root (the
        minimum) with the last in-heap element and shrink the heap, leaving
        data in descending order.  (Previously swapped data[i] instead of
        the root and never shrank the heapify bound, so the result was not
        sorted.)"""
        self.calculation()
        for _ in range(len(self.data)):
            self.data[0], self.data[self.size] = self.data[self.size], self.data[0]
            self.size -= 1
            self.min_heapify(0)

    def calculation(self):
        """Read input, build the heap and print the recorded swaps."""
        self.reading()
        self.build_heap()
        self.printing()
if __name__ == '__main__':
    # Reads the problem input from stdin and prints the swap list.
    b1 = BuildHeap()
    # b1.heap_sort()
    b1.calculation()
|
17,275 | 4431c11bd1082e553382dae4c6586136915bc0c6 | # -*- coding: utf-8 -*-
# Generated by Django 1.11 on 2018-11-16 07:48
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Creates CountingTable: daily hit/favourite/apply counters per SupportBusiness."""

    dependencies = [
        ('supporting_business', '0016_auto_20181109_1921'),
    ]

    operations = [
        migrations.CreateModel(
            name='CountingTable',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('date', models.DateField(blank=True, null=True)),
                ('hit_num', models.IntegerField()),
                ('fav_num', models.IntegerField()),
                ('apply_num', models.IntegerField()),
                ('support_business', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='supporting_business.SupportBusiness')),
            ],
        ),
    ]
|
17,276 | e62654b95574c88e3e4ace157a842c3e63e20378 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Oct 4 17:37:19 2018
@author: son
"""
import numpy as np
import matplotlib.pylab as plt
def numerical_diff(f, x):
    """Central-difference estimate of f'(x) with step 1e-4 (error O(h^2))."""
    step = 1e-4
    rise = f(x + step) - f(x - step)
    return rise / (2 * step)
def origin_numerical_diff(f, x):
    """Naive forward-difference estimate of f'(x); kept for comparison with
    the central-difference version (error only O(h))."""
    step = 1e-4
    return (f(x + step) - f(x)) / step
def numerical_gradient(f, x):
    """Numerically evaluate the gradient of *f* at *x* by central differences
    over every element of x.

    :param f: scalar function of the whole array
    :param x: numpy array of any shape; integer arrays are promoted to float
        (previously the in-place ``tmp_val + h`` truncated on integer dtypes,
        silently producing a zero gradient)
    :return: float array of the same shape as x
    """
    h = 1e-4
    # No copy is made when x is already float64, so behaviour (including the
    # temporary in-place perturbation, restored below) is unchanged there.
    x = np.asarray(x, dtype=float)
    grad = np.zeros_like(x)
    it = np.nditer(x, flags=['multi_index'])
    while not it.finished:
        idx = it.multi_index
        tmp_val = x[idx]
        x[idx] = tmp_val + h
        fxh1 = f(x)
        x[idx] = tmp_val - h
        fxh2 = f(x)
        grad[idx] = (fxh1 - fxh2) / (2 * h)
        x[idx] = tmp_val  # restore the perturbed element
        it.iternext()
    return grad
def gradient_descent(f, init_x, lr=0.1, step_num=100):
    """Minimise *f* by vanilla gradient descent and plot the trajectory.

    :param f: scalar objective taking the whole parameter array
    :param init_x: starting point; NOTE - updated in place (``x -= lr*grad``),
        so the caller's array is mutated
    :param lr: learning rate
    :param step_num: number of descent steps
    :return: the final parameter array (same object as init_x)
    """
    x = init_x
    x_his = []  # history of visited points, for plotting
    # print(x_his.shape)
    for i in range(step_num):
        grad = numerical_gradient(f, x)
        x -= lr * grad
        # print(x)
        x_his.append(x.copy())

    x_ = np.array(x_his)
    # Side effect: draws the descent path on a new matplotlib figure
    # (assumes x has at least two components - TODO confirm).
    plt.figure()
    plt.plot(x_[:, 0], x_[:, 1], 'o', color='#990000')  # ,headwidth=10,scale=40,color="#444444"
    # plt.plot(x_, 'o')  # ,headwidth=10,scale=40,color="#444444"
    plt.xlim([-50, 50])
    plt.ylim([-50, 50])
    plt.xlabel('x0')
    plt.ylabel('x1')
    plt.grid()
    return x
17,277 | 83ac2b1258df8c46333f7d7a933eb58eebdc0435 | # -*- coding: utf-8 -*-
from pyfr.integrators.dual.base import BaseDualIntegrator
class BaseDualPseudoStepper(BaseDualIntegrator):
    """Base class for dual time-stepping pseudo-steppers: wraps the system
    RHS with the physical-time source term and tracks solver statistics."""

    def collect_stats(self, stats):
        super().collect_stats(stats)

        # Total number of RHS evaluations
        stats.set('solver-time-integrator', 'nfevals', self._stepper_nfevals)

        # Total number of pseudo-steps
        stats.set('solver-time-integrator', 'npseudosteps', self.npseudosteps)

    def _rhs_with_dts(self, t, uin, fout, c=1):
        """Evaluate fout = -∇·f(uin) minus the scaled dual-time source c*dQ/dt."""
        # Compute -∇·f
        self.system.rhs(t, uin, fout)

        # Coefficients for the dual-time source term
        svals = [c*sc for sc in self._dual_time_source]

        # Source addition -∇·f - dQ/dt
        axnpby = self._get_axnpby_kerns(len(svals) + 1, subdims=self._subdims)
        self._prepare_reg_banks(fout, self._idxcurr, *self._source_regidx)
        self._queue % axnpby(1, *svals)

    def finalise_step(self, currsoln):
        """Rotate the physical-time source registers and store currsoln."""
        pnreg, dtsnreg = self._pseudo_stepper_nregs, len(self._dual_time_source)

        # Rotate the source registers to the right by one
        self._regidx[pnreg:pnreg + dtsnreg - 1] = (self._source_regidx[-1:]
                                                   + self._source_regidx[:-1])

        # Copy the current soln into the first source register
        self._add(0, self._regidx[pnreg], 1, currsoln)
class DualPseudoEulerStepper(BaseDualPseudoStepper):
    """First-order explicit Euler pseudo-stepper."""

    pseudo_stepper_name = 'euler'

    @property
    def _stepper_nfevals(self):
        # One RHS evaluation per pseudo-step
        return self.nsteps

    @property
    def _pseudo_stepper_nregs(self):
        return 2

    @property
    def _pseudo_stepper_order(self):
        return 1

    def step(self, t, dt, dtau):
        """Advance one pseudo-step: r1 = r0 + dtau*(-∇·f(r0) - dQ/dt)."""
        add = self._add
        rhs = self._rhs_with_dts
        r0, r1 = self._stepper_regidx

        # Ensure r0 references the bank holding the current solution
        if r0 != self._idxcurr:
            r0, r1 = r1, r0

        rhs(t, r0, r1, c=1/dt)
        add(0, r1, 1, r0, dtau, r1)

        return r1, r0
class DualPseudoTVDRK3Stepper(BaseDualPseudoStepper):
    """Three-stage TVD (SSP) third-order Runge-Kutta pseudo-stepper."""

    pseudo_stepper_name = 'tvd-rk3'

    @property
    def _stepper_nfevals(self):
        # Three RHS evaluations per pseudo-step
        return 3*self.nsteps

    @property
    def _pseudo_stepper_nregs(self):
        return 3

    @property
    def _pseudo_stepper_order(self):
        return 3

    def step(self, t, dt, dtau):
        """Advance one pseudo-time step of size dtau within physical step dt."""
        add = self._add
        rhs = self._rhs_with_dts

        # Get the bank indices for pseudo-registers (n+1,m; n+1,m+1; rhs),
        # where m = pseudo-time and n = real-time
        r0, r1, r2 = self._stepper_regidx

        # Ensure r0 references the bank containing u(n+1,m)
        if r0 != self._idxcurr:
            r0, r1 = r1, r0

        # First stage;
        # r2 = -∇·f(r0) - dQ/dt; r1 = r0 + dtau*r2
        rhs(t, r0, r2, c=1/dt)
        add(0, r1, 1, r0, dtau, r2)

        # Second stage;
        # r2 = -∇·f(r1) - dQ/dt; r1 = 3/4*r0 + 1/4*r1 + 1/4*dtau*r2
        rhs(t, r1, r2, c=1/dt)
        add(1/4, r1, 3/4, r0, dtau/4, r2)

        # Third stage;
        # r2 = -∇·f(r1) - dQ/dt; r1 = 1/3*r0 + 2/3*r1 + 2/3*dtau*r2
        rhs(t, r1, r2, c=1/dt)
        add(2/3, r1, 1/3, r0, 2*dtau/3, r2)

        # Return the index of the bank containing u(n+1,m+1)
        return r1, r0
class DualPseudoRK4Stepper(BaseDualPseudoStepper):
    """Classical four-stage RK4 pseudo-stepper (register-minimised form)."""

    pseudo_stepper_name = 'rk4'

    @property
    def _stepper_nfevals(self):
        # Four RHS evaluations per pseudo-step
        return 4*self.nsteps

    @property
    def _pseudo_stepper_nregs(self):
        return 3

    @property
    def _pseudo_stepper_order(self):
        return 4

    def step(self, t, dt, dtau):
        """Advance one pseudo-time step of size dtau within physical step dt."""
        add = self._add
        rhs = self._rhs_with_dts

        # Get the bank indices for pseudo-registers (n+1,m; n+1,m+1; rhs),
        # where m = pseudo-time and n = real-time
        r0, r1, r2 = self._stepper_regidx

        # Ensure r0 references the bank containing u(n+1,m)
        if r0 != self._idxcurr:
            r0, r1 = r1, r0

        # First stage; r1 = -∇·f(r0) - dQ/dt;
        rhs(t, r0, r1, c=1/dt)

        # Second stage; r2 = r0 + dtau/2*r1; r2 = -∇·f(r2) - dQ/dt;
        add(0, r2, 1, r0, dtau/2, r1)
        rhs(t, r2, r2, c=1/dt)

        # As no subsequent stages depend on the first stage we can
        # reuse its register to start accumulating the solution with
        # r1 = r0 + dtau/6*r1 + dtau/3*r2
        add(dtau/6, r1, 1, r0, dtau/3, r2)

        # Third stage; here we reuse the r2 register
        # r2 = r0 + dtau/2*r2 - dtau/2*dQ/dt
        # r2 = -∇·f(r2) - dQ/dt;
        add(dtau/2, r2, 1, r0)
        rhs(t, r2, r2, c=1/dt)

        # Accumulate; r1 = r1 + dtau/3*r2
        add(1, r1, dtau/3, r2)

        # Fourth stage; again we reuse r2
        # r2 = r0 + dtau*r2
        # r2 = -∇·f(r2) - dQ/dt;
        add(dtau, r2, 1, r0)
        rhs(t, r2, r2, c=1/dt)

        # Final accumulation r1 = r1 + dtau/6*r2 = u(n+1,m+1)
        add(1, r1, dtau/6, r2)

        # Return the index of the bank containing u(n+1,m+1)
        return r1, r0
|
17,278 | 31b0ed8c56d1b8c773c74b8614f72a9d1d6b6995 | #!/usr/bin/env python
# Bkgd configuration file for limit-setting produced with makeANTables.py
# Background yields: N = observed event count; alpha = normalisation /
# transfer factor applied to N (presumably control-to-signal-region -
# TODO confirm against makeANTables.py, which generated this file).
backgrounds = {
    'Fake2016DEFGH' : {
        'N' : '1634',
        'alpha' : '0.000558720822988',
    },
    'Elec2016DEFGH' : {
        'N' : '32',
        'alpha' : '0.0493887359699',
    },
    'Muon2016DEFGH' : {
        'N' : '25',
        'alpha' : '0.0291223965337',
    },
    'Tau2016DEFGH' : {
        'N' : '10',
        'alpha' : '0.00246390955317',
    },
}

# Systematic uncertainties, expressed as multiplicative factors (1 + frac).
background_systematics = {
    'Fake2016DEFGH_alpha' : { # error on alpha
        'value' : '1.37812487145',
        'background' : 'Fake2016DEFGH',
    },
    'Elec2016DEFGH_alpha' : { # error on alpha
        'value' : '1.01398595188',
        'background' : 'Elec2016DEFGH',
    },
    'Muon2016DEFGH_alpha' : { # error on alpha
        'value' : '1.00541901138',
        'background' : 'Muon2016DEFGH',
    },
    'Tau2016DEFGH_alpha' : { # error on alpha
        'value' : '1.42810786104',
        'background' : 'Tau2016DEFGH',
    },
    'Fake2016DEFGH_syst' : { # error on fake track rate assumption
        'value' : str (1.0 + 8.58441294376118 / 100.0),
        'background' : 'Fake2016DEFGH',
    },
    'Elec2016DEFGH_energy' : { # error on energy assumption
        'value' : str (1.0 + 11.7113892531 / 100.0),
        'background' : 'Elec2016DEFGH',
    },
    'Tau2016DEFGH_energy' : { # error on energy assumption
        'value' : str (1.0 + 16.8609344527 / 100.0),
        'background' : 'Tau2016DEFGH',
    },
}
|
17,279 | a1ed716a95bf6a731d82146e37629255e141ec92 | # Generated by Django 3.0.8 on 2020-07-29 21:28
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Auto-generated migration: creates the Access model linking a Person
    to a Building, both with CASCADE delete behaviour."""

    dependencies = [
        ('iot', '0003_iotdevice_circuit'),
    ]
    operations = [
        migrations.CreateModel(
            name='Access',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                # NOTE(review): max_length on a ForeignKey appears to have no
                # effect — confirm and consider removing in a later migration.
                ('building', models.ForeignKey(max_length=255, on_delete=django.db.models.deletion.CASCADE, to='iot.Building')),
                ('person', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='iot.Person')),
            ],
        ),
    ]
|
17,280 | 02301920f1023c58a0ed0210cbfc9848d87771c5 | from decimal import Decimal
import os
import tempfile
from django.core.urlresolvers import reverse
from django.test import Client, TestCase
from importer.models import ImportLog, ExpenseItem
from importer.tests.test_data import load_test_data
class BaseTestCase(TestCase):
    """Shared helpers for importer view tests: file-upload POSTs and
    plain GET requests with response-code assertions."""

    def send_file_upload_request(self, view_name, filename, expected_response_code=200):
        """
        Convenience method for uploading a file to a view
        :param str view_name: name of view to reverse into an URL
        :param str filename: the filename to be uploaded
        :param int expected_response_code: expected response code
        :return: Response object
        """
        url = reverse(view_name)
        client = Client()
        # The file is posted under the form field name 'file'.
        with open(filename) as file_upload:
            response = client.post(path=url, data={'file': file_upload})
        self.assertEqual(expected_response_code, response.status_code)
        return response

    def send_request(self, view_name, params=None, expected_response_code=200):
        """
        Convenience method to send a GET Request to a View.
        :param str view_name: name of view to reverse into an URL
        :param dict params: the GET parameters (also used as URL kwargs)
        :param int expected_response_code: the expected response code
        :return: the Response object
        """
        # params doubles as the URL kwargs for reverse() and as the query dict.
        url = reverse(viewname=view_name, kwargs=params)
        client = Client()
        response = client.get(path=url, data=params)
        self.assertEqual(expected_response_code, response.status_code)
        return response
class TestCsvImportView(BaseTestCase):
    """Integration test of the CSV import endpoint."""

    def test_file_upload(self):
        """
        Integration Test of a successful File Upload View and CVS persistence.
        Expected -
        - data is written to the DB ( brief test )
        - redirection response code ( 302 )
        - URL redirection will be to the named URL 'upload_summary_view' with the PK of the ImportLog
        :return:
        """
        # NOTE(review): NamedTemporaryFile defaults to binary mode, and
        # unicode literals are written to it — this is Python-2-era code
        # (the file also imports django.core.urlresolvers); on Python 3 it
        # would need mode='w'.
        with tempfile.NamedTemporaryFile() as test_file:
            test_file.write(
                u'date,category,employee name,employee address,expense description,pre-tax amount,tax name,tax amount\n')
            test_file.write(
                u'12/1/2013,Travel,Don Draper,"783 Park Ave, New York, NY 10021",Taxi ride, 350.00 ,NY Sales tax, 31.06\n')
            # flush so the view reads the bytes just written
            test_file.flush()
            response = self.send_file_upload_request(view_name='csv_import_view', filename=test_file.name)
        actual_import_logs = ImportLog.objects.all()
        self.assertEqual(1, len(actual_import_logs))
        actual_import_log = actual_import_logs[0]
        # The ImportLog should record the basename of the uploaded file.
        expected_file_name = os.path.basename(test_file.name)
        self.assertEqual(expected_file_name, actual_import_log.file_name)
        expense_items = ExpenseItem.objects.all()
        self.assertEqual(1, len(expense_items))
        self.assertEqual('Don Draper', expense_items[0].employee.name)
        self.assertEqual('{"upload_id": 1}', response.content)
class TestUploadSummaryView(BaseTestCase):
    """Tests for the upload summary view, using the shared test fixtures."""

    def setUp(self):
        load_test_data()

    def test_summary_success(self):
        """
        A Success path of retrieving summary data.
        Expected:
        - 3 summary items are returned
        - first pre tax amount is 100 ( don't test entire data - other unit tests do this )
        :return:
        """
        summary_data_key = 'summary_data'
        response = self.send_request(view_name='upload_summary_view', params={'upload_id': 1})
        context_data = response.context_data
        # assertIn gives a clearer failure message than assertTrue(x in y).
        self.assertIn(summary_data_key, context_data)
        summary_data = context_data[summary_data_key]
        # assertEquals is a deprecated alias of assertEqual.
        self.assertEqual(3, len(summary_data))
        self.assertEqual(Decimal('100.0'), summary_data[0].pre_tax_amount)
|
17,281 | 7b44e26378874e2693cb089ea1812329b030155d | #!/usr/bin/env python3
# Read a bit string and echo it back with an even-parity bit appended:
# 0 if the number of 1-bits is already even, 1 otherwise.
bits = input()
parity_bit = bits.count('1') % 2
print('{}{}'.format(bits, parity_bit))
|
17,282 | 5d00ae2596535a19af8a2b975842f1d0ef030a32 | """
Holds system wide configuration options.
"""
from .constants import *
import os

# The preferences below are used as defaults if none of the configuration
# files exist. Being defaults, they have the lowest priority of the
# importing procedure.

# Whether or not to try to obtain an OpenGL context supporting multisampling.
# This usually produces nicer results but is unsupported in older hardware.
# Even if this is set to false, however, pyprocessing will fallback to
# a non-multisampled config if it is not supported.
multisample = True
# Whether or not to invert the y axis. This is required for strict conformity
# with Processing. Beyond altering the modelview matrix, this also implies that
# the drawing of some primitives such as arc or text be modified.
coordInversionHack = True
# Since OpenGL actually addresses lines between pixels, in some cases
# shifting the drawing by half a pixel makes lines sharper.
halfPixelShiftHack = False  # off by default
# try to get around the artifacts when drawing filled polygons in smooth mode
smoothFixHack = False  # off by default
smoothTurnedOn = False  # used internally to tell whether smooth was on
# Flipping policy. Override in a config file with one of:
# DOUBLE_FLIP_POLICY (modern boards/drivers), SINGLE_FLIP_POLICY (e.g. Intel
# 945 under Windows), FBO_FLIP_POLICY (flip via swap, not copy),
# ACCUM_FLIP_POLICY (cheap boards where SINGLE flickers).
flipPolicy = BACKUP_FLIP_POLICY  # default; should work on all boards


def _load_config(filename):
    """Best-effort import of ``name:value`` preference lines from *filename*
    into this module's globals.

    Each value is evaluated as a Python expression (kept for compatibility
    with existing config files). A missing or unreadable file is silently
    ignored, so lower-priority settings remain in effect. Unlike the
    original split(":") version, values containing ':' are preserved intact,
    and the file handle is always closed.
    """
    try:
        with open(filename, "r") as config_file:
            for line in config_file.read().split("\n"):
                if line != "":
                    name, _, value = line.partition(":")
                    globals()[name] = eval(value)
    except Exception:
        # Config files are optional; fall back to current settings.
        pass


# Global preferences (pyprocessing installation folder) override the defaults
# above; user preferences (user's home path) have the highest priority.
_load_config(os.path.join(os.path.dirname(__file__), "globalconfig.txt"))
_load_config(os.path.expanduser("~/.pyprocessing/userconfig.txt"))
|
17,283 | a44b17b58f6e8d2138c1c0bf597a00a9a4bdc8a7 | from django.contrib import admin
from reviews.models import Review
# Register your models here.
admin.site.register(Review)
|
17,284 | 86130202098124fa80c28620003839f7630e0d13 | # (c) 2022 Amazon Web Services, Inc. or its affiliates. All Rights Reserved.
# This AWS Content is provided subject to the terms of the AWS Customer
# Agreement available at https://aws.amazon.com/agreement or other written
# agreement between Customer and Amazon Web Services, Inc.
import json
import os
import boto3
from aws_lambda_powertools import Logger, Metrics, Tracer
from aws_lambda_powertools.metrics import MetricUnit
tracer = Tracer(service="EgressCopyToStaging")
logger = Logger(service="EgressCopyToStaging", sample_rate=0.1)
metrics = Metrics(service="EgressCopyToStaging", namespace="EgressRequests")
s3 = boto3.client("s3")
target_bucket = os.environ.get("EGRESS_STAGING_BUCKET")
notification_bucket = os.environ.get("NOTIFICATION_BUCKET")
@metrics.log_metrics()
@tracer.capture_lambda_handler
@logger.inject_lambda_context(log_event=True)
def handler(event, context):
    """Lambda entry point: copy an egress request's objects to staging.

    Expects in *event*: ``s3_bucketname``, ``workspace_id``,
    ``egress_request_id`` and ``egress_store_object_list_location``
    (a "<bucket>/<key>"-style path to the version-metadata JSON).
    Returns the file extensions seen plus a ``staged`` flag.
    """
    s3_egress_store_bucket = event["s3_bucketname"]
    workspace_id = event["workspace_id"]
    egress_request_id = event["egress_request_id"]
    ver_object_list_location = event["egress_store_object_list_location"]
    logger.info(
        "Starting copy to staging bucket with egress request ID: " + egress_request_id
    )
    logger.debug("Egress Store Bucket: " + s3_egress_store_bucket)
    logger.debug("Staging bucket: " + target_bucket)
    logger.debug("Version metadata file location: " + ver_object_list_location)
    # Object-key -> VersionId metadata recorded when the request was made.
    ver_object_list = fetch_object_version(ver_object_list_location)
    copy_files_to_egress_staging(
        version_list=ver_object_list,
        source_bucket=s3_egress_store_bucket,
        workspace=workspace_id,
        egress_request_id=egress_request_id,
    )
    metrics.add_metric(name="EgressRequestStaged", value=1, unit=MetricUnit.Count)
    return {
        # NOTE(review): extension_set is a module-level global populated as a
        # side effect of copy_objects() (called via copy_files_to_egress_staging).
        "file_extensions": list(
            extension_set
        ),  # convert set to list which is easily serializable into JSON
        "staged": True,
    }
def fetch_object_version(version_metadata_location: str):
    """Download and parse the JSON version-metadata document.

    *version_metadata_location* is a "<bucket>/<key>" style path; only the
    key part is used, the bucket is always the notification bucket.
    """
    file_key = version_metadata_location.split("/", 1)[1]
    response = s3.get_object(Bucket=notification_bucket, Key=file_key)
    payload = response["Body"].read().decode("utf-8")
    logger.info("Succesfully fetched versions metadata file")
    return json.loads(payload)
def copy_files_to_egress_staging(
    version_list: object, source_bucket: str, workspace: str, egress_request_id: str
):
    """List every object under the *workspace* prefix of *source_bucket* and
    copy each one (at its recorded version) into the egress staging bucket.

    :param version_list: parsed version metadata with an "objects" list of
        {"Key", "VersionId"} entries
    :param source_bucket: egress store bucket name
    :param workspace: workspace id, used as the S3 key prefix
    :param egress_request_id: id folded into the staged object keys
    """
    object_list = []
    get_objects_list(source_bucket, workspace, object_list)
    if object_list:
        copy_objects(
            version_list,
            object_list,
            target_bucket,
            source_bucket,
            workspace,
            egress_request_id,
        )
    else:
        # logger.warn() is a deprecated alias; warning() is the supported name.
        logger.warning("No objects were found in the source bucket")
####################################################################
# get_object_list
# retrieves objects from bucket with specified prefix
####################################################################
def get_objects_list(bucket, prefix, object_list):
    """Append to *object_list* the keys of all real objects under *prefix*.

    Keys ending in "/" (folder placeholder markers) are skipped.

    :param bucket: source bucket
    :type bucket: string
    :param prefix: s3 workspace prefix
    :type prefix: string
    :param object_list: list that collected keys are appended to (mutated)
    :type object_list: []
    :return: the same list, for convenience
    """
    # prepare args for retrieving items from a prefix (items from a certain prefix only)
    kwargs = {"Bucket": bucket, "Prefix": prefix}
    paginator = s3.get_paginator("list_objects_v2")
    pages = paginator.paginate(**kwargs)
    for page in pages:
        # "Contents" is absent from an empty listing page; the original
        # raised KeyError when no objects matched the prefix.
        for obj in page.get("Contents", []):
            object_key = obj["Key"]
            if not object_key.endswith("/"):
                object_list.append(object_key)
    logger.info("Retrieved list of objects")
    # The original passed the list as a stray positional argument with no
    # %s placeholder, which the logging module cannot format.
    logger.debug("Object list: %s", object_list)
    return object_list
####################################################################
# copy_objects
# copies candidate egress objects to staging bucket
####################################################################
def copy_objects(
    version_list: object,
    object_list: list,
    target_bucket: str,
    source_bucket: str,
    workspace_id: str,
    egress_request_id: str,
):
    """Copy each key in *object_list*, at its version from *version_list*,
    into *target_bucket* under "<workspace_id>/<egress_request_id>/<name>".

    Side effect: (re)binds the module-level global ``extension_set`` to the
    set of file extensions encountered; handler() reads it afterwards.

    :param version_list: metadata with an "objects" list of Key/VersionId
    :param object_list: list of S3 object keys to copy
    :param target_bucket: location to copy objects to
    :param source_bucket: source S3 bucket
    :param workspace_id: Workspace ID
    :param egress_request_id: Egress request ID which will form part of the object key
    :return: bool (True on completion)
    """
    # Define global variable to hold set of unique extensions
    global extension_set
    extension_set = set()
    # Retrieve object from object list
    for _counter, obj in enumerate(object_list, start=1):
        # split the object key into parts; the last part is the file name
        obj_parts = split_object_key(obj)
        file_name = obj_parts[-1]
        # Retrieve the right object metadata from version list.
        # NOTE(review): next() without a default raises StopIteration when a
        # listed object is missing from the metadata — confirm that is the
        # intended failure mode.
        versioned_object = next(
            object_k for object_k in version_list["objects"] if object_k["Key"] == obj
        )
        # retrieve version attribute
        object_version_id = versioned_object["VersionId"]
        # create arguments for copy object call
        kwargs = {
            "Bucket": target_bucket,
            "CopySource": f"{source_bucket}/{obj}?versionId={object_version_id}",
            "Key": f"{workspace_id}/{egress_request_id}/{file_name}",
        }
        # copy objects to destination bucket
        s3.copy_object(**kwargs)
        logger.debug(
            "Copied %s with %s from %s to %s",
            str(obj),
            str(object_version_id),
            source_bucket,
            target_bucket,
        )
        extension_set.add(get_file_extension(file_name))
    # NOTE(review): _counter is only bound if object_list was non-empty;
    # the caller guards against the empty case.
    logger.info("Copied %s versioned object/s to staging bucket", str(_counter))
    return True
def split_object_key(path: str):
    """Return the "/"-separated components of an S3 object key."""
    separator = "/"
    return path.split(separator)
def get_file_extension(path: str):
    """Return the text after the last "." in *path*, or "" when there is
    no dot at all."""
    _, dot, extension = path.rpartition(".")
    return extension if dot else ""
|
17,285 | fd8cc4d5ea4ea61413fd2414a975b3cb2b338fc6 | """
Starting with 1 and spiralling anticlockwise in the following way, a
square spiral with side length 7 is formed.
37 36 35 34 33 32 31
38 17 16 15 14 13 30
39 18 5 4 3 12 29
40 19 6 1 2 11 28
41 20 7 8 9 10 27
42 21 22 23 24 25 26
43 44 45 46 47 48 49
It is interesting to note that the odd squares lie along the bottom
right diagonal, but what is more interesting is that 8 out of the 13
numbers lying along both diagonals are prime; that is, a ratio of 8/13 = 62%.
If one complete new layer is wrapped around the spiral above, a square spiral
with side length 9 will be formed. If this process is continued, what is the
side length of the square spiral for which the ratio of primes along both
diagonals first falls below 10%?
"""
import math
def is_prime(n):
    """Trial-division primality test.

    The input is truncated to int and taken in absolute value, so e.g.
    -7 counts as prime.
    """
    n = abs(int(n))
    if n < 2:
        # 0 and 1 are not primes
        return False
    if n % 2 == 0:
        # 2 is the only even prime
        return n == 2
    # Odd candidates only, up to and including sqrt(n).
    divisor = 3
    while divisor * divisor <= n:
        if n % divisor == 0:
            return False
        divisor += 2
    return True
def the_brute_force_thought():
    """Grow the spiral one ring at a time, tallying primes on the four
    diagonal corners, until the ratio of primes to diagonal numbers first
    falls below ``limit``; return the spiral's side length at that point.
    """
    n_primes = 0
    n_numbers = 1  # counts the centre cell (the value 1)
    limit = 0.10
    i = 2
    while True:
        # Even i: two of the ring's four corners.
        # Generates 5, 17, 37...
        x = i ** 2 + 1
        # Generates 3, 13, 31...
        y = x - i
        if is_prime(x):
            n_primes += 1
        if is_prime(y):
            n_primes += 1
        n_numbers += 2
        i += 1
        # Odd i: the remaining two corners, ending at the odd square i**2.
        # Generates 9, 25, 49...
        x = i ** 2
        # Generates 7, 21, 43...
        y = x - i + 1
        if is_prime(x):
            n_primes += 1
        if is_prime(y):
            n_primes += 1
        n_numbers += 2
        # The ratio is only tested after a complete ring, so the returned
        # (odd) i equals the spiral's side length.
        if n_primes / float(n_numbers) < limit:
            break
        i += 1
    return i
print "Answer:", the_brute_force_thought()
|
17,286 | 60964a4000c29b932e81067288e1fb6c02e198a5 | """
SConnectForm.py
Form to connect to slack
"""
import npyscreen
class SConnectForm(npyscreen.Form):
    """npyscreen form presenting a Slack login/password prompt."""

    def create(self):
        """Build the form widgets; pressing ESC exits the form."""
        self.name = "Login to Slack"
        # cs = self.add(npyscreen.BoxBasic, name="Connect to Slack")
        self.add(npyscreen.TitleText, name="Login:")
        self.add(npyscreen.TitlePassword, name="Password:")
        # NOTE(review): 'how_exited_handers' (sic) matches the attribute name
        # as spelled by the npyscreen library — verify before "fixing" it.
        self.how_exited_handers[
            npyscreen.wgwidget.EXITED_ESCAPE] = self.exit

    def exit(self):
        # Setting the next form to None ends the npyscreen application loop.
        self.parentApp.setNextForm(None)
        self.editing = False
|
17,287 | 6b51ccf8a0244db3ce07b523e670653b31d516e2 | # Copyright 2023 The Google Earth Engine Community Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# [START earthengine__apidocs__ee_featurecollection_get]
# A global power plant FeatureCollection.
# NOTE(review): `ee` is never imported in this file; the docs harness is
# presumably expected to import and initialize the Earth Engine client
# (`import ee; ee.Initialize()`) before running this snippet — confirm.
fc = ee.FeatureCollection('WRI/GPPD/power_plants')
# View a list of FeatureCollection property names.
print(fc.propertyNames().getInfo())
# Get the value of a listed property.
print('Global power plant data provider as ee.ComputedObject:',
      fc.get('provider').getInfo())
# The returned value is an ee.ComputedObject which has no methods available for
# further processing; cast to the relevant Earth Engine object class for use.
print('Global power plant data provider as ee.String:',
      ee.String(fc.get('provider')).getInfo())
# [END earthengine__apidocs__ee_featurecollection_get]
|
17,288 | d5b568cbb1fb6a127777364329ddf5cb9a501d48 | from location import Location
from messages import Message
class EnterMineAndDigForNugget:
    """State: dig in the goldmine until the pockets are full (then bank the
    gold) or thirst strikes (then visit the saloon)."""

    @staticmethod
    def enter(miner):
        # Walk to the mine only if not already there.
        if miner.location != Location.GOLDMINE:
            miner.say('Walking to the goldmine.')
            miner.change_location(Location.GOLDMINE)

    @staticmethod
    def execute(miner):
        miner.add_to_gold_carried(1)
        miner.increase_fatigue()
        miner.say('Picking up a nugget')
        """
        Original code has two if conditions to test gold and thirst, this
        provokes strange behaviour if both conditions are true. To avoid
        it, and elif clause has been added to select behaviour: go to
        the bank is more important than go to the saloon.
        """
        if miner.pockets_full():
            miner.state_machine.change_state(VisitBankAndDepositGold)
        elif miner.thirsty():
            miner.state_machine.change_state(QuenchThirst)

    @staticmethod
    def exit(miner):
        miner.say("Ah'm leavin' the gold mine with mah pockets full o' sweet gold")

    @staticmethod
    def on_message(miner, message):
        # This state handles no messages.
        return False
class VisitBankAndDepositGold:
    """State: deposit carried gold; go home when wealthy enough, otherwise
    return to the mine."""

    @staticmethod
    def enter(miner):
        if miner.location != Location.BANK:
            miner.say('Walking to the bank. Yes sireee.')
            miner.change_location(Location.BANK)

    @staticmethod
    def execute(miner):
        miner.add_to_wealth()
        miner.say('Depositing gold. Total savings now: {0}'.format(miner.money_in_bank))
        if miner.wealthy():
            miner.say("WooHoo! Rich enough for now. Back home to mah li'lle lady.")
            miner.state_machine.change_state(GoHomeAndSleepTilRested)
        else:
            miner.state_machine.change_state(EnterMineAndDigForNugget)

    @staticmethod
    def exit(miner):
        miner.say('Leaving the bank')

    @staticmethod
    def on_message(miner, message):
        # This state handles no messages.
        return False
class QuenchThirst:
    """State: walk to the saloon and buy a whiskey, then head back to the
    mine. Reaching execute() while not thirsty is treated as an error."""

    @staticmethod
    def enter(miner):
        if miner.location != Location.SALOON:
            miner.say('Boy, ah sure is thusty! Walking to the saloon')
            miner.change_location(Location.SALOON)

    @staticmethod
    def execute(miner):
        if miner.thirsty():
            miner.buy_and_drink_wiskey()
            miner.say("That's mighty fine sippin liquer")
            miner.state_machine.change_state(EnterMineAndDigForNugget)
        else:
            # Should be unreachable: this state is only entered when thirsty.
            miner.say('ERROR! ERROR! ERROR!')

    @staticmethod
    def exit(miner):
        miner.say('Leaving the saloon, feeling good')

    @staticmethod
    def on_message(miner, message):
        # This state handles no messages.
        return False
class GoHomeAndSleepTilRested:
    """State: walk home and sleep off the fatigue, unless the wife announces
    that stew is ready."""

    @staticmethod
    def enter(miner):
        if miner.location != Location.SHACK:
            miner.say('Walking home.')
            miner.change_location(Location.SHACK)
            # Let the wife know the miner is home.
            miner.game.message_dispatcher.dispatch_message(miner, miner.wife, Message.HI_HONEY)

    @staticmethod
    def execute(miner):
        if not miner.fatigued():
            miner.say('What a God darn fantastic nap! Time to find more gold.')
            miner.state_machine.change_state(EnterMineAndDigForNugget)
        else:
            miner.decrease_fatigue()
            miner.say('ZZZ...')

    @staticmethod
    def exit(miner):
        miner.say('Leaving the house.')

    @staticmethod
    def on_message(miner, message):
        """Handle STEW_READY; report whether the message was handled.

        The original implicitly returned None for unhandled messages,
        unlike the sibling states which return False; it now returns an
        explicit True/False.
        """
        if message.message == Message.STEW_READY:
            miner.say('Ok hun, ahm a-comin!')
            miner.state_machine.change_state(EatStew)
            return True
        return False
class EatStew:
    """Transient state: eat the stew, then revert to the previous state."""

    @staticmethod
    def enter(miner):
        miner.say('Smells Real Goood Elsa!')

    @staticmethod
    def execute(miner):
        miner.say('Tastes Real Goood too!')
        # One-shot state: immediately resume whatever the miner was doing.
        miner.state_machine.revert_previous_state()

    @staticmethod
    def exit(miner):
        miner.say('Thank you! gAh better get back to whatever ah wuz doing')

    @staticmethod
    def on_message(miner, message):
        # This state handles no messages.
        return False
|
17,289 | c1e05ed6e78ea47b790e9e16a939a9bd900b50d2 | '''
Kruskal: relative importance analysis
'''
''' load modules '''
import numpy as np
import itertools
from math import factorial
from numpy import matrix, linalg
''' function to calculate R2 for a subset of the matrix '''
def calcR2(varlist, rmatrix):
    """Squared multiple correlation (R^2) of the dependent variable
    (row/column 0 of *rmatrix*) with the predictor subset *varlist*.

    Computed as r' * S^-1 * r, where S is the predictors' inter-correlation
    sub-matrix and r their correlations with the dependent variable.
    """
    # invert the predictors-only sub-matrix
    inv_sub = linalg.inv(rmatrix[np.ix_(varlist, varlist)])
    # row vector of correlations with the dependent variable
    r_dep = rmatrix[np.ix_(varlist), 0]
    r2 = r_dep * inv_sub * r_dep.T
    return r2[0, 0]
''' Kruskal calculation '''
def Kruskal(correlationmatrix):
    """Relative-importance (Kruskal) analysis of a set of predictors.

    For each independent variable, averages its squared semipartial
    correlation with the dependent variable over every subset of the
    remaining predictors, then prints a summary table.

    :param correlationmatrix: square correlation matrix (list of lists or
        array-like); row/column 0 must be the dependent variable, and all
        correlations must be non-negative.
    :return: None (results are printed)
    """
    # The original assert iterated enumerate(...) and compared an int to an
    # (index, row) tuple, raising TypeError; check the actual values instead.
    cm = np.asarray(correlationmatrix)
    assert ((cm >= 0) & (cm <= 1)).all(), "correlations must lie in [0, 1]"
    # turn input into matrix structure
    correlationmatrix = matrix(correlationmatrix)
    noVars = len(correlationmatrix)  # number of variables incl. the dependent one
    # multipliers for the squared (semipartial) correlations, by subset size
    # (diagonal of https://oeis.org/A098361)
    T = [[factorial(n) * factorial(k) for n in range(noVars - 1)] for k in range(noVars - 1)][::-1]
    # structures to accumulate output
    mean_semipart = np.zeros((noVars - 1))
    list_semipart = []
    # loop over each independent variable (IV)
    for IV in range(1, noVars):
        # the remaining predictors act as control variables
        CV_list = list(range(1, noVars))
        CV_list.remove(IV)
        # no control variables => squared zero-order correlation
        mean_semipart[IV - 1] = (correlationmatrix[0, IV] ** 2) * T[0][0]
        list_semipart.append([correlationmatrix[0, IV] ** 2])
        # loop over all possible combinations of control variables (CV)
        for CV in [x for l in range(1, noVars + 1) for x in itertools.combinations(CV_list, l)]:
            # R2 with the independent variable plus the control variables
            full_list = list(CV)
            full_list.append(IV)
            R2full = calcR2(full_list, correlationmatrix)
            # R2 with only the control variables
            R2cont = calcR2(list(CV), correlationmatrix)
            # semipartial contribution of the IV given this control subset
            semipart = R2full - R2cont
            list_semipart[IV - 1].append(semipart)
            # weight by the number of orderings sharing this subset size
            mean_semipart[IV - 1] += semipart * T[len(CV)][len(CV)]
        mean_semipart[IV - 1] /= factorial(noVars - 1)
    # print the output
    print("\tRelative importance (%)\tMean of squared semipartial correlations\tMin\tMax")
    template = "Variable {}\t{}\t{}\t{}\t{}\n"
    for IV in range(noVars - 1):
        print(template.format(IV + 1, mean_semipart[IV] / sum(mean_semipart), mean_semipart[IV], min(list_semipart[IV]), max(list_semipart[IV])))
|
17,290 | e79f75ea932bfe4b46c464e11aa1a45e6430a2d0 | import statsmodels.formula.api as smf
import pandas as pd
import numpy as np
def vif_cal(data, y):
    """Compute the variance inflation factor (VIF) for every predictor.

    Each column of *data* other than *y* is regressed on all the remaining
    predictors; VIF = 1 / (1 - R^2) of that regression (inf when R^2 == 1).
    A formatted summary is printed as a side effect.

    :param data: pandas DataFrame containing the target and the predictors
    :param y: name of the target column (excluded from the VIF set)
    :return: DataFrame with columns ['x_variable', 'vif']
    """
    x_vars = data.drop([y], axis=1)
    xvar_names = x_vars.columns.tolist()
    x_var_col, vif_list = [], []
    # column width for aligned "vif of <name> = <value>" lines
    str_gap = max(len(c) for c in xvar_names) + 2

    def _banner(title):
        # Center *title* between '*' runs sized to the summary-line width.
        pad = '*' * int((str_gap + 2 + 7 + 3 + 6 - len(title)) / 2)
        print(''.join((pad, title, pad)))

    _banner(' VIF Summary ')
    for xvar in xvar_names:
        # Regress this predictor on all the others. (The original rebound
        # the parameter name `y` here, shadowing the target column name.)
        others = [c for c in xvar_names if c != xvar]
        formula = "{} ~ {} + 1".format(xvar, ' + '.join(others))
        rsq = smf.ols(formula, data=x_vars).fit().rsquared
        vif = np.inf if rsq == 1 else round(1 / (1 - rsq), 10)
        x_var_col.append(xvar)
        vif_list.append(vif)
        print('vif of {:<{width}} = {:.6}'.format(xvar, vif, width=str_gap))
    _banner(' VIF Summary END ')
    vif_df = pd.DataFrame({'x_variable': x_var_col, 'vif': vif_list})
    return vif_df[['x_variable', 'vif']]
17,291 | 59482cc6b3cb121db48d312b68e4ffd57c060a94 | # -*- coding: utf-8 -*-
import numpy as np
import scipy as sp
import pandas as pd
def walmart_prepare(df):
    """Prepare raw Walmart trip rows: clean columns, one-hot encode, and
    aggregate to one row per visit (grouped with TripType when present).

    NOTE(review): *df* is modified in place (drop/rename with inplace=True),
    so the caller's DataFrame is mutated.
    """
    def convert_weekday(day):
        '''Function to convert weekday char strings
        to numeric (Sunday=0 ... Saturday=6; any unknown value maps to 6)'''
        if day == 'Sunday':
            return 0
        elif day == 'Monday':
            return 1
        elif day == 'Tuesday':
            return 2
        elif day == 'Wednesday':
            return 3
        elif day == 'Thursday':
            return 4
        elif day == 'Friday':
            return 5
        else:
            return 6
    # Drop UPC (there are too many to deal with), fill NAs, and convert
    # weekdays to numeric.
    df.drop(['Upc'], inplace=True, axis=1)
    df.DepartmentDescription = df.DepartmentDescription.fillna('None')
    df.FinelineNumber = df.FinelineNumber.fillna(-1)
    df.Weekday = df.Weekday.apply(convert_weekday)
    # Add a column for returns (negative scan counts become positive returns)
    df['Returns'] = pd.Series([abs(num) if num < 0
                               else 0 for num in df.ScanCount], index=df.index)
    df.ScanCount = df.ScanCount.apply(lambda x: 0 if x < 0 else x)
    # Rename ScanCount to Purchases
    df.rename(columns={'ScanCount': 'Purchases'}, inplace=True)
    # Create dummy variables for dept description and fineline number.
    # (Dropping the first column of each to avoid multicollinearity.)
    temp1 = pd.get_dummies(df.DepartmentDescription).astype(int, copy=False)
    temp2 = pd.get_dummies(df.FinelineNumber).astype(int, copy=False)
    temp1.drop(temp1.columns[0], inplace=True, axis=1)
    temp2.drop(temp2.columns[0], inplace=True, axis=1)
    # Now we don't need these columns anymore
    df.drop(['DepartmentDescription', 'FinelineNumber'], inplace=True, axis=1)
    # Concatenate df with the dummy dataframes after converting df to int.
    # NOTE(review): astype returns a new object and this result is discarded,
    # so the conversion is a no-op — confirm intent before assigning it back.
    df.astype(int, copy=False)
    df = pd.concat([df, temp1, temp2], axis=1)
    del temp1, temp2
    # for train.csv
    if 'TripType' in df.columns:
        return df.groupby(['TripType', 'VisitNumber', 'Weekday']).aggregate(np.sum).reset_index()
    # for test.csv
    else:
        return df.groupby(['VisitNumber', 'Weekday']).aggregate(np.sum).reset_index()
if __name__ == "__main__":
    # NOTE(review): Python-2 script (print statements below);
    # sklearn.cross_validation is a legacy module path — confirm the
    # pinned scikit-learn version before running on a modern stack.
    import xgboost as xgb
    import random
    from sklearn.cross_validation import train_test_split
    # This all has to be done in parts because my computer doesn't have
    # enough RAM.
    train = pd.read_csv('data/train.csv')
    # XGB needs target classes to be in [0, num_classes)
    # NOTE(review): the comprehension variable shadows the builtin `type`.
    ref = {type:n for (type, n) in zip(np.sort(train.TripType.unique()), range(38))}
    train.TripType = train.TripType.apply(lambda x: ref[x])
    test = pd.read_csv('data/test.csv')
    print "Preparing Walmart training set with 80/20 split for CV!"
    random.seed(a=0)
    rows = random.sample(train.index, 300000)
    train_prepare1 = walmart_prepare(train.ix[rows])
    train_prepare2 = walmart_prepare(train.drop(rows))
    X1 = train_prepare1.drop(['TripType'], axis=1)
    y1 = train_prepare1.TripType
    X2 = train_prepare2.drop(['TripType'], axis=1)
    y2 = train_prepare2.TripType
    del train_prepare1, train_prepare2
    X1_train, X1_test, y1_train, y1_test = train_test_split(X1, y1, test_size=0.2, random_state=0)
    X2_train, X2_test, y2_train, y2_test = train_test_split(X2, y2, test_size=0.2, random_state=0)
    del X1, y1, X2, y2
    print "Saving training DMatrices to XGBoost binary buffer files!"
    dtrain1 = xgb.DMatrix(np.array(X1_train), label=np.array(y1_train))
    dtrain2 = xgb.DMatrix(np.array(X2_train), label=np.array(y2_train))
    dtrain1.save_binary('data/dtrain1.buffer')
    dtrain2.save_binary('data/dtrain2.buffer')
    del dtrain1, dtrain2, X1_train, X2_train, y1_train, y2_train
    print "Saving CV test DMatrices to XGBoost binary buffer files!"
    dtestCV1 = xgb.DMatrix(np.array(X1_test), label=np.array(y1_test))
    dtestCV2 = xgb.DMatrix(np.array(X2_test), label=np.array(y2_test))
    dtestCV1.save_binary('data/dtestCV1.buffer')
    dtestCV2.save_binary('data/dtestCV2.buffer')
    del dtestCV1, dtestCV2, X1_test, X2_test, y1_test, y2_test
    print "Preparing Walmart testing set!"
    # NOTE(review): `rows` was sampled from train.index but is applied to
    # test here — verify the two frames share compatible indices.
    test_prepare1 = walmart_prepare(test.ix[rows])
    test_prepare2 = walmart_prepare(test.drop(rows))
    print "Saving test sets to an XGBoost binary buffer file!"
    dtest1 = xgb.DMatrix(np.array(test_prepare1))
    dtest2 = xgb.DMatrix(np.array(test_prepare2))
    dtest1.save_binary('data/test1.buffer')
    dtest2.save_binary('data/test2.buffer')
|
17,292 | 08383e9eda198667c2f724a3e52524e1a7bb07a5 | version https://git-lfs.github.com/spec/v1
oid sha256:fc918f6634c335750e0a2d303e13d21c5fe6aaf1dd905e7789e94e95aad2da0e
size 32096
|
17,293 | 330941793ae02d3fb07c53b642d4957806873133 | from telegram.ext import Updater, CommandHandler, MessageHandler, Filters
from telegram import ChatAction
def start(bot, update):
    """Handler for /start: list the commands the bot understands."""
    update.message.reply_text("""Hi, here you have the actions you can perform:
1. /showTasks
2. /newTask
3. /removeTask
4. /removeAllTasks""")
def show(bot, update):
    """Handler for /showTasks: send the current task list to the chat."""
    if not tasks:
        update.message.reply_text("The list of the tasks is empty")
        return
    # Header joined to one "- task" line per entry.
    update.message.reply_text('\n- '.join(["List of the tasks:", *tasks]))
def new(bot, update, args):
    """Handler for /newTask: append the text after the command to the list.

    *args* is the word list supplied by the CommandHandler (pass_args=True).
    """
    input_new = " ".join(args)
    if input_new != '':
        update.message.reply_text('''Creating updated list of tasks, you add:
'''+ input_new)
        tasks.append(input_new)
    else:
        update.message.reply_text("Error, type task after the command")
def remove(bot, update, args):
    """Handler for /removeTask: delete the named task from the list.

    The original crashed with an unhandled ValueError when the named task
    was not in the list; an unknown task now gets an error reply instead.
    """
    input_delete = " ".join(args)
    if input_delete == '':
        update.message.reply_text("Error, type task after the command")
    elif input_delete in tasks:
        update.message.reply_text('''Creating updated list of tasks, you deleted:
'''+ input_delete)
        tasks.remove(input_delete)
    else:
        update.message.reply_text("Error, task not found: " + input_delete)
def removeAll(bot, update, args):
    """Handler for /removeAllTasks: clear the in-memory task list.

    (Removed a leftover `print(tasks)` debug statement.)
    """
    tasks.clear()
    update.message.reply_text("All Tasks have been successfully deleted")
def main():
    """Wire up the command handlers, load saved tasks, and poll until stopped."""
    # create the EventHandler and pass it your bot's token
    # SECURITY(review): the bot token is hard-coded and now public — revoke
    # it and load the replacement from an environment variable instead.
    updater = Updater("513703656:AAFIV1vBhHuXLz9f9XXT_AwuxVKi5aEaXfI")
    # Open the file in reading mode
    # NOTE(review): the handle is never closed, and tasks are never written
    # back to task_list.txt anywhere, so all changes are lost on restart.
    savedTasks = open("task_list.txt", "r+")
    for line in savedTasks:
        line = line[0:len(line) - 1]  # -1 used to eliminate the end of raw character
        tasks.append(line)
    # message.text("To start the bot please selct: /start")
    # get the dispatcher to register handlers
    dp = updater.dispatcher
    dp.add_handler(CommandHandler("start", start))  # add the command handler for the "/start" command
    dp.add_handler(CommandHandler("showTasks", show))
    dp.add_handler(CommandHandler("newTask", new, pass_args=True))  # pass_args=True To write
    dp.add_handler(CommandHandler("removeTask", remove, pass_args=True))
    dp.add_handler(CommandHandler("removeAllTasks", removeAll, pass_args=True))
    # start the bot
    updater.start_polling()
    updater.idle()
tasks = []
if __name__ == "__main__":
main()
|
17,294 | 37065a5101702dfe65c609f7071f4b3cc613f1a0 | #!/usr/bin/env python
import unittest
import nbconvert
import os
import numpy as np
with open("assignment8.ipynb") as f:
exporter = nbconvert.PythonExporter()
python_file, _ = exporter.from_file(f)
with open("assignment8.py", "w") as f:
f.write(python_file)
from assignment8 import KozenyCarmen
class TestSolution(unittest.TestCase):
    """Checks the student's KozenyCarmen implementation against reference values."""

    def test_transform(self):
        # kc_model() must reproduce the first ten reference predictions
        # within an absolute tolerance of 1e-4.
        kc = KozenyCarmen('poro_perm.csv')
        np.testing.assert_allclose(kc.kc_model()[0:10],
                                   np.array([0.00144518, 0.00144518, 0.00178167,
                                             0.00073352, 0.0035369, 0.00123457,
                                             0.00194181, 0.00199742, 0.0022314,
                                             0.00205417]), atol=0.0001)
if __name__ == '__main__':
unittest.main()
|
17,295 | 0864a0a4e493296928b450d79afe787ce92efad5 | # 2015.11.18 11:51:45 Střední Evropa (běžný čas)
# Embedded file name: scripts/client/gui/gold_fish.py
from account_helpers.AccountSettings import AccountSettings, GOLD_FISH_LAST_SHOW_TIME
import constants
from gui import GUI_SETTINGS
from helpers.time_utils import getCurrentTimestamp
def isGoldFishActionActive():
    """True when the Gold Fish action applies: the bonus has not been
    applied yet, the server enables it, and the account does not have the
    out-of-session-wallet attribute set."""
    # Imported lazily, presumably to avoid import cycles — confirm.
    from gui.LobbyContext import g_lobbyContext
    from gui.shared.ItemsCache import g_itemsCache
    outOfSessionWallet = constants.ACCOUNT_ATTR.OUT_OF_SESSION_WALLET
    # `&` binds tighter than `!=`, so the last clause reads:
    # not ((attributes & outOfSessionWallet) != 0)
    return not g_itemsCache.items.stats.isGoldFishBonusApplied and g_lobbyContext.getServerSettings().isGoldFishEnabled() and not g_itemsCache.items.stats.attributes & outOfSessionWallet != 0
def isTimeToShowGoldFishPromo():
    """True when at least the configured cooldown has elapsed since the
    promo was last shown."""
    return getCurrentTimestamp() - AccountSettings.getFilter(GOLD_FISH_LAST_SHOW_TIME) >= GUI_SETTINGS.goldFishActionShowCooldown
# okay decompyling c:\Users\PC\wotsources\files\originals\res\scripts\client\gui\gold_fish.pyc
# decompiled 1 files: 1 okay, 0 failed, 0 verify failed
# 2015.11.18 11:51:45 Střední Evropa (běžný čas)
|
17,296 | e5255911768a8c024196650e93933ffa9e9904af | from django.apps import AppConfig
class TiendamascotasConfig(AppConfig):
    """Django application configuration for the tiendaMascotas app."""
    name = 'tiendaMascotas'
|
17,297 | 41e2ff99d249f1c12553f6bc9b82218182112d62 | # coding=UTF-8
from __future__ import print_function, absolute_import, division
import logging
import typing as tp
from coolamqp.framing.definitions import ConnectionUnblocked, ConnectionBlocked
from coolamqp.objects import Callable
from coolamqp.uplink import Connection
from coolamqp.uplink.connection import MethodWatch
logger = logging.getLogger(__name__)
class SingleNodeReconnector(object):
    """
    Connection to one node. It will do its best to remain alive:
    whenever the connection finalizes (and we are not shutting down),
    a reconnect is scheduled on the listener thread.
    """

    def __init__(self, node_def,  # type: coolamqp.objects.NodeDefinition
                 attache_group,  # type: coolamqp.attaches.AttacheGroup
                 listener_thread,  # type: coolamqp.uplink.ListenerThread
                 extra_properties=None,  # type: tp.Dict[bytes, tp.Tuple[tp.Any, str]]
                 log_frames=None,  # type: tp.Optional[tp.Callable]
                 name=None):
        self.listener_thread = listener_thread
        self.node_def = node_def
        self.attache_group = attache_group
        self.connection = None  # None whenever we are disconnected
        self.extra_properties = extra_properties
        self.log_frames = log_frames
        self.name = name or 'CoolAMQP'
        self.terminating = False  # set by shutdown(); suppresses reconnects
        self.timeout = None  # last timeout used, reused by reconnects
        self.on_fail = Callable()  #: public
        self.on_blocked = Callable()  #: public
        self.on_fail.add(self._on_fail)

    def is_connected(self):  # type: () -> bool
        """Whether a connection object currently exists."""
        return self.connection is not None

    def connect(self, timeout=None):  # type: (tp.Optional[float]) -> None
        """Create the Connection, attach the attache group, and register
        the failure and blocked/unblocked watches.

        :param timeout: connection timeout; falls back to the previous one
        """
        assert self.connection is None
        timeout = timeout or self.timeout
        self.timeout = timeout
        # Initiate connecting - this order is very important!
        self.connection = Connection(self.node_def, self.listener_thread,
                                     extra_properties=self.extra_properties,
                                     log_frames=self.log_frames,
                                     name=self.name)
        self.attache_group.attach(self.connection)
        self.connection.start(timeout)
        self.connection.finalize.add(self.on_fail)
        # Register the on-blocking watches; oneshot=False keeps each watch
        # active for the connection's entire lifetime.
        mw = MethodWatch(0, (ConnectionBlocked,), lambda: self.on_blocked(True))
        mw.oneshot = False
        self.connection.watch(mw)
        mw = MethodWatch(0, (ConnectionUnblocked,), lambda: self.on_blocked(False))
        mw.oneshot = False
        self.connection.watch(mw)

    def _on_fail(self):
        # Connection died: drop it and schedule a reconnect on the listener
        # thread, unless shutdown() was requested.
        if self.terminating:
            return
        self.connection = None
        self.listener_thread.call_next_io_event(self.connect)

    def shutdown(self):
        """Close this connection"""
        self.terminating = True
        if self.connection is not None:
            self.connection.send(None)
            self.connection = None
|
17,298 | 5214b49f38aae73d0cbd1459197e9d13dae511c1 | from font import Font, FontLoader
from textdrawer import TextDrawer
def getText():
    """Return the text that should be rendered."""
    message = 'Git'
    return message
# Load the fancy font from its resource directory and render the text.
text = getText()
font = FontLoader().loadFont('fancyFont/')
drawer = TextDrawer()
drawer.setFont(font)
drawer.draw(text)
|
17,299 | 30b0176251618513cb675aa1145d8043d28f158f | import json
from get_urltrain import d
import urllib
from urllib import request
import requests
from pprint import pprint
from get_urltrain import url
from prettytable import PrettyTable
from colour_set import colored
# NOTE(review): verify=False disables TLS certificate verification, which is
# insecure for any non-local endpoint - confirm whether it is really needed.
r = requests.get(url, verify=False) # fetch the contents of the query URL
# rows = r.json()['data']['datas'] # parse the response body into a list
# rows = r.json()
print(r)
trains= PrettyTable()
trains.field_names=["车次","车站","时间","历时","商务座","特等座","一等座","二等座","高级软卧","软卧","硬卧 ","软座 ","硬座","无座"]
# Set the table header (columns: train no., stations, times, duration, and
# the various seat classes of China Railway trains).
# num = len(rows) # number of entries in the list
# for row in rows : # loop over the list of trains
#     trains.add_row([row['station_train_code'],
#                     '\n'.join([colored('green', row['from_station_name']),
#                                colored('red', row['to_station_name'])]),
#                     '\n'.join([colored('green', row['start_time']), # colorize the two-line fields
#                                colored('red', row['arrive_time'])]),
#                     row['lishi'],row['swz_num'],row['tz_num'],
#                     row['zy_num'],row['ze_num'],row['gr_num'],
#                     row['rw_num'],row['yw_num'],row['rz_num'],
#                     row['yz_num'],row['wz_num']])
# print ('查询结束,共有 %d 趟列车。'%num ) # list length == number of trains found
# print (trains)
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.