| content (stringlengths 0–1.05M) | origin (stringclasses, 2 values) | type (stringclasses, 2 values) |
|---|---|---|
from keras.models import Sequential, model_from_json
from keras.layers.core import Dense, Activation, Flatten, Dropout, Lambda
from keras.layers import Cropping2D
from keras.layers.convolutional import Conv2D
from keras.layers.pooling import MaxPooling2D
from keras.layers.advanced_activations import ELU
from keras.optimizers import Adam
from keras.callbacks import ModelCheckpoint, Callback
from sklearn.model_selection import train_test_split
from sklearn.utils import shuffle
from sklearn.preprocessing import LabelBinarizer
import math
import numpy as np
from PIL import Image
import cv2
import matplotlib.pyplot as plt
from os import getcwd
import csv
# Fix error with TF and Keras
import tensorflow as tf
# tf.python.control_flow_ops = tf
import sklearn
def displayCV2(img):
# Displaying a CV2 Image
cv2.imshow('image',img)
cv2.waitKey(0)
cv2.destroyAllWindows()
samples = [] # list holding every entry from the .csv file
with open('./data/driving_log.csv') as csvfile: # after extracting the archive, the log lives at this path
reader = csv.reader(csvfile)
next(reader, None) #this is necessary to skip the first record as it contains the headings
for line in reader:
samples.append(line)
# Code for Data Augmentation (Image Generator)
def generator(samples, batch_size=32):
num_samples = len(samples)
while 1:
shuffle(samples) # Shuffling the total images
for offset in range(0, num_samples, batch_size):
batch_samples = samples[offset:offset+batch_size]
images = []
angles = []
for batch_sample in batch_samples:
for i in range(0,3): # Taking 3 images, first one is center, second is left, and third is right
name = './data/data/IMG/'+batch_sample[i].split('/')[-1]
                    center_image = cv2.cvtColor(cv2.imread(name), cv2.COLOR_BGR2RGB) # cv2 reads BGR, but drive.py feeds the model RGB, so convert
center_angle = float(batch_sample[3]) # Getting the steering angle measurement
images.append(center_image)
# Introducing correction for left and right images
# if using the left image (i == 1), then increase the steering angle by 0.2
# if using the right image (i == 2), then decrease the steering angle by 0.2
if(i == 0):
angles.append(center_angle)
elif(i == 1):
angles.append(center_angle + 0.2)
elif(i == 2):
angles.append(center_angle - 0.2)
                    # Data augmentation: flip each camera image and mirror (negate) its steering angle
images.append(cv2.flip(center_image,1))
if(i==0):
angles.append(center_angle*-1)
elif(i==1):
angles.append((center_angle+0.2)*-1)
elif(i==2):
angles.append((center_angle-0.2)*-1)
            # Each CSV line thus yields 6 images: 3 cameras x (original + flipped)
X_train = np.array(images)
y_train = np.array(angles)
            yield sklearn.utils.shuffle(X_train, y_train) # yield rather than return, so batches are produced lazily for as long as the generator is consumed
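# Hedged sanity-check sketch (commented out so training is unaffected): with
# 3 camera images per CSV line and one flipped copy of each, a batch_size of
# 4 lines should yield 24 images and 24 angles.
# X_batch, y_batch = next(generator(samples, batch_size=4))
# print(X_batch.shape, y_batch.shape)  # expected: (24, 160, 320, 3) (24,)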
### Main Program ###
# Getting the data
lines = []
with open('./data/driving_log.csv') as csvfile:
reader = csv.reader(csvfile)
next(reader)
for line in reader:
lines.append(line)
images = []
measurements = []
for line in lines:
source_path = line[0]
filename = source_path.split('/')[-1]
current_path = './data/IMG/' + filename
image = cv2.imread(current_path)
images.append(image)
measurement = float(line[3])
measurements.append(measurement)
X_train = np.array(images)
y_train = np.array(measurements)
# The Neural Network Architecture (NVIDIA Model)
model = Sequential()
model.add(Lambda(lambda x: x / 255.0 - 0.5, input_shape=(160,320,3)))
model.add(Cropping2D(cropping=((70,25),(0,0))))
# Note: the original code passed activation='relu' and then added ELU();
# since ELU(x) == x for x >= 0, the ELU layers were no-ops after ReLU.
# Here the explicit ELU layers are kept as the sole activation.
model.add(Conv2D(24, padding='valid', strides=(2,2), kernel_size=(5, 5)))
model.add(ELU())
model.add(Conv2D(36, padding='valid', strides=(2,2), kernel_size=(5, 5)))
model.add(ELU())
model.add(Conv2D(48, padding='valid', strides=(2,2), kernel_size=(5, 5)))
model.add(ELU())
model.add(Dropout(0.5))
model.add(Conv2D(64, padding='valid', kernel_size=(3, 3)))
model.add(ELU())
model.add(Conv2D(64, padding='valid', kernel_size=(3, 3)))
model.add(ELU())
model.add(Flatten())
model.add(Dense(100))
model.add(ELU())
model.add(Dense(50))
model.add(ELU())
model.add(Dense(10))
model.add(ELU())
model.add(Dense(1))
model.compile(loss='mse', optimizer='adam')
train_samples, validation_samples = train_test_split(samples, test_size=0.15) # split the dataset into training and validation sets using sklearn; 0.15 means 15% goes to validation
# Compile and train the model using the generator function
batch_size = 32
train_generator = generator(train_samples, batch_size=batch_size)
validation_generator = generator(validation_samples, batch_size=batch_size)
# model.fit(X_train, y_train, validation_split=0.2, shuffle=True, epochs=5)
# Keras 2 expects step counts; the old samples_per_epoch/nb_val_samples/nb_epoch keywords are Keras 1 API
model.fit_generator(train_generator, steps_per_epoch=math.ceil(len(train_samples)/batch_size), validation_data=validation_generator, validation_steps=math.ceil(len(validation_samples)/batch_size), epochs=5, verbose=1)
print(model.summary())
model.save('model.h5')
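# Hedged inference sketch (paths are hypothetical): drive.py-style prediction
# from the saved model would look roughly like this.
# from keras.models import load_model
# net = load_model('model.h5')
# img = cv2.cvtColor(cv2.imread('./data/IMG/center_sample.jpg'), cv2.COLOR_BGR2RGB)
# steering_angle = float(net.predict(img[None, ...], batch_size=1))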
| nilq/baby-python | python |
#!/usr/bin/env python
import logging
from p4p.client.thread import Context
_log = logging.getLogger(__name__)
def getargs():
from argparse import ArgumentParser
P = ArgumentParser()
    P.add_argument('pvname', help='SIGS pvname (e.g. RX:SIG)')
P.add_argument('filename', help='list of BSA/signal PV names. text, one per line')
P.add_argument('-v', '--verbose', action='store_const', const=logging.DEBUG, default=logging.INFO)
return P.parse_args()
def main(args):
sigs = []
with open(args.filename, 'r') as F:
for line in F:
line = line.strip()
if len(line)==0 or line[:1]=='#':
continue
_log.debug("Read signal '%s'", line)
sigs.append(line)
with Context('pva') as ctxt:
ctxt.put(args.pvname, sigs, wait=True)
print("Success. Signal list now")
for sig in ctxt.get(args.pvname):
print(sig)
if __name__=='__main__':
args = getargs()
logging.basicConfig(level=args.verbose)
main(args)
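# Hedged invocation sketch (PV and file names invented):
#   python this_script.py RX:SIG signals.txt
# where signals.txt lists one BSA/signal PV name per line; blank lines and
# lines starting with '#' are skipped by the parser above.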
| nilq/baby-python | python |
#!/usr/bin/env python
# coding: utf-8
import rospy
import tf
from geometry_msgs.msg import PoseStamped
from sensor_msgs.msg import JointState
from std_msgs.msg import Float64
import numpy as np
class Pose_pub:
def __init__(self):
self._sub_pos = rospy.Subscriber("/head", PoseStamped, self.pose_callback)
self.pub = rospy.Publisher("master_joint_state", JointState, queue_size=10)
        # Get the controller's initial pose
self.zero_pose = rospy.wait_for_message("/head", PoseStamped).pose
quaternion = [self.zero_pose.orientation.x, self.zero_pose.orientation.y, self.zero_pose.orientation.z, self.zero_pose.orientation.w]
euler = tf.transformations.euler_from_quaternion(quaternion, axes='rzyx')
self.zero_pan = euler[0]
        # Run at 10 Hz
        self.r = rospy.Rate(10)
        # Scale factor for the controller position
        self.scale_fac = 1.
        # Offset of the arm end-effector position
        self.r_offset = 0.8
        self.q_old = np.array([0., 0., 0., 0., 0., 0.])
        # Maximum joint angular velocity
self.max_vel = 0.5
def pose_callback(self, message):
self.pose = message.pose
    # Inverse kinematics
def ik(self):
while not rospy.is_shutdown():
            # Target end-effector position
            r_ref = self.pose.position.z - self.zero_pose.position.z
            # Scale the position
            r_ref *= self.scale_fac
            # Apply the end-effector offset
            r_ref += self.r_offset
            # Check that the target stays within the movable range
r_ref = self.check_movable_range(r_ref)
theta = np.arccos(r_ref)
pan, tilt, _ = self.calc_pan_tilt_angle()
rospy.loginfo(pan)
q = np.array([-pan - self.zero_pan, theta, -2 * theta, -tilt + theta, 0, 0])
q = self.angular_vel_limit(q)
q_deg = np.rad2deg(q)
js = JointState()
js.name=["joint{}".format(i) for i in range(1,6)]
js.position = q_deg
self.pub.publish(js)
self.r.sleep()
    # Joint angular velocity limit
def angular_vel_limit(self, q):
q_diff = self.q_old - q
q_diff_max = np.abs(q_diff).max()
if(q_diff_max > self.max_vel):
rospy.loginfo("Too fast")
q_diff /= q_diff_max
q_diff *= self.max_vel
q = self.q_old - q_diff
self.q_old = q
return q
    # Pan/tilt angle calculation
def calc_pan_tilt_angle(self):
quaternion = [self.pose.orientation.x, self.pose.orientation.y, self.pose.orientation.z, self.pose.orientation.w]
euler = tf.transformations.euler_from_quaternion(quaternion, axes='rzyx')
return euler
def check_movable_range(self, r_ref):
if r_ref > 1:
rospy.loginfo("Out of movable range")
r_ref = 1
return r_ref
if __name__ == '__main__':
try:
rospy.init_node('pan_tilt_controller')
pose_pub = Pose_pub()
pose_pub.ik()
rospy.spin()
except rospy.ROSInterruptException:
pass
| nilq/baby-python | python |
"""
语言概念与机制
http://coolpython.net/python_interview/basic/py_concept_mechanism.html
"""
# 01 Explain the GIL (Global Interpreter Lock): CPython lets only one thread
#    execute Python bytecode at a time, so CPU-bound work does not scale
#    across threads (use multiprocessing for that)
# 02 Walk a directory tree and print the path of every file under it
import os
def print_directory_contents(path):
test02_dirList = os.listdir(path)
for childfile in test02_dirList:
childPath = os.path.join(path, childfile)
        # recurse into directories
if os.path.isdir(childPath):
print_directory_contents(childPath)
else:
print(childPath)
print_directory_contents('./')
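# An equivalent sketch using os.walk instead of manual recursion; it prints
# the same file paths (directory entries themselves are not printed).
def print_directory_contents_walk(path):
    for dirpath, dirnames, filenames in os.walk(path):
        for filename in filenames:
            print(os.path.join(dirpath, filename))
print_directory_contents_walk('./')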
def get_english_score():
return 90
def get_history_score():
return 95
def get_score(course):
    global_dic = globals()
    print(global_dic)
    funname = f'get_{course}_score'
    # If no matching function exists, fall back to a lambda returning 0
    # so the call does not raise
    func = global_dic.get(funname, lambda: 0)
    return func()
print(get_score('english'))
print(get_score('abc'))
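# A dict registry is a common alternative sketch for the globals() dispatch
# above: it avoids exposing the whole global namespace to lookups.
score_funcs = {'english': get_english_score, 'history': get_history_score}
def get_score_v2(course):
    return score_funcs.get(course, lambda: 0)()
print(get_score_v2('history'))  # 95
print(get_score_v2('abc'))      # 0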
for i, j in enumerate([3, 65, 2, 5, 6]):
print(i, j)
def abc():
print('aa')
print(abc())  # prints 'aa', then None -- abc() has no return value
import enum
| nilq/baby-python | python |
import gzip
import jsonpickle
from mdrsl.rule_models.eids.st_to_mt_model_merging import MergedSTMIDSClassifier
def store_merged_st_mids_model(merged_model_abs_file_name: str, merged_st_mids_classifier: MergedSTMIDSClassifier) -> None:
frozen = jsonpickle.encode(merged_st_mids_classifier)
with gzip.open(merged_model_abs_file_name, 'wt') as ofile:
ofile.write(frozen)
def load_merged_st_mids_model(merged_model_abs_file_name: str) -> MergedSTMIDSClassifier:
mids_classifier: MergedSTMIDSClassifier
with gzip.open(merged_model_abs_file_name, 'rt') as ifile:
file_contents = ifile.read()
mids_classifier = jsonpickle.decode(file_contents)
return mids_classifier
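# Hedged round-trip sketch: the same gzip + jsonpickle pattern works for any
# picklable object (the file name below is invented for illustration).
if __name__ == '__main__':
    import os
    import tempfile
    payload = {'rules': [1, 2, 3], 'name': 'demo'}
    path = os.path.join(tempfile.gettempdir(), 'demo_model.json.gz')
    with gzip.open(path, 'wt') as ofile:
        ofile.write(jsonpickle.encode(payload))
    with gzip.open(path, 'rt') as ifile:
        assert jsonpickle.decode(ifile.read()) == payload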
| nilq/baby-python | python |
#https://www.youtube.com/watch?v=2egPL5KFCC8&list=PLGKQkV4guDKEKZXAyeLQZjE6fulXHW11y&index=2
# JavaScript-rendered content cannot be pulled with BeautifulSoup; use Selenium for JavaScript pages
# a result of 0 can appear even for an existing attribute; see the image example below for how to fetch it
import requests
from bs4 import BeautifulSoup
url = "https://www.marketwatch.com/"
response = requests.get(url)
soup = BeautifulSoup(response.content, 'html.parser')
soup.find_all('div', class_ = "element element--latestNews")
a = len(soup.find_all('div', class_ = "element element--latestNews"))
# Always use len() to count elements/tags -- here the tag is div. To count divs of the same name we match on an attribute, in this case a class (with an id there is no need to count, since ids are unique). The class "element element--latestNews" gives len == 1
b = soup.find_all('div', class_ = "element element--latestNews")
c = soup.find('a').get('href')
# HTML links are defined with the <a> tag and the address lives in the href attribute; the line above extracts only one link -- to extract them all, use a loop (see the sketch below)
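# Sketch of that loop: collect every href on the page, skipping <a> tags
# that have no href attribute.
all_links = [a.get('href') for a in soup.find_all('a') if a.get('href')]
print(len(all_links))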
#linkes()
e = soup.find_all('ul')
f = len(soup.find_all('ul'))
g = soup.find_all('ul')[0]
print(5*'\n')
print(soup.find_all('ul', class_ ="list list--menu j-list"))
print(len(soup.find_all('ul', class_ ="list list--menu j-list")))
print(5*'\n')
#https://www.w3schools.com/html/html_lists.asp
#https://www.youtube.com/watch?v=5IxadAxTS04&list=PLGKQkV4guDKEKZXAyeLQZjE6fulXHW11y&index=3
#listas()
def imagem():
#print(soup.find_all('img'))
print(soup.find('img').get('src'))
    print(soup.find('img').get('data-src')) # .get() did not work for the 'data-src' attribute here; use the attrs lookup below instead
print(soup.find('img', attrs = {'data-src' : True}))
#print(soup.findAll('img', attrs = {'data-src' : True}))
#https://www.w3schools.com/html/html_images.asp
imagem()
| nilq/baby-python | python |
# coding: utf-8
"""
jatdb
JSON API to DB: Fetch JSON from APIs and send to a TinyDB database. # noqa: E501
OpenAPI spec version: 0.0.2
Contact: Nathan@Genetzky.us
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import unittest
import jatdb_client
from jatdb_client.api.content_api import ContentApi # noqa: E501
from jatdb_client.rest import ApiException
class TestContentApi(unittest.TestCase):
"""ContentApi unit test stubs"""
def setUp(self):
self.api = jatdb_client.api.content_api.ContentApi() # noqa: E501
def tearDown(self):
pass
def test_content_get(self):
"""Test case for content_get
"""
pass
def test_content_post(self):
"""Test case for content_post
"""
pass
def test_content_put(self):
"""Test case for content_put
"""
pass
if __name__ == '__main__':
unittest.main()
| nilq/baby-python | python |
import json
import os
import sys
import re
import pickle
import logging
import gzip
import shutil
import urllib.request
from tqdm import tqdm
from collections import defaultdict
from utils.data_utils import load_jsonl_file, create_pkl_file, load_pkl_file
module_path = os.path.dirname(os.path.abspath(__file__))
# --------------------------------------------- Pipelines ----------------------------------------------------
class DataPreprocessingRoutine:
'''
Data Preparation Routine
This class holds utilities that execute a data processing routine that:
1. Loads Natural Questions simplified training dataset from local directory
2. Filters the examples to only those relevant to retriever evaluation (has short_answer, resolves multiple answers)
3. Cleans, parses, and extracts relevant data fields
4. Saves the prepared data to a local directory
Args:
        retriever_eval_only (bool) - indicates if the pipeline includes short answer AND no answer (False) or short answer only (True)
raw_data_path (str) - path to unzipped simplified nq jsonl file
'''
def __init__(self, raw_data_path, retriever_eval_only=True):
self.mode = retriever_eval_only
self.raw_data_path = raw_data_path
def run(self):
logging.info('Data Processing Routine Started')
        # check if the file already exists
ext = "" if self.mode else "_fullsys"
outfile = module_path+f'/data/stage_data/extracted_clean_data{ext}.pkl' ## TO-DO: Make this implicit!
if not os.path.exists(outfile):
# run pipeline
self.load_data()
self.filter_nq_train_data()
self.extract_data()
self.drop_longer_answers()
# save data
os.makedirs(module_path+'/data/stage_data', exist_ok=True)
self.save_data(outfile)
logging.info('Data Processing Routine Finished')
else:
logging.info('This file has already been created. Skipping DataPreprocessing and using existing file.')
return
def load_data(self):
'''
Loads raw, zipped jsonl data from disk
'''
self.data = load_jsonl_file(filepath=self.raw_data_path)
return
def filter_nq_train_data(self):
'''
This method takes the full corpus of NQ training data and filters examples that
are not relevant for proper retriever evaluation, including:
a.) records that do not have at least one short answer are discarded and
b.) records that have more than one short answer are truncated to only use the first short answer.
These filters are in line with standard retriever evaluation techniques as well as
Google's suggested reference implementation:
https://github.com/google-research/language/blob/master/language/question_answering/
decatt_docreader/preprocessing/create_nq_short_pipeline_examples.py
Args:
raw_data (list) - python object representation of the raw jsonl file
retriever_eval_only (bool) - if False, include short answer AND no answer
Returns:
filtered_data (list) - a refined version of the raw jsonl file
'''
logging.info('Filtering Data')
multi_count = 0
filtered_data = []
for i, rec in enumerate(tqdm(self.data)):
# ignore questions that dont have at least one short answer
if len(rec['annotations'][0]['short_answers']) == 0 and self.mode==True:
continue
# if an annotation contains multiple short answers, keep only the first
if len(rec['annotations'][0]['short_answers']) > 1:
multi_count += 1
# extract first dict and keep as one-element list
temp = []
short_ans = rec['annotations'][0]['short_answers'][0]
temp.append(short_ans)
# overwrite
new_rec = rec.copy()
new_rec['annotations'][0]['short_answers'] = temp
filtered_data.append(new_rec)
else:
filtered_data.append(rec)
logging.info(f'{len(self.data)-len(filtered_data)} records (out of {len(self.data)}) did not have at least one short answer and were dropped.')
        logging.info(f'{multi_count} questions had multiple short answers that were affected by truncation.')
# overwrite data attribute
self.data = filtered_data
return
def extract_data(self):
'''
This method loops through a list of NQ simplified records and extracts only the data items
needed for retriever evaluation including:
- example_id
- document_title (extracted from document_url using extract_wiki_title())
- document_url
- question_text
- short_answer (converted to text using get_short_answer_from_span())
- document_text_clean (stripped of remaining HTML tags using clean_document_text())
Args:
data (list) - a list of filtered jsonl records from NQ simplified dataset
Returns:
extracted_data (list) - a list of cleaned jsonl records
'''
logging.info('Extracting Data')
extracted_data = []
for i, rec in enumerate(tqdm(self.data)):
try:
example_id = rec['example_id']
document_url = rec['document_url']
question_text = rec['question_text']
short_answer = self.get_short_answer_from_span(rec)
document_text_clean = self.clean_document_text(rec['document_text'])
document_title = self.extract_wiki_title(rec['document_url'])
                # to ensure our dataset is completely solvable, this logic weeds out erroneous labels
# ex. 'Mickey Hart </Li> <Li> Bill Kreutzmann </Li> <Li> John Mayer </Li> was selected as long AND short answer
# when really each of these should have been their own short answers
if short_answer not in document_text_clean:
continue
new_rec = {'example_id': example_id,
'document_title': document_title,
'document_url': document_url,
'question_text': question_text,
'short_answer': short_answer,
'document_text_clean': document_text_clean}
extracted_data.append(new_rec)
except Exception as e:
logging.info(str(e))
continue
logging.info(f'{len(extracted_data)} of the {len(self.data)} records are complete and solvable.')
# overwrite data attribute
self.data = extracted_data
return
def drop_longer_answers(self):
'''
This method loops through a list of NQ simplified records and drops any records where the short answer
contains more than 5 tokens.
Answers with many tokens often resemble extractive snippets rather than canonical answers, so we discard
answers with more than 5 tokens: https://arxiv.org/pdf/1906.00300.pdf
Args:
data (list) - a list of cleaned jsonl records from NQ simplified dataset
Returns:
extracted_data (list) - a list of cleaned jsonl records
'''
logging.info('Dropping Long Answers')
slim_data = []
for i, rec in enumerate(tqdm(self.data)):
if len(rec['short_answer'].split(' ')) <= 5:
slim_data.append(rec)
logging.info(f'{len(self.data) - len(slim_data)} records were "long" short-answers and were dropped.')
logging.info(f'{len(slim_data)} records remain.')
# overwrite data attribute
self.data = slim_data
return
def save_data(self, outfile):
'''
Saves the data attribute to a pickle local file
'''
create_pkl_file(self.data, outfile)
return
@staticmethod
def get_short_answer_from_span(example):
'''
        Use the short answer span from a NQ json record to retrieve
and return the corresponding short answer text.
Args:
example - a jsonl record from NQ simplified dataset
Returns:
short_answer (string) - the string representation of text in the short answer span
'''
sa_field = example['annotations'][0]['short_answers']
if len(sa_field) >= 1:
short_answer_span = sa_field[0]
short_answer = " ".join(example['document_text'].split(" ")\
[short_answer_span['start_token']:short_answer_span['end_token']])
else:
short_answer = ''
return short_answer
@staticmethod
def clean_document_text(text):
'''
This function applies a regular expression to an input text string to remove
any characters wrapped in <> with the goal of stripping HTML tags from a string.
Args:
text (string)
Returns:
text (string) - cleaned text
'''
cleaner = re.compile('<.*?>')
return re.sub(cleaner, '', text)
@staticmethod
def extract_wiki_title(document_url):
'''
This function applies a regular expression to an input wikipedia article URL
to extract and return the article title.
Args:
document_url (string)
Returns:
title (string) - article title
'''
pattern = 'title=(.*?)&'
try:
title = re.search(pattern, document_url).group(1)
except AttributeError:
title = 'No Title Found'
return title
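# Hedged illustration of the two regex helpers above (sample strings invented):
#   DataPreprocessingRoutine.clean_document_text('<P> Hello <b>world</b> </P>')
#       -> ' Hello world '
#   DataPreprocessingRoutine.extract_wiki_title(
#       'https://en.wikipedia.org/w/index.php?title=Grateful_Dead&oldid=1')
#       -> 'Grateful_Dead'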
class DataCompilationRoutine:
'''
Data Compilation Utility Pipeline
    This class holds utilities to execute a data routine that:
1. Loads pre-cleaned data from staging
    2. Deduplicates Wikipedia articles and finalizes them for loading into ElasticSearch
3. Creates q/a records to be used for evaluation
4. Saves those data artifacts to eval_data directory
Args:
        retriever_eval_only (bool) - indicates if the pipeline includes short answer AND no answer (False) or short answer only (True)
'''
def __init__(self, clean_data_path=None, retriever_eval_only=True):
self.mode = retriever_eval_only
# set clean data path
ext = "" if self.mode else "_fullsys"
self.clean_data_path = clean_data_path if clean_data_path else module_path+f'/data/stage_data/extracted_clean_data{ext}.pkl'
def run(self):
logging.info('Data Compilation Routine Started')
# check if exists
ext = "" if self.mode else "_fullsys"
outfile_ec = module_path+f'/data/eval_data/evidence_corpus{ext}.pkl'
outfile_rec = module_path+f'/data/eval_data/qa_records{ext}.pkl'
        if not os.path.exists(outfile_ec) or not os.path.exists(outfile_rec):  # check both output artifacts
self.load_data()
self.compile_evidence_corpus()
self.compile_qa_records()
# save data
os.makedirs(module_path+'/data/eval_data', exist_ok=True)
self.save_data(self.evidence_corpus, outfile_ec)
self.save_data(self.qa_records, outfile_rec)
logging.info('Data Compilation Routine Finished')
else:
logging.info('Stage data files have already been created, skipping compilation.')
def load_data(self):
'''
Loads clean, extracted pickle file from disk
'''
self.data = load_pkl_file(filepath=self.clean_data_path)
return
def compile_evidence_corpus(self):
'''
This method compiles all unique wikipedia documents into a dictionary
Args:
extracted_data (list)
Returns:
evidence_docs (dict)
'''
logging.info('Compiling Evidence Docs')
unique_titles = []
evidence_docs = []
for i, rec in enumerate(tqdm(self.data)):
if rec['document_title'] not in unique_titles:
unique_titles.append(rec['document_title'])
fields = {'document_title': rec['document_title'],
'document_url': rec['document_url'],
'document_text_clean': rec['document_text_clean']}
evidence_docs.append(fields)
logging.info(f'Of the {len(self.data)} records, there are {len(evidence_docs)} unique Wikipedia articles.')
self.evidence_corpus = evidence_docs
return
def compile_qa_records(self):
'''
This method loops through the extracted_clean_data list and removes the document_text_clean field
from each record
Args:
extracted_data (list)
Returns:
slim_data (list)
'''
logging.info('Compiling QA Records')
qa_records = []
for i, rec in enumerate(tqdm(self.data)):
new_rec = {k:v for k,v in rec.items() if k != 'document_text_clean'}
qa_records.append(new_rec)
self.qa_records = qa_records
return
@staticmethod
def save_data(obj, outfile):
'''
Saves the obj to a pickle local file
'''
create_pkl_file(obj, outfile)
return
| nilq/baby-python | python |
KEYWORDS = ["dev", "backup", "develop", "int", "internal", "staging", "test"]
with open("../../roots.txt") as roots:
with open("targets.txt", "w+") as targets:
for domain in roots:
for keyword in KEYWORDS:
target = domain.strip("\n") + "-" + keyword.strip("\n") + ".oss.eu-west-1.aliyuncs.com" + "\n"
targets.write(target)
| nilq/baby-python | python |
# -*- coding: utf-8 -*-
"""
Module docstring
TODO:
* Write module docstring
"""
from .player.dealer import Dealer
from .player.player import Player
from .carddeck.deck import Deck
class Game():
"""Class to represent the blackjack Game"""
def __init__(self):
self.dealer = Dealer()
self.player = Player()
self.deck = Deck()
def __str__(self):
result = ''
result += f'Dealer:\n{str(self.dealer)}\n\n'
result += f'Player:\n{str(self.player)}\n\n'
result += f'Deck:\n{str(self.deck)}'
return result
def deal(self):
        # Clear both the dealer's and the player's hands
self.dealer.hand.clear()
self.player.hand.clear()
# Populate and shuffle deck
self.deck.populate()
# Deal 2 cards to the dealer.
self.dealer.hand.add_card(self.deck.deal_card())
self.dealer.hand.add_card(self.deck.deal_card())
# Deal 2 cards to the player.
self.player.hand.add_card(self.deck.deal_card())
self.player.hand.add_card(self.deck.deal_card())
def hit(self):
card = self.deck.deal_card()
card.flip()
self.player.hand.add_card(card)
def stand(self):
# Return value if <= 21 else return None
return self.player.hand.value if self.player.hand.value <= 21 else None
def play_dealer(self):
# Flip dealers cards over
# self.dealer.hand.cards[1].flip()
        # Dealer always hits until the hand's value meets or exceeds 17
while self.dealer.hand.value < 17:
self.dealer.hand.add_card(self.deck.deal_card())
def end_round(self):
        '''Return True if the player won, False if the dealer won, None on a tie.'''
if not self.player.hand.bust:
# Player is not bust
if self.dealer.hand.bust or \
self.player.hand.value > self.dealer.hand.value:
                # Dealer is bust or the player's hand is greater
self.player.balance += self.player.bet * 2
return True
elif self.player.hand.value == self.dealer.hand.value:
# Tie
self.player.balance += self.player.bet
return None
return False
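# Hedged usage sketch (commented out; assumes Dealer/Player/Deck behave as
# referenced above and that the player has placed a bet):
# game = Game()
# game.deal()
# game.hit()                 # player draws one card
# game.play_dealer()         # dealer draws until reaching 17
# result = game.end_round()  # True = player wins, False = dealer, None = tie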
| nilq/baby-python | python |
from flask_pymongo import PyMongo
from flask_compress import Compress
from flask_cors import CORS
from flask_bcrypt import Bcrypt
from itsdangerous import URLSafeTimedSerializer
mongo = PyMongo()
flask_bcrypt = Bcrypt()
flask_compress = Compress()
flask_cors = CORS(resources={"/api/*": {"origins": "*"}})
RECAPTCHA_SITEKEY = None
ImgurObject = None
Serialize_Secret_Keys = [""]
serializer = URLSafeTimedSerializer(Serialize_Secret_Keys)
| nilq/baby-python | python |
#
# Jasy - Web Tooling Framework
# Copyright 2013-2014 Sebastian Werner
#
import json
import copy
class AbstractNode(list):
__slots__ = [
# core data
"line", "type", "tokenizer", "start", "end", "rel", "parent",
# dynamic added data by other modules
"comments", "scope", "values",
# node type specific
"value", "parenthesized", "fileId", "params",
"name", "initializer", "condition", "assignOp",
"thenPart", "elsePart", "statements",
"statement", "variables", "names", "postfix"
]
    def __init__(self, tokenizer=None, type=None, args=()):  # tuple default avoids a shared mutable argument
list.__init__(self)
self.start = 0
self.end = 0
self.line = None
if tokenizer:
token = getattr(tokenizer, "token", None)
if token:
# We may define a custom type but use the same positioning as another token
# e.g. transform curlys in block nodes, etc.
self.type = type if type else getattr(token, "type", None)
self.line = token.line
# Start & end are file positions for error handling.
self.start = token.start
self.end = token.end
else:
self.type = type
self.line = tokenizer.line
self.start = None
self.end = None
self.tokenizer = tokenizer
elif type:
self.type = type
for arg in args:
self.append(arg)
def getFileName(self):
"""Traverses up the tree to find a node with a fileId and returns it."""
node = self
while node:
fileId = getattr(node, "fileId", None)
if fileId is not None:
return fileId
node = getattr(node, "parent", None)
def getUnrelatedChildren(self):
"""Collects all unrelated children."""
collection = []
for child in self:
if not hasattr(child, "rel"):
collection.append(child)
return collection
def getChildrenLength(self, filter=True):
"""Number of (per default unrelated) children."""
count = 0
for child in self:
if not filter or not hasattr(child, "rel"):
count += 1
return count
def remove(self, kid):
"""Removes the given kid."""
if kid not in self:
raise Exception("Given node is no child!")
if hasattr(kid, "rel"):
delattr(self, kid.rel)
del kid.rel
del kid.parent
list.remove(self, kid)
def insert(self, index, kid):
"""Inserts the given kid at the given index."""
if index is None:
return self.append(kid)
if hasattr(kid, "parent"):
kid.parent.remove(kid)
kid.parent = self
return list.insert(self, index, kid)
def insertAll(self, index, kids):
"""Inserts all kids starting with the given index."""
if index is None:
for kid in list(kids):
self.append(kid)
else:
for pos, kid in enumerate(list(kids)):
self.insert(index + pos, kid)
def insertAllReplace(self, orig, kids):
"""Inserts all kids at the same position as the original node (which is removed afterwards)"""
index = self.index(orig)
for pos, kid in enumerate(list(kids)):
self.insert(index + pos, kid)
self.remove(orig)
def append(self, kid, rel=None):
"""Appends the given kid with an optional relation hint."""
# kid can be null e.g. [1, , 2].
if kid:
if hasattr(kid, "parent"):
kid.parent.remove(kid)
# Debug
if not isinstance(kid, AbstractNode):
raise Exception("Invalid kid: %s" % kid)
if hasattr(kid, "tokenizer"):
if hasattr(kid, "start"):
if not hasattr(self, "start") or self.start is None or kid.start < self.start:
self.start = kid.start
if hasattr(kid, "end"):
if not hasattr(self, "end") or self.end is None or self.end < kid.end:
self.end = kid.end
kid.parent = self
# alias for function
if rel is not None:
setattr(self, rel, kid)
setattr(kid, "rel", rel)
# Block None kids when they should be related
if not kid and rel:
return
return list.append(self, kid)
def replace(self, kid, repl):
"""Replaces the given kid with a replacement kid."""
if repl in self:
self.remove(repl)
self[self.index(kid)] = repl
if hasattr(kid, "rel"):
repl.rel = kid.rel
setattr(self, kid.rel, repl)
# cleanup old kid
delattr(kid, "rel")
elif hasattr(repl, "rel"):
# delete old relation on new child
delattr(repl, "rel")
delattr(kid, "parent")
repl.parent = self
return kid
def toXml(self, format=True, indent=0, tab=" "):
"""Converts the node to XML."""
lead = tab * indent if format else ""
innerLead = tab * (indent + 1) if format else ""
lineBreak = "\n" if format else ""
relatedChildren = []
attrsCollection = []
for name in self.__slots__:
# "type" is used as node name - no need to repeat it as an attribute
# "parent" is a relation to the parent node - for serialization we ignore these at the moment
# "rel" is used internally to keep the relation to the parent - used by nodes which need to keep track of specific children
# "start" and "end" are for debugging only
if hasattr(self, name) and name not in ("type", "parent", "comments", "selector", "rel", "start", "end") and name[0] != "_":
value = getattr(self, name)
if isinstance(value, AbstractNode):
if hasattr(value, "rel"):
relatedChildren.append(value)
elif type(value) in (bool, int, float, str, list, set, dict):
if isinstance(value, bool):
value = "true" if value else "false"
elif type(value) in (int, float):
value = str(value)
elif type(value) in (list, set, dict):
if isinstance(value, dict):
value = value.keys()
if len(value) == 0:
continue
try:
value = ",".join(value)
except TypeError as ex:
raise Exception("Invalid attribute list child at: %s: %s" % (name, ex))
attrsCollection.append('%s=%s' % (name, json.dumps(value)))
attrs = (" " + " ".join(attrsCollection)) if len(attrsCollection) > 0 else ""
comments = getattr(self, "comments", None)
scope = getattr(self, "scope", None)
selector = getattr(self, "selector", None)
if len(self) == 0 and len(relatedChildren) == 0 and (not comments or len(comments) == 0) and not scope and not selector:
result = "%s<%s%s/>%s" % (lead, self.type, attrs, lineBreak)
else:
result = "%s<%s%s>%s" % (lead, self.type, attrs, lineBreak)
if comments:
for comment in comments:
result += '%s<comment context="%s" variant="%s">%s</comment>%s' % (innerLead, comment.context, comment.variant, comment.text, lineBreak)
if scope:
for statKey in scope:
statValue = scope[statKey]
if statValue is not None and len(statValue) > 0:
if isinstance(statValue, set):
statValue = ",".join(statValue)
elif isinstance(statValue, dict):
statValue = ",".join(statValue.keys())
result += '%s<stat name="%s">%s</stat>%s' % (innerLead, statKey, statValue, lineBreak)
if selector:
for entry in selector:
result += '%s<selector>%s</selector>%s' % (innerLead, entry, lineBreak)
for child in self:
if not child:
result += "%s<none/>%s" % (innerLead, lineBreak)
elif not hasattr(child, "rel"):
result += child.toXml(format, indent + 1)
elif not child in relatedChildren:
raise Exception("Oops, irritated by non related: %s in %s - child says it is related as %s" % (child.type, self.type, child.rel))
for child in relatedChildren:
result += "%s<%s>%s" % (innerLead, child.rel, lineBreak)
result += child.toXml(format, indent + 2)
result += "%s</%s>%s" % (innerLead, child.rel, lineBreak)
result += "%s</%s>%s" % (lead, self.type, lineBreak)
return result
def __deepcopy__(self, memo):
"""Used by deepcopy function to clone AbstractNode instances."""
CurrentClass = self.__class__
# Create copy
if hasattr(self, "tokenizer"):
result = CurrentClass(tokenizer=self.tokenizer)
else:
result = CurrentClass(type=self.type)
# Copy children
for child in self:
if child is None:
list.append(result, None)
else:
# Using simple list appends for better performance
childCopy = copy.deepcopy(child, memo)
childCopy.parent = result
list.append(result, childCopy)
# Sync attributes
# Note: "parent" attribute is handled by append() already
for name in self.__slots__:
if hasattr(self, name) and not name in ("parent", "tokenizer"):
value = getattr(self, name)
if value is None:
pass
elif type(value) in (bool, int, float, str):
setattr(result, name, value)
elif type(value) in (list, set, dict, CurrentClass):
setattr(result, name, copy.deepcopy(value, memo))
# Scope can be assigned (will be re-created when needed for the copied node)
elif name == "scope":
result.scope = self.scope
return result
def getSource(self):
"""Returns the source code of the node."""
if not self.tokenizer:
raise Exception("Could not find source for node '%s'" % node.type)
if getattr(self, "start", None) is not None:
if getattr(self, "end", None) is not None:
return self.tokenizer.source[self.start:self.end]
return self.tokenizer.source[self.start:]
if getattr(self, "end", None) is not None:
return self.tokenizer.source[:self.end]
return self.tokenizer.source[:]
# Map Python built-ins
__repr__ = toXml
__str__ = toXml
def __eq__(self, other):
return self is other
def __bool__(self):
return True
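# Hedged usage sketch (node types invented): build a tiny tree and serialize it.
if __name__ == '__main__':
    root = AbstractNode(type="block")
    child = AbstractNode(type="value")
    child.value = "42"
    root.append(child, rel="statement")
    print(root.toXml())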
| nilq/baby-python | python |
from useless import base
from useless.base import *
class Resolver(CMakePackage):
def __init__(self):
super().__init__()
self.name = 'openvdb'
self.depends(require('openexr'))
self.depends(require('tbb'))
self.depends(require('boost'))
self.set('USE_BLOSC','OFF')
self.set('USE_EXR', 'ON')
def setup(self, src_dir, build_dir, install_dir):
super().setup(src_dir, build_dir, install_dir)
self.set('Boost_ROOT', src_dir+'/boost/')
# self.set('CMAKE_POSITION_INDEPENDENT_CODE', 'TRUE')
def download(self):
self.checkpoint('download', lambda: download_git(
'https://github.com/AcademySoftwareFoundation/openvdb', self.src_dir, tag='v8.0.0'))
| nilq/baby-python | python |
# -*- coding: utf-8 -*-
""" Diffraction image analysis """
from .alignment import (
align,
ialign,
shift_image,
itrack_peak,
masked_register_translation,
)
from .calibration import powder_calq
from .correlation import mnxc, xcorr
from .metrics import (
snr_from_collection,
isnr,
mask_from_collection,
combine_masks,
mask_image,
trimr,
triml,
)
from .powder import azimuthal_average
from .symmetry import nfold, reflection
| nilq/baby-python | python |
class AbstractObject(object):
def __init__(self):
pass
def get_class(self, universe):
raise NotImplementedError("Subclasses need to implement get_class(universe).")
def get_object_layout(self, universe):
raise NotImplementedError(
"Subclasses need to implement get_object_layout(universe)."
)
@staticmethod
def is_invokable():
return False
def __str__(self):
from som.vm.current import current_universe
return "a " + self.get_class(current_universe).get_name().get_embedded_string()
| nilq/baby-python | python |
"""
Tests for Galaxy Queue Worker
"""
| nilq/baby-python | python |
from io import BytesIO
import json
import cgi
from pathlib import Path
from abeja.common.docker_image_name import DockerImageName, ALL_GPU_19_04, ALL_CPU_19_10
from abeja.training import JobDefinition, JobDefinitionVersion # noqa: F401
def test_job_definition_version(
requests_mock,
api_base_url,
job_definition_version_factory,
job_definition_response) -> None:
version = job_definition_version_factory() # type: JobDefinitionVersion
res = job_definition_response(
version.organization_id, version.job_definition_id)
requests_mock.get(
'{}/organizations/{}/training/definitions/{}?include_jobs=false'.format(
api_base_url,
version.organization_id,
version.job_definition_id),
json=res)
definition = version.job_definition
assert definition
assert definition.organization_id == version.organization_id
assert definition.job_definition_id == version.job_definition_id
def test_job_definition_versions(job_definition_factory) -> None:
definition = job_definition_factory() # type: JobDefinition
adapter = definition.job_definition_versions()
assert adapter.organization_id == definition.organization_id
assert adapter.job_definition_id == definition.job_definition_id
def test_get_job_definition_version(
requests_mock,
api_base_url,
job_definition_factory,
job_definition_version_response) -> None:
definition = job_definition_factory() # type: JobDefinition
adapter = definition.job_definition_versions()
res = job_definition_version_response(
adapter.organization_id,
adapter.job_definition_id,
environment=None
)
version_id = res['job_definition_version']
requests_mock.get(
'{}/organizations/{}/training/definitions/{}/versions/{}'.format(
api_base_url,
adapter.organization_id,
adapter.job_definition_name,
version_id),
json=res)
version = adapter.get(job_definition_version_id=version_id)
assert version
assert version.organization_id == adapter.organization_id
assert version.job_definition_id == adapter.job_definition_id
assert version.job_definition_version_id == version_id
assert version.handler == res['handler']
assert version.image == DockerImageName.parse(res['image'])
assert version.environment == {}
assert version.created_at == res['created_at']
assert version.modified_at == res['modified_at']
assert version.job_definition
assert version.job_definition_id == adapter.job_definition_id
def test_get_job_definition_versions(
requests_mock,
api_base_url,
job_definition_factory,
job_definition_version_response) -> None:
definition = job_definition_factory() # type: JobDefinition
adapter = definition.job_definition_versions()
res1 = job_definition_version_response(
adapter.organization_id,
adapter.job_definition_id,
environment=None
)
res2 = job_definition_version_response(
adapter.organization_id,
adapter.job_definition_id,
environment={'foo': '1'}
)
requests_mock.get(
'{}/organizations/{}/training/definitions/{}/versions'.format(
api_base_url,
adapter.organization_id,
adapter.job_definition_name),
json={
'entries': [
res1,
res2]})
it = adapter.list()
assert len(it) == 2
versions = list(it)
assert len(versions) == 2
for version, res in zip(versions, [res1, res2]):
assert version.organization_id == adapter.organization_id
assert version.job_definition_id == adapter.job_definition_id
assert version.job_definition_version_id == res['job_definition_version']
assert version.handler == res['handler']
assert version.image == DockerImageName.parse(res['image'])
        expected_environment = {} if res['environment'] is None else res['environment']
        assert version.environment == expected_environment
assert version.created_at == res['created_at']
assert version.modified_at == res['modified_at']
assert version.job_definition
assert version.job_definition_id == adapter.job_definition_id
def test_get_job_definition_versions_filter_archived(
requests_mock,
api_base_url,
job_definition_factory,
job_definition_version_response) -> None:
definition = job_definition_factory() # type: JobDefinition
adapter = definition.job_definition_versions()
res1 = job_definition_version_response(
adapter.organization_id,
adapter.job_definition_id,
environment=None
)
requests_mock.get(
'{}/organizations/{}/training/definitions/{}/versions?filter_archived=exclude_archived'.format(
api_base_url,
adapter.organization_id,
adapter.job_definition_name),
json={
'entries': [res1]})
versions = list(adapter.list(filter_archived=True))
assert len(versions) == 1
def test_create_job_definition_version_zip(
requests_mock,
api_base_url,
make_zip_content,
job_definition_factory,
job_definition_version_response) -> None:
definition = job_definition_factory() # type: JobDefinition
adapter = definition.job_definition_versions()
res = job_definition_version_response(
adapter.organization_id, adapter.job_definition_id)
requests_mock.post(
'{}/organizations/{}/training/definitions/{}/versions'.format(
api_base_url, adapter.organization_id, adapter.job_definition_name),
json=res)
zip_content = make_zip_content({'train.py': b'print(1)'})
version = adapter.create(
BytesIO(zip_content), 'train:main', ALL_GPU_19_04, {
'key': 'value'}, description='new version')
assert version
assert version.job_definition_version_id == res['job_definition_version']
assert version.job_definition
assert version.job_definition_id == adapter.job_definition_id
history = requests_mock.request_history
assert len(history) == 1
fs = cgi.FieldStorage(
fp=BytesIO(
history[0].body),
headers=history[0].headers,
environ={
'REQUEST_METHOD': 'POST'})
item = fs['parameters']
parameters = json.loads(item.value.decode('utf-8'))
assert item.headers['Content-Type'] == 'application/json'
assert parameters['handler'] == 'train:main'
assert parameters['image'] == 'abeja-inc/all-gpu:19.04'
assert parameters['environment'] == {'key': 'value'}
item = fs['source_code']
assert item.headers['Content-Type'] == 'application/zip'
assert item.value == zip_content
def test_create_job_definition_version_files(
requests_mock,
api_base_url,
tmpdir,
make_zip_content,
job_definition_factory,
job_definition_version_response) -> None:
definition = job_definition_factory() # type: JobDefinition
adapter = definition.job_definition_versions()
# Make some files
files = []
with tmpdir.as_cwd():
d = Path('work')
d.mkdir(parents=True, exist_ok=True)
path = d / 'test.txt'
path.write_bytes(b'test')
files.append(str(path))
path = d / 'train.py'
path.write_bytes(b'def handler(): pass')
files.append(str(path))
res = job_definition_version_response(
adapter.organization_id, adapter.job_definition_id)
requests_mock.post(
'{}/organizations/{}/training/definitions/{}/versions'.format(
api_base_url, adapter.organization_id, adapter.job_definition_name),
json=res)
version = adapter.create(
files, 'train:handler', ALL_CPU_19_10, {
'KEY': 'VALUE'}, description='new version')
assert version
assert version.job_definition_version_id == res['job_definition_version']
assert version.job_definition
assert version.job_definition_id == adapter.job_definition_id
history = requests_mock.request_history
assert len(history) == 1
fs = cgi.FieldStorage(
fp=BytesIO(
history[0].body),
headers=history[0].headers,
environ={
'REQUEST_METHOD': 'POST'})
item = fs['parameters']
parameters = json.loads(item.value.decode('utf-8'))
assert item.headers['Content-Type'] == 'application/json'
assert parameters['handler'] == 'train:handler'
assert parameters['image'] == 'abeja-inc/all-cpu:19.10'
assert parameters['environment'] == {'KEY': 'VALUE'}
item = fs['source_code']
assert item.headers['Content-Type'] == 'application/zip'
assert item.value
def test_update_job_definition_version(
requests_mock,
api_base_url,
job_definition_factory,
job_definition_version_response) -> None:
definition = job_definition_factory() # type: JobDefinition
adapter = definition.job_definition_versions()
res = job_definition_version_response(
adapter.organization_id, adapter.job_definition_id)
version_id = res['job_definition_version']
requests_mock.patch(
'{}/organizations/{}/training/definitions/{}/versions/{}'.format(
api_base_url,
adapter.organization_id,
adapter.job_definition_name,
version_id),
json=res)
description = 'new version'
version = adapter.update(version_id, description)
assert version
assert version.job_definition_version_id == version_id
assert version.job_definition
assert version.job_definition_id == adapter.job_definition_id
history = requests_mock.request_history
assert len(history) == 1
assert history[0].json() == {'description': description}
def test_archive_job_definition_version(
requests_mock,
api_base_url,
training_api_client,
job_definition_factory) -> None:
definition = job_definition_factory() # type: JobDefinition
adapter = definition.job_definition_versions()
requests_mock.post(
'{}/organizations/{}/training/definitions/{}/versions/1/archive'.format(
api_base_url,
adapter.organization_id,
adapter.job_definition_name),
json={
'message': "test-1 archived"})
adapter.archive(job_definition_version_id=1)
assert requests_mock.called
def test_unarchive_job_definition_version(
requests_mock,
api_base_url,
training_api_client,
job_definition_factory) -> None:
definition = job_definition_factory() # type: JobDefinition
adapter = definition.job_definition_versions()
requests_mock.post(
'{}/organizations/{}/training/definitions/{}/versions/1/unarchive'.format(
api_base_url,
adapter.organization_id,
adapter.job_definition_name),
json={
'message': "test-1 unarchived"})
adapter.unarchive(job_definition_version_id=1)
assert requests_mock.called
def test_delete_job_definition_version(
requests_mock,
api_base_url,
training_api_client,
job_definition_factory) -> None:
definition = job_definition_factory() # type: JobDefinition
adapter = definition.job_definition_versions()
requests_mock.delete(
'{}/organizations/{}/training/definitions/{}/versions/1'.format(
api_base_url,
adapter.organization_id,
adapter.job_definition_name),
json={
'message': "test-1 deleted"})
adapter.delete(job_definition_version_id=1)
assert requests_mock.called
| nilq/baby-python | python |
"""
Import as:
import dataflow.core.dag_adapter as dtfcodaada
"""
import logging
from typing import Any, Dict, List
import core.config as cconfig
import dataflow.core.builders as dtfcorbuil
import dataflow.core.dag as dtfcordag
import dataflow.core.node as dtfcornode
import helpers.hdbg as hdbg
import helpers.hprint as hprint
_LOG = logging.getLogger(__name__)
class DagAdapter(dtfcorbuil.DagBuilder):
"""
Adapt a DAG builder by overriding part of the config and appending nodes.
"""
def __init__(
self,
dag_builder: dtfcorbuil.DagBuilder,
overriding_config: Dict[str, Any],
nodes_to_insert: List[dtfcornode.Node],
nodes_to_append: List[dtfcornode.Node],
**kwargs,
):
"""
Constructor.
:param dag_builder: a `DagBuilder` containing a single sink
:param overriding_config: a template `Config` containing the fields to
override. Note that this `Config` can still be a template, i.e.,
containing dummies that are finally overwritten by callers.
        :param nodes_to_insert: list of tuples `(node name, constructor)` storing
            the nodes to insert at the head of the DAG created from `dag_builder`
        :param nodes_to_append: list of tuples `(node name, constructor)` storing
            the nodes to append to the DAG created from `dag_builder`.
            The node constructor function should accept only the `nid` and the
            configuration dict, while all the other inputs need to be already
            specified.
"""
super().__init__()
hdbg.dassert_isinstance(dag_builder, dtfcorbuil.DagBuilder)
self._dag_builder = dag_builder
hdbg.dassert_isinstance(overriding_config, cconfig.Config)
self._overriding_config = overriding_config
hdbg.dassert_container_type(nodes_to_insert, list, tuple)
self._nodes_to_insert = nodes_to_insert
hdbg.dassert_container_type(nodes_to_append, list, tuple)
self._nodes_to_append = nodes_to_append
def __str__(self) -> str:
txt = []
#
txt.append("dag_builder=")
txt.append(hprint.indent(str(self._dag_builder), 2))
#
txt.append("overriding_config=")
txt.append(hprint.indent(str(self._overriding_config), 2))
#
txt.append("nodes_to_insert=")
txt.append(hprint.indent("\n".join(map(str, self._nodes_to_insert)), 2))
#
txt.append("nodes_to_append=")
txt.append(hprint.indent("\n".join(map(str, self._nodes_to_append)), 2))
#
txt = "\n".join(txt)
return txt
def get_config_template(self) -> cconfig.Config:
config = self._dag_builder.get_config_template()
config.update(self._overriding_config)
return config
def _get_dag(
self, config: cconfig.Config, mode: str = "strict"
) -> dtfcordag.DAG:
        # Keep only the config keys that the wrapped builder's template knows about.
nested_config_template = self._dag_builder.get_config_template()
config_diff = cconfig.Config()
for key in config.keys():
if key in nested_config_template:
config_diff[key] = config[key]
_LOG.debug("# config_diff=\n%s", str(config_diff))
dag = self._dag_builder.get_dag(config_diff, mode=mode)
_LOG.debug("# dag=\n%s", str(dag))
#
if self._nodes_to_insert:
_LOG.debug("# Inserting nodes")
            # To insert a node we need to assume that there is a single source node.
source_nid = dag.get_unique_source()
# TODO(gp): Allow to insert more than one node, if needed.
hdbg.dassert_eq(len(self._nodes_to_insert), 1)
stage, node_ctor = self._nodes_to_insert[0]
_LOG.debug(hprint.to_str("stage node_ctor"))
head_nid = self._dag_builder._get_nid(stage)
node = node_ctor(
head_nid,
**config[head_nid].to_dict(),
)
dag.add_node(node)
dag.connect(head_nid, source_nid)
if self._nodes_to_append:
_LOG.debug("# Appending nodes")
            # To append a node we need to assume that there is a single sink node.
sink_nid = dag.get_unique_sink()
# TODO(gp): Allow to append more than one node, if needed.
hdbg.dassert_eq(len(self._nodes_to_append), 1)
stage, node_ctor = self._nodes_to_append[0]
_LOG.debug(hprint.to_str("stage node_ctor"))
tail_nid = self._dag_builder._get_nid(stage)
node = node_ctor(
tail_nid,
**config[tail_nid].to_dict(),
)
dag.add_node(node)
dag.connect(sink_nid, tail_nid)
return dag
| nilq/baby-python | python |
from distutils.core import setup
setup(name='DefenseLab',
version='1.0',
author='Andrew Meserole',
packages=['DefenseLab', ])
| nilq/baby-python | python |
#!/usr/bin/python
#_*_coding:utf-8_*_
import sys
# Point class
class Point:
    lng = ''
    lat = ''
    def __init__(self, lng, lat):
        self.lng = lng
        self.lat = lat
    def show(self):
        print(self.lng, "\t", self.lat)
# Ray-casting test: determine whether a point lies inside a polygon
def isPointsInPolygons(point,xyset):
flag = False
p = point
length = len(xyset)
p2 = xyset[length-1]
for i in range(0,length):
p1 = xyset[i]
        # The point coincides with a polygon vertex
        if (p.lng == p1.lng and p.lat == p1.lat) or (p.lng == p2.lng and p.lat == p2.lat):
            return True
        # Check whether the segment's endpoints lie on opposite sides of the ray
        if (p2.lat < p.lat and p1.lat >= p.lat) or (p2.lat >= p.lat and p1.lat < p.lat):
            # X coordinate of the point on the segment with the same Y as the ray
            if (p2.lat == p1.lat):
                x = (p1.lng + p2.lng)/2
            else:
                x = p2.lng - (p2.lat - p.lat)*(p2.lng - p1.lng)/(p2.lat - p1.lat)
            # The point lies exactly on an edge of the polygon
            if (x == p.lng):
                return True
            # The ray crosses this polygon edge
            if (x > p.lng):
                flag = not flag
p2 = p1
return flag
def pointcheck():
    # Load the polygon vertices into xyset
    line = '121.42277777778,31.027666666667,121.42797222222,31.016361111111,121.45088888889,31.023666666667,121.44575,31.035027777778'
    line = line.strip(',')
    strList = line.split(',')
    pointslen = len(strList)
    xyset = []
    for i in range(0, pointslen, 2):
        temp = Point(float(strList[i]), float(strList[i+1]))
        xyset.append(temp)
        temp.show()
    # lxy = '121.42797222222,31.023666666667'.split(',')  # a point inside
    lxy = '121.42797222222,37.023666666667'.split(',')  # a point outside
    lx = float(lxy[0])
    ly = float(lxy[1])
    point = Point(lx, ly)
    if isPointsInPolygons(point, xyset):
        return "inside"
    return "outside"
# Entry point
if __name__=="__main__":
print (pointcheck())
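# Hedged extra check with a unit square (invented data):
if __name__ == "__main__":
    square = [Point(0.0, 0.0), Point(1.0, 0.0), Point(1.0, 1.0), Point(0.0, 1.0)]
    print(isPointsInPolygons(Point(0.5, 0.5), square))  # True: point inside
    print(isPointsInPolygons(Point(2.0, 2.0), square))  # False: point outside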
| nilq/baby-python | python |
from . import utils
from discord.utils import get
async def update_admins(guild, bot_log):
role_admin = get(guild.roles, name='Админ')
role_past_admin = get(guild.roles, name='Бивш Админ')
for admin in utils.get_members_with_role(guild, role_admin):
await bot_log.send(f'{admin.mention}')
await utils.remove_all_roles(admin)
await admin.add_roles(role_past_admin)
        await bot_log.send(f'Добре дошли в клуба {role_past_admin.mention}')  # "Welcome to the club"
async def update_hacktues(guild):
role_10 = get(guild.roles, name='10ти клас')
role_11 = get(guild.roles, name='11ти клас')
role_12 = get(guild.roles, name='12ти клас')
hacktues = get(guild.roles, name='HackTUES')
alumni = get(guild.roles, name='Завършили')
for member in utils.get_members_with_role(guild, hacktues):
if role_10 in member.roles:
await member.remove_roles(role_10)
await member.add_roles(role_11)
elif role_11 in member.roles:
await member.remove_roles(role_11)
await member.add_roles(role_12)
elif role_12 in member.roles:
await member.remove_roles(role_12)
await utils.update_and_dm(member, alumni, True)
async def update_students(guild, bot_log):
role_08 = get(guild.roles, name='8ми клас')
role_09 = get(guild.roles, name='9ти клас')
role_10 = get(guild.roles, name='10ти клас')
role_11 = get(guild.roles, name='11ти клас')
role_12 = get(guild.roles, name='12ти клас')
roles = {
role_11: role_12,
role_10: role_11,
role_09: role_10,
role_08: role_09
}
for old_role, new_role in roles.items():
await utils.update_roles(guild, old_role, new_role)
        await bot_log.send(f'{old_role.mention}, добре дошли в {new_role.mention}')  # "welcome to <new role>"
async def update_alumni(guild):
role_12 = get(guild.roles, name='12ти клас')
role_alumni = get(guild.roles, name='Завършили')
for student in utils.get_members_with_role(guild, role_12):
await utils.remove_all_roles(student)
await utils.update_and_dm(student, role_alumni, False)
| nilq/baby-python | python |
from statistics import multimode
def migratoryBirds(arr):
mode = multimode(arr)
mode.sort()
return mode[0]
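# Equivalent sketch with collections.Counter: among the most frequent values,
# prefer the smallest id (matches the multimode + sort approach above).
from collections import Counter
def migratoryBirds_counter(arr):
    counts = Counter(arr)
    return max(counts.items(), key=lambda kv: (kv[1], -kv[0]))[0]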
if __name__ == "__main__":
arr = [1 ,2 ,3 ,4 ,5 ,4 ,3 ,2 ,1 ,3 ,4]
print(migratoryBirds(arr))
| nilq/baby-python | python |
'''Test configuration constants, functions ...
'''
import subprocess
import os
import unittest
TVM_ROOT_PART='may not need'
TVM_SWAP_PART='may not need'
TVM_HOSTNAME='cworld.local'
TVM_GITREPO_URL = 'git@cworld.local'
def product_topdir():
'''return the project's top level directory (according to git)
'''
topdir = subprocess.check_output(['git','rev-parse','--show-toplevel']
).decode('utf-8').strip()
if not os.path.isdir(topdir):
raise Exception('Not a dir: '+topdir)
return topdir
class TestThisModule(unittest.TestCase):
    def setUp(self):  # unittest calls setUp, not setup
        pass
def test_product_topdir(self):
'''verify the product_topdir returns a valid directory
The .git sub directory is check for existence
'''
topdir = product_topdir()
self.assertTrue(os.path.isdir(os.path.join(topdir,'.git')),
topdir + '/.git is not a directory')
if __name__ == '__main__':
unittest.main()
| nilq/baby-python | python |
import pytest
from core import helpers
@pytest.mark.parametrize('path,expected_prefix', (
('/', 'en-gb'),
('/ar/', 'ar'),
('/es/industries/', 'es'),
('/zh-hans/industries/', 'zh-hans'),
('/de/industries/aerospace/', 'de'),
('/fr/industries/free-foods/', 'fr'),
))
def test_get_language_from_prefix(client, path, expected_prefix):
prefix = helpers.get_language_from_prefix(path)
assert prefix == expected_prefix
@pytest.mark.parametrize('prefixed_url,exp_url', (
('/de/', '/'),
('/ar/', '/'),
('/es/industries/', '/industries/'),
('/zh-hans/industries/', '/industries/'),
('/de/industries/aerospace/', '/industries/aerospace/'),
('/fr/industries/free-foods/', '/industries/free-foods/'),
(
'/es/uk-setup-guide/establish-base-business-uk/',
'/uk-setup-guide/establish-base-business-uk/'
),
))
def test_get_untranslated_url(prefixed_url, exp_url):
url = helpers.get_untranslated_url(prefixed_url)
assert url == exp_url
| nilq/baby-python | python |
from os import path, environ
from imgaug import augmenters as iaa
from keras import backend as K
from keras import optimizers
from keras.callbacks import ModelCheckpoint, ReduceLROnPlateau
from keras.layers import BatchNormalization, Activation
from keras.layers import Input, Conv2D, MaxPooling2D, GlobalAveragePooling2D
from keras.models import Model
from keras.preprocessing.image import ImageDataGenerator
img_width, img_height = 256, 256
channels = 3
# Parenthesize both tuples: without them the conditional expression binds only
# to img_height and the result is a malformed 5-tuple.
input_shape = (channels, img_width, img_height) if K.image_data_format() == 'channels_first' \
    else (img_width, img_height, channels)
train_data_dir = path.join('data', 'train')
validation_data_dir = path.join('data', 'validation')
nb_train_samples = int(environ.get('TRAINING_SAMPLES', 20))
nb_validation_samples = int(environ.get('VALIDATION_SAMPLES', 20))
batch_size = 16
epochs = 100
input_tensor = Input(shape=input_shape)
block1 = BatchNormalization(name='norm_0')(input_tensor)
# Block 1
block1 = Conv2D(8, (3, 3), name='conv_11', activation='relu')(block1)
block1 = Conv2D(16, (3, 3), name='conv_12', activation='relu')(block1)
block1 = Conv2D(32, (3, 3), name='conv_13', activation='relu')(block1)
block1 = Conv2D(64, (3, 3), name='conv_14', activation='relu')(block1)
block1 = MaxPooling2D(pool_size=(2, 2))(block1)
block1 = BatchNormalization(name='norm_1')(block1)
block1 = Conv2D(16, 1)(block1)
# Block 2
block2 = Conv2D(32, (3, 3), name='conv_21', activation='relu')(block1)
block2 = Conv2D(64, (3, 3), name='conv_22', activation='relu')(block2)
block2 = Conv2D(64, (3, 3), name='conv_23', activation='relu')(block2)
block2 = Conv2D(128, (3, 3), name='conv_24', activation='relu')(block2)
block2 = MaxPooling2D(pool_size=(2, 2))(block2)
block2 = BatchNormalization(name='norm_2')(block2)
block2 = Conv2D(64, 1)(block2)
# Block 3
block3 = Conv2D(64, (3, 3), name='conv_31', activation='relu')(block2)
block3 = Conv2D(128, (3, 3), name='conv_32', activation='relu')(block3)
block3 = Conv2D(128, (3, 3), name='conv_33', activation='relu')(block3)
block3 = Conv2D(64, (3, 3), name='conv_34', activation='relu')(block3)
block3 = MaxPooling2D(pool_size=(2, 2))(block3)
block3 = BatchNormalization(name='norm_3')(block3)
# Block 4
block4 = Conv2D(64, (3, 3), name='conv_41', activation='relu')(block3)
block4 = Conv2D(32, (3, 3), name='conv_42', activation='relu')(block4)
block4 = Conv2D(16, (3, 3), name='conv_43', activation='relu')(block4)
block4 = Conv2D(8, (2, 2), name='conv_44', activation='relu')(block4)
block4 = MaxPooling2D(pool_size=(2, 2))(block4)
block4 = BatchNormalization(name='norm_4')(block4)
block4 = Conv2D(2, 1)(block4)
block5 = GlobalAveragePooling2D()(block4)
output = Activation('softmax')(block5)
model = Model(inputs=[input_tensor], outputs=[output])
model.summary()
model.compile(loss='categorical_crossentropy',
optimizer=optimizers.Adam(lr=0.0001, beta_1=0.9, beta_2=0.999, epsilon=None, decay=0.0, amsgrad=False),
metrics=['accuracy'])
# Initiate the train and test generators with data Augmentation
sometimes = lambda aug: iaa.Sometimes(0.6, aug)
seq = iaa.Sequential([
iaa.GaussianBlur(sigma=(0, 1.0)),
iaa.Sharpen(alpha=1, lightness=0),
iaa.CoarseDropout(p=0.1, size_percent=0.15),
sometimes(iaa.Affine(
scale={'x': (0.8, 1.2), 'y': (0.8, 1.2)},
translate_percent={'x': (-0.2, 0.2), 'y': (-0.2, 0.2)},
rotate=(-30, 30),
shear=(-16, 16)))
])
train_datagen = ImageDataGenerator(
rescale=1. / 255,
preprocessing_function=seq.augment_image,
horizontal_flip=True,
vertical_flip=True)
test_datagen = ImageDataGenerator(
rescale=1. / 255,
horizontal_flip=True,
vertical_flip=True)
train_generator = train_datagen.flow_from_directory(
train_data_dir,
target_size=(img_height, img_width),
batch_size=batch_size,
class_mode='categorical')
validation_generator = test_datagen.flow_from_directory(
    validation_data_dir,
    target_size=(img_height, img_width),
    batch_size=batch_size,
    class_mode='categorical')
checkpoint = ModelCheckpoint('f1.h5', monitor='acc', verbose=1, save_best_only=True, save_weights_only=False,
mode='auto', period=1)
reduce_lr = ReduceLROnPlateau(monitor='loss', factor=0.1, patience=2, verbose=0, mode='auto', cooldown=0, min_lr=0)
model.fit_generator(
train_generator,
steps_per_epoch=nb_train_samples // batch_size,
epochs=epochs,
validation_data=validation_generator,
validation_steps=nb_validation_samples // batch_size,
callbacks=[checkpoint, reduce_lr]
)
|
nilq/baby-python
|
python
|
# mb, 2012-05-26, 2013-02-28
import os
import sys
import subprocess
import shutil
from datetime import datetime
ospj = os.path.join
dest_path_to_extensions = '/home/mbless/public_html/TYPO3/extensions'
tempdir = '/home/mbless/HTDOCS/render-ter-extensions/temp'
proceeding = True
stats = {}
def walk_ter_extensions_index_html(rootfolder, f2=sys.stdout):
prelpath = len(rootfolder)
proceeding = True
for path, dirs, files in os.walk(rootfolder):
proceedwithfile = True
destdir = path
if not proceeding:
dirs[:] = []
else:
if os.path.exists(os.path.join(path, 'manual-is-not-available.txt')):
stats['manual-is-not-available.txt'] = stats.get('manual-is-not-available.txt', 0) + 1
else:
for afile in ['index.html', 'manual.sxw', 'manual.html', 'manual.rst']:
if os.path.exists(os.path.join(path, afile)):
stats[afile] = stats.get(afile, 0) + 1
for afile in files:
leaf = os.path.split(path)[1]
vsplitted = leaf.split('.')
if afile.lower() == 'index.html' and (leaf=='latest' or len(vsplitted) == 3):
if leaf == 'latest':
vsplitted = ['999','999','999']
try:
vsplitted = [int(v) for v in vsplitted]
skip = False
except ValueError:
skip = True
if skip:
continue
left, version = os.path.split(path)
left, extkey = os.path.split(left)
v1, v2, v3 = vsplitted
f2.write(extkey + ',%05d.'%v1 + '%05d.'%v2 + '%05d'%v3 + ',' + version + '\n')
document_part_1 = """\
<?xml version="1.0" encoding="utf-8" ?>
<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd">
<html xmlns="http://www.w3.org/1999/xhtml" xml:lang="en" lang="en">
<head>
<meta http-equiv="Content-Type" content="text/html; charset=utf-8" />
<title>Extensions</title>
<link rel="stylesheet" href="https://docs.typo3.org/css/typo3_docutils_styles.css" type="text/css" />
</head>
<body>
<div class="document">
"""
document_part_2 = """\
</div>
</body>
</html>
"""
def main(timestr=None):
tempfile = ospj(tempdir, 'tempfile-ter-manuals-1.txt')
    f2 = open(tempfile, 'w')
walk_ter_extensions_index_html(dest_path_to_extensions, f2)
f2.close()
    f1 = open(ospj(tempdir, 'tempfile-ter-manuals-1.txt'))
    f2 = open(ospj(tempdir, 'tempfile-ter-manuals-2.txt'), 'w')
subprocess.call('sort', stdin=f1, stdout=f2)
f1.close()
f2.close()
extkey0 = None
version0 = None
firstletter0 = None
firstletter00 = ''
cntlines = 0
cntlinks = 0
    f1 = open(ospj(tempdir, 'tempfile-ter-manuals-2.txt'))
    f2 = open(ospj(tempdir, 'tempfile-ter-manuals-3-index.html'), 'w')
f2.write(document_part_1)
if timestr is None:
timestr = str(datetime.now())[:19]
f2.write('<pre>')
f2.write(timestr)
f2.write(" updated every 2 hours at HH:10\n")
f2.write('</pre>\n')
else:
f2.write('<pre>')
f2.write("This list reflects extensions.xml.gz %s\n" % timestr)
f2.write("Updated every 2 hours at HH:10\n")
f2.write('</pre>\n')
#f2.write('<p>'
# 'The links will open in a second window. I you arrange the two windows '
# 'side by side you can click an extension in this window and '
# 'immediately read in the other.</p>'
#)
if timestr < '2012-12-30 16:00:00':
f2.write('<p><b>'
"Due to the way TER works it may take "
'up to a day until new manuals appear.'
'</b></p>'
)
if timestr < '2011-12-30 16:00:00':
f2.write('<p><b>'
"http://typo3.org doesn\'t hand out new 'manual.sxw' files at the moment. "
'So we are not getting any updates at the moment. This will be repaired '
'once typo3.org works again. ~Martin, 2012-05-21 18:35'
'</b></p>'
)
for line in f1:
cntlines += 1
extkey, dummy, version = line.strip().split(',')
firstletter = extkey[0]
        if extkey0 is not None:
if firstletter0 != firstletter00:
f2.write('<br /><br /><b>%s</b><br />\n' % firstletter0)
firstletter00 = firstletter0
if extkey != extkey0:
f2.write('<a href="%s/%s/" title="%s %s" >%s</a><br />\n' % (extkey0, version0, extkey0, version0, extkey0))
cntlinks += 1
firstletter0 = firstletter
extkey0 = extkey
version0 = version
    if extkey0 is not None:
if firstletter0 != firstletter00:
f2.write('<br /><br /><b>%s</b><br />\n' % firstletter0)
firstletter00 = firstletter0
f2.write('<a href="%s/%s/" title="%s %s" >%s</a>\n' % (extkey0, version0, extkey0, version0, extkey0))
f2.write('<pre>\n')
f2.write('%s\n\n' % (str(datetime.now())[:19]))
f2.write('Available:\n')
f2.write('\n')
f2.write('%6d links on this page to different extensions.\n' % cntlinks)
f2.write(' The links point to the latest version which has an index.html\n')
f2.write('\n')
f2.write('%6d with manual.sxw (made by extension author)\n' % stats['manual.sxw'])
f2.write('%6d with manual.html (made from manual.sxw)\n' % stats['manual.html'])
f2.write('%6d with manual.rst (made from manual.html)\n' % stats['manual.rst'])
f2.write('%6d with index.html (made from manual.rst)\n' % stats['index.html'])
f2.write('\n')
f2.write("%6d don't have a manual at http://typo3.org/extension-manuals/EXTKEY/VERSION/sxw/?no_cache=1\n" % stats['manual-is-not-available.txt'])
f2.write('</pre>')
f2.write(document_part_2)
f2.close()
if (0):
# moved this functionality to the caller to make everything more "atomic"
srcfile = ospj(tempdir, 'tempfile-ter-manuals-3-index.html')
destfile = os.path.join(dest_path_to_extensions, 'index.html')
shutil.copyfile(srcfile, destfile)
if __name__ == "__main__":
main()
|
nilq/baby-python
|
python
|
def test(i):
print("test", i)
def add_test(mf):
def add_test_print(i):
print("added to test", i)
mf.register_event("test", add_test_print, unique=False)
def main(event):
event.test(0)
event.add_test()
event.test(1)
def register(mf):
mf.register_event("test", test, unique=False)
mf.register_event("add_test", add_test, unique=False)
mf.register_event("main", main, unique=False)
|
nilq/baby-python
|
python
|
"""
Understanding Iterators and Iterables
# Iterator
- An object that can be iterated over
- An object that returns data, one element at a time, whenever the next() function is called;
# Iterable
- An object that returns an iterator when iter() is called.
"""
|
nilq/baby-python
|
python
|
from infosystem.common.subsystem import router
class Router(router.Router):
def __init__(self, collection, routes=[]):
super().__init__(collection, routes)
@property
def routes(self):
# TODO(samueldmq): is this the best way to re-write the defaults to
# only change bypass=true for create ?
return [
{
'action': 'create',
'method': 'POST',
'url': self.collection_url,
'callback': 'create',
'bypass': True
},
{
'action': 'get',
'method': 'GET',
'url': self.resource_url,
'callback': 'get'
},
{
'action': 'delete',
'method': 'DELETE',
'url': self.resource_url,
'callback': 'delete'
}
]
|
nilq/baby-python
|
python
|
from ubikagent import Project
from ubikagent.introspection import get_methods
class DummyAgent:
"""Test class needed by `InstantiableProject` and `TestProject`."""
pass
class NonInstantiableProject(Project):
"""Test class needed by `TestProject`."""
pass
class InstantiableProject(Project):
"""Test class needed by `TestProject` and `TestIntrospection`."""
ENV_ID = 'test-v0'
AGENT_CLASS = DummyAgent
def no_args(self):
pass
def pos_arg(self, argument):
pass
def pos_arg_with_explicit_type(self, argument: int):
pass
def kwarg_with_implicit_int_type(self, argument=1):
pass
def kwarg_with_default_none(self, argument=None):
pass
def kwarg_with_explicit_int_type(self, argument: int = 1):
pass
def kwarg_with_implicit_bool_type(self, argument=True):
pass
def kwarg_with_implicit_string_type(self, argument='a_string'):
pass
class TestIntrospection:
"""Tests reading methods and arguments from `Project` and its subclasses
to be used to generate command line help."""
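    # Inferred from the assertions below (not from ubikagent documentation):
    # get_methods() appears to map each public method name to a list of
    # (name, is_kwarg, default, type, doc) tuples, one per argument.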
def setup_class(cls):
cls.instance = InstantiableProject()
cls.methods = get_methods(cls.instance)
def test_project_method_without_args(self):
method_name = 'no_args'
argument = self.methods[method_name]
assert argument == []
def test_project_method_with_an_arg(self):
method_name = 'pos_arg'
expected_name = 'argument'
expected_kwarg = False
expected_default = None
expected_type = None
expected_doc = None
arguments = self.methods[method_name]
first_argument = arguments[0]
assert first_argument == (expected_name, expected_kwarg, expected_default, expected_type, expected_doc)
def test_project_method_with_an_arg_with_explicit_type(self):
method_name = 'pos_arg_with_explicit_type'
expected_name = 'argument'
expected_default = None
expected_type = int
arguments = self.methods[method_name]
first_argument = arguments[0]
argument_name, is_kwarg, argument_default, argument_type, _ = first_argument
assert argument_name == expected_name
assert is_kwarg is False
assert argument_default == expected_default
assert argument_type == expected_type
def test_project_method_default_none(self):
method_name = 'kwarg_with_default_none'
expected_name = 'argument'
expected_default = None
expected_type = None
arguments = self.methods[method_name]
first_argument = arguments[0]
argument_name, is_kwarg, argument_default, argument_type, _ = first_argument
assert argument_name == expected_name
assert is_kwarg is True
assert argument_default == expected_default
assert argument_type == expected_type
def test_project_method_with_int_default(self):
method_name = 'kwarg_with_implicit_int_type'
expected_name = 'argument'
expected_default = 1
expected_type = int
arguments = self.methods[method_name]
first_argument = arguments[0]
argument_name, is_kwarg, argument_default, argument_type, _ = first_argument
assert argument_name == expected_name
assert is_kwarg is True
assert argument_default == expected_default
assert argument_type == expected_type
def test_project_method_with_int_type(self):
method_name = 'kwarg_with_explicit_int_type'
expected_name = 'argument'
expected_default = 1
expected_type = int
expected_doc = None
arguments = self.methods[method_name]
first_argument = arguments[0]
argument_name, is_kwarg, argument_default, argument_type, _ = first_argument
assert argument_name == expected_name
assert is_kwarg is True
assert argument_default == expected_default
assert argument_type == expected_type
def test_project_method_with_bool_default(self):
method_name = 'kwarg_with_implicit_bool_type'
expected_name = 'argument'
expected_default = True
expected_type = bool
expected_doc = None
arguments = self.methods[method_name]
first_argument = arguments[0]
argument_name, is_kwarg, argument_default, argument_type, _ = first_argument
assert argument_name == expected_name
assert is_kwarg is True
assert argument_default == expected_default
assert argument_type == expected_type
class TestProject:
"""Tests instantiating a `Project`."""
def test_instantiating_project(self):
instance = InstantiableProject()
def test_instantiating_project_without_variables_fails(self):
try:
instance = NonInstantiableProject()
except Exception:
pass
else:
raise AssertionError(
"Instantiating did not raise exception when it should have")
|
nilq/baby-python
|
python
|
# coding:utf-8
import threading
import redlock
class Locker(object):
    def __init__(self, resource, ttl=0, servers=None):
        # avoid a mutable default argument; fall back to a local Redis server
        if servers is None:
            servers = [{"host": "localhost", "port": 6379, "db": 0}]
        self.servers = servers
self.resource = resource
self.ttl = ttl
self.dlm = None
self.r = None
def lock(self):
self.dlm = redlock.Redlock(self.servers)
self.r = self.dlm.lock( self.resource,self.ttl)
if not self.r:
return False
return True
def unlock(self):
self.dlm.unlock(self.r)
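# Minimal usage sketch (assumes a Redis server reachable on localhost:6379;
# "shared-resource" is a hypothetical resource name):
#
#   locker = Locker("shared-resource", ttl=1000)
#   if locker.lock():
#       try:
#           pass  # critical section
#       finally:
#           locker.unlock()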
# import time
# lock = redlock.RedLock("distributed_lock",
# connection_details=[
# {'host':'172.16.109.1','port':6379,'db':0}
#
# ])
#
# lock.acquire()
# print 'enter lock...'
# time.sleep(10000)
# lock.release()
|
nilq/baby-python
|
python
|
#!/usr/bin/env python
# Copyright 2020 MaaT Pharma
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#############
# This script extracts the sequences with a length greater than or equal to a length threshold from a FASTA file.
# python filter_FASTA_by_seq_length.py in.fasta out.fasta 1000
#############
from Bio import SeqIO
import sys, os
if len(sys.argv) == 4 :
fasta_file = sys.argv[1]
output_file = sys.argv[2]
length = int(sys.argv[3])
output_file = open(output_file, "w")
if os.path.isfile(fasta_file) :
with open(fasta_file, 'r') as ff:
for seq_record in SeqIO.parse(ff, "fasta"):
seq_length = len(seq_record.seq) - seq_record.seq.count("N")
if (seq_length >= length) :
SeqIO.write(seq_record, output_file, "fasta")
output_file.close()
|
nilq/baby-python
|
python
|
import logging
from io import BytesIO
from datetime import datetime, timezone
from kermes_infra.mail import MailService
from kermes_infra.repositories import FileRepository, UserRepository, EBookRepository
from kermes_infra.queues import SQSConsumer
from kermes_infra.messages import DeliverEBookMessage, CleanUpMessage
class Postmaster:
def __init__(
self,
user_repository: UserRepository,
ebook_repository: EBookRepository,
file_repository: FileRepository,
mail_service: MailService,
housekeeper_queue_producer: SQSConsumer,
logger: logging.Logger,
) -> None:
self.user_repository = user_repository
self.ebook_repository = ebook_repository
self.file_repository = file_repository
self.mail_service = mail_service
self.housekeeper_queue_producer = housekeeper_queue_producer
self.logger = logger
def process_message(self, message_json: str) -> bool:
self.logger.debug(f"processing message {message_json}")
# parse the message
deliver_msg = DeliverEBookMessage.from_json(message_json)
# fetch the user record
user = self.user_repository.get(deliver_msg.user_id)
if user is None:
self.logger.error(f"couldn't fetch user with id {deliver_msg.user_id}")
return False
# fetch the ebook record
ebook = self.ebook_repository.get(user.user_id, deliver_msg.ebook_id)
if ebook is None:
self.logger.error(f"couldn't fetch ebook with id {deliver_msg.ebook_id} for user {user.user_id}")
return False
# fetch the ebook file from S3
content_key = ebook.kindle_content_key if user.prefer_kindle else ebook.content_key
ebook_content = self.file_repository.get(content_key)
if ebook_content is None:
self.logger.error(f"couldn't fetch ebook content for key {content_key}")
return False
# send the ebook message
attachment_filename = "ebook.mobi" if user.prefer_kindle else "ebook.epub"
if not self.mail_service.send_message(
user.prefer_kindle,
user.delivery_email,
"Kermes delivery!",
"This is your ebook!",
BytesIO(ebook_content.read()),
attachment_filename,
):
self.logger.error(f"couldn't deliver ebook {ebook.ebook_id} for user {user.user_id}")
return False
self.housekeeper_queue_producer.send_message(CleanUpMessage(user.user_id, ebook.ebook_id).to_json())
ebook.sent = True
ebook.sent_date = datetime.now(tz=timezone.utc)
if not self.ebook_repository.put(ebook):
self.logger.error(f"couldn't update ebook {ebook.ebook_id} with sent status")
return False
return True
|
nilq/baby-python
|
python
|
# values_from_literature.py (flowsa)
# !/usr/bin/env python3
# coding=utf-8
"""
Values from the literature used for data allocation are
specified here and can be called on using functions.
"""
import pandas as pd
import numpy as np
from flowsa.common import datapath
def get_US_urban_green_space_and_public_parks_ratio():
"""
calculates weighted average of urban green space and public parks in national total urban areas
Based on weighted average of 44 cities based on city population.
weighted average value = 12.35%
Larson LR, Jennings V, Cloutier SA (2016) Public Parks and
Wellbeing in Urban Areas of the United States.
PLoS ONE 11(4): e0153211. https://doi.org/10.1371/journal.pone.0153211
"""
# load Larson's saved SI data
df = pd.read_csv(datapath + "Larson_UrbanPublicParks_SI.csv")
# calculate a weighted value for ratio of urban land
# that belongs to parks based on city populations
    # weighted average function; note a ZeroDivisionError can only be raised
    # when the aggregation is evaluated, not when the lambda is defined
    wm = lambda x: np.ma.average(x, weights=df.loc[x.index, "CityPop2010"])
    # column to weight
    agg_funx = {"ParkPercent-2014": wm}
    # weighted averages as value
    try:
        value_series = df.agg(agg_funx)
        value = value_series[0]
    except ZeroDivisionError:
        value = 0
    return value
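# Illustrative sketch of the weighting above with hypothetical numbers
# (not the Larson data): two cities with park shares of 10% and 15%,
# weighted by populations of 1,000,000 and 500,000, give
#
#   np.average([0.10, 0.15], weights=[1_000_000, 500_000])  # ~0.1167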
def get_Canadian_to_USD_exchange_rate(year):
"""
Return exchange rate (Canadian $/USD)
From https://www.federalreserve.gov/releases/h10/current/ on 09/07/2020
    :param year: year as a string, e.g. '2015'
    :return: exchange rate for that year as a string, or None if the year is not in the table
    """
er = ({'2000': '1.4855',
'2001': '1.5487',
'2002': '1.5704',
'2003': '1.4008',
'2004': '1.3017',
'2005': '1.2115',
'2006': '1.134',
'2007': '1.0734',
'2008': '1.066',
'2009': '1.1412',
'2010': '1.0298',
'2011': '0.9887',
'2012': '0.9995',
'2013': '1.03',
'2014': '1.1043',
'2015': '1.2791',
'2016': '1.3243',
'2017': '1.2984',
'2018': '1.2957',
'2019': '1.3269'
})
exchange_rate = er.get(year)
return exchange_rate
def get_area_of_urban_land_occupied_by_houses_2013():
"""
Reported area of urban land occupied by houses in 2013 from the USDA ERS Major Land Uses Report
:return:
"""
acres_to_sq_m_conversion = 4046.86
# value originally reported in million acres
area_urban_residence = 32.8
# convert to square meters
area_urban_residence = area_urban_residence * 1000000 * acres_to_sq_m_conversion
return area_urban_residence
def get_area_of_rural_land_occupied_by_houses_2013():
"""
    Reported area of rural land occupied by houses in 2013 from the USDA ERS Major Land Uses Report
:return:
"""
acres_to_sq_m_conversion = 4046.86
# value originally reported in million acres
area_rural_residence = 106.3
# convert to square meters
area_rural_residence = area_rural_residence * 1000000 * acres_to_sq_m_conversion
return area_rural_residence
def get_commercial_and_manufacturing_floorspace_to_land_area_ratio():
"""
The additional land area associated with commercial and
    manufacturing buildings (parking, signage, landscaping)
Based on original USEEIO assumption
:return: ratio of land area to total floorspace assumption
"""
floor_space_to_land_area_ratio = 0.25
return floor_space_to_land_area_ratio
def get_open_space_fraction_of_urban_area():
"""
Assumption on the fraction of urban areas that is open space
Based on Lin Zeng's 2020 paper
:return: fraction of open space in urban areas
"""
value = 0.1
return value
def get_urban_land_use_for_airports():
"""
Based on Lin Zeng's 2020 paper
:return:
"""
value = 0.05
return value
def get_urban_land_use_for_railroads():
"""
Based on Lin Zeng's 2020 paper
:return:
"""
value = 0.05
return value
def get_fraction_of_urban_local_road_area_for_parking():
"""
Based on Lin Zeng's 2020 paper
:return:
"""
value = 0.25
return value
def get_transportation_sectors_based_on_FHA_fees():
"""
Values from https://www.fhwa.dot.gov/policy/hcas/addendum.cfm
Website accessed 11/02/2020
Data from 1997
:return:
"""
fha_dict = ({'Truck transportation': {'NAICS_2012_Code': '484', 'ShareOfFees': 0.329},
'Transit and ground passenger transportation': {'NAICS_2012_Code': '485',
'ShareOfFees': 0.001},
'State and local government passenger transit': {'NAICS_2012_Code': 'S00201',
'ShareOfFees': 0.001},
'Personal consumption expenditures': {'NAICS_2012_Code': 'F01000',
'ShareOfFees': 0.669}
})
return fha_dict
|
nilq/baby-python
|
python
|
"""
sentry.plugins.base.v2
~~~~~~~~~~~~~~~~~~~~~~
:copyright: (c) 2010-2013 by the Sentry Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
from __future__ import absolute_import, print_function
__all__ = ('Plugin2',)
import logging
from django.http import HttpResponseRedirect
from threading import local
from sentry.plugins.base.response import Response
class PluginMount(type):
def __new__(cls, name, bases, attrs):
new_cls = type.__new__(cls, name, bases, attrs)
if IPlugin2 in bases:
return new_cls
if not new_cls.title:
new_cls.title = new_cls.__name__
if not new_cls.slug:
new_cls.slug = new_cls.title.replace(' ', '-').lower()
if not hasattr(new_cls, 'logger'):
new_cls.logger = logging.getLogger('sentry.plugins.%s' % (new_cls.slug,))
return new_cls
class IPlugin2(local):
"""
Plugin interface. Should not be inherited from directly.
A plugin should be treated as if it were a singleton. The owner does not
control when or how the plugin gets instantiated, nor is it guaranteed that
it will happen, or happen more than once.
>>> from sentry.plugins import Plugin2
>>>
>>> class MyPlugin(Plugin2):
>>> def get_title(self):
>>> return 'My Plugin'
As a general rule all inherited methods should allow ``**kwargs`` to ensure
ease of future compatibility.
"""
# Generic plugin information
title = None
slug = None
description = None
version = None
author = None
author_url = None
resource_links = ()
# Configuration specifics
conf_key = None
conf_title = None
project_conf_form = None
project_conf_template = 'sentry/plugins/project_configuration.html'
# Global enabled state
enabled = True
can_disable = True
# Should this plugin be enabled by default for projects?
project_default_enabled = False
def _get_option_key(self, key):
return '%s:%s' % (self.get_conf_key(), key)
def is_enabled(self, project=None):
"""
Returns a boolean representing if this plugin is enabled.
If ``project`` is passed, it will limit the scope to that project.
>>> plugin.is_enabled()
"""
if not self.enabled:
return False
if not self.can_disable:
return True
if not self.can_enable_for_projects():
return True
if project:
project_enabled = self.get_option('enabled', project)
if project_enabled is not None:
return project_enabled
else:
return self.project_default_enabled
return True
def reset_options(self, project=None, user=None):
from .helpers import reset_options
return reset_options(self.get_conf_key(), project, user)
def get_option(self, key, project=None, user=None):
"""
Returns the value of an option in your plugins keyspace, or ``None`` if
one is not present.
If ``project`` is passed, it will limit the scope to that project's keyspace.
>>> value = plugin.get_option('my_option')
"""
from sentry.plugins.helpers import get_option
return get_option(self._get_option_key(key), project, user)
def set_option(self, key, value, project=None, user=None):
"""
Updates the value of an option in your plugins keyspace.
If ``project`` is passed, it will limit the scope to that project's keyspace.
>>> plugin.set_option('my_option', 'http://example.com')
"""
from sentry.plugins.helpers import set_option
return set_option(self._get_option_key(key), value, project, user)
def unset_option(self, key, project=None, user=None):
"""
Removes an option in your plugins keyspace.
If ``project`` is passed, it will limit the scope to that project's keyspace.
>>> plugin.unset_option('my_option')
"""
from sentry.plugins.helpers import unset_option
return unset_option(self._get_option_key(key), project, user)
def get_conf_key(self):
"""
Returns a string representing the configuration keyspace prefix for this plugin.
"""
if not self.conf_key:
return self.get_conf_title().lower().replace(' ', '_')
return self.conf_key
def get_conf_title(self):
"""
Returns a string representing the title to be shown on the configuration page.
"""
return self.conf_title or self.get_title()
def has_project_conf(self):
return self.project_conf_form is not None
def can_enable_for_projects(self):
"""
Returns a boolean describing whether this plugin can be enabled on a per project basis
"""
return True
# Response methods
def redirect(self, url):
"""
Returns a redirect response type.
"""
return HttpResponseRedirect(url)
def render(self, template, context=None):
"""
Given a template name, and an optional context (dictionary), returns a
ready-to-render response.
Default context includes the plugin instance.
>>> plugin.render('template.html', {'hello': 'world'})
"""
if context is None:
context = {}
context['plugin'] = self
return Response(template, context)
# The following methods are specific to web requests
def get_title(self):
"""
Returns the general title for this plugin.
>>> plugin.get_title()
"""
return self.title
def get_description(self):
"""
Returns the description for this plugin. This is shown on the plugin configuration
page.
>>> plugin.get_description()
"""
return self.description
def get_resource_links(self):
"""
Returns a list of tuples pointing to various resources for this plugin.
>>> def get_resource_links(self):
>>> return [
>>> ('Documentation', 'http://sentry.readthedocs.org'),
>>> ('Bug Tracker', 'https://github.com/getsentry/sentry/issues'),
>>> ('Source', 'https://github.com/getsentry/sentry'),
>>> ]
"""
return self.resource_links
def get_rules(self, **kwargs):
"""
Return a list of Rule classes to add to the registry.
>>> def get_rules(self, **kwargs):
>>> return [MyCustomRule]
"""
return []
def get_actions(self, request, group, **kwargs):
"""
        Return a list of available actions to append to this aggregate.
Examples of built-in actions are "Mute Event" and "Remove Data".
An action is a tuple containing two elements:
('Action Label', '/uri/to/action/')
>>> def get_actions(self, request, group, **kwargs):
>>> return [('Google', 'http://google.com')]
"""
return []
def get_annotations(self, request, group, **kwargs):
"""
Return a list of annotations to append to this aggregate.
An example of an annotation might be "Needs Fix" or "Task #123".
The properties of each tag must match the constructor for
:class:`sentry.plugins.Annotation`
>>> def get_annotations(self, request, group, **kwargs):
>>> task_id = GroupMeta.objects.get_value(group, 'myplugin:tid')
>>> if not task_id:
>>> return []
>>> return [{'label': '#%s' % (task_id,)}]
"""
return []
def get_notifiers(self, **kwargs):
"""
Return a list of notifiers to append to the registry.
Notifiers must extend :class:`sentry.plugins.Notifier`.
>>> def get_notifiers(self, **kwargs):
>>> return [MyNotifier]
"""
return []
def get_tags(self, event, **kwargs):
"""
Return a list of additional tags to add to this instance.
A tag is a tuple containing two elements:
('tag-key', 'tag-value')
>>> def get_tags(self, event, **kwargs):
>>> return [('tag-key', 'tag-value')]
"""
return []
def get_event_preprocessors(self, **kwargs):
"""
Return a list of preprocessors to apply to the given event.
A preprocessor is a function that takes the normalized data blob as an
input and returns modified data as output. If no changes to the data are
made it is safe to return ``None``.
>>> def get_event_preprocessors(self, **kwargs):
>>> return [lambda x: x]
"""
return []
def get_feature_hooks(self, **kwargs):
"""
Return a list of callables to check for feature status.
>>> from sentry.features import FeatureHandler
>>>
>>> class NoRegistration(FeatureHandler):
>>> features = set(['auth:register'])
>>>
>>> def has(self, feature, actor):
>>> return False
>>> def get_feature_hooks(self, **kwargs):
>>> return [NoRegistration()]
"""
return []
class Plugin2(IPlugin2):
"""
A plugin should be treated as if it were a singleton. The owner does not
control when or how the plugin gets instantiated, nor is it guaranteed that
it will happen, or happen more than once.
"""
__version__ = 2
__metaclass__ = PluginMount
|
nilq/baby-python
|
python
|
# Copyright (C) 2019 Analog Devices, Inc.
#
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
# - Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# - Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
# - Neither the name of Analog Devices, Inc. nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
# - The use of this software may or may not infringe the patent rights
# of one or more patent holders. This license does not release you
# from the requirement that you obtain separate licenses from these
# patent holders to use this software.
# - Use of the software either in source or binary form, must be run
# on or directly connected to an Analog Devices Inc. component.
#
# THIS SOFTWARE IS PROVIDED BY ANALOG DEVICES "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
# INCLUDING, BUT NOT LIMITED TO, NON-INFRINGEMENT, MERCHANTABILITY AND FITNESS FOR A
# PARTICULAR PURPOSE ARE DISCLAIMED.
#
# IN NO EVENT SHALL ANALOG DEVICES BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, INTELLECTUAL PROPERTY
# RIGHTS, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
# BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
# STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF
# THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import adi
import matplotlib.pyplot as plt
import numpy as np
from scipy import signal
import time
# Create radio
sdr = adi.Pluto()
# Configure properties
sdr.rx_rf_bandwidth = 4000000
sdr.rx_lo = 2000000000
sdr.tx_lo = 2000000000
sdr.tx_cyclic_buffer = True
sdr.tx_hardwaregain = -30
sdr.gain_control_mode = 'slow_attack'
# Read properties
print("RX LO %s" % (sdr.rx_lo))
# Create a sinewave waveform
fs = int(sdr.sample_rate)
fc = 3000000
N = 1024
ts = 1/float(fs)
t = np.arange(0, N*ts, ts)
i = np.cos(2*np.pi*t*fc) * 2**14
q = np.sin(2*np.pi*t*fc) * 2**14
iq = i + 1j*q
# Send data
sdr.tx(iq)
# Collect data
for r in range(20):
x = sdr.rx()
f, Pxx_den = signal.periodogram(x, fs)
plt.clf()
plt.semilogy(f, Pxx_den)
plt.ylim([1e-7, 1e2])
plt.xlabel('frequency [Hz]')
plt.ylabel('PSD [V**2/Hz]')
plt.draw()
plt.pause(0.05)
time.sleep(0.1)
plt.show()
|
nilq/baby-python
|
python
|
# Dindo Bot
# Copyright (c) 2018 - 2019 AXeL
from lib.shared import LogType, DebugLevel
from lib import tools, parser
from .job import JobThread
class BotThread(JobThread):
def __init__(self, parent, game_location, start_from_step, repeat_path, account_id, disconnect_after):
JobThread.__init__(self, parent, game_location)
self.start_from_step = start_from_step
self.repeat_path = repeat_path
self.account_id = account_id
self.disconnect_after = disconnect_after
self.exit_game = parent.settings['Account']['ExitGame']
def run(self):
self.start_timer()
self.debug('Bot thread started', DebugLevel.Low)
# connect to account
account_connected = False
if self.account_id is not None:
self.debug('Connect to account (account_id: %s)' % self.account_id)
self.connect(self.account_id)
account_connected = True
# check for pause
self.pause_event.wait()
# get instructions & interpret them
if not self.suspend:
self.debug('Bot path: %s, repeat: %d' % (self.parent.bot_path, self.repeat_path))
if self.parent.bot_path:
instructions = tools.read_file(self.parent.bot_path)
repeat_count = 0
while repeat_count < self.repeat_path:
# check for pause or suspend
self.pause_event.wait()
if self.suspend: break
# start interpretation
self.interpret(instructions)
repeat_count += 1
                # tell the user that we have completed the path
if not self.suspend:
self.log('Bot path completed', LogType.Success)
if not self.suspend:
# disconnect account
if account_connected and self.disconnect_after:
self.debug('Disconnect account')
self.disconnect(self.exit_game)
# reset bot window buttons
self.reset()
self.debug('Bot thread ended, elapsed time: ' + self.get_elapsed_time(), DebugLevel.Low)
def interpret(self, instructions):
# split instructions
lines = instructions.splitlines()
# ignore instructions before start step
if self.start_from_step > 1 and self.start_from_step <= len(lines):
self.debug('Start from step: %d' % self.start_from_step)
step = self.start_from_step - 1
lines = lines[step:]
for i, line in enumerate(lines, start=1):
# check for pause or suspend
self.pause_event.wait()
if self.suspend: break
# parse instruction
self.debug('Instruction (%d): %s' % (i, line), DebugLevel.Low)
instruction = parser.parse_instruction(line)
self.debug('Parse result: ' + str(instruction), DebugLevel.High)
# begin interpretation
if instruction['name'] == 'Move':
self.move(instruction['value'])
elif instruction['name'] == 'Enclos':
self.check_enclos(instruction['location'], instruction['type'])
elif instruction['name'] == 'Zaap':
self.use_zaap(instruction['from'], instruction['to'])
elif instruction['name'] == 'Zaapi':
self.use_zaapi(instruction['from'], instruction['to'])
elif instruction['name'] == 'Collect':
self.collect(instruction['map'], instruction['store_path'])
elif instruction['name'] == 'Click':
coordinates = (
int(instruction['x']),
int(instruction['y']),
int(instruction['width']),
int(instruction['height'])
)
if instruction['twice'] == 'True':
self.double_click(coordinates)
else:
self.click(coordinates)
elif instruction['name'] == 'Wait':
if instruction['pause'] == 'True':
self.wait()
elif instruction['duration'].isdigit():
self.sleep(int(instruction['duration']))
elif instruction['name'] == 'PressKey':
self.press_key(instruction['value'])
elif instruction['name'] == 'TypeText':
self.type_text(instruction['value'])
elif instruction['name'] == 'Connect':
if instruction['account_id'].isdigit():
account_id = int(instruction['account_id'])
else:
account_id = instruction['account_id']
self.connect(account_id)
elif instruction['name'] == 'Disconnect':
self.disconnect(instruction['value'])
else:
self.debug('Unknown instruction', DebugLevel.Low)
|
nilq/baby-python
|
python
|
class ForeignCountry:
def __init__(self, code):
self.code = code
self.name = "Paese Estero"
|
nilq/baby-python
|
python
|
import json
import pytest
from tests.unit.resources import searched_observable
from trustar2.models.searched_observable import SearchedObservable
from trustar2.trustar_enums import ObservableTypes
VALUE = "2.2.2.2"
TYPE = ObservableTypes.IP4.value
FIRST_SEEN = 1623273177255
LAST_SEEN = 1623701072520
ENCLAVE_GUIDS = ["test-enclave-guid"]
TAGS = ["test-tag"]
@pytest.fixture
def searched_observable_json():
return json.loads(searched_observable)
@pytest.fixture
def searched_observable_obj():
return SearchedObservable(
value=VALUE,
type=TYPE,
first_seen=FIRST_SEEN,
last_seen=LAST_SEEN,
enclave_guids=ENCLAVE_GUIDS,
tags=TAGS
)
def test_searched_observable_deserialization(searched_observable_json):
searched_observable = SearchedObservable.from_dict(searched_observable_json)
assert searched_observable.value == VALUE
assert searched_observable.type == TYPE
assert searched_observable.first_seen == FIRST_SEEN
assert searched_observable.last_seen == LAST_SEEN
assert searched_observable.enclave_guids == ENCLAVE_GUIDS
assert searched_observable.tags == TAGS
def test_searched_observable_serialization(searched_observable_obj, searched_observable_json):
assert searched_observable_obj.serialize() == searched_observable_json
def test_searched_observable_repr(searched_observable_obj):
assert searched_observable_obj.__repr__() == "SearchedObservable(type=IP4, value=2.2.2.2)"
|
nilq/baby-python
|
python
|
#!/usr/bin/env python
# BSD 3-Clause License; see https://github.com/scikit-hep/uproot3/blob/master/LICENSE
|
nilq/baby-python
|
python
|
import ipywidgets as widgets
a = widgets.IntText(description='Value A')
b = widgets.IntSlider(description='Value B')
vbox = widgets.VBox(children=[a, b])
vbox
|
nilq/baby-python
|
python
|
"""
Char. number range | UTF-8 octet sequence
(hexadecimal) | (binary)
--------------------+---------------------------------------------
0000 0000-0000 007F | 0xxxxxxx
0000 0080-0000 07FF | 110xxxxx 10xxxxxx
0000 0800-0000 FFFF | 1110xxxx 10xxxxxx 10xxxxxx
0001 0000-0010 FFFF | 11110xxx 10xxxxxx 10xxxxxx 10xxxxxx
"""
class Solution(object):
def validUtf8(self, data):
"""
:type data: List[int]
:rtype: bool
"""
i = 0
datalen = len(data)
try:
while i < datalen:
b0 = data[i]
                if b0 <= 0b01111111:  # 0xxxxxxx: single-byte character
                    i += 1
                elif b0 <= 0b10111111:  # 10xxxxxx: a continuation byte cannot start a character
                    return False
                elif b0 <= 0b11011111:  # 110xxxxx: two-byte character
                    if not (0b10000000 <= data[i+1] <= 0b10111111): return False
                    i += 2
                elif b0 <= 0b11101111:  # 1110xxxx: three-byte character
                    if not (0b10000000 <= data[i+1] <= 0b10111111): return False
                    if not (0b10000000 <= data[i+2] <= 0b10111111): return False
                    i += 3
                elif b0 <= 0b11110111:  # 11110xxx: four-byte character
                    if not (0b10000000 <= data[i+1] <= 0b10111111): return False
                    if not (0b10000000 <= data[i+2] <= 0b10111111): return False
                    if not (0b10000000 <= data[i+3] <= 0b10111111): return False
                    i += 4
                else:
                    return False
except IndexError:
return False
return i == datalen
print(Solution().validUtf8([]))
print(Solution().validUtf8([197, 130, 1]))
print(Solution().validUtf8([235, 140, 4]))
print(Solution().validUtf8([206, 210, 189, 208, 197, 163, 182, 171, 212, 243, 10, 0, 10]))
|
nilq/baby-python
|
python
|
from fastapi import FastAPI, status
from pydantic import BaseModel, ValidationError
from requests_html import HTMLSession
from starlette.middleware.cors import CORSMiddleware
from starlette.responses import JSONResponse
session = HTMLSession()
app = FastAPI(
title="corona virus real time data",
description="",
version="0.3.0",
docs_url="/docs",
redoc_url="/redoc",
openapi_url="/openapi.json",
)
app.add_middleware(
CORSMiddleware,
allow_origins=["*"],
allow_credentials=True,
allow_methods=["*"],
allow_headers=["*"],
)
def get_data(*, country: str):
respond = session.get("https://www.worldometers.info/coronavirus/")
tbody = respond.html.find("tbody", first=True)
trs = tbody.find("tr")
data = {}
for tr in trs:
if f"{country}" in tr.text.lower():
tds = tr.find("td")
country = 0 if tds[1].text == "" else tds[1].text
total_case = 0 if tds[2].text == "" else tds[2].text
new_case = 0 if tds[3].text == "" else tds[3].text
total_death = 0 if tds[4].text == "" else tds[4].text
new_death = 0 if tds[5].text == "" else tds[5].text
total_recovered = 0 if tds[6].text == "" else tds[6].text
new_recovered = 0 if tds[7].text == "" else tds[7].text
active_case = 0 if tds[8].text == "" else tds[8].text
serious_critical = 0 if tds[9].text == "" else tds[9].text
total_cases_1_m_pop = 0 if tds[10].text == "" else tds[10].text
total_deaths_1_m_pop = 0 if tds[11].text == "" else tds[11].text
total_test = 0 if tds[12].text == "" else tds[12].text
total_test_1_m_pop = 0 if tds[13].text == "" else tds[13].text
population = 0 if tds[14].text == "" else tds[14].text
continent = 0 if tds[15].text == "" else tds[15].text
one_case_every_x_ppl = 0 if tds[16].text == "" else tds[16].text
one_death_every_x_ppl = 0 if tds[17].text == "" else tds[17].text
one_test_every_x_ppl = 0 if tds[18].text == "" else tds[18].text
data.update(
{
"country": country,
"total_case": total_case,
"new_case": new_case,
"total_death": total_death,
"new_death": new_death,
"total_recovered": total_recovered,
"new_recovered":new_recovered,
"active_case": active_case,
"serious_critical": serious_critical,
"total_cases_1_M_pop": total_cases_1_m_pop,
"total_deaths_1_m_pop": total_deaths_1_m_pop,
"total_test": total_test,
"total_test_1_m_pop": total_test_1_m_pop,
"population": population,
"continent": continent,
"one_case_every_x_ppl": one_case_every_x_ppl,
"one_death_every_x_ppl": one_death_every_x_ppl,
"one_test_every_x_ppl": one_test_every_x_ppl,
}
)
return data
class CoronaVirusData(BaseModel):
country: str
total_case: str
new_case: str
total_death: str
new_death: str
total_recovered: str
new_recovered: str
active_case: str
serious_critical: str
total_cases_1_M_pop: str
total_deaths_1_m_pop: str
total_test: str
total_test_1_m_pop: str
population: str
continent: str
one_case_every_x_ppl: str
one_death_every_x_ppl: str
one_test_every_x_ppl: str
@app.get("/", response_model=CoronaVirusData)
async def get_country_corona_virus_data(country: str = "Ethiopia"):
"""Getting corona virus data from any country.
Args:
country: Tell what country data to get. Default to Ethiopia.
Example:
https://example.com/?country=china
"""
return get_data(country=country.lower())
@app.get("/total/")
async def get_total_corona_virus_cases():
"""Getting total corona virus cases."""
respond = session.get("https://www.worldometers.info/coronavirus/")
cases, deaths, recovered = respond.html.find(".maincounter-number")
total_currently_infected_patients = respond.html.find(
".number-table-main", first=True
).text
total_cases_which_had_an_outcome = respond.html.find(".number-table-main")[1].text
total_in_mild_condition = respond.html.find(".number-table", first=True).text
total_serious_or_critical = respond.html.find(".number-table")[1].text
totals_cases = cases.find("span", first=True).text
totals_deaths = deaths.find("span", first=True).text
totals_recovered = recovered.find("span", first=True).text
return {
"totals_cases": totals_cases,
"totals_deaths": totals_deaths,
"totals_recovered": totals_recovered,
"total_currently_infected_patients": total_currently_infected_patients,
"total_cases_which_had_an_outcome": total_cases_which_had_an_outcome,
"total_in_mild_condition": total_in_mild_condition,
"total_serious_or_critical": total_serious_or_critical,
}
async def http400_error_handler(_, exc):
return JSONResponse(
{"detail": "Country doesn't exist"}, status_code=status.HTTP_400_BAD_REQUEST
)
app.add_exception_handler(ValidationError, http400_error_handler)
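# To try the API locally (assumption: uvicorn is installed and this file is
# saved as the hypothetical module main.py):
#
#   uvicorn main:app --reload
#   curl "http://127.0.0.1:8000/?country=china"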
|
nilq/baby-python
|
python
|
#! /usr/bin/env python3
from typing import Dict, List, Tuple
import graphics
import day24
from utils import get_file_lines
class Hexagon(graphics.Polygon):
def __init__(self, x, y, length):
delta_x = (1, 0.5, -0.5, -1, -0.5, 0.5)
delta_y = (0, -0.86602540378443864676372317075294, -0.86602540378443864676372317075294, 0, 0.86602540378443864676372317075294, -0.86602540378443864676372317075294)
points = [(x, y)]
for i in range(5):
nx = points[-1][0] + length * delta_x[i]
ny = points[-1][1] - length * delta_y[i]
points.append((nx, ny))
super().__init__([graphics.Point(i,j) for i,j in points])
class HexagonGrid:
def __init__(self, left, top, col_count, row_count, length):
self.cells = []
self.filled_cells = set()
y_length = length * 1.7320508075688772935274463415059
for x in range(col_count):
self.cells.append([])
x_offset = left + 0.5 * length + 1.5 * length * x
y_offset = top + (0 if x % 2 == 0 else y_length / 2)
for y in range(row_count):
hexagon = Hexagon(x_offset, y_offset + y * y_length, length)
self.cells[-1].append(hexagon)
def draw(self, graphwin):
for row in self.cells:
for cell in row:
cell.draw(graphwin)
def reset_cells(self, coords_to_fill):
for coord in coords_to_fill:
if coord not in self.filled_cells:
y, x = int(coord.real), int(coord.imag)
self.cells[y][x].setFill('red')
for coord in (self.filled_cells - coords_to_fill):
y, x = int(coord.real), int(coord.imag)
self.cells[y][x].setFill('light grey')
self.filled_cells = coords_to_fill
def get_grid_size(floors: List[Dict[complex, int]]) -> Tuple[int, int]:
minx, miny, maxx, maxy = 0, 0, 0, 0
for floor in floors:
for pos in floor.keys():
minx = min(minx, int(pos.real))
miny = min(miny, int(pos.imag))
maxx = max(maxx, int(pos.real))
maxy = max(maxy, int(pos.imag))
return (maxx-minx+3, maxy-miny+2)
def part1(floor: Dict[complex, int]) -> None:
minx = int(min(pos.real for pos in floor.keys()))
miny = int(min(pos.imag for pos in floor.keys()))
maxx = int(max(pos.real for pos in floor.keys()))
maxy = int(max(pos.imag for pos in floor.keys()))
col_count, row_count = get_grid_size([floor])
x_offset = (maxx - minx) // 2 + 1
y_offset = (maxy - miny) // 2
win = graphics.GraphWin('Part 1', 1460, 920)
grid = HexagonGrid(5, 5, col_count, row_count, 15)
grid.draw(win)
for pos, colour in floor.items():
if colour:
grid.cells[int(pos.real+x_offset)][int(pos.imag+y_offset)].setFill('red')
win.getMouse()
def part2(floor: Dict[complex, int]) -> int:
floors = [floor]
for _ in range(20):
floor = day24.next_floor(floor)
floors.append(floor)
col_count, row_count = get_grid_size(floors)
x_offset = col_count // 2
y_offset = row_count // 2
center = complex(x_offset, y_offset)
length = 10
row_height = length*1.7320508075688772935274463415059
print('cols',col_count, 'width',2*length*col_count + 10)
win = graphics.GraphWin('Part 2', 1.5*length*col_count + 20, row_count*row_height + 20)
grid = HexagonGrid(5, 5, col_count, row_count, length)
grid.draw(win)
for floor in floors:
print(win.getMouse())
grid.reset_cells(set([center+pos for pos in floor.keys()]))
print(win.getMouse())
return sum(floor.values())
if __name__ == '__main__':
raw_data = get_file_lines('input/day24.txt')
raw_floor = day24.get_initial_state(raw_data)
part1(raw_floor)
part2(raw_floor)
|
nilq/baby-python
|
python
|
# TODO
# class MeanAbsoluteError():
# def __init__(self): pass
# TODO
# class MeanBiasError():
# def __init__(self): pass
# TODO
# class ClassificationLosses():
# def __init__(self): pass
# TODO
# class Elbow():
# def __init__(self): pass
# TODO
# class EuclideanDistance():
# def __init__(self): pass
# TODO
# class Gaussian():
# def __init__(self): pass
####################################################################
import numpy as np # for math
# Resources
# https://ml-cheatsheet.readthedocs.io/en/latest/loss_functions.html
def accuracy_score(y_true, y_pred):
""" Compare y_true to y_pred and return the accuracy """
accuracy = np.sum(y_true == y_pred, axis=0) / len(y_true)
return accuracy
class Loss(object):
def __call__(self, y_true, y_pred):
        raise NotImplementedError()
def gradient(self, y, y_pred):
raise NotImplementedError()
def acc(self, y, y_pred):
return 0
class MeanSquareError(Loss):
def __call__(self, y_true, y_pred):
return 0.5 * np.power((y_true - y_pred), 2)
def gradient(self, y_true, y_pred):
return -(y_true - y_pred)
class CrossEntropy(Loss):
def __call__(self, y_true, y_pred):
# Avoid division by zero
y_pred = np.clip(y_pred, 1e-15, 1 - 1e-15)
return - y_true * np.log(y_pred) - (1 - y_true) * np.log(1 - y_pred)
def gradient(self, y_true, y_pred):
# Avoid division by zero
y_pred = np.clip(y_pred, 1e-15, 1 - 1e-15)
return - (y_true / y_pred) + (1 - y_true) / (1 - y_pred)
def acc(self, y, p):
return accuracy_score(np.argmax(y, axis=1), np.argmax(p, axis=1))
loss_functions = {
"MSE" : MeanSquareError,
"CrossEntropy" : CrossEntropy
}
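# Minimal usage sketch of the registry above (hypothetical arrays):
#
#   loss_fn = loss_functions["MSE"]()
#   loss_fn(np.array([1.0, 0.0]), np.array([0.9, 0.1]))           # elementwise 0.5 * err**2
#   loss_fn.gradient(np.array([1.0, 0.0]), np.array([0.9, 0.1]))  # elementwise -(y_true - y_pred)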
# class CrossEntropy():
# # https://machinelearningmastery.com/cross-entropy-for-machine-learning/
# def __init__(self, epsilon=1e-15):
# self.epsilon = epsilon# Close To 0
# def loss(self, yhat, y):
# # Avoid division by zero
# yhat = np.clip(yhat, self.epsilon, 1. - self.epsilon)
# # get losses values
# return -y * np.log(yhat) - (1 - y)* np.log(1 - yhat)
# def accuracy(self, yhat, y):
# return accuracy_score(np.argmax(y, axis=1), np.argmax(yhat, axis=1))
# def derivative(self, yhat, y):
# # Avoid devision by zero
# yhat = np.clip(yhat, self.epsilon, 1. - self.epsilon)
# # get derivative values
# return -(y / yhat) + (1 - y) / (1 - yhat)
# class CrossEntropy():
# def loss(self, y, p):
# # Avoid division by zero
# p = np.clip(p, 1e-15, 1 - 1e-15)
# return - y * np.log(p) - (1 - y) * np.log(1 - p)
# def acc(self, y, p):
# return accuracy_score(np.argmax(y, axis=1), np.argmax(p, axis=1))
# def gradient(self, y, p):
# # Avoid division by zero
# p = np.clip(p, 1e-15, 1 - 1e-15)
# return - (y / p) + (1 - y) / (1 - p)
# if __name__ == "__main__":
# yhat = np.array(
# [
# [0.25,0.25,0.25,0.25],
# [0.01,0.01,0.01,0.96]
# ]
# )
# y = np.array(
# [
# [0,0,0,1],
# [0,0,0,1]
# ]
# )
# mse = MeanSquareError()
# print(mse.loss(yhat, y))
|
nilq/baby-python
|
python
|
## @package zeus_security_py
# Helper package for data security that will implement zeus microservices
#
#
from Cryptodome.Cipher import AES
from Cryptodome import Random
from hashlib import sha256
import base64
import os
import json
__author__ = "Noé Cruz | contactozurckz@gmail.com"
__copyright__ = "Copyright 2007, The Cogent Project"
__credits__ = ["Noé Cruz", "Zurck'z", "Jesus Salazar"]
__license__ = "MIT"
__version__ = "0.0.1"
__maintainer__ = "Noé Cruz"
__email__ = "contactozurckz@gmail.com"
__status__ = "Dev"
## Class Encryptor
# Encryptor class contains AES encrypt/decrypt functions
#
class AESEncryptor:
"""
Helper class for data security this contains certain methods for it.
AES (Advanced Encryption Standard) is a symmetric block cipher standardized by NIST .
It has a fixed data block size of 16 bytes. Its keys can be 128, 192, or 256 bits long.
Attributes
----------
default_block_size : int
Default block size for aes (default 32)
_sk_env : str
Key for get secret key from environment
Methods
-------
    __is_valid(sk=None)
        Return the provided secret key, or fall back to the AES_SK environment variable when it is None.
    encrypt / decrypt
        AES-CBC encryption and decryption helpers for strings and JSON payloads.
"""
default_block_size: int = 32
_sk_env = "AES_SK"
@staticmethod
def __is_valid(sk: str = None):
if sk is not None:
return sk
sk_env: str = os.getenv(AESEncryptor._sk_env)
if sk_env is not None:
return sk_env
raise Exception("AES Secret key was not provided!")
@staticmethod
def decrypt_ws_response(payload: dict, secret_key=None) -> dict:
json_decrypted = AESEncryptor.decrypt(payload["data"], secret_key)
        return json.loads(json_decrypted)
@staticmethod
def encrypt_ws_request(payload: dict, secret_key=None) -> dict:
encrypted_payload = AESEncryptor.encrypt(json.dumps(payload), secret_key)
return {"data": encrypted_payload}
@staticmethod
def json_decrypt(json_encrypted: str, secret_key=None) -> dict:
        return json.loads(AESEncryptor.decrypt(json_encrypted, secret_key))
@staticmethod
def json_encrypt(json_to_encrypt: dict, secret_key=None) -> str:
json_str = json.dumps(json_to_encrypt)
return AESEncryptor.encrypt(json_str, secret_key)
@staticmethod
def encrypt(
value: str,
secret_key: str = None,
aes_mode=AES.MODE_CBC,
charset="utf-8",
block_size: int = 16,
) -> str:
secret_key = AESEncryptor.__is_valid(secret_key).encode(charset)
raw_bytes = AESEncryptor.__pad(value)
iv = Random.new().read(block_size)
cipher = AES.new(secret_key, aes_mode, iv)
return base64.b64encode(iv + cipher.encrypt(raw_bytes)).decode(charset)
@staticmethod
def decrypt(
value: str, secret_key=None, aes_mode=AES.MODE_CBC, charset="utf-8"
) -> str:
secret_key = str.encode(AESEncryptor.__is_valid(secret_key))
encrypted = base64.b64decode(value)
iv = encrypted[:16]
cipher = AES.new(secret_key, aes_mode, iv)
return AESEncryptor.__un_pad(cipher.decrypt(encrypted[16:])).decode(charset)
@staticmethod
def genHash(value: str, charset="utf-8") -> str:
return sha256(value.encode(charset)).hexdigest()
@staticmethod
def __pad(s: str, block_size: int = 16, charset: str = "utf-8") -> bytes:
return bytes(
s
+ (block_size - len(s) % block_size)
* chr(block_size - len(s) % block_size),
charset,
)
@staticmethod
def __un_pad(value: str) -> str:
return value[0 : -ord(value[-1:])]
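# Minimal round-trip sketch (assumption: the key below is a hypothetical
# 16-byte AES-128 key; in practice the key comes from the AES_SK env var).
if __name__ == "__main__":
    _demo_key = "0123456789abcdef"
    token = AESEncryptor.encrypt("hello zeus", secret_key=_demo_key)
    assert AESEncryptor.decrypt(token, secret_key=_demo_key) == "hello zeus"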
|
nilq/baby-python
|
python
|
# Copyright 2022 MosaicML Composer authors
# SPDX-License-Identifier: Apache-2.0
"""Helper function to generate the README table."""
import json
import os
from pathlib import Path
import utils
import composer
from composer import functional as CF
EXCLUDE_METHODS = ['no_op_model', 'utils']
HEADER = ['Name', 'Functional', 'Attribution', 'tl;dr']
ATTRIBUTES = ['class_name', 'functional', 'tldr', 'attribution', 'link']
GITHUB_BASE = 'https://github.com/mosaicml/composer/tree/dev/composer/algorithms/'
folder_path = os.path.join(os.path.dirname(composer.__file__), 'algorithms')
methods = utils.list_dirs(Path(folder_path))
methods = [m for m in methods if m not in EXCLUDE_METHODS]
if not len(methods):
raise ValueError(f'Found 0 methods in {folder_path}')
print(f'Found {len(methods)} methods with metadata.')
metadata = {}
for name in methods:
json_path = os.path.join(folder_path, name, 'metadata.json')
with open(json_path, 'r') as f:
metadata[name] = json.load(f)[name]
# test functional method is importable
method_functional = metadata[name]['functional']
if method_functional and not hasattr(CF, method_functional):
raise ImportError(f'Unable to import functional form {method_functional} for {name}')
metadata[name]['functional'] = f'`cf.{method_functional}`'
metadata[name]['github_link'] = GITHUB_BASE + name
# define row format
row = [
'[{class_name}]({github_link})',
'{functional}',
    lambda d: '[{attribution}]({link})' if d['link'] else '{attribution}',
'{tldr}',
]
table_md = utils.build_markdown_table(
header=HEADER,
metadata=metadata,
sorted_keys=sorted(metadata.keys()),
row_format=row,
)
table_path = os.path.join(os.path.dirname(__file__), 'algorithms_table.md')
with open(table_path, 'w') as f:
f.write(table_md)
print(f'Table written to {table_path}')
|
nilq/baby-python
|
python
|
# -*- coding: utf-8 -*-
"""IdentityServicesEngineAPI network_access_time_date_conditions API fixtures and tests.
Copyright (c) 2021 Cisco and/or its affiliates.
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
import pytest
from fastjsonschema.exceptions import JsonSchemaException
from ciscoisesdk.exceptions import MalformedRequest
from ciscoisesdk.exceptions import ciscoisesdkException
from tests.environment import IDENTITY_SERVICES_ENGINE_VERSION
pytestmark = pytest.mark.skipif(IDENTITY_SERVICES_ENGINE_VERSION != '3.1.0', reason='version does not match')
def is_valid_get_network_access_time_conditions(json_schema_validate, obj):
if not obj:
return False
assert hasattr(obj, 'headers')
assert hasattr(obj, 'content')
assert hasattr(obj, 'text')
assert hasattr(obj, 'response')
json_schema_validate('jsd_ab916b19789c59b79dddbc2d0a3c57fc_v3_1_0').validate(obj.response)
return True
def get_network_access_time_conditions(api):
endpoint_result = api.network_access_time_date_conditions.get_network_access_time_conditions(
)
return endpoint_result
@pytest.mark.network_access_time_date_conditions
def test_get_network_access_time_conditions(api, validator):
try:
assert is_valid_get_network_access_time_conditions(
validator,
get_network_access_time_conditions(api)
)
except Exception as original_e:
with pytest.raises((JsonSchemaException, MalformedRequest)):
print("ERROR: {error}".format(error=original_e))
raise original_e
def get_network_access_time_conditions_default(api):
endpoint_result = api.network_access_time_date_conditions.get_network_access_time_conditions(
)
return endpoint_result
@pytest.mark.network_access_time_date_conditions
def test_get_network_access_time_conditions_default(api, validator):
try:
assert is_valid_get_network_access_time_conditions(
validator,
get_network_access_time_conditions_default(api)
)
except Exception as original_e:
with pytest.raises((JsonSchemaException, MalformedRequest, TypeError)):
raise original_e
def is_valid_create_network_access_time_condition(json_schema_validate, obj):
if not obj:
return False
assert hasattr(obj, 'headers')
assert hasattr(obj, 'content')
assert hasattr(obj, 'text')
assert hasattr(obj, 'response')
json_schema_validate('jsd_784b314d32b258a1b53c5c84cf84d396_v3_1_0').validate(obj.response)
return True
def create_network_access_time_condition(api):
endpoint_result = api.network_access_time_date_conditions.create_network_access_time_condition(
active_validation=False,
attribute_name='string',
attribute_value='string',
children=[{'conditionType': 'string', 'isNegate': True, 'link': {'href': 'string', 'rel': 'string', 'type': 'string'}}],
condition_type='string',
dates_range={'endDate': 'string', 'startDate': 'string'},
dates_range_exception={'endDate': 'string', 'startDate': 'string'},
description='string',
dictionary_name='string',
dictionary_value='string',
hours_range={'endTime': 'string', 'startTime': 'string'},
hours_range_exception={'endTime': 'string', 'startTime': 'string'},
id='string',
is_negate=True,
link={'href': 'string', 'rel': 'string', 'type': 'string'},
name='string',
operator='string',
payload=None,
week_days=['string'],
week_days_exception=['string']
)
return endpoint_result
@pytest.mark.network_access_time_date_conditions
def test_create_network_access_time_condition(api, validator):
try:
assert is_valid_create_network_access_time_condition(
validator,
create_network_access_time_condition(api)
)
except Exception as original_e:
with pytest.raises((JsonSchemaException, MalformedRequest)):
print("ERROR: {error}".format(error=original_e))
raise original_e
def create_network_access_time_condition_default(api):
endpoint_result = api.network_access_time_date_conditions.create_network_access_time_condition(
active_validation=False,
attribute_name=None,
attribute_value=None,
children=None,
condition_type=None,
dates_range=None,
dates_range_exception=None,
description=None,
dictionary_name=None,
dictionary_value=None,
hours_range=None,
hours_range_exception=None,
id=None,
is_negate=None,
link=None,
name=None,
operator=None,
payload=None,
week_days=None,
week_days_exception=None
)
return endpoint_result
@pytest.mark.network_access_time_date_conditions
def test_create_network_access_time_condition_default(api, validator):
try:
assert is_valid_create_network_access_time_condition(
validator,
create_network_access_time_condition_default(api)
)
except Exception as original_e:
with pytest.raises((JsonSchemaException, MalformedRequest, TypeError)):
raise original_e
def is_valid_get_network_access_time_condition_by_id(json_schema_validate, obj):
if not obj:
return False
assert hasattr(obj, 'headers')
assert hasattr(obj, 'content')
assert hasattr(obj, 'text')
assert hasattr(obj, 'response')
json_schema_validate('jsd_c941303330bc5615b3eb8d4d2702b874_v3_1_0').validate(obj.response)
return True
def get_network_access_time_condition_by_id(api):
endpoint_result = api.network_access_time_date_conditions.get_network_access_time_condition_by_id(
id='string'
)
return endpoint_result
@pytest.mark.network_access_time_date_conditions
def test_get_network_access_time_condition_by_id(api, validator):
try:
assert is_valid_get_network_access_time_condition_by_id(
validator,
get_network_access_time_condition_by_id(api)
)
except Exception as original_e:
with pytest.raises((JsonSchemaException, MalformedRequest)):
print("ERROR: {error}".format(error=original_e))
raise original_e
def get_network_access_time_condition_by_id_default(api):
endpoint_result = api.network_access_time_date_conditions.get_network_access_time_condition_by_id(
id='string'
)
return endpoint_result
@pytest.mark.network_access_time_date_conditions
def test_get_network_access_time_condition_by_id_default(api, validator):
try:
assert is_valid_get_network_access_time_condition_by_id(
validator,
get_network_access_time_condition_by_id_default(api)
)
except Exception as original_e:
with pytest.raises((JsonSchemaException, MalformedRequest, TypeError)):
raise original_e
def is_valid_update_network_access_time_condition_by_id(json_schema_validate, obj):
if not obj:
return False
assert hasattr(obj, 'headers')
assert hasattr(obj, 'content')
assert hasattr(obj, 'text')
assert hasattr(obj, 'response')
json_schema_validate('jsd_11232a518d5655f69e8687c9c98740c6_v3_1_0').validate(obj.response)
return True
def update_network_access_time_condition_by_id(api):
endpoint_result = api.network_access_time_date_conditions.update_network_access_time_condition_by_id(
active_validation=False,
attribute_name='string',
attribute_value='string',
children=[{'conditionType': 'string', 'isNegate': True, 'link': {'href': 'string', 'rel': 'string', 'type': 'string'}}],
condition_type='string',
dates_range={'endDate': 'string', 'startDate': 'string'},
dates_range_exception={'endDate': 'string', 'startDate': 'string'},
description='string',
dictionary_name='string',
dictionary_value='string',
hours_range={'endTime': 'string', 'startTime': 'string'},
hours_range_exception={'endTime': 'string', 'startTime': 'string'},
id='string',
is_negate=True,
link={'href': 'string', 'rel': 'string', 'type': 'string'},
name='string',
operator='string',
payload=None,
week_days=['string'],
week_days_exception=['string']
)
return endpoint_result
@pytest.mark.network_access_time_date_conditions
def test_update_network_access_time_condition_by_id(api, validator):
try:
assert is_valid_update_network_access_time_condition_by_id(
validator,
update_network_access_time_condition_by_id(api)
)
except Exception as original_e:
with pytest.raises((JsonSchemaException, MalformedRequest)):
print("ERROR: {error}".format(error=original_e))
raise original_e
def update_network_access_time_condition_by_id_default(api):
endpoint_result = api.network_access_time_date_conditions.update_network_access_time_condition_by_id(
active_validation=False,
id='string',
attribute_name=None,
attribute_value=None,
children=None,
condition_type=None,
dates_range=None,
dates_range_exception=None,
description=None,
dictionary_name=None,
dictionary_value=None,
hours_range=None,
hours_range_exception=None,
is_negate=None,
link=None,
name=None,
operator=None,
payload=None,
week_days=None,
week_days_exception=None
)
return endpoint_result
@pytest.mark.network_access_time_date_conditions
def test_update_network_access_time_condition_by_id_default(api, validator):
try:
assert is_valid_update_network_access_time_condition_by_id(
validator,
update_network_access_time_condition_by_id_default(api)
)
except Exception as original_e:
with pytest.raises((JsonSchemaException, MalformedRequest, TypeError)):
raise original_e
def is_valid_delete_network_access_time_condition_by_id(json_schema_validate, obj):
if not obj:
return False
assert hasattr(obj, 'headers')
assert hasattr(obj, 'content')
assert hasattr(obj, 'text')
assert hasattr(obj, 'response')
json_schema_validate('jsd_e2a697abfe2058d3adc7ad9922f5a5d6_v3_1_0').validate(obj.response)
return True
def delete_network_access_time_condition_by_id(api):
endpoint_result = api.network_access_time_date_conditions.delete_network_access_time_condition_by_id(
id='string'
)
return endpoint_result
@pytest.mark.network_access_time_date_conditions
def test_delete_network_access_time_condition_by_id(api, validator):
try:
assert is_valid_delete_network_access_time_condition_by_id(
validator,
delete_network_access_time_condition_by_id(api)
)
except Exception as original_e:
with pytest.raises((JsonSchemaException, MalformedRequest)):
print("ERROR: {error}".format(error=original_e))
raise original_e
def delete_network_access_time_condition_by_id_default(api):
endpoint_result = api.network_access_time_date_conditions.delete_network_access_time_condition_by_id(
id='string'
)
return endpoint_result
@pytest.mark.network_access_time_date_conditions
def test_delete_network_access_time_condition_by_id_default(api, validator):
try:
assert is_valid_delete_network_access_time_condition_by_id(
validator,
delete_network_access_time_condition_by_id_default(api)
)
except Exception as original_e:
with pytest.raises((JsonSchemaException, MalformedRequest, TypeError)):
raise original_e
|
nilq/baby-python
|
python
|
"""Run calcsfh or hybridMC in Parallel (using subprocess)"""
import argparse
import logging
import os
import subprocess
import sys
from glob import glob1
import numpy as np
logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger(__name__)
# Could be in a config or environ
calcsfh = '$HOME/research/match2.5/bin/calcsfh'
zcombine = '$HOME/research/match2.5/bin/zcombine'
hybridmc = '$HOME/research/match2.5/bin/hybridMC'
def test_files(prefs, run_calcsfh=True):
"""make sure match input files exist"""
return_code = 0
for pref in prefs:
if run_calcsfh:
pfiles = calcsfh_existing_files(pref)
else:
pfiles = [hybridmc_existing_files(pref)]
test = [os.path.isfile(f) for f in pfiles]
if False in test:
logger.error('missing a file in {}'.format(pref))
logger.error(pfiles)
return_code += 1
if return_code > 0:
sys.exit(2)
return
def uniform_filenames(prefs, dry_run=False):
"""
make all fake match and par files in a directory follow the format
target_filter1_filter2.gst.suffix all lower case
use dry_run to print the mv command, or will call os.system.
"""
for pref in prefs:
dirname, p = os.path.split(pref)
filters = '_'.join(p.split('_')[1:])
        print(dirname, p, filters)
fake, = glob1(dirname, '*{}*fake'.format(filters))
match, = glob1(dirname, '*{}*match'.format(filters))
param, = glob1(dirname, '*{}*param'.format(filters))
ufake = '_'.join(fake.split('_')[1:]).replace('_gst.fake1',
'.gst').lower()
umatch = '_'.join(match.split('_')[1:]).lower()
uparam = param.replace('.param', '.gst.param').lower()
for old, new in zip([fake, match, param], [ufake, umatch, uparam]):
cmd = 'mv {dir}/{old} {dir}/{new}'.format(dir=dirname, old=old,
new=new)
logger.info(cmd)
if not dry_run:
os.system(cmd)
def calcsfh_existing_files(pref, optfilter1=''):
"""file formats for param match and matchfake"""
param = pref + '.param'
match = pref + '.match'
fake = pref + '.matchfake'
return (param, match, fake)
def calcsfh_new_files(pref):
"""file formats for match grid, sdout, and sfh file"""
out = pref + '.out'
scrn = pref + '.scrn'
sfh = pref + '.sfh'
return (out, scrn, sfh)
def hybridmc_existing_files(pref):
"""file formats for the HMC, based off of calcsfh_new_files"""
mcin = pref + '.out.dat'
return mcin
def hybridmc_new_files(pref):
"""file formats for HybridMC output and the following zcombine output"""
pref = pref.strip()
mcmc = pref + '.mcmc'
mcscrn = mcmc + '.scrn'
mczc = mcmc + '.zc'
return (mcmc, mcscrn, mczc)
def run_parallel(prefs, dry_run=False, nproc=8, run_calcsfh=True):
"""run calcsfh and zcombine in parallel, flags are hardcoded."""
test_files(prefs, run_calcsfh)
rdict = {'calcsfh': calcsfh, 'zcombine': zcombine, 'hybridmc': hybridmc}
# calcsfh
# calcsfh, param, match, fake, out, scrn
    cmd1 = ('{calcsfh} {param} {match} {fake} {out} '
            '-PARSEC -mcdata -kroupa -zinc -sub=v2 > {scrn}')
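    # For illustration (hypothetical prefix 'ddo71'), cmd1 expands to:
    #   $HOME/research/match2.5/bin/calcsfh ddo71.param ddo71.match \
    #       ddo71.matchfake ddo71.out -PARSEC -mcdata -kroupa -zinc -sub=v2 > ddo71.scrn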
# zcombine
# zcombine, out, sfh
cmd2 = '{zcombine} {out} -bestonly > {sfh}'
# hybridmc
# hybridmc, mcin, mcmc, mcscrn
cmd3 = '{hybridmc} {mcin} {mcmc} -tint=2.0 -nmc=10000 -dt=0.015 > {mcscrn}'
# zcombine w/ hybrid mc
# zcombine, mcmc, mczc
cmd4 = '{zcombine} {mcmc} -unweighted -medbest -jeffreys -best={mczc}'
    niters = int(np.ceil(len(prefs) / float(nproc)))
    sets = np.arange(niters * nproc, dtype=int).reshape(niters, nproc)
logging.debug('{} prefs, {} niters'.format(len(prefs), niters))
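    # e.g. 10 prefs with nproc=8 -> niters=2; sets holds the index grid
    # [[0..7], [8..15]], and indices >= len(prefs) are dropped below.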
for j, iset in enumerate(sets):
        # skip procs that aren't needed
iset = iset[iset < len(prefs)]
# run calcsfh
procs = []
for i in iset:
if run_calcsfh:
rdict['param'], rdict['match'], rdict['fake'] = \
calcsfh_existing_files(prefs[i])
rdict['out'], rdict['scrn'], rdict['sfh'] = \
calcsfh_new_files(prefs[i])
cmd = cmd1.format(**rdict)
else:
rdict['mcin'] = hybridmc_existing_files(prefs[i])
rdict['mcmc'], rdict['mcscrn'], rdict['mczc'] = \
hybridmc_new_files(prefs[i])
cmd = cmd3.format(**rdict)
if not dry_run:
procs.append(subprocess.Popen(cmd, shell=True))
logger.info(cmd)
# wait for calcsfh
if not dry_run:
[p.wait() for p in procs]
logger.debug('calcsfh or hybridMC set {} complete'.format(j))
# run zcombine
procs = []
for i in iset:
if run_calcsfh:
rdict['out'], rdict['scrn'], rdict['sfh'] = \
calcsfh_new_files(prefs[i])
zcom = cmd2.format(**rdict)
else:
zcom = cmd4.format(**rdict)
if not dry_run:
procs.append(subprocess.Popen(zcom, shell=True))
logger.info(zcom)
# wait for zcombine
if not dry_run:
[p.wait() for p in procs]
logger.debug('zcombine set {} complete'.format(j))
def main(argv):
"""parse in put args, setup logger, and call run_parallel"""
    desc = ('Run calcsfh in parallel. Note: bg cmd, if in use, '
            'needs to be in the current folder')
parser = argparse.ArgumentParser(description=desc)
parser.add_argument('-d', '--dry_run', action='store_true',
help='only print commands')
parser.add_argument('-v', '--verbose', action='store_true',
help='set logging to debug')
parser.add_argument('-n', '--nproc', type=int, default=8,
help='number of processors')
parser.add_argument('-m', '--hmc', action='store_false',
help='run hybridMC (must be after a calcsfh run)')
parser.add_argument('-f', '--logfile', type=str,
default='calcsfh_parallel.log',
help='log file name')
parser.add_argument('-s', '--simplify', action='store_true',
                        help=('make filenames uniform and exit '
                              '(before calcsfh run)'))
parser.add_argument('pref_list', type=argparse.FileType('r'),
help=("list of prefixs to run on. E.g.,",
"ls */*.match | sed 's/.match//' > pref_list"))
args = parser.parse_args(argv)
prefs = [l.strip() for l in args.pref_list.readlines()]
handler = logging.FileHandler(args.logfile)
if args.verbose:
handler.setLevel(logging.DEBUG)
else:
handler.setLevel(logging.INFO)
    formatter = logging.Formatter('%(asctime)s - %(name)s - '
                                  '%(levelname)s - %(message)s')
handler.setFormatter(formatter)
logger.addHandler(handler)
if args.simplify:
uniform_filenames(prefs, dry_run=args.dry_run)
else:
logger.info('running on {}'.format(', '.join([p.strip()
for p in prefs])))
run_parallel(prefs, dry_run=args.dry_run, nproc=args.nproc,
run_calcsfh=args.hmc)
if __name__ == '__main__':
main(sys.argv[1:])
|
nilq/baby-python
|
python
|
import os
import pickle
import sys
import time
import logging
from watchdog.observers import Observer
from watchdog.events import LoggingEventHandler, FileCreatedEvent, FileDeletedEvent, FileModifiedEvent, FileMovedEvent, \
DirCreatedEvent, DirDeletedEvent, DirModifiedEvent, DirMovedEvent
from watchdog.utils.dirsnapshot import DirectorySnapshot, DirectorySnapshotDiff
import logging
from watchdog.events import LoggingEventHandler
class _EmptySnapshot(DirectorySnapshot):
@property
def stat_snapshot(self):
return dict()
@property
def paths(self):
return set()
class PersistantObserver(Observer):
def __init__(self, *args, **kwargs):
"""
Check if watching folders has changed since last observation.
If change detected, emit corresponding events at suscribers handlers.
At the `Observer.stop`, save states of folders with pickle for the next observation.
PARAMETERS
==========
save_to : unicode
path where save pickle dumping
protocol (optionnal): int
protocol used for dump current states of watching folders
"""
self._filename = kwargs.pop('save_to')
self._protocol = kwargs.pop('protocol', 0)
Observer.__init__(self, *args, **kwargs)
def start(self, *args, **kwargs):
previous_snapshots = dict()
if os.path.exists(self._filename):
with open(self._filename, 'rb') as f:
previous_snapshots = pickle.load(f)
for watcher, handlers in self._handlers.items():
try:
path = watcher.path
curr_snap = DirectorySnapshot(path)
pre_snap = previous_snapshots.get(path, _EmptySnapshot(path))
diff = DirectorySnapshotDiff(pre_snap, curr_snap)
for handler in handlers:
# Dispatch files modifications
for new_path in diff.files_created:
handler.dispatch(FileCreatedEvent(new_path))
for del_path in diff.files_deleted:
handler.dispatch(FileDeletedEvent(del_path))
for mod_path in diff.files_modified:
handler.dispatch(FileModifiedEvent(mod_path))
for src_path, mov_path in diff.files_moved:
handler.dispatch(FileMovedEvent(src_path, mov_path))
# Dispatch directories modifications
for new_dir in diff.dirs_created:
handler.dispatch(DirCreatedEvent(new_dir))
for del_dir in diff.dirs_deleted:
handler.dispatch(DirDeletedEvent(del_dir))
for mod_dir in diff.dirs_modified:
handler.dispatch(DirModifiedEvent(mod_dir))
for src_path, mov_path in diff.dirs_moved:
handler.dispatch(DirMovedEvent(src_path, mov_path))
except PermissionError as e:
print(e)
Observer.start(self, *args, **kwargs)
def stop(self, *args, **kwargs):
try:
            snapshots = {watcher.path: DirectorySnapshot(watcher.path) for watcher in self._handlers.keys()}
with open(self._filename, 'wb') as f:
pickle.dump(snapshots, f, self._protocol)
Observer.stop(self, *args, **kwargs)
except PermissionError as e:
print(e)
def observe_realtime(path=os.path.curdir):
logging.basicConfig(level=logging.INFO,
format='%(asctime)s - %(message)s',
datefmt='%Y-%m-%d %H:%M:%S')
event_handler = LoggingEventHandler()
observer = Observer()
observer.schedule(event_handler, path, recursive=True)
observer.start()
try:
while True:
time.sleep(1)
except KeyboardInterrupt:
observer.stop()
observer.join()
def observe_over_sessions(path=os.path.curdir):
logging.basicConfig(level=logging.DEBUG)
event_handler = LoggingEventHandler()
observer = PersistantObserver(save_to='C:\\temp\\test.pickle', protocol=-1)
observer.schedule(event_handler, path=path, recursive=True)
observer.start()
# observer.join()
observer.stop()
try:
while True:
time.sleep(1)
except KeyboardInterrupt:
observer.stop()
observer.join()
def compare_dirs(src_path, dest_path):
    src_snap = DirectorySnapshot(src_path)
    dest_snap = DirectorySnapshot(dest_path)
    diff = DirectorySnapshotDiff(src_snap, dest_snap)
print(diff.files_modified)
if __name__ == "__main__":
path = sys.argv[1] if len(sys.argv) > 1 else '.'
# observe_realtime(path)
# observe_over_sessions(path)
compare_dirs("C:\\New folder\\temp", "C:\\temp")
|
nilq/baby-python
|
python
|
# -*- coding: utf-8 -*-
# -----------------------------------------------------------------------------
# Copyright © Spyder Project Contributors
#
# Licensed under the terms of the MIT License
# (see spyder/__init__.py for details)
# -----------------------------------------------------------------------------
"""
Tests for the console plugin.
"""
# Standard library imports
try:
from unittest.mock import Mock
except ImportError:
from mock import Mock # Python 2
# Third party imports
from qtpy.QtCore import Qt
from qtpy.QtWidgets import QMainWindow
import pytest
from flaky import flaky
# Local imports
from spyder.config.manager import CONF
from spyder.plugins.console.plugin import Console
# =============================================================================
# Fixtures
# =============================================================================
@pytest.fixture
def console_plugin(qtbot):
"""Console plugin fixture."""
class MainWindowMock(QMainWindow):
def __getattr__(self, attr):
return Mock()
window = MainWindowMock()
console_plugin = Console(parent=window, configuration=CONF)
console_plugin.start_interpreter({})
window.setCentralWidget(console_plugin.get_widget())
qtbot.addWidget(window)
window.resize(640, 480)
window.show()
return console_plugin
# =============================================================================
# Tests
# =============================================================================
@flaky(max_runs=3)
def test_run_code(console_plugin, capsys):
"""Test that the console runs code."""
shell = console_plugin.get_widget().shell
    # Run a simple expression
shell.insert_text('2+2', at_end=True)
shell._key_enter()
# Capture stdout and assert that it's the expected one
sys_stream = capsys.readouterr()
assert sys_stream.out == u'4\n'
@flaky(max_runs=3)
def test_completions(console_plugin, qtbot):
"""Test that completions work as expected."""
shell = console_plugin.get_widget().shell
# Get completions
qtbot.keyClicks(shell, 'impor')
qtbot.keyClick(shell, Qt.Key_Tab)
qtbot.keyClick(shell.completion_widget, Qt.Key_Enter)
# Assert completion was introduced in the console
assert u'import' in shell.toPlainText()
if __name__ == "__main__":
pytest.main()
|
nilq/baby-python
|
python
|
from discord.ext.alternatives import silent_delete
from bot import Bot
Bot().run()
|
nilq/baby-python
|
python
|
import distutils.command.build
import setuptools.command.egg_info
from setuptools import setup, Extension, find_packages
from Cython.Build import cythonize
import os
def get_build_dir(default):
return os.environ.get('STFPY_BUILD_DIR', default)
# Override egg command
class EggCommand(setuptools.command.egg_info.egg_info):
def initialize_options(self):
setuptools.command.egg_info.egg_info.initialize_options(self)
self.egg_base = get_build_dir(self.egg_base)
# Override build command
class BuildCommand(distutils.command.build.build):
def initialize_options(self):
distutils.command.build.build.initialize_options(self)
self.build_base = get_build_dir(self.build_base)
setup(
name = "stfpy",
packages = find_packages(),
cmdclass = {'build': BuildCommand, 'egg_info': EggCommand},
ext_modules = cythonize(Extension('*',
sources=["stfpy/*.pyx"],
language='c++',
extra_link_args=os.environ.get('LDFLAGS', '').split(' ')), # Ensure our link flags come last
nthreads = 4,
language_level = "3")
)
|
nilq/baby-python
|
python
|
from geolocalizador import *
endereco = u'Universidade de Sao Paulo, Instituto de Matematica e Estatastica, Departamento de Ciencia da Computacao. Rua do Matao 1010 Cidade Universitaria 05508090 - Sao Paulo, SP - Brasil Telefone: (11) 30916135 Ramal: 6235 Fax: (11) 30916134 URL da Homepage: http://www.ime.usp.br/~cesar/'.encode('utf8','replace')
g = Geolocalizador(endereco)
endereco = u'Universidade de Sao Paulo, Instituto de Matematica e Estatistica. Rua do Matao, 1010 - Cidade Universitaria Butanta 05508-090 - Sao Paulo, SP - Brasil URL da Homepage: http://www.vision.ime.usp.br/~jmena/'
g = Geolocalizador(endereco)
endereco = u'Universidade de Sao Paulo, Instituto de Matematica e Estatistica. Rua do Matao, 1010 - Cidade Universitaria Butanta 0090 - Arequipa, - Peru URL da Homepage: http://www.vision.ime.usp.br/~jmena/'
g = Geolocalizador(endereco)
endereco = u'Universidade de Sao Paulo, Instituto de Matematica e Estatastica, Departamento de Cienci 6235 Fax: (11) 30916134 URL da Homepage: http://www.ime.usp.br/~cesar/'.encode('utf8','replace')
g = Geolocalizador(endereco)
|
nilq/baby-python
|
python
|
# Copyright (c) 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import sys
from telemetry import test
from measurements import image_decoding
class ImageDecodingToughImageCases(test.Test):
test = image_decoding.ImageDecoding
# TODO: Rename this page set to tough_image_cases.json
page_set = 'page_sets/image_decoding_measurement.json'
# crbug.com/323015
enabled = not sys.platform.startswith('linux')
|
nilq/baby-python
|
python
|
from .hook_group import HookGroup
class Event(HookGroup):
def __init__(self, event=None, hooks=None, config=None):
self.type = event
super().__init__(hooks=hooks, config=config)
|
nilq/baby-python
|
python
|
# -------------------------------------------------
# Data Types for Data Science in Python - Handling Dates and Times
# 24 Sep 2020
# VNTBJR
# ------------------------------------------------
#
# Load packages
# reticulate::repl_python()  # run from R to start the Python REPL
# Load data
import csv
csvfile2 = open("Datasets/cta_summary.csv", mode = 'r')
daily_summaries = []
for row in csv.reader(csvfile2):
daily_summaries.append(row)
quit()
csvfile2.close()
daily_summaries.pop(0)
print(daily_summaries)
dates_list = []
riderships = []
for date in daily_summaries:
dates_list.append(date[0])
riderships.append(date[4])
quit()
from datetime import datetime
datetimes_list0 = []
for date in dates_list:
    datetimes_list0.append(datetime.strptime(date, '%m/%d/%Y'))
quit()
daily_summaries2 = list(zip(datetimes_list0, riderships))
print(daily_summaries2)
from collections import defaultdict
daily_summaries3 = defaultdict(list)
# Loop over the list daily_summaries
for daily_summary in daily_summaries:
# Convert the service_date to a datetime object
service_datetime = datetime.strptime(daily_summary[0], '%m/%d/%Y')
    # Store the day type and total ridership for this service date
    daily_summaries3[service_datetime] = {'day_type': daily_summary[1],
                                          'total_ridership': daily_summary[4]}
quit()
# Print daily_summaries3
print(daily_summaries3)
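# Illustrative shape of one entry (the values shown are hypothetical):
#   daily_summaries3[datetime(2001, 1, 1)]
#   -> {'day_type': 'U', 'total_ridership': '297192'}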
review_dates = []
for date in daily_summaries:
review_dates.append(datetime.strptime(date[0], '%m/%d/%Y'))
quit()
review_dates = review_dates[4469:4479]
print(review_dates)
len(review_dates)
#######################################################
# There and Back Again a Date Time Journey-------------------------------------
#######################################################
# Strings to DateTimes
# Import the datetime object from datetime
from datetime import datetime
# Iterate over the dates_list
datetimes_list = []
for date_str in dates_list:
# Convert each date to a datetime object: date_dt
datetimes_list.append(datetime.strptime(date_str, '%m/%d/%Y'))
quit()
# Print each date_dt
print(datetimes_list)
# Converting to a String
# Loop over the first 10 items of the datetimes_list
for item in datetimes_list[:10]:
# Print out the record as a string in the format of 'MM/DD/YYYY'
print(datetime.strftime(item, '%m/%d/%Y'))
# Print out the record as an ISO standard string
print(datetime.isoformat(item))
quit()
#######################################################
# Working with Datetime Components and Current time -----------------------------
#######################################################
# Pieces of Time
from datetime import datetime
from collections import defaultdict
# Create a defaultdict of an integer: monthly_total_rides
monthly_total_rides = defaultdict(int)
# Loop over the list daily_summaries
for daily_summary in daily_summaries:
# Convert the service_date to a datetime object
service_datetime = datetime.strptime(daily_summary[0], '%m/%d/%Y')
# Add the total rides to the current amount for the month
monthly_total_rides[service_datetime.month] += int(daily_summary[4])
quit()
# Print monthly_total_rides
print(monthly_total_rides)
# Creating DateTime Objects... Now
# Import datetime from the datetime module
from datetime import datetime
# Compute the local datetime: local_dt
local_dt = datetime.now()
# Print the local datetime
print(local_dt)
# Compute the UTC datetime: utc_dt
utc_dt = datetime.utcnow()
# Print the UTC datetime
print(utc_dt)
# Timezones
from pytz import timezone
# Create a Timezone object for Chicago
chicago_usa_tz = timezone('US/Central')
# Create a Timezone object for New York
ny_usa_tz = timezone('US/Eastern')
# Iterate over the daily_summaries list
for orig_dt, ridership in daily_summaries2:
# Make the orig_dt timezone "aware" for Chicago
chicago_dt = orig_dt.replace(tzinfo = chicago_usa_tz)
# Convert chicago_dt to the New York Timezone
ny_dt = chicago_dt.astimezone(ny_usa_tz)
# Print the chicago_dt, ny_dt, and ridership
print('Chicago: %s, NY: %s, Ridership: %s' % (chicago_dt, ny_dt, ridership))
quit()
#######################################################
# Time Travel (Adding and Subtracting Time) ----------------------------------
#######################################################
# Finding a time in the future and from the past
# the daily_summaries object used in this exercise is missing...
# Import timedelta from the datetime module
from datetime import timedelta
# Build a timedelta of 30 days: glanceback
glanceback = timedelta(days = 30)
# Iterate over the review_dates as date
for date in review_dates:
# Calculate the date 30 days back: prior_period_dt
prior_period_dt = date - glanceback
# Print the review_date, day_type and total_ridership
print('Date: %s, Type: %s, Total Ridership: %s' %
(date,
daily_summaries[date]['day_type'],
daily_summaries[date]['total_ridership']))
# Print the prior_period_dt, day_type and total_ridership
print('Date: %s, Type: %s, Total Ridership: %s' %
(prior_period_dt,
daily_summaries[prior_period_dt]['day_type'],
daily_summaries[prior_period_dt]['total_ridership']))
quit()
# Finding differences in DateTimes
# the date_ranges object used in this exercise is missing
# Iterate over the date_ranges
for start_date, end_date in date_ranges:
# Print the End and Start Date
print(end_date, start_date)
# Print the difference between each end and start date
print(end_date - start_date)
quit()
#######################################################
# HELP! Libraries to make it easier --------------------------------------------
#######################################################
# Pendulum library
# .parse() converts a string to a pendulum datetime object without needing
#   a format string
# .in_timezone() converts a pendulum object to the desired timezone
# .now() accepts the timezone you want the current time in
# .in_XXX() (days, months, years...) gives the difference in the chosen metric
# .in_words() gives the difference in a nice expressive form
# Localizing time with pendulum
# Import the pendulum module
import pendulum
# Create a now datetime for Tokyo: tokyo_dt
tokyo_dt = pendulum.now('Asia/Tokyo')
# Convert the tokyo_dt to Los Angeles: la_dt
la_dt = tokyo_dt.in_timezone('America/Los_Angeles')
# Print the ISO 8601 string of la_dt
print(la_dt.to_iso8601_string())
# Humanizing Differences with Pendulum
# Iterate over date_ranges
for start_date, end_date in date_ranges:
# Convert the start_date string to a pendulum date: start_dt
start_dt = pendulum.parse(start_date, strict = False)
# Convert the end_date string to a pendulum date: end_dt
end_dt = pendulum.parse(end_date, strict = False)
# Print the End and Start Date
print(end_dt, start_dt)
# Calculate the difference between end_dt and start_dt: diff_period
diff_period = end_dt - start_dt
# Print the difference in days
print(diff_period.in_days())
#######################################################
|
nilq/baby-python
|
python
|
import requests
import folium
import geocoder
import string
import os
import json
from functools import wraps, update_wrapper
from datetime import datetime
from pathlib import Path
from flask_bootstrap import Bootstrap
from flask_nav import Nav
from flask_nav.elements import *
from dominate.tags import img
from ediblepickle import checkpoint
from flask import Flask, render_template, request, redirect, url_for, send_file, make_response
###############################################
# Define navbar with logo #
###############################################
logo = img(src='./static/img/logo.png', height="50", width="50", style="margin-top:-15px")
#here we define our menu items
topbar = Navbar(logo,
Link('IXWater','http://ixwater.com'),
View('Home', 'main')
)
# registers the "top" menubar
nav = Nav()
nav.register_element('top', topbar)
app = Flask(__name__)
Bootstrap(app)
app.config['TEMPLATES_AUTO_RELOAD'] = True
app.vars = {}
@app.route('/')
def main():
return redirect('/index.html')
@app.route('/index.html', methods=['GET'])
def index():
if request.method == 'GET':
#return render_template('input.html')
map_name = f"commercecity_outfalls_8dec2021.html"
#have to set map path - used by template
map_path = os.path.join(app.root_path, 'static/' + map_name)
app.vars['map_path'] = map_path
if Path(map_path).exists():
return render_template('display.html')
else:
return redirect('/maperror.html')
@app.route('/maps/map.html')
def show_map():
map_path = app.vars.get("map_path")
map_file = Path(map_path)
if map_file.exists():
return send_file(map_path)
else:
return render_template('error.html', culprit='map file', details="the map file couldn't be loaded")
@app.route('/error.html')
def error():
details = "There was some kind of error."
return render_template('error.html', culprit='logic', details=details)
@app.route('/apierror.html')
def apierror():
details = "There was an error with one of the API calls you attempted."
return render_template('error.html', culprit='API', details=details)
@app.route('/maperror.html')
def geoerror():
details = "Map not found."
return render_template('error.html', culprit='the Map', details=details)
nav.init_app(app)
if __name__ == '__main__':
app.debug = True
app.run(host='0.0.0.0')
|
nilq/baby-python
|
python
|
from django.db.models.signals import pre_save, post_save
from django.dispatch import receiver
from asset_events.models import StatusChangingEvent
@receiver(post_save)
def update_asset_status(sender, instance, **kwargs):
if not issubclass(sender, StatusChangingEvent):
return
sender.post_save(instance)
|
nilq/baby-python
|
python
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10 on 2018-06-05 08:38
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('yaksh', '0015_auto_20180601_1215'),
]
operations = [
migrations.AlterField(
model_name='question',
name='type',
field=models.CharField(choices=[('mcq', 'Single Correct Choice'), ('mcc', 'Multiple Correct Choices'), ('code', 'Code'), ('upload', 'Assignment Upload'), ('integer', 'Answer in Integer'), ('string', 'Answer in String'), ('float', 'Answer in Float'), ('arrange', 'Arrange in Correct Order')], max_length=24),
),
migrations.AlterField(
model_name='testcase',
name='type',
field=models.CharField(choices=[('standardtestcase', 'Standard Testcase'), ('stdiobasedtestcase', 'StdIO Based Testcase'), ('mcqtestcase', 'MCQ Testcase'), ('hooktestcase', 'Hook Testcase'), ('integertestcase', 'Integer Testcase'), ('stringtestcase', 'String Testcase'), ('floattestcase', 'Float Testcase'), ('arrangetestcase', 'Arrange Testcase'), ('easystandardtestcase', 'Easy Standard Testcase')], max_length=24, null=True),
),
]
|
nilq/baby-python
|
python
|
# Generated by Django 3.0.5 on 2020-12-11 07:03
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('content_api', '0002_auto_20201002_1228'),
]
operations = [
migrations.AlterModelOptions(
name='category',
options={'verbose_name': 'Категория', 'verbose_name_plural': 'Категории'},
),
migrations.AlterModelOptions(
name='genre',
options={'verbose_name': 'Жанр', 'verbose_name_plural': 'Жанры'},
),
migrations.AlterModelOptions(
name='title',
options={'verbose_name': 'Произведение', 'verbose_name_plural': 'Произведения'},
),
migrations.AlterField(
model_name='category',
name='name',
field=models.CharField(max_length=30, verbose_name='Название'),
),
migrations.AlterField(
model_name='category',
name='slug',
field=models.SlugField(max_length=30, unique=True, verbose_name='url'),
),
migrations.AlterField(
model_name='genre',
name='name',
field=models.CharField(max_length=30, verbose_name='Название'),
),
migrations.AlterField(
model_name='genre',
name='slug',
field=models.SlugField(max_length=30, unique=True, verbose_name='url'),
),
migrations.AlterField(
model_name='title',
name='category',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='titles', to='content_api.Category', verbose_name='Категория'),
),
migrations.AlterField(
model_name='title',
name='description',
field=models.TextField(blank=True, null=True, verbose_name='Описание'),
),
migrations.AlterField(
model_name='title',
name='genre',
field=models.ManyToManyField(related_name='titles', to='content_api.Genre', verbose_name='Жанр'),
),
migrations.AlterField(
model_name='title',
name='name',
field=models.TextField(verbose_name='Название'),
),
migrations.AlterField(
model_name='title',
name='rating',
field=models.IntegerField(blank=True, null=True, verbose_name='Рейтинг'),
),
migrations.AlterField(
model_name='title',
name='year',
field=models.PositiveSmallIntegerField(db_index=True, verbose_name='Год'),
),
]
|
nilq/baby-python
|
python
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
import testscenarios
from cliff import command
from cliff import commandmanager
from cliff.tests import base
from cliff.tests import utils
load_tests = testscenarios.load_tests_apply_scenarios
class TestLookupAndFind(base.TestBase):
scenarios = [
('one-word', {'argv': ['one']}),
('two-words', {'argv': ['two', 'words']}),
('three-words', {'argv': ['three', 'word', 'command']}),
]
def test(self):
mgr = utils.TestCommandManager(utils.TEST_NAMESPACE)
cmd, name, remaining = mgr.find_command(self.argv)
self.assertTrue(cmd)
self.assertEqual(' '.join(self.argv), name)
self.assertFalse(remaining)
class TestLookupWithRemainder(base.TestBase):
scenarios = [
('one', {'argv': ['one', '--opt']}),
('two', {'argv': ['two', 'words', '--opt']}),
('three', {'argv': ['three', 'word', 'command', '--opt']}),
]
def test(self):
mgr = utils.TestCommandManager(utils.TEST_NAMESPACE)
cmd, name, remaining = mgr.find_command(self.argv)
self.assertTrue(cmd)
self.assertEqual(['--opt'], remaining)
class TestFindInvalidCommand(base.TestBase):
scenarios = [
('no-such-command', {'argv': ['a', '-b']}),
('no-command-given', {'argv': ['-b']}),
]
def test(self):
mgr = utils.TestCommandManager(utils.TEST_NAMESPACE)
try:
mgr.find_command(self.argv)
except ValueError as err:
# make sure err include 'a' when ['a', '-b']
self.assertIn(self.argv[0], str(err))
self.assertIn('-b', str(err))
else:
self.fail('expected a failure')
class TestFindUnknownCommand(base.TestBase):
def test(self):
mgr = utils.TestCommandManager(utils.TEST_NAMESPACE)
try:
mgr.find_command(['a', 'b'])
except ValueError as err:
self.assertIn("['a', 'b']", str(err))
else:
self.fail('expected a failure')
class TestDynamicCommands(base.TestBase):
def test_add(self):
mgr = utils.TestCommandManager(utils.TEST_NAMESPACE)
mock_cmd = mock.Mock()
mgr.add_command('mock', mock_cmd)
found_cmd, name, args = mgr.find_command(['mock'])
self.assertIs(mock_cmd, found_cmd)
def test_intersected_commands(self):
def foo(arg):
pass
def foo_bar():
pass
mgr = utils.TestCommandManager(utils.TEST_NAMESPACE)
mgr.add_command('foo', foo)
mgr.add_command('foo bar', foo_bar)
self.assertIs(foo_bar, mgr.find_command(['foo', 'bar'])[0])
self.assertIs(
foo,
mgr.find_command(['foo', 'arg0'])[0],
)
class TestLoad(base.TestBase):
def test_load_commands(self):
testcmd = mock.Mock(name='testcmd')
testcmd.name.replace.return_value = 'test'
mock_pkg_resources = mock.Mock(return_value=[testcmd])
with mock.patch('pkg_resources.iter_entry_points',
mock_pkg_resources) as iter_entry_points:
mgr = commandmanager.CommandManager('test')
iter_entry_points.assert_called_once_with('test')
names = [n for n, v in mgr]
self.assertEqual(['test'], names)
def test_load_commands_keep_underscores(self):
testcmd = mock.Mock()
testcmd.name = 'test_cmd'
mock_pkg_resources = mock.Mock(return_value=[testcmd])
with mock.patch('pkg_resources.iter_entry_points',
mock_pkg_resources) as iter_entry_points:
mgr = commandmanager.CommandManager(
'test',
convert_underscores=False,
)
iter_entry_points.assert_called_once_with('test')
names = [n for n, v in mgr]
self.assertEqual(['test_cmd'], names)
def test_load_commands_replace_underscores(self):
testcmd = mock.Mock()
testcmd.name = 'test_cmd'
mock_pkg_resources = mock.Mock(return_value=[testcmd])
with mock.patch('pkg_resources.iter_entry_points',
mock_pkg_resources) as iter_entry_points:
mgr = commandmanager.CommandManager(
'test',
convert_underscores=True,
)
iter_entry_points.assert_called_once_with('test')
names = [n for n, v in mgr]
self.assertEqual(['test cmd'], names)
class FauxCommand(command.Command):
def take_action(self, parsed_args):
return 0
class FauxCommand2(FauxCommand):
pass
class TestLegacyCommand(base.TestBase):
def test_find_legacy(self):
mgr = utils.TestCommandManager(None)
mgr.add_command('new name', FauxCommand)
mgr.add_legacy_command('old name', 'new name')
cmd, name, remaining = mgr.find_command(['old', 'name'])
self.assertIs(cmd, FauxCommand)
self.assertEqual(name, 'old name')
def test_legacy_overrides_new(self):
mgr = utils.TestCommandManager(None)
mgr.add_command('cmd1', FauxCommand)
mgr.add_command('cmd2', FauxCommand2)
mgr.add_legacy_command('cmd2', 'cmd1')
cmd, name, remaining = mgr.find_command(['cmd2'])
self.assertIs(cmd, FauxCommand)
self.assertEqual(name, 'cmd2')
def test_no_legacy(self):
mgr = utils.TestCommandManager(None)
mgr.add_command('cmd1', FauxCommand)
self.assertRaises(
ValueError,
mgr.find_command,
['cmd2'],
)
def test_no_command(self):
mgr = utils.TestCommandManager(None)
mgr.add_legacy_command('cmd2', 'cmd1')
self.assertRaises(
ValueError,
mgr.find_command,
['cmd2'],
)
|
nilq/baby-python
|
python
|
# Copyright (C) 2013 Claudio "nex" Guarnieri (@botherder)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from lib.cuckoo.common.abstracts import Signature
class NetworkHTTP(Signature):
name = "network_http"
description = "Performs some HTTP requests"
severity = 2
categories = ["http"]
authors = ["nex"]
minimum = "2.0"
host_safelist = [
"www.msftncsi.com"
]
def on_complete(self):
for http in getattr(self, "get_net_http_ex", lambda: [])():
if http["host"] in self.host_safelist:
continue
self.mark_ioc("request", "%s %s://%s%s" % (
http["method"], http["protocol"], http["host"], http["uri"],
))
return self.has_marks()
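    # Illustrative mark (hypothetical request): a GET for
    # http://example.com/index.html is recorded as the IOC string
    # "GET http://example.com/index.html".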
|
nilq/baby-python
|
python
|
# -*- coding: utf-8 -*-
from calendar import timegm
from collections import defaultdict
from datetime import datetime
from importlib import import_module
from os import path as op
import re
from pkg_resources import DistributionNotFound, iter_entry_points, load_entry_point
from pygments import highlight
from pygments.formatters import HtmlFormatter
from pygments.lexers import get_lexer_by_name
from pygments.util import ClassNotFound
from peppermynt.containers import Config, Container, Item, Items, Posts, SiteContent, Page
from peppermynt.exceptions import ConfigException, ContentException, ParserException, RendererException
from peppermynt.fs import File
from peppermynt.utils import get_logger, dest_path, Timer, unescape, Url
logger = get_logger('peppermynt')
class Reader:
def __init__(self, src, temp, dest, site, writer):
self._writer = writer
self._parsers = {}
self._extensions = defaultdict(list)
self._cache = {}
self.src = src
self.temp = temp
self.dest = dest
self.site = site
self._find_parsers()
def _find_parsers(self):
for parser in iter_entry_points('peppermynt.parsers'):
name = parser.name
try:
Parser = parser.load()
except DistributionNotFound as e:
logger.debug('@@ The %s parser could not be loaded due to a missing requirement: %s.', name, str(e))
continue
for extension in Parser.accepts:
if 'parsers' in self.site and self.site['parsers'].get(extension.lstrip('.')) == name:
self._extensions[extension].insert(0, name)
else:
self._extensions[extension].append(name)
self._parsers[name] = Parser
def _get_date(self, mtime, date):
if not date:
return mtime
d = [None, None, None, 0, 0]
for i, v in enumerate(date.split('-')):
d[i] = v
if not d[3]:
d[3], d[4] = mtime.strftime('%H %M').split()
elif not d[4]:
d[4] = '{0:02d}'.format(d[4])
return datetime.strptime('-'.join(d), '%Y-%m-%d-%H-%M')
def _get_parser(self, item, parser = None):
if not parser:
try:
parser = self._extensions[item.extension()][0]
except KeyError:
raise ParserException('No parser found that accepts \'{0}\' files.'.format(item.extension()),
'src: {0}'.format(item))
if parser in self._cache:
return self._cache[parser]
options = self.site.get(parser, None)
if parser in self._parsers:
Parser = self._parsers[parser](options)
else:
try:
Parser = import_module('peppermynt.parsers.{0}'.format(parser)).Parser(options)
except ImportError:
raise ParserException('The {0} parser could not be found.'.format(parser))
self._cache[parser] = Parser
return Parser
def _parse_filename(self, f):
date, text = re.match(r'(?:(\d{4}(?:-\d{2}-\d{2}){1,2})-)?(.+)', f.name).groups()
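        # For illustration (hypothetical file): '2012-02-03-my-post.md' yields
        # text 'my-post.md' with the date 2012-02-03 (time filled from mtime);
        # '2012-02-03-18-30-my-post.md' also carries the time 18:30.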
return (text, self._get_date(f.mtime, date))
def _init_container(self, container):
for f in container.path:
container.add(self._init_item(container.config, f))
container.sort()
container.tag()
container.archive()
return container
def _init_item(self, config, f, simple = False):
Timer.start()
frontmatter, bodymatter = self._parse_item_frontmatter(f)
item = Item(f.path)
text, date = self._parse_filename(f)
item['date'] = date.strftime(self.site['date_format'])
item['timestamp'] = timegm(date.utctimetuple())
if simple:
item['url'] = Url.from_path(f.root.path.replace(self.src.path, ''), text)
else:
item['tags'] = []
item['url'] = Url.from_format(config['url'], text, date, frontmatter)
item['dest'] = dest_path(self.dest.path, item['url'])
item.update(frontmatter)
item['raw_content'] = bodymatter
return item
def parse_item(self, config, item, simple = False):
bodymatter = item.pop('raw_content')
parser = self._get_parser(item, item.get('parser', config.get('parser', None)))
content = parser.parse(self._writer.from_string(bodymatter, item))
item['content'] = content
if not simple:
item['excerpt'] = re.search(r'\A.*?(?:<p>(.+?)</p>)?', content, re.M | re.S).group(1)
logger.debug('.. (%.3fs) %s', Timer.stop(), str(item).replace(self.src.path, ''))
return item
def _parse_item_frontmatter(self, f):
try:
frontmatter, bodymatter = re.search(r'\A---\s+^(.+?)$\s+---\s*(.*)\Z', f.content, re.M | re.S).groups()
frontmatter = Config(frontmatter)
except AttributeError:
raise ContentException('Invalid frontmatter.',
'src: {0}'.format(f.path),
'frontmatter must not be empty')
except ConfigException:
raise ConfigException('Invalid frontmatter.',
'src: {0}'.format(f.path),
                                  'frontmatter contains invalid YAML')
if 'layout' not in frontmatter:
raise ContentException('Invalid frontmatter.',
'src: {0}'.format(f.path),
'layout must be set')
frontmatter.pop('url', None)
return frontmatter, bodymatter
def init_parse(self):
posts = self._init_container(Posts(self.src, self.site))
containers = {}
miscellany = Container('miscellany', self.src, None)
pages = posts.pages
feeds = []
for name, config in self.site['containers'].items():
container = self._init_container(Items(name, self.src, config))
containers[name] = container
pages.extend(container.pages)
for f in miscellany.path:
if f.extension in self._extensions:
miscellany.add(self._init_item(miscellany.config, f, True))
elif f.extension == '.xml':
# Assume for now that the only xml files are feeds
feeds.append(Page(f.path.replace(self.src.path, ''), None, None))
elif f.extension in ('.html', '.htm'):
pages.append(Page(f.path.replace(self.src.path, ''), None, None))
pages.extend(miscellany.pages)
return SiteContent(posts, containers, pages, feeds)
class Writer:
def __init__(self, src, temp, dest, site):
self.src = src
self.temp = temp
self.dest = dest
self.site = site
self._renderer = self._get_renderer()
def _get_renderer(self):
renderer = self.site['renderer']
options = self.site.get(renderer, None)
try:
Renderer = load_entry_point('peppermynt', 'peppermynt.renderers', renderer)
except DistributionNotFound as e:
raise RendererException('The {0} renderer requires {1}.'.format(renderer, str(e)))
except ImportError:
try:
Renderer = import_module('peppermynt.renderers.{0}'.format(renderer)).Renderer
except ImportError:
raise RendererException('The {0} renderer could not be found.'.format(renderer))
return Renderer(self.src.path, options)
def _highlight(self, match):
language, code = match.groups()
formatter = HtmlFormatter(linenos = 'table')
code = unescape(code)
try:
code = highlight(code, get_lexer_by_name(language), formatter)
except ClassNotFound:
code = highlight(code, get_lexer_by_name('text'), formatter)
return '<div class="code"><div>{0}</div></div>'.format(code)
def _pygmentize(self, html):
return re.sub(r'<pre><code[^>]+data-lang="([^>]+)"[^>]*>(.+?)</code></pre>', self._highlight, html, flags = re.S)
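    # For illustration: '<pre><code data-lang="python">x = 1</code></pre>' is
    # re-rendered through Pygments and comes back wrapped as
    # '<div class="code"><div>...</div></div>' with line numbers in a table.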
def from_string(self, string, data = None):
return self._renderer.from_string(string, data)
def register(self, data):
self._renderer.register(data)
def render_path(self, template, _data = None, url = None):
return dest_path(self.dest.path, url or template)
def render(self, template, data = None, url = None):
path = self.render_path(template, data, url)
try:
Timer.start()
content = self._renderer.render(template, data)
if self.site['pygmentize']:
content = self._pygmentize(content)
logger.debug('.. (%.3fs) %s', Timer.stop(), path.replace(self.dest.path, ''))
except RendererException as e:
raise RendererException(
e.message,
'{0} in container item {1}'.format(template, data.get('item', url or template))
)
return File(path, content)
|
nilq/baby-python
|
python
|
import asyncio
from xwing.socket.server import Server
BACKEND_ADDRESS = '/var/tmp/xwing.socket'
async def start_server(loop):
server = Server(loop, BACKEND_ADDRESS, 'server0')
await server.listen()
conn = await server.accept()
while True:
data = await conn.recv()
if not data:
break
await conn.send(data)
conn.close()
loop = asyncio.get_event_loop()
loop.run_until_complete(start_server(loop))
loop.close()
|
nilq/baby-python
|
python
|
import sys
import json
if len(sys.argv) < 2:
    print('usage: python tag_input.py <file>')
exit(-1)
arquivo_entrada = open(sys.argv[1], 'r', encoding='utf8')
fluxo = json.load(arquivo_entrada)
arquivo_entrada.close()
for bloco in fluxo:
for action_moment in ['$enteringCustomActions', '$leavingCustomActions']:
for i, acao in enumerate(fluxo[bloco][action_moment]):
try:
acao['type']
            except (KeyError, TypeError):
print(json.dumps(acao, indent=4))
continue
if acao['type'] == 'ProcessHttp' and acao['settings']['uri'] == '{{config.api}}/blip/tracking':
body = json.loads(acao['settings']['body'])
for track in body:
fluxo[bloco][action_moment].append(
{
'type': 'TrackEvent',
'$title': acao['$title'],
'$invalid': False,
'settings': {
'category': track['category'],
'action': track['action'],
'extras': track['extras']
}
}
)
fluxo[bloco][action_moment].pop(i)
nome_saida = '%s MIGRATED.json' % (sys.argv[1].split('.')[0])
arquivo_saida = open(nome_saida, 'w', encoding='utf8')
arquivo_saida.write(json.dumps(fluxo))
arquivo_saida.close()
print('Done! Saved to file %s' % nome_saida)
|
nilq/baby-python
|
python
|
# coding=utf-8
from setuptools import setup, find_packages
setup(
name="wsgi-listenme",
description="WSGI middleware for capture and browse requests and responses",
version='1.0',
author='Mario César Señoranis Ayala',
author_email='mariocesar.c50@gmail.com',
url='https://github.com/humanzilla/wsgi-listenme',
    packages=find_packages(),
license="MIT license",
    install_requires=[],
tests_require=["tox"],
zip_safe=False,
include_package_data=True
)
|
nilq/baby-python
|
python
|
name=("Rayne","Coder","Progammer","Enginner","VScode")
(man,*item,software)=name
print(man)
#*item container for all value that not contain by man and software
print(item)
print(software)
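# The star can sit anywhere in the target list; a minimal extra sketch:
first, *middle, last = [1, 2, 3, 4, 5]
print(first)   # 1
print(middle)  # [2, 3, 4]
print(last)    # 5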
|
nilq/baby-python
|
python
|
import unittest
from unittest import mock
from .. import surface
class TestEllipsoidDem(unittest.TestCase):
def test_height(self):
test_dem = surface.EllipsoidDem(3396190, 3376200)
self.assertEqual(test_dem.get_height(0, 0), 0)
self.assertEqual(test_dem.get_height(0, 180), 0)
self.assertEqual(test_dem.get_height(90, 100), 0)
def test_radius(self):
test_dem = surface.EllipsoidDem(3396190, 3376200)
self.assertEqual(test_dem.get_radius(0, 0), 3396190)
self.assertEqual(test_dem.get_radius(0, 180), 3396190)
self.assertEqual(test_dem.get_radius(90, 300), 3376200)
def tearDown(self):
pass
class TestGdalDem(unittest.TestCase):
def test_height(self):
with mock.patch('autocnet.spatial.surface.GeoDataset') as mockDataset:
mockInstance = mockDataset.return_value
mockInstance.latlon_to_pixel.return_value = (1,2)
mockInstance.read_array.return_value = [[100]]
test_dem = surface.GdalDem('TestDem.cub', 3396190, 3376200)
self.assertEqual(test_dem.get_height(0, 0), 100)
self.assertEqual(test_dem.get_height(0, 180), 100)
self.assertEqual(test_dem.get_height(90, 300), 100)
def test_height_from_radius(self):
with mock.patch('autocnet.spatial.surface.GeoDataset') as mockDataset:
mockInstance = mockDataset.return_value
mockInstance.latlon_to_pixel.return_value = (1,2)
mockInstance.read_array.return_value = [[3396190]]
test_dem = surface.GdalDem('TestDem.cub', 3396190, 3376200, 'radius')
self.assertEqual(test_dem.get_height(0, 0), 0)
self.assertEqual(test_dem.get_height(0, 180), 0)
self.assertEqual(test_dem.get_height(90, 300), 19990)
def test_radius(self):
with mock.patch('autocnet.spatial.surface.GeoDataset') as mockDataset:
mockInstance = mockDataset.return_value
mockInstance.latlon_to_pixel.return_value = (1,2)
mockInstance.read_array.return_value = [[3396190]]
test_dem = surface.GdalDem('TestDem.cub', 3396190, 3376200, 'radius')
self.assertEqual(test_dem.get_radius(0, 0), 3396190)
self.assertEqual(test_dem.get_radius(0, 180), 3396190)
self.assertEqual(test_dem.get_radius(90, 300), 3396190)
def test_radius_from_height(self):
with mock.patch('autocnet.spatial.surface.GeoDataset') as mockDataset:
mockInstance = mockDataset.return_value
mockInstance.latlon_to_pixel.return_value = (1,2)
mockInstance.read_array.return_value = [[100]]
test_dem = surface.GdalDem('TestDem.cub', 3396190, 3376200)
self.assertEqual(test_dem.get_radius(0, 0), 3396290)
self.assertEqual(test_dem.get_radius(0, 180), 3396290)
self.assertEqual(test_dem.get_radius(90, 300), 3376300)
def tearDown(self):
pass
|
nilq/baby-python
|
python
|
# == Scraping with regular expressions ==
import re
from html import unescape
# Open the HTML file downloaded under the project and store the response body in a variable.
with open('../sample.scraping-book.com/dp.html') as f:
html = f.read()
# Use findall to grab the HTML for one book at a time
# re.DOTALL => '.' matches any character, including newlines
for partial_html in re.findall(r'<a itemprop="url".*?</ul>\s*</a></li>', html, re.DOTALL):
    # The book URL is taken from the href attribute of the a element
    # Passing 0 to .group() returns the text matched by the whole regex;
    # passing 1 returns the text matched by the parenthesized group
url = re.search(r'<a itemprop="url" href="(.*?)">', partial_html).group(1)
url = 'http://sample.scraping-book.com' + url
title = re.search(r'<p itemprop="name".*?</p>', partial_html).group(0)
    # Replace values
    # Note that re.sub lets the pattern be specified as a regular expression
title = title.replace('<br/>', ' ')
title = re.sub(r'<.*?>', '', title)
title = unescape(title)
print(url, title)
|
nilq/baby-python
|
python
|
"""
735. Asteroid Collision
Medium
We are given an array asteroids of integers representing asteroids in a row.
For each asteroid, the absolute value represents its size, and the sign represents its direction (positive meaning right, negative meaning left). Each asteroid moves at the same speed.
Find out the state of the asteroids after all collisions. If two asteroids meet, the smaller one will explode. If both are the same size, both will explode. Two asteroids moving in the same direction will never meet.
Example 1:
Input: asteroids = [5,10,-5]
Output: [5,10]
Explanation: The 10 and -5 collide resulting in 10. The 5 and 10 never collide.
Example 2:
Input: asteroids = [8,-8]
Output: []
Explanation: The 8 and -8 collide exploding each other.
Example 3:
Input: asteroids = [10,2,-5]
Output: [10]
Explanation: The 2 and -5 collide resulting in -5. The 10 and -5 collide resulting in 10.
Constraints:
2 <= asteroids.length <= 10^4
-1000 <= asteroids[i] <= 1000
asteroids[i] != 0
"""
# V0
# IDEA : STACK
class Solution(object):
def asteroidCollision(self, asteroids):
stack = []
for item in asteroids:
while stack and item < 0 and stack[-1] >= 0:
pre = stack.pop()
if item == -pre:
item = None
break
elif -item < pre:
item = pre
        if item is not None:
stack.append(item)
return stack
# V0
# IDEA : STACK
class Solution(object):
def asteroidCollision(self, asteroids):
ans = []
for new in asteroids:
while ans and new < 0 < ans[-1]:
if ans[-1] < -new:
ans.pop()
continue
elif ans[-1] == -new:
ans.pop()
break
else:
ans.append(new)
return ans
# V1
# IDEA : STACK
# https://leetcode.com/problems/asteroid-collision/solution/
class Solution(object):
def asteroidCollision(self, asteroids):
ans = []
for new in asteroids:
while ans and new < 0 < ans[-1]:
if ans[-1] < -new:
ans.pop()
continue
elif ans[-1] == -new:
ans.pop()
break
else:
ans.append(new)
return ans
# V1
# https://blog.csdn.net/fuxuemingzhu/article/details/81079015
class Solution(object):
def asteroidCollision(self, asteroids):
stack = []
for ast in asteroids:
while stack and ast < 0 and stack[-1] >= 0:
pre = stack.pop()
if ast == -pre:
ast = None
break
elif -ast < pre:
ast = pre
        if ast is not None:
stack.append(ast)
return stack
# V2
# Time: O(n)
# Space: O(n)
class Solution(object):
def asteroidCollision(self, asteroids):
"""
:type asteroids: List[int]
:rtype: List[int]
"""
result = []
for asteroid in asteroids:
while result and asteroid < 0 < result[-1]:
if result[-1] < -asteroid:
result.pop()
continue
elif result[-1] == -asteroid:
result.pop()
break
else:
result.append(asteroid)
return result
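# Quick sanity check of the solutions above, using the examples from the
# problem statement (expected outputs shown in the comments):
if __name__ == '__main__':
    s = Solution()
    print(s.asteroidCollision([5, 10, -5]))  # [5, 10]
    print(s.asteroidCollision([8, -8]))      # []
    print(s.asteroidCollision([10, 2, -5]))  # [10]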
|
nilq/baby-python
|
python
|
import torch
import time
class AverageMeter(object):
"""Computes and stores the average and current value"""
def __init__(self, name, fmt=':f'):
self.name = name
self.fmt = fmt
self.reset()
def reset(self):
self.val = 0
self.avg = 0
self.sum = 0
self.count = 0
def update(self, val, n=1):
self.val = val
self.sum += val * n
self.count += n
self.avg = self.sum / self.count
def __str__(self):
fmtstr = '{name} {val' + self.fmt + '} ({avg' + self.fmt + '})'
return fmtstr.format(**self.__dict__)
class ProgressMeter(object):
def __init__(self, num_batches, meters, prefix=""):
self.batch_fmtstr = self._get_batch_fmtstr(num_batches)
self.meters = meters
self.prefix = prefix
def display(self, batch):
entries = [self.prefix + self.batch_fmtstr.format(batch)]
entries += [str(meter) for meter in self.meters]
print('\t'.join(entries))
def _get_batch_fmtstr(self, num_batches):
        num_digits = len(str(num_batches))
fmt = '{:' + str(num_digits) + 'd}'
return '[' + fmt + '/' + fmt.format(num_batches) + ']'
def accuracy(output, target, topk=(1,)):
"""Computes the accuracy over the k top predictions for the specified values of k"""
with torch.no_grad():
maxk = max(topk)
batch_size = target.size(0)
_, pred = output.topk(maxk, 1, True, True)
pred = pred.t()
correct = pred.eq(target.view(1, -1).expand_as(pred))
res = []
for k in topk:
correct_k = correct[:k].reshape(-1).float().sum(0, keepdim=True)
res.append(correct_k.mul_(100.0 / batch_size))
return res
@torch.no_grad()
def evaluate(val_loader, model, device=None, print_freq=100):
if device is None:
device = next(model.parameters()).device
else:
model.to(device)
batch_time = AverageMeter('Time', ':6.3f')
top1 = AverageMeter('Acc@1', ':6.2f')
top5 = AverageMeter('Acc@5', ':6.2f')
progress = ProgressMeter(
len(val_loader),
[batch_time, top1, top5],
prefix='Test: ')
# switch to evaluate mode
model.eval()
end = time.time()
for i, (images, target) in enumerate(val_loader):
images = images.to(device)
target = target.to(device)
# compute output
output = model(images)
# measure accuracy and record loss
acc1, acc5 = accuracy(output, target, topk=(1, 5))
top1.update(acc1[0], images.size(0))
top5.update(acc5[0], images.size(0))
# measure elapsed time
batch_time.update(time.time() - end)
end = time.time()
if i % print_freq == 0:
progress.display(i)
print(' * Acc@1 {top1.avg:.3f} Acc@5 {top5.avg:.3f}'.format(top1=top1, top5=top5))
return top1.avg
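# Minimal usage sketch (illustrative only, not part of the original module;
# the dummy model and random tensors below are assumptions for demonstration):
#
#   import torch.nn as nn
#   from torch.utils.data import DataLoader, TensorDataset
#
#   model = nn.Linear(10, 5)
#   data = TensorDataset(torch.randn(64, 10), torch.randint(0, 5, (64,)))
#   top1 = evaluate(DataLoader(data, batch_size=16), model, device='cpu')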
|
nilq/baby-python
|
python
|
from time import time
from json import dumps, loads
from redis import StrictRedis, ConnectionPool, WatchError
from PyYADL.distributed_lock import AbstractDistributedLock
class RedisLock(AbstractDistributedLock):
def __init__(self, name, prefix=None, ttl=-1, existing_connection_pool=None, redis_host='localhost', redis_port=6379,
redis_password=None, redis_db=0, **kwargs):
super().__init__(name, prefix, ttl)
client_connection = existing_connection_pool or ConnectionPool(host=redis_host, port=redis_port,
password=redis_password, db=redis_db, **kwargs)
self._client = StrictRedis(connection_pool=client_connection)
self.LOCK_KEY = self._build_lock_key()
def _build_lock_key(self):
key = ''
if self.prefix:
key = key + self.prefix + ':'
key = key + 'lock:' + self.name
return key
def _write_lock_if_not_exists(self):
value = dumps({'timestamp': int(time()), 'secret': self._secret, 'exclusive': True})
ttl = self.ttl if self.ttl > 0 else None
result = self._client.set(name=self.LOCK_KEY, value=value, ex=ttl, nx=True)
return bool(result)
def _verify_secret(self) -> bool:
result = self._client.get(self.LOCK_KEY)
secret = loads(result.decode('utf-8')).get('secret') if result is not None else None
if secret is None:
raise RuntimeError('release unlocked lock')
return secret == self._secret
def _delete_lock(self):
return bool(self._client.delete(self.LOCK_KEY))
class RedisWriteLock(RedisLock):
pass
class RedisReadLock(RedisLock):
def _write_lock_if_not_exists(self):
with self._client.pipeline() as pipe:
try:
pipe.watch(self.LOCK_KEY)
raw_lock_data = pipe.get(self.LOCK_KEY)
lock_data = loads(raw_lock_data.decode('utf-8')) if raw_lock_data else self._generate_new_lock_data()
if not self._is_valid_read_lock_data(lock_data):
return False
lock_data['secret'] = list(set(lock_data['secret'] + [self._secret]))
lock_data['timestamp'] = int(time())
ttl = self.ttl if self.ttl > 0 else None
pipe.multi()
pipe.set(self.LOCK_KEY, value=dumps(lock_data), ex=ttl)
pipe.execute()
return True
except WatchError:
self.logger.info('Key %s has changed during transaction. Trying to retry', self.LOCK_KEY)
return self._write_lock_if_not_exists()
@staticmethod
def _is_valid_read_lock_data(lock_data):
return (lock_data.get('exclusive', True) is False) and (isinstance(lock_data.get('secret'), (list, set, tuple)))
def _generate_new_lock_data(self):
return {'timestamp': int(time()), 'secret': [self._secret], 'exclusive': False}
def _verify_secret(self) -> bool:
with self._client.pipeline() as pipe:
try:
pipe.watch(self.LOCK_KEY)
raw_lock_data = pipe.get(self.LOCK_KEY)
if raw_lock_data is None:
return False
lock_data = loads(raw_lock_data.decode('utf-8'))
if not self._is_valid_read_lock_data(lock_data):
return False
return self._secret in lock_data['secret']
except WatchError:
self.logger.info('Key %s has changed during transaction. Trying to retry', self.LOCK_KEY)
return self._verify_secret()
def _delete_lock(self):
with self._client.pipeline() as pipe:
try:
pipe.watch(self.LOCK_KEY)
raw_lock_data = pipe.get(self.LOCK_KEY)
if raw_lock_data is None:
return False
lock_data = loads(raw_lock_data.decode('utf-8'))
if not self._is_valid_read_lock_data(lock_data):
return False
if self._secret not in lock_data['secret']:
return False
secrets = lock_data['secret']
secrets.remove(self._secret)
                ttl = pipe.ttl(self.LOCK_KEY)
                # Redis returns -1 for keys without an expiry; SET requires ex=None in that case.
                ttl = ttl if ttl and ttl > 0 else None
if not secrets:
pipe.multi()
pipe.delete(self.LOCK_KEY)
pipe.execute()
return True
else:
lock_data['secret'] = secrets
pipe.multi()
pipe.set(self.LOCK_KEY, value=dumps(lock_data), ex=ttl)
pipe.execute()
return True
except WatchError:
self.logger.info('Key %s has changed during transaction. Trying to retry', self.LOCK_KEY)
return self._delete_lock()
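# Usage sketch (hedged): PyYADL locks follow the threading.Lock interface, so
# the acquire()/release() calls below are assumed to come from
# AbstractDistributedLock rather than being defined in this file.
#
#   lock = RedisLock('reports', prefix='myapp', ttl=60)
#   if lock.acquire():
#       try:
#           ...  # critical section
#       finally:
#           lock.release()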
|
nilq/baby-python
|
python
|
from .elbo import ELBO
__all__ = [
'ELBO'
]
|
nilq/baby-python
|
python
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2010 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import datetime
from cinder import exception as exc
FAKE_UUID = 'aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa'
FAKE_UUIDS = {}
def stub_volume(id, **kwargs):
volume = {
'id': id,
'user_id': 'fakeuser',
'project_id': 'fakeproject',
'host': 'fakehost',
'size': 1,
'availability_zone': 'fakeaz',
'instance_uuid': 'fakeuuid',
'attached_host': None,
'mountpoint': '/',
'attached_mode': 'rw',
'status': 'fakestatus',
'migration_status': None,
'attach_status': 'attached',
'bootable': 'false',
'name': 'vol name',
'display_name': 'displayname',
'display_description': 'displaydesc',
'created_at': datetime.datetime(1, 1, 1, 1, 1, 1),
'snapshot_id': None,
'source_volid': None,
'volume_type_id': '3e196c20-3c06-11e2-81c1-0800200c9a66',
'volume_metadata': [],
'volume_type': {'name': 'vol_type_name'},
'readonly': 'False'}
volume.update(kwargs)
return volume
def stub_volume_create(self, context, size, name, description, snapshot,
**param):
vol = stub_volume('1')
vol['size'] = size
vol['display_name'] = name
vol['display_description'] = description
vol['source_volid'] = None
try:
vol['snapshot_id'] = snapshot['id']
except (KeyError, TypeError):
vol['snapshot_id'] = None
vol['availability_zone'] = param.get('availability_zone', 'fakeaz')
return vol
def stub_volume_create_from_image(self, context, size, name, description,
snapshot, volume_type, metadata,
availability_zone):
vol = stub_volume('1')
vol['status'] = 'creating'
vol['size'] = size
vol['display_name'] = name
vol['display_description'] = description
vol['availability_zone'] = 'cinder'
return vol
def stub_volume_update(self, context, *args, **param):
pass
def stub_volume_delete(self, context, *args, **param):
pass
def stub_volume_get(self, context, volume_id):
return stub_volume(volume_id)
def stub_volume_get_notfound(self, context, volume_id):
raise exc.NotFound
def stub_volume_get_all(context, search_opts=None):
return [stub_volume(100, project_id='fake'),
stub_volume(101, project_id='superfake'),
stub_volume(102, project_id='superduperfake')]
def stub_volume_get_all_by_project(self, context, search_opts=None):
return [stub_volume_get(self, context, '1')]
def stub_snapshot(id, **kwargs):
snapshot = {'id': id,
'volume_id': 12,
'status': 'available',
'volume_size': 100,
'created_at': None,
'display_name': 'Default name',
'display_description': 'Default description',
'project_id': 'fake'}
snapshot.update(kwargs)
return snapshot
def stub_snapshot_get_all(self):
return [stub_snapshot(100, project_id='fake'),
stub_snapshot(101, project_id='superfake'),
stub_snapshot(102, project_id='superduperfake')]
def stub_snapshot_get_all_by_project(self, context):
return [stub_snapshot(1)]
def stub_snapshot_update(self, context, *args, **param):
pass
def stub_service_get_all_by_topic(context, topic):
return [{'availability_zone': "zone1:host1", "disabled": 0}]
|
nilq/baby-python
|
python
|
from baseline.train import create_trainer, register_trainer, register_training_func, Trainer
from baseline.embeddings import register_embeddings
from baseline.reporting import register_reporting, ReportingHook
from baseline.tf.embeddings import TensorFlowEmbeddings
from baseline.tf.optz import optimizer
from baseline.confusion import ConfusionMatrix
from baseline.utils import listify, get_model_file, write_json, color, Colors
from baseline.tf.tfy import embed
import tensorflow as tf
import os
import numpy as np
@register_embeddings(name='cbow')
class CharBoWEmbeddings(TensorFlowEmbeddings):
"""Bag of character embeddings, sum char embeds, so in this case `wsz == dsz`
"""
@classmethod
def create_placeholder(cls, name):
return tf.placeholder(tf.int32, [None, None, None], name=name)
def __init__(self, name, **kwargs):
super(CharBoWEmbeddings, self).__init__()
self.vsz = kwargs.get('vsz')
self.dsz = kwargs.get('dsz')
self.finetune = kwargs.get('finetune', True)
self.name = name
self.scope = kwargs.get('scope', '{}/CharBoWLUT'.format(self.name))
self.weights = kwargs.get('weights')
if self.weights is None:
unif = kwargs.get('unif', 0.1)
self.weights = np.random.uniform(-unif, unif, (self.vsz, self.dsz))
self.params = kwargs
def save_md(self, target):
write_json({'vsz': self.get_vsz(), 'dsz': self.get_dsz()}, target)
def encode(self, x=None):
if x is None:
x = CharBoWEmbeddings.create_placeholder(self.name)
self.x = x
return tf.reduce_sum(embed(x,
self.get_vsz(),
self.get_dsz(),
tf.constant_initializer(self.weights, dtype=tf.float32),
self.finetune,
self.scope), axis=2, keepdims=False)
def get_vsz(self):
return self.vsz
    # Warning: this value is only guaranteed to be fully initialized AFTER encode has run
def get_dsz(self):
return self.dsz
@register_reporting(name='slack')
class SlackReporting(ReportingHook):
def __init__(self, **kwargs):
super(SlackReporting, self).__init__(**kwargs)
self.webhook = kwargs['webhook']
def step(self, metrics, tick, phase, tick_type=None, **kwargs):
"""Write results to `slack` (webhook)
:param metrics: A map of metrics to scores
:param tick: The time (resolution defined by `tick_type`)
:param phase: The phase of training (`Train`, `Valid`, `Test`)
:param tick_type: The resolution of tick (`STEP`, `EPOCH`)
:return:
"""
        import requests
        if phase in ['Valid', 'Test']:
            chunks = '%s(%d) [Epoch %d] [%s]' % (os.getlogin(), os.getpid(), tick, phase)
            for k, v in metrics.items():
                if k not in ['avg_loss', 'perplexity']:
                    v *= 100.
                chunks += '\t%s=%.3f' % (k, v)
            # Post only when there is something to report; this avoids sending
            # an empty message to the webhook on every training step.
            requests.post(self.webhook, json={"text": chunks})
@register_training_func('classify', name='test_every_n_epochs')
def train(model, ts, vs, es=None, **kwargs):
"""
Train a classifier using TensorFlow
:param model: The model to train
:param ts: A training data set
:param vs: A validation data set
:param es: A test data set, can be None
:param kwargs:
See below
:Keyword Arguments:
* *do_early_stopping* (``bool``) --
Stop after evaluation data is no longer improving. Defaults to True
* *epochs* (``int``) -- how many epochs. Default to 20
* *outfile* -- Model output file, defaults to classifier-model.pyth
* *patience* --
How many epochs where evaluation is no longer improving before we give up
* *reporting* --
Callbacks which may be used on reporting updates
* Additional arguments are supported, see :func:`baseline.tf.optimize` for full list
:return:
"""
n = int(kwargs.get('test_epochs', 5))
do_early_stopping = bool(kwargs.get('do_early_stopping', True))
epochs = int(kwargs.get('epochs', 20))
model_file = get_model_file('classify', 'tf', kwargs.get('basedir'))
if do_early_stopping:
early_stopping_metric = kwargs.get('early_stopping_metric', 'acc')
patience = kwargs.get('patience', epochs)
print('Doing early stopping on [%s] with patience [%d]' % (early_stopping_metric, patience))
reporting_fns = listify(kwargs.get('reporting', []))
print('reporting', reporting_fns)
trainer = create_trainer(model, **kwargs)
tables = tf.tables_initializer()
model.sess.run(tables)
model.sess.run(tf.global_variables_initializer())
model.set_saver(tf.train.Saver())
max_metric = 0
last_improved = 0
for epoch in range(epochs):
trainer.train(ts, reporting_fns)
test_metrics = trainer.test(vs, reporting_fns, phase='Valid')
if epoch > 0 and epoch % n == 0 and epoch < epochs - 1:
print(color('Running test', Colors.GREEN))
trainer.test(es, reporting_fns, phase='Test')
if do_early_stopping is False:
trainer.checkpoint()
trainer.model.save(model_file)
elif test_metrics[early_stopping_metric] > max_metric:
last_improved = epoch
max_metric = test_metrics[early_stopping_metric]
print('New max %.3f' % max_metric)
trainer.checkpoint()
trainer.model.save(model_file)
elif (epoch - last_improved) > patience:
print(color('Stopping due to persistent failures to improve', Colors.RED))
break
if do_early_stopping is True:
print('Best performance on max_metric %.3f at epoch %d' % (max_metric, last_improved))
if es is not None:
print(color('Reloading best checkpoint', Colors.GREEN))
trainer.recover_last_checkpoint()
trainer.test(es, reporting_fns, phase='Test')
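# Hedged sketch of how this registered trainer would be selected from a mead
# config; the exact key names ("fit_func", etc.) depend on the mead version:
#
#   "train": {
#       "fit_func": "test_every_n_epochs",
#       "test_epochs": 5,
#       "epochs": 20
#   }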
|
nilq/baby-python
|
python
|
"""Super class of contextual bandit algorithm agent class"""
import numpy as np
class ContextualBanditAlgorithm(object):
"""
Args:
        n_features : dimensionality of the feature vector
    Attributes:
        iter_num(int) : current iteration count
    """
    def __init__(self, n_features: int):
        self.n_features = n_features
        self.iter_num = 0
    def get_iteration_number(self) -> int:
        """Getter for the iteration count"""
        return self.iter_num
    def set_iteration_number(self, t: int) -> None:
        """Setter for the iteration count"""
        # Python type hints are not enforced at runtime, so validate explicitly
        # and fail loudly on a wrong type before checking the value.
        assert isinstance(t, int), "iteration number must be int. t = {0}".format(t)
        # Raise an error if t is not a positive integer.
        assert t > 0, "iteration number must be positive. t = {0}".format(t)
        self.iter_num = t
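# A minimal illustrative subclass (not part of the original module) showing how
# the base class is meant to be extended; the constant policy below is a
# placeholder assumption, not a real bandit algorithm.
class _DemoAlgorithm(ContextualBanditAlgorithm):
    def select_arm(self, context: np.ndarray) -> int:
        # A real policy would score each arm from the context features.
        return 0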
if __name__ == '__main__':
pass
|
nilq/baby-python
|
python
|
# Generated by Django 2.1.8 on 2019-08-08 23:47
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('wagtailnhsukfrontendsettings', '0003_footersettings'),
]
operations = [
migrations.AddField(
model_name='footersettings',
name='fixed_coloumn_footer',
field=models.BooleanField(default=False, help_text='Enable this setting to change way the footer is styled, so links group into coloumns'),
),
]
|
nilq/baby-python
|
python
|
# --------------
# Import packages
import numpy as np
import pandas as pd
from scipy.stats import mode
# code starts here
bank = pd.read_csv(path)  # `path` is assumed to be provided by the exercise environment
categorical_var = bank.select_dtypes(include = 'object')
print(categorical_var)
numerical_var = bank.select_dtypes(include = 'number')
print(numerical_var)
# code ends here
# --------------
# code starts here
banks = bank.drop('Loan_ID', axis=1)
print(banks.isnull().sum())
bank_mode = banks.mode()
banks.fillna(banks.mode().iloc[0],inplace=True)
print(banks.isnull().sum())
#code ends here
# --------------
# Code starts here
avg_loan_amount = pd.pivot_table(
banks,
index=['Gender','Married','Self_Employed'],
values='LoanAmount',
aggfunc=np.mean)
# code ends here
# --------------
# code starts here
loan_approved_se = len(banks[
(banks['Self_Employed']=='Yes') & (banks['Loan_Status']=='Y')
])
loan_approved_nse = len(banks[
(banks['Self_Employed']=='No') & (banks['Loan_Status']=='Y')
])
total = banks.shape[0]
percentage_se = loan_approved_se/total*100
percentage_nse = loan_approved_nse/total*100
# code ends here
# --------------
# code starts here
loan_term = banks['Loan_Amount_Term'].apply(lambda x:x/12)
big_loan_term = len(loan_term[loan_term>=25])
# code ends here
# --------------
# code starts here
loan_groupby = banks.groupby('Loan_Status')
loan_groupby = loan_groupby[['ApplicantIncome', 'Credit_History']]
mean_values = loan_groupby.mean()
# code ends here
|
nilq/baby-python
|
python
|
# Copyright © 2018 VMware, Inc. All Rights Reserved.
# SPDX-License-Identifier: BSD-2-Clause OR GPL-3.0-only
# !/usr/bin/python
ANSIBLE_METADATA = {
'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'
}
DOCUMENTATION = '''
---
module: vcd_vapp_netcommit
short_description: Ansible Module to manage (create/delete) Networks in vApps in vCloud Director.
version_added: "2.4"
description:
- "Ansible Module to manage (create/delete) Networks in vApps."
options:
user:
description:
- vCloud Director user name
required: false
password:
description:
- vCloud Director user password
required: false
host:
description:
- vCloud Director host address
required: false
org:
description:
- Organization name on vCloud Director to access
required: false
api_version:
description:
- Pyvcloud API version
required: false
verify_ssl_certs:
description:
- whether to use secure connection to vCloud Director host
required: false
network:
description:
- Network name
required: true
vapp:
description:
- vApp name
required: true
vdc:
description:
- VDC name
required: true
fence_mode:
description:
- Network fence mode
required: false
parent_network:
description:
- VDC parent network to connect to
required: false
ip_scope:
description:
- IP scope when no parent_network is defined
state:
description:
- state of network ('present'/'absent').
required: true
author:
- mtaneja@vmware.com
'''
EXAMPLES = '''
- name: Test with a message
vcd_vapp_netcommit:
user: terraform
password: abcd
host: csa.sandbox.org
org: Terraform
api_version: 30
verify_ssl_certs: False
    network: "uplink"
    vapp: "vapp1"
    vdc: "vdc1"
    state: "present"
'''
RETURN = '''
msg: success/failure message corresponding to vapp network state
changed: true if resource has been changed else false
'''
from lxml import etree
from ipaddress import ip_network
from pyvcloud.vcd.org import Org
from pyvcloud.vcd.vdc import VDC
from pyvcloud.vcd.client import E
from pyvcloud.vcd.vapp import VApp
from pyvcloud.vcd.client import NSMAP
from pyvcloud.vcd.client import E_OVF
from pyvcloud.vcd.client import FenceMode
from pyvcloud.vcd.client import EntityType
from pyvcloud.vcd.client import RelationType
from ansible.module_utils.vcd import VcdAnsibleModule
from pyvcloud.vcd.exceptions import EntityNotFoundException, OperationNotSupportedException
VAPP_NETWORK_STATES = ['present', 'absent']
def vapp_network_argument_spec():
return dict(
network=dict(type='str', required=True),
vapp=dict(type='str', required=True),
vdc=dict(type='str', required=True),
fence_mode=dict(type='str', required=False, default=FenceMode.BRIDGED.value),
parent_network=dict(type='str', required=False, default=None),
ip_scope=dict(type='str', required=False, default=None),
state=dict(choices=VAPP_NETWORK_STATES, required=True),
)
class VappNetwork(VcdAnsibleModule):
def __init__(self, **kwargs):
super(VappNetwork, self).__init__(**kwargs)
vapp_resource = self.get_resource()
self.vapp = VApp(self.client, resource=vapp_resource)
def manage_states(self):
state = self.params.get('state')
if state == "present":
return self.add_network()
if state == "absent":
return self.delete_network()
def get_resource(self):
vapp = self.params.get('vapp')
vdc = self.params.get('vdc')
org_resource = Org(self.client, resource=self.client.get_org())
vdc_resource = VDC(self.client, resource=org_resource.get_vdc(vdc))
vapp_resource_href = vdc_resource.get_resource_href(name=vapp, entity_type=EntityType.VAPP)
vapp_resource = self.client.get_resource(vapp_resource_href)
return vapp_resource
def get_network(self):
network_name = self.params.get('network')
networks = self.vapp.get_all_networks()
for network in networks:
if network.get('{'+NSMAP['ovf']+'}name') == network_name:
return network
raise EntityNotFoundException('Can\'t find the specified vApp network')
def delete_network(self):
network_name = self.params.get('network')
response = dict()
response['changed'] = False
try:
self.get_network()
except EntityNotFoundException:
response['warnings'] = 'Vapp Network {} is not present.'.format(network_name)
else:
network_config_section = self.vapp.resource.NetworkConfigSection
for network_config in network_config_section.NetworkConfig:
if network_config.get('networkName') == network_name:
network_config_section.remove(network_config)
delete_network_task = self.client.put_linked_resource(
self.vapp.resource.NetworkConfigSection, RelationType.EDIT,
EntityType.NETWORK_CONFIG_SECTION.value,
network_config_section)
self.execute_task(delete_network_task)
response['msg'] = 'Vapp Network {} has been deleted.'.format(network_name)
response['changed'] = True
return response
def add_network(self):
network_name = self.params.get('network')
fence_mode = self.params.get('fence_mode')
parent_network = self.params.get('parent_network')
ip_scope = self.params.get('ip_scope')
response = dict()
response['changed'] = False
try:
self.get_network()
except EntityNotFoundException:
network_config_section = self.vapp.resource.NetworkConfigSection
config = E.Configuration()
if parent_network:
vdc = self.params.get('vdc')
org_resource = Org(self.client, resource=self.client.get_org())
vdc_resource = VDC(self.client, resource=org_resource.get_vdc(vdc))
orgvdc_networks = vdc_resource.list_orgvdc_network_resources(parent_network)
parent = next((network for network in orgvdc_networks if network.get('name') == parent_network), None)
if parent:
config.append(E.ParentNetwork(href=parent.get('href')))
else:
                    raise EntityNotFoundException('Parent network {0} does not exist'.format(parent_network))
elif ip_scope:
scope = E.IpScope(
E.IsInherited('false'),
E.Gateway(str(ip_network(ip_scope, strict=False).network_address+1)),
E.Netmask(str(ip_network(ip_scope, strict=False).netmask)))
config.append(E.IpScopes(scope))
else:
                # VappNetworkCreateError is undefined in this module, so raise a standard error instead.
                raise ValueError('Either parent_network or ip_scope must be set')
config.append(E.FenceMode(fence_mode))
network_config = E.NetworkConfig(config, networkName=network_name)
network_config_section.append(network_config)
add_network_task = self.client.put_linked_resource(
self.vapp.resource.NetworkConfigSection, RelationType.EDIT,
EntityType.NETWORK_CONFIG_SECTION.value,
network_config_section)
self.execute_task(add_network_task)
response['msg'] = 'Vapp Network {} has been added'.format(network_name)
response['changed'] = True
else:
response['warnings'] = 'Vapp Network {} is already present.'.format(network_name)
return response
def main():
argument_spec = vapp_network_argument_spec()
response = dict(
msg=dict(type='str')
)
module = VappNetwork(argument_spec=argument_spec, supports_check_mode=True)
try:
if not module.params.get('state'):
raise Exception('Please provide the state for the resource.')
response = module.manage_states()
module.exit_json(**response)
except Exception as error:
        response['msg'] = str(error)
module.fail_json(**response)
if __name__ == '__main__':
main()
|
nilq/baby-python
|
python
|
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import functools
import random
import go
import numpy as np
import tensorflow as tf
"""
Allowable symmetries:
identity [12][34]
rot90 [24][13]
rot180 [43][21]
rot270 [31][42]
flip [13][24]
fliprot90 [34][12]
fliprot180 [42][31]
fliprot270 [21][43]
"""
INVERSES = {
'identity': 'identity',
'rot90': 'rot270',
'rot180': 'rot180',
'rot270': 'rot90',
'flip': 'flip',
'fliprot90': 'fliprot90',
'fliprot180': 'fliprot180',
'fliprot270': 'fliprot270',
}
IMPLS = {
'identity': lambda x: x,
'rot90': np.rot90,
'rot180': functools.partial(np.rot90, k=2),
'rot270': functools.partial(np.rot90, k=3),
'flip': lambda x: np.rot90(np.fliplr(x)),
'fliprot90': np.flipud,
'fliprot180': lambda x: np.rot90(np.flipud(x)),
'fliprot270': np.fliplr,
}
assert set(IMPLS.keys()) == set(INVERSES.keys())
# A symmetry is just a string describing the transformation.
SYMMETRIES = list(INVERSES.keys())
def invert_symmetry(s):
return INVERSES[s]
def apply_symmetry_feat(sym, features):
return IMPLS[sym](features)
def apply_symmetry_pi(s, pi):
pi = np.copy(pi)
# rotate all moves except for the pass move at end
pi[:-1] = IMPLS[s](pi[:-1].reshape([go.N, go.N])).ravel()
return pi
def randomize_symmetries_feat(features):
symmetries_used = [random.choice(SYMMETRIES) for _ in features]
return symmetries_used, [apply_symmetry_feat(s, f)
for s, f in zip(symmetries_used, features)]
def invert_symmetries_pi(symmetries, pis):
return [apply_symmetry_pi(invert_symmetry(s), pi)
for s, pi in zip(symmetries, pis)]
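# Sanity check of the inverse property on a dummy board-sized array
# (illustrative; assumes go.N has been configured):
#
#   f = np.arange(go.N * go.N).reshape(go.N, go.N)
#   for s in SYMMETRIES:
#       g = apply_symmetry_feat(invert_symmetry(s), apply_symmetry_feat(s, f))
#       assert np.array_equal(f, g)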
def rotate_train_nhwc(x, pi):
sym = tf.random_uniform(
[],
minval=0,
maxval=len(SYMMETRIES),
dtype=tf.int32,
seed=123)
def rotate(tensor):
# flipLeftRight
tensor = tf.where(
tf.bitwise.bitwise_and(sym, 1) > 0,
tf.reverse(tensor, axis=[0]),
tensor)
# flipUpDown
tensor = tf.where(
tf.bitwise.bitwise_and(sym, 2) > 0,
tf.reverse(tensor, axis=[1]),
tensor)
# flipDiagonal
tensor = tf.where(
tf.bitwise.bitwise_and(sym, 4) > 0,
tf.transpose(tensor, perm=[1, 0, 2]),
tensor)
return tensor
# TODO(tommadams): use tf.ensure_shape instead of tf.assert_equal.
squares = go.N * go.N
assert_shape_pi = tf.assert_equal(pi.shape.as_list(), [squares + 1])
x_shape = x.shape.as_list()
assert_shape_x = tf.assert_equal(x_shape, [go.N, go.N, x_shape[2]])
pi_move = tf.slice(pi, [0], [squares], name="slice_moves")
pi_pass = tf.slice(pi, [squares], [1], name="slice_pass")
# Add a final dim so that x and pi have same shape: [N,N,num_features].
pi_n_by_n = tf.reshape(pi_move, [go.N, go.N, 1])
with tf.control_dependencies([assert_shape_x, assert_shape_pi]):
pi_rot = tf.concat(
[tf.reshape(rotate(pi_n_by_n), [squares]), pi_pass],
axis=0)
return rotate(x), pi_rot
def rotate_train_nchw(x, pi):
sym = tf.random_uniform(
[],
minval=0,
maxval=len(SYMMETRIES),
dtype=tf.int32,
seed=123)
def rotate(tensor):
# flipLeftRight
tensor = tf.where(
tf.bitwise.bitwise_and(sym, 1) > 0,
tf.reverse(tensor, axis=[1]),
tensor)
# flipUpDown
tensor = tf.where(
tf.bitwise.bitwise_and(sym, 2) > 0,
tf.reverse(tensor, axis=[2]),
tensor)
# flipDiagonal
tensor = tf.where(
tf.bitwise.bitwise_and(sym, 4) > 0,
tf.transpose(tensor, perm=[0, 2, 1]),
tensor)
return tensor
# TODO(tommadams): use tf.ensure_shape instead of tf.assert_equal.
squares = go.N * go.N
assert_shape_pi = tf.assert_equal(pi.shape.as_list(), [squares + 1])
x_shape = x.shape.as_list()
assert_shape_x = tf.assert_equal(x_shape, [x_shape[0], go.N, go.N])
pi_move = tf.slice(pi, [0], [squares], name="slice_moves")
pi_pass = tf.slice(pi, [squares], [1], name="slice_pass")
# Add a dim so that x and pi have same shape: [num_features,N,N].
pi_n_by_n = tf.reshape(pi_move, [1, go.N, go.N])
with tf.control_dependencies([assert_shape_x, assert_shape_pi]):
pi_rot = tf.concat(
[tf.reshape(rotate(pi_n_by_n), [squares]), pi_pass],
axis=0)
return rotate(x), pi_rot
def apply_symmetry_dual(X0, y0, v0, num_symmetries=8):
"""
to transform on the fly, just need to use tensor ops
# return tf.repeat(X0, repeats=2, axis=0), tf.repeat(y0, repeats=2, axis=0), tf.repeat(v0, repeats=2, axis=0)
# return tf.experimental.numpy.rot90(X0, axes=(1, 2)), y0, v0
"""
Xs, ys, vs = [], [], []
for s in random.sample(SYMMETRIES, num_symmetries):
Xs.append(apply_symmetry_feat(s, X0))
ys.append(apply_symmetry_pi(s, y0))
vs.append(v0)
return np.stack(Xs), np.stack(ys), np.stack(vs)
|
nilq/baby-python
|
python
|
import asyncio
import unittest
from unittest.mock import ANY
from aiobeanstalk.proto import Client
from aiobeanstalk.packets import Using, Inserted
def btalk_test(fun):
fun = asyncio.coroutine(fun)
def wrapper(self):
@asyncio.coroutine
def full_test():
cli = yield from Client.connect('localhost', 11300, loop=self.loop)
try:
yield from fun(self, cli)
finally:
cli.close()
self.loop.run_until_complete(full_test())
return wrapper
class TestCase(unittest.TestCase):
def setUp(self):
asyncio.set_event_loop(None)
self.loop = asyncio.new_event_loop()
@btalk_test
def testPut(self, btalk):
self.assertEqual((yield from btalk.send_command('use', 'test.q1')),
Using('test.q1'))
self.assertEqual((yield from btalk.send_command(
'put', 0, 0, 30,
body=b'hello world')),
Inserted(ANY))
|
nilq/baby-python
|
python
|
import os.path

# Each charmap entry is an 8x8 monochrome glyph stored as eight 8-bit strings;
# charmapDescription holds the printable name of each glyph.
charmap = []
charmapDescription = []
# If a previously generated charmap.mif exists, parse the glyphs back out of
# it; otherwise fall back to the hard-coded table below.
if os.path.isfile('charmap.mif'):
    charmapFile = open('charmap.mif', 'r+')
    lines = charmapFile.readlines()
    cont = 0
    character = []
    for line in lines:
        # Data lines are indented; the 8 bits sit just before the line ending.
        if line[0] == " ":
            newLine = line[-10:-2]
            # Every 8 rows close out one glyph.
            if cont % 8 == 0 and cont != 0:
                charmap.append(character[:])
                character.clear()
            character.append(newLine[:])
            cont += 1
        # Comment lines of the form "-- [n] name" carry the glyph description.
        if line[0] == '-':
            newDescription = line[line.index(']')+1:-1]
            charmapDescription.append(newDescription[:])
    charmap.append(character[:])
    charmapFile.close()
else:
charmap = [
#0
[
"00000000",
"00000000",
"00000000",
"00000000",
"00000000",
"00000000",
"00000000",
"00000000"
],
#1
[
"00000000",
"00000000",
"00000000",
"00000000",
"00000000",
"00000000",
"00000000",
"00000000"
],
#2
[
"00000000",
"00000000",
"00000000",
"00000000",
"00000000",
"00000000",
"00000000",
"00000000"
],
#3
[
"00000000",
"00000000",
"00000000",
"00000000",
"00000000",
"00000000",
"00000000",
"00000000"
],
#4
[
"00000000",
"00000000",
"00000000",
"00000000",
"00000000",
"00000000",
"00000000",
"00000000"
],
#5
[
"00000000",
"00000000",
"00000000",
"00000000",
"00000000",
"00000000",
"00000000",
"00000000"
],
#6
[
"00000000",
"00000000",
"00000000",
"00000000",
"00000000",
"00000000",
"00000000",
"00000000"
],
#7
[
"00000000",
"00000000",
"00000000",
"00000000",
"00000000",
"00000000",
"00000000",
"00000000"
],
#8
[
"00000000",
"00000000",
"00000000",
"00000000",
"00000000",
"00000000",
"00000000",
"00000000"
],
#9
[
"00000000",
"00000000",
"00000000",
"00000000",
"00000000",
"00000000",
"00000000",
"00000000"
],
#10
[
"00000000",
"00000000",
"00000000",
"00000000",
"00000000",
"00000000",
"00000000",
"00000000"
],
#11
[
"00000000",
"00000000",
"00000000",
"00000000",
"00000000",
"00000000",
"00000000",
"00000000"
],
#12
[
"00000000",
"00000000",
"00000000",
"00000000",
"00000000",
"00000000",
"00000000",
"00000000"
],
#13
[
"00000000",
"00000000",
"00000000",
"00000000",
"00000000",
"00000000",
"00000000",
"00000000"
],
#14
[
"00000000",
"00000000",
"00000000",
"00000000",
"00000000",
"00000000",
"00000000",
"00000000"
],
#15
[
"00000000",
"00000000",
"00000000",
"00000000",
"00000000",
"00000000",
"00000000",
"00000000"
],
#16
[
"00000000",
"00000000",
"00000000",
"00000000",
"00000000",
"00000000",
"00000000",
"00000000"
],
#17
[
"00000000",
"00000000",
"00000000",
"00000000",
"00000000",
"00000000",
"00000000",
"00000000"
],
#18
[
"00000000",
"00000000",
"00000000",
"00000000",
"00000000",
"00000000",
"00000000",
"00000000"
],
#19
[
"00000000",
"00000000",
"00000000",
"00000000",
"00000000",
"00000000",
"00000000",
"00000000"
],
#20
[
"00000000",
"00000000",
"00000000",
"00000000",
"00000000",
"00000000",
"00000000",
"00000000"
],
#21
[
"00000000",
"00000000",
"00000000",
"00000000",
"00000000",
"00000000",
"00000000",
"00000000"
],
#22
[
"00000000",
"00000000",
"00000000",
"00000000",
"00000000",
"00000000",
"00000000",
"00000000"
],
#23
[
"00000000",
"00000000",
"00000000",
"00000000",
"00000000",
"00000000",
"00000000",
"00000000"
],
#24
[
"00000000",
"00000000",
"00000000",
"00000000",
"00000000",
"00000000",
"00000000",
"00000000"
],
#25
[
"00000000",
"00000000",
"00000000",
"00000000",
"00000000",
"00000000",
"00000000",
"00000000"
],
#26
[
"00000000",
"00000000",
"00000000",
"00000000",
"00000000",
"00000000",
"00000000",
"00000000"
],
#27
[
"00000000",
"00000000",
"00000000",
"00000000",
"00000000",
"00000000",
"00000000",
"00000000"
],
#28
[
"00000000",
"00000000",
"00000000",
"00000000",
"00000000",
"00000000",
"00000000",
"00000000"
],
#29
[
"00000000",
"00000000",
"00000000",
"00000000",
"00000000",
"00000000",
"00000000",
"00000000"
],
#30
[
"00000000",
"00000000",
"00000000",
"00000000",
"00000000",
"00000000",
"00000000",
"00000000"
],
#31
[
"00000000",
"00000000",
"00000000",
"00000000",
"00000000",
"00000000",
"00000000",
"00000000"
],
#32 SPACE
[
"00000000",
"00000000",
"00000000",
"00000000",
"00000000",
"00000000",
"00000000",
"00000000"
],
#33 !
[
"00011000",
"00011000",
"00011000",
"00011000",
"00011000",
"00000000",
"00011000",
"00000000"
],
#34 "
[
"01100110",
"01100110",
"01100110",
"00000000",
"00000000",
"00000000",
"00000000",
"00000000"
],
#35 #
[
"01100110",
"01100110",
"11111111",
"01100110",
"11111111",
"01100110",
"01100110",
"00000000"
],
#36 $
[
"00011000",
"00111110",
"01100000",
"00111100",
"00000110",
"01111100",
"00011000",
"00000000"
],
#37 %
[
"01100010",
"01100110",
"00001100",
"00011000",
"00110000",
"01100110",
"01000110",
"00000000"
],
#38 &
[
"00111100",
"01100110",
"00111100",
"00111000",
"01100111",
"01100110",
"00111111",
"00000000"
],
#39 '
[
"00000110",
"00001100",
"00011000",
"00000000",
"00000000",
"00000000",
"00000000",
"00000000"
],
#40 (
[
"00001100",
"00011000",
"00110000",
"00110000",
"00110000",
"00011000",
"00001100",
"00000000"
],
#41 )
[
"00110000",
"00011000",
"00001100",
"00001100",
"00001100",
"00011000",
"00110000",
"00000000"
],
#42 *
[
"00000000",
"01100110",
"00111100",
"11111111",
"00111100",
"01100110",
"00000000",
"00000000"
],
#43 +
[
"00000000",
"00011000",
"00011000",
"01111110",
"00011000",
"00011000",
"00000000",
"00000000"
],
#44 ,
[
"00000000",
"00000000",
"00000000",
"00000000",
"00000000",
"00011000",
"00011000",
"00110000"
],
#45 -
[
"00000000",
"00000000",
"00000000",
"01111110",
"00000000",
"00000000",
"00000000",
"00000000"
],
#46 .
[
"00000000",
"00000000",
"00000000",
"00000000",
"00000000",
"00011000",
"00011000",
"00000000"
],
#47 /
[
"00000000",
"00000011",
"00000110",
"00001100",
"00011000",
"00110000",
"01100000",
"00000000"
],
#48 0
[
"00111100",
"01100110",
"01101110",
"01110110",
"01100110",
"01100110",
"00111100",
"00000000"
],
#49 1
[
"00011000",
"00011000",
"00111000",
"00011000",
"00011000",
"00011000",
"01111110",
"00000000"
],
#50 2
[
"00111100",
"01100110",
"00000110",
"00001100",
"00110000",
"01100000",
"01111110",
"00000000"
],
#51 3
[
"00111100",
"01100110",
"00000110",
"00011100",
"00000110",
"01100110",
"00111100",
"00000000"
],
#52 4
[
"00000110",
"00001110",
"00011110",
"01100110",
"01111111",
"00000110",
"00000110",
"00000000"
],
#53 5
[
"01111110",
"01100000",
"01111100",
"00000110",
"00000110",
"01100110",
"00111100",
"00000000"
],
#54 6
[
"00111100",
"01100110",
"01100000",
"01111100",
"01100110",
"01100110",
"00111100",
"00000000"
],
#55 7
[
"01111110",
"01100110",
"00001100",
"00011000",
"00011000",
"00011000",
"00011000",
"00000000"
],
#56 8
[
"00111100",
"01100110",
"01100110",
"00111100",
"01100110",
"01100110",
"00111100",
"00000000"
],
#57 9
[
"00111100",
"01100110",
"01100110",
"00111110",
"00000110",
"01100110",
"00111100",
"00000000"
],
#58 :
[
"00000000",
"00011000",
"00011000",
"00000000",
"00000000",
"00011000",
"00011000",
"00000000"
],
#59 ;
[
"00000000",
"00011000",
"00011000",
"00000000",
"00011000",
"00110000",
"00000000",
"00000000"
],
#60 <
[
"00000000",
"00000110",
"00011000",
"01100000",
"01100000",
"00011000",
"00000110",
"00000000"
],
#61 =
[
"00000000",
"00000000",
"01111110",
"00000000",
"00000000",
"01111110",
"00000000",
"00000000"
],
#62 >
[
"00000000",
"01100000",
"00011000",
"00000110",
"00000110",
"00011000",
"01100000",
"00000000"
],
#63 ?
[
"00111100",
"01000010",
"00000010",
"00000100",
"00001000",
"00000000",
"00001000",
"00000000"
],
#64 @
[
"00111100",
"01100110",
"01101110",
"01101110",
"01100000",
"01100010",
"00111100",
"00000000"
],
#65 A
[
"00011000",
"00111100",
"01100110",
"01111110",
"01100110",
"01100110",
"01100110",
"00000000"
],
#66 B
[
"01111100",
"01100110",
"01100110",
"01111100",
"01100110",
"01100110",
"01111100",
"00000000"
],
#67 C
[
"00111100",
"01100110",
"01100000",
"01100000",
"01100000",
"01100110",
"00111100",
"00000000"
],
#68 D
[
"01111000",
"01101100",
"01100110",
"01100110",
"01100110",
"01101100",
"01111000",
"00000000"
],
#69 E
[
"01111110",
"01100000",
"01100000",
"01111000",
"01100000",
"01100000",
"01111110",
"00000000"
],
#70 F
[
"01111110",
"01100000",
"01100000",
"01111000",
"01100000",
"01100000",
"01100000",
"00000000"
],
#71 G
[
"00111100",
"01100110",
"01100000",
"01101110",
"01100110",
"01100110",
"00111100",
"00000000"
],
#72 H
[
"01100110",
"01100110",
"01100110",
"01111110",
"01100110",
"01100110",
"01100110",
"00000000"
],
#73 I
[
"00111100",
"00011000",
"00011000",
"00011000",
"00011000",
"00011000",
"00111100",
"00000000"
],
#74 J
[
"00011110",
"00001100",
"00001100",
"00001100",
"00001100",
"01101100",
"00111000",
"00000000"
],
#75 K
[
"01100110",
"01101100",
"01111000",
"01110000",
"01111000",
"01101100",
"01100110",
"00000000"
],
#76 L
[
"01100000",
"01100000",
"01100000",
"01100000",
"01100000",
"01100000",
"01111110",
"00000000"
],
#77 M
[
"01100011",
"01110111",
"01111111",
"01101011",
"01100011",
"01100011",
"01100011",
"00000000"
],
#78 N
[
"01100110",
"01110110",
"01111110",
"01111110",
"01101110",
"01100110",
"01100110",
"00000000"
],
#79 O
[
"00111100",
"01100110",
"01100110",
"01100110",
"01100110",
"01100110",
"00111100",
"00000000"
],
#80 P
[
"01111100",
"01100110",
"01100110",
"01111100",
"01100000",
"01100000",
"01100000",
"00000000"
],
#81 Q
[
"00111100",
"01100110",
"01100110",
"01100110",
"01100110",
"00111100",
"00001110",
"00000000"
],
#82 R
[
"00111100",
"01100110",
"01100110",
"01111100",
"01111000",
"01101100",
"01100110",
"00000000"
],
#83 S
[
"00111100",
"01100110",
"01100000",
"00111100",
"00000110",
"01100110",
"00111100",
"00000000"
],
#84 T
[
"01111110",
"00011000",
"00011000",
"00011000",
"00011000",
"00011000",
"00011000",
"00000000"
],
#85 U
[
"01100110",
"01100110",
"01100110",
"01100110",
"01100110",
"01100110",
"00111100",
"00000000"
],
#86 V
[
"01100110",
"01100110",
"01100110",
"01100110",
"01100110",
"00111100",
"00011000",
"00000000"
],
#87 W
[
"01100011",
"01100011",
"01100011",
"01101011",
"01111111",
"01110111",
"01100011",
"00000000"
],
#88 X
[
"01100110",
"01100110",
"00111100",
"00011000",
"00111100",
"01100110",
"01100110",
"00000000"
],
#89 Y
[
"01100110",
"01100110",
"01100110",
"00111100",
"00011000",
"00011000",
"00011000",
"00000000"
],
#90 Z
[
"01111110",
"00000110",
"00001100",
"00011000",
"00110000",
"01100000",
"01111110",
"00000000"
],
#91 [
[
"00111100",
"00110000",
"00110000",
"00110000",
"00110000",
"00110000",
"00111100",
"00000000"
],
#92 \
[
"00000000",
"01100000",
"00110000",
"00011000",
"00001100",
"00000110",
"00000011",
"00000000"
],
#93 ]
[
"00111100",
"00001100",
"00001100",
"00001100",
"00001100",
"00001100",
"00111100",
"00000000"
],
#94 ^
[
"00011000",
"00111100",
"01100110",
"00000000",
"00000000",
"00000000",
"00000000",
"00000000"
],
#95 _
[
"00000000",
"00000000",
"00000000",
"00000000",
"00000000",
"00000000",
"01111111",
"00000000"
],
#96 `
[
"00110000",
"00011000",
"00001100",
"00000000",
"00000000",
"00000000",
"00000000",
"00000000"
],
#97 a
[
"00000000",
"00000000",
"00111100",
"00000110",
"00111110",
"01100110",
"00111110",
"00000000"
],
#98 b
[
"00000000",
"01100000",
"01100000",
"01111100",
"01100110",
"01100110",
"01111100",
"00000000"
],
#99 c
[
"00000000",
"00000000",
"00111110",
"01100000",
"01100000",
"01100000",
"00111110",
"00000000"
],
#100 d
[
"00000000",
"00000110",
"00000110",
"00111110",
"01100110",
"01100110",
"00111110",
"00000000"
],
#101 e
[
"00000000",
"00000000",
"00111100",
"01100110",
"01111110",
"01100000",
"00111110",
"00000000"
],
#102 f
[
"00000000",
"00111100",
"01100000",
"01111000",
"01100000",
"01100000",
"01100000",
"00000000"
],
#103 g
[
"00000000",
"00000000",
"00111100",
"01100110",
"00111110",
"00000110",
"00111100",
"00000000"
],
#104 h
[
"00000000",
"01100000",
"01100000",
"01111100",
"01100110",
"01100110",
"01100110",
"00000000"
],
#105 i
[
"00000000",
"00011000",
"00000000",
"00111000",
"00011000",
"00011000",
"00111100",
"00000000"
],
#106 j
[
"00000000",
"00011000",
"00000000",
"00111000",
"00011000",
"00011000",
"01110000",
"00000000"
],
#107 k
[
"00000000",
"01100000",
"01100110",
"01101100",
"01111000",
"01101100",
"01100110",
"00000000"
],
#108 l
[
"00000000",
"01110000",
"00110000",
"00110000",
"00110000",
"00110000",
"00011100",
"00000000"
],
#109 m
[
"00000000",
"00000000",
"00110110",
"01101011",
"01100011",
"01100011",
"01100011",
"00000000"
],
#110 n
[
"00000000",
"00000000",
"00111100",
"01100110",
"01100110",
"01100110",
"01100110",
"00000000"
],
#111 o
[
"00000000",
"00000000",
"00111100",
"01100110",
"01100110",
"01100110",
"00111100",
"00000000"
],
#112 p
[
"00000000",
"00000000",
"00111100",
"01100110",
"01111100",
"01100000",
"01100000",
"00000000"
],
#113 q
[
"00000000",
"00000000",
"00111100",
"01100110",
"00111110",
"00000110",
"00000110",
"00000000"
],
#114 r
[
"00000000",
"00000000",
"00111100",
"01100110",
"01100000",
"01100000",
"01100000",
"00000000"
],
#115 s
[
"00000000",
"00000000",
"00111100",
"01100000",
"00111100",
"00000110",
"00111100",
"00000000"
],
#116 t
[
"00000000",
"01100000",
"01100000",
"01111000",
"01100000",
"01100000",
"00111100",
"00000000"
],
#117 u
[
"00000000",
"00000000",
"01100110",
"01100110",
"01100110",
"01100110",
"00111100",
"00000000"
],
#118 v
[
"00000000",
"00000000",
"01100110",
"01100110",
"01100110",
"00111100",
"00011000",
"00000000"
],
#119 w
[
"00000000",
"00000000",
"01100011",
"01100011",
"01100011",
"01101011",
"00110110",
"00000000"
],
#120 x
[
"00000000",
"00000000",
"01100110",
"00111100",
"00011000",
"00111100",
"01100110",
"00000000"
],
#121 y
[
"00000000",
"00000000",
"01100110",
"00111100",
"00011000",
"00110000",
"01100000",
"00000000"
],
#122 z
[
"00000000",
"00000000",
"01111110",
"00001100",
"00011000",
"00110000",
"01111110",
"00000000"
],
#123 {
[
"00001100",
"00011000",
"00110000",
"11110000",
"00110000",
"00011000",
"00001100",
"00000000"
],
#124 |
[
"00011000",
"00011000",
"00011000",
"00011000",
"00011000",
"00011000",
"00011000",
"00000000"
],
#125 }
[
"00110000",
"00011000",
"00001100",
"00001111",
"00001100",
"00011000",
"00110000",
"00000000"
],
#126 ~
[
"00000000",
"00000000",
"00000000",
"01110110",
"11011100",
"00000000",
"00000000",
"00000000"
],
#127
[
"00000000",
"00000000",
"00000000",
"00000000",
"00000000",
"00000000",
"00000000",
"00000000"
]
]
charmapDescription = [
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"SPACE",
"!",
'"',
"#",
"$",
"%",
"&",
"'",
"(",
")",
"*",
"+",
",",
"-",
".",
"/",
"0",
"1",
"2",
"3",
"4",
"5",
"6",
"7",
"8",
"9",
":",
";",
"<",
"=",
">",
"?",
"@",
"A",
"B",
"C",
"D",
"E",
"F",
"G",
"H",
"I",
"J",
"K",
"L",
"M",
"N",
"O",
"P",
"Q",
"R",
"S",
"T",
"U",
"V",
"W",
"X",
"Y",
"Z",
"[",
"\\",
"]",
"^",
"_",
"`",
"a",
"b",
"c",
"d",
"e",
"f",
"g",
"h",
"i",
"j",
"k",
"l",
"m",
"n",
"o",
"p",
"q",
"r",
"s",
"t",
"u",
"v",
"w",
"x",
"y",
"z",
"{",
"|",
"}",
"~",
"ESC"
]
|
nilq/baby-python
|
python
|
'''
Copyright 2017, Fujitsu Network Communications, Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
'''
# Helper methods to create a JUnit file
import xml.etree.ElementTree as ET
import os
from warrior.Framework.Utils.print_Utils import print_debug
from warrior.Framework.Utils import file_Utils
from warrior.WarriorCore.Classes.html_results_class import WarriorHtmlResults
from warrior.WarriorCore.Classes.execution_summary_class import ExecutionSummary
class Junit(object):
"""Junit class"""
def __init__(self, filename, **kwargs):
"""constructor """
#self.junit_xslt = "{0}{1}Reporting{1}junit_to_html.xsl".format(Tools.__path__[0], os.sep)
self.junit_xslt = "{0}{1}Reporting{1}junit_to_html.xsl".format(os.getenv("WAR_TOOLS_DIR"), os.sep)
self.root = self.create_element("testsuites", tests="0", suites="0",
**self.init_arg(**kwargs))
self.filename = filename
properties = self.create_element("properties")
self.root.append(properties)
def init_arg(self, **kwargs):
"""
initialize the common attribute for an element
"""
default_keys = ["errors", "failures", "skipped", "time", "passes"]
result = {}
for default_key in default_keys:
result[default_key] = kwargs[default_key] if default_key in kwargs else "0"
for key, val in list(kwargs.items()):
result[key] = val
return result
def create_testsuite(self, location, **kwargs):
"""
Create a testsuite element
"""
testsuite = self.create_element("testsuite", tests="0", **self.init_arg(**kwargs))
properties = self.create_element("properties")
testsuite.append(properties)
properties.append(self.create_element("property", {"name": "location", "value": location}))
self.root.append(testsuite)
def create_testcase(self, location, timestamp, ts_timestamp, name,
classname="customTestsuite_independant_testcase_execution",
tag="testcase", **kwargs):
"""
Create a testcase element
"""
if self.root.find("testsuite") is None:
self.update_attr("timestamp", timestamp, "pj", "0")
self.create_testsuite(location=location, name=classname, timestamp=timestamp,
display='False', **self.init_arg(**kwargs))
for ts in self.root.findall("testsuite"):
if ts.get("timestamp") == ts_timestamp:
#create an element with name as in tag
tc = self.create_element(tag, classname=classname, timestamp=timestamp,
exceptions="0", keywords="0", name=name,
display='True', **self.init_arg(**kwargs))
ts.append(tc)
properties = self.create_element("properties")
tc.append(properties)
@classmethod
def create_element(cls, tagname="", attr=None, **kwargs):
"""create an xml element with given name and a dict of attribute"""
if attr is None:
attr = {}
elem = ET.Element(tagname)
for key, val in list(attr.items()):
elem.set(str(key), str(val))
for key, val in list(kwargs.items()):
elem.set(str(key), str(val))
return elem
def get_family_with_timestamp(self, timestamp):
""" Get case, suite & root element based on the timestamp value """
for testsuite in list(self.root):
for testcase in list(testsuite):
if testcase.get("timestamp") == timestamp:
return [testcase, testsuite, self.root]
def get_tc_with_timestamp(self, timestamp):
""" Get case element based on the timestamp value """
for testsuite in list(self.root):
for testcase in list(testsuite):
if testcase.get("timestamp") == timestamp:
return testcase
def get_ts_with_timestamp(self, timestamp):
""" Get suite element based on the timestamp value """
for testsuite in list(self.root):
if testsuite.get("timestamp") == timestamp:
return testsuite
def add_keyword_result(self, tc_timestamp, step_num, kw_name, status, kw_timestamp, duration,
resultfile, impact, onerror, desc="", info="", tc_name="",
tc_resultsdir=""):
"""form a keyword status dict with kw info and call function to build keyword elem"""
if str(status).lower() == "true":
status = "PASS"
elif str(status).lower() == "false":
status = "FAIL"
keyword_items = {"type": "keyword", 'display': 'True', "step": step_num,
"name": kw_name, "status": status, "timestamp": kw_timestamp,
"time": duration, "resultfile": resultfile,
"impact": impact, "onerror": onerror, "description": desc,
"info":info}
        # If a failing status is encountered, add a defects attribute to the
        # keyword tag whose value is the path to the defects file.
failing_status = ['FAIL', 'EXCEPTION', 'ERROR']
if str(status).upper() in failing_status:
defects_dir = os.path.dirname(tc_resultsdir) + os.sep + 'Defects'
kw_resultfile_nameonly = file_Utils.getNameOnly(os.path.basename(resultfile))
defects_file = tc_name + "_" + kw_resultfile_nameonly + ".json"
defects_filepath = defects_dir + os.sep + defects_file
keyword_items['defects'] = defects_filepath
self.add_property(name=kw_name, value="KEYWORD_DISCARD", elem_type="kw",
timestamp=tc_timestamp, keyword_items=keyword_items)
def add_testcase_message(self, timestamp, status):
""" Add a message element for fail/error/skip cases """
elem = self.get_tc_with_timestamp(timestamp)
if elem is None:
elem = self.get_ts_with_timestamp(timestamp)
if str(status).lower() == "false":
elem.append(self.create_element("failure", {"message": "test failure"}))
elif str(status).lower() == "error":
elem.append(self.create_element("error", {}))
elif str(status).lower() == "skipped":
elem.append(self.create_element("skipped", {}))
def add_requirement(self, requirement, timestamp):
"""add a new requirement when called"""
self.get_ts_with_timestamp(timestamp).find("properties").append(self.create_element\
("property", {"name": "requirement", "value": requirement}))
def add_property(self, name, value, elem_type, timestamp, **kwargs):
"""add a new property to specific element when called
since steps are logged as property, need special handling to create kw item"""
if elem_type == "pj":
elem = self.root
elif elem_type == "ts":
elem = self.get_ts_with_timestamp(timestamp)
else:
elem = self.get_tc_with_timestamp(timestamp)
if elem_type == "kw":
item = self.create_element("property", kwargs["keyword_items"])
else:
item = self.create_element("property", {"name": name, "value": value})
elem.find("properties").append(item)
def add_jobid(self, jobid):
"""add a new requirement when called"""
self.root.append(self.create_element("property", {"name": "jobid", "value": jobid}))
def add_project_location(self, location):
"""add a new requirement when called"""
self.root.find("properties").append(self.create_element(
"property", {"name": "location", "value": location}))
self.root.append(self.create_element(
"property", {"name": "location", "value": location}))
def update_count(self, attr, value, elem_type, timestamp="0"):
"""
increase the value of an attribute based on
element type (project, testsuite or testcase) and timestamp
"""
if elem_type == "pj":
elem = self.root
elif elem_type == "ts":
elem = self.get_ts_with_timestamp(timestamp)
else:
elem = self.get_tc_with_timestamp(timestamp)
attr = str(attr).lower()
statuses = {"true": "passes", "false": "failures", "exception": "exceptions",
"error": "errors", "skip": "skipped"}
if attr in statuses:
attr = statuses[attr]
if elem.tag != "testcase" and attr == "exceptions":
attr = "errors"
if elem.get(attr) is not None:
elem.set(attr, str(int(elem.get(attr)) + int(value)))
def update_attr(self, attr, value, elem_type, timestamp=None):
"""
update the value of an attribute based on
element type (project, testsuite or testcase) and timestamp
special handling to create failure message for fail/exception status
"""
if elem_type == "pj":
elem = self.root
elif elem_type == "ts":
elem = self.get_ts_with_timestamp(timestamp)
else:
elem = self.get_tc_with_timestamp(timestamp)
if attr == "status":
if elem.tag == "testcase":
if attr == "false":
elem.append(self.create_element("failure", {"message": "test failure"}))
elif attr == "exception" or attr == "error":
elem.append(self.create_element("failure",
{"message": "errors/exceptions "\
"encountered during testcase execution"}))
if str(value).lower() == "true":
value = "PASS"
elif str(value).lower() == "false":
value = "FAIL"
elem.set(attr, value)
def _junit_to_html(self, junit_file, print_summary=True):
""" Convert junit file to html"""
if not hasattr(self, 'html_result_obj'):
self.html_result_obj = WarriorHtmlResults(junit_file)
self.html_result_obj.write_live_results(junit_file, None, print_summary)
def remove_html_obj(self):
"""checks and removes html_results_obj from junit object usecase in parralel execution"""
if hasattr(self, 'html_result_obj'):
del self.html_result_obj
def output_junit(self, path, print_summary=True):
"""output the actual file
copy xslt to the results folder
Print execution summary in console based on 'print_summary' value """
if print_summary is True:
fpath = path + os.sep + self.filename + "_junit.xml"
tree = ET.ElementTree(self.root)
tree.write(fpath)
summary_obj = ExecutionSummary(fpath)
summary_obj.print_result_in_console(fpath)
print_debug("\n")
if print_summary is True:
self._junit_to_html(fpath, print_summary)
def junit_output(self, path, print_summary=False):
"""output the actual file
copy xslt to the results folder """
fpath = path + os.sep + self.filename + "_junit.xml"
tree = ET.ElementTree(self.root)
tree.write(fpath)
self._junit_to_html(fpath, print_summary)
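# Usage sketch (the filename, timestamps and output path below are
# illustrative only, not taken from the original module):
#
#   j = Junit("demo", timestamp="2017-01-01T00:00:00")
#   j.create_testsuite(location="/tmp", name="suite1",
#                      timestamp="2017-01-01T00:00:00")
#   j.junit_output("/tmp")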
|
nilq/baby-python
|
python
|
from flask import Blueprint, request, jsonify
from werkzeug.security import check_password_hash
from flask_login import login_user, logout_user
from app.core import db
from app.api_decorators import requires_login, requires_keys
from app.models.user import User
blueprint = Blueprint('api_slash', __name__, url_prefix='/api')
@blueprint.route('/login/', methods=['POST'])
@requires_keys('email', 'password')
def login():
errors = []
json = request.get_json(force=True)
user = User.query.filter_by(email=json['email']).first()
if user is None:
errors.append('Invalid username/password combination.')
if not errors and not check_password_hash(user.password, json['password']):
errors.append('Invalid username/password combination.')
if not errors:
login_user(user, remember=False)
return jsonify(success=not errors, errors=errors)
@blueprint.route('/logout/', methods=['GET'])
@requires_login
def logout():
logout_user()
return jsonify(success=True)
@blueprint.route('/register/', methods=['POST'])
# Need to add challenge / response captcha stuff in later
# @requires_keys('email', 'password', 'confirm', 'challenge', 'response')
@requires_keys('email', 'name', 'password', 'confirm')
def register():
json = request.get_json(force=True)
errors = []
user_id = None
# captcha_result = submit(json['challenge'], json['response'],
# RECAPTCHA_PRIVATE_KEY, request.remote_addr)
# if not captcha_result.is_valid:
# errors.append('captcha: Validation failed.')
if not errors:
if User.query.filter_by(email=json['email']).first():
errors.append('An account already exists with this email.')
# Need better password requirements later
if len(json['password']) < 6:
errors.append('Password must be at least 6 characters long.')
if json['password'] != json['confirm']:
errors.append('Passwords do not match.')
if not errors:
user = User(json)
db.session.add(user)
db.session.commit()
user_id = user.id
login_user(user)
return jsonify(success=not errors, errors=errors, id=user_id)
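# A minimal sketch of exercising the login endpoint with Flask's test
# client. It assumes an application factory `create_app` that registers
# this blueprint; the factory name and the credentials are illustrative
# assumptions, not part of the original module.
#
# app = create_app()
# with app.test_client() as client:
#     resp = client.post('/api/login/',
#                        json={'email': 'user@example.com', 'password': 'secret'})
#     print(resp.get_json())  # e.g. {'errors': [], 'success': True}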
|
nilq/baby-python
|
python
|
# Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# pylint: disable=invalid-name, too-few-public-methods
"""environment variables template"""
from __future__ import absolute_import
from __future__ import unicode_literals
from __future__ import print_function
import logging # pylint: disable=unused-import
import hpccm.base_object
class envvars(hpccm.base_object):
"""Template for setting environment variables"""
def __init__(self, **kwargs):
"""Initialize template"""
super(envvars, self).__init__(**kwargs)
self.environment = kwargs.get('environment', True)
self.environment_variables = {}
# Use only if the runtime environment is incompatible with the
# non-runtime environment, e.g., PATH contains different
# values. Otherwise, try to use the filtering options.
self.runtime_environment_variables = {}
def environment_step(self, include_only=None, exclude=None, runtime=False):
"""Return dictionary of environment variables"""
if runtime:
e = self.runtime_environment_variables
else:
e = self.environment_variables
if self.environment:
if include_only:
return {x: e[x] for x in e if x in include_only}
elif exclude:
return {x: e[x] for x in e if x not in exclude}
else:
return e
else:
return {}
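# A minimal sketch of how this template mixin is typically consumed (the
# `mytool` class and the PATH value are illustrative assumptions, not part
# of hpccm itself): a building block inherits `envvars`, fills in
# `environment_variables`, and reads them back via `environment_step()`.
#
# class mytool(envvars):
#     def __init__(self, **kwargs):
#         super(mytool, self).__init__(**kwargs)
#         self.environment_variables = {'PATH': '/opt/mytool/bin:$PATH'}
#
# print(mytool().environment_step())  # {'PATH': '/opt/mytool/bin:$PATH'}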
|
nilq/baby-python
|
python
|
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from datetime import datetime
import json
import logging
import os
from google.cloud import firestore
from google.cloud import storage
# API clients
gcs = None
db = None
def analyze(data, context):
"""Function entry point, triggered by creation of an object in a GCS bucket.
The function reads the content of the triggering file, analyses its contents,
and persists the results of the analysis to a new Firestore document.
Args:
data (dict): The trigger event payload.
context (google.cloud.functions.Context): Metadata for the event.
"""
page_metrics = get_gcs_file_contents(data)
max_time_meaningful_paint = int(os.environ.get('MAX_TIME_MEANINGFUL_PAINT'))
analysis_result = analyze_metrics(data, page_metrics,
max_time_meaningful_paint)
docref = persist(analysis_result, data['name'])
logging.info('Created new Firestore document %s/%s describing analysis of %s',
docref.parent.id, docref.id, analysis_result['input_file'])
def get_gcs_file_contents(data):
"""Get the content of the GCS object that triggered this function."""
global gcs
if not gcs:
gcs = storage.Client()
bucket = gcs.get_bucket(data['bucket'])
blob = bucket.blob(data['name'])
return blob.download_as_string()
def persist(analysis_result, document_id):
"""Persist analysis results to the configured Firestore collection."""
global db
if not db:
db = firestore.Client()
collection_name = os.environ.get('METRICS_COLLECTION')
collection = db.collection(collection_name)
inserted = collection.add(analysis_result, document_id=document_id)
return inserted[1]
# [START parse-block]
def analyze_metrics(data, metrics, max_time_meaningful_paint):
"""Parse the page metrics and return a dict with details of the operation."""
calculated = parse_metrics(metrics)
gcs_filename = 'gs://{}/{}'.format(data['bucket'], data['name'])
parse_result = {
'metrics': calculated,
'input_file': gcs_filename,
'page_url': data['metadata']['pageUrl'],
'fetch_timestamp': data['timeCreated'],
'analysis_timestamp': datetime.utcnow().isoformat() + 'Z'
}
# check whether page performance is within threshold
time_meaningful_paint = calculated['FirstMeaningfulPaint']
if time_meaningful_paint > max_time_meaningful_paint:
logging.warning('FAILED: page load time (%d) exceeded max threshold (%d)',
time_meaningful_paint, max_time_meaningful_paint)
parse_result['status'] = 'FAIL'
else:
parse_result['status'] = 'PASS'
return parse_result
def parse_metrics(metrics_str):
    """Parse the raw JSON metrics payload into the metrics of interest."""
metrics_obj = json.loads(metrics_str)
metrics = metrics_obj['metrics']
keys = [x['name'] for x in metrics]
values = [x['value'] for x in metrics]
kv = dict(zip(keys, values))
calculated = {
'DomContentLoaded': calc_event_time(kv, 'DomContentLoaded'),
'FirstMeaningfulPaint': calc_event_time(kv, 'FirstMeaningfulPaint'),
'JSHeapTotalSize': kv['JSHeapTotalSize'],
'JSHeapUsedSize': kv['JSHeapUsedSize']
}
return calculated
# [END parse-block]
def calc_event_time(metrics_kv, event_name):
return int((metrics_kv[event_name] - metrics_kv['NavigationStart']) * 1000)
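# A quick, self-contained check of the metric math above (the sample
# values are made up): the event timestamps here are treated as seconds,
# and calc_event_time() rebases them on NavigationStart and converts the
# result to milliseconds.
if __name__ == "__main__":
    sample = {'NavigationStart': 1000.0, 'FirstMeaningfulPaint': 1001.5}
    assert calc_event_time(sample, 'FirstMeaningfulPaint') == 1500  # 1.5 s -> 1500 ms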
|
nilq/baby-python
|
python
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import simplejson as json
from alipay.aop.api.constant.ParamConstants import *
class FaceSearchUserInfo(object):
def __init__(self):
self._customuserid = None
self._merchantid = None
self._merchantuid = None
self._score = None
@property
def customuserid(self):
return self._customuserid
@customuserid.setter
def customuserid(self, value):
self._customuserid = value
@property
def merchantid(self):
return self._merchantid
@merchantid.setter
def merchantid(self, value):
self._merchantid = value
@property
def merchantuid(self):
return self._merchantuid
@merchantuid.setter
def merchantuid(self, value):
self._merchantuid = value
@property
def score(self):
return self._score
@score.setter
def score(self, value):
self._score = value
def to_alipay_dict(self):
params = dict()
if self.customuserid:
if hasattr(self.customuserid, 'to_alipay_dict'):
params['customuserid'] = self.customuserid.to_alipay_dict()
else:
params['customuserid'] = self.customuserid
if self.merchantid:
if hasattr(self.merchantid, 'to_alipay_dict'):
params['merchantid'] = self.merchantid.to_alipay_dict()
else:
params['merchantid'] = self.merchantid
if self.merchantuid:
if hasattr(self.merchantuid, 'to_alipay_dict'):
params['merchantuid'] = self.merchantuid.to_alipay_dict()
else:
params['merchantuid'] = self.merchantuid
if self.score:
if hasattr(self.score, 'to_alipay_dict'):
params['score'] = self.score.to_alipay_dict()
else:
params['score'] = self.score
return params
@staticmethod
def from_alipay_dict(d):
if not d:
return None
o = FaceSearchUserInfo()
if 'customuserid' in d:
o.customuserid = d['customuserid']
if 'merchantid' in d:
o.merchantid = d['merchantid']
if 'merchantuid' in d:
o.merchantuid = d['merchantuid']
if 'score' in d:
o.score = d['score']
return o
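# Round-trip sketch (all values are illustrative): these generated model
# classes convert between plain dicts and attribute access for Alipay
# API payloads.
if __name__ == "__main__":
    info = FaceSearchUserInfo.from_alipay_dict(
        {'customuserid': 'u-001', 'merchantid': 'm-001',
         'merchantuid': 'mu-001', 'score': 0.98})
    print(info.to_alipay_dict())
    # {'customuserid': 'u-001', 'merchantid': 'm-001', 'merchantuid': 'mu-001', 'score': 0.98}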
|
nilq/baby-python
|
python
|
import torch
import torch.nn as nn
class SLAF(nn.Module):
    """Self-learnable activation function: a degree-(k-1) polynomial of
    the input, f(x) = sum_i c_i * x**i, with learnable coefficients c_i."""
    def __init__(self, k=2):
        super().__init__()
        self.k = k
        # one learnable coefficient per polynomial term, all initialized to 1.0
        self.coeff = nn.ParameterList(
            [nn.Parameter(torch.tensor(1.0)) for _ in range(k)])
    def forward(self, x):
        # use a distinct loop variable; the original reused `k`, shadowing self.k
        out = sum(self.coeff[i] * torch.pow(x, i) for i in range(self.k))
        return out
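# A minimal usage sketch (the layer sizes below are illustrative): with
# all coefficients initialized to 1.0, SLAF(k=3) starts out computing
# 1 + x + x**2 elementwise and learns the coefficients during training.
if __name__ == "__main__":
    act = SLAF(k=3)
    x = torch.linspace(-1.0, 1.0, 5)
    print(act(x))  # 1 + x + x**2 at initialization
    model = nn.Sequential(nn.Linear(4, 8), SLAF(), nn.Linear(8, 1))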
|
nilq/baby-python
|
python
|
# these imports are required by the model definition below
from keras.models import Model
from keras.layers import Input, Concatenate
from keras.layers import Conv2D, MaxPooling2D
from keras.optimizers import Adam
#import tensorflow as tf
import numpy as np
import sys
import os
import cv2
import keras.backend as K
import math
if len(sys.argv) == 2:
dataset = sys.argv[1]
else:
print('usage: python3 test.py A(or B)')
exit()
print('dataset:', dataset)
train_path = './data/formatted_trainval/shanghaitech_part_' + dataset + '_patches_9/train/'
train_den_path = './data/formatted_trainval/shanghaitech_part_' + dataset + '_patches_9/train_den/'
val_path = './data/formatted_trainval/shanghaitech_part_' + dataset + '_patches_9/val/'
val_den_path = './data/formatted_trainval/shanghaitech_part_' + dataset + '_patches_9/val_den/'
img_path = './data/original/shanghaitech/part_' + dataset + '_final/test_data/images/'
den_path = './data/original/shanghaitech/part_' + dataset + '_final/test_data/ground_truth_csv/'
def data_pre_train():
print('loading data from dataset ', dataset, '...')
train_img_names = os.listdir(train_path)
img_num = len(train_img_names)
train_data = []
for i in range(img_num):
if i % 100 == 0:
print(i, '/', img_num)
name = train_img_names[i]
#print(name + '****************************')
img = cv2.imread(train_path + name, 0)
img = np.array(img)
img = (img - 127.5) / 128
#print(img.shape)
den = np.loadtxt(open(train_den_path + name[:-4] + '.csv'), delimiter = ",")
den_quarter = np.zeros((int(den.shape[0] / 4), int(den.shape[1] / 4)))
#print(den_quarter.shape)
        # 4x4 block-sum: downsample the density map to the network's output
        # size (distinct loop variables; the original reused `i` from the
        # outer image loop)
        for r in range(len(den_quarter)):
            for c in range(len(den_quarter[0])):
                for p in range(4):
                    for q in range(4):
                        den_quarter[r][c] += den[r * 4 + p][c * 4 + q]
train_data.append([img, den_quarter])
print('load data finished.')
return train_data
def data_pre_test():
print('loading test data from dataset', dataset, '...')
img_names = os.listdir(img_path)
img_num = len(img_names)
data = []
for i in range(img_num):
if i % 50 == 0:
print(i, '/', img_num)
name = 'IMG_' + str(i + 1) + '.jpg'
#print(name + '****************************')
img = cv2.imread(img_path + name, 0)
img = np.array(img)
img = (img - 127.5) / 128
#print(img.shape)
den = np.loadtxt(open(den_path + name[:-4] + '.csv'), delimiter = ",")
den_quarter = np.zeros((int(den.shape[0] / 4), int(den.shape[1] / 4)))
#print(den_quarter.shape)
        # same 4x4 block-sum downsampling as in data_pre_train()
        for r in range(len(den_quarter)):
            for c in range(len(den_quarter[0])):
                for p in range(4):
                    for q in range(4):
                        den_quarter[r][c] += den[r * 4 + p][c * 4 + q]
#print(den.shape)
data.append([img, den_quarter])
print('load data finished.')
return data
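# A behavior-equivalent alternative to the quadruple loops above (a sketch,
# assuming the density map dimensions are multiples of 4, as they are for
# these preprocessed patches): sum 4x4 blocks with a reshape instead.
#
# h, w = den.shape[0] // 4, den.shape[1] // 4
# den_quarter = den[:h * 4, :w * 4].reshape(h, 4, w, 4).sum(axis=(1, 3))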
data = data_pre_train()
data_test = data_pre_test()
np.random.shuffle(data)
x_train = []
y_train = []
for d in data:
x_train.append(np.reshape(d[0], (d[0].shape[0], d[0].shape[1], 1)))
y_train.append(np.reshape(d[1], (d[1].shape[0], d[1].shape[1], 1)))
x_train = np.array(x_train)
y_train = np.array(y_train)
x_test = []
y_test = []
for d in data_test:
x_test.append(np.reshape(d[0], (d[0].shape[0], d[0].shape[1], 1)))
y_test.append(np.reshape(d[1], (d[1].shape[0], d[1].shape[1], 1)))
x_test = np.array(x_test)
y_test = np.array(y_test)
def maaae(y_true, y_pred):
    # absolute error on the total crowd count (sum over the density map)
    return abs(K.sum(y_true) - K.sum(y_pred))
def mssse(y_true, y_pred):
    # squared error on the total crowd count; the sqrt is taken after evaluation
    return (K.sum(y_true) - K.sum(y_pred)) * (K.sum(y_true) - K.sum(y_pred))
inputs = Input(shape = (None, None, 1))
conv_m = Conv2D(20, (7, 7), padding = 'same', activation = 'relu')(inputs)
conv_m = MaxPooling2D(pool_size = (2, 2))(conv_m)
conv_m = Conv2D(40, (5, 5), padding = 'same', activation = 'relu')(conv_m)
conv_m = MaxPooling2D(pool_size = (2, 2))(conv_m)
conv_m = Conv2D(20, (5, 5), padding = 'same', activation = 'relu')(conv_m)
conv_m = Conv2D(10, (5, 5), padding = 'same', activation = 'relu')(conv_m)
#conv_m = Conv2D(1, (1, 1), padding = 'same', activation = 'relu')(conv_m)
conv_s = Conv2D(24, (5, 5), padding = 'same', activation = 'relu')(inputs)
conv_s = MaxPooling2D(pool_size = (2, 2))(conv_s)
conv_s = Conv2D(48, (3, 3), padding = 'same', activation = 'relu')(conv_s)
conv_s = MaxPooling2D(pool_size = (2, 2))(conv_s)
conv_s = Conv2D(24, (3, 3), padding = 'same', activation = 'relu')(conv_s)
conv_s = Conv2D(12, (3, 3), padding = 'same', activation = 'relu')(conv_s)
#conv_s = Conv2D(1, (1, 1), padding = 'same', activation = 'relu')(conv_s)
conv_l = Conv2D(16, (9, 9), padding = 'same', activation = 'relu')(inputs)
conv_l = MaxPooling2D(pool_size = (2, 2))(conv_l)
conv_l = Conv2D(32, (7, 7), padding = 'same', activation = 'relu')(conv_l)
conv_l = MaxPooling2D(pool_size = (2, 2))(conv_l)
conv_l = Conv2D(16, (7, 7), padding = 'same', activation = 'relu')(conv_l)
conv_l = Conv2D(8, (7, 7), padding = 'same', activation = 'relu')(conv_l)
#conv_l = Conv2D(1, (1, 1), padding = 'same', activation = 'relu')(conv_l)
conv_merge = Concatenate(axis = 3)([conv_m, conv_s, conv_l])
result = Conv2D(1, (1, 1), padding = 'same')(conv_merge)
model = Model(inputs = inputs, outputs = result)
adam = Adam(lr = 1e-4)
model.compile(loss = 'mse', optimizer = adam, metrics = [maaae, mssse])
best_mae = 10000
best_mae_mse = 10000
best_mse = 10000
best_mse_mae = 10000
for i in range(200):
model.fit(x_train, y_train, epochs = 3, batch_size = 1, validation_split = 0.2)
score = model.evaluate(x_test, y_test, batch_size = 1)
score[2] = math.sqrt(score[2])
print(score)
if score[1] < best_mae:
best_mae = score[1]
best_mae_mse = score[2]
json_string = model.to_json()
open('model.json', 'w').write(json_string)
model.save_weights('weights.h5')
if score[2] < best_mse:
best_mse = score[2]
best_mse_mae = score[1]
print('best mae: ', best_mae, '(', best_mae_mse, ')')
print('best mse: ', best_mse, '(', best_mse_mae, ')')
|
nilq/baby-python
|
python
|
import torch
import torchvision
def get_loader(root='.', batch_size=512):
transform = torchvision.transforms.Compose([
torchvision.transforms.ToTensor(),
torchvision.transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)),
])
train_dataset = torchvision.datasets.CIFAR10(root, train=True,
download=True,
transform=transform)
test_dataset = torchvision.datasets.CIFAR10(root, train=False,
download=True,
transform=transform)
train_loader = torch.utils.data.DataLoader(train_dataset,
batch_size=batch_size,
shuffle=True,
num_workers=8,
drop_last=True)
test_loader = torch.utils.data.DataLoader(test_dataset,
batch_size=batch_size,
num_workers=8)
return (train_loader, test_loader)
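# A minimal sketch of consuming the loaders (the batch size is reduced
# here purely for illustration):
if __name__ == "__main__":
    train_loader, test_loader = get_loader(batch_size=64)
    images, labels = next(iter(train_loader))
    print(images.shape)  # torch.Size([64, 3, 32, 32])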
|
nilq/baby-python
|
python
|
# -*- coding: utf-8 -*-
from guillotina.factory import serialize # noqa
from guillotina.factory.app import make_app # noqa
from guillotina.factory.content import ApplicationRoot # noqa
from guillotina.factory.content import Database # noqa
from guillotina.factory import security # noqa
|
nilq/baby-python
|
python
|
import typing
from kubernetes import client
from kubernetes import config
from kubernetes.client.rest import ApiException
from kuber import definitions
from kuber import versioning
def load_access_config(in_cluster: bool = False, **kwargs):
"""
Initializes the kubernetes library from either a kube configuration
file for external access or using mounted configuration data for
access from within a pod in the cluster.
    :param in_cluster:
        Whether to initialize access from within the cluster. By
        default the access will be loaded from a kube config file for
        external access to a cluster.
    :param kwargs:
        Optional arguments to pass to the external kube-config-based
        initialization process.
"""
if in_cluster:
return config.load_incluster_config()
return config.load_kube_config(**kwargs)
def get_version_from_cluster(
fallback: typing.Union["versioning.KubernetesVersion", str] = None
) -> versioning.KubernetesVersion:
"""
Returns the KubernetesVersion object associated with the configured
cluster. If the cluster version cannot be determined, the specified
fallback version will be returned instead. If no fallback is specified
the earliest (oldest) version available in the kuber library installation
will be used instead.
"""
versions = versioning.get_all_versions()
default = fallback or versions[0]
if not isinstance(default, versioning.KubernetesVersion):
default = versioning.get_version_data(fallback)
try:
response: client.VersionInfo = client.VersionApi().get_code()
major = response.major
minor = response.minor.rstrip("+")
except ApiException:
return default
return next((v for v in versions if v.major == major and v.minor == minor), default)
def execute(
action: str,
resource: "definitions.Resource",
names: typing.List[str],
namespace: str = None,
api_client: client.ApiClient = None,
api_args: typing.Dict[str, typing.Any] = None,
) -> typing.Optional["definitions.ExecutionResponse"]:
"""
Executes the specified action on the given resource object using
the kubernetes API client.
:param action:
The CRUD operation to carry out for the given resource.
:param resource:
Kuber resource on which to carry out the operation.
:param names:
Names of potential kubernetes python client functions that can be
called to carry out this operation.
:param namespace:
Kubernetes namespace in which this execution will take place.
:param api_client:
Kubernetes python client API connection to use when carrying out
the execution.
:param api_args:
Keyword arguments to pass through to the kubernetes python client
execution call.
"""
api = resource.get_resource_api(api_client=api_client)
name = next((n for n in names if hasattr(api, n)), None)
if name is None:
raise ValueError(
f"{action.capitalize()} function not found for resource "
f"{resource.__class__.__name__}"
)
func = getattr(api, name)
func_variables = func.__code__.co_varnames
args = (api_args or {}).copy()
ns = namespace or getattr(resource.metadata, "namespace", None)
if ns and "namespace" in func_variables:
args["namespace"] = ns
    return typing.cast(
        typing.Optional[definitions.ExecutionResponse],
        func(**args),
    )
def to_camel_case(source: str) -> str:
"""Converts the specified source string from snake_case to camelCase."""
parts = source.split("_")
prefix = parts.pop(0)
suffix = "".join([p.capitalize() for p in parts])
return f"{prefix}{suffix}"
def to_kuber_dict(kube_api_entity: typing.Union[typing.Any, typing.Dict]) -> dict:
"""
Converts a Kubernetes client object, or serialized dictionary of
configuration values to the kuber representation, which enforces
camelCase and omits any keys with `None` values.
:param kube_api_entity:
Either a kubernetes Python client object or a dictionary that
contains keys and value for a kubernetes resource configuration.
"""
entity = kube_api_entity
if not isinstance(entity, dict):
entity = entity.to_dict()
return {to_camel_case(k): v for k, v in entity.items() if v is not None}
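# Quick illustration of the two helpers above (the values are made up):
if __name__ == "__main__":
    print(to_camel_case("termination_grace_period_seconds"))  # terminationGracePeriodSeconds
    print(to_kuber_dict({"pod_name": "web", "node_name": None}))  # {'podName': 'web'}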
|
nilq/baby-python
|
python
|
# coding: utf-8
from abc import ABCMeta
from config.config_loader import logger
from mall_spider.spiders.actions.action import Action
from mall_spider.spiders.actions.context import Context
class DefaultAction(Action):
__metaclass__ = ABCMeta
def on_error(self, context, exp):
task = context.get(Context.KEY_CURRENT_TASK, '')
good = context.get(Context.KEY_GOOD_DICT, dict())
task_id = None
data = None
if task:
task_id = task.id
data = task.raw_data
logger.error(u'context key:[%s],action:[%s],task_id:[%s],good:[%s],execute error,data:%s,exception:%s',
context.context_key, self.__class__.__name__, task_id, good, data, exp)
|
nilq/baby-python
|
python
|
import pygame
from Player import PlayerBase  # imported by the original module but unused in this fragment
class Player2():
    def __init__(self, image, speed=(0, 0), pos=(0, 0)):
        # avoid mutable default arguments; keep the movement state that the
        # caller passes in (speed and pos were previously ignored)
        self.image = pygame.image.load(image)
        self.speed = list(speed)
        self.pos = list(pos)
|
nilq/baby-python
|
python
|
import theano.tensor as T
class Regularizer(object):
def __call__(self, **kwargs):
raise NotImplementedError
class L2Regularizer(Regularizer):
def __call__(self, alpha, params):
return alpha * l2_sqr(params) / 2.
def l2_sqr(params):
sqr = 0.0
for p in params:
sqr += T.sum((p ** 2))
return sqr
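# A minimal usage sketch (the shape and alpha are illustrative): build
# the symbolic L2 penalty over a list of shared parameters and evaluate it.
#
# import numpy as np
# import theano
# W = theano.shared(np.ones((2, 2), dtype='float64'))
# penalty = L2Regularizer()(alpha=0.01, params=[W])
# print(penalty.eval())  # 0.01 * 4 / 2 = 0.02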
|
nilq/baby-python
|
python
|
# -*- coding: utf-8 -*-
# pylint: disable=missing-docstring
from __future__ import absolute_import, unicode_literals
# 3rd party imports
import pytest
from six import string_types
# project imports
from restible.url_params import from_string
@pytest.mark.parametrize('value,expected_type', (
('123', int),
('3.14159', float),
('value', string_types),
))
def test_coerces_to_the_right_type(value, expected_type):
result = from_string(value)
assert isinstance(result, expected_type)
|
nilq/baby-python
|
python
|
from frangiclave.bot.templates.base import make_section, DIVIDER, URL_FORMAT
from frangiclave.compendium.deck import Deck
def make_deck(deck: Deck):
draw_messages = '\n'.join(f'• <https://www.frangiclave.net/element/{dm.element.element_id}/|{dm.element.element_id}>: {dm.message}' for dm in deck.draw_messages)
cards = '\n'.join(f'• <https://www.frangiclave.net/element/{card.element_id}/|{card.element_id}>' for card in deck.cards)
default_card = f'<https://www.frangiclave.net/element/{deck.default_card.element_id}/|{deck.default_card.element_id}>' if deck.default_card else 'None'
return [
make_section('*Deck: {}*'.format(URL_FORMAT.format('deck', deck.deck_id))),
DIVIDER,
        # render the cards and default card computed above (previously unused)
        make_section(
            f'*_Label:_* {deck.label}\n'
            f'*_Description:_* {deck.description}\n'
            f'*_Draw Messages:_* \n{draw_messages}\n'
            f'*_Cards:_* \n{cards}\n'
            f'*_Default Card:_* {default_card}'
        )
]
|
nilq/baby-python
|
python
|
import collections
import logging
import re
import socket
import subprocess
def json_update(d, u):
    """Recursively merge mapping `u` into mapping `d` and return `d`."""
for k, v in u.items():
if isinstance(v, collections.abc.Mapping):
d[k] = json_update(d.get(k, {}), v)
else:
d[k] = v
return d
def remove_dict_null(d: dict):
"""Remove `None` value in dictionary."""
return {k: v for k, v in d.items() if v is not None}
def get_ip():
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
try:
# doesn't even have to be reachable
s.connect(('10.255.255.255', 1))
ip = s.getsockname()[0]
except socket.error:
ip = '127.0.0.1'
finally:
s.close()
return ip
def get_device(device: str):
"""Get device (cuda and device order) from device name string.
Args:
device: Device name string.
Returns:
Tuple[bool, Optional[int]]: A tuple containing flag for CUDA device and CUDA device order. If the CUDA device
flag is `False`, the CUDA device order is `None`.
"""
# obtain device
device_num = None
if device == 'cpu':
cuda = False
else:
# match something like cuda, cuda:0, cuda:1
matched = re.match(r'^cuda(?::([0-9]+))?$', device)
if matched is None: # load with CPU
logging.warning('Wrong device specification, using `cpu`.')
cuda = False
else: # load with CUDA
cuda = True
device_num = int(matched.groups()[0])
if device_num is None:
device_num = 0
return cuda, device_num
def check_process_running(port: int):
    """Return the PID of a process listening on `port`, or `None` if there is none."""
    args = ['lsof', '-t', f'-i:{port}']
    try:
        # `text=True` already implies `universal_newlines=True`; passing both was redundant
        pid = int(subprocess.check_output(args, text=True, stderr=subprocess.DEVNULL))
    except subprocess.CalledProcessError:
        # process not found
        pid = None
    return pid
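# Quick illustration of the pure helpers above (the values are made up):
if __name__ == "__main__":
    base = {'a': 1, 'nested': {'x': 1}}
    json_update(base, {'nested': {'y': 2}})
    print(base)  # {'a': 1, 'nested': {'x': 1, 'y': 2}}
    print(get_device('cuda:1'))  # (True, 1)
    print(get_device('cpu'))     # (False, None)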
|
nilq/baby-python
|
python
|
# -*- coding: utf-8 -*-
import re
import scrapy
from locations.items import GeojsonPointItem
class GuzmanyGomezSpider(scrapy.Spider):
name = "guzmany_gomez"
item_attributes = {"brand": "Guzman Y Gomez"}
allowed_domains = ["guzmanygomez.com.au"]
start_urls = [
"https://www.guzmanygomez.com.au/wp-json/wpapi/v2/getall",
]
def parse(self, response):
data = response.json()
for i in data:
properties = {
"ref": i["OrderLink"],
"name": i["Name"],
"addr_full": i["Address1"],
"city": i["City"],
"state": i["State"],
"postcode": i["Postcode"],
"country": "AU",
"phone": i["Phone"],
"lat": i["Latitude"],
"lon": i["Longitude"],
}
yield GeojsonPointItem(**properties)
|
nilq/baby-python
|
python
|