| content (string, lengths 0–1.05M) | origin (string, 2 classes) | type (string, 2 classes) |
|---|---|---|
import sys
from twython import Twython
import time
import json
import os
import re
import dropbox
import subprocess
# Secret Keys
apiKey = ""
apiSecret = ""
accessToken = ""
accessTokenSecret = ""
api = Twython(apiKey,apiSecret,accessToken,accessTokenSecret)
tweet = api.get_mentions_timeline()
results = api.search(q="@project_mayo", count=10)
all_tweets = results['statuses']
song_List = []
reading = True
content = ""
for tw in all_tweets:
    if reading:
        if tw['text'].find("NAME:") != -1:
            song_List.append(tw['text'])
        if tw['text'].find("DONE") == -1:
            # stop collecting at the first mention without the DONE marker
            reading = False
song_List.reverse()
for x in song_List:
content = content + x
#everything is in content
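# Expected combined format (inferred from the slicing below), e.g.:
#   "... NAME: my_song TO: alice! c d e f g DONE"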
songNameLoc = content.find("NAME:")+6
toLoc = content.find("TO")
songName = content[songNameLoc:toLoc-1]
print("Song Name: "+songName)
toLoc += 4
#print(content[toLoc])
recp = content[toLoc:content.find("!")]
print("To:" + recp)
end = content.find("DONE")
notes = content[content.find("!")+2:end]
print(notes)
#The song that is going to be played
#Name of file
songName = songName.replace(" ","_")
#conversion section for alda language (tweet text arrives HTML-escaped,
#so unescape the octave-change characters)
notes = notes.replace("&lt;", "<")
notes = notes.replace("&gt;", ">")
#printing songName and writing to alda
print("Playing:" + songName)
test = songName + ".txt"
test_object = open(test,"w")
test_object.write(notes)
test_object.close()
os.system("./alda play -f "+test)
#record system audio to a file
if len(songName) > 0:
    #upload to Dropbox
    dbx = dropbox.Dropbox("")  # access token redacted; fill in like the keys above
    file = open(test, "rb")  # the file written above; files_upload expects bytes
    dbx.files_upload(file.read(), "/" + songName + ".txt")
#reply to user
tweetStr="Hey,"+recp+"! Here is a link to "+ songName+" , a song made just for you! https://www.dropbox.com/home/Apps/projectmayo?preview="+songName+".txt"
api.update_status(status=tweetStr)
|
nilq/baby-python
|
python
|
# -*- coding: utf-8 -*-
# @Author: Sam Zhang
# @Date: 2020-04-12 15:36:23
# @Last Modified by: Sam Zhang
# @Last Modified time: 2020-04-14 11:27:50
from flask import render_template, request, redirect, url_for
from ..utils import google_search
from . import google
@google.route('/', methods=['GET', 'POST'])
def index():
if request.method == 'POST':
return redirect(url_for('google.search', q=request.form.get('query'), page=0))
return render_template('google/index.html')
@google.route('/s/')
def search():
word = request.args.get('q')
page = int(request.args.get('page', 0))
return render_template('google/search.html', keyword=word, cur=page + 1)
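# Results fragment endpoint: presumably fetched asynchronously by
# google/search.html (rendered above) to fill in the actual results.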
@google.route('/s/s/')
def search_s():
word = request.args.get('q')
page = int(request.args.get('page', 0))
    results, pages = google_search(word=word, pn=page)
return render_template('google/search_s.html', results=results, pages=pages, keyword=word, cur=page + 1)
|
nilq/baby-python
|
python
|
""" Merge spotify playlists. """
import argparse
import logging
from playlist import Playlist
def spunify(destination_playlist: str, source_playlists: set[str]):
"""
Merge the source playlists into the destination playlist.
:param destination_playlist: The url of the playlist where the
tracks will be merged.
:param source_playlists: The urls of the playlists to be merged
into the destination.
"""
logging.info(f"Merging {source_playlists} into {destination_playlist}.")
destination: Playlist = Playlist(destination_playlist)
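    # Playlist is assumed to implement __iadd__, so `destination += Playlist(source)`
    # merges tracks in place (that contract lives in the unshown playlist module).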
for source in source_playlists:
destination += Playlist(source)
def parse_args():
parser = argparse.ArgumentParser(
description="Merge spotify playlists",
)
group = parser.add_mutually_exclusive_group()
group.add_argument(
"-v",
"--verbose",
action="store_const",
const=logging.INFO,
help="Increase verbosity",
)
group.add_argument(
"-vv",
"--very-verbose",
action="store_const",
const=logging.DEBUG,
help="Increase verbosity further",
)
parser.add_argument(
"-d",
"--destination",
help="The ID, URI, or URL of the playlist where the tracks will be merged",
required=True,
type=str,
)
parser.add_argument(
"-s",
"--sources",
help="The URLs, URIs, or IDs of the playlists to be merged into the destination",
nargs="+",
required=True,
type=str,
)
return parser.parse_args()
def main():
"""Main function."""
args = parse_args()
logging.basicConfig(
level=args.verbose or args.very_verbose or logging.WARNING,
format=f"%(asctime)s [%(module)s]: %(message)s",
datefmt="%I:%M:%S %p",
)
spunify(args.destination, set(args.sources))
if __name__ == "__main__":
main()
|
nilq/baby-python
|
python
|
#!/usr/bin/python3
import random
import math
import numpy as np
"""
Constants
"""
variance = 0.01
CHECKERBOARD_SIZE = 0.2
"""
Functions
"""
# def get_swiss_roll_dataset(numOfSamples):
# sample_list = []
# label_list = []
# for x in range(0, numOfSamples):
# # noise_test = random.random()
# # if noise_test < 0.1: # Add gaussian noise
# # noise_1 = numpy.random.normal(0, 0.1)
# # noise_2 = numpy.random.normal(0, 0.1)
# # noise_3 = numpy.random.normal(0, 0.1)
# # sample_list.append([noise_1, noise_2, noise_3])
# # continue
# p_i = random.random()
# q_i = random.random()
# t_i = math.pi * 3 / 2 * (1 + 2 * p_i)
#
# x_i = [np.random.normal(t_i * math.cos(t_i), variance),
# np.random.normal(t_i * math.sin(t_i), variance),
# np.random.normal(30 * q_i, variance),]
# sample_list.append(x_i)
# label_list.append(label)
# return sample_list, label_list
def get_swiss_roll_dataset_with_labels2(n):
t = (3 * math.pi / 2) * (1 + 2 * np.random.random(n))
height = 30 * np.random.random(n)
X = np.array([t * np.cos(t), height, t * np.sin(t)]) + variance * np.random.normal(0, 1, n * 3).reshape(3, n)
labels = np.fmod(np.around(t / 2) + np.around(height / 12), 2)
return X.T.tolist(), labels.reshape(n, 1).tolist()
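# Shape check (illustrative): X, y = get_swiss_roll_dataset_with_labels2(1000)
# -> X: list of 1000 [x, y, z] samples; y: 1000 x 1 list of 0/1 labels forming
#    a checkerboard over the (t, height) manifold coordinates.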
# def get_broken_swiss_roll_dataset(numOfSamples):
# sample_list = []
# for x in range(0, numOfSamples):
# # noise_test = random.random()
# # if noise_test < 0.1: # Add gaussian noise
# # noise_1 = numpy.random.normal(0, 0.1)
# # noise_2 = numpy.random.normal(0, 0.1)
# # noise_3 = numpy.random.normal(0, 0.1)
# # sample_list.append([noise_1, noise_2, noise_3])
# # continue
# while True:
# p_i = random.random()
# q_i = random.random()
# t_i = math.pi * 3 / 2 * (1 + 2 * p_i)
# if p_i >= (4 / 5) or p_i <= (2 / 5):
# break
#
# x_i = [np.random.normal(t_i * math.cos(t_i), variance),
# np.random.normal(t_i * math.sin(t_i), variance),
# np.random.normal(30 * q_i, variance)]
# sample_list.append(x_i)
# return sample_list
def get_broken_swiss_roll_dataset_with_label2(n):
t1 = (3 * math.pi / 2) * (1 + 2 * np.random.random(math.floor(n / 2)) * 0.4)
t2 = (3 * math.pi / 2) * (1 + 2 * (np.random.random(math.ceil(n / 2)) * 0.4 + 0.6))
t = np.append(t1, t2)
height = 30 * np.random.random(n)
X = np.array([t * np.cos(t), height, t * np.sin(t)]) + variance * np.random.normal(0, 1, n * 3).reshape(3, n)
labels = np.fmod(np.around(t / 2) + np.around(height / 12), 2)
return X.T.tolist(), labels.reshape(n, 1).tolist()
# def get_helix_dataset(numOfSamples):
# sample_list = []
# result = dict()
# for x in range(0, numOfSamples):
# # noise_test = random.random()
# # if noise_test < 0.1: # Add gaussian noise
# # noise_1 = numpy.random.normal(0, 0.1)
# # noise_2 = numpy.random.normal(0, 0.1)
# # noise_3 = numpy.random.normal(0, 0.1)
# # sample_list.append([noise_1, noise_2, noise_3])
# # continue
# p_i = random.random()
#
# x_i = [np.random.normal((2 + math.cos(8 * p_i)) * math.cos(p_i), variance), np.random.normal((2 + math.cos(8 * p_i)) * math.sin(p_i), variance), np.random.normal(math.sin(8 * p_i), variance)]
# sample_list.append(x_i)
# return sample_list
def get_helix_dataset_with_label2(n):
    t = np.random.random(n) * 2 * math.pi
    X = [(2 + np.cos(8 * t)) * np.cos(t), (2 + np.cos(8 * t)) * np.sin(t), np.sin(8 * t)] + variance * np.random.normal(0, 1, n * 3).reshape(3, n)
    labels = np.fmod(np.around(t * 1.5), 2)
return X.T.tolist(), labels.reshape(n, 1).tolist()
# def get_twin_peaks(numOfSamples):
# sample_list = []
# for x in range(0, numOfSamples):
# # noise_test = random.random()
# # if noise_test < 0.1: # Add gaussian noise
# # noise_1 = numpy.random.normal(0, 0.1)
# # noise_2 = numpy.random.normal(0, 0.1)
# # noise_3 = numpy.random.normal(0, 0.1)
# # sample_list.append([noise_1, noise_2, noise_3])
# # continue
# p_i = random.random()
# q_i = random.random()
# x_i = [np.random.normal(1 - 2 * p_i, variance), np.random.normal(math.sin(math.pi - 2 * math.pi * p_i), variance), np.random.normal(math.tanh(3 - 6 * q_i), variance)]
# sample_list.append(x_i)
# return sample_list
# def get_twin_peaks_with_label(numOfSamples):
# sample_list = []
# label_list = []
# for x in range(0, numOfSamples):
# p_i = random.random()
# q_i = random.random()
#
# loc_p = int(p_i / CHECKERBOARD_SIZE) % 2
# loc_q = int(q_i / CHECKERBOARD_SIZE) % 2
# if (loc_p == loc_q):
# label = 1
# else:
# label = -1
#
# x_i = [np.random.normal(1 - 2 * p_i, variance),
# np.random.normal(math.sin(math.pi - 2 * math.pi * p_i), variance),
# np.random.normal(math.tanh(3 - 6 * q_i), variance)]
# sample_list.append(x_i)
# label_list.append(label)
# return sample_list, label_list
def get_twin_peaks_with_label2(n):
p = 1 - 2 * np.random.random(n)
q = 1 - 2 * np.random.random(n)
X = [p, q, np.sin(math.pi * p) * np.tanh(3 * q)] + variance * np.random.normal(0, 1, n * 3).reshape(3, n)
X[2] *= 10
labels = np.abs(np.fmod(np.sum(np.around((X.T + np.tile(np.amin(X, 1), (n, 1))) / 10), 1), 2))
return X.T, labels.reshape(n, 1)
def get_hd_dataset(numOfSamples):
sample_list = []
coef = []
for x in range(0, 5):
one_set_coef = []
for y in range(0, 5):
one_set_coef.append(random.random())
coef.append(one_set_coef)
for x in range(0, numOfSamples):
d_1 = random.random()
d_2 = random.random()
d_3 = random.random()
d_4 = random.random()
d_5 = random.random()
powers = []
for y in range(0, 5):
one_set_pow = [pow(d_1, random.random()), pow(d_2, random.random()), pow(d_3, random.random()), pow(d_4, random.random()), pow(d_5, random.random())]
powers.append(one_set_pow)
x_i = (np.mat(coef + powers) * np.mat([[d_1], [d_2], [d_3], [d_4], [d_5]])).transpose()
x_i = x_i.tolist()
sample_list.append(x_i[0])
return sample_list
# def get_hd_dataset(numOfSamples):
# sample_list = []
# coef = []
# for x in range(0, 5):
# one_set_coef = []
# for y in range(0, 5):
# one_set_coef.append(random.random())
# coef.append(one_set_coef)
# for x in range(0, numOfSamples):
# d_1 = random.random()
# d_2 = random.random()
# d_3 = random.random()
# d_4 = random.random()
# d_5 = random.random()
# powers = []
# for y in range(0, 5):
# one_set_pow = [pow(d_1, random.random()), pow(d_2, random.random()), pow(d_3, random.random()), pow(d_4, random.random()), pow(d_5, random.random())]
# powers.append(one_set_pow)
#
# x_i = (np.mat(coef + powers) * np.mat([[d_1], [d_2], [d_3], [d_4], [d_5]])).transpose()
# x_i = x_i.tolist()
# sample_list.append(x_i[0])
# return sample_list
def get_hd_dataset_with_label2(n):
x1 = np.random.random(n)
x2 = np.random.random(n)
x3 = np.random.random(n)
x4 = np.random.random(n)
x5 = np.random.random(n)
X = [np.cos(x1), np.tanh(3 * x2), x1 + x3, x4 * np.sin(x2), np.sin(x1 + x5), x5 * np.cos(x2), x5 + x4, x2, x3 * x4, x1]
X += variance * np.random.normal(0, 1, n * 10).reshape(10, n)
labels = np.fmod(np.around(x1) + np.around(x2) + np.around(x3) + np.around(x4) + np.around(x5) + 1, 2)
return X.T.tolist(), labels.reshape(n, 1).tolist()
# def get_hd_dataset_with_label2_2(n):
# seed = np.random.random(math.ceil(math.pow(n, 1/5)))
# A = np.array([p for p in itertools.product(seed, repeat = 5)][0:n]).T
# X = [np.cos(A[0]), np.tanh(3 * A[1]), A[0] + A[2], A[3] * np.sin(A[1]), np.sin(A[0] + A[4]), A[4] * np.cos(A[1]), A[4] + A[3], A[1], A[2] * A[3], A[0]]
# X += variance * np.random.normal(0, 1, n * 10).reshape(10, n)
# labels = np.fmod(np.sum(np.around(A) + 1, 0), 2)
# return X.T.tolist(), labels.reshape(n, 1).tolist()
|
nilq/baby-python
|
python
|
#take1
pre = '581634BED11C647479ED07B47702E21EFE8147CFA57BF08E105A81852F70CF2ADD01B9E8191622AA5CAB3129D7B5F9EB007C2AA98E48E6E4793BD624F94D40B6B07072A17748A96153C6681716D8593261DD8E5CD6ADEE644D432BD282CB521FA4EBF64109D79A60EFA887D7E8EBE5EB9B43F16F84CBC705D12D1CD429B0166B'
get = ['7rJ1loTd72z8fyA71JWAYODTEPL1TEIbBSXd0OmGEaO/DqFLRtyWDsW2ufaD5+iN', # base64 encoded strings
'7rJ1loTd72z8fyA71JWAYBcI5GLDQhS9yDwtgf6GXCs=',
'Ptk0New/J17zkRTIgf7tw4zn6IteuuSgcXTv7BLAZ5yrC1saCtfHi/yeeQWELZZ8F1UZZVsmuGCeepAlTPsIKM1ZM2CxXHxoMGKrrgO8B+MT/Gs0Xfl6YwUU096M6gzHlccfqMeErSIvTyNbX23TQaXE1jtl6Ss0ey2Yf2GFyrA=',
'Ptk0New/J17zkRTIgf7tw8QWH1rgH1VP8W6DbbUZybrFjaNjR0DIR6Za9UqeXvtjZcz6j6SDS1dTzlCD1SeMlnHSZvcOYPPEgX15LQkY2ZmhJX1ELaNJNnrIFcDrAPsxFXWUsMo+lRLXr+mok7ViZjkAYorL+f1WJSgUxmjJ+eya1vfyosSns4/flFnHOuNkMlZ65BzOFM17Tx38tr4sVl7ZLvgHIG+DigG2TqFW+ig=',
'7rJ1loTd72z8fyA71JWAYNONDlTZIv7f8uLs0jIlx4Ig6VG2ktjC9h7yu5YIY1cZ',
'7rJ1loTd72z8fyA71JWAYAFdwiG4YbPcCllCmo4tAnShrHYxbgnO8qu3VuY0Ze8Z',
'7rJ1loTd72z8fyA71JWAYA3Rwzrq5FhQJOlLCjVMJSi8uvj/Z9RxLWsU8PaFmIip',
'7rJ1loTd72z8fyA71JWAYP/2e2AxvohF0M0ghFKYLMRBhrZ40X/O84QBEpUAfCtQ',
'7rJ1loTd72z8fyA71JWAYMf/gNdLq0aAawF8iilHOns=',
'7rJ1loTd72z8fyA71JWAYMT9Qbs/xpyk4erVgqzmr6NVHqWKcr11jKZghC6r7szN',
'7rJ1loTd72z8fyA71JWAYPr0QJ5kM+PPZBm8m2dHTT6/19vrMBJs7p2kusQbHQKR',
'Ptk0New/J17zkRTIgf7tw7Sxeg0UNsAnyJVTxRfosHiDFvkPRgp8qYELFEi/R48IRD+FuvRQZAt97zQqyjqKSWmy69orh0iFJ69LfAkGzkUPtGdjPv9P2w5Y9uMHYnSa75Ejr0XajojBm4dvaiaDJ5TUFnocwH3NjB32QWkVvd4IQYJfun9ADnjst+YAPL9fqUhSUstLN7Yrjewign10np8lAo4293EOSRPfVAuqS9mbopr6QNfQ7SvGsBsEaWx/jffMqj/MmMF3/8bvhutL0TcToQ7aX1ame0OFvpXkAm9idPCAqDb2Vv4PoQQlVhs0NVEa7YagNO/BDTnSvKsSEMByNyZQBRlWYo87lk6R8F+l1XGHm4WI6exMhyjON1/BcGRX8cD1K50vtz3kZKMK1w==',
'6YglSkdvgi00pZ5BzRaRVEZDW6IUKESKsbZEbbtTVH/z/2DAPPv6n1CuNB1V8Zn/jIVuAympdkGyrCFj2sfqJQ/01IyBukKzjlyXqP9n3orbjPqBi7fPk3rpWBt5CzToK1jtmlzZaK2VQmiNisUIVCLf9B/kjfJaBpJffioX59YWXKJBRmSL3QsRQxtiHNWi',
'6YglSkdvgi00pZ5BzRaRVIu6eahyBzHSMO+0WhTYbSlk0fRXnq8L7DizQtaXJQlqF0RERqPX0rWNdCsN1kZBykPgDgA72AhSgtprWl4j0oUmI+YYBQwZ49bZZCEg7aDU',
'7rJ1loTd72z8fyA71JWAYGL5lBHgr3JKoX39jY1sXHaucSmo+/lIAktIZEv2iUmK',
'7rJ1loTd72z8fyA71JWAYPnFANhbvVT8MAAAmR2v2FniebSPoB+bxIFy0jbQL8A+',
'7rJ1loTd72z8fyA71JWAYOy4oU9K7bYUSQSL9p8UnxjyhkP6rd4KFUFs+3rw7RFn0hz/AL9XorStDyD2HkzMLLWx24lFB/kWKShEi1ZfCH7o9Qdv04+TD9outna00tFs',
'7rJ1loTd72z8fyA71JWAYBd6EqoP+S+JhYbfMfdVS4Yyu3pHtWIRhhkaSSHE1pdxVI+ETFAFa2nfjByU0tuazAT8M3LyZvAsT7SQ5ti6cMr1/DikToHv0+lta1Zvv8qeHQnXDU4Uhz+mjr9ZUJXgwHbnfTdFgTjFQ+V315BtbL7PB8bzpUNdLaN4utbYIZsW',
]
import base64  # str.decode('base64') is Python 2 only
print(base64.b64decode(get[0]).hex())
#=> eeb2759684ddef6cfc7f203bd4958060 e0d310f2f54c421b0525ddd0e98611a3 bf0ea14b46dc960ec5b6b9f683e7e88d
# 48-byte hex-encoded string; the repeated first 16 bytes look like an IV?
print(base64.b64decode(get[1]).hex())
#=> eeb2759684ddef6cfc7f203bd4958060 1708e462c34214bdc83c2d81fe865c2b
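# Quick check (a sketch): count distinct first blocks across all ciphertexts;
# a single shared 16-byte prefix would point at a fixed IV / identical header.
blobs = [base64.b64decode(g) for g in get]
print(len({b[:16] for b in blobs}))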
# take2
# 56,73,E5,AC,9A,4E,AF,FC,26,65,75,A7,39,B0,4B,F8,
# 16,04,A7,53,17,56,1B,BE,82,77,6B,08,DB,89,E3,3F,
# 99,FD,ED,BA,89,F0,FD,B3,B0,FD,F3,51,AE,7E,40,D4,
# 39,30,9B,C6,B9,98,60,A2,29,B2,BC,9E,FA,3F,E9,39,
# D6,2F,81,E9,48,38,EF,18,82,01,B8,95,8B,E6,E4,A8,
# 8E,6F,35,13,B7,DF,C7,7F,C6,B9,3A,F4,9A,5C,99,9E,
# 7D,15,77,12,D8,2C,B6,96,7B,54,9A,DD,0E,1D,E9,0B,
# E8,64,34,61,EC,5B,C6,81,9F,53,00,0E,09,47,B3,8D
# take3
# 64,12,B8,2E,23,2C,31,FC,13,FD,A2,57,29,83,93,35,
# 41,AE,CA,99,01,6C,38,29,70,B9,D0,09,AC,01,DF,62,
# 03,63,6E,2E,3E,A2,12,9A,32,E0,9D,AE,EE,4A,CA,0E,
# 07,66,47,19,7E,6B,7E,83,2C,D3,46,3B,6C,07,55,3B,
# 7B,06,61,77,13,A6,03,FB,8C,62,60,07,3E,B8,49,5C,
# 50,EB,CD,88,CE,8F,33,E3,49,AC,C7,36,08,28,69,7D,
# B6,55,68,7D,B4,63,59,6D,29,B5,23,63,12,10,C7,8F,
# A7,41,78,B0,DE,B6,C1,F5,27,6E,AB,D7,EA,66,52,72
# take4
# 5D,9E,62,00,75,DB,EE,9D,C3,4E,B0,3A,55,F4,7E,30,
# E9,AF,A6,46,A0,DF,77,A6,E7,FD,9C,57,C6,72,0B,35,
# 9A,EE,2E,9C,69,DC,EE,2B,FD,05,9E,32,08,01,03,DD,
# 12,D1,40,34,3B,F3,AE,A3,17,9C,5F,36,7C,4C,A1,BA,
# F3,98,C5,AD,85,90,E5,16,C5,EC,69,6C,C4,0F,1F,92,
# 0D,78,CC,0D,FD,DF,77,13,EE,06,8C,47,F0,BB,E6,BD,
# E7,E3,F0,60,78,45,85,39,A6,49,E0,3D,F0,A1,5F,3E,
# 90,16,5C,3E,61,47,EE,53,04,0B,11,18,2A,54,E6,1F
|
nilq/baby-python
|
python
|
# -*- coding: utf-8 -*-
"""
DEPRECATED
"""
from __future__ import division, print_function
import numpy as np
from theano import gof
import theano.tensor as tt
__all__ = ["tensordotDOp"]
class tensordotDOp(tt.Op):
def __init__(self, func):
self.func = func
self._grad_op = tensordotDGradientOp(self)
def make_node(self, *inputs):
inputs = [tt.as_tensor_variable(i) for i in inputs]
outputs = [tt.TensorType(inputs[0].dtype, (False, False))()]
return gof.Apply(self, inputs, outputs)
def infer_shape(self, node, shapes):
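        # Output is (leading dim of the 2nd input) x (trailing dim of the 1st input).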
return [[shapes[1][0], shapes[0][-1]]]
def R_op(self, inputs, eval_points):
if eval_points[0] is None:
return eval_points
return self.grad(inputs, eval_points)
def perform(self, node, inputs, outputs):
outputs[0][0] = self.func(*inputs)
def grad(self, inputs, gradients):
return self._grad_op(*(inputs + gradients))
class tensordotDGradientOp(tt.Op):
def __init__(self, base_op):
self.base_op = base_op
def make_node(self, *inputs):
inputs = [tt.as_tensor_variable(i) for i in inputs]
outputs = [i.type() for i in inputs[:-1]]
return gof.Apply(self, inputs, outputs)
def infer_shape(self, node, shapes):
return shapes[:-1]
def perform(self, node, inputs, outputs):
bM, bwta = self.base_op.func(*inputs)
outputs[0][0] = np.reshape(bM, np.shape(inputs[0]))
outputs[1][0] = np.reshape(bwta, np.shape(inputs[1]))
|
nilq/baby-python
|
python
|
# -*- coding: utf-8 -*-
"""
magic is a wrapper around the libmagic file identification library.
See README for more information.
Usage:
>>> import magic
>>> magic.from_file("testdata/test.pdf")
'PDF document, version 1.2'
>>> magic.from_file("testdata/test.pdf", mime=True)
'application/pdf'
>>> magic.from_buffer(open("testdata/test.pdf").read(1024))
'PDF document, version 1.2'
>>>
"""
import sys
import glob
import os.path
import ctypes
import ctypes.util
import threading
from ctypes import c_char_p, c_int, c_size_t, c_void_p
class MagicException(Exception): pass
class Magic:
"""
Magic is a wrapper around the libmagic C library.
"""
def __init__(self, mime=False, magic_file=None, mime_encoding=False,
keep_going=False, uncompress=False):
"""
Create a new libmagic wrapper.
        mime - if True, return mimetypes instead of textual descriptions
        mime_encoding - if True, return the codec (character encoding)
        magic_file - use the given magic database instead of the system default
        keep_going - keep checking and return all matches, not just the first
        uncompress - look inside compressed files
"""
self.flags = MAGIC_NONE
if mime:
self.flags |= MAGIC_MIME
elif mime_encoding:
self.flags |= MAGIC_MIME_ENCODING
if keep_going:
self.flags |= MAGIC_CONTINUE
if uncompress:
self.flags |= MAGIC_COMPRESS
self.cookie = magic_open(self.flags)
self.lock = threading.Lock()
magic_load(self.cookie, magic_file)
def from_buffer(self, buf):
        #Identify the contents of `buf` and return the description
with self.lock:
try:
return magic_buffer(self.cookie, buf)
except MagicException as e:
return self._handle509Bug(e)
def from_file(self, filename):
        #Identify the contents of the file `filename`.
        #Raises IOError if the file does not exist.
if not os.path.exists(filename):
raise IOError("File does not exist: " + filename)
with self.lock:
try:
return magic_file(self.cookie, filename)
except MagicException as e:
return self._handle509Bug(e)
def _handle509Bug(self, e):
        #If the exception has no message and the MAGIC_MIME flag is set,
        #return application/octet-stream (works around a libmagic 5.09 bug).
if e.message is None and (self.flags & MAGIC_MIME):
return "application/octet-stream"
def __del__(self):
        # Close the magic cookie if it is still open, then clear it.
if self.cookie and magic_close:
magic_close(self.cookie)
self.cookie = None
_instances = {}
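# Module-level convenience API: cache one Magic instance per value of `mime`.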
def _get_magic_type(mime):
i = _instances.get(mime)
if i is None:
i = _instances[mime] = Magic(mime=mime)
return i
def from_file(filename, mime=False):
    #Return the file type of `filename`; with mime=True the result is a mimetype.
m = _get_magic_type(mime)
return m.from_file(filename)
def from_buffer(buffer, mime=False):
    #Return the file type of a binary string; with mime=True the result is a mimetype.
m = _get_magic_type(mime)
return m.from_buffer(buffer)
libmagic = None
# Locate the libmagic shared library under one of these names; if it cannot be
# found anywhere below, raise an ImportError.
dll = ctypes.util.find_library('magic') or ctypes.util.find_library('magic1') or ctypes.util.find_library('cygmagic-1')
if dll:
libmagic = ctypes.CDLL(dll)
if not libmagic or not libmagic._name:
windows_dlls = ['magic1.dll','cygmagic-1.dll']
platform_to_lib = {'darwin': ['/opt/local/lib/libmagic.dylib',
'/usr/local/lib/libmagic.dylib'] +
# Assumes there will only be one version installed
glob.glob('/usr/local/Cellar/libmagic/*/lib/libmagic.dylib'),
'win32': windows_dlls,
'cygwin': windows_dlls }
for dll in platform_to_lib.get(sys.platform, []):
try:
libmagic = ctypes.CDLL(dll)
break
except OSError:
pass
if not libmagic or not libmagic._name:
raise ImportError('failed to find libmagic. Check your installation')
magic_t = ctypes.c_void_p
#errcheck handler for libmagic calls that return NULL on error
def errorcheck_null(result, func, args):
if result is None:
err = magic_error(args[0])
raise MagicException(err)
else:
return result
#errcheck handler for libmagic calls that return -1 on error
def errorcheck_negative_one(result, func, args):
    if result == -1:
err = magic_error(args[0])
raise MagicException(err)
else:
return result
# Encode a filename to bytes for the C API (handles unicode on both Python 2 and 3)
def coerce_filename(filename):
if filename is None:
return None
is_unicode = (sys.version_info[0] <= 2 and
isinstance(filename, unicode)) or \
(sys.version_info[0] >= 3 and
isinstance(filename, str))
if is_unicode:
return filename.encode('utf-8')
else:
return filename
magic_open = libmagic.magic_open
magic_open.restype = magic_t
magic_open.argtypes = [c_int]
magic_close = libmagic.magic_close
magic_close.restype = None
magic_close.argtypes = [magic_t]
magic_error = libmagic.magic_error
magic_error.restype = c_char_p
magic_error.argtypes = [magic_t]
magic_errno = libmagic.magic_errno
magic_errno.restype = c_int
magic_errno.argtypes = [magic_t]
_magic_file = libmagic.magic_file
_magic_file.restype = c_char_p
_magic_file.argtypes = [magic_t, c_char_p]
_magic_file.errcheck = errorcheck_null
def magic_file(cookie, filename):
return _magic_file(cookie, coerce_filename(filename))
_magic_buffer = libmagic.magic_buffer
_magic_buffer.restype = c_char_p
_magic_buffer.argtypes = [magic_t, c_void_p, c_size_t]
_magic_buffer.errcheck = errorcheck_null
def magic_buffer(cookie, buf):
return _magic_buffer(cookie, buf, len(buf))
_magic_load = libmagic.magic_load
_magic_load.restype = c_int
_magic_load.argtypes = [magic_t, c_char_p]
_magic_load.errcheck = errorcheck_negative_one
def magic_load(cookie, filename):
return _magic_load(cookie, coerce_filename(filename))
magic_setflags = libmagic.magic_setflags
magic_setflags.restype = c_int
magic_setflags.argtypes = [magic_t, c_int]
magic_check = libmagic.magic_check
magic_check.restype = c_int
magic_check.argtypes = [magic_t, c_char_p]
magic_compile = libmagic.magic_compile
magic_compile.restype = c_int
magic_compile.argtypes = [magic_t, c_char_p]
MAGIC_NONE = 0x000000 # No flags
MAGIC_DEBUG = 0x000001 # Turn on debugging
MAGIC_SYMLINK = 0x000002 # Follow symlinks
MAGIC_COMPRESS = 0x000004 # Check inside compressed files
MAGIC_DEVICES = 0x000008 # Look at the contents of devices
MAGIC_MIME = 0x000010 # Return a mime string
MAGIC_MIME_ENCODING = 0x000400 # Return the MIME encoding
MAGIC_CONTINUE = 0x000020 # Return all matches
MAGIC_CHECK = 0x000040 # Print warnings to stderr
MAGIC_PRESERVE_ATIME = 0x000080 # Restore access time on exit
MAGIC_RAW = 0x000100 # Don't translate unprintable chars
MAGIC_ERROR = 0x000200 # Handle ENOENT etc as real errors
MAGIC_NO_CHECK_COMPRESS = 0x001000 # Don't check for compressed files
MAGIC_NO_CHECK_TAR = 0x002000 # Don't check for tar files
MAGIC_NO_CHECK_SOFT = 0x004000 # Don't check magic entries
MAGIC_NO_CHECK_APPTYPE = 0x008000 # Don't check application type
MAGIC_NO_CHECK_ELF = 0x010000 # Don't check for elf details
MAGIC_NO_CHECK_ASCII = 0x020000 # Don't check for ascii files
MAGIC_NO_CHECK_TROFF = 0x040000 # Don't check ascii/troff
MAGIC_NO_CHECK_FORTRAN = 0x080000 # Don't check ascii/fortran
MAGIC_NO_CHECK_TOKENS = 0x100000 # Don't check ascii/tokens
|
nilq/baby-python
|
python
|
# Copyright 2017 The Bazel Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Filetype constants."""
tgz = [
".tar.gz",
".tgz",
]
# Filetype to restrict inputs
tar = [
".tar",
".tar.xz",
] + tgz
deb = [
".deb",
".udeb",
]
# Container images are tarballs (when exported).
container = tar
|
nilq/baby-python
|
python
|
# -*- coding: utf-8 -*-
from __future__ import absolute_import
# -- stdlib --
# -- third party --
import gevent
# -- own --
# -- code --
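# Decorator: replaces the decorated class with a single instance of it.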
def instantiate(cls):
return cls()
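# Spawn a greenlet that is respawned (after a 1s pause) whenever it exits.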
def spawn_autorestart(*args, **kwargs):
def restart(g):
gevent.sleep(1)
spawn_autorestart(*args, **kwargs)
gevent.spawn(*args, **kwargs).link(restart)
def status2emoji(s):
return {
'PROBLEM': u'😱',
'EVENT': u'😱',
'OK': u'😅',
}.get(s, s)
|
nilq/baby-python
|
python
|
from rest_framework.exceptions import MethodNotAllowed
from api.sparse.serializers import SparseNodeSerializer, SparseRegistrationSerializer
from api.nodes.views import (
NodeDetail,
NodeChildrenList,
NodeList,
LinkedNodesList,
NodeLinkedRegistrationsList,
)
from api.registrations.views import RegistrationDetail, RegistrationChildrenList, RegistrationList
from api.users.views import UserNodes, UserRegistrations
class BaseSparseMixin(object):
view_category = 'sparse'
serializer_class = None
# overrides NodeList because these endpoints don't allow writing
def perform_create(self, *args):
raise MethodNotAllowed(method=self.request.method)
# overrides NodeList because these endpoints don't allow writing
def perform_update(self, *args):
raise MethodNotAllowed(method=self.request.method)
# overrides NodeDetail because these endpoints don't allow writing
def perform_destroy(self, *args):
raise MethodNotAllowed(method=self.request.method)
# overrides NodeList because these endpoints don't allow writing
def allow_bulk_destroy_resources(self, *args):
raise MethodNotAllowed(method=self.request.method)
def get_serializer_class(self):
return self.serializer_class
class SparseNodeMixin(BaseSparseMixin):
serializer_class = SparseNodeSerializer
class SparseRegistrationMixin(BaseSparseMixin):
serializer_class = SparseRegistrationSerializer
class SparseNodeList(SparseNodeMixin, NodeList):
pass
class SparseLinkedNodesList(SparseNodeMixin, LinkedNodesList):
pass
class SparseNodeLinkedRegistrationsList(SparseRegistrationMixin, NodeLinkedRegistrationsList):
pass
class SparseUserNodeList(SparseNodeMixin, UserNodes):
pass
class SparseNodeDetail(SparseNodeMixin, NodeDetail):
pass
class SparseNodeChildrenList(SparseNodeMixin, NodeChildrenList):
pass
class SparseRegistrationDetail(SparseRegistrationMixin, RegistrationDetail):
pass
class SparseRegistrationList(SparseRegistrationMixin, RegistrationList):
pass
class SparseRegistrationChildrenList(SparseRegistrationMixin, RegistrationChildrenList):
pass
class SparseUserRegistrationList(SparseRegistrationMixin, UserRegistrations):
pass
|
nilq/baby-python
|
python
|
# The sole purpose of this function is to convert the .dbf file we have
# received, which contains all addresses in the city, to a .csv file
import dbfread as dbf  # To read our .dbf file
import csv  # To write to .csv
def convert_addresses_to_csv():
    # 'w' with newline='' is what the csv module expects on Python 3
    with open('addresses.csv', 'w', newline='') as csvfile:
        writer = None
        for rec in dbf.DBF('LBRS_Site.dbf'):
            if writer is None:
                writer = csv.DictWriter(csvfile, fieldnames=rec.keys())
                writer.writeheader()
            writer.writerow(rec)
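# Usage (assuming LBRS_Site.dbf sits in the working directory):
# convert_addresses_to_csv()  # writes addresses.csv alongside it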
|
nilq/baby-python
|
python
|
# pylint:disable=inconsistent-return-statements
"""
This module provides a class to visualise Click CLI structures
"""
import io
from contextlib import redirect_stdout
from copy import deepcopy
from typing import Union, Dict, Any, List
import treelib
from click import Group, MultiCommand
from click_tree_viz.click_utils import ClickNode, recurse_click_cli
from click_tree_viz.rich_utils import build_rich_tree
class ClickTreeViz:
    """
    This class is used to traverse the nested CLI structure of a Click CLI object
    and then provide several mechanisms for visualising or exporting the CLI structure
    """
    def __init__(self, click_struct: Union[MultiCommand, Group]):
        """
        The constructor for this class accepts a nested Click CLI object
        Args:
            click_struct: The structure to traverse and convert
        """
        # Copy value just in case
        self._raw_struct = deepcopy(click_struct)
# Flat list of ClickNode objects
self._list_leaf_nodes = recurse_click_cli(click_structure=self._raw_struct)
# Convert to treelib.tree.Tree structure
self._treelib_obj = self._as_tree(node_sequence=self._list_leaf_nodes)
self._treelib_obj_params = self._extend_leaf_params(treelib_obj=self._treelib_obj)
# Graphviz method provided by treelib yields once only
self._graphviz_cached = None
@staticmethod
def _as_tree(node_sequence: List[ClickNode]) -> treelib.tree.Tree:
"""
        This method converts a list of Click leaf nodes (a custom dataclass)
        into a treelib tree.
Args:
node_sequence: The list of nodes that need to be created as a treelib object
Returns:
The constructed treelib object
"""
# Use tree lib to take clean struct and hold in memory
working_tree = treelib.tree.Tree()
working_tree.create_node(identifier="CLI")
for leaf in node_sequence:
working_tree.create_node(
identifier=leaf.path,
tag=leaf.name,
data=leaf.as_dict(),
parent="CLI" if leaf.is_root else leaf.parent_path,
)
return working_tree
@staticmethod
def _extend_leaf_params(treelib_obj: treelib.tree.Tree) -> treelib.tree.Tree:
"""Add parameters and commands to the tree structure"""
# Copy so working with different reference
working_treelib_obj = deepcopy(treelib_obj)
# Iterate over each node
for node in treelib_obj.nodes:
# Retrieve node object
working_node = treelib_obj[node]
# Filter to nodes with data property
if working_node.data is not None:
params = working_node.data.get("params", [])
for param in params:
# Join any multi-options
opts = ",".join(param["opts"])
# Add to copied tree
working_treelib_obj.create_node(
identifier=working_node.identifier + "." + opts,
tag=f'[{param["type"]}] {opts}',
parent=node,
)
return working_treelib_obj
def to_dict(self, **kwargs) -> Dict[str, Any]:
"""Uses treelib to convert nodes to a dictionary structure"""
return self._treelib_obj.to_dict(with_data=True, **kwargs)
def to_json(self, **kwargs) -> str:
"""Uses treelib to convert nodes to a JSON structure"""
return self._treelib_obj.to_json(with_data=True, **kwargs)
def to_graphviz(self, shape: str = "plain", layout_dir: str = "LR", **kwargs) -> str:
"""
        This method leverages the treelib graphviz function, but instead of
        printing to stdout the output is captured and returned as a string.
        Additionally, the returned graphviz definition is extended with a layout direction
Args:
shape: The shape to render each node
layout_dir: The direction which the tree will render
**kwargs: Any extra arguments to pass to treelib.tree.Tree.to_graphviz
Returns:
A string of graphviz configuration ready for rendering in another tool
"""
# If graphviz object is already generated, retrieve cached version
if self._graphviz_cached is not None:
return self._graphviz_cached
# treelib graphviz writes once to stdout
stream = io.StringIO()
with redirect_stdout(stream):
self._treelib_obj_params.to_graphviz(shape=shape, **kwargs)
output = stream.getvalue()
# Replace closing } tag with layout condition
output_with_layout = output.replace("}", f'rankdir="{layout_dir}";\n}}')
# save to attr so that we can call >1x
self._graphviz_cached = output_with_layout
return self._graphviz_cached
def print(self, **kwargs):
"""Uses built in treelib print function"""
return self._treelib_obj_params.show(**kwargs)
def rich_print(self, return_object: bool = False):
"""Converts treelib structure to rich.tree.Tree object
and prints it to the console"""
result = build_rich_tree(self._treelib_obj, return_obj=return_object)
if return_object:
return result
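# Example usage (a sketch; `cli` is a hypothetical click.Group):
#   viz = ClickTreeViz(cli)
#   viz.print()                 # ASCII tree
#   print(viz.to_graphviz())    # DOT source for rendering elsewhere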
|
nilq/baby-python
|
python
|
from pprint import pformat
import click
import py42.sdk.queries.fileevents.filters as f
from click import echo
from pandas import DataFrame
from py42.exceptions import Py42InvalidPageTokenError
from py42.sdk.queries.fileevents.file_event_query import FileEventQuery
from py42.sdk.queries.fileevents.filters import InsertionTimestamp
from py42.sdk.queries.fileevents.filters.exposure_filter import ExposureType
from py42.sdk.queries.fileevents.filters.file_filter import FileCategory
from py42.sdk.queries.fileevents.filters.risk_filter import RiskIndicator
from py42.sdk.queries.fileevents.filters.risk_filter import RiskSeverity
import code42cli.cmds.search.options as searchopt
import code42cli.options as opt
from code42cli.click_ext.groups import OrderedGroup
from code42cli.click_ext.options import incompatible_with
from code42cli.click_ext.types import MapChoice
from code42cli.cmds.search import SendToCommand
from code42cli.cmds.search.cursor_store import FileEventCursorStore
from code42cli.cmds.util import convert_to_or_query
from code42cli.cmds.util import create_time_range_filter
from code42cli.date_helper import convert_datetime_to_timestamp
from code42cli.date_helper import limit_date_range
from code42cli.enums import OutputFormat
from code42cli.logger import get_main_cli_logger
from code42cli.options import column_option
from code42cli.options import format_option
from code42cli.options import sdk_options
from code42cli.output_formats import DataFrameOutputFormatter
from code42cli.output_formats import FileEventsOutputFormat
from code42cli.output_formats import FileEventsOutputFormatter
from code42cli.util import warn_interrupt
logger = get_main_cli_logger()
MAX_EVENT_PAGE_SIZE = 10000
SECURITY_DATA_KEYWORD = "file events"
file_events_format_option = click.option(
"-f",
"--format",
type=click.Choice(FileEventsOutputFormat(), case_sensitive=False),
help="The output format of the result. Defaults to table format.",
default=FileEventsOutputFormat.TABLE,
)
exposure_type_option = click.option(
"-t",
"--type",
multiple=True,
type=click.Choice(list(ExposureType.choices())),
cls=searchopt.AdvancedQueryAndSavedSearchIncompatible,
callback=searchopt.is_in_filter(f.ExposureType),
help="Limits events to those with given exposure types.",
)
username_option = click.option(
"--c42-username",
multiple=True,
callback=searchopt.is_in_filter(f.DeviceUsername),
cls=searchopt.AdvancedQueryAndSavedSearchIncompatible,
help="Limits events to endpoint events for these Code42 users.",
)
actor_option = click.option(
"--actor",
multiple=True,
callback=searchopt.is_in_filter(f.Actor),
cls=searchopt.AdvancedQueryAndSavedSearchIncompatible,
help="Limits events to only those enacted by the cloud service user "
"of the person who caused the event.",
)
md5_option = click.option(
"--md5",
multiple=True,
callback=searchopt.is_in_filter(f.MD5),
cls=searchopt.AdvancedQueryAndSavedSearchIncompatible,
help="Limits events to file events where the file has one of these MD5 hashes.",
)
sha256_option = click.option(
"--sha256",
multiple=True,
callback=searchopt.is_in_filter(f.SHA256),
cls=searchopt.AdvancedQueryAndSavedSearchIncompatible,
help="Limits events to file events where the file has one of these SHA256 hashes.",
)
source_option = click.option(
"--source",
multiple=True,
callback=searchopt.is_in_filter(f.Source),
cls=searchopt.AdvancedQueryAndSavedSearchIncompatible,
help="Limits events to only those from one of these sources. For example, Gmail, Box, or Endpoint.",
)
file_name_option = click.option(
"--file-name",
multiple=True,
callback=searchopt.is_in_filter(f.FileName),
cls=searchopt.AdvancedQueryAndSavedSearchIncompatible,
help="Limits events to file events where the file has one of these names.",
)
file_path_option = click.option(
"--file-path",
multiple=True,
callback=searchopt.is_in_filter(f.FilePath),
cls=searchopt.AdvancedQueryAndSavedSearchIncompatible,
help="Limits events to file events where the file is located at one of these paths. Applies to endpoint file events only.",
)
file_category_option = click.option(
"--file-category",
multiple=True,
type=MapChoice(
choices=list(FileCategory.choices()),
extras_map={
"AUDIO": FileCategory.AUDIO,
"DOCUMENT": FileCategory.DOCUMENT,
"EXECUTABLE": FileCategory.EXECUTABLE,
"IMAGE": FileCategory.IMAGE,
"PDF": FileCategory.PDF,
"PRESENTATION": FileCategory.PRESENTATION,
"SCRIPT": FileCategory.SCRIPT,
"SOURCE_CODE": FileCategory.SOURCE_CODE,
"SPREADSHEET": FileCategory.SPREADSHEET,
"VIDEO": FileCategory.VIDEO,
"VIRTUAL_DISK_IMAGE": FileCategory.VIRTUAL_DISK_IMAGE,
"ARCHIVE": FileCategory.ZIP,
"ZIP": FileCategory.ZIP,
"Zip": FileCategory.ZIP,
},
),
callback=searchopt.is_in_filter(f.FileCategory),
cls=searchopt.AdvancedQueryAndSavedSearchIncompatible,
help="Limits events to file events where the file can be classified by one of these categories.",
)
process_owner_option = click.option(
"--process-owner",
multiple=True,
callback=searchopt.is_in_filter(f.ProcessOwner),
cls=searchopt.AdvancedQueryAndSavedSearchIncompatible,
help="Limits exposure events by process owner, as reported by the device’s operating system. "
"Applies only to `Printed` and `Browser or app read` events.",
)
tab_url_option = click.option(
"--tab-url",
multiple=True,
callback=searchopt.is_in_filter(f.TabURL),
cls=searchopt.AdvancedQueryAndSavedSearchIncompatible,
help="Limits events to be exposure events with one of the specified destination tab URLs.",
)
include_non_exposure_option = click.option(
"--include-non-exposure",
is_flag=True,
callback=searchopt.exists_filter(f.ExposureType),
cls=incompatible_with(["advanced_query", "type", "saved_search"]),
help="Get all events including non-exposure events.",
)
risk_indicator_map = {
"PUBLIC_CORPORATE_BOX": RiskIndicator.CloudDataExposures.PUBLIC_CORPORATE_BOX,
"PUBLIC_CORPORATE_GOOGLE": RiskIndicator.CloudDataExposures.PUBLIC_CORPORATE_GOOGLE_DRIVE,
"PUBLIC_CORPORATE_ONEDRIVE": RiskIndicator.CloudDataExposures.PUBLIC_CORPORATE_ONEDRIVE,
"SENT_CORPORATE_GMAIL": RiskIndicator.CloudDataExposures.SENT_CORPORATE_GMAIL,
"SHARED_CORPORATE_BOX": RiskIndicator.CloudDataExposures.SHARED_CORPORATE_BOX,
"SHARED_CORPORATE_GOOGLE_DRIVE": RiskIndicator.CloudDataExposures.SHARED_CORPORATE_GOOGLE_DRIVE,
"SHARED_CORPORATE_ONEDRIVE": RiskIndicator.CloudDataExposures.SHARED_CORPORATE_ONEDRIVE,
"AMAZON_DRIVE": RiskIndicator.CloudStorageUploads.AMAZON_DRIVE,
"BOX": RiskIndicator.CloudStorageUploads.BOX,
"DROPBOX": RiskIndicator.CloudStorageUploads.DROPBOX,
"GOOGLE_DRIVE": RiskIndicator.CloudStorageUploads.GOOGLE_DRIVE,
"ICLOUD": RiskIndicator.CloudStorageUploads.ICLOUD,
"MEGA": RiskIndicator.CloudStorageUploads.MEGA,
"ONEDRIVE": RiskIndicator.CloudStorageUploads.ONEDRIVE,
"ZOHO": RiskIndicator.CloudStorageUploads.ZOHO,
"BITBUCKET": RiskIndicator.CodeRepositoryUploads.BITBUCKET,
"GITHUB": RiskIndicator.CodeRepositoryUploads.GITHUB,
"GITLAB": RiskIndicator.CodeRepositoryUploads.GITLAB,
"SOURCEFORGE": RiskIndicator.CodeRepositoryUploads.SOURCEFORGE,
"STASH": RiskIndicator.CodeRepositoryUploads.STASH,
"163.COM": RiskIndicator.EmailServiceUploads.ONESIXTHREE_DOT_COM,
"126.COM": RiskIndicator.EmailServiceUploads.ONETWOSIX_DOT_COM,
"AOL": RiskIndicator.EmailServiceUploads.AOL,
"COMCAST": RiskIndicator.EmailServiceUploads.COMCAST,
"GMAIL": RiskIndicator.EmailServiceUploads.GMAIL,
"ICLOUD_MAIL": RiskIndicator.EmailServiceUploads.ICLOUD,
"MAIL.COM": RiskIndicator.EmailServiceUploads.MAIL_DOT_COM,
"OUTLOOK": RiskIndicator.EmailServiceUploads.OUTLOOK,
"PROTONMAIL": RiskIndicator.EmailServiceUploads.PROTONMAIL,
"QQMAIL": RiskIndicator.EmailServiceUploads.QQMAIL,
"SINA_MAIL": RiskIndicator.EmailServiceUploads.SINA_MAIL,
"SOHU_MAIL": RiskIndicator.EmailServiceUploads.SOHU_MAIL,
"YAHOO": RiskIndicator.EmailServiceUploads.YAHOO,
"ZOHO_MAIL": RiskIndicator.EmailServiceUploads.ZOHO_MAIL,
"AIRDROP": RiskIndicator.ExternalDevices.AIRDROP,
"REMOVABLE_MEDIA": RiskIndicator.ExternalDevices.REMOVABLE_MEDIA,
"AUDIO": RiskIndicator.FileCategories.AUDIO,
"DOCUMENT": RiskIndicator.FileCategories.DOCUMENT,
"EXECUTABLE": RiskIndicator.FileCategories.EXECUTABLE,
"IMAGE": RiskIndicator.FileCategories.IMAGE,
"PDF": RiskIndicator.FileCategories.PDF,
"PRESENTATION": RiskIndicator.FileCategories.PRESENTATION,
"SCRIPT": RiskIndicator.FileCategories.SCRIPT,
"SOURCE_CODE": RiskIndicator.FileCategories.SOURCE_CODE,
"SPREADSHEET": RiskIndicator.FileCategories.SPREADSHEET,
"VIDEO": RiskIndicator.FileCategories.VIDEO,
"VIRTUAL_DISK_IMAGE": RiskIndicator.FileCategories.VIRTUAL_DISK_IMAGE,
"ZIP": RiskIndicator.FileCategories.ZIP,
"FACEBOOK_MESSENGER": RiskIndicator.MessagingServiceUploads.FACEBOOK_MESSENGER,
"MICROSOFT_TEAMS": RiskIndicator.MessagingServiceUploads.MICROSOFT_TEAMS,
"SLACK": RiskIndicator.MessagingServiceUploads.SLACK,
"WHATSAPP": RiskIndicator.MessagingServiceUploads.WHATSAPP,
"OTHER": RiskIndicator.Other.OTHER,
"UNKNOWN": RiskIndicator.Other.UNKNOWN,
"FACEBOOK": RiskIndicator.SocialMediaUploads.FACEBOOK,
"LINKEDIN": RiskIndicator.SocialMediaUploads.LINKEDIN,
"REDDIT": RiskIndicator.SocialMediaUploads.REDDIT,
"TWITTER": RiskIndicator.SocialMediaUploads.TWITTER,
"FILE_MISMATCH": RiskIndicator.UserBehavior.FILE_MISMATCH,
"OFF_HOURS": RiskIndicator.UserBehavior.OFF_HOURS,
"REMOTE": RiskIndicator.UserBehavior.REMOTE,
"FIRST_DESTINATION_USE": RiskIndicator.UserBehavior.FIRST_DESTINATION_USE,
"RARE_DESTINATION_USE": RiskIndicator.UserBehavior.RARE_DESTINATION_USE,
}
risk_indicator_map_reversed = {v: k for k, v in risk_indicator_map.items()}
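# Map the friendly risk-indicator names above onto py42 constants before
# delegating to the standard is_in_filter callback.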
def risk_indicator_callback(filter_cls):
def callback(ctx, param, arg):
if arg:
mapped_args = tuple(risk_indicator_map[i] for i in arg)
filter_func = searchopt.is_in_filter(filter_cls)
return filter_func(ctx, param, mapped_args)
return callback
risk_indicator_option = click.option(
"--risk-indicator",
multiple=True,
type=MapChoice(
choices=list(risk_indicator_map.keys()), extras_map=risk_indicator_map_reversed,
),
callback=risk_indicator_callback(f.RiskIndicator),
cls=searchopt.AdvancedQueryAndSavedSearchIncompatible,
help="Limits events to those classified by the given risk indicator categories.",
)
risk_severity_option = click.option(
"--risk-severity",
multiple=True,
type=click.Choice(list(RiskSeverity.choices())),
callback=searchopt.is_in_filter(f.RiskSeverity),
cls=searchopt.AdvancedQueryAndSavedSearchIncompatible,
help="Limits events to those classified by the given risk severity.",
)
begin_option = opt.begin_option(
SECURITY_DATA_KEYWORD,
callback=lambda ctx, param, arg: convert_datetime_to_timestamp(
limit_date_range(arg, max_days_back=90)
),
)
end_option = opt.end_option(SECURITY_DATA_KEYWORD)
checkpoint_option = opt.checkpoint_option(
SECURITY_DATA_KEYWORD, cls=searchopt.AdvancedQueryAndSavedSearchIncompatible
)
advanced_query_option = searchopt.advanced_query_option(SECURITY_DATA_KEYWORD)
def _get_saved_search_option():
def _get_saved_search_query(ctx, param, arg):
if arg is None:
return
query = ctx.obj.sdk.securitydata.savedsearches.get_query(arg)
return query
return click.option(
"--saved-search",
help="Get events from a saved search filter with the given ID."
"WARNING: Using a saved search is incompatible with other query-building arguments.",
callback=_get_saved_search_query,
cls=incompatible_with("advanced_query"),
)
def search_options(f):
f = column_option(f)
f = checkpoint_option(f)
f = advanced_query_option(f)
f = searchopt.or_query_option(f)
f = end_option(f)
f = begin_option(f)
return f
def file_event_options(f):
f = exposure_type_option(f)
f = username_option(f)
f = actor_option(f)
f = md5_option(f)
f = sha256_option(f)
f = source_option(f)
f = file_name_option(f)
f = file_path_option(f)
f = file_category_option(f)
f = process_owner_option(f)
f = tab_url_option(f)
f = include_non_exposure_option(f)
f = risk_indicator_option(f)
f = risk_severity_option(f)
f = _get_saved_search_option()(f)
return f
@click.group(cls=OrderedGroup)
@sdk_options(hidden=True)
def security_data(state):
"""Get and send file event data."""
# store cursor getter on the group state so shared --begin option can use it in validation
state.cursor_getter = _get_file_event_cursor_store
@security_data.command()
@click.argument("checkpoint-name")
@sdk_options()
def clear_checkpoint(state, checkpoint_name):
"""Remove the saved file event checkpoint from `--use-checkpoint/-c` mode."""
_get_file_event_cursor_store(state.profile.name).delete(checkpoint_name)
@security_data.command()
@file_event_options
@search_options
@sdk_options()
@searchopt.include_all_option
@file_events_format_option
def search(
state,
format,
begin,
end,
advanced_query,
use_checkpoint,
saved_search,
or_query,
columns,
include_all,
**kwargs,
):
"""Search for file events."""
if format == FileEventsOutputFormat.CEF and columns:
raise click.BadOptionUsage(
"columns", "--columns option can't be used with CEF format."
)
# set default table columns
if format == OutputFormat.TABLE:
if not columns and not include_all:
columns = [
"fileName",
"filePath",
"eventType",
"eventTimestamp",
"fileCategory",
"fileSize",
"fileOwner",
"md5Checksum",
"sha256Checksum",
"riskIndicators",
"riskSeverity",
]
if use_checkpoint:
cursor = _get_file_event_cursor_store(state.profile.name)
checkpoint = _handle_timestamp_checkpoint(cursor.get(use_checkpoint), state)
def checkpoint_func(event):
cursor.replace(use_checkpoint, event["eventId"])
else:
checkpoint = checkpoint_func = None
query = _construct_query(state, begin, end, saved_search, advanced_query, or_query)
dfs = _get_all_file_events(state, query, checkpoint)
formatter = FileEventsOutputFormatter(format, checkpoint_func=checkpoint_func)
# sending to pager when checkpointing can be inaccurate due to pager buffering, so disallow pager
force_no_pager = use_checkpoint
formatter.echo_formatted_dataframes(
dfs, columns=columns, force_no_pager=force_no_pager
)
@security_data.command(cls=SendToCommand)
@file_event_options
@search_options
@sdk_options()
@searchopt.server_options
@searchopt.send_to_format_options
def send_to(
state,
begin,
end,
advanced_query,
use_checkpoint,
saved_search,
or_query,
columns,
**kwargs,
):
"""Send events to the given server address.
HOSTNAME format: address:port where port is optional and defaults to 514.
"""
if use_checkpoint:
cursor = _get_file_event_cursor_store(state.profile.name)
checkpoint = _handle_timestamp_checkpoint(cursor.get(use_checkpoint), state)
def checkpoint_func(event):
cursor.replace(use_checkpoint, event["eventId"])
else:
checkpoint = checkpoint_func = None
query = _construct_query(state, begin, end, saved_search, advanced_query, or_query)
dfs = _get_all_file_events(state, query, checkpoint)
formatter = FileEventsOutputFormatter(None, checkpoint_func=checkpoint_func)
with warn_interrupt():
event = None
for event in formatter.iter_rows(dfs, columns=columns):
state.logger.info(event)
if event is None: # generator was empty
click.echo("No results found.")
@security_data.group(cls=OrderedGroup)
@sdk_options()
def saved_search(state):
"""Search for file events using saved searches."""
pass
@saved_search.command("list")
@format_option
@sdk_options()
def _list(state, format=None):
"""List available saved searches."""
formatter = DataFrameOutputFormatter(format)
response = state.sdk.securitydata.savedsearches.get()
saved_searches_df = DataFrame(response["searches"])
formatter.echo_formatted_dataframes(
saved_searches_df, columns=["name", "id", "notes"]
)
@saved_search.command()
@click.argument("search-id")
@sdk_options()
def show(state, search_id):
"""Get the details of a saved search."""
response = state.sdk.securitydata.savedsearches.get_by_id(search_id)
echo(pformat(response["searches"]))
def _get_file_event_cursor_store(profile_name):
return FileEventCursorStore(profile_name)
def _construct_query(state, begin, end, saved_search, advanced_query, or_query):
if advanced_query:
state.search_filters = advanced_query
elif saved_search:
state.search_filters = saved_search._filter_group_list
else:
if begin or end:
state.search_filters.append(
create_time_range_filter(f.EventTimestamp, begin, end)
)
if or_query:
state.search_filters = convert_to_or_query(state.search_filters)
query = FileEventQuery(*state.search_filters)
query.page_size = MAX_EVENT_PAGE_SIZE
query.sort_direction = "asc"
query.sort_key = "insertionTimestamp"
return query
def _get_all_file_events(state, query, checkpoint=""):
try:
response = state.sdk.securitydata.search_all_file_events(
query, page_token=checkpoint
)
except Py42InvalidPageTokenError:
response = state.sdk.securitydata.search_all_file_events(query)
yield DataFrame(response["fileEvents"])
while response["nextPgToken"]:
response = state.sdk.securitydata.search_all_file_events(
query, page_token=response["nextPgToken"]
)
yield DataFrame(response["fileEvents"])
def _handle_timestamp_checkpoint(checkpoint, state):
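    # A checkpoint is either a float timestamp or an event ID string. Timestamps
    # become an InsertionTimestamp filter (and no page token is used); event IDs
    # are returned unchanged to be used as the page token.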
try:
checkpoint = float(checkpoint)
state.search_filters.append(InsertionTimestamp.on_or_after(checkpoint))
return None
except (ValueError, TypeError):
return checkpoint
|
nilq/baby-python
|
python
|
import os
import sys
import unittest
sys.path.append(".")
from Utils.HTMLTestRunner import *
from Testcases.test_login import Login
from Testcases.test_02 import Test02
# get the directory path to output report file
dir = os.getcwd()
# get all tests from Login class
login1 = unittest.TestLoader().loadTestsFromTestCase(Login)
test02 = unittest.TestLoader().loadTestsFromTestCase(Test02)
# create a test suite
test_suite = unittest.TestSuite([login1, test02])
# open the report file
outfile = open(dir + '\\Reports\\SeleniumPythonTestSummary.html', 'w', encoding='utf-8')
print(dir + '\\Reports\\SeleniumPythonTestSummary.html')
# configure HTMLTestRunner options
runner = HTMLTestRunner(stream=outfile, title='Test Report', description='Acceptance Tests')
# run the suite using HTMLTestRunner
runner.run(test_suite)
outfile.close()
|
nilq/baby-python
|
python
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import unittest
import dictc
class DictCTest(unittest.TestCase):
def setUp(self):
pass
class BaseDictTest(unittest.TestCase):
def setUp(self):
from DictC.BaseDict import strip_tags
self.strip_tags = strip_tags
import re
self.raw_strip_tags = lambda text: re.sub(r'<[^>]*?>', '', text)
from DictC.BaseDict import BaseDict
self.BaseDict = BaseDict
def test_strip_tags(self):
        self.assertIsInstance(self.strip_tags.__doc__, str)
self.assertEqual('hello', self.strip_tags('<>hello</>'))
self.assertEqual('hello', self.strip_tags('<div>hello</div>'))
def test_raw_strip_tags(self):
        self.assertIsNone(self.raw_strip_tags.__doc__)
self.assertEqual('hello', self.raw_strip_tags('<>hello</>'))
self.assertEqual('hello', self.raw_strip_tags('<div>hello</div>'))
def test_fetchSuggestion(self):
keyword = 'hello'
self.assertEqual([keyword], self.BaseDict.fetchSuggestion(keyword))
def test_set_get_keyword(self):
keyword = 'hello'
base_dict = self.BaseDict()
base_dict.setKeyword(keyword)
self.assertEqual(keyword, base_dict.getKeyword())
def test_soundUri(self):
pass
def test_html2txt(self):
pairs = [
('<b>Hello</b>', 'Hello'),
('&', '&'),
('<b>&</b>', '&')
]
base_dict = self.BaseDict()
for orig, raw in pairs:
self.assertEqual(base_dict.html2txt(orig), raw)
def test_getOutput(self):
base_dict = self.BaseDict()
self.assertTupleEqual((False, ''), base_dict.getOutput())
    def tearDown(self):
        # TODO: django is missing?
        from importlib import reload  # `reload` is a builtin only on Python 2
        import django.utils.html
        reload(django.utils.html)
class BingDictTest(unittest.TestCase):
def setUp(self):
self.keywords = ['addicted', 'hello', 'welcome', 'it\'s', '你',
'cancer']
from DictC.BingDict import BingDict
self.BingDict = BingDict
self.bing = BingDict()
def test_fetchSuggestion(self):
keywords = [
'你', "it's", 'hello'
]
for keyword in keywords:
data = self.BingDict.fetchSuggestion(keyword)
self.assertTrue(data)
self.assertEqual(10, len(data))
self.assertTupleEqual(
(u'hello', u'你好;您好;哈喽;喂,表示问候,打招呼或接电话时'),
data[0]
)
def test_getOutput(self):
for keyword in self.keywords:
self.bing.setKeyword(keyword)
status, content = self.bing.getOutput()
self.assertTrue(status)
class DictCnTest(unittest.TestCase):
def setUp(self):
self.keywords = ['addicted', 'hello', 'welcome', 'it\'s', '你',
'cancer']
from DictC.DictCnDict import DictCnDict
self.DictCnDict = DictCnDict
self.dict_cn = DictCnDict()
def test_fetchSuggestion(self):
keywords = [
'你', "it's", 'hello'
]
for keyword in keywords:
data = self.DictCnDict.fetchSuggestion(keyword)
self.assertTrue(data)
if __name__ == "__main__":
unittest.main()
# vim: tabstop=4 expandtab shiftwidth=4 softtabstop=4 textwidth=79
|
nilq/baby-python
|
python
|
from .clusterer import Clusterer
class ClusterMerge():
def __init__(self, config):
self.clusterer = Clusterer(**config)
self.pattern_generator = self.clusterer.pattern_generator
def merge(self, base_list, other_list):
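        # Each entry is [representative, count, pattern, line_ids]; fold every
        # entry from other_list into the closest base entry within max_dist,
        # appending it as a new cluster when none is close enough.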
for [reprA, countA, patternA, linesA] in other_list:
exists = False
for i in range(len(base_list)):
[reprB, countB, patternB, linesB] = base_list[i]
score = self.clusterer.scorer.distance(
reprA, reprB, self.clusterer.max_dist)
if score <= self.clusterer.max_dist:
exists = True
base_list[i][1] += countA
merged_pattern = self.pattern_generator.create_pattern(
patternA, patternB)
base_list[i][2] = merged_pattern
base_list[i][3] |= linesA
break
if not exists:
base_list.append([reprA, countA, patternA, linesA])
|
nilq/baby-python
|
python
|
from setuptools import setup, find_namespace_packages
setup(name='sog',
version='0.1',
description='A creative remake of the 80s MUD',
url='',
author='Jason Newblanc',
author_email='<first>.<last>(at)gmail.com',
license='CC0 1.0',
packages=find_namespace_packages(include=['sog.*']),
zip_safe=False,
)
|
nilq/baby-python
|
python
|
'''
Util module to initialize SimpleML and configure
database management
'''
__author__ = 'Elisha Yadgaran'
# Import table models to register in DeclarativeBase
from simpleml.persistables.base_persistable import Persistable
import simpleml.datasets.base_dataset
import simpleml.pipelines.base_pipeline
import simpleml.models.base_model
import simpleml.metrics.base_metric
from simpleml.persistables.serializing import custom_dumps, custom_loads
from simpleml.utils.errors import SimpleMLError
from simpleml.utils.configuration import CONFIG, FILESTORE_DIRECTORY
from simpleml.imports import SSHTunnelForwarder
from sqlalchemy import create_engine
from sqlalchemy.exc import ProgrammingError
from sqlalchemy.orm import scoped_session, sessionmaker
from sqlalchemy.engine.url import URL, make_url
from alembic import command
from alembic.config import Config
from alembic.migration import MigrationContext
from alembic.script import ScriptDirectory
from os.path import realpath, dirname, join
import os
import logging
import random
LOGGER = logging.getLogger(__name__)
# Database Defaults
DATABASE_NAME = os.getenv('SIMPLEML_DATABASE_NAME', None)
DATABASE_USERNAME = os.getenv('SIMPLEML_DATABASE_USERNAME', None)
DATABASE_PASSWORD = os.getenv('SIMPLEML_DATABASE_PASSWORD', None)
DATABASE_HOST = os.getenv('SIMPLEML_DATABASE_HOST', None)
DATABASE_PORT = os.getenv('SIMPLEML_DATABASE_PORT', None)
DATABASE_DRIVERNAME = os.getenv('SIMPLEML_DATABASE_DRIVERNAME', None)
DATABASE_QUERY = os.getenv('SIMPLEML_DATABASE_QUERY', None)
DATABASE_CONF = os.getenv('SIMPLEML_DATABASE_CONF', None)
DATABASE_URI = os.getenv('SIMPLEML_DATABASE_URI', None)
class BaseDatabase(URL):
'''
Base Database class to configure db connection
Does not assume schema tracking or any other validation
'''
def __init__(self, config=None, configuration_section=None, uri=None,
use_ssh_tunnel=False, sshtunnel_params={}, **credentials):
'''
:param use_ssh_tunnel: boolean - default false. Whether to tunnel sqlalchemy connection
through an ssh tunnel or not
:param sshtunnel_params: Dict of ssh params - specify according to sshtunnel project
https://github.com/pahaz/sshtunnel/ - direct passthrough
'''
self.use_ssh_tunnel = use_ssh_tunnel
# Sort out which credentials are the final ones -- default to remaining passed params
if configuration_section is not None:
if config is None:
raise SimpleMLError('Cannot use config section without a config file')
# Default to credentials in config file
credentials = dict(config[configuration_section])
elif uri is not None:
# Deconstruct URI into credentials
url = make_url(uri)
credentials = {
'drivername': url.drivername,
'username': url.username,
'password': url.password,
'host': url.host,
'port': url.port,
'database': url.database,
'query': url.query,
}
# Reconfigure credentials if SSH tunnel specified
if self.use_ssh_tunnel:
LOGGER.warning(
'''
SSH Tunnel is unreliable at the moment - connections time out randomly.
Usage: call Database.start_tunnel() before Database.initialize() and
end script with Database.stop_tunnel()
'''
)
# Overwrite passed ports and hosts to route localhost port to the
# original destination via tunnel
credentials, self.ssh_config = self.configure_ssh_tunnel(credentials, sshtunnel_params)
super(BaseDatabase, self).__init__(**credentials)
def configure_ssh_tunnel(self, credentials, ssh_config):
# Actual DB location
target_host = credentials.pop('host')
target_port = int(credentials.pop('port'))
# SSH Tunnel location
local_host, local_port = ssh_config.get('local_bind_address', (None, None))
local_host = local_host or 'localhost' # In case it's null
local_port = local_port or random.randint(4000, 5000) # In case it's null
LOGGER.info("Using {}:{} to bind SSH tunnel".format(local_host, local_port))
# Swap em - db URI points to the local tunnel opening and the remote
# ssh tunnel binds to the original host+port
credentials['host'] = local_host
credentials['port'] = local_port
ssh_config['local_bind_address'] = (local_host, local_port)
ssh_config['remote_bind_address'] = (target_host, target_port)
return credentials, ssh_config
def open_tunnel(self):
self.ssh_tunnel.start()
def close_tunnel(self):
self.ssh_tunnel.stop()
@property
def engine(self):
# Custom serializer/deserializer not supported by all drivers
# Definitely works for:
# - Postgres
# - SQLite >= 1.3.7 -- Use _json_serializer for below
return create_engine(self,
json_serializer=custom_dumps,
json_deserializer=custom_loads,
pool_recycle=300)
@property
def ssh_tunnel(self):
if SSHTunnelForwarder is None: # Not installed
raise SimpleMLError('SSHTunnel is not installed, install with `pip install sshtunnel`')
if not hasattr(self, '_sshtunnel'):
self._sshtunnel = SSHTunnelForwarder(**self.ssh_config)
return self._sshtunnel
def create_tables(self, base, drop_tables=False, ignore_errors=False):
'''
Creates database tables (and potentially drops existing ones).
Assumes to be running under a sufficiently privileged user
:param drop_tables: Whether or not to drop the existing tables first.
:return: None
'''
try:
if drop_tables:
base.metadata.drop_all()
base.metadata.create_all()
except ProgrammingError as e: # Permission errors
if ignore_errors:
LOGGER.debug(e)
else:
                raise
def _initialize(self, base, create_tables=False, **kwargs):
'''
Initialization method to set up database connection and inject
session manager
:param create_tables: Bool, whether to create tables in database
:param drop_tables: Bool, whether to drop existing tables in database
:return: None
'''
engine = self.engine
session = scoped_session(sessionmaker(autocommit=True,
autoflush=False,
bind=engine))
base.metadata.bind = engine
base.query = session.query_property()
if create_tables:
self.create_tables(base, **kwargs)
base.set_session(session)
def initialize(self, base_list, **kwargs):
'''
Initialization method to set up database connection and inject
session manager
        :param create_tables: Bool, whether to create tables in database
        :param drop_tables: Bool, whether to drop existing tables in database
:return: None
'''
for base in base_list:
self._initialize(base, **kwargs)
class AlembicDatabase(BaseDatabase):
'''
Base database class to manage dbs with schema tracking. Includes alembic
config references
'''
def __init__(self, alembic_filepath, script_location='migrations', *args, **kwargs):
self.alembic_filepath = alembic_filepath
self.script_location = script_location
super(AlembicDatabase, self).__init__(*args, **kwargs)
@property
def alembic_config(self):
if not hasattr(self, '_alembic_config'):
# load the Alembic configuration
self._alembic_config = Config(self.alembic_filepath)
            # For some reason, alembic doesn't use a relative path from the ini
            # and cannot find the migration folder without the full path
self._alembic_config.set_main_option('script_location', join(dirname(self.alembic_filepath), self.script_location))
return self._alembic_config
def create_tables(self, base, drop_tables=False, ignore_errors=False):
'''
Creates database tables (and potentially drops existing ones).
Assumes to be running under a sufficiently privileged user
:param drop_tables: Whether or not to drop the existing tables first.
:return: None
'''
try:
if drop_tables:
base.metadata.drop_all()
# downgrade the version table, "stamping" it with the base rev
command.stamp(self.alembic_config, "base")
base.metadata.create_all()
# generate/upgrade the version table, "stamping" it with the most recent rev
command.stamp(self.alembic_config, "head")
except ProgrammingError as e: # Permission errors
if ignore_errors:
LOGGER.debug(e)
else:
                raise
def upgrade(self, revision='head'):
'''
Proxy Method to invoke alembic upgrade command to specified revision
'''
command.upgrade(self.alembic_config, revision)
def downgrade(self, revision):
'''
Proxy Method to invoke alembic downgrade command to specified revision
'''
command.downgrade(self.alembic_config, revision)
def validate_schema_version(self):
'''
Check that the newly initialized database is up-to-date
Raises an error otherwise (ahead of any table model mismatches later)
'''
# Establish a context to access db values
context = MigrationContext.configure(self.engine.connect())
current_revision = context.get_current_revision()
# Read local config file to find the current "head" revision
# config = Config()
# config.set_main_option("script_location",
# join(dirname(dirname(dirname(realpath(__file__)))), "migrations"))
script = ScriptDirectory.from_config(self.alembic_config)
head_revision = script.get_current_head()
if current_revision != head_revision:
raise SimpleMLError('''Attempting to connect to an outdated schema.
Set the parameter `upgrade=True` in the initialize method
or manually execute `alembic upgrade head` in a shell''')
def initialize(self, base_list, upgrade=False, **kwargs):
'''
Initialization method to set up database connection and inject
session manager
Raises a SimpleML error if database schema is not up to date
:param drop_tables: Bool, whether to drop existing tables in database
:param upgrade: Bool, whether to run an upgrade migration after establishing a connection
:return: None
'''
# Standard initialization
super(AlembicDatabase, self).initialize(base_list, **kwargs)
# Upgrade schema if necessary
if upgrade:
self.upgrade()
# Assert current db schema is up-to-date
self.validate_schema_version()
class Database(AlembicDatabase):
'''
SimpleML specific configuration to interact with the database
Defaults to sqlite db in filestore directory
'''
def __init__(self,
configuration_section=None,
uri=None,
database=None,
username=None,
password=None,
drivername=None,
host=None,
port=None,
query=None,
*args, **kwargs):
if configuration_section is None and uri is None and \
all([i is None for i in (database, username, password, drivername, port, query)]):
# Fill with env variable values if none are passed directly
configuration_section = DATABASE_CONF
uri = DATABASE_URI
database = DATABASE_NAME
username = DATABASE_USERNAME
password = DATABASE_PASSWORD
drivername = DATABASE_DRIVERNAME
host = DATABASE_HOST
port = DATABASE_PORT
query = DATABASE_QUERY
if configuration_section is None and uri is None and \
all([i is None for i in (database, username, password, drivername, port, query)]):
# Use default creds for a sqlite database in filestore directory if env variables are also null
LOGGER.info('No database connection specified, using default SQLite db in {}'.format(FILESTORE_DIRECTORY))
uri = 'sqlite:///{}'.format(join(FILESTORE_DIRECTORY, 'SimpleML.db'))
root_path = dirname(dirname(dirname(realpath(__file__))))
alembic_filepath = join(root_path, 'simpleml/migrations/alembic.ini')
script_location = ''
super(Database, self).__init__(
config=CONFIG, alembic_filepath=alembic_filepath, script_location=script_location,
configuration_section=configuration_section, uri=uri, database=database,
username=username, password=password, drivername=drivername,
host=host, port=port, query=query,
*args, **kwargs)
def initialize(self, base_list=None, **kwargs):
'''
Initialization method to set up database connection and inject
session manager
Raises a SimpleML error if database schema is not up to date
:param drop_tables: Bool, whether to drop existing tables in database
:param upgrade: Bool, whether to run an upgrade migration after establishing a connection
:return: None
'''
if base_list is None: # Use defaults in project
base_list = [Persistable]
super(Database, self).initialize(base_list, **kwargs)
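# Usage sketch (illustrative, not part of the library itself): connect with
# defaults and upgrade the schema. With no credentials or env variables set,
# this falls back to the local SQLite file in FILESTORE_DIRECTORY, per the
# fallback logic above.
if __name__ == '__main__':
    db = Database()
    db.initialize(upgrade=True)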
|
nilq/baby-python
|
python
|
"""
Functions and objects describing electro-optic components.
"""
from arch.block import Block
from arch.models.model import Linear, SymbolicModel
from sympy import Matrix, sqrt, exp, I, pi
import arch.port as port
class Switch2x2(Block):
"""
extinction_ratio: ratio of desired signal to undesired signal from wrong port
loss_dB: positive number of decibels of loss (0 dB -> 100% tx; 10 dB -> 10% tx)
"""
reference_prefix = "SW"
def define(self, loss_dB = 3.0, extinction_ratio=1000.0):
self.add_port(name='in0', kind=port.kind.optical, direction=port.direction.inp)
self.add_port(name='in1', kind=port.kind.optical, direction=port.direction.inp)
self.add_port(name='out0', kind=port.kind.optical, direction=port.direction.out)
self.add_port(name='out1', kind=port.kind.optical, direction=port.direction.out)
state = self.add_port(name='state', kind=port.kind.digital,
direction=port.direction.inp)
# Lagrange polynomial
s,er,tx = state,extinction_ratio,10**(-loss_dB/10)
r = (s-0)/(1-0)*(1-1/er) + (s-1)/(0-1)*(1/er)
M = sqrt(tx) * Matrix([
[sqrt(r), I*sqrt(1 - r)],
[I*sqrt(1 - r), sqrt(r)] ])
self.add_model(Linear('simple switch '+self.name, block=self, unitary_matrix=M))
class ThermoOpticPhaseShifterBasicRT(Block):
"""
Due to Dario, based on https://doi.org/10.1364/OE.27.010456
"""
reference_prefix = "TOPM"
def define(self, device_length=None, centre_wavelength=2.0E-6, ref_index_temp_func=lambda T:1.0*T, R=None):
"""
        device_length: length of the heated phase-shifting section (m)
        centre_wavelength: operating wavelength (m)
        ref_index_temp_func: effective refractive index as a function of temperature
        R: electrical resistance used in the electrical ABCD two-port model
"""
A,B,C,D = 1,-R,0,1
M = Matrix([[A,B],[C,D]])
inp = self.add_port(name='inp', kind=port.kind.optical, direction=port.direction.inp)
out = self.add_port(name='out', kind=port.kind.optical, direction=port.direction.out)
        i0 = self.add_port(name='i0', kind=port.kind.current, direction=port.direction.inp)
        v0 = self.add_port(name='v0', kind=port.kind.voltage, direction=port.direction.inp)
        i1 = self.add_port(name='i1', kind=port.kind.current, direction=port.direction.out)
        v1 = self.add_port(name='v1', kind=port.kind.voltage, direction=port.direction.out)
T = self.add_port(name='T', kind=port.kind.temperature, direction=port.direction.inp)
oes = {
out: exp(I* (2*pi*device_length/centre_wavelength)*ref_index_temp_func(T) )*inp,
v1: +A*v0 + B*i0,
i1: -C*v0 - D*i0}
self.add_model(SymbolicModel('simple phase '+self.name, block=self, out_exprs=oes))
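# Standalone sanity check of the switch transfer matrix defined above
# (illustrative; the parameter values are the Switch2x2 defaults).
if __name__ == '__main__':
    from sympy import symbols, simplify
    s = symbols('s')
    er, tx = 1000.0, 10**(-3.0/10)  # extinction_ratio=1000, loss_dB=3.0
    r = (s-0)/(1-0)*(1-1/er) + (s-1)/(0-1)*(1/er)
    M = sqrt(tx) * Matrix([[sqrt(r), I*sqrt(1 - r)], [I*sqrt(1 - r), sqrt(r)]])
    print(simplify(M.subs(s, 0)))  # state 0: power mostly routed to the cross port
    print(simplify(M.subs(s, 1)))  # state 1: power mostly routed to the bar port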
|
nilq/baby-python
|
python
|
# """
# All operations for the Electricity Spot Market
# Based on the role ClearIterativeCO2AndElectricitySpotMarketTwoCountryRole
#
# Jim Hommes - 25-3-2021
# """
# import json
# from modules.marketmodule import MarketModule
# from util.repository import Repository
#
#
# class ElectricitySpotMarketSubmitBids(MarketModule):
# """
# The class that submits all bids to the Electricity Spot Market.
# """
#
# def __init__(self, reps: Repository):
# super().__init__('COMPETES Dummy: Electricity Spot Market: Submit Bids', reps)
# reps.dbrw.stage_init_power_plant_dispatch_plan_structure()
#
# def act(self):
# # For every energy producer we will submit bids to the Capacity Market
# for energy_producer in self.reps.energy_producers.values():
#
# # For every plant owned by energyProducer
# for powerplant in self.reps.get_operational_power_plants_by_owner(energy_producer):
# market = self.reps.get_electricity_spot_market_for_plant(powerplant)
# capacity = powerplant.get_actual_nominal_capacity()
# mc = powerplant.calculate_marginal_cost_excl_co2_market_cost(self.reps,
# self.reps.current_tick)
# self.reps.create_or_update_power_plant_dispatch_plan(powerplant, energy_producer, market, capacity, mc,
# self.reps.current_tick)
#
#
# class ElectricitySpotMarketClearing(MarketModule):
# """
# The class that clears the Electricity Spot Market.
# """
#
# def __init__(self, reps: Repository):
# super().__init__('COMPETES Dummy: Electricity Spot Market: Clear Market', reps)
# reps.dbrw.stage_init_market_clearing_point_structure()
#
# def act(self):
# # Calculate and submit Market Clearing Price
# peak_load = max(json.loads(self.reps.load['NL'].parameters['ldc'].to_database())['data'].values())
# for market in self.reps.electricity_spot_markets.values():
# sorted_ppdp = self.reps.get_sorted_power_plant_dispatch_plans_by_market_and_time(market, self.reps.current_tick)
# clearing_price = 0
# total_load = 0
# for ppdp in sorted_ppdp:
# if total_load + ppdp.amount <= peak_load:
# total_load += ppdp.amount
# clearing_price = ppdp.price
# self.reps.set_power_plant_dispatch_plan_production(
# ppdp, self.reps.power_plant_dispatch_plan_status_accepted, ppdp.amount)
# elif total_load < peak_load:
# clearing_price = ppdp.price
# self.reps.set_power_plant_dispatch_plan_production(
# ppdp, self.reps.power_plant_dispatch_plan_status_partly_accepted, peak_load - total_load)
# total_load = peak_load
# else:
# self.reps.set_power_plant_dispatch_plan_production(
# ppdp, self.reps.power_plant_dispatch_plan_status_failed, 0)
#
# self.reps.create_or_update_market_clearing_point(market, clearing_price, total_load, self.reps.current_tick)
|
nilq/baby-python
|
python
|
"""
Simple integration tests on the API itself.
We make actual ajax requests to the running docker container.
"""
import os
import json
import unittest
import requests
from dotenv import load_dotenv
load_dotenv('.env')
# The URL of the running server from within the docker container
url = 'http://web:5000'
service_token = os.environ['KBASE_SECURE_CONFIG_PARAM_service_token']
os.environ['KBASE_SECURE_CONFIG_PARAM_service_token'] = '' # nosec
def make_request(ws_ref):
"""Helper to make a JSON RPC request with the given workspace ref."""
post_data = {
'params': {
'ws_ref': ws_ref,
'n_max_results': 2,
'bypass_caching': True
},
'method': 'get_homologs',
'id': 0
}
headers = {'Content-Type': 'application/json', 'Authorization': service_token}
resp = requests.post(url, data=json.dumps(post_data), headers=headers)
return resp.json()
class TestApi(unittest.TestCase):
# @unittest.skip('x')
def test_search_reads_paired(self):
"""Test a search on genome read data with paired-ends."""
reads_ref = '15/45/1'
json_resp = make_request(reads_ref)
result = json_resp['result']
self.assertTrue(len(result['distances']))
# @unittest.skip('x')
def test_search_reads_single(self):
"""Test a search on single-ended genome read data."""
reads_ref = '15/43/1'
json_resp = make_request(reads_ref)
result = json_resp['result']
self.assertTrue(len(result['distances']))
# @unittest.skip('x')
def test_search_genome(self):
"""Test a search on a Genome type."""
genome_ref = '34819/14/1'
json_resp = make_request(genome_ref)
result = json_resp['result']
self.assertTrue(len(result['distances']))
# @unittest.skip('x')
def test_search_genome_no_auth(self):
"""Test a search on a Genome type."""
genome_ref = '15792/227059/1'
post_data = {'params': {'ws_ref': genome_ref}, 'method': 'get_homologs', 'id': 0}
headers = {'Content-Type': 'application/json'}
resp = requests.post(url, data=json.dumps(post_data), headers=headers)
json_resp = resp.json()
result = json_resp['result']
self.assertTrue(len(result['distances']))
# @unittest.skip('x')
def test_search_assembly(self):
"""Test a search on an Assembly type."""
assembly_ref = '34819/10/1'
json_resp = make_request(assembly_ref)
result = json_resp['result']
self.assertTrue(len(result['distances']))
# @unittest.skip('x')
def test_search_assembly_contigset(self):
"""Test a search on an Assembly with the legacy ContigSet."""
assembly_ref = '34819/8/1'
json_resp = make_request(assembly_ref)
result = json_resp['result']
self.assertTrue(len(result['distances']))
# @unittest.skip('x')
def test_search_genome_no_assembly_ref(self):
"""Test a failed search against a Genome that has no assembly_ref or contigset_ref."""
genome_ref = '34819/5/9'
json_resp = make_request(genome_ref)
self.assertTrue('no assembly or contigset references' in json_resp['error']['message'])
# @unittest.skip('x')
def test_search_invalid_ws_id(self):
"""Test a search with an invalid workspace reference ID."""
ref = 'x/y/z'
json_resp = make_request(ref)
self.assertTrue(len(json_resp['error']['message']))
# @unittest.skip('x')
def test_search_strain(self):
ref = '34819/8/1'
json_resp = make_request(ref)
result = json_resp['result']
self.assertTrue('strain' in result['distances'][0])
# @unittest.skip('x')
def test_search_unauthorized_ws_id(self):
"""Test a search with an unauthorized workspace ref."""
ref = '/search/1/2/3'
json_resp = make_request(ref)
self.assertTrue(len(json_resp['error']['message']))
|
nilq/baby-python
|
python
|
# -*- coding: utf-8 -*-
"""
Python File Template
"""
import os
import sys
import random
import string
from onmt.keyphrase.pke.utils import compute_document_frequency
module_path = os.path.abspath(os.path.join('../'))
if module_path not in sys.path:
sys.path.append(module_path)
module_path = os.path.abspath(os.path.join('../onmt'))
if module_path not in sys.path:
sys.path.append(module_path)
from itertools import repeat
from onmt.utils.logging import init_logger
from onmt.utils.misc import split_corpus
from onmt.translate.translator import build_translator
import onmt.opts as opts
from onmt.utils.parse import ArgumentParser
from kp_gen_eval import _get_parser
import onmt.keyphrase.pke as pke
from nltk.corpus import stopwords
stoplist = stopwords.words('english')
__author__ = "Rui Meng"
__email__ = "rui.meng@pitt.edu"
def extract_deepkp(text_to_extract):
# Supervised Deep Keyphrase Model
parser = _get_parser()
config_path = '../config/translate/config-rnn-keyphrase.yml'
one2one_ckpt_path = '../models/keyphrase/meng17-one2one-kp20k-topmodels/kp20k-meng17-one2one-rnn-BS128-LR0.05-Layer1-Dim150-Emb100-Dropout0.0-Copytrue-Covfalse-Contboth-IF1_step_30000.pt'
one2seq_ckpt_path = '../models/keyphrase/meng17-one2seq-kp20k-topmodels/kp20k-meng17-verbatim_append-rnn-BS64-LR0.05-Layer1-Dim150-Emb100-Dropout0.0-Copytrue-Reusetrue-Covtrue-PEfalse-Contboth-IF1_step_50000.pt'
opt = parser.parse_args('-config %s' % (config_path))
setattr(opt, 'models', [one2one_ckpt_path])
# start generating
translator = build_translator(opt, report_score=False)
scores, predictions = translator.translate(
src=[text_to_extract],
tgt=None,
src_dir=opt.src_dir,
batch_size=opt.batch_size,
attn_debug=opt.attn_debug,
opt=opt
)
# print results
print('Paragraph:\n\t' + text_to_extract)
print('Top predictions:')
keyphrases = [kp.strip() for kp in predictions[0] if (not kp.lower().strip() in stoplist) and (kp != '<unk>')]
for kp_id, kp in enumerate(keyphrases[: min(len(keyphrases), 20)]):
print('\t%d: %s' % (kp_id + 1, kp.strip(string.punctuation)))
def extract_pke(text, method, dataset_path=None, df_path=None, top_k=10):
method = method.lower()
if method == 'tfidf':
# 0. check if DF file exists
if not os.path.exists(df_path):
# stoplist for filtering n-grams
stoplist = list(string.punctuation)
# compute df counts and store as n-stem -> weight values
compute_document_frequency(input_dir=dataset_path,
output_file=df_path,
extension='xml', # input file extension
language='en', # language of files
normalization="stemming", # use porter stemmer
stoplist=stoplist)
# 1. create a TfIdf extractor.
extractor = pke.unsupervised.TfIdf()
# 2. load the content of the document.
extractor.load_document(input=text,
language='en_core_web_sm',
normalization=None)
# 3. select {1-3}-grams not containing punctuation marks as candidates.
extractor.candidate_selection(n=3, stoplist=list(string.punctuation))
# 4. weight the candidates using a `tf` x `idf`
df = pke.load_document_frequency_file(input_file=df_path)
extractor.candidate_weighting(df=df)
# 5. get the 10-highest scored candidates as keyphrases
keyphrases = extractor.get_n_best(n=top_k)
elif method == 'yake':
stoplist = stopwords.words('english')
# 1. create a YAKE extractor.
extractor = pke.unsupervised.YAKE()
# 2. load the content of the document.
extractor.load_document(input=text,
language='en_core_web_sm',
normalization=None)
# 3. select {1-3}-grams not containing punctuation marks and not
# beginning/ending with a stopword as candidates.
extractor.candidate_selection(n=3, stoplist=stoplist)
# 4. weight the candidates using YAKE weighting scheme, a window (in
# words) for computing left/right contexts can be specified.
window = 2
use_stems = False # use stems instead of words for weighting
extractor.candidate_weighting(window=window,
stoplist=stoplist,
use_stems=use_stems)
# 5. get the 10-highest scored candidates as keyphrases.
# redundant keyphrases are removed from the output using levenshtein
# distance and a threshold.
threshold = 0.8
keyphrases = extractor.get_n_best(n=top_k, threshold=threshold)
else:
raise NotImplementedError
for kp_id, kp in enumerate(keyphrases):
print('\t%d: %s (%.4f)' % (kp_id + 1, kp[0], kp[1]))
return keyphrases
if __name__ == '__main__':
dataset_name = 'SF_Prod'
dataset_path = '../data/salesforce/%s/' % dataset_name
prod_dicts = []
for subdir, dirs, files in os.walk(dataset_path):
for file in files:
filepath = subdir + os.sep + file
text = open(filepath, 'r').readlines()
text = '\n'.join([l.strip() for l in text])
doc = {'name': file, 'path': filepath, 'text': text}
prod_dicts.append(doc)
print('Loaded #(PROD docs)=%d' % (len(prod_dicts)))
    doc_id = random.randint(0, len(prod_dicts) - 1)
doc = prod_dicts[doc_id]
text_to_extract = doc['text']
print(doc_id)
print(doc['name'])
print(text_to_extract)
extract_deepkp(text_to_extract)
extract_pke(text_to_extract, method='tfidf' , dataset_path=dataset_path,
df_path=os.path.abspath(dataset_path + '../%s.df.tsv.gz' % dataset_name))
|
nilq/baby-python
|
python
|
#!/usr/bin/env python3
# test_app.py
import unittest
from unittest.mock import patch
from snakegame import app
from snakegame.player import Player
class TestApp(unittest.TestCase):
def setUp(self):
self.name = "Bob"
self.score = 0
self.high_score_name = "John"
self.high_score = 50
self.player = Player("Bob")
self.player.high_score_player = self.high_score_name
self.player.high_score = self.high_score
def test_play_game(self):
pass
def test_play_again(self):
pass
# with patch('builtins.print') as mock_print:
# with patch('builtins.input', return_value='N') as mock_input:
# play_again("Bob")
# mock_input.assert_called_once()
# mock_print.assert_called_once_with(
# "\nThanks for playing, Bob!\n\nQuitting program...")
# with patch('builtins.input', return_value='Y') as mock_input:
# self.assertEqual(play_again("Bob"), play_game("Bob"), "IDK")
# mock_input.assert_called_once()
def test_validate_player_input(self):
pass
def test_timeout_input(self):
pass
def test_menu(self):
with patch('builtins.print') as mock_print:
app.menu()
mock_print.assert_called_once_with(
"****** SNAKE ******\n",
"Navigation Keys:",
"E - UP",
"D - DOWN",
"S - LEFT",
"F - RIGHT",
"Q - QUIT",
sep='\n'
)
def test_scoreboard(self):
with patch('builtins.print') as mock_print:
app.scoreboard(self.player)
mock_print.assert_called_once_with(
f"\nHIGH SCORE ({self.high_score_name}): {self.high_score}",
f"{self.name}'S SCORE: {self.score}\n",
sep='\n'
)
def test_keyboard_commands(self):
self.assertEqual(app.keyboard_commands("E"), [-1, 0])
self.assertEqual(app.keyboard_commands("D"), [1, 0])
self.assertEqual(app.keyboard_commands("S"), [0, -1])
self.assertEqual(app.keyboard_commands("F"), [0, 1])
self.assertEqual(app.keyboard_commands("Q"), "QUIT")
self.assertEqual(app.keyboard_commands("K"), None)
def test_clear_screen(self):
pass
def test_update_screen(self):
pass
def test_goodbye_msg(self):
with patch('builtins.print') as mock_print:
app.goodbye_msg(self.player.player_name)
mock_print.assert_called_once_with(
f"\nThanks for playing, {self.name}!\n\nQuitting program..."
)
def tearDown(self):
pass
if __name__ == "__main__":
unittest.main()
|
nilq/baby-python
|
python
|
"""Add table for datapath
Revision ID: ce8079bf4ab7
Revises: None
Create Date: 2017-08-26 21:42:40.469444
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = 'ce8079bf4ab7'
down_revision = None
branch_labels = None
depends_on = None
def upgrade():
op.create_table(
'datapath',
sa.Column('id', sa.String(64), primary_key=True),
sa.Column('host', sa.String(64), nullable=False),
sa.Column('port', sa.Integer, nullable=False))
op.create_table(
'port',
sa.Column('datapath_id', sa.String(64), sa.ForeignKey(
'datapath.id', ondelete='CASCADE'), primary_key=True),
sa.Column('mac', sa.String(64), nullable=True),
sa.Column('port', sa.String(10), nullable=False, primary_key=True),
sa.Column('subnet_id', sa.String(36), sa.ForeignKey(
'subnet.id', ondelete='CASCADE'), nullable=False))
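def downgrade():
    # Assumed inverse of upgrade(): drop the child table first because of the
    # foreign key from port to datapath.
    op.drop_table('port')
    op.drop_table('datapath')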
|
nilq/baby-python
|
python
|
"""Mock objects for testing"""
SUBMISSION_POST_DATA = {
"submission": {
"_id": "random_string_from_formio",
"data": {
"project_name": "123 Market St.",
"email": "test@test.com",
"phone": "415-867-5309",
"name": "Jenny"
}
}
}
|
nilq/baby-python
|
python
|
# Python3
import sys
import shutil
from pathlib import Path
# Get the root path to this repo
repo_dir = Path(__file__).parent
# Get the os dependant kits path
if sys.platform == "win32":
    install_path = Path(r"~\AppData\Roaming\Luxology\Kits").expanduser()
elif sys.platform == "darwin":
    install_path = Path("~/Library/Application Support/Luxology/Kits").expanduser()
else:
    sys.exit(f"Unsupported platform for a modo kit install: {sys.platform}")
# Get the name of the kits directory
kit_name = "community_hub"
# Get the development kit.
kit_path = repo_dir / kit_name
# Get the modo install path for kit
modo_kit_path = install_path / kit_name
# If the Kit exists in the modo kit path, remove it before copying the new one.
if modo_kit_path.exists():
shutil.rmtree(modo_kit_path)
# Copy the development kit to the modo kit path.
shutil.copytree(src=kit_path, dst=modo_kit_path)
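# Simple confirmation so the developer can see where the kit was copied.
print(f"Copied {kit_name} -> {modo_kit_path}")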
|
nilq/baby-python
|
python
|
from sqlalchemy.ext.asyncio import AsyncSession
from app.crud import dialogue_crud
from app.service import dialogue_exist
async def get_all_dialogues_for_user(db: AsyncSession, user_id: int):
"""
Get all dialogues for user
:param db: DB
:type db: AsyncSession
:param user_id: User ID
:type user_id: int
:return: Dialogues
"""
dialogues = await dialogue_crud.get_for_user(db, user_id)
return (dialogue.__dict__ for dialogue in dialogues)
@dialogue_exist('pk', 'user_id')
async def get_dialogue(*, db: AsyncSession, user_id: int, pk: int) -> dict:
"""
Get dialogue
:param db: DB
:type db: AsyncSession
:param user_id: User ID
:type user_id: int
:param pk: Dialogue ID
:type pk: int
:return: Dialogue
:rtype: dict
"""
dialogue = await dialogue_crud.get(db, id=pk)
return dialogue.__dict__
|
nilq/baby-python
|
python
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
test_compartmentmodels
----------------------------------
Tests for `compartmentmodels` module.
The tests for the individual models are in separate files.
"""
import pytest
#import tempfile
import os
import numpy as np
from compartmentmodels.compartmentmodels import loaddata, savedata
def test_load_and_save(tmpdir):
time = np.linspace(0,100)
curve= np.random.randn(len(time))
aif = np.random.randn(len(time))
filename = os.path.join(str(tmpdir), 'tempfile.tca')
# filename = tempfile.NamedTemporaryFile()
    print(filename)
savedata(filename, time, curve, aif)
t, c, a = loaddata(filename)
assert np.all(np.equal(time, t))
assert np.all(np.equal(curve, c))
assert np.all(np.equal(aif, a))
|
nilq/baby-python
|
python
|
from typing import Optional
from typing import Union
import attr
@attr.s
class Attachment:
content_type = attr.ib(type=str)
id = attr.ib(type=str)
size = attr.ib(type=int)
stored_filename = attr.ib(type=str)
@attr.s
class Reaction:
emoji = attr.ib(type=str)
target_author = attr.ib(type=Union[str, dict])
target_sent_timestamp = attr.ib(type=int)
remove = attr.ib(type=bool, default=False)
@attr.s
class Payment:
note = attr.ib(type=str)
receipt = attr.ib(type=str)
@attr.s
class Message:
username = attr.ib(type=str)
source = attr.ib(type=Union[str, dict])
text = attr.ib(type=str)
source_device = attr.ib(type=int, default=0)
timestamp = attr.ib(type=int, default=None)
expiration_secs = attr.ib(type=int, default=0)
is_receipt = attr.ib(type=bool, default=False)
    attachments = attr.ib(type=list, factory=list)
    quote = attr.ib(type=str, default=None)
    group = attr.ib(type=dict, factory=dict)
    group_v2 = attr.ib(type=dict, factory=dict)
reaction = attr.ib(type=Optional[Reaction], default=None)
payment = attr.ib(type=Optional[Payment], default=None)
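# Minimal construction sketch (hypothetical values) exercising the attrs
# classes above; note attachments/group use factories so instances don't
# share mutable state.
if __name__ == '__main__':
    message = Message(username='+15550100', source='+15550199', text='hello')
    message.reaction = Reaction(emoji='👍', target_author='+15550199',
                                target_sent_timestamp=1600000000)
    print(message)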
|
nilq/baby-python
|
python
|
#!/usr/bin/python
# coding: utf-8
# Copyright 2018 AstroLab Software
# Author: Chris Arnault
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
The indextry.py script has to be present on the <host> machine
where the minimal HTML server has been activated as
> python server.py
Then, call in a web navigator the URL
http://<host>:24701/indextry.py
https://python-django.dev/page-python-serveur-web-creer-rapidement
"""
import cgi
from pylivy.session import *
from pylivy.client import *
"""
Demo of using the pylivy library
https://pylivy.readthedocs.io/en/latest/index.html
"""
# Initialize post variables
class Variable:
def __init__(self, name, type="int"):
self.name = name
self.type = type
self.reset()
def read(self):
try:
if self.type == "int":
self.value = int(form.getvalue(self.name))
else:
value = form.getvalue(self.name)
if value is None:
value = ""
self.value = value
pass
except:
self.reset()
pass
def to_form(self):
out = """<input type="hidden" name="{}" value="{}" />""".format(self.name, self.value)
return out
def debug(self):
out = " {} = {}\n".format(self.name, self.value)
return out
def reset(self):
if self.type == "int":
self.value = -1
else:
self.value = ""
pass
def set(self, value):
if self.type == "int":
try:
self.value = int(value)
except:
self.value = -1
else:
self.value = value
def is_set(self):
if self.type == "int":
try:
if self.value >= 0:
return True
except:
pass
else:
try:
if len(self.value) > 0:
return True
except:
pass
return False
def incr(self):
if self.type == "int":
self.value += 1
def above(self, threshold):
if self.type == "int":
try:
if self.value > threshold:
return True
except:
pass
return False
class VariableSet:
def __init__(self, names, str_names):
self.base = dict()
type = "int"
for name in names:
if name in str_names:
type = "str"
else:
type = "int"
self.base[name] = Variable(name, type)
def variable(self, name):
return self.base[name]
def read(self):
for v in self.base:
self.base[v].read()
def to_form(self):
out = ""
for v in self.base:
out += self.base[v].to_form()
return out
def debug(self):
out = ""
for v in self.base:
out += self.base[v].debug()
return out
# ======================================================
LIVY_URL = "http://vm-75222.lal.in2p3.fr:21111"
form = cgi.FieldStorage()
print("Content-type: text/html; charset=utf-8\n")
client = LivyClient(LIVY_URL)
# init data
variables = VariableSet(["start",
"simul",
"change_simul",
"livy_session",
"waiting_session",
"waiting_statement",
"livy_statement",
"new_statement",
"kill_session",
"result"], ["new_statement", "result"])
start = variables.base["start"]
simul = variables.base["simul"]
change_simul = variables.base["change_simul"]
livy_session = variables.base["livy_session"]
waiting_session = variables.base["waiting_session"]
waiting_statement = variables.base["waiting_statement"]
livy_statement = variables.base["livy_statement"]
kill_session = variables.base["kill_session"]
new_statement = variables.base["new_statement"]
result = variables.base["result"]
variables.read()
if not start.is_set():
simul.set(1)
start.set(1)
# ======================================================
html = """
<!DOCTYPE html>
<head>
<link rel="stylesheet" type="text/css" href="css/finkstyle.css">
<title>Mon programme test</title>
</head>
<body>
<div class="hero-image">
<div class="hero-text">
<h1 style="font-size:50px">Fink</h1>
<h3>Alert dataset monitor</h3>
<div class="topnav"> """
# manage Livy simulation
will_change_simul = change_simul.is_set()
change_simul.reset()
print("<br>change simul = {}".format(will_change_simul))
if will_change_simul:
if simul.is_set():
html += """
<form action="/indextry.py" method="post" name="simul">
<br> Currently using real Livy"""
simul.reset()
html += variables.to_form()
html += """<button type="submit">Simul Livy</button>
</form>
"""
else:
html += """
<form action="/indextry.py" method="post">
<br> Currently simulate Livy"""
simul.set(1)
html += variables.to_form()
html += """<button type="submit">Use real Livy</button>
</form>
"""
else:
if simul.is_set():
html += """
<form action="/indextry.py" method="post">
<br> Currently simulate Livy"""
change_simul.set(1)
html += variables.to_form()
html += """<button type="submit">Use real Livy</button>
</form>
"""
else:
html += """
<form action="/indextry.py" method="post" name="simul">
<br> Currently using real Livy"""
change_simul.set(1)
html += variables.to_form()
html += """<button type="submit">Simul Livy</button>
</form>
"""
change_simul.reset()
# Manage Livy session & Spark statements
html += """
<form action="/indextry.py" method="post" name="operations">
"""
if simul.is_set():
if waiting_session.above(5):
print("<br> session is now idle")
waiting_session.reset()
waiting_statement.reset()
livy_statement.reset()
livy_session.set(1)
if waiting_statement.above(5):
print("<br> statement just finished")
waiting_session.reset()
waiting_statement.reset()
livy_statement.incr()
# debugging
# print("<br>")
# print("Keys = [", ",".join(form.keys()), "]")
print(variables.debug())
"""
Command interface
- select Livy simulation
- open session & wait for idle
- start statement & wait for completion
"""
if kill_session.is_set():
id = livy_session.value
try:
client.delete_session(id)
except:
print("error killing session ", id)
livy_session.reset()
waiting_session.reset()
kill_session.reset()
if livy_session.is_set():
# statement management
if not waiting_statement.is_set():
html += """<br>session is idle: we may start a statement<br>"""
waiting_statement.set(0)
html += variables.to_form()
html += """
Enter a Spark statement
<input type="text" name="new_statement" value="{}" />
<input type="text" name="result" value="{}" />
<button type="submit">Run</button>
""".format(new_statement.value, result.value)
else:
html += """<br>session is idle, we do wait a statement to complete<br>"""
waiting_statement.incr()
id = livy_session.value
s = client.get_session(id)
if not livy_statement.is_set():
st = client.create_statement(s.session_id, new_statement.value)
livy_statement.set(st.statement_id)
else:
st = client.get_statement(s.session_id, livy_statement.value)
if st.state == StatementState.AVAILABLE:
waiting_statement.reset()
result.set(st.output.text)
print("<br>", result.value)
livy_statement.reset()
html += variables.to_form()
html += """<button type="submit">waiting statement to complete</button>"""
else:
# session management
if not waiting_session.is_set():
html += """<br>No session<br>"""
waiting_session.set(0)
print(waiting_session.debug())
waiting_statement.reset()
html += variables.to_form()
html += """<button type="submit">Open a session</button>"""
else:
# we have requested a new session thus waiting_session is set
if simul.is_set():
waiting_session.incr()
else:
if not livy_session.is_set():
print("Create a session ")
s = client.create_session(SessionKind.PYSPARK)
print("<br> session {} <br>".format(s.session_id))
livy_session.set(s.session_id)
# we test if the session is already idle
id = livy_session.value
s = client.get_session(id)
if s.state == SessionState.IDLE:
print("<br> session is now idle")
waiting_session.reset()
waiting_statement.reset()
livy_statement.reset()
new_statement.reset()
html += """<br>Waiting session to become idle<br>"""
html += variables.to_form()
html += """<button type="submit">waiting session</button>"""
html += """</form>"""
if livy_session.is_set():
html += """
<form action="/indextry.py" method="post" name="operations">"""
kill_session.set(1)
html += variables.to_form()
html += """
<button type="submit">Delete the session</button>
</form>
"""
html += """
</div>
<p>© AstroLab Software 2018-2019</p>
</div>
</div>
</body>
</html>
"""
print(html)
|
nilq/baby-python
|
python
|
from ast import literal_eval
from database.model_people import ModelPeople
from database.model_planet import ModelPlanet
from database import base
import logging
import sys
# Load logging configuration
log = logging.getLogger(__name__)
logging.basicConfig(
stream=sys.stdout,
level=logging.INFO,
format='%(asctime)s - %(name)s - %(levelname)s - %(message)s')
if __name__ == '__main__':
log.info('Create database {}'.format(base.db_name))
base.Base.metadata.create_all(base.engine)
log.info('Insert Planet data in database')
with open('database/data/planet.json', 'r') as file:
data = literal_eval(file.read())
for record in data:
planet = ModelPlanet(**record)
base.db_session.add(planet)
base.db_session.commit()
log.info('Insert People data in database')
with open('database/data/people.json', 'r') as file:
data = literal_eval(file.read())
        for record in data:
            people = ModelPeople(**record)
            base.db_session.add(people)
base.db_session.commit()
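    # Assuming base.db_session is a standard SQLAlchemy session, log counts
    # as a quick sanity check of what was just inserted.
    log.info('Inserted {} planets and {} people'.format(
        base.db_session.query(ModelPlanet).count(),
        base.db_session.query(ModelPeople).count()))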
|
nilq/baby-python
|
python
|
import numpy.random as rd
import numpy as np
from display import *
from solver import *
def init_list(N):
balls = []
r = 10.
v = 10.
x = 400./float(N+1)
for i in range(N):
m = r*(1.-0.05*i)
vv = [-1.*v, 1.*v]
vx = [float(i+1)*x, float(i+1)*x]
balls.append(Ball(m, m, vx, vv))
return balls
if __name__ == "__main__":
balls = init_list(10)
size = 400.
step = 0.02
Display(balls, step, size)
|
nilq/baby-python
|
python
|
'''
source-directory /etc/network/interfaces.d
auto lo
iface lo inet loopback
auto eth0
iface eth0 inet manual
allow-hotplug ppp0
iface ppp0 inet wvdial
post-up echo "cellular (ppp0) is online"
allow-hotplug wlan0
iface wlan0 inet manual
wpa-roam /etc/wpa_supplicant/wpa_supplicant.conf
allow-hotplug wlan1
iface wlan1 inet manual
wpa-roam /etc/wpa_supplicant/wpa_supplicant.conf
'''
import re
import functools
def _expand_wildcard(*rng):
def outer(func):
@functools.wraps(func)
def inner(i='*', *a, **kw):
return (
'\n'.join(func(i, *a, **kw) for i in range(*rng))
if i == '*' else func(i, *a, **kw))
return inner
return outer
# interfaces
def source_dir(fname='/etc/network/interfaces.d'):
return 'source-directory {}'.format(fname)
def lo(**kw):
return _iface('lo', method='loopback', **kw)
@_expand_wildcard(2)
def eth(i=0, **kw):
return _iface('eth{}'.format(i), **kw)
@_expand_wildcard(2)
def wlan(i=0, wpa='/etc/wpa_supplicant/wpa_supplicant.conf', roam=True, **kw):
kw['wpa_{}'.format('roam' if roam else 'conf')] = wpa
return _iface('wlan{}'.format(i), hotplug=True, **kw)
@_expand_wildcard(2)
def ppp(i=0, method='wvdial', **kw):
name = 'ppp{}'.format(i)
return _iface(
name, hotplug=True, method=method,
post_up='echo "cellular ({}) is online"'.format(name), **kw)
IFACES = {'wlan': wlan, 'eth': eth, 'ppp': ppp}
def iface(name, *a, **kw):
if callable(name):
return name(*a, **kw)
matches = re.search(r'([A-z]+)([\d\*]*)', name)
name, i = matches.groups()
return IFACES[name.lower()](i or '*', *a, **kw)
# utils
def _iface(name, method='manual', hotplug=False, static=None, **kw):
return '''
{allow} {name}
iface {name} inet {method}
{extra}
'''.format(
name=name, allow='allow-hotplug' if hotplug else 'auto', method=method,
extra='\n'.join(l for k, v in kw.items() for l in _cfg_lines(k, v)))
def _cfg_lines(name, value):
name = name.replace('_', '-')
for v in value if isinstance(value, (list, tuple)) else [value]:
yield '{} {}'.format(name, v)
# config formats
def build_file(*ifaces):
return '\n'.join([source_dir(), lo()] + [
ifc() if callable(ifc) else ifc for ifc in ifaces
])
def default():
return build_file(eth, ppp, wlan)
def from_config(config=None):
    # fall back to a default config when none is supplied
    config = config or [
        {'interface': 'wlan*', 'ssids': 's0nycL1f3l1ne'},
        {'interface': 'eth*'},
        {'interface': 'ppp*'},
        {'interface': 'wlan*'},
    ]
return build_file(*(
iface(c['interface'], **c.get('etc', {}))
for c in config
))
if __name__ == '__main__':
import fire
fire.Fire()
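    # Example invocations (assumed module filename): `python interfaces.py default`
    # emits a complete /etc/network/interfaces file; `python interfaces.py wlan 0`
    # emits a single stanza.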
|
nilq/baby-python
|
python
|
import torch
from PIL import Image
import numpy as np
from torchvision import datasets, models, transforms
import os
import glob
from models import DoveNetG
transformsC = transforms.Compose([transforms.Resize((512, 512)), transforms.ToTensor(), transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))])
transformsG = transforms.Compose([transforms.Resize((512, 512)), transforms.ToTensor(), transforms.Normalize((0.5, ), (0.5, ))])
# model_dir = '/content/drive/MyDrive/TibaMe/Phantom Captcher/DoveNet/saved_models/latest_net_G.pth'
model_dir = 'pth/DoveNetG.pth'
print("Loading GeneCompressedNet...")
net = DoveNetG()
# model = init_net(model, gpu_ids=[0])
net.load_state_dict(torch.load(model_dir))
net.eval()  # inference mode: freezes batch-norm statistics and disables dropout
def run(img_, mask_):
    # PIL's Image.size is (width, height)
    width, height = img_.size
    # torchvision's Resize expects (height, width)
    transformsC = transforms.Compose([transforms.Resize((height, width)), transforms.ToTensor(), transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))])
    transformsG = transforms.Compose([transforms.Resize((height, width)), transforms.ToTensor(), transforms.Normalize((0.5, ), (0.5, ))])
img_ = transformsC(img_)
mask_ = transformsG(mask_)
inputs = torch.cat([img_,mask_],0)
inputs = torch.unsqueeze(inputs, 0)
with torch.no_grad():
output = net(inputs)
im_numpy = output.data[0].cpu().float().numpy()
im_numpy = (np.transpose(im_numpy, (1, 2, 0)) + 1) / 2.0 * 255.0
im_numpy = im_numpy.astype(np.uint8)
im = Image.fromarray(im_numpy).resize((width, height)).convert("RGB")
return im
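# Illustrative call (hypothetical file paths): a composite image plus its
# binary foreground mask in, the harmonized composite out.
if __name__ == '__main__':
    comp = Image.open('examples/composite.jpg').convert('RGB')
    mask = Image.open('examples/mask.png').convert('L')
    run(comp, mask).save('examples/harmonized.jpg')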
|
nilq/baby-python
|
python
|
"""
Desafio 011
Problema: Faça um programa que leia a largura e a altura de uma
parede em metros, calcule a sua área e a quantidade de
tinta necessária para pintá-la, sabendo que cada litro
de tinta pinta uma área de 2 metros quadrados"""
l = float(input('Digite a largura: '))
a = float(input('Digite a altura: '))
ar = l * a
print(f'Sua parede tem a dimensão de {l}x{a} e sua área é de: {ar}m².')
lt = ar/2
print(f'Você precisa de: {lt}l de tinta.')
|
nilq/baby-python
|
python
|
import torch
import torch.nn as nn
class FactorizedReduce(nn.Module):
"""
Reduce feature map size by factorized pointwise(stride=2).
"""
def __init__(self, C_in, C_out, affine=True):
super().__init__()
self.relu = nn.ReLU()
self.conv1 = nn.Conv2d(C_in, C_out // 2, 1, stride=2, padding=0, bias=False)
self.conv2 = nn.Conv2d(C_in, C_out // 2, 1, stride=2, padding=0, bias=False)
self.bn = nn.BatchNorm2d(C_out, affine=affine)
def forward(self, x):
x = self.relu(x)
z1 = self.conv1(x)
z2 = self.conv2(x[:, :, 1:, 1:])
out = torch.cat([z1, z2], dim=1)
out = self.bn(out)
return out
a = torch.rand(5, 10, 6, 6)
l = FactorizedReduce(10, 20)
b = l(a)
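# Sanity check: the stride-2 reduction halves the 6x6 spatial dims, and the
# two half-width convolutions concatenate to double the channel count.
print(b.shape)  # expected: torch.Size([5, 20, 3, 3])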
|
nilq/baby-python
|
python
|
from typing import Union, List, Optional
from pyspark.sql.types import StructType, StructField, StringType, ArrayType, DataType
# This file is auto-generated by generate_schema so do not edit manually
# noinspection PyPep8Naming
class DetectedIssueSchema:
"""
Indicates an actual or potential clinical issue with or between one or more
active or proposed clinical actions for a patient; e.g. Drug-drug interaction,
Ineffective treatment frequency, Procedure-condition conflict, etc.
"""
# noinspection PyDefaultArgument
@staticmethod
def get_schema(
max_nesting_depth: Optional[int] = 6,
nesting_depth: int = 0,
nesting_list: List[str] = [],
max_recursion_limit: Optional[int] = 2,
include_extension: Optional[bool] = False,
extension_fields: Optional[List[str]] = [
"valueBoolean",
"valueCode",
"valueDate",
"valueDateTime",
"valueDecimal",
"valueId",
"valueInteger",
"valuePositiveInt",
"valueString",
"valueTime",
"valueUnsignedInt",
"valueUri",
"valueQuantity",
],
extension_depth: int = 0,
max_extension_depth: Optional[int] = 2,
) -> Union[StructType, DataType]:
"""
Indicates an actual or potential clinical issue with or between one or more
active or proposed clinical actions for a patient; e.g. Drug-drug interaction,
Ineffective treatment frequency, Procedure-condition conflict, etc.
id: The logical id of the resource, as used in the URL for the resource. Once
assigned, this value never changes.
extension: May be used to represent additional information that is not part of the basic
definition of the resource. In order to make the use of extensions safe and
manageable, there is a strict set of governance applied to the definition and
use of extensions. Though any implementer is allowed to define an extension,
there is a set of requirements that SHALL be met as part of the definition of
the extension.
meta: The metadata about the resource. This is content that is maintained by the
infrastructure. Changes to the content may not always be associated with
version changes to the resource.
implicitRules: A reference to a set of rules that were followed when the resource was
constructed, and which must be understood when processing the content.
language: The base language in which the resource is written.
text: A human-readable narrative that contains a summary of the resource, and may be
used to represent the content of the resource to a human. The narrative need
not encode all the structured data, but is required to contain sufficient
detail to make it "clinically safe" for a human to just read the narrative.
Resource definitions may define what content should be represented in the
narrative to ensure clinical safety.
contained: These resources do not have an independent existence apart from the resource
that contains them - they cannot be identified independently, and nor can they
have their own independent transaction scope.
resourceType: This is a DetectedIssue resource
identifier: Business identifier associated with the detected issue record.
status: Indicates the status of the detected issue.
category: Identifies the general type of issue identified.
severity: Indicates the degree of importance associated with the identified issue based
on the potential impact on the patient.
patient: Indicates the patient whose record the detected issue is associated with.
date: The date or date-time when the detected issue was initially identified.
author: Individual or device responsible for the issue being raised. For example, a
decision support application or a pharmacist conducting a medication review.
implicated: Indicates the resource representing the current activity or proposed activity
that is potentially problematic.
detail: A textual explanation of the detected issue.
reference: The literature, knowledge-base or similar reference that describes the
propensity for the detected issue identified.
mitigation: Indicates an action that has been taken or is committed to to reduce or
eliminate the likelihood of the risk identified by the detected issue from
manifesting. Can also reflect an observation of known mitigating factors that
may reduce/eliminate the need for any action.
"""
from spark_fhir_schemas.stu3.complex_types.extension import ExtensionSchema
from spark_fhir_schemas.stu3.complex_types.meta import MetaSchema
from spark_fhir_schemas.stu3.complex_types.narrative import NarrativeSchema
from spark_fhir_schemas.stu3.simple_types.resourcelist import ResourceListSchema
from spark_fhir_schemas.stu3.complex_types.identifier import IdentifierSchema
from spark_fhir_schemas.stu3.complex_types.codeableconcept import (
CodeableConceptSchema,
)
from spark_fhir_schemas.stu3.complex_types.reference import ReferenceSchema
from spark_fhir_schemas.stu3.complex_types.detectedissue_mitigation import (
DetectedIssue_MitigationSchema,
)
if (
max_recursion_limit
and nesting_list.count("DetectedIssue") >= max_recursion_limit
) or (max_nesting_depth and nesting_depth >= max_nesting_depth):
return StructType([StructField("id", StringType(), True)])
# add my name to recursion list for later
my_nesting_list: List[str] = nesting_list + ["DetectedIssue"]
schema = StructType(
[
# The logical id of the resource, as used in the URL for the resource. Once
# assigned, this value never changes.
StructField("id", StringType(), True),
# May be used to represent additional information that is not part of the basic
# definition of the resource. In order to make the use of extensions safe and
# manageable, there is a strict set of governance applied to the definition and
# use of extensions. Though any implementer is allowed to define an extension,
# there is a set of requirements that SHALL be met as part of the definition of
# the extension.
StructField(
"extension",
ArrayType(
ExtensionSchema.get_schema(
max_nesting_depth=max_nesting_depth,
nesting_depth=nesting_depth + 1,
nesting_list=my_nesting_list,
max_recursion_limit=max_recursion_limit,
include_extension=include_extension,
extension_fields=extension_fields,
extension_depth=extension_depth,
max_extension_depth=max_extension_depth,
)
),
True,
),
# The metadata about the resource. This is content that is maintained by the
# infrastructure. Changes to the content may not always be associated with
# version changes to the resource.
StructField(
"meta",
MetaSchema.get_schema(
max_nesting_depth=max_nesting_depth,
nesting_depth=nesting_depth + 1,
nesting_list=my_nesting_list,
max_recursion_limit=max_recursion_limit,
include_extension=include_extension,
extension_fields=extension_fields,
extension_depth=extension_depth + 1,
max_extension_depth=max_extension_depth,
),
True,
),
# A reference to a set of rules that were followed when the resource was
# constructed, and which must be understood when processing the content.
StructField("implicitRules", StringType(), True),
# The base language in which the resource is written.
StructField("language", StringType(), True),
# A human-readable narrative that contains a summary of the resource, and may be
# used to represent the content of the resource to a human. The narrative need
# not encode all the structured data, but is required to contain sufficient
# detail to make it "clinically safe" for a human to just read the narrative.
# Resource definitions may define what content should be represented in the
# narrative to ensure clinical safety.
StructField(
"text",
NarrativeSchema.get_schema(
max_nesting_depth=max_nesting_depth,
nesting_depth=nesting_depth + 1,
nesting_list=my_nesting_list,
max_recursion_limit=max_recursion_limit,
include_extension=include_extension,
extension_fields=extension_fields,
extension_depth=extension_depth + 1,
max_extension_depth=max_extension_depth,
),
True,
),
# These resources do not have an independent existence apart from the resource
# that contains them - they cannot be identified independently, and nor can they
# have their own independent transaction scope.
StructField(
"contained",
ArrayType(
ResourceListSchema.get_schema(
max_nesting_depth=max_nesting_depth,
nesting_depth=nesting_depth + 1,
nesting_list=my_nesting_list,
max_recursion_limit=max_recursion_limit,
include_extension=include_extension,
extension_fields=extension_fields,
extension_depth=extension_depth,
max_extension_depth=max_extension_depth,
)
),
True,
),
# This is a DetectedIssue resource
StructField("resourceType", StringType(), True),
# Business identifier associated with the detected issue record.
StructField(
"identifier",
IdentifierSchema.get_schema(
max_nesting_depth=max_nesting_depth,
nesting_depth=nesting_depth + 1,
nesting_list=my_nesting_list,
max_recursion_limit=max_recursion_limit,
include_extension=include_extension,
extension_fields=extension_fields,
extension_depth=extension_depth + 1,
max_extension_depth=max_extension_depth,
),
True,
),
# Indicates the status of the detected issue.
StructField("status", StringType(), True),
# Identifies the general type of issue identified.
StructField(
"category",
CodeableConceptSchema.get_schema(
max_nesting_depth=max_nesting_depth,
nesting_depth=nesting_depth + 1,
nesting_list=my_nesting_list,
max_recursion_limit=max_recursion_limit,
include_extension=include_extension,
extension_fields=extension_fields,
extension_depth=extension_depth + 1,
max_extension_depth=max_extension_depth,
),
True,
),
# Indicates the degree of importance associated with the identified issue based
# on the potential impact on the patient.
StructField("severity", StringType(), True),
# Indicates the patient whose record the detected issue is associated with.
StructField(
"patient",
ReferenceSchema.get_schema(
max_nesting_depth=max_nesting_depth,
nesting_depth=nesting_depth + 1,
nesting_list=my_nesting_list,
max_recursion_limit=max_recursion_limit,
include_extension=include_extension,
extension_fields=extension_fields,
extension_depth=extension_depth + 1,
max_extension_depth=max_extension_depth,
),
True,
),
# The date or date-time when the detected issue was initially identified.
StructField("date", StringType(), True),
# Individual or device responsible for the issue being raised. For example, a
# decision support application or a pharmacist conducting a medication review.
StructField(
"author",
ReferenceSchema.get_schema(
max_nesting_depth=max_nesting_depth,
nesting_depth=nesting_depth + 1,
nesting_list=my_nesting_list,
max_recursion_limit=max_recursion_limit,
include_extension=include_extension,
extension_fields=extension_fields,
extension_depth=extension_depth + 1,
max_extension_depth=max_extension_depth,
),
True,
),
# Indicates the resource representing the current activity or proposed activity
# that is potentially problematic.
StructField(
"implicated",
ArrayType(
ReferenceSchema.get_schema(
max_nesting_depth=max_nesting_depth,
nesting_depth=nesting_depth + 1,
nesting_list=my_nesting_list,
max_recursion_limit=max_recursion_limit,
include_extension=include_extension,
extension_fields=extension_fields,
extension_depth=extension_depth,
max_extension_depth=max_extension_depth,
)
),
True,
),
# A textual explanation of the detected issue.
StructField("detail", StringType(), True),
# The literature, knowledge-base or similar reference that describes the
# propensity for the detected issue identified.
StructField("reference", StringType(), True),
# Indicates an action that has been taken or is committed to to reduce or
# eliminate the likelihood of the risk identified by the detected issue from
# manifesting. Can also reflect an observation of known mitigating factors that
# may reduce/eliminate the need for any action.
StructField(
"mitigation",
ArrayType(
DetectedIssue_MitigationSchema.get_schema(
max_nesting_depth=max_nesting_depth,
nesting_depth=nesting_depth + 1,
nesting_list=my_nesting_list,
max_recursion_limit=max_recursion_limit,
include_extension=include_extension,
extension_fields=extension_fields,
extension_depth=extension_depth,
max_extension_depth=max_extension_depth,
)
),
True,
),
]
)
if not include_extension:
schema.fields = [
c
if c.name != "extension"
else StructField("extension", StringType(), True)
for c in schema.fields
]
return schema
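# Illustrative smoke test (requires a pyspark installation): materialize the
# schema with default limits and count its top-level fields.
if __name__ == "__main__":
    print(len(DetectedIssueSchema.get_schema().fields), "top-level fields")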
|
nilq/baby-python
|
python
|
# simple no arg function
def simple_function():
print 'Hello, function!'
simple_function()
# simple function with argument
def fib(n):
a, b = 0, 1
while a < n:
print a,
a, b = b, a+b
fib(10)
print ''
# example of using documentation string (so-called docstring)
def other_function():
"""Simple gibbrish print statement"""
print 'Hello'
other_function()
print other_function.__doc__
# functions can be assigned to variables
f = simple_function
f()
# return values with return statement
def fib_ret(n):
result = []
a, b = 0, 1
while a < n:
result.append(a)
a, b = b, a+b
return result
print fib_ret(20)
# default values in function
def default_args_fun(a=1, b=2):
print a, b
default_args_fun()
default_args_fun(10)
default_args_fun(100, 1000)
# keyword argument notation
# keyword arguments goes after positional arguments
default_args_fun(b=1000)
# *[name] argument contains positional arguments
def positional_arguments(a=1,b=2, *arguments):
print str(arguments)
positional_arguments(1,2)
positional_arguments(1,2,3,4)
# **[name] argument contains keyword arguments
def keyword_arguments(a,b, **arguments):
print str(arguments)
keyword_arguments(10,20)
keyword_arguments(10,20, aa=1, bb=2)
# unpacking arguments
# When a function requires e.g. three arguments and we have them all in one list (a list with 3 elements), we can use the "unpack" syntax
def unpack_function(a,b):
print a,b
args = [1,2]
unpack_function(*args)
# We can unpack entries of a dict as keyword arguments
args_map = {"a":1, "b":2}
unpack_function(**args_map)
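# A short combined sketch (added for illustration, same Python 2 style as above):
# *args and **kwargs together, with both kinds of unpacking at the call site
def combined_arguments(a, *args, **kwargs):
    print a, str(args), str(kwargs)
combined_arguments(1, 2, 3, x=4)
combined_arguments(*[1, 2, 3], **{"x": 4})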
|
nilq/baby-python
|
python
|
import pytest
from pytest import raises
from vyper import compiler
from vyper.exceptions import InvalidType, UnknownType
fail_list = [
"""
x: bat
""",
"""
x: HashMap[int, int128]
""",
"""
x: [bar, baz]
""",
"""
x: [bar(int128), baz(baffle)]
""",
"""
struct A:
b: B
struct B:
a: A
""",
]
@pytest.mark.parametrize("bad_code", fail_list)
def test_unknown_type_exception(bad_code):
with raises(UnknownType):
compiler.compile_code(bad_code)
invalid_list = [
"""
@public
def foo():
raw_log(b"cow", b"dog")
""",
# Must be a literal string.
"""
@public
def mint(_to: address, _value: uint256):
assert msg.sender == self,minter
""",
# Raise reason must be string
"""
@public
def mint(_to: address, _value: uint256):
raise 1
""",
"""
x: int128[3.5]
""",
]
@pytest.mark.parametrize("bad_code", invalid_list)
def test_invalid_type_exception(bad_code):
with raises(InvalidType):
compiler.compile_code(bad_code)
|
nilq/baby-python
|
python
|
import os, sys
import numpy as np
from env_handler import EnvHandler
from q_function import Q
from logger import Logger
from agent import Agent
from action_selectors.mbie_eb import MBIE_EB
from action_selectors.epsilon_greedy import EpsilonGreedy
from action_selectors.boltzmann import Boltzmann
from action_selectors.ucb_1 import UCB_1
from action_selectors.controlability import Controlability
from action_selectors.vdbe import VDBE
env_handler = EnvHandler()
args = sys.argv[1:]
episodes = 1
steps = 200
learning_rate = 0.1
discount_factor = 0.9
env_names = ['Acrobot-v1', 'CartPole-v1', 'MountainCar-v0', 'Pendulum-v0', \
'Copy-v0', 'DuplicatedInput-v0', 'RepeatCopy-v0', 'Reverse-v0', 'ReversedAddition-v0', 'ReversedAddition3-v0', \
'Blackjack-v0', 'Roulette-v0', 'FrozenLake-v0', 'FrozenLake8x8-v0', 'NChain-v0', 'Taxi-v3']
epsilon = 1
omega = 0.5
betas = [0.05, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9]
epsilon_disc = [0.9999, 0.999, 0.99, 0.9, 0.85, 0.8, 0.75, 0.7, 0.6, 0.5]
temps = [1000000, 500000, 100000, 10000, 5000, 1000]
deltas = [0.05, 0.1, 0.25, 0.5, 0.75, 1, 2, 5, 10, 25]
c_vals = [0.1, 0.5, 1, 5, 10, 25, 50, 100, 250, 500]
seeds = [101, 100, 99, 98, 97]
action_selector_name = args[0]
if action_selector_name == 'epsilon-greedy':
vals = epsilon_disc
elif action_selector_name == 'boltzmann':
vals = temps
elif action_selector_name == 'ucb-1':
vals = c_vals
elif action_selector_name == 'vdbe':
vals = deltas
elif action_selector_name == 'controlability' or action_selector_name == 'mbie-eb':
vals = betas
base_dir = args[1]
for env_name in env_names:
print(env_name)
env = env_handler.get_env(env_name)
q_function = Q(env.get_total_states(), env.get_total_actions(), learning_rate, discount_factor)
log_dir = base_dir + 'data/' + env_name + '/' + action_selector_name
for val in vals:
val_dir = log_dir + '/' + str(val)
os.makedirs(val_dir + '/q_function', exist_ok=True)
os.makedirs(val_dir + '/training-data', exist_ok=True)
print('EDF = ' + str(val))
for seed in seeds:
print('Seed = ' + str(seed))
env.seed(seed)
if action_selector_name == 'epsilon-greedy':
action_selector = EpsilonGreedy(epsilon, val, seed)
elif action_selector_name == 'boltzmann':
action_selector = Boltzmann(val, seed)
elif action_selector_name == 'ucb-1':
action_selector = UCB_1(val, env.get_total_states(), env.get_total_actions())
elif action_selector_name == 'vdbe':
action_selector = VDBE(env.get_total_states(), val, 1 / env.get_total_actions(), learning_rate, seed)
elif action_selector_name == 'mbie-eb':
action_selector = MBIE_EB(val, env.get_total_states(), env.get_total_actions(), discount_factor)
elif action_selector_name == 'controlability':
action_selector = Controlability(val, env.get_total_states(), env.get_total_actions(), learning_rate, omega)
q_function.reset()
logger = Logger(episodes)
filepath = str(seed)
agent = Agent(env, q_function, action_selector, logger)
agent.train(steps, episodes, val_dir, filepath)
print('Done')
|
nilq/baby-python
|
python
|
#!/bin/python
import sys
import yaml
import datetime
if __name__ == '__main__':
filename = sys.argv[1] if len(sys.argv) > 1 else '/Volumes/Kea/devel/MOSSCO/code/external/fabm/code/util/standard_variables/variables.yaml'
if not filename: sys.exit(1)
with open(filename, 'r') as fid:
yml = yaml.safe_load(fid)
entries = []
for key, value in yml.items():
d=[{'standard_name': item['name'], 'canonical_units': item['units']} for i, item in enumerate(value)]
entries.extend(d)
entries.append({'standard_name': 'x_velocity_at_10m_above_sea_surface', 'canonical_units': 'm s-1'})
entries.append({'standard_name': 'y_velocity_at_10m_above_sea_surface', 'canonical_units': 'm s-1'})
fieldDict={
'version_number': 0.1,
'institution': 'Helmholtz-Zentrum Geesthacht Zentrum für Material- und Küstenforschung',
'source': 'automatically generated from FABM standard variables, with enhancements from MOSSCO',
'contact': 'Carsten Lemmen <carsten.lemmen@hereon.de>',
'last_modified': datetime.datetime.now().isoformat(),
'description': 'Community-based dictionary for shared coupling fields',
'entries': entries}
# We could also use aliases:
# - alias: p
# standard_name: air_pressure
#
with open('field_dictionary.yaml', 'w') as fid:
yaml.dump({'field_dictionary': fieldDict}, stream=fid)
|
nilq/baby-python
|
python
|
import pandas as pd
from scholarly import scholarly
import plotly.express as px
def search_author(name, return_list=False):
search_query = scholarly.search_author(name)
if not return_list:
return next(search_query)
else:
return list(search_query)
def search_author_id(id):
try:
return scholarly.search_author_id(id)
except Exception as e:
print("Invalid scholar id: {}".format(id))
return None
def get_author_list(name):
authors = search_author(name, return_list=True)
result = [
f"{author['affiliation']} | {author['name']} | {author['scholar_id']}" for author in authors]
result.sort()
result = [
f"{author.split(' | ')[1]} | {author.split(' | ')[2]} | {author.split(' | ')[0]}" for author in result]
return result
def search_org(name):
return scholarly.search_org(name)
def search_author_by_org(org_id, return_list=False):
"""Search authors by organization id.
Args:
org_id (str): Organization id. For example, 145051948357103924
return_list (bool, optional): If True, return a list of authors.
Returns:
list: A list of authors.
"""
search_query = scholarly.search_author_by_organization(org_id)
if not return_list:
return next(search_query)
else:
return list(search_query)
def get_author_record(name=None, id=None, sections=[], sortby="citedby", limit=0):
"""[summary]
Args:
name ([type], optional): [description]. Defaults to None.
id ([type], optional): [description]. Defaults to None.
sections (list, optional): The sections that the user wants filled for an Author object. This can be: ['basics', 'indices', 'counts', 'coauthors', 'publications', 'public_access']. Defaults to [].
sortby (str, optional): [description]. Defaults to "citedby".
limit (int, optional): [description]. Defaults to 0.
Raises:
ValueError: [description]
Returns:
[type]: [description]
"""
if name is not None:
author = search_author(name)
elif id is not None:
if "|" in id:
id = id.split("|")[1].strip()
author = search_author_id(id)
else:
raise ValueError("Either name or id must be specified.")
result = scholarly.fill(author, sections=sections,
sortby=sortby, publication_limit=limit)
return result
def get_author_pubs(name=None, id=None, record=None, sections=["publications"], sortby="citedby", limit=0, return_df=False):
if record is None:
pubs = get_author_record(
name=name, id=id, sections=sections, sortby=sortby, limit=limit)["publications"]
else:
pubs = record["publications"]
result = []
for pub in pubs:
if "bib" in pub:
if "title" in pub['bib']:
pub["title"] = pub["bib"]["title"]
if "pub_year" in pub['bib']:
pub["year"] = pub["bib"]["pub_year"]
if "bib" in pub:
pub.pop("bib")
if "source" in pub:
pub.pop("source")
if "filled" in pub:
pub.pop("filled")
result.append(pub)
if return_df:
return pd.DataFrame(result)
else:
return result
def get_author_basics(name=None, id=None, record=None, return_df=False):
if record is None and (name is not None or id is not None):
record = get_author_record(name=name, id=id)
elif record is not None:
pass
else:
raise ValueError("Either name or id must be specified.")
items = ["name", "scholar_id", "affiliation", "affiliation_id", "scholar_url",
"url_picture", "homepage", "email_domain", "interests", "citedby", "citedby5y", "hindex", "hindex5y", "i10index", "i10index5y", "cites_per_year"]
result = {}
for item in items:
if item in record:
result[item] = record[item]
else:
result[item] = ""
if "organization" in record:
result["affiliation_id"] = record["organization"]
result["scholar_url"] = f"https://scholar.google.com/citations?user={record['scholar_id']}"
if return_df:
df = pd.DataFrame([result]).transpose()
df.reset_index(inplace=True)
df.columns = ["key", "value"]
return df
else:
return result
def author_pubs_by_year(name=None, id=None, record=None, return_plot=False):
pubs = get_author_pubs(name=name, id=id, record=record, return_df=True)
stats = pubs.groupby("year").size()
df = pd.DataFrame({"pubs": stats})
df.reset_index(inplace=True)
if not return_plot:
return df
else:
fig = px.bar(df, x="year", y="pubs",
title=f"Publications by year")
return df, fig
def author_citations_by_year(name=None, id=None, record=None, return_plot=False):
if record is None and (name is not None or id is not None):
record = get_author_record(name=name, id=id)
elif record is not None:
pass
else:
raise ValueError("Either name or id must be specified.")
citations = record["cites_per_year"]
df = pd.DataFrame(
{"year": citations.keys(), "citations": citations.values()})
if not return_plot:
return df
else:
fig = px.bar(df, x="year", y="citations",
title=f"Citations by year")
return df, fig
def get_author_coauthors(name=None, id=None, record=None, return_df=False):
if record is None and (name is not None or id is not None):
record = get_author_record(name=name, id=id)
elif record is not None:
pass
else:
raise ValueError("Either name or id must be specified.")
coauthors = record["coauthors"]
if not return_df:
return coauthors
else:
df = pd.DataFrame(coauthors)
df = df[["name", "scholar_id", "affiliation"]]
return df
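# Example usage sketch (illustrative, not part of the module; requires network
# access to Google Scholar and a real author name or scholar id):
#   record = get_author_record(name="Some Author")
#   basics = get_author_basics(record=record, return_df=True)
#   pubs = get_author_pubs(record=record, return_df=True)
#   df, fig = author_citations_by_year(record=record, return_plot=True)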
|
nilq/baby-python
|
python
|
from Tkinter import *
from common import Codes
from ..controllers import AdminDataController
from ..handlers.data import Data
class Admin(Frame):
def __init__(self, parent):
Frame.__init__(self, parent)
self.parent = parent
self.elements = {}
title_frame = Frame(self)
title_frame.pack(expand=True, fill=BOTH, padx=70, pady=(30, 15))
self.elements['title'] = Label(title_frame, text='Admin',
fg='#003399', font=('Arial', 28))
self.elements['title'].pack(side=TOP)
used_space_frame = Frame(self)
used_space_frame.pack(expand=True, fill=BOTH, padx=70, pady=30)
self.elements['used_space_label'] = Label(used_space_frame, text='Used Space: ',
font=('Arial', 18))
self.elements['used_space_label'].pack(side=LEFT)
errors_frame = Frame(self)
errors_frame.pack(expand=True, fill=BOTH, padx=70, pady=(0, 15))
self.elements['errors_label'] = Label(errors_frame, text='Errors:',
font=('Arial', 18))
self.elements['errors_label'].pack(side=TOP, anchor=NW, pady=5)
y_errors_scrollbar = Scrollbar(errors_frame)
y_errors_scrollbar.pack(side=RIGHT, fill=Y)
x_errors_scrollbar = Scrollbar(errors_frame, orient='horizontal')
x_errors_scrollbar.pack(side=BOTTOM, fill=X)
self.elements['errors_listbox'] = Listbox(errors_frame, font=('Arial', 14),
yscrollcommand=y_errors_scrollbar.set, xscrollcommand=x_errors_scrollbar.set)
self.elements['errors_listbox'].pack(side=TOP, anchor=NW, expand=True, fill=BOTH)
y_errors_scrollbar.config(command=self.elements['errors_listbox'].yview)
x_errors_scrollbar.config(command=self.elements['errors_listbox'].xview)
activity_frame = Frame(self)
activity_frame.pack(expand=True, fill=BOTH, padx=70, pady=(15, 40))
self.elements['activity_label'] = Label(activity_frame, text='Activity:',
font=('Arial', 18))
self.elements['activity_label'].pack(side=TOP, anchor=NW, pady=5)
y_activity_scrollbar = Scrollbar(activity_frame)
y_activity_scrollbar.pack(side=RIGHT, fill=Y)
x_activity_scrollbar = Scrollbar(activity_frame, orient='horizontal')
x_activity_scrollbar.pack(side=BOTTOM, fill=X)
self.elements['activity_listbox'] = Listbox(activity_frame, font=('Arial', 14),
yscrollcommand=y_activity_scrollbar.set, xscrollcommand=x_activity_scrollbar.set)
self.elements['activity_listbox'].pack(side=TOP, anchor=NW, expand=True, fill=BOTH)
y_activity_scrollbar.config(command=self.elements['activity_listbox'].yview)
x_activity_scrollbar.config(command=self.elements['activity_listbox'].xview)
def initialize(self):
admin_data_response = AdminDataController.get_admin_data(Data().get_token())
if admin_data_response.code != Codes.SUCCESS:
self.parent.display_error(admin_data_response.payload['message'])
self.parent.return_frame()
return
self.admin_data = admin_data_response.payload
self.elements['used_space_label']['text'] = 'Used Space: ' + str(
self.admin_data['used_space']) + 'MB'
self.elements['errors_listbox'].delete(0, END)
self.elements['activity_listbox'].delete(0, END)
for log in self.admin_data['logs']:
if log['type'] == 'error':
self.elements['errors_listbox'].insert(END, log['message'])
elif log['type'] == 'activity':
self.elements['activity_listbox'].insert(END, log['message'])
|
nilq/baby-python
|
python
|
# Import libraries (toolkits) for deep learning
import numpy as np
import os
import torch
import torch.nn as nn
import torchvision.transforms as transforms
from torch.utils.data import DataLoader, Dataset
import pytorchtools as tools
import pandas as pd
import time
import myfunctions as my
# read data
workspace_dir = './output'
print("Reading data")
train_x, train_y = my.readfile(os.path.join(workspace_dir, "training"), True)
print("Size of training data = {}".format(len(train_x)))
test_x, test_y = my.readfile(os.path.join(workspace_dir, "testing"), True)
print("Size of Testing data = {}".format(len(test_x)))
train_transform = transforms.Compose([
transforms.ToPILImage(),
transforms.RandomRotation((-15,15)),
transforms.ToTensor(),
])
test_transform = transforms.Compose([
transforms.ToPILImage(),
transforms.ToTensor(),
])
train_x, train_y = my._shuffle(train_x, train_y)
train_x, train_y, val_x, val_y = my._train_dev_split(train_x, train_y, 0.2)
batch_size = 150
train_set = tools.DigitDataset(train_x, train_y, train_transform)
train_loader = DataLoader(train_set, batch_size=batch_size, shuffle = True)
print("Size of training data = {}".format(len(train_x)))
val_set = tools.DigitDataset(val_x, val_y, test_transform)
val_loader = DataLoader(val_set, batch_size=batch_size, shuffle = False)
print("Size of validation data = {}".format(len(val_x)))
#training model
cpu = torch.device("cpu")
gpu = torch.device("cuda:0")
model = tools.maxout_conv_net(4).to(gpu)
model.load_state_dict(torch.load('digitalnetwork_3.pkl'))
patience = 3
loss = nn.CrossEntropyLoss() # since this is a classification problem, we use cross entropy as our loss function
optimizer = torch.optim.Adam(model.parameters(), lr = 0.001) # Adam is an optimizer with momentum, which helps avoid getting stuck at saddle points or local minima
num_epoch = 15
trainingtime = 10
early_stopping = tools.EarlyStopping(patience = patience, verbose=True)
for i in range(trainingtime):
if i > 0:
train_x, train_y = my._shuffle(np.concatenate((train_x, val_x), axis = 0), np.concatenate((train_y, val_y), axis = 0))
train_x, train_y, val_x, val_y = my._train_dev_split(train_x, train_y, 0.2)
train_set = tools.DigitDataset(train_x, train_y, train_transform)
train_loader = DataLoader(train_set, batch_size = batch_size, shuffle = True)
val_set = tools.DigitDataset(val_x, val_y, test_transform)
val_loader = DataLoader(val_set, batch_size = batch_size, shuffle = False)
early_stopping.refresh()
for epoch in range(num_epoch):
epoch_start_time = time.time()
train_acc = 0.0
train_loss = 0.0
val_acc = 0.0
val_loss = 0.0
        model.train() # ensure the model is in train mode (for dropout)
        for i, data in enumerate(train_loader):
            optimizer.zero_grad() # gradients must be zeroed before each new descent step
            train_pred = model(data[0].to(gpu)) # get the predicted probability distribution (this calls the model's forward function)
            batch_loss = loss(train_pred, data[1].to(gpu)) # compute the loss; prediction and label must be on the same device (CPU or GPU)
            batch_loss.backward() # use backpropagation to compute the gradient of each parameter
            optimizer.step() # use the gradients to update the parameters via the optimizer
train_acc += np.sum(np.argmax(train_pred.to(cpu).data.numpy(), axis=1) == data[1].numpy())
train_loss += batch_loss.item()
model.eval()
with torch.no_grad():
for i, data in enumerate(val_loader):
val_pred = model(data[0].to(gpu))
batch_loss = loss(val_pred, data[1].to(gpu))
val_acc += np.sum(np.argmax(val_pred.to(cpu).data.numpy(), axis=1) == data[1].numpy())
val_loss += batch_loss.item()
early_stopping(val_loss/val_set.__len__(), model)
        # print the current accuracy
print('[%03d/%03d] %2.2f sec(s) Train Acc: %3.6f Loss: %3.6f | Val Acc: %3.6f loss: %3.6f' % \
(epoch + 1, num_epoch, time.time()-epoch_start_time, \
train_acc/train_set.__len__(), train_loss/train_set.__len__(), val_acc/val_set.__len__(), val_loss/val_set.__len__()))
if early_stopping.early_stop:
print("Early stopping")
break
#combine validation data and training data in order to retrain and get a better final model
#train_val_x = np.concatenate((train_x, val_x), axis = 0)
#train_val_y = np.concatenate((train_y, val_y), axis = 0)
#train_val_x, train_val_y = _shuffle(train_val_x, train_val_y)
#train_val_set = DigitDataset(train_val_x, train_val_y, train_transform)
#train_val_loader = DataLoader(train_val_set, batch_size=batch_size, shuffle=True)
#for epoch in range(num_epoch):
# epoch_start_time = time.time()
# train_acc = 0.0
# train_loss = 0.0
# model.train()
# for i, data in enumerate(train_val_loader):
# optimizer.zero_grad()
# train_pred = model(data[0].to(gpu))
# batch_loss = loss(train_pred, data[1].to(gpu))
# batch_loss.backward()
# optimizer.step()
# train_acc += np.sum(np.argmax(train_pred.to(cpu).data.numpy(), axis=1) == data[1].numpy())
# train_loss += batch_loss.item()
# print('[%03d/%03d] %2.2f sec(s) Train Acc: %3.6f Loss: %3.6f' % \
# (epoch + 1, num_epoch, time.time()-epoch_start_time, \
# train_acc/train_val_set.__len__(), train_loss/train_val_set.__len__()))
#print out prediction on testing set
test_set = tools.DigitDataset(test_x, test_y, transform = test_transform)
test_loader = DataLoader(test_set, batch_size = batch_size, shuffle = False)
model.load_state_dict(torch.load('digitalnetwork_3.pkl'))
model.eval()
test_acc = 0.0
test_loss = 0.0
with torch.no_grad():
for i, data in enumerate(test_loader):
epoch_start_time = time.time()
test_pred = model(data[0].to(gpu))
batch_loss = loss(test_pred, data[1].to(gpu))
test_acc += np.sum(np.argmax(test_pred.to(cpu).data.numpy(), axis=1) == data[1].numpy())
test_loss += batch_loss.item()
#print out accuracy and loss
print('%2.2f sec(s) Test Acc: %3.6f Loss: %3.6f' % \
(time.time()-epoch_start_time, \
test_acc/test_set.__len__(), test_loss/test_set.__len__()))
|
nilq/baby-python
|
python
|
# -*- coding: utf-8 -*-
"""
Created on Sun Apr 5 04:53:01 2020
@author: Infraestructura 3D
"""
"""
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
# =============================================================================
# %% PATHS
# =============================================================================
PATH_chrome = r'D:/LifeWare Technologies/Alarm System/DataCapture/Templates/chromedriver.exe'
# =============================================================================
# %% URLs
# =============================================================================
#An example of how to input the keywords.
#urls = ['https://www.usatoday.com/search/?q=toilet+paper',
# 'https://www.wsj.com/search/term.html?KEYWORDS=toilet%20paper&min-date=2016/04/05&max-date=2020/04/05&isAdvanced=true&daysback=4y&andor=AND&sort=date-desc&source=wsjarticle,wsjblogs,wsjvideo,interactivemedia,sitesearch,wsjpro',
# 'https://www.nytimes.com/search?dropmab=true&endDate=20200405&query=toilet%20paper&sort=newest&startDate=20190405',
# https://nypost.com/search/toilet+paper/?sf=20180101&orderby=date&order=desc''
# 'https://www.latimes.com/search?s=1&q=toilet+paper',
# 'https://www.washingtonpost.com/newssearch/?datefilter=All%20Since%202005&sort=Date&query=toilet%20paper',
# 'https://www.startribune.com/search/?sort=date-desc&q=toilet+paper',
# 'https://www.newsday.com/search#filter=stories&query=toilet%20paper',
# "https://www.chicagotribune.com/search/covid+19/100-y/ALL/date/100/",
# 'https://www3.bostonglobe.com/queryResult/search?q=toilet%20paper&p1=BGMenu_Search&arc404=true']
#urls = ['https://www.usatoday.com/search/?q=',
# ['https://www.wsj.com/search/term.html?min-date=2018/01/01', '&max-date=2020/04/05', '&isAdvanced=true&daysback=4y&andor=AND&sort=date-desc&source=wsjarticle,wsjblogs,wsjvideo,interactivemedia,sitesearch,wsjpro&KEYWORDS='],
# 'https://www.nytimes.com/search?dropmab=true&endDate=20200405&startDate=20180101&sort=newest&query=toilet%20paper',
# ['https://nypost.com/search/', '', '/?sf=20180101&orderby=date&order=desc', ],
# 'https://www.latimes.com/search?s=1&q=',
# 'https://www.washingtonpost.com/newssearch/?datefilter=All%20Since%202005&sort=Date&query=',
# 'https://www.startribune.com/search/?sort=date-desc&q=',
# 'https://www.newsday.com/search#filter=stories&query=',
# "https://www.chicagotribune.com/search/covid+19/100-y/ALL/date/100/",
# ['https://www3.bostonglobe.com/queryResult/search?','q=toilet%20paper','&p1=BGMenu_Search&arc404=true']
# ]
urls = {'usa_today': ['https://www.usatoday.com/search/?q=','&page='],
'wsj': ['https://www.wsj.com/search/term.html?min-date=2018/01/01&max-date=','&page=', '&isAdvanced=true&andor=AND&sort=date-desc&source=wsjarticle,wsjblogs,wsjvideo,interactivemedia,sitesearch,wsjpro&KEYWORDS='],
'ny_t': ['https://www.nytimes.com/search?dropmab=true&endDate=','&startDate=20180101&sort=newest&query='],
'ny_p': ['https://nypost.com/search/', '/?sf=20180101&orderby=date&order=desc'],
'la_t': ['https://www.latimes.com/search?s=','&q='],
'washington_p': ['https://www.washingtonpost.com/newssearch/?datefilter=All%20Since%202005&sort=Date&query='],
'star_t': ['https://www.startribune.com/search/?sort=date-desc&q='],
'news_day': ['https://www.newsday.com/search#filter=stories&query='],
'chicago_t': ['https://www.chicagotribune.com/search/','/100-y/ALL/date/100/'],
'boston_g': ['https://www3.bostonglobe.com/queryResult/search?q=','&p', '=BGMenu_Search&arc404=true']
}
# masks format: 'q_sep' = [separator for words, index of the url fragment to join the query to]; 'd_sep' = [date separator, fragment index]; 'pag' = [has pagination?, index of the fragment to join the page number to]
masks = {'usa_today': {'q_sep':['+', 0], 'pag':[True,1]},
'wsj': {'q_sep': ['%20', 2], 'd_sep': ['/',0], 'pag':[True,1]},
'ny_t': {'q_sep':['%20',1], 'd_sep':['',0]},
'ny_p': {'q_sep':['%20',0]},
'la_t': {'q_sep':['+',1], 'pag':[True,0]},
'washington_p': {'q_sep':['%20',0]},
'star_t': {'q_sep': ['+',0]},
'news_day': {'q_sep':['%20',0]},
'chicago_t': {'q_sep':['+',0]},
'boston_g': {'q_sep':['%20',0], 'pag':[True,1]}
}
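# Worked example (illustrative): for 'usa_today' with keyword 'toilet paper',
# 'q_sep': ['+', 0] means spaces become '+' and the query is appended to url[0],
# giving 'https://www.usatoday.com/search/?q=toilet+paper'; 'pag': [True, 1]
# means the page number is appended to url[1] ('&page=1', '&page=2', ...).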
#html_tags = {'usa_today': {'general_class':['class', 'gnt_se_a'], 'news_link':['href','gnt_se_a'], 'news_tag':'dat-c-ssts', 'description':'data-c-desc', 'photo_video_link':'gnt_se_f_i','date':'data-c-dt','author':'data-c-by','pag':'gnt_se_pgn_pg'},
# 'wsj': {'general_class':['class', 'item-container headline-item'], 'news_link':['class','gnt_se_a'], 'news_tag':'dat-c-ssts', 'description':'data-c-desc', 'photo_video_link':'gnt_se_f_i','date':'data-c-dt','author':'data-c-by','pag':'gnt_se_pgn_pg'},
# 'ny_t': {'q_sep':['%20',1], 'd_sep':['',0]},
#
# 'ny_p': {'q_sep':['%20',0]},
# 'la_t': {'q_sep':['+',1], 'pag':[True,0]},
# 'washington_p': {'q_sep':['%20',0]},
#
# 'star_t': {'q_sep': ['+',0]},
# 'news_day': {'q_sep':['%20',0]},
# 'chicago_t': {'q_sep':['+',0]},
#
# 'boston_g': {'q_sep':['%20',0], 'pag':[True,1]}
# }
#html_tags = {'usa_today': {'general_class':['class', 'gnt_se_a']},
# 'wsj': {'general_class':['class', 'item-container headline-item']},
# 'ny_t': {'q_sep':['%20',1], 'd_sep':['',0]},
#
# 'ny_p': {'q_sep':['%20',0]},
# 'la_t': {'q_sep':['+',1], 'pag':[True,0]},
# 'washington_p': {'q_sep':['%20',0]},
#
# 'star_t': {'q_sep': ['+',0]},
# 'news_day': {'q_sep':['%20',0]},
# 'chicago_t': {'q_sep':['+',0]},
#
# 'boston_g': {'q_sep':['%20',0], 'pag':[True,1]}
# }
# =============================================================================
# %% Imports
# =============================================================================
import numpy as np
import pandas as pd
from threading import Thread
from threading import Timer
import gc,requests,json
from datetime import datetime
from bs4 import BeautifulSoup
from selenium import webdriver
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.common.by import By
import time
# =============================================================================
# %% Functions
# =============================================================================
def downloadPage(url,verbose):
page = requests.get(url)
print('Status Code: '+str(page.status_code))
if verbose:
print(page)
return page
# =============================================================================
# %% Settings
# =============================================================================
keyword = 'toilet paper'
end_date = str(datetime(2020, 4, 5))[:10]
# =============================================================================
# %% Download Pages
# =============================================================================
browser = webdriver.Chrome(executable_path=PATH_chrome)
pages_ = {}
# iterate through the urls and their masks
keys = list(urls.keys())
for key in keys:
url = urls[key]
mask = masks[key]
keys_mask = list(mask.keys())
n = False
for key_mask in keys_mask:
if key_mask == 'q_sep':
            #convert the keyword into a readable format for the page's search engine
keyword_temp = keyword.replace(' ', mask[key_mask][0])
idx = mask[key_mask][1]
url[idx] = ''.join([url[idx],keyword_temp])
elif key_mask == 'd_sep':
end_date_temp = end_date.replace('-', mask[key_mask][0])
idx = mask[key_mask][1]
url[idx] = ''.join([url[idx],end_date_temp])
elif key_mask == 'pag':
n = 1
            #TODO: get the number of pages so it is easier to iterate through.
idx = mask[key_mask][1]
url_pag = url.copy()
url[idx] = ''.join([url[idx],str(n)])
pages_[key] = {}
if url:
url = ''.join(url)
print(url)
browser.get(url)
time.sleep(2)
content = browser.page_source
page_parsed = BeautifulSoup(content, 'html.parser')
wait = WebDriverWait(browser, 600)
#Get the individual News
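        # NOTE: html_tags is only sketched in the commented-out blocks above; it
        # must be defined (CSS class names per source) before these lookups run.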
news = page_parsed.find_all(class_= html_tags[key]['general_class'][1])
pages_[key]['page_'+str(n)] = news
#get the pagination numbers
pag = page_parsed.find_all(class_= html_tags[key]['pag'])
pag = len(pag)
for n in range(2,pag+1):
            #TODO: get the number of pages so it is easier to iterate through.
idx = mask[key_mask][1]
url = url_pag.copy()
url[idx] = ''.join([url_pag[idx],str(n)])
url = ''.join(url)
print(url)
browser.get(url)
time.sleep(2)
content = browser.page_source
page_parsed = BeautifulSoup(content, 'html.parser')
wait = WebDriverWait(browser, 600)
f = open(key+'.txt', 'wb')
f.write(content.encode())
f.close()
# Iter through every news link
# for item in news:
#
#
#
# #Get the values and links
# news = page_parsed.find_all(class_= html_tags[key]['news_link'])
# html_keys = list(html_tags[key].keys())
#
# pages_[key] = content
#
#
# f = open(key+'.txt', 'wb')
# f.write(content.encode())
# f.close()
# =============================================================================
# %% End
# =============================================================================
|
nilq/baby-python
|
python
|
from django.core.cache import cache
from django.test import Client, TestCase
from django.urls import reverse
class MoviesTest(TestCase):
def setUp(self):
self.client = Client()
cache.clear()
def test_view_movies_correct_template(self):
"""check if the right template is called"""
response = self.client.get(reverse('cinema:films'))
self.assertTemplateUsed(response, 'cinema/movies.html')
def test_view_movies(self):
"""Verify that all movies are retrieved"""
response = self.client.get(reverse('cinema:films'))
self.assertEqual(response.status_code, 200)
self.assertContains(response, '</table>')
self.assertContains(response, '<title>Movies</title>')
def test_fetch_movies_pagination_is_10(self):
"""Verify that movies are returned based on the paginated default value per page"""
response = self.client.get(reverse('cinema:films'))
self.assertEqual(response.status_code, 200)
self.assertEqual(len(response.context['movies']), 10)
def test_fetch_single_movie(self):
"""Verify that a single movie is retrieved"""
movie_response = self.client.get(reverse('cinema:films'))
movie_id = list(movie_response.context['movies'])[0]['id']
response = self.client.get(reverse('cinema:film', args=(movie_id,)))
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, 'cinema/movie.html')
self.assertContains(response, 'Cast')
|
nilq/baby-python
|
python
|
#!/usr/bin/env python3
import subprocess

interface = input("enter your interface>>")
mac = input("enter new mac>>")
print(" ")
print("--------------------------------------------------------------------------")
subprocess.call("ifconfig " + interface + " down",shell=True)
subprocess.call("ifconfig " + interface + " hw ether " + mac,shell=True)
subprocess.call("ifconfig " + interface + " up",shell=True)
print ("ONLY ON ROOTED DEVICE")
|
nilq/baby-python
|
python
|
"""
Horizontal boxplot with observations
====================================
_thumb: .7, .45
"""
import numpy as np
import seaborn as sns
sns.set(style="ticks", palette="muted", color_codes=True)
# Load the example planets dataset
planets = sns.load_dataset("planets")
# Plot the orbital period with horizontal boxes
ax = sns.boxplot(x="distance", y="method", data=planets,
whis=np.inf, color="c")
# Add in points to show each observation
sns.stripplot(x="distance", y="method", data=planets,
jitter=True, size=3, color=".3", linewidth=0)
# Make the quantitative axis logarithmic
ax.set_xscale("log")
sns.despine(trim=True)
|
nilq/baby-python
|
python
|
# Generated by Django 3.1 on 2020-08-26 08:48
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('mining', '0001_initial'),
]
operations = [
migrations.RemoveField(
model_name='match_key',
name='MK_key_type',
),
migrations.RemoveField(
model_name='match_key',
name='MK_match',
),
migrations.RemoveField(
model_name='matched',
name='MAT_match_method_type',
),
migrations.RemoveField(
model_name='matched',
name='MAT_pattern',
),
migrations.RemoveField(
model_name='matched',
name='MAT_topology',
),
migrations.RemoveField(
model_name='pattern_source',
name='PS_pattern',
),
migrations.RemoveField(
model_name='pattern_source',
name='PS_topology',
),
migrations.DeleteModel(
name='Key_Type',
),
migrations.DeleteModel(
name='Match_Key',
),
migrations.DeleteModel(
name='Match_Method_Type',
),
migrations.DeleteModel(
name='Matched',
),
migrations.DeleteModel(
name='Pattern',
),
migrations.DeleteModel(
name='Pattern_Source',
),
]
|
nilq/baby-python
|
python
|
from flask import Blueprint
bp = Blueprint("StCourierServer", __name__, url_prefix="/master")
from . import view
|
nilq/baby-python
|
python
|
#
# This file is part of the PyMeasure package.
#
# Copyright (c) 2013-2022 PyMeasure Developers
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
import logging
from functools import partial
from ..inputs import BooleanInput, IntegerInput, ListInput, ScientificInput, StringInput
from ..Qt import QtCore, QtGui
from ...experiment import parameters
log = logging.getLogger(__name__)
log.addHandler(logging.NullHandler())
class InputsWidget(QtGui.QWidget):
# tuple of Input classes that do not need an external label
NO_LABEL_INPUTS = (BooleanInput,)
def __init__(self, procedure_class, inputs=(), parent=None, hide_groups=True):
super().__init__(parent)
self._procedure_class = procedure_class
self._procedure = procedure_class()
self._inputs = inputs
self._setup_ui()
self._layout()
self._hide_groups = hide_groups
self._setup_visibility_groups()
def _setup_ui(self):
parameter_objects = self._procedure.parameter_objects()
for name in self._inputs:
parameter = parameter_objects[name]
if parameter.ui_class is not None:
element = parameter.ui_class(parameter)
elif isinstance(parameter, parameters.FloatParameter):
element = ScientificInput(parameter)
elif isinstance(parameter, parameters.IntegerParameter):
element = IntegerInput(parameter)
elif isinstance(parameter, parameters.BooleanParameter):
element = BooleanInput(parameter)
elif isinstance(parameter, parameters.ListParameter):
element = ListInput(parameter)
elif isinstance(parameter, parameters.Parameter):
element = StringInput(parameter)
setattr(self, name, element)
def _layout(self):
vbox = QtGui.QVBoxLayout(self)
vbox.setSpacing(6)
self.labels = {}
parameters = self._procedure.parameter_objects()
for name in self._inputs:
if not isinstance(getattr(self, name), self.NO_LABEL_INPUTS):
label = QtGui.QLabel(self)
label.setText("%s:" % parameters[name].name)
vbox.addWidget(label)
self.labels[name] = label
vbox.addWidget(getattr(self, name))
self.setLayout(vbox)
def _setup_visibility_groups(self):
groups = {}
parameters = self._procedure.parameter_objects()
for name in self._inputs:
parameter = parameters[name]
group_state = {g: True for g in parameter.group_by}
for group_name, condition in parameter.group_by.items():
if group_name not in self._inputs or group_name == name:
continue
if isinstance(getattr(self, group_name), BooleanInput):
# Adjust the boolean condition to a condition suitable for a checkbox
condition = QtCore.Qt.CheckState.Checked if condition else QtCore.Qt.CheckState.Unchecked # noqa: E501
if group_name not in groups:
groups[group_name] = []
groups[group_name].append((name, condition, group_state))
for group_name, group in groups.items():
toggle = partial(self.toggle_group, group_name=group_name, group=group)
group_el = getattr(self, group_name)
if isinstance(group_el, BooleanInput):
group_el.stateChanged.connect(toggle)
toggle(group_el.checkState())
elif isinstance(group_el, StringInput):
group_el.textChanged.connect(toggle)
toggle(group_el.text())
elif isinstance(group_el, (IntegerInput, ScientificInput)):
group_el.valueChanged.connect(toggle)
toggle(group_el.value())
elif isinstance(group_el, ListInput):
group_el.currentTextChanged.connect(toggle)
toggle(group_el.currentText())
else:
raise NotImplementedError(
f"Grouping based on {group_name} ({group_el}) is not implemented.")
def toggle_group(self, state, group_name, group):
for (name, condition, group_state) in group:
if callable(condition):
group_state[group_name] = condition(state)
else:
group_state[group_name] = (state == condition)
visible = all(group_state.values())
if self._hide_groups:
getattr(self, name).setHidden(not visible)
else:
getattr(self, name).setDisabled(not visible)
if name in self.labels:
if self._hide_groups:
self.labels[name].setHidden(not visible)
else:
self.labels[name].setDisabled(not visible)
def set_parameters(self, parameter_objects):
for name in self._inputs:
element = getattr(self, name)
element.set_parameter(parameter_objects[name])
def get_procedure(self):
""" Returns the current procedure """
self._procedure = self._procedure_class()
parameter_values = {}
for name in self._inputs:
element = getattr(self, name)
parameter_values[name] = element.parameter.value
self._procedure.set_parameters(parameter_values)
return self._procedure
|
nilq/baby-python
|
python
|
"""protected field on Address
Revision ID: 427743e76984
Revises: f8c342997aab
Create Date: 2021-02-02 11:39:45.955233
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '427743e76984'
down_revision = 'f8c342997aab'
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.add_column('address', sa.Column('protected', sa.Boolean(), nullable=True))
op.drop_column('location', 'protected')
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.add_column('location', sa.Column('protected', sa.BOOLEAN(), autoincrement=False, nullable=True))
op.drop_column('address', 'protected')
# ### end Alembic commands ###
|
nilq/baby-python
|
python
|
import binance
from config import BINANCE_API_KEY, BINANCE_API_SECRET
async def connect():
binance_client = binance.Client(BINANCE_API_KEY, BINANCE_API_SECRET)
await binance_client.load()
return binance_client
|
nilq/baby-python
|
python
|
#!/usr/bin/python
import os
import pyopencl as cl
import numpy as np
# initialize OpenCL
def initCl():
PACKAGE_PATH = os.path.dirname( os.path.realpath( __file__ ) ); print(PACKAGE_PATH)
#CL_PATH = os.path.normpath( PACKAGE_PATH + '../../cl/' )
CL_PATH = os.path.normpath( PACKAGE_PATH + '/../cl' )
#CL_PATH = PACKAGE_PATH+"/cl/"
print(CL_PATH)
plats = cl.get_platforms()
ctx = cl.Context(properties=[(cl.context_properties.PLATFORM, plats[0])], devices=None)
queue = cl.CommandQueue(ctx)
f = open(CL_PATH+"/STM.cl", 'r')
fstr = "".join(f.readlines())
program = cl.Program(ctx, fstr).build()
return ctx,queue,program
ctx,queue,program = initCl()
def initArgs(atoms, CAOs, Spectral, rTips ):
'''
int nAtoms, int nMOs,
__global float4* atoms, // [nAtoms]
__global float4* CAOs, // [nMOs*nAtoms]
__global float2* DOSs, // [nMOs] occupation
__global float4* rTips, // [global_size]
__global float * Iout, // [global_size] output current
'''
nDim = rTips.shape
ntot = (nDim[0]*nDim[1]*nDim[2],)
print("initArgs rTips ", rTips.shape, ntot)
nAtoms = np.int32( len(atoms) )
nMOs = np.int32( len(CAOs) )
print("initArgs nAtoms, nMOs", nAtoms, nMOs)
mf = cl.mem_flags
    cl_Gout = cl.Buffer(ctx, mf.WRITE_ONLY, rTips.nbytes // 4)  # buffer size must be an int (one float per float4 tip)
cl_atoms = cl.Buffer(ctx, mf.READ_ONLY | mf.COPY_HOST_PTR, hostbuf=atoms )
cl_CAOs = cl.Buffer(ctx, mf.READ_ONLY | mf.COPY_HOST_PTR, hostbuf=CAOs )
cl_Spectral = cl.Buffer(ctx, mf.READ_ONLY | mf.COPY_HOST_PTR, hostbuf=Spectral )
cl_rTips = cl.Buffer(ctx, mf.READ_ONLY | mf.COPY_HOST_PTR, hostbuf=rTips )
kargs = ( nAtoms, nMOs, cl_atoms, cl_CAOs, cl_Spectral, cl_rTips, cl_Gout )
return kargs
def run( kargs, nDim, local_size=(32,) ):
print("run opencl kernel ...")
global_size = (nDim[0]*nDim[1]*nDim[2],)
assert ( global_size[0]%local_size[0]==0 ), "number of grid points %i must be divisible by local_group_size %i" %(global_size[0],local_size[0]);
Gout = np.zeros( nDim, dtype=np.float32 )
print("FE.shape", Gout.shape)
print("global_size: ", global_size)
print("local_size: ", local_size)
program.Conductance_s_sp( queue, global_size, local_size, *(kargs))
cl.enqueue_copy ( queue, Gout, kargs[6] );
queue.finish()
print("... opencl kernel DONE")
return Gout
def getPos(lvec, nDim=None, step=(0.1,0.1,0.1) ):
if nDim is None:
nDim = ( int(np.linalg.norm(lvec[3,:])/step[2]),
int(np.linalg.norm(lvec[2,:])/step[1]),
int(np.linalg.norm(lvec[1,:])/step[0]))
dCell = np.array( ( lvec[1,:]/nDim[2], lvec[2,:]/nDim[1], lvec[3,:]/nDim[0] ) )
ABC = np.mgrid[0:nDim[0],0:nDim[1],0:nDim[2]]
print("nDim",nDim)
print("ABC[0].shape ", ABC[0].shape)
X = lvec[0,0] + ABC[2]*dCell[0,0] + ABC[1]*dCell[1,0] + ABC[0]*dCell[2,0]
Y = lvec[0,1] + ABC[2]*dCell[0,1] + ABC[1]*dCell[1,1] + ABC[0]*dCell[2,1]
Z = lvec[0,2] + ABC[2]*dCell[0,2] + ABC[1]*dCell[1,2] + ABC[0]*dCell[2,2]
return X, Y, Z
def XYZ2float4(X,Y,Z):
nDim = X.shape
XYZW = np.zeros( (nDim[0],nDim[1],nDim[2],4), dtype=np.float32)
XYZW[:,:,:,0] = X
XYZW[:,:,:,1] = Y
XYZW[:,:,:,2] = Z
return XYZW
def getPos_f4( lvec, nDim=None, step=(0.1,0.1,0.1) ):
X,Y,Z = getPos(lvec, nDim=nDim, step=step )
return XYZ2float4(X,Y,Z)
def xyzq2float4(xyzs,qs):
atoms_ = np.zeros( (len(qs),4), dtype=np.float32)
atoms_[:,:3] = xyzs[:,:]
atoms_[:, 3] = qs[:]
return atoms_
def getSpectral( eigenvals, Wf = 1.0, w=0.2 ):
w2 = w*w
lorentz = np.ones( len(eigenvals), dtype=np.float32 )
    lorentz[:] /= ( w2 + (eigenvals-Wf)**2 )
return lorentz
def CAOsp2f4(CAOs,nAtoms):
CAOs = CAOs.reshape((-1,nAtoms,4))
print("CAOs.shape ", CAOs.shape)
CAOs_ = np.zeros( (len(CAOs),nAtoms,4), dtype=np.float32)
CAOs_[:,:,0] = CAOs[:,:,0] # s
CAOs_[:,:,1] = CAOs[:,:,3] # px
CAOs_[:,:,2] = CAOs[:,:,1] # py --- because fireball
CAOs_[:,:,3] = CAOs[:,:,2] # pz
return CAOs_
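# Minimal end-to-end sketch (illustrative; the input arrays are assumptions, not
# provided by this module):
#   rTips = getPos_f4(lvec)                  # tip grid from a 4x3 lvec (origin + cell vectors)
#   atoms = xyzq2float4(xyzs, qs)            # atomic positions and charges as float4
#   CAOs = CAOsp2f4(raw_coefs, len(atoms))   # sp coefficients reordered to (s,px,py,pz)
#   spectral = getSpectral(eigenvals, Wf=fermi_level, w=0.2)
#   Gout = run(initArgs(atoms, CAOs, spectral, rTips), rTips.shape[:3])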
|
nilq/baby-python
|
python
|
# -*- coding: utf-8 -*-
import sfml as sf
WIDTH = 640
HEIGHT = 480
TITLE = "Python SFML Events"
window = sf.RenderWindow(sf.VideoMode(WIDTH, HEIGHT), TITLE)
while window.is_open:
for event in window.events:
if type(event) is sf.CloseEvent:
window.close()
if type(event) is sf.MouseMoveEvent:
print("Fare hareket etti! %s" % event.position)
if type(event) is sf.KeyEvent:
if event.released and event.code is sf.Keyboard.ESCAPE:
print("ESC'ye basıldı!")
window.close()
if not event.released and event.code is sf.Keyboard.W:
print("W tuşuna basılıyor!")
window.clear()
window.display()
|
nilq/baby-python
|
python
|
from flask import Flask, request
app = Flask(__name__)
def getAllPuppies():
return "Getting All the puppies!"
def makeANewPuppy():
return "Creating A New Puppy!"
def getPuppy(id):
return "Getting Puppy with id {}".format(id)
def updatePuppy(id):
return "Updating Puppy with id {}".format(id)
def deletePuppy(id):
return "Removing Puppy with id {}".format(id)
@app.route('/puppies', methods=['GET', 'POST'])
def puppiesFunction():
if request.method == 'GET':
return getAllPuppies()
elif request.method == 'POST':
return makeANewPuppy()
else:
print("This is a diffren request {}".format(request.method))
@app.route('/puppies/<int:id>',methods=['GET','PUT','DELETE'])
def puppiesFunctionId(id):
if request.method == 'GET':
return getPuppy(id)
elif request.method == 'PUT':
return updatePuppy(id)
elif request.method == 'DELETE':
return deletePuppy(id)
else:
print("This is a diffrent request {}".format(request.method))
if __name__ == '__main__':
app.debug = True
app.run(host='0.0.0.0', port=5000)
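# Example requests against this API (illustrative):
#   curl http://localhost:5000/puppies           -> "Getting All the puppies!"
#   curl -X POST http://localhost:5000/puppies   -> "Creating A New Puppy!"
#   curl -X PUT http://localhost:5000/puppies/3  -> "Updating Puppy with id 3"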
|
nilq/baby-python
|
python
|
import numpy as np
import matplotlib.pylab as pl
pl.rcParams['pdf.fonttype'] = 42
pl.rcParams['ps.fonttype'] = 42
fig = pl.figure(figsize=(12,4))
ax = fig.add_subplot(1,2,1)
def plot_perf(nlist, err, color, label, errbar=False, perc=20):
pl.loglog(nlist, err.mean(0), label=label, color=color)
if errbar:
pl.fill_between(nlist, np.percentile(err,perc,axis=0), np.percentile(err,100-perc,axis=0),
alpha=0.2, facecolor=color)
fs = 16
list_d = [2, 5, 7]
list_m = [128]
list_n = [1000*i+300 for i in range(11)]
values_dim = np.load('samples_comp.npy')
values_m = np.load('samples_comp_d_fix.npy')
plot_perf(list_n, values_dim[0, 0], 'g', 'd=2', errbar=True, perc=20)
plot_perf(list_n, values_dim[1, 0], 'b', 'd=5', errbar=True, perc=20)
plot_perf(list_n, values_dim[2, 0], 'r', 'd=7', errbar=True, perc=20)
pl.title(r'Sample complexity of $\widetilde{\Lambda}_{\overline{W_2^2}^\mathtt{W}}^k$, $k=10^3$, m=128', fontsize=fs)
pl.ylabel(r'value $\widetilde{\Lambda}_{\overline{W_2^2}^\mathtt{W}}^k(\alpha_n, \beta_n)$', fontsize=fs-1)
pl.xlabel('n : number of data', fontsize=fs-1)
pl.legend()
pl.tight_layout()
ax = fig.add_subplot(1,2,2)
plot_perf(list_n, values_m[0, 0], 'g', 'm=64', errbar=True, perc=20)
plot_perf(list_n, values_m[0, 1], 'b', 'm=128', errbar=True, perc=20)
plot_perf(list_n, values_m[0, 2], 'r', 'm=256', errbar=True, perc=20)
pl.title(r'Sample complexity of $\widetilde{\Lambda}_{\overline{W_2^2}^\mathtt{W}}^k$, $k=10^3$, d=7', fontsize=fs)
pl.ylabel(r'value $\widetilde{\Lambda}_{\overline{W_2^2}^\mathtt{W}}^k(\alpha_n, \beta_n)$', fontsize=fs-1)
pl.xlabel('n : number of data', fontsize=fs-1)
pl.legend()
pl.tight_layout()
pl.savefig('imgs/sample_complexity.pdf')
pl.show()
|
nilq/baby-python
|
python
|
from __future__ import unicode_literals
__all__ = ['core','util']
import os
import warnings
import ruamel.yaml as yaml
__author__ = "Pymatgen Development Team"
__email__ ="pymatgen@googlegroups.com"
__maintainer__ = "Shyue Ping Ong"
__maintainer_email__ ="shyuep@gmail.com"
__version__ = "2017.8.16"
SETTINGS_FILE = os.path.join(os.path.expanduser("~"), ".pmgrc.yaml")
def _load_pmg_settings():
try:
with open(SETTINGS_FILE, "rt") as f:
d = yaml.safe_load(f)
except IOError:
# If there are any errors, default to using environment variables
# if present.
d = {}
for k, v in os.environ.items():
if k.startswith("PMG_"):
d[k] = v
elif k in ["VASP_PSP_DIR", "MAPI_KEY", "DEFAULT_FUNCTIONAL"]:
d["PMG_" + k] = v
clean_d = {}
for k, v in d.items():
if not k.startswith("PMG_"):
warnings.warn('With effect from pmg 5.0, all pymatgen settings are'
' prefixed with a "PMG_". E.g., "PMG_VASP_PSP_DIR" '
'instead of "VASP_PSP_DIR".')
clean_d["PMG_" + k] = v
else:
clean_d[k] = v
return clean_d
SETTINGS = _load_pmg_settings()
|
nilq/baby-python
|
python
|
import json
import requests
import xlrd
SOURCES = (
('awois_wrecks', 'http://wrecks.nauticalcharts.noaa.gov/downloads/AWOIS_Wrecks.xls'),
('enc_wrecks', 'http://wrecks.nauticalcharts.noaa.gov/downloads/ENC_Wrecks.xls'),
)
if __name__ == '__main__':
for source_item in SOURCES:
# Source and URL
source_name = source_item[0]
source_url = source_item[1]
# Request the Excel spreadsheet from the URL
request = requests.get(source_url)
data = request.content
# Open the data as an Excel Workbook and get the first sheet
workbook = xlrd.open_workbook(file_contents=data)
worksheet = workbook.sheets()[0]
# We are going to create a GeoJSON Point feature for each row
features = []
# Iterate over all the rows in the worksheet
for row_index in range(1, worksheet.nrows):
# The row will be returned as an array of cell objects
cells = worksheet.row(row_index)
# Construct a GeoJSON Feature dictionary stub that we can fill in
feature = {
'type': 'Feature',
'id': None,
'geometry': {
'type': 'Point',
'coordinates': None,
},
'properties': {},
}
# The column layouts/values are different between the two sources
if source_name == 'awois_wrecks':
source_id = '%.0f' % cells[0].value
vessel_name = cells[1].value
feature_type = cells[2].value
lat = float(cells[3].value)
lng = float(cells[4].value)
chart = None
gp_quality = cells[5].value
depth = cells[6].value
sounding = cells[7].value
year_sunk = cells[8].value
history = cells[9].value
sounding_quality = None
water_level_effect = None
else:
source_id = None
vessel_name = cells[1].value
feature_type = cells[2].value
chart = cells[3].value # Not Used
lat = float(cells[4].value)
lng = float(cells[5].value)
gp_quality = cells[6].value
depth = cells[7].value
sounding = cells[8].value
year_sunk = cells[9].value
history = cells[10].value
sounding_quality = cells[11].value
water_level_effect = cells[12].value
# Get the lat and lng from the cell values
feature['geometry']['coordinates'] = (lng, lat)
# Get the unique ID
feature['id'] = source_id
# Get the properties from the cell values
feature['properties']['vessel_name'] = vessel_name
feature['properties']['feature_type'] = feature_type
feature['properties']['gp_quality'] = gp_quality
feature['properties']['depth'] = depth
feature['properties']['chart'] = chart
feature['properties']['sounding'] = sounding
feature['properties']['yearsunk'] = year_sunk
feature['properties']['history'] = history
feature['properties']['sounding_quality'] = sounding_quality
feature['properties']['water_level_effect'] = water_level_effect
# Add the source to the properties
feature['properties']['source'] = source_name
# Add the feature to our array
features.append(feature)
# Output the GeoJSON Feature Collection
output = {
"type": "FeatureCollection",
"features": features
}
# Output to a GeoJSON file
with open('%s.geojson' % source_name, 'w') as f:
f.write(json.dumps(output, indent=4))
    print('Done.')
|
nilq/baby-python
|
python
|
__author__ = 'arjun010'
from visObject import *
from chartDataFormatter import *
from dataFactGenerator import *
from itertools import combinations, permutations
def getPossibleVisualizations(attributeList, dataList, metadataMap):
possibleVisualizations = []
possibleDataFacts = []
itemAttribute = None # itemAttribute is used in charts like scatterplot and tick plot to enable referring to individual data items
for attribute in metadataMap:
if 'isItemAttr' in metadataMap[attribute]:
if metadataMap[attribute]['isItemAttr'] == "y":
itemAttribute = attribute
break
if len(attributeList) == 1:
attribute = attributeList[0]
if metadataMap[attribute]['type']=="quantitative":
singleAxisTickPlot = getSingleAxisTickPlot(attribute, itemAttribute, dataList)
possibleVisualizations.append(singleAxisTickPlot)
formattedData = getDataForSingleAxisTickPlot(dataList,attribute,itemAttribute)
# tickPlotDataFacts = getDataFacts_TickPlot_Q(attribute,formattedData)
# for dataFact in tickPlotDataFacts:
# dataFact['relatedVisObjects'].append(singleAxisTickPlot)
# possibleDataFacts.append(dataFact)
singleAxisBoxPlot = getSingleAxisBoxPlot(attribute)
possibleVisualizations.append(singleAxisBoxPlot)
singleAxisHistogram = getHistogram(attribute)
possibleVisualizations.append(singleAxisHistogram)
# commonDataFactsForTickAndBoxPlot = getCommonDataFactsForTickPlotAndBoxPlotAndHistogram_Q(attribute, formattedData)
# for dataFact in commonDataFactsForTickAndBoxPlot:
# dataFact['relatedVisObjects'].append(singleAxisTickPlot)
# dataFact['relatedVisObjects'].append(singleAxisBoxPlot)
# if dataFact['type']=="RangeDistributionFact":
# dataFact['relatedVisObjects'].append(singleAxisHistogram)
# possibleDataFacts.append(dataFact)
elif metadataMap[attribute]['type'] == "ordinal" or metadataMap[attribute]['type'] == "nominal":
barChartWithCount = getBarChartWithCount(attribute, dataList)
possibleVisualizations.append(barChartWithCount)
donutChartWithCount = getDonutChartWithCount(attribute, dataList)
possibleVisualizations.append(donutChartWithCount)
formattedData = getDataForBarChartWithCount(dataList,attribute)
commonDataFactsForBarAndDonutChartsWithCount = getCommonFacts_BarAndDonutChartWithCount_N(attribute,formattedData)
for dataFact in commonDataFactsForBarAndDonutChartsWithCount:
dataFact['relatedVisObjects'].append(barChartWithCount)
dataFact['relatedVisObjects'].append(donutChartWithCount)
possibleDataFacts.append(dataFact)
elif len(attributeList) == 2:
attribute1 = attributeList[0]
attribute2 = attributeList[1]
attributeTypeList = [metadataMap[attribute1]['type'],metadataMap[attribute2]['type']]
if attributeTypeList.count("quantitative")==1 and (attributeTypeList.count("nominal")==1 or attributeTypeList.count("ordinal")==1): # N/O x Q
if metadataMap[attribute1]['type']=="quantitative":
yAttr = attribute1
xAttr = attribute2
else:
xAttr = attribute1
yAttr = attribute2
#====================
# generating two axis tick plot and dot plot
#====================
twoAxisTickPlot = getTwoAxisTickPlot(xAttr, yAttr, itemAttribute, dataList)
possibleVisualizations.append(twoAxisTickPlot)
scatterplot = getScatterplot(xAttr, yAttr, itemAttribute, dataList,metadataMap)
possibleVisualizations.append(scatterplot)
formattedData = getDataForTwoAxisTickPlot(dataList,xAttr,yAttr,itemAttribute)
# commonFactsForTickAndDotPlots = getCommonFacts_TickAndDotPlot_NxQ(xAttr,yAttr,None,formattedData)
# for dataFact in commonFactsForTickAndDotPlots:
# dataFact['relatedVisObjects'].append(twoAxisTickPlot)
# dataFact['relatedVisObjects'].append(scatterplot)
# possibleDataFacts.append(dataFact)
#====================
# generating AVG based bar and donut charts
#====================
barChartWithAvg = getBarChartWithAvg(xAttr, yAttr, dataList)
possibleVisualizations.append(barChartWithAvg)
donutChartWithAvg = getDonutChartWithAvg(xAttr, yAttr, dataList)
possibleVisualizations.append(donutChartWithAvg)
formattedData = getDataForBarChartWithAvg(dataList,xAttr,yAttr)
commonDataFactsForBarAndDonutChartsWithAvg = getCommonFacts_BarAndDonutChartWithAvg_NxQ(xAttr, yAttr, "AVG", formattedData)
for dataFact in commonDataFactsForBarAndDonutChartsWithAvg:
dataFact['relatedVisObjects'].append(barChartWithAvg)
dataFact['relatedVisObjects'].append(donutChartWithAvg)
possibleDataFacts.append(dataFact)
#====================
# generating SUM based bar and donut charts
#====================
barChartWithSum = getBarChartWithSum(xAttr, yAttr, dataList)
possibleVisualizations.append(barChartWithSum)
donutChartWithSum = getDonutChartWithSum(xAttr, yAttr, dataList)
possibleVisualizations.append(donutChartWithSum)
formattedData = getDataForBarChartWithSum(dataList,xAttr,yAttr)
commonDataFactsForBarAndDonutChartsWithSum = getCommonFacts_BarAndDonutChartWithSum_NxQ(xAttr, yAttr, "SUM", formattedData)
for dataFact in commonDataFactsForBarAndDonutChartsWithSum:
dataFact['relatedVisObjects'].append(barChartWithSum)
dataFact['relatedVisObjects'].append(donutChartWithSum)
possibleDataFacts.append(dataFact)
elif attributeTypeList.count("quantitative")==2: # Q x Q
# 2 permutations
scatterplot1 = getScatterplot(attribute1,attribute2,itemAttribute,dataList,metadataMap)
possibleVisualizations.append(scatterplot1)
scatterplot2 = getScatterplot(attribute2,attribute1,itemAttribute,dataList,metadataMap)
possibleVisualizations.append(scatterplot2)
formattedData = getDataForScatterplot(dataList,metadataMap,attribute1,attribute2,itemAttribute)
scatterplotDataFacts = getDataFacts_Scatterplot_QxQ(attribute1,attribute2,formattedData,metadataMap)
for dataFact in scatterplotDataFacts:
dataFact['relatedVisObjects'].append(scatterplot1)
dataFact['relatedVisObjects'].append(scatterplot2)
possibleDataFacts.append(dataFact)
elif attributeTypeList.count("quantitative")==0: # N/O x N/O
# aggregated scatterplot with count (2 permutations)
aggregatedScatterplotWithCount1 = getAggregatedScatterplotWithCount(attribute1,attribute2,dataList)
possibleVisualizations.append(aggregatedScatterplotWithCount1)
aggregatedScatterplotWithCount2 = getAggregatedScatterplotWithCount(attribute2,attribute1,dataList)
possibleVisualizations.append(aggregatedScatterplotWithCount2)
# stacked bar chart (2 permutations)
stackedBarChart1 = getStackedBarChart(attribute1,attribute2,dataList)
possibleVisualizations.append(stackedBarChart1)
stackedBarChart2 = getStackedBarChart(attribute2,attribute1,dataList)
possibleVisualizations.append(stackedBarChart2)
# grouped bar chart (maybe)
formattedData1 = getDataForAggregatedScatterplotByCount(dataList,metadataMap,attribute1,attribute2)
commonDataFactsForStackedBarAndAggregatedDotPlotWithCount = getCommonDataFacts_StackedBarAndAggregatedDotPlotWithCount_NxN(attribute1,attribute2,formattedData1)
for dataFact in commonDataFactsForStackedBarAndAggregatedDotPlotWithCount:
dataFact['relatedVisObjects'].append(aggregatedScatterplotWithCount1)
dataFact['relatedVisObjects'].append(aggregatedScatterplotWithCount2)
dataFact['relatedVisObjects'].append(stackedBarChart1)
dataFact['relatedVisObjects'].append(stackedBarChart2)
possibleDataFacts.append(dataFact)
dataFactsForStackedBarChartWithCount = getStackedBarCharDataFacts_NxN(attribute1,attribute2,formattedData1)
for dataFact in dataFactsForStackedBarChartWithCount:
dataFact['relatedVisObjects'].append(aggregatedScatterplotWithCount1)
dataFact['relatedVisObjects'].append(aggregatedScatterplotWithCount2)
dataFact['relatedVisObjects'].append(stackedBarChart1)
dataFact['relatedVisObjects'].append(stackedBarChart2)
possibleDataFacts.append(dataFact)
# formattedData2 = getDataForAggregatedScatterplotByCount(dataList,metadataMap,attribute2,attribute1)
# commonDataFactsForStackedBarAndAggregatedDotPlotWithCount = getCommonDataFacts_StackedBarAndAggregatedDotPlotWithCount_NxN(attribute2,attribute1,formattedData2)
# for dataFact in commonDataFactsForStackedBarAndAggregatedDotPlotWithCount:
# dataFact['relatedVisObjects'].append(aggregatedScatterplotWithCount2)
# dataFact['relatedVisObjects'].append(stackedBarChart2)
# possibleDataFacts.append(dataFact)
# dataFactsForStackedBarChartWithCount = getStackedBarCharDataFacts_NxN(attribute2,attribute1,formattedData2)
# for dataFact in dataFactsForStackedBarChartWithCount:
# dataFact['relatedVisObjects'].append(stackedBarChart2)
# possibleDataFacts.append(dataFact)
elif len(attributeList) == 3:
attribute1 = attributeList[0]
attribute2 = attributeList[1]
attribute3 = attributeList[2]
attributeTypeList = [metadataMap[attribute1]['type'],metadataMap[attribute2]['type'],metadataMap[attribute3]['type']]
if attributeTypeList.count("quantitative")==0: # 3 N/O
pass
elif attributeTypeList.count("quantitative")==1: # 1 Q x 2 N/O
if metadataMap[attribute1]['type']=="quantitative":
quantitativeAttr = attribute1
if len(metadataMap[attribute2]['domain']) <= len(metadataMap[attribute3]['domain']):
smallerNOAttr = attribute2
largerNOAttr = attribute3
else:
smallerNOAttr = attribute3
largerNOAttr = attribute2
elif metadataMap[attribute2]['type']=="quantitative":
quantitativeAttr = attribute2
if len(metadataMap[attribute1]['domain']) <= len(metadataMap[attribute3]['domain']):
smallerNOAttr = attribute1
largerNOAttr = attribute3
else:
smallerNOAttr = attribute3
largerNOAttr = attribute1
elif metadataMap[attribute3]['type']=="quantitative":
quantitativeAttr = attribute3
if len(metadataMap[attribute1]['domain']) <= len(metadataMap[attribute2]['domain']):
smallerNOAttr = attribute1
largerNOAttr = attribute2
else:
smallerNOAttr = attribute2
largerNOAttr = attribute1
# N/O x Q x N/O (2 coloring variations possible for each chart)
coloredTickPlot1 = getColoredTickPlot(largerNOAttr, quantitativeAttr, smallerNOAttr, itemAttribute, dataList)
possibleVisualizations.append(coloredTickPlot1)
coloredTickPlot2 = getColoredTickPlot(smallerNOAttr, quantitativeAttr, largerNOAttr, itemAttribute, dataList)
possibleVisualizations.append(coloredTickPlot2)
coloredScatterplot1 = getColoredScatterplot(smallerNOAttr, quantitativeAttr, largerNOAttr, itemAttribute, dataList,metadataMap)
possibleVisualizations.append(coloredScatterplot1)
coloredScatterplot2 = getColoredScatterplot(largerNOAttr, quantitativeAttr, smallerNOAttr, itemAttribute, dataList,metadataMap)
possibleVisualizations.append(coloredScatterplot2)
formattedData = getDataForColoredScatterplot(dataList,metadataMap,largerNOAttr,quantitativeAttr,smallerNOAttr,itemAttribute)
# commonDataFactsForColoredTickPlotAndScatterplot = getCommonDataFacts_ColoredTickPlotAndScatterplot_NxQxN(largerNOAttr,quantitativeAttr,smallerNOAttr,formattedData,metadataMap,itemAttribute)
# for dataFact in commonDataFactsForColoredTickPlotAndScatterplot:
# dataFact['relatedVisObjects'].append(coloredTickPlot1)
# dataFact['relatedVisObjects'].append(coloredTickPlot2)
# dataFact['relatedVisObjects'].append(coloredScatterplot1)
# dataFact['relatedVisObjects'].append(coloredScatterplot2)
# possibleDataFacts.append(dataFact)
#========================
coloredScatterplotByAvg1 = getColoredScatterplotByAvg(smallerNOAttr, quantitativeAttr, largerNOAttr, itemAttribute, dataList,metadataMap)
possibleVisualizations.append(coloredScatterplotByAvg1)
coloredScatterplotByAvg2 = getColoredScatterplotByAvg(largerNOAttr, quantitativeAttr, smallerNOAttr, itemAttribute, dataList,metadataMap)
possibleVisualizations.append(coloredScatterplotByAvg2)
coloredTickPlotByAvg1 = getColoredTickPlotByAvg(smallerNOAttr, quantitativeAttr, largerNOAttr, itemAttribute, dataList,metadataMap)
possibleVisualizations.append(coloredTickPlotByAvg1)
coloredTickPlotByAvg2 = getColoredTickPlotByAvg(largerNOAttr, quantitativeAttr, smallerNOAttr, itemAttribute, dataList,metadataMap)
possibleVisualizations.append(coloredTickPlotByAvg2)
# N/O x N/O x Q (2 variations for AVG and SUM)
aggregatedAvgScatterplot1 = getAggregatedScatterplotByAvg(smallerNOAttr, largerNOAttr, quantitativeAttr, dataList,metadataMap)
possibleVisualizations.append(aggregatedAvgScatterplot1)
aggregatedAvgScatterplot2 = getAggregatedScatterplotByAvg(largerNOAttr, smallerNOAttr, quantitativeAttr, dataList,metadataMap)
possibleVisualizations.append(aggregatedAvgScatterplot2)
formattedData = getDataForAggregatedScatterplotByAvg(dataList,metadataMap,smallerNOAttr,largerNOAttr,quantitativeAttr)
dataFactsForAggregatedScatterplotByAvg = getDataFactsForAggregatedScatterplotByAvg_NxNxQ(smallerNOAttr, largerNOAttr, quantitativeAttr, formattedData)
for dataFact in dataFactsForAggregatedScatterplotByAvg:
dataFact['relatedVisObjects'].append(aggregatedAvgScatterplot1)
dataFact['relatedVisObjects'].append(aggregatedAvgScatterplot2)
dataFact['relatedVisObjects'].append(coloredScatterplotByAvg1)
dataFact['relatedVisObjects'].append(coloredScatterplotByAvg2)
dataFact['relatedVisObjects'].append(coloredTickPlotByAvg1)
dataFact['relatedVisObjects'].append(coloredTickPlotByAvg2)
possibleDataFacts.append(dataFact)
coloredScatterplotBySum1 = getColoredScatterplotBySum(smallerNOAttr, quantitativeAttr, largerNOAttr, itemAttribute, dataList,metadataMap)
possibleVisualizations.append(coloredScatterplotBySum1)
coloredScatterplotBySum2 = getColoredScatterplotBySum(largerNOAttr, quantitativeAttr, smallerNOAttr, itemAttribute, dataList,metadataMap)
possibleVisualizations.append(coloredScatterplotBySum2)
coloredTickPlotBySum1 = getColoredTickPlotBySum(smallerNOAttr, quantitativeAttr, largerNOAttr, itemAttribute, dataList,metadataMap)
possibleVisualizations.append(coloredTickPlotBySum1)
coloredTickPlotBySum2 = getColoredTickPlotBySum(largerNOAttr, quantitativeAttr, smallerNOAttr, itemAttribute, dataList,metadataMap)
possibleVisualizations.append(coloredTickPlotBySum2)
aggregatedSumScatterplot1 = getAggregatedScatterplotBySum(smallerNOAttr, largerNOAttr, quantitativeAttr, dataList,metadataMap)
possibleVisualizations.append(aggregatedSumScatterplot1)
aggregatedSumScatterplot2 = getAggregatedScatterplotBySum(largerNOAttr, smallerNOAttr, quantitativeAttr, dataList,metadataMap)
possibleVisualizations.append(aggregatedSumScatterplot2)
formattedData = getDataForAggregatedScatterplotBySum(dataList,metadataMap,smallerNOAttr,largerNOAttr,quantitativeAttr)
dataFactsForAggregatedScatterplotBySum = getDataFactsForAggregatedScatterplotBySum_NxNxQ(smallerNOAttr, largerNOAttr, quantitativeAttr, formattedData)
for dataFact in dataFactsForAggregatedScatterplotBySum:
dataFact['relatedVisObjects'].append(aggregatedSumScatterplot1)
dataFact['relatedVisObjects'].append(aggregatedSumScatterplot2)
dataFact['relatedVisObjects'].append(coloredScatterplotBySum1)
dataFact['relatedVisObjects'].append(coloredScatterplotBySum2)
dataFact['relatedVisObjects'].append(coloredTickPlotBySum1)
dataFact['relatedVisObjects'].append(coloredTickPlotBySum2)
possibleDataFacts.append(dataFact)
elif attributeTypeList.count("quantitative")==2: # 2 Q x 1 N/O
if metadataMap[attribute1]['type']=="ordinal" or metadataMap[attribute1]['type']=="nominal":
nonQAttribute = attribute1
quantitativeAttr1 = attribute2
quantitativeAttr2 = attribute3
elif metadataMap[attribute2]['type']=="ordinal" or metadataMap[attribute2]['type']=="nominal":
nonQAttribute = attribute2
quantitativeAttr1 = attribute1
quantitativeAttr2 = attribute3
elif metadataMap[attribute3]['type']=="ordinal" or metadataMap[attribute3]['type']=="nominal":
nonQAttribute = attribute3
quantitativeAttr1 = attribute1
quantitativeAttr2 = attribute2
# 2 axis variations possible for scatterplot of QxQ +color
coloredScatterplot1 = getColoredScatterplot(quantitativeAttr1,quantitativeAttr2,nonQAttribute,itemAttribute,dataList,metadataMap)
possibleVisualizations.append(coloredScatterplot1)
coloredScatterplot2 = getColoredScatterplot(quantitativeAttr2,quantitativeAttr1,nonQAttribute,itemAttribute,dataList,metadataMap)
possibleVisualizations.append(coloredScatterplot2)
formattedData = getDataForColoredScatterplot(dataList,metadataMap,quantitativeAttr1,quantitativeAttr2,nonQAttribute,itemAttribute)
dataFactsForColoredScatterplots = getDataFactsForColoredScatterplot_QxQxN(quantitativeAttr1,quantitativeAttr2,nonQAttribute,formattedData,metadataMap)
for dataFact in dataFactsForColoredScatterplots:
dataFact['relatedVisObjects'].append(coloredScatterplot1)
dataFact['relatedVisObjects'].append(coloredScatterplot2)
possibleDataFacts.append(dataFact)
# 2 sizing variations possible for scatterplot of N/O x Q +size
sizedScatterplot1 = getSizedScatterplot(nonQAttribute, quantitativeAttr1, quantitativeAttr2, itemAttribute, dataList,metadataMap)
possibleVisualizations.append(sizedScatterplot1)
sizedScatterplot2 = getSizedScatterplot(nonQAttribute, quantitativeAttr2, quantitativeAttr1, itemAttribute, dataList,metadataMap)
possibleVisualizations.append(sizedScatterplot2)
formattedData = getDataForColoredScatterplot(dataList,metadataMap,quantitativeAttr1,quantitativeAttr2,nonQAttribute,itemAttribute)
commonDataFactsForColoredAndSizedScatterplot = getCommonDataFactsForColoredAndSizedScatterplot_QxQxN(quantitativeAttr1,quantitativeAttr2,nonQAttribute,formattedData,metadataMap)
for dataFact in commonDataFactsForColoredAndSizedScatterplot:
dataFact['relatedVisObjects'].append(coloredScatterplot1)
dataFact['relatedVisObjects'].append(coloredScatterplot2)
dataFact['relatedVisObjects'].append(sizedScatterplot1)
dataFact['relatedVisObjects'].append(sizedScatterplot2)
possibleDataFacts.append(dataFact)
elif attributeTypeList.count("quantitative")==3: # 3 Q
# 6 permutations
for attributePermutation in permutations(attributeList,3):
attributePermutation = list(attributePermutation)
sizedScatterplot = getSizedScatterplot(attributePermutation[0],attributePermutation[1],attributePermutation[2],itemAttribute,dataList,metadataMap)
possibleVisualizations.append(sizedScatterplot)
formattedData = getDataForSizedScatterplot(dataList, metadataMap, attributePermutation[0],attributePermutation[1],attributePermutation[2],itemAttribute)
dataFactsForSizedScatterplot = getDataFactsForSizedScatterplot_QxQxQ(attributePermutation[0],attributePermutation[1],attributePermutation[2],formattedData,metadataMap)
for dataFact in dataFactsForSizedScatterplot:
dataFact['relatedVisObjects'].append(sizedScatterplot)
possibleDataFacts.append(dataFact)
return possibleVisualizations, possibleDataFacts
def getSingleAxisTickPlot(yAttr, itemAttr, dataList):
visObject = getEmptyVisObject()
visObject['type'] = "SingleAxisTickPlot"
visObject['mark'] = "tick"
visObject['y']['attribute'] = yAttr
# visObject['shapedData'] = getDataForSingleAxisTickPlot(dataList, yAttr, itemAttr)
return visObject
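# Illustrative shape of the objects these factory functions return (assuming
# getEmptyVisObject(), referenced above and defined earlier in this module,
# yields the usual dict of encoding channels; attribute names are hypothetical):
#   getSingleAxisTickPlot("Rating", "Title", dataList) ->
#     {'type': 'SingleAxisTickPlot', 'mark': 'tick',
#      'y': {'attribute': 'Rating', ...}, ...}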
def getSingleAxisBoxPlot(yAttr):
visObject = getEmptyVisObject()
visObject['type'] = "SingleAxisBoxPlot"
visObject['mark'] = "box"
visObject['y']['attribute'] = yAttr
# visObject['shapedData'] = getDataForSingleAxisTickPlot(dataList, yAttr, itemAttr)
return visObject
def getHistogram(yAttr):
visObject = getEmptyVisObject()
visObject['type'] = "Histogram"
visObject['mark'] = "bar"
visObject['y']['attribute'] = yAttr
visObject['y']['transform'] = "BIN"
# visObject['shapedData'] = getDataForSingleAxisTickPlot(dataList, yAttr, itemAttr)
return visObject
def getBarChartWithCount(attribute, dataList):
visObject = getEmptyVisObject()
visObject['type'] = "BarWithCount"
visObject['mark'] = "bar"
visObject['x']['attribute'] = attribute
visObject['y']['transform'] = "COUNT"
# visObject['shapedData'] = getDataForBarChartWithCount(dataList, attribute)
return visObject
def getDonutChartWithCount(attribute, dataList):
visObject = getEmptyVisObject()
visObject['type'] = "DonutWithCount"
visObject['mark'] = "arc"
visObject['x']['attribute'] = attribute
visObject['y']['transform'] = "COUNT"
visObject['color']['attribute'] = attribute
# visObject['shapedData'] = getDataForBarChartWithCount(dataList, attribute) # same data format as bar chart
return visObject
def getTwoAxisTickPlot(xAttr, yAttr, itemAttr, dataList):
visObject = getEmptyVisObject()
visObject['type'] = "TwoAxisTickPlot"
visObject['mark'] = "tick"
visObject['x']['attribute'] = xAttr
visObject['y']['attribute'] = yAttr
# visObject['shapedData'] = getDataForTwoAxisTickPlot(dataList, xAttr, yAttr, itemAttr)
return visObject
def getBarChartWithAvg(xAttr, yAttr, dataList):
visObject = getEmptyVisObject()
visObject['type'] = "BarWithAvg"
visObject['mark'] = "bar"
visObject['x']['attribute'] = xAttr
visObject['y']['attribute'] = yAttr
visObject['y']['transform'] = "AVG"
# visObject['shapedData'] = getDataForBarChartWithAvg(dataList, xAttr, yAttr)
return visObject
def getBarChartWithSum(xAttr, yAttr, dataList):
visObject = getEmptyVisObject()
visObject['type'] = "BarWithSum"
visObject['mark'] = "bar"
visObject['x']['attribute'] = xAttr
visObject['y']['attribute'] = yAttr
visObject['y']['transform'] = "SUM"
# visObject['shapedData'] = getDataForBarChartWithSum(dataList, xAttr, yAttr)
return visObject
def getDonutChartWithAvg(xAttr, yAttr, dataList):
visObject = getEmptyVisObject()
visObject['type'] = "DonutWithAvg"
visObject['mark'] = "arc"
visObject['x']['attribute'] = xAttr
visObject['y']['attribute'] = yAttr
visObject['y']['transform'] = "AVG"
visObject['color']['attribute'] = xAttr
# visObject['shapedData'] = getDataForBarChartWithAvg(dataList, xAttr, yAttr) # same data format as bar chart
return visObject
def getDonutChartWithSum(xAttr, yAttr, dataList):
visObject = getEmptyVisObject()
visObject['type'] = "DonutWithSum"
visObject['mark'] = "arc"
visObject['x']['attribute'] = xAttr
visObject['y']['attribute'] = yAttr
visObject['y']['transform'] = "SUM"
visObject['color']['attribute'] = xAttr
# visObject['shapedData'] = getDataForBarChartWithSum(dataList, xAttr, yAttr) # same data format as bar chart
return visObject
def getScatterplot(xAttr, yAttr, itemAttr, dataList, metadataMap):
visObject = getEmptyVisObject()
visObject['type'] = "Scatterplot"
visObject['mark'] = "point"
visObject['x']['attribute'] = xAttr
visObject['y']['attribute'] = yAttr
# visObject['shapedData'] = getDataForScatterplot(dataList, metadataMap, xAttr,yAttr,itemAttr)
return visObject
def getColoredTickPlot(xAttr, yAttr, colorAttr, itemAttr, dataList):
visObject = getEmptyVisObject()
visObject['type'] = "TickPlotWithColor"
visObject['mark'] = "tick"
visObject['x']['attribute'] = xAttr
visObject['y']['attribute'] = yAttr
visObject['color']['attribute'] = colorAttr
# visObject['shapedData'] = getDataForColoredTickPlot(dataList,xAttr,yAttr,colorAttr,itemAttr)
return visObject
def getColoredScatterplot(xAttr, yAttr, colorAttr, itemAttr, dataList, metadataMap):
visObject = getEmptyVisObject()
visObject['type'] = "ScatterplotWithColor"
visObject['mark'] = "point"
visObject['x']['attribute'] = xAttr
visObject['y']['attribute'] = yAttr
visObject['color']['attribute'] = colorAttr
# visObject['shapedData'] = getDataForColoredScatterplot(dataList, metadataMap,xAttr,yAttr,colorAttr,itemAttr)
return visObject
def getColoredScatterplotByAvg(xAttr, yAttr, colorAttr, itemAttr, dataList, metadataMap):
visObject = getEmptyVisObject()
visObject['type'] = "ScatterplotWithColorByAvg"
visObject['mark'] = "point"
visObject['x']['attribute'] = xAttr
visObject['y']['attribute'] = yAttr
visObject['y']['transform'] = "AVG"
visObject['color']['attribute'] = colorAttr
return visObject
def getColoredTickPlotByAvg(xAttr, yAttr, colorAttr, itemAttr, dataList, metadataMap):
visObject = getEmptyVisObject()
visObject['type'] = "TickPlotWithColorByAvg"
visObject['mark'] = "tick"
visObject['x']['attribute'] = xAttr
visObject['y']['attribute'] = yAttr
visObject['y']['transform'] = "AVG"
visObject['color']['attribute'] = colorAttr
return visObject
def getColoredScatterplotBySum(xAttr, yAttr, colorAttr, itemAttr, dataList, metadataMap):
visObject = getEmptyVisObject()
visObject['type'] = "ScatterplotWithColorBySum"
visObject['mark'] = "point"
visObject['x']['attribute'] = xAttr
visObject['y']['attribute'] = yAttr
visObject['y']['transform'] = "SUM"
visObject['color']['attribute'] = colorAttr
return visObject
def getColoredTickPlotBySum(xAttr, yAttr, colorAttr, itemAttr, dataList, metadataMap):
visObject = getEmptyVisObject()
visObject['type'] = "TickPlotWithColorBySum"
visObject['mark'] = "point"
visObject['x']['attribute'] = xAttr
visObject['y']['attribute'] = yAttr
visObject['y']['transform'] = "SUM"
visObject['color']['attribute'] = colorAttr
return visObject
def getAggregatedScatterplotByAvg(xAttr, yAttr, sizeAttr, dataList, metadataMap):
visObject = getEmptyVisObject()
visObject['type'] = "AggregatedScatterplotWithAvgSize"
visObject['mark'] = "point"
visObject['x']['attribute'] = xAttr
visObject['y']['attribute'] = yAttr
visObject['size']['attribute'] = sizeAttr
visObject['size']['transform'] = "AVG"
# visObject['shapedData'] = getDataForAggregatedScatterplotByAvg(dataList, metadataMap, xAttr,yAttr,sizeAttr)
return visObject
def getAggregatedScatterplotBySum(xAttr, yAttr, sizeAttr, dataList, metadataMap):
visObject = getEmptyVisObject()
visObject['type'] = "AggregatedScatterplotWithSumSize"
visObject['mark'] = "point"
visObject['x']['attribute'] = xAttr
visObject['y']['attribute'] = yAttr
visObject['size']['attribute'] = sizeAttr
visObject['size']['transform'] = "SUM"
# visObject['shapedData'] = getDataForAggregatedScatterplotBySum(dataList, metadataMap, xAttr,yAttr,sizeAttr)
return visObject
def getAggregatedScatterplotWithCount(xAttr, yAttr, dataList):
visObject = getEmptyVisObject()
visObject['type'] = "AggregatedScatterplotWithCountSize"
visObject['mark'] = "point"
visObject['x']['attribute'] = xAttr
visObject['y']['attribute'] = yAttr
visObject['size']['transform'] = "COUNT"
# visObject['shapedData'] = getDataForAggregatedScatterplotByCount(dataList,metadataMap,xAttr,yAttr)
return visObject
def getSizedScatterplot(xAttr, yAttr, sizeAttr, itemAttr, dataList, metadataMap):
visObject = getEmptyVisObject()
visObject['type'] = "ScatterplotWithSize"
visObject['mark'] = "point"
visObject['x']['attribute'] = xAttr
visObject['y']['attribute'] = yAttr
visObject['size']['attribute'] = sizeAttr
# visObject['shapedData'] = getDataForSizedScatterplot(dataList, metadataMap, xAttr,yAttr,sizeAttr,itemAttr)
return visObject
def getStackedBarChart(xAttr,colorAttr,dataList):
visObject = getEmptyVisObject()
visObject['type'] = "StackedBarChart"
visObject['mark'] = "bar"
visObject['x']['attribute'] = xAttr
visObject['y']['transform'] = "COUNT"
visObject['color']['attribute'] = colorAttr
# visObject['shapedData'] = getDataForStackedBarChart(dataList,xAttr,colorAttr)
return visObject
if __name__ == '__main__':
pass
|
nilq/baby-python
|
python
|
from __future__ import division
# x is the number whose square root we want
# i is the number of iterations
def calc_raizq(x, chute, i):
    if i < 1:
        raise ValueError("At least one iteration is required")
    if chute < 1:  # guard against a nonpositive initial guess (avoids division by zero)
        chute = 1
if x < 0:
return complex(0, calc_raizq(-x, chute, i))
else:
for n in range(i):
            chute = 0.5 * (chute + x / chute)  # Newton's method update
return chute
print(calc_raizq(9, 3, 3))
# For working with complex numbers, the standard cmath module is more common:
# https://docs.python.org/3/library/cmath.html
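# A quick cross-check against the standard library (illustrative; cmath.sqrt
# returns a complex result directly):
import cmath
print(cmath.sqrt(-9))          # 3j
print(calc_raizq(-9, 3, 10))   # should approximate complex(0, 3.0)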
|
nilq/baby-python
|
python
|
############################################################################
# Copyright ESIEE Paris (2019) #
# #
# Contributor(s) : Giovanni Chierchia, Benjamin Perret #
# #
# Distributed under the terms of the CECILL-B License. #
# #
# The full license is in the file LICENSE, distributed with this software. #
############################################################################
from collections import OrderedDict
from sklearn import datasets
import numpy as np
import scipy
from .plots import show_grid, plot_clustering
from .graph import build_graph
def load_datasets(n_samples, n_labeled, preprocess=lambda x: x):
sets = OrderedDict()
np.random.seed(2)
sets['circles'] = create_dataset(n_samples, n_labeled, preprocess, make_circles)
sets['moons'] = create_dataset(n_samples, 2*n_labeled, preprocess, make_moons)
sets['blobs'] = create_dataset(n_samples, n_labeled, preprocess, make_blobs)
sets['varied'] = create_dataset(n_samples, n_labeled, preprocess, make_varied)
sets['aniso'] = create_dataset(n_samples, n_labeled, preprocess, make_aniso)
return sets
def show_datasets(sets, show_labeled=False, figname=None):
get_list = lambda key: [sets[name][key] for name in sets]
X_list = get_list("X")
y_list = get_list("y")
    i_list = get_list("labeled") if show_labeled else len(X_list)*[None]
    # show only the first fold if several exist; guard against the None
    # entries used when show_labeled is False
    i_list = [i[0] if i is not None and i.ndim == 2 else i for i in i_list]
show_grid(plot_clustering, X_list, y_list, i_list, figname=figname)
#-----------------------------#
def create_dataset(n_samples, n_labeled, preprocess, make_data):
    X, y = make_data(n_samples, n_labeled)
idx = np.arange(X.shape[0])
np.random.shuffle(idx)
data = {
"X": preprocess(X),
"y": y,
"n_clusters": len(np.unique(y)),
"labeled": idx[:n_labeled],
"unlabeled": idx[n_labeled:],
}
return data
def make_circles(n_samples, n_labeled):
X, y = datasets.make_circles(n_samples, factor=.5, noise=.05, random_state=10)
return X, y
def make_moons(n_samples, n_labeled):
X, y = datasets.make_moons(n_samples, noise=.05, random_state=42)
X, y = np.concatenate((X, X + (2.5, 0))), np.concatenate((y, y+2))
return X, y
def make_blobs(n_samples, n_labeled):
X, y = datasets.make_blobs(n_samples, random_state=42)
return X, y
def make_varied(n_samples, n_labeled):
X, y = datasets.make_blobs(n_samples, cluster_std=[1.0, 2.5, 0.5], random_state=170)
return X, y
def make_aniso(n_samples, n_labeled):
X, y = datasets.make_blobs(n_samples, random_state=170)
X = np.dot(X, [[0.6, -0.6], [-0.4, 0.8]])
return X, y
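# Illustrative use (within the package, since the imports above are relative):
#   sets = load_datasets(n_samples=500, n_labeled=10)
#   show_datasets(sets, show_labeled=True)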
|
nilq/baby-python
|
python
|
import torch
import torch.nn as nn
import torch.optim as optim
import torch.nn.functional as F
import pytorch_lightning as pl
class Net(pl.LightningModule):
def __init__(self):
super(Net, self).__init__()
self.layer1 = nn.Linear(28*28, 1024)
self.layer2 = nn.Linear(1024, 128)
self.layer3 = nn.Linear(128, 10)
def forward(self, x):
x = x.view(x.size(0), -1)
out = self.layer1(x)
out = F.relu(out)
out = self.layer2(out)
out = F.relu(out)
out = self.layer3(out)
return out
    def training_step(self, batch, batch_idx):
        data, target = batch
        out = self.forward(data)
        loss = F.cross_entropy(out, target)
        preds = out.argmax(dim=1, keepdim=True)
        # compare the predictions (not the loss) against the targets
        acc = torch.eq(preds, target.view(-1, 1)).float().mean()
        result = pl.TrainResult(loss)
        result.log('train_loss', loss)
        result.log('train_acc', acc)
        return result
    def validation_step(self, batch, batch_idx):
        data, target = batch
        out = self.forward(data)
        loss = F.cross_entropy(out, target)
        preds = out.argmax(dim=1, keepdim=True)
        # compare the predictions (not the loss) against the targets
        acc = torch.eq(preds, target.view(-1, 1)).float().mean()
        result = pl.EvalResult(checkpoint_on=loss)
        result.log('val_loss', loss)
        result.log('val_acc', acc)
        return result
def configure_optimizers(self):
optimizer = optim.Adam(self.parameters(), lr=1e-3)
return optimizer
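# Minimal training sketch (illustrative; pl.TrainResult/pl.EvalResult above pin
# this to PyTorch Lightning ~0.9.x, and the MNIST DataLoaders are assumed to
# exist elsewhere):
#   model = Net()
#   trainer = pl.Trainer(max_epochs=1)
#   trainer.fit(model, train_loader, val_loader)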
|
nilq/baby-python
|
python
|
def foo(x):
return x*x
y = [1,2,3,4,5]
z = list(map(foo,y))
for val in z:
print(val)
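# Equivalent, more idiomatic spellings of the same computation (illustrative):
squares = [foo(v) for v in y]                    # list comprehension
squares_lambda = list(map(lambda v: v * v, y))   # inline lambda
print(squares == z == squares_lambda)            # True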
|
nilq/baby-python
|
python
|
# -*- coding: utf-8 -*-
__author__ = "Michele Samorani"
import pandas as pd
import cplex
import time
import random
TIME_LIMIT_SECONDS = 60
def build_scenarios(show_probs, max_scenarios, seed):
    """
    Builds the scenarios.
    :param show_probs: show probability of each patient
    :type show_probs: list[float]
    :return: yields (probability, 0-1 show list) pairs, one per scenario;
        exhaustive if 2**n <= max_scenarios, otherwise Monte Carlo sampled
    """
random.seed(seed)
n = len(show_probs)
if 2 ** n <= max_scenarios:
import itertools
lst = [list(i) for i in itertools.product([0, 1], repeat=n)]
for s in lst:
p = 1
for j in range(n):
p *= (show_probs[j] if s[j] == 1 else 1 - show_probs[j])
yield p,s
else:
s = show_probs.copy()
for i in range(max_scenarios):
for j in range(n):
p2 = random.uniform(0, 1)
s[j] = 1 if p2 < show_probs[j] else 0
yield 1 / max_scenarios, s.copy()
# s = show_probs.copy()
# for i in range(max_scenarios):
# for j in range(n):
# p2 = random.uniform(0, 1)
# s[j] = 1 if p2 < show_probs[j] else 0
# p = 1
# for j in range(n):
# p *= (show_probs[j] if s[j] == 1 else 1 - show_probs[j])
#
# # input(f'returning {str(p)}->{str(s)}')
# yield p, s.copy()
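# Illustrative call: with two patients and show probabilities [0.9, 0.5],
# 2**2 = 4 <= max_scenarios, so every scenario is enumerated exactly once:
#   list(build_scenarios([0.9, 0.5], max_scenarios=100, seed=0))
#   -> [(0.05, [0, 0]), (0.05, [0, 1]), (0.45, [1, 0]), (0.45, [1, 1])]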
def optimally_schedule(show_probs, wtc, otc, nslots,seed, max_scenarios = 100000, delta_sim = 0):
print_steps = False
# First, find the scenarios
qs = [] # a list of sets of patients that show under a scenario
ps = [] # a list of probabilities
init = time.time()
ser = pd.Series(data=show_probs)
sorted_indices = list(ser.sort_values().index)
# Similar index (for each index i, the index of the other patient for constraint 4)
similar = {}
for iii in range(len(sorted_indices)-1):
i = sorted_indices[iii]
j = sorted_indices[iii+1]
# check whether i is similar to j
if show_probs[j] - show_probs[i] <= delta_sim + 0.00000001:
similar[i] = j
else:
similar[i] = -1
similar[sorted_indices[-1]] = -1
if print_steps:
print('Building scenarios')
totp = 0
for p,s in build_scenarios(show_probs, max_scenarios,seed):
qs.append(set()) # set of showing indices
ps.append(p)
totp+=p
for i in range(len(s)):
if s[i] == 1:
qs[-1].add(i)
#print(f'totp={totp}')
# if abs(totp-1) > 0.01:
# input('TOT P < 1!!!!!!')
S = len(qs) # number of scenarios
F = nslots # number of slots
N = len(show_probs) # number of patients
F_max = N
if print_steps:
print(f'Done in {time.time() - init}. Built {S} scenarios. Setting up problem...')
c = cplex.Cplex()
# variables
c.variables.add(names=[f'x{i}_{j}' for i in range(N) for j in range(F)],types=[c.variables.type.binary for i in range(N) for j in range(F)])
c.variables.add(names=[f'b{s}_{j}' for j in range(F_max) for s in range(S)],lb=[0 for j in range(F_max) for s in range(S)])
c.set_log_stream(None)
c.set_results_stream(None)
c.set_warning_stream(None)
c.parameters.timelimit.set(TIME_LIMIT_SECONDS)
# objective
if print_steps:
print(f'Setting up objective...')
for s in range(S):
tot_shows = len(qs[s]) #N^s
#print(f'Scenario {s} with probability {ps[s]} and tot_shows = {tot_shows}:')
#print(qs[s])
if tot_shows == 0:
continue
for j in range(F_max):
#print(f'scenario {s}, j={j}: adding b{s}_{j} * (ps_s={ps[s]}) * (wtc={wtc}) / (tot_shows={tot_shows})')
c.objective.set_linear(f'b{s}_{j}',ps[s] * wtc)
c.objective.set_linear(f'b{s}_{F-1}', ps[s] * (otc + wtc))
#print(f'scenario {s}: adding b{s}_{F-1} * (ps_s={ps[s]}) * (otc={otc})')
# constraint set (1)
if print_steps:
print(f'Setting up constraint set 1...')
for i in range(N):
c.linear_constraints.add(lin_expr=[cplex.SparsePair(
ind = [f'x{i}_{j}' for j in range(F)], val = [1.0 for j in range(F)])],
senses = ['E'],
rhs=[1],
names=[f'(1_{i})'])
# constraint set (2)
if print_steps:
print(f'Setting up constraint set 2...')
for s in range(S):
if print_steps and s % 1000 == 0:
print(f'Built constraints for {s} scenarios')
for j in range(0,F_max):
expr = []
if j < F:
expr = [f'x{i}_{j}' for i in qs[s]]
expr.append(f'b{s}_{j}')
if j >= 1:
expr.append(f'b{s}_{j-1}')
vals = []
if j <F:
vals = [-1.0 for i in qs[s]]
vals.append(1)
if j >=1 :
vals.append(-1)
c.linear_constraints.add(lin_expr=[cplex.SparsePair(expr,vals)],
senses=['G'],
rhs=[-1],
names=[f'(2_{s}_{j})'])
# constraint set (3)
if print_steps:
print(f'Setting up constraint set 3...')
# original constraint 3
if (N >= F):
for j in range(0, F):
c.linear_constraints.add(lin_expr=[cplex.SparsePair(
ind=[f'x{i}_{j}' for i in range(N)], val=[1.0 for i in range(N)])],
senses=['G'],
rhs=[1],
names=[f'(3_{j})'])
# constraint set (4)
if print_steps:
print(f'Setting up constraint set 4...')
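    # NOTE: the residual-adding calls below are commented out, so constraint
    # set 4 (ordering between patients with similar show probabilities) is
    # currently disabled; the expr/vals built in the loop are never used.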
for i1 in range(N):
i2 = similar[i1]
if i2 == -1:
continue
for j_prime in range(F-1):
expr = []
vals = []
# old and faster
expr = [f'x{i1}_{j}' for j in range(j_prime+1,F)]
# new and slower
#expr = [f'x{i1}_{j_prime}']
# expr.extend([f'x{i2}_{j}' for j in range(0,j_prime+1)])
# vals = [1 for i in range(len(expr))]
# c.linear_constraints.add(lin_expr=[cplex.SparsePair(expr, vals)],
# senses=['L'],
# rhs=[1],
# names=[f'(4_{i1}_{j_prime})'])
#c.write(filename='model.txt', filetype='lp')
if print_steps:
print(f'Solving...')
c.solve()
time_taken = time.time() - init
# c.solution.write('solution.txt')
#print(f'Value = {c.solution.get_objective_value()}')
solution = []
try:
for i in range(N):
sols = c.solution.get_values([f'x{i}_{j}' for j in range(F)])
for j in range(F):
if sols[j] >= .9:
solution.append(j)
break
except:
import numpy as np
return np.nan, np.nan, np.nan, np.nan
return c.solution.get_objective_value(),c.solution.MIP.get_mip_relative_gap(), solution, time_taken
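if __name__ == '__main__':
    # Tiny illustrative run (assumes the CPLEX Python API is installed and
    # licensed; the probabilities and costs below are made up for demonstration):
    obj, gap, schedule, secs = optimally_schedule(
        show_probs=[0.9, 0.6, 0.6], wtc=1.0, otc=2.0, nslots=3, seed=0,
        max_scenarios=8)
    print(f'objective={obj}, gap={gap}, schedule={schedule}, time={secs:.1f}s')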
|
nilq/baby-python
|
python
|
# This file is part of the Etsin service
#
# Copyright 2017-2018 Ministry of Education and Culture, Finland
#
# :author: CSC - IT Center for Science Ltd., Espoo Finland <servicedesk@csc.fi>
# :license: MIT
"""Language and translation utilities"""
from flask import request, session
from etsin_finder.auth.authentication_fairdata_sso import get_sso_environment_prefix, get_decrypted_sso_session_details
languages = ['en', 'fi']
default_language = 'en'
# Map common locales to languages
locale_mapping = {
'en_US': 'en',
'en_GB': 'en',
'en': 'en',
'fi_FI': 'fi',
'fi': 'fi',
}
def set_language(language):
"""
Set session language
Returns True if language is supported, otherwise False.
"""
if language in languages:
session['language'] = language
return True
return False
def get_language():
"""
Get language for request.
Returns first found language in the following order
* Session language setting
* SSO language setting
* Accept-Languages request header
* Default language
"""
session_lang = session.get('language')
if session_lang in languages:
return session_lang
sso_session = get_decrypted_sso_session_details() or {}
sso_lang = sso_session.get('language')
if sso_lang in languages:
return sso_lang
supported_locales = locale_mapping.keys()
locale = request.accept_languages.best_match(supported_locales)
return locale_mapping.get(locale, default_language)
translations = {
'fi': {
'etsin.download.notification.subject': 'Lataus on aloitettavissa Etsimessä',
'etsin.download.notification.body.partial': 'Lataus paketille {folder} aineistossa {pref_id} voidaan aloittaa Etsimessä:\n\n{data_url}\n',
'etsin.download.notification.body.full': 'Lataus aineistolle {pref_id} voidaan aloittaa Etsimessä:\n\n{data_url}\n',
'etsin.title': 'Etsin | Tutkimusaineistojen hakupalvelu',
        'etsin.description': ('Kuvailutietojen perusteella käyttäjät voivat etsiä aineistoja ja arvioida '
                              'löytämiensä aineistojen käyttökelpoisuutta tarpeisiinsa.'),
'qvain.title': 'Qvain | Tutkimusaineiston metatietotyökalu',
'qvain.description': ('Fairdata Qvain -työkalu tekee datasi '
'kuvailun ja julkaisemisen helpoksi.')
},
'en': {
'etsin.download.notification.subject': 'Download can be started in Etsin',
'etsin.download.notification.body.partial': 'Download for package {folder} in dataset {pref_id} can now be started in Etsin:\n\n{data_url}\n',
'etsin.download.notification.body.full': 'Download for dataset {pref_id} can now be started in Etsin:\n\n{data_url}\n',
'etsin.title': 'Etsin | Research Dataset Finder ',
'etsin.description': 'Etsin enables you to find research datasets from all fields of science.',
'qvain.title': 'Qvain | Research Dataset Description Tool',
'qvain.description': 'Fairdata Qvain tool makes describing and publishing your research data effortless for you.',
}
}
def translate(lang, key, context=None):
"""Return translation from the translations dict for a given language."""
if context is None:
context = {}
lang_translations = translations.get(lang)
if not lang_translations:
        return f'invalid language: {lang}'
translation = lang_translations.get(key)
if not translation:
return f'missing translation: {lang}.{key}'
return translation.format(**context)
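# Minimal usage sketch (illustrative; assumes the etsin_finder package and
# Flask are importable - translate() itself needs no request context, unlike
# get_language()):
if __name__ == '__main__':
    print(translate('en', 'etsin.title'))
    print(translate('fi', 'qvain.title'))
    print(translate('en', 'missing.key'))  # -> 'missing translation: en.missing.key'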
|
nilq/baby-python
|
python
|
import pygame
from .drawable import Drawable
# --------------------------------------------------- *\
# [class] Image()
#
# * Image element *
#
# --------------------------------------------------- */
class Image(Drawable):
    # --------------------------------------------------- *\
    # [function] __init__():
    #
    # * Constructor *
    # --------------------------------------------------- */
    def __init__(self, imagePath):
        super().__init__()
        self.type = "image"
        # pygame.image.load raises pygame.error on failure, so the else
        # branch below is only a safety net for a falsy surface
        surface = pygame.image.load(imagePath).convert_alpha()
        if surface:
            self.setTexture(surface)
            size = surface.get_size()
            self.setSize(size[0], size[1])
        else:
            print("Couldn't load the image...")
|
nilq/baby-python
|
python
|
"""
This file contains all the sales related resources
"""
# Third party imports
from flask import request, json, abort
from flask_restplus import Resource
from flask_jwt_extended import jwt_required, get_jwt_identity
# Local application imports
from app.api.v1.models.sales import Sale
from app.api.v1.models.db import Db
from app.api.v1.views.expect import SaleEtn
from app.api.common.validators import sales_validator, admin_required
new_s = SaleEtn().sales
v1 = SaleEtn.v1
@v1.route('/<int:id>')
class SalesRecords(Resource):
@v1.doc( security='apikey')
@jwt_required
@admin_required
def get(self, id):
"""
        Get a specific sale record
"""
email = get_jwt_identity()
user = Db.get_user(email=email)
store_id = user.store_id
sale = Db.get_s_by_id(id)
if sale.store_id != store_id:
msg = 'That record does not exist'
return abort(404, msg)
sk = sale.json_dump()
return {"status": "Success!", "data": sk}, 200
@v1.doc( security='apikey')
@jwt_required
@admin_required
def delete(self, id):
"""
Delete a sale
"""
sale = Db.get_s_by_id(id)
email = get_jwt_identity()
user = Db.get_user(email=email)
store_id = user.store_id
if sale.store_id != store_id:
msg = 'That record does not exist'
return abort(404, msg)
sk = sale.json_dump()
Db.sales.remove(sale)
return {"status": "Deleted!", "data": sk}, 200
@v1.doc( security='apikey')
@jwt_required
@admin_required
@v1.expect(new_s)
def put(self, id):
"""
Update a sale
"""
s = Db.get_s_by_id(id)
email = get_jwt_identity()
user = Db.get_user(email=email)
store_id = user.store_id
if s.store_id != store_id:
msg = 'Sale does not exist'
abort(404, msg)
json_data = request.get_json(force=True)
sales_validator(json_data)
number = json_data['number']
s.number = number
return {"status": "Success!", "data": s.json_dump()}, 200
@v1.route('/')
class SalesRecord(Resource):
@v1.doc( security='apikey')
@jwt_required
@admin_required
def get(self):
"""
Get all sales
"""
sales = Db.sales
        if len(sales) < 1:
            res = {"message": 'There are no sale records'}, 404
            return res
email = get_jwt_identity()
user = Db.get_user(email=email)
store_id = user.store_id
s_list = [s.json_dump() for s in sales if s.store_id == store_id]
return {"status": "Success!", "data": s_list}, 200
|
nilq/baby-python
|
python
|
from flask import Flask, render_template, request, session, redirect, url_for, g, flash, jsonify, make_response, json
from flask_mail import Mail, Message
from flask_cors import CORS
from pusher import pusher
from flask_wtf import FlaskForm
from wtforms import (StringField ,PasswordField,SubmitField)
from wtforms.fields.html5 import EmailField
from wtforms.validators import ValidationError,DataRequired,InputRequired
import model
from flask_apscheduler import APScheduler
app = Flask(__name__)
cors = CORS(app)
app.config['CORS_HEADERS'] = 'Content-Type'
scheduler = APScheduler()
app.secret_key = 'mohsin5432'
app.config['MAIL_SERVER'] = 'smtp.gmail.com'
app.config['MAIL_PORT'] = 465
app.config['MAIL_DEFAULT_SENDER'] = 'it.timepay@gmail.com'
app.config['MAIL_USERNAME'] = 'it.timepay@gmail.com'
app.config['MAIL_PASSWORD'] = 'Moh$in531'
app.config['MAIL_USE_SSL'] = True
mail = Mail(app)
pusher = pusher_client = pusher.Pusher(  # note: rebinds the module name 'pusher' to the client instance
app_id = "1118828",
key = "4f4c1cd696946236d54c",
secret = "dee623f36d26edb30254",
cluster = "ap1",
ssl=True
)
username = ''
user = model.check_users()
admin = ''
class loginform(FlaskForm):
username = StringField(validators=[DataRequired(message="enter username")],render_kw={"placeholder": "username"})
password = PasswordField(validators=[DataRequired()],render_kw={"placeholder": "Password"})
submit = SubmitField('submit')
class signupform(FlaskForm):
username = StringField(validators=[DataRequired(message="enter username")],render_kw={"placeholder": "username"})
email = EmailField(validators=[InputRequired()],render_kw={"placeholder": "Email"})
password = PasswordField(validators=[DataRequired()],render_kw={"placeholder": "Password"})
submit = SubmitField('submit')
class newsletter(FlaskForm):
email = EmailField(validators=[InputRequired()],render_kw={"placeholder": "Email"})
submit = SubmitField('Submit')
@app.route('/',methods = ['GET'])
def home():
if 'username' in session:
g.user = session['username']
pending=model.pendingtask(g.user)
progress=model.progresstask(g.user)
completed=model.completedtask(g.user)
if not progress:
pmsg = 'NO PROGRESS TASK'
else:
pmsg=""
if not pending:
dmsg = 'NO PENDING TASK'
else:
dmsg=""
if not completed:
cmsg = 'NO COMPLETED TASK'
else:
cmsg=""
return render_template('homepage.html',pending=pending,progress=progress,completed=completed,pmsg=pmsg,dmsg=dmsg,cmsg=cmsg)
return redirect(url_for('login'))
@app.route('/login',methods = ['GET' ,'POST'])
def login():
username = False
password = False
form = loginform()
nform = newsletter()
sform = signupform()
if request.method == 'GET':
return render_template('login.html',form=form,nform=nform,sform=sform)
else:
session.pop('username', None)
areyouuser = form.username.data
pwd = model.pass_check(areyouuser)
if form.password.data == pwd:
session['username'] = form.username.data
return redirect(url_for('home'))
return render_template('login.html',form=form,nform=nform,sform=sform)
@app.before_request
def before_request():
g.username = None
if 'username' in session:
g.username = session['username']
@app.route('/logout', methods = ['POST'])
def logout():
session.pop('username' , None)
return redirect(url_for('home'))
@app.route('/getsession')
def getsession():
if 'username' in session:
return session['username']
return redirect(url_for('login'))
@app.route('/new/guest', methods=['POST'])
def guestUser():
data = request.json
pusher.trigger(u'general-channel', u'new-guest-details', {
'name' : data['name'],
'email' : data['email']
})
return json.dumps(data)
@app.route("/pusher/auth", methods=['POST'])
def pusher_authentication():
auth = pusher.authenticate(channel=request.form['channel_name'],socket_id=request.form['socket_id'])
return json.dumps(auth)
@app.route('/admin/livechat')
def adminchat():
return render_template('adminchat.html')
@app.route('/signup',methods = ['POST'])
def signup():
form = loginform()
nform = newsletter()
sform = signupform()
email = sform.email.data
username = sform.username.data
password = sform.password.data
agp = model.signup(email,username,password)
if agp is True:
msg = Message('TimePay', recipients=[sform.email.data])
msg.body = "THANKS FOR SIGNING UP"
mail.send(msg)
message = "Signed up successfully"
else:
message = "USER Already Exist"
return render_template('login.html',message = message,nform=nform,sform=sform,form=form)
@app.route('/addtask',methods = ['GET','POST'])
def addtask():
if 'username' in session:
if request.method == 'GET':
return render_template('addtask.html')
else:
g.user = session['username']
username = g.user
email = model.email(g.user)
subject = request.form["subject"]
memo = request.form["memo"]
status = "pending"
date = request.form["date"]
message = model.addtask(username,email,subject,memo,status,date)
return redirect(url_for('home'))
else:
return redirect(url_for('login'))
@app.route('/start/<string:id_data>', methods = ['GET'])
def tdelete(id_data):
model.start(id_data)
return redirect(url_for('home'))
@app.route('/completed/<string:id_data>', methods = ['GET'])
def completed(id_data):
model.completed(id_data)
return redirect(url_for('home'))
@app.route('/delete/<string:id_data>', methods = ['GET'])
def delete(id_data):
model.delete(id_data)
return redirect(url_for('home'))
#for Newsletter
@app.route('/news',methods = ['POST'])
def news():
form = loginform()
nform = newsletter()
email = nform.email.data
con = model.news(email)
if con is True:
msg = Message('Welcome To TimePay', recipients=[nform.email.data])
msg.body = "THANKS FOR SUBSCRIBING OUR NEWSLETTER WE WILL BE LAUNCHING SOON GREAT SERVICES"
mail.send(msg)
flash("THANKS FOR SUBSCRIBING")
else:
flash("YOU ARE Already SUBSCRIBED")
return redirect(url_for('login'))
#admin section
@app.route('/admin',methods = ['GET','POST'])
def admin():
if 'admin' in session:
return redirect(url_for('adminpanel'))
else:
if request.method == 'GET':
return render_template('adminlog.html')
else:
admin = request.form['user']
password = request.form['password']
db_pass = model.admpass_check(admin)
if password == db_pass:
session["admin"] = admin
return redirect(url_for('adminpanel'))
else:
return redirect(url_for('admin'))
@app.route('/adminpanel',methods = ['GET','POST'])
def adminpanel():
if 'admin' in session:
mail = model.emails()
return render_template('admin.html',mail=mail)
return redirect(url_for('admin'))
@app.route('/logoutadm')
def logoutadm():
session.pop('admin' , None)
return redirect(url_for('admin'))
# task reminder job (the name 'remainder' is kept because model.remainder()
# and the scheduler registration below use it)
def remainder():
    emails = model.remainder()
    if not emails:
        print("No emails found")
    else:
        with app.app_context():
            for mails in emails:
                msg = Message('TASK SUBMISSION DATE IS SO CLOSE', recipients=['{}'.format(mails[0])])
                msg.body = "Hi there,\nyour task submission date is very close - start your project"
                mail.send(msg)
                print("reminder email sent to:")
                print(mails[0])
    return True
if __name__ == '__main__':
scheduler.add_job(id ='Scheduled task', func = remainder , trigger="interval" , hours = 20 )
scheduler.start()
app.run(port=8000 ,debug = True,use_reloader=False)
|
nilq/baby-python
|
python
|
#!/usr/bin/env python3
# -----------------------------------------------------------------------
# VIZPAIRWISESEQ
# Copyright 2015, Stephen Gould <stephen.gould@anu.edu.au>
# -----------------------------------------------------------------------
# Script to visualize an integer sequence based on the visualization of
# the decimal expansion of \pi by Martin Krzywinski and Cristian Vasile.
# -----------------------------------------------------------------------
import math
import matplotlib.pyplot as plt
import matplotlib.patches as pth
import random
import sys
# --- distance to arc centre --------------------------------------------
def distance_to_centres(x, y, r):
"""Calculates the distance to the mid-point of an chord on the unit
circle and the distance to the centre of a circle of a circle of
radius r with the same chord."""
h1 = 0.5 * math.sqrt((x[0] + x[1]) ** 2 + (y[0] + y[1]) ** 2)
h2 = 0.5 * math.sqrt(4.0 * r ** 2 - (x[0] - x[1]) ** 2 - (y[0] - y[1]) ** 2)
return (h1, h2)
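# Worked check (illustrative): for the chord between angles 0 and 90 degrees
# on the unit circle, with r = 1.1:
#   distance_to_centres([1.0, 0.0], [0.0, 1.0], 1.1) ~= (0.707, 0.843)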
# --- visualization -----------------------------------------------------
def visualize_sequence(int_seq, block=True):
"""Visualize a sequence of integers"""
seq_length = len(int_seq)
min_value = min(int_seq)
max_value = max(int_seq)
val_range = max_value - min_value + 1
# convert a sequence of numbers to a sequence in [0.0, 1.0]
counts = [0.02 * seq_length for i in range(val_range)]
counted_seq = []
for n in int_seq:
counted_seq.append((n - min_value, counts[n - min_value]))
counts[n - min_value] += 1.0
linear_seq = [(p[0] + p[1] / counts[p[0]]) / float(val_range) for p in counted_seq]
# set up plots
fig = plt.figure()
ax = plt.axes()
ax.set_axis_off()
ax.set_ylim([-1, 1])
ax.set_xlim([-1, 1])
fig.set_facecolor('black')
fig.add_axes(ax)
cm = plt.get_cmap('Paired')
radius = 1.1
# plot arcs connecting consecutive elements
last_point = linear_seq.pop()
for next_point in linear_seq:
theta_last = 2 * math.pi * last_point
theta_next = 2 * math.pi * next_point
x = [math.cos(theta_last), math.cos(theta_next)]
y = [math.sin(theta_last), math.sin(theta_next)]
d = distance_to_centres(x, y, radius)
scale = 0.5 * d[1] / d[0] + 0.5
x_centre = scale * (x[0] + x[1])
y_centre = scale * (y[0] + y[1])
theta_1 = math.degrees(math.atan2(y[0] - y_centre, x[0] - x_centre))
theta_2 = math.degrees(math.atan2(y[1] - y_centre, x[1] - x_centre))
if (math.fmod(theta_2 - theta_1 + 720.0, 360.0) > 180.0):
theta_1, theta_2 = theta_2, theta_1
colour = cm(last_point)
ax.add_patch(pth.Arc((x_centre, y_centre), 2.0 * radius, 2.0 * radius, 0,
theta_1, theta_2, color=colour, fill=False, linewidth=0.25))
last_point = next_point
plt.show(block)
# --- main --------------------------------------------------------------
if __name__ == "__main__":
int_seq = []
if (len(sys.argv) == 1):
print("Generating a random sequence of digits...");
int_seq = [random.randint(0, 9) for i in range(2500)]
else:
print("Reading sequence from {0}...".format(sys.argv[1]))
fh = open(sys.argv[1])
int_seq = [int(i) for i in fh.read().split()]
fh.close()
visualize_sequence(int_seq)
|
nilq/baby-python
|
python
|
import sys
sys.path.append("../bundle_adjustment/ceres-solver/ceres-bin/lib/")  # path to the compiled PyCeres shared object (.so)
import PyCeres
import numpy as np
import scipy.io as sio
import cv2
from utils import geo_utils
def order_cam_param_for_c(Rs, ts, Ks):
"""
Orders a [m, 12] matrix for the ceres function as follows:
Ps_for_c[i, 0:3] 3 parameters for the vector representing the rotation
Ps_for_c[i, 3:6] 3 parameters for the location of the camera
Ps_for_c[i, 6:11] 5 parameters for the upper triangular part of the calibration matrix
:param Rs: [m,3,3]
:param ts: [m,3]
:param Ks: [m,3,3]
:return: Ps_for_c [m, 12]
"""
n_cam = len(Rs)
Ps_for_c = np.zeros([n_cam, 12])
for i in range(n_cam):
Ps_for_c[i, 0:3] = cv2.Rodrigues(Rs[i].T)[0].T
Ps_for_c[i, 3:6] = (-Rs[i].T @ ts[i].reshape([3, 1])).T
Ps_for_c[i, 6:11] = [Ks[i, 0, 0], Ks[i, 0, 1], Ks[i, 0, 2], Ks[i, 1, 1], Ks[i, 1, 2]]
Ps_for_c[i, -1] = 1.0
return Ps_for_c
def reorder_from_c_to_py(Ps_for_c, Ks):
"""
Read back the camera parameters from the
:param Ps_for_c:
:return: Rs, ts, Ps
"""
n_cam = len(Ps_for_c)
Rs = np.zeros([n_cam, 3, 3])
ts = np.zeros([n_cam, 3])
Ps = np.zeros([n_cam, 3,4])
for i in range(n_cam):
Rs[i] = cv2.Rodrigues(Ps_for_c[i, 0:3])[0].T
ts[i] = -Rs[i] @ Ps_for_c[i, 3:6].reshape([3, 1]).flatten()
Ps[i] = geo_utils.get_camera_matrix(R=Rs[i], t=ts[i], K=Ks[i])
return Rs, ts, Ps
def run_euclidean_ceres(Xs, xs, Rs, ts, Ks, point_indices):
"""
Calls a c++ function that optimizes the camera parameters and the 3D points for a lower reprojection error.
:param Xs: [n, 3]
:param xs: [v,2]
:param Rs: [m,3,3]
:param ts: [m,3]
:param Ks: [m,3,3]
:param point_indices: [2,v]
:return:
new_Rs, new_ts, new_Ps, new_Xs Which have a lower reprojection error
"""
if Xs.shape[-1] == 4:
Xs = Xs[:,:3]
assert Xs.shape[-1] == 3
assert xs.shape[-1] == 2
n_cam = len(Rs)
n_pts = Xs.shape[0]
n_observe = xs.shape[0]
Ps_for_c = order_cam_param_for_c(Rs, ts, Ks).astype(np.double)
Xs_flat = Xs.flatten("C").astype(np.double)
Ps_for_c_flat = Ps_for_c.flatten("C").astype(np.double)
xs_flat = xs.flatten("C").astype(np.double)
point_indices = point_indices.flatten("C")
Xsu = np.zeros_like(Xs_flat)
Psu = np.zeros_like(Ps_for_c_flat)
PyCeres.eucPythonFunctionOursBA(Xs_flat, xs_flat, Ps_for_c_flat, point_indices, Xsu, Psu, n_cam, n_pts, n_observe)
new_Ps_for_c = Ps_for_c + Psu.reshape([n_cam, 12], order="C")
new_Rs, new_ts, new_Ps = reorder_from_c_to_py(new_Ps_for_c, Ks)
new_Xs = Xs + Xsu.reshape([n_pts,3], order="C")
return new_Rs, new_ts, new_Ps, new_Xs
def run_projective_ceres(Ps, Xs, xs, point_indices):
"""
Calls the c++ function, that loops over the variables:
for i in range(v):
xs[2*i], xs[2*i + 1], Ps + 12 * (camIndex), Xs + 3 * (point3DIndex)
:param Ps: [m, 3, 4]
:param Xs: [n, 3]
:param xs: [v, 2]
:param point_indices: [2,v]
:return: new_Ps: [m, 12]
new_Xs: [n,3]
"""
if Xs.shape[-1] == 4:
Xs = Xs[:,:3]
assert Xs.shape[-1] == 3
assert xs.shape[-1] == 2
m = Ps.shape[0]
n = Xs.shape[0]
v = point_indices.shape[1]
Ps_single_flat = Ps.reshape([-1, 12], order="F") # [m, 12] Each camera is in *column* major as in matlab! the cpp code assumes it because the original code was in matlab
Ps_flat = Ps_single_flat.flatten("C") # row major as in python
Xs_flat = Xs.flatten("C")
xs_flat = xs.flatten("C")
point_idx_flat = point_indices.flatten("C")
Psu = np.zeros_like(Ps_flat)
Xsu = np.zeros_like(Xs_flat)
PyCeres.pythonFunctionOursBA(Xs_flat, xs_flat, Ps_flat, point_idx_flat, Xsu, Psu, m, n, v)
Psu = Psu.reshape([m,12], order="C")
Psu = Psu.reshape([m,3,4], order="F") # [m, 12] Each camera is in *column* major as in matlab! the cpp code assumes it because the original code was in matlab
Xsu = Xsu.reshape([n,3])
new_Ps = Ps + Psu
new_Xs = Xs + Xsu
return new_Ps, new_Xs
def run_euclidean_python_ceres(Xs, xs, Rs, ts, Ks, point_indices, print_out=True):
"""
Calls a c++ function that optimizes the camera parameters and the 3D points for a lower reprojection error.
:param Xs: [n, 3]
:param xs: [v,2]
:param Rs: [m,3,3]
:param ts: [m,3]
:param Ks: [m,3,3]
:param point_indices: [2,v]
:return:
new_Rs, new_ts, new_Ps, new_Xs Which have a lower reprojection error
"""
if Xs.shape[-1] == 4:
Xs = Xs[:,:3]
assert Xs.shape[-1] == 3
assert xs.shape[-1] == 2
n_cam = len(Rs)
n_pts = Xs.shape[0]
n_observe = xs.shape[0]
Ps_for_c = order_cam_param_for_c(Rs, ts, Ks).astype(np.double)
Xs_flat = Xs.flatten("C").astype(np.double)
Ps_for_c_flat = Ps_for_c.flatten("C").astype(np.double)
xs_flat = xs.flatten("C").astype(np.double)
point_indices = point_indices.flatten("C")
Xsu = np.zeros_like(Xs_flat)
Psu = np.zeros_like(Ps_for_c_flat)
problem = PyCeres.Problem()
for i in range(n_observe): # loop over the observations
camIndex = int(point_indices[i])
point3DIndex = int(point_indices[i + n_observe])
cost_function = PyCeres.eucReprojectionError(xs_flat[2 * i], xs_flat[2 * i + 1],
Ps_for_c_flat[12 * camIndex:12 * (camIndex + 1)],
Xs_flat[3 * point3DIndex:3 * (point3DIndex + 1)])
loss_function = PyCeres.HuberLoss(0.1)
problem.AddResidualBlock(cost_function, loss_function, Psu[12 * camIndex:12 * (camIndex + 1)],
Xsu[3 * point3DIndex:3 * (point3DIndex + 1)])
options = PyCeres.SolverOptions()
options.function_tolerance = 0.0001
options.max_num_iterations = 100
options.num_threads = 24
options.linear_solver_type = PyCeres.LinearSolverType.DENSE_SCHUR
options.minimizer_progress_to_stdout = True
if not print_out:
PyCeres.LoggingType = PyCeres.LoggingType.SILENT
summary = PyCeres.Summary()
PyCeres.Solve(options, problem, summary)
if print_out:
print(summary.FullReport())
    if not Psu.any():
        print('Warning: no change to Ps')
    if not Xsu.any():
        print('Warning: no change to Xs')
new_Ps_for_c = Ps_for_c + Psu.reshape([n_cam, 12], order="C")
new_Rs, new_ts, new_Ps = reorder_from_c_to_py(new_Ps_for_c, Ks)
new_Xs = Xs + Xsu.reshape([n_pts,3], order="C")
return new_Rs, new_ts, new_Ps, new_Xs
def run_projective_python_ceres(Ps, Xs, xs, point_indices, print_out=True):
"""
Calls the c++ function, that loops over the variables:
for i in range(v):
xs[2*i], xs[2*i + 1], Ps + 12 * (camIndex), Xs + 3 * (point3DIndex)
:param Ps: [m, 3, 4]
:param Xs: [n, 3]
:param xs: [v, 2]
:param point_indices: [2,v]
:return: new_Ps: [m, 12]
new_Xs: [n,3]
"""
if Xs.shape[-1] == 4:
Xs = Xs[:,:3]
assert Xs.shape[-1] == 3
assert xs.shape[-1] == 2
m = Ps.shape[0]
n = Xs.shape[0]
v = point_indices.shape[1]
Ps_single_flat = Ps.reshape([-1, 12], order="F") # [m, 12] Each camera is in *column* major as in matlab! the cpp code assumes it because the original code was in matlab
Ps_flat = Ps_single_flat.flatten("C").astype(np.double) # row major as in python
Xs_flat = Xs.flatten("C").astype(np.double)
xs_flat = xs.flatten("C")
point_idx_flat = point_indices.flatten("C")
Psu = np.zeros_like(Ps_flat)
Xsu = np.zeros_like(Xs_flat)
problem = PyCeres.Problem()
for i in range(v): # loop over the observations
camIndex = int(point_idx_flat[i])
point3DIndex = int(point_idx_flat[i + v])
cost_function = PyCeres.projReprojectionError(xs_flat[2*i], xs_flat[2*i + 1], Ps_flat[12*camIndex:12*(camIndex+1)], Xs_flat[3 *point3DIndex:3*(point3DIndex+1)])
loss_function = PyCeres.HuberLoss(0.1)
problem.AddResidualBlock(cost_function, loss_function, Psu[12*camIndex:12*(camIndex+1)], Xsu[3 *point3DIndex:3*(point3DIndex+1)])
options = PyCeres.SolverOptions()
options.function_tolerance = 0.0001
options.max_num_iterations = 100
options.num_threads = 24
options.linear_solver_type = PyCeres.LinearSolverType.DENSE_SCHUR
options.minimizer_progress_to_stdout = True
summary = PyCeres.Summary()
PyCeres.Solve(options, problem, summary)
if print_out:
print(summary.FullReport())
Psu = Psu.reshape([m,12], order="C")
Psu = Psu.reshape([m,3,4], order="F") # [m, 12] Each camera is in *column* major as in matlab! the cpp code assumes it because the original code was in matlab
Xsu = Xsu.reshape([n,3])
new_Ps = Ps + Psu
new_Xs = Xs + Xsu
return new_Ps, new_Xs
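if __name__ == '__main__':
    # Shape sanity check for the camera-parameter packing (illustrative; the
    # module-level PyCeres import above must still succeed, but this block
    # itself only exercises numpy and OpenCV):
    Rs = np.stack([np.eye(3)] * 2)
    ts = np.zeros((2, 3))
    Ks = np.stack([np.eye(3)] * 2)
    print(order_cam_param_for_c(Rs, ts, Ks).shape)  # (2, 12)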
|
nilq/baby-python
|
python
|
"""Turrets and torpedo mpunt data in a useable form"""
from schemas import TURRETS
class Turret:
"""Container for the data needed to draw a turret
Args:
caliber (int): caliber of the gun in inches (urgh)
pos (string): the letter of the turret, like "A", "X", etc...
positions 1 to 4 are also passed as strings
guns (int): how many guns in the turret
half_length (int): the length from middle to bow of the ship, in funnel coordinates
all_turrs (list[string]): the list of all the turret position used on the ship
parameters (Parameters): parameters for the whole program
Attr:
outline (list[(x,y)]): a list of vertexes for the turret's outline. In funnel coordinates
"""
def __init__(self, caliber, pos, guns, half_length, all_turrs, parameters):
to_bow = parameters.turrets_positions[pos]["to_bow"]
scale = parameters.turrets_scale[caliber]
rel_position = rel_tur_or_torp_position(pos, all_turrs, parameters)
position = (rel_position[0]*half_length, rel_position[1]*half_length)
raw_outline = parameters.turrets_outlines[guns]
#mirror if the turret should be backward
if not to_bow:
mirrored_outline = [(vertex[0], -vertex[1]) for vertex in raw_outline]
else:
mirrored_outline = raw_outline
#also mirror if the turret is to starboard
if position[0] >0:
mirrored_outline = [(-vertex[0], vertex[1]) for vertex in mirrored_outline]
#scale according to gun caliber
scaled_outline = [(vertex[0]*scale, vertex[1]*scale) for vertex in mirrored_outline]
#move according to position
self.outline = [(vertex[0]+position[0], vertex[1]+position[1]) for vertex in scaled_outline]
def rel_tur_or_torp_position(pos, all_turrs, parameters):
"""Apply the game's logic to get a turret or toorp mount position
Args:
pos (string): the letter of the turret, like "A", "X", etc...
positions 1 to 4 are also passed as strings
all_turrs (list[string]): the list of all the turret position used on the ship
parameters (Parameters): parameters for the whole program
"""
rel_position = parameters.turrets_positions[pos]["positions"][0]
if pos == "X":
if ("W" in all_turrs or "V" in all_turrs or
"R" in all_turrs or "C" in all_turrs):
rel_position = parameters.turrets_positions[pos]["positions"][1]
elif pos == "W":
if ("X" in all_turrs or "V" in all_turrs or "B" in all_turrs):
rel_position = parameters.turrets_positions[pos]["positions"][1]
elif pos == "A":
if ("V" in all_turrs or
{"W", "X", "Y"}.issubset(all_turrs) or
"C" in all_turrs and "X" in all_turrs or
"B" in all_turrs and "R" in all_turrs and (
("W" in all_turrs or "X" in all_turrs or "Y" in all_turrs))):
rel_position = parameters.turrets_positions[pos]["positions"][2]
elif ("X" in all_turrs or "W" in all_turrs or
"B" in all_turrs and ("C" in all_turrs or "R" in all_turrs or "W" in all_turrs)):
rel_position = parameters.turrets_positions[pos]["positions"][1]
elif pos == "B":
if ("V" in all_turrs or
"W" in all_turrs or
"C" in all_turrs and ("X" in all_turrs or "Y" in all_turrs) or
"A" in all_turrs and "R" in all_turrs and ("X" in all_turrs or "Y" in all_turrs)):
rel_position = parameters.turrets_positions[pos]["positions"][2]
elif ("X" in all_turrs or "Y" in all_turrs or "C" in all_turrs or "R" in all_turrs):
rel_position = parameters.turrets_positions[pos]["positions"][1]
elif pos == "Y":
if (("X" in all_turrs and "W" in all_turrs.keys()) or
("V" in all_turrs and "W"in all_turrs)):
rel_position = parameters.turrets_positions[pos]["positions"][3]
elif ("V" in all_turrs or "W" in all_turrs or
({"A", "B", "C"}.issubset(all_turrs)) or
({"A", "B", "R"}.issubset(all_turrs))):
rel_position = parameters.turrets_positions[pos]["positions"][2]
elif ("B" in all_turrs or "C" in all_turrs or "R" in all_turrs or "X" in all_turrs):
rel_position = parameters.turrets_positions[pos]["positions"][1]
return rel_position
class Torpedo:
"""Container for the data needed to draw a torpedo mount
Args:
section_content (dict): the a TorpedoMount<x> section from the parser
that read the ship file
half_length (int): the length from middle to bow of the ship, in funnel coordinates
parameters (Parameters): parameters for the whole program
Attr:
outline (list[(x,y)]): a list of vertexes for the mount's outline. In funnel coordinates
"""
def __init__(self, section_content, half_length, parameters):
pos = section_content["Pos"]
tubes_count = int(section_content["Tubes"])
if pos in TURRETS:
to_bow = parameters.turrets_positions[pos]["to_bow"]
rel_position = parameters.turrets_positions[pos]["positions"][0]
else:
to_bow = True
#draw the mount outside of the visible area, so hidden
rel_position = [0, 1.5]
position = (rel_position[0]*half_length, rel_position[1]*half_length)
raw_outline = parameters.torpedo_outlines[tubes_count]
        #mirror if the mount should be backward
if not to_bow:
rotated_outline = [(point[0], -point[1]) for point in raw_outline]
else:
rotated_outline = raw_outline
#move according to position
self.outline = [(point[0]+position[0], point[1]+position[1]) for point in rotated_outline]
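

# --- Hedged usage sketch (not part of the original module) ---
# Minimal illustration of the Turret outline pipeline. The Parameters stub
# below is an assumption: the real Parameters object comes from the rest of
# the program, and importing this module still requires the sibling
# ``schemas`` module to be available.
if __name__ == "__main__":
    from types import SimpleNamespace

    params = SimpleNamespace(
        turrets_positions={"A": {"to_bow": True,
                                 "positions": [(0.0, 0.8), (0.0, 0.6), (0.0, 0.4)]}},
        turrets_scale={6: 1.0},
        turrets_outlines={2: [(-1, -1), (1, -1), (1, 1), (-1, 1)]},
    )
    turret = Turret(caliber=6, pos="A", guns=2, half_length=100,
                    all_turrs=["A"], parameters=params)
    print(turret.outline)  # the unit-square outline translated to (0, 80)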
|
nilq/baby-python
|
python
|
# Copyright (c) 2021, Carlos Millett
# All rights reserved.
# This software may be modified and distributed under the terms
# of the Simplified BSD License. See the LICENSE file for details.
import abc
from pathlib import Path
from .types import Types
class Media(abc.ABC):
def __init__(self, media_type: Types, path: Path) -> None:
self._type: Types = media_type
self._path: Path = path
self._title: str = ''
@property
def type(self) -> Types:
return self._type
@property
def path(self) -> Path:
return self._path
@property
def title(self) -> str:
if not self._title:
self._title = '{0}{1}'.format(self.format(self._path.name), self._path.suffix)
return self._title
@title.setter
def title(self, title: str) -> None:
self._title = title
@classmethod
@abc.abstractmethod
def match(cls, filename: str) -> bool:
pass
@classmethod
@abc.abstractmethod
def parse(cls, filename: str) -> str:
pass
@classmethod
@abc.abstractmethod
def format(cls, filename: str) -> str:
pass
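

# --- Hedged illustrative subclass (not part of the original module) ---
# Shows how the abstract interface above is meant to be filled in; the
# filename pattern and formatting scheme are assumptions, not the project's
# real rules.
import re


class _ExampleMovie(Media):
    _pattern = re.compile(r"(?P<name>.+?)[. ](?P<year>\d{4})")

    @classmethod
    def match(cls, filename: str) -> bool:
        return cls._pattern.search(filename) is not None

    @classmethod
    def parse(cls, filename: str) -> str:
        m = cls._pattern.search(filename)
        return m.group("name") if m else filename

    @classmethod
    def format(cls, filename: str) -> str:
        # e.g. "Some.Movie.2021.1080p" -> "Some Movie (2021)"
        m = cls._pattern.search(filename)
        if m is None:
            return filename
        return "{0} ({1})".format(m.group("name").replace(".", " "), m.group("year"))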
|
nilq/baby-python
|
python
|
class Solution(object):
def minDistance(self, w1, w2):
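        # Classic Levenshtein edit distance via dynamic programming:
        # d[i][j] = minimum edits turning w1[:i] into w2[:j];
        # if the last characters match, d[i][j] = d[i-1][j-1], otherwise
        # 1 + min(delete d[i-1][j], insert d[i][j-1], replace d[i-1][j-1]).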
if w1 == "":
return len(w2)
if w2 == "":
return len(w1)
l1 = len(w1)
l2 = len(w2)
d=[]
for i in range(l1+1):
d.append([0] * (l2 + 1))
for i in range(0, l1 + 1):
for j in range(0, l2 + 1):
if i==0 and j!=0:
d[i][j] = j
if i!=0 and j==0:
d[i][j] = i
for i in range(1, l1 + 1):
for j in range(1, l2 + 1):
if w1[i-1] == w2[j-1]:
d[i][j] = d[i-1][j-1]
else:
#print("--d[i-1][j]", d[i-1][j])
#print("--d[i][j-1]", d[i][j-1])
d[i][j] = min(d[i-1][j], d[i][j-1], d[i-1][j-1]) + 1
#print(i,j,w1[:i], w2[:j], d[i][j])
return d[l1][l2]
def test():
s=Solution()
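    # note: the pairs below overwrite each other; only the last w1/w2
    # assignment ("intention"/"execution") is actually tested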
w1 = "abc"
w2 = "1abc"
w1 = "abc1"
w2 = "abc"
w1 = "abc"
w2 = "abc1"
w1="sma"
w2="uism"
w1="ity"
w2="ties"
w1="asma"
w2="truism"
w1="prosperity"
w2="properties"
w1 = "horse"
w2 = "ros"
w1 = "abcde"
w2 = "bcdea"
w1 = "intention"
w2 = "execution"
r=s.minDistance(w1,w2)
print(w1)
print(w2)
print(r)
test()
|
nilq/baby-python
|
python
|
import math
import os
import time
import unicodedata
from pyrogram.errors import MessageNotModified
from help.progress import humanbytes, TimeFormatter
import requests
from config import Config
from tqdm.utils import CallbackIOWrapper
from pathlib import Path
from tqdm.contrib.telegram import tqdm
CHUNK_SIZE = 10240
TIMEOUT: float = 60
header = {
'Connection': 'keep-alive',
'sec-ch-ua': '"Google Chrome";v="89", "Chromium";v="89", ";Not A Brand";v="99"',
'Accept': 'application/json, text/plain, */*',
'requesttoken': '',
'sec-ch-ua-mobile': '?0',
'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/90.0.4389.90 Safari/537.36',
'Content-Type': 'application/json;charset=UTF-8',
'Origin': 'https://nube.ucf.edu.cu',
'Sec-Fetch-Site': 'same-origin',
'Sec-Fetch-Mode': 'cors',
'Sec-Fetch-Dest': 'empty',
'Accept-Language': 'en-US,en;q=0.9,es;q=0.8'
}
#async def upload_file_old(file):
# print("Func. upload_file")
# with open(file, 'rb') as upload:
# with requests.Session() as request:
# request.auth = (Config.USER, Config.PASSWORD)
# conn = request.put('https://nube.ucf.edu.cu/remote.php/webdav/{}'.format(file), data=upload)
# print(conn.status_code)
# os.unlink(file)
# print('Upload Ok!')
async def upload_file(file, chat_id):
filename_path = Path(f"{file}")
print("Func. upload_file")
# with open(file, 'rb') as upload:
with requests.Session() as request:
request.auth = (Config.USER, Config.PASSWORD)
size = filename_path.stat().st_size if filename_path.exists() else 0
print(size)
with tqdm(token=Config.BOT,
chat_id=chat_id,
total=size,
desc="Subiendo... ",
mininterval=3.0,
unit="B",
unit_scale=True,
bar_format="{desc}{percentage:3.0f}% / {rate_fmt}{postfix}",
unit_divisor=CHUNK_SIZE,
) as t, open(filename_path, "rb") as fileobj:
wrapped_file = CallbackIOWrapper(t.update, fileobj, "read")
with request.put(
url="https://nube.ucf.edu.cu/remote.php/webdav/{}".format(file),
data=wrapped_file, # type: ignore
headers=header,
timeout=TIMEOUT,
stream=True,
) as resp:
print(resp.status_code)
resp.raise_for_status()
t.tgio.delete()
print("UPLOAD OK!")
async def get_share_link(full_name):
with requests.Session() as request:
request.auth = (Config.USER, Config.PASSWORD)
response = request.get('https://nube.ucf.edu.cu/index.php/apps/dashboard/')
i = response.content.index(b'token=')
tok = str(response.content[i + 7:i + 96])[2:-1]
header.update({'requesttoken': tok})
data = '{"path":"' + f'/{full_name}' + '","shareType":3, "password":"' + f'{Config.LINK_PASSWORD}' + '"}'
response = request.post('https://nube.ucf.edu.cu/ocs/v2.php/apps/files_sharing/api/v1/shares',
headers=header, cookies=response.cookies, data=data)
url = response.json()
try:
url = url['ocs']['data']['url']
url = url + "/download/" + full_name
except Exception as e:
print(f'Error getting share link: {e}')
url = "Error: {}".format(e)
return url
async def delete_file(filename):
with requests.Session() as request:
request.auth = (Config.USER, Config.PASSWORD)
url = "https://nube.ucf.edu.cu/remote.php/webdav{}".format(filename)
req = request.delete(url=url)
return req.status_code
async def filename_geturl(url, resp):
if url.find("heroku") != -1:
print("heroku")
return await get_heroku_bot(resp, url)
else:
file = url.split("/", -1)[-1]
if file.find("?") != -1:
file = file.split("?", -1)[0]
if file.find(".") == -1:
try:
file = resp.headers["Content-Disposition"].split("", 1)[1].split("=", 1)[1][1:-1]
except Exception as err:
print(err)
if url.find("checker") != -1:
file += ".mp4"
else:
file += ".ext"
return ["direct", file]
async def get_heroku_bot(resp, url):
print(resp.headers)
try:
file = resp.headers["Conetnt-Disposition"].split(" ", 1)[1].split("=", 1)[1][1:-1]
except Exception as err:
print(err)
try:
# ext = resp.headers["Content-Type"]
# file = "heroku_file.{}".format(ext.split("/", -1)[1])
file_name = url.split("/")
file = file_name[-1]
except Exception as error:
print(error)
file = "defaul_name.ext"
return ["heroku", file]
async def clean_name(name):
full_name = unicodedata.normalize("NFKD", name).encode("ascii", "ignore").decode("ascii")
full_name = full_name.replace(" ", "_")
full_name = full_name.replace("%20", "_")
full_name = full_name.replace("(", "")
full_name = full_name.replace(")", "")
full_name = full_name.replace("$", "")
full_name = full_name.replace("%", "_")
full_name = full_name.replace("@", "_")
full_name = full_name.replace("/", "")
full_name = full_name.replace("|", "")
full_name = full_name.replace("..", ".")
return full_name
async def download_file(message, url, file_name):
start = time.time()
with open(file_name, mode='wb') as f:
with requests.Session() as session:
with session.get(url, stream=True) as r:
total_length = r.headers.get('content-length') or r.headers.get("Content-Length")
current = 0
if total_length is None:
await message.edit(f"Descargando archivo... \nArchivo: {file_name}\nTamaño: Desconocido")
f.write(r.content)
total_length = 0
else:
total = int(total_length)
                    for chunk in r.iter_content(1024 * 1024 * 15):  # read in ~15 MiB chunks
now = time.time()
diff = now - start
current += len(chunk)
percentage = current * 100 / total
speed = current / diff
elapsed_time = round(diff) * 1000
time_to_completion = round((total - current) / speed) * 1000
estimated_total_time = elapsed_time + time_to_completion
elapsed_time = TimeFormatter(milliseconds=elapsed_time)
estimated_total_time = TimeFormatter(milliseconds=estimated_total_time)
progressed = "[{0}{1}] \n\nProgreso: {2}%\n".format(
''.join(["█" for i in range(math.floor(percentage / 5))]),
''.join(["░" for i in range(20 - math.floor(percentage / 5))]),
round(percentage, 2))
tmp = progressed + "Descargado: {0}\nTotal: {1}\nVelocidad: {2}/s\nFaltan: {3}\n".format(
humanbytes(current),
humanbytes(total),
humanbytes(speed),
# elapsed_time if elapsed_time != '' else "0 s",
estimated_total_time if estimated_total_time != '' else "0 s")
f.write(chunk)
try:
await message.edit("Descargando...\n{}".format(tmp))
except MessageNotModified:
time.sleep(5.0)
pass
return file_name, int(total_length)
|
nilq/baby-python
|
python
|
import click
from flask.cli import with_appcontext
from goslinks.db.factory import get_model
@click.command()
@with_appcontext
def migrate():
"""Creates and migrates database tables."""
for model_name in ("user", "link"):
model = get_model(model_name)
click.echo(f"Creating table {model.Meta.table_name}... ", nl=False)
try:
model.create_table()
except Exception:
click.echo(click.style("FAILED!", fg="red"))
raise
else:
click.echo(click.style("SUCCESS!", fg="green"))
|
nilq/baby-python
|
python
|
from .nondet import Nondet
|
nilq/baby-python
|
python
|
# test_file.py
import io
import pytest
import graspfile.torfile
test_file = "tests/test_data/tor_files/python-graspfile-example.tor"
"""TICRA Tools 10.0.1 GRASP .tor file"""
@pytest.fixture
def empty_tor_file():
"""Return an empty GraspTorFile instance."""
return graspfile.torfile.GraspTorFile()
@pytest.fixture
def input_file_object():
"""Return a file object pointing to the test file."""
return open(test_file)
@pytest.fixture
def filled_tor_file(empty_tor_file, input_file_object):
"""Return a GraspTorFile instance filled from the tor_file"""
empty_tor_file.read(input_file_object)
input_file_object.close()
return empty_tor_file
def test_loading_tor_file(filled_tor_file):
"""Test loading from a tor cutfile"""
# Check that something was loaded
assert len(filled_tor_file.keys()) > 0
# Check that the frequencies were loaded
assert len(filled_tor_file["single_frequencies"].keys()) > 0
def test_reloading_tor_file(filled_tor_file):
"""Test outputting the filled_tor_file to text and reloading it with StringIO"""
test_str = repr(filled_tor_file)
try:
test_io = io.StringIO(test_str)
    except TypeError:
        # Python 2 fallback: io.StringIO there requires unicode input
        test_io = io.StringIO(unicode(test_str))  # noqa: F821
reload_tor_file = graspfile.torfile.GraspTorFile(test_io)
assert len(filled_tor_file.keys()) == len(reload_tor_file.keys())
|
nilq/baby-python
|
python
|
"""Choose python classifiers with a curses frontend."""
from __future__ import unicode_literals
import os
import curses
from collections import namedtuple
from .constants import VERSION
from .constants import CHECKMARK
class BoxSelector(object): # pragma: no cover
"""Originally designed for accman.py.
    Display options built from a list of strings in a (unix) terminal.
    The user can browse through the textboxes and select one with enter.
Used in pypackage to display the python trove classifiers in a somewhat
logical/easy to navigate way. The unfortunate part is that this uses
curses to display this to the user. Ideally a cross-platform solution can
be found to replace this class.
Known issues:
curses incorrectly handles unicode, might look like crap, YMMV
curses uses (y,x) for coordinates because fuck your logic
        curses support on winderps is sketchy/non-existent
"""
# Author: Nikolai Tschacher
# Date: 02.06.2013
# adapted for use in pypackage by Adam Talsma in May 2015
def __init__(self, classifier, screen, choices=None, current=0):
"""Create a BoxSelector object.
Args:
classifier: the Classifier root to find choices inside of
screen: the curses screen object
choices: a list of values in the classifier that are selected
current: integer index of classifiers/values to start on
"""
self.stdscr = screen
choices = choices or []
self.current_selected = current
selections = []
if classifier.name != "__root__":
selections.append("..")
for group in classifier.classifiers:
selections.append("[+] {}".format(group.name))
for value in classifier.values:
selections.append(" {} {}".format(
CHECKMARK if value in choices else " ",
value,
))
# Element parameters. Change them here.
self.TEXTBOX_WIDTH = max(79, max([len(i) for i in selections]) + 2)
self.TEXTBOX_HEIGHT = 3
if classifier.name == "__root__":
selections.append("Done".center(self.TEXTBOX_WIDTH - 4, " "))
self.L = selections
self.PAD_WIDTH = 600
self.PAD_HEIGHT = 10000
def pick(self):
"""Runs the user selection proccess, returns their choice index."""
self._init_curses()
self._create_pad()
windows = self._make_textboxes()
picked = self._select_textbox(windows)
self._end_curses()
return picked
def _init_curses(self):
"""Initializes the curses appliation."""
# turn off automatic echoing of keys to the screen
curses.noecho()
# Enable non-blocking mode. Keys are read without hitting enter
curses.cbreak()
# Disable the mouse cursor.
curses.curs_set(0)
self.stdscr.keypad(1)
        # Enable colored output.
curses.start_color()
curses.init_pair(1, curses.COLOR_BLACK, curses.COLOR_GREEN)
curses.init_pair(2, curses.COLOR_WHITE, curses.COLOR_BLACK)
self.stdscr.bkgd(curses.color_pair(2))
self.stdscr.refresh()
def _end_curses(self):
"""Terminates the curses application."""
curses.nocbreak()
self.stdscr.keypad(0)
curses.echo()
curses.endwin()
def _create_pad(self):
"""Creates a big self.pad to place the textboxes in."""
self.pad = curses.newpad(self.PAD_HEIGHT, self.PAD_WIDTH)
self.pad.box()
def _make_textboxes(self):
"""Build the textboxes in the center of the pad."""
# Get the actual screensize.
maxy, maxx = self.stdscr.getmaxyx()
banner = "{} -- choose python trove classifiers".format(VERSION)
self.stdscr.addstr(0, maxx // 2 - len(banner) // 2, banner)
windows = []
i = 2
for item in self.L:
pad = self.pad.derwin(
self.TEXTBOX_HEIGHT,
self.TEXTBOX_WIDTH,
i,
self.PAD_WIDTH // 2 - self.TEXTBOX_WIDTH // 2,
)
pad.box()
try:
pad.addstr(1, 2, item)
except UnicodeEncodeError:
# curses has fucked unicode support
item = item.replace(CHECKMARK, "*")
pad.addstr(1, 2, item)
windows.append(pad)
i += self.TEXTBOX_HEIGHT
return windows
def _center_view(self, window):
"""Centers and aligns the view according to the window argument given.
Returns:
the (y, x) coordinates of the centered window
"""
# The refresh() and noutrefresh() methods of a self.pad require 6 args
# to specify the part of self.pad to be displayed and the location on
# the screen to be used for the display. The arguments are pminrow,
# pmincol, sminrow, smincol, smaxrow, smaxcol; the p arguments refer
# to the top left corner of the self.pad region to be displayed and the
# s arguments define a clipping box on the screen within which the
# self.pad region is to be displayed.
cy, cx = window.getbegyx()
maxy, maxx = self.stdscr.getmaxyx()
self.pad.refresh(cy, cx, 1, maxx // 2 - self.TEXTBOX_WIDTH // 2,
maxy - 1, maxx - 1)
return (cy, cx)
def _select_textbox(self, windows):
"""Handles keypresses and user selection."""
        # Start at the root textbox.
topy, topx = self._center_view(windows[0])
last = self.current_selected - 1
top_textbox = windows[0]
while True:
            # Highlight the selected one; the last selected textbox should
            # become normal again.
windows[self.current_selected].bkgd(curses.color_pair(1))
windows[last].bkgd(curses.color_pair(2))
# While the textbox can be displayed on the page with the current
# top_textbox, don't alter the view. When this becomes impossible,
# center the view to last displayable textbox on the previous view.
maxy, maxx = self.stdscr.getmaxyx()
cy, cx = windows[self.current_selected].getbegyx()
            # The current window is too far down. Switch the top textbox.
if ((topy + maxy - self.TEXTBOX_HEIGHT) <= cy):
top_textbox = windows[self.current_selected]
            # The current window is too far up. There is a better way though...
if topy >= cy + self.TEXTBOX_HEIGHT:
top_textbox = windows[self.current_selected]
if last != self.current_selected:
last = self.current_selected
topy, topx = self._center_view(top_textbox)
c = self.stdscr.getch()
# Vim like KEY_UP/KEY_DOWN with j(DOWN) and k(UP).
if c in (106, curses.KEY_DOWN): # 106 == j
if self.current_selected >= len(windows) - 1:
self.current_selected = 0 # wrap around.
else:
self.current_selected += 1
elif c in (107, curses.KEY_UP): # 107 == k
if self.current_selected <= 0:
self.current_selected = len(windows) - 1 # wrap around.
else:
self.current_selected -= 1
elif c == 113: # 113 = q == Quit without selecting.
break
# At hitting enter, return the index of the selected list element.
elif c == curses.KEY_ENTER or c == 10:
return int(self.current_selected)
elif c == 27: # esc or alt, try to determine which
self.stdscr.nodelay(True)
n_seq = self.stdscr.getch()
self.stdscr.nodelay(False)
if n_seq == -1:
# Escape was pressed, check if the top option has .. in it
if ".." in str(windows[0].instr(1, 0)):
return 0 # backs up a level
else:
break # exits
Classifier = namedtuple("Classifier", ("name", "values", "classifiers"))
def _ensure_chain(top_level, sub_categories):
"""Ensure a chain of Classifiers from top_level through sub_categories."""
def _chain_in(level, item):
for sub_class in level.classifiers:
if sub_class.name == item:
return sub_class
else:
new_sub = Classifier(item, [], [])
level.classifiers.append(new_sub)
return new_sub
for sub_cat in sub_categories:
top_level = _chain_in(top_level, sub_cat)
return top_level
def read_classifiers():
"""Reads the trove file and returns a Classifier representing all."""
with open(os.path.join(os.path.dirname(os.path.abspath(__file__)),
"classifiers")) as openc:
classifiers = [c.strip() for c in openc.read().splitlines() if c]
all_classifiers = []
def _get_classifier(categories):
"""Find or create the classifier for categories."""
top_level = categories[0]
sub_categories = categories[1:]
for classifier in all_classifiers:
if classifier.name == top_level:
top_level = classifier
break
else:
top_level = Classifier(top_level, [], [])
all_classifiers.append(top_level)
return _ensure_chain(top_level, sub_categories)
for clsifier in classifiers:
_get_classifier(clsifier.split(" :: ")[:-1]).values.append(clsifier)
return Classifier("__root__", [], all_classifiers)
def back_it_up(current_level, all_classifiers, recursive=False):
"""Returns the classifier up a level from current."""
for classifier in all_classifiers.classifiers:
if current_level in classifier.classifiers:
return classifier
for classifier in all_classifiers.classifiers:
attempt = back_it_up(current_level, classifier, True)
if attempt:
return attempt
if not recursive:
return all_classifiers
def choose_classifiers(config):
"""Get some user input for the classifiers they'd like to use.
Returns:
list of valid classifier names
"""
all_classifiers = read_classifiers()
root_classifier = all_classifiers
old_delay = os.getenv("ESCDELAY")
os.environ["ESCDELAY"] = "25" # the default delay is a full second...
screen = curses.initscr()
choices = getattr(config, "classifiers", [])
choice = BoxSelector(root_classifier, screen, choices).pick()
while choice is not None:
init = 0
if choice == 0 and root_classifier.name != "__root__":
root_classifier = back_it_up(root_classifier, all_classifiers)
elif choice == 9 and root_classifier.name == "__root__":
break # the "done" box from the top level
elif choice > len(root_classifier.classifiers):
choice_index = (choice - len(root_classifier.classifiers) -
int(root_classifier.name != "__root__"))
choice_as_str = root_classifier.values[choice_index]
if choice_as_str not in choices:
choices.append(choice_as_str)
else:
choices.remove(choice_as_str)
init = choice
else:
choice_index = choice - int(root_classifier.name != "__root__")
root_classifier = root_classifier.classifiers[choice_index]
choice = BoxSelector(root_classifier, screen, choices, init).pick()
if old_delay:
os.environ["ESCDELAY"] = old_delay
else:
os.environ.pop("ESCDELAY")
return choices
|
nilq/baby-python
|
python
|
'''
Created on 20/01/2014
@author: MMPE
See documentation of HTCFile below
'''
from __future__ import division
from __future__ import unicode_literals
from __future__ import print_function
from __future__ import absolute_import
from builtins import zip
from builtins import int
from builtins import str
from future import standard_library
import os
standard_library.install_aliases()
class HTCDefaults(object):
empty_htc = """begin simulation;
time_stop 600;
solvertype 2; (newmark)
begin newmark;
deltat 0.02;
end newmark;
end simulation;
;
;----------------------------------------------------------------------------------------------------------------------------------------------------------------
;
begin wind ;
density 1.225 ;
wsp 10 ;
tint 1;
horizontal_input 1 ; 0=false, 1=true
windfield_rotations 0 0.0 0.0 ; yaw, tilt, rotation
  center_pos0 0 0 -30 ; hub height
shear_format 1 0;0=none,1=constant,2=log,3=power,4=linear
turb_format 0 ; 0=none, 1=mann,2=flex
tower_shadow_method 0 ; 0=none, 1=potential flow, 2=jet
end wind;
;
;----------------------------------------------------------------------------------------------------------------------------------------------------------------
;
;
begin output;
filename ./tmp;
general time;
end output;
exit;"""
def add_mann_turbulence(self, L=29.4, ae23=1, Gamma=3.9, seed=1001, high_frq_compensation=True,
filenames=None,
no_grid_points=(16384, 32, 32), box_dimension=(6000, 100, 100),
dont_scale=False,
std_scaling=None):
wind = self.add_section('wind')
wind.turb_format = 1
mann = wind.add_section('mann')
if 'create_turb_parameters' in mann:
mann.create_turb_parameters.values = [L, ae23, Gamma, seed, int(high_frq_compensation)]
else:
mann.add_line('create_turb_parameters', [L, ae23, Gamma, seed, int(high_frq_compensation)],
"L, alfaeps, gamma, seed, highfrq compensation")
if filenames is None:
import numpy as np
dxyz = tuple(np.array(box_dimension) / no_grid_points)
from wetb.wind.turbulence import mann_turbulence
filenames = ["./turb/" + mann_turbulence.name_format %
((L, ae23, Gamma, high_frq_compensation) + no_grid_points + dxyz + (seed, uvw))
for uvw in ['u', 'v', 'w']]
if isinstance(filenames, str):
filenames = ["./turb/%s_s%04d%s.bin" % (filenames, seed, c) for c in ['u', 'v', 'w']]
for filename, c in zip(filenames, ['u', 'v', 'w']):
setattr(mann, 'filename_%s' % c, filename)
for c, n, dim in zip(['u', 'v', 'w'], no_grid_points, box_dimension):
setattr(mann, 'box_dim_%s' % c, "%d %.4f" % (n, dim / (n)))
if dont_scale:
mann.dont_scale = 1
else:
try:
del mann.dont_scale
except KeyError:
pass
if std_scaling is not None:
mann.std_scaling = "%f %f %f" % std_scaling
else:
try:
del mann.std_scaling
except KeyError:
pass
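    # Example (hedged; assumes an HTCFile-like object "htc" that mixes in
    # HTCDefaults and provides add_section):
    #   htc.add_mann_turbulence(L=29.4, ae23=1, Gamma=3.9, seed=1001)
    # adds a "begin mann; ... end mann;" section with create_turb_parameters
    # and filename_u/v/w entries pointing into ./turb/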
def add_turb_export(self, filename="export_%s.turb", samplefrq=None):
exp = self.wind.add_section('turb_export', allow_duplicate=True)
for uvw in 'uvw':
exp.add_line('filename_%s' % uvw, [filename % uvw])
sf = samplefrq or max(1, int(self.wind.mann.box_dim_u[1] / (self.wind.wsp[0] * self.deltat())))
exp.samplefrq = sf
if "time" in self.output:
exp.time_start = self.output.time[0]
else:
exp.time_start = 0
exp.nsteps = (self.simulation.time_stop[0] - exp.time_start[0]) / self.deltat()
for vw in 'vw':
exp.add_line('box_dim_%s' % vw, self.wind.mann['box_dim_%s' % vw].values)
def import_dtu_we_controller_input(self, filename):
dtu_we_controller = [dll for dll in self.dll if dll.name[0] == 'dtu_we_controller'][0]
with open(filename) as fid:
lines = fid.readlines()
K_r1 = float(lines[1].replace("K = ", '').replace("[Nm/(rad/s)^2]", ''))
Kp_r2 = float(lines[4].replace("Kp = ", '').replace("[Nm/(rad/s)]", ''))
Ki_r2 = float(lines[5].replace("Ki = ", '').replace("[Nm/rad]", ''))
Kp_r3 = float(lines[7].replace("Kp = ", '').replace("[rad/(rad/s)]", ''))
Ki_r3 = float(lines[8].replace("Ki = ", '').replace("[rad/rad]", ''))
KK = lines[9].split("]")
KK1 = float(KK[0].replace("K1 = ", '').replace("[deg", ''))
KK2 = float(KK[1].replace(", K2 = ", '').replace("[deg^2", ''))
cs = dtu_we_controller.init
cs.constant__11.values[1] = "%.6E" % K_r1
cs.constant__12.values[1] = "%.6E" % Kp_r2
cs.constant__13.values[1] = "%.6E" % Ki_r2
cs.constant__16.values[1] = "%.6E" % Kp_r3
cs.constant__17.values[1] = "%.6E" % Ki_r3
cs.constant__21.values[1] = "%.6E" % KK1
cs.constant__22.values[1] = "%.6E" % KK2
def add_hydro(self, mudlevel, mwl, gravity=9.81, rho=1027):
wp = self.add_section("hydro").add_section('water_properties')
wp.mudlevel = mudlevel
wp.mwl = mwl
wp.gravity = gravity
wp.rho = rho
class HTCExtensions(object):
def get_shear(self):
shear_type, parameter = self.wind.shear_format.values
z0 = -self.wind.center_pos0[2]
wsp = self.wind.wsp[0]
if shear_type == 1: # constant
return lambda z: wsp
elif shear_type == 3:
from wetb.wind.shear import power_shear
return power_shear(parameter, z0, wsp)
else:
raise NotImplementedError
|
nilq/baby-python
|
python
|
from apscheduler.schedulers.blocking import BlockingScheduler
from server.sync import sync_blocks, sync_tokens
background = BlockingScheduler()
background.add_job(sync_tokens, "interval", seconds=30)
background.add_job(sync_blocks, "interval", seconds=5)
background.start()
|
nilq/baby-python
|
python
|
########################################################
# plot_norms.py #
# Matheus J. Castro #
# Version 1.2 #
# Last Modification: 11/11/2021 (month/day/year) #
# https://github.com/MatheusJCastro/spectra_comparator #
# Licensed under MIT License #
########################################################
from astropy.io import fits
import matplotlib.pyplot as plt
import numpy as np
import os
def open_spec(fl_name):
# Subroutine to open the .fits spectrum and read it
hdul = fits.open(fl_name) # open the file
spec_data = hdul[0].data # get the data
spec_header = hdul[0].header # get the header
if spec_data.shape != (2048,): # get only the actual spectrum (for multidimensional data)
spec_data = spec_data[1][0]
# Get the wavelength information from the header
# CDELT1 or CD1_1
wl = spec_header['CRVAL1'] + spec_header['CD1_1'] * np.arange(0, len(spec_data))
hdul.close() # close the file
return wl, spec_data, spec_header
def finish_plot(show=False, save=False, fl1=None, fl2=None):
# End and save plot subroutine
if save:
plt.savefig("Plots_{}_{}".format(fl1, fl2))
if show:
plt.show()
plt.close()
def plot_spectra(spec, name=None):
# Subroutine to plot the spectrum
plt.plot(spec[0], spec[1], label=name)
def main():
# Main subroutine, find and plot the spectra
onlynorm = False # change to True to plot only the normalized spectrum
files = []
for i in os.listdir(): # search for all non-normalized files in the current directory
if "tha_" in i and "norm" not in i and "list" not in i:
files.append(i)
files_norm = []
for i in os.listdir(): # search for all normalized files in the current directory
if "norm_tha_" in i:
files_norm.append(i)
for i in range(len(files)): # for each tuple of files
fig = plt.figure(figsize=(21, 9))
fig.suptitle("Comparison of normalized and non normalized spectrum", fontsize=28)
if not onlynorm: # to plot non-normalized files as subplot
plt.subplot(121)
plt.title("Standard", fontsize=22)
plt.xlabel("Pixel", fontsize=20)
plt.ylabel("Intensity", fontsize=20)
plt.yscale("log")
plt.xticks(fontsize=16)
plt.yticks(fontsize=16)
plt.tick_params(axis='y', which='minor', labelsize=16)
spec_info = open_spec(files[i]) # open the current spectrum
plot_spectra(spec_info) # plot the spectrum
plt.grid(True, which="both", linewidth=1)
plt.subplot(122)
plt.title("Normalized", fontsize=22)
plt.xlabel("Pixel", fontsize=20)
if onlynorm:
plt.ylabel("Intensity", fontsize=20)
plt.yscale("log")
plt.xticks(fontsize=16)
plt.yticks(fontsize=16)
plt.tick_params(axis='y', which='minor', labelsize=16)
spec_info = open_spec(files_norm[i]) # open the current spectrum
plot_spectra(spec_info) # plot the spectrum
plt.grid(True, which="both", linewidth=1)
if files[i][-16:-12] == "3080": # there are two spectra of the 3080A, save both without erasing one
finish_plot(save=True, fl1="comp_norm", fl2=files[i][-16:-8])
else:
finish_plot(save=True, fl1="comp_norm", fl2=files[i][-16:-12])
if __name__ == '__main__':
main()
|
nilq/baby-python
|
python
|
from django.shortcuts import render
from django.http import JsonResponse
import os
import json
import time
from .api import GoogleAPI
from threpose.settings import BASE_DIR
from src.caching.caching_gmap import APICaching
from decouple import config
gapi = GoogleAPI()
api_caching = APICaching()
PLACE_IMG_PATH = os.path.join(BASE_DIR, 'media', 'places_image')
# Place List page
def get_next_page_from_token(request): # pragma: no cover
"""Get places list data by next_page_token."""
# Check request
if request.method != 'POST':
return JsonResponse({"status": "INVALID METHOD"})
if 'token' not in request.POST:
return JsonResponse({"status": "INVALID PAYLOAD"})
# Get next page token from request
token = request.POST['token']
context = []
# Check next_page cache
if api_caching.get(f'{token[:30]}') is None:
for _ in range(6):
            # Request the data up to 6 times; if the response is still not OK
            # after the final attempt, return an empty result
data = json.loads(gapi.next_search_nearby(token))
if data['status'] == "OK":
context = restruct_nearby_place(data['results'])
break
time.sleep(0.2)
# write cache file
byte_context = json.dumps({"cache": context, "status": "OK"}, indent=3).encode()
api_caching.add(f'{token[:30]}', byte_context)
if len(context) > 0:
return JsonResponse({"places": context, "status": "OK"})
return JsonResponse({"places": context, "status": "NOT FOUND"})
else: # Have cache
# load cache
context = json.loads(api_caching.get(f'{token[:30]}'))
# check place images
context = check_downloaded_image(context['cache'])
return JsonResponse({"places": context, "status": "OK"})
def place_list(request, *args, **kwargs): # pragma: no cover
"""Place_list view for list place that nearby the user search input."""
data = request.GET # get lat and lng from url
# Our default search type
types = ['restaurant', 'zoo', 'tourist_attraction', 'museum', 'cafe', 'aquarium']
lat = data['lat']
lng = data['lng']
# Get place cache
if api_caching.get(f'{lat}{lng}searchresult'):
# data exists
data = json.loads(api_caching.get(f'{lat}{lng}searchresult'))
context = data['cache']
token = data['next_page_token']
else:
# data not exist
context, token = get_new_context(types, lat, lng)
context = check_downloaded_image(context)
# get all image file name in static/images/place_image
api_key = config('FRONTEND_API_KEY')
return render(request, "search/place_list.html", {'places': context, 'all_token': token, 'api_key': api_key})
# Helper function
def get_new_context(types: list, lat: int, lng: int) -> tuple:  # pragma: no cover
    """Cache new nearby-place data and return it.
    Args:
        types: place types to search for
        lat, lng: latitude and longitude of the user's search location
    Returns:
        context: places nearby data
        token: next page token
    """
token = {}
# This create for keeping data from search nearby
tempo_context = []
for type in types:
data = json.loads(gapi.search_nearby(lat, lng, type))
if 'next_page_token' in data:
token[type] = data['next_page_token']
places = data['results']
restructed_places = restruct_nearby_place(places)
tempo_context = add_more_place(tempo_context, restructed_places)
# Caching places nearby
cache = {'cache': tempo_context, 'next_page_token': token}
api_caching.add(f'{lat}{lng}searchresult', json.dumps(cache, indent=3).encode())
# Load data from cache
context = json.loads(api_caching.get(f'{lat}{lng}searchresult'))['cache']
return context, token
def restruct_nearby_place(places: list) -> list:
"""Process data for frontend
Args:
places: A place nearby data from google map api.
Returns:
context: A place data that place-list page needed.
Data struct:
[
{
# Essential key
'place_name': <name>,
'place_id': <place_id>,
'photo_ref': [<photo_ref],
'types': [],
# other...
}
. . .
]
"""
context = []
for place in places:
init_place = {
'place_name': None,
'place_id': None,
'photo_ref': [],
'types': [],
}
if 'photos' in place:
# Place have an image
photo_ref = place['photos'][0]['photo_reference']
init_place['photo_ref'].append(photo_ref)
else:
# Place don't have an image
continue
init_place['place_name'] = place['name']
init_place['place_id'] = place['place_id']
init_place['types'] = place['types']
context.append(init_place)
return context
def check_downloaded_image(context: list) -> list:
"""Check that image from static/images/place_image that is ready for frontend to display or not
Args:
context: place nearby data
Returns:
context: place nearby data with telling the image of each place were downloaded or not
"""
# Check places_image dir that is exists
if os.path.exists(PLACE_IMG_PATH):
# Get image file name from static/images/places_image
all_img_file = [f for f in os.listdir(PLACE_IMG_PATH)
if os.path.isfile(os.path.join(PLACE_IMG_PATH, f))]
for place in context:
# If place that have photo_ref imply that place have an images
if 'photo_ref' in place:
place_id = place['place_id']
downloaded_img = f'{place_id}photo.jpeg' in all_img_file
                no_image = len(place['photo_ref']) == 0  # nothing to download for this place
                if downloaded_img or no_image:
place['downloaded'] = True
else:
place['downloaded'] = False
return context
def add_more_place(context: list, new: list):
"""Append places to context
Args:
context: total nearby palce data
new: new data by next page tokens
Returns:
context: total nearby place that append
new to is's with out duplicated place
"""
place_exist = [place['place_id'] for place in context]
for place in new:
# Check that place is exists or not
if place['place_id'] in place_exist:
continue
context.append(place)
return context
|
nilq/baby-python
|
python
|
import os
import math
import random
import argparse
def generate_entities(num_entities=100):
"""generate num_entities random entities for synthetic knowledge graph."""
i = 0
entity_list = []
    hex_chars = ['0', '1', '2', '3', '4', '5', '6', '7', '8', '9', 'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h']  # 18 symbols; not strictly hex despite the name
l = int(math.log(num_entities, 18)+1)
# print l
while i < num_entities:
entity = "/entity_{}".format(''.join(random.sample(hex_chars, l)))
if entity not in entity_list:
entity_list.append(entity)
i += 1
return entity_list
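# Example (illustrative): generate_entities(100) returns 100 unique names
# like "/entity_3f"; the name length grows with N as int(log(N, 18) + 1).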
parser = argparse.ArgumentParser()
parser.add_argument("--N", type=int)
args = parser.parse_args()
N = args.N
print(N)
entities = generate_entities(N)
entity_file = os.path.join(os.getcwd(), "data", "fake-420", "entities.txt")
with open(entity_file, 'w+') as f:
    for e in entities:
        f.write("{}\n".format(e))
|
nilq/baby-python
|
python
|
# Simple script for drawing the chi-squared density
#
from rpy import *
def draw(df, start=0, end=10):
grid = r.seq(start, end, by=0.1)
l = [r.dchisq(x, df) for x in grid]
r.par(ann=0, yaxt='n')
r.plot(grid, l, type='lines')
if __name__ == '__main__':
print "<Enter> to quit."
while 1:
try:
df = int(raw_input('Degrees of freedom> '))
draw(df)
except ValueError:
break
|
nilq/baby-python
|
python
|
"""
queue.py
location queue implementation for ducky25, decides next location to travel to
"""
class DestinationQueue:
def __init__(self):
self.queue = []
self.position = 0
def add_to_queue(self, location):
self.queue.append(location)
def pop_next_destination(self):
if len(self.queue) > self.position:
result = self.queue[self.position]
self.position = self.position + 1
else:
result = -1
return result
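

# --- Hedged usage sketch (not part of the original module) ---
if __name__ == "__main__":
    q = DestinationQueue()
    q.add_to_queue("kitchen")
    q.add_to_queue("lab")
    print(q.pop_next_destination())  # kitchen
    print(q.pop_next_destination())  # lab
    print(q.pop_next_destination())  # -1: queue exhausted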
|
nilq/baby-python
|
python
|
from logging.config import dictConfig # type: ignore
import json
from config import CONFIG_DICT
done_setup = False
def setup_logging():
global done_setup
if not done_setup and CONFIG_DICT['LOGGING']:
try:
logging_config_file_path = CONFIG_DICT['LOGGING_CONFIG_FILE_PATH']
with open(logging_config_file_path, 'rt') as file:
config = json.load(file)
dictConfig(config)
done_setup = True
except IOError as e:
            raise Exception('Failed to load logging configuration') from e
|
nilq/baby-python
|
python
|
"""
MIT License
Copyright(c) 2021 Andy Zhou
"""
from flask import render_template, request, abort
from flask.json import jsonify
from flask_wtf.csrf import CSRFError
from flask_babel import _
def api_err_response(err_code: int, short_message: str, long_message: str = None):
if (
request.accept_mimetypes.accept_json
and not request.accept_mimetypes.accept_html
or request.blueprint == "api_v1"
or request.blueprint == "api_v2"
or request.blueprint == "api_v3"
):
response = {"error": short_message}
if long_message:
response["message"] = long_message
response = jsonify(response)
response.status_code = err_code
return response
return None # explicitly return None
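# Example (illustrative): for a client sending "Accept: application/json",
#   api_err_response(404, "not found", "resource missing")
# returns a JSON response {"error": "not found", "message": "resource missing"}
# with status code 404; for an HTML client it returns None.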
def err_handler(
err_code: int,
short_message: str,
long_message: str,
error_description: str,
template: str = "errors/error.html",
):
json_response = api_err_response(err_code, short_message)
if json_response is not None:
return json_response
return (
render_template(
template, error_message=long_message, error_description=error_description
),
err_code,
)
def register_error_handlers(app): # noqa: C901
@app.errorhandler(400)
@app.errorhandler(CSRFError)
def bad_request(e):
return err_handler(
400,
"bad request",
"400 Bad Request",
"You have sent an invalid request. This can either be caused by a false CSRF-token or an invalid value of a form.",
)
@app.errorhandler(403)
def forbidden(e):
return err_handler(
403,
"forbidden",
"403 Forbidden",
"You do not have the permission to access this page. Maybe you are not signed in (viewing posts directly), or you tried to enter a page where you aren't allowed to enter.",
)
@app.errorhandler(404)
def page_not_found(e):
return err_handler(
404,
"not found",
"404 Not Found",
"The page you want is not here or deleted.",
"errors/404.html",
)
@app.errorhandler(405)
def method_not_allowed(e):
return err_handler(
405,
"method not allowed",
"405 Method Not Allowed",
"Your request has a wrong method. Maybe you entered some page without a form submission.",
)
@app.errorhandler(413)
def payload_to_large(e):
return err_handler(
413,
"request entity too large",
"413 Request Entity Too Large",
"Things you upload is too large.",
)
@app.errorhandler(429) # handle when IP is limited
def too_many_requests(e):
return err_handler(
429,
"too many requests",
"429 Too Many Requests",
"You see 429 because you entered a page too many times and triggered our self-protection program. Usually you can wait for a while, in some cases it takes a day.",
)
@app.errorhandler(500)
def internal_server_error(e):
return err_handler(
500,
"internal server error",
"500 Internal Server Error",
"The server went wrong and returned 500. You can contact them to report this 500 error.",
)
|
nilq/baby-python
|
python
|
from tidyms import lcms
from tidyms import utils
import numpy as np
import pytest
from itertools import product
mz_list = [200, 250, 300, 420, 450]
@pytest.fixture
def simulated_experiment():
mz = np.array(mz_list)
rt = np.linspace(0, 100, 100)
# simulated features params
mz_params = np.array([mz_list,
[3, 10, 5, 31, 22]])
mz_params = mz_params.T
rt_params = np.array([[30, 40, 60, 80, 80],
[1, 2, 2, 3, 3],
[1, 1, 1, 1, 1]])
rt_params = rt_params.T
noise_level = 0.1
sim_exp = utils.SimulatedExperiment(mz, rt, mz_params, rt_params,
noise=noise_level, mode="centroid")
return sim_exp
# parameters of make_chromatograms are tested in the test_validation module
def test_make_chromatograms(simulated_experiment):
# test that the chromatograms generated are valid
# create chromatograms
n_sp = simulated_experiment.getNrSpectra()
n_mz = simulated_experiment.mz_params.shape[0]
rt = np.zeros(n_sp)
chromatogram = np.zeros((n_mz, n_sp))
for scan in range(n_sp):
sp = simulated_experiment.getSpectrum(scan)
rt[scan] = sp.getRT()
_, spint = sp.get_peaks()
chromatogram[:, scan] = spint
expected_chromatograms = [lcms.Chromatogram(rt, x) for x in chromatogram]
test_chromatograms = lcms.make_chromatograms(simulated_experiment, mz_list)
assert len(test_chromatograms) == len(expected_chromatograms)
for ec, tc in zip(expected_chromatograms, test_chromatograms):
assert np.array_equal(ec.rt, tc.rt)
assert np.array_equal(ec.spint, tc.spint)
def test_make_chromatograms_accumulator_mean(simulated_experiment):
lcms.make_chromatograms(simulated_experiment, mz_list, accumulator="mean")
assert True
def test_make_chromatograms_start(simulated_experiment):
n_sp = simulated_experiment.getNrSpectra()
start = 10
chromatogram_length = n_sp - start
chromatograms = lcms.make_chromatograms(simulated_experiment, mz_list,
start=start)
for c in chromatograms:
assert c.rt.size == chromatogram_length
assert c.rt[0] == simulated_experiment.getSpectrum(start).getRT()
def test_make_chromatograms_end(simulated_experiment):
end = 90
chromatograms = lcms.make_chromatograms(simulated_experiment, mz_list,
end=end)
for c in chromatograms:
assert c.rt.size == end
assert c.rt[-1] == simulated_experiment.getSpectrum(end - 1).getRT()
def test_make_chromatograms_outside_range_mz(simulated_experiment):
# the total intensity of the chromatogram should be equal to zero
chromatograms = lcms.make_chromatograms(simulated_experiment, [550])
assert np.isclose(chromatograms[0].spint.sum(), 0)
# def test_accumulate_spectra(simulated_experiment):
# lcms.accumulate_spectra_profile(simulated_experiment, start=10, end=20)
# assert True
#
#
# def test_accumulate_spectra_subtract(simulated_experiment):
# lcms.accumulate_spectra_profile(simulated_experiment, start=10, end=20,
# subtract_left=5, subtract_right=25)
# assert True
#
#
# def test_get_roi_params():
# func_params = [("uplc", "qtof"), ("uplc", "orbitrap"), ("hplc", "qtof"),
# ("hplc", "orbitrap")]
# n_sp = 100 # dummy value for the validator
# for separation, instrument in func_params:
# params = lcms.get_roi_params(separation, instrument)
# validation.validate_make_roi_params(n_sp, params)
# assert True
#
#
# def test_get_roi_params_bad_separation():
# with pytest.raises(ValueError):
# lcms.get_roi_params("bad-value", "qtof")
#
#
# def test_get_roi_params_bad_instrument():
# with pytest.raises(ValueError):
# lcms.get_roi_params("uplc", "bad-value")
#
#
# # Test Chromatogram object
#
@pytest.fixture
def chromatogram_data():
rt = np.arange(200)
spint = utils.gauss(rt, 50, 2, 100)
spint += np.random.normal(size=rt.size, scale=1.0)
return rt, spint
def test_chromatogram_creation(chromatogram_data):
# test building a chromatogram with default mode
rt, spint = chromatogram_data
chromatogram = lcms.Chromatogram(rt, spint)
assert chromatogram.mode == "uplc"
def test_chromatogram_creation_with_mode(chromatogram_data):
rt, spint = chromatogram_data
chromatogram = lcms.Chromatogram(rt, spint, mode="hplc")
assert chromatogram.mode == "hplc"
def test_chromatogram_creation_invalid_mode(chromatogram_data):
rt, spint = chromatogram_data
with pytest.raises(ValueError):
lcms.Chromatogram(rt, spint, mode="invalid-mode")
def test_chromatogram_find_peaks(chromatogram_data):
chromatogram = lcms.Chromatogram(*chromatogram_data)
chromatogram.find_peaks()
assert len(chromatogram.peaks) == 1
# Test MSSPectrum
@pytest.fixture
def ms_data():
mz = np.linspace(100, 110, 1000)
spint = utils.gauss(mz, 105, 0.005, 100)
    spint += np.random.normal(size=mz.size, scale=1.0)
return mz, spint
def test_ms_spectrum_creation(ms_data):
sp = lcms.MSSpectrum(*ms_data)
assert sp.instrument == "qtof"
def test_ms_spectrum_creation_with_instrument(ms_data):
instrument = "orbitrap"
sp = lcms.MSSpectrum(*ms_data, instrument=instrument)
assert sp.instrument == instrument
def test_ms_spectrum_creation_invalid_instrument(ms_data):
with pytest.raises(ValueError):
instrument = "invalid-mode"
lcms.MSSpectrum(*ms_data, instrument=instrument)
def test_find_centroids_qtof(ms_data):
sp = lcms.MSSpectrum(*ms_data)
# the algorithm is tested on test_peaks.py
sp.find_centroids()
assert True
# Test ROI
@pytest.fixture
def roi_data():
rt = np.arange(200)
spint = utils.gauss(rt, 50, 2, 100)
mz = np.random.normal(loc=150.0, scale=0.001, size=spint.size)
# add some nan values
nan_index = [0, 50, 100, 199]
spint[nan_index] = np.nan
mz[nan_index] = np.nan
return rt, mz, spint
def test_roi_creation(roi_data):
rt, mz, spint = roi_data
lcms.Roi(spint, mz, rt, rt)
assert True
def test_fill_nan(roi_data):
rt, mz, spint = roi_data
roi = lcms.Roi(spint, mz, rt, rt)
roi.fill_nan()
has_nan = np.any(np.isnan(roi.mz) & np.isnan(roi.spint))
assert not has_nan
# roi making tests
def test_match_mz_no_multiple_matches():
tolerance = 2
mz1 = np.array([50, 75, 100, 125, 150])
mz2 = np.array([40, 51, 78, 91, 99, 130, 150])
sp2 = np.array([100] * mz2.size)
# expected values for match/no match indices
mz1_match_index = np.array([0, 2, 4], dtype=int)
mz2_match_index = np.array([1, 4, 6], dtype=int)
mz2_no_match_index = np.array([0, 2, 3, 5], dtype=int)
mode = "closest"
test_mz1_index, mz2_match, sp2_match, mz2_no_match, sp2_no_match = \
lcms._match_mz(mz1, mz2, sp2, tolerance, mode, np.mean, np.mean)
# test match index
assert np.array_equal(mz1_match_index, test_mz1_index)
# test match mz and sp values
assert np.array_equal(mz2[mz2_match_index], mz2_match)
assert np.array_equal(sp2[mz2_match_index], sp2_match)
# test no match mz and sp values
assert np.array_equal(mz2[mz2_no_match_index], mz2_no_match)
assert np.array_equal(sp2[mz2_no_match_index], sp2_no_match)
def test_match_mz_no_matches():
tolerance = 2
mz1 = np.array([50, 75, 100, 125, 150])
mz2 = np.array([40, 53, 78, 91, 97, 130, 154])
sp2 = np.array([100] * mz2.size)
# expected values for match/no match indices
mz1_match_index = np.array([], dtype=int)
mz2_match_index = np.array([], dtype=int)
mz2_no_match_index = np.array([0, 1, 2, 3, 4, 5, 6], dtype=int)
mode = "closest"
test_mz1_index, mz2_match, sp2_match, mz2_no_match, sp2_no_match = \
lcms._match_mz(mz1, mz2, sp2, tolerance, mode, np.mean, np.mean)
# test match index
assert np.array_equal(mz1_match_index, test_mz1_index)
# test match mz and sp values
assert np.array_equal(mz2[mz2_match_index], mz2_match)
assert np.array_equal(sp2[mz2_match_index], sp2_match)
# test no match mz and sp values
assert np.array_equal(mz2[mz2_no_match_index], mz2_no_match)
assert np.array_equal(sp2[mz2_no_match_index], sp2_no_match)
def test_match_mz_all_match():
tolerance = 2
mz1 = np.array([50, 75, 100, 125, 150])
mz2 = np.array([51, 77, 99, 126, 150])
sp2 = np.array([100] * mz2.size)
# expected values for match/no match indices
mz1_match_index = np.array([0, 1, 2, 3, 4], dtype=int)
mz2_match_index = np.array([0, 1, 2, 3, 4], dtype=int)
mz2_no_match_index = np.array([], dtype=int)
mode = "closest"
test_mz1_index, mz2_match, sp2_match, mz2_no_match, sp2_no_match = \
lcms._match_mz(mz1, mz2, sp2, tolerance, mode, np.mean, np.mean)
# test match index
assert np.array_equal(mz1_match_index, test_mz1_index)
# test match mz and sp values
assert np.array_equal(mz2[mz2_match_index], mz2_match)
assert np.array_equal(sp2[mz2_match_index], sp2_match)
# test no match mz and sp values
assert np.array_equal(mz2[mz2_no_match_index], mz2_no_match)
assert np.array_equal(sp2[mz2_no_match_index], sp2_no_match)
def test_match_mz_multiple_matches_mode_closest():
tolerance = 2
mz1 = np.array([50, 75, 100, 125, 150])
mz2 = np.array([49, 51, 78, 99, 100, 101, 126, 150, 151])
sp2 = np.array([100] * mz2.size)
# expected values for match/no match indices
# in closest mode, argmin is used to select the closest value. If more
# than one value has the same difference, the first one in the array is
# going to be selected.
mz1_match_index = np.array([0, 2, 3, 4], dtype=int)
mz2_match_index = np.array([0, 4, 6, 7], dtype=int)
mz2_no_match_index = np.array([1, 2, 3, 5, 8], dtype=int)
mode = "closest"
test_mz1_index, mz2_match, sp2_match, mz2_no_match, sp2_no_match = \
lcms._match_mz(mz1, mz2, sp2, tolerance, mode, np.mean, np.mean)
# test match index
assert np.array_equal(mz1_match_index, test_mz1_index)
# test match mz and sp values
assert np.array_equal(mz2[mz2_match_index], mz2_match)
assert np.array_equal(sp2[mz2_match_index], sp2_match)
# test no match mz and sp values
assert np.array_equal(mz2[mz2_no_match_index], mz2_no_match)
assert np.array_equal(sp2[mz2_no_match_index], sp2_no_match)
def test_match_mz_multiple_matches_mode_reduce():
tolerance = 2
mz1 = np.array([50, 75, 100, 125, 150], dtype=float)
mz2 = np.array([49, 51, 78, 99, 100, 101, 126, 150, 151], dtype=float)
sp2 = np.array([100] * mz2.size, dtype=float)
# expected values for match/no match indices
# in closest mode, argmin is used to select the closest value. If more
# than one value has the same difference, the first one in the array is
# going to be selected.
mz1_match_index = np.array([0, 2, 3, 4], dtype=int)
mz2_match_index = np.array([0, 1, 3, 4, 5, 6, 7, 8], dtype=int)
mz2_no_match_index = np.array([2], dtype=int)
expected_mz2_match = [50.0, 100.0, 126.0, 150.5]
expected_sp2_match = [200, 300, 100, 200]
mode = "reduce"
test_mz1_index, mz2_match, sp2_match, mz2_no_match, sp2_no_match = \
lcms._match_mz(mz1, mz2, sp2, tolerance, mode, np.mean, np.sum)
# test match index
assert np.array_equal(mz1_match_index, test_mz1_index)
# test match mz and sp values
assert np.allclose(mz2_match, expected_mz2_match)
assert np.allclose(sp2_match, expected_sp2_match)
# test no match mz and sp values
assert np.array_equal(mz2[mz2_no_match_index], mz2_no_match)
assert np.array_equal(sp2[mz2_no_match_index], sp2_no_match)
def test_match_mz_invalid_mode():
tolerance = 2
mz1 = np.array([50, 75, 100, 125, 150])
mz2 = np.array([49, 51, 78, 99, 100, 101, 126, 150, 151])
sp2 = np.array([100] * mz2.size)
# expected values for match/no match indices
# in closest mode, argmin is used to select the closest value. If more
# than one value has the same difference, the first one in the array is
# going to be selected.
mz1_match_index = np.array([0, 2, 3, 4], dtype=int)
mz2_match_index = np.array([0, 4, 6, 7], dtype=int)
mz2_no_match_index = np.array([1, 2, 3, 5, 8], dtype=int)
mode = "invalid-mode"
with pytest.raises(ValueError):
test_mz1_index, mz2_match, sp2_match, mz2_no_match, sp2_no_match = \
lcms._match_mz(mz1, mz2, sp2, tolerance, mode, np.mean, np.mean)
def test_make_roi(simulated_experiment):
roi_list = lcms.make_roi(simulated_experiment, tolerance=0.005,
max_missing=0, min_length=0, min_intensity=0,
multiple_match="reduce")
assert len(roi_list) == simulated_experiment.mz_params.shape[0]
def test_make_roi_targeted_mz(simulated_experiment):
# the first three m/z values generated by simulated experiment are used
targeted_mz = simulated_experiment.mz_params[:, 0][:3]
roi_list = lcms.make_roi(simulated_experiment, tolerance=0.005,
max_missing=0, min_length=0, min_intensity=0,
multiple_match="reduce", targeted_mz=targeted_mz)
assert len(roi_list) == targeted_mz.size
def test_make_roi_min_intensity(simulated_experiment):
min_intensity = 15
roi_list = lcms.make_roi(simulated_experiment, tolerance=0.005,
max_missing=0, min_length=0,
min_intensity=min_intensity,
multiple_match="reduce")
# only two roi should have intensities greater than 15
assert len(roi_list) == 2
def test_make_roi_start(simulated_experiment):
start = 10
roi_list = lcms.make_roi(simulated_experiment, tolerance=0.005,
max_missing=0, min_length=0, min_intensity=0,
multiple_match="reduce", start=start)
n_sp = simulated_experiment.getNrSpectra()
for r in roi_list:
assert r.mz.size == (n_sp - start)
def test_make_roi_end(simulated_experiment):
end = 10
roi_list = lcms.make_roi(simulated_experiment, tolerance=0.005,
max_missing=0, min_length=0, min_intensity=0,
multiple_match="reduce", end=end)
n_sp = simulated_experiment.getNrSpectra()
for r in roi_list:
assert r.mz.size == end
def test_make_roi_multiple_match_closest(simulated_experiment):
roi_list = lcms.make_roi(simulated_experiment, tolerance=0.005,
max_missing=0, min_length=0, min_intensity=0,
multiple_match="closest")
assert len(roi_list) == simulated_experiment.mz_params.shape[0]
def test_make_roi_multiple_match_reduce_merge(simulated_experiment):
# set a tolerance such that two mz values are merged
# test is done in targeted mode to force a multiple match by removing
# one of the mz values
targeted_mz = simulated_experiment.mz_params[:, 0]
targeted_mz = np.delete(targeted_mz, 3)
tolerance = 31
roi_list = lcms.make_roi(simulated_experiment, tolerance=tolerance,
max_missing=0, min_length=0, min_intensity=0,
multiple_match="reduce", targeted_mz=targeted_mz)
assert len(roi_list) == (simulated_experiment.mz_params.shape[0] - 1)
def test_make_roi_multiple_match_reduce_custom_mz_reduce(simulated_experiment):
roi_list = lcms.make_roi(simulated_experiment, tolerance=0.005,
max_missing=0, min_length=0, min_intensity=0,
multiple_match="reduce", mz_reduce=np.median)
assert len(roi_list) == simulated_experiment.mz_params.shape[0]
def test_make_roi_multiple_match_reduce_custom_sp_reduce(simulated_experiment):
sp_reduce = lambda x: 1
roi_list = lcms.make_roi(simulated_experiment, tolerance=0.005,
max_missing=0, min_length=0, min_intensity=0,
multiple_match="reduce", sp_reduce=sp_reduce)
assert len(roi_list) == simulated_experiment.mz_params.shape[0]
def test_make_roi_invalid_multiple_match(simulated_experiment):
with pytest.raises(ValueError):
lcms.make_roi(simulated_experiment, tolerance=0.005, max_missing=0,
min_length=0, min_intensity=0,
multiple_match="invalid-value")
# test accumulate spectra
def test_accumulate_spectra_centroid(simulated_experiment):
n_sp = simulated_experiment.getNrSpectra()
sp = lcms.accumulate_spectra_centroid(simulated_experiment, 0, n_sp - 1,
tolerance=0.005)
assert sp.mz.size == simulated_experiment.mz_params.shape[0]
def test_accumulate_spectra_centroid_subtract_left(simulated_experiment):
sp = lcms.accumulate_spectra_centroid(simulated_experiment, 70, 90,
subtract_left=20, tolerance=0.005)
# only two peaks at rt 80 should be present
assert sp.mz.size == 2
# test default parameter functions
def test_get_lc_filter_params_uplc():
lcms.get_lc_filter_peak_params("uplc")
assert True
def test_get_lc_filter_params_hplc():
lcms.get_lc_filter_peak_params("hplc")
assert True
def test_get_lc_filter_params_invalid_mode():
with pytest.raises(ValueError):
lcms.get_lc_filter_peak_params("invalid-mode")
@pytest.mark.parametrize("separation,instrument",
list(product(["hplc", "uplc"], ["qtof", "orbitrap"])))
def test_get_roi_params(separation, instrument):
lcms.get_roi_params(separation, instrument)
assert True
def test_get_roi_params_bad_separation():
with pytest.raises(ValueError):
lcms.get_roi_params("invalid-separation", "qtof")
def test_get_roi_params_bad_ms_mode():
with pytest.raises(ValueError):
lcms.get_roi_params("uplc", "invalid-ms-mode")
|
nilq/baby-python
|
python
|
from flask_login import UserMixin
from datetime import datetime
from sqlalchemy import ForeignKey
from sqlalchemy.orm import backref
from app.extensions import db
ACCESS = {
'guest': 0,
'user': 1,
'admin': 2
}
class User(UserMixin, db.Model):
id = db.Column(db.Integer, primary_key=True, unique=True)
username = db.Column(db.String(64), index=True, unique=True)
email = db.Column(db.String(100), index=True, unique=True)
password = db.Column(db.String(50))
profile = db.relationship('Profile', back_populates='user', cascade='all,delete', uselist=False)
fantasy_team = db.relationship('FantasyTeam', back_populates='user', cascade='all,delete', uselist=False)
is_admin = db.Column(db.Boolean, default=0)
def __repr__(self):
return '<User {}>'.format(self.username)
class Profile(db.Model):
id = db.Column(db.Integer, primary_key=True)
first_name = db.Column(db.String(255))
last_name = db.Column(db.String(255))
timestamp = db.Column(db.DateTime, index=True, default=datetime.utcnow)
user_id = db.Column(db.Integer, db.ForeignKey('user.id'))
user = db.relationship('User', back_populates='profile')
def __repr__(self):
        return '<Profile {} {} {}>'.format(self.user_id, self.first_name, self.last_name)
# FANTASYTEAM and PLAYER relationship Many to Many
fantasy_teams_players = db.Table('association',
db.Column('fantasy_team_id', db.Integer, ForeignKey('fantasy_team.id')),
db.Column('player_id', db.Integer, ForeignKey('player.id'))
)
class FantasyTeam(db.Model):
__tablename__ = 'fantasy_team'
id = db.Column(db.Integer, primary_key=True)
players = db.relationship("Player",
secondary=fantasy_teams_players,
back_populates="fantasy_teams")
name = db.Column(db.String(255))
# USER relationship 1 to 1
user_id = db.Column(db.Integer, db.ForeignKey('user.id'))
user = db.relationship('User', back_populates='fantasy_team')
overall_score = db.Column(db.Integer)
# ROUNDSCORE relationship (1) to Many
round_scores = db.relationship('RoundScore', back_populates='fantasy_team', cascade='all,delete')
timestamp = db.Column(db.DateTime, index=True, default=datetime.utcnow)
def __repr__(self):
return '<FantasyTeam {}>'.format(self.name)
class Player(db.Model):
id = db.Column(db.Integer, primary_key=True)
fantasy_teams = db.relationship("FantasyTeam",
secondary=fantasy_teams_players,
back_populates="players")
# GOAL relationship (1) to Many
goals = db.relationship("Goal", back_populates="player", cascade='all,delete')
# TEAM relationship 1 to (Many)
team_id = db.Column(db.Integer, ForeignKey('team.id'))
team = db.relationship("Team", back_populates="players")
number = db.Column(db.Integer)
first_name = db.Column(db.String(255))
last_name = db.Column(db.String(255))
nickname = db.Column(db.String(255))
timestamp = db.Column(db.DateTime, index=True, default=datetime.utcnow)
def __repr__(self):
return '<Player {} {}>'.format(self.id, self.number)
class Goal(db.Model):
id = db.Column(db.Integer, primary_key=True)
# PLAYER relationship 1 to (Many)
player_id = db.Column(db.Integer, ForeignKey('player.id'))
player = db.relationship("Player", back_populates="goals")
# MATCH relationship 1 to (Many)
match_id = db.Column(db.Integer, ForeignKey('match.id'))
match = db.relationship("Match", back_populates="goals")
timestamp = db.Column(db.DateTime, index=True, default=datetime.utcnow)
def __repr__(self):
return '<Goal {}>'.format(self.id)
class Match(db.Model):
id = db.Column(db.Integer, primary_key=True)
match_id = db.Column(db.Integer)
# GOAL relationship (1) to Many
goals = db.relationship("Goal", back_populates="match", cascade='all,delete')
# TEAM relationship 1 to (Many): two foreign keys, team1 and team2
team1_id = db.Column(db.Integer, ForeignKey("team.id"))
team2_id = db.Column(db.Integer, ForeignKey("team.id"))
team1 = db.relationship("Team", foreign_keys="Match.team1_id", back_populates="matches1")
team2 = db.relationship("Team", foreign_keys="Match.team2_id", back_populates="matches2")
timestamp = db.Column(db.DateTime, index=True, default=datetime.utcnow)
def __repr__(self):
return '<Match {}>'.format(self.id)
class Team(db.Model):
id = db.Column(db.Integer, primary_key=True)
name = db.Column(db.String(255))
# PLAYER relationship (1) to Many
players = db.relationship("Player", back_populates="team", cascade='all,delete')
# MATCH relationship 1 to (Many)
matches1 = db.relationship("Match", foreign_keys="Match.team1_id", back_populates="team1")
matches2 = db.relationship("Match", foreign_keys="Match.team2_id", back_populates="team2")
timestamp = db.Column(db.DateTime, index=True, default=datetime.utcnow)
def __repr__(self):
return '<Team {} {}>'.format(self.id, self.name)
class RoundScore(db.Model):
__tablename__ = 'round_score'
id = db.Column(db.Integer, primary_key=True)
number = db.Column(db.Integer)
name = db.Column(db.String(255))
round_score = db.Column(db.Integer)
# FANTASYTEAM relationship 1 to (Many)
fantasy_team_id = db.Column(db.Integer, db.ForeignKey('fantasy_team.id'))
fantasy_team = db.relationship('FantasyTeam', back_populates='round_scores')
timestamp = db.Column(db.DateTime, index=True, default=datetime.utcnow)
def __repr__(self):
return '<RoundScore {}>'.format(self.fantasy_team_id)
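# Illustrative sketch (not part of the original module): creating linked records.
# Assumes an application context and an initialized `db`; all names are hypothetical.
#
#     user = User(username='alice', email='alice@example.com', password='...')
#     user.profile = Profile(first_name='Alice', last_name='Doe')
#     db.session.add(user)
#     db.session.commit()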
|
nilq/baby-python
|
python
|
# encoding: utf-8
"""
Enumerations related to tables in WordprocessingML files
"""
from __future__ import (
absolute_import, division, print_function, unicode_literals
)
from docxx.enum.base import (
alias, Enumeration, EnumMember, XmlEnumeration, XmlMappedEnumMember
)
@alias('WD_ALIGN_VERTICAL')
class WD_CELL_VERTICAL_ALIGNMENT(XmlEnumeration):
"""
alias: **WD_ALIGN_VERTICAL**
Specifies the vertical alignment of text in one or more cells of a table.
Example::
from docxx.enum.table import WD_ALIGN_VERTICAL
table = document.add_table(3, 3)
table.cell(0, 0).vertical_alignment = WD_ALIGN_VERTICAL.BOTTOM
"""
__ms_name__ = 'WdCellVerticalAlignment'
__url__ = 'https://msdn.microsoft.com/en-us/library/office/ff193345.aspx'
__members__ = (
XmlMappedEnumMember(
'TOP', 0, 'top', 'Text is aligned to the top border of the cell.'
),
XmlMappedEnumMember(
'CENTER', 1, 'center', 'Text is aligned to the center of the cel'
'l.'
),
XmlMappedEnumMember(
'BOTTOM', 3, 'bottom', 'Text is aligned to the bottom border of '
'the cell.'
),
XmlMappedEnumMember(
'BOTH', 101, 'both', 'This is an option in the OpenXml spec, but'
' not in Word itself. It\'s not clear what Word behavior this se'
'tting produces. If you find out please let us know and we\'ll u'
'pdate this documentation. Otherwise, probably best to avoid thi'
's option.'
),
)
@alias('WD_ROW_HEIGHT')
class WD_ROW_HEIGHT_RULE(XmlEnumeration):
"""
alias: **WD_ROW_HEIGHT**
Specifies the rule for determining the height of a table row
Example::
from docxx.enum.table import WD_ROW_HEIGHT_RULE
table = document.add_table(3, 3)
table.rows[0].height_rule = WD_ROW_HEIGHT_RULE.EXACTLY
"""
__ms_name__ = "WdRowHeightRule"
__url__ = 'https://msdn.microsoft.com/en-us/library/office/ff193620.aspx'
__members__ = (
XmlMappedEnumMember(
'AUTO', 0, 'auto', 'The row height is adjusted to accommodate th'
'e tallest value in the row.'
),
XmlMappedEnumMember(
'AT_LEAST', 1, 'atLeast', 'The row height is at least a minimum '
'specified value.'
),
XmlMappedEnumMember(
'EXACTLY', 2, 'exact', 'The row height is an exact value.'
),
)
class WD_TABLE_ALIGNMENT(XmlEnumeration):
"""
Specifies table justification type.
Example::
from docxx.enum.table import WD_TABLE_ALIGNMENT
table = document.add_table(3, 3)
table.alignment = WD_TABLE_ALIGNMENT.CENTER
"""
__ms_name__ = 'WdRowAlignment'
__url__ = 'http://office.microsoft.com/en-us/word-help/HV080607259.aspx'
__members__ = (
XmlMappedEnumMember(
'LEFT', 0, 'left', 'Left-aligned.'
),
XmlMappedEnumMember(
'CENTER', 1, 'center', 'Center-aligned.'
),
XmlMappedEnumMember(
'RIGHT', 2, 'right', 'Right-aligned.'
),
)
class WD_TABLE_DIRECTION(Enumeration):
"""
Specifies the direction in which an application orders cells in the
specified table or row.
Example::
from docxx.enum.table import WD_TABLE_DIRECTION
table = document.add_table(3, 3)
table.direction = WD_TABLE_DIRECTION.RTL
"""
__ms_name__ = 'WdTableDirection'
__url__ = 'http://msdn.microsoft.com/en-us/library/ff835141.aspx'
__members__ = (
EnumMember(
'LTR', 0, 'The table or row is arranged with the first column '
'in the leftmost position.'
),
EnumMember(
'RTL', 1, 'The table or row is arranged with the first column '
'in the rightmost position.'
),
)
|
nilq/baby-python
|
python
|
# Copyright 2011 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from novaclient import api_versions
from novaclient.tests.unit import utils
from novaclient.tests.unit.v2 import fakes
class QuotaClassSetsTest(utils.TestCase):
def setUp(self):
super(QuotaClassSetsTest, self).setUp()
self.cs = fakes.FakeClient(api_versions.APIVersion("2.0"))
def test_class_quotas_get(self):
class_name = 'test'
q = self.cs.quota_classes.get(class_name)
self.assert_request_id(q, fakes.FAKE_REQUEST_ID_LIST)
self.cs.assert_called('GET', '/os-quota-class-sets/%s' % class_name)
return q
def test_update_quota(self):
q = self.cs.quota_classes.get('test')
self.assert_request_id(q, fakes.FAKE_REQUEST_ID_LIST)
q.update(cores=2)
self.cs.assert_called('PUT', '/os-quota-class-sets/test')
return q
def test_refresh_quota(self):
q = self.cs.quota_classes.get('test')
q2 = self.cs.quota_classes.get('test')
self.assertEqual(q.cores, q2.cores)
q2.cores = 0
self.assertNotEqual(q.cores, q2.cores)
q2.get()
self.assertEqual(q.cores, q2.cores)
class QuotaClassSetsTest2_50(QuotaClassSetsTest):
"""Tests the quota classes API binding using the 2.50 microversion."""
api_version = '2.50'
invalid_resources = ['floating_ips', 'fixed_ips', 'networks',
'security_groups', 'security_group_rules']
def setUp(self):
super(QuotaClassSetsTest2_50, self).setUp()
self.cs = fakes.FakeClient(api_versions.APIVersion(self.api_version))
def test_class_quotas_get(self):
"""Tests that network-related resources aren't in a 2.50 response
and server group related resources are in the response.
"""
q = super(QuotaClassSetsTest2_50, self).test_class_quotas_get()
for invalid_resource in self.invalid_resources:
self.assertFalse(hasattr(q, invalid_resource),
'%s should not be in %s' % (invalid_resource, q))
# Also make sure server_groups and server_group_members are in the
# response.
for valid_resource in ('server_groups', 'server_group_members'):
self.assertTrue(hasattr(q, valid_resource),
'%s should be in %s' % (valid_resource, q))
def test_update_quota(self):
"""Tests that network-related resources aren't in a 2.50 response
and server group related resources are in the response.
"""
q = super(QuotaClassSetsTest2_50, self).test_update_quota()
for invalid_resource in self.invalid_resources:
self.assertFalse(hasattr(q, invalid_resource),
'%s should not be in %s' % (invalid_resource, q))
# Also make sure server_groups and server_group_members are in the
# response.
for valid_resource in ('server_groups', 'server_group_members'):
self.assertTrue(hasattr(q, valid_resource),
'%s should be in %s' % (valid_resource, q))
def test_update_quota_invalid_resources(self):
"""Tests trying to update quota class values for invalid resources.
This will fail with TypeError because the network-related resource
kwargs aren't defined.
"""
q = self.cs.quota_classes.get('test')
self.assertRaises(TypeError, q.update, floating_ips=1)
self.assertRaises(TypeError, q.update, fixed_ips=1)
self.assertRaises(TypeError, q.update, security_groups=1)
self.assertRaises(TypeError, q.update, security_group_rules=1)
self.assertRaises(TypeError, q.update, networks=1)
return q
class QuotaClassSetsTest2_57(QuotaClassSetsTest2_50):
"""Tests the quota classes API binding using the 2.57 microversion."""
api_version = '2.57'
def setUp(self):
super(QuotaClassSetsTest2_57, self).setUp()
self.invalid_resources.extend(['injected_files',
'injected_file_content_bytes',
'injected_file_path_bytes'])
def test_update_quota_invalid_resources(self):
"""Tests trying to update quota class values for invalid resources.
This will fail with TypeError because the file-related resource
kwargs aren't defined.
"""
q = super(
QuotaClassSetsTest2_57, self).test_update_quota_invalid_resources()
self.assertRaises(TypeError, q.update, injected_files=1)
self.assertRaises(TypeError, q.update, injected_file_content_bytes=1)
self.assertRaises(TypeError, q.update, injected_file_path_bytes=1)
|
nilq/baby-python
|
python
|
# Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT!
import grpc
import nlm_pb2 as nlm__pb2
class NLMStub(object):
# missing associated documentation comment in .proto file
pass
def __init__(self, channel):
"""Constructor.
Args:
channel: A grpc.Channel.
"""
self.StrRecall = channel.unary_unary(
'/nlm.NLM/StrRecall',
request_serializer=nlm__pb2.RawString.SerializeToString,
response_deserializer=nlm__pb2.GraphOutput.FromString,
)
self.NLURecall = channel.unary_unary(
'/nlm.NLM/NLURecall',
request_serializer=nlm__pb2.NLMInput.SerializeToString,
response_deserializer=nlm__pb2.GraphOutput.FromString,
)
self.NodeRecall = channel.unary_unary(
'/nlm.NLM/NodeRecall',
request_serializer=nlm__pb2.GraphNode.SerializeToString,
response_deserializer=nlm__pb2.GraphNode.FromString,
)
self.RelationRecall = channel.unary_unary(
'/nlm.NLM/RelationRecall',
request_serializer=nlm__pb2.GraphRelation.SerializeToString,
response_deserializer=nlm__pb2.GraphRelation.FromString,
)
class NLMServicer(object):
# missing associated documentation comment in .proto file
pass
def StrRecall(self, request, context):
# missing associated documentation comment in .proto file
pass
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def NLURecall(self, request, context):
# missing associated documentation comment in .proto file
pass
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def NodeRecall(self, request, context):
# missing associated documentation comment in .proto file
pass
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def RelationRecall(self, request, context):
# missing associated documentation comment in .proto file
pass
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def add_NLMServicer_to_server(servicer, server):
rpc_method_handlers = {
'StrRecall': grpc.unary_unary_rpc_method_handler(
servicer.StrRecall,
request_deserializer=nlm__pb2.RawString.FromString,
response_serializer=nlm__pb2.GraphOutput.SerializeToString,
),
'NLURecall': grpc.unary_unary_rpc_method_handler(
servicer.NLURecall,
request_deserializer=nlm__pb2.NLMInput.FromString,
response_serializer=nlm__pb2.GraphOutput.SerializeToString,
),
'NodeRecall': grpc.unary_unary_rpc_method_handler(
servicer.NodeRecall,
request_deserializer=nlm__pb2.GraphNode.FromString,
response_serializer=nlm__pb2.GraphNode.SerializeToString,
),
'RelationRecall': grpc.unary_unary_rpc_method_handler(
servicer.RelationRecall,
request_deserializer=nlm__pb2.GraphRelation.FromString,
response_serializer=nlm__pb2.GraphRelation.SerializeToString,
),
}
generic_handler = grpc.method_handlers_generic_handler(
'nlm.NLM', rpc_method_handlers)
server.add_generic_rpc_handlers((generic_handler,))
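# Illustrative sketch (not generated code): serving NLM with a concrete servicer.
# Assumes a hypothetical subclass `MyNLM(NLMServicer)` implementing the four methods.
#
#     from concurrent import futures
#     server = grpc.server(futures.ThreadPoolExecutor(max_workers=4))
#     add_NLMServicer_to_server(MyNLM(), server)
#     server.add_insecure_port('[::]:50051')
#     server.start()
#     server.wait_for_termination()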
|
nilq/baby-python
|
python
|
# -*- coding: utf-8 -*-
# Copyright (c) 2020, PibiCo and contributors
# For license information, please see license.txt
from __future__ import unicode_literals
import frappe
from frappe.model.document import Document
import datetime, time
from frappe.utils import cstr
from frappe import msgprint, _
from frappe.core.doctype.sms_settings.sms_settings import send_sms
from atvirtual.atvirtual.doctype.telegram_settings.telegram_settings import send_telegram
import paho.mqtt.client as mqtt
import os, ssl, urllib, json
from frappe.utils.password import get_decrypted_password
class pibiMessage(Document):
def validate(self):
if self.message_type == "IoT" and not self.std_message:
frappe.throw(_("Please fill the message content"))
if self.message_type == "IoT":
if not self.all_places and not self.all_roles:
if len(self.location_table) == 0 and len(self.device_table) == 0 and len(self.recipient_table) == 0 and len(self.participant_table) == 0:
frappe.throw(_("Please choose any destination recipient"))
def before_save(self):
if self.message_type == "IoT":
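# frappe.get_doc raises frappe.DoesNotExistError, so this lookup also validates the link.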
std_message = frappe.get_doc("Standard Message", self.std_message)
def before_submit(self):
## Prepare recipients list
sms_list = []
telegram_list = []
mqtt_list = []
email_list = []
str_attach = ''
recipients = []
str_message = ""
## Send E-mails
if self.message_type == "E-mail":
## Read message body
str_message = self.email_body
## Read Recipients Table
recipient_list = self.recipient_item
if len(recipient_list) > 0:
for item in recipient_list:
recipients.append(item.participant_email_id)
## Read and prepare message with Attachments
if len(self.message_item) > 0:
for idx, row in enumerate(self.message_item):
if "http" in row.attachment:
str_attach = str_attach + '<a href="' + row.attachment + '">Anexo ' +str(idx+1) + ': ' + row.description + '</a><br>'
else:
str_attach = str_attach + '<a href="' + frappe.utils.get_url() + urllib.parse.quote(row.attachment) + '">Anexo ' +str(idx+1) + ': ' + row.description + '</a><br>'
str_message = str_message + "<p>Con archivos anexos:</p><p>" + str_attach + "</p>"
## Finally Send message by Email
email_args = {
"sender": self.from_email_account,
"recipients": recipients,
"message": str_message,
"subject": self.subject,
"reference_doctype": self.doctype,
"reference_name": self.name
}
frappe.sendmail(**email_args)
## Send IoT messages
if self.message_type == "IoT":
## Read main message
dict_message = json.loads(self.message_text)
if "message" in dict_message:
str_message = dict_message["message"]["text"]
## Read and prepare message with attachments
if len(self.message_item) > 0 and str_message != '':
for idx, row in enumerate(self.message_item):
if "http" in row.attachment:
str_attach = str_attach + 'Anexo ' + str(idx+1) + ': ' + row.description + ' @ ' + row.attachment + '\n'
else:
str_attach = str_attach + 'Anexo ' + str(idx+1) + ': ' + row.description + ' @ ' + frappe.utils.get_url() + urllib.parse.quote(row.attachment) + '\n'
str_message = str_message + "\nCon archivos anexos:\n" + str_attach
dict_message["message"]["text"] = str_message
## Prepare location recipients
if len(self.location_table) > 0 and not self.all_places:
for loc in self.location_table:
""" Get from database devices assigned to locations in session """
locdev = frappe.db.sql("""SELECT device FROM `tabPlace Item` WHERE parent=%s AND place=%s and docstatus < 2""", (self.course, loc.place), True)
if len(locdev) > 0:
for plc in locdev:
if plc.device is not None:
sms_list, mqtt_list, telegram_list, email_list = append_recipients(plc.device, sms_list, mqtt_list, telegram_list, email_list)
## Prepare device recipients (when not all places are selected)
if len(self.device_table) > 0 and not self.all_places:
for dev in self.device_table:
if dev.device is not None:
sms_list, mqtt_list, telegram_list, email_list = append_recipients(dev.device, sms_list, mqtt_list, telegram_list, email_list)
## Prepare all devices
if self.all_places:
""" Get from database devices in session """
locdev = frappe.db.sql("""SELECT device FROM `tabPlace Item` WHERE parent=%s and docstatus < 2""", (self.course), True)
if len(locdev) > 0:
for plc in locdev:
if plc.device is not None:
sms_list, mqtt_list, telegram_list, email_list = append_recipients(plc.device, sms_list, mqtt_list, telegram_list, email_list)
""" Get from database devices in session in roles table """
roldev = frappe.db.sql("""SELECT device FROM `tabSession Role Item` WHERE parent=%s and docstatus < 2""", (self.course), True)
if len(roldev) > 0:
for itm in roldev:
if itm.device is not None:
sms_list, mqtt_list, telegram_list, email_list = append_recipients(itm.device, sms_list, mqtt_list, telegram_list, email_list)
## Prepare role recipients
if len(self.recipient_table) > 0 and not self.all_roles:
for rol in self.recipient_table:
frappe.msgprint(rol.participant_role)
""" Get from database devices ported in session """
roldev = frappe.db.sql("""SELECT device FROM `tabSession Role Item` WHERE parent=%s AND participant_role=%s and docstatus < 2""", (self.course, rol.participant_role), True)
if len(roldev) > 0:
for itm in roldev:
if itm.device is not None:
sms_list, mqtt_list, telegram_list, email_list = append_recipients(itm.device, sms_list, mqtt_list, telegram_list, email_list)
## Prepare participants
if len(self.participant_table) > 0 and not self.all_roles:
for per in self.participant_table:
frappe.msgprint(per.participant)
""" Get from database devices ported in session """
perdev = frappe.db.sql("""SELECT device FROM `tabSession Role Item` WHERE parent=%s AND participant=%s and docstatus < 2""", (self.course, per.participant), True)
if len(perdev) > 0:
for itm in perdev:
if itm.device is not None:
sms_list, mqtt_list, telegram_list, email_list = append_recipients(itm.device, sms_list, mqtt_list, telegram_list, email_list)
## Prepare all roles
if self.all_roles:
""" Get from database devices in session in roles table """
roldev = frappe.db.sql("""SELECT device FROM `tabSession Role Item` WHERE parent=%s and docstatus < 2""", (self.course), True)
if len(roldev) > 0:
for itm in roldev:
if itm.device is not None:
sms_list, mqtt_list, telegram_list, email_list = append_recipients(itm.device, sms_list, mqtt_list, telegram_list, email_list)
## Send message by MQTT
if len(mqtt_list) > 0:
path = frappe.utils.get_bench_path()
site_name = frappe.utils.get_url().replace("http://","").replace("https://","")
if ":" in site_name:
pos = site_name.find(":")
site_name = site_name[:pos]
client = frappe.get_doc('MQTT Settings', 'MQTT Settings')
server = client.broker_gateway
port = client.port
user = client.user
client.secret = get_decrypted_password('MQTT Settings', 'MQTT Settings', 'secret', False)
secret = client.secret
do_ssl = client.is_ssl
# connect to MQTT Broker to Publish Message
pid = os.getpid()
client_id = '{}:{}'.format('client', str(pid))
try:
backend = mqtt.Client(client_id=client_id, clean_session=True)
backend.username_pw_set(user, password=secret)
if do_ssl == True:
ca = os.path.join(path, "sites", site_name, frappe.get_site_path('private', 'files', client.ca)[1:])
client_crt = os.path.join(path, "sites", site_name, frappe.get_site_path('private', 'files', client.client_crt)[1:])
client_key = os.path.join(path, "sites", site_name, frappe.get_site_path('private', 'files', client.client_key)[1:])
port_ssl = client.ssl_port
## Prepare mqtt
backend.tls_set(ca_certs=ca, certfile=client_crt, keyfile=client_key, cert_reqs=ssl.CERT_REQUIRED, ciphers=None)
backend.tls_insecure_set(False)
time.sleep(.5)
backend.connect(server, port_ssl)
else:
backend.connect(server, port)
payload = frappe.safe_decode(json.dumps(dict_message)).encode('utf-8')
for dev in mqtt_list:
mqtt_topic = str(dev) + "/display/text"
backend.publish(mqtt_topic, cstr(payload))
backend.disconnect()
except:
frappe.msgprint(_("Error in MQTT Broker sending to ", str(mqtt_list)))
pass
## Send message by Email
if len(email_list) > 0 and "email" in dict_message:
try:
email_args = {
"sender": dict_message['email']['email_account'],
"recipients": email_list,
"message": str_message,
"subject": dict_message['email']['subject'],
"reference_doctype": self.doctype,
"reference_name": self.name
}
frappe.sendmail(**email_args)
except:
frappe.throw(_("Error in sending mail"))
pass
## Send message by Telegram
if len(telegram_list) > 0 and self.message_type == "IoT" and str_message != "":
try:
send_telegram(telegram_list, cstr(str_message))
except:
frappe.throw(_("Error in sending Telegram"))
pass
## Send message by SMS
if len(sms_list) > 0 and self.message_type == "IoT" and str_message != "":
try:
send_sms(sms_list, cstr(str_message))
except:
frappe.throw(_("Error in sending SMS"))
pass
## Final Message
frappe.msgprint(_("Actions Completed and Messages Sent"))
def append_recipients(device, sms_list, mqtt_list, telegram_list, email_list):
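# Appends the device's per-channel identifiers (SMS number, MQTT name, e-mail, Telegram)
# when the device is enabled, connected, and has alerts active.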
doc = frappe.get_doc('Device', device)
if not doc.disabled:
if doc.is_connected and doc.alerts_active:
if doc.by_sms:
if doc.sms_number != '':
if not doc.sms_number in sms_list:
sms_list.append(doc.sms_number)
#frappe.msgprint(_("Message by sms to ") + str(doc.sms_number))
if doc.by_text:
if doc.device_name != '' and doc.by_mqtt and not doc.device_name in mqtt_list:
mqtt_list.append(doc.device_name)
#frappe.msgprint(_("Message by mqtt to ") + str(doc.device_name))
if doc.by_email and doc.device_email != '' and not doc.device_email in email_list:
email_list.append(doc.device_email)
if doc.by_telegram:
if doc.telegram_number != '':
if not doc.telegram_number in telegram_list:
telegram_list.append(doc.telegram_number)
#frappe.msgprint(_("Message by sms to ") + str(doc.telegram_number))
return sms_list, mqtt_list, telegram_list, email_list
|
nilq/baby-python
|
python
|
from __future__ import division
import json
import time
import serial as _serial
import platform
import sys
if sys.version_info >= (3, 0):
import queue
else:
import Queue as queue
from threading import Event, Thread
from serial.tools.list_ports import comports
from . import IOHandler
try:
JSONDecodeError = json.decoder.JSONDecodeError
except AttributeError:
JSONDecodeError = ValueError
class Serial(IOHandler):
poll_frequency = 200
@classmethod
def available_hosts(cls):
devices = comports(include_links=True)
return [d.device for d in devices]
@classmethod
def is_host_compatible(cls, host):
return host in cls.available_hosts()
def __init__(self, host, baudrate=1000000):
self._serial = _serial.Serial(host, baudrate)
self._serial.flush()
self._msg = queue.Queue(100)
self._running = True
self._poll_loop = Thread(target=self._poll)
self._poll_loop.daemon = True
self._poll_loop.start()
def is_ready(self):
if self._serial.in_waiting == 0:
return False
try:
self.read()
return True
except (UnicodeDecodeError, JSONDecodeError):
return False
def recv(self):
return self._msg.get()
def write(self, data):
self._serial.write(data + b'\r\n')
#print(data + '\r'.encode())
def close(self):
self._running = False
self._poll_loop.join()
self._serial.close()
def _poll(self):
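# Background reader: accumulate raw bytes, extract newline-terminated JSON lines,
# and push them onto a bounded queue (the oldest message is dropped when full).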
def extract_line(s):
j = s.find(b'\n')
if j == -1:
return b'', s
# The start of the serial data is sometimes corrupted; drop everything
# before the first '{'
x = s.find(b'{')
if x == -1:
return b'', s[j + 1:]
return s[x:j], s[j + 1:]
period = 1 / self.poll_frequency
buff = b''
while self._running:
to_read = self._serial.in_waiting
if to_read == 0:
time.sleep(period)
continue
s = self._serial.read(to_read)
buff = buff + s
while self._running:
line, buff = extract_line(buff)
if not len(line):
break
if self._msg.full():
self._msg.get()
self._msg.put(line)
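# Illustrative usage sketch (host path is hypothetical; assumes a device emitting
# newline-terminated JSON):
#
#     io = Serial('/dev/ttyUSB0')
#     if io.is_ready():
#         print(io.recv())
#     io.close()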
|
nilq/baby-python
|
python
|
#!/usr/bin/env python
from distutils.core import setup
setup(
name = 'elastico',
version = '0.6.3',
description = "Elasticsearch Companion - a commandline tool",
author = "Kay-Uwe (Kiwi) Lorenz",
author_email = "kiwi@franka.dyndns.org",
url = 'https://github.com/klorenz/python-elastico',
license = "MIT",
install_requires=[
'elasticsearch',
'PyYAML',
'pyaml',
'requests',
'argdeco',
'markdown',
'jinja2',
'pytz',
'python-dateutil',
],
packages=[
'elastico', 'elastico.cli'
],
# package_data = {
# elastico: ['subfolder/*.x', ...]
# }
# include_package_data = True
entry_points={
'console_scripts': [
'elastico = elastico.cli:main',
]
}
)
|
nilq/baby-python
|
python
|
from flask_wtf import FlaskForm
from wtforms import StringField, PasswordField, BooleanField, SubmitField
from wtforms.validators import Required,Email, EqualTo
from ..models import User
from wtforms import ValidationError
class RegisterationForm(FlaskForm):
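# NOTE: the class name misspells "Registration"; it is kept as-is because other modules may import it by this name.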
email = StringField('Enter Your email Address', validators= [Required(), Email()])
username = StringField('Enter your username', validators=[Required()])
password = PasswordField('Password', validators=[Required(), EqualTo('password_confirm', message='passwords must match')])
password_confirm = PasswordField('Confirm Passwords', validators=[Required()])
submit = SubmitField('Sign Up')
def validate_email(self,data_field):
if User.query.filter_by(email =data_field.data).first():
raise ValidationError('There is an account with that email address')
def validate_username(self,data_field):
if User.query.filter_by(username = data_field.data).first():
raise ValidationError('The username is already taken')
class LoginForm(FlaskForm):
email = StringField('Your email Address', validators=[Required(), Email()])
password = PasswordField('Password', validators=[Required()])
remember = BooleanField('Remember me')
submit = SubmitField('Sign In')
|
nilq/baby-python
|
python
|
import pytest
from nengo_os import SimpleProc, ConventScheduler
procs = [  # name, id, arrival1, arrival2, comp_t, needed_cores
SimpleProc("Cifar", 0, 0, 0, 10, 4038),  # single process
SimpleProc("SAR", 1, 0, 0, 100, 3038),  # two procs that cannot run together
SimpleProc("SAR", 2, 0, 0, 100, 3038)
]
@pytest.fixture(params=["SINGLE","TWO_INT","ALL"])
def paper_procs(request):
pl = procs
if request.param == "SINGLE":
return [pl[0]], 1
if request.param == "TWO_INT":
return [pl[1],pl[2]], 2
if request.param == "ALL":
pl[1].arrival = 10
pl[2].arrival = 10
return pl, len(pl)
@pytest.fixture
def create_non_nengo_scheduler(paper_procs):
pl, num = paper_procs
return ConventScheduler(simple_proc_list=pl, mode="RR", time_slice=5), num
class RR_Status:
def __init__(self, time=0):
self.time = time
def test_non_nengo_rr(create_non_nengo_scheduler):
sched,num = create_non_nengo_scheduler
end_time_est = sum(et.needed_time for et in sched.queue.wait_q)
quant = sched.time_slice
running_proc = 2
waiting_proc = 1
rtx = []
for i in range(end_time_est):
sched.scheduler_run_tick()
if num == 1:
if i == 0:
assert(sched.running_proc_size == 4038)
elif num == 2:
if i == 10:
print(i)
assert(sched.running_proc_size == 3038)
if i % sched.time_slice == 0:
#assert(sched.running_proc_list[0] == running_proc)
r = running_proc
running_proc = waiting_proc
waiting_proc = r
rtx.append(sched.running_proc_list[0])
elif i > 200:
assert(sched.waiting_proc_size < 1)
print(rtx)
def test_load_json():
from pathlib import Path
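# NOTE: machine-specific absolute path; point this at your local nemo-codes checkout.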
model_data_file = Path("/Users/plaggm/dev/nemo-codes/config/paper_models.json")
sched_type = "RR"
rr_ts = 100
scheduler = ConventScheduler(mode=sched_type, total_cores=4096, time_slice=rr_ts,
proc_js_file=str(model_data_file.absolute()))
|
nilq/baby-python
|
python
|
from daq_server import DAQServer
import asyncio
server = DAQServer()
event_loop = asyncio.get_event_loop()
async def heartbeat():
while True:
print('lub-dub')
await asyncio.sleep(10)
def shutdown(server):
print('shutdown:')
# for controller in controller_list:
# # print(sensor)
# controller.stop()
server.shutdown()
task = asyncio.ensure_future(heartbeat())
tasks = asyncio.Task.all_tasks()
for t in tasks:
# print(t)
t.cancel()
print("Tasks canceled")
asyncio.get_event_loop().stop()
task = asyncio.ensure_future(heartbeat())
task_list = asyncio.Task.all_tasks()
try:
event_loop.run_until_complete(asyncio.wait(task_list))
except KeyboardInterrupt:
print('closing client')
shutdown(server)
event_loop.run_forever()
finally:
print('closing event loop')
event_loop.close()
|
nilq/baby-python
|
python
|
import pathlib
import numpy as np
import simscale_eba.HourlyContinuous as hc
epw = hc.HourlyContinuous()
# Put any path here
path = r'E:\Current Cases\SimScale Objects\examples\epw_to_stat\USA_MA_Boston-Logan.Intl.AP.725090_TMYx.2004-2018.epw'
epw.import_epw(pathlib.Path(path))
weather_stats = hc.WeatherStatistics()
weather_stats.set_directions(np.arange(0, 360, 10))
weather_stats.set_speeds(np.arange(0.5, 16, 1))
weather_stats.set_hourly_continuous(epw)
weather_stats.to_stat()
weather_stats.plot_cumulative_distributions()
|
nilq/baby-python
|
python
|