hexsha stringlengths 40 40 | size int64 4 1.02M | ext stringclasses 8
values | lang stringclasses 1
value | max_stars_repo_path stringlengths 4 209 | max_stars_repo_name stringlengths 5 121 | max_stars_repo_head_hexsha stringlengths 40 40 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 4 209 | max_issues_repo_name stringlengths 5 121 | max_issues_repo_head_hexsha stringlengths 40 40 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 67k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 4 209 | max_forks_repo_name stringlengths 5 121 | max_forks_repo_head_hexsha stringlengths 40 40 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 4 1.02M | avg_line_length float64 1.07 66.1k | max_line_length int64 4 266k | alphanum_fraction float64 0.01 1 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
1e1bbe7cdaf4c0f82311fc2983215d16948afb65 | 1,890 | py | Python | reviews/amazon.py | BraunPhilipp/sentiment-analyzer | de1528c924b7015bafda56196b264523b64dc7c1 | [
"MIT"
] | 4 | 2016-09-24T22:09:49.000Z | 2017-05-17T12:51:48.000Z | reviews/amazon.py | BraunPhilipp/sentiment-analyzer | de1528c924b7015bafda56196b264523b64dc7c1 | [
"MIT"
] | 2 | 2016-11-29T05:50:01.000Z | 2021-02-13T18:07:20.000Z | reviews/amazon.py | BraunPhilipp/sentiment-analyzer | de1528c924b7015bafda56196b264523b64dc7c1 | [
"MIT"
] | null | null | null | import string
import numpy as np
import re
import urllib.request
from bs4 import BeautifulSoup
import time
import random
import string
from logger import logger
class amazon:
    """Scrape Amazon search results and collect review titles as labelled samples.

    After construction, ``self.data`` holds dicts of the form
    ``{'text': <review title>, 'score': 0 or 1, 'category': 'AMAZON/EN'}``.
    """

    def __init__(self, query=""):
        """Store the query and immediately run the scrape (fills ``self.data``)."""
        self.query = query
        self.data = []
        # Internal Function
        self.search()

    def search(self):
        """Search Amazon for ``self.query`` and harvest review titles.

        Returns ``self.data`` on success; returns ``None`` (after logging)
        when a network or parsing step fails.
        """
        if self.query == "":
            # No query given: use a random 3-letter string to get arbitrary results.
            self.query = ''.join(random.choice(string.ascii_lowercase) for _ in range(3))
        try:
            # Fetch the search-result page for the query.
            page = urllib.request.urlopen(
                "http://www.amazon.com/s/ref=nb_sb_noss?field-keywords=" + self.query,
                timeout=5)
            soup = BeautifulSoup(page, "html.parser")
            products = soup.find_all('li', class_='s-result-item celwidget')

            link_list = []
            for product in products:
                bar = product.find('div', class_='a-row a-spacing-mini')
                if bar is None:
                    # Some result tiles lack the rating bar; skip them instead of
                    # letting an AttributeError abort the whole scrape via the
                    # broad except below.
                    continue
                rating = bar.find('a', class_='a-size-small a-link-normal a-text-normal')
                # 'is not None' instead of '!= None'; the length check keeps only
                # short review-count texts (same heuristic as before).
                if rating is not None and 1 < len(rating.text) < 5:
                    link_list.append('http://www.amazon.com/product-reviews/'
                                     + rating['href'].split('/')[-1])

            for link in link_list:
                page = urllib.request.urlopen(link, timeout=5)
                soup = BeautifulSoup(page, "html.parser")
                reviews = soup.find_all('div', class_='a-section review')
                for review in reviews:
                    title = review.find(
                        'a',
                        class_='a-size-base a-link-normal review-title a-color-base a-text-bold').text
                    # First character of the star-rating text is parsed as the score.
                    score = int(review.find('span', class_='a-icon-alt').text[0])
                    # Collapse 1-5 stars to a binary label: 1-4 -> 0, 5 -> 1.
                    self.data.append({'text': title,
                                      'score': int((score - 1) / 4),
                                      'category': 'AMAZON/EN'})
            return self.data
        except Exception as e:
            # Best-effort scraper: log and give up (callers receive None).
            logger.log(str(e))
| 33.157895 | 123 | 0.560847 |
feae2655faa2254ebae1bce6c1630ce9e8579405 | 7,333 | py | Python | tests.py | cyberdelia/perlinpinpin | 7793ea283165108156586047ef18231f424357f3 | [
"BSD-3-Clause"
] | 4 | 2015-12-24T11:51:29.000Z | 2019-02-20T02:35:10.000Z | tests.py | cyberdelia/perlinpinpin | 7793ea283165108156586047ef18231f424357f3 | [
"BSD-3-Clause"
] | null | null | null | tests.py | cyberdelia/perlinpinpin | 7793ea283165108156586047ef18231f424357f3 | [
"BSD-3-Clause"
] | null | null | null | # -*- coding: utf-8 -*-
import datetime
import unittest
import perlinpinpin
class TestPerlinpinpin(unittest.TestCase):
    """Exercises perlinpinpin's French natural-language date parser.

    ``datetime.date`` is monkey-patched so that "today" is always
    2009-03-06 (a Friday), which makes all relative-date expectations
    below deterministic.
    """

    def _make_date(self):
        # Build a datetime.date subclass whose today() is pinned to 2009-03-06.
        class MockDate(datetime.date):
            @classmethod
            def today(cls):
                return datetime.date(2009, 3, 6)
        return MockDate

    def setUp(self):
        # Swap in the frozen date class; restored in tearDown.
        self.old_date = datetime.date
        datetime.date = self._make_date()
        self.perlinpinpin = perlinpinpin.perlinpinpin

    def tearDown(self):
        datetime.date = self.old_date

    def test_exception(self):
        # Unparseable or ambiguous phrases must raise ValueError.
        self.assertRaises(ValueError, self.perlinpinpin, u"4")
        self.assertRaises(ValueError, self.perlinpinpin, u"35 Jnaier")
        self.assertRaises(ValueError, self.perlinpinpin, u"Luni prochain")
        self.assertRaises(ValueError, self.perlinpinpin, u"supercalifragilisticexpialidocious")

    def test_now(self):
        self.assertEqual(self.perlinpinpin(u"maintenant"), datetime.date(2009, 3, 6))

    def test_today(self):
        self.assertEqual(self.perlinpinpin(u"aujourd'hui"), datetime.date(2009, 3, 6))
        self.assertEqual(self.perlinpinpin(u"aujourdhui"), datetime.date(2009, 3, 6))

    def test_this_morning(self):
        self.assertEqual(self.perlinpinpin(u"matin"), datetime.date(2009, 3, 6))
        self.assertEqual(self.perlinpinpin(u"ce matin"), datetime.date(2009, 3, 6))

    def test_this_afternoon(self):
        self.assertEqual(self.perlinpinpin(u"apres-midi"), datetime.date(2009, 3, 6))
        self.assertEqual(self.perlinpinpin(u"cet apres-midi"), datetime.date(2009, 3, 6))
        self.assertEqual(self.perlinpinpin(u"apresmidi"), datetime.date(2009, 3, 6))
        self.assertEqual(self.perlinpinpin(u"cet apresmidi"), datetime.date(2009, 3, 6))

    def test_this_evening(self):
        self.assertEqual(self.perlinpinpin(u"soir"), datetime.date(2009, 3, 6))
        self.assertEqual(self.perlinpinpin(u"ce soir"), datetime.date(2009, 3, 6))

    def test_yesterday(self):
        self.assertEqual(self.perlinpinpin(u"hier"), datetime.date(2009, 3, 5))

    def test_before_yesterday(self):
        self.assertEqual(self.perlinpinpin(u"avant-hier"), datetime.date(2009, 3, 4))
        self.assertEqual(self.perlinpinpin(u"avant hier"), datetime.date(2009, 3, 4))

    def test_tomorrow(self):
        self.assertEqual(self.perlinpinpin(u"demain"), datetime.date(2009, 3, 7))

    def test_after_tomorrow(self):
        self.assertEqual(self.perlinpinpin(u"après-demain"), datetime.date(2009, 3, 8))
        self.assertEqual(self.perlinpinpin(u"après demain"), datetime.date(2009, 3, 8))

    def test_last_tuesday(self):
        self.assertEqual(self.perlinpinpin(u"mardi dernier"), datetime.date(2009, 3, 3))

    def test_next_tuesday(self):
        self.assertEqual(self.perlinpinpin(u"mardi prochain"), datetime.date(2009, 3, 10))
        self.assertEqual(self.perlinpinpin(u"mardi suivant"), datetime.date(2009, 3, 10))

    def test_last_week(self):
        self.assertEqual(self.perlinpinpin(u"la semaine dernière"), datetime.date(2009, 2, 27))
        self.assertEqual(self.perlinpinpin(u"semaine dernière"), datetime.date(2009, 2, 27))

    def test_next_week(self):
        self.assertEqual(self.perlinpinpin(u"la semaine prochaine"), datetime.date(2009, 3, 13))
        self.assertEqual(self.perlinpinpin(u"semaine prochaine"), datetime.date(2009, 3, 13))

    def test_day(self):
        # Bare day numbers resolve within the current month (March 2009).
        self.assertEqual(self.perlinpinpin(u"vendredi 4"), datetime.date(2009, 3, 4))
        self.assertEqual(self.perlinpinpin(u"le 4"), datetime.date(2009, 3, 4))
        self.assertEqual(self.perlinpinpin(u"le vendredi 4"), datetime.date(2009, 3, 4))
        self.assertEqual(self.perlinpinpin(u"le 1er"), datetime.date(2009, 3, 1))
        self.assertEqual(self.perlinpinpin(u"le 1 er"), datetime.date(2009, 3, 1))
        self.assertEqual(self.perlinpinpin(u"le 1ier"), datetime.date(2009, 3, 1))

    def test_day_and_month(self):
        # Day + month resolves within the current year (2009), with or
        # without accents in the month name.
        self.assertEqual(self.perlinpinpin(u"4 Avril"), datetime.date(2009, 4, 4))
        self.assertEqual(self.perlinpinpin(u"Mardi 4 Avril"), datetime.date(2009, 4, 4))
        self.assertEqual(self.perlinpinpin(u"le 4 Avril"), datetime.date(2009, 4, 4))
        self.assertEqual(self.perlinpinpin(u"le mardi 4 Avril"), datetime.date(2009, 4, 4))
        self.assertEqual(self.perlinpinpin(u"4 Fevrier"), datetime.date(2009, 2, 4))
        self.assertEqual(self.perlinpinpin(u"4 Février"), datetime.date(2009, 2, 4))
        self.assertEqual(self.perlinpinpin(u"le 1er février"), datetime.date(2009, 2, 1))
        self.assertEqual(self.perlinpinpin(u"le 1 er février"), datetime.date(2009, 2, 1))

    def test_day_and_month_and_year(self):
        self.assertEqual(self.perlinpinpin(u"4 Avril 2008"), datetime.date(2008, 4, 4))
        self.assertEqual(self.perlinpinpin(u"Vendredi 4 Avril 2008"), datetime.date(2008, 4, 4))
        self.assertEqual(self.perlinpinpin(u"le 4 Avril 2008"), datetime.date(2008, 4, 4))
        self.assertEqual(self.perlinpinpin(u"le Mardi 4 Avril 2008"), datetime.date(2008, 4, 4))
        self.assertEqual(self.perlinpinpin(u"le 1er février 2008"), datetime.date(2008, 2, 1))
        self.assertEqual(self.perlinpinpin(u"le 1 er février 2008"), datetime.date(2008, 2, 1))

    def test_european_style(self):
        # DD/MM/YYYY
        self.assertEqual(self.perlinpinpin(u"02/03/2009"), datetime.date(2009, 3, 2))
        self.assertEqual(self.perlinpinpin(u"2/3/2009"), datetime.date(2009, 3, 2))
        self.assertEqual(self.perlinpinpin(u"le 02/03/2009"), datetime.date(2009, 3, 2))

    def test_european_short_style(self):
        # DD/MM/YY
        self.assertEqual(self.perlinpinpin(u"02/03/09"), datetime.date(2009, 3, 2))
        self.assertEqual(self.perlinpinpin(u"2/3/09"), datetime.date(2009, 3, 2))
        self.assertEqual(self.perlinpinpin(u"le 02/03/09"), datetime.date(2009, 3, 2))

    def test_american_style(self):
        # MM/DD/YYYY (recognized when the first field cannot be a day)
        self.assertEqual(self.perlinpinpin(u"01/24/2009"), datetime.date(2009, 1, 24))
        self.assertEqual(self.perlinpinpin(u"1/24/2009"), datetime.date(2009, 1, 24))

    def test_american_short_style(self):
        self.assertEqual(self.perlinpinpin(u"01/24/09"), datetime.date(2009, 1, 24))
        self.assertEqual(self.perlinpinpin(u"1/24/09"), datetime.date(2009, 1, 24))

    def test_iso_style(self):
        # YYYY-MM-DD
        self.assertEqual(self.perlinpinpin(u"2009-01-09"), datetime.date(2009, 1, 9))
        self.assertEqual(self.perlinpinpin(u"2009-1-9"), datetime.date(2009, 1, 9))

    def test_time_ahead(self):
        self.assertEqual(self.perlinpinpin(u"dans 2 jours"), datetime.date(2009, 3, 8))
        self.assertEqual(self.perlinpinpin(u"dans 1 semaine"), datetime.date(2009, 3, 13))
        self.assertEqual(self.perlinpinpin(u"dans 2 semaines"), datetime.date(2009, 3, 20))
        self.assertEqual(self.perlinpinpin(u"dans 1 semaine et 3 jours"), datetime.date(2009, 3, 16))

    def test_time_ago(self):
        self.assertEqual(self.perlinpinpin(u"il y a 2 jours"), datetime.date(2009, 3, 4))
        self.assertEqual(self.perlinpinpin(u"il y a 1 semaine"), datetime.date(2009, 2, 27))
        self.assertEqual(self.perlinpinpin(u"il y a 2 semaines"), datetime.date(2009, 2, 20))
        self.assertEqual(self.perlinpinpin(u"il y a 1 semaine et 3 jours"), datetime.date(2009, 2, 24))
if __name__ == '__main__':
    # Allow running this test module directly.
    unittest.main()
| 51.640845 | 103 | 0.68444 |
c304b0c1a84c70d076717c44c4207b22882a670a | 3,422 | py | Python | Teste03-linux.py | nata27junior/Brincando_com_lotofacil | d56dc1c19f77c6c24dd6bb763a622249ebdb39c4 | [
"Apache-2.0"
] | null | null | null | Teste03-linux.py | nata27junior/Brincando_com_lotofacil | d56dc1c19f77c6c24dd6bb763a622249ebdb39c4 | [
"Apache-2.0"
] | null | null | null | Teste03-linux.py | nata27junior/Brincando_com_lotofacil | d56dc1c19f77c6c24dd6bb763a622249ebdb39c4 | [
"Apache-2.0"
] | null | null | null |
# coding: utf-8

# Converted from a Jupyter notebook (the "# In[n]:" markers are the original
# cell boundaries).  Loads Lotofacil lottery draw data from an Excel file,
# cleans the columns, counts number frequencies, and saves a bar chart plus
# a result spreadsheet.

# windows
# In[1]:
#cd D:\Natanael\Fatec\Curso R\Lotofacil 25-05-18

# linux
# cd /media/natanael/Arquivos/Natanael/Fatec/Curso R/Lotofacil 20-05-18

# In[2]:
import sys
# sys.path.insert (0, \users\natanael\anaconda3\lib\site-packages)
# NOTE(review): machine-specific path hack below; harmless on other machines
# (the directory simply will not exist) but not portable.
sys.path.insert(0, 'c:/users/natanael/anaconda3/lib/site-packages')
from collections import Counter
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
import seaborn as sns
#get_ipython().run_line_magic('matplotlib', 'inline')

# tabela=pd.read_html('D_LOTFAC.HTM')
# tabela
# tabela.to_excel("D_LOTFAC.xlsx", sheet_name='Sheet1', index=False, engine='xlsxwriter')

# In[3]:
# Load the sheet of historical draws.
xlsx = pd.ExcelFile('D_LOTFAC.xlsx')
df = pd.read_excel(xlsx, 'D_LOTFAC')

# delete empty rows (apagando linhas vazias)
# In[4]:
df.dropna(inplace=True)

# show the table (mostrar tabela)
# In[5]:
df.head()

# In[6]:
# NOTE(review): the next three drop() calls lack inplace=True and their
# return values are discarded, so they do NOT modify df (notebook leftovers
# from interactive exploration).
df.drop('Resultado da Lotofácil',axis = 1)

# In[7]:
df.drop(['Unnamed: 1','Resultado da Lotofácil'],axis = 1)

# In[8]:
df.drop(['Unnamed: 23','Unnamed: 24','Unnamed: 25','Unnamed: 26','Unnamed: 27','Unnamed: 28','Unnamed: 29','Unnamed: 30','Unnamed: 31','Unnamed: 32'],axis = 1)

# In[9]:
# From here the drops are real (inplace=True): remove trailing junk columns.
df.drop(['Unnamed: 17','Unnamed: 18','Unnamed: 19','Unnamed: 20','Unnamed: 21','Unnamed: 22','Unnamed: 23','Unnamed: 24','Unnamed: 25','Unnamed: 26','Unnamed: 27','Unnamed: 28','Unnamed: 29','Unnamed: 30','Unnamed: 31','Unnamed: 32'],axis = 1,inplace=True)

# In[10]:
df.head()

# In[11]:
df.drop(['Resultado da Lotofácil','Unnamed: 1'],axis = 1,inplace=True)

# In[12]:
df.head()

# In[13]:
df.rename(columns={" ":"col"},inplace=True)

# In[14]:
df.head()

# describe() computes per-column statistics (count, mean, median, ...) for
# every numeric column of the DataFrame.
# In[15]:
print(df.describe())

# show row 1 (mostra a linha 1)
# In[16]:
print(df.head(1))

# In[17]:
print(df.groupby('Unnamed: 2').count())

# In[18]:
from collections import Counter

# counted elements of the first row (contou elementos da primeira linha)
# In[19]:
# NOTE(review): Counter(df) counts the column *labels*, not the cell values.
print(Counter(df))

# df[['Unnamed: 2','Unnamed: 3','Unnamed: 4','Unnamed: 5','Unnamed: 6','Unnamed: 7','Unnamed: 8','Unnamed: 9','Unnamed: 10','Unnamed: 11','Unnamed: 12','Unnamed: 13','Unnamed: 14','Unnamed: 15','Unnamed: 16']].plot(figsize=(55, 40), title='lotofacil', grid=True)

# In[20]:
print(df.corr())

# In[21]:
# NOTE(review): 'Unnamed: 2'and'Unnamed: 3' evaluates to just 'Unnamed: 3'
# (the 'and' of two non-empty strings is the right-hand operand), so only one
# column is counted here — a list ['Unnamed: 2','Unnamed: 3'] was probably
# intended.
contados=df['Unnamed: 2'and'Unnamed: 3'].value_counts()

# In[22]:
print (contados)

# count of the values (contagem dos valores) across ALL cells of the table
# In[23]:
contados2=pd.value_counts(df.values.flatten())

# In[24]:
print(contados2)

# In[25]:
# Keep the 25 most frequent numbers.
contados3=pd.value_counts(df.values.flatten()).head(25)

# In[26]:
#contados3
# contados3.rename(columns={"":"Sorteadas","":"Somatoria"})
# contados3.head()

# In[29]:
contados3.describe()

# plt.plot(contados3)
# In[34]:
figura_resultado=contados3.plot.bar()

# In[42]:
# NOTE(review): the figure created on the next line is immediately discarded;
# fig is rebound to the bar plot's figure right after.
fig = plt.figure()
fig=figura_resultado.get_figure()
contados3.to_excel("resultado.xlsx", sheet_name='Sheet1')
fig.savefig('figura_resultado.png')
print ('concluidos')

# In[35]:
# sns.pairplot(contados3)
# sns.distplot(contados3)
# plt.scatter(contados3)
# fig,axes=plt.subplots(figsize=(8,10))
# axes.plot(contados3,'r-',label='x^2')
#
# axes.set_title('Titulo')
# axes.legend()
# fig, axes = plt.subplots(figsize=(12,3))
# axes.bar(contados3)
# axes.set_title("bar")
# contados3.plot(figsize=(55, 40), title='lotofacil')
02e6ef99f9c6953b8f94d8427a1719eae987e489 | 797 | py | Python | driver/call.py | nitinkaveriappa/downward | 5c9a1b5111d667bb96f94da61ca2a45b1b70bb83 | [
"MIT"
] | 4 | 2019-04-23T10:41:35.000Z | 2019-10-27T05:14:42.000Z | driver/call.py | nitinkaveriappa/downward | 5c9a1b5111d667bb96f94da61ca2a45b1b70bb83 | [
"MIT"
] | null | null | null | driver/call.py | nitinkaveriappa/downward | 5c9a1b5111d667bb96f94da61ca2a45b1b70bb83 | [
"MIT"
] | 4 | 2018-01-16T00:00:22.000Z | 2019-11-01T23:35:01.000Z | # -*- coding: utf-8 -*-
from __future__ import print_function
"""Make subprocess calls with time and memory limits."""
from . import limits
import subprocess
import sys
def check_call(cmd, stdin=None, time_limit=None, memory_limit=None):
    """Run *cmd* with subprocess.check_call, optionally under resource limits.

    time_limit / memory_limit are applied in the child via a preexec hook;
    if limits were requested but cannot be set on this platform, the program
    exits with an explanatory message.  *stdin* may name a file to feed to
    the child's standard input.  Returns subprocess.check_call's result and
    raises CalledProcessError on a non-zero exit.
    """
    def _apply_limits():
        limits.set_time_limit(time_limit)
        limits.set_memory_limit(memory_limit)

    extra_kwargs = {}
    wants_limits = time_limit is not None or memory_limit is not None
    if wants_limits:
        if not limits.can_set_limits():
            sys.exit(limits.RESOURCE_MODULE_MISSING_MSG)
        extra_kwargs["preexec_fn"] = _apply_limits

    # Flush our own stdout so parent/child output does not interleave badly.
    sys.stdout.flush()

    if not stdin:
        return subprocess.check_call(cmd, **extra_kwargs)
    with open(stdin) as stdin_file:
        return subprocess.check_call(cmd, stdin=stdin_file, **extra_kwargs)
| 25.709677 | 73 | 0.672522 |
d4604dabf227abe6ba81553657af741db2303b2a | 13,218 | py | Python | examples/sparser.py | pexip/os-pyparsing | e7230e6d9dbd50defeb1c1f1f74296c0d4c8db42 | [
"MIT"
] | 1 | 2019-01-06T21:51:21.000Z | 2019-01-06T21:51:21.000Z | examples/sparser.py | pexip/os-pyparsing | e7230e6d9dbd50defeb1c1f1f74296c0d4c8db42 | [
"MIT"
] | 1 | 2019-08-24T21:25:49.000Z | 2019-08-26T22:44:40.000Z | examples/sparser.py | pexip/os-pyparsing | e7230e6d9dbd50defeb1c1f1f74296c0d4c8db42 | [
"MIT"
] | 2 | 2019-03-21T03:47:03.000Z | 2019-09-30T23:59:22.000Z | #!/usr/bin/env python
"""
NAME:
sparser.py
SYNOPSIS:
sparser.py [options] filename
DESCRIPTION:
The sparser.py script is a Specified PARSER. It is unique (as far as I can
tell) because it doesn't care about the delimiter(s). The user specifies
what is expected, and the order, for each line of text. All of the heavy
lifting is handled by pyparsing (http://pyparsing.sf.net).
OPTIONS:
-h,--help this message
-v,--version version
-d,--debug turn on debug messages
EXAMPLES:
1. As standalone
sparser.py myfile
2. As library
import sparser
...
#Copyright (C) 2006 Tim Cera timcera@earthlink.net
#
#
# This program is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the Free
# Software Foundation; either version 2 of the License, or (at your option)
# any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
# or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
# for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 675 Mass Ave, Cambridge, MA 02139, USA.
"""
#===imports======================
import sys
import os
import getopt
from pyparsing import *
#===globals======================
modname = "sparser"
__version__ = "0.1"

#--option args--
debug_p = 0  # set to 1 by the -d/--debug command-line flag
#opt_b=None #string arg, default is undefined

#---positional args, default is empty---
pargs = []

#---other---
#===utilities====================
def msg(txt):
    """Write *txt* to stdout and flush immediately (no trailing newline added)."""
    print(txt, end='', flush=True)
def debug(ftn, txt):
    """Emit a '<module>.<function>:<text>' debug line when the module flag is on."""
    if not debug_p:
        return
    sys.stdout.write("{0}.{1}:{2}\n".format(modname, ftn, txt))
    sys.stdout.flush()
def fatal(ftn, txt):
    """Abort the program with a '<module>.<function>:FATAL:<text>' message.

    Raises SystemExit so callers cannot continue past an unrecoverable error.
    """
    # Local renamed from 'msg' to 'message': the original shadowed the
    # module-level msg() helper defined just above.
    message = "{0}.{1}:FATAL:{2}\n".format(modname, ftn, txt)
    raise SystemExit(message)
def usage():
    """Print this module's docstring as the usage/help text."""
    sys.stdout.write(str(__doc__) + "\n")
#====================================
class ToInteger(TokenConverter):
    """Converter to make token into an integer."""
    def postParse( self, instring, loc, tokenlist ):
        # pyparsing post-parse hook: replace the matched tokens with the
        # first token converted to int.
        return int(tokenlist[0])
class ToFloat(TokenConverter):
    """Converter to make token into a float."""
    def postParse( self, instring, loc, tokenlist ):
        # pyparsing post-parse hook: replace the matched tokens with the
        # first token converted to float.
        return float(tokenlist[0])
class ParseFileLineByLine:
    """
    Bring data from text files into a program, optionally parsing each line
    according to specifications in a parse definition file.

    ParseFileLineByLine instances can be used like normal file objects (i.e. by
    calling readline(), readlines(), and write()), but can also be used as
    sequences of lines in for-loops.

    ParseFileLineByLine objects also handle compression transparently. i.e. it
    is possible to read lines from a compressed text file as if it were not
    compressed. Compression is deduced from the file name suffixes '.Z'
    (compress/uncompress), '.gz' (gzip/gunzip), and '.bz2' (bzip2).

    The parse definition file name is developed based on the input file name.
    If the input file name is 'basename.ext', then the definition file is
    'basename_def.ext'. If a definition file specific to the input file is not
    found, then the program searches for the file 'sparse.def' which would be
    the definition file for all files in that directory without a file specific
    definition file.

    Finally, ParseFileLineByLine objects accept file names that start with '~'
    or '~user' to indicate a home directory, as well as URLs (for reading
    only).

    Constructor:
    ParseFileLineByLine(|filename|, |mode|='"r"'), where |filename| is the name
    of the file (or a URL) and |mode| is one of '"r"' (read), '"w"' (write) or
    '"a"' (append, not supported for .Z files).
    """

    def __init__(self, filename, mode = 'r'):
        """Opens input file, and if available the definition file. If the
        definition file is available __init__ will then create some pyparsing
        helper variables. """
        # gzip is referenced by the .gz entries in command_dict below but was
        # never imported at module level.
        import gzip

        if mode not in ['r', 'w', 'a']:
            raise IOError(0, 'Illegal mode: ' + repr(mode))

        # Split the name up front so the definition-file lookup further down
        # works for URLs too (previously 'filen' was only assigned in the
        # local-file branch, so URL inputs crashed with a NameError).
        filen, file_extension = os.path.splitext(filename)

        # str.find() replaces the Python 2 string.find() function, which no
        # longer exists in Python 3's string module.
        if filename.find(':/') > 1:               # URL
            if mode == 'w':
                raise IOError("can't write to a URL")
            import urllib.request, urllib.parse, urllib.error
            self.file = urllib.request.urlopen(filename)
        else:
            filename = os.path.expanduser(filename)
            if mode == 'r' or mode == 'a':
                if not os.path.exists(filename):
                    raise IOError(2, 'No such file or directory: ' + filename)
            filen, file_extension = os.path.splitext(filename)
            # (suffix, mode) -> statement that opens the (possibly compressed)
            # file; executed with exec() below.  The default opens a plain file.
            # The append-error entries used Python 2 raise syntax
            # ("raise IOError, (...)"), which is a SyntaxError when exec'd
            # under Python 3.
            command_dict = {
                ('.Z', 'r'):
                    "self.file = os.popen('uncompress -c ' + filename, mode)",
                ('.gz', 'r'):
                    "self.file = gzip.GzipFile(filename, 'rb')",
                ('.bz2', 'r'):
                    "self.file = os.popen('bzip2 -dc ' + filename, mode)",
                ('.Z', 'w'):
                    "self.file = os.popen('compress > ' + filename, mode)",
                ('.gz', 'w'):
                    "self.file = gzip.GzipFile(filename, 'wb')",
                ('.bz2', 'w'):
                    "self.file = os.popen('bzip2 > ' + filename, mode)",
                ('.Z', 'a'):
                    "raise IOError(0, \"Can't append to .Z files\")",
                ('.gz', 'a'):
                    "self.file = gzip.GzipFile(filename, 'ab')",
                ('.bz2', 'a'):
                    "raise IOError(0, \"Can't append to .bz2 files\")",
            }
            exec(command_dict.get((file_extension, mode),
                                  'self.file = open(filename, mode)'))

        self.grammar = None

        # Try to find a parse ('*_def.ext') definition file. First try to find
        # a file specific parse definition file, then look for 'sparse.def'
        # that would be the definition file for all files within the directory.

        # The definition file is pure Python. The one variable that needs to
        # be specified is 'parse'. The 'parse' variable is a list of tuples
        # defining the name, type, and because it is a list, the order of
        # variables on each line in the data file. The variable name is a
        # string, the type variable is defined as integer, real, and qString.

        # parse = [
        #          ('year', integer),
        #          ('month', integer),
        #          ('day', integer),
        #          ('value', real),
        #         ]

        definition_file_one = filen + "_def" + file_extension
        definition_file_two = os.path.dirname(filen) + os.sep + "sparse.def"
        if os.path.exists(definition_file_one):
            self.parsedef = definition_file_one
        elif os.path.exists(definition_file_two):
            self.parsedef = definition_file_two
        else:
            self.parsedef = None
            return None

        # Create some handy pyparsing constructs. I kept 'decimal_sep' so that
        # could easily change to parse if the decimal separator is a ",".
        decimal_sep = "."
        sign = oneOf("+ -")
        # part of printables without decimal_sep, +, -
        # (str.replace() replaces the removed Python 2 string.replace() function)
        special_chars = '!"#$%&\'()*,./:;<=>?@[\\]^_`{|}~'.replace(
            decimal_sep, "")
        integer = ToInteger(
                  Combine(Optional(sign) +
                          Word(nums))).setName("integer")
        positive_integer = ToInteger(
                           Combine(Optional("+") +
                                   Word(nums))).setName("integer")
        negative_integer = ToInteger(
                           Combine("-" +
                                   Word(nums))).setName("integer")
        real = ToFloat(
               Combine(Optional(sign) +
                       Word(nums) +
                       decimal_sep +
                       Optional(Word(nums)) +
                       Optional(oneOf("E e") +
                                Word(nums)))).setName("real")
        positive_real = ToFloat(
                        Combine(Optional("+") +
                                Word(nums) +
                                decimal_sep +
                                Optional(Word(nums)) +
                                Optional(oneOf("E e") +
                                         Word(nums)))).setName("real")
        negative_real = ToFloat(
                        Combine("-" +
                                Word(nums) +
                                decimal_sep +
                                Optional(Word(nums)) +
                                Optional(oneOf("E e") +
                                         Word(nums)))).setName("real")
        qString = ( sglQuotedString | dblQuotedString ).setName("qString")

        # add other characters we should skip over between interesting fields
        integer_junk = Optional(
                       Suppress(
                       Word(alphas +
                            special_chars +
                            decimal_sep))).setName("integer_junk")
        real_junk = Optional(
                    Suppress(
                    Word(alphas +
                         special_chars))).setName("real_junk")
        qString_junk = SkipTo(qString).setName("qString_junk")

        # Execute the definition file in a namespace that exposes the
        # 'integer', 'real', and 'qString' expressions it refers to.
        # (exec() cannot create new *local* variables in a Python 3 function,
        # so the original in-place exec never produced a usable 'parse'
        # variable.)
        def_namespace = {'integer': integer, 'real': real, 'qString': qString}
        with open(self.parsedef) as def_file:
            exec(compile(def_file.read(), self.parsedef, 'exec'), def_namespace)
        parse = def_namespace['parse']

        # Build the grammar: alternate the matching '*_junk' skipper with each
        # named field, in the order given by the definition file.
        junk_lookup = {
            'integer': integer_junk,
            'real': real_junk,
            'qString': qString_junk,
        }
        grammar = []
        for nam, expr in parse:
            grammar.append(junk_lookup[expr.name])
            grammar.append(expr.setResultsName(nam))
        self.grammar = And(grammar[1:] + [restOfLine])

    def __del__(self):
        """Delete (close) the file wrapper."""
        # Guard: __init__ may have raised before self.file was assigned.
        if hasattr(self, 'file'):
            self.close()

    def __getitem__(self, item):
        """Used in 'for line in fp:' idiom."""
        line = self.readline()
        if not line:
            raise IndexError
        return line

    def readline(self):
        """Reads (and optionally parses) a single line."""
        line = self.file.readline()
        if self.grammar and line:
            try:
                return self.grammar.parseString(line).asDict()
            except ParseException:
                # Line didn't match the grammar: skip it and try the next one.
                return self.readline()
        else:
            return line

    def readlines(self):
        """Returns a list of all lines (optionally parsed) in the file."""
        if self.grammar:
            tot = []
            # Used this way instead of a 'for' loop against
            # self.file.readlines() so that there wasn't two copies of the file
            # in memory.
            while 1:
                line = self.file.readline()
                if not line:
                    break
                tot.append(line)
            return tot
        return self.file.readlines()

    def write(self, data):
        """Write to a file."""
        self.file.write(data)

    def writelines(self, list):
        """Write a list to a file. Each item in the list is a line in the
        file.
        """
        for line in list:
            self.file.write(line)

    def close(self):
        """Close the file."""
        self.file.close()

    def flush(self):
        """Flush in memory contents to file."""
        self.file.flush()
#=============================
def main(pargs):
    """Command-line smoke test; the primary mode of operation for this
    module is as an imported library."""
    source = ParseFileLineByLine(sys.argv[1])
    for parsed_line in source:
        print(parsed_line)
#-------------------------
if __name__ == '__main__':
    ftn = "main"
    # Parse command-line flags; '--bb' takes a value (demo/template option).
    opts, pargs = getopt.getopt(sys.argv[1:], 'hvd',
                                ['help', 'version', 'debug', 'bb='])
    for opt in opts:
        if opt[0] == '-h' or opt[0] == '--help':
            print(modname+": version="+__version__)
            usage()
            sys.exit(0)
        elif opt[0] == '-v' or opt[0] == '--version':
            print(modname+": version="+__version__)
            sys.exit(0)
        elif opt[0] == '-d' or opt[0] == '--debug':
            # Rebinds the module-level debug_p flag read by debug().
            debug_p = 1
        elif opt[0] == '--bb':
            opt_b = opt[1]

    #---make the object and run it---
    main(pargs)
#===Revision Log===
#Created by mkpythonproj:
#2006-02-06 Tim Cera
#
| 36.313187 | 80 | 0.524663 |
f00b26aa6642670b7edceb46806b44c4b9e641ee | 1,597 | py | Python | bikesanity/io_utils/throttled.py | JohnHenrySplitMyHeart/bikesanity | bf27f162017fdc919534e16ac12b940e1b873e93 | [
"Apache-2.0"
] | 4 | 2021-01-22T14:13:25.000Z | 2021-05-04T16:59:35.000Z | bikesanity/io_utils/throttled.py | JohnHenrySplitMyHeart/bikesanity | bf27f162017fdc919534e16ac12b940e1b873e93 | [
"Apache-2.0"
] | null | null | null | bikesanity/io_utils/throttled.py | JohnHenrySplitMyHeart/bikesanity | bf27f162017fdc919534e16ac12b940e1b873e93 | [
"Apache-2.0"
] | null | null | null | import datetime
import random
import time
from bikesanity.io_utils import log_handler as log_handler
from .base_session import BaseSession
class ThrottledSession(BaseSession):
    """HTTP session wrapper that enforces a randomized delay between requests."""

    # Delay bounds (seconds) between consecutive requests.
    STANDARD_REQUEST_RATE_LIMITER = 25
    STANDARD_REQUEST_RATE_LIMITER_MIN = 25
    STANDARD_REQUEST_RATE_LIMITER_MAX = 30
    # Back-off (seconds) applied after a failed request before re-raising.
    FAILED_REQUEST_RATE_LIMITER = 5

    def __init__(self):
        super().__init__()
        # Timestamp of the most recent request; None before the first one.
        self.last_request = None
        # NOTE(review): current_rate_limit is never read again anywhere in
        # this class — the actual delay is drawn fresh by _stochastic_delay()
        # on every request.  Confirm whether this attribute is still needed.
        self.current_rate_limit = self.STANDARD_REQUEST_RATE_LIMITER

    def _stochastic_delay(self):
        # Random whole-second delay in [MIN, MAX) so the request cadence is
        # not a detectable fixed interval.
        return random.randrange(self.STANDARD_REQUEST_RATE_LIMITER_MIN, self.STANDARD_REQUEST_RATE_LIMITER_MAX)

    def _wait_since_last_request(self):
        # Poll in 0.2 s steps until the stochastic delay has elapsed since the
        # previous request, then stamp the current time.
        # NOTE(review): when last_request is None (the very first request) it
        # is set to "now" first, so even the first request waits out the full
        # delay — confirm this is intended.
        rate_limit_delay = self._stochastic_delay()
        if not self.last_request:
            self.last_request = datetime.datetime.now()
        while (datetime.datetime.now() - self.last_request).total_seconds() < rate_limit_delay:
            time.sleep(0.2)
        self.last_request = datetime.datetime.now()

    def make_request(self, url):
        """Throttled GET of *url*: wait out the rate limit, then delegate to the session."""
        super().make_request(url)
        try:
            self._wait_since_last_request()
            return self.session.get(url, headers=self.headers)
        except Exception as exc:
            # Log the exception, delay a while, then raise
            log_handler.log.error("Error connecting when downloading {0}".format(url))
            time.sleep(self.FAILED_REQUEST_RATE_LIMITER)
            raise

    def make_stream_request(self, url):
        """Streaming GET of *url* (note: no throttling delay is applied here)."""
        super().make_stream_request(url)
        return self.session.get(url, headers=self.headers, stream=True)
| 33.978723 | 111 | 0.703193 |
8e0876907c0dc7a07d86e4f4a67e96e528dcbca2 | 1,311 | py | Python | simul/plots/failing_network.py | ConsenSys/handel | bc3f6f8194db140a1067ab157fc6bb1fb53a0144 | [
"Apache-2.0"
] | 48 | 2018-11-06T16:52:27.000Z | 2021-05-25T07:50:52.000Z | simul/plots/failing_network.py | ConsenSys/handel | bc3f6f8194db140a1067ab157fc6bb1fb53a0144 | [
"Apache-2.0"
] | 39 | 2018-11-27T10:40:26.000Z | 2019-08-04T17:27:45.000Z | simul/plots/failing_network.py | ConsenSys/handel | bc3f6f8194db140a1067ab157fc6bb1fb53a0144 | [
"Apache-2.0"
] | 4 | 2018-12-20T15:07:56.000Z | 2021-08-08T09:11:33.000Z | #!/usr/bin/env python
## This script generate the graphs that compares handel signature
## generation with different number of failing nodes for a fixed
## number of total nodes, and a fixed threshold 51%
##
import sys
from lib import *
import pandas as pd
import matplotlib.pyplot as plt
netColumn = "net_sentBytes_avg"
nodeColumn = "totalNbOfNodes"
failingColumn = "failing"
## threshold of signatures required
threshold = "51"
expectedNodes = 4000
nodes = None
files = {"csv/handel_4000_failing.csv": "handel"}
datas = read_datafiles(files)
for f,v in datas.items():
nodes = v[nodeColumn].max() # should be 2000
if int(v[nodeColumn].mean()) != expectedNodes:
print("error : nodes should be " + str(expectedNodes))
sys.exit(1)
x = v[failingColumn].map(lambda x: int((x/nodes) * 100))
y = v[netColumn].map(lambda x: x/1024)
print("file %s -> %d data points on %s" % (f,len(y),netColumn))
label = files[f]
if label == "":
label = input("Label for file %s: " % f)
plot(x,y,"-",label,allColors.popleft())
plt.legend(fontsize=fs_label)
plt.ylabel("KBytes ",fontsize=fs_label)
plt.xlabel("failing nodes in %",fontsize=fs_label)
# plt.yscale('log')
# plt.title("Outgoing network consumption for 51% signature threshold over 4000 nodes")
plt.show()
| 28.5 | 88 | 0.688024 |
2fc5064fea580ec5dcd69d7b9a3297f9ef4554b0 | 272 | py | Python | csf_tz/clearing_and_forwarding/doctype/container_issue_detail/container_issue_detail.py | Craftint/CSF_TZ | b5cb2d59d8f4e958ad7d4cb89421cfbec992abc5 | [
"MIT"
] | null | null | null | csf_tz/clearing_and_forwarding/doctype/container_issue_detail/container_issue_detail.py | Craftint/CSF_TZ | b5cb2d59d8f4e958ad7d4cb89421cfbec992abc5 | [
"MIT"
] | null | null | null | csf_tz/clearing_and_forwarding/doctype/container_issue_detail/container_issue_detail.py | Craftint/CSF_TZ | b5cb2d59d8f4e958ad7d4cb89421cfbec992abc5 | [
"MIT"
] | 1 | 2022-03-17T22:49:40.000Z | 2022-03-17T22:49:40.000Z | # -*- coding: utf-8 -*-
# Copyright (c) 2019, Bravo Logisitcs and contributors
# For license information, please see license.txt
from __future__ import unicode_literals
import frappe
from frappe.model.document import Document
class ContainerIssueDetail(Document):
pass
| 24.727273 | 54 | 0.790441 |
7957787f8415e186326ba6f325359195d25e97ac | 3,688 | py | Python | source/setup.py | ph4s3r/webhook-shims | 077ab606800612d1cfd048731264ad4cf91bfba6 | [
"Apache-2.0"
] | null | null | null | source/setup.py | ph4s3r/webhook-shims | 077ab606800612d1cfd048731264ad4cf91bfba6 | [
"Apache-2.0"
] | null | null | null | source/setup.py | ph4s3r/webhook-shims | 077ab606800612d1cfd048731264ad4cf91bfba6 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
import os, sys
from setuptools import setup, find_packages
from setuptools.command.test import test as TestCommand
#from pip.req import parse_requirements
from pip._internal.req import parse_requirements
def parse_requirements(filename):
    """Load requirements from a pip requirements file.

    Returns the stripped, non-empty, non-comment lines as a list of strings.
    """
    # Context manager closes the handle deterministically; the original
    # iterated an open() result and leaked the handle until GC.
    with open(filename) as req_file:
        stripped = (line.strip() for line in req_file)
        return [line for line in stripped if line and not line.startswith("#")]
try:
    from loginsightwebhookdemo import __version__ as loginsightwebhookdemoversion # TODO Replace with a static variant?
except ImportError:
    # Package not importable (e.g. building from a bare checkout): fall back
    # to a development placeholder version.
    loginsightwebhookdemoversion = "0.dev0"
# Hack from https://stackoverflow.com/questions/14399534/how-can-i-reference-requirements-txt-for-the-install-requires-kwarg-in-setuptool
# parse_requirements() returns generator of pip.req.InstallRequirement objects
# Locate the project root: prefer PYTHONPATH, then Travis' build directory,
# falling back to the current directory.  Using .get() with an 'or' chain
# replaces the old nested bare try/except blocks and also fixes a latent
# bug: an *empty* PYTHONPATH previously left HDIR unassigned, causing a
# NameError when the requirements files were read below.
HDIR = os.environ.get('PYTHONPATH') or os.environ.get('TRAVIS_BUILD_DIR') or '.'
#install_reqs = parse_requirements(HDIR + '/requirements.txt', session='hack')
#test_reqs = parse_requirements(HDIR + '/test-requirements.txt', session='hack')
# reqs is a list of requirement
# e.g. ['django==1.5.1', 'mezzanine==1.4.6']
# Both files are read at import time of this setup.py, so they must exist
# next to HDIR for the build to proceed.
reqs = parse_requirements(HDIR + '/requirements.txt')
treqs = parse_requirements(HDIR + '/test-requirements.txt')
class PyTest(TestCommand):
    """``setup.py test``: run pytest in the current environment."""
    user_options = [('pytest-args=', 'a', "Arguments to pass to pytest")]
    description = "Run tests in the current environment"

    def initialize_options(self):
        TestCommand.initialize_options(self)
        # BUG FIX: the dest for 'pytest-args=' is 'pytest_args'.  The old
        # code initialized an unrelated 'args' attribute, so any
        # --pytest-args value supplied on the command line was silently
        # ignored and shlex.split() was fed a list (masked by a broad
        # except AttributeError).
        self.pytest_args = ''

    def run(self):
        import shlex
        # import here, cause outside the eggs aren't loaded
        import pytest
        errno = pytest.main(shlex.split(self.pytest_args or ''))
        sys.exit(errno)
class ToxTest(TestCommand):
    """``setup.py tox``: run tests in all configured tox environments."""
    # BUG FIX: help text previously said "pytest" (copy-paste error).
    user_options = [('tox-args=', "t", "Arguments to pass to tox")]
    description = "Run tests in all configured tox environments"

    def initialize_options(self):
        TestCommand.initialize_options(self)
        # BUG FIX: the dest for 'tox-args=' is 'tox_args'; setting 'args'
        # discarded any user-supplied --tox-args value.
        self.tox_args = ''

    def run(self):
        import shlex
        # import here, cause outside the eggs aren't loaded
        from tox.__main__ import main
        errno = main(shlex.split(self.tox_args or ''))
        sys.exit(errno)
# Package metadata and build configuration; reqs/treqs come from the
# requirements files parsed above, PyTest/ToxTest wire the custom
# 'test' and 'tox' commands.
setup(
    name='loginsightwebhookdemo',
    version=loginsightwebhookdemoversion,
    url='http://github.com/vmw-loginsight/loginsightwebhookdemo/',
    license='Apache Software License 2.0',
    author='Steve Flanders',
    install_requires=reqs,
    tests_require=treqs,
    description='VMware vRealize Log Insight Webhook Shim',
    author_email='stevefl@vmware.com',
    long_description=open('README.rst').read(),
    packages=find_packages(),
    platforms='any',
    classifiers=[
        'Programming Language :: Python :: 2.7',
        'Development Status :: 1 - Planning',
        'Natural Language :: English',
        'Intended Audience :: Information Technology',
        'Intended Audience :: System Administrators',
        'Intended Audience :: Developers',
        'License :: OSI Approved :: Apache Software License',
        'Operating System :: OS Independent',
        'Topic :: Software Development :: Libraries :: Python Modules',
    ],
    entry_points={
        'console_scripts': [
            'li = loginsightwebhookdemo.__init__:main'
        ]
    },
    cmdclass={'test': PyTest, 'tox': ToxTest}
)
| 32.928571 | 137 | 0.667299 |
f9c113305758a373d61855feacd8e1162b98cac4 | 2,300 | py | Python | vendor/bundle/ruby/2.0.0/gems/pygments.rb-1.1.2/vendor/pygments-main/pygments/lexers/capnproto.py | apaigesh/apaigesh.github.io | c79e576c8fb620c9b5cb3cc812e76d1f897a1c37 | [
"MIT"
] | null | null | null | vendor/bundle/ruby/2.0.0/gems/pygments.rb-1.1.2/vendor/pygments-main/pygments/lexers/capnproto.py | apaigesh/apaigesh.github.io | c79e576c8fb620c9b5cb3cc812e76d1f897a1c37 | [
"MIT"
] | null | null | null | vendor/bundle/ruby/2.0.0/gems/pygments.rb-1.1.2/vendor/pygments-main/pygments/lexers/capnproto.py | apaigesh/apaigesh.github.io | c79e576c8fb620c9b5cb3cc812e76d1f897a1c37 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""
pygments.lexers.capnproto
~~~~~~~~~~~~~~~~~~~~~~~~~
Lexers for the Cap'n Proto schema language.
:copyright: Copyright 2006-2015 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import re
from pygments.lexer import RegexLexer, bygroups, words
from pygments.token import Text, Comment, Operator, Keyword, Name, String, \
Number, Punctuation, Literal
__all__ = ['CapnProtoLexer']
class CapnProtoLexer(RegexLexer):
    """
    For `Cap'n Proto <https://capnproto.org>`_ source.
    .. versionadded:: 2.2
    """
    name = 'Cap\'n Proto'
    filenames = ['*.capnp']
    aliases = ['capnp']
    flags = re.MULTILINE | re.UNICODE
    # State machine: 'root' dispatches into a sub-state on the introducing
    # character ('=' -> 'expression', ':' -> 'type', '$' -> 'annotation').
    # Each sub-state has a bracket-tracking twin ('parentype'/'parenexp'/
    # 'annexp') that pushes itself on '[' or '(' and pops on ']' or ')'.
    # The empty-pattern entries (r'', ..., '#pop') act as fallbacks that
    # pop back to the caller when nothing else matches.
    tokens = {
        'root': [
            # line comments
            (r'#.*?$', Comment.Single),
            # ordinals / ids such as @0x...
            (r'@[0-9a-zA-Z]*', Name.Decorator),
            (r'=', Literal, 'expression'),
            (r':', Name.Class, 'type'),
            (r'\$', Name.Attribute, 'annotation'),
            (r'(struct|enum|interface|union|import|using|const|annotation|extends|in|of|on|as|with|from|fixed)\b',
             Keyword),
            (r'[a-zA-Z0-9_.]+', Name),
            (r'[^#@=:$a-zA-Z0-9_]+', Text),
        ],
        # everything after ':' up to a delimiter is a type name
        'type': [
            (r'[^][=;,(){}$]+', Name.Class),
            (r'[[(]', Name.Class, 'parentype'),
            (r'', Name.Class, '#pop')
        ],
        'parentype': [
            (r'[^][;()]+', Name.Class),
            (r'[[(]', Name.Class, '#push'),
            (r'[])]', Name.Class, '#pop'),
            (r'', Name.Class, '#pop')
        ],
        # everything after '=' is a literal value
        'expression': [
            (r'[^][;,(){}$]+', Literal),
            (r'[[(]', Literal, 'parenexp'),
            (r'', Literal, '#pop')
        ],
        'parenexp': [
            (r'[^][;()]+', Literal),
            (r'[[(]', Literal, '#push'),
            (r'[])]', Literal, '#pop'),
            (r'', Literal, '#pop')
        ],
        # everything after '$' is an annotation reference
        'annotation': [
            (r'[^][;,(){}=:]+', Name.Attribute),
            (r'[[(]', Name.Attribute, 'annexp'),
            (r'', Name.Attribute, '#pop')
        ],
        'annexp': [
            (r'[^][;()]+', Name.Attribute),
            (r'[[(]', Name.Attribute, '#push'),
            (r'[])]', Name.Attribute, '#pop'),
            (r'', Name.Attribute, '#pop')
        ],
    }
| 28.75 | 114 | 0.433043 |
d2a9517f04bbf59326d0404e2215b38ce9e4dc68 | 197 | py | Python | HousePassword.py | formeo/checkio_tasks | 54ab77821e797238379afe483f6f6358cfba39a6 | [
"MIT"
] | null | null | null | HousePassword.py | formeo/checkio_tasks | 54ab77821e797238379afe483f6f6358cfba39a6 | [
"MIT"
] | null | null | null | HousePassword.py | formeo/checkio_tasks | 54ab77821e797238379afe483f6f6358cfba39a6 | [
"MIT"
def checkio(data):
    """Return True if *data* is a safe "house password".

    A safe password has at least 10 characters and contains at least one
    digit, one uppercase letter and one lowercase letter.

    BUG FIX: the previous isalpha()/isdigit()/islower()/isupper() chain
    accepted strings made only of symbols (e.g. '!!!!!!!!!!'), because a
    string with no cased characters is neither 'lower' nor 'upper' and a
    string with no letters is not 'alpha'.  Checking each required
    character class explicitly closes that hole.
    """
    return (len(data) >= 10
            and any(c.isdigit() for c in data)
            and any(c.isupper() for c in data)
            and any(c.islower() for c in data))
| 24.625 | 55 | 0.553299 |
5bb4fdcf12275a33956776447831ed6bb16a6458 | 6,535 | py | Python | SearchScrapper.py | prakhar21/News-Crawler | 39430ddb276d130a41793a9ba78ef55edaa48848 | [
"Apache-2.0"
] | 3 | 2019-04-19T00:20:58.000Z | 2021-05-08T15:52:46.000Z | SearchScrapper.py | prakhar21/News-Crawler | 39430ddb276d130a41793a9ba78ef55edaa48848 | [
"Apache-2.0"
] | null | null | null | SearchScrapper.py | prakhar21/News-Crawler | 39430ddb276d130a41793a9ba78ef55edaa48848 | [
"Apache-2.0"
] | 2 | 2019-04-19T00:20:59.000Z | 2021-05-08T15:59:25.000Z | # -*- coding: utf-8 -*-
"""
Created on Fri Sep 18 16:56:47 2015
@author: prakhar
"""
from urllib2 import urlopen
from bs4 import BeautifulSoup
import requests
import urlparse
import sqlite3
import re
import tweepy
import subprocess
import datetime
#mainURL
# Endpoints for the news18.com search form and its front-page poll.
url = 'http://www.news18.com/search-result.php'
poll_url = 'http://www.news18.com'
poll_url_POST = 'http://www.news18.com/index.php/?pollone=poll-submitted'
# Accumulators filled by ExtractLinks / ExtractNews / CleanNews below.
news_headings = []
news_href = []
news_summary = []
final_news = []
# Loop index shared with ExtractNews; answer_code appears unused below.
i=0
answer_code = 0
'''
#Creates Database and obtain cursor
createDB = sqlite3.connect('/home/prakhar/Desktop/scraping/news18.db')
qc = createDB.cursor()
#takes user input
def TakeInput(i):
if i == 1:
query_string = raw_input("\nEnter the query to be searched: ").lower()
return query_string
if i == 2:
query_state = raw_input("\nEnter the state for news: ").lower()
return query_state
'''
#requests data
def SendPostRequest(url, p):
    """POST form data ``p`` to ``url``.

    :param url: target URL
    :param p: dict of form fields
    :return: the ``requests`` response on success, or ``None`` if the
        request failed.  Callers must check for ``None`` before using
        ``.text`` on the result.
    """
    try:
        return requests.post(url, data=p)
    except requests.RequestException as e:
        # Narrowed from a bare 'except Exception': only network/HTTP
        # failures are expected here; programming errors now propagate.
        print(e)
        return None
#BeautifulSoup Obj Creator
def CreateBeautifulSoupObj(s):
    """Wrap raw markup ``s`` in a BeautifulSoup parse tree and return it."""
    return BeautifulSoup(s)
#Prints Instructions
def Instructions(p):
    """Print a banner describing the search parameters ``p``.

    FIX: uses print() function calls (valid in both Python 2 and 3)
    instead of Python-2-only print statements; output is unchanged.
    """
    print('\n\n...............News Search - www.news18.com ..............\n')
    print(':: Below are the parameters of your search ::\n')
    print(p)
    print('-'*90)
#Link Extractor
def ExtractLinks(soup):
    """Collect headline text and article URLs from a search-result page.

    Appends to the module-level ``news_headings`` and ``news_href`` lists
    and returns both.
    """
    for result_item in soup.find_all('li', 'clearfix'):
        anchor = result_item.find('p').find('a', href=True)
        news_headings.append(anchor.contents[0])
        news_href.append(anchor['href'])
    return news_headings, news_href
#News Extractor
def ExtractNews(i):
    """Fetch every article page in ``news_href[i:]``.

    For each URL the article body paragraphs are appended (as a list of
    tags) to the module-level ``news_summary``, which is then returned.
    """
    for article_url in news_href[i:]:
        page = urlopen(article_url)
        markup = page.read()
        page.close()
        doc = BeautifulSoup(markup)
        news_summary.append(
            doc.find('article', {"id": "story_box"})
               .find('aside', {'id': 'mid_box_inner'})
               .findAll('p')[:])
    return news_summary
#Clean Text
def CleanNews(n):
    """Strip HTML tags and '#' characters from ``n`` and trim whitespace."""
    without_tags = re.sub(r'<[^>]*>', '', n)
    return without_tags.replace('#', '').strip()
#---------------------------------------------------------------------------------------------
#--------------------------------------------------MAIN BEGINS--------------------------------
#---------------------------------------------------------------------------------------------
#User Input and string transformation
# NOTE(review): TakeInput is only defined inside the commented-out block
# above, so these two calls raise NameError at runtime -- the helper needs
# to be reinstated before this script can run.
query_string = TakeInput(1).replace(' ','+')
query_state = TakeInput(2).replace(' ','-')
# Form fields expected by the news18 search endpoint.
param = {'search_type':'news','query_string':query_string,'search_state':query_state,'search_city':'','limit':'10'}
#prints Instructions
Instructions(param)
#send post request
try:
    search = SendPostRequest(url,param)
except Exception as e:
    # NOTE(review): if the request raises, 'search' is left unbound and
    # the soup creation below fails with NameError.
    print e
soup = CreateBeautifulSoupObj(search.text)
news_headings,news_href = ExtractLinks(soup)
news_summary = ExtractNews(i)
#print news_summaryy
# Flatten the per-article paragraph lists: clean each paragraph, store it
# in final_news and echo it, separating articles with a dotted rule.
for i in news_summary[:]:
    j=0
    while j < len(i):
        news = str(i[j])
        news_clean = CleanNews(news)
        final_news.append(news_clean)
        print news_clean
        j+=1
    print '.'*95
    print '\n'
#print final_news[2]
############################################################################
# Ask if user wants to tweet the news
############################################################################
tweet_response = raw_input("Do you want to tweet the news(y/n): ").lower()
# NOTE(review): API credentials must not live in source control -- move
# these placeholders to environment variables or a config file.
ckey = 'Insert Consumer Key'
csecret = 'Insert Conksumer Key'
atoken = 'Insert Access Token'
asecret = 'Insert Token secret'
# Standard tweepy OAuth handshake.
auth = tweepy.OAuthHandler(ckey, csecret)
auth.set_access_token(atoken, asecret)
api = tweepy.API(auth)
if tweet_response == 'y':
    news_index = int(raw_input("Enter the news index[0,1,....,8,9] : "))
    # Tweets the article URL (not the cleaned article text).
    api.update_status(status=str(news_href[news_index]))
    print '\n\n:::::: Status updated successfully :::::::\n'
if tweet_response == 'n':
    print '\n\nWe respect your decision\n'
    for i in final_news[:3]:
        print i
############################################################################
# Ask if user wants to speak out the news
############################################################################
activeEspeak = raw_input("Do you wish to listen any news(y/n): ").lower()
# NOTE(review): any non-empty answer (including 'n') is truthy, so this
# branch runs for every reply; the check should probably be == 'y'.
if activeEspeak:
    idx = int(raw_input("Enter the news index[0,1,....,8,9] : "))
    text = str(final_news[idx])
    print text
    #subprocess.call('espeak '+text, shell=True)
#else:
#    print '\n\nWe respect your decision\n'''
############################################################################
# Ask if user wants to poll
############################################################################
# NOTE(review): wants_to_poll is collected but never checked -- the poll
# flow below always runs regardless of the answer.
wants_to_poll = raw_input("Do you wish to use news18 poll option(y/n): ").lower()
# Fetch the front page containing the poll widget.
html = urlopen(poll_url)
htmlText = html.read()
html.close()
soup = BeautifulSoup(htmlText)
def FetchPoll(soup):
    """Extract the poll question and its two answer options from the page.

    :param soup: BeautifulSoup tree of the news18 front page
    :return: (question, option1, value1, option2, value2)

    NOTE(review): if no 'pool1' div exists the loop body never runs and
    the return raises NameError -- pre-existing behaviour, unchanged.
    """
    for p in soup.find_all('div', {'id': 'pool1'}):
        question = p.find('div', 'poll_question_cls').contents[0]
        options = p.find_all('li', 'poll_ans_li_cls')
        option1 = options[0].next.next.strip()
        value1 = options[0].contents[0]['value']
        option2 = options[1].next.next.strip()
        # BUG FIX: value2 previously read options[0] again, so both values
        # were always identical and option 2 could never be submitted.
        value2 = options[1].contents[0]['value']
    return question, option1, value1, option2, value2
# Show the poll, collect the user's choice and submit it.
question, option1, value1, option2, value2 = FetchPoll(soup)
dict_opt_val = {'option1': value1, 'option2': value2}
print('Q. ', question)
print('\n', option1, '\n', option2)
option_input = int(raw_input('\n\ntype 1 or 2 to select options\n'))
if option_input == 1:
    value = dict_opt_val['option1']
if option_input == 2:
    value = dict_opt_val['option2']
#time = subprocess.call('date')
#time = str(time) + 'GMT+0530 (India Standard Time)'
#today = datetime.date.today()
#t = datetime.time(0)
#print t, 'GMT+0530 (India Standard Time)'
# BUG FIX: 'ans' previously carried the literal string 'value' instead of
# the selected option's value, so the same bogus answer was always posted.
# NOTE(review): 'value' stays unbound when the user types anything other
# than 1 or 2, which now raises NameError here -- confirm desired handling.
fd = {'ans': value, 'timestamp': 'Thu Oct 01 2015 17:40:34 GMT+0530 (India Standard Time)', 'action': '0.13215709175698032'}
response_poll = SendPostRequest(poll_url_POST, fd)
soup = BeautifulSoup(response_poll.text)
print(soup)
| 25.134615 | 122 | 0.57368 |
88d041ce046c283207a1cf153bc7730bdfa86b41 | 1,093 | py | Python | var/spack/repos/builtin/packages/conserver/package.py | robertodr/spack | 9b809e01b47d48f01b3d257912fe1b752943cd3d | [
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | 9 | 2018-04-18T07:51:40.000Z | 2021-09-10T03:56:57.000Z | var/spack/repos/builtin/packages/conserver/package.py | robertodr/spack | 9b809e01b47d48f01b3d257912fe1b752943cd3d | [
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | 907 | 2018-04-18T11:17:57.000Z | 2022-03-31T13:20:25.000Z | var/spack/repos/builtin/packages/conserver/package.py | robertodr/spack | 9b809e01b47d48f01b3d257912fe1b752943cd3d | [
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | 29 | 2018-11-05T16:14:23.000Z | 2022-02-03T16:07:09.000Z | # Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class Conserver(AutotoolsPackage):
    """Conserver is an application that allows multiple users to
    watch a serial console at the same time. """
    homepage = "https://www.conserver.com/"
    url      = "https://github.com/bstansell/conserver/releases/download/v8.2.5/conserver-8.2.5.tar.gz"
    # Known releases with their upstream tarball checksums.
    version('8.2.5', sha256='7db192f304126d7e5c15421c4c83cd5c08039f2f2b3c61b2998e71881ae47eea')
    version('8.2.4', sha256='a591eabb4abb632322d2f3058a2f0bd6502754069a99a153efe2d6d05bd97f6f')
    version('8.2.3', sha256='764443b2798047f7429747510eeb3207240260590551700d13dbbad8a5bdee08')
    version('8.2.2', sha256='05ea1693bf92b42ad2f0a9389c60352ccd35c2ea93c8fc8e618d0153362a7d81')
    version('8.2.1', sha256='251ae01997e8f3ee75106a5b84ec6f2a8eb5ff2f8092438eba34384a615153d0')
    def setup_run_environment(self, env):
        # conserver installs its binaries under sbin; expose them on PATH.
        env.prepend_path('PATH', self.prefix.sbin)
| 45.541667 | 103 | 0.772187 |
0d75af2e5d2e1cfd81e31d52ba74a9f2b30b9720 | 2,424 | py | Python | utils/python-rpc/console.py | honzapatCZ/NejCoin | 2b28d790bc9137af2c8f4a0db44ca7613c120733 | [
"MIT"
] | 1 | 2019-07-30T19:28:52.000Z | 2019-07-30T19:28:52.000Z | utils/python-rpc/console.py | honzapatCZ/NejCoin | 2b28d790bc9137af2c8f4a0db44ca7613c120733 | [
"MIT"
] | 1 | 2019-11-21T21:46:09.000Z | 2019-11-21T21:46:09.000Z | utils/python-rpc/console.py | honzapatCZ/NejCoin | 2b28d790bc9137af2c8f4a0db44ca7613c120733 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
from __future__ import print_function
import sys
import subprocess
import socket
import urlparse
import framework.rpc
import framework.daemon
import framework.wallet
USAGE = 'usage: python -i console.py [[[scheme]<host>:]<port> [[[scheme]<host>:]<port>...]]'
# Connected endpoints, classified below into daemon vs wallet RPC servers.
daemons = []
wallets = []
rpcs = []
# Each CLI argument is either a bare port or a [scheme://]host:port URL.
# NOTE(review): 'except Exception, e' is Python-2-only syntax; this script
# cannot run under Python 3 as written.
for n in range(1, len(sys.argv)):
    scheme='http'
    host='127.0.0.1'
    port=None
    try:
        try:
            port = int(sys.argv[n])
        except:
            # Not a bare port: parse it as a URL and fill in defaults.
            t = urlparse.urlparse(sys.argv[n], allow_fragments = False)
            scheme = t.scheme or scheme
            host = t.hostname or host
            port = t.port or port
        if scheme != 'http' and scheme != 'https':
            raise Exception(USAGE)
        if port <= 0 or port > 65535:
            raise Exception(USAGE)
    except Exception, e:
        print('Error: ' + str(e))
        raise Exception(USAGE)
    # check for open port
    s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    s.settimeout(1)
    if s.connect_ex((host, port)) != 0:
        raise Exception('No wallet or daemon RPC on port ' + str(port))
    s.close()
    # both wallet and daemon have a get_version JSON RPC
    rpc = framework.rpc.JSONRPC('{protocol}://{host}:{port}'.format(protocol=scheme, host=host, port=port))
    get_version = {
        'method': 'get_version',
        'jsonrpc': '2.0',
        'id': '0'
    }
    try:
        res = rpc.send_json_rpc_request(get_version)
    except Exception, e:
        raise Exception('Failed to call version RPC: ' + str(e))
    if 'version' not in res:
        raise Exception('Server is not a Nejcoin process')
    # Daemons include a 'status' field in get_version; wallets do not.
    if 'status' in res:
        daemons.append(framework.daemon.Daemon(port=port))
        rpcs.append(daemons[-1])
    else:
        wallets.append(framework.wallet.Wallet(port=port))
        rpcs.append(wallets[-1])
# add tab completion if we can: https://stackoverflow.com/questions/246725
try:
    import readline
except ImportError:
    # FIX: narrowed from a bare 'except:'; readline is simply unavailable
    # on some platforms (e.g. vanilla Windows), so skip completion there,
    # but no longer hide unrelated errors.
    pass
else:
    import rlcompleter
    readline.parse_and_bind('tab: complete')
# Convenience aliases: with exactly one daemon/wallet connected, the
# interactive user can just type 'daemon' or 'wallet'.
if len(daemons) == 1:
    daemon = daemons[0]
if len(wallets) == 1:
    wallet = wallets[0]
didx = 0
widx = 0
# Report which Python variable maps to which RPC endpoint.
for rpc in rpcs:
    if type(rpc) == framework.daemon.Daemon:
        var = "daemon" if len(daemons) == 1 else "daemons[" + str(didx) + "]"
        didx += 1
    else:
        var = "wallet" if len(wallets) == 1 else "wallets[" + str(widx) + "]"
        widx += 1
    print('Variable \'%s\' connected to %s RPC on %s:%u' % (var, 'daemon' if type(rpc) == framework.daemon.Daemon else 'wallet', rpc.host ,rpc.port))
| 27.235955 | 147 | 0.646865 |
bacb69615414e75902c91d21eefb2bb540343f29 | 2,161 | py | Python | share/qt/extract_strings_qt.py | mongercoin/Mongercoin | fd1567993fd54415b7d5cf26418d843f0ca18f24 | [
"MIT"
] | null | null | null | share/qt/extract_strings_qt.py | mongercoin/Mongercoin | fd1567993fd54415b7d5cf26418d843f0ca18f24 | [
"MIT"
] | null | null | null | share/qt/extract_strings_qt.py | mongercoin/Mongercoin | fd1567993fd54415b7d5cf26418d843f0ca18f24 | [
"MIT"
] | null | null | null | #!/usr/bin/python
'''
Extract _("...") strings for translation and convert to Qt stringdefs so that
they can be picked up by Qt linguist.
'''
from __future__ import division,print_function,unicode_literals
from subprocess import Popen, PIPE
import glob
import operator
import os
import sys
OUT_CPP="qt/Mongerstrings.cpp"
EMPTY=['""']
def parse_po(text):
    """Parse the 'po' catalog format produced by xgettext.

    Returns a list of (msgid, msgstr) tuples, where each element is the
    list of quoted string fragments making up that entry (continuation
    lines starting with '"' are appended to the current fragment list).
    """
    messages = []
    current_id = []
    current_str = []
    reading_id = False
    reading_str = False
    for raw_line in text.split('\n'):
        line = raw_line.rstrip('\r')
        if line.startswith('msgid '):
            # A new msgid terminates the previous entry, if any.
            if reading_str:
                messages.append((current_id, current_str))
                reading_str = False
            reading_id = True
            current_id = [line[6:]]
        elif line.startswith('msgstr '):
            reading_id = False
            reading_str = True
            current_str = [line[7:]]
        elif line.startswith('"'):
            # Continuation fragment for whichever part is being read.
            if reading_id:
                current_id.append(line)
            if reading_str:
                current_str.append(line)
    # Flush the final entry.
    if reading_str:
        messages.append((current_id, current_str))
    return messages
# Source files to scan come from the command line.
files = sys.argv[1:]
# xgettext -n --keyword=_ $FILES
XGETTEXT=os.getenv('XGETTEXT', 'xgettext')
if not XGETTEXT:
    # Only reachable when the XGETTEXT env var is explicitly set to an
    # empty string; the default above is otherwise non-empty.
    print('Cannot extract strings: xgettext utility is not installed or not configured.',file=sys.stderr)
    print('Please install package "gettext" and re-run \'./configure\'.',file=sys.stderr)
    exit(1)
# Run xgettext with the catalog written to stdout, then parse it.
child = Popen([XGETTEXT,'--output=-','-n','--keyword=_'] + files, stdout=PIPE)
(out, err) = child.communicate()
messages = parse_po(out.decode('utf-8'))
# Emit the Qt stringdefs file.  Using 'with' guarantees the output file is
# closed even if a write fails (the old open()/close() pair leaked the
# handle on error).
with open(OUT_CPP, 'w') as f:
    f.write("""
#include <QtGlobal>
// Automatically generated by extract_strings.py
#ifdef __GNUC__
#define UNUSED __attribute__((unused))
#else
#define UNUSED
#endif
""")
    f.write('static const char UNUSED *Monger_strings[] = {\n')
    # Sort by msgid for a stable, diff-friendly output file.
    messages.sort(key=operator.itemgetter(0))
    for (msgid, msgstr) in messages:
        if msgid != EMPTY:
            f.write('QT_TRANSLATE_NOOP("Monger-core", %s),\n' % ('\n'.join(msgid)))
    f.write('};\n')
| 25.72619 | 105 | 0.619158 |
66a479ad41c646daf1e99042d4d228118ea873f2 | 856 | py | Python | server/domain/cyclic_data.py | wazatoki/IotLogger | 9baec9bff7762fbc0d279207fabf8902d9650a2d | [
"MIT"
] | null | null | null | server/domain/cyclic_data.py | wazatoki/IotLogger | 9baec9bff7762fbc0d279207fabf8902d9650a2d | [
"MIT"
] | 7 | 2021-03-11T00:57:20.000Z | 2022-02-27T07:53:56.000Z | server/domain/cyclic_data.py | wazatoki/IotLogger | 9baec9bff7762fbc0d279207fabf8902d9650a2d | [
"MIT"
] | null | null | null | from datetime import datetime
class Log_data:
    """One cyclic-log record: version, timestamp, device id and ten slots.

    All attributes are class-level defaults; -32000 marks an unset item
    and 1900-01-01 marks an unset timestamp.
    """
    version = 0
    dt = datetime.strptime('1900/1/1 0:0:0', '%Y/%m/%d %H:%M:%S')
    device_id = '0000'
    item0 = -32000
    item1 = -32000
    item2 = -32000
    item3 = -32000
    item4 = -32000
    item5 = -32000
    item6 = -32000
    item7 = -32000
    item8 = -32000
    item9 = -32000

    def get_Data(self):
        """Return the record as a serialization-ready dict.

        Key order matches the original literal: version, datetime,
        deviceID, then item0..item9.
        """
        payload = {
            "version": self.version,
            "datetime": self.dt,
            "deviceID": self.device_id,
        }
        for slot in range(10):
            key = "item%d" % slot
            payload[key] = getattr(self, key)
        return payload
3eb2eda0591d6e503734390ae900454bcd165d96 | 265 | py | Python | simple_rest_client/models.py | rfrp/python-simple-rest-client | f7d6b337849954c99861b81b5eb5e535eff06549 | [
"MIT"
] | null | null | null | simple_rest_client/models.py | rfrp/python-simple-rest-client | f7d6b337849954c99861b81b5eb5e535eff06549 | [
"MIT"
] | null | null | null | simple_rest_client/models.py | rfrp/python-simple-rest-client | f7d6b337849954c99861b81b5eb5e535eff06549 | [
"MIT"
] | null | null | null | from collections import namedtuple
# Immutable value objects describing one HTTP round trip: Request captures
# everything handed to the transport layer; Response wraps what came back
# plus the underlying client response object.
Request = namedtuple(
    'Request',
    ['url', 'method', 'params', 'body', 'headers', 'timeout', 'verify', 'kwargs'],
)
Response = namedtuple(
    'Response',
    ['url', 'method', 'body', 'headers', 'status_code', 'client_response'],
)
| 29.444444 | 92 | 0.649057 |
57c6b07e01bc03c81603c2d848b9fd73b041ccaa | 35 | py | Python | test_pref/__init__.py | jamesabel/pref | 3e3c0933349e94cb10daaff9476398f6133d6e29 | [
"MIT"
] | null | null | null | test_pref/__init__.py | jamesabel/pref | 3e3c0933349e94cb10daaff9476398f6133d6e29 | [
"MIT"
] | null | null | null | test_pref/__init__.py | jamesabel/pref | 3e3c0933349e94cb10daaff9476398f6133d6e29 | [
"MIT"
] | null | null | null | __application_name__ = "test_pref"
| 17.5 | 34 | 0.828571 |
3d53c3a4bfba051f2f304d7c3a16fab13cc07480 | 185 | py | Python | BOJ_Solved/BOJ-24356.py | CodingLeeSeungHoon/Python_Algorithm_TeamNote | 1e92986999b45aa9951e12e67b23062e410e9b36 | [
"MIT"
] | 7 | 2021-11-19T14:50:59.000Z | 2022-02-25T20:00:20.000Z | BOJ_Solved/BOJ-24356.py | CodingLeeSeungHoon/Python_Algorithm_TeamNote | 1e92986999b45aa9951e12e67b23062e410e9b36 | [
"MIT"
] | null | null | null | BOJ_Solved/BOJ-24356.py | CodingLeeSeungHoon/Python_Algorithm_TeamNote | 1e92986999b45aa9951e12e67b23062e410e9b36 | [
"MIT"
] | null | null | null | """
백준 24356번 : ЧАСОВНИК
"""
# Read two clock times "h1 m1 h2 m2" and print the elapsed minutes between
# them, plus how many full 30-minute intervals that spans.
start_h, start_m, end_h, end_m = map(int, input().split())
start_total = start_h * 60 + start_m
end_total = end_h * 60 + end_m
if start_total > end_total:
    # The interval wraps past midnight.
    end_total += 24 * 60
elapsed = end_total - start_total
print(elapsed, elapsed // 30)
ad1acbbd185b853a2069aa80ad94dacbbbebecd8 | 3,931 | py | Python | label_studio/users/forms.py | xhuaustc/label-studio | b787824a9e16f488a9b4cd2cef83e1ac526a64f3 | [
"Apache-2.0"
] | 1 | 2021-06-23T19:47:46.000Z | 2021-06-23T19:47:46.000Z | label_studio/users/forms.py | xhuaustc/label-studio | b787824a9e16f488a9b4cd2cef83e1ac526a64f3 | [
"Apache-2.0"
] | 1 | 2021-04-02T14:58:03.000Z | 2021-04-02T14:59:54.000Z | label_studio/users/forms.py | xhuaustc/label-studio | b787824a9e16f488a9b4cd2cef83e1ac526a64f3 | [
"Apache-2.0"
] | 1 | 2021-05-24T15:46:08.000Z | 2021-05-24T15:46:08.000Z | """This file and its contents are licensed under the Apache License 2.0. Please see the included NOTICE for copyright information and LICENSE for a copy of the license.
"""
import os
import logging
from datetime import datetime
from django import forms
from django.contrib import auth
from django.conf import settings
from users.models import User
PASS_MAX_LENGTH = 64
PASS_MIN_LENGTH = 8
USERNAME_MAX_LENGTH = 30
DISPLAY_NAME_LENGTH = 100
USERNAME_LENGTH_ERR = 'Please enter a username 30 characters or fewer in length'
DISPLAY_NAME_LENGTH_ERR = 'Please enter a display name 100 characters or fewer in length'
PASS_LENGTH_ERR = 'Please enter a password 8-12 characters in length'
INVALID_USER_ERROR = 'The email and password you entered don\'t match.'
logger = logging.getLogger(__name__)
class LoginForm(forms.Form):
    """Session-based login form for the app."""

    # When LDAP is enabled the login identifier is a username, not an email.
    email = (forms.CharField(label='User')
             if settings.USE_USERNAME_FOR_LOGIN
             else forms.EmailField(label='Email'))
    password = forms.CharField(widget=forms.PasswordInput())

    def clean(self, *args, **kwargs):
        cleaned_data = super(LoginForm, self).clean()
        email = cleaned_data.get('email', '').lower()
        password = cleaned_data.get('password', '')

        # Project-specific authentication hook first (e.g. LDAP) ...
        user = settings.USER_AUTH(User, email, password)
        # ... then the regular backend if the hook did not resolve a user.
        if user is None:
            user = auth.authenticate(email=email, password=password)

        if not (user and user.is_active):
            raise forms.ValidationError(INVALID_USER_ERROR)
        return {'user': user}
class ForgotPasswordForm(forms.Form):
    """Ask for the email address a password reset should be sent to."""

    email = forms.EmailField()

    def clean_email(self):
        normalized = self.cleaned_data['email'].lower()
        # Reject addresses that do not belong to any registered user.
        try:
            User.objects.get(email=normalized)
        except User.DoesNotExist:
            raise forms.ValidationError('A user matching that email address was not found')
        return normalized
class BasePasswordForm(forms.Form):
    """Base form providing a length-validated password field."""

    password = forms.CharField(
        max_length=PASS_MAX_LENGTH,
        error_messages={'required': PASS_LENGTH_ERR},
    )

    def clean_password(self):
        candidate = self.cleaned_data['password']
        if len(candidate) < PASS_MIN_LENGTH:
            raise forms.ValidationError(PASS_LENGTH_ERR)
        return candidate
class ResetPasswordForm(BasePasswordForm):
    """Password-reset form; reuses the validated password field unchanged.

    Kept as a distinct class so views can reference a purpose-named form.
    """
    pass
class UserSignupForm(forms.Form):
    """Signup form collecting a work email and a password."""

    email = forms.EmailField(label="Work Email", error_messages={'required': 'Invalid email'})
    password = forms.CharField(max_length=PASS_MAX_LENGTH,
                               error_messages={'required': PASS_LENGTH_ERR},
                               widget=forms.TextInput(attrs={'type': 'password'}))

    def clean_password(self):
        candidate = self.cleaned_data['password']
        if len(candidate) < PASS_MIN_LENGTH:
            raise forms.ValidationError(PASS_LENGTH_ERR)
        return candidate

    def clean_username(self):
        # 'username' may be absent from cleaned_data; only validate when set.
        username = self.cleaned_data.get('username')
        if username and User.objects.filter(username=username.lower()).exists():
            raise forms.ValidationError('User with username already exists')
        return username

    def clean_email(self):
        normalized = self.cleaned_data.get('email').lower()
        if normalized and User.objects.filter(email=normalized).exists():
            raise forms.ValidationError('User with email already exists')
        return normalized

    def save(self):
        """Create and return a new User from the validated form data."""
        data = self.cleaned_data
        return User.objects.create_user(data['email'].lower(), data['password'])
class UserProfileForm(forms.ModelForm):
    """ This form is used in profile account pages
    """
    # Only these User fields are editable from the profile page.
    class Meta:
        model = User
        fields = ('first_name', 'last_name', 'phone')
| 33.033613 | 168 | 0.673111 |
d3024e0044803c0cbabecacec28318750c866e8f | 6,244 | py | Python | newspaper/outputformatters.py | keeeb/newspaper | 39c5600a24ab5fb6ef6269faf5a6f78c3337704a | [
"Apache-2.0",
"MIT"
] | 10 | 2020-05-01T09:09:14.000Z | 2020-05-01T15:33:14.000Z | newspaper/outputformatters.py | enlivensystems/newspaper | 6454283c052405e2d0203b7e4163d7d3190e7501 | [
"Apache-2.0",
"MIT"
] | null | null | null | newspaper/outputformatters.py | enlivensystems/newspaper | 6454283c052405e2d0203b7e4163d7d3190e7501 | [
"Apache-2.0",
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""
Output formatting to text via lxml xpath nodes abstracted in this file.
"""
__title__ = 'newspaper'
__author__ = 'Lucas Ou-Yang'
__license__ = 'MIT'
__copyright__ = 'Copyright 2014, Lucas Ou-Yang'
from html import unescape
import logging
from .text import innerTrim
log = logging.getLogger(__name__)
class OutputFormatter(object):
    """Turns an extracted article DOM ("top node") into plain text and,
    optionally, cleaned HTML.  All tree mutations go through self.parser,
    an lxml-backed helper obtained from the config object.
    """
    def __init__(self, config):
        self.top_node = None
        self.config = config
        self.parser = self.config.get_parser()
        self.language = config.language
        self.stopwords_class = config.stopwords_class
    def update_language(self, meta_lang):
        '''Required to be called before the extraction process in some
        cases because the stopwords_class has to set incase the lang
        is not latin based
        '''
        if meta_lang:
            self.language = meta_lang
            self.stopwords_class = \
                self.config.get_stopwords_class(meta_lang)
    def get_top_node(self):
        # Accessor for the node set by get_formatted().
        return self.top_node
    def get_formatted(self, top_node):
        """Returns the body text of an article, and also the body article
        html if specified. Returns in (text, html) form
        """
        self.top_node = top_node
        html, text = '', ''
        self.remove_negativescores_nodes()
        # HTML must be captured *before* the destructive text-conversion
        # passes below mutate the tree.
        if self.config.keep_article_html:
            html = self.convert_to_html()
        self.links_to_text()
        self.add_newline_to_br()
        self.add_newline_to_li()
        self.replace_with_text()
        self.remove_empty_tags()
        self.remove_trailing_media_div()
        text = self.convert_to_text()
        # print(self.parser.nodeToString(self.get_top_node()))
        return (text, html)
    def convert_to_text(self):
        txts = []
        for node in list(self.get_top_node()):
            try:
                txt = self.parser.getText(node)
            except ValueError as err:  # lxml error
                log.info('%s ignoring lxml node error: %s', __title__, err)
                txt = None
            if txt:
                txt = unescape(txt)
                # r'\n' is the *literal* two-character marker planted by
                # add_newline_to_br/add_newline_to_li, not a newline.
                txt_lis = innerTrim(txt).split(r'\n')
                txt_lis = [n.strip(' ') for n in txt_lis]
                txts.extend(txt_lis)
        return '\n\n'.join(txts)
    def convert_to_html(self):
        cleaned_node = self.parser.clean_article_html(self.get_top_node())
        return self.parser.nodeToString(cleaned_node)
    def add_newline_to_br(self):
        # Replace <br> content with a literal '\n' marker (see convert_to_text).
        for e in self.parser.getElementsByTag(self.top_node, tag='br'):
            e.text = r'\n'
    def add_newline_to_li(self):
        # Append the marker to every list item except the last one.
        for e in self.parser.getElementsByTag(self.top_node, tag='ul'):
            li_list = self.parser.getElementsByTag(e, tag='li')
            for li in li_list[:-1]:
                li.text = self.parser.getText(li) + r'\n'
                for c in self.parser.getChildren(li):
                    self.parser.remove(c)
    def links_to_text(self):
        """Cleans up and converts any nodes that should be considered
        text into text.
        """
        self.parser.stripTags(self.get_top_node(), 'a')
    def remove_negativescores_nodes(self):
        """If there are elements inside our top node that have a
        negative gravity score, let's give em the boot.
        """
        gravity_items = self.parser.css_select(
            self.top_node, "*[gravityScore]")
        for item in gravity_items:
            score = self.parser.getAttribute(item, 'gravityScore')
            score = float(score) if score else 0
            if score < 1:
                item.getparent().remove(item)
    def replace_with_text(self):
        """
        Replace common tags with just text so we don't have any crazy
        formatting issues so replace <br>, <i>, <strong>, etc....
        With whatever text is inside them.
        code : http://lxml.de/api/lxml.etree-module.html#strip_tags
        """
        self.parser.stripTags(
            self.get_top_node(), 'b', 'strong', 'i', 'br', 'sup')
    def remove_empty_tags(self):
        """It's common in top_node to exit tags that are filled with data
        within properties but not within the tags themselves, delete them
        """
        all_nodes = self.parser.getElementsByTags(
            self.get_top_node(), ['*'])
        # Walk bottom-up so children are pruned before their parents are
        # re-evaluated as empty.
        all_nodes.reverse()
        for el in all_nodes:
            tag = self.parser.getTag(el)
            text = self.parser.getText(el)
            # Keep media containers (object/embed/img) even when textless.
            if (tag != 'br' or text != '\\r') \
                    and not text \
                    and len(self.parser.getElementsByTag(
                        el, tag='object')) == 0 \
                    and len(self.parser.getElementsByTag(
                        el, tag='embed')) == 0 \
                    and len(self.parser.getElementsByTag(
                        el, tag='img')) == 0 \
                    and tag not in {'img', }:
                self.parser.remove(el)
    def remove_trailing_media_div(self):
        """Punish the *last top level* node in the top_node if it's
        DOM depth is too deep. Many media non-content links are
        eliminated: "related", "loading gallery", etc. It skips removal if
        last top level node's class is one of NON_MEDIA_CLASSES.
        """
        NON_MEDIA_CLASSES = ('zn-body__read-all', )
        def get_depth(node, depth=1):
            """Computes depth of an lxml element via BFS, this would be
            in parser if it were used anywhere else besides this method
            """
            children = self.parser.getChildren(node)
            if not children:
                return depth
            max_depth = 0
            for c in children:
                e_depth = get_depth(c, depth + 1)
                if e_depth > max_depth:
                    max_depth = e_depth
            return max_depth
        top_level_nodes = self.parser.getChildren(self.get_top_node())
        # Too few nodes: removing the last one would cost real content.
        if len(top_level_nodes) < 3:
            return
        last_node = top_level_nodes[-1]
        last_node_class = self.parser.getAttribute(last_node, 'class')
        if last_node_class in NON_MEDIA_CLASSES:
            return
        if get_depth(last_node) >= 2:
            self.parser.remove(last_node)
| 34.882682 | 75 | 0.581839 |
3364c3f5423ce1207fa261d9ec7b95ca117a7683 | 602 | py | Python | flatline/migrations/0005_auto_20150801_2211.py | flatline-sot/backend | 2551fcc23bbd8d7c7ae49123af3b5e09e0fd49c6 | [
"MIT"
] | null | null | null | flatline/migrations/0005_auto_20150801_2211.py | flatline-sot/backend | 2551fcc23bbd8d7c7ae49123af3b5e09e0fd49c6 | [
"MIT"
] | null | null | null | flatline/migrations/0005_auto_20150801_2211.py | flatline-sot/backend | 2551fcc23bbd8d7c7ae49123af3b5e09e0fd49c6 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
    """Add a nullable ``flat`` ForeignKey (to ``flatline.Flat``) on both
    the ``Bill`` and ``User`` models."""

    dependencies = [
        ('flatline', '0004_auto_20150801_0442'),
    ]

    operations = [
        migrations.AddField(
            model_name='bill',
            name='flat',
            field=models.ForeignKey(blank=True, to='flatline.Flat', null=True),
        ),
        migrations.AddField(
            model_name='user',
            name='flat',
            field=models.ForeignKey(blank=True, to='flatline.Flat', null=True),
        ),
    ]
| 24.08 | 79 | 0.581395 |
91f45538afa3b794621cc7c469da195bbca2956a | 627 | py | Python | samples/cordic/cordic_golden.py | hj424/heterocl | e51b8f7f65ae6ad55c0c2426ab7192c3d8f6702b | [
"Apache-2.0"
] | 236 | 2019-05-19T01:48:11.000Z | 2022-03-31T09:03:54.000Z | samples/cordic/cordic_golden.py | hj424/heterocl | e51b8f7f65ae6ad55c0c2426ab7192c3d8f6702b | [
"Apache-2.0"
] | 248 | 2019-05-17T19:18:36.000Z | 2022-03-30T21:25:47.000Z | samples/cordic/cordic_golden.py | hj424/heterocl | e51b8f7f65ae6ad55c0c2426ab7192c3d8f6702b | [
"Apache-2.0"
] | 85 | 2019-05-17T20:09:27.000Z | 2022-02-28T20:19:00.000Z | import numpy as np
# Golden reference output for the CORDIC sample: 16 rows of value pairs
# against which computed results are compared.
# NOTE(review): the meaning of individual rows (which angles/modes they
# correspond to) is not evident from this file -- confirm against the
# test that consumes this array.
golden = np.array([
    [100.0, 100.0],
    [206.226840616, 179.610387213],
    [1190.25124092, 1197.15702025],
    [1250.76639667, 1250.3933971],
    [1261.76760093, 1250.17718583],
    [1237.4846285, 1237.56490579],
    [1273.56730356, 1266.82141705],
    [1272.899992, 1259.92589118],
    [1.17000308922e-06, 1.21115462165e-06],
    [4.69048419035e-08, 5.61093645301e-08],
    [1.50244060584e-09, 2.44292250731e-09],
    [8.47391624349e-11, 1.15593790738e-10],
    [5.10649970307e-12, 4.80114236959e-12],
    [8.34326950279e-13, 4.1368839091e-13],
    [3.66142109259e-14, 4.95319932219e-14],
    [8.20801944862e-15, 4.94154683061e-14]])
67500c946c0f3c8515c3f51853da872b0fda5f7c | 9,812 | py | Python | roam_to_git/scrapping.py | bjornrud/roam-to-git | 92555cf9d5185a42cfa90a9e7e6b7bf0fd8bddb0 | [
"MIT"
] | 1 | 2020-10-15T05:59:07.000Z | 2020-10-15T05:59:07.000Z | roam_to_git/scrapping.py | bjornrud/roam-to-git | 92555cf9d5185a42cfa90a9e7e6b7bf0fd8bddb0 | [
"MIT"
] | null | null | null | roam_to_git/scrapping.py | bjornrud/roam-to-git | 92555cf9d5185a42cfa90a9e7e6b7bf0fd8bddb0 | [
"MIT"
] | 1 | 2020-10-23T20:17:04.000Z | 2020-10-23T20:17:04.000Z | import asyncio
import atexit
import os
import sys
from pathlib import Path
from typing import Optional
import psutil
import pyppeteer.connection
from loguru import logger
from pyppeteer.page import Page
def patch_pyppeteer():
    """Disable websocket keep-alive pings in pyppeteer.

    Works around https://github.com/miyakogi/pyppeteer/issues/178, where
    ping timeouts kill long-running Chrome sessions.
    """
    import pyppeteer.connection

    wrapped_connect = pyppeteer.connection.websockets.client.connect

    def connect_without_ping(*args, **kwargs):
        # Force both ping options off before delegating to the real connect.
        kwargs['ping_interval'] = None
        kwargs['ping_timeout'] = None
        return wrapped_connect(*args, **kwargs)

    pyppeteer.connection.websockets.client.connect = connect_without_ping
async def get_text(page, b, norm=True):
    """Return the text content of element *b* on *page*.

    When *norm* is true the text is lowercased and stripped of
    surrounding whitespace.
    """
    content = await page.evaluate('(element) => element.textContent', b)
    return content.lower().strip() if norm else content
class Config:
    """Scraping configuration read from the environment.

    Credentials come from ``ROAMRESEARCH_USER`` / ``ROAMRESEARCH_PASSWORD``;
    the database name comes from the *database* argument or, when falsy,
    from ``ROAMRESEARCH_DATABASE``.
    """

    def __init__(self, database: Optional[str], debug: bool, sleep_duration: float = 2.):
        env = os.environ
        self.user = env["ROAMRESEARCH_USER"]
        self.password = env["ROAMRESEARCH_PASSWORD"]
        assert self.user
        assert self.password

        # Fall back to the environment only when no database was given
        # explicitly (the conditional expression evaluates lazily).
        self.database: Optional[str] = database if database else env["ROAMRESEARCH_DATABASE"]
        assert self.database, "Please define the Roam database you want to backup."
        self.debug = debug
        self.sleep_duration = sleep_duration
async def download_rr_archive(output_type: str,
                              output_directory: Path,
                              config: Config,
                              slow_motion=10,
                              ):
    """Launch a browser and download one Roam Research export archive.

    :param output_type: "markdown" or "json", forwarded to the export dialog
    :param output_directory: directory where the archive will be downloaded
    :param config: credentials/database/debug settings
    :param slow_motion: pyppeteer ``slowMo`` delay (ms) between browser actions
    """
    logger.debug("Creating browser")
    browser = await pyppeteer.launch(devtools=config.debug,
                                     slowMo=slow_motion,
                                     autoClose=False,
                                     )
    if config.debug:
        # We want the browser to stay open for debugging the interface,
        # hence no try/finally close in this branch.
        pages = await browser.pages()
        document = pages[0]
        return await _download_rr_archive(document, output_type, output_directory, config)
    try:
        pages = await browser.pages()
        document = pages[0]
        return await _download_rr_archive(document, output_type, output_directory, config)
    except (KeyboardInterrupt, SystemExit):
        # NOTE(review): the browser is closed here AND again in finally
        # (double close) -- presumably pyppeteer tolerates this; confirm.
        logger.debug("Closing browser on interrupt {}", output_type)
        await browser.close()
        logger.debug("Closed browser {}", output_type)
        raise
    finally:
        logger.debug("Closing browser {}", output_type)
        await browser.close()
        logger.debug("Closed browser {}", output_type)
async def _download_rr_archive(document: Page,
                               output_type: str,
                               output_directory: Path,
                               config: Config,
                               ):
    """Download an archive in RoamResearch.

    Drives the already-open *document* page: signs in, opens the database,
    triggers "Export All" with the requested format, then polls the
    download directory until a ``.zip`` appears.

    :param output_type: Download JSON or Markdown
    :param output_directory: Directory where to stock the outputs
    """
    if not config.debug:
        # Route Chrome downloads into our output directory via CDP.
        logger.debug("Configure downloads to {}", output_directory)
        cdp = await document.target.createCDPSession()
        await cdp.send('Page.setDownloadBehavior',
                       {'behavior': 'allow', 'downloadPath': str(output_directory)})

    await signin(document, config, sleep_duration=config.sleep_duration)
    if config.database:
        await go_to_database(document, config.database)
    logger.debug("Wait for interface to load")
    dot_button = None
    for _ in range(100):
        # Starting is a little bit slow, so we wait for the button that signal it's ok
        await asyncio.sleep(config.sleep_duration)
        dot_button = await document.querySelector(".bp3-icon-more")
        if dot_button is not None:
            break

        # If we have multiple databases, we will be stuck. Let's detect that.
        await asyncio.sleep(config.sleep_duration)
        strong = await document.querySelector("strong")
        if strong:
            if "database's you are an admin of" == await get_text(document, strong):
                logger.error(
                    "You seems to have multiple databases. Please select it with the option "
                    "--database")
                sys.exit(1)
    assert dot_button is not None, "All roads leads to Roam, but that one is too long. Try " \
                                   "again when Roam servers are faster."

    # Click on something empty to remove the eventual popup
    # "Sync Quick Capture Notes with Workspace"
    await document.mouse.click(0, 0)

    await dot_button.click()

    logger.debug("Launch download popup")
    divs_pb3 = await document.querySelectorAll(".bp3-fill")
    export_all, = [b for b in divs_pb3 if await get_text(document, b) == 'export all']
    await export_all.click()
    await asyncio.sleep(config.sleep_duration)

    async def get_dropdown_button():
        # Returns the format-selection button of the export dialog and its
        # current label ("markdown" or "json").
        dropdown_button = await document.querySelector(".bp3-dialog .bp3-button-text")
        assert dropdown_button is not None
        dropdown_button_text = await get_text(document, dropdown_button)
        # Defensive check if the interface change
        assert dropdown_button_text in ["markdown", "json"], dropdown_button_text
        return dropdown_button, dropdown_button_text

    logger.debug("Checking download type")
    button, button_text = await get_dropdown_button()

    if button_text != output_type:
        logger.debug("Changing output type to {}", output_type)
        await button.click()
        await asyncio.sleep(config.sleep_duration)
        output_type_elem, = await document.querySelectorAll(".bp3-text-overflow-ellipsis")
        await output_type_elem.click()

        # defensive check
        await asyncio.sleep(config.sleep_duration)
        _, button_text_ = await get_dropdown_button()
        assert button_text_ == output_type, (button_text_, output_type)

    logger.debug("Downloading output of type {}", output_type)
    buttons = await document.querySelectorAll('button')
    export_all_confirm, = [b for b in buttons if await get_text(document, b) == 'export all']
    await export_all_confirm.click()

    logger.debug("Wait download of {} to {}", output_type, output_directory)
    if config.debug:
        # No way to check because download location is not specified
        return
    # Poll up to ~10 minutes for the zip to land in the output directory.
    for i in range(1, 60 * 10):
        await asyncio.sleep(1)
        if i % 60 == 0:
            logger.debug("Keep waiting for {}, {}s elapsed", output_type, i)
        for file in output_directory.iterdir():
            if file.name.endswith(".zip"):
                logger.debug("File {} found for {}", file, output_type)
                await asyncio.sleep(1)
                return
    logger.debug("Waiting too long {}")
    # NOTE(review): FileNotFoundError does not format these "{}" args; the
    # message will carry literal braces -- confirm whether formatting was
    # intended here.
    raise FileNotFoundError("Impossible to download {} in {}", output_type, output_directory)
async def signin(document, config: Config, sleep_duration=1.):
    """Sign-in into Roam.

    :param document: pyppeteer page to drive
    :param config: provides the user/password credentials
    :param sleep_duration: pause (seconds) after navigation and after submit
    """
    logger.debug("Opening signin page")
    await document.goto('https://roamresearch.com/#/signin')
    await asyncio.sleep(sleep_duration)

    logger.debug("Fill email '{}'", config.user)
    email_elem = await document.querySelector("input[name='email']")
    await email_elem.click()
    await email_elem.type(config.user)

    logger.debug("Fill password")
    passwd_elem = await document.querySelector("input[name='password']")
    await passwd_elem.click()
    await passwd_elem.type(config.password)

    logger.debug("Click on sign-in")
    buttons = await document.querySelectorAll('button')
    # There must be exactly one button labeled 'sign in' on the page.
    signin_confirm, = [b for b in buttons if await get_text(document, b) == 'sign in']
    await signin_confirm.click()
    await asyncio.sleep(sleep_duration)
async def go_to_database(document, database):
    """Open the application page of *database* in the given browser page."""
    target_url = 'https://roamresearch.com/#/app/{}'.format(database)
    logger.debug(f"Load database from url '{target_url}'")
    await document.goto(target_url)
def _kill_child_process(timeout=50):
    """Terminate, then kill, every child process of the current process.

    Children are first asked politely via ``terminate()``; whatever is
    still alive after *timeout* seconds gets ``kill()``. Processes that
    disappear in between are ignored.
    """
    children = psutil.Process().children(recursive=True)
    if not children:
        return
    logger.debug("Terminate child process {}", children)
    for child in children:
        try:
            child.terminate()
        except psutil.NoSuchProcess:
            pass
    _, survivors = psutil.wait_procs(children, timeout=timeout)
    if not survivors:
        return
    logger.warning(f"Kill child process {survivors} that was still alive after "
                   f"'timeout={timeout}' from 'terminate()' command")
    for child in survivors:
        try:
            child.kill()
        except psutil.NoSuchProcess:
            pass
def scrap(markdown_zip_path: Path, json_zip_path: Path, config: Config):
    """Download both the markdown and json Roam exports.

    In normal mode both downloads run concurrently; in debug mode they run
    sequentially and the results are not used (browsers stay open).

    :param markdown_zip_path: directory receiving the markdown zip
    :param json_zip_path: directory receiving the json zip
    :param config: credentials/database/debug settings
    """
    # Just for easier run from the CLI
    markdown_zip_path = Path(markdown_zip_path)
    json_zip_path = Path(json_zip_path)

    tasks = [download_rr_archive("markdown", Path(markdown_zip_path), config=config),
             download_rr_archive("json", Path(json_zip_path), config=config),
             ]
    # Register to always kill child process when the script close, to not have zombie process.
    # Because of https://github.com/miyakogi/pyppeteer/issues/274 without this patch it does happen
    # a lot.
    if not config.debug:
        atexit.register(_kill_child_process)
    if config.debug:
        for task in tasks:
            # Run sequentially for easier debugging
            asyncio.get_event_loop().run_until_complete(task)
        logger.warning("Exiting without updating the git repository, "
                       "because we can't get the downloads with the option --debug")
    else:
        asyncio.get_event_loop().run_until_complete(asyncio.gather(*tasks))
    logger.debug("Scrapping finished")
97851323a01ce3f175239da2a945be28cd5c3116 | 532 | py | Python | source/DSM/struct.py | Leosocy/StructSlender | 9a25d53c9ac5924e683d9cc76a8a6326d64a4667 | [
"MIT"
] | 2 | 2017-10-25T15:16:33.000Z | 2017-11-08T14:41:48.000Z | source/DSM/struct.py | Leosocy/StructSlender | 9a25d53c9ac5924e683d9cc76a8a6326d64a4667 | [
"MIT"
] | null | null | null | source/DSM/struct.py | Leosocy/StructSlender | 9a25d53c9ac5924e683d9cc76a8a6326d64a4667 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
struct
"""
import sys
from StructType import *
class Struct(object):
    """
    A class that corresponds to the properties of a structure.
    """

    def __init__(self, header, body, tail):
        # Raw source text of the struct; filled in later by the caller.
        self.orig_struct_str = ""
        # Structural type info derived from the header and tail lines.
        # NOTE(review): `body` is currently unused here -- member variables
        # are accumulated later in `_variables`; confirm this is intended.
        self.struct_type = StructType(header, tail)
        self._variables = []

    def __str__(self):
        # BUG FIX: this previously interpolated `self.struct_name`, an
        # attribute that is never set anywhere, so printing a Struct always
        # raised AttributeError. Use the stored struct_type instead.
        return "Orig <%s>, StructName <%s>, Variables <%s>" % (
            self.orig_struct_str, self.struct_type, self._variables)
| 21.28 | 120 | 0.595865 |
c619910ace1c2e4f0e7abb7065bb44304f9b1e3e | 570 | py | Python | src/utils/aws.py | ResearchHub/ResearchHub-Backend-Open | d36dca33afae2d442690694bb2ab17180d84bcd3 | [
"MIT"
] | 18 | 2021-05-20T13:20:16.000Z | 2022-02-11T02:40:18.000Z | src/utils/aws.py | ResearchHub/ResearchHub-Backend-Open | d36dca33afae2d442690694bb2ab17180d84bcd3 | [
"MIT"
] | 109 | 2021-05-21T20:14:23.000Z | 2022-03-31T20:56:10.000Z | src/utils/aws.py | ResearchHub/ResearchHub-Backend-Open | d36dca33afae2d442690694bb2ab17180d84bcd3 | [
"MIT"
] | 4 | 2021-05-17T13:47:53.000Z | 2022-02-12T10:48:21.000Z | from urllib.parse import urlparse
from researchhub.settings import AWS_ACCESS_KEY_ID, AWS_SECRET_ACCESS_KEY
def get_s3_url(bucket, key, with_credentials=False):
    """Build an ``s3://`` URL for *bucket* and *key*.

    When *with_credentials* is exactly ``True``, the configured AWS key
    pair is embedded as ``user:password@`` ahead of the bucket.
    """
    scheme = 's3://'
    if with_credentials is True:
        credentials = f'{AWS_ACCESS_KEY_ID}:{AWS_SECRET_ACCESS_KEY}@'
        return f'{scheme}{credentials}{bucket}{key}'
    return f'{scheme}{bucket}{key}'
def http_to_s3(url, with_credentials=False):
    """Convert a virtual-hosted S3 HTTP(S) URL into its ``s3://`` form.

    The bucket name is the first dot-separated label of the host; the URL
    path becomes the object key.
    """
    parts = urlparse(url)
    bucket_name = parts.netloc.split('.', maxsplit=1)[0]
    return get_s3_url(bucket_name, parts.path, with_credentials=with_credentials)
| 27.142857 | 76 | 0.698246 |
94db455fe5f1a54071b4cc01fa0d6aa5c3880d7b | 2,370 | py | Python | nonebot/plugin.py | SDchao/nonebot | 145d1787143584895375231210e30fdd3003d5bf | [
"MIT"
] | 8 | 2018-08-21T07:26:30.000Z | 2019-04-07T07:23:32.000Z | nonebot/plugin.py | coffiasd/nonebot | c02b9a4ccf61126aa81e3f86b06b44685461af09 | [
"MIT"
] | 7 | 2020-07-17T11:47:58.000Z | 2022-02-26T01:39:17.000Z | nonebot/plugin.py | coffiasd/nonebot | c02b9a4ccf61126aa81e3f86b06b44685461af09 | [
"MIT"
] | 1 | 2019-02-25T11:29:53.000Z | 2019-02-25T11:29:53.000Z | import importlib
import os
import re
from typing import Any, Set, Optional
from .log import logger
class Plugin:
    """Lightweight record describing a loaded plugin module."""

    __slots__ = ('module', 'name', 'usage')

    def __init__(self, module: Any,
                 name: Optional[str] = None,
                 usage: Optional[Any] = None):
        # The imported module object itself.
        self.module = module
        # Human-readable plugin name (taken from __plugin_name__, if any).
        self.name = name
        # Usage/help text (taken from __plugin_usage__, if any).
        self.usage = usage
# Global registry of every plugin successfully loaded via load_plugin().
_plugins: Set[Plugin] = set()
def load_plugin(module_name: str) -> bool:
    """Import *module_name* and register it as a plugin.

    :param module_name: dotted name of the module to import
    :return: True when the import succeeded, False otherwise
    """
    try:
        mod = importlib.import_module(module_name)
        plugin = Plugin(mod,
                        getattr(mod, '__plugin_name__', None),
                        getattr(mod, '__plugin_usage__', None))
        _plugins.add(plugin)
        logger.info(f'Succeeded to import "{module_name}"')
        return True
    except Exception as e:
        logger.error(f'Failed to import "{module_name}", error: {e}')
        logger.exception(e)
        return False
def load_plugins(plugin_dir: str, module_prefix: str) -> int:
    """Import every eligible module or package found in *plugin_dir*.

    A file is eligible when it ends in ``.py`` and is not prefixed with an
    underscore; a directory when it is not underscore-prefixed and contains
    an ``__init__.py``. Each eligible entry is imported under
    ``<module_prefix>.<basename>``.

    :param plugin_dir: plugin directory to search
    :param module_prefix: module prefix used while importing
    :return: number of plugins successfully loaded
    """
    loaded = 0
    for entry in os.listdir(plugin_dir):
        full_path = os.path.join(plugin_dir, entry)
        if os.path.isfile(full_path):
            if entry.startswith('_') or not entry.endswith('.py'):
                continue
        elif os.path.isdir(full_path):
            if entry.startswith('_') or not os.path.exists(
                    os.path.join(full_path, '__init__.py')):
                continue
        m = re.match(r'([_A-Z0-9a-z]+)(.py)?', entry)
        if not m:
            continue
        if load_plugin(f'{module_prefix}.{m.group(1)}'):
            loaded += 1
    return loaded
def load_builtin_plugins() -> int:
    """Load the plugins bundled inside the "nonebot" package itself.

    :return: number of built-in plugins successfully loaded
    """
    builtin_dir = os.path.join(os.path.dirname(__file__), 'plugins')
    return load_plugins(builtin_dir, 'nonebot.plugins')
def get_loaded_plugins() -> Set[Plugin]:
    """
    Get all plugins loaded.

    Note: this returns the live registry set, not a copy.

    :return: a set of Plugin objects
    """
    return _plugins
| 27.241379 | 69 | 0.609705 |
ed0bc0c23cc44b5a52570fb3016d9c1d63a93c68 | 866 | py | Python | gameSU.py | robinankele/privacy-games | 3a12194bbfb511ba4303d5cd1d37a7c1982622fa | [
"MIT"
] | null | null | null | gameSU.py | robinankele/privacy-games | 3a12194bbfb511ba4303d5cd1d37a7c1982622fa | [
"MIT"
] | null | null | null | gameSU.py | robinankele/privacy-games | 3a12194bbfb511ba4303d5cd1d37a7c1982622fa | [
"MIT"
] | null | null | null | #!/usr/bin/env python
"""
Author: Robin Ankele <robin.ankele@cs.ox.ac.uk>
http://users.ox.ac.uk/~kell4062
Copyright (c) 2017, University of Oxford
All rights reserved.
"""
from gameBasic import G
class G_SU(G):
    """Strong-unlinkability (SU) game built on top of the basic game ``G``."""

    def __init__(self):
        G.__init__(self)

    def __finialize__(self):
        # (sic) name kept as-is to match the base class' __finialize__ hook.
        G.__finialize__(self)

    def oracle_U_f(self):
        # Expose the base game's U_f oracle to the adversary.
        return G._U_f(self)

    def SU(self, u_x, a_x, p_x):
        """Run one round of the SU experiment for the given inputs."""
        G.input(self, u_x, a_x, p_x)
        e_x_, b_x = G.view(self)
        # Placeholder: perform adversarial magic to break the notion.
        e_x = 0
        e_y = 0
        # BUG FIX: `validate` is a method and was called as a bare name
        # (NameError at runtime); call it on self.
        return self.validate(e_x, e_y, u_x)

    def validate(self, e_x, e_y, u_x):
        """Check that both guesses satisfy the game predicate ``_f``."""
        # BUG FIX: Python booleans are `True`/`False`; the original
        # `return true` / `return false` raised NameError.
        if G._f(self, e_x, u_x) and G._f(self, e_y, u_x):
            return True
        return False
if __name__ == '__main__':
    # BUG FIX: `sys` was never imported in this module, and
    # `print >> sys.stderr, ...` is Python-2-only syntax (a SyntaxError on
    # Python 3). Write to stderr portably instead.
    import sys
    sys.stderr.write("This is a library and should not be executed standalone\n")
    sys.exit(1)
| 20.139535 | 80 | 0.622402 |
db1c0b5919b60d2247d98fa2109fd4da08a122a6 | 7,054 | py | Python | tensorflow_io/core/python/ops/arrow_io_tensor_ops.py | doc22940/io | 0b7f5de76df0d0426d1d9041b2e0a7d3f355da38 | [
"Apache-2.0"
] | null | null | null | tensorflow_io/core/python/ops/arrow_io_tensor_ops.py | doc22940/io | 0b7f5de76df0d0426d1d9041b2e0a7d3f355da38 | [
"Apache-2.0"
] | null | null | null | tensorflow_io/core/python/ops/arrow_io_tensor_ops.py | doc22940/io | 0b7f5de76df0d0426d1d9041b2e0a7d3f355da38 | [
"Apache-2.0"
] | null | null | null | # Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""FeatherIOTensor"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import uuid
import tensorflow as tf
from tensorflow_io.core.python.ops import io_tensor_ops
from tensorflow_io.core.python.ops import core_ops
class _ArrowIOTensorComponentFunction(io_tensor_ops._IOTensorComponentFunction):  # pylint: disable=protected-access
    """_ArrowIOTensorComponentFunction will translate call"""
    def __init__(self,
                 function,
                 resource,
                 component, column_index, shape, dtype):
        # Adds the Arrow column index on top of the generic component
        # function so reads can address the right column.
        # NOTE(review): self._function/_resource/_shape/_dtype/_length are
        # set by the base class constructor -- confirm in io_tensor_ops.
        super(_ArrowIOTensorComponentFunction, self).__init__(
            function, resource, component, shape, dtype)
        self._column_index = column_index

    def __call__(self, start, stop):
        # Normalize start/stop exactly like list slicing would (handles
        # None and negative indices, clamps to self._length), then
        # delegate to the underlying read kernel for this column.
        start, stop, _ = slice(start, stop).indices(self._length)
        return self._function(
            self._resource,
            start=start, stop=stop,
            column_index=self._column_index,
            shape=self._shape, dtype=self._dtype)
def _extract_table_arrays(table):
"""Get buffer info from arrays in table, outputs are padded so dim sizes
are rectangular.
Args:
table: A pyarrow.Table
Return:
tuple of:
array_buffer_addrs: 3-dim list of buffer addresses where dims are
columns, chunks, buffer addresses
array_buffer_sizes: 3-dim list of buffer sizes, follows addrs layout
array_lengths: 3-dim list of array lengths where dims are columns,
chunks, length of array followed by child array lengths
"""
array_buffer_addrs = []
array_buffer_sizes = []
array_lengths = []
max_num_bufs = 0
max_num_chunks = 0
max_num_lengths = 0
# Iterate over each column in the Table
for chunked_array in table:
array_chunk_buffer_addrs = []
array_chunk_buffer_sizes = []
array_chunk_lengths = []
# Iterate over each data chunk in the column
for arr in chunked_array.iterchunks():
bufs = arr.buffers()
array_chunk_buffer_addrs.append(
[b.address if b is not None else 0 for b in bufs])
array_chunk_buffer_sizes.append(
[b.size if b is not None else 0 for b in bufs])
# Get the total length of the array followed by lenghts of children
array_and_child_lengths = [len(arr)]
# Check if has child array, e.g. list type
if arr.type.num_children > 0:
if hasattr(arr, 'values'):
array_and_child_lengths.append(len(arr.values))
else:
raise ValueError("Only nested type currently supported is ListType")
array_chunk_lengths.append(array_and_child_lengths)
if len(bufs) > max_num_bufs:
max_num_bufs = len(bufs)
if len(array_and_child_lengths) > max_num_lengths:
max_num_lengths = len(array_and_child_lengths)
array_buffer_addrs.append(array_chunk_buffer_addrs)
array_buffer_sizes.append(array_chunk_buffer_sizes)
array_lengths.append(array_chunk_lengths)
if len(array_chunk_lengths) > max_num_chunks:
max_num_chunks = len(array_chunk_lengths)
# Pad buffer addrs, sizes and array lengths so inputs are rectangular
num_columns = len(array_buffer_sizes)
for i in range(num_columns):
# pad chunk list with empty lists that will be padded with null bufs
if len(array_buffer_sizes[i]) < max_num_chunks:
array_buffer_sizes[i].extend([[]] * (max_num_chunks -
len(array_buffer_sizes[i])))
if len(array_lengths[i]) < max_num_chunks:
array_lengths[i].extend([-1] * (max_num_chunks - len(array_lengths[i])))
num_chunks = len(array_buffer_sizes[i])
for j in range(num_chunks):
# pad buffer addr, size, and array length lists
if len(array_buffer_sizes[i][j]) < max_num_bufs:
array_buffer_sizes[i][j].extend([-1] * (max_num_bufs -
len(array_buffer_sizes[i][j])))
array_buffer_addrs[i][j].extend([0] * (max_num_bufs -
len(array_buffer_addrs[i][j])))
if len(array_lengths[i][j]) < max_num_lengths:
array_lengths[i][j].extend([-1] * (max_num_lengths -
len(array_lengths[i][j])))
return array_buffer_addrs, array_buffer_sizes, array_lengths
class ArrowIOTensor(io_tensor_ops._TableIOTensor):  # pylint: disable=protected-access
    """IOTensor view over an in-memory pyarrow.Table, one element per column."""

    #=============================================================================
    # Constructor (private)
    #=============================================================================
    def __init__(self,
                 table,
                 internal=False):
        with tf.name_scope("ArrowIOTensor") as scope:
            # Hold reference to table and schema buffer for life of this op
            # (the kernel reads the Arrow buffers by raw address, so Python
            # must keep them alive).
            self._table = table
            self._schema_buffer = table.schema.serialize()

            # Get buffer addresses as long ints
            schema_buffer_addr = self._schema_buffer.address
            schema_buffer_size = self._schema_buffer.size
            array_buffer_addrs, array_buffer_sizes, array_lengths = \
                _extract_table_arrays(table)

            # Create the Arrow readable resource
            resource = core_ops.io_arrow_readable_from_memory_init(
                schema_buffer_addr,
                schema_buffer_size,
                array_buffer_addrs,
                array_buffer_sizes,
                array_lengths,
                container=scope,
                # uuid keeps the shared resource name unique per instance
                shared_name="pyarrow.Table%s/%s" % (
                    table.schema.names, uuid.uuid4().hex))

            # Create a BaseIOTensor for each column
            elements = []
            columns = table.column_names
            for column_index, column in enumerate(columns):
                shape, dtype = core_ops.io_arrow_readable_spec(resource, column_index)
                shape = tf.TensorShape(shape.numpy())
                dtype = tf.as_dtype(dtype.numpy())
                spec = tf.TensorSpec(shape, dtype, column)
                function = _ArrowIOTensorComponentFunction(  # pylint: disable=protected-access
                    core_ops.io_arrow_readable_read,
                    resource, column, column_index, shape, dtype)
                elements.append(
                    io_tensor_ops.BaseIOTensor(
                        spec, function, internal=internal))

            spec = tuple([e.spec for e in elements])
            super(ArrowIOTensor, self).__init__(
                spec, columns, elements, internal=internal)
| 39.853107 | 115 | 0.653388 |
84259b40d00a0f2a5489a624a002aa845f433723 | 16,916 | py | Python | kinto/tests/core/resource/test_events.py | swhgoon/kinto | 10001d44bb08e4fbc74da31a41a4eaa461e0fd7f | [
"Apache-2.0"
] | null | null | null | kinto/tests/core/resource/test_events.py | swhgoon/kinto | 10001d44bb08e4fbc74da31a41a4eaa461e0fd7f | [
"Apache-2.0"
] | null | null | null | kinto/tests/core/resource/test_events.py | swhgoon/kinto | 10001d44bb08e4fbc74da31a41a4eaa461e0fd7f | [
"Apache-2.0"
] | 1 | 2020-07-15T04:27:08.000Z | 2020-07-15T04:27:08.000Z | import mock
import uuid
from contextlib import contextmanager
import webtest
from pyramid.config import Configurator
from kinto.core.events import (ResourceChanged, AfterResourceChanged,
ResourceRead, AfterResourceRead, ACTIONS)
from kinto.core.storage.exceptions import BackendError
from kinto.tests.core.testapp import main as make_testapp
from kinto.tests.core.support import unittest, BaseWebTest, get_request_class
from kinto.core import statsd
@contextmanager
def notif_broken(app, event_cls):
    """Temporarily replace ``app.registry.notify`` so that notifying an
    instance of *event_cls* raises, while any other event is forwarded to
    the original notifier. The original notifier is restored on exit.
    """
    original_notify = app.registry.notify

    def failing_notify(event):
        if isinstance(event, event_cls):
            raise Exception("boom")
        return original_notify(event)

    app.registry.notify = failing_notify
    yield
    app.registry.notify = original_notify
class BaseEventTest(BaseWebTest):
    """Web-test base that records every subscribed event into self.events.

    Subclasses declare the event classes they care about in ``subscribed``;
    ``make_app`` wires ``self.listener`` to each of them.
    """
    subscribed = tuple()

    def setUp(self):
        super(BaseEventTest, self).setUp()
        self.events = []
        self.body = {'data': {'name': 'de Paris'}}

    def tearDown(self):
        self.events = []
        super(BaseEventTest, self).tearDown()

    def listener(self, event):
        # Keep the raw event so tests can inspect payload/impacted_records.
        self.events.append(event)

    def make_app(self, settings=None):
        settings = self.get_app_settings(settings)
        self.config = Configurator(settings=settings)
        # Subscribers must be registered before commit() for Pyramid to
        # take them into account.
        for event_cls in self.subscribed:
            self.config.add_subscriber(self.listener, event_cls)
        self.config.commit()
        app = make_testapp(config=self.config)
        app = webtest.TestApp(app)
        app.RequestClass = get_request_class(self.api_prefix)
        return app
class ResourceReadTest(BaseEventTest, unittest.TestCase):
    """ResourceRead must fire on record GET, collection GET, and on a POST
    whose id already exists (which behaves as a read)."""
    subscribed = (ResourceRead,)

    def test_get_sends_read_event(self):
        resp = self.app.post_json(self.collection_url, self.body,
                                  headers=self.headers, status=201)
        record_id = resp.json['data']['id']
        record_url = self.get_item_url(record_id)
        self.app.get(record_url, headers=self.headers)
        # Only the GET produces an event (we are not subscribed to writes).
        self.assertEqual(len(self.events), 1)
        self.assertEqual(self.events[0].payload['action'], ACTIONS.READ.value)
        self.assertEqual(len(self.events[0].read_records), 1)

    def test_collection_get_sends_read_event(self):
        self.app.get(self.collection_url, headers=self.headers)
        self.assertEqual(len(self.events), 1)
        self.assertEqual(self.events[0].payload['action'], ACTIONS.READ.value)
        # Empty collection: a read event with no read records.
        self.assertEqual(len(self.events[0].read_records), 0)

    def test_post_sends_read_if_id_already_exists(self):
        resp = self.app.post_json(self.collection_url, self.body,
                                  headers=self.headers, status=201)
        record = resp.json['data']
        body = dict(self.body)
        body['data']['id'] = record['id']

        # a second post with the same record id
        self.app.post_json(self.collection_url, body, headers=self.headers,
                           status=200)
        self.assertEqual(len(self.events), 1)
        self.assertEqual(self.events[0].payload['action'], ACTIONS.READ.value)
class ResourceChangedTest(BaseEventTest, unittest.TestCase):
    """ResourceChanged must fire with the right action (create/update/delete)
    for every write verb, and a failing subscriber must fail the request."""
    subscribed = (ResourceChanged,)

    def test_post_sends_create_action(self):
        self.app.post_json(self.collection_url, self.body,
                           headers=self.headers, status=201)
        self.assertEqual(len(self.events), 1)
        self.assertEqual(self.events[0].payload['action'],
                         ACTIONS.CREATE.value)

    def test_put_sends_create_action(self):
        body = dict(self.body)
        body['data']['id'] = record_id = str(uuid.uuid4())
        record_url = self.get_item_url(record_id)
        self.app.put_json(record_url, body,
                          headers=self.headers, status=201)
        self.assertEqual(len(self.events), 1)
        self.assertEqual(self.events[0].payload['action'],
                         ACTIONS.CREATE.value)

    def test_not_triggered_on_failed_put(self):
        record_id = str(uuid.uuid4())
        record_url = self.get_item_url(record_id)
        self.app.put_json(record_url, self.body, headers=self.headers)
        # Stale If-Match precondition: the 412 must not emit a new event.
        headers = self.headers.copy()
        headers['If-Match'] = '"12345"'
        self.app.put_json(record_url, self.body, headers=headers, status=412)
        self.assertEqual(len(self.events), 1)
        self.assertEqual(self.events[0].payload['action'],
                         ACTIONS.CREATE.value)

    def test_patch_sends_update_action(self):
        resp = self.app.post_json(self.collection_url, self.body,
                                  headers=self.headers, status=201)
        record = resp.json['data']
        record_url = self.get_item_url(record['id'])

        self.app.patch_json(record_url, self.body, headers=self.headers,
                            status=200)
        self.assertEqual(len(self.events), 2)
        self.assertEqual(self.events[0].payload['action'],
                         ACTIONS.CREATE.value)
        self.assertEqual(self.events[1].payload['action'],
                         ACTIONS.UPDATE.value)

    def test_put_sends_update_action_if_record_exists(self):
        body = dict(self.body)
        body['data']['id'] = record_id = str(uuid.uuid4())
        record_url = self.get_item_url(record_id)
        self.app.put_json(record_url, body,
                          headers=self.headers, status=201)

        body['data']['more'] = 'stuff'
        self.app.put_json(record_url, body,
                          headers=self.headers, status=200)
        self.assertEqual(len(self.events), 2)
        self.assertEqual(self.events[0].payload['action'],
                         ACTIONS.CREATE.value)
        self.assertEqual(self.events[1].payload['action'],
                         ACTIONS.UPDATE.value)

    def test_delete_sends_delete_action(self):
        resp = self.app.post_json(self.collection_url, self.body,
                                  headers=self.headers, status=201)
        record = resp.json['data']
        record_url = self.get_item_url(record['id'])

        self.app.delete(record_url, headers=self.headers, status=200)
        self.assertEqual(len(self.events), 2)
        self.assertEqual(self.events[0].payload['action'],
                         ACTIONS.CREATE.value)
        self.assertEqual(self.events[1].payload['action'],
                         ACTIONS.DELETE.value)

    def test_collection_delete_sends_delete_action(self):
        self.app.post_json(self.collection_url, self.body,
                           headers=self.headers, status=201)
        self.app.post_json(self.collection_url, self.body,
                           headers=self.headers, status=201)

        # Deleting the whole collection emits a single delete event
        # (with both records impacted), after the two create events.
        self.app.delete(self.collection_url, headers=self.headers, status=200)
        self.assertEqual(len(self.events), 3)
        self.assertEqual(self.events[0].payload['action'],
                         ACTIONS.CREATE.value)
        self.assertEqual(self.events[1].payload['action'],
                         ACTIONS.CREATE.value)
        self.assertEqual(self.events[2].payload['action'],
                         ACTIONS.DELETE.value)

    def test_request_fails_if_notify_fails(self):
        # A ResourceChanged subscriber error is fatal: the request 500s
        # and no event reaches our listener.
        with notif_broken(self.app.app, ResourceChanged):
            self.app.post_json(self.collection_url, self.body,
                               headers=self.headers, status=500)

        self.assertEqual(len(self.events), 0)

    def test_triggered_on_protected_resource(self):
        app = self.make_app(settings={
            'psilo_write_principals': 'system.Authenticated'
        })
        app.post_json('/psilos', self.body,
                      headers=self.headers, status=201)
        self.assertEqual(len(self.events), 1)
        self.assertEqual(self.events[0].payload['action'],
                         ACTIONS.CREATE.value)

    def test_permissions_are_stripped_from_event_on_protected_resource(self):
        app = self.make_app(settings={
            'psilo_write_principals': 'system.Authenticated'
        })
        resp = app.post_json('/psilos', self.body,
                             headers=self.headers, status=201)
        record = resp.json['data']
        record_url = '/psilos/' + record['id']
        app.patch_json(record_url, {"data": {"name": "De barcelona"}},
                       headers=self.headers)
        # Internal __permissions__ must not leak into event payloads.
        impacted_records = self.events[-1].impacted_records
        self.assertNotIn('__permissions__', impacted_records[0]['new'])
        self.assertNotIn('__permissions__', impacted_records[0]['old'])
class AfterResourceChangedTest(BaseEventTest, unittest.TestCase):
    """A failing AfterResourceChanged subscriber must not fail the request."""
    subscribed = (AfterResourceChanged,)

    def test_request_succeeds_if_notify_fails(self):
        with notif_broken(self.app.app, AfterResourceChanged):
            # Default status check: the POST still succeeds.
            self.app.post_json(self.collection_url, self.body,
                               headers=self.headers)

        self.assertEqual(len(self.events), 0)
class AfterResourceReadTest(BaseEventTest, unittest.TestCase):
    """A failing AfterResourceRead subscriber must not fail the request."""
    subscribed = (AfterResourceRead,)

    def test_request_succeeds_if_notify_fails(self):
        # BUG FIX: this test previously broke AfterResourceChanged (a
        # copy-paste from AfterResourceChangedTest) and only issued a POST,
        # so the AfterResourceRead event this class subscribes to was never
        # exercised. Break AfterResourceRead and perform an actual read.
        with notif_broken(self.app.app, AfterResourceRead):
            self.app.get(self.collection_url, headers=self.headers)

        self.assertEqual(len(self.events), 0)
class ImpactedRecordsTest(BaseEventTest, unittest.TestCase):
    """Checks the old/new entries carried in event.impacted_records for
    each action: create has only 'new', delete only 'old', update both."""
    subscribed = (ResourceChanged,)

    def test_create_has_new_record_and_no_old_in_payload(self):
        resp = self.app.post_json(self.collection_url, self.body,
                                  headers=self.headers)
        record = resp.json['data']
        impacted_records = self.events[-1].impacted_records
        self.assertEqual(len(impacted_records), 1)
        self.assertNotIn('old', impacted_records[0])
        self.assertEqual(impacted_records[0]['new'], record)

    def test_collection_delete_has_old_record_and_no_new_in_payload(self):
        resp = self.app.post_json(self.collection_url, self.body,
                                  headers=self.headers)
        record1 = resp.json['data']
        resp = self.app.post_json(self.collection_url, self.body,
                                  headers=self.headers)
        record2 = resp.json['data']

        self.app.delete(self.collection_url, headers=self.headers, status=200)

        impacted_records = self.events[-1].impacted_records
        self.assertEqual(len(impacted_records), 2)
        self.assertNotIn('new', impacted_records[0])
        self.assertNotIn('new', impacted_records[1])
        self.assertEqual(impacted_records[0]['old']['deleted'], True)
        self.assertEqual(impacted_records[1]['old']['deleted'], True)
        # Order of tombstones is not guaranteed; compare as a set.
        deleted_ids = {impacted_records[0]['old']['id'],
                       impacted_records[1]['old']['id']}
        self.assertEqual(deleted_ids, {record1['id'], record2['id']})

    def test_update_has_old_and_new_record(self):
        resp = self.app.post_json(self.collection_url, self.body,
                                  headers=self.headers, status=201)
        record = resp.json['data']
        record_url = self.get_item_url(record['id'])
        self.app.patch_json(record_url, {'data': {'name': 'en boite'}},
                            headers=self.headers)
        impacted_records = self.events[-1].impacted_records
        self.assertEqual(len(impacted_records), 1)
        self.assertEqual(impacted_records[0]['new']['id'], record['id'])
        self.assertEqual(impacted_records[0]['new']['id'],
                         impacted_records[0]['old']['id'])
        self.assertEqual(impacted_records[0]['old']['name'], 'de Paris')
        self.assertEqual(impacted_records[0]['new']['name'], 'en boite')

    def test_delete_has_old_record_and_no_new_in_payload(self):
        resp = self.app.post_json(self.collection_url, self.body,
                                  headers=self.headers, status=201)
        record = resp.json['data']
        record_url = self.get_item_url(record['id'])
        self.app.delete(record_url, headers=self.headers, status=200)
        impacted_records = self.events[-1].impacted_records
        self.assertEqual(len(impacted_records), 1)
        self.assertNotIn('new', impacted_records[0])
        self.assertEqual(impacted_records[0]['old']['id'], record['id'])
        self.assertEqual(impacted_records[0]['old']['deleted'], True)
class BatchEventsTest(BaseEventTest, unittest.TestCase):
    """Event semantics of the /batch endpoint: events are grouped per
    (resource, action), and no event is sent when a subrequest fails."""
    subscribed = (ResourceChanged, ResourceRead)
    def test_impacted_records_are_merged(self):
        record_id = str(uuid.uuid4())
        record_url = self.get_item_url(record_id)
        # Four PUTs on the same record: one create, two updates, one delete.
        body = {
            "defaults": {
                "method": "PUT",
                "path": record_url
            },
            "requests": [
                {"body": {'data': {'name': 'foo'}}},
                {"body": {'data': {'name': 'bar'}}},
                {"body": {'data': {'name': 'baz'}}},
                {"method": "DELETE"}
            ]
        }
        self.app.post_json("/batch", body, headers=self.headers)
        # One event per action: create, update (merged), delete.
        self.assertEqual(len(self.events), 3)
        create_event = self.events[0]
        self.assertEqual(create_event.payload['action'], 'create')
        self.assertEqual(len(create_event.impacted_records), 1)
        self.assertNotIn('old', create_event.impacted_records[0])
        update_event = self.events[1]
        self.assertEqual(update_event.payload['action'], 'update')
        # Both updates are merged into a single event with two entries.
        impacted = update_event.impacted_records
        self.assertEqual(len(impacted), 2)
        self.assertEqual(impacted[0]['old']['name'], 'foo')
        self.assertEqual(impacted[0]['new']['name'], 'bar')
        self.assertEqual(impacted[1]['old']['name'], 'bar')
        self.assertEqual(impacted[1]['new']['name'], 'baz')
        delete_event = self.events[2]
        self.assertEqual(delete_event.payload['action'], 'delete')
        self.assertEqual(len(delete_event.impacted_records), 1)
        self.assertNotIn('new', delete_event.impacted_records[0])
    def test_one_event_is_sent_per_resource(self):
        # Three creations across two resources -> two events.
        body = {
            "defaults": {
                "method": "POST",
                "body": self.body,
            },
            "requests": [
                {"path": '/mushrooms'},
                {"path": '/mushrooms'},
                {"path": '/psilos'},
            ]
        }
        self.app.post_json("/batch", body, headers=self.headers)
        self.assertEqual(len(self.events), 2)
    def test_one_event_is_sent_per_action(self):
        # POST, DELETE and GET on one resource -> three distinct events.
        body = {
            "defaults": {
                "path": '/mushrooms',
            },
            "requests": [
                {"method": "POST", "body": self.body},
                {"method": "DELETE"},
                {"method": "GET"},
            ]
        }
        self.app.post_json("/batch", body, headers=self.headers)
        self.assertEqual(len(self.events), 3)
    def test_events_are_not_sent_if_subrequest_fails(self):
        # Force the storage backend to fail on collection delete.
        patch = mock.patch.object(self.storage,
                                  'delete_all',
                                  side_effect=BackendError('boom'))
        patch.start()
        self.addCleanup(patch.stop)
        request_create = {
            "method": "POST",
            "body": self.body,
        }
        request_delete_all = {
            "method": "DELETE",
            "body": self.body,
        }
        body = {
            "defaults": {
                "path": self.collection_url
            },
            "requests": [request_create, request_delete_all]
        }
        # The whole batch fails (503); even the successful create must not
        # have emitted any event.
        self.app.post_json("/batch", body, headers=self.headers,
                           status=503)
        self.assertEqual(len(self.events), 0)
def load_from_config(config, prefix):
    """Return a no-op, class-based event listener instance.

    This is the entry point referenced by the ``event_listeners.test.use``
    setting in StatsDTest. The ``config`` and ``prefix`` arguments are
    accepted for the loader protocol but ignored.
    """
    class _NoopListener(object):
        """Listener whose __call__ silently drops every event."""
        def __call__(self, event):
            return None
    return _NoopListener()
@unittest.skipIf(not statsd.statsd_module, "statsd is not installed.")
class StatsDTest(BaseWebTest, unittest.TestCase):
    """Checks that listener execution time is reported to StatsD."""
    def get_app_settings(self, *args, **kwargs):
        # Register this module's ``load_from_config`` as listener "test" and
        # point statsd at a local UDP endpoint (nothing needs to listen).
        settings = super(StatsDTest, self).get_app_settings(*args, **kwargs)
        if not statsd.statsd_module:
            return settings
        settings['statsd_url'] = 'udp://localhost:8125'
        this_module = 'kinto.tests.core.resource.test_events'
        settings['event_listeners'] = 'test'
        settings['event_listeners.test.use'] = this_module
        return settings
    def test_statds_tracks_listeners_execution_duration(self):
        statsd_client = self.app.app.registry.statsd._client
        with mock.patch.object(statsd_client, 'timing') as mocked:
            self.app.post_json(self.collection_url,
                               {"data": {"name": "pouet"}},
                               headers=self.headers)
            # First positional arg of each timing() call is the timer name.
            timers = set(c[0][0] for c in mocked.call_args_list)
            self.assertIn('listeners.test', timers)
| 40.085308 | 78 | 0.606822 |
7edf3381386c69837aef05737fed92f8cf38d9ea | 7,002 | py | Python | tests/test_webapi.py | fusion-flap/flap_w7x_webapi | fd110fcaa3ed63c5cd45e7ef33b7c11011743c4b | [
"MIT"
] | null | null | null | tests/test_webapi.py | fusion-flap/flap_w7x_webapi | fd110fcaa3ed63c5cd45e7ef33b7c11011743c4b | [
"MIT"
] | 1 | 2019-09-15T21:23:40.000Z | 2019-09-15T21:30:28.000Z | tests/test_webapi.py | fusion-flap/flap_w7x_webapi | fd110fcaa3ed63c5cd45e7ef33b7c11011743c4b | [
"MIT"
] | null | null | null | import os
import unittest
import datetime
import numpy as np
import flap
import flap_w7x_webapi as webapi
# -------------------------------------INDEPENDENT FUNCTIONS FROM FLAP--------------------------------------------------
def test_basic():
    """Pull ECRH data for an explicit time window; return the rounded mean."""
    signal = webapi.GetSignal('ECRH')
    window_start = datetime.datetime(2018, 8, 22, 10, 46, 53)
    window_end = datetime.datetime(2018, 8, 22, 10, 47, 8)
    signal.time_query_gen(window_start, window_end)
    pulled = signal.archive_pull()
    return int(np.mean(pulled["values"]))
def test_shotid():
    """Pull downsampled NBI-7 data by shot id.

    Returns [mean value scaled by 1e6, first timestamp in seconds], both
    truncated to int so the result is stable for equality checks.
    """
    signal = webapi.GetSignal('NBI-7')
    signal.shotid_time_query_gen('20180912.012')
    signal.downsamp_gen(512)
    scalar = webapi.ScalarData(signal.archive_pull())
    mean_micro = int(np.mean(scalar.val) * 1000000)
    first_second = int(min(scalar.time) / 1e9)
    return [mean_micro, first_second]
def test_streamlink():
    """Pull ECRH total power through its full streaming URL.

    Demonstrates that an arbitrary archive stream link works; returns True
    if the pull completes without raising.
    """
    stream = webapi.GetSignal('ArchiveDB/codac/W7X/CBG_ECRH/TotalPower_DATASTREAM/V1/0/Ptot_ECRH/scaled/')
    stream.shotid_time_query_gen('20180912.012')
    stream.downsamp_gen(512)
    webapi.ScalarData(stream.archive_pull())
    return True
class StandaloneTest(unittest.TestCase):
    """Runs the FLAP-independent helpers against recorded reference values.

    Requires network access to the W7-X archive; the expected numbers were
    captured from known shots.
    """
    def test_basic(self):
        self.assertEqual(test_basic(), 1136)
    def test_shotid(self):
        self.assertEqual(test_shotid(), [-26, 1536744303])
    def test_streamlink(self):
        self.assertTrue(test_streamlink())
# -------------------------------------COMPATIBILITY WITH FLAP----------------------------------------------------------
def test_register():
    """Register the W7X_WEBAPI data source and report whether FLAP lists it."""
    flap.register_data_source(
        'W7X_WEBAPI',
        get_data_func=webapi.get_data,
        add_coord_func=webapi.add_coordinate)
    registered_sources = flap.list_data_sources()
    return 'W7X_WEBAPI' in registered_sources
def test_reading():
    """Read ECRH through FLAP and return [time-axis equidistant?, start]."""
    flap.register_data_source('W7X_WEBAPI',
                              get_data_func=webapi.get_data,
                              add_coord_func=webapi.add_coordinate)
    d = flap.get_data('W7X_WEBAPI', name='ECRH',
                      exp_id='20180912.012',
                      options={'Scale Time': True,
                               'Check Time Equidistant': True,
                               'Cache Data': False},
                      object_name='ECRH_data')
    # coordinates[0] is presumably the time axis — confirm against webapi.
    return [d.coordinates[0].mode.equidistant, d.coordinates[0].start]
def test_downsampling():
    """Fetch NBI-7 with 1024-point downsampling; return the first coordinate's shape."""
    flap.register_data_source(
        'W7X_WEBAPI',
        get_data_func=webapi.get_data,
        add_coord_func=webapi.add_coordinate)
    read_options = {'Scale Time': True,
                    'Downsample': 1024,
                    'Cache Data': False}
    data_obj = flap.get_data('W7X_WEBAPI', name='NBI-7',
                             exp_id='20181016.037',
                             options=read_options,
                             object_name='ECRH_data')
    return data_obj.coordinates[0].shape
def test_timerange():
    """Read a [2, 3] s slice via a full stream URL; return [first, last] times."""
    flap.register_data_source('W7X_WEBAPI',
                              get_data_func=webapi.get_data,
                              add_coord_func=webapi.add_coordinate)
    d = flap.get_data('W7X_WEBAPI', name='ArchiveDB/codac/W7X/CBG_ECRH/TotalPower_DATASTREAM/V1/0/Ptot_ECRH/scaled/',
                      exp_id='20181016.037',
                      options={'Scale Time': True,
                               'Cache Data': False},
                      object_name='ECRH_data',
                      coordinates={'Time': [2, 3]})
    return [d.coordinates[0].values[0], d.coordinates[0].values[-1]]
def test_imagedata():
    """Read one second of AUG-2 camera data; return the mean pixel value."""
    flap.register_data_source('W7X_WEBAPI',
                              get_data_func=webapi.get_data,
                              add_coord_func=webapi.add_coordinate)
    d = flap.get_data('W7X_WEBAPI', name='AUG-2',
                      exp_id='20181016.037',
                      options={'Scale Time': True,
                               'Cache Data': False},
                      object_name='AUG2_data',
                      coordinates={'Time': [2, 3]})
    return np.mean(d.data)
def test_vectordata():
    """Read a vector-valued signal (ABES density); return the mean of the data."""
    flap.register_data_source('W7X_WEBAPI',
                              get_data_func=webapi.get_data,
                              add_coord_func=webapi.add_coordinate)
    # Alternative vector signal kept for manual testing:
    # d = flap.get_data('W7X_WEBAPI', name='TS-v8-ne_map',
    d = flap.get_data('W7X_WEBAPI', name='ABES-v2-density',
                      exp_id='20181016.037',
                      options={'Scale Time': True,
                               'Cache Data': False},
                      object_name='ECRH_data',
                      coordinates={'Time': [0,3]})
    return np.mean(d.data)
def test_cache():
    """Check that reading with ``'Cache Data': True`` writes the cache file.

    Returns:
        True if the expected HDF5 cache file exists after the read.

    If the file already exists before the test, caching cannot be verified
    reliably, so a warning is printed and the pre-existing file is kept.
    A file created by this test is always removed afterwards.
    """
    # Build the cache path next to the installed webapi package.
    cached_file = os.path.join(
        os.path.dirname(webapi.__file__), 'cached',
        'archivedb_codac_w7x_cbg_ecrh_totalpower_datastream'
        '_v1_0_ptot_ecrh_scaled_-20181016.037.hdf5')
    existed = os.path.exists(cached_file)
    if existed:
        print("Can't properly test data caching, file already exists")
    flap.register_data_source('W7X_WEBAPI',
                              get_data_func=webapi.get_data,
                              add_coord_func=webapi.add_coordinate)
    # The returned data object is irrelevant; only the cache side effect
    # matters here.
    flap.get_data('W7X_WEBAPI',
                  name='ArchiveDB/codac/W7X/CBG_ECRH/TotalPower_DATASTREAM/V1/0/Ptot_ECRH/scaled/',
                  exp_id='20181016.037',
                  options={'Scale Time': True,
                           'Cache Data': True},
                  object_name='ECRH_data',
                  coordinates={'Time': [2, 3]})
    exists = os.path.exists(cached_file)
    # Only delete a file this test created; guard against the read having
    # failed to create it (the old code would raise FileNotFoundError).
    if exists and not existed:
        os.remove(cached_file)
    return exists
class FLAPTest(unittest.TestCase):
    """Runs the FLAP-integrated helpers against recorded reference values.

    NOTE(review): the exact float equality checks (e.g. test_imagedata)
    are brittle across numpy/library versions — consider assertAlmostEqual.
    """
    def test_register(self):
        self.assertTrue(test_register())
    def test_reading(self):
        self.assertEqual(test_reading(), [True, -1.0])
    def test_downsampling(self):
        self.assertEqual(test_downsampling(), [841])
    def test_timerange(self):
        self.assertEqual(test_timerange(), [2.0, 2.99996])
    def test_imagedata(self):
        self.assertEqual(test_imagedata(), 2356.9879264322917)
    def test_vectordata(self):
        self.assertEqual(test_vectordata(), 2.614503530033091e+19)
    def test_cache(self):
        self.assertTrue(test_cache())
#--------------------------------------------POINTS3D check-------------------------------------------------------------
def test_reff():
    """Map a single XYZ point to effective radius for shot 20181018.003."""
    probe = webapi.Points3D()
    probe.append_xyz(np.array([0, 6.0, 0]))
    probe.xyz_to_reff('20181018.003')
    return probe.reff
class Points3DTest(unittest.TestCase):
    """Tests for the Points3D helper (XYZ -> effective radius mapping)."""
    def test_reff(self):
        # Renamed from the copy-pasted ``test_vectordata``: this exercises
        # test_reff(), not vector data. unittest discovery still picks it up.
        self.assertEqual(test_reff(), 0.45680578471016137)
# Run the full test suite when this module is executed directly.
if __name__ == '__main__':
    unittest.main()
| 38.262295 | 121 | 0.550557 |
7186f76ae0ab118dd81441cba40d6bb36375b556 | 640 | py | Python | pyhow/samples/impl/async.py | yoeo/pyhow | e882cc3a7b9765d6d4472de08128ac7b3c98c7c1 | [
"MIT"
] | 2 | 2016-02-29T13:57:58.000Z | 2016-03-21T16:40:46.000Z | pyhow/samples/impl/async.py | yoeo/pyhow | e882cc3a7b9765d6d4472de08128ac7b3c98c7c1 | [
"MIT"
] | 1 | 2018-06-21T08:58:54.000Z | 2018-06-21T08:58:54.000Z | pyhow/samples/impl/async.py | yoeo/pyhow | e882cc3a7b9765d6d4472de08128ac7b3c98c7c1 | [
"MIT"
] | null | null | null | """Handle asynchronious operations."""
# category: asynchronious operations
def aenter():
    """__aenter__: Todo."""
    # Sample not written yet: ``async with`` entry hook, added in Python 3.5.
    placeholder = NotImplemented
    return placeholder
def aexit():
    """__aexit__: Todo."""
    # Sample not written yet: ``async with`` exit hook, added in Python 3.5.
    placeholder = NotImplemented
    return placeholder
def aiter():
    """__aiter__: Todo."""
    # Sample not written yet: ``async for`` iterator hook, added in Python 3.5.
    placeholder = NotImplemented
    return placeholder
def anext():
    """__anext__: Todo."""
    # Sample not written yet: ``async for`` next-item hook, added in Python 3.5.
    placeholder = NotImplemented
    return placeholder
def await_handler():
    """__await__: Todo."""
    # Sample not written yet: awaitable protocol (``async def``), Python 3.5.
    placeholder = NotImplemented
    return placeholder
| 16 | 41 | 0.628125 |
b5cc2844171f65bf96c169f978a3bb7f84dc6615 | 1,364 | py | Python | util/feature_extraction.py | GauravHub11/Audio_Emotion_recognition | 135c91a363101cd91fefdb992205543689071d23 | [
"MIT"
] | null | null | null | util/feature_extraction.py | GauravHub11/Audio_Emotion_recognition | 135c91a363101cd91fefdb992205543689071d23 | [
"MIT"
] | null | null | null | util/feature_extraction.py | GauravHub11/Audio_Emotion_recognition | 135c91a363101cd91fefdb992205543689071d23 | [
"MIT"
] | null | null | null | # feature_extracting
import librosa
import pandas as pd
import numpy as np
def get_audio_features(audio_path,sampling_rate):
    """Extract MFCC, pitch, magnitude and chroma features from one audio file.

    Returns a list [mfccs, pitches, magnitudes, C] of 1-D numpy arrays:
    13 time-averaged MFCCs, the first 20 non-zero mean pitch/magnitude
    bins, and the time-averaged chroma CQT of the harmonic component.
    """
    # Load 2.5 s of audio starting 0.5 s in, resampled at twice the given rate.
    X, sample_rate = librosa.load(audio_path ,res_type='kaiser_fast',duration=2.5,sr=sampling_rate*2,offset=0.5)
    sample_rate = np.array(sample_rate)
    # Separate harmonic/percussive components; pitch-track the full mix.
    y_harmonic, y_percussive = librosa.effects.hpss(X)
    pitches, magnitudes = librosa.core.pitch.piptrack(y=X, sr=sample_rate)
    mfccs = np.mean(librosa.feature.mfcc(y=X,sr=sample_rate,n_mfcc=13),axis=1)
    pitches = np.trim_zeros(np.mean(pitches,axis=1))[:20]
    magnitudes = np.trim_zeros(np.mean(magnitudes,axis=1))[:20]
    # NOTE(review): chroma_cqt uses ``sampling_rate`` while the signal was
    # loaded at ``sampling_rate*2`` — confirm this mismatch is intentional.
    C = np.mean(librosa.feature.chroma_cqt(y=y_harmonic, sr=sampling_rate),axis=1)
    return [mfccs, pitches, magnitudes, C]
def get_features_dataframe(dataframe, sampling_rate):
    """Build a flat per-file feature table plus the matching label column.

    ``dataframe`` must provide 'path' (audio file locations) and 'label'
    columns. Each result row concatenates the MFCC, pitch, magnitude and
    chroma vectors returned by ``get_audio_features``.
    """
    labels = pd.DataFrame(dataframe['label'])
    feature_columns = ['mfcc', 'pitches', 'magnitudes', 'C']
    raw = pd.DataFrame(columns=feature_columns)
    for row, path in enumerate(dataframe['path']):
        raw.loc[row] = get_audio_features(path, sampling_rate)
    # Expand each list-valued cell into its own set of scalar columns.
    expanded = [raw[name].apply(pd.Series) for name in feature_columns]
    combined_features = pd.concat(expanded, axis=1, ignore_index=True)
    return combined_features, labels
f480f4319da91683ca1aa2bf1e9535db94469d27 | 122,518 | py | Python | tensorflow/python/kernel_tests/conv_ops_test.py | yage99/tensorflow | c7fa71b32a3635eb25596ae80d007b41007769c4 | [
"Apache-2.0"
] | 1 | 2020-08-09T21:50:17.000Z | 2020-08-09T21:50:17.000Z | tensorflow/python/kernel_tests/conv_ops_test.py | yage99/tensorflow | c7fa71b32a3635eb25596ae80d007b41007769c4 | [
"Apache-2.0"
] | 3 | 2021-08-25T16:13:38.000Z | 2022-02-10T02:04:06.000Z | tensorflow/python/kernel_tests/conv_ops_test.py | yage99/tensorflow | c7fa71b32a3635eb25596ae80d007b41007769c4 | [
"Apache-2.0"
] | 3 | 2017-05-17T08:44:52.000Z | 2021-08-18T05:37:12.000Z | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Functional tests for convolutional operations."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import time
import numpy as np
from six.moves import xrange # pylint: disable=redefined-builtin
from tensorflow.core.protobuf import config_pb2
from tensorflow.core.protobuf import rewriter_config_pb2
from tensorflow.python.client import session as session_lib
from tensorflow.python.eager import backprop
from tensorflow.python.eager import context
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors_impl
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.layers import convolutional
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import gradient_checker
from tensorflow.python.ops import gradients_impl
from tensorflow.python.ops import nn_impl
from tensorflow.python.ops import nn_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import variables
import tensorflow.python.ops.nn_grad # pylint: disable=unused-import
from tensorflow.python.platform import test
from tensorflow.python.platform import tf_logging
from tensorflow.python.util.compat import collections_abc
def GetShrunkInceptionShapes(shrink=10):
  """Iterator for smaller versions of convolution shapes in 2015 Inception.

  Relative to inception, each depth value is `depth // shrink`.

  Args:
    shrink: Factor to shrink each depth value by relative to Inception.

  Yields:
    Tuple (input_size, filter_size, out_size, stride, padding), the convolution
    parameters of Inception layers.
  """
  # The three tables below are parallel: row k of each describes one layer.
  # Depths (last dim of input/out, last two of filter) are divided in place
  # by `shrink` further down; spatial dimensions are left untouched.
  input_sizes = [[4, 5, 5, 1248], [4, 8, 8, 384], [4, 8, 8, 384],
                 [4, 8, 8, 2048], [4, 8, 8, 448], [4, 8, 8, 2048],
                 [4, 8, 8, 2048], [4, 8, 8, 2048], [4, 8, 8, 1760],
                 [4, 8, 8, 1760], [4, 8, 8, 1760], [4, 8, 8, 1760],
                 [4, 17, 17, 192], [4, 17, 17, 192], [4, 17, 17, 1248],
                 [4, 17, 17, 128], [4, 17, 17, 1248], [4, 17, 17, 224],
                 [4, 17, 17, 192], [4, 17, 17, 192], [4, 17, 17, 1216],
                 [4, 17, 17, 1216], [4, 17, 17, 224], [4, 17, 17, 192],
                 [4, 17, 17, 192], [4, 17, 17, 1152], [4, 17, 17, 1152],
                 [4, 17, 17, 192], [4, 17, 17, 160], [4, 17, 17, 1152],
                 [4, 17, 17, 1024], [4, 17, 17, 128], [4, 17, 17, 1024],
                 [4, 17, 17, 128], [4, 17, 17, 1024], [4, 17, 17, 128],
                 [4, 17, 17, 768], [4, 17, 17, 128], [4, 17, 17, 128],
                 [4, 17, 17, 768], [4, 17, 17, 768], [4, 35, 35, 96],
                 [4, 35, 35, 288], [4, 35, 35, 64], [4, 35, 35, 288],
                 [4, 35, 35, 256], [4, 35, 35, 48], [4, 35, 35, 256],
                 [4, 35, 35, 96], [4, 35, 35, 192], [4, 35, 35, 192],
                 [4, 35, 35, 192], [4, 73, 73, 64], [4, 73, 73, 64],
                 [4, 147, 147, 24]]
  filter_sizes = [[1, 1, 1248, 128], [1, 3, 384, 384], [3, 1, 384, 384],
                  [1, 1, 2048, 192], [3, 3, 448, 384], [1, 1, 2048, 320],
                  [1, 1, 2048, 448], [1, 1, 2048, 384], [1, 1, 1760, 384],
                  [1, 1, 1760, 192], [1, 1, 1760, 448], [1, 1, 1760, 320],
                  [3, 3, 192, 192], [3, 3, 192, 192], [1, 1, 1248, 192],
                  [3, 3, 128, 320], [1, 1, 1248, 128], [1, 3, 224, 224],
                  [3, 1, 192, 256], [1, 3, 192, 256], [1, 1, 1216, 192],
                  [1, 1, 1216, 96], [3, 1, 224, 224], [3, 3, 192, 224],
                  [1, 3, 192, 192], [1, 1, 1152, 192], [1, 1, 1152, 128],
                  [3, 1, 192, 192], [3, 3, 160, 192], [1, 1, 1152, 160],
                  [1, 1, 1024, 128], [1, 3, 128, 192], [1, 1, 1024, 160],
                  [3, 1, 128, 192], [1, 1, 1024, 256], [3, 1, 128, 128],
                  [1, 1, 768, 192], [1, 3, 128, 128], [3, 3, 128, 128],
                  [1, 1, 768, 128], [1, 1, 768, 320], [3, 3, 96, 96],
                  [3, 3, 288, 384], [3, 3, 64, 96], [1, 1, 288, 64],
                  [1, 1, 256, 64], [5, 5, 48, 64], [1, 1, 256, 48],
                  [3, 3, 96, 96], [1, 1, 192, 32], [1, 1, 192, 64],
                  [1, 1, 192, 48], [3, 3, 64, 192], [1, 1, 64, 64],
                  [1, 1, 24, 64]]
  out_sizes = [[4, 5, 5, 128], [4, 8, 8, 384], [4, 8, 8, 384],
               [4, 8, 8, 192], [4, 8, 8, 384], [4, 8, 8, 320],
               [4, 8, 8, 448], [4, 8, 8, 384], [4, 8, 8, 384],
               [4, 8, 8, 192], [4, 8, 8, 448], [4, 8, 8, 320],
               [4, 8, 8, 192], [4, 17, 17, 192], [4, 17, 17, 192],
               [4, 8, 8, 320], [4, 17, 17, 128], [4, 17, 17, 224],
               [4, 17, 17, 256], [4, 17, 17, 256], [4, 17, 17, 192],
               [4, 17, 17, 96], [4, 17, 17, 224], [4, 17, 17, 224],
               [4, 17, 17, 192], [4, 17, 17, 192], [4, 17, 17, 128],
               [4, 17, 17, 192], [4, 17, 17, 192], [4, 17, 17, 160],
               [4, 17, 17, 128], [4, 17, 17, 192], [4, 17, 17, 160],
               [4, 17, 17, 192], [4, 17, 17, 256], [4, 17, 17, 128],
               [4, 17, 17, 192], [4, 17, 17, 128], [4, 17, 17, 128],
               [4, 17, 17, 128], [4, 17, 17, 320], [4, 17, 17, 96],
               [4, 17, 17, 384], [4, 35, 35, 96], [4, 35, 35, 64],
               [4, 35, 35, 64], [4, 35, 35, 64], [4, 35, 35, 48],
               [4, 35, 35, 96], [4, 35, 35, 32], [4, 35, 35, 64],
               [4, 35, 35, 48], [4, 71, 71, 192], [4, 73, 73, 64],
               [4, 147, 147, 64]]
  strides = [
      1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 1, 1, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1,
      1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 1, 1, 1, 1, 1, 1, 1,
      1, 1, 1, 1, 1
  ]
  # Shrink sizes to make the test faster
  for i in input_sizes:
    i[3] //= shrink
  for f in filter_sizes:
    f[2] //= shrink
    f[3] //= shrink
  for o in out_sizes:
    o[3] //= shrink
  # pylint: disable=invalid-name
  VALID = "VALID"
  SAME = "SAME"
  # pylint: enable=invalid-name
  paddings = [
      SAME, SAME, SAME, SAME, SAME, SAME, SAME, SAME, SAME, SAME, SAME, SAME,
      VALID, SAME, SAME, VALID, SAME, SAME, SAME, SAME, SAME, SAME, SAME, SAME,
      SAME, SAME, SAME, SAME, SAME, SAME, SAME, SAME, SAME, SAME, SAME, SAME,
      SAME, SAME, SAME, SAME, SAME, VALID, VALID, SAME, SAME, SAME, SAME, SAME,
      SAME, SAME, SAME, SAME, VALID, VALID, VALID
  ]
  for i, f, o, s, p in zip(input_sizes, filter_sizes, out_sizes, strides,
                           paddings):
    yield i, f, o, s, p
def GetTestConfigs():
  """Return the (data_format, use_gpu) configurations to exercise.

  NHWC runs on both CPU and GPU; NCHW is appended only when a CUDA GPU is
  available, since that layout is CUDA-only for these tests.
  """
  configs = [("NHWC", False), ("NHWC", True)]
  if test.is_gpu_available(cuda_only=True):
    configs.append(("NCHW", True))
  return configs
class Conv2DTest(test.TestCase):
def _DtypesToTest(self, use_gpu):
# double datatype is currently not supported for convolution ops
# on the ROCm platform
optional_float64 = [] if test.is_built_with_rocm() else [dtypes.float64]
if use_gpu and not test_util.GpuSupportsHalfMatMulAndConv():
return [dtypes.float32] + optional_float64
else:
# It is important that float32 comes before float16 here,
# as we will be using its gradients as reference for fp16 gradients.
return [dtypes.float32, dtypes.float16] + optional_float64
def _CreateNumpyTensor(self, shape):
total_size = 1
for s in shape:
total_size *= s
return np.arange(1, total_size + 1, dtype=np.float32).reshape(shape)
  def _SetupValuesForDevice(self, tensor_in_sizes, filter_in_sizes, dilations,
                            strides, padding, data_format, dtype, use_gpu):
    """Verifies the output values of the convolution function.

    Args:
      tensor_in_sizes: Input tensor dimensions in
        [batch, input_rows, input_cols, input_depth].
      filter_in_sizes: Filter tensor dimensions in
        [kernel_rows, kernel_cols, input_depth, output_depth].
      dilations: Dilated rate: [col_dilation, row_dilation]
      strides: Stride: [col_stride, row_stride]
      padding: Padding type.
      data_format: Format of the data tensors.
      dtype: Data type for inputs and outputs.
      use_gpu: True if the operations should be run on GPU
    Returns:
      Symbolic tensor value that can be used to execute the computation
    """
    x1 = self._CreateNumpyTensor(tensor_in_sizes)
    x2 = self._CreateNumpyTensor(filter_in_sizes)
    with test_util.device(use_gpu):
      t1 = constant_op.constant(x1, shape=tensor_in_sizes, dtype=dtype)
      t2 = constant_op.constant(x2, shape=filter_in_sizes, dtype=dtype)
      # Expand strides/dilations/padding with batch and depth dims (NHWC
      # ordering), then permute them if the test runs in NCHW.
      strides = [1] + strides + [1]
      dilations = [1] + dilations + [1]
      # A list/tuple padding means explicit per-dimension padding amounts.
      if isinstance(padding, (list, tuple)):
        padding = [(0, 0)] + padding + [(0, 0)]
      if data_format == "NCHW":
        t1 = test_util.NHWCToNCHW(t1)
        strides = test_util.NHWCToNCHW(strides)
        dilations = test_util.NHWCToNCHW(dilations)
        if isinstance(padding, (list, tuple)):
          padding = test_util.NHWCToNCHW(padding)
      conv = nn_ops.conv2d(
          t1,
          t2,
          dilations=dilations,
          strides=strides,
          padding=padding,
          data_format=data_format)
      self.assertEqual(conv.dtype, dtype)
      # The caller always compares in NHWC, so convert back before returning.
      if data_format == "NCHW":
        conv = test_util.NCHWToNHWC(conv)
      return conv
  def _CompareFwdValues(self, tensor_in_sizes, filter_in_sizes, conv_strides,
                        padding):
    """Verifies that CPU and GPU produce the same values.

    Args:
      tensor_in_sizes: Input tensor dimensions in
        [batch, input_rows, input_cols, input_depth].
      filter_in_sizes: Filter tensor dimensions in
        [kernel_rows, kernel_cols, input_depth, output_depth].
      conv_strides: [row_stride, col_stride] for the convolution;
      padding: Padding type.
    """
    # The same random inputs are reused for every device/format config so
    # the results are directly comparable.
    x1 = np.random.rand(*tensor_in_sizes).astype(np.float32)
    x2 = np.random.rand(*filter_in_sizes).astype(np.float32)
    def _SetupVal(data_format, use_gpu):
      # Builds one conv2d for the given config, converting layout as needed
      # and always returning the result in NHWC.
      with test_util.device(use_gpu):
        t1 = constant_op.constant(x1, shape=tensor_in_sizes)
        t2 = constant_op.constant(x2, shape=filter_in_sizes)
        strides = [1] + conv_strides + [1]
        if data_format == "NCHW":
          t1 = test_util.NHWCToNCHW(t1)
          strides = test_util.NHWCToNCHW(strides)
        conv = nn_ops.conv2d(
            t1, t2, strides=strides, padding=padding, data_format=data_format)
        if data_format == "NCHW":
          conv = test_util.NCHWToNHWC(conv)
        return conv
    tensors = []
    for (data_format, use_gpu) in GetTestConfigs():
      tensors.append(_SetupVal(data_format, use_gpu))
    values = self.evaluate(tensors)
    # All configs are compared against the first one.
    for i in range(1, len(values)):
      self.assertAllClose(values[0], values[i], rtol=1e-3, atol=1e-3)
  def _ComputeReferenceDilatedConv(self, tensor_in_sizes, filter_in_sizes,
                                   stride, dilation, padding, data_format,
                                   use_gpu):
    """Builds a dilated conv2d and its nn_ops.convolution reference.

    Returns a tuple (expected, computed) of NHWC tensors: `expected` from
    nn_ops.convolution (taken as the reference implementation), `computed`
    from nn_ops.conv2d with the equivalent full strides/dilations.
    """
    x1 = self._CreateNumpyTensor(tensor_in_sizes)
    x2 = self._CreateNumpyTensor(filter_in_sizes)
    with test_util.device(use_gpu):
      t1 = constant_op.constant(x1, shape=tensor_in_sizes)
      t2 = constant_op.constant(x2, shape=filter_in_sizes)
      # Accept either a scalar stride or an iterable [row, col].
      if isinstance(stride, collections_abc.Iterable):
        strides = list(stride)
      else:
        strides = [stride, stride]
      if data_format == "NCHW":
        t1 = test_util.NHWCToNCHW(t1)
        full_strides = [1, 1] + strides
        full_dilation = [1, 1] + dilation
      else:
        full_strides = [1] + strides + [1]
        full_dilation = [1] + dilation + [1]
      expected = nn_ops.convolution(
          t1,
          t2,
          padding=padding,
          strides=strides,
          dilation_rate=dilation,
          data_format=data_format)
      computed = nn_ops.conv2d(
          t1,
          t2,
          strides=full_strides,
          dilations=full_dilation,
          padding=padding,
          data_format=data_format)
      # Always return NHWC so callers can compare flat values directly.
      if data_format == "NCHW":
        expected = test_util.NCHWToNHWC(expected)
        computed = test_util.NCHWToNHWC(computed)
      return expected, computed
  def _VerifyDilatedConvValues(self, tensor_in_sizes, filter_in_sizes, strides,
                               padding, dilations, rtol=1e-4):
    """Checks dilated conv2d against nn_ops.convolution for every config."""
    expected_results = []
    computed_results = []
    for data_format, use_gpu in GetTestConfigs():
      expected, computed = self._ComputeReferenceDilatedConv(
          tensor_in_sizes, filter_in_sizes, strides, dilations, padding,
          data_format, use_gpu)
      expected_results.append(expected)
      computed_results.append(computed)
      # NOTE(review): `tolerance` is overwritten each iteration and read
      # after the loop, so every comparison uses the LAST config's
      # `use_gpu` value — confirm this is intended.
      tolerance = 1e-2 if use_gpu else 1e-5
    expected_values = self.evaluate(expected_results)
    computed_values = self.evaluate(computed_results)
    for e_value, c_value in zip(expected_values, computed_values):
      tf_logging.debug("expected = %s", e_value)
      tf_logging.debug("actual = %s", c_value)
      self.assertAllClose(
          e_value.flatten(), c_value.flatten(), atol=tolerance, rtol=rtol)
  def _VerifyValues(self,
                    tensor_in_sizes,
                    filter_in_sizes,
                    strides,
                    padding,
                    expected,
                    dilations=(1, 1),
                    gpu_only=False,
                    test_grappler_layout_optimizer=False,
                    tol=1e-5,
                    fp16_tol=1e-3):
    """Runs conv2d over all configs/dtypes and compares to `expected`.

    `expected` is the flattened NHWC output. Integer dtypes are compared
    exactly against the rounded expectation; float dtypes use `tol`
    (`fp16_tol` for float16).
    """
    if gpu_only and not test.is_gpu_available(cuda_only=True):
      return
    tensors = []
    dilations = list(dilations)
    for (data_format, use_gpu) in GetTestConfigs():
      if gpu_only and not use_gpu:
        continue
      dtypes_to_test = self._DtypesToTest(use_gpu)
      # int32 conv2d is only exercised in plain NHWC (no layout optimizer).
      if not test_grappler_layout_optimizer and data_format == "NHWC":
        dtypes_to_test.append(dtypes.int32)
      for dtype in dtypes_to_test:
        result = self._SetupValuesForDevice(
            tensor_in_sizes,
            filter_in_sizes,
            dilations,
            strides,
            padding,
            data_format,
            dtype,
            use_gpu=use_gpu)
        if test_grappler_layout_optimizer and data_format == "NHWC" and use_gpu:
          # Grappler's layout optimizer will not optimize a fetch node, so
          # this identity allows Grappler to optimize the Conv2D node.
          result = array_ops.identity(result)
        tensors.append(result)
      values = self.evaluate(tensors)
      for i in range(len(tensors)):
        conv = tensors[i]
        value = values[i]
        tf_logging.debug("expected = %s", expected)
        tf_logging.debug("actual = %s", value)
        tol_to_use = fp16_tol if value.dtype == np.float16 else tol
        if np.issubdtype(value.dtype, np.integer):
          self.assertAllEqual(np.rint(expected), np.ravel(value))
        else:
          self.assertAllClose(expected, np.ravel(value), atol=tol_to_use,
                              rtol=tol_to_use)
        self.assertShapeEqual(value, conv)
        self.assertEqual(value.dtype, conv.dtype.as_numpy_dtype)
  def _VerifyExplicitPaddings(self,
                              tensor_in_sizes,
                              filter_in_sizes,
                              strides,
                              padding,
                              dilations=(1, 1),
                              test_grappler_layout_optimizer=False,
                              tol=1e-5,
                              fp16_tol=1e-3):
    """Verifies Conv2D with explicit padding generates correct values.

    It does this by comparing with Conv2D without explicit padding. This
    function assumes Conv2D without explicit padding works correctly.

    Args:
      tensor_in_sizes: Input tensor dimensions in [batch, input_rows,
        input_cols, input_depth].
      filter_in_sizes: Filter tensor dimensions in [kernel_rows, kernel_cols,
        input_depth, output_depth].
      strides: [row_stride, col_stride] for the convolution;
      padding: Explicit padding amounts.
      dilations: Dilation values
      test_grappler_layout_optimizer: If True, allow the Grappler layout
        optimizer to run, which turns NHWC Conv2Ds on the GPU to NCHW Conv2Ds.
      tol: The absolute and relative tolerance for non-fp16 dtypes.
      fp16_tol: The absolute and relative tolerance for fp16.
    """
    input_tensor = self._CreateNumpyTensor(tensor_in_sizes)
    filter_tensor = self._CreateNumpyTensor(filter_in_sizes)
    # Reference: pre-pad the input manually, then run a VALID convolution.
    input_tensor = array_ops.pad(input_tensor, [(0, 0)] + padding + [(0, 0)])
    dilations = list(dilations)
    conv2d_result = nn_ops.conv2d(
        input_tensor,
        filter_tensor, [1] + list(strides) + [1],
        "VALID",
        dilations=[1] + dilations + [1])
    expected = list(self.evaluate(array_ops.reshape(conv2d_result, [-1])))
    # Now run with the explicit padding argument and compare.
    self._VerifyValues(
        tensor_in_sizes,
        filter_in_sizes,
        strides,
        padding,
        expected,
        dilations,
        test_grappler_layout_optimizer=test_grappler_layout_optimizer,
        tol=tol,
        fp16_tol=fp16_tol)
@test_util.run_in_graph_and_eager_modes
def testConv2D1x1Filter(self):
expected_output = [
30.0, 36.0, 42.0, 66.0, 81.0, 96.0, 102.0, 126.0, 150.0, 138.0, 171.0,
204.0, 174.0, 216.0, 258.0, 210.0, 261.0, 312.0
]
self._VerifyValues(
tensor_in_sizes=[1, 2, 3, 3],
filter_in_sizes=[1, 1, 3, 3],
strides=[1, 1],
padding="VALID",
expected=expected_output)
@test_util.run_in_graph_and_eager_modes
def testConv2DExpandedBatch(self):
tensor_in_sizes_batch = [10, 2, 3, 3]
tensor_in_sizes_expanded_batch = [2, 5, 2, 3, 3]
filter_in_sizes = [1, 1, 3, 3]
filter_in = self._CreateNumpyTensor(filter_in_sizes)
x1 = self._CreateNumpyTensor(tensor_in_sizes_batch)
x2 = x1.reshape(tensor_in_sizes_expanded_batch)
conv1 = nn_ops.conv2d(
x1,
filter_in,
strides=[1, 1],
padding="VALID")
conv2 = nn_ops.conv2d(
x2,
filter_in,
strides=[1, 1],
padding="VALID")
self.assertEqual(conv1.shape, tensor_in_sizes_batch)
self.assertEqual(conv2.shape, tensor_in_sizes_expanded_batch)
self.assertAllEqual(
conv1,
self.evaluate(conv2).reshape(conv1.shape))
@test_util.run_in_graph_and_eager_modes
def testConvolutionClass2DExpandedBatch(self):
tensor_in_sizes_batch = [10, 2, 3, 3]
tensor_in_sizes_expanded_batch = [2, 5, 2, 3, 3]
filter_in_sizes = [1, 1, 3, 3]
filter_in = self._CreateNumpyTensor(filter_in_sizes)
x1 = self._CreateNumpyTensor(tensor_in_sizes_batch)
x2 = x1.reshape(tensor_in_sizes_expanded_batch)
convolver1 = nn_ops.Convolution(
input_shape=x1.shape,
filter_shape=filter_in.shape,
strides=[1, 1],
padding="VALID")
self.assertEqual(convolver1.num_batch_dims, 1)
convolver2 = nn_ops.Convolution(
input_shape=x2.shape,
filter_shape=filter_in.shape,
strides=[1, 1],
padding="VALID")
self.assertEqual(convolver2.num_batch_dims, 2)
conv1 = convolver1(x1, filter_in)
conv2 = convolver2(x2, filter_in)
self.assertEqual(conv1.shape, tensor_in_sizes_batch)
self.assertEqual(conv2.shape, tensor_in_sizes_expanded_batch)
self.assertAllEqual(
conv1,
self.evaluate(conv2).reshape(conv1.shape))
@test_util.run_in_graph_and_eager_modes
def testConvolutionWith2SpatialDimensionsAndExpandedBatch(self):
tensor_in_sizes_batch = [10, 2, 3, 3]
tensor_in_sizes_expanded_batch = [2, 5, 2, 3, 3]
filter_in_sizes = [1, 1, 3, 3]
filter_in = self._CreateNumpyTensor(filter_in_sizes)
x1 = self._CreateNumpyTensor(tensor_in_sizes_batch)
x2 = x1.reshape(tensor_in_sizes_expanded_batch)
conv1 = nn_ops.convolution(
x1,
filter_in,
strides=[1, 1],
padding="VALID")
conv2 = nn_ops.convolution(
x2,
filter_in,
strides=[1, 1],
padding="VALID")
self.assertEqual(conv1.shape, tensor_in_sizes_batch)
self.assertEqual(conv2.shape, tensor_in_sizes_expanded_batch)
self.assertAllEqual(
conv1,
self.evaluate(conv2).reshape(conv1.shape))
  # --- Forward-convolution value tests: each drives _VerifyValues (exact
  # expected outputs) or _VerifyDilatedConvValues (dilated conv checked
  # against an equivalent reference) over one shape/stride/padding combo. ---

  @test_util.run_in_graph_and_eager_modes
  def testConv2D2x2Filter2x1Dilation(self):
    self._VerifyDilatedConvValues(
        tensor_in_sizes=[1, 4, 4, 1],
        filter_in_sizes=[2, 2, 1, 1],
        strides=[1, 1],
        dilations=[2, 1],
        padding="VALID")

  @test_util.run_in_graph_and_eager_modes
  def testConv2DEmpty(self):
    # Zero-batch input must produce an empty output, not an error.
    expected_output = []
    self._VerifyValues(
        tensor_in_sizes=[0, 2, 3, 3],
        filter_in_sizes=[1, 1, 3, 3],
        strides=[1, 1],
        padding="VALID",
        expected=expected_output)

  @test_util.run_in_graph_and_eager_modes
  def testConv2DEmptyDilation(self):
    # Zero-batch input combined with dilation.
    self._VerifyDilatedConvValues(
        tensor_in_sizes=[0, 2, 3, 3],
        filter_in_sizes=[1, 1, 3, 3],
        strides=[1, 1],
        dilations=[2, 1],
        padding="VALID")

  @test_util.run_in_graph_and_eager_modes
  def testConv2D2x2Filter(self):
    # The outputs are computed using third_party/py/IPython/notebook.
    expected_output = [2271.0, 2367.0, 2463.0, 2901.0, 3033.0, 3165.0]
    self._VerifyValues(
        tensor_in_sizes=[1, 2, 3, 3],
        filter_in_sizes=[2, 2, 3, 3],
        strides=[1, 1],
        padding="VALID",
        expected=expected_output)

  @test_util.run_in_graph_and_eager_modes
  def testConv2D2x2FilterDilation(self):
    self._VerifyDilatedConvValues(
        tensor_in_sizes=[1, 2, 3, 3],
        filter_in_sizes=[2, 2, 3, 3],
        strides=[1, 1],
        dilations=[1, 2],
        padding="VALID")

  @test_util.run_in_graph_and_eager_modes
  def testConv2D1x2Filter(self):
    # The outputs are computed using third_party/py/IPython/notebook.
    expected_output = [
        231.0, 252.0, 273.0, 384.0, 423.0, 462.0, 690.0, 765.0, 840.0, 843.0,
        936.0, 1029.0
    ]
    self._VerifyValues(
        tensor_in_sizes=[1, 2, 3, 3],
        filter_in_sizes=[1, 2, 3, 3],
        strides=[1, 1],
        padding="VALID",
        expected=expected_output)

  @test_util.run_in_graph_and_eager_modes
  def testConv2D1x2FilterDilation(self):
    self._VerifyDilatedConvValues(
        tensor_in_sizes=[1, 2, 3, 3],
        filter_in_sizes=[1, 2, 3, 3],
        strides=[1, 1],
        dilations=[2, 1],
        padding="VALID")

  @test_util.run_in_graph_and_eager_modes
  def testConv2D2x2FilterStride2(self):
    expected_output = [2271.0, 2367.0, 2463.0]
    self._VerifyValues(
        tensor_in_sizes=[1, 2, 3, 3],
        filter_in_sizes=[2, 2, 3, 3],
        strides=[2, 2],
        padding="VALID",
        expected=expected_output)

  @test_util.run_in_graph_and_eager_modes
  def testConv2D2x2FilterStride2Same(self):
    expected_output = [2271.0, 2367.0, 2463.0, 1230.0, 1305.0, 1380.0]
    self._VerifyValues(
        tensor_in_sizes=[1, 2, 3, 3],
        filter_in_sizes=[2, 2, 3, 3],
        strides=[2, 2],
        padding="SAME",
        expected=expected_output)

  @test_util.run_in_graph_and_eager_modes
  def testConv2D2x2FilterStride1x2(self):
    # Asymmetric stride (1 in rows, 2 in cols).
    expected_output = [58.0, 78.0, 98.0, 118.0, 138.0, 158.0]
    self._VerifyValues(
        tensor_in_sizes=[1, 3, 6, 1],
        filter_in_sizes=[2, 2, 1, 1],
        strides=[1, 2],
        padding="VALID",
        expected=expected_output)

  @test_util.run_in_graph_and_eager_modes
  def testConv2DKernelSmallerThanStrideValid(self):
    # 2x2 kernel with stride 3: gaps between filter applications.
    expected_output = [65, 95, 275, 305]
    self._VerifyValues(
        tensor_in_sizes=[1, 7, 7, 1],
        filter_in_sizes=[2, 2, 1, 1],
        strides=[3, 3],
        padding="VALID",
        expected=expected_output)

  @test_util.run_in_graph_and_eager_modes
  def testConv2DKernelSmallerThanStrideSame(self):
    self._VerifyValues(
        tensor_in_sizes=[1, 3, 3, 1],
        filter_in_sizes=[1, 1, 1, 1],
        strides=[2, 2],
        padding="SAME",
        expected=[1, 3, 7, 9])
    self._VerifyValues(
        tensor_in_sizes=[1, 4, 4, 1],
        filter_in_sizes=[1, 1, 1, 1],
        strides=[2, 2],
        padding="SAME",
        expected=[1, 3, 9, 11])
    self._VerifyValues(
        tensor_in_sizes=[1, 4, 4, 1],
        filter_in_sizes=[2, 2, 1, 1],
        strides=[3, 3],
        padding="SAME",
        expected=[44, 28, 41, 16])

  @test_util.run_in_graph_and_eager_modes
  def testConv2DKernelSizeMatchesInputSize(self):
    # Kernel covers the whole input: output is 1x1 spatially.
    self._VerifyValues(
        tensor_in_sizes=[1, 2, 2, 1],
        filter_in_sizes=[2, 2, 1, 2],
        strides=[1, 1],
        padding="VALID",
        expected=[50, 60])

  @test_util.run_in_graph_and_eager_modes
  def testConv2DKernelSizeMatchesInputSizeDilation(self):
    # With 2x2 dilation the effective kernel covers the whole 3x3 input.
    self._VerifyDilatedConvValues(
        tensor_in_sizes=[1, 3, 3, 1],
        filter_in_sizes=[2, 2, 1, 2],
        strides=[1, 1],
        dilations=[2, 2],
        padding="VALID")
  # --- Explicit-padding forward tests: each drives _VerifyExplicitPaddings,
  # which compares conv2d with an explicit [[top, bottom], [left, right]]
  # padding list against a manually padded VALID convolution. ---

  @test_util.run_in_graph_and_eager_modes()
  def testConv2D0x0Padding(self):
    # Explicit zero padding must behave exactly like "VALID".
    self._VerifyExplicitPaddings(
        tensor_in_sizes=[1, 2, 3, 3],
        filter_in_sizes=[2, 2, 3, 3],
        strides=[1, 1],
        padding=[[0, 0], [0, 0]])
    self._VerifyExplicitPaddings(
        tensor_in_sizes=[3, 4, 3, 2],
        filter_in_sizes=[1, 1, 2, 1],
        strides=[2, 2],
        padding=[[0, 0], [0, 0]])

  @test_util.run_in_graph_and_eager_modes()
  def testConv2D1x1Padding(self):
    self._VerifyExplicitPaddings(
        tensor_in_sizes=[1, 2, 3, 2],
        filter_in_sizes=[2, 2, 2, 2],
        strides=[1, 1],
        padding=[[1, 1], [1, 1]])
    self._VerifyExplicitPaddings(
        tensor_in_sizes=[1, 2, 2, 1],
        filter_in_sizes=[1, 1, 1, 2],
        strides=[1, 1],
        padding=[[1, 1], [1, 1]])

  @test_util.run_in_graph_and_eager_modes()
  def testConv2D2x2Padding(self):
    self._VerifyExplicitPaddings(
        tensor_in_sizes=[1, 2, 1, 2],
        filter_in_sizes=[2, 1, 2, 1],
        strides=[1, 1],
        padding=[[2, 2], [2, 2]])
    self._VerifyExplicitPaddings(
        tensor_in_sizes=[1, 2, 1, 2],
        filter_in_sizes=[1, 1, 2, 1],
        strides=[2, 1],
        padding=[[2, 2], [2, 2]])

  @test_util.run_in_graph_and_eager_modes()
  def testConv2DOnlyBottomPadding(self):
    # Asymmetric padding: rows padded only at the bottom.
    self._VerifyExplicitPaddings(
        tensor_in_sizes=[1, 2, 3, 3],
        filter_in_sizes=[2, 2, 3, 2],
        strides=[1, 1],
        padding=[[0, 3], [0, 0]], tol=2e-5)
    self._VerifyExplicitPaddings(
        tensor_in_sizes=[2, 2, 4, 3],
        filter_in_sizes=[1, 2, 3, 2],
        strides=[2, 2],
        padding=[[0, 3], [0, 0]])

  @test_util.run_in_graph_and_eager_modes()
  def testConv2DOnlyTopRightPadding(self):
    self._VerifyExplicitPaddings(
        tensor_in_sizes=[1, 2, 3, 3],
        filter_in_sizes=[2, 2, 3, 2],
        strides=[1, 1],
        padding=[[1, 0], [0, 2]],
        tol=5e-5)
    self._VerifyExplicitPaddings(
        tensor_in_sizes=[1, 2, 4, 2],
        filter_in_sizes=[2, 2, 2, 2],
        strides=[1, 3],
        padding=[[1, 0], [0, 2]])

  @test_util.run_in_graph_and_eager_modes()
  def testConv2DLotsPadding(self):
    # Padding larger than the input itself.
    self._VerifyExplicitPaddings(
        tensor_in_sizes=[1, 1, 1, 3],
        filter_in_sizes=[2, 2, 3, 3],
        strides=[1, 1],
        padding=[[3, 4], [4, 2]])
    self._VerifyExplicitPaddings(
        tensor_in_sizes=[1, 2, 1, 1],
        filter_in_sizes=[2, 2, 1, 3],
        strides=[2, 1],
        padding=[[3, 4], [4, 2]])

  @test_util.run_in_graph_and_eager_modes()
  def testConv2DExplicitPaddingWithDilations(self):
    self._VerifyExplicitPaddings(
        tensor_in_sizes=[1, 3, 2, 1],
        filter_in_sizes=[1, 2, 1, 2],
        strides=[1, 1],
        padding=[[1, 0], [0, 1]],
        dilations=[2, 1])
    self._VerifyExplicitPaddings(
        tensor_in_sizes=[1, 2, 3, 2],
        filter_in_sizes=[3, 2, 2, 1],
        strides=[1, 1],
        padding=[[2, 1], [1, 2]],
        dilations=[2, 3])

  def testConv2DExplicitPaddingWithLayoutOptimizer(self):
    # Test with Grappler's layout optimizer, to ensure the layout optimizer
    # handles explicit padding correctly.
    self._VerifyExplicitPaddings(
        tensor_in_sizes=[1, 3, 2, 1],
        filter_in_sizes=[1, 2, 1, 2],
        strides=[1, 1],
        padding=[[1, 0], [0, 1]],
        dilations=[2, 1],
        test_grappler_layout_optimizer=True)
    self._VerifyExplicitPaddings(
        tensor_in_sizes=[1, 2, 3, 2],
        filter_in_sizes=[3, 2, 2, 1],
        strides=[1, 1],
        padding=[[2, 1], [1, 2]],
        dilations=[2, 3],
        test_grappler_layout_optimizer=True)
  def _VerifyGroupConvFwd(self, tensor_in_sizes, filter_in_sizes, dilations,
                          strides, padding, data_format, dtype):
    """Verify the output of group convolution is equal to a for-loop implementation.

    Args:
      tensor_in_sizes: Input tensor dimensions in [batch, input_rows,
        input_cols, input_depth].
      filter_in_sizes: Filter tensor dimensions in [kernel_rows, kernel_cols,
        input_depth, output_depth].
      dilations: Dilated rate: [col_dilation, row_dilation]
      strides: Stride: [col_stride, row_stride]
      padding: Padding type.
      data_format: Format of the data tensors.
      dtype: Data type for inputs and outputs.
    """
    tensor_in = self._CreateNumpyTensor(tensor_in_sizes)
    filter_in = self._CreateNumpyTensor(filter_in_sizes)
    # Group count is implied by input depth / filter's in-channel dim.
    num_groups = tensor_in_sizes[3] // filter_in_sizes[2]
    assert num_groups > 1 and \
        filter_in_sizes[2] * num_groups == tensor_in_sizes[3]
    with test_util.device(True):
      t1 = constant_op.constant(tensor_in, dtype=dtype)
      t2 = constant_op.constant(filter_in, dtype=dtype)
      strides = [1] + strides + [1]
      dilations = [1] + dilations + [1]
      if data_format == "NCHW":
        t1 = test_util.NHWCToNCHW(t1)
        strides = test_util.NHWCToNCHW(strides)
        dilations = test_util.NHWCToNCHW(dilations)
        # Channels are axis 1 in NCHW, axis 3 in NHWC.
        t1_splits = array_ops.split(t1, num_groups, axis=1)
      else:
        t1_splits = array_ops.split(t1, num_groups, axis=3)

      # Filters are always split along the output-channel axis.
      t2_splits = array_ops.split(t2, num_groups, axis=3)

      def MakeConv2d(inputs, filters):
        # One conv2d with the already-adjusted strides/dilations/format.
        return nn_ops.conv2d(
            inputs,
            filters,
            strides,
            padding,
            dilations=dilations,
            data_format=data_format)

      # Single grouped conv vs. per-group convs concatenated on channels.
      group_conv = MakeConv2d(t1, t2)
      group_conv_loop = array_ops.concat(
          [MakeConv2d(t1s, t2s) for t1s, t2s in zip(t1_splits, t2_splits)],
          axis=1 if data_format == "NCHW" else 3)

      results = self.evaluate([group_conv, group_conv_loop])
      tol_to_use = 1e-5
      self.assertAllClose(
          results[0], results[1], atol=tol_to_use, rtol=tol_to_use)
  @test_util.run_in_graph_and_eager_modes
  @test_util.run_cuda_only
  def testConv2DGroupConvFwd(self):
    # Sweep data format x dilation x stride for a 4-group convolution
    # (16 input channels / 4 per-filter channels).
    for data_format in ["NHWC", "NCHW"]:
      for dilation in [1, 2]:
        for stride in [1, 2]:
          self._VerifyGroupConvFwd([10, 32, 32, 16], [3, 3, 4, 8],
                                   dilations=[dilation, dilation],
                                   strides=[stride, stride],
                                   padding="SAME",
                                   data_format=data_format,
                                   dtype=dtypes.float32)

  @test_util.deprecated_graph_mode_only
  @test_util.run_cuda_only
  def testInputGradientGroupConv(self):
    # Numeric gradient check w.r.t. the input for grouped conv.
    for data_format in ["NCHW", "NHWC"]:
      for test_input in [True, False]:
        self.ConstructAndTestGradient(
            batch=2,
            input_rows=5,
            input_cols=4,
            filter_rows=3,
            filter_cols=3,
            num_groups=2,
            padding="VALID",
            in_depth=4,
            out_depth=6,
            stride_rows=1,
            stride_cols=1,
            test_input=test_input,
            data_format=data_format,
            use_gpu=True,
            max_err=0.005)

  @test_util.deprecated_graph_mode_only
  @test_util.run_cuda_only
  def testFilterGradientGroupConv(self):
    # Numeric gradient check w.r.t. the filter for grouped conv.
    for data_format in ["NCHW", "NHWC"]:
      for test_input in [True, False]:
        self.ConstructAndTestGradient(
            batch=2,
            input_rows=5,
            input_cols=4,
            filter_rows=3,
            filter_cols=3,
            num_groups=2,
            padding="VALID",
            in_depth=4,
            out_depth=6,
            stride_rows=1,
            stride_cols=1,
            test_input=test_input,
            data_format=data_format,
            use_gpu=True,
            max_err=0.005)

  # TODO(yzhwang): this currently fails.
  # self._VerifyValues(tensor_in_sizes=[1, 8, 8, 1],
  #                   filter_in_sizes=[2, 2, 1, 1],
  #                   strides=[4, 4], padding="SAME",
  #                   expected=[72, 112, 392, 432])
  # Testing for backprops
  def _RunAndVerifyBackpropInput(self,
                                 input_sizes,
                                 filter_sizes,
                                 output_sizes,
                                 strides,
                                 padding,
                                 expected,
                                 data_format,
                                 use_gpu,
                                 err,
                                 dilations=(1, 1)):
    """Run conv2d_backprop_input and compare against `expected` values.

    Builds a conv2d_backprop_input op from synthetic filter/out-backprop
    tensors and asserts the flattened result matches `expected` (atol 1e-5).
    Silently skips when use_gpu is requested but no CUDA GPU is available.
    NOTE(review): `err` is accepted but not used by the final assertion —
    assertAllCloseAccordingToType uses its own per-dtype tolerances here.
    """
    if use_gpu and not test.is_gpu_available(cuda_only=True):
      return
    x1 = self._CreateNumpyTensor(filter_sizes)
    x2 = self._CreateNumpyTensor(output_sizes)
    dilations = list(dilations)
    with test_util.device(use_gpu):
      if len(input_sizes) == 4:
        if data_format == "NCHW":
          input_sizes = test_util.NHWCToNCHW(input_sizes)
      t0 = constant_op.constant(input_sizes, shape=[len(input_sizes)])
      t1 = constant_op.constant(x1, shape=filter_sizes)
      t2 = constant_op.constant(x2, shape=output_sizes)
      strides = [1] + strides + [1]
      dilations = [1] + dilations + [1]
      if isinstance(padding, (list, tuple)):
        # Explicit padding: add no-op padding for batch and channel dims.
        padding = [(0, 0)] + padding + [(0, 0)]
      if data_format == "NCHW":
        t2 = test_util.NHWCToNCHW(t2)
        strides = test_util.NHWCToNCHW(strides)
        dilations = test_util.NHWCToNCHW(dilations)
        if isinstance(padding, (list, tuple)):
          padding = test_util.NHWCToNCHW((padding))
      conv = nn_ops.conv2d_backprop_input(
          t0,
          t1,
          t2,
          strides=strides,
          padding=padding,
          data_format=data_format,
          dilations=dilations)
      if data_format == "NCHW":
        # Convert back so the comparison is always in NHWC order.
        conv = test_util.NCHWToNHWC(conv)
      # "values" consists of two tensors for two backprops
      value = self.evaluate(conv)
      self.assertShapeEqual(value, conv)
    tf_logging.debug("expected = %s", expected)
    tf_logging.debug("actual = %s", value)
    self.assertAllCloseAccordingToType(expected, value.flatten(), atol=1e-5)
  def _CompareBackpropInput(self, input_sizes, filter_sizes, output_sizes,
                            conv_strides, padding):
    """Cross-check conv2d_backprop_input across all device/format configs.

    Runs the same random backprop-input computation under every
    (data_format, use_gpu) pair from GetTestConfigs() and asserts all
    results agree within 1e-2.
    """
    x1 = np.random.rand(*filter_sizes).astype(np.float32)
    x2 = np.random.rand(*output_sizes).astype(np.float32)

    def _GetVal(data_format, use_gpu):
      # Compute the backprop-input value for one configuration.
      with test_util.device(use_gpu):
        if data_format == "NCHW":
          new_input_sizes = test_util.NHWCToNCHW(input_sizes)
        else:
          new_input_sizes = input_sizes
        t0 = constant_op.constant(new_input_sizes, shape=[len(new_input_sizes)])
        t1 = constant_op.constant(x1, shape=filter_sizes)
        t2 = constant_op.constant(x2, shape=output_sizes)
        strides = [1] + conv_strides + [1]
        if data_format == "NCHW":
          t2 = test_util.NHWCToNCHW(t2)
          strides = test_util.NHWCToNCHW(strides)
        conv = nn_ops.conv2d_backprop_input(
            t0,
            t1,
            t2,
            strides=strides,
            padding=padding,
            data_format=data_format)
        if data_format == "NCHW":
          conv = test_util.NCHWToNHWC(conv)
        ret = self.evaluate(conv)
        self.assertShapeEqual(ret, conv)
        return ret

    values = []
    for (data_format, use_gpu) in GetTestConfigs():
      values.append(_GetVal(data_format, use_gpu))
    # All configurations must produce (nearly) the same values.
    for i in range(1, len(values)):
      self.assertAllClose(values[0], values[i], rtol=1e-2, atol=1e-2)
  # --- Backprop-input value tests: each drives _RunAndVerifyBackpropInput
  # over every (data_format, use_gpu) test configuration. ---

  @test_util.run_in_graph_and_eager_modes
  def testConv2D2x2Depth1ValidBackpropInput(self):
    expected_output = [1.0, 4.0, 4.0, 3.0, 10.0, 8.0]
    for (data_format, use_gpu) in GetTestConfigs():
      self._RunAndVerifyBackpropInput(
          input_sizes=[1, 2, 3, 1],
          filter_sizes=[2, 2, 1, 1],
          output_sizes=[1, 1, 2, 1],
          strides=[1, 1],
          padding="VALID",
          expected=expected_output,
          data_format=data_format,
          use_gpu=use_gpu,
          err=1e-5)

  @test_util.run_in_graph_and_eager_modes
  def testConv2DEmptyBackpropInput(self):
    # Zero-batch backprop must yield an empty gradient, not an error.
    expected_output = []
    for (data_format, use_gpu) in GetTestConfigs():
      self._RunAndVerifyBackpropInput(
          input_sizes=[0, 2, 3, 1],
          filter_sizes=[2, 2, 1, 1],
          output_sizes=[0, 1, 2, 1],
          strides=[1, 1],
          padding="VALID",
          expected=expected_output,
          data_format=data_format,
          use_gpu=use_gpu,
          err=1e-5)

  @test_util.run_in_graph_and_eager_modes
  def testConv2D2x2Depth3ValidBackpropInput(self):
    expected_output = [
        14.0, 32.0, 50.0, 100.0, 163.0, 226.0, 167.0, 212.0, 257.0, 122.0,
        140.0, 158.0, 478.0, 541.0, 604.0, 437.0, 482.0, 527.0
    ]
    for (data_format, use_gpu) in GetTestConfigs():
      # The GPU version of this test is not very stable. So adjusting the
      # error threshold to 1e-4.
      self._RunAndVerifyBackpropInput(
          input_sizes=[1, 2, 3, 3],
          filter_sizes=[2, 2, 3, 3],
          output_sizes=[1, 1, 2, 3],
          strides=[1, 1],
          padding="VALID",
          expected=expected_output,
          data_format=data_format,
          use_gpu=use_gpu,
          err=1e-4)

  @test_util.run_in_graph_and_eager_modes
  def testConv2D2x2Depth3ValidBackpropInputStride1x2(self):
    expected_output = [
        1.0, 2.0, 2.0, 4.0, 3.0, 6.0, 7.0, 12.0, 11.0, 18.0, 15.0, 24.0, 12.0,
        16.0, 15.0, 20.0, 18.0, 24.0
    ]
    for (data_format, use_gpu) in GetTestConfigs():
      self._RunAndVerifyBackpropInput(
          input_sizes=[1, 3, 6, 1],
          filter_sizes=[2, 2, 1, 1],
          output_sizes=[1, 2, 3, 1],
          strides=[1, 2],
          padding="VALID",
          expected=expected_output,
          data_format=data_format,
          use_gpu=use_gpu,
          err=1e-5)

  @test_util.run_in_graph_and_eager_modes
  def testConv2DStrideTwoFilterOneSameBackpropInput(self):
    # 1x1 filter / stride 2: gradient scatters into every other cell.
    expected_output = [
        1.0, 0.0, 2.0, 0.0, 0.0, 0.0, 0.0, 0.0, 3.0, 0.0, 4.0, 0.0, 0.0, 0.0,
        0.0, 0.0
    ]
    for (data_format, use_gpu) in GetTestConfigs():
      self._RunAndVerifyBackpropInput(
          input_sizes=[1, 4, 4, 1],
          filter_sizes=[1, 1, 1, 1],
          output_sizes=[1, 2, 2, 1],
          strides=[2, 2],
          padding="SAME",
          expected=expected_output,
          data_format=data_format,
          use_gpu=use_gpu,
          err=1e-5)

  @test_util.run_in_graph_and_eager_modes
  def testConv2DKernelSizeMatchesInputSizeBackpropInput(self):
    expected_output = [5.0, 11.0, 17.0, 23.0]
    for (data_format, use_gpu) in GetTestConfigs():
      self._RunAndVerifyBackpropInput(
          input_sizes=[1, 2, 2, 1],
          filter_sizes=[2, 2, 1, 2],
          output_sizes=[1, 1, 1, 2],
          strides=[1, 1],
          padding="VALID",
          expected=expected_output,
          data_format=data_format,
          use_gpu=use_gpu,
          err=1e-5)

  @test_util.run_in_graph_and_eager_modes
  @test_util.disable_xla("XLA requires input_sizes to be a 4D shape.")
  def testConv2DInputSizesContainsOnlySpatialDimensionsBackpropInput(self):
    # input_sizes given as just [rows, cols]; same expected values as above.
    expected_output = [5.0, 11.0, 17.0, 23.0]
    for (data_format, use_gpu) in GetTestConfigs():
      self._RunAndVerifyBackpropInput(
          input_sizes=[2, 2],
          filter_sizes=[2, 2, 1, 2],
          output_sizes=[1, 1, 1, 2],
          strides=[1, 1],
          padding="VALID",
          expected=expected_output,
          data_format=data_format,
          use_gpu=use_gpu,
          err=1e-5)
  # Testing for backprops
  def _RunAndVerifyBackpropFilter(self,
                                  input_sizes,
                                  filter_sizes,
                                  output_sizes,
                                  strides,
                                  padding,
                                  expected,
                                  data_format,
                                  use_gpu,
                                  dilations=(1, 1),
                                  err=1e-5):
    """Run conv2d_backprop_filter and compare against `expected` values.

    Repeats the computation for every dtype returned by _DtypesToTest and
    asserts the flattened result is within `err` of `expected`.
    """
    x0 = self._CreateNumpyTensor(input_sizes)
    x2 = self._CreateNumpyTensor(output_sizes)
    dilations = list(dilations)
    explicit_strides = [1] + strides + [1]
    new_padding = padding
    new_dilations = [1] + dilations + [1]
    if isinstance(new_padding, (list, tuple)):
      # Explicit padding: add no-op padding for batch and channel dims.
      new_padding = [(0, 0)] + new_padding + [(0, 0)]
    if data_format == "NCHW":
      explicit_strides = test_util.NHWCToNCHW(explicit_strides)
      new_dilations = test_util.NHWCToNCHW(new_dilations)
      if isinstance(padding, (list, tuple)):
        new_padding = test_util.NHWCToNCHW(new_padding)
    for dtype in self._DtypesToTest(use_gpu=use_gpu):
      with test_util.device(use_gpu):
        t0 = constant_op.constant(x0, shape=input_sizes, dtype=dtype)
        # Filter sizes are passed as a shape vector to the backprop op.
        t1 = constant_op.constant(filter_sizes, shape=[len(filter_sizes)])
        t2 = constant_op.constant(x2, shape=output_sizes, dtype=dtype)
        if data_format == "NCHW":
          t0 = test_util.NHWCToNCHW(t0)
          t2 = test_util.NHWCToNCHW(t2)
        conv = nn_ops.conv2d_backprop_filter(
            t0,
            t1,
            t2,
            strides=explicit_strides,
            padding=new_padding,
            dilations=new_dilations,
            data_format=data_format)
        value = self.evaluate(conv)
        self.assertShapeEqual(value, conv)
      tf_logging.debug("expected = %s", expected)
      tf_logging.debug("actual = %s", value)
      self.assertArrayNear(expected, value.flatten(), err)
  def _CompareBackFilter(self, input_sizes, filter_sizes, output_sizes,
                         conv_strides, padding):
    """Cross-check conv2d_backprop_filter across all device/format configs.

    Runs the same random backprop-filter computation under every
    (data_format, use_gpu) pair from GetTestConfigs() and asserts all
    results agree within 1e-4.
    """
    x0 = np.random.rand(*input_sizes).astype(np.float32)
    x2 = np.random.rand(*output_sizes).astype(np.float32)

    def _GetVal(data_format, use_gpu):
      # Compute the backprop-filter value for one configuration.
      with test_util.device(use_gpu):
        t0 = constant_op.constant(x0, shape=input_sizes)
        t1 = constant_op.constant(filter_sizes, shape=[len(filter_sizes)])
        t2 = constant_op.constant(x2, shape=output_sizes)
        strides = [1] + conv_strides + [1]
        if data_format == "NCHW":
          t0 = test_util.NHWCToNCHW(t0)
          t2 = test_util.NHWCToNCHW(t2)
          strides = test_util.NHWCToNCHW(strides)
        conv = nn_ops.conv2d_backprop_filter(
            t0,
            t1,
            t2,
            strides=strides,
            padding=padding,
            data_format=data_format)
        ret = self.evaluate(conv)
        self.assertShapeEqual(ret, conv)
        return ret

    values = []
    for (data_format, use_gpu) in GetTestConfigs():
      values.append(_GetVal(data_format, use_gpu))
    # All configurations must produce (nearly) the same values.
    for i in range(1, len(values)):
      self.assertAllClose(values[0], values[i], rtol=1e-4, atol=1e-4)
  # --- Backprop-filter value tests: each drives _RunAndVerifyBackpropFilter
  # over every (data_format, use_gpu) test configuration. ---

  @test_util.run_in_graph_and_eager_modes
  def testConv2D2x2Depth1ValidBackpropFilter(self):
    expected = [5.0, 8.0, 14.0, 17.0]
    for (data_format, use_gpu) in GetTestConfigs():
      self._RunAndVerifyBackpropFilter(
          input_sizes=[1, 2, 3, 1],
          filter_sizes=[2, 2, 1, 1],
          output_sizes=[1, 1, 2, 1],
          strides=[1, 1],
          padding="VALID",
          expected=expected,
          data_format=data_format,
          use_gpu=use_gpu)

  @test_util.run_in_graph_and_eager_modes
  def testConv2DEmptyBackpropFilter(self):
    # Zero output channels: filter gradient is empty.
    expected = []
    for (data_format, use_gpu) in GetTestConfigs():
      self._RunAndVerifyBackpropFilter(
          input_sizes=[1, 2, 3, 1],
          filter_sizes=[2, 2, 1, 0],
          output_sizes=[1, 1, 2, 0],
          strides=[1, 1],
          padding="VALID",
          expected=expected,
          data_format=data_format,
          use_gpu=use_gpu)

  @test_util.run_in_graph_and_eager_modes
  def testConv2DBackpropFilterWithEmptyInput(self):
    # Zero-batch input: gradient exists but is all zeros.
    expected = [0, 0, 0, 0]
    for (data_format, use_gpu) in GetTestConfigs():
      self._RunAndVerifyBackpropFilter(
          input_sizes=[0, 2, 3, 1],
          filter_sizes=[2, 2, 1, 1],
          output_sizes=[0, 1, 2, 1],
          strides=[1, 1],
          padding="VALID",
          expected=expected,
          data_format=data_format,
          use_gpu=use_gpu)

  @test_util.run_in_graph_and_eager_modes
  def testConv2D2x2Depth3ValidBackpropFilter(self):
    expected = [
        17.0, 22.0, 27.0, 22.0, 29.0, 36.0, 27.0, 36.0, 45.0, 32.0, 43.0, 54.0,
        37.0, 50.0, 63.0, 42.0, 57.0, 72.0, 62.0, 85.0, 108.0, 67.0, 92.0,
        117.0, 72.0, 99.0, 126.0, 77.0, 106.0, 135.0, 82.0, 113.0, 144.0, 87.0,
        120.0, 153.0
    ]
    for (data_format, use_gpu) in GetTestConfigs():
      self._RunAndVerifyBackpropFilter(
          input_sizes=[1, 2, 3, 3],
          filter_sizes=[2, 2, 3, 3],
          output_sizes=[1, 1, 2, 3],
          strides=[1, 1],
          padding="VALID",
          expected=expected,
          data_format=data_format,
          use_gpu=use_gpu)

  @test_util.run_in_graph_and_eager_modes
  def testConv2D2x2Depth3ValidBackpropFilterStride1x2(self):
    expected = [161.0, 182.0, 287.0, 308.0]
    for (data_format, use_gpu) in GetTestConfigs():
      self._RunAndVerifyBackpropFilter(
          input_sizes=[1, 3, 6, 1],
          filter_sizes=[2, 2, 1, 1],
          output_sizes=[1, 2, 3, 1],
          strides=[1, 2],
          padding="VALID",
          expected=expected,
          data_format=data_format,
          use_gpu=use_gpu)

  @test_util.run_in_graph_and_eager_modes
  def testConv2DStrideTwoFilterOneSameBackpropFilter(self):
    expected_output = [78.]
    for (data_format, use_gpu) in GetTestConfigs():
      self._RunAndVerifyBackpropFilter(
          input_sizes=[1, 4, 4, 1],
          filter_sizes=[1, 1, 1, 1],
          output_sizes=[1, 2, 2, 1],
          strides=[2, 2],
          padding="SAME",
          expected=expected_output,
          data_format=data_format,
          use_gpu=use_gpu)

  @test_util.run_in_graph_and_eager_modes
  def testConv2DKernelSizeMatchesInputSizeBackpropFilter(self):
    expected_output = [1.0, 2.0, 2.0, 4.0, 3.0, 6.0, 4.0, 8.0]
    for (data_format, use_gpu) in GetTestConfigs():
      self._RunAndVerifyBackpropFilter(
          input_sizes=[1, 2, 2, 1],
          filter_sizes=[2, 2, 1, 2],
          output_sizes=[1, 1, 1, 2],
          strides=[1, 1],
          padding="VALID",
          expected=expected_output,
          data_format=data_format,
          use_gpu=use_gpu)
  # Testing for backprops
  def _RunAndVerifyBackpropInputDilation(self, input_sizes, filter_sizes,
                                         output_sizes, strides, dilations,
                                         padding, data_format, use_gpu, err):
    """Check the input gradient of a dilated conv2d against nn_ops.convolution.

    Builds the same forward convolution twice (nn_ops.conv2d with `dilations`
    vs. nn_ops.convolution with `dilation_rate`), takes the gradient w.r.t.
    the input for each, and asserts the two gradients match within `err`.
    Skipped on CPU unless dilations are (1, 1).
    NOTE(review): `output_sizes` is accepted but never used in this body.
    """
    x1 = self._CreateNumpyTensor(input_sizes)
    x2 = self._CreateNumpyTensor(filter_sizes)
    default_dilations = (dilations[0] == 1 and dilations[1] == 1)
    if default_dilations or use_gpu:
      with self.cached_session(use_gpu=use_gpu) as sess:
        if data_format == "NCHW":
          input_sizes = test_util.NHWCToNCHW(input_sizes)
        t1 = constant_op.constant(x1, shape=input_sizes)
        t2 = constant_op.constant(x2, shape=filter_sizes)
        full_strides = [1] + strides + [1]
        full_dilations = [1] + dilations + [1]
        if data_format == "NCHW":
          full_strides = test_util.NHWCToNCHW(full_strides)
          full_dilations = test_util.NHWCToNCHW(full_dilations)
        conv_forward = nn_ops.conv2d(
            t1,
            t2,
            strides=full_strides,
            dilations=full_dilations,
            padding=padding,
            data_format=data_format)
        conv_forward_2 = nn_ops.convolution(
            t1,
            t2,
            padding=padding,
            strides=strides,
            dilation_rate=dilations,
            data_format=data_format)
        if data_format == "NCHW":
          conv_forward = test_util.NCHWToNHWC(conv_forward)
          conv_forward_2 = test_util.NCHWToNHWC(conv_forward_2)
        # Gradients w.r.t. the input from each of the two forward graphs.
        conv = gradients_impl.gradients(conv_forward, t1)[0]
        conv_2 = gradients_impl.gradients(conv_forward_2, t1)[0]
        # "values" consists of two tensors for two backprops
        value = self.evaluate(conv)
        value_2 = self.evaluate(conv_2)
        self.assertShapeEqual(value, conv)
        self.assertShapeEqual(value_2, conv_2)
      tf_logging.debug("expected = %s", value_2)
      tf_logging.debug("actual = %s", value)
      self.assertArrayNear(value_2.flatten(), value.flatten(), err)
# Testing for backprops
def _RunAndVerifyBackpropFilterDilation(self, input_sizes, filter_sizes,
output_sizes, strides, dilations,
padding, data_format, use_gpu, err):
x1 = self._CreateNumpyTensor(input_sizes)
x2 = self._CreateNumpyTensor(filter_sizes)
default_dilations = (dilations[0] == 1 and dilations[1] == 1)
if default_dilations or use_gpu:
with self.cached_session(use_gpu=use_gpu) as sess:
if data_format == "NCHW":
input_sizes = test_util.NHWCToNCHW(input_sizes)
t1 = constant_op.constant(x1, shape=input_sizes)
t2 = constant_op.constant(x2, shape=filter_sizes)
full_strides = [1] + strides + [1]
full_dilations = [1] + dilations + [1]
if data_format == "NCHW":
full_strides = test_util.NHWCToNCHW(full_strides)
full_dilations = test_util.NHWCToNCHW(full_dilations)
conv_forward = nn_ops.conv2d(
t1,
t2,
strides=full_strides,
dilations=full_dilations,
padding=padding,
data_format=data_format)
conv_forward_2 = nn_ops.convolution(
t1,
t2,
padding=padding,
strides=strides,
dilation_rate=dilations,
data_format=data_format)
if data_format == "NCHW":
conv_forward = test_util.NCHWToNHWC(conv_forward)
conv_forward_2 = test_util.NCHWToNHWC(conv_forward_2)
conv = gradients_impl.gradients(conv_forward, t2)[0]
conv_2 = gradients_impl.gradients(conv_forward, t2)[0]
value = self.evaluate(conv)
value_2 = self.evaluate(conv_2)
self.assertShapeEqual(value, conv)
self.assertShapeEqual(value_2, conv_2)
tf_logging.debug("expected = %s", value_2)
tf_logging.debug("actual = %s", value)
self.assertArrayNear(value_2.flatten(), value.flatten(), err)
  # --- Dilated backprop-filter tests: each drives
  # _RunAndVerifyBackpropFilterDilation; gated on a CUDA GPU or MKL since
  # dilated conv is not supported by the plain CPU kernels. ---

  @test_util.deprecated_graph_mode_only
  def testConv2D2x2Depth3ValidBackpropFilterStride1x1Dilation2x1(self):
    if test.is_gpu_available(cuda_only=True) or test_util.IsMklEnabled():
      for (data_format, use_gpu) in GetTestConfigs():
        self._RunAndVerifyBackpropFilterDilation(
            input_sizes=[1, 3, 6, 1],
            filter_sizes=[2, 2, 1, 1],
            output_sizes=[1, 1, 5, 1],
            strides=[1, 1],
            dilations=[2, 1],
            padding="VALID",
            data_format=data_format,
            use_gpu=use_gpu,
            err=1e-5)

  @test_util.deprecated_graph_mode_only
  def testConv2D2x2Depth1ValidBackpropFilterDilation1x2(self):
    if test.is_gpu_available(cuda_only=True) or test_util.IsMklEnabled():
      for (data_format, use_gpu) in GetTestConfigs():
        self._RunAndVerifyBackpropFilterDilation(
            input_sizes=[1, 2, 3, 1],
            filter_sizes=[2, 2, 1, 1],
            output_sizes=[1, 1, 2, 1],
            strides=[1, 1],
            dilations=[1, 2],
            padding="VALID",
            data_format=data_format,
            use_gpu=use_gpu,
            err=1e-5)

  @test_util.deprecated_graph_mode_only
  def testConv2DEmptyBackpropFilterDilation1x2(self):
    # Zero output channels with dilation.
    if test.is_gpu_available(cuda_only=True) or test_util.IsMklEnabled():
      for (data_format, use_gpu) in GetTestConfigs():
        self._RunAndVerifyBackpropFilterDilation(
            input_sizes=[1, 2, 3, 1],
            filter_sizes=[2, 2, 1, 0],
            output_sizes=[1, 1, 2, 0],
            strides=[1, 1],
            dilations=[1, 2],
            padding="VALID",
            data_format=data_format,
            use_gpu=use_gpu,
            err=1e-5)

  @test_util.deprecated_graph_mode_only
  def testConv2D2x2Depth3ValidBackpropFilterDilation2x2(self):
    if test.is_gpu_available(cuda_only=True) or test_util.IsMklEnabled():
      for (data_format, use_gpu) in GetTestConfigs():
        self._RunAndVerifyBackpropFilterDilation(
            input_sizes=[1, 3, 4, 3],
            filter_sizes=[2, 2, 3, 3],
            output_sizes=[1, 1, 2, 3],
            strides=[1, 1],
            dilations=[2, 2],
            padding="VALID",
            data_format=data_format,
            use_gpu=use_gpu,
            err=1e-5)

  @test_util.deprecated_graph_mode_only
  def testConv2DKernelSizeMatchesInputSizeBackpropFilterDilation2x2(self):
    if test.is_gpu_available(cuda_only=True) or test_util.IsMklEnabled():
      for (data_format, use_gpu) in GetTestConfigs():
        self._RunAndVerifyBackpropFilterDilation(
            input_sizes=[1, 3, 3, 1],
            filter_sizes=[2, 2, 1, 2],
            output_sizes=[1, 1, 1, 2],
            strides=[1, 1],
            dilations=[2, 2],
            padding="VALID",
            data_format=data_format,
            use_gpu=use_gpu,
            err=1e-5)
  # --- Dilated backprop-input tests: each drives
  # _RunAndVerifyBackpropInputDilation; same GPU/MKL gating as above. ---

  @test_util.deprecated_graph_mode_only
  def testConv2D2x2Depth3ValidBackpropInputStride1x1Dilation2x1(self):
    if test.is_gpu_available(cuda_only=True) or test_util.IsMklEnabled():
      for (data_format, use_gpu) in GetTestConfigs():
        self._RunAndVerifyBackpropInputDilation(
            input_sizes=[1, 3, 6, 1],
            filter_sizes=[2, 2, 1, 1],
            output_sizes=[1, 1, 5, 1],
            strides=[1, 1],
            dilations=[2, 1],
            padding="VALID",
            data_format=data_format,
            use_gpu=use_gpu,
            err=1e-5)

  @test_util.deprecated_graph_mode_only
  def testConv2D2x2Depth1ValidBackpropInputDilation1x2(self):
    if test.is_gpu_available(cuda_only=True) or test_util.IsMklEnabled():
      for (data_format, use_gpu) in GetTestConfigs():
        self._RunAndVerifyBackpropInputDilation(
            input_sizes=[1, 2, 3, 1],
            filter_sizes=[2, 2, 1, 1],
            output_sizes=[1, 1, 2, 1],
            strides=[1, 1],
            dilations=[1, 2],
            padding="VALID",
            data_format=data_format,
            use_gpu=use_gpu,
            err=1e-5)

  @test_util.deprecated_graph_mode_only
  def testConv2DEmptyBackpropInputDilation1x2(self):
    # Zero-batch input with dilation.
    if test.is_gpu_available(cuda_only=True) or test_util.IsMklEnabled():
      for (data_format, use_gpu) in GetTestConfigs():
        self._RunAndVerifyBackpropInputDilation(
            input_sizes=[0, 2, 3, 1],
            filter_sizes=[2, 2, 1, 1],
            output_sizes=[0, 1, 2, 1],
            strides=[1, 1],
            dilations=[1, 2],
            padding="VALID",
            data_format=data_format,
            use_gpu=use_gpu,
            err=1e-5)

  @test_util.deprecated_graph_mode_only
  def testConv2D2x2Depth3ValidBackpropInputDilation2x1(self):
    if test.is_gpu_available(cuda_only=True) or test_util.IsMklEnabled():
      for (data_format, use_gpu) in GetTestConfigs():
        # The GPU version of this test is not very stable. So adjusting the
        # error threshold to 1e-4.
        self._RunAndVerifyBackpropInputDilation(
            input_sizes=[1, 3, 2, 3],
            filter_sizes=[2, 2, 3, 3],
            output_sizes=[1, 1, 2, 3],
            strides=[1, 1],
            dilations=[2, 1],
            padding="VALID",
            data_format=data_format,
            use_gpu=use_gpu,
            err=1e-4)

  @test_util.deprecated_graph_mode_only
  def testConv2DKernelSizeMatchesInputSizeBackpropInputDilation2x2(self):
    if test.is_gpu_available(cuda_only=True) or test_util.IsMklEnabled():
      for (data_format, use_gpu) in GetTestConfigs():
        self._RunAndVerifyBackpropInputDilation(
            input_sizes=[1, 3, 3, 1],
            filter_sizes=[2, 2, 1, 2],
            output_sizes=[1, 1, 1, 2],
            strides=[1, 1],
            dilations=[2, 2],
            padding="VALID",
            data_format=data_format,
            use_gpu=use_gpu,
            err=1e-5)
  def _RunAndVerifyBackpropInputExplicitPadding(self,
                                                input_sizes,
                                                filter_sizes,
                                                output_sizes,
                                                strides,
                                                padding,
                                                data_format,
                                                use_gpu,
                                                dilations=(1, 1),
                                                err=2e-5):
    """Verify backprop-input with explicit padding against a VALID reference.

    Computes the reference by running conv2d_backprop_input with "VALID"
    padding over the padded input shape, then slicing off the padded border;
    the result is fed to _RunAndVerifyBackpropInput as `expected`.
    """
    if use_gpu and not test.is_gpu_available(cuda_only=True):
      return
    if not use_gpu and dilations != (1, 1):
      return  # Non-default dilations is currently not supported on the CPU.
    x1 = self._CreateNumpyTensor(filter_sizes)
    x2 = self._CreateNumpyTensor(output_sizes)
    dilations = list(dilations)
    # Grow the input shape by the explicit padding amounts (rows, cols).
    padded_input_sizes = input_sizes[:]
    padded_input_sizes[1] += padding[0][0] + padding[0][1]
    padded_input_sizes[2] += padding[1][0] + padding[1][1]
    c = nn_ops.conv2d_backprop_input(
        padded_input_sizes,
        x1,
        x2,
        strides=[1] + strides + [1],
        padding="VALID",
        dilations=[1] + dilations + [1])
    # Slice the padded border back off to get the reference gradient.
    c = c[:, padding[0][0]:(c.shape[1] - padding[0][1]), padding[1][0]:(
        c.shape[2] - padding[1][1]), :]
    expected = list(self.evaluate(array_ops.reshape(c, [-1])))
    self._RunAndVerifyBackpropInput(
        input_sizes,
        filter_sizes,
        output_sizes,
        strides,
        padding,
        expected,
        data_format,
        use_gpu=use_gpu,
        err=err,
        dilations=dilations)
  # --- Explicit-padding backprop-input tests: each drives
  # _RunAndVerifyBackpropInputExplicitPadding over all test configs. ---

  @test_util.run_in_graph_and_eager_modes()
  def testConv2D2x2Depth1Padding0x0BackpropInput(self):
    # Explicit zero padding must match "VALID" behavior.
    for (data_format, use_gpu) in GetTestConfigs():
      self._RunAndVerifyBackpropInputExplicitPadding(
          input_sizes=[1, 2, 3, 1],
          filter_sizes=[2, 2, 1, 1],
          output_sizes=[1, 1, 2, 1],
          strides=[1, 1],
          padding=[[0, 0], [0, 0]],
          data_format=data_format,
          use_gpu=use_gpu)
      self._RunAndVerifyBackpropInputExplicitPadding(
          input_sizes=[1, 3, 4, 2],
          filter_sizes=[2, 2, 2, 3],
          output_sizes=[1, 1, 2, 3],
          strides=[2, 2],
          padding=[[0, 0], [0, 0]],
          data_format=data_format,
          use_gpu=use_gpu)

  @test_util.run_in_graph_and_eager_modes()
  def testConv2D2x2Depth1Padding1x1BackpropInput(self):
    for (data_format, use_gpu) in GetTestConfigs():
      self._RunAndVerifyBackpropInputExplicitPadding(
          input_sizes=[1, 2, 3, 1],
          filter_sizes=[2, 2, 1, 2],
          output_sizes=[1, 3, 4, 2],
          strides=[1, 1],
          padding=[[1, 1], [1, 1]],
          data_format=data_format,
          use_gpu=use_gpu,
          err=1e-4)
      self._RunAndVerifyBackpropInputExplicitPadding(
          input_sizes=[1, 2, 3, 2],
          filter_sizes=[1, 1, 2, 1],
          output_sizes=[1, 4, 3, 1],
          strides=[1, 2],
          padding=[[1, 1], [1, 1]],
          data_format=data_format,
          use_gpu=use_gpu)
      self._RunAndVerifyBackpropInputExplicitPadding(
          input_sizes=[1, 4, 3, 1],
          filter_sizes=[2, 2, 1, 1],
          output_sizes=[1, 4, 2, 1],
          strides=[1, 2],
          padding=[[1, 1], [1, 1]],
          data_format=data_format,
          dilations=[2, 2], use_gpu=use_gpu)

  @test_util.run_in_graph_and_eager_modes()
  def testConv2D2x2Depth1Padding2x2BackpropInput(self):
    for (data_format, use_gpu) in GetTestConfigs():
      self._RunAndVerifyBackpropInputExplicitPadding(
          input_sizes=[2, 3, 1, 1],
          filter_sizes=[2, 1, 1, 1],
          output_sizes=[2, 2, 5, 1],
          strides=[3, 1],
          padding=[[2, 2], [2, 2]],
          data_format=data_format,
          use_gpu=use_gpu)
      self._RunAndVerifyBackpropInputExplicitPadding(
          input_sizes=[1, 3, 6, 1],
          filter_sizes=[3, 2, 1, 1],
          output_sizes=[1, 3, 4, 1],
          strides=[1, 2],
          padding=[[2, 2], [2, 2]],
          data_format=data_format,
          dilations=[2, 3],
          use_gpu=use_gpu)

  @test_util.run_in_graph_and_eager_modes()
  def testConv2D2x2Depth1Padding_1_8_4_1_BackpropInput(self):
    # Strongly asymmetric padding amounts.
    for (data_format, use_gpu) in GetTestConfigs():
      self._RunAndVerifyBackpropInputExplicitPadding(
          input_sizes=[1, 2, 3, 1],
          filter_sizes=[2, 2, 1, 1],
          output_sizes=[1, 10, 8, 1],
          strides=[1, 1],
          padding=[[1, 8], [4, 2]],
          data_format=data_format,
          use_gpu=use_gpu,
          err=5e-5)
      self._RunAndVerifyBackpropInputExplicitPadding(
          input_sizes=[1, 5, 3, 1],
          filter_sizes=[3, 2, 1, 1],
          output_sizes=[1, 4, 8, 1],
          strides=[3, 1],
          padding=[[1, 8], [4, 2]],
          data_format=data_format,
          use_gpu=use_gpu)

  @test_util.run_in_graph_and_eager_modes()
  def testConv2D2x2Depth1Padding_5_0_2_2_BackpropInput(self):
    for (data_format, use_gpu) in GetTestConfigs():
      self._RunAndVerifyBackpropInputExplicitPadding(
          input_sizes=[1, 3, 3, 1],
          filter_sizes=[2, 1, 1, 1],
          output_sizes=[1, 7, 7, 1],
          strides=[1, 1],
          padding=[[5, 0], [2, 2]],
          data_format=data_format,
          err=5e-5,
          use_gpu=use_gpu)
      self._RunAndVerifyBackpropInputExplicitPadding(
          input_sizes=[1, 4, 2, 1],
          filter_sizes=[3, 3, 1, 1],
          output_sizes=[1, 5, 2, 1],
          strides=[1, 2],
          padding=[[5, 0], [2, 2]],
          data_format=data_format,
          dilations=[2, 1],
          use_gpu=use_gpu)
def _RunAndVerifyBackpropFilterExplicitPadding(self,
input_sizes,
filter_sizes,
output_sizes,
strides,
padding,
data_format,
use_gpu,
dilations=(1, 1),
err=1e-5):
if use_gpu and not test.is_gpu_available(cuda_only=True):
return
if not use_gpu and dilations != (1, 1):
return # Non-default dilations is currently not supported on the CPU.
x0 = self._CreateNumpyTensor(input_sizes)
x2 = self._CreateNumpyTensor(output_sizes)
dilations = list(dilations)
x0 = np.pad(x0, [(0, 0)] + padding + [(0, 0)], "constant")
c = nn_ops.conv2d_backprop_filter(
x0,
filter_sizes,
x2,
strides=[1] + strides + [1],
padding="VALID",
dilations=[1] + dilations + [1])
expected = list(self.evaluate(array_ops.reshape(c, [-1])))
self._RunAndVerifyBackpropFilter(
input_sizes,
filter_sizes,
output_sizes,
strides,
padding,
expected,
data_format,
use_gpu=use_gpu,
dilations=dilations,
err=err)
@test_util.run_in_graph_and_eager_modes()
def testConv2D2x2Depth1Padding0x0BackpropFilter(self):
for (data_format, use_gpu) in GetTestConfigs():
self._RunAndVerifyBackpropFilterExplicitPadding(
input_sizes=[1, 2, 3, 1],
filter_sizes=[2, 2, 1, 1],
output_sizes=[1, 1, 2, 1],
strides=[1, 1],
padding=[[0, 0], [0, 0]],
data_format=data_format, use_gpu=use_gpu)
self._RunAndVerifyBackpropFilterExplicitPadding(
input_sizes=[1, 3, 4, 2],
filter_sizes=[2, 2, 2, 3],
output_sizes=[1, 1, 2, 3],
strides=[2, 2],
padding=[[0, 0], [0, 0]],
data_format=data_format, use_gpu=use_gpu)
@test_util.run_in_graph_and_eager_modes()
def testConv2D2x2Depth1Padding1x1BackpropFilter(self):
for (data_format, use_gpu) in GetTestConfigs():
self._RunAndVerifyBackpropFilterExplicitPadding(
input_sizes=[1, 2, 3, 1],
filter_sizes=[2, 2, 1, 2],
output_sizes=[1, 3, 4, 2],
strides=[1, 1],
padding=[[1, 1], [1, 1]],
data_format=data_format,
use_gpu=use_gpu,
err=5e-5)
self._RunAndVerifyBackpropFilterExplicitPadding(
input_sizes=[1, 2, 3, 2],
filter_sizes=[1, 1, 2, 1],
output_sizes=[1, 4, 3, 1],
strides=[1, 2],
padding=[[1, 1], [1, 1]],
use_gpu=use_gpu,
data_format=data_format)
self._RunAndVerifyBackpropFilterExplicitPadding(
input_sizes=[1, 4, 3, 1],
filter_sizes=[2, 2, 1, 1],
output_sizes=[1, 4, 2, 1],
strides=[1, 2],
padding=[[1, 1], [1, 1]],
data_format=data_format,
use_gpu=use_gpu,
dilations=[2, 2])
@test_util.run_in_graph_and_eager_modes()
def testConv2D2x2Depth1Padding2x2BackpropFilter(self):
for (data_format, use_gpu) in GetTestConfigs():
self._RunAndVerifyBackpropFilterExplicitPadding(
input_sizes=[2, 3, 1, 1],
filter_sizes=[2, 1, 1, 1],
output_sizes=[2, 2, 5, 1],
strides=[3, 1],
padding=[[2, 2], [2, 2]],
data_format=data_format,
use_gpu=use_gpu)
self._RunAndVerifyBackpropFilterExplicitPadding(
input_sizes=[1, 3, 6, 1],
filter_sizes=[3, 2, 1, 1],
output_sizes=[1, 3, 4, 1],
strides=[1, 2],
padding=[[2, 2], [2, 2]],
data_format=data_format,
use_gpu=use_gpu,
dilations=[2, 3])
@test_util.run_in_graph_and_eager_modes()
def testConv2D2x2Depth1Padding_1_8_4_1_BackpropFilter(self):
for (data_format, use_gpu) in GetTestConfigs():
self._RunAndVerifyBackpropFilterExplicitPadding(
input_sizes=[1, 2, 3, 1],
filter_sizes=[2, 2, 1, 1],
output_sizes=[1, 10, 8, 1],
strides=[1, 1],
padding=[[1, 8], [4, 2]],
data_format=data_format,
use_gpu=use_gpu,
err=1e-4)
self._RunAndVerifyBackpropFilterExplicitPadding(
input_sizes=[1, 5, 3, 1],
filter_sizes=[3, 2, 1, 1],
output_sizes=[1, 4, 8, 1],
strides=[3, 1],
padding=[[1, 8], [4, 2]],
use_gpu=use_gpu,
data_format=data_format)
@test_util.run_in_graph_and_eager_modes()
def testConv2D2x2Depth1Padding_5_0_2_2_BackpropFilter(self):
for (data_format, use_gpu) in GetTestConfigs():
self._RunAndVerifyBackpropFilterExplicitPadding(
input_sizes=[1, 3, 3, 1],
filter_sizes=[2, 1, 1, 1],
output_sizes=[1, 7, 7, 1],
strides=[1, 1],
padding=[[5, 0], [2, 2]],
data_format=data_format,
use_gpu=use_gpu,
err=1e-4)
self._RunAndVerifyBackpropFilterExplicitPadding(
input_sizes=[1, 4, 2, 1],
filter_sizes=[3, 3, 1, 1],
output_sizes=[1, 5, 2, 1],
strides=[1, 2],
padding=[[5, 0], [2, 2]],
data_format=data_format,
use_gpu=use_gpu,
dilations=[2, 1])
  # Gradient checkers
  def ConstructAndTestGradient(self,
                               batch,
                               input_rows,
                               input_cols,
                               filter_rows,
                               filter_cols,
                               in_depth,
                               out_depth,
                               stride_rows,
                               stride_cols,
                               padding,
                               test_input,
                               data_format,
                               use_gpu,
                               num_groups=1,
                               max_err=0.003):
    """Builds a conv2d graph and numerically checks its gradients.

    Args:
      batch: Batch size of the input tensor.
      input_rows: Input height.
      input_cols: Input width.
      filter_rows: Filter height.
      filter_cols: Filter width.
      in_depth: Input channel count (must be divisible by num_groups).
      out_depth: Output channel count (must be divisible by num_groups).
      stride_rows: Vertical stride.
      stride_cols: Horizontal stride.
      padding: "VALID", "SAME", or an explicit 4x2 padding list in NHWC order.
      test_input: If True, check the gradient w.r.t. the input; otherwise
        w.r.t. the filter.
      data_format: "NHWC" or "NCHW".
      use_gpu: Whether to run on GPU.
      num_groups: Number of groups for grouped convolution.
      max_err: Maximum allowed gradient error.
    """
    assert in_depth % num_groups == 0 and out_depth % num_groups == 0
    input_shape = [batch, input_rows, input_cols, in_depth]
    filter_shape = [filter_rows, filter_cols, in_depth // num_groups, out_depth]
    # TODO(yangke): re-factor the computation of output shape.
    if padding == "VALID":
      output_rows = (input_rows - filter_rows + stride_rows) // stride_rows
      output_cols = (input_cols - filter_cols + stride_cols) // stride_cols
    elif padding == "SAME":
      output_rows = (input_rows + stride_rows - 1) // stride_rows
      output_cols = (input_cols + stride_cols - 1) // stride_cols
    else:
      # Explicit padding: padding[1] and padding[2] are the NHWC spatial dims.
      self.assertIsInstance(padding, (list, tuple))
      output_rows = (input_rows + padding[1][0] + padding[1][1] - filter_rows +
                     stride_rows) // stride_rows
      output_cols = (input_cols + padding[2][0] + padding[2][1] - filter_cols +
                     stride_cols) // stride_cols
    output_shape = [batch, output_rows, output_cols, out_depth]
    input_size = 1
    for x in input_shape:
      input_size *= x
    filter_size = 1
    for x in filter_shape:
      filter_size *= x
    # Deterministic ramp data in [0, 1) so results are reproducible.
    input_data = [x * 1.0 / input_size for x in range(0, input_size)]
    filter_data = [x * 1.0 / filter_size for x in range(0, filter_size)]
    # Conv2DGrad functions are not compiled for double due to
    # a problem in the way Eigen's Conv2DGrad works for double.
    # So we disable the DOUBLE path. We should re-enable this
    # when double support returns for CPU and/or GPU.
    for dtype in self._DtypesToTest(use_gpu=use_gpu):
      with self.cached_session(use_gpu=use_gpu):
        input_tensor = constant_op.constant(
            input_data, shape=input_shape, dtype=dtype, name="input")
        filter_tensor = constant_op.constant(
            filter_data, shape=filter_shape, dtype=dtype, name="filter")
        strides = [1, stride_rows, stride_cols, 1]
        new_padding = padding
        if data_format == "NCHW":
          # Inputs, strides and explicit paddings are specified in NHWC above;
          # transpose them for the NCHW op.
          new_input_tensor = test_util.NHWCToNCHW(input_tensor)
          strides = test_util.NHWCToNCHW(strides)
          if isinstance(padding, (list, tuple)):
            new_padding = test_util.NHWCToNCHW(padding)
        else:
          new_input_tensor = input_tensor
        conv = nn_ops.conv2d(
            new_input_tensor,
            filter_tensor,
            strides,
            new_padding,
            data_format=data_format,
            name="conv")
        if data_format == "NCHW":
          conv = test_util.NCHWToNHWC(conv)
        self.assertEqual(output_shape, conv.get_shape())
        if test_input:
          jacob_t, jacob_n = gradient_checker.compute_gradient(input_tensor,
                                                               input_shape,
                                                               conv,
                                                               output_shape)
        else:
          jacob_t, jacob_n = gradient_checker.compute_gradient(filter_tensor,
                                                               filter_shape,
                                                               conv,
                                                               output_shape)
        if dtype == dtypes.float32:
          # NOTE(review): non-float32 iterations reuse reference_jacob_t from
          # the float32 pass — assumes _DtypesToTest yields float32 first;
          # TODO confirm.
          reference_jacob_t = jacob_t
          err = np.fabs(jacob_t - jacob_n).max()
        else:
          # Compare fp16 theoretical gradients to fp32 theoretical gradients,
          # since fp16 numerical gradients are too imprecise.
          err = np.fabs(jacob_t - reference_jacob_t).max()
        tf_logging.debug("conv_2d gradient error = %s", err)
        self.assertLess(err, max_err)
@test_util.deprecated_graph_mode_only
def testInputGradientValidPaddingStrideOne(self):
for (data_format, use_gpu) in GetTestConfigs():
self.ConstructAndTestGradient(
batch=2,
input_rows=5,
input_cols=4,
filter_rows=3,
filter_cols=3,
in_depth=2,
out_depth=3,
stride_rows=1,
stride_cols=1,
padding="VALID",
test_input=True,
data_format=data_format,
use_gpu=use_gpu)
@test_util.deprecated_graph_mode_only
def testFilterGradientValidPaddingStrideOne(self):
for (data_format, use_gpu) in GetTestConfigs():
self.ConstructAndTestGradient(
batch=4,
input_rows=6,
input_cols=5,
filter_rows=2,
filter_cols=2,
in_depth=2,
out_depth=3,
stride_rows=1,
stride_cols=1,
padding="VALID",
test_input=False,
data_format=data_format,
use_gpu=use_gpu)
@test_util.deprecated_graph_mode_only
def testInputGradientValidPaddingStrideTwo(self):
for (data_format, use_gpu) in GetTestConfigs():
self.ConstructAndTestGradient(
batch=2,
input_rows=4,
input_cols=5,
filter_rows=3,
filter_cols=3,
in_depth=2,
out_depth=3,
stride_rows=2,
stride_cols=2,
padding="VALID",
test_input=True,
data_format=data_format,
use_gpu=use_gpu)
@test_util.deprecated_graph_mode_only
def testFilterGradientValidPaddingStrideTwo(self):
for (data_format, use_gpu) in GetTestConfigs():
self.ConstructAndTestGradient(
batch=4,
input_rows=6,
input_cols=5,
filter_rows=2,
filter_cols=2,
in_depth=2,
out_depth=3,
stride_rows=2,
stride_cols=2,
padding="VALID",
test_input=False,
data_format=data_format,
use_gpu=use_gpu)
@test_util.deprecated_graph_mode_only
def testInputGradientValidPaddingStrideThree(self):
for (data_format, use_gpu) in GetTestConfigs():
self.ConstructAndTestGradient(
batch=2,
input_rows=7,
input_cols=6,
filter_rows=3,
filter_cols=3,
in_depth=4,
out_depth=5,
stride_rows=3,
stride_cols=3,
padding="VALID",
test_input=True,
data_format=data_format,
use_gpu=use_gpu)
@test_util.deprecated_graph_mode_only
def testFilterGradientValidPaddingStrideThree(self):
for (data_format, use_gpu) in GetTestConfigs():
self.ConstructAndTestGradient(
batch=2,
input_rows=8,
input_cols=7,
filter_rows=4,
filter_cols=4,
in_depth=2,
out_depth=3,
stride_rows=3,
stride_cols=3,
padding="VALID",
test_input=False,
data_format=data_format,
use_gpu=use_gpu)
@test_util.deprecated_graph_mode_only
def testInputGradientSamePaddingStrideOne(self):
for (data_format, use_gpu) in GetTestConfigs():
self.ConstructAndTestGradient(
batch=2,
input_rows=7,
input_cols=6,
filter_rows=3,
filter_cols=3,
in_depth=2,
out_depth=3,
stride_rows=1,
stride_cols=1,
padding="SAME",
test_input=True,
data_format=data_format,
use_gpu=use_gpu)
@test_util.deprecated_graph_mode_only
def testFilterGradientSamePaddingStrideOne(self):
for (data_format, use_gpu) in GetTestConfigs():
self.ConstructAndTestGradient(
batch=4,
input_rows=6,
input_cols=5,
filter_rows=2,
filter_cols=2,
in_depth=2,
out_depth=3,
stride_rows=1,
stride_cols=1,
padding="SAME",
test_input=False,
data_format=data_format,
use_gpu=use_gpu)
@test_util.deprecated_graph_mode_only
def testInputGradientSamePaddingStrideTwo(self):
for (data_format, use_gpu) in GetTestConfigs():
self.ConstructAndTestGradient(
batch=2,
input_rows=5,
input_cols=4,
filter_rows=3,
filter_cols=3,
in_depth=3,
out_depth=3,
stride_rows=2,
stride_cols=2,
padding="SAME",
test_input=True,
data_format=data_format,
use_gpu=use_gpu)
@test_util.deprecated_graph_mode_only
def testFilterGradientSamePaddingStrideTwo(self):
for (data_format, use_gpu) in GetTestConfigs():
self.ConstructAndTestGradient(
batch=4,
input_rows=6,
input_cols=5,
filter_rows=2,
filter_cols=2,
in_depth=2,
out_depth=3,
stride_rows=2,
stride_cols=2,
padding="SAME",
test_input=False,
data_format=data_format,
use_gpu=use_gpu)
@test_util.deprecated_graph_mode_only
def testInputGradientSamePaddingStrideThree(self):
for (data_format, use_gpu) in GetTestConfigs():
self.ConstructAndTestGradient(
batch=2,
input_rows=7,
input_cols=6,
filter_rows=3,
filter_cols=3,
in_depth=4,
out_depth=5,
stride_rows=3,
stride_cols=3,
padding="SAME",
test_input=True,
data_format=data_format,
use_gpu=use_gpu)
@test_util.deprecated_graph_mode_only
def testFilterGradientSamePaddingStrideThree(self):
for (data_format, use_gpu) in GetTestConfigs():
self.ConstructAndTestGradient(
batch=2,
input_rows=8,
input_cols=7,
filter_rows=4,
filter_cols=4,
in_depth=2,
out_depth=3,
stride_rows=3,
stride_cols=3,
padding="SAME",
test_input=False,
data_format=data_format,
use_gpu=use_gpu)
@test_util.deprecated_graph_mode_only
def testFilterGradientSamePaddingStride2x1(self):
for (data_format, use_gpu) in GetTestConfigs():
self.ConstructAndTestGradient(
batch=2,
input_rows=8,
input_cols=7,
filter_rows=4,
filter_cols=4,
in_depth=2,
out_depth=3,
stride_rows=2,
stride_cols=1,
padding="SAME",
test_input=False,
data_format=data_format,
use_gpu=use_gpu)
@test_util.deprecated_graph_mode_only
def testInputGradientKernelSizeMatchesInputSize(self):
for (data_format, use_gpu) in GetTestConfigs():
self.ConstructAndTestGradient(
batch=2,
input_rows=4,
input_cols=3,
filter_rows=4,
filter_cols=3,
in_depth=2,
out_depth=3,
stride_rows=1,
stride_cols=1,
padding="VALID",
test_input=True,
data_format=data_format,
use_gpu=use_gpu)
@test_util.deprecated_graph_mode_only
def testFilterGradientKernelSizeMatchesInputSize(self):
for (data_format, use_gpu) in GetTestConfigs():
self.ConstructAndTestGradient(
batch=2,
input_rows=4,
input_cols=3,
filter_rows=4,
filter_cols=3,
in_depth=2,
out_depth=3,
stride_rows=1,
stride_cols=1,
padding="VALID",
test_input=False,
data_format=data_format,
use_gpu=use_gpu)
@test_util.deprecated_graph_mode_only
def testInputGradient1x1PaddingStrideOne(self):
for (data_format, use_gpu) in GetTestConfigs():
self.ConstructAndTestGradient(
batch=2,
input_rows=5,
input_cols=4,
filter_rows=3,
filter_cols=3,
in_depth=2,
out_depth=3,
stride_rows=1,
stride_cols=1,
padding=[[0, 0], [1, 1], [1, 1], [0, 0]],
test_input=True,
data_format=data_format,
use_gpu=use_gpu,
max_err=0.0025)
@test_util.deprecated_graph_mode_only
def testFilterGradient1x1PaddingStrideOne(self):
for (data_format, use_gpu) in GetTestConfigs():
self.ConstructAndTestGradient(
batch=2,
input_rows=5,
input_cols=4,
filter_rows=3,
filter_cols=3,
in_depth=2,
out_depth=3,
stride_rows=1,
stride_cols=1,
padding=[[0, 0], [1, 1], [1, 1], [0, 0]],
test_input=False,
data_format=data_format,
use_gpu=use_gpu)
@test_util.deprecated_graph_mode_only
def testInputGradient1x1PaddingStrideTwo(self):
for (data_format, use_gpu) in GetTestConfigs():
self.ConstructAndTestGradient(
batch=2,
input_rows=4,
input_cols=5,
filter_rows=3,
filter_cols=3,
in_depth=2,
out_depth=3,
stride_rows=2,
stride_cols=2,
padding=[[0, 0], [1, 1], [1, 1], [0, 0]],
test_input=True,
data_format=data_format,
use_gpu=use_gpu)
@test_util.deprecated_graph_mode_only
def testFilterGradient1x1PaddingStrideTwo(self):
for (data_format, use_gpu) in GetTestConfigs():
self.ConstructAndTestGradient(
batch=2,
input_rows=4,
input_cols=5,
filter_rows=3,
filter_cols=3,
in_depth=2,
out_depth=3,
stride_rows=2,
stride_cols=2,
padding=[[0, 0], [1, 1], [1, 1], [0, 0]],
test_input=False,
data_format=data_format,
use_gpu=use_gpu)
@test_util.deprecated_graph_mode_only
def testInputGradient2x2PaddingStrideOne(self):
for (data_format, use_gpu) in GetTestConfigs():
self.ConstructAndTestGradient(
batch=2,
input_rows=5,
input_cols=4,
filter_rows=3,
filter_cols=3,
in_depth=2,
out_depth=3,
stride_rows=1,
stride_cols=1,
padding=[[0, 0], [2, 2], [2, 2], [0, 0]],
test_input=True,
data_format=data_format,
use_gpu=use_gpu,
max_err=0.003)
@test_util.deprecated_graph_mode_only
def testFilterGradient2x2PaddingStrideOne(self):
for (data_format, use_gpu) in GetTestConfigs():
self.ConstructAndTestGradient(
batch=2,
input_rows=5,
input_cols=4,
filter_rows=3,
filter_cols=3,
in_depth=2,
out_depth=3,
stride_rows=1,
stride_cols=1,
padding=[[0, 0], [2, 2], [2, 2], [0, 0]],
test_input=False,
data_format=data_format,
use_gpu=use_gpu,
max_err=0.003)
@test_util.deprecated_graph_mode_only
def testInputGradient1_2_3_4PaddingStride3x2(self):
for (data_format, use_gpu) in GetTestConfigs():
self.ConstructAndTestGradient(
batch=2,
input_rows=8,
input_cols=5,
filter_rows=4,
filter_cols=2,
in_depth=3,
out_depth=2,
stride_rows=3,
stride_cols=2,
padding=[[0, 0], [1, 2], [3, 4], [0, 0]],
test_input=True,
data_format=data_format,
use_gpu=use_gpu)
@test_util.deprecated_graph_mode_only
def testFilterGradient1_2_3_4PaddingStride3x2(self):
for (data_format, use_gpu) in GetTestConfigs():
self.ConstructAndTestGradient(
batch=2,
input_rows=8,
input_cols=5,
filter_rows=4,
filter_cols=2,
in_depth=3,
out_depth=2,
stride_rows=3,
stride_cols=2,
padding=[[0, 0], [1, 2], [3, 4], [0, 0]],
test_input=False,
data_format=data_format,
use_gpu=use_gpu)
@test_util.deprecated_graph_mode_only
def testInputGradient4_3_2_1PaddingStride2x1(self):
for (data_format, use_gpu) in GetTestConfigs():
self.ConstructAndTestGradient(
batch=3,
input_rows=5,
input_cols=7,
filter_rows=3,
filter_cols=2,
in_depth=1,
out_depth=2,
stride_rows=2,
stride_cols=1,
padding=[[0, 0], [4, 3], [2, 1], [0, 0]],
test_input=True,
data_format=data_format,
use_gpu=use_gpu)
@test_util.deprecated_graph_mode_only
def testFilterGradient4_3_2_1PaddingStride2x1(self):
for (data_format, use_gpu) in GetTestConfigs():
self.ConstructAndTestGradient(
batch=3,
input_rows=5,
input_cols=7,
filter_rows=3,
filter_cols=2,
in_depth=1,
out_depth=2,
stride_rows=2,
stride_cols=1,
padding=[[0, 0], [4, 3], [2, 1], [0, 0]],
test_input=False,
data_format=data_format,
use_gpu=use_gpu)
@test_util.deprecated_graph_mode_only
def testInputGradient0_0_0_5PaddingStride1x2(self):
for (data_format, use_gpu) in GetTestConfigs():
self.ConstructAndTestGradient(
batch=2,
input_rows=6,
input_cols=7,
filter_rows=3,
filter_cols=4,
in_depth=3,
out_depth=2,
stride_rows=1,
stride_cols=2,
padding=[[0, 0], [0, 0], [0, 5], [0, 0]],
test_input=True,
data_format=data_format,
use_gpu=use_gpu)
@test_util.deprecated_graph_mode_only
def testFilterGradient0_0_0_5PaddingStride1x2(self):
for (data_format, use_gpu) in GetTestConfigs():
self.ConstructAndTestGradient(
batch=2,
input_rows=6,
input_cols=7,
filter_rows=3,
filter_cols=4,
in_depth=3,
out_depth=2,
stride_rows=1,
stride_cols=2,
padding=[[0, 0], [0, 0], [0, 5], [0, 0]],
test_input=False,
data_format=data_format,
use_gpu=use_gpu)
  @test_util.deprecated_graph_mode_only
  def testShapeFunctionEdgeCases(self):
    """Exercises conv2d shape inference on valid and invalid argument shapes.

    Each case below is a graph-construction-time check: valid shapes must
    infer the expected output shape, invalid ones must raise ValueError
    before any computation runs.
    """
    # All shapes unknown.
    c1 = nn_ops.conv2d(
        array_ops.placeholder(dtypes.float32),
        array_ops.placeholder(dtypes.float32),
        strides=[1, 1, 1, 1],
        padding="SAME")
    self.assertEqual([None, None, None, None], c1.get_shape().as_list())
    # Incorrect input shape.
    with self.assertRaises(ValueError):
      nn_ops.conv2d(
          array_ops.placeholder(
              dtypes.float32, shape=[1, 3]),
          array_ops.placeholder(dtypes.float32),
          strides=[1, 1, 1, 1],
          padding="SAME")
    # Incorrect filter shape.
    with self.assertRaises(ValueError):
      nn_ops.conv2d(
          array_ops.placeholder(dtypes.float32),
          array_ops.placeholder(
              dtypes.float32, shape=[1, 3]),
          strides=[1, 1, 1, 1],
          padding="SAME")
    # Depth mismatch.
    with self.assertRaises(ValueError):
      nn_ops.conv2d(
          array_ops.placeholder(
              dtypes.float32, shape=[32, 20, 20, 3]),
          array_ops.placeholder(
              dtypes.float32, shape=[4, 4, 2, 2]),
          strides=[1, 1, 1, 1],
          padding="SAME")
    # Input depth divisible by filter depth (group convolution).
    # No exceptions should appear.
    nn_ops.conv2d(
        array_ops.placeholder(dtypes.float32, shape=[32, 20, 20, 8]),
        array_ops.placeholder(dtypes.float32, shape=[4, 4, 2, 16]),
        strides=[1, 1, 1, 1],
        padding="SAME")
    # Negative padding.
    with self.assertRaises(ValueError):
      nn_ops.conv2d(
          array_ops.placeholder(dtypes.float32),
          array_ops.placeholder(dtypes.float32),
          strides=[1, 1, 1, 1],
          padding=[[0, 0], [0, -1], [1, 2], [0, 0]])
    # Nonzero padding in nonspatial dimension.
    with self.assertRaises(ValueError):
      nn_ops.conv2d(
          array_ops.placeholder(dtypes.float32),
          array_ops.placeholder(dtypes.float32),
          strides=[1, 1, 1, 1],
          padding=[[1, 0], [0, 0], [0, 0], [0, 0]])
    # Nonzero NCHW padding in nonspatial dimension.
    with self.assertRaises(ValueError):
      nn_ops.conv2d(
          array_ops.placeholder(dtypes.float32),
          array_ops.placeholder(dtypes.float32),
          strides=[1, 1, 1, 1],
          padding=[[0, 0], [0, 1], [0, 0], [0, 0]],
          data_format="NCHW")
    # Wrong amount of padding
    with self.assertRaises(ValueError):
      nn_ops.conv2d(
          array_ops.placeholder(dtypes.float32),
          array_ops.placeholder(dtypes.float32),
          strides=[1, 1, 1, 1],
          padding=[[0, 0], [0, 0], [0, 0]])
    # Only specify one padding amount per dimension
    with self.assertRaises(ValueError):
      nn_ops.conv2d(
          array_ops.placeholder(dtypes.float32),
          array_ops.placeholder(dtypes.float32),
          strides=[1, 1, 1, 1],
          padding=[[0], [0], [0], [0]])
    # Explicit padding elements are not lists
    with self.assertRaises(ValueError):
      nn_ops.conv2d(
          array_ops.placeholder(dtypes.float32),
          array_ops.placeholder(dtypes.float32),
          strides=[1, 1, 1, 1],
          padding=[0, 0, 0, 0])
  @test_util.deprecated_graph_mode_only
  def testOpEdgeCases(self):
    """Checks runtime and construction-time errors for invalid conv2d args.

    Covers illegal batch/depth strides, filters larger than the (padded)
    input, and negative explicit padding fed to the backprop ops.
    """
    with self.cached_session() as sess:
      # Illegal strides.
      with self.assertRaisesRegex(errors_impl.UnimplementedError,
                                  "strides in the batch and depth"):
        input_placeholder = array_ops.placeholder(dtypes.float32)
        input_val = np.ones([10, 10])
        filter_placeholder = array_ops.placeholder(dtypes.float32)
        filter_val = np.ones([10, 10])
        sess.run(
            nn_ops.conv2d(
                input_placeholder,
                filter_placeholder,
                strides=[2, 1, 1, 1],
                padding="SAME"),
            feed_dict={
                input_placeholder: input_val,
                filter_placeholder: filter_val
            })
      with self.assertRaisesRegex(errors_impl.UnimplementedError,
                                  "strides in the batch and depth"):
        input_placeholder = array_ops.placeholder(dtypes.float32)
        filter_placeholder = array_ops.placeholder(dtypes.float32)
        input_val = np.ones([10, 10])
        filter_val = np.ones([10, 10])
        sess.run(
            nn_ops.conv2d(
                input_placeholder,
                filter_placeholder,
                strides=[1, 1, 1, 2],
                padding="SAME"),
            feed_dict={
                input_placeholder: input_val,
                filter_placeholder: filter_val
            })
      # Filter larger than input.
      with self.assertRaisesRegex(ValueError, "Negative dimension size"):
        input_placeholder = array_ops.placeholder(
            dtypes.float32, shape=[32, 20, 20, 3])
        input_val = np.ones([32, 20, 20, 3])
        filter_placeholder = array_ops.placeholder(
            dtypes.float32, shape=[20, 21, 3, 2])
        filter_val = np.ones([20, 21, 3, 2])
        sess.run(
            nn_ops.conv2d(
                input_placeholder,
                filter_placeholder,
                strides=[1, 1, 1, 1],
                padding="VALID"),
            feed_dict={
                input_placeholder: input_val,
                filter_placeholder: filter_val
            })
      with self.assertRaisesRegex(ValueError, "Negative dimension size"):
        input_placeholder = array_ops.placeholder(
            dtypes.float32, shape=[32, 20, 20, 3])
        input_val = np.ones([32, 20, 20, 3])
        filter_placeholder = array_ops.placeholder(
            dtypes.float32, shape=[21, 20, 3, 2])
        filter_val = np.ones([21, 20, 3, 2])
        sess.run(
            nn_ops.conv2d(
                input_placeholder,
                filter_placeholder,
                strides=[1, 1, 1, 1],
                padding="VALID"),
            feed_dict={
                input_placeholder: input_val,
                filter_placeholder: filter_val
            })
      # Filter larger than input + padding.
      with self.assertRaisesRegex(ValueError, "Negative dimension size"):
        input_placeholder = array_ops.placeholder(
            dtypes.float32, shape=[32, 20, 20, 3])
        input_val = np.ones([32, 20, 20, 3])
        filter_placeholder = array_ops.placeholder(
            dtypes.float32, shape=[24, 25, 3, 2])
        filter_val = np.ones([24, 25, 3, 2])
        sess.run(
            nn_ops.conv2d(
                input_placeholder,
                filter_placeholder,
                strides=[1, 1, 1, 1],
                padding=[[0, 0], [2, 2], [2, 2], [0, 0]]),
            feed_dict={
                input_placeholder: input_val,
                filter_placeholder: filter_val
            })
      # Negative padding during backprop.
      with self.assertRaisesRegex(
          errors_impl.InvalidArgumentError,
          "All elements of explicit_paddings must be nonnegative"):
        filter_placeholder = array_ops.placeholder(
            dtypes.float32, shape=[18, 18, 3, 2])
        filter_val = np.ones([18, 18, 3, 2])
        out_backprop = array_ops.placeholder(
            dtypes.float32, shape=[32, 3, 2, 2])
        out_backprop_val = np.ones([32, 3, 2, 2])
        sess.run(
            nn_ops.conv2d_backprop_input([32, 20, 20, 3],
                                         filter_placeholder,
                                         out_backprop,
                                         strides=[1, 1, 1, 1],
                                         padding=[[0, 0], [-1, 0], [0, 0],
                                                  [0, 0]]),
            feed_dict={
                filter_placeholder: filter_val,
                out_backprop: out_backprop_val
            })
      with self.assertRaisesRegex(
          errors_impl.InvalidArgumentError,
          "All elements of explicit_paddings must be nonnegative"):
        input_placeholder = array_ops.placeholder(
            dtypes.float32, shape=[32, 20, 20, 3])
        input_val = np.ones([32, 20, 20, 3])
        out_backprop = array_ops.placeholder(
            dtypes.float32, shape=[32, 3, 2, 2])
        out_backprop_val = np.ones([32, 3, 2, 2])
        sess.run(
            nn_ops.conv2d_backprop_filter(
                input_placeholder, [18, 18, 3, 2],
                out_backprop,
                strides=[1, 1, 1, 1],
                padding=[[0, 0], [-1, 0], [0, 0], [0, 0]]),
            feed_dict={
                input_placeholder: input_val,
                out_backprop: out_backprop_val
            })
class DepthwiseConv2DTest(test.TestCase):
  """Tests for nn_impl.depthwise_conv2d forward values."""

  def _VerifyValues(self, tensor_in_sizes, filter_in_sizes, stride, padding,
                    expected):
    """Verifies the output values of the convolution function.
    Args:
      tensor_in_sizes: Input tensor dimensions in
        [batch, input_rows, input_cols, input_depth].
      filter_in_sizes: Filter tensor dimensions in
        [filter_rows, filter_cols, input_depth, depth_multiplier].
      stride: Stride.
      padding: Padding type.
      expected: An array containing the expected operation outputs.
    """
    total_size_1 = 1
    total_size_2 = 1
    for s in tensor_in_sizes:
      total_size_1 *= s
    for s in filter_in_sizes:
      total_size_2 *= s
    # Initializes the input tensor with array containing incrementing
    # numbers from 1.
    x1 = [f * 1.0 for f in range(1, total_size_1 + 1)]
    x2 = [f * 1.0 for f in range(1, total_size_2 + 1)]
    with self.cached_session() as sess:
      t1 = constant_op.constant(x1, shape=tensor_in_sizes)
      t1.set_shape(tensor_in_sizes)
      t2 = constant_op.constant(x2, shape=filter_in_sizes)
      conv = nn_impl.depthwise_conv2d(
          t1, t2, strides=[1, stride, stride, 1], padding=padding)
      value = self.evaluate(conv)
    tf_logging.debug("value = %s", value)
    # Flattened comparison against the hand-computed expected values.
    self.assertArrayNear(expected, np.ravel(value), 1e-5)
    self.assertShapeEqual(value, conv)

  def testConv2D2x2Filter(self):
    """Checks a 2x2 depthwise filter against hand-computed outputs."""
    # The inputs look like this (it's a 3 x 2 matrix, each of depth 2):
    #
    # [ (1.0, 2.0), (3.0, 4.0), ( 5.0, 6.0) ]
    # [ (7.0, 8.0), (9.0, 10.0), (11.0, 12.0) ]
    # We can view this as two inputs
    #
    # input depth 0:
    #
    # [ 1.0, 3.0, 5.0 ]
    # [ 7.0, 9.0, 11.0 ]
    #
    # input depth 1:
    #
    # [ 2.0, 4.0, 6.0 ]
    # [ 8.0, 10.0, 12.0 ]
    #
    # The filter looks like this (it has two 2 x 2 patches, each generating 2
    # depths):
    #
    # filter #0:
    #
    # [ (1.0, 3.0), ( 5.0, 7.0)]
    # [ (9.0, 11.0), (13.0, 15.0)]
    #
    # filter #1:
    #
    # [ ( 2.0, 4.0), ( 6.0, 8.0)]
    # [ (10.0, 12.0), (14.0, 16.0)]
    #
    # So the outputs are:
    #
    # (position 0, 0: in_depth 0, output_depth 0 -- using filter #0)
    # 1.0 * 1.0 + 7.0 * 9.0 + 3.0 * 5.0 + 9.0 * 13.0 = 196
    # (position 0, 0: in_depth 0, output_depth 1 -- using filter #1)
    # 1.0 * 2.0 + 7.0 * 10.0 + 3.0 * 6.0 + 9.0 * 14.0 = 216
    # (position 0, 0: in_depth 1, output_depth 2 -- using filter #0)
    # 2.0 * 3.0 + 8.0 * 11.0 + 4.0 * 7.0 + 10.0 * 15.0 = 272
    # (position 0, 0: in_depth 1, output_depth 3 -- using filter #1)
    # 2.0 * 4.0 + 8.0 * 12.0 + 4.0 * 8.0 + 10.0 * 16.0 = 296
    #
    # (position 1, 0: in_depth 0, output_depth 0 -- using filter #0)
    # 3.0 * 1.0 + 9.0 * 9.0 + 5.0 * 5.0 + 11.0 * 13.0 = 252
    # (position 1, 0: in_depth 0, output_depth 1 -- using filter #1)
    # 3.0 * 2.0 + 9.0 * 10.0 + 5.0 * 6.0 + 11.0 * 14.0 = 280
    # (position 1, 0: in_depth 1, output_depth 2 -- using filter #0)
    # 4.0 * 3.0 + 10.0 * 11.0 + 6.0 * 7.0 + 12.0 * 15.0 = 344
    # (position 1, 0: in_depth 1, output_depth 3 -- using filter #1)
    # 4.0 * 4.0 + 10.0 * 12.0 + 6.0 * 8.0 + 12.0 * 16.0 = 376
    expected_output = [196, 216, 272, 296, 252, 280, 344, 376]
    self._VerifyValues(
        tensor_in_sizes=[1, 2, 3, 2],
        filter_in_sizes=[2, 2, 2, 2],
        stride=1,
        padding="VALID",
        expected=expected_output)
class SeparableConv2DTest(test.TestCase):
def _InitValues(self, sizes):
"""Initializes values for input tensors.
Args:
sizes: Tensor dimensions.
Returns:
Tensor initialized to values.
"""
total_size = 1
for s in sizes:
total_size *= s
x = [f * 0.5 for f in range(1, total_size + 1)]
return constant_op.constant(x, shape=sizes)
def _VerifyValues(self,
tensor_in_sizes,
depthwise_filter_in_sizes,
pointwise_filter_in_sizes,
stride,
padding,
expected,
data_format="NHWC"):
"""Verifies the output values of the separable convolution function.
Args:
tensor_in_sizes: Input tensor dimensions.
depthwise_filter_in_sizes: Depthwise filter tensor dimensions.
pointwise_filter_in_sizes: Pointwise filter tensor dimensions.
stride: Stride.
padding: Padding type.
expected: An array containing the expected operation outputs.
data_format: string data format for input tensor.
"""
with self.cached_session(use_gpu=True) as sess:
t1 = self._InitValues(tensor_in_sizes)
f1 = self._InitValues(depthwise_filter_in_sizes)
f1.set_shape(depthwise_filter_in_sizes)
f2 = self._InitValues(pointwise_filter_in_sizes)
real_t1 = t1
strides = [1, stride, stride, 1]
if data_format == "NCHW":
real_t1 = array_ops.transpose(t1, [0, 3, 1, 2])
strides = [1, 1, stride, stride]
if isinstance(padding, list):
padding = [padding[0], padding[3], padding[1], padding[2]]
conv = nn_impl.separable_conv2d(
real_t1,
f1,
f2,
strides=strides,
padding=padding,
data_format=data_format)
if data_format == "NCHW":
conv = array_ops.transpose(conv, [0, 2, 3, 1])
value = self.evaluate(conv)
tf_logging.debug("value = %s", value)
self.assertArrayNear(expected, np.ravel(value), 2e-3)
self.assertShapeEqual(value, conv)
def _testSeparableConv2D(self, data_format):
# The output is the result of two convolutions:
# First with tensor_in[1, 4, 4, 2] * filter1[2, 2, 2, 3].
# Second with intermediate_out[1, 4, 4, 6] * filter2[1, 1, 6, 7].
# Complexity is O(2*3*2*2 + 6*7*1*1) as opposed to O(2*7*2*2).
expected_output = [
6644.5, 6971.5, 7298.5, 7625.5, 7952.5, 8279.5, 8606.5, 8154.5, 8556.5,
8958.5, 9360.5, 9762.5, 10164.5, 10566.5, 9664.5, 10141.5, 10618.5,
11095.5, 11572.5, 12049.5, 12526.5, 4145.5, 4346.5, 4547.5, 4748.5,
4949.5, 5150.5, 5351.5, 12684.5, 13311.5, 13938.5, 14565.5, 15192.5,
15819.5, 16446.5, 14194.5, 14896.5, 15598.5, 16300.5, 17002.5, 17704.5,
18406.5, 15704.5, 16481.5, 17258.5, 18035.5, 18812.5, 19589.5, 20366.5,
6499.5, 6814.5, 7129.5, 7444.5, 7759.5, 8074.5, 8389.5, 18724.5,
19651.5, 20578.5, 21505.5, 22432.5, 23359.5, 24286.5, 20234.5, 21236.5,
22238.5, 23240.5, 24242.5, 25244.5, 26246.5, 21744.5, 22821.5, 23898.5,
24975.5, 26052.5, 27129.5, 28206.5, 8853.5, 9282.5, 9711.5, 10140.5,
10569.5, 10998.5, 11427.5, 5746.75, 6010.75, 6274.75, 6538.75, 6802.75,
7066.75, 7330.75, 6168.75, 6452.25, 6735.75, 7019.25, 7302.75, 7586.25,
7869.75, 6590.75, 6893.75, 7196.75, 7499.75, 7802.75, 8105.75, 8408.75,
2036.25, 2119.5, 2202.75, 2286.0, 2369.25, 2452.5, 2535.75
]
self._VerifyValues(
tensor_in_sizes=[1, 4, 4, 2],
depthwise_filter_in_sizes=[2, 2, 2, 3],
pointwise_filter_in_sizes=[1, 1, 6, 7],
stride=1,
padding="SAME",
expected=expected_output,
data_format=data_format)
def testSeparableConv2D(self):
self._testSeparableConv2D("NHWC")
def disabledtestSeparableConv2DNCHW(self):
if not test.is_gpu_available():
return
self._testSeparableConv2D("NCHW")
def _testSeparableConv2DEqualInputOutputDepth(self, data_format):
# The output is the result of two convolutions:
# First with tensor_in[1, 4, 4, 2] * filter1[2, 2, 3, 3].
# Second with intermediate_out[1, 4, 4, 6] * filter2[1, 1, 6, 6].
# Complexity is O(2*3*2*2 + 6*6*1*1) as opposed to O(2*6*2*2).
expected_output = [
5742.0, 6069.0, 6396.0, 6723.0, 7050.0, 7377.0, 7047.0, 7449.0, 7851.0,
8253.0, 8655.0, 9057.0, 8352.0, 8829.0, 9306.0, 9783.0, 10260.0,
10737.0, 3582.0, 3783.0, 3984.0, 4185.0, 4386.0, 4587.0, 10962.0,
11589.0, 12216.0, 12843.0, 13470.0, 14097.0, 12267.0, 12969.0, 13671.0,
14373.0, 15075.0, 15777.0, 13572.0, 14349.0, 15126.0, 15903.0, 16680.0,
17457.0, 5616.0, 5931.0, 6246.0, 6561.0, 6876.0, 7191.0, 16182.0,
17109.0, 18036.0, 18963.0, 19890.0, 20817.0, 17487.0, 18489.0, 19491.0,
20493.0, 21495.0, 22497.0, 18792.0, 19869.0, 20946.0, 22023.0, 23100.0,
24177.0, 7650.0, 8079.0, 8508.0, 8937.0, 9366.0, 9795.0, 4963.5, 5227.5,
5491.5, 5755.5, 6019.5, 6283.5, 5328.0, 5611.5, 5895.0, 6178.5, 6462.0,
6745.5, 5692.5, 5995.5, 6298.5, 6601.5, 6904.5, 7207.5, 1757.25, 1840.5,
1923.75, 2007.0, 2090.25, 2173.5
]
self._VerifyValues(
tensor_in_sizes=[1, 4, 4, 2],
depthwise_filter_in_sizes=[2, 2, 2, 3],
pointwise_filter_in_sizes=[1, 1, 6, 6],
stride=1,
padding="SAME",
expected=expected_output,
data_format=data_format)
@test_util.deprecated_graph_mode_only
def testSeparableConv2DEqualInputOutputDepth(self):
self._testSeparableConv2DEqualInputOutputDepth("NHWC")
def testSeparableConv2DEqualInputOutputDepthNCHW(self):
  """NCHW equal-depth case; requires a GPU kernel, so skip on CPU."""
  if not test.is_gpu_available():
    return
  self._testSeparableConv2DEqualInputOutputDepth("NCHW")
def _testSeparableConv2dExplicitPadding(self, data_format):
  """Checks explicit padding against manually pre-padded VALID convolution."""
  tensor_in_sizes = [1, 4, 4, 2]
  depthwise_filter_in_sizes = [2, 2, 2, 3]
  pointwise_filter_in_sizes = [1, 1, 6, 7]
  padding = [[0, 0], [1, 2], [3, 4], [0, 0]]
  with self.cached_session(use_gpu=True):
    # Build the reference result: pad the input up front, then run
    # separable_conv2d with VALID padding.  The explicit-padding path must
    # match this exactly.
    padded_input = self._InitValues(tensor_in_sizes)
    padded_input = array_ops.pad(padded_input, padding)
    depthwise_filter = self._InitValues(depthwise_filter_in_sizes)
    depthwise_filter.set_shape(depthwise_filter_in_sizes)
    pointwise_filter = self._InitValues(pointwise_filter_in_sizes)
    reference_conv = nn_impl.separable_conv2d(
        padded_input,
        depthwise_filter,
        pointwise_filter,
        strides=[1, 1, 1, 1],
        padding="VALID",
        data_format="NHWC")
    expected = np.ravel(self.evaluate(reference_conv))
  self._VerifyValues(
      tensor_in_sizes=tensor_in_sizes,
      depthwise_filter_in_sizes=depthwise_filter_in_sizes,
      pointwise_filter_in_sizes=pointwise_filter_in_sizes,
      stride=1,
      padding=padding,
      expected=expected,
      data_format=data_format)
def testSeparableConv2dExplicitPadding(self):
  """Explicit-padding check in NHWC layout."""
  self._testSeparableConv2dExplicitPadding("NHWC")
def testSeparableConv2dExplicitPaddingNCHW(self):
  """Explicit-padding check in NCHW layout; GPU only."""
  if not test.is_gpu_available():
    return
  self._testSeparableConv2dExplicitPadding("NCHW")
class DeepConv2DTest(test.TestCase):
  """Verifies the DeepConv2D path (TF_USE_DEEP_CONV2D=1) matches Conv2D."""

  def _CompareFwdConv2D(self, tensor_in_sizes, filter_in_sizes, conv_strides,
                        padding):
    """Verifies that DeepConv2D and Conv2D produce the same values.

    Args:
      tensor_in_sizes: Input tensor dimensions in
        [batch, input_rows, input_cols, input_depth].
      filter_in_sizes: Filter tensor dimensions in
        [kernel_rows, kernel_cols, input_depth, output_depth].
      conv_strides: [row_stride, col_stride] for the convolution;
      padding: Padding type.
    """
    x1 = np.random.rand(*tensor_in_sizes).astype(np.float32)
    x2 = np.random.rand(*filter_in_sizes).astype(np.float32)
    with self.cached_session(use_gpu=False) as sess:
      t1 = constant_op.constant(x1, shape=tensor_in_sizes)
      t2 = constant_op.constant(x2, shape=filter_in_sizes)
      strides = [1] + conv_strides + [1]
      conv = nn_ops.conv2d(t1, t2, strides=strides, padding=padding)
      # Run the same graph twice, toggling the env var that selects the
      # DeepConv2D implementation; both runs must agree numerically.
      os.environ["TF_USE_DEEP_CONV2D"] = "0"
      values_expect = self.evaluate([conv])
      os.environ["TF_USE_DEEP_CONV2D"] = "1"
      values_test = self.evaluate([conv])
      self.assertAllClose(values_expect, values_test, rtol=1e-5, atol=1e-5)

  def _RunTestCases(self, conv_strides, padding):
    """Runs _CompareFwdConv2D over a fixed battery of input/filter shapes."""
    input_sizes = [[5, 5, 5, 1248], [3, 17, 17, 192], [2, 35, 35, 288],
                   [2, 6, 8, 517], [2, 7, 4, 81], [3, 11, 3, 77]]
    filter_sizes = [[3, 3, 1248, 128], [3, 3, 192, 192], [3, 3, 288, 384],
                    [3, 3, 517, 64], [3, 3, 81, 77], [3, 3, 77, 181]]
    for input_shape, filter_shape in zip(input_sizes, filter_sizes):
      self._CompareFwdConv2D(input_shape, filter_shape, conv_strides, padding)

  def testConv2D3x3FilterStride1x1Valid(self):
    self._RunTestCases([1, 1], "VALID")

  def testConv2D3x3FilterStride1x1Same(self):
    self._RunTestCases([1, 1], "SAME")
class Conv2DBenchmark(test.Benchmark):
  """Benchmarks for Conv2D padding strategies and deep conv stacks (TF1 graph APIs)."""

  def benchmarkGPUConvStackFirst(self):
    """Times per-iteration cost of a deep stack of identical 1xK convolutions."""
    # Benchmark the first iteration of a conv-net with many identical conv
    # operations.
    if not test.is_gpu_available():
      return
    with ops.Graph().as_default(), session_lib.Session() as session:
      batch_size = 1
      timesteps = 600
      features = 1
      inputs = random_ops.random_uniform(
          [batch_size, 1, timesteps, features], seed=1234)
      # 40 wide layers followed by a single-channel output layer.
      num_outputs_list = [512] * 40 + [1]
      kernel_w = 3
      x = inputs
      for num_outputs in num_outputs_list:
        x = convolutional.conv2d(x, num_outputs, [1, kernel_w])
      outputs = x
      self.evaluate(variables.global_variables_initializer())
      num_iterations = 4
      # NOTE(review): `xrange` presumably comes from a six.moves import at the
      # top of the file (not visible here) -- confirm before touching.
      for iter_index in xrange(num_iterations):
        start = time.time()
        session.run(outputs)
        wall_time = time.time() - start
        self.report_benchmark(
            name="conv_stack_iter_%d" % iter_index, wall_time=wall_time)
        tf_logging.info("conv_stack_iter_%d: %.4f" % (iter_index, wall_time))

  def _bench_op(self, name, op, burn_iters, num_iters):
    """Benchmarks a single op with Grappler's dependency optimizer disabled."""
    config = config_pb2.ConfigProto()
    # Prevent Grappler from optimizing away the entire graph.
    config.graph_options.rewrite_options.dependency_optimization = (
        rewriter_config_pb2.RewriterConfig.OFF)
    with session_lib.Session(config=config) as session:
      self.evaluate(variables.global_variables_initializer())
      self.run_op_benchmark(
          session, op, burn_iters=burn_iters, min_iters=num_iters, name=name)

  def benchmarkExplicitVsManualPadding(self):
    """Compare performance of EXPLICIT padding and calling tf.pad.

    A Conv2D op with EXPLICIT padding is benchmarked, and a tf.pad with the same
    padding followed by an equivalent Conv2D op is benchmarked.
    """
    if not test.is_gpu_available():
      return
    with ops.Graph().as_default():
      burn_iters = 15
      num_iters = 300
      batch_size = 64
      # The input and filter correspond to the first layer of Resnet50.
      input = variables.Variable(  # pylint: disable=redefined-builtin
          random_ops.random_uniform([
              batch_size,
              3,
              224,
              224
          ]))
      filter = variables.Variable(random_ops.random_uniform([7, 7, 3, 64]))  # pylint: disable=redefined-builtin
      strides = [1, 1, 2, 2]
      padding = [(0, 0), (0, 0), (3, 3), (3, 3)]
      output_explicit_pad = nn_ops.conv2d(
          input, filter, strides, padding=padding, data_format="NCHW")
      input_padded = array_ops.pad(input, padding)
      output_manual_pad = nn_ops.conv2d(
          input_padded, filter, strides, padding="VALID", data_format="NCHW")
      # Benchmark just the forward pass.
      self._bench_op("explicit_pad_forward", output_explicit_pad.op, burn_iters,
                     num_iters)
      self._bench_op("manual_pad_forward", output_manual_pad.op, burn_iters,
                     num_iters)
      # Benchmark both the forward and backwards passes.
      input_grad_explicit_pad, filter_grad_explicit_pad = (
          gradients_impl.gradients(output_explicit_pad, [input, filter]))
      self._bench_op(
          "explicit_pad_backward",
          control_flow_ops.group(input_grad_explicit_pad,
                                 filter_grad_explicit_pad), burn_iters,
          num_iters)
      input_grad_manual_pad, filter_grad_manual_pad = gradients_impl.gradients(
          output_manual_pad, [input, filter])
      self._bench_op(
          "manual_pad_backward",
          control_flow_ops.group(input_grad_manual_pad, filter_grad_manual_pad),
          burn_iters, num_iters)

  def benchmarkExplicitVsSamePaddingGraph(self):
    """Compare performance of EXPLICIT and SAME padding in graph mode.

    A Conv2D op with SAME padding is benchmarked, and an equivalent Conv2D op
    with explicit padding is benchmarked, where the padding is the same as in
    the SAME case. The purpose is to ensure EXPLICIT padding is just as
    efficient as the SAME case
    """
    if not test.is_gpu_available():
      return
    with ops.Graph().as_default():
      burn_iters = 15
      num_convs = 20
      num_iters = 50
      batch_size = 64
      # The input and filter correspond to a middle layer of Resnet50.
      input = variables.Variable(  # pylint: disable=redefined-builtin
          random_ops.random_uniform([
              batch_size,
              256,
              14,
              14
          ]))
      filter = variables.Variable(random_ops.random_uniform([3, 3, 256, 256]))  # pylint: disable=redefined-builtin
      strides = [1, 1, 1, 1]
      padding = [(0, 0), (0, 0), (1, 1), (1, 1)]
      output_explicit_pad = input
      output_same_pad = input
      # Chain num_convs convolutions so per-op overhead dominates less.
      for _ in range(num_convs):
        output_explicit_pad = nn_ops.conv2d(
            output_explicit_pad,
            filter,
            strides,
            padding=padding,
            data_format="NCHW")
        output_same_pad = nn_ops.conv2d(
            output_same_pad,
            filter,
            strides,
            padding="SAME",
            data_format="NCHW")
      grad_explicit_pad, = gradients_impl.gradients(output_explicit_pad, filter)
      grad_same_pad, = gradients_impl.gradients(output_same_pad, filter)
      self._bench_op("graph_explicit_pad", grad_explicit_pad.op, burn_iters,
                     num_iters)
      self._bench_op("graph_same_pad", grad_same_pad.op, burn_iters, num_iters)

  def benchmarkExplicitVsSamePaddingEager(self):
    """Compare performance of EXPLICIT and SAME padding in eager mode.

    A Conv2D op with SAME padding is benchmarked, and an equivalent Conv2D op
    with explicit padding is benchmarked, where the padding is the same as in
    the SAME case. Currently, EXPLICIT padding is slightly slower, due to the
    fact the Python padding list must be checked and processed before the Conv2D
    op can run.
    """
    # TODO(reedwm): Make EXPLICIT padding as fast as SAME padding.
    if not test.is_gpu_available():
      return
    with context.eager_mode():
      burn_iters = 15
      num_convs = 20
      num_iters = 50
      batch_size = 64
      # The input and filter correspond to a middle layer of Resnet50.
      input = variables.Variable(  # pylint: disable=redefined-builtin
          random_ops.random_uniform([
              batch_size,
              256,
              14,
              14
          ]))
      filter = variables.Variable(random_ops.random_uniform([3, 3, 256, 256]))  # pylint: disable=redefined-builtin
      strides = [1, 1, 1, 1]
      padding = [(0, 0), (0, 0), (1, 1), (1, 1)]
      output_explicit_pad = input
      output_same_pad = input
      # Warm-up iterations are not timed.
      for _ in range(burn_iters):
        output_explicit_pad = nn_ops.conv2d(
            output_explicit_pad,
            filter,
            strides,
            padding=padding,
            data_format="NCHW")
        output_same_pad = nn_ops.conv2d(
            output_same_pad,
            filter,
            strides,
            padding="SAME",
            data_format="NCHW")
      start = time.time()
      for _ in range(num_iters):
        with backprop.GradientTape() as tape:
          for _ in range(num_convs):
            output_explicit_pad = nn_ops.conv2d(
                output_explicit_pad,
                filter,
                strides,
                padding=padding,
                data_format="NCHW")
          tape.gradient(output_explicit_pad, filter)
      end = time.time()
      self.report_benchmark(
          name="eager_explicit_pad",
          wall_time=(end - start) / num_iters,
          iters=num_iters)
      start = time.time()
      for _ in range(num_iters):
        with backprop.GradientTape() as tape:
          for _ in range(num_convs):
            output_same_pad = nn_ops.conv2d(
                output_same_pad,
                filter,
                strides,
                padding="SAME",
                data_format="NCHW")
          tape.gradient(output_same_pad, filter)
      end = time.time()
      self.report_benchmark(
          name="eager_same_pad",
          wall_time=(end - start) / num_iters,
          iters=num_iters)
def GetInceptionFwdTest(input_size, filter_size, stride, padding,
                        gpu_only=False):
  """Builds a test method that compares Conv2D forward values for one shape."""

  def Test(self):
    shape_info = (input_size, filter_size, stride, padding)
    if gpu_only and not test.is_gpu_available():
      tf_logging.info("Skipping InceptionFwd %s", shape_info)
      return
    tf_logging.info("Testing InceptionFwd %s", shape_info)
    self._CompareFwdValues(input_size, filter_size, [stride, stride], padding)

  return Test
def GetInceptionFwdDilatedConvTest(input_size, filter_size, stride, padding):
  """Builds a test method for dilated Conv2D; it is a no-op unless stride == 1."""

  def Test(self):
    if stride != 1:
      return
    tf_logging.info("Testing InceptionFwd with dilations %s",
                    (input_size, filter_size, stride, padding))
    self._VerifyDilatedConvValues(
        tensor_in_sizes=input_size,
        filter_in_sizes=filter_size,
        strides=[stride, stride],
        dilations=[2, 2],
        padding=padding,
        rtol=5e-4)

  return Test
def GetInceptionBackInputTest(input_size, filter_size, output_size, stride,
                              padding,
                              gpu_only=False):
  """Builds a test method comparing Conv2D input-gradient values for one shape."""

  def Test(self):
    params = (input_size, filter_size, output_size, stride, padding)
    if gpu_only and not test.is_gpu_available():
      tf_logging.info("Skipping InceptionBackInput %s", params)
      return
    tf_logging.info("Testing InceptionBackInput %s", params)
    self._CompareBackpropInput(input_size, filter_size, output_size,
                               [stride, stride], padding)

  return Test
def GetInceptionBackFilterTest(input_size, filter_size, output_size, strides,
                               padding, gpu_only=False):
  """Builds a test method comparing Conv2D filter-gradient values for one shape."""

  def Test(self):
    params = (input_size, filter_size, output_size, strides, padding)
    if gpu_only and not test.is_gpu_available():
      tf_logging.info("Skipping InceptionBackFilter %s", params)
      return
    tf_logging.info("Testing InceptionBackFilter %s", params)
    self._CompareBackFilter(input_size, filter_size, output_size, strides,
                            padding)

  return Test
if __name__ == "__main__":
  # Dynamically attach one test method per shrunk Inception shape to
  # Conv2DTest, covering forward, dilated forward, backprop-input and
  # backprop-filter paths.
  for index, (input_size_, filter_size_, output_size_, stride_,
              padding_) in enumerate(GetShrunkInceptionShapes()):
    setattr(Conv2DTest, "testInceptionFwd_" + str(index),
            test_util.run_in_graph_and_eager_modes(
                GetInceptionFwdTest(input_size_, filter_size_, stride_,
                                    padding_)))
    setattr(
        Conv2DTest, "testInceptionFwdDilatedConv_" + str(index),
        test_util.run_in_graph_and_eager_modes(GetInceptionFwdDilatedConvTest(
            input_size_, filter_size_, stride_, padding_)))
    setattr(Conv2DTest, "testInceptionBackInput_" + str(index),
            test_util.run_in_graph_and_eager_modes(
                GetInceptionBackInputTest(input_size_, filter_size_,
                                          output_size_, stride_, padding_)))
    setattr(Conv2DTest, "testInceptionBackFilter_" + str(index),
            test_util.run_in_graph_and_eager_modes(
                GetInceptionBackFilterTest(input_size_, filter_size_,
                                           output_size_, [stride_, stride_],
                                           padding_)))
  # TODO(b/35359731)
  # Fwd, BckInput, and BackFilter to test that for certain input parameter
  # set, winograd nonfused algorithm will be excluded from conv autotune. If
  # in such case, winograd nonfused algorithm is added as one option of the
  # conv autotune, and cuDNN version is smaller than 7, the following tests
  # will fail.
  ishape = [1, 400, 400, 1]
  fshape = [1, 1, 1, 256]
  oshape = [1, 400, 400, 256]
  setattr(Conv2DTest, "testInceptionFwd_No_Winograd_Nonfused",
          test_util.run_in_graph_and_eager_modes(
              GetInceptionFwdTest(ishape, fshape, 1, "SAME", gpu_only=True)))
  setattr(Conv2DTest, "testInceptionFwdDilatedConv_No_Winograd_Nonfused",
          test_util.run_in_graph_and_eager_modes(
              GetInceptionFwdDilatedConvTest(ishape, fshape, 1, "SAME")))
  setattr(Conv2DTest, "testInceptionBackInput_No_Winograd_Nonfused",
          test_util.run_in_graph_and_eager_modes(
              GetInceptionBackInputTest(ishape, fshape, oshape, 1, "SAME",
                                        gpu_only=True)))
  setattr(Conv2DTest, "testInceptionBackFilter_No_Winograd_Nonfused",
          test_util.run_in_graph_and_eager_modes(
              GetInceptionBackFilterTest(ishape, fshape, oshape, [1, 1], "SAME",
                                         gpu_only=True)))
  test.main()
| 36.969825 | 115 | 0.588436 |
6ac797b1003a33194fca60517a98c9d9c28c696c | 8,667 | py | Python | olc_webportalv2/new_multisample/models.py | forestdussault/olc_webportalv2 | 9c8c719279ac7dfe9ea749c977d5391e4709b5b9 | [
"MIT"
] | 1 | 2019-01-03T21:14:22.000Z | 2019-01-03T21:14:22.000Z | olc_webportalv2/new_multisample/models.py | lowandrew/olc_webportalv2 | e75ba1b7af85bb25b59138d31e268ecde6616208 | [
"MIT"
] | 8 | 2018-03-05T21:19:41.000Z | 2018-04-05T13:54:45.000Z | olc_webportalv2/new_multisample/models.py | lowandrew/olc_webportalv2 | e75ba1b7af85bb25b59138d31e268ecde6616208 | [
"MIT"
] | 1 | 2019-01-03T21:14:37.000Z | 2019-01-03T21:14:37.000Z | from django.db import models
from olc_webportalv2.users.models import User
from django.contrib.postgres.fields.jsonb import JSONField
import os
from django.core.exceptions import ValidationError
# Create your models here.
def validate_fastq(fieldfile):
    """Validate that an uploaded file has a FASTQ extension.

    Args:
        fieldfile: A Django FieldFile (anything with a ``name`` attribute).

    Raises:
        ValidationError: If the file name does not end with ``.fastq``
            or ``.fastq.gz``.
    """
    filename = os.path.basename(fieldfile.name)
    if filename.endswith(('.fastq.gz', '.fastq')):
        print('File extension for {} confirmed valid'.format(filename))
    else:
        # Bug fix: the original called the undefined gettext alias ``_``
        # (NameError) and used the placeholder %(file)s while supplying
        # params={'filename': ...}, so a real validation failure could
        # never produce the intended message.
        raise ValidationError(
            '%(filename)s does not end with .fastq or .fastq.gz',
            params={'filename': filename},
        )
class ProjectMulti(models.Model):
    """A user-owned analysis project grouping multiple samples."""
    user = models.ForeignKey(User, on_delete=models.CASCADE)
    project_title = models.CharField(max_length=256)
    description = models.CharField(max_length=200, blank=True)
    # Creation timestamp, set once when the row is first saved.
    date = models.DateTimeField(auto_now_add=True)
    # Filename tokens used to tell forward/reverse reads apart.
    forward_id = models.CharField(max_length=256, default='_R1')
    reverse_id = models.CharField(max_length=256, default='_R2')

    def __str__(self):
        return self.project_title
class Sample(models.Model):
    """One sequencing sample within a project, with per-tool pipeline status."""
    project = models.ForeignKey(ProjectMulti, on_delete=models.CASCADE, related_name='samples')
    # NOTE(review): '%s' is not a strftime directive, so upload_to likely does
    # not expand as intended -- confirm the resulting upload paths.
    file_R1 = models.FileField(upload_to='%Y%m%d%s', blank=True)
    file_R2 = models.FileField(upload_to='%Y%m%d%s', blank=True)
    file_fasta = models.FileField(upload_to='%Y%m%d%s', blank=True)
    title = models.CharField(max_length=200, blank=True)
    # Per-tool processing state; starts as "Unprocessed".
    genesippr_status = models.CharField(max_length=128,
                                        default="Unprocessed")
    sendsketch_status = models.CharField(max_length=128,
                                         default="Unprocessed")
    confindr_status = models.CharField(max_length=128,
                                       default="Unprocessed")
    genomeqaml_status = models.CharField(max_length=128,
                                         default="Unprocessed")
    amr_status = models.CharField(max_length=128,
                                  default="Unprocessed")

    def __str__(self):
        return self.title
class GenomeQamlResult(models.Model):
    """GenomeQAML classification result for a sample."""

    class Meta:
        verbose_name_plural = "GenomeQAML Results"

    sample = models.ForeignKey(Sample, on_delete=models.CASCADE, related_name='genomeqaml_result')
    predicted_class = models.CharField(max_length=128, default='N/A')
    percent_fail = models.CharField(max_length=128, default='N/A')
    percent_pass = models.CharField(max_length=128, default='N/A')
    # NOTE(review): 118 is inconsistent with the other 128-wide fields --
    # presumably a typo, but changing it requires a migration.
    percent_reference = models.CharField(max_length=118, default='N/A')

    def __str__(self):
        return '{}'.format(self.sample)
class SendsketchResult(models.Model):
    """One ranked hit row from BBTools sendsketch output for a sample."""

    class Meta:
        verbose_name_plural = "Sendsketch Results"

    def __str__(self):
        return 'pk {}: Rank {}: Sample {}'.format(self.pk, self.rank, self.sample.pk)

    sample = models.ForeignKey(Sample, on_delete=models.CASCADE)
    # Columns mirror the sendsketch report; all stored verbatim as strings.
    rank = models.CharField(max_length=8, default='N/A')
    wkid = models.CharField(max_length=256, default='N/A')
    kid = models.CharField(max_length=256, default='N/A')
    ani = models.CharField(max_length=256, default='N/A')
    complt = models.CharField(max_length=256, default='N/A')
    contam = models.CharField(max_length=256, default='N/A')
    matches = models.CharField(max_length=256, default='N/A')
    unique = models.CharField(max_length=256, default='N/A')
    nohit = models.CharField(max_length=256, default='N/A')
    taxid = models.CharField(max_length=256, default='N/A')
    gsize = models.CharField(max_length=256, default='N/A')
    gseqs = models.CharField(max_length=256, default='N/A')
    taxname = models.CharField(max_length=256, default='N/A')
class GenesipprResults(models.Model):
    """GeneSippr marker-detection results (genesippr.csv) for a sample.

    Marker fields hold percent-identity strings such as ``"98.5%"`` (or
    ``"N/A"``); the ``*_number`` accessors parse them to floats for templates.
    """

    # For admin panel
    def __str__(self):
        return '{}'.format(self.sample)

    # TODO: Accomodate seqID
    sample = models.ForeignKey(Sample, on_delete=models.CASCADE, related_name='genesippr_results')
    # genesippr.csv
    strain = models.CharField(max_length=256, default="N/A")
    genus = models.CharField(max_length=256, default="N/A")
    # STEC
    serotype = models.CharField(max_length=256, default="N/A")
    o26 = models.CharField(max_length=256, default="N/A")
    o45 = models.CharField(max_length=256, default="N/A")
    o103 = models.CharField(max_length=256, default="N/A")
    o111 = models.CharField(max_length=256, default="N/A")
    o121 = models.CharField(max_length=256, default="N/A")
    o145 = models.CharField(max_length=256, default="N/A")
    o157 = models.CharField(max_length=256, default="N/A")
    uida = models.CharField(max_length=256, default="N/A")
    eae = models.CharField(max_length=256, default="N/A")
    eae_1 = models.CharField(max_length=256, default="N/A")
    vt1 = models.CharField(max_length=256, default="N/A")
    vt2 = models.CharField(max_length=256, default="N/A")
    vt2f = models.CharField(max_length=256, default="N/A")
    # listeria
    igs = models.CharField(max_length=256, default="N/A")
    hlya = models.CharField(max_length=256, default="N/A")
    inlj = models.CharField(max_length=256, default="N/A")
    # salmonella
    inva = models.CharField(max_length=256, default="N/A")
    stn = models.CharField(max_length=256, default="N/A")

    @staticmethod
    def _percent(value):
        """Parse a percent string like '98.5%' into its numeric part.

        Refactor: the ten accessors below previously duplicated this
        ``float(value.split('%')[0])`` expression verbatim.
        """
        return float(value.split('%')[0])

    def inva_number(self):
        return self._percent(self.inva)

    def uida_number(self):
        return self._percent(self.uida)

    def vt1_number(self):
        return self._percent(self.vt1)

    def vt2_number(self):
        return self._percent(self.vt2)

    def vt2f_number(self):
        return self._percent(self.vt2f)

    def eae_number(self):
        return self._percent(self.eae)

    def eae_1_number(self):
        return self._percent(self.eae_1)

    def hlya_number(self):
        return self._percent(self.hlya)

    def igs_number(self):
        return self._percent(self.igs)

    def inlj_number(self):
        return self._percent(self.inlj)

    class Meta:
        verbose_name_plural = "Genesippr Results"
class GenesipprResultsSixteens(models.Model):
    """16S identification results (sixteens_full.csv) for a sample."""

    class Meta:
        verbose_name_plural = "SixteenS Results"

    def __str__(self):
        return '{}'.format(self.sample)

    sample = models.ForeignKey(Sample, on_delete=models.CASCADE, related_name='sixteens_results')
    # sixteens_full.csv
    strain = models.CharField(max_length=256, default="N/A")
    gene = models.CharField(max_length=256, default="N/A")
    percentidentity = models.CharField(max_length=256, default="N/A")
    genus = models.CharField(max_length=256, default="N/A")
    foldcoverage = models.CharField(max_length=256, default="N/A")

    @property
    def gi_accession(self):
        # Split by | delimiter, pull second element which should be the GI#.
        # NOTE(review): raises IndexError when `gene` has no '|' (e.g. the
        # default "N/A") -- callers presumably only use this on real hits.
        gi_accession = self.gene.split('|')[1]
        return gi_accession
class GenesipprResultsGDCS(models.Model):
    """GDCS (core-gene) quality results (GDCS.csv) for a sample."""

    class Meta:
        verbose_name_plural = "GDCS Results"

    def __str__(self):
        return '{}'.format(self.sample)

    sample = models.ForeignKey(Sample, on_delete=models.CASCADE, related_name='gdcs_results')
    # GDCS.csv
    strain = models.CharField(max_length=256, default="N/A")
    genus = models.CharField(max_length=256, default="N/A")
    matches = models.CharField(max_length=256, default="N/A")
    meancoverage = models.CharField(max_length=128, default="N/A")
    passfail = models.CharField(max_length=16, default="N/A")
    # Per-allele details stored as arbitrary JSON.
    allele_dict = JSONField(blank=True, null=True, default=dict)
class ConFindrResults(models.Model):
    """ConFindr contamination-detection results for a sample."""

    class Meta:
        verbose_name_plural = 'Confindr Results'

    def __str__(self):
        return '{}'.format(self.sample)

    sample = models.ForeignKey(Sample, on_delete=models.CASCADE, related_name='confindr_results')
    strain = models.CharField(max_length=256, default="N/A")
    genera_present = models.CharField(max_length=256, default="N/A")
    # Number of contaminating SNVs reported by ConFindr, stored verbatim.
    contam_snvs = models.CharField(max_length=256, default="N/A")
    contaminated = models.CharField(max_length=256, default="N/A")
class GenesipprResultsSerosippr(models.Model):
    """Placeholder for Serosippr serotyping results; only links to a sample."""

    class Meta:
        verbose_name_plural = "Serosippr Results"

    def __str__(self):
        return '{}'.format(self.sample)

    sample = models.ForeignKey(Sample, on_delete=models.CASCADE)
class AMRResult(models.Model):
    """Antimicrobial-resistance results for a sample, stored as raw JSON."""

    class Meta:
        verbose_name_plural = 'AMR Results'

    def __str__(self):
        return '{}'.format(self.sample)

    sample = models.ForeignKey(Sample, on_delete=models.CASCADE, related_name='amr_results')
    results_dict = JSONField(blank=True, null=True, default=dict)
    species = models.CharField(max_length=88, default='N/A')
| 36.415966 | 98 | 0.678782 |
bc61d57432c789cdb436b4bc1c5183ead5243af6 | 352 | py | Python | test1/myscript4.py | josephernest/vversioning | de09ab66c018a5aceee787101c5e307f957a2601 | [
"MIT"
] | null | null | null | test1/myscript4.py | josephernest/vversioning | de09ab66c018a5aceee787101c5e307f957a2601 | [
"MIT"
] | null | null | null | test1/myscript4.py | josephernest/vversioning | de09ab66c018a5aceee787101c5e307f957a2601 | [
"MIT"
] | null | null | null | """
==CHANGELOG==
* support UTF8
==CHANGELOG==
"""
sqdgfhsqgfksqfkjgsqfkqsgdkfsqkgfqsdf
sqgjdfjsqdhfqgskdgfkqgsdjfsqdfggdsqjf
sqdgfhsqgfksqfkjgsqfkqsgdkfsqkgfqsdf
sqgjdfjsqdhfqgskdgfkqgsdjfsqdfggdsqjf
sqdgfhsqgfksqfkjgsqfkqsgdkfsqkgfqsdf
sqgjdfjsqdhfqgskdgfkqgsdjfsqdfggdsqjf
sqdgfhsqgfksqfkjgsqfkqsgdkfsqkgfqsdf
sqgjdfjsqdhfqgskdgfkqgsdjfsqdfggdsqjf
| 23.466667 | 37 | 0.911932 |
efec86be34effc8afef49c51476f56364df69329 | 3,494 | py | Python | tests/test_utils.py | rra/aiohttp-remotes | a1aa6916c1713af40688370ab48bfffb350574fd | [
"MIT"
] | null | null | null | tests/test_utils.py | rra/aiohttp-remotes | a1aa6916c1713af40688370ab48bfffb350574fd | [
"MIT"
] | null | null | null | tests/test_utils.py | rra/aiohttp-remotes | a1aa6916c1713af40688370ab48bfffb350574fd | [
"MIT"
] | null | null | null | from ipaddress import IPv4Address, IPv6Address, ip_address, ip_network
import pytest
from aiohttp_remotes.exceptions import IncorrectIPCount, UntrustedIP
from aiohttp_remotes.utils import parse_trusted_list, remote_ip
def test_parse_str():
    # A bare string is iterable but is not a list of trusted entries.
    with pytest.raises(TypeError):
        parse_trusted_list('127.0.0.1')
def test_parse_non_sequence():
    # Non-sequence input (an int) must be rejected.
    with pytest.raises(TypeError):
        parse_trusted_list(1)
def test_parse_non_sequence_of_containers():
    # Each element of the trusted list must itself be a container of IPs.
    with pytest.raises(TypeError):
        parse_trusted_list([1])
def test_parse_ipv4():
    # An already-constructed IPv4Address passes through unchanged.
    ret = parse_trusted_list([[IPv4Address('127.0.0.1')]])
    assert ret == [[IPv4Address('127.0.0.1')]]
def test_parse_ipv6():
    # An already-constructed IPv6Address passes through unchanged.
    ret = parse_trusted_list([[IPv6Address('::1')]])
    assert ret == [[IPv6Address('::1')]]
def test_parse_ipv4_str():
    # String entries are converted to IPv4Address objects.
    ret = parse_trusted_list([['127.0.0.1']])
    assert ret == [[IPv4Address('127.0.0.1')]]
def test_parse_ipv6_str():
    # String entries are converted to IPv6Address objects.
    ret = parse_trusted_list([['::1']])
    assert ret == [[IPv6Address('::1')]]
def test_parse_non_ip_item():
    # A string that is not a valid IP address is rejected.
    with pytest.raises(ValueError):
        parse_trusted_list([['garbage']])
def test_parse_ellipsis_at_beginning():
    # A trailing Ellipsis ("trust the rest") is kept as-is.
    ret = parse_trusted_list([['127.0.0.1'], ...])
    assert ret == [[IPv4Address('127.0.0.1')], ...]
def test_parse_ellipsis_after_address():
    # Ellipsis is only valid as the last entry, not before address groups.
    with pytest.raises(ValueError):
        parse_trusted_list([..., ['127.0.0.1']])
# --------------------- remote_ip -----------------------
def test_remote_ip_no_trusted():
    # With no trusted proxies configured, the sole IP in the chain is returned.
    ip = ip_address('10.10.10.10')
    assert ip == remote_ip([], [ip])
def test_remote_ip_ok():
    # All proxies in the chain are trusted, so the last (origin) IP is chosen.
    ips = [ip_address('10.10.10.10'),
           ip_address('20.20.20.20'),
           ip_address('30.30.30.30')]
    trusted = parse_trusted_list([['10.10.0.0/16'],
                                  ['20.20.20.20']])
    assert ips[-1] == remote_ip(trusted, ips)
def test_remote_ip_not_trusted_network():
    # First hop falls outside the trusted network -> UntrustedIP with details.
    ips = [ip_address('10.10.10.10'),
           ip_address('20.20.20.20'),
           ip_address('30.30.30.30')]
    trusted = parse_trusted_list([['40.40.0.0/16'],
                                  ['20.20.20.20']])
    with pytest.raises(UntrustedIP) as ctx:
        remote_ip(trusted, ips)
    assert ctx.value.trusted == [ip_network('40.40.0.0/16')]
    assert ctx.value.ip == ip_address('10.10.10.10')
def test_remote_ip_not_trusted_ip():
    # First hop does not match the trusted single address -> UntrustedIP.
    ips = [ip_address('10.10.10.10'),
           ip_address('20.20.20.20'),
           ip_address('30.30.30.30')]
    trusted = parse_trusted_list([['40.40.40.40'],
                                  ['20.20.20.20']])
    with pytest.raises(UntrustedIP) as ctx:
        remote_ip(trusted, ips)
    assert ctx.value.trusted == [ip_address('40.40.40.40')]
    assert ctx.value.ip == ip_address('10.10.10.10')
def test_remote_ip_invalis_ips_count():
    # NOTE(review): "invalis" is a typo for "invalid"; left unchanged so the
    # collected test name stays stable.
    # Two trusted hops imply a chain of three IPs; only two are supplied.
    ips = [ip_address('10.10.10.10'),
           ip_address('20.20.20.20')]
    trusted = parse_trusted_list([['40.40.40.40'],
                                  ['20.20.20.20']])
    with pytest.raises(IncorrectIPCount) as ctx:
        remote_ip(trusted, ips)
    assert ctx.value.expected == 3
    assert ctx.value.actual == [IPv4Address('10.10.10.10'),
                                IPv4Address('20.20.20.20')]
def test_remote_with_ellipsis():
    # With a trailing Ellipsis the remaining hops are implicitly trusted and
    # the second-to-last IP is reported as the client address.
    ips = [ip_address('10.10.10.10'),
           ip_address('20.20.20.20'),
           ip_address('30.30.30.30')]
    trusted = parse_trusted_list([['10.10.0.0/16'],
                                  ...])
    assert ips[-2] == remote_ip(trusted, ips)
| 29.610169 | 70 | 0.598454 |
fba5c06dd089c53e740a19d314c459e0945f7715 | 11,137 | py | Python | tf_model/residual_attention_keras.py | tamlhp/dfd_benchmark | 15cc5c4708a5414c6309ea1f20a5dfa3428409fa | [
"MIT"
] | 7 | 2020-03-20T18:46:29.000Z | 2022-03-22T03:06:17.000Z | tf_model/residual_attention_keras.py | tamlhp/dfd_benchmark | 15cc5c4708a5414c6309ea1f20a5dfa3428409fa | [
"MIT"
] | 1 | 2021-12-03T06:49:04.000Z | 2021-12-03T06:49:04.000Z | tf_model/residual_attention_keras.py | tamlhp/dfd_benchmark | 15cc5c4708a5414c6309ea1f20a5dfa3428409fa | [
"MIT"
] | 2 | 2021-08-23T08:54:09.000Z | 2022-02-07T10:04:23.000Z | import os
# Pin CUDA device enumeration to PCI bus order and expose only GPU 0 before
# TensorFlow initializes.
os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"  # see issue #152
os.environ["CUDA_VISIBLE_DEVICES"] = "0"
import tensorflow.keras.backend as K
import tensorflow as tf
from keras.backend.tensorflow_backend import set_session
# TF1-style session configuration: grow GPU memory on demand, capped at 60%.
# NOTE(review): ConfigProto/Session are TF1-only APIs -- this file assumes TF1.
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
config.gpu_options.per_process_gpu_memory_fraction = 0.6
# config.gpu_options.visible_device_list = "0" #only the gpu 0 is allowed
set_session(tf.Session(config=config))
from keras.layers import BatchNormalization
from keras.layers import Conv2D
from keras.layers import UpSampling2D
from keras.layers import Activation
from keras.layers import MaxPool2D
from keras.layers import Add
from keras.layers import Multiply
from keras.layers import Lambda
from keras.layers import Input
from keras.layers import Conv2D
from keras.layers import MaxPool2D
from keras.layers import Dense
from keras.layers import AveragePooling2D
from keras.layers import Flatten
from keras.layers import Activation
from keras.layers import BatchNormalization
from keras.layers import Dropout
from keras.models import Model
from keras.regularizers import l2
import keras
from keras.utils.vis_utils import model_to_dot
from keras.utils import to_categorical
from keras.preprocessing.image import ImageDataGenerator
from keras.callbacks import ReduceLROnPlateau, EarlyStopping
def residual_block(input, input_channels=None, output_channels=None, kernel_size=(3, 3), stride=1):
    """
    full pre-activation residual block
    https://arxiv.org/pdf/1603.05027.pdf

    Builds a 1x1 -> kxk -> 1x1 bottleneck with BN-ReLU before each conv.
    Defaults: output_channels falls back to the input's channel count, and
    the bottleneck width is output_channels // 4.
    Note: `input` shadows the Python builtin; kept for API compatibility.
    """
    if output_channels is None:
        output_channels = input.get_shape()[-1].value
    if input_channels is None:
        input_channels = output_channels // 4
    strides = (stride, stride)
    x = BatchNormalization()(input)
    x = Activation('relu')(x)
    x = Conv2D(input_channels, (1, 1))(x)
    x = BatchNormalization()(x)
    x = Activation('relu')(x)
    # Spatial downsampling (when stride > 1) happens in the middle conv.
    x = Conv2D(input_channels, kernel_size, padding='same', strides=stride)(x)
    x = BatchNormalization()(x)
    x = Activation('relu')(x)
    x = Conv2D(output_channels, (1, 1), padding='same')(x)
    # Project the shortcut when channel count or spatial size changes so the
    # residual addition is shape-compatible.
    if input_channels != output_channels or stride != 1:
        input = Conv2D(output_channels, (1, 1), padding='same', strides=strides)(input)
    x = Add()([x, input])
    return x
def attention_block(input, input_channels=None, output_channels=None, encoder_depth=1):
    """
    attention block
    https://arxiv.org/abs/1704.06904

    Builds the residual-attention module: a trunk branch of residual blocks
    and a soft-mask branch (encoder/decoder with `encoder_depth` levels of
    down/upsampling plus skip connections).  Output is
    (1 + mask) * trunk, passed through p trailing residual blocks.
    p/t/r follow the paper's notation for the number of pre/trunk/mask
    residual blocks.
    """
    p = 1
    t = 2
    r = 1
    if input_channels is None:
        input_channels = input.get_shape()[-1].value
    if output_channels is None:
        output_channels = input_channels
    # First Residual Block
    for i in range(p):
        input = residual_block(input)
    # Trunc Branch
    output_trunk = input
    for i in range(t):
        output_trunk = residual_block(output_trunk)
    # Soft Mask Branch
    ## encoder
    ### first down sampling
    output_soft_mask = MaxPool2D(padding='same')(input)  # 32x32
    for i in range(r):
        output_soft_mask = residual_block(output_soft_mask)
    skip_connections = []
    for i in range(encoder_depth - 1):
        ## skip connections
        output_skip_connection = residual_block(output_soft_mask)
        skip_connections.append(output_skip_connection)
        # print ('skip shape:', output_skip_connection.get_shape())
        ## down sampling
        output_soft_mask = MaxPool2D(padding='same')(output_soft_mask)
        for _ in range(r):
            output_soft_mask = residual_block(output_soft_mask)
    ## decoder
    # Skips are consumed in reverse so each upsampled level pairs with the
    # encoder level of matching resolution.
    skip_connections = list(reversed(skip_connections))
    for i in range(encoder_depth - 1):
        ## upsampling
        for _ in range(r):
            output_soft_mask = residual_block(output_soft_mask)
        output_soft_mask = UpSampling2D()(output_soft_mask)
        ## skip connections
        output_soft_mask = Add()([output_soft_mask, skip_connections[i]])
    ### last upsampling
    for i in range(r):
        output_soft_mask = residual_block(output_soft_mask)
    output_soft_mask = UpSampling2D()(output_soft_mask)
    ## Output: two 1x1 convs then sigmoid squashes the mask into (0, 1).
    output_soft_mask = Conv2D(input_channels, (1, 1))(output_soft_mask)
    output_soft_mask = Conv2D(input_channels, (1, 1))(output_soft_mask)
    output_soft_mask = Activation('sigmoid')(output_soft_mask)
    # Attention: (1 + output_soft_mask) * output_trunk
    output = Lambda(lambda x: x + 1)(output_soft_mask)
    output = Multiply()([output, output_trunk])  #
    # Last Residual Block
    for i in range(p):
        output = residual_block(output)
    return output
def AttentionResNet92(shape=(256, 256, 3), n_channels=64, n_classes=1,
                      dropout=0, regularization=0.01):
    """
    Attention-92 ResNet
    https://arxiv.org/abs/1704.06904

    Stem conv -> maxpool, then stages of residual blocks with 1/2/3
    attention modules of decreasing encoder depth, global average pooling
    and a sigmoid dense head (n_classes=1 -> binary output).
    """
    regularizer = l2(regularization)
    input_ = Input(shape=shape)
    x = Conv2D(n_channels, (7, 7), strides=(2, 2), padding='same')(input_)  # 112x112
    x = BatchNormalization()(x)
    x = Activation('relu')(x)
    x = MaxPool2D(pool_size=(3, 3), strides=(2, 2), padding='same')(x)  # 56x56
    x = residual_block(x, output_channels=n_channels * 4)  # 56x56
    x = attention_block(x, encoder_depth=3)  # bottleneck 7x7
    x = residual_block(x, output_channels=n_channels * 8, stride=2)  # 28x28
    x = attention_block(x, encoder_depth=2)  # bottleneck 7x7
    x = attention_block(x, encoder_depth=2)  # bottleneck 7x7
    x = residual_block(x, output_channels=n_channels * 16, stride=2)  # 14x14
    x = attention_block(x, encoder_depth=1)  # bottleneck 7x7
    x = attention_block(x, encoder_depth=1)  # bottleneck 7x7
    x = attention_block(x, encoder_depth=1)  # bottleneck 7x7
    x = residual_block(x, output_channels=n_channels * 32, stride=2)  # 7x7
    x = residual_block(x, output_channels=n_channels * 32)
    x = residual_block(x, output_channels=n_channels * 32)
    # Global average pooling over whatever spatial size remains.
    pool_size = (x.get_shape()[1].value, x.get_shape()[2].value)
    x = AveragePooling2D(pool_size=pool_size, strides=(1, 1))(x)
    x = Flatten()(x)
    if dropout:
        x = Dropout(dropout)(x)
    output = Dense(n_classes, kernel_regularizer=regularizer, activation='sigmoid')(x)
    model = Model(input_, output)
    return model
def AttentionResNet56(shape=(256, 256, 3), n_channels=64, n_classes=1,
                      dropout=0, regularization=0.01):
    """
    Attention-56 ResNet
    https://arxiv.org/abs/1704.06904

    Same layout as AttentionResNet92 but with a single attention module
    per stage.
    """
    regularizer = l2(regularization)

    input_ = Input(shape=shape)

    # Stem: strided 7x7 conv + 3x3 max-pool -> 56x56 feature maps.
    net = Conv2D(n_channels, (7, 7), strides=(2, 2), padding='same')(input_)  # 112x112
    net = BatchNormalization()(net)
    net = Activation('relu')(net)
    net = MaxPool2D(pool_size=(3, 3), strides=(2, 2), padding='same')(net)  # 56x56

    # 56x56 stage.
    net = residual_block(net, output_channels=n_channels * 4)
    net = attention_block(net, encoder_depth=3)  # bottleneck 7x7

    # 28x28 stage.
    net = residual_block(net, output_channels=n_channels * 8, stride=2)
    net = attention_block(net, encoder_depth=2)  # bottleneck 7x7

    # 14x14 stage.
    net = residual_block(net, output_channels=n_channels * 16, stride=2)
    net = attention_block(net, encoder_depth=1)  # bottleneck 7x7

    # 7x7 stage: three plain residual blocks.
    net = residual_block(net, output_channels=n_channels * 32, stride=2)
    net = residual_block(net, output_channels=n_channels * 32)
    net = residual_block(net, output_channels=n_channels * 32)

    # Global average pooling + classifier head (TF1 shape API).
    pool_size = (net.get_shape()[1].value, net.get_shape()[2].value)
    net = AveragePooling2D(pool_size=pool_size, strides=(1, 1))(net)
    net = Flatten()(net)
    if dropout:
        net = Dropout(dropout)(net)

    output = Dense(n_classes, kernel_regularizer=regularizer, activation='sigmoid')(net)
    return Model(input_, output)
def AttentionResNetCifar10(shape=(256, 256, 3), n_channels=32, n_classes=1):
    """
    Attention-56 ResNet for Cifar10 Dataset
    https://arxiv.org/abs/1704.06904
    """
    input_ = Input(shape=shape)

    # Stem: 5x5 conv + 2x2 max-pool.
    net = Conv2D(n_channels, (5, 5), padding='same')(input_)
    net = BatchNormalization()(net)
    net = Activation('relu')(net)
    net = MaxPool2D(pool_size=(2, 2))(net)  # 16x16

    # Alternating residual / attention stages with decreasing spatial size.
    net = residual_block(net, input_channels=32, output_channels=128)
    net = attention_block(net, encoder_depth=2)

    net = residual_block(net, input_channels=128, output_channels=256, stride=2)  # 8x8
    net = attention_block(net, encoder_depth=1)

    net = residual_block(net, input_channels=256, output_channels=512, stride=2)  # 4x4
    net = attention_block(net, encoder_depth=1)

    # Three final plain residual blocks.
    net = residual_block(net, input_channels=512, output_channels=1024)
    net = residual_block(net, input_channels=1024, output_channels=1024)
    net = residual_block(net, input_channels=1024, output_channels=1024)

    # 4x4 average pooling collapses the map to 1x1 before the classifier.
    net = AveragePooling2D(pool_size=(4, 4), strides=(1, 1))(net)  # 1x1
    net = Flatten()(net)
    output = Dense(n_classes, activation='sigmoid')(net)
    return Model(input_, output)
from collections import Counter
from sklearn.utils import class_weight
import numpy as np
if __name__ == "__main__":
    # Train the Attention-92 network as a binary classifier on a directory
    # dataset (side effects: reads image dirs, writes logs/checkpoints).
    IMAGE_SHAPE = 256
    model = AttentionResNet92(shape=(IMAGE_SHAPE, IMAGE_SHAPE, 3),n_channels=32, n_classes=1)
    model.compile(optimizer = "adam", loss = 'binary_crossentropy',metrics = ['accuracy'])

    from keras.preprocessing.image import ImageDataGenerator
    batch_size = 32
    # Light augmentation: rescale to [0, 1], small rotations/shifts/shear,
    # horizontal flips.
    dataGenerator = ImageDataGenerator(rescale=1./255,rotation_range=5,
                                       width_shift_range=0.05,
                                       height_shift_range=0.05,
                                       horizontal_flip=True,shear_range=0.05)
    generator = dataGenerator.flow_from_directory(
        '/data/tam/kaggle/extract_raw_img/',
        target_size=(IMAGE_SHAPE, IMAGE_SHAPE),
        batch_size=batch_size,
        class_mode='binary',
        subset='training',shuffle=True)
    # NOTE(review): the "test" generator reuses the augmenting generator and
    # subset='training' — presumably intended to be a plain validation
    # pipeline; confirm.
    test_generator = dataGenerator.flow_from_directory(
        '/data/tam/kaggle/extract_raw_img_test/',
        target_size=(IMAGE_SHAPE, IMAGE_SHAPE),
        batch_size=batch_size,
        class_mode='binary',
        subset='training',shuffle=True)

    # Callbacks: TensorBoard logging, LR reduction and early stopping on
    # val_acc, and a checkpoint every epoch (note the .pth extension despite
    # this being a Keras model).
    tensorboard_callback = keras.callbacks.TensorBoard(log_dir="./log_residual_attention_keras")
    lr_reducer = ReduceLROnPlateau(monitor='val_acc', factor=0.2, patience=7, min_lr=10e-7, epsilon=0.01, verbose=1)
    checkpoints = keras.callbacks.ModelCheckpoint("./log_residual_attention_keras/checkpoint_newdata_{epoch:04d}.pth", monitor='val_loss', verbose=0, save_best_only=False, period=1)
    early_stopper = EarlyStopping(monitor='val_acc', min_delta=0, patience=15, verbose=1)
    callbacks= [tensorboard_callback,checkpoints,lr_reducer, early_stopper]

    # Class weights: inverse-frequency relative to the majority class.
    counter = Counter(generator.classes)
    max_val = float(max(counter.values()))
    class_weights = {class_id : max_val/num_images for class_id, num_images in counter.items()}
    # NOTE(review): class_weights_2 is computed but never used — confirm
    # whether the sklearn 'balanced' weights were meant to be passed instead.
    class_weights_2 = class_weight.compute_class_weight(
        'balanced',
        np.unique(generator.classes),
        generator.classes)
    model.fit_generator(generator,validation_data=test_generator, steps_per_epoch=int(1142792/batch_size), epochs=30,workers=1,validation_steps=14298/batch_size,class_weight = class_weights,callbacks = callbacks)
| 36.276873 | 212 | 0.694711 |
30804e12aea73580edcae0f030cdcd4a4c714c7b | 3,281 | py | Python | guts/api/views/versions.py | smallwormer/stable-liberty-guts | e635b710cdd210f70e9d50c3b85fffdeb53e8f01 | [
"Apache-2.0"
] | null | null | null | guts/api/views/versions.py | smallwormer/stable-liberty-guts | e635b710cdd210f70e9d50c3b85fffdeb53e8f01 | [
"Apache-2.0"
] | null | null | null | guts/api/views/versions.py | smallwormer/stable-liberty-guts | e635b710cdd210f70e9d50c3b85fffdeb53e8f01 | [
"Apache-2.0"
] | 1 | 2022-03-03T05:41:31.000Z | 2022-03-03T05:41:31.000Z | # Copyright (c) 2015 Aptira Pty Ltd.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import copy
import os
from oslo_config import cfg
# Config option letting deployments behind a proxy override the URL base
# used when building version documents.
versions_opts = [
    cfg.StrOpt('public_endpoint',
               help="Public url to use for versions endpoint. The default "
                    "is None, which will use the request's host_url "
                    "attribute to populate the URL base. If Guts is "
                    "operating behind a proxy, you will want to change "
                    "this to represent the proxy's URL."),
]

CONF = cfg.CONF
CONF.register_opts(versions_opts)


def get_view_builder(req):
    """Return a ViewBuilder rooted at the configured public endpoint,
    falling back to the WSGI application URL of the request."""
    base_url = CONF.public_endpoint or req.application_url
    return ViewBuilder(base_url)
class ViewBuilder(object):
    """Assembles the JSON documents served by the API versions endpoints."""

    def __init__(self, base_url):
        """Initialize ViewBuilder.

        :param base_url: url of the root wsgi application
        """
        self.base_url = base_url

    def build_choices(self, VERSIONS, req):
        """Build the multiple-choices body listing every known version."""
        choices = [
            {
                "id": info["id"],
                "status": info["status"],
                "links": [{"rel": "self",
                           "href": self.generate_href(info["id"], req.path)}],
                "media-types": info["media-types"],
            }
            for info in VERSIONS.values()
        ]
        return dict(choices=choices)

    def build_versions(self, versions):
        """Build the versions listing, ordered by version key."""
        version_objs = [
            {
                "id": versions[key]["id"],
                "status": versions[key]["status"],
                "updated": versions[key]["updated"],
                "links": self._build_links(versions[key]),
            }
            for key in sorted(versions)
        ]
        return dict(versions=version_objs)

    def build_version(self, version):
        """Build a single-version document, prepending a self link."""
        doc = copy.deepcopy(version)
        doc['links'].insert(0, {
            "rel": "self",
            "href": self.base_url.rstrip('/') + '/',
        })
        return dict(version=doc)

    def _build_links(self, version_data):
        """Generate a container of links that refer to the provided version."""
        return [{'rel': 'self',
                 'href': self.generate_href(version_data['id'])}]

    def generate_href(self, version, path=None):
        """Create an url that refers to a specific version_number."""
        # NOTE: only 'v1' is ever exposed; the *version* argument is ignored.
        version_number = 'v1'
        if not path:
            return os.path.join(self.base_url, version_number) + '/'
        return os.path.join(self.base_url, version_number, path.strip('/'))
| 33.141414 | 79 | 0.586102 |
89dd2a46f5682a83f7140285f315366b7b828f02 | 391 | py | Python | 7KYU/reverse_number.py | yaznasivasai/python_codewars | 25493591dde4649dc9c1ec3bece8191a3bed6818 | [
"MIT"
] | 4 | 2021-07-17T22:48:03.000Z | 2022-03-25T14:10:58.000Z | 7KYU/reverse_number.py | yaznasivasai/python_codewars | 25493591dde4649dc9c1ec3bece8191a3bed6818 | [
"MIT"
] | null | null | null | 7KYU/reverse_number.py | yaznasivasai/python_codewars | 25493591dde4649dc9c1ec3bece8191a3bed6818 | [
"MIT"
def reverse_number(n: int) -> int:
    """ This function takes in input 'n' and returns 'n' with all digits reversed. """
    # Reverse the decimal digits of |n| via its string form, then restore
    # the sign.  Leading zeros after reversal vanish in the int() round-trip.
    sign = -1 if n < 0 else 1
    return sign * int(str(abs(n))[::-1])
| 32.583333 | 95 | 0.526854 |
2baa9ed47050f4f3d44ce3340987ea3b2f3d4431 | 2,565 | py | Python | test/functional/sbercoin_state_root.py | SbercoinCom/sbercoin.com | 8fb386e59e4db8a6abb3a2c638a2ecc918f6b9dd | [
"MIT"
] | 1 | 2021-05-17T06:06:57.000Z | 2021-05-17T06:06:57.000Z | test/functional/sbercoin_state_root.py | SbercoinCom/sbercoin.com | 8fb386e59e4db8a6abb3a2c638a2ecc918f6b9dd | [
"MIT"
] | null | null | null | test/functional/sbercoin_state_root.py | SbercoinCom/sbercoin.com | 8fb386e59e4db8a6abb3a2c638a2ecc918f6b9dd | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# Copyright (c) 2015-2016 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import *
from test_framework.script import *
from test_framework.mininode import *
from test_framework.messages import COIN
from test_framework.sbercoinconfig import *
import sys
class StateRootTest(BitcoinTestFramework):
    """Functional test for the EVM state root (hashStateRoot) in block headers."""

    def set_test_params(self):
        # Single node on a brand-new chain.
        self.setup_clean_chain = True
        self.num_nodes = 1

    def skip_test_if_missing_module(self):
        self.skip_if_no_wallet()

    # verify that the state hash is not 0 on genesis
    def verify_not_null_test(self):
        block_hash = self.node.getblockhash(0)
        block = self.node.getblock(block_hash)
        assert(int(block['hashStateRoot'], 16) != 0)

    # verify that the state hash changes on contract creation
    def verify_state_hash_changes(self):
        # NOTE(review): 'amount' is never used below — confirm it is leftover.
        amount = 20000*COIN
        self.node.generate(COINBASE_MATURITY+50)
        block_hash_a = self.node.getblockhash(COINBASE_MATURITY+50)
        block_a = self.node.getblock(block_hash_a)
        # Solidity source of the bytecode deployed below:
        """
        pragma solidity ^0.4.10;

        contract Example {
            function () payable {}
        }
        """
        # Deploying a contract mutates the EVM state, so the next block's
        # state root must differ from the pre-deployment one.
        self.node.createcontract("60606040523415600b57fe5b5b60398060196000396000f30060606040525b600b5b5b565b0000a165627a7a7230582092926a9814888ff08700cbd86cf4ff8c50052f5fd894e794570d9551733591d60029")
        self.node.generate(1)
        block_hash_b = self.node.getblockhash(COINBASE_MATURITY+51)
        block_b = self.node.getblock(block_hash_b)
        assert(block_a['hashStateRoot'] != block_b['hashStateRoot'])

    # verify that the state hash remains the same on restart
    def verify_state_hash_remains_on_restart(self):
        block_hash_a = self.node.getblockhash(COINBASE_MATURITY+51)
        block_a = self.node.getblock(block_hash_a)
        # Restart the node, then mine one more (state-neutral) block and
        # check the state root is unchanged.
        self.stop_nodes()
        self.start_nodes()
        self.node = self.nodes[0]
        self.node.generate(1)
        block_hash_b = self.node.getblockhash(COINBASE_MATURITY+52)
        block_b = self.node.getblock(block_hash_b)
        assert(block_a['hashStateRoot'] == block_b['hashStateRoot'])

    def run_test(self):
        # Run the three checks in order; later checks rely on the chain
        # height produced by earlier ones.
        self.node = self.nodes[0]
        self.verify_not_null_test()
        self.verify_state_hash_changes()
        self.verify_state_hash_remains_on_restart()

if __name__ == '__main__':
    StateRootTest().main()
| 38.863636 | 200 | 0.716179 |
9bda86392cdfa530b398f3710444bf5b76b9ff7d | 2,780 | py | Python | Agent/duelingnet.py | hassaanakram/Resource-Allocation-using-deeprl | f36bfb6ba9956a0b072421a8feb8034428571403 | [
"MIT"
] | 33 | 2020-05-19T10:56:45.000Z | 2022-03-08T11:40:53.000Z | Agent/duelingnet.py | chisyliu/Resource-Allocation-using-deeprl | fec93a99177115ee32652483fd1005cdbf67ae56 | [
"MIT"
] | 5 | 2020-05-11T12:41:12.000Z | 2022-03-01T10:49:36.000Z | Agent/duelingnet.py | chisyliu/Resource-Allocation-using-deeprl | fec93a99177115ee32652483fd1005cdbf67ae56 | [
"MIT"
] | 17 | 2020-02-09T10:50:41.000Z | 2022-03-16T01:57:54.000Z | import numpy as np
import tensorflow as tf
import tflearn
import keras.backend as K
from keras.models import Model
from keras.layers import Dense, Input, Flatten, Conv1D, Conv2D, MaxPooling1D, MaxPooling2D, Add, Subtract, Lambda
class DuelingNetwork:
    """Dueling DQN (value + advantage streams) with a soft-updated target net.

    Builds two identical graphs -- the online network ('dqn') and the target
    network ('target') -- plus the TD-error training op and the Polyak
    (soft) target-update op.
    """

    def __init__(self, session, dim_state, dim_action, learning_rate, tau=0.01):
        """
        :param session: TensorFlow session used to run all ops
        :param dim_state: dimensionality of the flat state vector
        :param dim_action: number of discrete actions
        :param learning_rate: Adam learning rate
        :param tau: soft-update mixing factor for the target network
        """
        self._sess = session
        self._dim_s = dim_state
        self._dim_a = dim_action
        self._lr = learning_rate

        self._inputs = tflearn.input_data(shape=[None, self._dim_s])
        self._out, self._params = self.buildNetwork(self._inputs, 'dqn')
        self._out_target, self._params_target = self.buildNetwork(self._inputs, 'target')

        # One-hot action mask and the TD targets fed in during training.
        self._actions = tf.placeholder(tf.float32, [None, self._dim_a])
        self._y_values = tf.placeholder(tf.float32, [None])
        # Q-value of the taken action: mask the Q vector and sum per row.
        action_q_values = tf.reduce_sum(tf.multiply(self._out, self._actions), reduction_indices=1)

        # Polyak soft update: target <- tau * online + (1 - tau) * target.
        # BUGFIX: the original used "tau * g_p - (1 - tau) * t_p", which
        # negates the target weights instead of blending them.
        self._update_target = \
            [t_p.assign(tau * g_p + (1 - tau) * t_p) for g_p, t_p in zip(self._params, self._params_target)]

        self.loss = tflearn.mean_square(self._y_values, action_q_values)
        self.optimize = tf.train.AdamOptimizer(self._lr).minimize(self.loss)

    def buildNetwork(self, state, type):
        """Build one dueling Q-network under the given variable scope.

        :param state: input tensor of shape [None, dim_state]
        :param type: variable scope name ('dqn' or 'target')
        :returns: (q_values tensor, trainable variables of the scope)
        """
        with tf.variable_scope(type):
            # Shared trunk.
            net = tflearn.fully_connected(state, 64, activation='relu')
            net = tflearn.fully_connected(net, 32, activation='relu')

            # State-value stream V(s): a single scalar.
            value = Dense(8, activation="relu")(net)
            value = Dense(1, activation="relu")(value)

            # Advantage stream A(s, a), centred by its mean so the
            # decomposition Q = V + (A - mean(A)) is identifiable.
            advantage = Dense(8, activation="relu")(net)
            advantage_mean = Lambda(lambda x: K.mean(x, axis=1))(advantage)
            advantage = Subtract()([advantage, advantage_mean])

            net = Add()([value, advantage])
            q_values = tflearn.fully_connected(net, self._dim_a)
            params = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope=type)
        return q_values, params

    def train(self, inputs, action, y_values):
        """Run one optimization step; returns [_, loss]."""
        return self._sess.run([self.optimize, self.loss], feed_dict={
            self._inputs: inputs,
            self._actions: action,
            self._y_values: y_values
        })

    def predict(self, inputs):
        """Return Q-values from the online network."""
        return self._sess.run(self._out, feed_dict={
            self._inputs: inputs,
        })

    def predict_target(self, inputs):
        """Return Q-values from the target network."""
        return self._sess.run(self._out_target, feed_dict={
            self._inputs: inputs
        })

    def update_target(self):
        """Apply the soft target-network update once."""
        self._sess.run(self._update_target)
da2638fa62f49345ce7fa3d187d19deb2e9ef627 | 586 | py | Python | tuple.py | oananbeh/Python-tutorial- | 5d2a1d4157fb2bade7be247d844090ed1ffbf973 | [
"Apache-2.0"
] | null | null | null | tuple.py | oananbeh/Python-tutorial- | 5d2a1d4157fb2bade7be247d844090ed1ffbf973 | [
"Apache-2.0"
] | null | null | null | tuple.py | oananbeh/Python-tutorial- | 5d2a1d4157fb2bade7be247d844090ed1ffbf973 | [
"Apache-2.0"
# Tuple tutorial: indexing, slicing, membership, iteration, concatenation
# and counting on immutable sequences.
months=("Jan", "Feb", "Mar", "Apr", "May", "Jun", "Jul",
        "Aug", "Sep", "Oct", "Nov", "Dec")
# Indexing: positive from the front, negative from the back.
print(months[1])
print(months[-1])
print(months[-3])
# Slicing returns a new tuple (end index exclusive).
print(months[0:4])
# Tuples can hold any mix of types.
example1=(1,3,5,7,9)
example2=(True,False,True)
example3=("Jon",24,True)
print(len(months))
# Membership test with `in`.
if "Sep" in months:
    print("Yes")
# for-loop iteration.
for month in months:
    print(month)
# Equivalent index-based while-loop iteration.
i=0
while i<len(months):
    print(months[i])
    i+=1
# Concatenating two tuples produces a third.
t1=("o","h","t","z")
t2=(10,20,30,50,80)
t3=t1+t2
print(t3)
# count() returns how many times a value occurs.
numbers = (1, 3, 3, 8, 7, 5, 3, 6, 8, 3)
print(numbers.count(8))
names=("Alin","Jon","Alin","Omar","Jon","Alin")
print(names.count("Jon"))
6c1674fff85928a4423cc488432ddd74e22df6b7 | 1,283 | py | Python | doc/source/cookbook/offaxis_projection.py | kastalpes/yt | b1e197ca84433fbd61eaf44b28ff5cdb37981d4c | [
"BSD-3-Clause-Clear"
] | 2 | 2021-03-02T18:59:49.000Z | 2021-03-02T18:59:50.000Z | doc/source/cookbook/offaxis_projection.py | kastalpes/yt | b1e197ca84433fbd61eaf44b28ff5cdb37981d4c | [
"BSD-3-Clause-Clear"
] | 4 | 2018-04-13T23:03:42.000Z | 2018-05-08T17:50:43.000Z | doc/source/cookbook/offaxis_projection.py | kastalpes/yt | b1e197ca84433fbd61eaf44b28ff5cdb37981d4c | [
"BSD-3-Clause-Clear"
] | 2 | 2020-05-16T15:29:37.000Z | 2020-06-22T10:17:08.000Z | import yt
import numpy as np
# Load the dataset.
ds = yt.load("IsolatedGalaxy/galaxy0030/galaxy0030")

# Choose a center for the render (code units).
c = [0.5, 0.5, 0.5]

# Our image plane will be normal to some vector.  For things like collapsing
# objects, you could set it the way you would a cutting plane -- but for this
# dataset, we'll just choose an off-axis value at random.  This gets normalized
# automatically.
L = [1.0, 0.0, 0.0]

# Our "width" is the width of the image plane as well as the depth.
# The first element is the left to right width, the second is the
# top-bottom width, and the last element is the back-to-front width
# (all in code units)
W = [0.04,0.04,0.4]

# The number of pixels along one side of the image.
# The final image will have Npixel^2 pixels.
Npixels = 512

# Create the off axis projection of the "density" field.
# Setting no_ghost to False speeds up the process, but makes a
# slightly lower quality image.
image = yt.off_axis_projection(ds, c, L, W, Npixels, "density", no_ghost=False)

# Write out the final image and give it a name
# relating to what our dataset is called.
# We save the log of the values so that the colors do not span
# many orders of magnitude.  Try it without and see what happens.
yt.write_image(np.log10(image), "%s_offaxis_projection.png" % ds)
| 35.638889 | 79 | 0.731099 |
505b6081033057ca72ea64bc6264f0105ee0646f | 13,707 | py | Python | src/streamlink/plugins/crunchyroll.py | Erk-/streamlink | d240704d1237fb5878960480c3f8951e0c5023b9 | [
"BSD-2-Clause"
] | 6 | 2020-11-13T22:48:09.000Z | 2021-03-05T14:38:40.000Z | src/streamlink/plugins/crunchyroll.py | TheDrHax/streamlink | 4dfd0d516fd8484438389518985e3b5131b7a253 | [
"BSD-2-Clause"
] | null | null | null | src/streamlink/plugins/crunchyroll.py | TheDrHax/streamlink | 4dfd0d516fd8484438389518985e3b5131b7a253 | [
"BSD-2-Clause"
] | 3 | 2020-11-21T14:50:52.000Z | 2021-03-23T09:55:35.000Z | import datetime
import logging
import re
from uuid import uuid4
from streamlink.plugin import Plugin, PluginArgument, PluginArguments, PluginError, pluginmatcher
from streamlink.plugin.api import validate
from streamlink.stream import HLSStream
log = logging.getLogger(__name__)
# Relative weights used to rank the named Crunchyroll qualities
# (consumed by Crunchyroll.stream_weight below).
STREAM_WEIGHTS = {
    "low": 240,
    "mid": 420,
    "high": 720,
    "ultra": 1080,
}
# Maps Crunchyroll bitrate labels from the variant playlist to the
# human-readable quality names used as stream keys.
STREAM_NAMES = {
    "120k": "low",
    "328k": "mid",
    "864k": "high"
}
def parse_timestamp(ts):
    """Takes ISO 8601 format(string) and converts into a utc datetime(naive)

    Expects values such as ``2012-07-04T12:30:45+10:00``.
    """
    # Split into the local date-time part and the trailing "+HH:MM" offset.
    # BUGFIX: the original sliced ts[:-7], chopping a digit off the seconds
    # field, and *added* the offset, producing local-shifted time instead of
    # the UTC promised by the docstring (UTC = local - offset).
    local = datetime.datetime.strptime(ts[:-6], "%Y-%m-%dT%H:%M:%S")
    offset = datetime.timedelta(hours=int(ts[-5:-3]), minutes=int(ts[-2:]))
    sign = -1 if ts[-6] == "-" else 1
    return local - sign * offset
# Generic API response envelope: an "error" flag plus optional
# code/message/data fields.
_api_schema = validate.Schema({
    "error": bool,
    validate.optional("code"): validate.text,
    validate.optional("message"): validate.text,
    validate.optional("data"): object,
})
# Media info response: extracts "stream_data", which is either None or a
# list of quality/m3u8-url entries (video_encode_id marks a non-variant
# playlist).
_media_schema = validate.Schema(
    {
        "stream_data": validate.any(
            None,
            {
                "streams": validate.all(
                    [{
                        "quality": validate.any(validate.text, None),
                        "url": validate.url(
                            scheme="http",
                            path=validate.endswith(".m3u8")
                        ),
                        validate.optional("video_encode_id"): validate.text
                    }]
                )
            }
        )
    },
    validate.get("stream_data")
)
# Login response: auth token, expiry timestamp (parsed via parse_timestamp)
# and basic user identity.
_login_schema = validate.Schema({
    "auth": validate.any(validate.text, None),
    "expires": validate.all(
        validate.text,
        validate.transform(parse_timestamp)
    ),
    "user": {
        "username": validate.any(validate.text, None),
        "email": validate.text
    }
})
# start_session response: extracts just the session id string.
_session_schema = validate.Schema(
    {
        "session_id": validate.text
    },
    validate.get("session_id")
)
class CrunchyrollAPIError(Exception):
    """Exception thrown by the Crunchyroll API when an error occurs"""

    def __init__(self, msg, code):
        """Keep both the human-readable message and the API error code."""
        super().__init__(msg)
        self.msg = msg
        self.code = code
class CrunchyrollAPI:
    """Thin wrapper around Crunchyroll's mobile JSON API."""

    _api_url = "https://api.crunchyroll.com/{0}.0.json"
    _default_locale = "en_US"
    _user_agent = "Dalvik/1.6.0 (Linux; U; Android 4.4.2; Android SDK built for x86 Build/KK)"
    _version_code = 444
    _version_name = "2.1.10"
    _access_token = "WveH9VkPLrXvuNm"
    _access_type = "com.crunchyroll.crunchyroid"

    def __init__(self, cache, session, session_id=None, locale=_default_locale):
        """Abstract the API to access to Crunchyroll data.

        Can take saved credentials to use on it's calls to the API.

        :param cache: plugin cache used to persist the auth token / device id
        :param session: Streamlink session (provides the HTTP client)
        :param session_id: optional pre-established API session id
        :param locale: locale string, e.g. ``en_US``
        """
        self.cache = cache
        self.session = session
        self.session_id = session_id
        if self.session_id:  # if the session ID is setup don't use the cached auth token
            self.auth = None
        else:
            self.auth = cache.get("auth")
        self.device_id = cache.get("device_id") or self.generate_device_id()
        self.locale = locale
        # Headers mimicking the official Android client.
        self.headers = {
            "X-Android-Device-Is-GoogleTV": "0",
            "X-Android-Device-Product": "google_sdk_x86",
            "X-Android-Device-Model": "Android SDK built for x86",
            "Using-Brightcove-Player": "1",
            "X-Android-Release": "4.4.2",
            "X-Android-SDK": "19",
            "X-Android-Application-Version-Name": self._version_name,
            "X-Android-Application-Version-Code": str(self._version_code),
            'User-Agent': self._user_agent
        }

    def _api_call(self, entrypoint, params=None, schema=None):
        """Makes a call against the api.

        :param entrypoint: API method to call.
        :param params: parameters to include in the request data.
        :param schema: schema to use to validate the data
        :raises CrunchyrollAPIError: if the API response flags an error
        """
        url = self._api_url.format(entrypoint)

        # Default params
        params = params or {}
        if self.session_id:
            # An established session carries the device/access data server side.
            params.update({
                "session_id": self.session_id
            })
        else:
            params.update({
                "device_id": self.device_id,
                "device_type": self._access_type,
                "access_token": self._access_token,
                "version": self._version_code
            })
        params.update({
            "locale": self.locale.replace('_', ''),
        })
        # NOTE: a second, redundant session_id assignment was removed here;
        # it is already set in the branch above.

        # The certificate used by Crunchyroll cannot be verified in some environments.
        res = self.session.http.post(url, data=params, headers=self.headers, verify=False)
        json_res = self.session.http.json(res, schema=_api_schema)

        if json_res["error"]:
            err_msg = json_res.get("message", "Unknown error")
            err_code = json_res.get("code", "unknown_error")
            raise CrunchyrollAPIError(err_msg, err_code)

        data = json_res.get("data")
        if schema:
            data = schema.validate(data, name="API response")

        return data

    def generate_device_id(self):
        """Create a random device id and cache it for a year."""
        device_id = str(uuid4())
        # cache the device id
        # BUGFIX: the device id value was previously omitted from the call,
        # so the TTL integer was stored as the cached value and a fresh id
        # was generated on every run.
        self.cache.set("device_id", device_id, 365 * 24 * 60 * 60)
        log.debug("Device ID: {0}".format(device_id))
        return device_id

    def start_session(self):
        """
        Starts a session against Crunchyroll's server.

        Is recommended that you call this method before making any other calls
        to make sure you have a valid session against the server.
        """
        params = {}
        if self.auth:
            params["auth"] = self.auth
        self.session_id = self._api_call("start_session", params, schema=_session_schema)
        log.debug("Session created with ID: {0}".format(self.session_id))
        return self.session_id

    def login(self, username, password):
        """
        Authenticates the session to be able to access restricted data from
        the server (e.g. premium restricted videos).
        """
        params = {
            "account": username,
            "password": password
        }
        login = self._api_call("login", params, schema=_login_schema)
        self.auth = login["auth"]
        # Persist the auth token until the server-provided expiry.
        self.cache.set("auth", login["auth"], expires_at=login["expires"])
        return login

    def authenticate(self):
        """Validate the cached auth token; drop it if it has expired."""
        try:
            data = self._api_call("authenticate", {"auth": self.auth}, schema=_login_schema)
        except CrunchyrollAPIError:
            self.auth = None
            self.cache.set("auth", None, expires_at=0)
            log.warning("Saved credentials have expired")
            return

        log.debug("Credentials expire at: {}".format(data["expires"]))
        self.cache.set("auth", self.auth, expires_at=data["expires"])
        return data

    def get_info(self, media_id, fields=None, schema=None):
        """
        Returns the data for a certain media item.

        :param media_id: id that identifies the media item to be accessed.
        :param fields: list of the media"s field to be returned. By default the
        API returns some fields, but others are not returned unless they are
        explicity asked for. I have no real documentation on the fields, but
        they all seem to start with the "media." prefix (e.g. media.name,
        media.stream_data).
        :param schema: validation schema to use
        """
        params = {"media_id": media_id}

        if fields:
            params["fields"] = ",".join(fields)

        return self._api_call("info", params, schema=schema)
@pluginmatcher(re.compile(r"""
    https?://(\w+\.)?crunchyroll\.
    (?:
        com|de|es|fr|co\.jp
    )
    (?:
        /(en-gb|es|es-es|pt-pt|pt-br|fr|de|ar|it|ru)
    )?
    (?:/[^/&?]+)?
    /[^/&?]+-(?P<media_id>\d+)
""", re.VERBOSE))
class Crunchyroll(Plugin):
    """Streamlink plugin resolving Crunchyroll media pages into HLS streams."""

    # Command-line options exposed by this plugin.
    arguments = PluginArguments(
        PluginArgument(
            "username",
            metavar="USERNAME",
            requires=["password"],
            help="A Crunchyroll username to allow access to restricted streams."
        ),
        PluginArgument(
            "password",
            sensitive=True,
            metavar="PASSWORD",
            nargs="?",
            const=None,
            default=None,
            help="""
            A Crunchyroll password for use with --crunchyroll-username.

            If left blank you will be prompted.
            """
        ),
        PluginArgument(
            "purge-credentials",
            action="store_true",
            help="""
            Purge cached Crunchyroll credentials to initiate a new session
            and reauthenticate.
            """
        ),
        PluginArgument(
            "session-id",
            sensitive=True,
            metavar="SESSION_ID",
            help="""
            Set a specific session ID for crunchyroll, can be used to bypass
            region restrictions. If using an authenticated session ID, it is
            recommended that the authentication parameters be omitted as the
            session ID is account specific.

            Note: The session ID will be overwritten if authentication is used
            and the session ID does not match the account.
            """
        )
    )

    @classmethod
    def stream_weight(cls, key):
        # Rank the named Crunchyroll qualities via STREAM_WEIGHTS; fall back
        # to the default weighting for anything else.
        weight = STREAM_WEIGHTS.get(key)
        if weight:
            return weight, "crunchyroll"

        return Plugin.stream_weight(key)

    def _get_streams(self):
        """Look up the page's media id through the API and yield HLS streams."""
        api = self._create_api()
        media_id = int(self.match.group("media_id"))

        try:
            # the media.stream_data field is required, no stream data is returned otherwise
            info = api.get_info(media_id, fields=["media.stream_data"], schema=_media_schema)
        except CrunchyrollAPIError as err:
            raise PluginError(f"Media lookup error: {err.msg}")

        if not info:
            return

        streams = {}

        # The adaptive quality stream sometimes a subset of all the other streams listed, ultra is no included
        has_adaptive = any([s["quality"] == "adaptive" for s in info["streams"]])
        if has_adaptive:
            log.debug("Loading streams from adaptive playlist")
            for stream in filter(lambda x: x["quality"] == "adaptive", info["streams"]):
                for q, s in HLSStream.parse_variant_playlist(self.session, stream["url"]).items():
                    # rename the bitrates to low, mid, or high. ultra doesn't seem to appear in the adaptive streams
                    name = STREAM_NAMES.get(q, q)
                    streams[name] = s

        # If there is no adaptive quality stream then parse each individual result
        for stream in info["streams"]:
            if stream["quality"] != "adaptive":
                # the video_encode_id indicates that the stream is not a variant playlist
                if "video_encode_id" in stream:
                    streams[stream["quality"]] = HLSStream(self.session, stream["url"])
                else:
                    # otherwise the stream url is actually a list of stream qualities
                    for q, s in HLSStream.parse_variant_playlist(self.session, stream["url"]).items():
                        # rename the bitrates to low, mid, or high. ultra doesn't seem to appear in the adaptive streams
                        name = STREAM_NAMES.get(q, q)
                        streams[name] = s

        return streams

    def _create_api(self):
        """Creates a new CrunchyrollAPI object, initiates it's session and
        tries to authenticate it either by using saved credentials or the
        user's username and password.
        """
        if self.options.get("purge_credentials"):
            # Drop everything cached so a completely new session is created.
            self.cache.set("session_id", None, 0)
            self.cache.set("auth", None, 0)
            self.cache.set("session_id", None, 0)

        # use the crunchyroll locale as an override, for backwards compatibility
        locale = self.get_option("locale") or self.session.localization.language_code
        api = CrunchyrollAPI(self.cache,
                             self.session,
                             session_id=self.get_option("session_id"),
                             locale=locale)

        if not self.get_option("session_id"):
            log.debug(f"Creating session with locale: {locale}")
            api.start_session()

            if api.auth:
                log.debug("Using saved credentials")
                login = api.authenticate()
                if login:
                    login_name = login["user"]["username"] or login["user"]["email"]
                    log.info(f"Successfully logged in as '{login_name}'")
            if not api.auth and self.options.get("username"):
                try:
                    log.debug("Attempting to login using username and password")
                    api.login(self.options.get("username"),
                              self.options.get("password"))
                    login = api.authenticate()
                    login_name = login["user"]["username"] or login["user"]["email"]
                    log.info(f"Logged in as '{login_name}'")

                except CrunchyrollAPIError as err:
                    raise PluginError(f"Authentication error: {err.msg}")
            if not api.auth:
                log.warning(
                    "No authentication provided, you won't be able to access "
                    "premium restricted content"
                )

        return api


__plugin__ = Crunchyroll
| 35.418605 | 120 | 0.569344 |
c3fa68df10c8465311bf748fbae122afb3d4a756 | 1,658 | py | Python | wagtail/wagtailredirects/forms.py | seddonym/wagtail-tableblock | aea3ce67a0800285b20b93018b7c0a8679e479b7 | [
"BSD-3-Clause"
] | null | null | null | wagtail/wagtailredirects/forms.py | seddonym/wagtail-tableblock | aea3ce67a0800285b20b93018b7c0a8679e479b7 | [
"BSD-3-Clause"
] | null | null | null | wagtail/wagtailredirects/forms.py | seddonym/wagtail-tableblock | aea3ce67a0800285b20b93018b7c0a8679e479b7 | [
"BSD-3-Clause"
] | 1 | 2019-03-05T15:37:22.000Z | 2019-03-05T15:37:22.000Z | from django import forms
from django.utils.translation import ugettext_lazy as _
from wagtail.wagtailadmin.widgets import AdminPageChooser
from wagtail.wagtailcore.models import Site
from wagtail.wagtailredirects.models import Redirect
class RedirectForm(forms.ModelForm):
site = forms.ModelChoiceField(
label=_("From site"), queryset=Site.objects.all(), required=False, empty_label=_("All sites")
)
def __init__(self, *args, **kwargs):
super(RedirectForm, self).__init__(*args, **kwargs)
self.fields['redirect_page'].widget = AdminPageChooser()
required_css_class = "required"
def clean(self):
"""
The unique_together condition on the model is ignored if site is None, so need to
check for duplicates manually
"""
cleaned_data = super(RedirectForm, self).clean()
if cleaned_data.get('site') is None:
old_path = cleaned_data.get('old_path')
if old_path is None:
# cleaned_data['old_path'] is empty because it has already failed validation,
# so don't bother with our duplicate test
return
old_path = Redirect.normalise_path(old_path)
duplicates = Redirect.objects.filter(old_path=old_path, site__isnull=True)
if self.instance.pk:
duplicates = duplicates.exclude(id=self.instance.pk)
if duplicates:
raise forms.ValidationError(_("A redirect with this path already exists."))
class Meta:
model = Redirect
fields = ('old_path', 'site', 'is_permanent', 'redirect_page', 'redirect_link')
| 36.043478 | 101 | 0.658625 |
aceae5f871e9a01900d6cf5229f6ed78d82368dd | 3,554 | py | Python | src/models/gbm1_indiv_pair_h2h_tuned.py | bushal01/league-ml2 | 6a3abf522faefcead017efa3fc6f9e2e8cafcd7f | [
"FTL"
] | 1 | 2019-11-14T22:46:08.000Z | 2019-11-14T22:46:08.000Z | src/models/gbm1_indiv_pair_h2h_tuned.py | bushal01/league-ml2 | 6a3abf522faefcead017efa3fc6f9e2e8cafcd7f | [
"FTL"
] | 25 | 2020-02-24T17:57:47.000Z | 2022-03-11T09:17:10.000Z | src/models/gbm1_indiv_pair_h2h_tuned.py | bushal01/league-ml2 | 6a3abf522faefcead017efa3fc6f9e2e8cafcd7f | [
"FTL"
] | 1 | 2018-08-04T02:50:10.000Z | 2018-08-04T02:50:10.000Z | import pandas as pd
import sklearn.ensemble
import sys
import os
import dotenv
sys.path.append(os.path.join(os.path.dirname(__file__), os.pardir))
import get_modeling_data
import model_evaluation.model_performance_functions as mpf
import time
# Resolve the project root whether this file runs as a script or interactively.
if __name__ == '__main__':
    project_dir = os.path.join(os.path.dirname(__file__), os.pardir, os.pardir)
else:
    project_dir = os.path.join(os.getcwd(), os.pardir)
dotenv_path = os.path.join(project_dir, '.env')
dotenv.load_dotenv(dotenv_path)
# Load the train/validation frames and replace missing values with 0.
train = get_modeling_data.get_train()
validation = get_modeling_data.get_validation()
train = train.fillna(0)
validation = validation.fillna(0)
non_modeling_cols = get_modeling_data.get_non_modeling_cols()
X_train = train.drop(non_modeling_cols + ['team_100_win'], axis=1)
Y_train = train['team_100_win']
X_validation = validation.drop(non_modeling_cols + ['team_100_win'], axis=1)
Y_validation = validation['team_100_win']
# Hyperparameter grid.  NOTE(review): n_estimators is defined here but never
# used below -- every model is built with a hard-coded 1500 trees; confirm intent.
learning_rates = [0.02]
n_estimators = [500]
min_samples_splits = [100, 200, 500]
max_depths = [2, 3, 5, 7]
for lr in learning_rates:
    for mss in min_samples_splits:
        for md in max_depths:
            gbm_params = {'learning_rate': lr,
                          'n_estimators': 1500,
                          'min_samples_split': mss,
                          'min_samples_leaf': 50,
                          'max_depth': md,
                          'random_state': 414}
            print(gbm_params)
            start_time = time.time()
            model = sklearn.ensemble.GradientBoostingClassifier(**gbm_params)
            model_fit = model.fit(X_train, Y_train)
            # Pick the boosting iteration that scores best on the validation set.
            n_est_performance = mpf.gbm_best_iter(model_fit, X_validation, Y_validation)
            # Get training and validation predictions using best iteration
            ctr = 1
            for prediction in model_fit.staged_predict(X_train):
                if ctr == n_est_performance['best_iter']:
                    train_pred = prediction
                ctr = ctr + 1
            ctr = 1
            for prediction in model_fit.staged_predict(X_validation):
                if ctr == n_est_performance['best_iter']:
                    validation_pred = prediction
                ctr = ctr + 1
            train_time = time.time() - start_time
            # Score the chosen iteration and append one row to the eval CSV.
            ks_gini_train = mpf.ks_gini(Y_train, train_pred)
            ks_gini_validation = mpf.ks_gini(Y_validation, validation_pred)
            correct_pred_train = mpf.correct_prediction_rate(Y_train, train_pred)
            correct_pred_validation = mpf.correct_prediction_rate(Y_validation, validation_pred)
            model_performance = mpf.record_gbm_performance(description='GBM,Indiv,paired,h2h,230k,best_iteration',
                **gbm_params, best_iter=n_est_performance['best_iter'], num_vars=X_train.shape[1],
                train_rows=X_train.shape[0], valid_rows=X_validation.shape[0],
                correct_pred_train=correct_pred_train, correct_pred_validation=correct_pred_validation,
                ks_train=ks_gini_train['ks'], ks_valid=ks_gini_validation['ks'],
                gini_train=ks_gini_train['gini'], gini_valid=ks_gini_validation['gini'],
                mse_train=mpf.mse(Y_train, train_pred), mse_valid=mpf.mse(Y_validation, validation_pred),
                train_time=train_time, file=os.getenv('DATA_DIR') + 'model_performance/gbm_eval.csv')
print(model_performance) | 46.155844 | 128 | 0.633089 |
427d88c1255fa80c8f8064a7e8d6926fd11cc3f7 | 1,132 | py | Python | examples/hellogrpc/deployment_test.py | linux-on-ibm-z/rules_k8s | e65b80edd2a2162f67120a98e84bb489f15fcf97 | [
"Apache-2.0"
] | 263 | 2017-08-29T22:38:58.000Z | 2022-03-09T06:46:28.000Z | examples/hellogrpc/deployment_test.py | linux-on-ibm-z/rules_k8s | e65b80edd2a2162f67120a98e84bb489f15fcf97 | [
"Apache-2.0"
] | 666 | 2017-08-31T23:22:52.000Z | 2022-03-19T09:57:07.000Z | examples/hellogrpc/deployment_test.py | linux-on-ibm-z/rules_k8s | e65b80edd2a2162f67120a98e84bb489f15fcf97 | [
"Apache-2.0"
] | 150 | 2017-09-03T17:20:27.000Z | 2022-02-09T18:45:37.000Z | # Copyright 2017 The Bazel Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import os
import unittest
import yaml
def TestData(name):
    """Resolve *name* relative to this test's runfiles root (TEST_SRCDIR)."""
    runfiles_root = os.environ['TEST_SRCDIR']
    return os.path.join(runfiles_root, 'io_bazel_rules_k8s', name)
class DeploymentTest(unittest.TestCase):
    """Checks that the static YAML and the generated JSON describe the same object."""

    def test_things_match(self):
        with open(TestData('examples/hellogrpc/deployment.yaml'), 'r') as f:
            # safe_load parses only standard YAML tags; the original Loader-less
            # yaml.load() is deprecated and can construct arbitrary objects.
            static = yaml.safe_load(f.read())
        with open(TestData('examples/hellogrpc/deployment.json'), 'r') as f:
            generated = json.loads(f.read())
        self.assertEqual(static, generated)
# Allow running this module directly as a test binary.
if __name__ == '__main__':
    unittest.main()
| 32.342857 | 76 | 0.741166 |
b3a23b2a7e49bfc69c86905109db3da6a31e16a4 | 1,648 | py | Python | donedealfinderapi.py | tigranza/gooddeal | 96401a53a305d39b72962104682ee7bb3b0b84bd | [
"Apache-2.0"
] | null | null | null | donedealfinderapi.py | tigranza/gooddeal | 96401a53a305d39b72962104682ee7bb3b0b84bd | [
"Apache-2.0"
] | null | null | null | donedealfinderapi.py | tigranza/gooddeal | 96401a53a305d39b72962104682ee7bb3b0b84bd | [
"Apache-2.0"
] | null | null | null |
from bs4 import BeautifulSoup
import requests
import re
import json
from car import Car
from profiler import start_timer, print_time
# Base URL of the DoneDeal site; all API requests are made against it.
host = 'https://donedeal.ie'

# Default headers: the search endpoint speaks JSON in both directions.
headers = {
    'Content-Type': 'application/json',
    'Accept': 'application/json'
}
class DoneDealException(Exception):
    """Raised when the DoneDeal API returns an unexpected response."""
class DoneDealFinderApi():
    """Thin client for the DoneDeal search API; runs a search on construction."""

    def __init__(self, config):
        # NOTE(review): `config` is currently unused -- the module-level
        # `host`/`headers` constants are used instead; confirm intent.
        if host:
            self.host = host
        if headers:
            self.headers = headers
        # Empty payload == default search; the commented-out keys document the
        # parameters the endpoint accepts.
        data = {
            # 'section': "all",
            # 'adType': "forsale",
            # 'source': '',
            # 'sort': 'relevance desc',
            # 'area': [],
            # 'max': 30,
            # 'start': 0,
        }
        self.cars = self._find(data)

    def _find(self, data):
        """ Search donedeal using following params:
            :param data: `dict` containing the key value pairs of:
                {
                    section: "all"
                    adType: "forsale"
                    source: ''
                    sort: 'relevance desc'
                    area: []
                    max: 30, // must be a common denominator for balanced
                            2 or 3-column layout (eg 6|12|18|24|30) etc
                    start: 0
                }
        """
        uri = '{host}/search/api/v4/find/'.format(host=self.host)
        resp = requests.post(uri, data=json.dumps(data), headers=self.headers)
        if resp.status_code != 200:
            # Log the failing status before raising so the caller sees context.
            print("{resp.status_code} {resp.reason}".format(resp=resp))
            raise DoneDealException(
                'Got invalid response code of {}'.format(resp.status_code)
            )
return resp.json() | 26.580645 | 78 | 0.51699 |
3bc2013aaf0ca09f5658b30a77a601da331ac838 | 19,447 | py | Python | satchmo/apps/product/tests.py | djangoplicity/satchmo | 75b672dffb64fed3e55c253d51a0ce73f0747e05 | [
"BSD-3-Clause"
] | null | null | null | satchmo/apps/product/tests.py | djangoplicity/satchmo | 75b672dffb64fed3e55c253d51a0ce73f0747e05 | [
"BSD-3-Clause"
] | null | null | null | satchmo/apps/product/tests.py | djangoplicity/satchmo | 75b672dffb64fed3e55c253d51a0ce73f0747e05 | [
"BSD-3-Clause"
] | null | null | null | from decimal import Decimal
from django.conf import settings
from django.contrib.sites.models import Site
from django.core import urlresolvers
from django.forms.util import ValidationError
from django.http import HttpResponse
from django.test import TestCase
from django.utils import timezone
from product.forms import ProductExportForm
from product.models import (
Category,
Discount,
Option,
OptionGroup,
Product,
Price,
)
from product.prices import (
get_product_quantity_adjustments,
PriceAdjustment,
PriceAdjustmentCalc,
)
import datetime
import keyedcache
import signals
class OptionGroupTest(TestCase):
    """Creation and in-place update of OptionGroup/Option rows."""

    def setUp(self):
        self.site=Site.objects.get_current()
        sizes = OptionGroup.objects.create(name="sizes", sort_order=1, site=self.site)
        option_small = Option.objects.create(option_group=sizes, name="Small", value="small", sort_order=1)
        option_large = Option.objects.create(option_group=sizes, name="Large", value="large", sort_order=2, price_change=1)
        colors = OptionGroup.objects.create(name="colors", sort_order=2, site=self.site)
        option_black = Option.objects.create(option_group=colors, name="Black", value="black", sort_order=1)
        option_white = Option.objects.create(option_group=colors, name="White", value="white", sort_order=2, price_change=3)
        # Change an option
        option_white.price_change = 5
        option_white.sort_order = 2
        option_white.save()
        self.sizes = sizes
        self.option_small = option_small
        self.option_large = option_large
        self.colors = colors
        self.option_black = option_black
        self.option_white = option_white

    # def testUniqueTogether(self):
    #     """You can't have two options with the same value in an option group"""
    #     self.option_white.value = "black"
    #     try:
    #         self.option_white.save()
    #         self.fail('Should have thrown an error, due to duplicate keys')
    #     except db.IntegrityError:
    #         pass
    #     db.transaction.rollback()

    def testValues(self):
        # The values saved in setUp should survive the round-trip to the DB.
        opt = Option.objects.get(id=self.option_white.id)
        self.assertEqual(opt.value, u'white')
        self.assertEqual(opt.price_change, 5)
        self.assertEqual(opt.sort_order, 2)
class CategoryTest(TestCase):
    """
    Run some category tests on urls
    """

    def setUp(self):
        self.site = Site.objects.get_current()
        # setup simple categories
        self.pet_jewelry, _created = Category.objects.get_or_create(
            slug="pet-jewelry", name="Pet Jewelry", parent=None, site=self.site
        )
        self.womens_jewelry, _created = Category.objects.get_or_create(
            slug="womens-jewelry", name="Women's Jewelry", parent=None, site=self.site
        )

    def tearDown(self):
        keyedcache.cache_delete()

    def test_hierarchy_validation(self):
        # Saving a category whose parent chain loops back on itself must fail.
        #
        # first, set the hierarchy
        #
        # None -> womens_jewelry -> pet_jewelry
        #
        self.pet_jewelry.parent = self.womens_jewelry
        self.pet_jewelry.save()
        #
        # now, try
        #
        # pet_jewelry -> womens_jewelry -> pet_jewelry
        #
        self.womens_jewelry.parent = self.pet_jewelry
        self.assertRaises(ValidationError, self.womens_jewelry.save)

    def test_absolute_url(self):
        # get_absolute_url should agree with the named URL pattern.
        exp_url = urlresolvers.reverse('satchmo_category', kwargs={
            'slug': self.womens_jewelry.slug
        })
        self.assertEqual(self.womens_jewelry.get_absolute_url(), exp_url)

    # def test_infinite_loop(self):
    #     """Check that Category methods still work on a Category whose parents list contains an infinite loop."""
    #     # Create two Categories that are each other's parents. First make sure that
    #     # attempting to save them throws an error, then force a save anyway.
    #     pet_jewelry = Category.objects.create(slug="pet-jewelry", name="Pet Jewelry", site=self.site)
    #     womens_jewelry = Category.objects.create(slug="womens-jewelry", name="Women's Jewelry", site=self.site)
    #     pet_jewelry.parent = womens_jewelry
    #     pet_jewelry.save()
    #     womens_jewelry.parent = pet_jewelry
    #     try:
    #         womens_jewelry.save()
    #         self.fail('Should have thrown a ValidationError')
    #     except ValidationError:
    #         pass
    #     # force save
    #     Model.save(womens_jewelry)
    #     pet_jewelry = Category.objects.active().get(slug="pet-jewelry")
    #     womens_jewelry = Category.objects.active().get(slug="womens-jewelry")
    #     kids = Category.objects.by_site(site=self.site).order_by('name')
    #     slugs = [cat.slug for cat in kids]
    #     self.assertEqual(slugs, [u'pet-jewelry', u'womens-jewelry'])
class DiscountTest(TestCase):
    """Validity checks for the Discount model (date windows, active flag)."""

    def setUp(self):
        self.site = Site.objects.get_current()
        start = datetime.date(2006, 10, 1)
        end = datetime.date(5000, 10, 1)
        self.discount = Discount.objects.create(description="New Sale", code="BUYME", amount="5.00", allowedUses=10,
            numUses=0, minOrder=5, active=True, startDate=start, endDate=end, shipping='NONE', site=self.site)
        # Pin the locale so the translated validity messages match the asserts.
        self.old_language_code = settings.LANGUAGE_CODE
        settings.LANGUAGE_CODE = 'en-us'

    def tearDown(self):
        keyedcache.cache_delete()
        # Restore the locale changed in setUp.
        settings.LANGUAGE_CODE = self.old_language_code

    def testValid(self):
        v = self.discount.isValid()
        self.assert_(v[0])
        self.assertEqual(v[1], u'Valid.')

    def testFutureDate(self):
        """Test a future date for discount start"""
        start = datetime.date(5000, 1, 1)
        self.discount.startDate = start
        self.discount.save()
        self.discount.isValid()
        v = self.discount.isValid()
        self.assertFalse(v[0])
        self.assertEqual(v[1], u'This coupon is not active yet.')

    def testPastDate(self):
        """Test an expired discount"""
        #Change end date to the past
        start = datetime.date(2000, 1, 1)
        end = datetime.date(2006, 1, 1)
        self.discount.startDate = start
        self.discount.endDate = end
        self.discount.save()
        v = self.discount.isValid()
        self.assertFalse(v[0])
        self.assertEqual(v[1], u'This coupon has expired.')

    def testNotActive(self):
        """Not active should always be invalid."""
        self.discount.startDate = datetime.date(2006, 12, 1)
        self.discount.endDate = datetime.date(5000, 12, 1)
        self.discount.active = False
        self.discount.save()
        v = self.discount.isValid()
        self.assertFalse(v[0], False)
        self.assertEqual(v[1], u'This coupon is disabled.')
class CalcFunctionTest(TestCase):
    """Rounding behaviour of Discount.apply_even_split / apply_percentage."""

    def assert_apply_even_split(self, input_str, amount_str, expect_str):
        """
        Method which simplifies many similar tests to be written more compact on one line
        Example: the following line does the same as the method ``testEvenSplit1``.
        > > > self.assert_apply_even_split('10 10 10 10', '16', '4.00 4.00 4.00 4.00')
        """
        # Parse the space-separated prices into {index: Decimal} form.
        ddd = input_str.split()
        dd = map(lambda x: Decimal(str(x)).quantize(Decimal("0.01")), ddd)
        d = dict(enumerate(dd))
        amount = Decimal(str(amount_str)).quantize(Decimal("0.01"))
        s = Discount.apply_even_split(d, amount)
        self.assertEqual(s.keys(), d.keys())
        # NOTE: tuple-unpacking lambda below is Python 2-only syntax.
        output_str = ' '.join(map(lambda (k, v): str(v), sorted(s.items())))
        self.assertEqual(output_str, expect_str)

    def testEvenSplit1Duplicate(self):
        """Does the same as the following test, but written more compact on one line""";
        self.assert_apply_even_split('10 10 10 10', '16', '4.00 4.00 4.00 4.00')

    def testEvenSplit1(self):
        """Simple split test"""
        d = {
            1 : Decimal("10.00"),
            2 : Decimal("10.00"),
            3 : Decimal("10.00"),
            4 : Decimal("10.00"),
        }
        s = Discount.apply_even_split(d, Decimal("16.00"))
        self.assertEqual(s[1], Decimal("4.00"))
        self.assertEqual(s[2], Decimal("4.00"))
        self.assertEqual(s[3], Decimal("4.00"))
        self.assertEqual(s[4], Decimal("4.00"))

    def testEvenSplitTooMuch(self):
        """Test when amount is greater than total"""
        d = {
            1 : Decimal("10.00"),
            2 : Decimal("10.00"),
            3 : Decimal("10.00"),
            4 : Decimal("10.00"),
        }
        s = Discount.apply_even_split(d, Decimal("50.00"))
        self.assertEqual(s[1], Decimal("10.00"))
        self.assertEqual(s[2], Decimal("10.00"))
        self.assertEqual(s[3], Decimal("10.00"))
        self.assertEqual(s[4], Decimal("10.00"))

    def testEvenSplitEqual(self):
        """Test when amount is exactly equal"""
        d = {
            1 : Decimal("10.00"),
            2 : Decimal("10.00"),
            3 : Decimal("10.00"),
            4 : Decimal("10.00"),
        }
        s = Discount.apply_even_split(d, Decimal("40.00"))
        self.assertEqual(s[1], Decimal("10.00"))
        self.assertEqual(s[2], Decimal("10.00"))
        self.assertEqual(s[3], Decimal("10.00"))
        self.assertEqual(s[4], Decimal("10.00"))

    def testEvenSplitOneTooSmall(self):
        """Test when one of the items is maxed, but others are OK"""
        d = {
            1 : Decimal("10.00"),
            2 : Decimal("5.00"),
            3 : Decimal("10.00"),
            4 : Decimal("10.00"),
        }
        s = Discount.apply_even_split(d, Decimal("23.00"))
        self.assertEqual(s[1], Decimal("6.00"))
        self.assertEqual(s[2], Decimal("5.00"))
        self.assertEqual(s[3], Decimal("6.00"))
        self.assertEqual(s[4], Decimal("6.00"))

    def testThirds(self):
        # Pennies left over from the division land on the first item.
        d = {
            1 : Decimal("10.00"),
            2 : Decimal("10.00"),
            3 : Decimal("10.00"),
        }
        s = Discount.apply_even_split(d, Decimal("10.00"))
        self.assertEqual(s[1], Decimal("3.34"))
        self.assertEqual(s[2], Decimal("3.33"))
        self.assertEqual(s[3], Decimal("3.33"))

    def testLargeThirds(self):
        d = {
            1 : Decimal("100.00"),
            2 : Decimal("100.00"),
            3 : Decimal("100.00"),
        }
        s = Discount.apply_even_split(d, Decimal("100.00"))
        self.assertEqual(s[1], Decimal("33.34"))
        self.assertEqual(s[2], Decimal("33.33"))
        self.assertEqual(s[3], Decimal("33.33"))

    def testThirdsUneven(self):
        d = {
            1 : Decimal("10.00"),
            2 : Decimal("10.00"),
            3 : Decimal("3.00"),
        }
        s = Discount.apply_even_split(d, Decimal("10.00"))
        self.assertEqual(s[1], Decimal("3.50"))
        self.assertEqual(s[2], Decimal("3.50"))
        self.assertEqual(s[3], Decimal("3.00"))

    def testPercentage1(self):
        d = {
            1 : Decimal("10.00"),
            2 : Decimal("10.00"),
            3 : Decimal("10.00"),
        }
        s = Discount.apply_percentage(d, Decimal("10.00"))
        self.assertEqual(s[1], Decimal("1.00"))
        self.assertEqual(s[2], Decimal("1.00"))
        self.assertEqual(s[3], Decimal("1.00"))

    def testEvenSplitUncommonNear(self):
        """Simple split test"""
        self.assert_apply_even_split('6.67 6.67 6.67', '20.00', '6.67 6.67 6.66')

    def testEvenSplitUncommon1(self):
        """Simple split test"""
        self.assert_apply_even_split('12.90 5.80 25.80 1.99', '20.00', '6.11 5.80 6.10 1.99')

    def testEvenSplitUncommon2(self):
        """Simple split test"""
        self.assert_apply_even_split('12.90 5.80 25.80 2.99', '20.00', '5.67 5.67 5.67 2.99')

    def testEvenSplitUncommon3(self):
        """Simple split test"""
        self.assert_apply_even_split('12.90 5.80 25.80 1.98', '20.00', '6.11 5.80 6.11 1.98')

    def testEvenSplitUncommon4(self):
        """Simple split test"""
        self.assert_apply_even_split('12.90 5.80 25.80 0.98', '20.00', '6.61 5.80 6.61 0.98')

    def testEvenSplitUncommon5(self):
        """Simple split test"""
        self.assert_apply_even_split('12.90 5.80 25.80 0.98', '10.00', '3.01 3.01 3.00 0.98')

    def testEvenSplitUncommon6(self):
        """Simple split test"""
        self.assert_apply_even_split('12.90 5.80 25.80 3.99', '30.00', '10.11 5.80 10.10 3.99')

    def testEvenSplitUncommon7(self):
        """Simple split test"""
        self.assert_apply_even_split('12.90 5.80 25.80 3.99', '40.00', '12.90 5.80 17.31 3.99')

    def testEvenSplitUncommon8(self):
        """Simple split test"""
        self.assert_apply_even_split('12.90 35.80 25.80 3.99', '40.00', '12.01 12.00 12.00 3.99')

    def testEvenSplitUncommon9(self):
        """Simple split test"""
        self.assert_apply_even_split('8.00 15.80 25.80 3.99', '40.00', '8.00 14.01 14.00 3.99')

    def testEvenSplitUncommon10(self):
        """Simple split test"""
        self.assert_apply_even_split('8.00 15.80 25.80 13.99', '40.00', '8.00 10.67 10.67 10.66')

    def testEvenSplitUncommon11(self):
        """Simple split test"""
        self.assert_apply_even_split('8.00 15.80 25.80 14.00', '40.00', '8.00 10.67 10.67 10.66')

    def testEvenSplitUncommon12(self):
        """Simple split test"""
        self.assert_apply_even_split('5.80 25.80 12.90', '20.00', '5.80 7.10 7.10')
class ProductExportTest(TestCase):
    """
    Test product export functionality.
    """

    def setUp(self):
        # Log in as a superuser
        from django.contrib.auth.models import User
        user = User.objects.create_user('root', 'root@eruditorum.com', '12345')
        user.is_staff = True
        user.is_superuser = True
        user.save()
        self.client.login(username='root', password='12345')

    def tearDown(self):
        keyedcache.cache_delete()

    def test_text_export(self):
        """
        Test the content type of an exported text file.
        """
        url = urlresolvers.reverse('satchmo_admin_product_export')
        form_data = {
            'format': 'yaml',
            'include_images': False,
        }
        # Each supported format should report its matching Content-Type header.
        response = self.client.post(url, form_data)
        self.assertTrue(response.has_header('Content-Type'))
        self.assertEqual('text/yaml', response['Content-Type'])
        form_data['format'] = 'json'
        response = self.client.post(url, form_data)
        self.assertTrue(response.has_header('Content-Type'))
        self.assertEqual('text/json', response['Content-Type'])
        form_data['format'] = 'xml'
        response = self.client.post(url, form_data)
        self.assertTrue(response.has_header('Content-Type'))
        self.assertEqual('text/xml', response['Content-Type'])

    def test_zip_export_content_type(self):
        """
        Test the content type of an exported zip file.
        """
        url = urlresolvers.reverse('satchmo_admin_product_export')
        form_data = {
            'format': 'yaml',
            'include_images': True,
        }
        # Including images forces a zip archive response.
        response = self.client.post(url, form_data)
        self.assertTrue(response.has_header('Content-Type'))
        self.assertEqual('application/zip', response['Content-Type'])

    def test_unicode(self):
        """Test the ProductExportForm behavior
        Specifically, we're checking that a unicode 'format' is converted to ascii
        in the 'export' method of 'ProductExportForm'."""
        form = ProductExportForm({'format': u'yaml', 'include_images': True})
        response = form.export(None)
        self.assert_(isinstance(response, HttpResponse))
class ProductTest(TestCase):
    """Test Product functions"""
    fixtures = ['l10n-data.yaml','sample-store-data.yaml', 'products.yaml', 'test-config.yaml']

    def tearDown(self):
        keyedcache.cache_delete()

    def test_quantity_price_standard_product(self):
        """Check quantity price for a standard product"""
        product = Product.objects.get(slug='PY-Rocks')
        self.assertEqual(product.unit_price, Decimal("19.50"))

    def test_discount_qty_price(self):
        """Test quantity price discounts"""
        product = Product.objects.get(slug='PY-Rocks')
        price = Price(product=product, quantity=Decimal('10'), price=Decimal("10.00"))
        price.save()
        # The qty-10 price only applies once the quantity threshold is met.
        self.assertEqual(product.unit_price, Decimal("19.50"))
        self.assertEqual(product.get_qty_price(Decimal('1')), Decimal("19.50"))
        self.assertEqual(product.get_qty_price(Decimal('2')), Decimal("19.50"))
        self.assertEqual(product.get_qty_price(Decimal('10')), Decimal("10.00"))

    def test_expiring_price(self):
        """Test whether a price with an expiration date is used in preference to a non-expiring price."""
        product = Product.objects.get(slug='PY-Rocks')
        self.assertEqual(product.unit_price, Decimal("19.50"))
        today = timezone.now()
        aweek = datetime.timedelta(days=7)
        lastwk = today - aweek
        nextwk = today + aweek
        # new price should override the old one
        price = Price.objects.create(product=product, quantity=Decimal('1'), price=Decimal("10.00"), expires=nextwk)
        self.assertEqual(product.unit_price, Decimal("10.00"))
        # but not if it is expired
        price.expires = lastwk
        price.save()
        self.assertEqual(product.unit_price, Decimal("19.50"))

    def test_smart_attr(self):
        p = Product.objects.get(slug__iexact='dj-rocks')
        mb = Product.objects.get(slug__iexact='dj-rocks-m-b')
        sb = Product.objects.get(slug__iexact='dj-rocks-s-b')
        # going to set a weight on the product, and an override weight on the medium
        # shirt.
        p.weight = 100
        p.save()
        sb.weight = 50
        sb.save()
        # sb has its own weight; mb falls back to the parent product's value.
        self.assertEqual(p.smart_attr('weight'), 100)
        self.assertEqual(sb.smart_attr('weight'), 50)
        self.assertEqual(mb.smart_attr('weight'), 100)
        # no height
        self.assertEqual(p.smart_attr('height'), None)
        self.assertEqual(sb.smart_attr('height'), None)
class PriceAdjustmentTest(TestCase):
    """PriceAdjustmentCalc accumulation and the satchmo_price_query signal hook."""
    fixtures = ['products.yaml']

    def setUp(self):
        self.product = Product.objects.get(slug="dj-rocks")
        self.price = self.product.price_set.get(quantity=1)

    def tearDown(self):
        keyedcache.cache_delete()

    def test_basic(self):
        pcalc = PriceAdjustmentCalc(self.price)
        p = PriceAdjustment('test', amount=Decimal(1))
        pcalc += p
        pcalc += p
        p = PriceAdjustment('test2', amount=Decimal(10))
        pcalc += p
        # 1 + 1 + 10
        self.assertEqual(pcalc.total_adjustment(), Decimal(12))

    def test_product_adjustments(self):
        p1 = self.product.unit_price
        self.assertEqual(p1, Decimal('20.00'))
        # Hook in a listener (five_off below) that knocks 5 off every price query.
        signals.satchmo_price_query.connect(five_off)
        p2 = self.product.unit_price
        self.assertEqual(p2, Decimal('15.00'))
        adj = get_product_quantity_adjustments(self.product, qty=1)
        self.assertEqual(len(adj.adjustments), 1)
        a = adj.adjustments[0]
        self.assertEqual(a.key, 'half')
        self.assertEqual(a.amount, Decimal(5))
        # Disconnect so other tests see unadjusted prices again.
        signals.satchmo_price_query.disconnect(five_off)
def five_off(sender, adjustment=None, **kwargs):
    # Signal listener used by PriceAdjustmentTest: adds a 5-unit adjustment
    # (keyed 'half') to every price query it sees.
    adjustment += PriceAdjustment('half', 'Half', amount=Decimal(5))
if __name__ == "__main__":
import doctest
doctest.testmod()
| 35.946396 | 124 | 0.612588 |
1064f5e3c69c181e0713ad1e483d842f6a5bcd24 | 1,703 | py | Python | mce/equiv.py | mvcisback/mce-spec-inference | 58432b35e35b75cab1c77cbbe2057aff94794597 | [
"MIT"
] | null | null | null | mce/equiv.py | mvcisback/mce-spec-inference | 58432b35e35b75cab1c77cbbe2057aff94794597 | [
"MIT"
] | null | null | null | mce/equiv.py | mvcisback/mce-spec-inference | 58432b35e35b75cab1c77cbbe2057aff94794597 | [
"MIT"
] | null | null | null | """
Code for checking if two trajectories index the same time unrolled state.
"""
from typing import Iterator
import aiger
import aiger_sat
import aiger_bv as BV
def bmc_equiv(circ1, circ2, horizon, assume=None) -> Iterator[bool]:
    """
    Perform bounded model checking up to `horizon` to see if circ1 and
    circ2 are equivalent.

    Yields, for each unrolling depth from `horizon` down to 1, whether a
    distinguishing input sequence exists at that depth (True = SAT, i.e.
    the circuits are distinguishable).
    """
    # Create distinguishing predicate: OR of "this output differs" over all
    # of circ1's outputs (circ2's copies carry the `##copy` suffix).
    expr = BV.uatom(1, val=0)
    for o1 in circ1.outputs:
        o2 = f'{o1}##copy'
        size = circ1.omap[o1].size
        expr |= BV.uatom(size, o1) != BV.uatom(size, o2)
    # BUG FIX: aiger expressions are immutable; with_output() returns a *new*
    # expression, so its result must be rebound.  The original discarded it,
    # making the call a no-op.
    expr = expr.with_output('distinguished')

    monitor = ((circ1 | circ2) >> expr.aigbv).aig
    assert len(monitor.outputs) == 1

    # Make underlying aig lazy.
    monitor = monitor.lazy_aig

    # BMC loop: check each unrolling depth for a distinguishing input.
    for t in range(horizon):
        delta = horizon - t
        unrolled = monitor.unroll(delta, only_last_outputs=True)
        assert len(unrolled.outputs) == 1
        unrolled = aiger.BoolExpr(unrolled)

        if assume is not None:
            unrolled |= assume

        yield aiger_sat.is_sat(unrolled)
def equiv_states(mdp, horizon, time, state1, state2):
    """
    Return whether state1 and state2 correspond to the same state at
    t=`time`.
    """
    # Identical state encodings are trivially equivalent.
    if state1 == state2:
        return True

    base = mdp.aigbv
    # Reinitialize the circuit to start from each of the two states.
    left = base.reinit(state1)
    right = base.reinit(state2)

    # Rename the second copy's outputs and latches so the two circuits can
    # be composed without symbol clashes.
    output_renames = {name: f'{name}##copy' for name in left.outputs}
    latch_renames = {name: f'{name}##copy' for name in left.latches}
    right = right['o', output_renames]
    right = right['l', latch_renames]

    # Equivalent iff no remaining unrolling depth yields a distinguishing input.
    remaining = horizon - time
    return not any(bmc_equiv(left, right, remaining))
__all__ = ['equiv_states']
| 26.2 | 73 | 0.641221 |
0c453443fa4edefc2ba97b8fe38055f733650996 | 5,001 | py | Python | example/tutorial_frozenlake_dqn.py | lllcho/tensorlayer | 87591b4945a6a67dfb4ea797a575efae997fd9d2 | [
"Apache-2.0"
] | 1 | 2021-09-10T09:27:31.000Z | 2021-09-10T09:27:31.000Z | example/tutorial_frozenlake_dqn.py | lllcho/tensorlayer | 87591b4945a6a67dfb4ea797a575efae997fd9d2 | [
"Apache-2.0"
] | null | null | null | example/tutorial_frozenlake_dqn.py | lllcho/tensorlayer | 87591b4945a6a67dfb4ea797a575efae997fd9d2 | [
"Apache-2.0"
] | null | null | null | """Q-Network Q(a, s) - TD Learning, Off-Policy, e-Greedy Exploration (GLIE).
Q(S, A) <- Q(S, A) + alpha * (R + lambda * Q(newS, newA) - Q(S, A))
delta_w = R + lambda * Q(newS, newA)
See David Silver RL Tutorial Lecture 5 - Q-Learning for more details.
EN: https://medium.com/emergent-future/simple-reinforcement-learning-with-tensorflow-part-0-q-learning-with-tables-and-neural-networks-d195264329d0#.5m3361vlw
CN: https://zhuanlan.zhihu.com/p/25710327
Note: Policy Network has been proved to be better than Q-Learning, see tutorial_atari_pong.py
# The FrozenLake v0 environment
https://gym.openai.com/envs/FrozenLake-v0
The agent controls the movement of a character in a grid world. Some tiles of
the grid are walkable, and others lead to the agent falling into the water.
Additionally, the movement direction of the agent is uncertain and only partially
depends on the chosen direction. The agent is rewarded for finding a walkable
path to a goal tile.
SFFF (S: starting point, safe)
FHFH (F: frozen surface, safe)
FFFH (H: hole, fall to your doom)
HFFG (G: goal, where the frisbee is located)
The episode ends when you reach the goal or fall in a hole. You receive a reward
of 1 if you reach the goal, and zero otherwise.
"""
import gym
import tensorlayer as tl
from tensorlayer.layers import *
env = gym.make('FrozenLake-v0')
def to_one_hot(i, n_classes=None):
    """Encode state index `i` as a one-hot uint8 vector of length `n_classes`."""
    encoding = np.zeros(n_classes, 'uint8')
    encoding[i] = 1
    return encoding
render = False  # display the game environment
running_reward = None  # exponential moving average of episode rewards

tf.reset_default_graph()
## Define Q-network q(a,s) that outputs the rewards of 4 actions by given state, i.e. Action-Value Function.
# 4x4 grid can be represented by one-hot vector with 16 integers.
inputs = tf.placeholder(shape=[1, 16], dtype=tf.float32)
net = InputLayer(inputs, name='observation')
net = DenseLayer(net, n_units=4, act=tf.identity, W_init=tf.random_uniform_initializer(0, 0.01), b_init=None, name='q_a_s')
y = net.outputs  # action-value / rewards of 4 actions
predict = tf.argmax(y, 1)  # choose action greedily with reward. in Q-Learning, policy is greedy, so we use "max" to select the next action.

## Below we obtain the loss by taking the sum of squares difference between the target and prediction Q values.
nextQ = tf.placeholder(shape=[1, 4], dtype=tf.float32)
loss = tl.cost.mean_squared_error(nextQ, y, is_mean=False)  # tf.reduce_sum(tf.square(nextQ - y))
train_op = tf.train.GradientDescentOptimizer(learning_rate=0.1).minimize(loss)

## Set learning parameters
lambd = .99  # decay factor
e = 0.1  # e-Greedy Exploration, the larger the more random
num_episodes = 10000
with tf.Session() as sess:
    tl.layers.initialize_global_variables(sess)
    for i in range(num_episodes):
        ## Reset environment and get first new observation
        episode_time = time.time()
        s = env.reset()  # observation is state, integer 0 ~ 15
        rAll = 0
        for j in range(99):  # step index, maximum step is 99
            if render: env.render()
            ## Choose an action by greedily (with e chance of random action) from the Q-network
            a, allQ = sess.run([predict, y], feed_dict={inputs: [to_one_hot(s, 16)]})
            ## e-Greedy Exploration !!! sample random action
            if np.random.rand(1) < e:
                a[0] = env.action_space.sample()
            ## Get new state and reward from environment
            s1, r, d, _ = env.step(a[0])
            ## Obtain the Q' values by feeding the new state through our network
            Q1 = sess.run(y, feed_dict={inputs: [to_one_hot(s1, 16)]})
            ## Obtain maxQ' and set our target value for chosen action.
            maxQ1 = np.max(Q1)  # in Q-Learning, policy is greedy, so we use "max" to select the next action.
            targetQ = allQ
            targetQ[0, a[0]] = r + lambd * maxQ1
            ## Train network using target and predicted Q values
            # it is not real target Q value, it is just an estimation,
            # but check the Q-Learning update formula:
            #    Q'(s,a) <- Q(s,a) + alpha(r + lambd * maxQ(s',a') - Q(s, a))
            # minimizing |r + lambd * maxQ(s',a') - Q(s, a)|^2 equals forcing
            #   Q'(s,a) ≈ Q(s,a)
            _ = sess.run(train_op, {inputs: [to_one_hot(s, 16)], nextQ: targetQ})
            rAll += r
            s = s1
            ## Reduce chance of random action if an episode is done.
            if d == True:
                e = 1. / ((i / 50) + 10)  # reduce e, GLIE: Greedy in the limit with infinite Exploration
                break
        ## Note that, the rewards here with random action
        running_reward = rAll if running_reward is None else running_reward * 0.99 + rAll * 0.01
        print("Episode [%d/%d] sum reward:%f running reward:%f took:%.5fs %s" % (i, num_episodes, rAll, running_reward, time.time() - episode_time, ''
              if rAll == 0 else ' !!!!!!!!'))
| 48.553398 | 158 | 0.645271 |
0f7981afcd5855d715bbf1e4cb50b68d3db6025f | 60 | py | Python | src/blueprints/multilingual/__init__.py | PseuToPy/PseuToPy-web | fe0d43b0c91b6ae115ba06cb5453df4ad7ca1292 | [
"MIT"
] | 1 | 2020-10-04T08:59:00.000Z | 2020-10-04T08:59:00.000Z | src/blueprints/multilingual/__init__.py | PseuToPy/PseuToPy-api | fe0d43b0c91b6ae115ba06cb5453df4ad7ca1292 | [
"MIT"
] | 4 | 2020-06-19T12:59:18.000Z | 2021-01-31T08:13:41.000Z | src/blueprints/multilingual/__init__.py | PseuToPy/PseuToPy-web | fe0d43b0c91b6ae115ba06cb5453df4ad7ca1292 | [
"MIT"
] | null | null | null | from src.blueprints.multilingual.routes import multilingual
| 30 | 59 | 0.883333 |
84ebeb4f64ab97cbe19b0542a887c523850e0e65 | 2,135 | py | Python | examples/remote_cluster.py | venukarnati92/python-1 | 3fabf9ed9f4758fb5133975a58fc147471e91d9d | [
"Apache-2.0"
] | 4,417 | 2018-01-13T04:30:48.000Z | 2022-03-31T15:33:59.000Z | examples/remote_cluster.py | belajarqywok/python | b15bea16a87ad03136a4627941ac437582ea4657 | [
"Apache-2.0"
] | 1,414 | 2018-01-12T19:31:56.000Z | 2022-03-31T22:01:02.000Z | examples/remote_cluster.py | palnabarun/python | 6b01c95e1673c0787d3d688b361bfd995d62dd98 | [
"Apache-2.0"
] | 2,854 | 2018-01-14T08:57:33.000Z | 2022-03-31T01:41:56.000Z | # Copyright 2018 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
This example demonstrates the communication between a remote cluster and a
server outside the cluster without kube client installed on it.
The communication is secured with the use of Bearer token.
"""
from kubernetes import client, config
def main():
    """Authenticate to a remote Kubernetes cluster with a bearer token and
    list every pod (IP, namespace, name) across all namespaces.

    No kubeconfig is required; the endpoint and token are set directly on a
    ``client.Configuration`` object.
    """
    # Bearer token used for authentication. See:
    # https://kubernetes.io/docs/tasks/access-application-cluster/access-cluster/
    aToken = "<token>"

    # Build the client configuration by hand instead of loading a kubeconfig.
    aConfiguration = client.Configuration()

    # Endpoint of the remote Kube cluster.
    aConfiguration.host = "https://XXX.XXX.XXX.XXX:443"

    # For simplicity this example skips verification of the server's SSL
    # certificate. To verify it instead, set:
    #   aConfiguration.verify_ssl = True
    #   aConfiguration.ssl_ca_cert = "certificate"   # path to the CA cert
    aConfiguration.verify_ssl = False

    aConfiguration.api_key = {"authorization": "Bearer " + aToken}

    # Build an ApiClient from our hand-rolled configuration and query pods.
    aApiClient = client.ApiClient(aConfiguration)
    v1 = client.CoreV1Api(aApiClient)

    print("Listing pods with their IPs:")
    ret = v1.list_pod_for_all_namespaces(watch=False)
    for pod in ret.items:
        print("%s\t%s\t%s" %
              (pod.status.pod_ip, pod.metadata.namespace, pod.metadata.name))


if __name__ == '__main__':
    main()
| 35 | 81 | 0.725059 |
7329a6553c1d534de7857ec6b3b9822c54f2e0ba | 3,279 | py | Python | python/oneflow/nn/__init__.py | Warmchay/oneflow | 5a333ff065bb89990318de2f1bd650e314d49301 | [
"Apache-2.0"
] | null | null | null | python/oneflow/nn/__init__.py | Warmchay/oneflow | 5a333ff065bb89990318de2f1bd650e314d49301 | [
"Apache-2.0"
] | null | null | null | python/oneflow/nn/__init__.py | Warmchay/oneflow | 5a333ff065bb89990318de2f1bd650e314d49301 | [
"Apache-2.0"
] | null | null | null | """
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from oneflow.nn.graph import Graph
from oneflow.nn.module import Module
from oneflow.nn.modules.activation import (
ELU,
GELU,
Hardsigmoid,
Hardswish,
Hardtanh,
LeakyReLU,
LogSigmoid,
LogSoftmax,
Mish,
PReLU,
ReLU,
ReLU6,
Sigmoid,
Softmax,
Softplus,
Tanh,
SELU,
SiLU,
Softsign,
)
from oneflow.nn.modules.all_reduce import AllReduce
from oneflow.nn.modules.batchnorm import BatchNorm1d, BatchNorm2d, BatchNorm3d
from oneflow.nn.modules.container import (
ModuleDict,
ModuleList,
ParameterDict,
ParameterList,
Sequential,
)
from oneflow.nn.modules.conv import Conv1d, Conv2d, Conv3d, ConvTranspose2d
from oneflow.nn.modules.min_max_observer import MinMaxObserver
from oneflow.nn.modules.moving_average_min_max_observer import (
MovingAverageMinMaxObserver,
)
from oneflow.nn.modules.fake_quantization import FakeQuantization
from oneflow.nn.modules.quantization import Quantization
from oneflow.nn.modules.dataset import (
COCOReader,
CoinFlip,
CropMirrorNormalize,
OFRecordImageDecoder,
OFRecordImageDecoderRandomCrop,
OFRecordImageGpuDecoderRandomCropResize,
OFRecordRawDecoder,
OFRecordRawDecoder as OfrecordRawDecoder,
OFRecordReader,
OFRecordReader as OfrecordReader,
OFRecordBytesDecoder,
GPTIndexedBinDataReader,
)
from oneflow.nn.modules.dropout import Dropout
from oneflow.nn.modules.flatten import Flatten
from oneflow.nn.modules.instancenorm import (
InstanceNorm1d,
InstanceNorm2d,
InstanceNorm3d,
)
from oneflow.nn.modules.linear import Identity, Linear
from oneflow.nn.modules.loss import (
BCELoss,
BCEWithLogitsLoss,
CrossEntropyLoss,
CTCLoss,
KLDivLoss,
L1Loss,
MarginRankingLoss,
MSELoss,
NLLLoss,
SmoothL1Loss,
CombinedMarginLoss,
)
from oneflow.nn.modules.normalization import GroupNorm, LayerNorm
from oneflow.nn.modules.padding import (
ConstantPad1d,
ConstantPad2d,
ConstantPad3d,
ReflectionPad2d,
ReplicationPad2d,
ZeroPad2d,
)
from oneflow.nn.modules.pixelshuffle import PixelShufflev2 as PixelShuffle
from oneflow.nn.modules.pooling import (
AvgPool1d,
AvgPool2d,
AvgPool3d,
MaxPool1d,
MaxPool2d,
MaxPool3d,
AdaptiveAvgPool1d,
AdaptiveAvgPool2d,
AdaptiveAvgPool3d,
)
from oneflow.nn.modules.sparse import Embedding
from oneflow.nn.modules.upsampling import (
Upsample,
UpsamplingBilinear2d,
UpsamplingNearest2d,
)
from oneflow.nn.modules.fold import Fold, Unfold
from oneflow.nn.parameter import Parameter
from oneflow.nn import utils
from . import functional
from . import parallel
| 25.617188 | 78 | 0.761208 |
f45207f874f94dee303f0bb0400e72370b61bdb9 | 1,726 | py | Python | Example/Gutenkunst2007/Brown_2004_All_One/Nets.py | bcdaniels/SloppyCell | 17e68127a6aba19056a5067748a2d18241cc4d76 | [
"BSD-3-Clause"
] | 2 | 2020-05-26T19:29:39.000Z | 2020-08-26T20:54:52.000Z | Example/Gutenkunst2007/Brown_2004_All_One/Nets.py | bcdaniels/SloppyCell | 17e68127a6aba19056a5067748a2d18241cc4d76 | [
"BSD-3-Clause"
] | 1 | 2020-05-26T16:50:49.000Z | 2021-07-08T20:35:35.000Z | Example/Gutenkunst2007/Brown_2004_All_One/Nets.py | jurquiza/SloppyCellUrquiza2019 | a9f64d9d4172c82735813f09e48f36777a714e9c | [
"BSD-3-Clause"
] | 3 | 2017-09-12T03:12:01.000Z | 2018-10-19T11:08:09.000Z | from SloppyCell.ReactionNetworks import *
# Load the network from the XML file
net = IO.from_SBML_file('BIOMD0000000033.xml')

# Normalize the model: every variable gets typical value 1, and every
# non-zero initial condition is reset to 1 before building variants.
for var in net.variables.keys():
    # Set all typical values to 1.
    net.set_var_typical_val(var, 1.0)
    # Set all non-zero ICs to 1.
    if net.get_var_ic(var) != 0:
        net.set_var_ic(var, 1.0)
# Both stimuli start switched off; each condition below enables one.
net.set_var_ic('EGF', 0)
net.set_var_ic('NGF', 0)

net.compile()

# Create our various conditions
# EGF stimulation in the wild-type
egf_stim = net.copy('EGF_stim')
egf_stim.set_var_ic('EGF', 1)
# NGF stimulation in the wild-type
ngf_stim = net.copy('NGF_stim')
ngf_stim.set_var_ic('NGF', 1)

# EGF stimulation with PI3K inhibited
# (inhibition is modeled by zeroing the rate constants and freezing them
# so the optimizer cannot change them)
egf_ly = egf_stim.copy('EGF_LY')
egf_ly.set_var_ic('kPI3KRas', 0)
egf_ly.set_var_optimizable('kPI3KRas', False)
egf_ly.set_var_ic('kPI3K', 0)
egf_ly.set_var_optimizable('kPI3K', False)
# NGF stimulation with PI3K inhibited
ngf_ly = ngf_stim.copy('NGF_LY')
ngf_ly.set_var_ic('kPI3KRas', 0)
ngf_ly.set_var_optimizable('kPI3KRas', False)
ngf_ly.set_var_ic('kPI3K', 0)
ngf_ly.set_var_optimizable('kPI3K', False)

# NGF stimulation with a dominant-negative Rap1
ngf_DN_Rap1 = ngf_stim.copy('NGF_DN_Rap1')
ngf_DN_Rap1.set_var_ic('kRap1ToBRaf', 0)
ngf_DN_Rap1.set_var_optimizable('kRap1ToBRaf', False)
# NGF stimulation with a dominant-negative Ras
ngf_DN_Ras = ngf_stim.copy('NGF_DN_Ras')
ngf_DN_Ras.set_var_ic('kRasToRaf1', 0)
ngf_DN_Ras.set_var_optimizable('kRasToRaf1', False)
ngf_DN_Ras.set_var_ic('kPI3KRas', 0)
ngf_DN_Ras.set_var_optimizable('kPI3KRas', False)

# Collect our networks in a list
networks = [egf_stim, ngf_stim, egf_ly, ngf_ly, ngf_DN_Rap1, ngf_DN_Ras]
# And make a list of corresponding integration times
# (note: the inner [0, 120] list object is shared by every entry; fine as
# long as no caller mutates it in place)
int_times = [[0, 120]] * len(networks)
| 31.381818 | 72 | 0.75956 |
396fcfe26645eea486b583d87ce80e9346861477 | 13,103 | py | Python | jello/dotmap.py | roehling/jello | 1073355e2bbfcd4f92af2584c9e539ce34859fc0 | [
"MIT"
] | 265 | 2020-03-25T06:59:33.000Z | 2022-03-29T19:07:18.000Z | jello/dotmap.py | roehling/jello | 1073355e2bbfcd4f92af2584c9e539ce34859fc0 | [
"MIT"
] | 7 | 2020-03-26T15:54:45.000Z | 2022-03-09T00:33:25.000Z | jello/dotmap.py | roehling/jello | 1073355e2bbfcd4f92af2584c9e539ce34859fc0 | [
"MIT"
] | 13 | 2020-03-26T11:01:39.000Z | 2022-02-21T03:16:29.000Z | # The MIT License (MIT)
#
# Copyright (c) 2015 Chris Redford
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
# documentation files (the "Software"), to deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all copies or substantial portions of
# the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO
# THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON INFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF
# CONTRACT, TORT OR OTHERWISE, ARISING FROM,OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
# Note: commented out code on lines 52-55 and 79-82. Changes on line 122-125
from collections import OrderedDict
try:
from collections.abc import MutableMapping, Iterable
except ImportError:
from collections import MutableMapping, Iterable
from json import dumps
from pprint import pprint
from inspect import ismethod
class DotMap(MutableMapping, OrderedDict):
    """An ordered dict whose keys are also accessible as attributes
    (``m.a.b`` instead of ``m['a']['b']``).

    Nested dicts (and dicts inside lists) are converted to DotMaps
    recursively; the ``trackedIDs`` bookkeeping keeps circular references
    from recursing forever. When ``_dynamic`` is true (the default),
    reading a missing key auto-creates an empty child DotMap. When
    ``_prevent_method_masking`` is true, setting a key that collides with
    a DotMap method name raises instead of shadowing the method.
    """

    def __init__(self, *args, **kwargs):
        self._map = OrderedDict()
        self._dynamic = kwargs.pop('_dynamic', True)
        self._prevent_method_masking = kwargs.pop('_prevent_method_masking', False)
        trackedIDs = kwargs.pop('_trackedIDs', {})
        if args:
            d = args[0]
            # for recursive assignment handling
            trackedIDs[id(d)] = self
            src = []
            if isinstance(d, MutableMapping):
                src = self.__call_items(d)
            elif isinstance(d, Iterable):
                src = d
            for k,v in src:
                # Remove this code so we can load data that has reserved key names, yet
                # still raise an exception when attempting to create them later.
                # if self._prevent_method_masking and k in reserved_keys:
                #     raise KeyError('"{}" is reserved'.format(k))
                if isinstance(v, dict):
                    idv = id(v)
                    # already-seen dicts are reused so cycles terminate
                    if idv in trackedIDs:
                        v = trackedIDs[idv]
                    else:
                        trackedIDs[idv] = v
                        v = self.__class__(v, _dynamic=self._dynamic, _prevent_method_masking = self._prevent_method_masking, _trackedIDs = trackedIDs)
                if type(v) is list:
                    l = []
                    for i in v:
                        n = i
                        if isinstance(i, dict):
                            idi = id(i)
                            if idi in trackedIDs:
                                n = trackedIDs[idi]
                            else:
                                trackedIDs[idi] = i
                                n = self.__class__(i, _dynamic=self._dynamic, _prevent_method_masking = self._prevent_method_masking)
                        l.append(n)
                    v = l
                self._map[k] = v
        if kwargs:
            for k,v in self.__call_items(kwargs):
                # Remove this code so we can load data that has reserved key names, yet
                # still raise an exception when attempting to create them later.
                # if self._prevent_method_masking and k in reserved_keys:
                #     raise KeyError('"{}" is reserved'.format(k))
                self._map[k] = v

    def __call_items(self, obj):
        # prefer Python-2-style iteritems() when the object provides it
        if hasattr(obj, 'iteritems') and ismethod(getattr(obj, 'iteritems')):
            return obj.iteritems()
        else:
            return obj.items()

    def items(self):
        return self.iteritems()

    def iteritems(self):
        return self.__call_items(self._map)

    def __iter__(self):
        return self._map.__iter__()

    def next(self):
        return self._map.next()

    def __setitem__(self, k, v):
        self._map[k] = v

    def __getitem__(self, k):
        # dynamic mode auto-creates an empty child DotMap on missing keys
        # (the IPython canary key is exempted so display hooks don't create it)
        if k not in self._map and self._dynamic and k != '_ipython_canary_method_should_not_exist_':
            # automatically extend to new DotMap
            self[k] = self.__class__()
        return self._map[k]

    def __setattr__(self, k, v):
        if k in {'_map','_dynamic', '_ipython_canary_method_should_not_exist_', '_prevent_method_masking'}:
            super(DotMap, self).__setattr__(k,v)
        elif self._prevent_method_masking and k in reserved_keys:
            raise KeyError('"{}" is reserved'.format(k))
        else:
            self[k] = v

    def __getattr__(self, k):
        # dunder names are never treated as map keys
        if k.startswith('__') and k.endswith('__'):
            raise AttributeError(f'{k} is reserved. Please use python dict bracket notation for this key.')
        if self._prevent_method_masking and k in reserved_keys:
            raise AttributeError(f'{k} is reserved. Please use python dict bracket notation for this key.')
        if k in {'_map','_dynamic','_ipython_canary_method_should_not_exist_'}:
            return super(DotMap, self).__getattr__(k)
        # real attributes/methods win over map keys
        try:
            v = super(self.__class__, self).__getattribute__(k)
            return v
        except AttributeError:
            pass
        return self[k]

    def __delattr__(self, key):
        return self._map.__delitem__(key)

    def __contains__(self, k):
        return self._map.__contains__(k)

    def __add__(self, other):
        # empty + other == other; anything else is an error
        if self.empty():
            return other
        else:
            self_type = type(self).__name__
            other_type = type(other).__name__
            msg = "unsupported operand type(s) for +: '{}' and '{}'"
            raise TypeError(msg.format(self_type, other_type))

    def __str__(self, seen = None):
        # ``seen`` carries ids of DotMaps already printed, so cycles render
        # as ``DotMap(...)`` instead of recursing forever
        items = []
        seen = {id(self)} if seen is None else seen
        for k,v in self.__call_items(self._map):
            # circular assignment case
            if isinstance(v, self.__class__):
                if id(v) in seen:
                    items.append('{0}={1}(...)'.format(k, self.__class__.__name__))
                else:
                    seen.add(id(v))
                    items.append('{0}={1}'.format(k, v.__str__(seen)))
            else:
                items.append('{0}={1}'.format(k, repr(v)))
        joined = ', '.join(items)
        out = '{0}({1})'.format(self.__class__.__name__, joined)
        return out

    def __repr__(self):
        return str(self)

    def toDict(self, seen = None):
        """Convert back to a plain dict, preserving shared/circular refs."""
        if seen is None:
            seen = {}
        d = {}
        seen[id(self)] = d
        for k,v in self.items():
            if issubclass(type(v), DotMap):
                idv = id(v)
                if idv in seen:
                    v = seen[idv]
                else:
                    v = v.toDict(seen = seen)
            elif type(v) in (list, tuple):
                l = []
                for i in v:
                    n = i
                    if issubclass(type(i), DotMap):
                        idv = id(n)
                        if idv in seen:
                            n = seen[idv]
                        else:
                            n = i.toDict(seen = seen)
                    l.append(n)
                if type(v) is tuple:
                    v = tuple(l)
                else:
                    v = l
            d[k] = v
        return d

    def pprint(self, pformat='dict'):
        # pformat='json' pretty-prints via json; anything else uses pprint
        if pformat == 'json':
            print(dumps(self.toDict(), indent=4, sort_keys=True))
        else:
            pprint(self.toDict())

    def empty(self):
        return (not any(self))

    # proper dict subclassing
    def values(self):
        return self._map.values()

    # ipython support
    def __dir__(self):
        return self.keys()

    @classmethod
    def parseOther(self, other):
        # unwrap a DotMap operand to its backing OrderedDict for comparisons
        if issubclass(type(other), DotMap):
            return other._map
        else:
            return other

    def __cmp__(self, other):
        other = DotMap.parseOther(other)
        return self._map.__cmp__(other)

    def __eq__(self, other):
        other = DotMap.parseOther(other)
        if not isinstance(other, dict):
            return False
        return self._map.__eq__(other)

    def __ge__(self, other):
        other = DotMap.parseOther(other)
        return self._map.__ge__(other)

    def __gt__(self, other):
        other = DotMap.parseOther(other)
        return self._map.__gt__(other)

    def __le__(self, other):
        other = DotMap.parseOther(other)
        return self._map.__le__(other)

    def __lt__(self, other):
        other = DotMap.parseOther(other)
        return self._map.__lt__(other)

    def __ne__(self, other):
        other = DotMap.parseOther(other)
        return self._map.__ne__(other)

    def __delitem__(self, key):
        return self._map.__delitem__(key)

    def __len__(self):
        return self._map.__len__()

    def clear(self):
        self._map.clear()

    def copy(self):
        return self.__class__(self)

    def __copy__(self):
        return self.copy()

    def __deepcopy__(self, memo=None):
        # NOTE(review): delegates to copy(), so nested non-dict values are
        # shared rather than deep-copied -- confirm this is intentional
        return self.copy()

    def get(self, key, default=None):
        return self._map.get(key, default)

    def has_key(self, key):
        return key in self._map

    def iterkeys(self):
        return self._map.iterkeys()

    def itervalues(self):
        return self._map.itervalues()

    def keys(self):
        return self._map.keys()

    def pop(self, key, default=None):
        return self._map.pop(key, default)

    def popitem(self):
        return self._map.popitem()

    def setdefault(self, key, default=None):
        return self._map.setdefault(key, default)

    def update(self, *args, **kwargs):
        if len(args) != 0:
            self._map.update(*args)
        self._map.update(kwargs)

    def viewitems(self):
        return self._map.viewitems()

    def viewkeys(self):
        return self._map.viewkeys()

    def viewvalues(self):
        return self._map.viewvalues()

    @classmethod
    def fromkeys(cls, seq, value=None):
        d = cls()
        d._map = OrderedDict.fromkeys(seq, value)
        return d

    # pickle support
    def __getstate__(self): return self.__dict__

    def __setstate__(self, d): self.__dict__.update(d)

    # bannerStr
    def _getListStr(self,items):
        # render a list one element per line inside square brackets
        out = '['
        mid = ''
        for i in items:
            mid += ' {}\n'.format(i)
        if mid != '':
            mid = '\n' + mid
        out += mid
        out += ']'
        return out

    def _getValueStr(self,k,v):
        # 'key value' pair, pushing multi-line values onto their own lines
        outV = v
        multiLine = len(str(v).split('\n')) > 1
        if multiLine:
            # push to next line
            outV = '\n' + v
        if type(v) is list:
            outV = self._getListStr(v)
        out = '{} {}'.format(k,outV)
        return out

    def _getSubMapDotList(self, pre, name, subMap):
        # flatten a nested DotMap into 'a.b.c value' dotted lines
        outList = []
        if pre == '':
            pre = name
        else:
            pre = '{}.{}'.format(pre,name)

        def stamp(pre,k,v):
            valStr = self._getValueStr(k,v)
            return '{}.{}'.format(pre, valStr)

        for k,v in subMap.items():
            if isinstance(v,DotMap) and v != DotMap():
                subList = self._getSubMapDotList(pre,k,v)
                outList.extend(subList)
            else:
                outList.append(stamp(pre,k,v))
        return outList

    def _getSubMapStr(self, name, subMap):
        # render one nested DotMap as a '== name ==' section
        outList = ['== {} =='.format(name)]
        for k,v in subMap.items():
            if isinstance(v, self.__class__) and v != self.__class__():
                # break down to dots
                subList = self._getSubMapDotList('',k,v)
                # add the divit
                # subList = ['> {}'.format(i) for i in subList]
                outList.extend(subList)
            else:
                out = self._getValueStr(k,v)
                # out = '> {}'.format(out)
                out = '{}'.format(out)
                outList.append(out)
        finalOut = '\n'.join(outList)
        return finalOut

    def bannerStr(self):
        """Render the whole DotMap as a human-readable banner string."""
        lines = []
        previous = None
        for k,v in self.items():
            if previous == self.__class__.__name__:
                lines.append('-')
            out = ''
            if isinstance(v, self.__class__):
                name = k
                subMap = v
                out = self._getSubMapStr(name,subMap)
                lines.append(out)
                previous = self.__class__.__name__
            else:
                out = self._getValueStr(k,v)
                lines.append(out)
                previous = 'other'
        lines.append('--')
        s = '\n'.join(lines)
        return s
# Names of DotMap's own (non-dunder) attributes/methods; used by the
# ``_prevent_method_masking`` option to refuse keys that would shadow them.
reserved_keys = {
    attr_name
    for attr_name in dir(DotMap)
    if not (attr_name.startswith('__') or attr_name.endswith('__'))
}
| 35.223118 | 151 | 0.545448 |
462e3fb8680d29e52d4c551f5d0ed1f83e4d346c | 615 | py | Python | django_pre_post/migrations/0002_auto_20180402_2043.py | CSnap/Django-pre-post | b86138201e6e5503ff02c6bf4c3bb4f1b6c20757 | [
"MIT"
] | null | null | null | django_pre_post/migrations/0002_auto_20180402_2043.py | CSnap/Django-pre-post | b86138201e6e5503ff02c6bf4c3bb4f1b6c20757 | [
"MIT"
] | 12 | 2018-04-04T20:13:08.000Z | 2021-03-19T21:44:48.000Z | django_pre_post/migrations/0002_auto_20180402_2043.py | CSnap/Django-pre-post | b86138201e6e5503ff02c6bf4c3bb4f1b6c20757 | [
"MIT"
] | 1 | 2020-05-21T17:44:24.000Z | 2020-05-21T17:44:24.000Z | # -*- coding: utf-8 -*-
# Generated by Django 1.11.11 on 2018-04-02 20:43
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    # Auto-generated schema migration: alters Answer.attempt into a
    # cascading foreign key to Attempt with related_name='answers'.

    dependencies = [
        ('django_pre_post', '0001_initial'),
    ]

    operations = [
        migrations.AlterField(
            model_name='answer',
            name='attempt',
            # default=1 backfills existing rows during the migration;
            # preserve_default=False drops that default afterwards.
            field=models.ForeignKey(default=1, on_delete=django.db.models.deletion.CASCADE, related_name='answers', to='django_pre_post.Attempt'),
            preserve_default=False,
        ),
    ]
| 26.73913 | 146 | 0.656911 |
4c53cf4d6f2fc7c373cf43dc79de3c3242b99b13 | 535 | py | Python | phoenix/solr/__init__.py | TeriForey/fawkes | 1b51430d2f952d9802f13fa1f2d0f68df706c43e | [
"Apache-2.0"
] | null | null | null | phoenix/solr/__init__.py | TeriForey/fawkes | 1b51430d2f952d9802f13fa1f2d0f68df706c43e | [
"Apache-2.0"
] | 6 | 2016-12-01T16:17:10.000Z | 2020-11-11T03:19:31.000Z | phoenix/solr/__init__.py | TeriForey/fawkes | 1b51430d2f952d9802f13fa1f2d0f68df706c43e | [
"Apache-2.0"
] | null | null | null | from pyramid.settings import asbool
def includeme(config):
    """Pyramid ``includeme`` hook: register the solr routes and expose a
    ``solr_activated`` property on every request.
    """
    settings = config.registry.settings
    if asbool(settings.get('phoenix.solr', 'false')):
        # actions
        pass

    config.add_route('index_service', '/solr/{service_id}/index')
    config.add_route('clear_index', '/solr/clear')

    # check if solr is activated
    def solr_activated(request):
        request_settings = request.registry.settings
        return asbool(request_settings.get('phoenix.solr', 'false'))

    # reify=True caches the result per request
    config.add_request_method(solr_activated, reify=True)
| 29.722222 | 65 | 0.691589 |
1644223d6c699563673f7de9acda6d0ebc582065 | 93 | py | Python | runner/__init__.py | qklee-lz/resa | 12d10c23c42532e7120f5e23a5675707271c87c8 | [
"Apache-2.0"
] | 118 | 2021-02-28T08:34:41.000Z | 2022-03-24T02:50:54.000Z | runner/__init__.py | qklee-lz/resa | 12d10c23c42532e7120f5e23a5675707271c87c8 | [
"Apache-2.0"
] | 40 | 2021-03-30T09:14:40.000Z | 2022-03-30T13:45:25.000Z | runner/__init__.py | qklee-lz/resa | 12d10c23c42532e7120f5e23a5675707271c87c8 | [
"Apache-2.0"
] | 28 | 2021-03-28T12:21:18.000Z | 2022-03-24T07:18:16.000Z | from .evaluator import *
from .resa_trainer import *
from .registry import build_evaluator
| 18.6 | 38 | 0.795699 |
51d9c6889469ff7af56be77403a86733b855e8c7 | 391 | py | Python | tests/optimizers_test/test_emperor_penguin_optimizer.py | pfnet-research/batch-metaheuristics | 202372a4ef4439cceb71e71092487c9173fd367c | [
"MIT"
] | 7 | 2021-01-22T05:00:18.000Z | 2022-02-26T12:41:07.000Z | tests/optimizers_test/test_emperor_penguin_optimizer.py | pfnet-research/batch-metaheuristics | 202372a4ef4439cceb71e71092487c9173fd367c | [
"MIT"
] | null | null | null | tests/optimizers_test/test_emperor_penguin_optimizer.py | pfnet-research/batch-metaheuristics | 202372a4ef4439cceb71e71092487c9173fd367c | [
"MIT"
] | null | null | null | from batchopt.optimizers.emperor_penguin_optimizer import EmperorPenguinOptimizer
def test_epo_init(optimizer_init_test_func):
    """Run the shared init-test helper against EmperorPenguinOptimizer."""
    optimizer_init_test_func(EmperorPenguinOptimizer)
def test_epo_ask(optimizer_ask_test_func):
    """Run the shared ask-test helper against EmperorPenguinOptimizer."""
    optimizer_ask_test_func(EmperorPenguinOptimizer)
def test_epo_train(optimizer_optimize_test_func):
    """Run the shared optimize-test helper against EmperorPenguinOptimizer."""
    optimizer_optimize_test_func(EmperorPenguinOptimizer)
| 27.928571 | 81 | 0.877238 |
2d16b3b67fe5c3eae80637552cf4cb70dd108f23 | 482 | py | Python | setup.py | ascott1/regulations-site | 9c6ce452b8d3e82cc6f3a38953e6f2d04ae64788 | [
"CC0-1.0"
] | 1 | 2019-12-29T17:49:57.000Z | 2019-12-29T17:49:57.000Z | setup.py | ascott1/regulations-site | 9c6ce452b8d3e82cc6f3a38953e6f2d04ae64788 | [
"CC0-1.0"
] | null | null | null | setup.py | ascott1/regulations-site | 9c6ce452b8d3e82cc6f3a38953e6f2d04ae64788 | [
"CC0-1.0"
] | null | null | null | import os
from setuptools import setup, find_packages
# Package metadata for the `regulations` distribution.
setup(
    name="regulations",
    version="2.1.5",
    packages=find_packages(),
    include_package_data=True,
    # custom setup plugin providing the frontend_build_script hook below
    setup_requires=['cfgov_setup==1.2'],
    frontend_build_script='frontendbuild.sh',
    install_requires=[
        'django>=1.8',
        'lxml',
        'requests',
    ],
    classifiers=[
        'License :: Public Domain',
        'License :: CC0 1.0 Universal (CC0 1.0) Public Domain Dedication',
    ],
)
| 21.909091 | 73 | 0.618257 |
593189fdc744d62b837fb18d26e0bf17f0ab40b4 | 9,197 | py | Python | tests/mockssh.py | lydian/mrjob | 13028274296f5618d63ffc00301537fd385eef82 | [
"Apache-2.0"
] | null | null | null | tests/mockssh.py | lydian/mrjob | 13028274296f5618d63ffc00301537fd385eef82 | [
"Apache-2.0"
] | null | null | null | tests/mockssh.py | lydian/mrjob | 13028274296f5618d63ffc00301537fd385eef82 | [
"Apache-2.0"
] | null | null | null | # Copyright 2009-2012 Yelp
# Copyright 2014 Ed Schofield
# Copyright 2015-2016 Yelp
# Copyright 2017 Yelp
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A mock version of the ssh binary that actually manipulates the
filesystem. This imitates only things that mrjob actually uses.
Relies on these environment variables:
MOCK_SSH_ROOTS -- specify directories for hosts in the form:
host1=/tmp/dir1:host2=/tmp/dir2
MOCK_SSH_VERIFY_KEY_FILE -- set to 'true' if the script should print an error
when the key file does not exist
You can optionally set MOCK_SSH_REQUIRES_SUDO to 1 (or any nonempty value)
to raise an error unless ls and cat are preceded by sudo.
This is designed to run as: python -m tests.mockssh <ssh args>
mrjob requires a single binary (no args) to stand in for ssh, so
use create_mock_hadoop_script() to write out a shell script that runs
mockssh.
"""
from __future__ import print_function

import os
import pipes
import posixpath
import re
import shlex
import stat
import sys
def create_mock_ssh_script(path):
    """Write a wrapper shell script to *path* that re-invokes this module
    with the current Python interpreter, forwarding all arguments.

    mrjob requires a single argument-less binary to stand in for ssh, so
    this wrapper is what tests configure as the ssh binary. The script is
    made read+execute only.
    """
    # make this work even if $PATH or $PYTHONPATH changes
    with open(path, 'w') as f:
        f.write('#!/bin/sh\n')
        # shlex.quote replaces pipes.quote, which was deprecated and then
        # removed in Python 3.13 (they were the same function)
        f.write('%s %s "$@"\n' % (
            shlex.quote(sys.executable),
            shlex.quote(os.path.abspath(__file__))))
    os.chmod(path, stat.S_IREAD | stat.S_IEXEC)
def mock_ssh_dir(host, path):
    """Create a directory at POSIX path ``path`` inside the temp directory
    that fakes ``host``, unless it already exists.
    """
    local_dir = rel_posix_to_abs_local(host, path)
    if not os.path.exists(local_dir):
        os.makedirs(local_dir)
def mock_ssh_file(host, path, contents):
    """Write ``contents`` (bytes) to POSIX path ``path`` inside the temp
    directory that fakes ``host``, creating parent directories as needed.

    Returns the local filesystem path of the resulting file for sanity
    checking.
    """
    if not isinstance(contents, bytes):
        raise TypeError('mock SSH file contents must be bytes')

    local_path = rel_posix_to_abs_local(host, path)
    parent_dir = os.path.dirname(local_path)
    if not os.path.exists(parent_dir):
        os.makedirs(parent_dir)

    with open(local_path, 'wb') as f:
        f.write(contents)

    return local_path
def path_for_host(host, environ=None):
    """Return the absolute local directory that fakes the given ``host``,
    looked up from ``$MOCK_SSH_ROOTS`` (``host1=/dir1:host2=/dir2``).

    Raises ``KeyError`` if the host has no entry.
    """
    environ = os.environ if environ is None else environ

    for entry in environ['MOCK_SSH_ROOTS'].split(':'):
        entry_host, entry_path = entry.split('=')
        if entry_host == host:
            return os.path.abspath(entry_path)

    raise KeyError('Host %s is not specified in $MOCK_SSH_ROOTS (%s)' %
                   (host, environ['MOCK_SSH_ROOTS']))
def rel_posix_to_abs_local(host, path, environ=None):
    """Convert POSIX path ``path`` to the local OS's path format, rooted at
    the temp directory that fakes ``host``.
    """
    environ = os.environ if environ is None else environ

    relative = path[1:] if path.startswith('/') else path
    root = path_for_host(host, environ)
    return os.path.join(root, *relative.split('/'))
# Matches '<master>!<worker>=<dir>' entries in $MOCK_SSH_ROOTS,
# identifying worker hosts reachable through a master.
_WORKER_ADDR_RE = re.compile(r'^(?P<master>.*?)!(?P<worker>.*?)=(?P<dir>.*)$')

# Extracts the destination filename from the poor-man's-scp command
# ('cat > <filename> && ...') used to upload files over mock ssh.
_SCP_RE = re.compile(r'cat > (?P<filename>.*?) &&.*$')
def main(stdin, stdout, stderr, args, environ):
    """Entry point for the fake ssh binary.

    :param stdin: file-like object standing in for the process's stdin
    :param stdout: file-like object receiving normal output
    :param stderr: file-like object receiving error output
    :param args: full ssh-style argument list
                 (``[..., '-i', keyfile, ..., 'hadoop@host', cmd, ...]``)
    :param environ: mapping of environment variables (``MOCK_SSH_ROOTS``
                    etc.; see the module docstring)
    :return: integer exit code (0 on success, 1 on error)
    """
    def worker_addresses():
        """Get the addresses for workers based on :envvar:`MOCK_SSH_ROOTS`"""
        # NOTE(review): defined but never invoked in this module; kept for
        # API parity -- confirm whether anything still relies on it
        for kv_pair in environ['MOCK_SSH_ROOTS'].split(':'):
            m = _WORKER_ADDR_RE.match(kv_pair)
            if m:
                print(m.group('worker'), file=stdout)
        return 0

    def receive_poor_mans_scp(host, args):
        """Mock SSH behavior for uploading SSH key with sh -c "cat ..." """
        dest = _SCP_RE.match(args[2]).group('filename')
        try:
            path = os.path.join(path_for_host(host, environ), dest)
            with open(path, 'w') as f:
                f.writelines(stdin)
            return 0
        except IOError:
            print('No such file or directory:', dest, file=stderr)
            return 1

    def ls(host, args):
        """Mock SSH behavior for running the ``find`` command over SSH"""
        dest = args[1]
        if dest == '-L':
            dest = args[2]

        local_dest = rel_posix_to_abs_local(host, dest, environ)

        # length of the host's local root dir; stripped off each result so
        # printed paths are POSIX paths relative to the fake host's root
        prefix_length = len(path_for_host(host, environ))
        if not os.path.exists(local_dest):
            print('No such file or directory:', local_dest, file=stderr)
            return 1
        if not os.path.isdir(local_dest):
            print(dest, file=stdout)
        for root, dirs, files in os.walk(local_dest):
            components = root.split(os.sep)
            new_root = posixpath.join(*components)
            for filename in files:
                print(
                    '/' + posixpath.join(new_root, filename)[prefix_length:],
                    file=stdout)
        return 0

    def cat(host, args):
        """Mock SSH behavior for running cat <path> over SSH"""
        local_dest = rel_posix_to_abs_local(host, args[1], environ)
        if not os.path.exists(local_dest):
            print('No such file or directory:', local_dest, file=stderr)
            return 1

        # in Python 3, binary data has to go to sys.stdout.buffer
        stdout_buffer = getattr(stdout, 'buffer', stdout)

        with open(local_dest, 'rb') as f:
            for line in f:
                stdout_buffer.write(line)
        return 0

    def run(host, remote_args, stdout, stderr, environ, worker_key_file=None):
        """Execute a command as a "host." Recursively call for worker if
        necessary.
        """
        remote_arg_pos = 0

        # handle sudo
        if remote_args[0] == 'sudo':
            remote_args = remote_args[1:]
        elif environ.get('MOCK_SSH_REQUIRES_SUDO'):
            if remote_args[0] in ('find', 'cat'):
                print('sudo required', file=stderr)
                return 1

        # Accept stdin for a file transfer (this is 'sh -c "cat > ...')
        if remote_args[:2] == ['sh', '-c'] and len(remote_args) == 3:
            return receive_poor_mans_scp(host, remote_args)

        # ls (this is 'find -type f ...')
        if remote_args[0] == 'find':
            return ls(host, remote_args)

        # cat (this is 'cat ...')
        if remote_args[0] == 'cat':
            return cat(host, remote_args)

        # Recursively call for workers
        if remote_args[0].split('/')[-1] == 'ssh':
            # Actually check the existence of the key file on the master node
            while not remote_args[remote_arg_pos] == '-i':
                remote_arg_pos += 1

            worker_key_file = remote_args[remote_arg_pos + 1]
            if not os.path.exists(
                    os.path.join(path_for_host(host, environ),
                                 worker_key_file)):
                # This is word-for-word what SSH says.
                print(('Warning: Identity file %s not accessible.'
                       ' No such file or directory.' %
                       worker_key_file), file=stderr)
                print('Permission denied (publickey).', file=stderr)
                return 1

            while not remote_args[remote_arg_pos].startswith('hadoop@'):
                remote_arg_pos += 1

            # worker address is '<master>!<worker>' (a "bang path")
            worker_host = (
                host + '!%s' % remote_args[remote_arg_pos].split('@')[1])

            # build bang path
            return run(worker_host, remote_args[remote_arg_pos + 1:],
                       stdout, stderr, environ, worker_key_file)

        cmd_line = ' '.join(pipes.quote(x) for x in remote_args)
        print("Command line not recognized: %s" % cmd_line, file=stderr)
        return 1

    # Find where the user's commands begin
    arg_pos = 0

    # skip to key file path
    while args[arg_pos] != '-i':
        arg_pos += 1

    arg_pos += 1

    # verify existence of key pair file if necessary
    if environ.get('MOCK_SSH_VERIFY_KEY_FILE', 'false') == 'true' \
       and not os.path.exists(args[arg_pos]):
        # BUG FIX: this used to be a print(..., end='') followed by a bare
        # tuple expression that was silently discarded, so the filename and
        # the rest of the message were never emitted
        print('Warning: Identity file %s not accessible:'
              ' No such file or directory.' % args[arg_pos], file=stderr)
        return 1

    # skip to host address
    while not args[arg_pos].startswith('hadoop@'):
        arg_pos += 1

    host = args[arg_pos].split('@')[1]

    # the rest are arguments are what to run on the remote machine
    arg_pos += 1
    return run(host, args[arg_pos:], stdout, stderr, environ, None)
if __name__ == '__main__':
    # behave like the real ssh binary: exit with main()'s return code
    sys.exit(main(sys.stdin, sys.stdout, sys.stderr, sys.argv, os.environ))
| 34.575188 | 78 | 0.611939 |
878a55922384e98104ba2f934c735c00bcc5bffd | 1,750 | py | Python | hello-stream-twitter.py | mdemaster/mdemaster_w205_exercise2 | 0030d78d674840f848250bff970824a1d49587fb | [
"Apache-2.0"
] | null | null | null | hello-stream-twitter.py | mdemaster/mdemaster_w205_exercise2 | 0030d78d674840f848250bff970824a1d49587fb | [
"Apache-2.0"
] | 1 | 2020-06-25T07:11:18.000Z | 2020-06-25T07:11:18.000Z | hello-stream-twitter.py | mdemaster/mdemaster_w205_exercise2 | 0030d78d674840f848250bff970824a1d49587fb | [
"Apache-2.0"
] | null | null | null | """
Using Twitter stream API, print all the tweets in the stream containing the term "Hello" in a 1 min period
"""
from tweepy.streaming import StreamListener
from tweepy import OAuthHandler
from tweepy import Stream
from Twittercredentials import *
from time import time,ctime
import simplejson
class StdOutListener(StreamListener):
def __init__(self,timer):
self.inc = 0
StreamListener.__init__(self)
# report the start of data collection...
print "Gathering data at %s"%(str(ctime()))
self.startTime = time()
print "Start Time = %s"%(str(ctime()))
self.timer = timer
self.count = 0
def on_data(self, data):
try:
self.endTime = time()
self.elapsedTime = self.endTime - self.startTime
if self.elapsedTime <= self.timer:
self.dataJson =simplejson.loads(data[:-1])
self.dataJsonText = self.dataJson["text"].lower()
self.count += 1
if "Hello" in self.dataJsonText:
print self.dataJsonText
else:
print "Count== ",self.count
print "End Time = %s"%(str(ctime()))
print "Elapsed Time = %s"%(str(self.elapsedTime))
return False
return True
except Exception, e:
# Catch any unicode errors while printing to console
# and just ignore them to avoid breaking application.
pass
def on_error(self, status):
print ("ERROR :",status)
if __name__ == '__main__':
    # collect the data for 1 min
    l = StdOutListener(60)
    # BUG FIX: the module name `tweepy` is never imported here (only names
    # from it), so `tweepy.Stream` would raise NameError unless the star
    # import from Twittercredentials happened to provide it; use the
    # explicitly imported Stream instead. `auth` is presumably supplied by
    # `from Twittercredentials import *` — TODO confirm.
    mystream = Stream(auth, l, timeout=60)
    mystream.sample()
| 28.688525 | 106 | 0.585714 |
715a2a3490b3f315243ff12ca4974c4cca7ab80a | 2,423 | py | Python | onnxruntime/test/python/onnxruntime_test_python_keras.py | hqucms/onnxruntime | 6e4e76414639f50836a64546603c8957227857b0 | [
"MIT"
] | 3 | 2019-11-25T10:26:57.000Z | 2021-05-14T08:11:29.000Z | onnxruntime/test/python/onnxruntime_test_python_keras.py | hqucms/onnxruntime | 6e4e76414639f50836a64546603c8957227857b0 | [
"MIT"
] | 10 | 2019-03-25T21:47:46.000Z | 2019-04-30T02:33:05.000Z | onnxruntime/test/python/onnxruntime_test_python_keras.py | hqucms/onnxruntime | 6e4e76414639f50836a64546603c8957227857b0 | [
"MIT"
] | 4 | 2021-06-05T19:52:22.000Z | 2021-11-30T13:58:13.000Z | # Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License.
# -*- coding: UTF-8 -*-
# Taken from https://github.com/onnx/onnxmltools/blob/master/tests/end2end/test_custom_op.py.
import unittest
import os
import sys
import numpy as np
import onnxmltools
import onnxruntime as onnxrt
from keras import backend as K
from keras import Sequential
from keras.layers import Layer, Conv2D, MaxPooling2D
class ScaledTanh(Layer):
    """Custom Keras layer computing ``alpha * tanh(beta * x)`` elementwise.

    Used to exercise custom-op conversion: it is mapped to a custom ONNX
    'ScaledTanh' node by ``custom_activation`` below.
    """

    def __init__(self, alpha=1.0, beta=1.0, **kwargs):
        super(ScaledTanh, self).__init__(**kwargs)
        self.alpha = alpha
        self.beta = beta

    def build(self, input_shape):
        # No trainable weights to create; just mark the layer as built.
        super(ScaledTanh, self).build(input_shape)

    def call(self, x):
        # Elementwise scaled hyperbolic tangent
        return self.alpha * K.tanh(self.beta * x)

    def compute_output_shape(self, input_shape):
        # Elementwise operation: output shape equals input shape
        return input_shape
def custom_activation(scope, operator, container):
    # type:(ScopeBase, OperatorBase, ModelContainer) -> None
    # Converter hook: emit a custom 'ScaledTanh' ONNX node carrying the
    # layer's alpha/beta as node attributes.
    container.add_node('ScaledTanh', operator.input_full_names, operator.output_full_names,
                       op_version=1, alpha=operator.original_operator.alpha, beta=operator.original_operator.beta)
class TestInferenceSessionKeras(unittest.TestCase):
    """End-to-end check: Keras model with a custom layer -> ONNX -> onnxruntime."""

    def testRunModelConv(self):
        # Build a small conv net over a random NHWC input.
        # keras model
        N, C, H, W = 2, 3, 5, 5
        x = np.random.rand(N, H, W, C).astype(np.float32, copy=False)
        model = Sequential()
        model.add(Conv2D(2, kernel_size=(1, 2), strides=(1, 1), padding='valid', input_shape=(H, W, C),
                         data_format='channels_last'))
        model.add(ScaledTanh(0.9, 2.0))
        model.add(MaxPooling2D((2, 2), strides=(2, 2), data_format='channels_last'))
        model.compile(optimizer='sgd', loss='mse')
        actual = model.predict(x)
        self.assertIsNotNone(actual)
        # conversion: ScaledTanh is handled by the custom converter above
        converted_model = onnxmltools.convert_keras(model, custom_conversion_functions={ScaledTanh: custom_activation})
        self.assertIsNotNone(converted_model)
        # runtime: run the serialized model and compare with Keras output
        content = converted_model.SerializeToString()
        rt = onnxrt.InferenceSession(content)
        input = {'conv2d_1_input_0': x}
        actual_rt = rt.run(None, input)
        self.assertEqual(len(actual_rt), 1)
        np.testing.assert_allclose(actual, actual_rt[0], rtol=1e-05, atol=1e-08)
if __name__ == '__main__':
    # buffer=True suppresses stdout/stderr from passing tests
    unittest.main(module=__name__, buffer=True)
| 33.652778 | 119 | 0.673545 |
88db0bdd62872272a8beab751fcaccfd4738e8a8 | 434 | py | Python | bot/__main__.py | OpenASL/HowSignBot | bd9c5bc0edfd6fb50bdce7c7c1d84462e1e704c2 | [
"MIT"
] | 9 | 2021-01-12T07:28:30.000Z | 2021-12-30T09:27:04.000Z | bot/__main__.py | OpenASL/HowSignBot | bd9c5bc0edfd6fb50bdce7c7c1d84462e1e704c2 | [
"MIT"
] | 16 | 2021-03-28T16:31:42.000Z | 2022-03-21T00:18:30.000Z | bot/__main__.py | OpenASL/HowSignBot | bd9c5bc0edfd6fb50bdce7c7c1d84462e1e704c2 | [
"MIT"
] | 1 | 2021-07-18T20:49:19.000Z | 2021-07-18T20:49:19.000Z | import logging
from aiohttp import web
from . import __version__
from . import settings
from .app import app
# Shared log line format for the root handler
log_format = "%(asctime)s - %(name)s %(levelname)s: %(message)s"
# Quieten the Discord library's own (verbose) logging
logging.getLogger("disnake").setLevel(logging.WARNING)
logging.basicConfig(
    format=log_format,
    level=settings.LOG_LEVEL,
)
logger = logging.getLogger(__name__)
logger.info(f"starting bot version {__version__}")
# Start the aiohttp application; blocks until shutdown
web.run_app(app, port=settings.PORT)
db30a0ad573c6e1d49f0f31d1002e2303dec6d7b | 7,211 | py | Python | test/expected/python.asyncio/service_extension_same_file/f_BasePinger.py | dustyholmes-wf/frugal | 915ccfc58fcc9baabc4549c522e3acd2975a2e0b | [
"Apache-2.0"
] | null | null | null | test/expected/python.asyncio/service_extension_same_file/f_BasePinger.py | dustyholmes-wf/frugal | 915ccfc58fcc9baabc4549c522e3acd2975a2e0b | [
"Apache-2.0"
] | null | null | null | test/expected/python.asyncio/service_extension_same_file/f_BasePinger.py | dustyholmes-wf/frugal | 915ccfc58fcc9baabc4549c522e3acd2975a2e0b | [
"Apache-2.0"
] | null | null | null | #
# Autogenerated by Frugal Compiler (3.4.7)
#
# DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
#
import asyncio
from datetime import timedelta
import inspect
from frugal.aio.processor import FBaseProcessor
from frugal.aio.processor import FProcessorFunction
from frugal.exceptions import TApplicationExceptionType
from frugal.exceptions import TTransportExceptionType
from frugal.middleware import Method
from frugal.transport import TMemoryOutputBuffer
from frugal.util.deprecate import deprecated
from thrift.Thrift import TApplicationException
from thrift.Thrift import TMessageType
from thrift.transport.TTransport import TTransportException
from .ttypes import *
class Iface(object):
    """Service interface for BasePinger.

    Implement this and hand the implementation to Processor to serve
    requests; Client implements it on the calling side.
    """

    async def basePing(self, ctx):
        """
        Args:
            ctx: FContext
        """
        pass
class Client(Iface):
    """Frugal client for the BasePinger service.

    Serializes each call onto the provider's transport and decodes the
    response, running any configured middleware around every method.
    """

    def __init__(self, provider, middleware=None):
        """
        Create a new Client with an FServiceProvider containing a transport
        and protocol factory.

        Args:
            provider: FServiceProvider
            middleware: ServiceMiddleware or list of ServiceMiddleware
        """
        middleware = middleware or []
        if middleware and not isinstance(middleware, list):
            middleware = [middleware]
        self._transport = provider.get_transport()
        self._protocol_factory = provider.get_protocol_factory()
        # Provider-supplied middleware runs after any passed in explicitly
        middleware += provider.get_middleware()
        self._methods = {
            'basePing': Method(self._basePing, middleware),
        }

    async def basePing(self, ctx):
        """
        Args:
            ctx: FContext
        """
        return await self._methods['basePing']([ctx])

    async def _basePing(self, ctx):
        # Serialize the request into an in-memory buffer first so the
        # request size limit is enforced before anything goes on the wire
        memory_buffer = TMemoryOutputBuffer(self._transport.get_request_size_limit())
        oprot = self._protocol_factory.get_protocol(memory_buffer)
        oprot.write_request_headers(ctx)
        oprot.writeMessageBegin('basePing', TMessageType.CALL, 0)
        args = basePing_args()
        args.write(oprot)
        oprot.writeMessageEnd()
        response_transport = await self._transport.request(ctx, memory_buffer.getvalue())
        iprot = self._protocol_factory.get_protocol(response_transport)
        iprot.read_response_headers(ctx)
        _, mtype, _ = iprot.readMessageBegin()
        if mtype == TMessageType.EXCEPTION:
            x = TApplicationException()
            x.read(iprot)
            iprot.readMessageEnd()
            # Surface an oversized response as a transport-level error
            if x.type == TApplicationExceptionType.RESPONSE_TOO_LARGE:
                raise TTransportException(type=TTransportExceptionType.RESPONSE_TOO_LARGE, message=x.message)
            raise x
        result = basePing_result()
        result.read(iprot)
        iprot.readMessageEnd()
class Processor(FBaseProcessor):
    """Server-side processor routing incoming 'basePing' messages to a
    handler implementing Iface."""

    def __init__(self, handler, middleware=None):
        """
        Create a new Processor.

        Args:
            handler: Iface
        """
        if middleware and not isinstance(middleware, list):
            middleware = [middleware]
        super(Processor, self).__init__()
        self.add_to_processor_map('basePing', _basePing(Method(handler.basePing, middleware), self.get_write_lock()))
class _basePing(FProcessorFunction):
    """Processor function: decode a basePing request, invoke the handler,
    and write the (empty) reply or an application exception back."""

    def __init__(self, handler, lock):
        super(_basePing, self).__init__(handler, lock)

    async def process(self, ctx, iprot, oprot):
        args = basePing_args()
        args.read(iprot)
        iprot.readMessageEnd()
        result = basePing_result()
        try:
            ret = self._handler([ctx])
            # The handler may be a plain callable or a coroutine function
            if inspect.iscoroutine(ret):
                ret = await ret
        except TApplicationException as ex:
            # Handler raised an application-level error: report, don't crash
            async with self._lock:
                _write_application_exception(ctx, oprot, "basePing", exception=ex)
            return
        except Exception as e:
            # Unexpected error: report to the client, then re-raise locally
            async with self._lock:
                _write_application_exception(ctx, oprot, "basePing", ex_code=TApplicationExceptionType.INTERNAL_ERROR, message=str(e))
            raise
        async with self._lock:
            try:
                oprot.write_response_headers(ctx)
                oprot.writeMessageBegin('basePing', TMessageType.REPLY, 0)
                result.write(oprot)
                oprot.writeMessageEnd()
                oprot.get_transport().flush()
            except TTransportException as e:
                # catch a request too large error because the TMemoryOutputBuffer always throws that if too much data is written
                if e.type == TTransportExceptionType.REQUEST_TOO_LARGE:
                    raise _write_application_exception(ctx, oprot, "basePing", ex_code=TApplicationExceptionType.RESPONSE_TOO_LARGE, message=e.message)
                else:
                    raise e
def _write_application_exception(ctx, oprot, method, ex_code=None, message=None, exception=None):
    """Serialize an application exception reply for `method` onto `oprot`.

    Uses `exception` when supplied, otherwise builds a TApplicationException
    from `ex_code`/`message`. Returns the exception that was written so
    callers may re-raise it.
    """
    # Prefer the caller-supplied exception; otherwise construct one.
    app_exc = exception if exception is not None else TApplicationException(
        type=ex_code, message=message)
    oprot.write_response_headers(ctx)
    oprot.writeMessageBegin(method, TMessageType.EXCEPTION, 0)
    app_exc.write(oprot)
    oprot.writeMessageEnd()
    oprot.get_transport().flush()
    return app_exc
class basePing_args(object):
    """Argument struct for BasePinger.basePing — declares no fields."""

    def read(self, iprot):
        """Consume this (empty) struct from the protocol, skipping any
        unexpected fields."""
        iprot.readStructBegin()
        while True:
            (fname, ftype, fid) = iprot.readFieldBegin()
            if ftype == TType.STOP:
                break
            # No declared fields, so anything encountered is skipped.
            iprot.skip(ftype)
            iprot.readFieldEnd()
        iprot.readStructEnd()
        self.validate()

    def write(self, oprot):
        """Serialize this (empty) struct to the protocol."""
        self.validate()
        oprot.writeStructBegin('basePing_args')
        oprot.writeFieldStop()
        oprot.writeStructEnd()

    def validate(self):
        """Nothing to check: the struct declares no fields."""
        return

    def __hash__(self):
        # No fields, so the hash is the bare seed constant.
        return 17

    def __repr__(self):
        parts = ('%s=%r' % pair for pair in self.__dict__.items())
        return '%s(%s)' % (self.__class__.__name__, ', '.join(parts))

    def __eq__(self, other):
        return isinstance(other, self.__class__) and self.__dict__ == other.__dict__

    def __ne__(self, other):
        return not self.__eq__(other)
class basePing_result(object):
    """Result struct for BasePinger.basePing — the method returns void, so
    no fields are declared."""

    def read(self, iprot):
        """Consume this (empty) struct from the protocol, skipping any
        unexpected fields."""
        iprot.readStructBegin()
        while True:
            (fname, ftype, fid) = iprot.readFieldBegin()
            if ftype == TType.STOP:
                break
            # No declared fields, so anything encountered is skipped.
            iprot.skip(ftype)
            iprot.readFieldEnd()
        iprot.readStructEnd()
        self.validate()

    def write(self, oprot):
        """Serialize this (empty) struct to the protocol."""
        self.validate()
        oprot.writeStructBegin('basePing_result')
        oprot.writeFieldStop()
        oprot.writeStructEnd()

    def validate(self):
        """Nothing to check: the struct declares no fields."""
        return

    def __hash__(self):
        # No fields, so the hash is the bare seed constant.
        return 17

    def __repr__(self):
        parts = ('%s=%r' % pair for pair in self.__dict__.items())
        return '%s(%s)' % (self.__class__.__name__, ', '.join(parts))

    def __eq__(self, other):
        return isinstance(other, self.__class__) and self.__dict__ == other.__dict__

    def __ne__(self, other):
        return not self.__eq__(other)
| 31.76652 | 151 | 0.628484 |
35b2f002a3806e320907999792cbeac674fc8fe5 | 137 | py | Python | project/app/admin.py | cs-fullstack-fall-2018/django-mini-project1-RoyzellW | 540a867f9890933f82b13bb3e7a12457f8594c65 | [
"Apache-2.0"
] | null | null | null | project/app/admin.py | cs-fullstack-fall-2018/django-mini-project1-RoyzellW | 540a867f9890933f82b13bb3e7a12457f8594c65 | [
"Apache-2.0"
] | null | null | null | project/app/admin.py | cs-fullstack-fall-2018/django-mini-project1-RoyzellW | 540a867f9890933f82b13bb3e7a12457f8594c65 | [
"Apache-2.0"
] | null | null | null | from django.contrib import admin
from .models import teacherTimeSheet
# Register your models here.
# Expose the teacher timesheet model in the Django admin site
admin.site.register(teacherTimeSheet)
9c1ecfde38e67ec50b0cffcc0c0307293652d888 | 22,849 | py | Python | malcolm/modules/scanning/controllers/runnablecontroller.py | aaron-parsons/pymalcolm | 4e7ebd6b09382ab7e013278a81097d17873fa5c4 | [
"Apache-2.0"
] | null | null | null | malcolm/modules/scanning/controllers/runnablecontroller.py | aaron-parsons/pymalcolm | 4e7ebd6b09382ab7e013278a81097d17873fa5c4 | [
"Apache-2.0"
] | null | null | null | malcolm/modules/scanning/controllers/runnablecontroller.py | aaron-parsons/pymalcolm | 4e7ebd6b09382ab7e013278a81097d17873fa5c4 | [
"Apache-2.0"
] | null | null | null | from annotypes import Anno, TYPE_CHECKING, add_call_types, Any
from scanpointgenerator import CompoundGenerator
from malcolm.core import AbortedError, MethodModel, Queue, Context, \
TimeoutError, AMri, NumberMeta, Widget, Part, DEFAULT_TIMEOUT
from malcolm.compat import OrderedDict
from malcolm.core.models import MapMeta
from malcolm.modules.builtin.controllers import ManagerController, \
AConfigDir, AInitialDesign, ADescription, AUseGit
from ..infos import ParameterTweakInfo, RunProgressInfo, ConfigureParamsInfo
from ..util import RunnableStates, AGenerator, AAxesToMove, ConfigureParams
from ..hooks import ConfigureHook, ValidateHook, PostConfigureHook, \
RunHook, PostRunArmedHook, PostRunReadyHook, ResumeHook, ReportStatusHook, \
AbortHook, PauseHook, SeekHook, ControllerHook
if TYPE_CHECKING:
    from typing import Dict, Tuple, List, Iterable, Type, Callable

    # Type aliases used in comment annotations below
    PartContextParams = Iterable[Tuple[Part, Context, Dict[str, Any]]]
    PartConfigureParams = Dict[Part, ConfigureParamsInfo]

# Shorthand alias for the runnable state enum used throughout this module
ss = RunnableStates

with Anno("The validated configure parameters"):
    AConfigureParams = ConfigureParams
with Anno("Step to mark as the last completed step, 0 for current"):
    ACompletedSteps = int
def get_steps_per_run(generator, axes_to_move):
    # type: (CompoundGenerator, List[str]) -> int
    """Return how many scan points a single run() call should produce.

    Walks the generator's dimensions from the innermost (fastest) outward,
    multiplying together the sizes of the dimensions whose axes this
    controller moves, and stops once every axis in axes_to_move has been
    accounted for. Outer dimensions are covered by subsequent run() calls.
    """
    remaining = set(axes_to_move)
    steps = 1
    for dimension in reversed(generator.dimensions):
        # All moving axes consumed: outer dimensions are separate runs
        if not remaining:
            break
        for axis in dimension.axes:
            assert axis in remaining, \
                "Axis %s is not in %s" % (axis, axes_to_move)
            remaining.remove(axis)
        steps *= dimension.size
    return steps
class RunnableController(ManagerController):
    """RunnableDevice implementer that also exposes GUI for child parts.

    Drives the configure/run/pause/seek/resume/abort state machine, fanning
    each phase out to its child Parts via hooks and aggregating their
    progress into completedSteps/configuredSteps/totalSteps attributes.
    """

    # The state_set that this controller implements
    state_set = ss()

    def __init__(self,
                 mri,  # type: AMri
                 config_dir,  # type: AConfigDir
                 initial_design="",  # type: AInitialDesign
                 description="",  # type: ADescription
                 use_git=True,  # type: AUseGit
                 ):
        # type: (...) -> None
        super(RunnableController, self).__init__(
            mri, config_dir, initial_design, description, use_git)
        # Shared contexts between Configure, Run, Pause, Seek, Resume
        self.part_contexts = {}  # type: Dict[Part, Context]
        # Any custom ConfigureParams subclasses requested by Parts
        self.part_configure_params = {}  # type: PartConfigureParams
        # Params passed to configure()
        self.configure_params = None  # type: ConfigureParams
        # Progress reporting dict of completed_steps for each part
        self.progress_updates = None  # type: Dict[Part, int]
        # Queue so that do_run can wait to see why it was aborted and resume if
        # needed
        self.resume_queue = None  # type: Queue
        # Queue so we can wait for aborts to complete
        self.abort_queue = None  # type: Queue
        # Stored for pause
        self.steps_per_run = 0  # type: int
        # Create sometimes writeable attribute for the current completed scan
        # step
        self.completed_steps = NumberMeta(
            "int32", "Readback of number of scan steps",
            tags=[Widget.TEXTINPUT.tag()]
        ).create_attribute_model(0)
        # Writing completedSteps triggers a pause/seek to that step
        self.field_registry.add_attribute_model(
            "completedSteps", self.completed_steps, self.pause)
        self.set_writeable_in(self.completed_steps, ss.PAUSED, ss.ARMED)
        # Create read-only attribute for the number of configured scan steps
        self.configured_steps = NumberMeta(
            "int32", "Number of steps currently configured",
            tags=[Widget.TEXTUPDATE.tag()]
        ).create_attribute_model(0)
        self.field_registry.add_attribute_model(
            "configuredSteps", self.configured_steps)
        # Create read-only attribute for the total number scan steps
        self.total_steps = NumberMeta(
            "int32", "Readback of number of scan steps",
            tags=[Widget.TEXTUPDATE.tag()]
        ).create_attribute_model(0)
        self.field_registry.add_attribute_model("totalSteps", self.total_steps)
        # Create the method models
        self.field_registry.add_method_model(self.validate)
        self.set_writeable_in(
            self.field_registry.add_method_model(self.configure), ss.READY)
        self.set_writeable_in(
            self.field_registry.add_method_model(self.run), ss.ARMED)
        self.set_writeable_in(
            self.field_registry.add_method_model(self.abort),
            ss.READY, ss.CONFIGURING, ss.ARMED, ss.RUNNING, ss.POSTRUN,
            ss.PAUSED, ss.SEEKING)
        self.set_writeable_in(
            self.field_registry.add_method_model(self.pause),
            ss.ARMED, ss.PAUSED, ss.RUNNING)
        self.set_writeable_in(
            self.field_registry.add_method_model(self.resume), ss.PAUSED)
        # Override reset to work from aborted too
        self.set_writeable_in(
            self.field_registry.get_field("reset"),
            ss.FAULT, ss.DISABLED, ss.ABORTED, ss.ARMED)
        # Allow Parts to report their status
        self.info_registry.add_reportable(
            RunProgressInfo, self.update_completed_steps)
        # Allow Parts to request extra items from configure
        self.info_registry.add_reportable(
            ConfigureParamsInfo, self.update_configure_params)

    def do_reset(self):
        # Reset the step counters as well as the base controller state
        super(RunnableController, self).do_reset()
        self.configured_steps.set_value(0)
        self.completed_steps.set_value(0)
        self.total_steps.set_value(0)

    def update_configure_params(self, part=None, info=None):
        # type: (Part, ConfigureParamsInfo) -> None
        """Tell controller part needs different things passed to Configure"""
        with self.changes_squashed:
            # Update the dict
            if part:
                self.part_configure_params[part] = info
            # No process yet, so don't do this yet
            if self.process is None:
                return
            # Get the model of our configure method as the starting point
            configure_model = MethodModel.from_callable(self.configure)
            # These will not be inserted as the already exist
            ignored = tuple(ConfigureHook.call_types)
            # Re-calculate the following
            required = []
            takes_elements = OrderedDict()
            defaults = OrderedDict()
            # First do the required arguments
            for k in configure_model.takes.required:
                required.append(k)
                takes_elements[k] = configure_model.takes.elements[k]
            for part in self.parts.values():
                try:
                    info = self.part_configure_params[part]
                except KeyError:
                    continue
                for k in info.required:
                    if k not in required and k not in ignored:
                        required.append(k)
                        takes_elements[k] = info.metas[k]
            # Now the default and optional
            for k in configure_model.takes.elements:
                if k not in required:
                    takes_elements[k] = configure_model.takes.elements[k]
            for part in self.parts.values():
                try:
                    info = self.part_configure_params[part]
                except KeyError:
                    continue
                for k in info.metas:
                    if k not in required and k not in ignored:
                        takes_elements[k] = info.metas[k]
                        if k in info.defaults:
                            defaults[k] = info.defaults[k]
            # Set the values
            configure_model.takes.set_elements(takes_elements)
            configure_model.takes.set_required(required)
            configure_model.set_defaults(defaults)
            # Update methods from the new metas
            self._block.configure.set_takes(configure_model.takes)
            self._block.configure.set_defaults(configure_model.defaults)
            # Now make a validate model with returns
            validate_model = MethodModel.from_dict(configure_model.to_dict())
            returns = MapMeta.from_dict(validate_model.takes.to_dict())
            for v in returns.elements.values():
                v.set_writeable(False)
            self._block.validate.set_takes(validate_model.takes)
            self._block.validate.set_defaults(validate_model.defaults)
            self._block.validate.set_returns(returns)

    def update_block_endpoints(self):
        # Keep configure()/validate() signatures in sync after block changes
        super(RunnableController, self).update_block_endpoints()
        self.update_configure_params()

    def _part_params(self, part_contexts=None, params=None):
        # type: (Dict[Part, Context], ConfigureParams) -> PartContextParams
        # Yield (part, context, kwargs) triples for hook fan-out
        if part_contexts is None:
            part_contexts = self.part_contexts
        if params is None:
            params = self.configure_params
        for part, context in part_contexts.items():
            args = {}
            for k in params.call_types:
                args[k] = getattr(params, k)
            yield part, context, args

    # This will be serialized, so maintain camelCase for axesToMove
    # noinspection PyPep8Naming
    @add_call_types
    def validate(self, generator, axesToMove=None, **kwargs):
        # type: (AGenerator, AAxesToMove, **Any) -> AConfigureParams
        """Validate configuration parameters and return validated parameters.

        Doesn't take device state into account so can be run in any state
        """
        iterations = 10
        # We will return this, so make sure we fill in defaults
        for k, default in self._block.configure.defaults.items():
            if k not in kwargs:
                kwargs[k] = default
        # The validated parameters we will eventually return
        params = ConfigureParams(generator, axesToMove, **kwargs)
        # Make some tasks just for validate
        part_contexts = self.create_part_contexts()
        # Get any status from all parts
        status_part_info = self.run_hooks(
            ReportStatusHook(p, c) for p, c in part_contexts.items())
        while iterations > 0:
            # Try up to 10 times to get a valid set of parameters
            iterations -= 1
            # Validate the params with all the parts
            validate_part_info = self.run_hooks(
                ValidateHook(p, c, status_part_info, **kwargs)
                for p, c, kwargs in self._part_params(part_contexts, params))
            tweaks = ParameterTweakInfo.filter_values(validate_part_info)
            if tweaks:
                # Apply each part-requested tweak and loop to re-validate
                for tweak in tweaks:
                    deserialized = self._block.configure.takes.elements[
                        tweak.parameter].validate(tweak.value)
                    setattr(params, tweak.parameter, deserialized)
                    self.log.debug(
                        "Tweaking %s to %s", tweak.parameter, deserialized)
            else:
                # Consistent set, just return the params
                return params
        raise ValueError("Could not get a consistent set of parameters")

    def abortable_transition(self, state):
        # Transition, but raise AbortedError first if we were just aborted
        with self._lock:
            # We might have been aborted just now, so this will fail
            # with an AbortedError if we were
            self_ctx = self.part_contexts.get(self, None)
            if self_ctx:
                self_ctx.sleep(0)
            self.transition(state)

    # This will be serialized, so maintain camelCase for axesToMove
    # noinspection PyPep8Naming
    @add_call_types
    def configure(self, generator, axesToMove=None, **kwargs):
        # type: (AGenerator, AAxesToMove, **Any) -> None
        """Validate the params then configure the device ready for run().

        Try to prepare the device as much as possible so that run() is quick to
        start, this may involve potentially long running activities like moving
        motors.

        Normally it will return in Armed state. If the user aborts then it will
        return in Aborted state. If something goes wrong it will return in Fault
        state. If the user disables then it will return in Disabled state.
        """
        params = self.validate(generator, axesToMove, **kwargs)
        try:
            self.transition(ss.CONFIGURING)
            self.do_configure(params)
            self.abortable_transition(ss.ARMED)
        except AbortedError:
            self.abort_queue.put(None)
            raise
        except Exception as e:
            self.go_to_error_state(e)
            raise

    def do_configure(self, params):
        # type: (ConfigureParams) -> None
        # Clear out any old part contexts now rather than letting gc do it
        for context in self.part_contexts.values():
            context.unsubscribe_all()
        # These are the part tasks that abort() and pause() will operate on
        self.part_contexts = self.create_part_contexts()
        # So add one for ourself too so we can be aborted
        self.part_contexts[self] = Context(self.process)
        # Store the params for use in seek()
        self.configure_params = params
        # This will calculate what we need from the generator, possibly a long
        # call
        params.generator.prepare()
        # Set the steps attributes that we will do across many run() calls
        self.total_steps.set_value(params.generator.size)
        self.completed_steps.set_value(0)
        self.configured_steps.set_value(0)
        # TODO: We can be cleverer about this and support a different number
        # of steps per run for each run by examining the generator structure
        self.steps_per_run = get_steps_per_run(
            params.generator, params.axesToMove)
        # Get any status from all parts
        part_info = self.run_hooks(ReportStatusHook(p, c)
                                   for p, c in self.part_contexts.items())
        # Run the configure command on all parts, passing them info from
        # ReportStatus. Parts should return any reporting info for PostConfigure
        completed_steps = 0
        steps_to_do = self.steps_per_run
        part_info = self.run_hooks(
            ConfigureHook(p, c, completed_steps, steps_to_do, part_info, **kw)
            for p, c, kw in self._part_params())
        # Take configuration info and reflect it as attribute updates
        self.run_hooks(PostConfigureHook(p, c, part_info)
                       for p, c in self.part_contexts.items())
        # Update the completed and configured steps
        self.configured_steps.set_value(steps_to_do)
        # Reset the progress of all child parts
        self.progress_updates = {}
        self.resume_queue = Queue()

    @add_call_types
    def run(self):
        # type: () -> None
        """Run a device where configure() has already be called

        Normally it will return in Ready state. If setup for multiple-runs with
        a single configure() then it will return in Armed state. If the user
        aborts then it will return in Aborted state. If something goes wrong it
        will return in Fault state. If the user disables then it will return in
        Disabled state.
        """
        if self.configured_steps.value < self.total_steps.value:
            next_state = ss.ARMED
        else:
            next_state = ss.READY
        try:
            self.transition(ss.RUNNING)
            hook = RunHook
            going = True
            while going:
                try:
                    self.do_run(hook)
                except AbortedError:
                    # Tell the abort()/pause() caller the stop has landed,
                    # then wait to hear whether to resume or really abort
                    self.abort_queue.put(None)
                    # Wait for a response on the resume_queue
                    should_resume = self.resume_queue.get()
                    if should_resume:
                        # we need to resume
                        hook = ResumeHook
                        self.log.debug("Resuming run")
                    else:
                        # we don't need to resume, just drop out
                        raise
                else:
                    going = False
            self.abortable_transition(next_state)
        except AbortedError:
            raise
        except Exception as e:
            self.go_to_error_state(e)
            raise

    def do_run(self, hook):
        # type: (Type[ControllerHook]) -> None
        # Run (or resume) the current batch of steps, then prime the next one
        self.run_hooks(hook(p, c) for p, c in self.part_contexts.items())
        self.abortable_transition(ss.POSTRUN)
        completed_steps = self.configured_steps.value
        if completed_steps < self.total_steps.value:
            # More batches to go: re-arm the parts for the next steps_per_run
            steps_to_do = self.steps_per_run
            part_info = self.run_hooks(
                ReportStatusHook(p, c) for p, c in self.part_contexts.items())
            self.completed_steps.set_value(completed_steps)
            self.run_hooks(
                PostRunArmedHook(
                    p, c, completed_steps, steps_to_do, part_info, **kwargs)
                for p, c, kwargs in self._part_params())
            self.configured_steps.set_value(completed_steps + steps_to_do)
        else:
            self.run_hooks(
                PostRunReadyHook(p, c) for p, c in self.part_contexts.items())

    def update_completed_steps(self, part, completed_steps):
        # type: (object, RunProgressInfo) -> None
        with self._lock:
            # Update
            self.progress_updates[part] = completed_steps.steps
            # Overall progress is the slowest part's progress
            min_completed_steps = min(self.progress_updates.values())
            if min_completed_steps > self.completed_steps.value:
                self.completed_steps.set_value(min_completed_steps)

    @add_call_types
    def abort(self):
        # type: () -> None
        """Abort the current operation and block until aborted

        Normally it will return in Aborted state. If something goes wrong it
        will return in Fault state. If the user disables then it will return in
        Disabled state.
        """
        # Tell _call_do_run not to resume
        if self.resume_queue:
            self.resume_queue.put(False)
        self.try_aborting_function(ss.ABORTING, ss.ABORTED, self.do_abort)

    def do_abort(self):
        # type: () -> None
        self.run_hooks(
            AbortHook(p, c) for p, c in self.create_part_contexts().items())

    def try_aborting_function(self, start_state, end_state, func, *args):
        # type: (str, str, Callable[..., None], *Any) -> None
        try:
            # To make the running function fail we need to stop any running
            # contexts (if running a hook) or make transition() fail with
            # AbortedError. Both of these are accomplished here
            with self._lock:
                original_state = self.state.value
                self.abort_queue = Queue()
                self.transition(start_state)
                for context in self.part_contexts.values():
                    context.stop()
            if original_state not in (ss.READY, ss.ARMED, ss.PAUSED):
                # Something was running, let it finish aborting
                try:
                    self.abort_queue.get(timeout=DEFAULT_TIMEOUT)
                except TimeoutError:
                    self.log.warning("Timeout waiting while %s" % start_state)
            with self._lock:
                # Now we've waited for a while we can remove the error state
                # for transition in case a hook triggered it rather than a
                # transition
                self_ctx = self.part_contexts.get(self, None)
                if self_ctx:
                    self_ctx.ignore_stops_before_now()
            func(*args)
            self.abortable_transition(end_state)
        except AbortedError:
            self.abort_queue.put(None)
            raise
        except Exception as e:  # pylint:disable=broad-except
            self.go_to_error_state(e)
            raise

    # Allow camelCase as this will be serialized
    # noinspection PyPep8Naming
    @add_call_types
    def pause(self, completedSteps=0):
        # type: (ACompletedSteps) -> None
        """Pause a run() so that resume() can be called later, or seek within
        an Armed or Paused state.

        The original call to run() will not be interrupted by pause(), it will
        wait until the scan completes or is aborted.

        Normally it will return in Paused state. If the user aborts then it will
        return in Aborted state. If something goes wrong it will return in Fault
        state. If the user disables then it will return in Disabled state.
        """
        current_state = self.state.value
        # completedSteps <= 0 means "pause at the current step"
        if completedSteps <= 0:
            completed_steps = self.completed_steps.value
        else:
            completed_steps = completedSteps
        if current_state == ss.RUNNING:
            next_state = ss.PAUSED
        else:
            next_state = current_state
        assert completed_steps < self.total_steps.value, \
            "Cannot seek to after the end of the scan"
        self.try_aborting_function(
            ss.SEEKING, next_state, self.do_pause, completed_steps)

    def do_pause(self, completed_steps):
        # type: (int) -> None
        self.run_hooks(
            PauseHook(p, c) for p, c in self.create_part_contexts().items())
        # Re-arm from the start of the batch containing completed_steps
        in_run_steps = completed_steps % self.steps_per_run
        steps_to_do = self.steps_per_run - in_run_steps
        part_info = self.run_hooks(
            ReportStatusHook(p, c) for p, c in self.part_contexts.items())
        self.completed_steps.set_value(completed_steps)
        self.run_hooks(
            SeekHook(p, c, completed_steps, steps_to_do, part_info, **kwargs)
            for p, c, kwargs in self._part_params())
        self.configured_steps.set_value(completed_steps + steps_to_do)

    @add_call_types
    def resume(self):
        # type: () -> None
        """Resume a paused scan.

        Normally it will return in Running state. If something goes wrong it
        will return in Fault state.
        """
        self.transition(ss.RUNNING)
        self.resume_queue.put(True)
        # self.run will now take over

    def do_disable(self):
        # type: () -> None
        # Abort anything that is currently running, but don't wait
        for context in self.part_contexts.values():
            context.stop()
        if self.resume_queue:
            self.resume_queue.put(False)
        super(RunnableController, self).do_disable()
| 43.772031 | 80 | 0.619983 |
4367c8166ee51a3f70b6fdcda012e143daf38ee9 | 2,526 | py | Python | sdk/media/azure-mgmt-media/setup.py | moovy2/azure-sdk-for-python | 6b0495dc9917d47a7264f26cbd3221d43461a537 | [
"MIT"
] | null | null | null | sdk/media/azure-mgmt-media/setup.py | moovy2/azure-sdk-for-python | 6b0495dc9917d47a7264f26cbd3221d43461a537 | [
"MIT"
] | null | null | null | sdk/media/azure-mgmt-media/setup.py | moovy2/azure-sdk-for-python | 6b0495dc9917d47a7264f26cbd3221d43461a537 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
#-------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#--------------------------------------------------------------------------
import re
import os.path
from io import open
from setuptools import find_packages, setup
# Change the PACKAGE_NAME only to change folder and different name
PACKAGE_NAME = "azure-mgmt-media"
PACKAGE_PPRINT_NAME = "Media Services"

# a-b-c => a/b/c
package_folder_path = PACKAGE_NAME.replace('-', '/')
# a-b-c => a.b.c
namespace_name = PACKAGE_NAME.replace('-', '.')

# Version extraction inspired from 'requests'
# Prefer version.py inside the package folder, fall back to _version.py
with open(os.path.join(package_folder_path, 'version.py')
          if os.path.exists(os.path.join(package_folder_path, 'version.py'))
          else os.path.join(package_folder_path, '_version.py'), 'r') as fd:
    version = re.search(r'^VERSION\s*=\s*[\'"]([^\'"]*)[\'"]',
                        fd.read(), re.MULTILINE).group(1)

if not version:
    raise RuntimeError('Cannot find version information')

# README + CHANGELOG together form the PyPI long description
with open('README.md', encoding='utf-8') as f:
    readme = f.read()
with open('CHANGELOG.md', encoding='utf-8') as f:
    changelog = f.read()

setup(
    name=PACKAGE_NAME,
    version=version,
    description='Microsoft Azure {} Client Library for Python'.format(PACKAGE_PPRINT_NAME),
    long_description=readme + '\n\n' + changelog,
    long_description_content_type='text/markdown',
    license='MIT License',
    author='Microsoft Corporation',
    author_email='azpysdkhelp@microsoft.com',
    url='https://github.com/Azure/azure-sdk-for-python',
    classifiers=[
        'Development Status :: 5 - Production/Stable',
        'Programming Language :: Python',
        'Programming Language :: Python :: 3 :: Only',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.7',
        'Programming Language :: Python :: 3.8',
        'Programming Language :: Python :: 3.9',
        'Programming Language :: Python :: 3.10',
        'License :: OSI Approved :: MIT License',
    ],
    zip_safe=False,
    packages=find_packages(exclude=[
        'tests',
        # Exclude packages that will be covered by PEP420 or nspkg
        'azure',
        'azure.mgmt',
    ]),
    install_requires=[
        'msrest>=0.6.21',
        'azure-common~=1.1',
        'azure-mgmt-core>=1.2.0,<2.0.0',
    ],
    python_requires=">=3.7",
)
| 34.135135 | 91 | 0.605305 |
228b6b50192dbc7367e482f85e610619e8b7ce7f | 849 | py | Python | app/main/forms.py | abdirahman-ahmednoor/pitcher | 83c9f3caf033e422aafb781afbff186dbf3980f1 | [
"MIT"
] | null | null | null | app/main/forms.py | abdirahman-ahmednoor/pitcher | 83c9f3caf033e422aafb781afbff186dbf3980f1 | [
"MIT"
] | null | null | null | app/main/forms.py | abdirahman-ahmednoor/pitcher | 83c9f3caf033e422aafb781afbff186dbf3980f1 | [
"MIT"
] | null | null | null | from flask_wtf import FlaskForm
from wtforms import StringField, SelectField, TextAreaField, SubmitField
from wtforms.validators import Required
class UpdateProfile(FlaskForm):
    """Form for editing a user's profile bio."""
    # Free-text bio; required.
    bio = TextAreaField('Write a brief bio about you.', validators = [Required()])
    submit = SubmitField('Save')
class PitchForm(FlaskForm):
    """Form for submitting a new pitch (title, category, body)."""
    title = StringField('Title', validators=[Required()])
    # Fixed set of categories; (value, label) pairs are identical here.
    category = SelectField('Category', choices= [('Technology', 'Technology'), ('Business', 'Business'), ('Programming', 'Programming'),('Social', 'Social'), ('Religion', 'Religion'), ('Sports', 'Sports')], validators=[Required()])
    post = TextAreaField('Your pitch', validators=[Required()])
    submit = SubmitField('Pitch')
class CommentForm(FlaskForm):
    """Form for leaving a comment on a pitch."""
    comment = TextAreaField('Leave a comment', validators=[Required()])
    submit = SubmitField('Comment')
0c5fef4469184556d3fc84322ea22348960a9cc4 | 50,039 | py | Python | src/sage/schemes/elliptic_curves/ell_field.py | bopopescu/sage | 2d495be78e0bdc7a0a635454290b27bb4f5f70f0 | [
"BSL-1.0"
] | 3 | 2019-07-15T13:48:24.000Z | 2019-11-08T12:31:43.000Z | src/sage/schemes/elliptic_curves/ell_field.py | bopopescu/sage | 2d495be78e0bdc7a0a635454290b27bb4f5f70f0 | [
"BSL-1.0"
] | 2 | 2018-10-30T13:40:20.000Z | 2020-07-23T12:13:30.000Z | src/sage/schemes/elliptic_curves/ell_field.py | bopopescu/sage | 2d495be78e0bdc7a0a635454290b27bb4f5f70f0 | [
"BSL-1.0"
] | 1 | 2020-07-23T10:29:58.000Z | 2020-07-23T10:29:58.000Z | r"""
Elliptic curves over a general field
This module defines the class ``EllipticCurve_field``, based on
``EllipticCurve_generic``, for elliptic curves over general fields.
"""
#*****************************************************************************
# Copyright (C) 2006 William Stein <wstein@gmail.com>
#
# Distributed under the terms of the GNU General Public License (GPL)
#
# http://www.gnu.org/licenses/
#*****************************************************************************
from __future__ import absolute_import
from . import ell_generic
import sage.rings.all as rings
from sage.rings.complex_field import is_ComplexField
from sage.rings.real_mpfr import is_RealField
from .constructor import EllipticCurve
from sage.schemes.elliptic_curves.ell_point import EllipticCurvePoint_field
from .ell_curve_isogeny import EllipticCurveIsogeny, isogeny_codomain_from_kernel
class EllipticCurve_field(ell_generic.EllipticCurve_generic):
    # Over a field the base ring *is* the base field, so alias the accessor.
    base_field = ell_generic.EllipticCurve_generic.base_ring

    # Point class used for points on curves over general fields.
    _point = EllipticCurvePoint_field

    # Twists: rewritten by John Cremona as follows:
    #
    # Quadratic twist allowed except when char=2, j=0
    # Quartic twist allowed only if j=1728!=0 (so char!=2,3)
    # Sextic twist allowed only if j=0!=1728 (so char!=2,3)
    #
    # More complicated twists exist in theory for char=2,3 and
    # j=0=1728, but I have never worked them out or seen them used!
    #
def genus(self):
    """
    Return the genus of this curve, which is always 1.

    An elliptic curve is by definition a smooth projective genus-one
    curve, so this is a constant.

    EXAMPLES::

        sage: E = EllipticCurve(GF(3), [0, -1, 0, -346, 2652])
        sage: E.genus()
        1
        sage: R = FractionField(QQ['z'])
        sage: E = EllipticCurve(R, [0, -1, 0, -346, 2652])
        sage: E.genus()
        1
    """
    return rings.ZZ.one()
r"""
Twists: rewritten by John Cremona as follows:
The following twists are implemented:
- Quadratic twist: except when char=2 and `j=0`.
- Quartic twist: only if `j=1728\not=0` (so not if char=2,3).
- Sextic twist: only if `j=0\not=1728` (so not if char=2,3).
More complicated twists exist in theory for char=2,3 and j=0=1728,
but are not implemented.
"""
def quadratic_twist(self, D=None):
    r"""
    Return the quadratic twist of this curve by ``D``.

    INPUT:

    - ``D`` (default None) the twisting parameter (see below).

    In characteristics other than 2, `D` must be nonzero, and the
    twist is isomorphic to self after adjoining `\sqrt(D)` to the
    base.

    In characteristic 2, `D` is arbitrary, and the twist is
    isomorphic to self after adjoining a root of `x^2+x+D` to the
    base.

    In characteristic 2 when `j=0`, this is not implemented.

    If the base field `F` is finite, `D` need not be specified,
    and the curve returned is the unique curve (up to isomorphism)
    defined over `F` isomorphic to the original curve over the
    quadratic extension of `F` but not over `F` itself. Over
    infinite fields, an error is raised if `D` is not given.

    EXAMPLES::

        sage: E = EllipticCurve([GF(1103)(1), 0, 0, 107, 340]); E
        Elliptic Curve defined by y^2 + x*y = x^3 + 107*x + 340 over Finite Field of size 1103
        sage: F=E.quadratic_twist(-1); F
        Elliptic Curve defined by y^2 = x^3 + 1102*x^2 + 609*x + 300 over Finite Field of size 1103
        sage: E.is_isomorphic(F)
        False
        sage: E.is_isomorphic(F,GF(1103^2,'a'))
        True

    A characteristic 2 example::

        sage: E=EllipticCurve(GF(2),[1,0,1,1,1])
        sage: E1=E.quadratic_twist(1)
        sage: E.is_isomorphic(E1)
        False
        sage: E.is_isomorphic(E1,GF(4,'a'))
        True

    Over finite fields, the twisting parameter may be omitted::

        sage: k.<a> = GF(2^10)
        sage: E = EllipticCurve(k,[a^2,a,1,a+1,1])
        sage: Et = E.quadratic_twist()
        sage: Et # random (only determined up to isomorphism)
        Elliptic Curve defined by y^2 + x*y = x^3 + (a^7+a^4+a^3+a^2+a+1)*x^2 + (a^8+a^6+a^4+1) over Finite Field in a of size 2^10
        sage: E.is_isomorphic(Et)
        False
        sage: E.j_invariant()==Et.j_invariant()
        True

        sage: p=next_prime(10^10)
        sage: k = GF(p)
        sage: E = EllipticCurve(k,[1,2,3,4,5])
        sage: Et = E.quadratic_twist()
        sage: Et # random (only determined up to isomorphism)
        Elliptic Curve defined by y^2 = x^3 + 7860088097*x^2 + 9495240877*x + 3048660957 over Finite Field of size 10000000019
        sage: E.is_isomorphic(Et)
        False
        sage: k2 = GF(p^2,'a')
        sage: E.change_ring(k2).is_isomorphic(Et.change_ring(k2))
        True
    """
    K=self.base_ring()
    char=K.characteristic()
    if D is None:
        # No twisting parameter supplied: over a finite field pick a
        # canonical-enough nontrivial one at random; otherwise error.
        if K.is_finite():
            x = rings.polygen(K)
            if char==2:
                # We find D such that x^2+x+D is irreducible. If the
                # degree is odd we can take D=1; otherwise it suffices to
                # consider odd powers of a generator.
                D = K(1)
                if K.degree()%2==0:
                    D = K.gen()
                    a = D**2
                    while len((x**2+x+D).roots())>0:
                        D *= a
            else:
                # We could take a multiplicative generator but
                # that might be expensive to compute; otherwise
                # half the elements will do
                D = K.random_element()
                while len((x**2-D).roots())>0:
                    D = K.random_element()
        else:
            raise ValueError("twisting parameter D must be specified over infinite fields.")
    else:
        try:
            D=K(D)
        except ValueError:
            raise ValueError("twisting parameter D must be in the base field.")
        if char!=2 and D.is_zero():
            raise ValueError("twisting parameter D must be nonzero when characteristic is not 2")
    if char!=2:
        b2,b4,b6,b8=self.b_invariants()
        # E is isomorphic to [0,b2,0,8*b4,16*b6]
        return EllipticCurve(K,[0,b2*D,0,8*b4*D**2,16*b6*D**3])
    # now char==2
    if self.j_invariant() !=0: # iff a1!=0
        a1,a2,a3,a4,a6=self.ainvs()
        # Move to the normal form [1,A2,0,0,A6] before twisting a2 by D.
        E0=self.change_weierstrass_model(a1,a3/a1,0,(a1**2*a4+a3**2)/a1**3)
        # which has the form = [1,A2,0,0,A6]
        assert E0.a1()==K(1)
        assert E0.a3()==K(0)
        assert E0.a4()==K(0)
        return EllipticCurve(K,[1,E0.a2()+D,0,0,E0.a6()])
    else:
        raise ValueError("Quadratic twist not implemented in char 2 when j=0")
def two_torsion_rank(self):
    r"""
    Return the dimension of the 2-torsion subgroup of `E(K)`.

    This will be 0, 1 or 2.

    EXAMPLES::

        sage: E=EllipticCurve('11a1')
        sage: E.two_torsion_rank()
        0
        sage: K.<alpha>=QQ.extension(E.division_polynomial(2).monic())
        sage: E.base_extend(K).two_torsion_rank()
        1
        sage: E.reduction(53).two_torsion_rank()
        2

    ::

        sage: E = EllipticCurve('14a1')
        sage: E.two_torsion_rank()
        1
        sage: K.<alpha>=QQ.extension(E.division_polynomial(2).monic().factor()[1][0])
        sage: E.base_extend(K).two_torsion_rank()
        2

    ::

        sage: EllipticCurve('15a1').two_torsion_rank()
        2
    """
    two = rings.Integer(2)
    # The 2-torsion points rational over K are the roots of the
    # 2-division polynomial together with the point at infinity.
    group_order = rings.Integer(len(self.division_polynomial(two).roots()) + 1)
    # The 2-torsion subgroup has order 2^rank, so take the 2-adic valuation.
    return group_order.ord(two)
def quartic_twist(self, D):
    r"""
    Return the quartic twist of this curve by `D`.

    INPUT:

    - ``D`` (must be nonzero) -- the twisting parameter..

    .. note::

       The characteristic must not be 2 or 3, and the `j`-invariant must be 1728.

    EXAMPLES::

        sage: E=EllipticCurve_from_j(GF(13)(1728)); E
        Elliptic Curve defined by y^2 = x^3 + x over Finite Field of size 13
        sage: E1=E.quartic_twist(2); E1
        Elliptic Curve defined by y^2 = x^3 + 5*x over Finite Field of size 13
        sage: E.is_isomorphic(E1)
        False
        sage: E.is_isomorphic(E1,GF(13^2,'a'))
        False
        sage: E.is_isomorphic(E1,GF(13^4,'a'))
        True
    """
    K = self.base_ring()
    D = K(D)
    # Quartic twists only exist for j = 1728 away from characteristics 2, 3.
    if K.characteristic() in (2, 3):
        raise ValueError("Quartic twist not defined in chars 2,3")
    if self.j_invariant() != K(1728):
        raise ValueError("Quartic twist not defined when j!=1728")
    if D.is_zero():
        raise ValueError("quartic twist requires a nonzero argument")
    c4, c6 = self.c_invariants()
    # When j = 1728 we have c6 = 0 and E is isomorphic to [0,0,0,-27*c4,0];
    # twisting multiplies the a4-coefficient by D.
    assert c6 == 0
    return EllipticCurve(K, [0, 0, 0, -27*c4*D, 0])
def sextic_twist(self, D):
    r"""
    Return the sextic twist of this curve by `D`.

    INPUT:

    - ``D`` (must be nonzero) -- the twisting parameter..

    .. note::

       The characteristic must not be 2 or 3, and the `j`-invariant must be 0.

    EXAMPLES::

        sage: E=EllipticCurve_from_j(GF(13)(0)); E
        Elliptic Curve defined by y^2 = x^3 + 1 over Finite Field of size 13
        sage: E1=E.sextic_twist(2); E1
        Elliptic Curve defined by y^2 = x^3 + 11 over Finite Field of size 13
        sage: E.is_isomorphic(E1)
        False
        sage: E.is_isomorphic(E1,GF(13^2,'a'))
        False
        sage: E.is_isomorphic(E1,GF(13^4,'a'))
        False
        sage: E.is_isomorphic(E1,GF(13^6,'a'))
        True
    """
    K=self.base_ring()
    char=K.characteristic()
    D=K(D)
    # Sextic twists only exist for j = 0 away from characteristics 2, 3.
    if char==2 or char==3:
        raise ValueError("Sextic twist not defined in chars 2,3")
    if self.j_invariant() !=K(0):
        raise ValueError("Sextic twist not defined when j!=0")
    if D.is_zero():
        raise ValueError("Sextic twist requires a nonzero argument")
    c4,c6=self.c_invariants()
    # E is isomorphic to [0,0,0,0,-54*c6]; twisting scales a6 by D.
    assert c4==0
    return EllipticCurve(K,[0,0,0,0,-54*c6*D])
def is_quadratic_twist(self, other):
    r"""
    Determine whether this curve is a quadratic twist of another.

    INPUT:

    - ``other`` -- an elliptic curves with the same base field as self.

    OUTPUT:

    Either 0, if the curves are not quadratic twists, or `D` if
    ``other`` is ``self.quadratic_twist(D)`` (up to isomorphism).
    If ``self`` and ``other`` are isomorphic, returns 1.

    If the curves are defined over `\mathbb{Q}`, the output `D` is
    a squarefree integer.

    .. note::

       Not fully implemented in characteristic 2, or in
       characteristic 3 when both `j`-invariants are 0.

    EXAMPLES::

        sage: E = EllipticCurve('11a1')
        sage: Et = E.quadratic_twist(-24)
        sage: E.is_quadratic_twist(Et)
        -6

        sage: E1=EllipticCurve([0,0,1,0,0])
        sage: E1.j_invariant()
        0
        sage: E2=EllipticCurve([0,0,0,0,2])
        sage: E1.is_quadratic_twist(E2)
        2
        sage: E1.is_quadratic_twist(E1)
        1
        sage: type(E1.is_quadratic_twist(E1)) == type(E1.is_quadratic_twist(E2)) #trac 6574
        True

    ::

        sage: E1=EllipticCurve([0,0,0,1,0])
        sage: E1.j_invariant()
        1728
        sage: E2=EllipticCurve([0,0,0,2,0])
        sage: E1.is_quadratic_twist(E2)
        0
        sage: E2=EllipticCurve([0,0,0,25,0])
        sage: E1.is_quadratic_twist(E2)
        5

    ::

        sage: F = GF(101)
        sage: E1 = EllipticCurve(F,[4,7])
        sage: E2 = E1.quadratic_twist()
        sage: D = E1.is_quadratic_twist(E2); D!=0
        True
        sage: F = GF(101)
        sage: E1 = EllipticCurve(F,[4,7])
        sage: E2 = E1.quadratic_twist()
        sage: D = E1.is_quadratic_twist(E2)
        sage: E1.quadratic_twist(D).is_isomorphic(E2)
        True
        sage: E1.is_isomorphic(E2)
        False
        sage: F2 = GF(101^2,'a')
        sage: E1.change_ring(F2).is_isomorphic(E2.change_ring(F2))
        True

    A characteristic 3 example::

        sage: F = GF(3^5,'a')
        sage: E1 = EllipticCurve_from_j(F(1))
        sage: E2 = E1.quadratic_twist(-1)
        sage: D = E1.is_quadratic_twist(E2); D!=0
        True
        sage: E1.quadratic_twist(D).is_isomorphic(E2)
        True

    ::

        sage: E1 = EllipticCurve_from_j(F(0))
        sage: E2 = E1.quadratic_twist()
        sage: D = E1.is_quadratic_twist(E2); D
        1
        sage: E1.is_isomorphic(E2)
        True
    """
    from sage.schemes.elliptic_curves.ell_generic import is_EllipticCurve
    E = self
    F = other
    if not is_EllipticCurve(E) or not is_EllipticCurve(F):
        raise ValueError("arguments are not elliptic curves")
    K = E.base_ring()
    zero = K.zero()
    # Twists share a base field and a j-invariant; otherwise return 0.
    if not K == F.base_ring():
        return zero
    j=E.j_invariant()
    if j != F.j_invariant():
        return zero
    if E.is_isomorphic(F):
        if K is rings.QQ:
            return rings.ZZ(1)
        return K.one()
    char=K.characteristic()
    if char==2:
        raise NotImplementedError("not implemented in characteristic 2")
    elif char==3:
        if j==0:
            raise NotImplementedError("not implemented in characteristic 3 for curves of j-invariant 0")
        D = E.b2()/F.b2()
    else:
        # now char!=2,3:
        c4E,c6E = E.c_invariants()
        c4F,c6F = F.c_invariants()
        if j==0:
            # j=0: twisting parameter is determined only up to cubes.
            um = c6E/c6F
            x=rings.polygen(K)
            ulist=(x**3-um).roots(multiplicities=False)
            if len(ulist)==0:
                D = zero
            else:
                D = ulist[0]
        elif j==1728:
            # j=1728: twisting parameter is determined only up to squares.
            um=c4E/c4F
            x=rings.polygen(K)
            ulist=(x**2-um).roots(multiplicities=False)
            if len(ulist)==0:
                D = zero
            else:
                D = ulist[0]
        else:
            D = (c6E*c4F)/(c6F*c4E)
    # Normalization of output:
    if D.is_zero():
        return D
    if K is rings.QQ:
        D = D.squarefree_part()
    assert E.quadratic_twist(D).is_isomorphic(F)
    return D
def is_quartic_twist(self, other):
    r"""
    Determine whether this curve is a quartic twist of another.

    INPUT:

    - ``other`` -- an elliptic curves with the same base field as self.

    OUTPUT:

    Either 0, if the curves are not quartic twists, or `D` if
    ``other`` is ``self.quartic_twist(D)`` (up to isomorphism).
    If ``self`` and ``other`` are isomorphic, returns 1.

    .. note::

       Not fully implemented in characteristics 2 or 3.

    EXAMPLES::

        sage: E = EllipticCurve_from_j(GF(13)(1728))
        sage: E1 = E.quartic_twist(2)
        sage: D = E.is_quartic_twist(E1); D!=0
        True
        sage: E.quartic_twist(D).is_isomorphic(E1)
        True

    ::

        sage: E = EllipticCurve_from_j(1728)
        sage: E1 = E.quartic_twist(12345)
        sage: D = E.is_quartic_twist(E1); D
        15999120
        sage: (D/12345).is_perfect_power(4)
        True
    """
    from sage.schemes.elliptic_curves.ell_generic import is_EllipticCurve
    if not is_EllipticCurve(self) or not is_EllipticCurve(other):
        raise ValueError("arguments are not elliptic curves")
    K = self.base_ring()
    zero = K.zero()
    # Quartic twists require the same base field and j-invariant 1728.
    if not K == other.base_ring():
        return zero
    j = self.j_invariant()
    if j != other.j_invariant() or j != K(1728):
        return zero
    if self.is_isomorphic(other):
        return K.one()
    p = K.characteristic()
    if p == 2:
        raise NotImplementedError("not implemented in characteristic 2")
    if p == 3:
        raise NotImplementedError("not implemented in characteristic 3")
    # In characteristic != 2,3 the twisting parameter is the ratio of
    # the c4-invariants.
    D = other.c4() / self.c4()
    if D.is_zero():
        return D
    assert self.quartic_twist(D).is_isomorphic(other)
    return D
def is_sextic_twist(self, other):
    r"""
    Determine whether this curve is a sextic twist of another.

    INPUT:

    - ``other`` -- an elliptic curves with the same base field as self.

    OUTPUT:

    Either 0, if the curves are not sextic twists, or `D` if
    ``other`` is ``self.sextic_twist(D)`` (up to isomorphism).
    If ``self`` and ``other`` are isomorphic, returns 1.

    .. note::

       Not fully implemented in characteristics 2 or 3.

    EXAMPLES::

        sage: E = EllipticCurve_from_j(GF(13)(0))
        sage: E1 = E.sextic_twist(2)
        sage: D = E.is_sextic_twist(E1); D!=0
        True
        sage: E.sextic_twist(D).is_isomorphic(E1)
        True

    ::

        sage: E = EllipticCurve_from_j(0)
        sage: E1 = E.sextic_twist(12345)
        sage: D = E.is_sextic_twist(E1); D
        575968320
        sage: (D/12345).is_perfect_power(6)
        True
    """
    from sage.schemes.elliptic_curves.ell_generic import is_EllipticCurve
    if not is_EllipticCurve(self) or not is_EllipticCurve(other):
        raise ValueError("arguments are not elliptic curves")
    K = self.base_ring()
    zero = K.zero()
    # Sextic twists require the same base field and j-invariant 0.
    if not K == other.base_ring():
        return zero
    j = self.j_invariant()
    if j != other.j_invariant() or not j.is_zero():
        return zero
    if self.is_isomorphic(other):
        return K.one()
    p = K.characteristic()
    if p == 2:
        raise NotImplementedError("not implemented in characteristic 2")
    if p == 3:
        raise NotImplementedError("not implemented in characteristic 3")
    # In characteristic != 2,3 the twisting parameter is the ratio of
    # the c6-invariants.
    D = other.c6() / self.c6()
    if D.is_zero():
        return D
    assert self.sextic_twist(D).is_isomorphic(other)
    return D
def descend_to(self, K, f=None):
    r"""
    Given an elliptic curve self defined over a field `L` and a
    subfield `K` of `L`, return all elliptic curves over `K` which
    are isomorphic over `L` to self.

    INPUT:

    - `K` -- a field which embeds into the base field `L` of self.

    - `f` (optional) -- an embedding of `K` into `L`.  Ignored if
      `K` is `\QQ`.

    OUTPUT:

    A list (possibly empty) of elliptic curves defined over `K`
    which are isomorphic to self over `L`, up to isomorphism over
    `K`.

    .. NOTE::

       Currently only implemented over number fields.  To extend
       to other fields of characteristic not 2 or 3, what is
       needed is a method giving the preimages in `K^*/(K^*)^m` of
       an element of the base field, for `m=2,4,6`.

    EXAMPLES::

        sage: E = EllipticCurve([1,2,3,4,5])
        sage: E.descend_to(ZZ)
        Traceback (most recent call last):
        ...
        TypeError: Input must be a field.

    ::

        sage: F.<b> = QuadraticField(23)
        sage: G.<a> = F.extension(x^3+5)
        sage: E = EllipticCurve(j=1728*b).change_ring(G)
        sage: EF = E.descend_to(F); EF
        [Elliptic Curve defined by y^2 = x^3 + (27*b-621)*x + (-1296*b+2484) over Number Field in b with defining polynomial x^2 - 23]
        sage: all([Ei.change_ring(G).is_isomorphic(E) for Ei in EF])
        True

    ::

        sage: L.<a> = NumberField(x^4 - 7)
        sage: K.<b> = NumberField(x^2 - 7, embedding=a^2)
        sage: E = EllipticCurve([a^6,0])
        sage: EK = E.descend_to(K); EK
        [Elliptic Curve defined by y^2 = x^3 + b*x over Number Field in b with defining polynomial x^2 - 7,
         Elliptic Curve defined by y^2 = x^3 + 7*b*x over Number Field in b with defining polynomial x^2 - 7]
        sage: all([Ei.change_ring(L).is_isomorphic(E) for Ei in EK])
        True

    ::

        sage: K.<a> = QuadraticField(17)
        sage: E = EllipticCurve(j = 2*a)
        sage: E.descend_to(QQ)
        []

    TESTS:

    Check that :trac:`16456` is fixed::

        sage: K.<a> = NumberField(x^3-2)
        sage: E = EllipticCurve('11a1').quadratic_twist(2)
        sage: EK = E.change_ring(K)
        sage: EK2 = EK.change_weierstrass_model((a,a,a,a+1))
        sage: EK2.descend_to(QQ)
        [Elliptic Curve defined by y^2 = x^3 + x^2 - 41*x - 199 over Rational Field]

        sage: k.<i> = QuadraticField(-1)
        sage: E = EllipticCurve(k,[0,0,0,1,0])
        sage: E.descend_to(QQ)
        [Elliptic Curve defined by y^2 = x^3 + x over Rational Field,
         Elliptic Curve defined by y^2 = x^3 - 4*x over Rational Field]
    """
    if not K.is_field():
        raise TypeError("Input must be a field.")
    L = self.base_field()
    if L is K:
        return self
    elif L == K:  # number fields can be equal but not identical
        return self.base_extend(K)
    # Construct an embedding f of K in L, and check that the
    # j-invariant is in the image, otherwise return an empty list:
    j = self.j_invariant()
    from sage.rings.all import QQ
    if K == QQ:
        try:
            jK = QQ(j)
        except (ValueError, TypeError):
            return []
    elif f is None:
        # No embedding given: try all embeddings of K into L until one
        # pulls back the j-invariant.
        embeddings = K.embeddings(L)
        if len(embeddings) == 0:
            raise TypeError("Input must be a subfield of the base field of the curve.")
        for g in embeddings:
            try:
                jK = g.preimage(j)
                f = g
                break
            except Exception:
                pass
        if f is None:
            return []
    else:
        # Validate the user-supplied embedding before using it.
        try:
            if f.domain() != K:
                raise ValueError("embedding has wrong domain")
            if f.codomain() != L:
                raise ValueError("embedding has wrong codomain")
        except AttributeError:
            raise ValueError("invalid embedding: %s" % f)
        try:
            jK = f.preimage(j)
        except Exception:
            return []
    # Now we have the j-invariant in K and must find all twists
    # which work, separating the cases of j=0 and j=1728.
    if L.characteristic():
        raise NotImplementedError("Not implemented in positive characteristic")
    if jK == 0:
        t = -54*self.c6()
        try:
            dlist = t.descend_mod_power(K,6)
            # list of d in K such that t/d is in L*^6
        except AttributeError:
            raise NotImplementedError("Not implemented over %s" % L)
        Elist = [EllipticCurve([0,0,0,0,d]) for d in dlist]
    elif jK == 1728:
        t = -27*self.c4()
        try:
            dlist = t.descend_mod_power(K,4)
            # list of d in K such that t/d is in L*^4
        except AttributeError:
            raise NotImplementedError("Not implemented over %s" % L)
        Elist = [EllipticCurve([0,0,0,d,0]) for d in dlist]
    else:
        c4, c6 = self.c_invariants()
        t = c6/c4
        try:
            dlist = t.descend_mod_power(K,2)
            # list of d in K such that t/d is in L*^2
        except AttributeError:
            raise NotImplementedError("Not implemented over %s" % L)
        c = -27*jK/(jK-1728) # =-27c4^3/c6^2
        a4list = [c*d**2 for d in dlist]
        a6list = [2*a4*d for a4,d in zip(a4list,dlist)]
        Elist = [EllipticCurve([0,0,0,a4,a6]) for a4,a6 in zip(a4list,a6list)]
    if K is QQ:
        Elist = [E.minimal_model() for E in Elist]
    return Elist
def isogeny(self, kernel, codomain=None, degree=None, model=None, check=True):
    r"""
    Return an elliptic curve isogeny from self.

    The isogeny can be determined in two ways, either by a
    polynomial or a set of torsion points.  The methods used are:

    - Velu's Formulas: Velu's original formulas for computing
      isogenies.  This algorithm is selected by giving as the
      ``kernel`` parameter a point or a list of points which
      generate a finite subgroup.

    - Kohel's Formulas: Kohel's original formulas for computing
      isogenies.  This algorithm is selected by giving as the
      ``kernel`` parameter a polynomial (or a coefficient list
      (little endian)) which will define the kernel of the
      isogeny.

    INPUT:

    - ``E``         - an elliptic curve, the domain of the isogeny to
                      initialize.

    - ``kernel``    - a kernel, either a point in ``E``, a list of points
                      in ``E``, a univariate kernel polynomial or ``None``.
                      If initiating from a domain/codomain, this must be
                      set to None.  Validity of input is *not* fully checked.

    - ``codomain``  - an elliptic curve (default:None).  If ``kernel`` is
                      None, then this must be the codomain of a separable
                      normalized isogeny, furthermore, ``degree`` must be
                      the degree of the isogeny from ``E`` to ``codomain``.
                      If ``kernel`` is not None, then this must be
                      isomorphic to the codomain of the normalized separable
                      isogeny defined by ``kernel``, in this case, the
                      isogeny is post composed with an isomorphism so that
                      this parameter is the codomain.

    - ``degree``    - an integer (default:None).  If ``kernel`` is None,
                      then this is the degree of the isogeny from ``E`` to
                      ``codomain``.  If ``kernel`` is not None, then this is
                      used to determine whether or not to skip a gcd of the
                      kernel polynomial with the two torsion polynomial of
                      ``E``.

    - ``model``     - a string (default:None).  Only supported
                      variable is "minimal", in which case if``E``
                      is a curve over the rationals or over a
                      number field, then the codomain is a global
                      minimum model where this exists.

    - ``check`` (default: True) does some partial checks that the
                      input is valid (e.g., that the points
                      defined by the kernel polynomial are
                      torsion); however, invalid input can in some
                      cases still pass, since that the points define
                      a group is not checked.

    OUTPUT:

    An isogeny between elliptic curves. This is a morphism of curves.

    EXAMPLES::

        sage: F = GF(2^5, 'alpha'); alpha = F.gen()
        sage: E = EllipticCurve(F, [1,0,1,1,1])
        sage: R.<x> = F[]
        sage: phi = E.isogeny(x+1)
        sage: phi.rational_maps()
        ((x^2 + x + 1)/(x + 1), (x^2*y + x)/(x^2 + 1))

        sage: E = EllipticCurve('11a1')
        sage: P = E.torsion_points()[1]
        sage: E.isogeny(P)
        Isogeny of degree 5 from Elliptic Curve defined by y^2 + y = x^3 - x^2 - 10*x - 20 over Rational Field to Elliptic Curve defined by y^2 + y = x^3 - x^2 - 7820*x - 263580 over Rational Field

        sage: E = EllipticCurve(GF(19),[1,1])
        sage: P = E(15,3); Q = E(2,12);
        sage: (P.order(), Q.order())
        (7, 3)
        sage: phi = E.isogeny([P,Q]); phi
        Isogeny of degree 21 from Elliptic Curve defined by y^2 = x^3 + x + 1 over Finite Field of size 19 to Elliptic Curve defined by y^2 = x^3 + x + 1 over Finite Field of size 19
        sage: phi(E.random_point()) # all points defined over GF(19) are in the kernel
        (0 : 1 : 0)

    Not all polynomials define a finite subgroup (:trac:`6384`)::

        sage: E = EllipticCurve(GF(31),[1,0,0,1,2])
        sage: phi = E.isogeny([14,27,4,1])
        Traceback (most recent call last):
        ...
        ValueError: The polynomial does not define a finite subgroup of the elliptic curve.

    An example in which we construct an invalid morphism, which
    illustrates that the check for correctness of the input is not
    sufficient. (See :trac:`11578`.)::

        sage: R.<x> = QQ[]
        sage: K.<a> = NumberField(x^2-x-1)
        sage: E = EllipticCurve(K, [-13392, -1080432])
        sage: R.<x> = K[]
        sage: phi = E.isogeny( (x-564)*(x - 396/5*a + 348/5) )
        sage: phi.codomain().conductor().norm().factor()
        5^2 * 11^2 * 3271 * 15806939 * 4169267639351
        sage: phi.domain().conductor().norm().factor()
        11^2
    """
    # Delegate all work to the EllipticCurveIsogeny constructor; an
    # AttributeError typically signals an unsupported base field, so
    # re-raise it with a clearer message.
    try:
        return EllipticCurveIsogeny(self, kernel, codomain, degree, model, check=check)
    except AttributeError as e:
        raise RuntimeError("Unable to construct isogeny: %s" % e)
def isogeny_codomain(self, kernel, degree=None):
    r"""
    Return the codomain of the isogeny from self with given
    kernel.

    INPUT:

    - ``kernel`` - Either a list of points in the kernel of the isogeny,
                   or a kernel polynomial (specified as a either a
                   univariate polynomial or a coefficient list.)

    - ``degree`` - an integer, (default:None)  optionally specified degree
                   of the kernel.

    OUTPUT:

    An elliptic curve, the codomain of the separable normalized
    isogeny from this kernel

    EXAMPLES::

        sage: E = EllipticCurve('17a1')
        sage: R.<x> = QQ[]
        sage: E2 = E.isogeny_codomain(x - 11/4); E2
        Elliptic Curve defined by y^2 + x*y + y = x^3 - x^2 - 1461/16*x - 19681/64 over Rational Field
    """
    # Bug fix: the literal ``degree=None`` was previously passed here,
    # silently discarding any caller-supplied degree.  Forward the
    # actual argument instead.
    return isogeny_codomain_from_kernel(self, kernel, degree=degree)
def isogenies_prime_degree(self, l=None, max_l=31):
    """
    Generic code, valid for all fields, for arbitrary prime `l` not equal to the characteristic.

    INPUT:

    - ``l`` -- either None, a prime or a list of primes.
    - ``max_l`` -- a bound on the primes to be tested (ignored unless `l` is None).

    OUTPUT:

    (list) All `l`-isogenies for the given `l` with domain self.

    METHOD:

    Calls the generic function
    ``isogenies_prime_degree()``.  This requires that
    certain operations have been implemented over the base field,
    such as root-finding for univariate polynomials.

    EXAMPLES::

        sage: F = QQbar
        sage: E = EllipticCurve(F, [1,18]); E
        Elliptic Curve defined by y^2 = x^3 + x + 18 over Algebraic Field
        sage: E.isogenies_prime_degree()
        Traceback (most recent call last):
        ...
        NotImplementedError: This code could be implemented for QQbar, but has not been yet.

        sage: F = CC
        sage: E = EllipticCurve(F, [1,18]); E
        Elliptic Curve defined by y^2 = x^3 + 1.00000000000000*x + 18.0000000000000 over Complex Field with 53 bits of precision
        sage: E.isogenies_prime_degree(11)
        Traceback (most recent call last):
        ...
        NotImplementedError: This code could be implemented for general complex fields, but has not been yet.

    Examples over finite fields::

        sage: E = EllipticCurve(GF(next_prime(1000000)), [7,8])
        sage: E.isogenies_prime_degree()
        [Isogeny of degree 2 from Elliptic Curve defined by y^2 = x^3 + 7*x + 8 over Finite Field of size 1000003 to Elliptic Curve defined by y^2 = x^3 + 970389*x + 794257 over Finite Field of size 1000003, Isogeny of degree 2 from Elliptic Curve defined by y^2 = x^3 + 7*x + 8 over Finite Field of size 1000003 to Elliptic Curve defined by y^2 = x^3 + 29783*x + 206196 over Finite Field of size 1000003, Isogeny of degree 2 from Elliptic Curve defined by y^2 = x^3 + 7*x + 8 over Finite Field of size 1000003 to Elliptic Curve defined by y^2 = x^3 + 999960*x + 78 over Finite Field of size 1000003, Isogeny of degree 13 from Elliptic Curve defined by y^2 = x^3 + 7*x + 8 over Finite Field of size 1000003 to Elliptic Curve defined by y^2 = x^3 + 878063*x + 845666 over Finite Field of size 1000003, Isogeny of degree 13 from Elliptic Curve defined by y^2 = x^3 + 7*x + 8 over Finite Field of size 1000003 to Elliptic Curve defined by y^2 = x^3 + 375648*x + 342776 over Finite Field of size 1000003, Isogeny of degree 17 from Elliptic Curve defined by y^2 = x^3 + 7*x + 8 over Finite Field of size 1000003 to Elliptic Curve defined by y^2 = x^3 + 347438*x + 594729 over Finite Field of size 1000003, Isogeny of degree 17 from Elliptic Curve defined by y^2 = x^3 + 7*x + 8 over Finite Field of size 1000003 to Elliptic Curve defined by y^2 = x^3 + 674846*x + 7392 over Finite Field of size 1000003, Isogeny of degree 23 from Elliptic Curve defined by y^2 = x^3 + 7*x + 8 over Finite Field of size 1000003 to Elliptic Curve defined by y^2 = x^3 + 390065*x + 605596 over Finite Field of size 1000003]
        sage: E.isogenies_prime_degree(2)
        [Isogeny of degree 2 from Elliptic Curve defined by y^2 = x^3 + 7*x + 8 over Finite Field of size 1000003 to Elliptic Curve defined by y^2 = x^3 + 970389*x + 794257 over Finite Field of size 1000003, Isogeny of degree 2 from Elliptic Curve defined by y^2 = x^3 + 7*x + 8 over Finite Field of size 1000003 to Elliptic Curve defined by y^2 = x^3 + 29783*x + 206196 over Finite Field of size 1000003, Isogeny of degree 2 from Elliptic Curve defined by y^2 = x^3 + 7*x + 8 over Finite Field of size 1000003 to Elliptic Curve defined by y^2 = x^3 + 999960*x + 78 over Finite Field of size 1000003]
        sage: E.isogenies_prime_degree(3)
        []
        sage: E.isogenies_prime_degree(5)
        []
        sage: E.isogenies_prime_degree(7)
        []
        sage: E.isogenies_prime_degree(13)
        [Isogeny of degree 13 from Elliptic Curve defined by y^2 = x^3 + 7*x + 8 over Finite Field of size 1000003 to Elliptic Curve defined by y^2 = x^3 + 878063*x + 845666 over Finite Field of size 1000003,
        Isogeny of degree 13 from Elliptic Curve defined by y^2 = x^3 + 7*x + 8 over Finite Field of size 1000003 to Elliptic Curve defined by y^2 = x^3 + 375648*x + 342776 over Finite Field of size 1000003]

        sage: E.isogenies_prime_degree([2, 3, 5, 7, 13])
        [Isogeny of degree 2 from Elliptic Curve defined by y^2 = x^3 + 7*x + 8 over Finite Field of size 1000003 to Elliptic Curve defined by y^2 = x^3 + 970389*x + 794257 over Finite Field of size 1000003, Isogeny of degree 2 from Elliptic Curve defined by y^2 = x^3 + 7*x + 8 over Finite Field of size 1000003 to Elliptic Curve defined by y^2 = x^3 + 29783*x + 206196 over Finite Field of size 1000003, Isogeny of degree 2 from Elliptic Curve defined by y^2 = x^3 + 7*x + 8 over Finite Field of size 1000003 to Elliptic Curve defined by y^2 = x^3 + 999960*x + 78 over Finite Field of size 1000003, Isogeny of degree 13 from Elliptic Curve defined by y^2 = x^3 + 7*x + 8 over Finite Field of size 1000003 to Elliptic Curve defined by y^2 = x^3 + 878063*x + 845666 over Finite Field of size 1000003, Isogeny of degree 13 from Elliptic Curve defined by y^2 = x^3 + 7*x + 8 over Finite Field of size 1000003 to Elliptic Curve defined by y^2 = x^3 + 375648*x + 342776 over Finite Field of size 1000003]
        sage: E.isogenies_prime_degree([2, 4])
        Traceback (most recent call last):
        ...
        ValueError: 4 is not prime.
        sage: E.isogenies_prime_degree(4)
        Traceback (most recent call last):
        ...
        ValueError: 4 is not prime.
        sage: E.isogenies_prime_degree(11)
        []
        sage: E = EllipticCurve(GF(17),[2,0])
        sage: E.isogenies_prime_degree(3)
        []
        sage: E.isogenies_prime_degree(2)
        [Isogeny of degree 2 from Elliptic Curve defined by y^2 = x^3 + 2*x over Finite Field of size 17 to Elliptic Curve defined by y^2 = x^3 + 9*x over Finite Field of size 17, Isogeny of degree 2 from Elliptic Curve defined by y^2 = x^3 + 2*x over Finite Field of size 17 to Elliptic Curve defined by y^2 = x^3 + 5*x + 9 over Finite Field of size 17, Isogeny of degree 2 from Elliptic Curve defined by y^2 = x^3 + 2*x over Finite Field of size 17 to Elliptic Curve defined by y^2 = x^3 + 5*x + 8 over Finite Field of size 17]

        sage: E = EllipticCurve(GF(13^4, 'a'),[2,8])
        sage: E.isogenies_prime_degree(2)
        [Isogeny of degree 2 from Elliptic Curve defined by y^2 = x^3 + 2*x + 8 over Finite Field in a of size 13^4 to Elliptic Curve defined by y^2 = x^3 + 7*x + 4 over Finite Field in a of size 13^4, Isogeny of degree 2 from Elliptic Curve defined by y^2 = x^3 + 2*x + 8 over Finite Field in a of size 13^4 to Elliptic Curve defined by y^2 = x^3 + (8*a^3+2*a^2+7*a+5)*x + (12*a^3+3*a^2+4*a+4) over Finite Field in a of size 13^4, Isogeny of degree 2 from Elliptic Curve defined by y^2 = x^3 + 2*x + 8 over Finite Field in a of size 13^4 to Elliptic Curve defined by y^2 = x^3 + (5*a^3+11*a^2+6*a+11)*x + (a^3+10*a^2+9*a) over Finite Field in a of size 13^4]

        sage: E.isogenies_prime_degree(3)
        [Isogeny of degree 3 from Elliptic Curve defined by y^2 = x^3 + 2*x + 8 over Finite Field in a of size 13^4 to Elliptic Curve defined by y^2 = x^3 + 9*x + 11 over Finite Field in a of size 13^4]

    Example to show that separable isogenies of degree equal to the characteristic are now implemented::

        sage: E.isogenies_prime_degree(13)
        [Isogeny of degree 13 from Elliptic Curve defined by y^2 = x^3 + 2*x + 8 over Finite Field in a of size 13^4 to Elliptic Curve defined by y^2 = x^3 + 6*x + 5 over Finite Field in a of size 13^4]

    Examples over number fields (other than QQ)::

        sage: QQroot2.<e> = NumberField(x^2-2)
        sage: E = EllipticCurve(QQroot2, j=8000)
        sage: E.isogenies_prime_degree()
        [Isogeny of degree 2 from Elliptic Curve defined by y^2 = x^3 + (-150528000)*x + (-629407744000) over Number Field in e with defining polynomial x^2 - 2 to Elliptic Curve defined by y^2 = x^3 + (-36750)*x + 2401000 over Number Field in e with defining polynomial x^2 - 2,
        Isogeny of degree 2 from Elliptic Curve defined by y^2 = x^3 + (-150528000)*x + (-629407744000) over Number Field in e with defining polynomial x^2 - 2 to Elliptic Curve defined by y^2 = x^3 + (220500*e-257250)*x + (54022500*e-88837000) over Number Field in e with defining polynomial x^2 - 2,
        Isogeny of degree 2 from Elliptic Curve defined by y^2 = x^3 + (-150528000)*x + (-629407744000) over Number Field in e with defining polynomial x^2 - 2 to Elliptic Curve defined by y^2 = x^3 + (-220500*e-257250)*x + (-54022500*e-88837000) over Number Field in e with defining polynomial x^2 - 2]

        sage: E = EllipticCurve(QQroot2, [1,0,1,4, -6]); E
        Elliptic Curve defined by y^2 + x*y + y = x^3 + 4*x + (-6) over Number Field in e with defining polynomial x^2 - 2
        sage: E.isogenies_prime_degree(2)
        [Isogeny of degree 2 from Elliptic Curve defined by y^2 + x*y + y = x^3 + 4*x + (-6) over Number Field in e with defining polynomial x^2 - 2 to Elliptic Curve defined by y^2 + x*y + y = x^3 + (-36)*x + (-70) over Number Field in e with defining polynomial x^2 - 2]
        sage: E.isogenies_prime_degree(3)
        [Isogeny of degree 3 from Elliptic Curve defined by y^2 + x*y + y = x^3 + 4*x + (-6) over Number Field in e with defining polynomial x^2 - 2 to Elliptic Curve defined by y^2 + x*y + y = x^3 + (-1)*x over Number Field in e with defining polynomial x^2 - 2,
        Isogeny of degree 3 from Elliptic Curve defined by y^2 + x*y + y = x^3 + 4*x + (-6) over Number Field in e with defining polynomial x^2 - 2 to Elliptic Curve defined by y^2 + x*y + y = x^3 + (-171)*x + (-874) over Number Field in e with defining polynomial x^2 - 2]
    """
    F = self.base_ring()
    # Inexact / algebraically-closed base fields are not supported.
    if is_RealField(F):
        raise NotImplementedError("This code could be implemented for general real fields, but has not been yet.")
    if is_ComplexField(F):
        raise NotImplementedError("This code could be implemented for general complex fields, but has not been yet.")
    if F == rings.QQbar:
        raise NotImplementedError("This code could be implemented for QQbar, but has not been yet.")
    from .isogeny_small_degree import isogenies_prime_degree
    if l is None:
        # Default: all primes up to max_l.
        from sage.rings.all import prime_range
        l = prime_range(max_l+1)
    if not isinstance(l, list):
        # Single value: must coerce to a prime integer.
        try:
            l = rings.ZZ(l)
        except TypeError:
            raise ValueError("%s is not prime."%l)
        if l.is_prime():
            return isogenies_prime_degree(self, l)
        else:
            raise ValueError("%s is not prime."%l)
    # List of values: deduplicate, coerce, sort, then concatenate the
    # isogeny lists for each prime.
    L = list(set(l))
    try:
        L = [rings.ZZ(ell) for ell in L]
    except TypeError:
        raise ValueError("%s is not a list of primes."%l)
    L.sort()
    return sum([isogenies_prime_degree(self,ell) for ell in L],[])
def is_isogenous(self, other, field=None):
"""
Return whether or not self is isogenous to other.
INPUT:
- ``other`` -- another elliptic curve.
- ``field`` (default None) -- Currently not implemented. A
field containing the base fields of the two elliptic curves
onto which the two curves may be extended to test if they
are isogenous over this field. By default is_isogenous will
not try to find this field unless one of the curves can be
be extended into the base field of the other, in which case
it will test over the larger base field.
OUTPUT:
(bool) True if there is an isogeny from curve ``self`` to
curve ``other`` defined over ``field``.
METHOD:
Over general fields this is only implemented in trivial cases.
EXAMPLES::
sage: E1 = EllipticCurve(CC, [1,18]); E1
Elliptic Curve defined by y^2 = x^3 + 1.00000000000000*x + 18.0000000000000 over Complex Field with 53 bits of precision
sage: E2 = EllipticCurve(CC, [2,7]); E2
Elliptic Curve defined by y^2 = x^3 + 2.00000000000000*x + 7.00000000000000 over Complex Field with 53 bits of precision
sage: E1.is_isogenous(E2)
Traceback (most recent call last):
...
NotImplementedError: Only implemented for isomorphic curves over general fields.
sage: E1 = EllipticCurve(Frac(PolynomialRing(ZZ,'t')), [2,19]); E1
Elliptic Curve defined by y^2 = x^3 + 2*x + 19 over Fraction Field of Univariate Polynomial Ring in t over Integer Ring
sage: E2 = EllipticCurve(CC, [23,4]); E2
Elliptic Curve defined by y^2 = x^3 + 23.0000000000000*x + 4.00000000000000 over Complex Field with 53 bits of precision
sage: E1.is_isogenous(E2)
Traceback (most recent call last):
...
NotImplementedError: Only implemented for isomorphic curves over general fields.
"""
from .ell_generic import is_EllipticCurve
if not is_EllipticCurve(other):
raise ValueError("Second argument is not an Elliptic Curve.")
if self.is_isomorphic(other):
return True
else:
raise NotImplementedError("Only implemented for isomorphic curves over general fields.")
    def weierstrass_p(self, prec=20, algorithm=None):
        r"""
        Computes the Weierstrass `\wp`-function of the elliptic curve.

        INPUT:

        - ``prec`` - precision

        - ``algorithm`` - string (default:``None``) an algorithm identifier
          indicating using the ``pari``, ``fast`` or ``quadratic``
          algorithm. If the algorithm is ``None``, then this
          function determines the best algorithm to use.

        OUTPUT:

        a Laurent series in one variable `z` with coefficients in the
        base field `k` of `E`.

        EXAMPLES::

            sage: E = EllipticCurve('11a1')
            sage: E.weierstrass_p(prec=10)
            z^-2 + 31/15*z^2 + 2501/756*z^4 + 961/675*z^6 + 77531/41580*z^8 + O(z^10)
            sage: E.weierstrass_p(prec=8)
            z^-2 + 31/15*z^2 + 2501/756*z^4 + 961/675*z^6 + O(z^8)
            sage: Esh = E.short_weierstrass_model()
            sage: Esh.weierstrass_p(prec=8)
            z^-2 + 13392/5*z^2 + 1080432/7*z^4 + 59781888/25*z^6 + O(z^8)
            sage: E.weierstrass_p(prec=20, algorithm='fast')
            z^-2 + 31/15*z^2 + 2501/756*z^4 + 961/675*z^6 + 77531/41580*z^8 + 1202285717/928746000*z^10 + 2403461/2806650*z^12 + 30211462703/43418875500*z^14 + 3539374016033/7723451736000*z^16 + 413306031683977/1289540602350000*z^18 + O(z^20)
            sage: E.weierstrass_p(prec=20, algorithm='pari')
            z^-2 + 31/15*z^2 + 2501/756*z^4 + 961/675*z^6 + 77531/41580*z^8 + 1202285717/928746000*z^10 + 2403461/2806650*z^12 + 30211462703/43418875500*z^14 + 3539374016033/7723451736000*z^16 + 413306031683977/1289540602350000*z^18 + O(z^20)
            sage: E.weierstrass_p(prec=20, algorithm='quadratic')
            z^-2 + 31/15*z^2 + 2501/756*z^4 + 961/675*z^6 + 77531/41580*z^8 + 1202285717/928746000*z^10 + 2403461/2806650*z^12 + 30211462703/43418875500*z^14 + 3539374016033/7723451736000*z^16 + 413306031683977/1289540602350000*z^18 + O(z^20)
        """
        # The actual computation lives in ell_wp; this is a thin dispatcher.
        from .ell_wp import weierstrass_p
        return weierstrass_p(self, prec=prec, algorithm=algorithm)
def hasse_invariant(self):
r"""
Return the Hasse invariant of this elliptic curve.
OUTPUT:
The Hasse invariant of this elliptic curve, as an element of
the base field. This is only defined over fields of positive
characteristic, and is an element of the field which is zero
if and only if the curve is supersingular. Over a field of
characteristic zero, where the Hasse invariant is undefined,
a ``ValueError`` is returned.
EXAMPLES::
sage: E = EllipticCurve([Mod(1,2),Mod(1,2),0,0,Mod(1,2)])
sage: E.hasse_invariant()
1
sage: E = EllipticCurve([0,0,Mod(1,3),Mod(1,3),Mod(1,3)])
sage: E.hasse_invariant()
0
sage: E = EllipticCurve([0,0,Mod(1,5),0,Mod(2,5)])
sage: E.hasse_invariant()
0
sage: E = EllipticCurve([0,0,Mod(1,5),Mod(1,5),Mod(2,5)])
sage: E.hasse_invariant()
2
Some examples over larger fields::
sage: EllipticCurve(GF(101),[0,0,0,0,1]).hasse_invariant()
0
sage: EllipticCurve(GF(101),[0,0,0,1,1]).hasse_invariant()
98
sage: EllipticCurve(GF(103),[0,0,0,0,1]).hasse_invariant()
20
sage: EllipticCurve(GF(103),[0,0,0,1,1]).hasse_invariant()
17
sage: F.<a> = GF(107^2)
sage: EllipticCurve(F,[0,0,0,a,1]).hasse_invariant()
62*a + 75
sage: EllipticCurve(F,[0,0,0,0,a]).hasse_invariant()
0
Over fields of characteristic zero, the Hasse invariant is
undefined::
sage: E = EllipticCurve([0,0,0,0,1])
sage: E.hasse_invariant()
Traceback (most recent call last):
...
ValueError: Hasse invariant only defined in positive characteristic
"""
k = self.base_field()
p = k.characteristic()
if p == 0:
raise ValueError('Hasse invariant only defined in positive characteristic')
elif p == 2:
return self.a1()
elif p == 3:
return self.b2()
elif p == 5:
return self.c4()
elif p == 7:
return -self.c6()
else:
R = k['x']
x = R.gen()
E = self.short_weierstrass_model()
f=(x**3+E.a4()*x+E.a6())**((p-1)//2)
return f.coefficients(sparse=False)[p-1]
| 41.388751 | 1,602 | 0.561782 |
ff6e5f5430c62860d578d8b3cc5ee467d6394b64 | 3,982 | py | Python | tests/utils/test_general.py | harmon/sentry-python | fc55f7909c8c1969b43d43acccb9835f65fcd48c | [
"BSD-2-Clause"
] | null | null | null | tests/utils/test_general.py | harmon/sentry-python | fc55f7909c8c1969b43d43acccb9835f65fcd48c | [
"BSD-2-Clause"
] | null | null | null | tests/utils/test_general.py | harmon/sentry-python | fc55f7909c8c1969b43d43acccb9835f65fcd48c | [
"BSD-2-Clause"
] | 1 | 2020-05-18T19:15:15.000Z | 2020-05-18T19:15:15.000Z | # coding: utf-8
import sys
import os
import pytest
from sentry_sdk.utils import (
BadDsn,
Dsn,
safe_repr,
exceptions_from_error_tuple,
filename_for_module,
handle_in_app_impl,
iter_event_stacktraces,
)
from sentry_sdk._compat import text_type
# Hypothesis is an optional test dependency: the property-based test below
# is only defined when the library is importable, so the rest of the suite
# still runs without it.
try:
    from hypothesis import given
    import hypothesis.strategies as st
except ImportError:
    pass
else:
    # Arbitrary text or bytes input for the property-based test.
    any_string = st.one_of(st.binary(), st.text())

    @given(x=any_string)
    def test_safe_repr_never_broken_for_strings(x):
        # safe_repr must always produce text and never fall back to the
        # "broken repr" placeholder, whatever string/bytes it is given.
        r = safe_repr(x)
        assert isinstance(r, text_type)
        assert u"broken repr" not in r
def test_safe_repr_regressions():
    """Non-ASCII text must survive safe_repr unescaped (regression)."""
    rendered = safe_repr(u"лошадь")
    assert u"лошадь" in rendered
@pytest.mark.xfail(
    sys.version_info < (3,),
    reason="Fixing this in Python 2 would break other behaviors",
)
@pytest.mark.parametrize("prefix", (u"", u"abcd", u"лошадь"))
@pytest.mark.parametrize("character", u"\x00\x07\x1b\n")
def test_safe_repr_non_printable(prefix, character):
    """Check that non-printable characters are escaped"""
    text = prefix + character
    # Both the text form and its UTF-8 byte form must escape the character.
    for candidate in (text, text.encode("utf-8")):
        assert character not in safe_repr(candidate)
def test_abs_path():
    """Check if abs_path is actually an absolute path. This can happen either
    with eval/exec like here, or when the file in the frame is relative to
    __main__"""
    # Compile a snippet under a *relative* filename, then make it raise.
    code = compile("1/0", "test.py", "exec")
    try:
        exec(code, {})
    except Exception:
        # Build Sentry-style exception payloads from the live exc_info.
        exceptions = exceptions_from_error_tuple(sys.exc_info())

    (exception,) = exceptions
    frame1, frame2 = frames = exception["stacktrace"]["frames"]

    for frame in frames:
        # abs_path must already be absolute, i.e. idempotent under abspath.
        assert os.path.abspath(frame["abs_path"]) == frame["abs_path"]

    # filename stays relative: this test file and the compiled snippet.
    assert frame1["filename"] == "tests/utils/test_general.py"
    assert frame2["filename"] == "test.py"
def test_filename():
    """filename_for_module maps a module name + file path to a short name."""
    import sentry_sdk.utils

    cases = [
        ("bogus", "bogus", "bogus"),
        ("os", os.__file__, "os.py"),
        ("pytest", pytest.__file__, "pytest.py"),
        ("sentry_sdk.utils", sentry_sdk.utils.__file__, "sentry_sdk/utils.py"),
    ]
    for module, path, expected in cases:
        assert filename_for_module(module, path) == expected
@pytest.mark.parametrize(
    "given,expected",
    [
        ("https://foobar@sentry.io/123", "https://sentry.io/api/123/store/"),
        ("https://foobar@sentry.io/bam/123", "https://sentry.io/bam/api/123/store/"),
        (
            "https://foobar@sentry.io/bam/baz/123",
            "https://sentry.io/bam/baz/api/123/store/",
        ),
    ],
)
def test_parse_dsn_paths(given, expected):
    # The store endpoint must keep any path prefix from the DSN and insert
    # /api/<project_id>/store/ after it.
    dsn = Dsn(given)
    auth = dsn.to_auth()
    assert auth.store_api_url == expected
@pytest.mark.parametrize(
    "dsn",
    [
        # NOTE: the commas are essential — without them Python's implicit
        # string concatenation collapses all entries into a single string,
        # so only one (meaningless) case would be tested.
        "https://foobar@sentry.io",
        "https://foobar@sentry.io/",
        "https://foobar@sentry.io/asdf",
        "https://foobar@sentry.io/asdf/",
        "https://foobar@sentry.io/asdf/123/",
    ],
)
def test_parse_invalid_dsn(dsn):
    """Each malformed DSN (missing or non-numeric project id) must raise."""
    with pytest.raises(BadDsn):
        dsn = Dsn(dsn)
@pytest.mark.parametrize("empty", [None, []])
def test_in_app(empty):
    # include list alone: listed modules get in_app=True, others untouched.
    assert handle_in_app_impl(
        [{"module": "foo"}, {"module": "bar"}],
        in_app_include=["foo"],
        in_app_exclude=empty,
    ) == [{"module": "foo", "in_app": True}, {"module": "bar"}]

    # include takes precedence over exclude for the same module.
    assert handle_in_app_impl(
        [{"module": "foo"}, {"module": "bar"}],
        in_app_include=["foo"],
        in_app_exclude=["foo"],
    ) == [{"module": "foo", "in_app": True}, {"module": "bar"}]

    # exclude list alone: excluded modules become in_app=False, rest True.
    assert handle_in_app_impl(
        [{"module": "foo"}, {"module": "bar"}],
        in_app_include=empty,
        in_app_exclude=["foo"],
    ) == [{"module": "foo", "in_app": False}, {"module": "bar", "in_app": True}]
def test_iter_stacktraces():
    """All three stacktrace locations of an event must be yielded."""
    event = {
        "threads": {"values": [{"stacktrace": 1}]},
        "stacktrace": 2,
        "exception": {"values": [{"stacktrace": 3}]},
    }
    found = set(iter_event_stacktraces(event))
    assert found == {1, 2, 3}
| 26.724832 | 85 | 0.613511 |
b0a29d3c88c94adda1f94d7800b3c1a9ea479ece | 669 | bzl | Python | source/bazel/deps/tabulator/get.bzl | luxe/CodeLang-compiler | 78837d90bdd09c4b5aabbf0586a5d8f8f0c1e76a | [
"MIT"
] | 1 | 2019-01-06T08:45:46.000Z | 2019-01-06T08:45:46.000Z | source/bazel/deps/tabulator/get.bzl | luxe/CodeLang-compiler | 78837d90bdd09c4b5aabbf0586a5d8f8f0c1e76a | [
"MIT"
] | 264 | 2015-11-30T08:34:00.000Z | 2018-06-26T02:28:41.000Z | source/bazel/deps/tabulator/get.bzl | UniLang/compiler | c338ee92994600af801033a37dfb2f1a0c9ca897 | [
"MIT"
] | null | null | null | # Do not edit this file directly.
# It was auto-generated by: code/programs/reflexivity/reflexive_refresh
load("@bazel_tools//tools/build_defs/repo:http.bzl", "http_archive")
load("@bazel_tools//tools/build_defs/repo:http.bzl", "http_file")
def tabulator():
http_archive(
name = "tabulator",
build_file = "//bazel/deps/tabulator:build.BUILD",
sha256 = "da365ae0eae5321705411b9b936baa9cd7cd40e2b27b1f3679af57696541a507",
strip_prefix = "tabulator-b294a53bf39ba082420a3d1d9d913b744e9dea1d",
urls = [
"https://github.com/Unilang/tabulator/archive/b294a53bf39ba082420a3d1d9d913b744e9dea1d.tar.gz",
],
)
| 39.352941 | 107 | 0.717489 |
1f2cf36850b03ec592c2bd9b5411a351e6555a81 | 6,404 | py | Python | src/centerline/geometry.py | fgabel/centerline | 91d130aa1b00bf759d81e12576f2c7bb92a6ce4c | [
"MIT"
] | 1 | 2021-05-22T02:25:22.000Z | 2021-05-22T02:25:22.000Z | src/centerline/geometry.py | fgabel/centerline | 91d130aa1b00bf759d81e12576f2c7bb92a6ce4c | [
"MIT"
] | null | null | null | src/centerline/geometry.py | fgabel/centerline | 91d130aa1b00bf759d81e12576f2c7bb92a6ce4c | [
"MIT"
] | 1 | 2021-07-15T00:19:46.000Z | 2021-07-15T00:19:46.000Z | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from numpy import array
from scipy.spatial import Voronoi
from shapely.geometry import LineString, MultiLineString, MultiPolygon, Polygon
from shapely.ops import unary_union
from multiprocessing import Pool
from . import exceptions
class Centerline(MultiLineString):
    """Create a centerline object.

    The ``attributes`` are copied and set as the centerline's
    attributes.

    :param input_geometry: input geometry
    :type input_geometry: :py:class:`shapely.geometry.Polygon` or
        :py:class:`shapely.geometry.MultiPolygon`
    :param interpolation_distance: densify the input geometry's
        border by placing additional points at this distance,
        defaults to 0.5 [meter]
    :type interpolation_distance: float, optional
    :raises exceptions.InvalidInputTypeError: input geometry is not
        of type :py:class:`shapely.geometry.Polygon` or
        :py:class:`shapely.geometry.MultiPolygon`
    """

    def __init__(
        self, input_geometry, interpolation_distance=0.5, **attributes
    ):
        self._input_geometry = input_geometry
        self._interpolation_distance = abs(interpolation_distance)

        if not self.input_geometry_is_valid():
            raise exceptions.InvalidInputTypeError

        # Coordinates are shifted towards the origin before the Voronoi
        # computation (to reduce floating point error) and restored when
        # the output linestrings are built.
        self._min_x, self._min_y = self._get_reduced_coordinates()
        self.assign_attributes_to_instance(attributes)

        super(Centerline, self).__init__(lines=self._construct_centerline())

    def input_geometry_is_valid(self):
        """Return whether the input geometry is a (Multi)Polygon.

        :return: geometry is valid
        :rtype: bool
        """
        return isinstance(self._input_geometry, (Polygon, MultiPolygon))

    def _get_reduced_coordinates(self):
        """Return the integer lower-left corner of the input's envelope."""
        min_x = int(min(self._input_geometry.envelope.exterior.xy[0]))
        min_y = int(min(self._input_geometry.envelope.exterior.xy[1]))
        return min_x, min_y

    def assign_attributes_to_instance(self, attributes):
        """Assign the ``attributes`` to the :py:class:`Centerline` object.

        :param attributes: polygon's attributes
        :type attributes: dict
        """
        for key in attributes:
            setattr(self, key, attributes.get(key))

    def _check_ridges(self, ridge):
        """Return the LineString for ``ridge`` if it is usable, else None.

        A ridge is usable when it is finite and its restored-coordinate
        segment lies entirely within the input geometry.

        Executed in worker processes via ``Pool.map``; it reads
        ``self._vertices`` (set in ``_construct_centerline`` before the
        pool is started) and ``self._input_geometry``.
        """
        # Bug fix: the original referenced an undefined ``linestrings``
        # list, ``self.input_geometry`` (attribute does not exist) and
        # ``self.vertices`` (never assigned).
        if not self._ridge_is_finite(ridge):
            return None
        starting_point = self._create_point_with_restored_coordinates(
            x=self._vertices[ridge[0]][0], y=self._vertices[ridge[0]][1]
        )
        ending_point = self._create_point_with_restored_coordinates(
            x=self._vertices[ridge[1]][0], y=self._vertices[ridge[1]][1]
        )
        linestring = LineString((starting_point, ending_point))
        if self._linestring_is_within_input_geometry(
                linestring, self._input_geometry):
            return linestring
        return None

    def _construct_centerline(self):
        """Build the centerline's linestrings from the Voronoi diagram."""
        # Stored on self so the Pool workers (which receive a pickled copy
        # of self) can see the vertices.
        # NOTE(review): pickling a not-yet-initialized MultiLineString
        # subclass for the worker processes may be fragile on spawn-based
        # platforms — confirm on Windows/macOS.
        self._vertices, ridges = self._get_voronoi_vertices_and_ridges()
        pool = Pool()
        try:
            candidates = pool.map(self._check_ridges, ridges)
        finally:
            # Original leaked the pool; always release the workers.
            pool.close()
            pool.join()
        # _check_ridges returns None for unusable ridges; drop them.
        linestrings = [line for line in candidates if line is not None]
        if len(linestrings) < 2:
            raise exceptions.TooFewRidgesError
        return unary_union(linestrings)

    def _get_voronoi_vertices_and_ridges(self):
        """Return (vertices, ridge_vertices) of the border Voronoi diagram."""
        borders = self._get_densified_borders()

        voronoi_diagram = Voronoi(borders)
        vertices = voronoi_diagram.vertices
        ridges = voronoi_diagram.ridge_vertices

        return vertices, ridges

    def _ridge_is_finite(self, ridge):
        # scipy marks a ridge extending to infinity with a -1 vertex index.
        return -1 not in ridge

    def _create_point_with_restored_coordinates(self, x, y):
        """Shift a reduced coordinate pair back to world coordinates."""
        return (x + self._min_x, y + self._min_y)

    def _linestring_is_within_input_geometry(self, linestring, input_geometry):
        return (
            linestring.within(input_geometry)
            and len(linestring.coords[0]) > 1
        )

    def _get_densified_borders(self):
        """Return all border points (densified) as a numpy array."""
        polygons = self._extract_polygons_from_input_geometry()
        points = []
        for polygon in polygons:
            points += self._get_interpolated_boundary(polygon.exterior)
            if self._polygon_has_interior_rings(polygon):
                for interior in polygon.interiors:
                    points += self._get_interpolated_boundary(interior)

        return array(points)

    def _extract_polygons_from_input_geometry(self):
        # NOTE(review): iterating a MultiPolygon directly is deprecated in
        # shapely 2.x (use .geoms) — kept for compatibility with 1.x.
        if isinstance(self._input_geometry, MultiPolygon):
            return (polygon for polygon in self._input_geometry)
        else:
            return (self._input_geometry,)

    def _polygon_has_interior_rings(self, polygon):
        return len(polygon.interiors) > 0

    def _get_interpolated_boundary(self, boundary):
        """Return boundary points spaced by the interpolation distance."""
        line = LineString(boundary)

        first_point = self._get_coordinates_of_first_point(line)
        last_point = self._get_coordinates_of_last_point(line)

        intermediate_points = self._get_coordinates_of_interpolated_points(
            line
        )

        return [first_point] + intermediate_points + [last_point]

    def _get_coordinates_of_first_point(self, linestring):
        return self._create_point_with_reduced_coordinates(
            x=linestring.xy[0][0], y=linestring.xy[1][0]
        )

    def _get_coordinates_of_last_point(self, linestring):
        return self._create_point_with_reduced_coordinates(
            x=linestring.xy[0][-1], y=linestring.xy[1][-1]
        )

    def _get_coordinates_of_interpolated_points(self, linestring):
        intermediate_points = []
        interpolation_distance = self._interpolation_distance
        line_length = linestring.length
        while interpolation_distance < line_length:
            point = linestring.interpolate(interpolation_distance)
            reduced_point = self._create_point_with_reduced_coordinates(
                x=point.x, y=point.y
            )
            intermediate_points.append(reduced_point)
            interpolation_distance += self._interpolation_distance

        return intermediate_points

    def _create_point_with_reduced_coordinates(self, x, y):
        """Shift a world coordinate pair towards the origin."""
        return (x - self._min_x, y - self._min_y)
2e84ffc180cc31823016f984fc71e820a29c3396 | 174 | py | Python | iwitness/views.py | elishaking/i-witness | 09fe9f6db04fb64440c306e714a5233db31db23e | [
"Apache-2.0"
] | null | null | null | iwitness/views.py | elishaking/i-witness | 09fe9f6db04fb64440c306e714a5233db31db23e | [
"Apache-2.0"
] | 2 | 2021-06-08T20:53:14.000Z | 2021-06-10T22:31:47.000Z | iwitness/views.py | elishaking/i-witness | 09fe9f6db04fb64440c306e714a5233db31db23e | [
"Apache-2.0"
] | null | null | null | from django.shortcuts import render
def index(request):
    """Render the landing page with an empty template context."""
    return render(request, 'index.html', {})
def privacy(request):
    """Render the privacy-policy page with an empty template context."""
    return render(request, 'privacy.html', {})
| 17.4 | 46 | 0.695402 |
f004eee278bd347f70ae428cb923c052f77bb10b | 1,482 | py | Python | awesome_panel/apps/lib_bokeh.py | Hoxbro/awesome-panel | 724ba0b2303d9cf1f76c5039f7bc65659f2598cf | [
"Apache-2.0"
] | null | null | null | awesome_panel/apps/lib_bokeh.py | Hoxbro/awesome-panel | 724ba0b2303d9cf1f76c5039f7bc65659f2598cf | [
"Apache-2.0"
] | null | null | null | awesome_panel/apps/lib_bokeh.py | Hoxbro/awesome-panel | 724ba0b2303d9cf1f76c5039f7bc65659f2598cf | [
"Apache-2.0"
] | null | null | null | """
The purpose of this app is to demonstrate that Panel works with the tools you know and love
❤️, including Bokeh. It supports both light and dark theme.
"""
import numpy as np
import panel as pn
from bokeh.plotting import figure
from scipy.integrate import odeint
from awesome_panel import config
config.extension(url="lib_bokeh")
def get_plot():
"""Returns a Bokeh plot"""
# pylint: disable=invalid-name
sigma = 10
rho = 28
beta = 8.0 / 3
theta = 3 * np.pi / 4
def lorenz(xyz, t): # pylint: disable=unused-argument
x, y, z = xyz
x_dot = sigma * (y - x)
y_dot = x * rho - x * z - y
z_dot = x * y - beta * z
return [x_dot, y_dot, z_dot]
initial = (-10, -7, 35)
t = np.arange(0, 100, 0.006)
solution = odeint(lorenz, initial, t)
x = solution[:, 0]
y = solution[:, 1]
z = solution[:, 2]
xprime = np.cos(theta) * x - np.sin(theta) * y
colors = [
"#C6DBEF",
"#9ECAE1",
"#6BAED6",
"#4292C6",
"#2171B5",
"#08519C",
"#08306B",
]
plot = figure(title="Lorenz attractor example", tools=["pan,wheel_zoom,box_zoom,reset,hover"])
plot.multi_line(
np.array_split(xprime, 7),
np.array_split(z, 7),
line_color=colors,
line_alpha=0.8,
line_width=1.5,
)
return plot
PLOT = get_plot()
pn.pane.Bokeh(PLOT, height=700, sizing_mode="stretch_both").servable()
| 23.15625 | 98 | 0.580972 |
193bdee09e90f9a202565d3caad31518445db624 | 910 | py | Python | C/multiprocessing_start.py | liriqingone/IP_Agent | f562dfe1e4096ed8496767cffa2596704e285861 | [
"Apache-2.0"
] | null | null | null | C/multiprocessing_start.py | liriqingone/IP_Agent | f562dfe1e4096ed8496767cffa2596704e285861 | [
"Apache-2.0"
] | null | null | null | C/multiprocessing_start.py | liriqingone/IP_Agent | f562dfe1e4096ed8496767cffa2596704e285861 | [
"Apache-2.0"
] | null | null | null | # coding: utf-8
import os
import inspect
import Tianyan_spiders
import logging
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)
from multiprocessing.pool import Pool as ProcessPool
# Full-crawl spider class instances (currently unused).
full_spiders = []
# Modules scanned by main() for spider classes to run.
all_spiders = [Tianyan_spiders]
def start(spider_cls):
    """Instantiate one spider class and run it (Python 2 worker entry).

    Best-effort: any exception is printed (alert e-mail is disabled) so
    that one failing spider does not kill the process pool.
    """
    try:
        spider_cls().start()
    except Exception, e:
        # send_alert_email_to(['n@sequee.com'], [], name + '\n\n' + traceback.format_exc())
        print e
def main():
    """Collect every class defined in the spider modules and run each one
    in a 4-worker process pool via start()."""
    spider_classes = []
    for module in all_spiders:
        for attr_name in dir(module):
            attr = getattr(module, attr_name)
            # NOTE: this picks up *every* class visible in the module,
            # including imported ones — assumed intentional.
            if inspect.isclass(attr):
                spider_classes.append(attr)

    pool = ProcessPool(4)
    pool.map(start, spider_classes)
    pool.close()
    pool.join()
    print("ALL TASK DONE")


if __name__ == '__main__':
    main()
| 21.162791 | 91 | 0.656044 |
42faf3691c0fd5c090ba1d185145815466c046e6 | 1,452 | py | Python | rippl/legislature/google_civics.py | gnmerritt/dailyrippl | 9a0f9615ba597a475dbd6305b589827cb2d97b03 | [
"MIT"
] | 6 | 2016-12-03T20:30:43.000Z | 2017-01-10T01:50:09.000Z | rippl/legislature/google_civics.py | gnmerritt/dailyrippl | 9a0f9615ba597a475dbd6305b589827cb2d97b03 | [
"MIT"
] | 24 | 2016-11-30T02:31:13.000Z | 2020-02-25T22:47:27.000Z | rippl/legislature/google_civics.py | gnmerritt/dailyrippl | 9a0f9615ba597a475dbd6305b589827cb2d97b03 | [
"MIT"
] | 1 | 2016-12-25T21:42:31.000Z | 2016-12-25T21:42:31.000Z | """Use Google Civics API to get all the details about """
import logging
import os
import requests
logger = logging.getLogger(__name__)
GOOGLE_API_KEY = os.getenv('GOOGLE_API_KEY')
base_url = 'https://www.googleapis.com/civicinfo/v2/representatives'
def get_reps_for_address(address):
    """Get representatives for an address (any string)

    Args:
        address: str - an address (lat/long works)
    Returns:
        list of contact info | None (if error)
    """
    if not GOOGLE_API_KEY:
        raise AssertionError('Missing GOOGLE_API_KEY from env')

    # https://developers.google.com/civic-information/docs/v2/representatives
    query = {
        'key': GOOGLE_API_KEY,
        'includeOffices': True,
        'address': address,
    }
    response = requests.get(base_url, params=query)
    if not response.ok:
        # TODO(carolyn): differentiate btwn malformed address, rate limits, etc
        return None

    payload = response.json()
    if 'offices' not in payload or 'officials' not in payload:
        logger.error('Missing required key from Google response')
        return None

    officials = payload['officials']
    # TODO(carolyn): format this like the other api endpoint?
    # for now, just add the office name to the officials
    for office in payload['offices']:
        for official_index in office['officialIndices']:
            officials[official_index]['office_name'] = office['name']
    return officials
| 27.923077 | 79 | 0.665978 |
e83678404abacc8c4b5a9e7bf83e8545982f4805 | 8,837 | py | Python | LeNet2.py | gregtyminski/Traffic-Sign-Claassifier | 7e05817d49702ffb83882e9dd13e1d1ff90171d4 | [
"MIT"
] | 1 | 2019-12-27T04:14:30.000Z | 2019-12-27T04:14:30.000Z | LeNet2.py | gregtyminski/Traffic-Sign-Claassifier | 7e05817d49702ffb83882e9dd13e1d1ff90171d4 | [
"MIT"
] | null | null | null | LeNet2.py | gregtyminski/Traffic-Sign-Claassifier | 7e05817d49702ffb83882e9dd13e1d1ff90171d4 | [
"MIT"
] | null | null | null | import tensorflow as tf
import neptune
from tensorflow.contrib.layers import flatten
from traffic_sign_dataset import TrafficData
import tensorflow.contrib.slim as slim
from sklearn.utils import shuffle
class LeNet2():
    """LeNet-style CNN classifier for 32x32x3 images (TF1 graph API).

    The network graph is built eagerly in ``__init__``; ``train`` adds
    the loss/optimizer ops, trains, and saves a checkpoint which
    ``predict`` restores.  Optional experiment tracking via Neptune.
    """

    def __repr__(self):
        return 'LeNet2()'

    def __init__(self, output_classes: int = 43):
        """Build the inference graph.

        :param output_classes: number of output classes (43 = German
            traffic-sign classes).
        """
        # mu/sigma parameterize tf.truncated_normal for weight init.
        self.my_name = 'LeNet v2'
        self.model_file_name = './lenet'
        self.mu = 0
        self.sigma = 0.1
        # Default hyperparameters; override via set_hiperparams().
        self.epochs = 10
        self.batch_size = 128
        self.learn_rate = 0.001
        # NOTE(review): dropout is baked into the graph as a constant, so
        # it is also active during evaluate()/predict() — confirm intended.
        self.dropout_val = 0.5
        self.x = tf.placeholder(tf.float32, (None, 32, 32, 3))
        self.y = tf.placeholder(tf.int32, (None))
        self.one_hot_y = tf.one_hot(self.y, output_classes)
        # Filled in by train(); predict() requires train() to have run.
        self.dataset = None
        self.accuracy_operation = None
        self.cross_entropy = None
        self.loss_operation = None
        self.optimizer = None
        self.training_operation = None
        self.correct_prediction = None
        self.saver = None
        self.log_neptune = False

        # Layer 1: conv 5x5, 3 -> 6 channels, VALID: 32x32x3 -> 28x28x6.
        self.lay1_W = tf.Variable(tf.truncated_normal(shape=(5, 5, 3, 6), mean=self.mu, stddev=self.sigma))
        self.lay1_b = tf.Variable(tf.zeros([6]))
        padding = 'VALID'
        self.layer1 = tf.nn.conv2d(self.x, self.lay1_W, strides=[1, 1, 1, 1], padding=padding) + self.lay1_b
        self.layer1 = tf.nn.relu(self.layer1)
        # Max pool 2x2: 28x28x6 -> 14x14x6.
        ksize = [1, 2, 2, 1]
        strides = [1, 2, 2, 1]
        padding = 'VALID'
        self.layer1 = tf.nn.max_pool(self.layer1, ksize, strides, padding)

        # Layer 2: conv 5x5, 6 -> 16 channels: 14x14x6 -> 10x10x16.
        self.lay2_W = tf.Variable(tf.truncated_normal(shape=(5, 5, 6, 16), mean=self.mu, stddev=self.sigma))
        self.lay2_b = tf.Variable(tf.zeros([16]))
        padding = 'VALID'
        strides = [1, 1, 1, 1]
        self.layer2 = tf.nn.conv2d(self.layer1, self.lay2_W, strides, padding) + self.lay2_b
        self.layer2 = tf.nn.relu(self.layer2)
        # Max pool 2x2: 10x10x16 -> 5x5x16.
        ksize = [1, 2, 2, 1]
        strides = [1, 2, 2, 1]
        padding = 'VALID'
        self.layer2 = tf.nn.max_pool(self.layer2, ksize, strides, padding)

        # Flatten 5x5x16 -> 400, then dropout #1.
        self.flat = flatten(self.layer2)
        self.flat = tf.nn.dropout(self.flat, self.dropout_val)

        # Layer 3: fully connected 400 -> 120.
        self.lay3_W = tf.Variable(tf.truncated_normal(shape=(400, 120), mean=self.mu, stddev=self.sigma))
        self.lay3_b = tf.Variable(tf.zeros([120]))
        self.layer3 = tf.matmul(self.flat, self.lay3_W) + self.lay3_b
        self.layer3 = tf.nn.relu(self.layer3)

        # Layer 4: fully connected 120 -> 84, then dropout #2.
        self.lay4_W = tf.Variable(tf.truncated_normal(shape=(120, 84), mean=self.mu, stddev=self.sigma))
        self.lay4_b = tf.Variable(tf.zeros([84]))
        self.layer4 = tf.matmul(self.layer3, self.lay4_W) + self.lay4_b
        self.layer4 = tf.nn.relu(self.layer4)
        self.layer4 = tf.nn.dropout(self.layer4, self.dropout_val)

        # Layer 5: fully connected 84 -> output_classes (logits).
        self.lay5_W = tf.Variable(tf.truncated_normal(shape=(84, output_classes), mean=self.mu, stddev=self.sigma))
        self.lay5_b = tf.Variable(tf.zeros([output_classes]))
        self.layer5 = tf.matmul(self.layer4, self.lay5_W) + self.lay5_b

        self.network = self.layer5

    def start_neptune_session(self, api_token, prj_name):
        """Initialize Neptune tracking and enable metric logging.

        :param api_token: Neptune API token.
        :param prj_name: qualified Neptune project name.
        """
        assert api_token is not None
        assert prj_name is not None
        neptune.init(
            api_token=api_token,
            project_qualified_name=prj_name)
        self.log_neptune = True

    def get_network(self):
        '''
        :return: Tensor with entire NN architecture.
        '''
        return self.network

    def set_hiperparams(self, epochs: int = 10, batch_size: int = 64, learn_rate: float = 0.002,
                        dropout_val: float = 0.5):
        '''
        Method sets hiperparameters.
        :param epochs: Number of epochs for training.
        :param batch_size: Size of batch.
        :param learn_rate: Learning rate.
        :param dropout_val: Dropout keep probability.
        :return:
        '''
        # NOTE(review): dropout_val has no effect here — the dropout ops
        # were already built with the constructor's value.  (Method name
        # keeps the original "hiperparams" spelling for compatibility.)
        self.epochs = epochs
        self.batch_size = batch_size
        self.learn_rate = learn_rate
        self.dropout_val = dropout_val

    def evaluate(self, X_data, y_data):
        """Return the mean accuracy of the current session over a dataset.

        Must be called with an active default session and after train()
        has built self.accuracy_operation.
        """
        num_examples = len(X_data)
        total_accuracy = 0
        sess = tf.get_default_session()
        for offset in range(0, num_examples, self.batch_size):
            batch_x, batch_y = X_data[offset:offset + self.batch_size], y_data[offset:offset + self.batch_size]
            accuracy = sess.run(self.accuracy_operation, feed_dict={self.x: batch_x, self.y: batch_y})
            # Weight each batch by its size (last batch may be smaller).
            total_accuracy += (accuracy * len(batch_x))
        return total_accuracy / num_examples

    def train(self, dataset: TrafficData = None, neptune_tags=None):
        """Train the network, log metrics, and save a checkpoint.

        :param dataset: TrafficData providing train/valid/test splits.
        :param neptune_tags: optional extra tags for the Neptune run.
        """
        assert dataset is not None
        self.dataset = dataset
        experiment = None
        if self.log_neptune:
            experiment = neptune.create_experiment(name=self.my_name, params={'batch_size': self.batch_size,
                                                                              'lr': self.learn_rate,
                                                                              'nr_epochs': self.epochs,
                                                                              'dropout': self.dropout_val})
            experiment.append_tag(self.my_name)
            experiment.append_tag('double_dropout')
            if neptune_tags is not None:
                for tag in neptune_tags:
                    experiment.append_tag(tag)

        # Loss/optimizer ops are added here, not in __init__.
        logits = self.get_network()
        self.cross_entropy = tf.nn.softmax_cross_entropy_with_logits(labels=self.one_hot_y, logits=logits)
        self.loss_operation = tf.reduce_mean(self.cross_entropy)
        self.optimizer = tf.train.AdamOptimizer(learning_rate=self.learn_rate)
        self.training_operation = self.optimizer.minimize(self.loss_operation)
        self.correct_prediction = tf.equal(tf.argmax(logits, 1), tf.argmax(self.one_hot_y, 1))
        self.accuracy_operation = tf.reduce_mean(tf.cast(self.correct_prediction, tf.float32))
        self.saver = tf.train.Saver()

        with tf.Session() as sess:
            sess.run(tf.global_variables_initializer())
            x_train, y_train = self.dataset.get_training_dataset()
            x_valid, y_valid = self.dataset.get_validation_dataset()
            x_test, y_test = self.dataset.get_testing_dataset()
            num_examples = len(x_train)
            print("Training...")
            print()
            for i in range(self.epochs):
                # Reshuffle every epoch so batches differ between epochs.
                x_train, y_train = shuffle(x_train, y_train)
                for offset in range(0, num_examples, self.batch_size):
                    end = offset + self.batch_size
                    batch_x, batch_y = x_train[offset:end], y_train[offset:end]
                    sess.run(self.training_operation, feed_dict={self.x: batch_x, self.y: batch_y})
                validation_accuracy = self.evaluate(x_valid, y_valid)
                test_accuracy = self.evaluate(x_test, y_test)
                print("EPOCH {} ...".format(i + 1))
                print("Validation Accuracy = {:.3f}".format(validation_accuracy))
                print("Test Accuracy = {:.3f}".format(test_accuracy))
                print()
                if self.log_neptune:
                    experiment.send_metric('validation_accuracy', validation_accuracy)
                    experiment.send_metric('test_accuracy', test_accuracy)
            self.saver.save(sess, self.model_file_name)
        if self.log_neptune:
            experiment.stop()

    def predict(self, images):
        """Restore the checkpoint and return predicted labels for images.

        Requires train() to have run first (it creates self.saver, the
        checkpoint and self.dataset used to map class ids to labels).
        """
        assert images is not None
        # Bug fix: numpy was used here but never imported at module level,
        # which raised NameError; import locally to keep the module's
        # import block untouched.
        import numpy as np
        with tf.Session() as sess:
            # Restore variables from disk.
            self.saver.restore(sess, self.model_file_name)
            results = self.network.eval(feed_dict={self.x: images})
            values = np.argmax(results, axis=1)
            labels = [self.dataset.label_for(val) for val in values]
            return labels

    def model_summary(self):
        '''
        Method prints summary of the NN model.
        :return:
        '''
        model_vars = tf.trainable_variables()
        slim.model_analyzer.analyze_vars(model_vars, print_info=True)
85ccefc5be6ba870a4592323f4ed31aac009da3a | 14,941 | py | Python | tsim/core/network/lane.py | eduardomezencio/tsim | 60ac63152a98fd7dabb59c66367bca216e6a7370 | [
"MIT"
] | 2 | 2021-04-24T06:48:13.000Z | 2022-01-25T02:38:44.000Z | tsim/core/network/lane.py | eduardomezencio/tsim | 60ac63152a98fd7dabb59c66367bca216e6a7370 | [
"MIT"
] | null | null | null | tsim/core/network/lane.py | eduardomezencio/tsim | 60ac63152a98fd7dabb59c66367bca216e6a7370 | [
"MIT"
] | null | null | null | """Lane and related classes."""
from __future__ import annotations
from dataclasses import dataclass
from typing import TYPE_CHECKING, NamedTuple, Optional, Sequence, Tuple
from dataslots import with_slots
from tsim.core.entity import EntityRef
from tsim.core.geometry import Point, Vector, line_intersection
from tsim.core.network.endpoint import Endpoint
from tsim.core.network.orientedway import OrientedWay, OrientedWayPosition
from tsim.core.network.location import (NetworkLocation, NetworkPosition,
WorldAndSegmentPosition)
from tsim.core.network.traffic import Traffic
from tsim.utils.linkedlist import LinkedList
if TYPE_CHECKING:
from tsim.core.network.intersection import Curve
from tsim.core.network.node import Node
from tsim.core.network.way import Way
# Width of a single traffic lane — presumably in meters; TODO confirm units.
LANE_WIDTH = 3.0
HALF_LANE_WIDTH = 0.5 * LANE_WIDTH
class LaneRef(NamedTuple):
    """Reference to a single lane of a way.

    The first two fields carry the same information as an `OrientedWay`;
    ``index`` selects the lane.  Index 0 is the leftmost lane when
    looking along the way in the direction given by ``endpoint``, and
    indexes grow to the right up to ``n - 1`` for ``n`` lanes in that
    direction.
    """

    way_ref: EntityRef[Way]
    endpoint: Endpoint
    index: int

    @staticmethod
    def build(way: Way, endpoint: Endpoint, index: int):
        """Create a `LaneRef` from a `Way` instead of a weak reference."""
        return LaneRef(EntityRef(way), endpoint, index)

    @property
    def lane(self) -> Lane:
        """Get the lane this reference points to."""
        return self.oriented_way.lane(self.index)

    @property
    def way(self) -> Optional[Way]:
        """Get the referenced way (None if it no longer exists)."""
        return self.way_ref()

    @property
    def oriented_way(self) -> OrientedWay:
        """Get the `OrientedWay` part of this reference."""
        return OrientedWay(self.way_ref, self.endpoint)

    @property
    def start(self) -> Node:
        """Get the node where this lane starts."""
        if self.endpoint is Endpoint.START:
            return self.way.start
        return self.way.end

    @property
    def end(self) -> Node:
        """Get the node where this lane ends."""
        if self.endpoint is Endpoint.START:
            return self.way.end
        return self.way.start

    @property
    def left_neighbor(self) -> LaneRef:
        """Get the reference to the lane immediately to the left."""
        return LaneRef(self.way_ref, self.endpoint, self.index - 1)

    @property
    def right_neighbor(self) -> LaneRef:
        """Get the reference to the lane immediately to the right."""
        return LaneRef(self.way_ref, self.endpoint, self.index + 1)

    def positive(self) -> LaneRef:
        """Get an equivalent reference with a non-negative index."""
        return (self if self.index >= 0
                else LaneRef(self.way_ref, self.endpoint.other,
                             -self.index - 1))

    def distance_from_center(self) -> float:
        """Get the rightward distance from the way center to this lane."""
        return self.way.lane_distance_from_center(self.index, self.endpoint)

    def __call__(self) -> Lane:
        """Get the lane this reference points to."""
        return self.lane

    def __repr__(self):
        return (f'{LaneRef.__name__}(way_id={self.way.id}, '
                f'endpoint={self.endpoint.name[0]}, index={self.index})')
@with_slots
@dataclass(frozen=True)
class LaneSegment:
    """Segment of a `Lane`.

    Describes one straight piece of the lane's polyline, with enough
    precomputed data to convert between lane and way distances.
    """
    # Distance along the lane where this segment starts (meters).
    start_distance: float
    # Distance along the lane where this segment ends (meters).
    end_distance: float
    # Distance along the underlying way at the segment's start (meters).
    start_way_distance: float
    # Ratio of segment lane-length to way-length; multiplies way distances
    # into lane distances (see Lane.way_to_lane_position).
    factor: float
    # World-space start point of the segment.
    start: Point
    # Direction vector of the underlying way segment.
    # NOTE(review): presumably normalized upstream -- confirm before
    # relying on unit length.
    vector: Vector
    # True when the way segment is not rectangular, i.e. the lane bends here.
    is_turning: bool
class Lane(NetworkLocation):
    """A longitudinal section of a `Way` for one-way flow.

    Positions along a lane are measured in meters from the lane's start
    node.  The lane geometry is stored as a tuple of `LaneSegment`s derived
    from the underlying way geometry, offset laterally by
    `distance_from_center`.
    """
    __slots__ = ('lane_ref', 'distance_from_center', 'length', 'segments',
                 'traffic')
    lane_ref: LaneRef
    distance_from_center: float
    length: float
    segments: Tuple[LaneSegment]
    traffic: Traffic
    def __init__(self, lane_ref: LaneRef):
        # Normalize to a positive index so the segments are always built in
        # the lane's travel direction.
        self.lane_ref = lane_ref.positive()
        self.distance_from_center = self.lane_ref.distance_from_center()
        self._build_segments()
        self.traffic = LinkedList()
    @property
    def way(self) -> Optional[Way]:
        """Get the referenced way."""
        return self.lane_ref.way_ref()
    @property
    def endpoint(self) -> Endpoint:
        """Get the lane orientation."""
        return self.lane_ref.endpoint
    @property
    def index(self) -> int:
        """Get lane index."""
        return self.lane_ref.index
    lane_index = index
    @property
    def oriented_way(self) -> OrientedWay:
        """Get oriented way from this lane."""
        return self.lane_ref.oriented_way
    @property
    def start(self) -> Node:
        """Get the start node of the lane."""
        return self.lane_ref.start
    @property
    def end(self) -> Node:
        """Get the end node of the lane."""
        return self.lane_ref.end
    @property
    def segment_count(self) -> int:
        """Get the number of lane segments."""
        return len(self.segments)
    @property
    def left_neighbor(self) -> Lane:
        """Get the lane to the left of this one."""
        return self.lane_ref.left_neighbor()
    @property
    def right_neighbor(self) -> Lane:
        """Get the lane to the right of this one."""
        return self.lane_ref.right_neighbor()
    def world_and_segment_position(self, position: float) \
            -> WorldAndSegmentPosition:
        """Get world and segment position on given lane position.

        Raises ValueError if `position` is negative or past the lane end.
        """
        if position >= 0.0:
            # Default to (None, None) so an out-of-range position falls
            # through to the ValueError below; a bare None default would
            # raise TypeError while unpacking into (i, segment).
            i, segment = next(((i, s) for i, s in enumerate(self.segments)
                               if s.end_distance >= position), (None, None))
            if segment is not None:
                segment_position = position - segment.start_distance
                point = segment.start + segment.vector * segment_position
                return WorldAndSegmentPosition(point, segment.vector,
                                               i, segment.end_distance)
        raise ValueError('Position outside of lane.')
    def lane_to_way_position(self, position: float) -> float:
        """Get way position from lane position.

        The result is measured from the way's own START endpoint,
        regardless of this lane's orientation.
        Raises ValueError if `position` is outside the lane.
        """
        way_position = None
        if position >= 0.0:
            segment = next((s for s in self.segments
                            if s.end_distance >= position), None)
            if segment is not None:
                # factor converts way meters to lane meters, so divide to
                # go back from lane distance to way distance.
                way_position = (segment.start_way_distance +
                                (position - segment.start_distance)
                                / segment.factor)
        if way_position is not None:
            if self.endpoint is Endpoint.END:
                # Segments were built from the END, so mirror the distance.
                way_position = self.way.length - way_position
            return way_position
        raise ValueError('Position outside of lane.')
    def lane_to_oriented_position(self, position: float) -> float:
        """Get oriented way position from lane position."""
        way_position = self.lane_to_way_position(position)
        if self.endpoint is Endpoint.START:
            return way_position
        return self.way.length - way_position
    def oriented_way_position(self, position: float) -> OrientedWayPosition:
        """Get `OrientedWayPosition` from lane position.
        Almost the same as `lane_to_oriented_position`, but returns position as
        a `OrientedWayPosition` instead of the position value only.
        """
        return OrientedWayPosition(self.oriented_way,
                                   self.lane_to_oriented_position(position))
    def way_to_lane_position(self, position: float,
                             endpoint: Endpoint = Endpoint.START,
                             default: float = None) -> float:
        """Get lane position from way position.
        The `position` argument is a distance in meters from the given
        `endpoint`. If position is taken from a way position without
        orientation there's no need to set the `endpoint`, since an oriented
        way position from the `START` endpoint is equivalent to a way position.

        Raises ValueError if the position falls outside the lane and no
        `default` is given.
        """
        offsets = (self.oriented_way.start_offset,
                   self.oriented_way.end_offset)
        if self.endpoint is not endpoint:
            position = self.way.length - position
        last_segment = None
        if offsets[0] < position <= self.way.length - offsets[1]:
            # Find the last segment that starts at or before `position`.
            for segment in self.segments:
                if segment.start_way_distance > position:
                    break
                last_segment = segment
        segment = last_segment
        if segment is not None:
            return (segment.start_distance +
                    segment.factor * (position - segment.start_way_distance))
        if default is not None:
            return default
        raise ValueError('Position outside of lane.')
    def get_curve(self, dest: OrientedWay,
                  accept_lane_change: bool = True) -> Optional[Curve]:
        """Get the curve connecting this lane to `dest` oriented way.

        Returns None when the connection would require a lane change and
        `accept_lane_change` is False.
        """
        connection = self.end.get_lane_connection(self.lane_ref, dest)
        if accept_lane_change or connection[0].index == self.index:
            return self.end.intersection.curves[connection]
        return None
    def get_free_space(self, distance: float, buffer: int) -> Sequence[float]:
        """Get space available behind and ahead in given distance.
        The returned sequence contains the free space behind in position 0 and
        ahead in position 1. Free space is the distance to an agent or to the
        ends of the lane.
        """
        free_space = [distance, self.length - distance]
        # Traffic is assumed ordered by position, so the last agent seen
        # before `distance` gives the space behind and the first one past
        # it gives the space ahead.
        for agent in self.traffic:
            position = agent.get_network_position(self, buffer)
            if position < distance:
                free_space[0] = distance - position
            else:
                free_space[1] = position - distance
                break
        return free_space
    def _build_segments(self):
        """Build the lane's `LaneSegment`s from the way geometry.

        Projects each way segment sideways by `distance_from_center` and
        clips it against the segment's end cross-lines, accumulating both
        lane and way distances.  Also sets `self.length`.
        """
        distance = 0.0
        segments = []
        way_segments = self.lane_ref.way.geometry.segments
        if self.endpoint is Endpoint.END:
            # Walk the geometry backwards for END-oriented lanes.
            way_distance = self.lane_ref.way.geometry.end_offset
            way_segments = (s.inverted() for s in reversed(way_segments))
        else:
            way_distance = self.lane_ref.way.geometry.start_offset
        for way_segment in way_segments:
            # Point on the lane's center line, offset from the way center.
            point = (way_segment.start
                     + (self.distance_from_center
                        * way_segment.width_vector.normalized()))
            # Clip the offset line against the segment's start cross-line...
            left = way_segment.start_left
            vector = (way_segment.start_right - left).normalized()
            start = line_intersection(point, way_segment.vector,
                                      left, vector)
            # ...and against its end cross-line.
            left = way_segment.end_left
            vector = (way_segment.end_right - left).normalized()
            end = line_intersection(point, way_segment.vector,
                                    left, vector)
            length = abs(end - start)
            way_length = way_segment.length()
            end_distance = distance + length
            segments.append(LaneSegment(distance, end_distance, way_distance,
                                        length / way_length, start,
                                        way_segment.vector,
                                        not way_segment.is_rectangular))
            distance = end_distance
            way_distance += way_length
        self.length = distance
        self.segments = tuple(segments)
    def __repr__(self):
        return (f'{Lane.__name__}(way_id={self.way.id}, '
                f'endpoint={self.endpoint.name[0]}, index={self.index})')
@with_slots
@dataclass(frozen=True)
class LanePosition(NetworkPosition):
    """A position in a `Lane`.
    The position is in meters from the start of the lane.
    """
    # The lane this position lives on.
    lane: Lane
    # Distance from the lane start, in meters.
    position: float
    @property
    def location(self) -> NetworkLocation:
        """Get the `NetworkLocation` of this lane position."""
        return self.lane
    @property
    def remaining(self) -> float:
        """Get distance in meters to the end of the lane."""
        return self.lane.length - self.position
    @property
    def oriented_way(self) -> OrientedWay:
        """Get the oriented way of this lane."""
        return self.lane.oriented_way
    @property
    def oriented_way_position(self) -> OrientedWayPosition:
        """Get the oriented way position at this lane position."""
        return OrientedWayPosition(
            self.lane.oriented_way,
            self.lane.lane_to_oriented_position(self.position))
    @property
    def left_neighbor(self) -> LanePosition:
        """Get same position on the lane to the right.

        Falls back to the same lane-relative position when the way
        position does not map onto the neighbor lane (e.g. near the
        lane ends).
        """
        way_position = self.lane.lane_to_way_position(self.position)
        lane = self.lane.left_neighbor
        try:
            return LanePosition(lane, lane.way_to_lane_position(way_position))
        except ValueError:
            # Outside the neighbor lane's mapped range; keep raw position.
            return LanePosition(lane, self.position)
    @property
    def right_neighbor(self) -> LanePosition:
        """Get same position on the lane to the right.

        Same fallback behavior as `left_neighbor`.
        """
        way_position = self.lane.lane_to_way_position(self.position)
        lane = self.lane.right_neighbor
        try:
            return LanePosition(lane, lane.way_to_lane_position(way_position))
        except ValueError:
            # Outside the neighbor lane's mapped range; keep raw position.
            return LanePosition(lane, self.position)
    def get_free_space(self, buffer: int) -> Sequence[float]:
        """Get space available behind and ahead in this position.
        See same method in `Lane`.
        """
        return self.lane.get_free_space(self.position, buffer)
    def with_free_space(self, buffer: int, length: float) -> LanePosition:
        """Get a nearby lane position with `length` free space around it.
        If there is free space around this position, `self` is returned.
        Otherwise, a new `LanePosition` or `None` is returned depending on
        whether a free position was found nearby.
        """
        result = self
        free_space = result.get_free_space(buffer)
        # Shift forward if cramped behind, then backward if cramped ahead;
        # if either side is still short afterwards there is no room.
        if free_space[0] < length:
            change = length - free_space[0]
            result = LanePosition(result.lane, result.position + change)
            free_space[0] += change
            free_space[1] -= change
        if free_space[1] < length:
            change = length - free_space[1]
            result = LanePosition(result.lane, result.position - change)
            free_space[0] -= change
            free_space[1] += change
        if any(s < length for s in free_space):
            result = None
        return result
    def world_and_segment_position(self) -> WorldAndSegmentPosition:
        """Get world and segment position at this lane position."""
        return self.lane.world_and_segment_position(self.position)
| 35.658711 | 79 | 0.621645 |
1ad028fa4d08ca338e0b66bc5e6c116957bacec5 | 26,923 | py | Python | Lib/socketserver.py | gerph/cpython | 98813cb03c2371789669c3d8debf8fca2a344de9 | [
"CNRI-Python-GPL-Compatible"
] | 6,660 | 2018-01-13T12:16:53.000Z | 2022-03-31T15:15:28.000Z | Lib/socketserver.py | gerph/cpython | 98813cb03c2371789669c3d8debf8fca2a344de9 | [
"CNRI-Python-GPL-Compatible"
] | 427 | 2017-09-29T22:54:36.000Z | 2022-02-15T19:26:50.000Z | Lib/socketserver.py | gerph/cpython | 98813cb03c2371789669c3d8debf8fca2a344de9 | [
"CNRI-Python-GPL-Compatible"
] | 1,933 | 2018-01-15T13:08:40.000Z | 2022-03-31T11:28:59.000Z | """Generic socket server classes.
This module tries to capture the various aspects of defining a server:
For socket-based servers:
- address family:
- AF_INET{,6}: IP (Internet Protocol) sockets (default)
- AF_UNIX: Unix domain sockets
- others, e.g. AF_DECNET are conceivable (see <socket.h>
- socket type:
- SOCK_STREAM (reliable stream, e.g. TCP)
- SOCK_DGRAM (datagrams, e.g. UDP)
For request-based servers (including socket-based):
- client address verification before further looking at the request
(This is actually a hook for any processing that needs to look
at the request before anything else, e.g. logging)
- how to handle multiple requests:
- synchronous (one request is handled at a time)
- forking (each request is handled by a new process)
- threading (each request is handled by a new thread)
The classes in this module favor the server type that is simplest to
write: a synchronous TCP/IP server. This is bad class design, but
saves some typing. (There's also the issue that a deep class hierarchy
slows down method lookups.)
There are five classes in an inheritance diagram, four of which represent
synchronous servers of four types:
+------------+
| BaseServer |
+------------+
|
v
+-----------+ +------------------+
| TCPServer |------->| UnixStreamServer |
+-----------+ +------------------+
|
v
+-----------+ +--------------------+
| UDPServer |------->| UnixDatagramServer |
+-----------+ +--------------------+
Note that UnixDatagramServer derives from UDPServer, not from
UnixStreamServer -- the only difference between an IP and a Unix
stream server is the address family, which is simply repeated in both
unix server classes.
Forking and threading versions of each type of server can be created
using the ForkingMixIn and ThreadingMixIn mix-in classes. For
instance, a threading UDP server class is created as follows:
class ThreadingUDPServer(ThreadingMixIn, UDPServer): pass
The Mix-in class must come first, since it overrides a method defined
in UDPServer! Setting the various member variables also changes
the behavior of the underlying server mechanism.
To implement a service, you must derive a class from
BaseRequestHandler and redefine its handle() method. You can then run
various versions of the service by combining one of the server classes
with your request handler class.
The request handler class must be different for datagram or stream
services. This can be hidden by using the request handler
subclasses StreamRequestHandler or DatagramRequestHandler.
Of course, you still have to use your head!
For instance, it makes no sense to use a forking server if the service
contains state in memory that can be modified by requests (since the
modifications in the child process would never reach the initial state
kept in the parent process and passed to each child). In this case,
you can use a threading server, but you will probably have to use
locks to avoid two requests that come in nearly simultaneous to apply
conflicting changes to the server state.
On the other hand, if you are building e.g. an HTTP server, where all
data is stored externally (e.g. in the file system), a synchronous
class will essentially render the service "deaf" while one request is
being handled -- which may be for a very long time if a client is slow
to read all the data it has requested. Here a threading or forking
server is appropriate.
In some cases, it may be appropriate to process part of a request
synchronously, but to finish processing in a forked child depending on
the request data. This can be implemented by using a synchronous
server and doing an explicit fork in the request handler class
handle() method.
Another approach to handling multiple simultaneous requests in an
environment that supports neither threads nor fork (or where these are
too expensive or inappropriate for the service) is to maintain an
explicit table of partially finished requests and to use a selector to
decide which request to work on next (or whether to handle a new
incoming request). This is particularly important for stream services
where each client can potentially be connected for a long time (if
threads or subprocesses cannot be used).
Future work:
- Standard classes for Sun RPC (which uses either UDP or TCP)
- Standard mix-in classes to implement various authentication
and encryption schemes
XXX Open problems:
- What to do with out-of-band data?
BaseServer:
- split generic "request" functionality out into BaseServer class.
Copyright (C) 2000 Luke Kenneth Casson Leighton <lkcl@samba.org>
example: read entries from a SQL database (requires overriding
get_request() to return a table entry from the database).
entry is processed by a RequestHandlerClass.
"""
# Author of the BaseServer patch: Luke Kenneth Casson Leighton
__version__ = "0.4"
import socket
import selectors
import os
import sys
import threading
from io import BufferedIOBase
from time import monotonic as time
# Public API of the module; fork- and AF_UNIX-based variants are appended
# below only on platforms that support them.
__all__ = ["BaseServer", "TCPServer", "UDPServer",
           "ThreadingUDPServer", "ThreadingTCPServer",
           "BaseRequestHandler", "StreamRequestHandler",
           "DatagramRequestHandler", "ThreadingMixIn"]
if hasattr(os, "fork"):
    __all__.extend(["ForkingUDPServer","ForkingTCPServer", "ForkingMixIn"])
if hasattr(socket, "AF_UNIX"):
    __all__.extend(["UnixStreamServer","UnixDatagramServer",
                    "ThreadingUnixStreamServer",
                    "ThreadingUnixDatagramServer"])
# poll/select have the advantage of not requiring any extra file descriptor,
# contrarily to epoll/kqueue (also, they require a single syscall).
# _ServerSelector is used by serve_forever() and handle_request(); each of
# those servers watches a single fd, so poll/select are the cheapest fit.
if hasattr(selectors, 'PollSelector'):
    _ServerSelector = selectors.PollSelector
else:
    _ServerSelector = selectors.SelectSelector
class BaseServer:
    """Base class for server classes.
    Methods for the caller:
    - __init__(server_address, RequestHandlerClass)
    - serve_forever(poll_interval=0.5)
    - shutdown()
    - handle_request() # if you do not use serve_forever()
    - fileno() -> int # for selector
    Methods that may be overridden:
    - server_bind()
    - server_activate()
    - get_request() -> request, client_address
    - handle_timeout()
    - verify_request(request, client_address)
    - server_close()
    - process_request(request, client_address)
    - shutdown_request(request)
    - close_request(request)
    - service_actions()
    - handle_error()
    Methods for derived classes:
    - finish_request(request, client_address)
    Class variables that may be overridden by derived classes or
    instances:
    - timeout
    - address_family
    - socket_type
    - allow_reuse_address
    Instance variables:
    - RequestHandlerClass
    - socket
    """
    # Seconds handle_request() waits before calling handle_timeout();
    # None means wait forever.
    timeout = None
    def __init__(self, server_address, RequestHandlerClass):
        """Constructor. May be extended, do not override."""
        self.server_address = server_address
        self.RequestHandlerClass = RequestHandlerClass
        # Set once serve_forever() has fully exited; shutdown() waits on it.
        self.__is_shut_down = threading.Event()
        # Polled by serve_forever() to request loop termination.
        self.__shutdown_request = False
    def server_activate(self):
        """Called by constructor to activate the server.
        May be overridden.
        """
        pass
    def serve_forever(self, poll_interval=0.5):
        """Handle one request at a time until shutdown.
        Polls for shutdown every poll_interval seconds. Ignores
        self.timeout. If you need to do periodic tasks, do them in
        another thread.
        """
        self.__is_shut_down.clear()
        try:
            # XXX: Consider using another file descriptor or connecting to the
            # socket to wake this up instead of polling. Polling reduces our
            # responsiveness to a shutdown request and wastes cpu at all other
            # times.
            with _ServerSelector() as selector:
                selector.register(self, selectors.EVENT_READ)
                while not self.__shutdown_request:
                    ready = selector.select(poll_interval)
                    # bpo-35017: shutdown() called during select(), exit immediately.
                    if self.__shutdown_request:
                        break
                    if ready:
                        self._handle_request_noblock()
                    self.service_actions()
        finally:
            # Always signal shutdown() waiters, even if the loop body raised.
            self.__shutdown_request = False
            self.__is_shut_down.set()
    def shutdown(self):
        """Stops the serve_forever loop.
        Blocks until the loop has finished. This must be called while
        serve_forever() is running in another thread, or it will
        deadlock.
        """
        self.__shutdown_request = True
        self.__is_shut_down.wait()
    def service_actions(self):
        """Called by the serve_forever() loop.
        May be overridden by a subclass / Mixin to implement any code that
        needs to be run during the loop.
        """
        pass
    # The distinction between handling, getting, processing and finishing a
    # request is fairly arbitrary. Remember:
    #
    # - handle_request() is the top-level call. It calls selector.select(),
    # get_request(), verify_request() and process_request()
    # - get_request() is different for stream or datagram sockets
    # - process_request() is the place that may fork a new process or create a
    # new thread to finish the request
    # - finish_request() instantiates the request handler class; this
    # constructor will handle the request all by itself
    def handle_request(self):
        """Handle one request, possibly blocking.
        Respects self.timeout.
        """
        # Support people who used socket.settimeout() to escape
        # handle_request before self.timeout was available.
        timeout = self.socket.gettimeout()
        if timeout is None:
            timeout = self.timeout
        elif self.timeout is not None:
            timeout = min(timeout, self.timeout)
        if timeout is not None:
            deadline = time() + timeout
        # Wait until a request arrives or the timeout expires - the loop is
        # necessary to accommodate early wakeups due to EINTR.
        with _ServerSelector() as selector:
            selector.register(self, selectors.EVENT_READ)
            while True:
                ready = selector.select(timeout)
                if ready:
                    return self._handle_request_noblock()
                else:
                    if timeout is not None:
                        # Recompute the remaining wait from the absolute
                        # deadline so spurious wakeups don't extend it.
                        timeout = deadline - time()
                        if timeout < 0:
                            return self.handle_timeout()
    def _handle_request_noblock(self):
        """Handle one request, without blocking.
        I assume that selector.select() has returned that the socket is
        readable before this function was called, so there should be no risk of
        blocking in get_request().
        """
        try:
            request, client_address = self.get_request()
        except OSError:
            return
        if self.verify_request(request, client_address):
            try:
                self.process_request(request, client_address)
            except Exception:
                self.handle_error(request, client_address)
                self.shutdown_request(request)
            except:
                # Non-Exception BaseExceptions (e.g. KeyboardInterrupt)
                # still release the request, then propagate.
                self.shutdown_request(request)
                raise
        else:
            self.shutdown_request(request)
    def handle_timeout(self):
        """Called if no new request arrives within self.timeout.
        Overridden by ForkingMixIn.
        """
        pass
    def verify_request(self, request, client_address):
        """Verify the request. May be overridden.
        Return True if we should proceed with this request.
        """
        return True
    def process_request(self, request, client_address):
        """Call finish_request.
        Overridden by ForkingMixIn and ThreadingMixIn.
        """
        self.finish_request(request, client_address)
        self.shutdown_request(request)
    def server_close(self):
        """Called to clean-up the server.
        May be overridden.
        """
        pass
    def finish_request(self, request, client_address):
        """Finish one request by instantiating RequestHandlerClass."""
        self.RequestHandlerClass(request, client_address, self)
    def shutdown_request(self, request):
        """Called to shutdown and close an individual request."""
        self.close_request(request)
    def close_request(self, request):
        """Called to clean up an individual request."""
        pass
    def handle_error(self, request, client_address):
        """Handle an error gracefully. May be overridden.
        The default is to print a traceback and continue.
        """
        print('-'*40, file=sys.stderr)
        print('Exception happened during processing of request from',
              client_address, file=sys.stderr)
        import traceback
        traceback.print_exc()
        print('-'*40, file=sys.stderr)
    def __enter__(self):
        return self
    def __exit__(self, *args):
        self.server_close()
class TCPServer(BaseServer):
    """Base class for various socket-based server classes.
    Defaults to synchronous IP stream (i.e., TCP).
    Methods for the caller:
    - __init__(server_address, RequestHandlerClass, bind_and_activate=True)
    - serve_forever(poll_interval=0.5)
    - shutdown()
    - handle_request() # if you don't use serve_forever()
    - fileno() -> int # for selector
    Methods that may be overridden:
    - server_bind()
    - server_activate()
    - get_request() -> request, client_address
    - handle_timeout()
    - verify_request(request, client_address)
    - process_request(request, client_address)
    - shutdown_request(request)
    - close_request(request)
    - handle_error()
    Methods for derived classes:
    - finish_request(request, client_address)
    Class variables that may be overridden by derived classes or
    instances:
    - timeout
    - address_family
    - socket_type
    - request_queue_size (only for stream sockets)
    - allow_reuse_address
    Instance variables:
    - server_address
    - RequestHandlerClass
    - socket
    """
    address_family = socket.AF_INET
    socket_type = socket.SOCK_STREAM
    # Backlog passed to listen().
    request_queue_size = 5
    # When True, server_bind() sets SO_REUSEADDR before binding, allowing
    # quick restarts on the same address.
    allow_reuse_address = False
    def __init__(self, server_address, RequestHandlerClass, bind_and_activate=True):
        """Constructor. May be extended, do not override."""
        BaseServer.__init__(self, server_address, RequestHandlerClass)
        self.socket = socket.socket(self.address_family,
                                    self.socket_type)
        if bind_and_activate:
            try:
                self.server_bind()
                self.server_activate()
            except:
                # Don't leak the socket if bind/activate failed.
                self.server_close()
                raise
    def server_bind(self):
        """Called by constructor to bind the socket.
        May be overridden.
        """
        if self.allow_reuse_address:
            self.socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
        self.socket.bind(self.server_address)
        # Re-read the actual bound address (e.g. when port 0 was requested).
        self.server_address = self.socket.getsockname()
    def server_activate(self):
        """Called by constructor to activate the server.
        May be overridden.
        """
        self.socket.listen(self.request_queue_size)
    def server_close(self):
        """Called to clean-up the server.
        May be overridden.
        """
        self.socket.close()
    def fileno(self):
        """Return socket file number.
        Interface required by selector.
        """
        return self.socket.fileno()
    def get_request(self):
        """Get the request and client address from the socket.
        May be overridden.
        """
        return self.socket.accept()
    def shutdown_request(self, request):
        """Called to shutdown and close an individual request."""
        try:
            #explicitly shutdown. socket.close() merely releases
            #the socket and waits for GC to perform the actual close.
            request.shutdown(socket.SHUT_WR)
        except OSError:
            pass #some platforms may raise ENOTCONN here
        self.close_request(request)
    def close_request(self, request):
        """Called to clean up an individual request."""
        request.close()
class UDPServer(TCPServer):
    """Synchronous UDP (datagram) server.

    Each request is a single datagram.  get_request() returns the payload
    paired with the server socket so the handler can reply via sendto().
    """

    allow_reuse_address = False
    socket_type = socket.SOCK_DGRAM
    # Largest datagram read in a single recvfrom() call.
    max_packet_size = 8192

    def get_request(self):
        """Receive one datagram; return ((data, socket), client_address)."""
        packet, sender = self.socket.recvfrom(self.max_packet_size)
        return (packet, self.socket), sender

    def server_activate(self):
        """Datagram sockets are connectionless; listen() is not needed."""
        pass

    def shutdown_request(self, request):
        """There is no connection to shut down; just clean up."""
        self.close_request(request)

    def close_request(self, request):
        """Nothing to release for a datagram request."""
        pass
if hasattr(os, "fork"):
    class ForkingMixIn:
        """Mix-in class to handle each request in a new process."""
        # Seconds of inactivity after which handle_timeout() reaps zombies.
        timeout = 300
        # Lazily-created set of child PIDs still being tracked.
        active_children = None
        max_children = 40
        # If true, server_close() waits until all child processes complete.
        block_on_close = True
        def collect_children(self, *, blocking=False):
            """Internal routine to wait for children that have exited."""
            if self.active_children is None:
                return
            # If we're above the max number of children, wait and reap them until
            # we go back below threshold. Note that we use waitpid(-1) below to be
            # able to collect children in size(<defunct children>) syscalls instead
            # of size(<children>): the downside is that this might reap children
            # which we didn't spawn, which is why we only resort to this when we're
            # above max_children.
            while len(self.active_children) >= self.max_children:
                try:
                    pid, _ = os.waitpid(-1, 0)
                    self.active_children.discard(pid)
                except ChildProcessError:
                    # we don't have any children, we're done
                    self.active_children.clear()
                except OSError:
                    break
            # Now reap all defunct children.
            for pid in self.active_children.copy():
                try:
                    flags = 0 if blocking else os.WNOHANG
                    pid, _ = os.waitpid(pid, flags)
                    # if the child hasn't exited yet, pid will be 0 and ignored by
                    # discard() below
                    self.active_children.discard(pid)
                except ChildProcessError:
                    # someone else reaped it
                    self.active_children.discard(pid)
                except OSError:
                    pass
        def handle_timeout(self):
            """Wait for zombies after self.timeout seconds of inactivity.
            May be extended, do not override.
            """
            self.collect_children()
        def service_actions(self):
            """Collect the zombie child processes regularly in the ForkingMixIn.
            service_actions is called in the BaseServer's serve_forever loop.
            """
            self.collect_children()
        def process_request(self, request, client_address):
            """Fork a new subprocess to process the request."""
            pid = os.fork()
            if pid:
                # Parent process
                if self.active_children is None:
                    self.active_children = set()
                self.active_children.add(pid)
                # Parent no longer needs the request socket; child owns it.
                self.close_request(request)
                return
            else:
                # Child process.
                # This must never return, hence os._exit()!
                status = 1
                try:
                    self.finish_request(request, client_address)
                    status = 0
                except Exception:
                    self.handle_error(request, client_address)
                finally:
                    try:
                        self.shutdown_request(request)
                    finally:
                        os._exit(status)
        def server_close(self):
            super().server_close()
            # Optionally wait for all children before returning.
            self.collect_children(blocking=self.block_on_close)
class ThreadingMixIn:
    """Mix-in class that dispatches each request to a fresh thread."""

    # When True, worker threads are daemonic and may be killed abruptly
    # at interpreter exit.
    daemon_threads = False
    # When True, server_close() blocks until all non-daemonic workers finish.
    block_on_close = True
    # Lazily-created list of non-daemonic worker threads pending join().
    _threads = None

    def process_request_thread(self, request, client_address):
        """Thread body: run the handler, always releasing the request.

        Exception handling is done here because process_request() returns
        before the handler runs.
        """
        try:
            self.finish_request(request, client_address)
        except Exception:
            self.handle_error(request, client_address)
        finally:
            self.shutdown_request(request)

    def process_request(self, request, client_address):
        """Spawn a worker thread to handle one request."""
        worker = threading.Thread(
            target=self.process_request_thread,
            args=(request, client_address),
        )
        worker.daemon = self.daemon_threads
        if self.block_on_close and not worker.daemon:
            # Remember the thread so server_close() can join it later.
            if self._threads is None:
                self._threads = []
            self._threads.append(worker)
        worker.start()

    def server_close(self):
        """Close the server, then wait for outstanding workers if configured."""
        super().server_close()
        if self.block_on_close:
            pending, self._threads = self._threads, None
            for worker in pending or ():
                worker.join()
# Concrete convenience classes combining the mix-ins with the base servers.
# Forking variants exist only where os.fork() is available; Unix-domain
# variants only where AF_UNIX sockets are supported.
if hasattr(os, "fork"):
    class ForkingUDPServer(ForkingMixIn, UDPServer): pass
    class ForkingTCPServer(ForkingMixIn, TCPServer): pass
class ThreadingUDPServer(ThreadingMixIn, UDPServer): pass
class ThreadingTCPServer(ThreadingMixIn, TCPServer): pass
if hasattr(socket, 'AF_UNIX'):
    class UnixStreamServer(TCPServer):
        address_family = socket.AF_UNIX
    class UnixDatagramServer(UDPServer):
        address_family = socket.AF_UNIX
    class ThreadingUnixStreamServer(ThreadingMixIn, UnixStreamServer): pass
    class ThreadingUnixDatagramServer(ThreadingMixIn, UnixDatagramServer): pass
class BaseRequestHandler:
    """Base class for request handler classes.

    One instance is created per request.  The constructor stores the
    request, the client address and the owning server, then drives the
    handler lifecycle: setup() -> handle() -> finish(), with finish()
    guaranteed to run even if handle() raises.  Subclasses implement the
    actual service by overriding handle(); setup()/finish() are optional
    acquire/release hooks.  Per-request state can live in instance
    attributes, since every request gets its own instance.
    """

    def __init__(self, request, client_address, server):
        self.server = server
        self.client_address = client_address
        self.request = request
        self.setup()
        try:
            self.handle()
        finally:
            self.finish()

    def setup(self):
        """Hook run before handle(); override to acquire resources."""

    def handle(self):
        """Service the request; override to do anything useful."""

    def finish(self):
        """Hook run after handle(), even on error; override to clean up."""
# The following two classes make it possible to use the same service
# class for stream or datagram servers.
# Each class sets up these instance variables:
# - rfile: a file object from which receives the request is read
# - wfile: a file object to which the reply is written
# When the handle() method returns, wfile is flushed properly
class StreamRequestHandler(BaseRequestHandler):
    """Define self.rfile and self.wfile for stream sockets."""
    # Default buffer sizes for rfile, wfile.
    # We default rfile to buffered because otherwise it could be
    # really slow for large data (a getc() call per byte); we make
    # wfile unbuffered because (a) often after a write() we want to
    # read and we need to flush the line; (b) big writes to unbuffered
    # files are typically optimized by stdio even when big reads
    # aren't.
    rbufsize = -1
    wbufsize = 0
    # A timeout to apply to the request socket, if not None.
    timeout = None
    # Disable nagle algorithm for this socket, if True.
    # Use only when wbufsize != 0, to avoid small packets.
    disable_nagle_algorithm = False
    def setup(self):
        """Wrap the connection in buffered rfile/unbuffered wfile objects."""
        self.connection = self.request
        if self.timeout is not None:
            self.connection.settimeout(self.timeout)
        if self.disable_nagle_algorithm:
            self.connection.setsockopt(socket.IPPROTO_TCP,
                                       socket.TCP_NODELAY, True)
        self.rfile = self.connection.makefile('rb', self.rbufsize)
        if self.wbufsize == 0:
            # _SocketWriter avoids the makefile() overhead for the common
            # unbuffered case and never needs flushing.
            self.wfile = _SocketWriter(self.connection)
        else:
            self.wfile = self.connection.makefile('wb', self.wbufsize)
    def finish(self):
        """Flush and close both file wrappers; tolerate late socket errors."""
        if not self.wfile.closed:
            try:
                self.wfile.flush()
            except socket.error:
                # A final socket error may have occurred here, such as
                # the local error ECONNABORTED.
                pass
        # Close the write side first so any buffered output goes out
        # before the read side is torn down.
        self.wfile.close()
        self.rfile.close()
class _SocketWriter(BufferedIOBase):
"""Simple writable BufferedIOBase implementation for a socket
Does not hold data in a buffer, avoiding any need to call flush()."""
def __init__(self, sock):
self._sock = sock
def writable(self):
return True
def write(self, b):
self._sock.sendall(b)
with memoryview(b) as view:
return view.nbytes
def fileno(self):
return self._sock.fileno()
class DatagramRequestHandler(BaseRequestHandler):

    """Define self.rfile and self.wfile for datagram sockets."""

    def setup(self):
        from io import BytesIO
        # For datagram servers the request is a (data, socket) pair.
        data, sock = self.request
        self.packet = data
        self.socket = sock
        self.rfile = BytesIO(data)
        self.wfile = BytesIO()

    def finish(self):
        # Everything written to wfile is sent back as one datagram.
        reply = self.wfile.getvalue()
        self.socket.sendto(reply, self.client_address)
| 32.913203 | 85 | 0.639305 |
0bb12f9d08d9f2c5cda88c896e9db92ed154d539 | 907 | py | Python | rake_app/views.py | idf/tagr | 9f68f1a46b412ef305df62c2e6f5349e9edd92c9 | [
"BSD-3-Clause"
] | 6 | 2015-06-24T16:56:54.000Z | 2018-07-23T14:05:42.000Z | rake_app/views.py | idf/tagr | 9f68f1a46b412ef305df62c2e6f5349e9edd92c9 | [
"BSD-3-Clause"
] | 2 | 2015-06-24T20:39:31.000Z | 2015-06-29T15:12:55.000Z | rake_app/views.py | idf/tagr | 9f68f1a46b412ef305df62c2e6f5349e9edd92c9 | [
"BSD-3-Clause"
] | null | null | null | import json
from django.http import HttpResponse
from django.shortcuts import render
from django.template.response import SimpleTemplateResponse
from django.utils.decorators import method_decorator
from django.views.decorators.csrf import csrf_exempt
from django.views.generic import TemplateView, View
from rake import rake
class MainView(View):
    """Serves the tagging page and extracts multi-word RAKE keywords."""

    template_name = "tagging.html"

    @method_decorator(csrf_exempt)
    def dispatch(self, *args, **kwargs):
        # CSRF is disabled so the JSON POST endpoint can be called directly.
        return super(MainView, self).dispatch(*args, **kwargs)

    def get(self, request, *args, **kwargs):
        return SimpleTemplateResponse(MainView.template_name)

    def post(self, request):
        payload = json.loads(request.body)
        scored = rake.Rake().run(payload["text"])
        # Keep only keyword phrases of more than one word.
        keywords = [item[0] for item in scored
                    if len(item[0].split(" ")) > 1]
        body = json.dumps({"keywords": keywords})
        return HttpResponse(body)
| 33.592593 | 79 | 0.708931 |
1e173f03cb840ccef5d696654b84bd4622d6e4eb | 1,923 | py | Python | _scraper.py | IdeaBot/web_scraper | ca0e2cdc10beef78fd9a781258ac0aff29eb17fc | [
"MIT"
] | null | null | null | _scraper.py | IdeaBot/web_scraper | ca0e2cdc10beef78fd9a781258ac0aff29eb17fc | [
"MIT"
] | 1 | 2019-02-26T17:59:46.000Z | 2019-02-26T17:59:46.000Z | _scraper.py | IdeaBot/web_scraper | ca0e2cdc10beef78fd9a781258ac0aff29eb17fc | [
"MIT"
] | null | null | null | import sys
sys.path.append('.\libs\scraperlibs')
import pageRet, fileIO, wordSearcher, findPosts, findDates, findTitle
class page:
    '''Fetches a web page and offers helpers for searching/extracting its content.'''

    def __init__(self, url):
        self.search = wordSearcher.wordSearcher
        self.raw = pageRet.pageRet(url).decode()
        self.url = url
        # Strip HTML tags: keep only characters that fall outside <...> spans.
        pieces = []
        inside_tag = False
        for ch in self.raw:
            if ch == "<":
                inside_tag = True
            elif ch == ">":
                inside_tag = False
            elif not inside_tag:
                pieces.append(str(ch))
        self.text = "".join(pieces)

    def findFirstPost(self):
        '''() -> str : url
        Return the first post URL in a forum section.'''
        return findPosts.FirstPost(self.raw)

    def findAllPosts(self):
        '''() -> list of str : url
        Return all post URLs in a forum section.'''
        return findPosts.FindPosts(self.raw)

    def findDates(self):
        '''() -> list of struct_time
        Return every post date found in the tag-stripped text.'''
        return findDates.findDates(self.text)

    def findTitle(self):
        '''() -> str
        Return the text between <title> and </title>.'''
        return findTitle.findTitle(self.raw)

    def findTopicTitle(self):
        '''() -> str
        Return the topic portion of the page title (text after " Topic: ").'''
        full_title = self.findTitle()
        start = wordSearcher.wordSearcher(" Topic: ", full_title, output="lastchar")[0]
        return full_title[start:]

    def searchraw(self, string):
        '''(str) -> list of int
        Return the locations of string within the raw HTML.'''
        return wordSearcher.wordSearcher(string, self.raw)

    def searchtext(self, string):
        '''(str) -> list of int
        Return the locations of string within the tag-stripped text.'''
        return wordSearcher.wordSearcher(string, self.text)
24f38c5eb00d37c0dd3b830540a0632178b7655d | 1,452 | py | Python | examples/sarsa_cartpole.py | stefanbschneider/keras-rl | 216c3145f3dc4d17877be26ca2185ce7db462bad | [
"MIT"
] | 3,350 | 2018-03-07T09:46:43.000Z | 2022-03-31T11:25:35.000Z | examples/sarsa_cartpole.py | stefanbschneider/keras-rl | 216c3145f3dc4d17877be26ca2185ce7db462bad | [
"MIT"
] | 223 | 2018-03-11T00:07:46.000Z | 2022-03-09T13:26:01.000Z | examples/sarsa_cartpole.py | stefanbschneider/keras-rl | 216c3145f3dc4d17877be26ca2185ce7db462bad | [
"MIT"
] | 1,007 | 2018-03-08T11:26:49.000Z | 2022-03-14T05:19:34.000Z | import numpy as np
import gym
from keras.models import Sequential
from keras.layers import Dense, Activation, Flatten
from keras.optimizers import Adam
from rl.agents import SARSAAgent
from rl.policy import BoltzmannQPolicy
ENV_NAME = 'CartPole-v0'
# Get the environment and extract the number of actions.
env = gym.make(ENV_NAME)
# Seed numpy and the environment for reproducible runs.
np.random.seed(123)
env.seed(123)
nb_actions = env.action_space.n
# Next, we build a very simple model.
# Flatten collapses the (1, observation) window shape into a feature vector;
# three hidden relu layers of 16 units, then a linear Q-value output per action.
model = Sequential()
model.add(Flatten(input_shape=(1,) + env.observation_space.shape))
model.add(Dense(16))
model.add(Activation('relu'))
model.add(Dense(16))
model.add(Activation('relu'))
model.add(Dense(16))
model.add(Activation('relu'))
model.add(Dense(nb_actions))
model.add(Activation('linear'))
print(model.summary())
# SARSA does not require a memory.
policy = BoltzmannQPolicy()
sarsa = SARSAAgent(model=model, nb_actions=nb_actions, nb_steps_warmup=10, policy=policy)
sarsa.compile(Adam(lr=1e-3), metrics=['mae'])
# Okay, now it's time to learn something! We visualize the training here for show, but this
# slows down training quite a lot. You can always safely abort the training prematurely using
# Ctrl + C.
sarsa.fit(env, nb_steps=50000, visualize=False, verbose=2)
# After training is done, we save the final weights.
sarsa.save_weights('sarsa_{}_weights.h5f'.format(ENV_NAME), overwrite=True)
# Finally, evaluate our algorithm for 5 episodes.
sarsa.test(env, nb_episodes=5, visualize=True)
7745eb98894c3876f48f2458759062889233f122 | 4,158 | py | Python | evennia/help/manager.py | pakhnu/my-world | 405983dca81e70fc64d58d6a60126ffa5e8ada8c | [
"BSD-3-Clause"
] | null | null | null | evennia/help/manager.py | pakhnu/my-world | 405983dca81e70fc64d58d6a60126ffa5e8ada8c | [
"BSD-3-Clause"
] | null | null | null | evennia/help/manager.py | pakhnu/my-world | 405983dca81e70fc64d58d6a60126ffa5e8ada8c | [
"BSD-3-Clause"
] | null | null | null | """
Custom manager for HelpEntry objects.
"""
from django.db import models
from evennia.utils import logger, utils
__all__ = ("HelpEntryManager",)
class HelpEntryManager(models.Manager):
    """
    Manager with methods for searching and manipulating HelpEntries
    directly in the database. All methods return database objects
    (or QuerySets) directly.

    Evennia-specific:
    find_topicmatch
    find_apropos
    find_topicsuggestions
    find_topics_with_category
    all_to_category
    search_help (equivalent to evennia.search_helpentry)

    """

    def find_topicmatch(self, topicstr, exact=False):
        """
        Search for topics matching the player's input.

        Args:
            topicstr (str): Help topic to search for. A #dbref is
                looked up by id directly.
            exact (bool, optional): Require a (case-insensitive)
                exact key match. If `False` (default), progressively
                fall back to prefix and substring matches.

        Returns:
            matches (HelpEntries): Query results.

        """
        dbref = utils.dbref(topicstr)
        if dbref:
            return self.filter(id=dbref)
        matches = self.filter(db_key__iexact=topicstr)
        if matches or exact:
            return matches
        matches = self.filter(db_key__istartswith=topicstr)
        if matches:
            return matches
        return self.filter(db_key__icontains=topicstr)

    def find_apropos(self, topicstr):
        """
        Very loose search: all help entries whose title contains the
        search criterion.

        Args:
            topicstr (str): Search criterion.

        Returns:
            matches (HelpEntries): Query results.

        """
        return self.filter(db_key__icontains=topicstr)

    def find_topicsuggestions(self, topicstr):
        """
        Fuzzy match: entries containing the criterion, excluding an
        exact match itself.

        Args:
            topicstr (str): Search criterion.

        Returns:
            matches (HelpEntries): Query results.

        """
        return self.exclude(db_key__iexact=topicstr).filter(db_key__icontains=topicstr)

    def find_topics_with_category(self, help_category):
        """
        All topics belonging to a particular category.

        Args:
            help_category (str): Category query criterion.

        Returns:
            matches (HelpEntries): Query results.

        """
        return self.filter(db_help_category__iexact=help_category)

    def get_all_topics(self):
        """
        Return all topics.

        Returns:
            all (HelpEntries): All topics.

        """
        return self.all()

    def get_all_categories(self):
        """
        All defined category names that have at least one topic.

        Returns:
            matches (list): Unique category names across all topics.

        """
        unique_categories = {topic.help_category for topic in self.all()}
        return list(unique_categories)

    def all_to_category(self, default_category):
        """
        Shift every help entry into default_category. This cannot be
        reverted; the engine uses it when importing a default help
        database so the import ends up in one easily separated category.

        Args:
            default_category (str): Category to move entries to.

        """
        for topic in self.all():
            topic.help_category = default_category
            topic.save()
        logger.log_info("Help database moved to category %s" % default_category)

    def search_help(self, ostring, help_category=None):
        """
        Retrieve a search entry object.

        Args:
            ostring (str): The help topic to look for.
            help_category (str): Limit the search to a particular help category.

        Returns:
            matches (HelpEntries): Query results.

        """
        ostring = ostring.strip().lower()
        query = {"db_key__iexact": ostring}
        if help_category:
            query["db_help_category__iexact"] = help_category
        return self.filter(**query)
| 28.479452 | 87 | 0.613516 |
909bc24e0f26fe2ae5708a4404335a8dc40d3dbb | 4,052 | py | Python | classifier_models/resnet.py | Qinaty/input-aware-backdoor-attack-release | ce897adf4a3ce0d2badbd2b53233561fee6c7db7 | [
"MIT"
] | 67 | 2020-10-17T05:04:17.000Z | 2022-03-31T07:16:51.000Z | classifier_models/resnet.py | Qinaty/input-aware-backdoor-attack-release | ce897adf4a3ce0d2badbd2b53233561fee6c7db7 | [
"MIT"
] | 3 | 2020-12-27T03:54:38.000Z | 2022-03-29T15:33:08.000Z | classifier_models/resnet.py | Qinaty/input-aware-backdoor-attack-release | ce897adf4a3ce0d2badbd2b53233561fee6c7db7 | [
"MIT"
] | 12 | 2020-10-20T20:16:10.000Z | 2021-12-29T07:35:53.000Z | """ResNet in PyTorch.
For Pre-activation ResNet, see 'preact_resnet.py'.
Reference:
[1] Kaiming He, Xiangyu Zhang, Shaoqing Ren, Jian Sun
Deep Residual Learning for Image Recognition. arXiv:1512.03385
"""
import torch
import torch.nn as nn
import torch.nn.functional as F
class BasicBlock(nn.Module):
    """Two 3x3 conv layers with a residual connection (ResNet basic block)."""

    expansion = 1

    def __init__(self, in_planes, planes, stride=1):
        super(BasicBlock, self).__init__()
        self.conv1 = nn.Conv2d(in_planes, planes, kernel_size=3,
                               stride=stride, padding=1, bias=False)
        self.bn1 = nn.BatchNorm2d(planes)
        self.conv2 = nn.Conv2d(planes, planes, kernel_size=3,
                               stride=1, padding=1, bias=False)
        self.bn2 = nn.BatchNorm2d(planes)
        # Project the shortcut only when spatial size or channel count changes.
        needs_projection = stride != 1 or in_planes != self.expansion * planes
        if needs_projection:
            self.shortcut = nn.Sequential(
                nn.Conv2d(in_planes, self.expansion * planes,
                          kernel_size=1, stride=stride, bias=False),
                nn.BatchNorm2d(self.expansion * planes),
            )
        else:
            self.shortcut = nn.Sequential()

    def forward(self, x):
        residual = self.shortcut(x)
        y = F.relu(self.bn1(self.conv1(x)))
        y = self.bn2(self.conv2(y))
        return F.relu(y + residual)
class Bottleneck(nn.Module):
    """1x1 -> 3x3 -> 1x1 conv stack with a residual connection (ResNet bottleneck)."""

    expansion = 4

    def __init__(self, in_planes, planes, stride=1):
        super(Bottleneck, self).__init__()
        out_planes = self.expansion * planes
        self.conv1 = nn.Conv2d(in_planes, planes, kernel_size=1, bias=False)
        self.bn1 = nn.BatchNorm2d(planes)
        self.conv2 = nn.Conv2d(planes, planes, kernel_size=3,
                               stride=stride, padding=1, bias=False)
        self.bn2 = nn.BatchNorm2d(planes)
        self.conv3 = nn.Conv2d(planes, out_planes, kernel_size=1, bias=False)
        self.bn3 = nn.BatchNorm2d(out_planes)
        # Project the shortcut only when spatial size or channel count changes.
        needs_projection = stride != 1 or in_planes != out_planes
        if needs_projection:
            self.shortcut = nn.Sequential(
                nn.Conv2d(in_planes, out_planes,
                          kernel_size=1, stride=stride, bias=False),
                nn.BatchNorm2d(out_planes),
            )
        else:
            self.shortcut = nn.Sequential()

    def forward(self, x):
        residual = self.shortcut(x)
        y = F.relu(self.bn1(self.conv1(x)))
        y = F.relu(self.bn2(self.conv2(y)))
        y = self.bn3(self.conv3(y))
        return F.relu(y + residual)
class ResNet(nn.Module):
    """Generic ResNet for 32x32 inputs: stem conv, four stages, avg-pool, linear head."""

    def __init__(self, block, num_blocks, num_classes=10):
        super(ResNet, self).__init__()
        self.in_planes = 64
        self.conv1 = nn.Conv2d(3, 64, kernel_size=3, stride=1, padding=1, bias=False)
        self.bn1 = nn.BatchNorm2d(64)
        self.layer1 = self._make_layer(block, 64, num_blocks[0], stride=1)
        self.layer2 = self._make_layer(block, 128, num_blocks[1], stride=2)
        self.layer3 = self._make_layer(block, 256, num_blocks[2], stride=2)
        self.layer4 = self._make_layer(block, 512, num_blocks[3], stride=2)
        self.linear = nn.Linear(512 * block.expansion, num_classes)

    def _make_layer(self, block, planes, num_blocks, stride):
        # Only the first block of a stage downsamples; the rest use stride 1.
        layers = []
        for i in range(num_blocks):
            block_stride = stride if i == 0 else 1
            layers.append(block(self.in_planes, planes, block_stride))
            self.in_planes = planes * block.expansion
        return nn.Sequential(*layers)

    def forward(self, x):
        y = F.relu(self.bn1(self.conv1(x)))
        for stage in (self.layer1, self.layer2, self.layer3, self.layer4):
            y = stage(y)
        y = F.avg_pool2d(y, 4)
        return self.linear(y.view(y.size(0), -1))
def ResNet18():
    """ResNet-18: BasicBlocks with stage depths 2-2-2-2."""
    return ResNet(BasicBlock, [2, 2, 2, 2])


def ResNet34():
    """ResNet-34: BasicBlocks with stage depths 3-4-6-3."""
    return ResNet(BasicBlock, [3, 4, 6, 3])


def ResNet50():
    """ResNet-50: Bottlenecks with stage depths 3-4-6-3."""
    return ResNet(Bottleneck, [3, 4, 6, 3])


def ResNet101():
    """ResNet-101: Bottlenecks with stage depths 3-4-23-3."""
    return ResNet(Bottleneck, [3, 4, 23, 3])


def ResNet152():
    """ResNet-152: Bottlenecks with stage depths 3-8-36-3."""
    return ResNet(Bottleneck, [3, 8, 36, 3])


def test():
    """Smoke test: push one random CIFAR-sized image through ResNet-18."""
    model = ResNet18()
    out = model(torch.randn(1, 3, 32, 32))
    print(out.size())
# test()
| 31.905512 | 104 | 0.616486 |
f2737d1e6da0d1ab2341c4b68ec781bf6559ff28 | 3,416 | py | Python | backtrader/backtrader/utils/py3.py | harshabakku/live-back-testing-trader | 1fd69c7598dc15bea740f160eed886f396bcba2c | [
"MIT"
] | 1 | 2021-07-14T22:04:08.000Z | 2021-07-14T22:04:08.000Z | backtrader/backtrader/utils/py3.py | ajmal017/LiveBackTestingTrader | 8b4f5804c0aa6046128f6706582f9cde78a0519a | [
"MIT"
] | null | null | null | backtrader/backtrader/utils/py3.py | ajmal017/LiveBackTestingTrader | 8b4f5804c0aa6046128f6706582f9cde78a0519a | [
"MIT"
] | 3 | 2021-03-07T16:29:40.000Z | 2022-03-17T21:42:38.000Z | #!/usr/bin/env python
# -*- coding: utf-8; py-indent-offset:4 -*-
###############################################################################
#
# Copyright (C) 2015, 2016, 2017 Daniel Rodriguez
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
###############################################################################
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import itertools
import sys
# True when running under Python 2; selects which compatibility shims to export.
PY2 = sys.version_info.major == 2
if PY2:
    # winreg only exists on Windows; fall back to None elsewhere.
    try:
        import _winreg as winreg
    except ImportError:
        winreg = None

    MAXINT = sys.maxint
    MININT = -sys.maxint - 1
    MAXFLOAT = sys.float_info.max
    MINFLOAT = sys.float_info.min

    string_types = str, unicode
    integer_types = int, long

    # Re-export the lazy (iterator-returning) variants so both Python
    # versions behave like Python 3's builtins.
    filter = itertools.ifilter
    map = itertools.imap
    range = xrange
    zip = itertools.izip
    long = long
    cmp = cmp
    bytes = bytes
    bstr = bytes

    from io import StringIO
    from urllib2 import urlopen, ProxyHandler, build_opener, install_opener
    from urllib import quote as urlquote

    # Dict iteration helpers: iter* return iterators, keys/values/items lists.
    def iterkeys(d): return d.iterkeys()
    def itervalues(d): return d.itervalues()
    def iteritems(d): return d.iteritems()

    def keys(d): return d.keys()
    def values(d): return d.values()
    def items(d): return d.items()

    import Queue as queue

else:
    try:
        import winreg
    except ImportError:
        winreg = None

    MAXINT = sys.maxsize
    MININT = -sys.maxsize - 1
    MAXFLOAT = sys.float_info.max
    MINFLOAT = sys.float_info.min

    string_types = str,
    integer_types = int,

    # Python 3 builtins already return iterators; re-export them under
    # the same names so importers can use py3.filter etc. uniformly.
    filter = filter
    map = map
    range = range
    zip = zip
    long = int

    # Emulate the removed Python 2 builtins.
    def cmp(a, b): return (a > b) - (a < b)
    def bytes(x): return x.encode('utf-8')
    def bstr(x): return str(x)

    from io import StringIO
    from urllib.request import (urlopen, ProxyHandler, build_opener,
                                install_opener)
    from urllib.parse import quote as urlquote

    def iterkeys(d): return iter(d.keys())
    def itervalues(d): return iter(d.values())
    def iteritems(d): return iter(d.items())

    def keys(d): return list(d.keys())
    def values(d): return list(d.values())
    def items(d): return list(d.items())

    import queue as queue
# This is from Armin Ronacher from Flash simplified later by six
def with_metaclass(meta, *bases):
    """Create a base class with a metaclass."""
    # This requires a bit of explanation: the basic idea is to make a dummy
    # metaclass for one level of class instantiation that replaces itself with
    # the actual metaclass.
    class metaclass(meta):
        def __new__(cls, name, this_bases, d):
            # When the user's class is created, discard the temporary base
            # and build the real class with `meta` and the intended `bases`.
            return meta(name, bases, d)
    # str() keeps the class name a native str on both Python 2 and 3.
    return type.__new__(metaclass, str('temporary_class'), (), {})
| 25.492537 | 79 | 0.631733 |
845313b8ff6fd95955dec1e812a8cc79a1584f8d | 40,078 | py | Python | dev/Tools/build/waf-1.7.13/lmbrwaflib/az_code_generator.py | akulamartin/lumberyard | 2d4be458a02845179be098e40cdc0c48f28f3b5a | [
"AML"
] | 2 | 2020-12-22T01:02:01.000Z | 2020-12-22T01:02:05.000Z | dev/Tools/build/waf-1.7.13/lmbrwaflib/az_code_generator.py | akulamartin/lumberyard | 2d4be458a02845179be098e40cdc0c48f28f3b5a | [
"AML"
] | null | null | null | dev/Tools/build/waf-1.7.13/lmbrwaflib/az_code_generator.py | akulamartin/lumberyard | 2d4be458a02845179be098e40cdc0c48f28f3b5a | [
"AML"
] | null | null | null | #!/usr/bin/env python
# encoding: utf-8
#
# All or portions of this file Copyright (c) Amazon.com, Inc. or its affiliates or
# its licensors.
#
# For complete copyright and license terms please see the LICENSE at the root of this
# distribution (the "License"). All use of this software is governed by the License,
# or, if provided, by the license below or the license accompanying this file. Do not
# remove or modify any license notices. This file is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#
from waflib.TaskGen import feature, after_method, before_method, taskgen_method
from waflib.Context import BOTH
from waflib.Configure import conf
from waflib import Node, Task, Utils, Logs, Errors, Options
from cry_utils import append_to_unique_list
from binascii import hexlify
from collections import defaultdict
from pipes import quote
import os
import json
import waflib.Build
import threading
import itertools
# Code Generator Settings
# When True, include directories are not passed to the generator invocation.
code_generator_ignore_includes = False
# When True, generator errors are demoted to warnings instead of failing the build.
code_generator_suppress_errors_as_warnings = False
# When True, the generator is invoked with verbose output.
code_generator_verbose = False
# Command lines at or beyond this length are known to fail to execute on Windows.
command_length_when_windows_fails_to_execute = 8192
# Module for our utility
module = "AZCodeGenerator"
# Add our task data to be saved between builds
waflib.Build.SAVED_ATTRS.append('azcg')
# Create a lock object to be used for az_code_gen task node access
# This must be used before any calls to make_node, find_node or similar methods during task execution
# These methods create nodes and simultaneous task invocation that attempts to create the same node will error
task_node_access_lock = threading.Lock()
def get_input_dir_node(tg):
    """
    Return the code-generation input directory node for a task generator.

    Honors an explicit 'az_code_gen_input_dir' attribute: absolute paths
    are resolved from the filesystem root node, relative ones from the
    source root. Without the attribute, the task generator's own path
    (the project's root directory) is used.
    """
    configured = getattr(tg, 'az_code_gen_input_dir', None)
    if not configured:
        return tg.path
    if os.path.isabs(configured):
        return tg.bld.root.make_node(configured)
    return tg.bld.srcnode.make_node(configured)
@conf
def azcg_output_dir_node(ctx, target_name=None, additional_subpath=None):
    """
    Return the azcg output directory node under the build node.

    The layout is azcg[/<target_name>][/<additional_subpath>].
    """
    subdir = 'azcg/{}'.format(target_name) if target_name else 'azcg'
    node = ctx.bldnode.make_node(subdir)
    if additional_subpath:
        node = node.make_node(additional_subpath)
    return node
@taskgen_method
def get_azcg_output_dir_node(tg, additional_subpath=None, tg_name_override=None):
    """
    Return the azcg output directory node for a task generator.

    An 'az_code_gen_override_target' attribute on the task generator
    takes precedence over the default azcg/<target> location.
    """
    name = tg_name_override or tg.name
    default_node = azcg_output_dir_node(tg.bld, name, additional_subpath)
    return getattr(tg, 'az_code_gen_override_target', default_node)
@taskgen_method
def get_azcg_output_dir_path(tg, additional_subpath=None, tg_name_override=None):
    """Return the azcg output directory as an absolute path string."""
    node = get_azcg_output_dir_node(tg, additional_subpath, tg_name_override)
    return node.abspath()
@feature('az_code_gen')
@before_method('process_use')
def add_codegen_includes(self):
    """
    Make the azcg output directory visible to this target and its users.

    Ensures 'includes'/'export_includes' exist, injects the azcg output
    directory into both, and adds the code-gen input directory to the
    local includes (only once).
    """
    for attr_name in ('includes', 'export_includes'):
        if not hasattr(self, attr_name):
            setattr(self, attr_name, [])
    azcg_node = get_azcg_output_dir_node(self)
    self.includes.append(azcg_node)
    input_node = get_input_dir_node(self)
    if input_node not in self.includes:
        self.includes.append(input_node)
    self.export_includes.append(azcg_node)
@feature('az_code_gen')
@after_method('apply_incpaths')
def create_code_generator_tasks(self):
    """
    Create one az_code_gen task per input file/list declared in each
    'az_code_gen' pass of this task generator. Runs after apply_incpaths
    so includes_nodes is populated.
    """
    # Skip during project generation
    if self.bld.env['PLATFORM'] == 'project_generator':
        return
    # promote raw entries to list
    if isinstance(getattr(self, 'az_code_gen', []), dict):
        self.az_code_gen = [self.az_code_gen]
    # compute deps: every file shipped with the generator utility, so tasks
    # re-run when the generator itself changes
    azcg_dep_nodes = []
    azcg_dir = self.env['CODE_GENERATOR_PATH'][0]
    azcg_dir_node = self.bld.root.find_node(azcg_dir)
    if azcg_dir_node:
        # For a clean build, .pyc files don't exist at this point, but for the later incremental build, .pyc files will be added as node dependencies and will change the task signature.
        # Do not add .pyc files as dependencies.
        azcg_dep_nodes = azcg_dir_node.ant_glob('**/*', excl=Node.exclude_regs + '\n**/*.pyc')
    else:
        Logs.warn('az_code_gen: Unable to find azcg directory. Code Generator tasks will not have the utility/scripts as dependencies')
    # this script is a dependency
    script_node = self.bld.root.make_node(os.path.abspath(__file__))
    # Use .py file as dependency instead of .pyc file.
    if script_node.suffix() == '.pyc':
        script_node = script_node.change_ext('.py')
    azcg_dep_nodes.append(script_node)
    for az_code_gen_pass in getattr(self, 'az_code_gen', []):
        # See if we have any scripts
        code_generator_scripts = az_code_gen_pass.get('scripts', [])
        if not code_generator_scripts:
            Logs.warn(
                'az_code_gen feature enabled but no scripts were specified. '
                'No code generation performed for target {}'.format(self.target))
            return
        # Promote single string arguments/options to one-element lists.
        code_gen_arguments = az_code_gen_pass.get('arguments', [])
        if isinstance(code_gen_arguments, str):
            code_gen_arguments = [code_gen_arguments]
        code_gen_options = az_code_gen_pass.get('options', [])
        if isinstance(code_gen_options, str):
            code_gen_options = [code_gen_options]
        code_gen_input = az_code_gen_pass.get('files', [])
        if not code_gen_input:
            Logs.warn(
                'az_code_gen feature enabled but no files were specified. '
                'No code generation performed for target {}'.format(self.target))
            return
        code_gen_override_output = az_code_gen_pass.get('override_output', None)
        # Create one task per input file/list
        for input_item in code_gen_input:
            # Auto promote non-lists to lists
            if not isinstance(input_item, list):
                input_file_list = [input_item]
            else:
                input_file_list = input_item
            create_az_code_generator_task(self, input_file_list, code_generator_scripts, code_gen_arguments, code_gen_options, azcg_dep_nodes, code_gen_override_output)
def create_az_code_generator_task(self, input_file_list, code_generator_scripts, code_gen_arguments, code_gen_options, azcg_dep_nodes, code_gen_override_output):
    """
    Build and wire up a single az_code_gen task for the given input files.

    'self' is the task generator. Inputs may be Nodes already, or strings
    resolved relative to the code-gen input directory. The task is placed
    in the dedicated 'az_code_gen_group' to control build ordering.
    """
    input_dir_node = get_input_dir_node(self)
    input_nodes = [in_file if isinstance(in_file, waflib.Node.Node) else input_dir_node.make_node(clean_path(in_file)) for in_file in input_file_list]
    # Create a code gen task.
    # We would simply do "new_task = self.create_task('az_code_gen', input_nodes)" had we no need
    # to worry about build ordering. Instead, we add to az_code_gen_group (tbd: a better name?)
    new_task = az_code_gen(env=self.env.derive(), generator=self)
    new_task.set_inputs(input_nodes)
    self.bld.add_to_group(new_task, 'az_code_gen_group')
    new_task.path = self.path
    new_task.input_dir = input_dir_node
    new_task.output_dir = code_gen_override_output or get_azcg_output_dir_node(self)
    # Resolve each generator script to a node; a missing script is a hard error.
    for script in code_generator_scripts:
        script_node = self.path.find_or_declare(script)
        if script_node is None:
            raise Errors.WafError('[ERROR] Unable to find node for script {}'.format(script))
        new_task.script_nodes.append(script_node)
    new_task.includes = self.includes_nodes
    new_task.defines = self.env['DEFINES']
    new_task.azcg_deps = azcg_dep_nodes
    for code_gen_argument in code_gen_arguments:
        new_task.add_argument(code_gen_argument)
    if 'PrintOutputRedirectionFile' in code_gen_options:
        new_task.capture_and_print_error_output()
    # Optionally switch to the debug generator executable when requested.
    if self.bld.is_option_true('use_debug_code_generator'):
        Logs.warn("Using DEBUG AZCodeGenerator!")
        path = new_task.env.CODE_GENERATOR_PATH[0]
        if path.endswith('profilenosym'):
            new_task.env.CODE_GENERATOR_PATH = [path[:-len('profilenosym')] + 'debug']
        az_code_gen_exe = "AzCodeGeneratorD.exe"
        full_debug_path = os.path.join(new_task.env['CODE_GENERATOR_PATH'][0],az_code_gen_exe)
        if os.path.exists(full_debug_path):
            new_task.env['CODE_GENERATOR_EXECUTABLE'] = "AzCodeGeneratorD.exe"
        else:
            raise Errors.WafError('[ERROR] Debug AzCodeGenerator executable ({}) was not found. Make sure to build it first if you want to run the code generator in debug mode'.format(full_debug_path))
    # Pre-compute arguments for task since doing this during run() causes multi-thread issues in Node
    new_task.prepare_task()
def clean_path(path):
    """Normalize a path string to use forward slashes only."""
    return '/'.join(path.split('\\'))
def hash_node_list(list, up):
    """
    Feed every item of the list into the hash-updater callable 'up'.

    Node items contribute their encoded absolute path; anything else
    (e.g. a define or argument string) is fed in as-is.
    """
    for entry in list:
        payload = entry.abspath().encode() if isinstance(entry, waflib.Node.Node) else entry
        up(payload)
# ported from msvcdeps.py
def path_to_node(base_node, path, cached_nodes):
    """
    Resolve a path (string or list of path components) to a node under
    base_node. Ported from msvcdeps.py.

    Results are memoized in cached_nodes because searching the node tree
    is expensive. The cache is shared between worker threads, so all
    access happens under task_node_access_lock.
    """
    if getattr(path, '__hash__'):
        node_lookup_key = (base_node, path)
    else:
        # Not hashable, assume it is a list and join into a string
        node_lookup_key = (base_node, os.path.sep.join(path))
    # Use the lock as a context manager: this guarantees release even if the
    # cache-miss lookup raises, and cannot release an unheld lock the way the
    # previous try/finally-around-acquire() pattern could.
    with task_node_access_lock:
        try:
            node = cached_nodes[node_lookup_key]
        except KeyError:
            node = base_node.find_resource(path)
            cached_nodes[node_lookup_key] = node
    return node
class az_code_gen(Task.Task):
color = 'CYAN'
    def __init__(self, *k, **kw):
        """Initialize per-task state and ensure shared caches/locks exist."""
        super(az_code_gen, self).__init__(*k, **kw)
        self.more_tasks = []                  # tasks spawned for generated outputs
        self.input_dir = None                 # node: code-gen input directory
        self.output_dir = None                # node: code-gen output directory
        self.script_nodes = []                # generator script nodes
        self.includes = []                    # include path nodes passed to the generator
        self.defines = []                     # preprocessor defines passed to the generator
        self.argument_list = []               # extra command-line arguments
        self.error_output_file_node = None    # optional error-output capture file
        self.registered_dependencies = []     # dependency paths reported by the generator
        self.azcg_deps = []                   # generator utility/script dependency nodes
        self.generated_output = []            # outputs reported during run(), processed later
        self.profile = False
        # Ensure we have a 'azcg' attribute to cache task info for future builds
        if not isinstance(getattr(self.generator.bld, 'azcg', None), dict):
            self.generator.bld.azcg = defaultdict(dict)
        # Ensure the task generator has a lock to manage INCPATHS
        if not hasattr(self.generator, 'task_gen_access_lock'):
            self.generator.task_gen_access_lock = threading.Lock()
        # Ensure the task generator has a lock to manage link task additions
        if not hasattr(self.generator, 'task_gen_link_lock'):
            self.generator.task_gen_link_lock = threading.Lock()
return 'AZ Code Generation for {} -> Processing {}\n'.format(self.generator.target, ', '.join(str(input) for input in self.inputs))
def uid(self):
try:
return self.uid_
except AttributeError:
m = Utils.md5()
up = m.update
# Be sure to add any items here that change how code gen runs, this needs to be unique!
# Ensure anything here will not change over the life of the task
up(self.path.abspath().encode())
up(self.input_dir.abspath().encode())
up(self.output_dir.abspath().encode())
hash_node_list(self.inputs, up)
hash_node_list(self.script_nodes, up)
hash_node_list(self.includes, up)
hash_node_list(self.defines, up)
hash_node_list(self.argument_list, up)
hash_node_list(self.azcg_deps, up)
self.uid_ = m.digest()
return self.uid_
def azcg_get(self, key, default_value=None):
assert isinstance(key, str)
return self.generator.bld.azcg[self.uid()].get(key, default_value)
def azcg_set(self, key, val):
assert isinstance(key, str)
self.generator.bld.azcg[self.uid()][key] = val
def azcg_append_unique(self, key, val):
assert isinstance(key, str)
vals = self.azcg_get(key, [])
assert isinstance(vals, list)
if val not in vals:
vals.append(val)
self.azcg_set(key, vals)
def add_argument(self, argument):
"""
Add an argument to the argument list
"""
self.argument_list.append(argument)
def capture_and_print_error_output(self):
error_output_file_path = 'CodeGenErrorOutput/error_output_{}.log'.format(hexlify(self.uid()))
self.error_output_file_node = self.generator.bld.bldnode.find_or_declare(error_output_file_path)
def register_output_file(self, output_path, should_add_to_build):
# Store this output object for later use since we will need to be on the main thread for node access
self.generated_output.append({'output_path': output_path, 'should_add_to_build' : should_add_to_build})
def process_generated_output(self):
# This should only ever be executed on the main thread! Node operations cannot happen during run()
for generated_output in self.generated_output:
output_path = generated_output['output_path']
should_add_to_build = generated_output['should_add_to_build']
# Nodes won't work with absolute paths, so we have to remove the build path from
# the given output file path. Given path is unicode, make it str to match Node.
output_path = os.path.relpath(str(output_path), start=self.generator.bld.bldnode.abspath())
with task_node_access_lock:
output_node = self.generator.bld.bldnode.find_node(output_path)
if output_node is None:
raise Errors.WafError('[ERROR] az_code_gen: Unable to find generated file as node {}'.format(output_path))
append_to_unique_list(self.outputs, output_node)
if should_add_to_build:
# Add to persistent link list
self.azcg_append_unique('link_inputs', output_node)
# Perform actual add to link task
self.add_link_task(output_node)
def register_dependency_file(self, path):
if not (path.endswith('NUL') or path.endswith('null')):
self.registered_dependencies.append(path)
def add_link_task(self, node_to_link):
    """Create a compile task for *node_to_link* and wire its object into the link.

    :param node_to_link: generated source node marked "should add to build"
    :return: True on success
    :raises Errors.WafError: when the node has no build hook, or when no
        link task exists to receive the compiled object.
    """
    # Using modified version of example here:
    # https://github.com/waf-project/waf/blob/7b7531b0c6d0598033bea608ffc3c8e335434a6d/docs/book/examples/scenarios_unknown/mytool.py
    try:
        task_hook = self.generator.get_hook(node_to_link)
    except Errors.WafError:
        raise Errors.WafError(
            '[ERROR] az_code_gen: Created file {} marked for "should add to build" '
            'is not buildable.'.format(node_to_link.path_from(self.generator.bldnode)))
    created_task = task_hook(self.generator, node_to_link)
    # Shove /Fd flags into codegen meta-tasks, this is similar to logic in mscv_helper's
    # set_pdb_flags. We compute PDB file path and add the requisite /Fd flag.
    # This enables debug symbols for code outputted by azcg.
    if 'msvc' in (self.generator.env.CC_NAME, self.generator.env.CXX_NAME):
        # The created_task from the original generator will not be able to go through the
        # 'verify_compiler_options_msvc' function at this point, so we will manually verify
        # the compiler options here (to strip out conflicting flags)
        verify_options_common(created_task.env)
        # Not having PDBs stops CL.exe working for precompiled header when we have VCCompiler set to true for IB...
        # When DISABLE_DEBUG_SYMBOLS_OVERRIDE doesn't exist in the dictionary it returns []
        # which evaluates to false in this check.
        if self.bld.is_option_true('generate_debug_info') or self.generator.env['DISABLE_DEBUG_SYMBOLS_OVERRIDE']:
            pdb_folder = self.generator.path.get_bld().make_node(str(self.generator.target_uid))
            pdb_cxxflag = '/Fd{}'.format(pdb_folder.abspath())
            created_task.env.append_unique('CFLAGS', pdb_cxxflag)
            created_task.env.append_unique('CXXFLAGS', pdb_cxxflag)
    link_task = getattr(self.generator, 'link_task', None)
    if not link_task:
        link_task = getattr(self.bld, 'monolithic_link_task', None)
    if link_task:
        link_task.set_run_after(created_task)  # Compile our .cpp before we link.
        # link_task is a shared resource that lives on the generator. Use a lock and a separate list
        # to ensure that the append order is consistent
        with self.generator.task_gen_link_lock:
            if not hasattr(self.generator, 'task_gen_link_inputs'):
                self.generator.task_gen_link_inputs = []
            if created_task.outputs[0] not in self.generator.task_gen_link_inputs:
                self.generator.task_gen_link_inputs.append(created_task.outputs[0])
            self.generator.task_gen_link_inputs.sort(key=lambda x: x.name)
            # Strip all previously appended azcg objects off the link inputs
            # (truncate from the first one found) so the freshly sorted list
            # can be re-appended in one consistent order.
            for output in self.generator.task_gen_link_inputs:
                try:
                    idx = link_task.inputs.index(output)
                    del link_task.inputs[idx:]
                    break
                except ValueError:
                    # BUGFIX: was a bare 'except:'; list.index signals a
                    # missing entry with ValueError and nothing else should
                    # be silently swallowed here.
                    continue
            link_task.inputs += self.generator.task_gen_link_inputs
    else:
        # If we ever have a use case where link_task is inappropriate (non-C-family lang?),
        # then we should do "self.more_tasks.append(created_task)" in those cases.
        raise Errors.WafError('[ERROR] az_code_gen: Created file {} marked for "should add to build" '
                              'was not added to a link task.'.format(
                                  node_to_link.path_from(self.generator.bld.bldnode)))
    return True
def propagate_azcg_incpaths(self, azcg_paths):
    """
    Performs a thread safe and consistently ordered update of this task's generator's INCPATHS
    :param azcg_paths: List of paths from this azcg task's output nodes that should be appended to INCPATHS
    """
    with self.generator.task_gen_access_lock:
        # Merge this task's paths into the shared, persisted list.
        for azcg_path in azcg_paths:
            self.generator.env.append_unique('AZCG_INCPATHS', azcg_path)
        # AZCG_INCPATHS can be affected by multiple azcg tasks, clean it out and re-add for ordering consistency
        self.generator.env['AZCG_INCPATHS'].sort()
        # Remove every azcg path currently present in INCPATHS...
        for path in self.generator.env['AZCG_INCPATHS']:
            if path in self.generator.env['INCPATHS']:
                self.generator.env['INCPATHS'].remove(path)
        # ...then re-append the full sorted set so the final INCPATHS order is
        # deterministic regardless of which azcg task ran last.
        for path in self.generator.env['AZCG_INCPATHS']:
            self.generator.env.append_unique('INCPATHS', path)
def write_argument_list_to_file(self):
    """
    Writes argument_list to a file
    :return: (True, <argument file>) on success; (False, '') on failure
    """
    argument_file_path = 'CodeGenArguments/{}_{}.args'.format(self.inputs[0].name.replace(".", "_"), hexlify(self.uid()))
    argument_file_node = self.generator.bld.bldnode.find_or_declare(argument_file_path)
    # BUGFIX: was "os.name is 'nt'" - identity comparison against a string
    # literal is implementation-dependent; equality is the correct check.
    if os.name == 'nt' and len(argument_file_node.abspath()) >= 260:
        Logs.error('Unable to write argument file for code gen, path will be unable to write due to MAX_PATH limit at 260 characters. Please use a shorter root path. Length: {} - Path: {}'.format(len(argument_file_node.abspath()), argument_file_node.abspath()))
        return False, ''
    try:
        argument_file_node.write('\n'.join(self.argument_list))
    except Exception:
        # BUGFIX: was a bare 'except:', which also traps KeyboardInterrupt
        # and SystemExit; Exception keeps the best-effort behavior without
        # swallowing interpreter-exit signals.
        Logs.error(
            'az_code_gen: Failed to write argument file {}'.format(argument_file_node.abspath()))
        return False, ''
    return True, argument_file_node.abspath()
def handle_code_generator_output_errors(self, code_gen_output):
    """Parse *code_gen_output* as JSON and report any embedded error entries.

    Raises Errors.WafError when the output is not valid JSON, which means
    Clang failed before the code generator gained control (typically
    invalid command-line arguments).
    """
    try:
        entries = json.loads(code_gen_output)
    except ValueError as value_error:
        # If we get output that isn't JSON, it means Clang errored before
        # the code generator gained control. Likely invalid commandline arguments.
        raise Errors.WafError('az_code_gen: Failed to json.loads output with error "{}" - output string was:\n{}'.format(str(value_error), code_gen_output))
    for entry in entries:
        if code_generator_verbose and entry['type'] == 'info':
            Logs.debug('az_code_gen: {}'.format(entry['info']))
        if entry['type'] == 'error':
            Logs.error('{} - az_code_gen task error'.format(entry['error']))
def handle_code_generator_output(self, code_gen_output):
    """
    Decode json object and process return from generator
    :param code_gen_output: json string
    :return: True on success, False on failure
    """
    try:
        json_object = json.loads(code_gen_output)
        errors_reported = False
        for output_object in json_object:
            if output_object['type'] == 'info':
                output = output_object['info']
                Logs.debug('az_code_gen: {}'.format(output))
                # NOTE(review): profile lines are logged a second time here,
                # presumably so they stand out when profiling - confirm.
                if (self.profile and output.startswith('Profile')):
                    Logs.debug('az_code_gen: ' + output)
            elif output_object['type'] == 'error':
                Logs.error('{} - az_code_gen task error'.format(output_object['error']))
                errors_reported = True
            elif output_object['type'] == 'generated_file':
                self.register_output_file(output_object['file_name'],
                                          output_object['should_be_added_to_build'])
            elif output_object['type'] == 'dependency_file':
                self.register_dependency_file(str(output_object['file_name']))
            else:
                Logs.error('az_code_gen: Unknown output json type returned from Code Generator. Type is: {} - Raw output: {}'.format(output_object['type'], code_gen_output))
                errors_reported = True
        # Fail the task if errors were reported
        if errors_reported:
            return False
        # Add local folder of each output node to include path of the task_gen and store path off to pickle for future runs
        azcg_paths = self.azcg_get('AZCG_INCPATHS', [])
        for output_node in self.outputs:
            # This check is here to ensure that tasks that were written out that had None outputs will be skipped.
            # The origin of this problem should have been solved by returning after the None checking during register output
            if output_node is None:
                Logs.warn('az_code_gen: Task output has a None entry, skipping!')
                continue
            output_path = output_node.parent.abspath()
            if output_path not in azcg_paths:
                azcg_paths.append(output_path)
        # Append any additional paths relative to the output directory found in export includes
        output_dir_node = get_azcg_output_dir_node(self.generator)
        for export_include in self.generator.export_includes:
            if isinstance(export_include, waflib.Node.Node) and export_include.is_child_of(output_dir_node):
                export_path = export_include.abspath()
                if export_path not in azcg_paths:
                    azcg_paths.append(export_path)
        self.azcg_set('AZCG_INCPATHS', azcg_paths)
        self.propagate_azcg_incpaths(azcg_paths)
        return True
    except ValueError as value_error:
        # If we get output that isn't JSON, it means Clang errored before
        # the code generator gained control. Likely invalid commandline arguments.
        Logs.error('az_code_gen: Failed to json.loads output with error "{}" - output string was:\n{}'.format(str(value_error), code_gen_output))
        import traceback
        import sys
        # BUGFIX: sys.exc_traceback is Python 2 only (removed in Python 3) and
        # would raise AttributeError here, masking the real failure.
        # sys.exc_info()[2] yields the same traceback on both Python versions.
        tb_list = traceback.extract_tb(sys.exc_info()[2])
        for filename, lineno, name, line in tb_list:
            Logs.error(
                '{}({}): error {}: in {}: {}'.format(filename, lineno,
                                                     value_error.__class__.__name__,
                                                     name, line))
        filename, lineno, _, _ = tb_list[-1]
        Logs.error('{}({}): error {}: {}'.format(filename, lineno,
                                                 value_error.__class__.__name__,
                                                 str(value_error)))
        return False
def print_error_output(self):
    """Echo the captured code generator error log to the build output."""
    log_node = self.error_output_file_node
    Logs.error('Error output stored in {}:'.format(log_node.abspath()))
    Logs.error(log_node.read())
def exec_code_generator(self, argument_string):
    """
    Execute the code generator with argument string
    :return: 0 on success, non-zero on failure
    """
    command_string = '\"' + os.path.join(self.env['CODE_GENERATOR_PATH'][0],
                                         self.env['CODE_GENERATOR_EXECUTABLE']) + '\" ' + argument_string
    Logs.debug('az_code_gen: Invoking code generator with command: {}'.format(command_string))
    # Ensure not too long to execute on the current host
    host = Utils.unversioned_sys_platform()
    if (host == 'win_x64') or (host == 'win32'):
        if len(command_string) >= command_length_when_windows_fails_to_execute:
            raise Errors.WafError("az_code_gen: Unable to execute code generator due to command length being too long")
    try:
        (code_gen_output, code_gen_error_output) = self.generator.bld.cmd_and_log(
            command_string, output=BOTH, quiet=BOTH, shell=False)
        if code_gen_error_output and not code_gen_error_output.isspace():
            Logs.warn('az_code_gen: Code generator output to stderr even though it indicated success. Output was:{}\n'.format(str(code_gen_error_output)))
    except Errors.WafError as e:
        if hasattr(e, 'stdout') and e.stdout:
            self.handle_code_generator_output_errors(e.stdout)
        if hasattr(e, 'stderr') and e.stderr:
            Logs.error('az_code_gen: Utility execution produced stderr output: {}'.format(e.stderr))
        if self.error_output_file_node:
            self.print_error_output()
        return e.returncode
    # BUGFIX: the result of handle_code_generator_output() was previously
    # ignored, so a run whose JSON stream contained error entries still
    # returned 0 (success) and the task was not failed.
    if not self.handle_code_generator_output(code_gen_output):
        return 1
    return 0
def run_code_generator(self):
    """Invoke the code generator, passing the persisted argument file via @-file syntax.

    Returns whatever exec_code_generator() returns (0 on success).
    """
    return self.exec_code_generator(' "@{}"'.format(self.argument_file))
def prepare_task(self):
    """Assemble the complete code generator argument list and persist it to the @-file.

    :raises Errors.WafError: when the argument file cannot be written.
    """
    # Create the directory if it doesn't already exist
    self.output_dir.mkdir()
    # We expect json output for friendlier parsing
    self.add_argument("-output-using-json")
    self.add_argument('-input-path "{}"'.format(clean_path(self.input_dir.abspath())))
    self.add_argument('-output-path "{}"'.format(clean_path(self.output_dir.abspath())))
    # Write input files to a file (command line version is too long)
    for input_file in self.inputs:
        input_file_rel_path = clean_path(input_file.path_from(self.input_dir))
        self.add_argument('-input-file "{}"'.format(input_file_rel_path))
        input_file.parent.get_bld().mkdir()

    def pypath(python_path):
        # Absolute paths are good to go as-is.
        # Relative paths are assumed relative to src.
        if not os.path.isabs(python_path):
            # Toss it in a node to figure out an absolute path
            python_path_node = self.generator.bld.srcnode.make_node(python_path)
            python_path = python_path_node.abspath()
        if not os.path.exists(python_path):
            Logs.warn('az_code_gen: Path given as python path does not exist: {}'.format(python_path))
        return clean_path(python_path)

    # Python paths
    self.add_argument('-python-home "{}"'.format(pypath(self.env['CODE_GENERATOR_PYTHON_HOME'])))
    for python_path in self.env['CODE_GENERATOR_PYTHON_PATHS']:
        self.add_argument('-python-path "{}"'.format(pypath(python_path)))
    # Debug python paths
    self.add_argument('-python-home-debug "{}"'.format(pypath(self.env['CODE_GENERATOR_PYTHON_HOME_DEBUG'])))
    for python_debug_path in self.env['CODE_GENERATOR_PYTHON_DEBUG_PATHS']:
        self.add_argument('-python-debug-path "{}"'.format(pypath(python_debug_path)))
    if code_generator_ignore_includes:
        self.add_argument('-ignore-includes')
    if code_generator_suppress_errors_as_warnings:
        self.add_argument('-suppress-errors-as-warnings')
    if code_generator_verbose:
        self.add_argument('-v')
    if Utils.unversioned_sys_platform().startswith('linux'):
        self.add_argument('-include-path /usr/include/c++/v1')
    for include in self.includes:
        self.add_argument('-include-path "{}"'.format(clean_path(include.abspath())))
    for include_path in self.env['CODE_GENERATOR_INCLUDE_PATHS']:
        self.add_argument('-include-path "{}"'.format(pypath(include_path)))
    if 'CODE_GENERATOR_CLANG_INCLUDE_PATH' in self.env:
        for clang_include_path in self.env['CODE_GENERATOR_CLANG_INCLUDE_PATH']:
            self.add_argument('-include-path "{}"'.format(clean_path(clang_include_path)))
    for define in self.defines:
        self.add_argument('-define {}'.format(quote(define)))
    for script_node in self.script_nodes:
        self.add_argument('-codegen-script "{}"'.format(clean_path(script_node.get_src().abspath())))
    # Include file that contains code generation tag definitions
    codegen_tags = self.env['CODE_GENERATOR_TAGS']
    if not codegen_tags:
        codegen_tags = 'Code/Framework/AzCore/AzCore/Preprocessor/CodeGen.h'
    self.add_argument('-force-include "{}"'.format(clean_path(self.generator.bld.CreateRootRelativePath(codegen_tags))))
    if self.error_output_file_node:
        self.add_argument('-redirect-output-file "{}"'.format(clean_path(self.error_output_file_node.abspath())))
    if 'CLANG_SEARCH_PATHS' in self.env:
        self.add_argument('-resource-dir "{}"'.format(self.env['CLANG_SEARCH_PATHS']['libraries'][0]))
    if 'ISYSROOT' in self.env:
        self.add_argument('-isysroot "{}"'.format(self.env['ISYSROOT']))
    if 'ANDROID' in self.defines:
        self.add_argument('-is-android-build')
        # Mine the Android toolchain/target/sysroot out of the C++ flags.
        for flag in self.env['CXXFLAGS']:
            # BUGFIX: these used split('=', 2), which permits a second split
            # and truncates any value that itself contains '='; a single
            # split keeps the full right-hand side intact.
            if flag.startswith('--gcc-toolchain='):
                gcc_toolchain = flag.split('=', 1)
                self.add_argument('-android-toolchain "{}"'.format(clean_path(gcc_toolchain[1])))
                continue
            if flag.startswith('--target='):
                android_target = flag.split('=', 1)
                self.add_argument('-android-target "{}"'.format(clean_path(android_target[1])))
                continue
            if flag.startswith('--sysroot='):
                android_sysroot = flag.split('=', 1)
                self.add_argument('-android-sysroot "{}"'.format(clean_path(android_sysroot[1])))
                continue
    status, self.argument_file = self.write_argument_list_to_file()
    if not status:
        raise Errors.WafError('[ERROR] az_code_gen task creation failed')
def can_retrieve_cache(self):
    """Restore recorded outputs before asking the base class about cache retrieval."""
    # NOTE(review): azcg_get() is called with a default value, so it is
    # unclear this can actually raise KeyError; the guard looks defensive -
    # confirm against azcg_get's implementation.
    try:
        self.outputs = self.azcg_get('AZCG_OUTPUTS', [])
    except KeyError:
        return False
    return super(az_code_gen, self).can_retrieve_cache()
def run(self):
    """Worker-thread entry point: reset link bookkeeping and run the generator."""
    # clear link dependencies
    self.azcg_set('link_inputs', [])
    return self.run_code_generator()
def scan(self):
    """
    Re-use the deps from the last run, just as cxx does
    """
    # Dependencies are discovered by the generator itself and persisted in
    # post_run(); scanning simply replays what was stored for this task uid.
    dep_nodes = self.generator.bld.node_deps.get(self.uid(), [])
    return (dep_nodes, [])
def runnable_status(self):
    """
    Ensure that all outputs exist before skipping execution.
    """
    ret = super(az_code_gen, self).runnable_status()
    if ret == Task.SKIP_ME:
        # Get output nodes from storage, check for path not signature as it may not be stable in azcg
        outputs = self.azcg_get('AZCG_OUTPUTS', [])
        for output_node in outputs:
            # If you can't find the file, running is required
            output_path = output_node.abspath()
            if not os.path.isfile(output_path):
                Logs.debug(
                    'az_code_gen: Running task for file {}, output file {} not found.'.format(
                        self.inputs[0].abspath(), output_path))
                return Task.RUN_ME
        # Skipping: re-apply the side effects a real run would have produced.
        # Also add the raw output path
        self.generator.env.append_unique('INCPATHS', get_azcg_output_dir_node(self.generator).abspath())
        # Also add paths we stored from prior builds
        azcg_paths = self.azcg_get('AZCG_INCPATHS', [])
        self.propagate_azcg_incpaths(azcg_paths)
        # link_inputs is a list of nodes that need to be added to the link each time
        for link_node in self.azcg_get('link_inputs', []):
            if not self.add_link_task(link_node):
                return Task.EXCEPTION
        self.outputs = outputs
        self.generator.source += outputs
    return ret
def get_node_from_dependency_path(self, path):
    """Resolve a dependency *path* reported by the generator to a waf Node.

    Absolute paths are case-corrected on Windows and resolved from the
    filesystem root; relative paths are resolved against the build node.
    Returns the Node, or None if path_to_node cannot find one.
    """
    # collect headers and add them to deps
    # this is ported from msvcdeps.py
    try:
        cached_nodes = self.bld.cached_nodes
    except AttributeError:
        # BUGFIX: was a bare 'except:'; a missing attribute (first lookup on
        # this build context) is the only expected failure here.
        cached_nodes = self.bld.cached_nodes = {}
    # NOTE(review): 'self.bld' is used above while 'self.generator.bld' is
    # used below - presumably the same context; confirm.
    bld = self.generator.bld
    lowercase = False
    if Utils.is_win32:
        (drive, _) = os.path.splitdrive(bld.srcnode.abspath())
        lowercase = drive == drive.lower()
    correct_case_path = bld.path.abspath()
    correct_case_path_len = len(correct_case_path)
    correct_case_path_norm = os.path.normcase(correct_case_path)
    if os.path.isabs(path):
        if Utils.is_win32:
            # Force drive letter to match conventions of main source tree
            drive, tail = os.path.splitdrive(path)
            if os.path.normcase(path[:correct_case_path_len]) == correct_case_path_norm:
                # Path is in the sandbox, force it to be correct. MSVC sometimes returns a lowercase path.
                path = correct_case_path + path[correct_case_path_len:]
            else:
                # Check the drive letter
                if lowercase and (drive != drive.lower()):
                    path = drive.lower() + tail
                elif (not lowercase) and (drive != drive.upper()):
                    path = drive.upper() + tail
        return path_to_node(bld.root, path, cached_nodes)
    else:
        base_node = bld.bldnode
        # when calling find_resource, make sure the path does not begin by '..'
        path = [k for k in Utils.split_path(path) if k and k != '.']
        while path[0] == '..':
            path = path[1:]
            base_node = base_node.parent
        return path_to_node(base_node, path, cached_nodes)
def post_run(self):
    """Finalize the task: register outputs, resolve dependencies, persist azcg state."""
    if hasattr(self, 'cached'):
        # Cache hit: outputs were restored from the cache; re-apply the side
        # effects a real run would have produced.
        # Also add the raw output path
        self.generator.env.append_unique('INCPATHS', get_azcg_output_dir_node(self.generator).abspath())
        # Also add paths we stored from prior builds
        azcg_paths = self.azcg_get('AZCG_INCPATHS', [])
        self.propagate_azcg_incpaths(azcg_paths)
        # link_inputs is a list of nodes that need to be added to the link each time
        for link_node in self.azcg_get('link_inputs', []):
            if not self.add_link_task(link_node):
                return Task.EXCEPTION
        self.generator.source += self.outputs
    else:
        # Register output files generated by the code gen execution
        self.process_generated_output()
    bld = self.generator.bld
    dep_node = None
    resolved_nodes = []
    # Resolve registered dependencies we got into dependency nodes
    for path in self.registered_dependencies:
        dep_node = self.get_node_from_dependency_path(path)
        if dep_node:
            if not (dep_node.is_child_of(bld.srcnode) or dep_node.is_child_of(bld.bldnode)):
                # System library
                continue
            if dep_node in self.inputs:
                # Self-dependency
                continue
            if dep_node in self.outputs:
                # Circular dependency
                continue
            append_to_unique_list(resolved_nodes, dep_node)
        else:
            Logs.error('az_code_gen: Unable to find dependency file as node: {}'.format(path))
    # Add azcg_deps and script nodes as dependencies
    for dep_node in itertools.chain(self.azcg_deps, self.script_nodes):
        append_to_unique_list(resolved_nodes, dep_node)
    bld.node_deps[self.uid()] = resolved_nodes
    # force waf to recompute a full signature for this task (we may have new/deleted dependencies we need it to account for)
    try:
        del self.cache_sig
    except AttributeError:
        # BUGFIX: was a bare 'except:'; only a missing attribute is expected
        # when the signature has not been cached yet.
        pass
    self.azcg_set('AZCG_OUTPUTS', self.outputs)
    Task.Task.post_run(self)
    # Due to #includes of code generator header files, we can have an output node which is also an input node.
    # In addition, we are taking nodes that are not originally build nodes (e.g. header files) and building them, which alters the signature flow in Node.get_bld_sig().
    # Task.post_run() default behavior is to set the Node.sig to the task signature which will change our computed task signature because our outputs are our inputs in same cases.
    # To mitigate this, we must restore the original signature for any file that had a non-build signature previously.
    # However, we do not want to alter the signature for files that will be consumed by later tasks.
    # Therefore, we should restore signatures on any node that is not being added to the build (any output nodes not in link_task).
    for output in self.outputs:
        if output not in self.azcg_get('link_inputs', []):
            output.sig = output.cache_sig = Utils.h_file(output.abspath())
@conf
def is_azcodegen_node(ctx, node):
    """Return True when *node* is, or lives under, the build's 'azcg' output directory."""
    azcg_root = ctx.bldnode.make_node('azcg')
    current = node
    # Walk up the parent chain; waf caches nodes, so identity comparison
    # against the canonical 'azcg' node is sufficient.
    while current.parent:
        if current is azcg_root:
            return True
        current = current.parent
    return False
| 46.5482 | 266 | 0.640751 |
9194106f7566a893ad381ce9c114f6f26fbcec26 | 1,277 | py | Python | resources/trials/maya/deformerOrder.py | adrienparis/Gapalion | 35d66c2d0de05ffb493a4d8753f675999ff9eaab | [
"MIT"
] | null | null | null | resources/trials/maya/deformerOrder.py | adrienparis/Gapalion | 35d66c2d0de05ffb493a4d8753f675999ff9eaab | [
"MIT"
] | null | null | null | resources/trials/maya/deformerOrder.py | adrienparis/Gapalion | 35d66c2d0de05ffb493a4d8753f675999ff9eaab | [
"MIT"
] | null | null | null | #!/bin/env mayapy
# -- coding: utf-8 --
u"""Sur une propal de Yann GENTY le boss ♥
l'ordre des deformer doit être cluster, puis skinCluster, pour finir sur les blendShape
Si vous mettez des lattices, le test peut se perdre, et donc une vérification manuelle est nécessaire"""
__author__ = "Adrien PARIS"
__email__ = "a.paris.cs@gmail.com"
import maya.cmds as cmds
title = u"Vérification de l'ordre des deformer"
image = ""
def matchnmatch(a, b):
    """Return True when the elements of *b* occur in the relative order given by *a*.

    *a* is the reference ordering; consecutive repeats in *b* are allowed.
    An empty reference list always yields False; once that is ruled out,
    an empty *b* always matches.
    """
    if not a:
        return False
    position = 0
    for entry in b:
        # Advance through the reference order until it lines up with entry.
        while position < len(a) and a[position] != entry:
            position += 1
        if position == len(a):
            return False
    return True
def test():
temp = ["cluster", "skinCluster", "blendShape"]
# temp = ["cluster", "skinCluster", "tweak", "blendShape"]
passed = True
msg = ["the order should be : ".ljust(30) + "-> " + str(temp), ""]
for s in cmds.ls(type="transform"):
cnt = cmds.listHistory(s)
if cnt is None:
continue
cnt = [cmds.nodeType(x) for x in cnt]
cnt = [x for x in cnt if x in temp]
v = matchnmatch(temp, cnt)
if not v:
passed = False
msg.append(s.ljust(30) + "-> " + str(cnt))
if passed:
msg = []
return passed, msg | 27.76087 | 104 | 0.573218 |
c308169a9cfd95aa5e4f4f9613604699a06098dc | 540 | py | Python | core/manage.py | sieira/cineclub | 005f631cc04c92d58232529b58934734aee4c674 | [
"BSD-3-Clause"
] | null | null | null | core/manage.py | sieira/cineclub | 005f631cc04c92d58232529b58934734aee4c674 | [
"BSD-3-Clause"
] | null | null | null | core/manage.py | sieira/cineclub | 005f631cc04c92d58232529b58934734aee4c674 | [
"BSD-3-Clause"
] | null | null | null | #!/usr/bin/env python
import os
import sys
if __name__ == '__main__':
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'cineclub.settings')
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
execute_from_command_line(sys.argv)
| 33.75 | 73 | 0.687037 |
65bd05005a432e1c2f12ed5cb35be2e9557d07bf | 3,241 | py | Python | cerberus/tests/test_rule_excludes.py | pykler/cerberus | 8765b317442c002a84e556bd5d9677b868e6deb2 | [
"0BSD"
] | 2,020 | 2017-03-08T13:24:00.000Z | 2022-03-30T19:46:02.000Z | cerberus/tests/test_rule_excludes.py | pykler/cerberus | 8765b317442c002a84e556bd5d9677b868e6deb2 | [
"0BSD"
] | 281 | 2017-03-08T23:05:10.000Z | 2022-03-25T01:37:04.000Z | cerberus/tests/test_rule_excludes.py | pykler/cerberus | 8765b317442c002a84e556bd5d9677b868e6deb2 | [
"0BSD"
] | 171 | 2017-03-10T17:27:41.000Z | 2022-03-16T06:43:34.000Z | from pytest import mark
from cerberus import errors
from cerberus.tests import assert_fail, assert_success
@mark.parametrize(
    ("test_function", "document"),
    [
        (assert_success, {'this_field': {}}),
        (assert_success, {'that_field': {}}),
        (assert_success, {}),
        (assert_fail, {'that_field': {}, 'this_field': {}}),
    ],
)
def test_excludes(test_function, document):
    # One-directional exclusion: only this_field declares 'excludes', so
    # validation fails only when both fields appear together.
    test_function(
        schema={
            'this_field': {'type': 'dict', 'excludes': 'that_field'},
            'that_field': {'type': 'dict'},
        },
        document=document,
    )
def test_excludes_basic_error_handler_message(validator):
    # Both fields are required yet mutually exclusive, so each reports an
    # EXCLUDES_FIELD error naming the field(s) it conflicts with.
    assert_fail(
        document={'that_field': {}, 'this_field': {}},
        schema={
            'this_field': {
                'type': 'dict',
                'excludes': ['that_field', 'bazo_field'],
                'required': True,
            },
            'that_field': {'type': 'dict', 'excludes': 'this_field', 'required': True},
        },
        validator=validator,
    )
    message = errors.BasicErrorHandler.messages[errors.EXCLUDES_FIELD.code]
    assert validator.errors == {
        'that_field': [message.format("'this_field'", field="that_field")],
        'this_field': [
            message.format("'that_field', 'bazo_field'", field="this_field")
        ],
    }
@mark.parametrize(
    ("test_function", "document"),
    [
        (assert_success, {'this_field': {}}),
        (assert_success, {'that_field': {}}),
        (assert_success, {'that_field': {}, 'bazo_field': {}}),
        (assert_fail, {'this_field': {}, 'that_field': {}}),
        (assert_fail, {'this_field': {}, 'bazo_field': {}}),
        (assert_fail, {'that_field': {}, 'this_field': {}, 'bazo_field': {}}),
    ],
)
def test_excludes_of_multiple_fields(test_function, document):
    # 'excludes' may list several fields: this_field conflicts with both
    # that_field and bazo_field, while that_field/bazo_field may coexist.
    test_function(
        schema={
            'this_field': {'type': 'dict', 'excludes': ['that_field', 'bazo_field']},
            'that_field': {'type': 'dict', 'excludes': 'this_field'},
            'bazo_field': {'type': 'dict'},
        },
        document=document,
    )
@mark.parametrize(
    ("test_function", "document"),
    [
        (assert_success, {'this_field': {}}),
        (assert_success, {'that_field': {}}),
        (assert_fail, {}),
        (assert_fail, {'that_field': {}, 'this_field': {}}),
    ],
)
def test_excludes_of_required_fields(test_function, document):
    # Mutually exclusive but both required: exactly one of the two fields
    # must be present (an empty document fails, as does supplying both).
    test_function(
        schema={
            'this_field': {'type': 'dict', 'excludes': 'that_field', 'required': True},
            'that_field': {'type': 'dict', 'excludes': 'this_field', 'required': True},
        },
        document=document,
        update=False,
    )
@mark.parametrize(
    ("test_function", "document"),
    [
        (assert_success, {'this_field': {}}),
        (assert_success, {'that_field': {}}),
        (assert_success, {}),
        (assert_fail, {'that_field': {}, 'this_field': {}}),
    ],
)
def test_mutual_excludes(test_function, document):
    # Symmetric exclusion without 'required': either field alone (or neither)
    # is valid; only the combination of both fails.
    test_function(
        schema={
            'this_field': {'type': 'dict', 'excludes': 'that_field'},
            'that_field': {'type': 'dict', 'excludes': 'this_field'},
        },
        document=document,
    )
| 30.28972 | 87 | 0.547362 |
1f4a8e99328acc6db042e0aef4d9f6a3b034760d | 1,468 | py | Python | examples/tf/vpgis_inverted_pendulum.py | icml2020submission6857/metarl | 9b66cefa2b6bcb6a38096d629ce8853b47c7171d | [
"MIT"
] | 2 | 2020-03-15T14:35:15.000Z | 2021-02-15T16:38:00.000Z | examples/tf/vpgis_inverted_pendulum.py | icml2020submission6857/metarl | 9b66cefa2b6bcb6a38096d629ce8853b47c7171d | [
"MIT"
] | null | null | null | examples/tf/vpgis_inverted_pendulum.py | icml2020submission6857/metarl | 9b66cefa2b6bcb6a38096d629ce8853b47c7171d | [
"MIT"
] | 1 | 2020-02-24T03:04:23.000Z | 2020-02-24T03:04:23.000Z | #!/usr/bin/env python3
"""Example using VPG with ISSampler.
Iterations alternate between live and importance sampled iterations.
"""
import gym
from metarl.envs import normalize
from metarl.experiment import run_experiment
from metarl.np.baselines import LinearFeatureBaseline
from metarl.sampler import ISSampler
from metarl.tf.algos import VPG
from metarl.tf.envs import TfEnv
from metarl.tf.experiment import LocalTFRunner
from metarl.tf.policies import GaussianMLPPolicy
def run_task(snapshot_config, *_):
    """Run the job.

    Args:
        snapshot_config (metarl.experiment.SnapshotConfig): Configuration
            values for snapshotting.
        *_ (object): Hyperparameters (unused).

    """
    with LocalTFRunner(snapshot_config=snapshot_config) as runner:
        # Observation normalization wraps the raw gym environment.
        env = TfEnv(normalize(gym.make('InvertedPendulum-v2')))
        policy = GaussianMLPPolicy(env_spec=env.spec, hidden_sizes=(32, 32))
        baseline = LinearFeatureBaseline(env_spec=env.spec)
        algo = VPG(
            env_spec=env.spec,
            policy=policy,
            baseline=baseline,
            max_path_length=100,
            discount=0.99,
            max_kl_step=0.01,
        )
        # ISSampler alternates live and importance-sampled iterations;
        # n_backtrack=1 limits sample reuse to the previous iteration.
        runner.setup(algo,
                     env,
                     sampler_cls=ISSampler,
                     sampler_args=dict(n_backtrack=1))
        runner.train(n_epochs=40, batch_size=4000)
# Launch the experiment: snapshot only the final iteration and fix the seed
# for reproducibility.
run_experiment(
    run_task,
    snapshot_mode='last',
    seed=1,
)
| 26.690909 | 76 | 0.666894 |
6b064ca582cfbfdf87ddb61570fda368f89034d5 | 9,454 | py | Python | tests/select_related_regress/tests.py | jarvys/django-1.7-jdb | c5c68d2c5d96004e869fbcfda21f42932cab3dc8 | [
"BSD-3-Clause"
] | null | null | null | tests/select_related_regress/tests.py | jarvys/django-1.7-jdb | c5c68d2c5d96004e869fbcfda21f42932cab3dc8 | [
"BSD-3-Clause"
] | null | null | null | tests/select_related_regress/tests.py | jarvys/django-1.7-jdb | c5c68d2c5d96004e869fbcfda21f42932cab3dc8 | [
"BSD-3-Clause"
] | null | null | null | from __future__ import unicode_literals
from django.test import TestCase
from django.utils import six
from .models import (Building, Child, Device, Port, Item, Country, Connection,
ClientStatus, State, Client, SpecialClient, TUser, Person, Student,
Organizer, Class, Enrollment, Hen, Chick, A, B, C)
class SelectRelatedRegressTests(TestCase):
def test_regression_7110(self):
"""
Regression test for bug #7110.
When using select_related(), we must query the
Device and Building tables using two different aliases (each) in order to
differentiate the start and end Connection fields. The net result is that
both the "connections = ..." queries here should give the same results
without pulling in more than the absolute minimum number of tables
(history has shown that it's easy to make a mistake in the implementation
and include some unnecessary bonus joins).
"""
b = Building.objects.create(name='101')
dev1 = Device.objects.create(name="router", building=b)
dev2 = Device.objects.create(name="switch", building=b)
dev3 = Device.objects.create(name="server", building=b)
port1 = Port.objects.create(port_number='4', device=dev1)
port2 = Port.objects.create(port_number='7', device=dev2)
port3 = Port.objects.create(port_number='1', device=dev3)
c1 = Connection.objects.create(start=port1, end=port2)
c2 = Connection.objects.create(start=port2, end=port3)
connections = Connection.objects.filter(start__device__building=b, end__device__building=b).order_by('id')
self.assertEqual([(c.id, six.text_type(c.start), six.text_type(c.end)) for c in connections],
[(c1.id, 'router/4', 'switch/7'), (c2.id, 'switch/7', 'server/1')])
connections = Connection.objects.filter(start__device__building=b, end__device__building=b).select_related().order_by('id')
self.assertEqual([(c.id, six.text_type(c.start), six.text_type(c.end)) for c in connections],
[(c1.id, 'router/4', 'switch/7'), (c2.id, 'switch/7', 'server/1')])
# This final query should only have seven tables (port, device and building
# twice each, plus connection once). Thus, 6 joins plus the FROM table.
self.assertEqual(str(connections.query).count(" JOIN "), 6)
def test_regression_8106(self):
"""
Regression test for bug #8106.
Same sort of problem as the previous test, but this time there are
more extra tables to pull in as part of the select_related() and some
of them could potentially clash (so need to be kept separate).
"""
us = TUser.objects.create(name="std")
usp = Person.objects.create(user=us)
uo = TUser.objects.create(name="org")
uop = Person.objects.create(user=uo)
s = Student.objects.create(person=usp)
o = Organizer.objects.create(person=uop)
c = Class.objects.create(org=o)
Enrollment.objects.create(std=s, cls=c)
e_related = Enrollment.objects.all().select_related()[0]
self.assertEqual(e_related.std.person.user.name, "std")
self.assertEqual(e_related.cls.org.person.user.name, "org")
def test_regression_8036(self):
"""
Regression test for bug #8036
the first related model in the tests below
("state") is empty and we try to select the more remotely related
state__country. The regression here was not skipping the empty column results
for country before getting status.
"""
Country.objects.create(name='Australia')
active = ClientStatus.objects.create(name='active')
client = Client.objects.create(name='client', status=active)
self.assertEqual(client.status, active)
self.assertEqual(Client.objects.select_related()[0].status, active)
self.assertEqual(Client.objects.select_related('state')[0].status, active)
self.assertEqual(Client.objects.select_related('state', 'status')[0].status, active)
self.assertEqual(Client.objects.select_related('state__country')[0].status, active)
self.assertEqual(Client.objects.select_related('state__country', 'status')[0].status, active)
self.assertEqual(Client.objects.select_related('status')[0].status, active)
def test_multi_table_inheritance(self):
""" Exercising select_related() with multi-table model inheritance. """
c1 = Child.objects.create(name="child1", value=42)
Item.objects.create(name="item1", child=c1)
Item.objects.create(name="item2")
self.assertQuerysetEqual(
Item.objects.select_related("child").order_by("name"),
["<Item: item1>", "<Item: item2>"]
)
def test_regression_12851(self):
    """
    Regression for #12851.

    Deferred fields are used correctly if you select_related a subset
    of fields.
    """
    australia = Country.objects.create(name='Australia')
    active = ClientStatus.objects.create(name='active')
    wa = State.objects.create(name="Western Australia", country=australia)
    Client.objects.create(name='Brian Burke', state=wa, status=active)
    # select_related('state') pulls the state row, but state__name is
    # deferred; accessing burke.state.name must still yield the value.
    burke = Client.objects.select_related('state').defer('state__name').get(name='Brian Burke')
    self.assertEqual(burke.name, 'Brian Burke')
    self.assertEqual(burke.state.name, 'Western Australia')
    # Still works if we're dealing with an inherited class
    SpecialClient.objects.create(name='Troy Buswell', state=wa, status=active, value=42)
    troy = SpecialClient.objects.select_related('state').defer('state__name').get(name='Troy Buswell')
    self.assertEqual(troy.name, 'Troy Buswell')
    self.assertEqual(troy.value, 42)
    self.assertEqual(troy.state.name, 'Western Australia')
    # Still works if we defer an attribute on the inherited class
    # (here 'value' lives on the child table of the MTI hierarchy).
    troy = SpecialClient.objects.select_related('state').defer('value', 'state__name').get(name='Troy Buswell')
    self.assertEqual(troy.name, 'Troy Buswell')
    self.assertEqual(troy.value, 42)
    self.assertEqual(troy.state.name, 'Western Australia')
    # Also works if you use only, rather than defer
    troy = SpecialClient.objects.select_related('state').only('name', 'state').get(name='Troy Buswell')
    self.assertEqual(troy.name, 'Troy Buswell')
    self.assertEqual(troy.value, 42)
    self.assertEqual(troy.state.name, 'Western Australia')
def test_null_join_promotion(self):
    """
    select_related() over a nullable FK must use an INNER join when an
    inner join to the same table already exists (via a filter), and be
    promoted to LEFT OUTER when rows with a NULL FK must be preserved.
    """
    australia = Country.objects.create(name='Australia')
    active = ClientStatus.objects.create(name='active')
    wa = State.objects.create(name="Western Australia", country=australia)
    # bob has no state (NULL FK); jack does.
    bob = Client.objects.create(name='Bob', status=active)
    jack = Client.objects.create(name='Jack', status=active, state=wa)
    qs = Client.objects.filter(state=wa).select_related('state')
    with self.assertNumQueries(1):
        self.assertEqual(list(qs), [jack])
        self.assertEqual(qs[0].state, wa)
    # The select_related join wasn't promoted as there was already an
    # existing (even if trimmed) inner join to state.
    self.assertFalse('LEFT OUTER' in str(qs.query))
    qs = Client.objects.select_related('state').order_by('name')
    with self.assertNumQueries(1):
        self.assertEqual(list(qs), [bob, jack])
        # bob's NULL state must survive the join as None, not drop the row.
        self.assertIs(qs[0].state, None)
        self.assertEqual(qs[1].state, wa)
    # The select_related join was promoted as there is already an
    # existing join.
    self.assertTrue('LEFT OUTER' in str(qs.query))
def test_regression_19870(self):
    """
    Regression for #19870: traversing a FK both with and without
    select_related() must reach the related object's fields.
    """
    hen = Hen.objects.create(name='Hen')
    Chick.objects.create(name='Chick', mother=hen)
    self.assertEqual(Chick.objects.all()[0].mother.name, 'Hen')
    self.assertEqual(Chick.objects.select_related()[0].mother.name, 'Hen')
def test_ticket_10733(self):
    """
    Ticket #10733: combining only() (a field subset, including fields on
    related models) with select_related() must fetch everything requested
    in the single initial query — the assertions below run with
    assertNumQueries(0).
    """
    a = A.objects.create(name='a', lots_of_text='lots_of_text_a', a_field='a_field')
    b = B.objects.create(name='b', lots_of_text='lots_of_text_b', b_field='b_field')
    c = C.objects.create(name='c', lots_of_text='lots_of_text_c', is_published=True,
                         c_a=a, c_b=b)
    results = C.objects.all().only('name', 'lots_of_text', 'c_a', 'c_b', 'c_b__lots_of_text',
                                   'c_a__name', 'c_b__name').select_related()
    # Identity transform: compare the model instances themselves.
    self.assertQuerysetEqual(results, [c], lambda x: x)
    # All listed fields (own and related) were loaded up front: no extra queries.
    with self.assertNumQueries(0):
        qs_c = results[0]
        self.assertEqual(qs_c.name, 'c')
        self.assertEqual(qs_c.lots_of_text, 'lots_of_text_c')
        self.assertEqual(qs_c.c_b.lots_of_text, 'lots_of_text_b')
        self.assertEqual(qs_c.c_a.name, 'a')
        self.assertEqual(qs_c.c_b.name, 'b')
def test_regression_22508(self):
    """Regression for #22508: chained select_related through a reverse manager."""
    site = Building.objects.create(name='101')
    router = Device.objects.create(name="router", building=site)
    Port.objects.create(port_number='1', device=router)
    fetched_device = Device.objects.get()
    port = fetched_device.port_set.select_related('device__building').get()
    # device and device.building were cached by select_related: no extra queries.
    with self.assertNumQueries(0):
        port.device.building
| 47.034826 | 131 | 0.656442 |
48a239c1f371cf052f430b49a4196bff0f793668 | 3,050 | py | Python | app.py | JohnCSW/simple-rest-api | 835f44d4b220ba914daaab7938a515bb8b3d029c | [
"MIT"
] | null | null | null | app.py | JohnCSW/simple-rest-api | 835f44d4b220ba914daaab7938a515bb8b3d029c | [
"MIT"
] | null | null | null | app.py | JohnCSW/simple-rest-api | 835f44d4b220ba914daaab7938a515bb8b3d029c | [
"MIT"
] | null | null | null | from flask import Flask, request
from common.entity_serializer import serialize_entity
from repo.base_repo import BaseRepo
from repo.product_repo import ProductRepo
from repo.query.customer_query_builder import CustomerQueryBuilder
from repo.query.employee_query_builder import EmployeeQueryBuilder
from repo.query.order_query_bulder import OrderQueryBuilder
# WSGI application instance; the routes below register themselves against it.
app = Flask(__name__)
@app.route('/api/customers', methods=['GET'], endpoint='find_customers')
@serialize_entity
def find_customers():
    """List customers, filtered by optional `first_name`/`last_name` query
    params and optionally ordered by credit limit when `order_by=credit_limit`.
    """
    args = request.args
    order_by = args.get('order_by')
    query = (
        CustomerQueryBuilder()
        .first_name(args.get('first_name'))
        .last_name(args.get('last_name'))
        .order_by_credit_limit(order_by == 'credit_limit')
        .build()
    )
    return BaseRepo().find_all(query)
@app.route('/api/employees', methods=['GET'], endpoint='find_employees')
@serialize_entity
def find_employees():
    """List employees, filtered by optional `first_name`/`last_name` query
    params and optionally ordered by last name when `order_by=last_name`.
    """
    args = request.args
    order_by = args.get('order_by')
    query = (
        EmployeeQueryBuilder()
        .first_name(args.get('first_name'))
        .last_name(args.get('last_name'))
        .order_by_last_name(order_by == 'last_name')
        .build()
    )
    return BaseRepo().find_all(query)
@app.route('/api/orders', methods=['GET'], endpoint='find_orders')
@serialize_entity
def find_orders():
    """List orders, filtered by the customer's `first_name`/`last_name`
    query params; `order_by` may be 'date' or 'last_name'.
    """
    args = request.args
    order_by = args.get('order_by')
    query = (
        OrderQueryBuilder()
        .cust_first_name(args.get('first_name'))
        .cust_last_name(args.get('last_name'))
        .order_by_date(order_by == 'date')
        .order_by_cust_last_name(order_by == 'last_name')
        .build()
    )
    return BaseRepo().find_all(query)
@app.route('/api/products', methods=['POST'], endpoint='create_product')
@serialize_entity
def create_proudct():  # NOTE: function name keeps its historical typo; the endpoint name is authoritative.
    """Create a new product from the JSON request body.

    Example input::

        {
            "MSRP": 95.7,
            "buyPrice": 48.81,
            "productCode": "S10_2022",
            "productDescription": "This replica features working kickstand, front suspension, gear-shift lever, footbrake lever, drive chain, wheels and steering. All parts are particularly delicate due to their precise scale and require special care and attention.",
            "productLine": "Motorcycles",
            "productName": "1969 Harley Davidson Ultimate Chopper",
            "productScale": "1:10",
            "productVendor": "Min Lin Diecast",
            "quantityInStock": "7933"
        }

    :return: 'Success' if no errors occur, but it's now lack of error handling.
    """
    repo = ProductRepo()
    repo.create_new(request.json)
    return 'Success'
| 35.465116 | 263 | 0.702623 |
8cd6d56a9c05a4babcf89961392e490e12e62ac2 | 1,017 | py | Python | tljh/hooks.py | jzf2101/the-littlest-jupyterhub | bdfc390c705c8e017c22f389fe419e8e7fc34e6c | [
"BSD-3-Clause"
] | 1 | 2019-02-14T04:54:00.000Z | 2019-02-14T04:54:00.000Z | tljh/hooks.py | jzf2101/the-littlest-jupyterhub | bdfc390c705c8e017c22f389fe419e8e7fc34e6c | [
"BSD-3-Clause"
] | null | null | null | tljh/hooks.py | jzf2101/the-littlest-jupyterhub | bdfc390c705c8e017c22f389fe419e8e7fc34e6c | [
"BSD-3-Clause"
] | 1 | 2019-02-14T04:54:09.000Z | 2019-02-14T04:54:09.000Z | """
Hook specifications that pluggy plugins can override
"""
import pluggy
# Decorator used below to declare hook specifications for the 'tljh' project.
hookspec = pluggy.HookspecMarker('tljh')
# Decorator plugins use to mark their implementations of these hooks.
hookimpl = pluggy.HookimplMarker('tljh')
@hookspec
def tljh_extra_user_conda_packages():
    """
    Return list of extra conda packages to install in user environment.

    Hook specification only: the body is intentionally empty; return
    values come from plugins' ``@hookimpl`` implementations.
    """
    pass
@hookspec
def tljh_extra_user_pip_packages():
    """
    Return list of extra pip packages to install in user environment.

    Hook specification only: the body is intentionally empty; return
    values come from plugins' ``@hookimpl`` implementations.
    """
    pass
@hookspec
def tljh_extra_apt_packages():
    """
    Return list of extra apt packages to install in the user environment.

    These will be installed before additional pip or conda packages.
    Hook specification only: the body is intentionally empty.
    """
    pass
@hookspec
def tljh_config_post_install(config):
    """
    Modify on-disk tljh-config after installation.

    config is a dict-like object that should be modified
    in-place. The contents of the on-disk config.yaml will
    be the serialized contents of config, so try to not
    overwrite anything the user might have explicitly set.
    """
    pass
9b6c7ae515031c09cbad1b5a888c37a12771ba85 | 210 | py | Python | jobs/admin.py | Manasranjanpati/Jobpost | e5654129538e70cedf8aafc65c1b0289a01535e5 | [
"MIT"
] | 20 | 2018-05-04T18:42:35.000Z | 2021-03-18T07:15:12.000Z | src/jobs/admin.py | fleepgeek/django-jobsite | d9547c4ee85751677ba6458380b609973c3b4a8d | [
"MIT"
] | 5 | 2020-02-11T22:22:33.000Z | 2021-06-10T20:18:05.000Z | jobs/admin.py | Manasranjanpati/Jobpost | e5654129538e70cedf8aafc65c1b0289a01535e5 | [
"MIT"
] | 8 | 2018-05-04T19:03:23.000Z | 2020-09-23T00:24:46.000Z | from django.contrib import admin
from .models import Job, Application
@admin.register(Job)
class JobAdmin(admin.ModelAdmin):
    # Voucher is shown but not editable in the admin form.
    readonly_fields = ('voucher',)
admin.site.register(Application) | 21 | 36 | 0.780952 |
ff60b582b84f5c73838bd3bfc2474b53ac057c11 | 2,828 | py | Python | nbp_conversion.py | fri-it2/nbp | 920fad740aef9dbea989547e365cf9a8636c1105 | [
"MIT"
] | 1 | 2019-10-28T10:16:30.000Z | 2019-10-28T10:16:30.000Z | nbp_conversion.py | fri-it2/nbp | 920fad740aef9dbea989547e365cf9a8636c1105 | [
"MIT"
] | 3 | 2021-06-08T21:50:11.000Z | 2022-01-13T02:53:34.000Z | nbp_conversion.py | fri-it2/nbp | 920fad740aef9dbea989547e365cf9a8636c1105 | [
"MIT"
] | null | null | null | import string
import pdb
import numpy as np
# Digit alphabet for base-36 NBP addresses: '0'-'9' followed by 'A'-'Z'.
chs = string.digits + string.ascii_uppercase
# Radix of the alphabet (36). NOTE(review): some functions below use this
# module-global instead of len() of their chs argument — verify callers.
chs_len=len(chs)
def convert36ToAscii(start, chs):
    """Convert a decimal NBP address to ASCII digits.

    Digits are emitted least-significant first (reverse / "big end at the
    lowest address" order, per the original docstring).

    :param start: NBP address as a non-negative int
    :param chs: digit alphabet (e.g. string.digits + string.ascii_uppercase)
    :return: ASCII address; '' when start == 0
    """
    base = len(chs)
    digits = []
    while start:
        start, remainder = divmod(start, base)
        digits.append(chs[remainder])
    return ''.join(digits)
def convertAsciiTo36(naslov, chs,number):
"""convert ascci NBP adress to decimal NBP address
:param naslov: NBP address ASCII reverse: Big Endian Byte Order: The most significant byte (the "big end") of the data is placed at the byte with the lowest address
:param chs: string of all digits and ascii upper
:param number:
:return: NBP in decimal
"""
number = (number+chs.index(naslov[0]))*chs_len
naslov = naslov[1:]
if naslov == "":
return number / chs_len
# import pdb
# pdb.set_trace()
return convertAsciiTo36(naslov,chs, number)
def convertDecBin(num):
    """Convert a decimal NBP address to its binary representation.

    Implements the previously empty stub (it always returned None).

    :param num: NBP address as a non-negative int
    :return: binary digits as a string, without the '0b' prefix
    """
    return bin(num)[2:]
def convertBinDec(num):
    """Convert a binary NBP address to decimal.

    Implements the previously empty stub (it always returned None).

    :param num: binary digits as a str (e.g. '1010') or int (e.g. 1010)
    :return: decimal NBP address as an int
    """
    return int(str(num), 2)
def convertOctDec(num):
    """Convert a 4-byte NBP address to a single decimal number.

    Despite the historical name, the input is not octal: ``num`` is a
    little-endian sequence of four base-256 digits (bytes), e.g.
    [240, 13, 241, 38] -> 240 + 13*256 + 241*256**2 + 38*256**3.

    :param num: sequence of exactly four ints in range 0..255
    :return: decimal NBP address (numpy integer); 0 if len(num) != 4
    """
    if len(num) != 4:
        # Fixed: original used Python 2 ``print num`` statement syntax,
        # which is a SyntaxError under Python 3.
        print(num)
        return 0
    return np.sum(np.array([1, 256, 256 * 256, 256 * 256 * 256]) * np.array(num))
def convertNumber32Bit(stevilo, delitelj):
    """Split a decimal NBP address into base-256 digits, most significant first.

    :param stevilo: decimal NBP address (non-negative int)
    :param delitelj: place value of the most significant digit,
        e.g. 256**3 for a 32-bit (4-byte) address
    :return: list of base-256 digits (big-endian)

    Example:
        convertNumber32Bit(653331952, 256**3) -> [38, 241, 13, 240]

    Fixes vs. the original recursive version:
    - intermediate zero bytes are no longer dropped: 256**3 + 5 now
      yields [1, 0, 0, 5] instead of [1, 5] (the old ``ostanek <= 256``
      early-return also wrongly accepted 256 itself as a "byte");
    - integer division (``//``) keeps digits ints under Python 3;
    - the dead ``try/except NameError`` accumulator hack (naslov was
      always a fresh local) is removed.
    """
    naslov = []
    while delitelj >= 1:
        naslov.append(stevilo // delitelj)
        stevilo %= delitelj
        delitelj //= 256
    return naslov
# Ad-hoc manual test scaffolding; most lines are intentionally commented
# out. Only the s1 conversion and its print are live.
# NOTE(review): several commented calls reference convertDecAscii, which
# is not defined in this file — presumably an older name; confirm.
#r=convertDecAscii(653331952, chs)
#print(r)
#r=convertDecAscii(64716160330,chs)
#naslov="S50PTP"
#naslov=naslov[::-1]
s1=convertAsciiTo36("ASZ65S",chs,0)
#s1=convertAsciiTo36("S56ZSA",chs,0)
#s2=convertAsciiTo36(naslov,chs,0)
#print(r)
print(s1)
#print(s)
#print(s2)
#print(convertDecAscii(1561529872,chs))
#a=convertNumber32Bit(653331952,256*256*256)
#print(a[::-1])
#a=convertOctDec([ 240,13,241,38])
#print(a)
| 25.25 | 170 | 0.667963 |
1a3565fc650d328a9375720173ff2141ff253814 | 455 | py | Python | examples/Motor/Motor_move_to_absolute_position.py | NStrijbosch/Robot-Inventor-SPIKE-Prime-hub2hub | b46d19ce138749dbe60e2ac7869c9fd9e4454d7e | [
"MIT"
] | 2 | 2021-09-12T16:53:26.000Z | 2022-01-23T00:36:09.000Z | examples/Motor/Motor_move_to_absolute_position.py | NStrijbosch/Robot-Inventor-SPIKE-Prime-hub2hub | b46d19ce138749dbe60e2ac7869c9fd9e4454d7e | [
"MIT"
] | 2 | 2021-05-16T14:00:20.000Z | 2022-02-17T20:41:12.000Z | examples/Motor/Motor_move_to_absolute_position.py | NStrijbosch/hub2hub | b46d19ce138749dbe60e2ac7869c9fd9e4454d7e | [
"MIT"
] | null | null | null | from hub2hub import TechnicHub, ble_handler
from time import sleep_ms
# Initialize ble handler and a technic hub
ble = ble_handler()
Thub = TechnicHub(ble)
# connect to a technic hub: press green button on the technic hub
Thub.connect()
# Servo motor connected to port A
Motor = Thub.port.A.motor
# move to 180 degrees and hold
Motor.run_to_position(180,stop_action = 2)
sleep_ms(1000)
# move to 0 and float
Motor.run_to_position(0, stop_action = 0) | 22.75 | 65 | 0.769231 |
0cb403a84a3041156619302fb95467a05e194275 | 902 | py | Python | win64-postgresql/pgAdmin 4/web/pgadmin/utils/html.py | vnetcon/curvy | ed3749bd5d298c7ab6c0625de91c211d6da4c762 | [
"Apache-2.0"
] | null | null | null | win64-postgresql/pgAdmin 4/web/pgadmin/utils/html.py | vnetcon/curvy | ed3749bd5d298c7ab6c0625de91c211d6da4c762 | [
"Apache-2.0"
] | 3 | 2021-09-02T15:51:44.000Z | 2022-03-02T09:53:17.000Z | win64-postgresql/pgAdmin 4/web/pgadmin/utils/html.py | vnetcon/curvy | ed3749bd5d298c7ab6c0625de91c211d6da4c762 | [
"Apache-2.0"
] | null | null | null | ##########################################################################
#
# pgAdmin 4 - PostgreSQL Tools
#
# Copyright (C) 2013 - 2020, The pgAdmin Development Team
# This software is released under the PostgreSQL Licence
#
#########################################################################
"""Utilities for HTML"""
import cgi
from pgadmin.utils import IS_PY2
def safe_str(x):
    """Coerce x to text and return it HTML-escaped for safe embedding.

    Numbers are stringified first; non-ASCII characters are replaced with
    XML character references; under Python 3 the resulting bytes are
    decoded back to str. Any failure in the coercion steps is swallowed
    and x is escaped as-is.

    NOTE(review): cgi.escape was deprecated and removed in Python 3.8 —
    html.escape(x, quote=False) is the modern equivalent; verify before
    running on newer interpreters. Also note cgi.escape's default does
    not escape quote characters, so the result is not attribute-safe.
    """
    try:
        # For Python2, it can be int, long, float
        if IS_PY2:
            if isinstance(x, (int, long, float)):
                x = str(x)
        else:
            # For Python3, it can be int, float
            if isinstance(x, (int, float)):
                x = str(x)
        # Replace non-ASCII characters with &#NNN; references.
        x = x.encode(
            'ascii', 'xmlcharrefreplace'
        ) if hasattr(x, 'encode') else x
        if not IS_PY2:
            # encode() above produced bytes on Python 3; restore str.
            x = x.decode('utf-8')
    except Exception:
        # Best-effort coercion: fall through and escape whatever we have.
        pass
    return cgi.escape(x)
| 25.055556 | 74 | 0.456763 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.