hexsha stringlengths 40 40 | size int64 4 1.02M | ext stringclasses 8
values | lang stringclasses 1
value | max_stars_repo_path stringlengths 4 209 | max_stars_repo_name stringlengths 5 121 | max_stars_repo_head_hexsha stringlengths 40 40 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 4 209 | max_issues_repo_name stringlengths 5 121 | max_issues_repo_head_hexsha stringlengths 40 40 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 67k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 4 209 | max_forks_repo_name stringlengths 5 121 | max_forks_repo_head_hexsha stringlengths 40 40 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 4 1.02M | avg_line_length float64 1.07 66.1k | max_line_length int64 4 266k | alphanum_fraction float64 0.01 1 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
4b886e49621f6e4b1aa79d7b17936a6913a207bf | 6,750 | py | Python | plugins/callback.py | NickxFury/MusicPlayer | d40c0a7110c9e79450066e0611a32b2c88bef065 | [
"MIT"
] | null | null | null | plugins/callback.py | NickxFury/MusicPlayer | d40c0a7110c9e79450066e0611a32b2c88bef065 | [
"MIT"
] | null | null | null | plugins/callback.py | NickxFury/MusicPlayer | d40c0a7110c9e79450066e0611a32b2c88bef065 | [
"MIT"
] | 3 | 2021-06-24T10:48:40.000Z | 2021-06-27T08:17:06.000Z | #MIT License
#Copyright (c) 2021 SUBIN
#Permission is hereby granted, free of charge, to any person obtaining a copy
#of this software and associated documentation files (the "Software"), to deal
#in the Software without restriction, including without limitation the rights
#to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
#copies of the Software, and to permit persons to whom the Software is
#furnished to do so, subject to the following conditions:
#The above copyright notice and this permission notice shall be included in all
#copies or substantial portions of the Software.
#THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
#IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
#FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
#AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
#LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
#OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
#SOFTWARE.
from pyrogram.types import InlineKeyboardMarkup, InlineKeyboardButton, CallbackQuery
from pyrogram import Client, emoji
from utils import mp
from config import Config
# Shared, module-level playlist maintained by the player service; each entry
# is an indexable record where [1] is the title and [4] is the requester.
playlist=Config.playlist

# Help text sent in response to the "help" callback button.
# NOTE(review): the literal "<song name>" inside an HTML-tagged (<b>) message
# may be rejected by Telegram's HTML parser when sent with HTML parse mode --
# confirm the parse mode used where HELP is delivered.
HELP = """
<b>Add the bot and User account in your Group with admin rights.
Start a VoiceChat
Use /play <song name> or use /play as a reply to an audio file or youtube link.
You can also use /dplay <song name> to play a song from Deezer.</b>
**Common Commands**:
**/play** Reply to an audio file or YouTube link to play it or use /play <song name>.
**/dplay** Play music from Deezer, Use /dplay <song name>
**/player** Show current playing song.
**/help** Show help for commands
**/playlist** Shows the playlist.
**Admin Commands**:
**/skip** [n] ... Skip current or n where n >= 2
**/join** Join voice chat.
**/leave** Leave current voice chat
**/vc** Check which VC is joined.
**/stop** Stop playing.
**/radio** Start Radio.
**/stopradio** Stops Radio Stream.
**/replay** Play from the beginning.
**/clean** Remove unused RAW PCM files.
**/pause** Pause playing.
**/resume** Resume playing.
**/mute** Mute in VC.
**/unmute** Unmute in VC.
**/restart** Restarts the Bot.
"""
def _playlist_text():
    """Render the shared playlist as a Markdown block, or an empty notice."""
    if not playlist:
        return f"{emoji.NO_ENTRY} Empty Playlist"
    return f"{emoji.PLAY_BUTTON} **Playlist**:\n" + "\n".join([
        f"**{i}**. **🎸{x[1]}**\n 👤**Requested by:** {x[4]}"
        for i, x in enumerate(playlist)
    ])


def _control_markup(middle_callback):
    """Build the replay / pause-or-resume / skip inline keyboard.

    :param middle_callback: callback data for the middle (⏯) button --
        "pause" while playing, "resume" while paused.
    """
    return InlineKeyboardMarkup(
        [
            [
                InlineKeyboardButton("🔄", callback_data="replay"),
                InlineKeyboardButton("⏯", callback_data=middle_callback),
                InlineKeyboardButton("⏩", callback_data="skip"),
            ],
        ]
    )


@Client.on_callback_query()
async def cb_handler(client: Client, query: CallbackQuery):
    """Dispatch inline-button callbacks: replay, pause, resume, skip, help.

    Non-admin users may only use the "help" button; everything else is
    rejected with an alert.  Player buttons are no-ops when the playlist
    is empty.
    """
    if query.from_user.id not in Config.ADMINS and query.data != "help":
        await query.answer(
            "Who the hell you are",
            show_alert=True
        )
        return
    else:
        # Acknowledge the tap so the client stops showing a spinner.
        await query.answer()

    if query.data == "replay":
        if not playlist:
            return
        mp.group_call.restart_playout()
        await query.edit_message_text(
            _playlist_text(),
            parse_mode="Markdown",
            reply_markup=_control_markup("pause"),
        )
    elif query.data == "pause":
        if not playlist:
            return
        mp.group_call.pause_playout()
        await query.edit_message_text(
            f"{emoji.PLAY_OR_PAUSE_BUTTON} Paused\n\n{_playlist_text()}",
            reply_markup=_control_markup("resume"),
        )
    elif query.data == "resume":
        if not playlist:
            return
        mp.group_call.resume_playout()
        await query.edit_message_text(
            f"{emoji.PLAY_OR_PAUSE_BUTTON} Resumed\n\n{_playlist_text()}",
            reply_markup=_control_markup("pause"),
        )
    elif query.data == "skip":
        if not playlist:
            return
        await mp.skip_current_playing()
        try:
            await query.edit_message_text(
                f"{emoji.PLAY_OR_PAUSE_BUTTON} Skipped\n\n{_playlist_text()}",
                reply_markup=_control_markup("pause"),
            )
        except Exception:
            # Editing can fail (e.g. MessageNotModified); the skip itself
            # already happened, so ignore the cosmetic failure.
            pass
    elif query.data == "help":
        buttons = [
            [
                InlineKeyboardButton('🎟 Request Movies', url='https://t.me/MOVIECLUB_CHAT'),
                InlineKeyboardButton('Official Channel 🍿', url='https://t.me/MovieClubOfficiall'),
            ],
            [
                InlineKeyboardButton('👨🏼💻 Owner', url='https://t.me/NickxFury_bot'),
            ]
        ]
        reply_markup = InlineKeyboardMarkup(buttons)
        await query.edit_message_text(
            HELP,
            reply_markup=reply_markup
        )
| 35.904255 | 98 | 0.538222 |
cabfb786ec4450c304662067813870013472a327 | 2,060 | py | Python | utils/wifi_hd_detector.py | lanfis/WiFi_Monitor | e3426b5cae1113a8ea769bb0171318fc7f0cf051 | [
"MIT"
] | null | null | null | utils/wifi_hd_detector.py | lanfis/WiFi_Monitor | e3426b5cae1113a8ea769bb0171318fc7f0cf051 | [
"MIT"
] | null | null | null | utils/wifi_hd_detector.py | lanfis/WiFi_Monitor | e3426b5cae1113a8ea769bb0171318fc7f0cf051 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# license removed for brevity
import os
import sys
current_folder = os.path.dirname(os.path.realpath(__file__))
sys.path.append(current_folder)
import numpy as np
from sys_utils import SYS_UTILS
from console_formatter import Console_Formatter
class WIFI_HD_DETECTOR:
    """Discover wifi adapters by running and parsing `airmon-ng` output.

    After a successful run(), `interface`, `chipset` and `driver` hold
    parallel numpy arrays of strings, one entry per detected adapter.
    """
    # NOTE(review): these are class-level (shared) attributes; run() rebinds
    # the result fields on the instance, and __init__ calls run() immediately,
    # so sharing across instances only matters before the first run().
    sys_util = SYS_UTILS()
    console = Console_Formatter(__name__)
    interface = []
    chipset = []
    driver = []
    flag_is_init = False

    def __init__(self):
        # Detect hardware immediately on construction.
        self.run()

    def run(self, is_silent=False):
        """Invoke `airmon-ng`, parse its table, and populate the fields.

        :param is_silent: suppress console output when True.
        :return: True once parsing completed (the new flag_is_init value).
        """
        if not is_silent:
            print(self.console.INFO("Detecting wifi hardware ..."))
        stdout, stderr = self.sys_util(['airmon-ng'])
        line_new = self.sys_util.msg_line_split(stdout)
        # Default column positions; overridden below when the first row is a
        # header containing the "Interface"/"Chipset"/"Driver" labels.
        idx_interface = 0
        idx_chipset = 2
        idx_driver = 3
        val = self.sys_util.msg_split(line_new[0], '\t')
        if len(val) > 3:
            for i in range(len(val)):
                idx_interface = int(i) if val[i] == "Interface" else idx_interface
                idx_chipset = int(i) if val[i] == "Chipset" else idx_chipset
                idx_driver = int(i) if val[i] == "Driver" else idx_driver
        self.interface = []
        self.chipset = []
        self.driver = []
        # Rows after the header: one adapter per line, tab-separated.
        # NOTE(review): np.append reallocates the whole array each iteration
        # (O(n^2) overall) -- harmless for a handful of adapters.
        for i in range(1, len(line_new)):
            val = self.sys_util.msg_split(line_new[i], '\t')
            self.interface = np.append(self.interface, val[idx_interface])
            self.chipset = np.append(self.chipset, val[idx_chipset])
            self.driver = np.append(self.driver, val[idx_driver])
        if not is_silent:
            for i in range(len(self.interface)):
                print(self.console.INFO("Interface : {:<10}, Chipset : {:<10}, Driver : {:<10}".format(self.interface[i], self.chipset[i], self.driver[i])))
        self.flag_is_init = True
        return self.flag_is_init

    def is_init(self):
        # True after the first successful run().
        return self.flag_is_init
| 34.915254 | 157 | 0.566019 |
a9b9c2bc1c890be6da275476c479b91c2f2d01e0 | 1,855 | py | Python | migrations/versions/7f26a631305e_initial_migration.py | Charles-Ndugire/personal-blog | a56a80ea7602b713d08724d33636bf4e69fc9b06 | [
"MIT"
] | null | null | null | migrations/versions/7f26a631305e_initial_migration.py | Charles-Ndugire/personal-blog | a56a80ea7602b713d08724d33636bf4e69fc9b06 | [
"MIT"
] | 1 | 2021-06-02T00:27:22.000Z | 2021-06-02T00:27:22.000Z | migrations/versions/7f26a631305e_initial_migration.py | Charles-Ndugire/personal-blog | a56a80ea7602b713d08724d33636bf4e69fc9b06 | [
"MIT"
] | null | null | null | """initial migration
Revision ID: 7f26a631305e
Revises:
Create Date: 2019-09-23 09:55:08.493546
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '7f26a631305e'
down_revision = None
branch_labels = None
depends_on = None
def upgrade():
    """Create the users, articles and comments tables."""
    # ### commands auto generated by Alembic - please adjust! ###
    # 'users' must be created first: both other tables reference users.id.
    op.create_table('users',
    sa.Column('id', sa.Integer(), nullable=False),
    sa.Column('username', sa.String(), nullable=False),
    sa.Column('email', sa.String(), nullable=False),
    sa.Column('bio', sa.String(length=255), nullable=True),
    sa.Column('profile_pic_path', sa.String(), nullable=True),
    sa.Column('password_hash', sa.String(), nullable=False),
    sa.PrimaryKeyConstraint('id'),
    sa.UniqueConstraint('email'),
    sa.UniqueConstraint('username')
    )
    op.create_table('articles',
    sa.Column('id', sa.Integer(), nullable=False),
    sa.Column('article', sa.String(), nullable=True),
    sa.Column('category', sa.String(), nullable=True),
    sa.Column('user_id', sa.Integer(), nullable=True),
    sa.ForeignKeyConstraint(['user_id'], ['users.id'], ),
    sa.PrimaryKeyConstraint('id')
    )
    # 'comments' last: it references both articles.id and users.id.
    op.create_table('comments',
    sa.Column('id', sa.Integer(), nullable=False),
    sa.Column('comment', sa.String(length=255), nullable=True),
    sa.Column('article_id', sa.Integer(), nullable=True),
    sa.Column('user_id', sa.Integer(), nullable=True),
    sa.ForeignKeyConstraint(['article_id'], ['articles.id'], ),
    sa.ForeignKeyConstraint(['user_id'], ['users.id'], ),
    sa.PrimaryKeyConstraint('id')
    )
    # ### end Alembic commands ###
def downgrade():
    """Drop all tables created by upgrade()."""
    # ### commands auto generated by Alembic - please adjust! ###
    # Drop in reverse dependency order: child tables first, then 'users'.
    for table_name in ('comments', 'articles', 'users'):
        op.drop_table(table_name)
    # ### end Alembic commands ###
| 31.982759 | 65 | 0.663073 |
14afa56ed0ba08d9655447b62aeba29eb146274d | 22,166 | py | Python | lib/python/pyflyby/_file.py | rsaim/pyflyby | eb7c1ea938de3f48c508c921553d85c3b3648587 | [
"BSD-3-Clause"
] | null | null | null | lib/python/pyflyby/_file.py | rsaim/pyflyby | eb7c1ea938de3f48c508c921553d85c3b3648587 | [
"BSD-3-Clause"
] | null | null | null | lib/python/pyflyby/_file.py | rsaim/pyflyby | eb7c1ea938de3f48c508c921553d85c3b3648587 | [
"BSD-3-Clause"
] | null | null | null | # pyflyby/_file.py.
# Copyright (C) 2011, 2012, 2013, 2014, 2015 Karl Chen.
# License: MIT http://opensource.org/licenses/MIT
from __future__ import absolute_import, division, with_statement
import os
import re
import sys
from pyflyby._util import cached_attribute, memoize
class UnsafeFilenameError(ValueError):
    """Raised for filenames containing characters considered unsafe to handle."""
    pass
# TODO: statcache
class Filename(object):
    """
    A filename.

    Stored as an absolute path.  Construction rejects "unsafe" names
    (see C{_from_filename}) by raising L{UnsafeFilenameError}.

    >>> Filename('/etc/passwd')
    Filename('/etc/passwd')
    """

    def __new__(cls, arg):
        # Idempotent: Filename(Filename(x)) returns x itself.
        if isinstance(arg, cls):
            return arg
        if isinstance(arg, basestring):
            return cls._from_filename(arg)
        raise TypeError

    @classmethod
    def _from_filename(cls, filename):
        """Construct from a string, validating against a conservative whitelist."""
        if not isinstance(filename, basestring):
            raise TypeError
        filename = str(filename)
        if not filename:
            raise UnsafeFilenameError("(empty string)")
        # Reject anything outside a conservative character whitelist
        # (no spaces, quotes, shell metacharacters, etc.).
        if re.search("[^a-zA-Z0-9_=+{}/.,~-]", filename):
            raise UnsafeFilenameError(filename)
        # Reject names where '~' starts a path component (could be
        # misinterpreted as home-directory expansion).
        if re.search("(^|/)~", filename):
            raise UnsafeFilenameError(filename)
        self = object.__new__(cls)
        self._filename = os.path.abspath(filename)
        return self

    def __str__(self):
        return self._filename

    def __repr__(self):
        return "%s(%r)" % (type(self).__name__, self._filename)

    def __truediv__(self, x):
        # Path-join operator: Filename("/a") / "b" -> Filename("/a/b").
        return type(self)(os.path.join(self._filename, x))

    def __hash__(self):
        return hash(self._filename)

    def __eq__(self, o):
        if self is o:
            return True
        if not isinstance(o, Filename):
            return NotImplemented
        return self._filename == o._filename

    def __ne__(self, o):
        if self is o:
            return False
        if not isinstance(o, Filename):
            return NotImplemented
        return self._filename != o._filename

    def __cmp__(self, o):
        # Python 2 only; ignored on Python 3.
        if self is o:
            return 0
        if not isinstance(o, Filename):
            return NotImplemented
        return cmp(self._filename, o._filename)

    @cached_attribute
    def ext(self):
        """
        Returns the extension of this filename, including the dot.
        Returns C{None} if no extension.

        @rtype:
          C{str} or C{None}
        """
        lhs, dot, rhs = self._filename.rpartition('.')
        if not dot:
            return None
        return dot + rhs

    @cached_attribute
    def base(self):
        # Final path component (basename).
        return os.path.basename(self._filename)

    @cached_attribute
    def dir(self):
        # Parent directory, as a Filename.
        return type(self)(os.path.dirname(self._filename))

    @cached_attribute
    def real(self):
        # Canonical path with symlinks resolved.
        return type(self)(os.path.realpath(self._filename))

    @property
    def exists(self):
        return os.path.exists(self._filename)

    @property
    def isdir(self):
        return os.path.isdir(self._filename)

    @property
    def isfile(self):
        return os.path.isfile(self._filename)

    @property
    def isreadable(self):
        return os.access(self._filename, os.R_OK)

    @property
    def iswritable(self):
        return os.access(self._filename, os.W_OK)

    @property
    def isexecutable(self):
        return os.access(self._filename, os.X_OK)

    def startswith(self, prefix):
        """Return whether self equals C{prefix} or lies underneath it."""
        prefix = Filename(prefix)
        if self == prefix:
            return True
        # Compare with a trailing slash so "/ab" doesn't match prefix "/a".
        return self._filename.startswith("%s/" % (prefix,))

    def list(self, ignore_unsafe=True):
        """Return this directory's children, sorted, as C{Filename}s.

        Entries with unsafe names are skipped when C{ignore_unsafe} is true,
        otherwise L{UnsafeFilenameError} propagates.
        """
        filenames = [os.path.join(self._filename, f)
                     for f in sorted(os.listdir(self._filename))]
        result = []
        for f in filenames:
            try:
                f = Filename(f)
            except UnsafeFilenameError:
                if ignore_unsafe:
                    continue
                else:
                    raise
            result.append(f)
        return result

    @property
    def ancestors(self):
        """
        Return ancestors of self, from self to /.

        >>> Filename("/aa/bb").ancestors
        (Filename('/aa/bb'), Filename('/aa'), Filename('/'))

        @rtype:
          C{tuple} of C{Filename}s
        """
        result = [self]
        while True:
            dir = result[-1].dir
            # The root directory is its own parent; stop there.
            if dir == result[-1]:
                break
            result.append(dir)
        return tuple(result)
@memoize
def _get_PATH():
    """Return the entries of $PATH as safe C{Filename}s (cached).

    Empty entries and entries with unsafe names are silently dropped.
    """
    raw_entries = os.environ.get("PATH", "").split(os.pathsep)
    safe_entries = []
    for entry in raw_entries:
        if not entry:
            continue
        try:
            safe_entries.append(Filename(entry))
        except UnsafeFilenameError:
            pass
    return tuple(safe_entries)
def which(program):
    """
    Find C{program} on $PATH.

    @type program:
      C{str}
    @rtype:
      L{Filename}
    @return:
      Program on $PATH, or C{None} if not found.
    """
    if "/" in program:
        raise ValueError("which(): input should be a basename")
    # See if it exists in the current directory.
    # NOTE(review): the current-directory candidate only needs to be
    # *readable*, while the $PATH candidates below must be *executable* --
    # confirm this asymmetry is intentional.
    candidate = Filename(program)
    if candidate.isreadable:
        return candidate
    for path in _get_PATH():
        candidate = path / program
        if candidate.isexecutable:
            return candidate
    return None
# Sentinel Filename used by read_file() to mean "read from standard input".
Filename.STDIN = Filename("/dev/stdin")
class FilePos(object):
    """
    A (lineno, colno) position within a L{FileText}.
    Both lineno and colno are 1-indexed.
    """

    def __new__(cls, *args):
        # Accepts: FilePos(), FilePos(None), FilePos(other_filepos),
        # FilePos((lineno, colno)), or FilePos(lineno, colno).
        if len(args) == 0:
            return cls._ONE_ONE
        if len(args) == 1:
            arg, = args
            if isinstance(arg, cls):
                return arg
            elif arg is None:
                return cls._ONE_ONE
            elif isinstance(arg, tuple):
                args = arg
                # Fall through
            else:
                raise TypeError
        lineno, colno = cls._intint(args)
        if lineno == colno == 1:
            return cls._ONE_ONE # space optimization
        if lineno < 1:
            raise ValueError(
                "FilePos: invalid lineno=%d; should be >= 1" % lineno,)
        if colno < 1:
            raise ValueError(
                "FilePos: invalid colno=%d; should be >= 1" % colno,)
        return cls._from_lc(lineno, colno)

    @staticmethod
    def _intint(args):
        # Validate that args is exactly a pair of ints; return it unchanged.
        if (type(args) is tuple and
            len(args) == 2 and
            type(args[0]) is type(args[1]) is int):
            return args
        else:
            raise TypeError("Expected (int,int); got %r" % (args,))

    @classmethod
    def _from_lc(cls, lineno, colno):
        # Raw constructor; bypasses validation and the _ONE_ONE singleton.
        self = object.__new__(cls)
        self.lineno = lineno
        self.colno = colno
        return self

    def __add__(self, delta):
        '''
        "Add" a coordinate (line,col) delta to this C{FilePos}.

        Note that addition here may be a non-obvious.  If there is any line
        movement, then the existing column number is ignored, and the new
        column is the new column delta + 1 (to convert into 1-based numbers).

        @rtype:
          L{FilePos}
        '''
        ldelta, cdelta = self._intint(delta)
        assert ldelta >= 0 and cdelta >= 0
        if ldelta == 0:
            return FilePos(self.lineno, self.colno + cdelta)
        else:
            return FilePos(self.lineno + ldelta, 1 + cdelta)

    def __str__(self):
        return "(%d,%d)" % (self.lineno, self.colno)

    def __repr__(self):
        return "FilePos%s" % (self,)

    @property
    def _data(self):
        # Tuple form used for comparison and hashing.
        return (self.lineno, self.colno)

    def __eq__(self, other):
        if self is other:
            return True
        if not isinstance(other, FilePos):
            return NotImplemented
        return self._data == other._data

    def __ne__(self, other):
        if self is other:
            return False
        if not isinstance(other, FilePos):
            return NotImplemented
        return self._data != other._data

    def __cmp__(self, other):
        # Python 2 only; ignored on Python 3.
        if self is other:
            return 0
        if not isinstance(other, FilePos):
            return NotImplemented
        return cmp(self._data, other._data)

    def __hash__(self):
        return hash(self._data)
# Shared singleton for the origin position (1,1); FilePos.__new__ returns it
# for default/None/(1,1) arguments so repeated constructions don't allocate.
FilePos._ONE_ONE = FilePos._from_lc(1, 1)
class FileText(object):
    """
    Represents a contiguous sequence of lines from a file.
    """

    def __new__(cls, arg, filename=None, startpos=None):
        """
        Return a new C{FileText} instance.

        @type arg:
          C{FileText}, C{Filename}, C{str}, or tuple of C{str}
        @param arg:
          If a sequence of lines, then each should end with a newline and have
          no other newlines.  Otherwise, something that can be interpreted or
          converted into a sequence of lines.
        @type filename:
          L{Filename}
        @param filename:
          Filename to attach to this C{FileText}, if not already given by
          C{arg}.
        @type startpos:
          C{FilePos}
        @param startpos:
          Starting file position (lineno & colno) of this C{FileText}, if not
          already given by C{arg}.
        @rtype:
          C{FileText}
        """
        if isinstance(arg, cls):
            # Already a FileText: reuse it, re-stamping metadata if requested.
            if filename is startpos is None:
                return arg
            return arg.alter(filename=filename, startpos=startpos)
        elif isinstance(arg, Filename):
            # A Filename: load the file's contents from disk (or stdin).
            return cls(read_file(arg), filename=filename, startpos=startpos)
        elif hasattr(arg, "__text__"):
            # Any object advertising the __text__() protocol.
            return FileText(arg.__text__(), filename=filename, startpos=startpos)
        elif isinstance(arg, basestring):
            self = object.__new__(cls)
            self.joined = arg
        else:
            raise TypeError("%s: unexpected %s"
                            % (cls.__name__, type(arg).__name__))
        if filename is not None:
            filename = Filename(filename)
        startpos = FilePos(startpos)
        self.filename = filename
        self.startpos = startpos
        return self

    @classmethod
    def _from_lines(cls, lines, filename, startpos):
        # Raw constructor from an already-split tuple of lines (elements carry
        # no trailing newlines); bypasses the validation done in __new__.
        assert type(lines) is tuple
        assert len(lines) > 0
        assert type(lines[0]) is str
        assert not lines[-1].endswith("\n")
        self = object.__new__(cls)
        self.lines = lines
        self.filename = filename
        self.startpos = startpos
        return self

    @cached_attribute
    def lines(self):
        r"""
        Lines that have been split by newline.

        These strings do NOT contain '\n'.

        If the input file ended in '\n', then the last item will be the empty
        string.  This is to avoid having to check lines[-1].endswith('\n')
        everywhere.

        @rtype:
          C{tuple} of C{str}
        """
        # Used if only initialized with 'joined'.
        # We use str.split() instead of str.splitlines() because the latter
        # doesn't distinguish between strings that end in newline or not
        # (or requires extra work to process if we use splitlines(True)).
        return tuple(self.joined.split('\n'))

    @cached_attribute
    def joined(self): # used if only initialized with 'lines'
        return '\n'.join(self.lines)

    @classmethod
    def from_filename(cls, filename):
        """Read C{filename} from disk and return its contents as a C{FileText}."""
        # BUGFIX: previously this called the nonexistent cls.from_lines()
        # (only _from_lines exists, with a different signature), so it always
        # raised AttributeError.  The constructor handles Filename arguments.
        return cls(Filename(filename))

    def alter(self, filename=None, startpos=None):
        """Return a copy of self with filename and/or startpos replaced.

        Returns self unchanged when neither value actually differs.
        """
        if filename is not None:
            filename = Filename(filename)
        else:
            filename = self.filename
        if startpos is not None:
            startpos = FilePos(startpos)
        else:
            startpos = self.startpos
        if filename == self.filename and startpos == self.startpos:
            return self
        else:
            # Share the (immutable) line data; only metadata differs.
            result = object.__new__(type(self))
            result.lines = self.lines
            result.joined = self.joined
            result.filename = filename
            result.startpos = startpos
            return result

    @cached_attribute
    def endpos(self):
        """
        The position after the last character in the text.

        @rtype:
          C{FilePos}
        """
        startpos = self.startpos
        lines = self.lines
        lineno = startpos.lineno + len(lines) - 1
        if len(lines) == 1:
            # Single line: columns continue from the starting column.
            colno = startpos.colno + len(lines[-1])
        else:
            colno = 1 + len(lines[-1])
        return FilePos(lineno, colno)

    def _lineno_to_index(self, lineno):
        """Convert a 1-based file lineno into a 0-based index into self.lines."""
        lineindex = lineno - self.startpos.lineno
        # Check that the lineindex is in range.  We don't allow pointing at
        # the line after the last line because we already ensured that
        # self.lines contains an extra empty string if necessary, to indicate
        # a trailing newline in the file.
        if not 0 <= lineindex < len(self.lines):
            raise IndexError(
                "Line number %d out of range [%d, %d)"
                % (lineno, self.startpos.lineno, self.endpos.lineno))
        return lineindex

    def _colno_to_index(self, lineindex, colno):
        """Convert a 1-based colno on line C{lineindex} into a 0-based index."""
        # The first line starts at startpos.colno; subsequent lines at column 1.
        coloffset = self.startpos.colno if lineindex == 0 else 1
        colindex = colno - coloffset
        line = self.lines[lineindex]
        # Check that the colindex is in range.  We do allow pointing at the
        # character after the last (non-newline) character in the line.
        if not 0 <= colindex <= len(line):
            raise IndexError(
                "Column number %d on line %d out of range [%d, %d]"
                % (colno, lineindex+self.startpos.lineno,
                   coloffset, coloffset+len(line)))
        return colindex

    def __getitem__(self, arg):
        """
        Return the line(s) with the given line number(s).
        If slicing, returns an instance of C{FileText}.

        Note that line numbers are indexed based on C{self.startpos.lineno}
        (which is 1 at the start of the file).

        >>> FileText("a\\nb\\nc\\nd")[2]
        'b'

        >>> FileText("a\\nb\\nc\\nd")[2:4]
        FileText('b\\nc\\n', startpos=(2,1))

        >>> FileText("a\\nb\\nc\\nd")[0]
        Traceback (most recent call last):
        ...
        IndexError: Line number 0 out of range [1, 4)

        When slicing, the input arguments can also be given as C{FilePos}
        arguments or (lineno,colno) tuples.  These are 1-indexed at the start
        of the file.

        >>> FileText("a\\nb\\nc\\nd")[(2,2):4]
        FileText('\\nc\\n', startpos=(2,2))

        @rtype:
          C{str} or L{FileText}
        """
        L = self._lineno_to_index
        C = self._colno_to_index
        if isinstance(arg, slice):
            if arg.step is not None and arg.step != 1:
                raise ValueError("steps not supported")
            # Interpret start (lineno,colno) into indexes.
            if arg.start is None:
                start_lineindex = 0
                start_colindex = 0
            elif isinstance(arg.start, int):
                start_lineindex = L(arg.start)
                start_colindex = 0
            else:
                startpos = FilePos(arg.start)
                start_lineindex = L(startpos.lineno)
                start_colindex = C(start_lineindex, startpos.colno)
            # Interpret stop (lineno,colno) into indexes.
            if arg.stop is None:
                stop_lineindex = len(self.lines)
                stop_colindex = len(self.lines[-1])
            elif isinstance(arg.stop, int):
                stop_lineindex = L(arg.stop)
                stop_colindex = 0
            else:
                stoppos = FilePos(arg.stop)
                stop_lineindex = L(stoppos.lineno)
                stop_colindex = C(stop_lineindex, stoppos.colno)
            # {start,stop}_{lineindex,colindex} are now 0-indexed
            # [open,closed) ranges.
            assert 0 <= start_lineindex <= stop_lineindex < len(self.lines)
            assert 0 <= start_colindex <= len(self.lines[start_lineindex])
            assert 0 <= stop_colindex <= len(self.lines[stop_lineindex])
            # Optimization: return entire range
            if (start_lineindex == 0 and
                start_colindex == 0 and
                stop_lineindex == len(self.lines)-1 and
                stop_colindex == len(self.lines[-1])):
                return self
            # Get the lines we care about.  We always include an extra entry
            # at the end which we'll chop to the desired number of characters.
            result_split = list(self.lines[start_lineindex:stop_lineindex+1])
            # Clip the starting and ending strings.  We do the end clip first
            # in case the result has only one line.
            result_split[-1] = result_split[-1][:stop_colindex]
            result_split[0] = result_split[0][start_colindex:]
            # Compute the new starting line and column numbers.
            result_lineno = start_lineindex + self.startpos.lineno
            if start_lineindex == 0:
                result_colno = start_colindex + self.startpos.colno
            else:
                result_colno = start_colindex + 1
            result_startpos = FilePos(result_lineno, result_colno)
            return FileText._from_lines(tuple(result_split),
                                        filename=self.filename,
                                        startpos=result_startpos)
        elif isinstance(arg, int):
            # Return a single line.
            lineindex = L(arg)
            return self.lines[lineindex]
        else:
            raise TypeError("bad type %r" % (type(arg),))

    @classmethod
    def concatenate(cls, args):
        """
        Concatenate a bunch of L{FileText} arguments.  Uses the C{filename}
        and C{startpos} from the first argument.

        @rtype:
          L{FileText}
        """
        args = [FileText(x) for x in args]
        if len(args) == 1:
            return args[0]
        return FileText(
            ''.join([l.joined for l in args]),
            filename=args[0].filename,
            startpos=args[0].startpos)

    def __repr__(self):
        r = "%s(%r" % (type(self).__name__, self.joined,)
        if self.filename is not None:
            r += ", filename=%r" % (str(self.filename),)
        if self.startpos != FilePos():
            r += ", startpos=%s" % (self.startpos,)
        r += ")"
        return r

    def __str__(self):
        return self.joined

    def __eq__(self, o):
        if self is o:
            return True
        if not isinstance(o, FileText):
            return NotImplemented
        return (self.filename == o.filename and
                self.joined == o.joined and
                self.startpos == o.startpos)

    def __ne__(self, o):
        if not isinstance(o, FileText):
            return NotImplemented
        return not (self == o)

    def __cmp__(self, o):
        # Python 2 only; ignored on Python 3.
        if self is o:
            return 0
        if not isinstance(o, FileText):
            return NotImplemented
        return cmp((self.filename, self.joined, self.startpos),
                   (o   .filename, o   .joined, o   .startpos))

    def __hash__(self):
        h = hash((self.filename, self.joined, self.startpos))
        # NOTE(review): this instance-level memoization only speeds up direct
        # self.__hash__() calls; the hash() builtin looks up __hash__ on the
        # type, so it still recomputes -- confirm whether this is intended.
        self.__hash__ = lambda: h
        return h
def read_file(filename):
    """Read C{filename} (or stdin for C{Filename.STDIN}) as a C{FileText}."""
    fn = Filename(filename)
    if fn == Filename.STDIN:
        contents = sys.stdin.read()
    else:
        f = open(str(fn), 'rU')
        try:
            contents = f.read()
        finally:
            f.close()
    return FileText(contents, filename=fn)
def write_file(filename, data):
    """Write C{data} to C{filename}, replacing any existing contents."""
    target = Filename(filename)
    text = FileText(data)
    with open(str(target), 'w') as out:
        out.write(text.joined)
def atomic_write_file(filename, data):
    """Write C{data} to C{filename} atomically (temp file + rename)."""
    filename = Filename(filename)
    data = FileText(data)
    # Write to a sibling temp file so the final os.rename() is atomic
    # (POSIX, same filesystem).  The pid suffix avoids collisions between
    # concurrent processes.
    temp_filename = Filename("%s.tmp.%s" % (filename, os.getpid(),))
    write_file(temp_filename, data)
    try:
        # Best effort: carry over the existing target's mode and group.
        st = os.stat(str(filename)) # OSError if file didn't exit before
        os.chmod(str(temp_filename), st.st_mode)
        os.chown(str(temp_filename), -1, st.st_gid) # OSError if not member of group
    except OSError:
        # Target didn't exist yet, or we lack permission -- keep defaults.
        pass
    os.rename(str(temp_filename), str(filename))
def expand_py_files_from_args(pathnames, on_error=lambda filename: None):
    """
    Enumerate *.py files, recursively.

    Arguments that are files are always included.
    Arguments that are directories are recursively searched for *.py files.

    @type pathnames:
      C{list} of L{Filename}s
    @type on_error:
      callable
    @param on_error:
      Function that is called for arguments directly specified in C{pathnames}
      that don't exist or are otherwise inaccessible.
    @rtype:
      C{list} of L{Filename}s
    """
    if not isinstance(pathnames, (tuple, list)):
        pathnames = [pathnames]
    pathnames = [Filename(f) for f in pathnames]
    result = []
    # Check for problematic arguments.  Note that we intentionally only do
    # this for directly specified arguments, not for recursively traversed
    # arguments.
    # Depth-first traversal via an explicit stack of (path, is_file) pairs;
    # entries are pushed in reverse so they pop back out in original order.
    stack = []
    for pathname in reversed(pathnames):
        if pathname.isfile:
            stack.append((pathname, True))
        elif pathname.isdir:
            stack.append((pathname, False))
        else:
            on_error(pathname)
    while stack:
        pathname, isfile = stack.pop(-1)
        if isfile:
            result.append(pathname)
            continue
        for f in reversed(pathname.list()):
            # Check inclusions/exclusions for recursion.  Note that we
            # intentionally do this in the recursive step rather than the
            # base step because if the user specification includes
            # e.g. .pyflyby, we do want to include it; however, we don't
            # want to recurse into .pyflyby ourselves.
            if f.base.startswith("."):
                continue
            if f.base == "__pycache__":
                continue
            if f.isfile:
                if f.ext == ".py":
                    stack.append((f, True))
            elif f.isdir:
                stack.append((f, False))
            else:
                # Silently ignore non-files/dirs from traversal.
                pass
    return result
| 31.620542 | 84 | 0.565957 |
d4662dbc197fd4e89d89a342e0ad3c7ab25628cb | 1,161 | py | Python | src/lexer/tokens.py | tetrica/compiladores_1 | 85bf37798054f406d144368e1984fd9804bf8891 | [
"MIT"
] | 2 | 2019-04-17T17:48:25.000Z | 2019-06-07T23:46:55.000Z | src/lexer/tokens.py | tetrica/compiladores_1 | 85bf37798054f406d144368e1984fd9804bf8891 | [
"MIT"
] | 2 | 2019-04-17T17:49:58.000Z | 2019-09-10T12:28:30.000Z | src/lexer/tokens.py | tetrica/compiladores_1 | 85bf37798054f406d144368e1984fd9804bf8891 | [
"MIT"
] | null | null | null | from collections import namedtuple
from enum import Enum, auto
from .global_vars import *
# Immutable token record: the lexeme text, its classification, and the
# source line it was found on.
_token = namedtuple('Token', [
    'content',
    'token_type',
    'line'
])


def __token_to_string(obj):
    # Compact debug form, e.g. T<'if', Token_type.KEYWORD>.
    return f"T<'{obj.content}', {obj.token_type}>"


def __token_equals(token, toke_other):
    # Tokens compare equal on content and type only; the line number is
    # deliberately ignored, so identical lexemes match across lines.
    same_content = toke_other.content == token.content
    same_type = toke_other.token_type == token.token_type
    return same_content and same_type


# Graft the custom string form and equality onto the namedtuple type.
_token.__str__ = __token_to_string
_token.__eq__ = __token_equals
class Token_type(Enum):
    """Categories a lexeme can be classified into (see __match_type)."""
    EOF = auto()      # end-of-input sentinel ('$')
    ID = auto()       # identifier
    INVALID = auto()  # lexeme matching no known category
    KEYWORD = auto()
    OPERATOR = auto()
    SYMBOL = auto()
def new_token(content, line):
    """Build a Token record for *content* found on source *line*."""
    classified = __match_type(content)
    return _token(content=content, line=line, token_type=classified)
def __match_type(lexema):
    """Classify *lexema* into a Token_type.

    Precedence: EOF sentinel, operators, keywords, symbols, then
    identifiers; everything else is INVALID.  The OPERATOR / KEYWORDS /
    SYMBOLS tables come from global_vars.
    """
    if lexema == '$':
        # '$' is the end-of-input marker.
        return Token_type.EOF
    elif lexema in OPERATOR:
        return Token_type.OPERATOR
    elif lexema in KEYWORDS:
        return Token_type.KEYWORD
    elif lexema in SYMBOLS:
        return Token_type.SYMBOL
    elif lexema.isalnum():
        # NOTE(review): isalnum() rejects underscores, so a lexeme like
        # "foo_bar" falls through to INVALID, while a digit-only lexeme
        # such as "123" is classified as ID -- confirm this matches the
        # intended language specification.
        return Token_type.ID
    else:
        return Token_type.INVALID
30d02834b92a228b9fd97b7afd233b63c7dbd162 | 109 | py | Python | tests/cv/__init__.py | stateoftheartai/sotaai | 1d01a385f115c567ad55eb10bdae16c1437978a1 | [
"MIT"
] | 23 | 2021-05-23T09:32:36.000Z | 2021-12-22T16:44:21.000Z | tests/cv/__init__.py | stateoftheartai/sotaai | 1d01a385f115c567ad55eb10bdae16c1437978a1 | [
"MIT"
] | null | null | null | tests/cv/__init__.py | stateoftheartai/sotaai | 1d01a385f115c567ad55eb10bdae16c1437978a1 | [
"MIT"
] | 1 | 2021-08-19T22:37:18.000Z | 2021-08-19T22:37:18.000Z | # -*- coding: utf-8 -*-
# Author: Hugo Ochoa <hugo@stateoftheart.ai>
# Copyright: Stateoftheart AI PBC 2020.
| 27.25 | 44 | 0.688073 |
d9ea34232877ccdb856acc8cc671bf8330955353 | 804 | py | Python | demo2/webapp/app.py | dinara92/python-grpc-demo | 9a8776c39a938591cd0ee56173450b65c19bebba | [
"MIT"
] | 3 | 2020-01-11T13:55:38.000Z | 2020-08-25T22:34:15.000Z | demo2/webapp/app.py | belajarqywok/python-grpc-demo | 1e64fa6438c7f997edf9f82de8e855772d86fba8 | [
"MIT"
] | null | null | null | demo2/webapp/app.py | belajarqywok/python-grpc-demo | 1e64fa6438c7f997edf9f82de8e855772d86fba8 | [
"MIT"
] | 1 | 2021-01-01T05:21:44.000Z | 2021-01-01T05:21:44.000Z | from flask import Flask, Response
import sys
from google.protobuf.json_format import MessageToJson
from client_wrapper import ServiceClient
import users_pb2_grpc as users_service
import users_types_pb2 as users_messages
app = Flask(__name__)
# gRPC client wrapper for the Users service stub listening on localhost:50051.
app.config['users'] = ServiceClient(users_service, 'UsersStub', 'localhost', 50051)
@app.route('/users/')
def users_get():
    """Stream the users returned by the gRPC GetUsers call as JSON text."""
    # NOTE(review): both users are created with user_id=1 -- looks like a
    # copy/paste slip; confirm whether "christie" should have a distinct id.
    request = users_messages.GetUsersRequest(
        user=[users_messages.User(username="alexa", user_id=1),
              users_messages.User(username="christie", user_id=1)]
    )

    def get_user():
        # GetUsers is a streaming RPC; yield each response message as it arrives.
        response = app.config['users'].GetUsers(request)
        for resp in response:
            # NOTE(review): consecutive JSON documents are concatenated with
            # no separator, so the overall body is not a single valid JSON
            # value despite the application/json content type -- confirm the
            # intended framing.
            yield MessageToJson(resp)

    return Response(get_user(), content_type='application/json')
if __name__ == '__main__':
app.run()
| 29.777778 | 83 | 0.720149 |
22ca3460adff11bf359c7353bb8ec2aea5d2f73b | 4,117 | py | Python | stackinabox/util/responses/decorator.py | BenjamenMeyer/stackInABox | 005a3e3f40ae7b7f14fae24d07768731e0ac948e | [
"Apache-2.0"
] | 5 | 2015-02-02T22:02:55.000Z | 2016-02-03T21:58:12.000Z | stackinabox/util/responses/decorator.py | TestInABox/stackInABox | 15586e61a2013b6f4997c652e8412a1784f8fc93 | [
"Apache-2.0"
] | 43 | 2016-05-07T04:08:52.000Z | 2022-03-16T23:43:36.000Z | stackinabox/util/responses/decorator.py | TestInABox/stackInABox | 15586e61a2013b6f4997c652e8412a1784f8fc93 | [
"Apache-2.0"
] | 3 | 2016-05-05T18:05:36.000Z | 2022-03-23T17:41:41.000Z | """
Stack-In-A-Box: Responses Support via decorator
"""
try:
import collections.abc as collections
except ImportError:
# Py2.7 Support
import collections
import functools
import logging
import re
import types
import responses
import six
from stackinabox.services.service import StackInABoxService
from stackinabox.stack import StackInABox
from stackinabox.util.responses.core import (
responses_registration
)
from stackinabox.util import deprecator
from stackinabox.util.tools import CaseInsensitiveDict
logger = logging.getLogger(__name__)
class activate(object):
"""
Decorator class to make use of Responses and Stack-In-A-Box
extremely simple to do.
"""
def __init__(self, uri, *args, **kwargs):
"""
Initialize the decorator instance
:param uri: URI Stack-In-A-Box will use to recognize the HTTP calls
f.e 'localhost'.
:param text_type access_services: name of a keyword parameter in the
test function to assign access to the services created in the
arguments to the decorator.
:param args: A tuple containing all the positional arguments. Any
StackInABoxService arguments are removed before being passed to
the actual function.
:param kwargs: A dictionary of keyword args that are passed to the
actual function.
"""
self.uri = uri
self.services = {}
self.args = []
self.kwargs = kwargs
if "access_services" in self.kwargs:
self.enable_service_access = self.kwargs["access_services"]
del self.kwargs["access_services"]
else:
self.enable_service_access = None
for arg in args:
if self.process_service(arg, raise_on_type=False):
pass
elif (
isinstance(arg, types.GeneratorType) or
isinstance(arg, collections.Iterable)
):
for sub_arg in arg:
self.process_service(sub_arg, raise_on_type=True)
else:
self.args.append(arg)
def process_service(self, arg_based_service, raise_on_type=True):
if isinstance(arg_based_service, StackInABoxService):
logger.debug("Registering {0}".format(arg_based_service.name))
self.services[arg_based_service.name] = arg_based_service
return True
elif raise_on_type:
raise TypeError(
"Generator or Iterable must provide a "
"StackInABoxService in all of its results."
)
return False
def __call__(self, fn):
"""
Call to actually wrap the function call.
"""
@functools.wraps(fn)
def wrapped(*args, **kwargs):
args_copy = list(args)
for arg in self.args:
args_copy.append(arg)
args_finalized = tuple(args_copy)
kwargs.update(self.kwargs)
if self.enable_service_access is not None:
kwargs[self.enable_service_access] = self.services
return_value = None
def run():
responses.mock.start()
StackInABox.reset_services()
for service in self.services.values():
StackInABox.register_service(service)
responses_registration(self.uri)
return_value = fn(*args_finalized, **kwargs)
StackInABox.reset_services()
responses.mock.stop()
responses.mock.reset()
with responses.RequestsMock():
run()
return return_value
return wrapped
class stack_activate(activate):
@deprecator.DeprecatedInterface("stack_activate", "activate")
def __init__(self, *args, **kwargs):
super(stack_activate, self).__init__(*args, **kwargs)
@deprecator.DeprecatedInterface("stack_activate", "activate")
def __call__(self, *args, **kwargs):
super(stack_activate, self).__call__(*args, **kwargs)
| 30.723881 | 76 | 0.616468 |
bb423d0a8ccfaa6581a60ffb17a7eccf81943e01 | 5,253 | py | Python | venv/lib/python3.8/site-packages/azure/mgmt/recoveryservicesbackup/operations/_backup_jobs_operations.py | amcclead7336/Enterprise_Data_Science_Final | ccdc0aa08d4726bf82d71c11a1cc0c63eb301a28 | [
"Unlicense",
"MIT"
] | null | null | null | venv/lib/python3.8/site-packages/azure/mgmt/recoveryservicesbackup/operations/_backup_jobs_operations.py | amcclead7336/Enterprise_Data_Science_Final | ccdc0aa08d4726bf82d71c11a1cc0c63eb301a28 | [
"Unlicense",
"MIT"
] | null | null | null | venv/lib/python3.8/site-packages/azure/mgmt/recoveryservicesbackup/operations/_backup_jobs_operations.py | amcclead7336/Enterprise_Data_Science_Final | ccdc0aa08d4726bf82d71c11a1cc0c63eb301a28 | [
"Unlicense",
"MIT"
] | 2 | 2021-05-23T16:46:31.000Z | 2021-05-26T23:51:09.000Z | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
import uuid
from msrest.pipeline import ClientRawResponse
from msrestazure.azure_exceptions import CloudError
from .. import models
class BackupJobsOperations(object):
"""BackupJobsOperations operations.
You should not instantiate directly this class, but create a Client instance that will create it for you and attach it as attribute.
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
:ivar api_version: Client Api Version. Constant value: "2020-10-01".
"""
models = models
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self.api_version = "2020-10-01"
self.config = config
def list(
self, vault_name, resource_group_name, filter=None, skip_token=None, custom_headers=None, raw=False, **operation_config):
"""Provides a pageable list of jobs.
:param vault_name: The name of the recovery services vault.
:type vault_name: str
:param resource_group_name: The name of the resource group where the
recovery services vault is present.
:type resource_group_name: str
:param filter: OData filter options.
:type filter: str
:param skip_token: skipToken Filter.
:type skip_token: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: An iterator like instance of JobResource
:rtype:
~azure.mgmt.recoveryservicesbackup.models.JobResourcePaged[~azure.mgmt.recoveryservicesbackup.models.JobResource]
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
def prepare_request(next_link=None):
if not next_link:
# Construct URL
url = self.list.metadata['url']
path_format_arguments = {
'vaultName': self._serialize.url("vault_name", vault_name, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
if filter is not None:
query_parameters['$filter'] = self._serialize.query("filter", filter, 'str')
if skip_token is not None:
query_parameters['$skipToken'] = self._serialize.query("skip_token", skip_token, 'str')
else:
url = next_link
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters['Accept'] = 'application/json'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.get(url, query_parameters, header_parameters)
return request
def internal_paging(next_link=None):
request = prepare_request(next_link)
response = self._client.send(request, stream=False, **operation_config)
if response.status_code not in [200]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
return response
# Deserialize response
header_dict = None
if raw:
header_dict = {}
deserialized = models.JobResourcePaged(internal_paging, self._deserialize.dependencies, header_dict)
return deserialized
list.metadata = {'url': '/Subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.RecoveryServices/vaults/{vaultName}/backupJobs'}
| 43.057377 | 164 | 0.636018 |
fe438ea6aea7482ba0181c0bb35aa4b627081ea1 | 17,870 | py | Python | homeassistant/components/number/__init__.py | mib1185/core | b17d4ac65cde9a27ff6032d70b148792e5eba8df | [
"Apache-2.0"
] | null | null | null | homeassistant/components/number/__init__.py | mib1185/core | b17d4ac65cde9a27ff6032d70b148792e5eba8df | [
"Apache-2.0"
] | null | null | null | homeassistant/components/number/__init__.py | mib1185/core | b17d4ac65cde9a27ff6032d70b148792e5eba8df | [
"Apache-2.0"
] | null | null | null | """Component to allow numeric input for platforms."""
from __future__ import annotations
from collections.abc import Callable
from contextlib import suppress
import dataclasses
from datetime import timedelta
import inspect
import logging
from math import ceil, floor
from typing import Any, Final, final
import voluptuous as vol
from homeassistant.backports.enum import StrEnum
from homeassistant.config_entries import ConfigEntry
from homeassistant.const import ATTR_MODE, TEMP_CELSIUS, TEMP_FAHRENHEIT
from homeassistant.core import HomeAssistant, ServiceCall
from homeassistant.helpers.config_validation import ( # noqa: F401
PLATFORM_SCHEMA,
PLATFORM_SCHEMA_BASE,
)
from homeassistant.helpers.entity import Entity, EntityDescription
from homeassistant.helpers.entity_component import EntityComponent
from homeassistant.helpers.restore_state import ExtraStoredData, RestoreEntity
from homeassistant.helpers.typing import ConfigType
from homeassistant.util import temperature as temperature_util
from .const import (
ATTR_MAX,
ATTR_MIN,
ATTR_STEP,
ATTR_VALUE,
DEFAULT_MAX_VALUE,
DEFAULT_MIN_VALUE,
DEFAULT_STEP,
DOMAIN,
SERVICE_SET_VALUE,
)
SCAN_INTERVAL = timedelta(seconds=30)
ENTITY_ID_FORMAT = DOMAIN + ".{}"
MIN_TIME_BETWEEN_SCANS = timedelta(seconds=10)
_LOGGER = logging.getLogger(__name__)
class NumberDeviceClass(StrEnum):
"""Device class for numbers."""
# temperature (C/F)
TEMPERATURE = "temperature"
DEVICE_CLASSES_SCHEMA: Final = vol.All(vol.Lower, vol.Coerce(NumberDeviceClass))
class NumberMode(StrEnum):
"""Modes for number entities."""
AUTO = "auto"
BOX = "box"
SLIDER = "slider"
UNIT_CONVERSIONS: dict[str, Callable[[float, str, str], float]] = {
NumberDeviceClass.TEMPERATURE: temperature_util.convert,
}
async def async_setup(hass: HomeAssistant, config: ConfigType) -> bool:
"""Set up Number entities."""
component = hass.data[DOMAIN] = EntityComponent(
_LOGGER, DOMAIN, hass, SCAN_INTERVAL
)
await component.async_setup(config)
component.async_register_entity_service(
SERVICE_SET_VALUE,
{vol.Required(ATTR_VALUE): vol.Coerce(float)},
async_set_value,
)
return True
async def async_set_value(entity: NumberEntity, service_call: ServiceCall) -> None:
"""Service call wrapper to set a new value."""
value = service_call.data["value"]
if value < entity.min_value or value > entity.max_value:
raise ValueError(
f"Value {value} for {entity.name} is outside valid range {entity.min_value} - {entity.max_value}"
)
try:
native_value = entity.convert_to_native_value(value)
# Clamp to the native range
native_value = min(
max(native_value, entity.native_min_value), entity.native_max_value
)
await entity.async_set_native_value(native_value)
except NotImplementedError:
await entity.async_set_value(value)
async def async_setup_entry(hass: HomeAssistant, entry: ConfigEntry) -> bool:
"""Set up a config entry."""
component: EntityComponent = hass.data[DOMAIN]
return await component.async_setup_entry(entry)
async def async_unload_entry(hass: HomeAssistant, entry: ConfigEntry) -> bool:
"""Unload a config entry."""
component: EntityComponent = hass.data[DOMAIN]
return await component.async_unload_entry(entry)
@dataclasses.dataclass
class NumberEntityDescription(EntityDescription):
"""A class that describes number entities."""
max_value: None = None
min_value: None = None
native_max_value: float | None = None
native_min_value: float | None = None
native_unit_of_measurement: str | None = None
native_step: float | None = None
step: None = None
unit_of_measurement: None = None # Type override, use native_unit_of_measurement
def __post_init__(self) -> None:
"""Post initialisation processing."""
if (
self.max_value is not None
or self.min_value is not None
or self.step is not None
or self.unit_of_measurement is not None
):
if self.__class__.__name__ == "NumberEntityDescription": # type: ignore[unreachable]
caller = inspect.stack()[2]
module = inspect.getmodule(caller[0])
else:
module = inspect.getmodule(self)
if module and module.__file__ and "custom_components" in module.__file__:
report_issue = "report it to the custom component author."
else:
report_issue = (
"create a bug report at "
"https://github.com/home-assistant/core/issues?q=is%3Aopen+is%3Aissue"
)
_LOGGER.warning(
"%s is setting deprecated attributes on an instance of "
"NumberEntityDescription, this is not valid and will be unsupported "
"from Home Assistant 2022.10. Please %s",
module.__name__ if module else self.__class__.__name__,
report_issue,
)
self.native_unit_of_measurement = self.unit_of_measurement
def ceil_decimal(value: float, precision: float = 0) -> float:
"""Return the ceiling of f with d decimals.
This is a simple implementation which ignores floating point inexactness.
"""
factor = 10**precision
return ceil(value * factor) / factor
def floor_decimal(value: float, precision: float = 0) -> float:
"""Return the floor of f with d decimals.
This is a simple implementation which ignores floating point inexactness.
"""
factor = 10**precision
return floor(value * factor) / factor
class NumberEntity(Entity):
"""Representation of a Number entity."""
entity_description: NumberEntityDescription
_attr_max_value: None
_attr_min_value: None
_attr_state: None = None
_attr_step: None
_attr_mode: NumberMode = NumberMode.AUTO
_attr_value: None
_attr_native_max_value: float
_attr_native_min_value: float
_attr_native_step: float
_attr_native_value: float
_attr_native_unit_of_measurement: str | None
_deprecated_number_entity_reported = False
def __init_subclass__(cls, **kwargs: Any) -> None:
"""Post initialisation processing."""
super().__init_subclass__(**kwargs)
if any(
method in cls.__dict__
for method in (
"async_set_value",
"max_value",
"min_value",
"set_value",
"step",
"unit_of_measurement",
"value",
)
):
module = inspect.getmodule(cls)
if module and module.__file__ and "custom_components" in module.__file__:
report_issue = "report it to the custom component author."
else:
report_issue = (
"create a bug report at "
"https://github.com/home-assistant/core/issues?q=is%3Aopen+is%3Aissue"
)
_LOGGER.warning(
"%s::%s is overriding deprecated methods on an instance of "
"NumberEntity, this is not valid and will be unsupported "
"from Home Assistant 2022.10. Please %s",
cls.__module__,
cls.__name__,
report_issue,
)
@property
def capability_attributes(self) -> dict[str, Any]:
"""Return capability attributes."""
return {
ATTR_MIN: self.min_value,
ATTR_MAX: self.max_value,
ATTR_STEP: self.step,
ATTR_MODE: self.mode,
}
@property
def native_min_value(self) -> float:
"""Return the minimum value."""
if hasattr(self, "_attr_native_min_value"):
return self._attr_native_min_value
if (
hasattr(self, "entity_description")
and self.entity_description.native_min_value is not None
):
return self.entity_description.native_min_value
return DEFAULT_MIN_VALUE
@property
@final
def min_value(self) -> float:
"""Return the minimum value."""
if hasattr(self, "_attr_min_value"):
self._report_deprecated_number_entity()
return self._attr_min_value # type: ignore[return-value]
if (
hasattr(self, "entity_description")
and self.entity_description.min_value is not None
):
self._report_deprecated_number_entity() # type: ignore[unreachable]
return self.entity_description.min_value
return self._convert_to_state_value(self.native_min_value, floor_decimal)
@property
def native_max_value(self) -> float:
"""Return the maximum value."""
if hasattr(self, "_attr_native_max_value"):
return self._attr_native_max_value
if (
hasattr(self, "entity_description")
and self.entity_description.native_max_value is not None
):
return self.entity_description.native_max_value
return DEFAULT_MAX_VALUE
@property
@final
def max_value(self) -> float:
"""Return the maximum value."""
if hasattr(self, "_attr_max_value"):
self._report_deprecated_number_entity()
return self._attr_max_value # type: ignore[return-value]
if (
hasattr(self, "entity_description")
and self.entity_description.max_value is not None
):
self._report_deprecated_number_entity() # type: ignore[unreachable]
return self.entity_description.max_value
return self._convert_to_state_value(self.native_max_value, ceil_decimal)
@property
def native_step(self) -> float | None:
"""Return the increment/decrement step."""
if (
hasattr(self, "entity_description")
and self.entity_description.native_step is not None
):
return self.entity_description.native_step
return None
@property
@final
def step(self) -> float:
"""Return the increment/decrement step."""
if hasattr(self, "_attr_step"):
self._report_deprecated_number_entity()
return self._attr_step # type: ignore[return-value]
if (
hasattr(self, "entity_description")
and self.entity_description.step is not None
):
self._report_deprecated_number_entity() # type: ignore[unreachable]
return self.entity_description.step
if hasattr(self, "_attr_native_step"):
return self._attr_native_step
if (native_step := self.native_step) is not None:
return native_step
step = DEFAULT_STEP
value_range = abs(self.max_value - self.min_value)
if value_range != 0:
while value_range <= step:
step /= 10.0
return step
@property
def mode(self) -> NumberMode:
"""Return the mode of the entity."""
return self._attr_mode
@property
@final
def state(self) -> float | None:
"""Return the entity state."""
return self.value
@property
def native_unit_of_measurement(self) -> str | None:
"""Return the unit of measurement of the entity, if any."""
if hasattr(self, "_attr_native_unit_of_measurement"):
return self._attr_native_unit_of_measurement
if hasattr(self, "entity_description"):
return self.entity_description.native_unit_of_measurement
return None
@property
@final
def unit_of_measurement(self) -> str | None:
"""Return the unit of measurement of the entity, after unit conversion."""
if hasattr(self, "_attr_unit_of_measurement"):
return self._attr_unit_of_measurement
if (
hasattr(self, "entity_description")
and self.entity_description.unit_of_measurement is not None
):
return self.entity_description.unit_of_measurement # type: ignore[unreachable]
native_unit_of_measurement = self.native_unit_of_measurement
if (
self.device_class == NumberDeviceClass.TEMPERATURE
and native_unit_of_measurement in (TEMP_CELSIUS, TEMP_FAHRENHEIT)
):
return self.hass.config.units.temperature_unit
return native_unit_of_measurement
@property
def native_value(self) -> float | None:
"""Return the value reported by the number."""
return self._attr_native_value
@property
@final
def value(self) -> float | None:
"""Return the entity value to represent the entity state."""
if hasattr(self, "_attr_value"):
self._report_deprecated_number_entity()
return self._attr_value
if (native_value := self.native_value) is None:
return native_value
return self._convert_to_state_value(native_value, round)
def set_native_value(self, value: float) -> None:
"""Set new value."""
raise NotImplementedError()
async def async_set_native_value(self, value: float) -> None:
"""Set new value."""
await self.hass.async_add_executor_job(self.set_native_value, value)
@final
def set_value(self, value: float) -> None:
"""Set new value."""
raise NotImplementedError()
@final
async def async_set_value(self, value: float) -> None:
"""Set new value."""
await self.hass.async_add_executor_job(self.set_value, value)
def _convert_to_state_value(self, value: float, method: Callable) -> float:
"""Convert a value in the number's native unit to the configured unit."""
native_unit_of_measurement = self.native_unit_of_measurement
unit_of_measurement = self.unit_of_measurement
device_class = self.device_class
if (
native_unit_of_measurement != unit_of_measurement
and device_class in UNIT_CONVERSIONS
):
assert native_unit_of_measurement
assert unit_of_measurement
value_s = str(value)
prec = len(value_s) - value_s.index(".") - 1 if "." in value_s else 0
# Suppress ValueError (Could not convert value to float)
with suppress(ValueError):
value_new: float = UNIT_CONVERSIONS[device_class](
value,
native_unit_of_measurement,
unit_of_measurement,
)
# Round to the wanted precision
value = method(value_new, prec)
return value
def convert_to_native_value(self, value: float) -> float:
"""Convert a value to the number's native unit."""
native_unit_of_measurement = self.native_unit_of_measurement
unit_of_measurement = self.unit_of_measurement
device_class = self.device_class
if (
value is not None
and native_unit_of_measurement != unit_of_measurement
and device_class in UNIT_CONVERSIONS
):
assert native_unit_of_measurement
assert unit_of_measurement
value = UNIT_CONVERSIONS[device_class](
value,
unit_of_measurement,
native_unit_of_measurement,
)
return value
def _report_deprecated_number_entity(self) -> None:
"""Report that the number entity has not been upgraded."""
if not self._deprecated_number_entity_reported:
self._deprecated_number_entity_reported = True
report_issue = self._suggest_report_issue()
_LOGGER.warning(
"Entity %s (%s) is using deprecated NumberEntity features which will "
"be unsupported from Home Assistant Core 2022.10, please %s",
self.entity_id,
type(self),
report_issue,
)
@dataclasses.dataclass
class NumberExtraStoredData(ExtraStoredData):
"""Object to hold extra stored data."""
native_max_value: float | None
native_min_value: float | None
native_step: float | None
native_unit_of_measurement: str | None
native_value: float | None
def as_dict(self) -> dict[str, Any]:
"""Return a dict representation of the number data."""
return dataclasses.asdict(self)
@classmethod
def from_dict(cls, restored: dict[str, Any]) -> NumberExtraStoredData | None:
"""Initialize a stored number state from a dict."""
try:
return cls(
restored["native_max_value"],
restored["native_min_value"],
restored["native_step"],
restored["native_unit_of_measurement"],
restored["native_value"],
)
except KeyError:
return None
class RestoreNumber(NumberEntity, RestoreEntity):
"""Mixin class for restoring previous number state."""
@property
def extra_restore_state_data(self) -> NumberExtraStoredData:
"""Return number specific state data to be restored."""
return NumberExtraStoredData(
self.native_max_value,
self.native_min_value,
self.native_step,
self.native_unit_of_measurement,
self.native_value,
)
async def async_get_last_number_data(self) -> NumberExtraStoredData | None:
"""Restore native_*."""
if (restored_last_extra_data := await self.async_get_last_extra_data()) is None:
return None
return NumberExtraStoredData.from_dict(restored_last_extra_data.as_dict())
| 34.431599 | 109 | 0.64113 |
a7f0b4ad074338453d91c738269e543602e44bfe | 8,157 | py | Python | src/tools/telemetry/telemetry/core/browser_options.py | jxjnjjn/chromium | 435c1d02fd1b99001dc9e1e831632c894523580d | [
"Apache-2.0"
] | 9 | 2018-09-21T05:36:12.000Z | 2021-11-15T15:14:36.000Z | src/tools/telemetry/telemetry/core/browser_options.py | jxjnjjn/chromium | 435c1d02fd1b99001dc9e1e831632c894523580d | [
"Apache-2.0"
] | null | null | null | src/tools/telemetry/telemetry/core/browser_options.py | jxjnjjn/chromium | 435c1d02fd1b99001dc9e1e831632c894523580d | [
"Apache-2.0"
] | 3 | 2018-11-28T14:54:13.000Z | 2020-07-02T07:36:07.000Z | # Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import optparse
import sys
import shlex
import logging
import copy
from telemetry.core import browser_finder
from telemetry.core import profile_types
from telemetry.core import wpr_modes
class BrowserOptions(optparse.Values):
"""Options to be used for discovering and launching a browser."""
def __init__(self, browser_type=None):
optparse.Values.__init__(self)
self.browser_type = browser_type
self.browser_executable = None
self.chrome_root = None
self.android_device = None
self.cros_ssh_identity = None
self.dont_override_profile = False
self.profile_dir = None
self.extra_browser_args = []
self.extra_wpr_args = []
self.show_stdout = False
self.extensions_to_load = []
self.cros_remote = None
self.wpr_mode = wpr_modes.WPR_OFF
self.wpr_make_javascript_deterministic = True
self.browser_user_agent_type = None
self.trace_dir = None
self.verbosity = 0
self.page_filter = None
self.page_filter_exclude = None
def Copy(self):
return copy.deepcopy(self)
def CreateParser(self, *args, **kwargs):
parser = optparse.OptionParser(*args, **kwargs)
# Selection group
group = optparse.OptionGroup(parser, 'Which browser to use')
group.add_option('--browser',
dest='browser_type',
default=None,
help='Browser type to run, '
'in order of priority. Supported values: list,%s' %
browser_finder.ALL_BROWSER_TYPES)
group.add_option('--browser-executable',
dest='browser_executable',
help='The exact browser to run.')
group.add_option('--chrome-root',
dest='chrome_root',
help='Where to look for chrome builds.'
'Defaults to searching parent dirs by default.')
group.add_option('--device',
dest='android_device',
help='The android device ID to use'
'If not specified, only 0 or 1 connected devcies are supported.')
group.add_option('--keep_test_server_ports', action='store_true',
help='Indicates the test server ports must be '
'kept. When this is run via a sharder '
'the test server ports should be kept and '
'should not be reset.')
group.add_option(
'--remote',
dest='cros_remote',
help='The IP address of a remote ChromeOS device to use.')
group.add_option('--identity',
dest='cros_ssh_identity',
default=None,
help='The identity file to use when ssh\'ing into the ChromeOS device')
parser.add_option_group(group)
# Browser options
group = optparse.OptionGroup(parser, 'Browser options')
profile_choices = ['clean', 'default'] + profile_types.PROFILE_TYPES
group.add_option('--profile-type',
dest='profile_type',
type='choice',
default='clean',
choices=profile_choices,
help=('The user profile to use. A clean profile is used by default. '
'Supported values: ' + ', '.join(profile_choices)))
group.add_option('--extra-browser-args',
dest='extra_browser_args_as_string',
help='Additional arguments to pass to the browser when it starts')
group.add_option('--extra-wpr-args',
dest='extra_wpr_args_as_string',
help=('Additional arguments to pass to Web Page Replay. '
'See third_party/webpagereplay/replay.py for usage.'))
group.add_option('--show-stdout',
action='store_true',
help='When possible, will display the stdout of the process')
parser.add_option_group(group)
# Page set options
group = optparse.OptionGroup(parser, 'Page set options')
group.add_option('--page-repeat', dest='page_repeat', default=1,
help='Number of times to repeat each individual ' +
'page in the pageset before proceeding.')
group.add_option('--pageset-repeat', dest='pageset_repeat', default=1,
help='Number of times to repeat the entire pageset ' +
'before finishing.')
group.add_option('--pageset-shuffle', action='store_true',
dest='pageset_shuffle',
help='Shuffle the order of pages within a pageset.')
group.add_option('--pageset-shuffle-order-file',
dest='pageset_shuffle_order_file', default=None,
help='Filename of an output of a previously run test on the current ' +
'pageset. The tests will run in the same order again, overriding ' +
'what is specified by --page-repeat and --pageset-repeat.')
parser.add_option_group(group)
group = optparse.OptionGroup(parser, 'Web Page Replay options')
group.add_option('--allow-live-sites',
dest='allow_live_sites', action='store_true',
help='Run against live sites if the Web Page Replay archives don\'t '
'exist. Without this flag, the benchmark will just fail instead '
'of running against live sites.')
parser.add_option_group(group)
# Debugging options
group = optparse.OptionGroup(parser, 'When things go wrong')
group.add_option(
'--trace-dir', dest='trace_dir', default=None,
help='Record traces and store them in this directory.')
group.add_option(
'-v', '--verbose', action='count', dest='verbosity',
help='Increase verbosity level (repeat as needed)')
group.add_option('--print-bootstrap-deps',
action='store_true',
help='Output bootstrap deps list.')
parser.add_option_group(group)
# Platform options
group = optparse.OptionGroup(parser, 'Platform options')
group.add_option('--no-performance-mode', action='store_true',
help='Some platforms run on "full performance mode" where the '
'benchmark is executed at maximum CPU speed in order to minimize noise '
'(specially important for dashboards / continuous builds). '
'This option prevents Telemetry from tweaking such platform settings.')
parser.add_option_group(group)
real_parse = parser.parse_args
def ParseArgs(args=None):
defaults = parser.get_default_values()
for k, v in defaults.__dict__.items():
if k in self.__dict__ and self.__dict__[k] != None:
continue
self.__dict__[k] = v
ret = real_parse(args, self) # pylint: disable=E1121
if self.verbosity >= 2:
logging.basicConfig(level=logging.DEBUG)
elif self.verbosity:
logging.basicConfig(level=logging.INFO)
else:
logging.basicConfig(level=logging.WARNING)
if self.browser_executable and not self.browser_type:
self.browser_type = 'exact'
if not self.browser_executable and not self.browser_type:
sys.stderr.write('Must provide --browser=<type>. ' +
'Use --browser=list for valid options.\n')
sys.exit(1)
if self.browser_type == 'list':
types = browser_finder.GetAllAvailableBrowserTypes(self)
sys.stderr.write('Available browsers:\n')
sys.stdout.write(' %s\n' % '\n '.join(types))
sys.exit(1)
if self.extra_browser_args_as_string: # pylint: disable=E1101
tmp = shlex.split(
self.extra_browser_args_as_string) # pylint: disable=E1101
self.extra_browser_args.extend(tmp)
delattr(self, 'extra_browser_args_as_string')
if self.extra_wpr_args_as_string: # pylint: disable=E1101
tmp = shlex.split(
self.extra_wpr_args_as_string) # pylint: disable=E1101
self.extra_wpr_args.extend(tmp)
delattr(self, 'extra_wpr_args_as_string')
if self.profile_type == 'default':
self.dont_override_profile = True
elif self.profile_type != 'clean':
self.profile_dir = profile_types.GetProfileDir(self.profile_type)
delattr(self, 'profile_type')
return ret
parser.parse_args = ParseArgs
return parser
def AppendExtraBrowserArg(self, arg):
    """Add *arg* to the extra browser arguments, skipping duplicates."""
    known_args = self.extra_browser_args
    if arg not in known_args:
        known_args.append(arg)
| 39.985294 | 80 | 0.66777 |
dc71f03a0a38890683b9c048d8fba75303cdd983 | 5,017 | py | Python | apns_clerk/backends/dummy.py | razor-1/apns-clerk | 807c1166d463d9be022eedf69ce9be0bf776f096 | [
"Apache-2.0"
] | null | null | null | apns_clerk/backends/dummy.py | razor-1/apns-clerk | 807c1166d463d9be022eedf69ce9be0bf776f096 | [
"Apache-2.0"
] | null | null | null | apns_clerk/backends/dummy.py | razor-1/apns-clerk | 807c1166d463d9be022eedf69ce9be0bf776f096 | [
"Apache-2.0"
] | 1 | 2019-12-17T18:45:13.000Z | 2019-12-17T18:45:13.000Z | # Author Aleksi Hoffman
# Based on apns-client implementation by Sardar Yumatov
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import time
from struct import pack
try:
import threading as _threading
except ImportError:
import dummy_threading as _threading
from . import BaseBackend, BaseConnection
from ..certificate import BaseCertificate
# python 3 support
import six
# module level logger
LOG = logging.getLogger(__name__)
class Certificate(BaseCertificate):
    """Dummy certificate carrying no real cryptographic state."""

    def load_context(self, cert_string=None, cert_file=None, key_string=None,
                     key_file=None, passphrase=None):
        """Return an empty (context, certificate) pair; nothing is loaded."""
        context, raw_cert = None, None
        return context, raw_cert

    def dump_certificate(self, raw_certificate):
        """Return fixed dummy contents. All dummy certificates are equal."""
        return "CERTIFICATE"

    def dump_digest(self, raw_certificate, digest):
        """Return the dummy digest, which equals the dummy certificate dump."""
        return self.dump_certificate(raw_certificate)
class Backend(BaseBackend):
    """Dummy backend designed for testing without performing real IO.

    Serves as an example for custom backends.
    """
    # Mimic stdio: closed peers can not be detected eagerly.
    can_detect_close = False

    def __init__(self, push=None, feedback=None, **options):
        """Create a new dummy backend.

        :Arguments:
            - push (list): status codes to return while sending messages.
            - feedback (int): number of tokens to generate in the feedback stream.
        Exactly one of the two must be given.
        """
        super(Backend, self).__init__(**options)
        self.push_results = push
        self.push_result_pos = -1
        self.feedback_results = feedback
        self.new_connections = 0
        has_push = push is not None
        has_feedback = feedback is not None
        assert has_push ^ has_feedback, "Push results or feedback stream must be provided"

    def get_new_connection(self, address, certificate, timeout=None):
        """Open a new dummy connection and advance the push result cursor.

        :Arguments:
            - address (tuple): target (host, port).
            - certificate (:class:`Certificate`): certificate instance.
            - timeout (float): connection timeout in seconds (ignored).
        """
        self.new_connections += 1
        self.push_result_pos += 1
        return Connection(self, address, certificate)

    def get_certificate(self, cert_params):
        """Create/load a dummy certificate from parameters."""
        return Certificate(**cert_params)

    def create_lock(self):
        """Provide a semaphore with the ``threading.Lock`` interface."""
        return _threading.Lock()
class Connection(BaseConnection):
    """ Dummy connection. """
    def __init__(self, pool, address, certificate):
        """ Create new dummy connection.
        :Arguments:
        - pool (:class:`Backend`): dummy backend.
        - address (tuple): target host and port.
        - certificate (:class:`Certificate`): provider certificate.
        """
        super(Connection, self).__init__(address, certificate)
        self.pool = pool
        self._closed = False
    def closed(self):
        """ Returns True if :func:`close` has been explicitly called. """
        return self._closed
    def close(self):
        """ Marks this connection as closed. """
        self._closed = True
    def reset(self):
        """ Reset dummy connection to use next result record. (No-op here.) """
        pass
    def write(self, data, timeout):
        """ Does nothing, always succeeds unless the connection is closed. """
        if self.closed():
            raise IOError("Connection closed")
    def peek(self, size):
        """ Always returns None as we never fail prematurely. """
        return None
    def read(self, size, timeout):
        """ Iterates over preconfigured send/feedback responses.

        Push mode: returns the status code at the shared cursor, packed as
        an APNS-style error response (command byte 8, status, identifier 0)
        — presumably matching the real service framing; confirm against the
        non-dummy backend. A configured value of None yields None (no error).
        Feedback mode: emits ``feedback_results`` synthetic token records
        of the form (timestamp, token length, token), then closes.
        """
        if self.closed():
            return None
        if self.pool.push_results is not None:
            # we are push connection; cursor advances per new connection,
            # wrapping around the configured result list.
            ret = self.pool.push_results[self.pool.push_result_pos % len(self.pool.push_results)]
            if ret is not None:
                ret = pack(">BBI", 8, ret, 0)
            return ret
        else: # feedback mode
            ret = []
            for x in range(0, self.pool.feedback_results):
                token = six.b("test_{0}".format(x))
                # big-endian: 4-byte timestamp, 2-byte length, raw token bytes
                ret.append(pack(">IH{0}s".format(len(token)), int(time.time()), len(token), token))
            # Feedback stream is one-shot: close after producing all records.
            self.close()
            return six.binary_type().join(ret)
| 33.671141 | 110 | 0.631852 |
06889b42d54bdf04c4f4cb8bc3f5e6a1c2228a53 | 2,220 | py | Python | MRCVolume_viewer.py | dzyla/MRCVolume_viewer | 1e4c364360ecef94a0b351cde5951e7efe6db033 | [
"MIT"
] | null | null | null | MRCVolume_viewer.py | dzyla/MRCVolume_viewer | 1e4c364360ecef94a0b351cde5951e7efe6db033 | [
"MIT"
] | null | null | null | MRCVolume_viewer.py | dzyla/MRCVolume_viewer | 1e4c364360ecef94a0b351cde5951e7efe6db033 | [
"MIT"
] | null | null | null | import numpy as np
from vtkplotter import *
import mrcfile
import argparse
def open_mrcs_file(file_path):
    """Open an MRC file and return its volume data array."""
    with mrcfile.open(file_path) as mrc_stack:
        volume = mrc_stack.data
        return volume
def slider(widget, event):
    """Threshold-slider callback: rebuild the isosurface at the new level.

    Maps the slider's 0-100 position onto the volume's scalar data range
    and replaces the displayed mesh in place (``vol`` and ``iso`` are
    module-level globals set up below).
    """
    # get the slider position and calculate the value based on the volume scalar data
    value = widget.GetRepresentation().GetValue()
    # NOTE(review): value is scaled by the data range but not offset by
    # np.min(data0); for volumes whose minimum is non-zero the mapping to an
    # absolute threshold may be shifted — confirm intended behavior.
    value = value / 100 * (np.max(data0) - np.min(data0))
    '''
    # update the volume based on the raw volume, used for volume view
    # can be activated if someone would like to
    import vtk
    from vtk.util.numpy_support import numpy_to_vtk
    varr = numpy_to_vtk(data0.ravel(), deep=True, array_type=vtk.VTK_FLOAT)
    varr.SetName('input_scalars')
    img = vtk.vtkImageData()
    img.SetDimensions(data0.shape)
    img.GetPointData().SetScalars(varr)
    vol._updateVolume(img)
    vol.threshold(value)
    '''
    # Recompute the isosurface and swap its polydata into the shown mesh.
    iso.updateMesh(vol.isosurface(threshold=value).polydata())
    print('Volume threshold: {}'.format(round(value, 4)))
def slider_color(widget, event):
    """Color-slider callback: recolor the isosurface from the slider value."""
    chosen = widget.GetRepresentation().GetValue()
    iso.color(chosen)
# Look for the file location on the command line.
parser = argparse.ArgumentParser(description='Open MRC Volume file and change the threshold')
parser.add_argument('--path', metavar='path', type=str, help='MRC volume path')
args = parser.parse_args()
file_path = args.path
# Compare against None with identity, not equality (argparse default is None).
if file_path is None:
    # No path given: fall back to a GUI file picker.
    import easygui
    file_path = easygui.fileopenbox()
# open MRC volume and calculate density distribution
data0 = np.array(open_mrcs_file(file_path))
print('Volume shape is: {}'.format(data0.shape))
print('Press h for help and windows commands!')
# change numpy object to vlt volume and set the threshold
vol = Volume(data0, mode=0)
iso = vol.isosurface(threshold=True).color('gray')
# Add actors to the Plotter
vp2 = Plotter(shape=[1, 1], size=(800, 800), bg='white')
# Volume threshold in % because the raw values do not look "nice" on the slider
vp2.addSlider2D(slider, xmin=0, xmax=100, title="volume threshold, %", showValue=True)
vp2.addSlider2D(slider_color, xmin=1, xmax=100, title="color", showValue=True, pos=2)
vp2.show(iso)
vp2.show(interactive=True)
1582454e8e282c35ad383ddfde6066cfe5360743 | 17,784 | py | Python | extern_tools/mmd_tools_local/operators/model.py | FoxyTheKing/cats-blender-plugin | 1d795007fed1d1c176be93d57187d8d797579951 | [
"MIT"
] | 3 | 2021-11-03T15:14:35.000Z | 2021-12-10T05:35:38.000Z | extern_tools/mmd_tools_local/operators/model.py | Vapey/cats-blender-plugin | bbc081c504e4fbb28b89e4d0d3b8a716b00bc334 | [
"MIT"
] | null | null | null | extern_tools/mmd_tools_local/operators/model.py | Vapey/cats-blender-plugin | bbc081c504e4fbb28b89e4d0d3b8a716b00bc334 | [
"MIT"
] | 1 | 2020-10-26T23:01:58.000Z | 2020-10-26T23:01:58.000Z | # -*- coding: utf-8 -*-
import bpy
from bpy.types import Operator
from mmd_tools_local import register_wrap
from mmd_tools_local.bpyutils import SceneOp
from mmd_tools_local.core.bone import FnBone
from mmd_tools_local.translations import DictionaryEnum
import mmd_tools_local.core.model as mmd_model
@register_wrap
class MorphSliderSetup(Operator):
    """Create, bind, or unbind the placeholder sliders for MMD morphs."""
    bl_idname = 'mmd_tools.morph_slider_setup'
    bl_label = 'Morph Slider Setup'
    bl_description = 'Translate MMD morphs of selected object into format usable by Blender'
    bl_options = {'REGISTER', 'UNDO', 'INTERNAL'}
    type = bpy.props.EnumProperty(
        name='Type',
        description='Select type',
        items = [
            ('CREATE', 'Create', 'Create placeholder object for morph sliders', 'SHAPEKEY_DATA', 0),
            ('BIND', 'Bind', 'Bind morph sliders', 'DRIVER', 1),
            ('UNBIND', 'Unbind', 'Unbind morph sliders', 'X', 2),
        ],
        default='CREATE',
    )
    def execute(self, context):
        active = context.active_object
        rig = mmd_model.Model(mmd_model.Model.findRoot(active))
        slider = rig.morph_slider
        if self.type == 'BIND':
            slider.bind()
        elif self.type == 'UNBIND':
            slider.unbind()
        else:
            # Default ('CREATE'): build the placeholder slider object.
            slider.create()
        # Restore the previously active object after the setup pass.
        SceneOp(context).active_object = active
        return {'FINISHED'}
@register_wrap
class CleanRiggingObjects(Operator):
    """Remove generated physics helpers, restoring the default MMD state."""
    bl_idname = 'mmd_tools.clean_rig'
    bl_label = 'Clean Rig'
    bl_description = 'Delete temporary physics objects of selected object and revert physics to default MMD state'
    bl_options = {'REGISTER', 'UNDO', 'INTERNAL'}
    def execute(self, context):
        model_root = mmd_model.Model.findRoot(context.active_object)
        mmd_model.Model(model_root).clean()
        # Make the model root the active object when done.
        SceneOp(context).active_object = model_root
        return {'FINISHED'}
@register_wrap
class BuildRig(Operator):
    """Build the Blender physics representation for the active MMD model."""
    bl_idname = 'mmd_tools.build_rig'
    bl_label = 'Build Rig'
    bl_description = 'Translate physics of selected object into format usable by Blender'
    bl_options = {'REGISTER', 'UNDO', 'INTERNAL'}
    def execute(self, context):
        model_root = mmd_model.Model.findRoot(context.active_object)
        mmd_model.Model(model_root).build()
        # Make the model root the active object when done.
        SceneOp(context).active_object = model_root
        return {'FINISHED'}
@register_wrap
class CleanAdditionalTransformConstraints(Operator):
    """Delete the shadow-bone constraint setup, reverting to plain MMD bones."""
    bl_idname = 'mmd_tools.clean_additional_transform'
    bl_label = 'Clean Additional Transform'
    bl_description = 'Delete shadow bones of selected object and revert bones to default MMD state'
    bl_options = {'REGISTER', 'UNDO', 'INTERNAL'}
    def execute(self, context):
        active = context.active_object
        model = mmd_model.Model(mmd_model.Model.findRoot(active))
        model.cleanAdditionalTransformConstraints()
        # Keep the user's active object selected afterwards.
        SceneOp(context).active_object = active
        return {'FINISHED'}
@register_wrap
class ApplyAdditionalTransformConstraints(Operator):
    """Rebuild the constraint setup that emulates MMD appended bones."""
    bl_idname = 'mmd_tools.apply_additional_transform'
    bl_label = 'Apply Additional Transform'
    bl_description = 'Translate appended bones of selected object for Blender'
    bl_options = {'REGISTER', 'UNDO', 'INTERNAL'}
    def execute(self, context):
        active = context.active_object
        model = mmd_model.Model(mmd_model.Model.findRoot(active))
        model.applyAdditionalTransformConstraints()
        # Keep the user's active object selected afterwards.
        SceneOp(context).active_object = active
        return {'FINISHED'}
@register_wrap
class SetupBoneFixedAxes(Operator):
    """Enable, disable, or apply the MMD fixed-axis setting on selected bones."""
    bl_idname = 'mmd_tools.bone_fixed_axis_setup'
    bl_label = 'Setup Bone Fixed Axis'
    bl_description = 'Setup fixed axis of selected bones'
    bl_options = {'REGISTER', 'UNDO', 'INTERNAL'}
    type = bpy.props.EnumProperty(
        name='Type',
        description='Select type',
        items = [
            ('DISABLE', 'Disable', 'Disable MMD fixed axis of selected bones', 0),
            ('LOAD', 'Load', 'Load/Enable MMD fixed axis of selected bones from their Y-axis or the only rotatable axis', 1),
            ('APPLY', 'Apply', 'Align bone axes to MMD fixed axis of each bone', 2),
        ],
        default='LOAD',
    )
    def execute(self, context):
        armature = context.active_object
        # Guard: this operator only makes sense on an armature.
        if not armature or armature.type != 'ARMATURE':
            self.report({'ERROR'}, 'Active object is not an armature object')
            return {'CANCELLED'}
        if self.type == 'APPLY':
            FnBone.apply_bone_fixed_axis(armature)
            FnBone.apply_additional_transformation(armature)
            return {'FINISHED'}
        # 'LOAD' enables the fixed axis; 'DISABLE' turns it off.
        FnBone.load_bone_fixed_axis(armature, enable=(self.type == 'LOAD'))
        return {'FINISHED'}
@register_wrap
class SetupBoneLocalAxes(Operator):
    """Enable, disable, or apply the MMD local-axes setting on each bone."""
    bl_idname = 'mmd_tools.bone_local_axes_setup'
    bl_label = 'Setup Bone Local Axes'
    bl_description = 'Setup local axes of each bone'
    bl_options = {'REGISTER', 'UNDO', 'INTERNAL'}
    type = bpy.props.EnumProperty(
        name='Type',
        description='Select type',
        items = [
            ('DISABLE', 'Disable', 'Disable MMD local axes of selected bones', 0),
            ('LOAD', 'Load', 'Load/Enable MMD local axes of selected bones from their bone axes', 1),
            ('APPLY', 'Apply', 'Align bone axes to MMD local axes of each bone', 2),
        ],
        default='LOAD',
    )
    def execute(self, context):
        armature = context.active_object
        # Guard: this operator only makes sense on an armature.
        if not armature or armature.type != 'ARMATURE':
            self.report({'ERROR'}, 'Active object is not an armature object')
            return {'CANCELLED'}
        if self.type == 'APPLY':
            FnBone.apply_bone_local_axes(armature)
            FnBone.apply_additional_transformation(armature)
            return {'FINISHED'}
        # 'LOAD' enables the local axes; 'DISABLE' turns them off.
        FnBone.load_bone_local_axes(armature, enable=(self.type == 'LOAD'))
        return {'FINISHED'}
@register_wrap
class CreateMMDModelRoot(Operator):
    """Create an empty MMD model: a root object with a basic armature."""
    bl_idname = 'mmd_tools.create_mmd_model_root_object'
    bl_label = 'Create a MMD Model Root Object'
    bl_description = 'Create a MMD model root object with a basic armature'
    bl_options = {'REGISTER', 'UNDO'}
    name_j = bpy.props.StringProperty(
        name='Name',
        description='The name of the MMD model',
        default='New MMD Model',
    )
    name_e = bpy.props.StringProperty(
        name='Name(Eng)',
        description='The english name of the MMD model',
        default='New MMD Model',
    )
    scale = bpy.props.FloatProperty(
        name='Scale',
        description='Scale',
        default=1.0,
    )
    def execute(self, context):
        new_rig = mmd_model.Model.create(self.name_j, self.name_e, self.scale, add_root_bone=True)
        new_rig.initialDisplayFrames()
        return {'FINISHED'}
    def invoke(self, context, event):
        # Pop up the property dialog so the user can edit names/scale first.
        return context.window_manager.invoke_props_dialog(self)
@register_wrap
class ConvertToMMDModel(Operator):
    """Convert the active armature and its meshes into an MMD model.

    Creates (if needed) an MMD root above the armature, reparents every
    mesh that uses the armature, and converts materials/bones to their
    MMD equivalents.
    """
    bl_idname = 'mmd_tools.convert_to_mmd_model'
    bl_label = 'Convert to a MMD Model'
    bl_description = 'Convert active armature with its meshes to a MMD model (experimental)'
    bl_options = {'REGISTER', 'UNDO'}
    ambient_color_source = bpy.props.EnumProperty(
        name='Ambient Color Source',
        description='Select ambient color source',
        items = [
            ('DIFFUSE', 'Diffuse', 'Diffuse color', 0),
            ('MIRROR', 'Mirror', 'Mirror color (if property "mirror_color" is available)', 1),
        ],
        default='DIFFUSE',
    )
    edge_threshold = bpy.props.FloatProperty(
        name='Edge Threshold',
        description='MMD toon edge will not be enabled if freestyle line color alpha less than this value',
        min=0,
        max=1.001,
        precision=3,
        step=0.1,
        default=0.1,
    )
    edge_alpha_min = bpy.props.FloatProperty(
        name='Minimum Edge Alpha',
        description='Minimum alpha of MMD toon edge color',
        min=0,
        max=1,
        precision=3,
        step=0.1,
        default=0.5,
    )
    @classmethod
    def poll(cls, context):
        # Only available for an armature that is not in edit mode.
        obj = context.active_object
        return obj and obj.type == 'ARMATURE' and obj.mode != 'EDIT'
    def invoke(self, context, event):
        # Show the property dialog before executing.
        vm = context.window_manager
        return vm.invoke_props_dialog(self)
    def execute(self, context):
        #TODO convert some basic MMD properties
        armature = context.active_object
        scale = 1
        model_name = 'New MMD Model'
        root = mmd_model.Model.findRoot(armature)
        if root is None or root != armature.parent:
            # No (direct) MMD root yet: create one around this armature.
            # NOTE(review): `rig` is unused; presumably Model.create() has the
            # needed side effect of parenting the armature — confirm.
            rig = mmd_model.Model.create(model_name, model_name, scale, armature=armature)
        self.__attach_meshes_to(armature, SceneOp(context).id_objects)
        self.__configure_rig(mmd_model.Model(armature.parent))
        return {'FINISHED'}
    def __attach_meshes_to(self, armature, objects):
        # Reparent (the root ancestor of) every object that deforms with
        # this armature but is not already in its hierarchy, preserving the
        # world transform.
        def __is_child_of_armature(mesh):
            if mesh.parent is None:
                return False
            return mesh.parent == armature or __is_child_of_armature(mesh.parent)
        def __is_using_armature(mesh):
            # True if any armature modifier targets this armature.
            for m in mesh.modifiers:
                if m.type == 'ARMATURE' and m.object == armature:
                    return True
            return False
        def __get_root(mesh):
            # Walk up to the top-most ancestor of the object.
            if mesh.parent is None:
                return mesh
            return __get_root(mesh.parent)
        for x in objects:
            if __is_using_armature(x) and not __is_child_of_armature(x):
                x_root = __get_root(x)
                # Re-apply the world matrix after reparenting so nothing moves.
                m = x_root.matrix_world
                x_root.parent_type = 'OBJECT'
                x_root.parent = armature
                x_root.matrix_world = m
    def __configure_rig(self, rig):
        root = rig.rootObject()
        armature = rig.armature()
        meshes = tuple(rig.meshes())
        rig.loadMorphs()
        # Lock location of connected/undeforming bones (no vertex group of
        # the same name on any mesh), matching MMD bone behavior.
        vertex_groups = {g.name for mesh in meshes for g in mesh.vertex_groups}
        for pose_bone in armature.pose.bones:
            if not pose_bone.parent:
                continue
            if not pose_bone.bone.use_connect and pose_bone.name not in vertex_groups:
                continue
            pose_bone.lock_location = (True, True, True)
        from mmd_tools_local.core.material import FnMaterial
        # Convert each unique material once (set comprehension deduplicates).
        for m in {x for mesh in meshes for x in mesh.data.materials if x}:
            FnMaterial.convert_to_mmd_material(m)
            mmd_material = m.mmd_material
            if self.ambient_color_source == 'MIRROR' and hasattr(m, 'mirror_color'):
                mmd_material.ambient_color = m.mirror_color
            else:
                # Default: derive ambient as half the diffuse color.
                mmd_material.ambient_color = [0.5*c for c in mmd_material.diffuse_color]
            if hasattr(m, 'line_color'): # freestyle line color
                line_color = list(m.line_color)
                # Toon edge is enabled only when the line alpha passes the
                # user-selected threshold; the edge alpha is clamped upward.
                mmd_material.enabled_toon_edge = line_color[3] >= self.edge_threshold
                mmd_material.edge_color = line_color[:3] + [max(line_color[3], self.edge_alpha_min)]
        from mmd_tools_local.operators.display_item import DisplayItemQuickSetup
        DisplayItemQuickSetup.load_bone_groups(root.mmd_root, armature)
        rig.initialDisplayFrames(reset=False) # ensure default frames
        DisplayItemQuickSetup.load_facial_items(root.mmd_root)
@register_wrap
class TranslateMMDModel(Operator):
    """Translate the Japanese names of an MMD model to English.

    Depending on *modes*, this fills the MMD English-name fields and/or
    renames the Blender datablocks themselves, for the parts selected in
    *types* (bones, morphs, materials, display frames, physics, info).
    """
    bl_idname = 'mmd_tools.translate_mmd_model'
    bl_label = 'Translate a MMD Model'
    bl_description = 'Translate Japanese names of a MMD model'
    bl_options = {'REGISTER', 'UNDO', 'INTERNAL'}
    dictionary = bpy.props.EnumProperty(
        name='Dictionary',
        items=DictionaryEnum.get_dictionary_items,
        description='Translate names from Japanese to English using selected dictionary',
    )
    types = bpy.props.EnumProperty(
        name='Types',
        description='Select which parts will be translated',
        options={'ENUM_FLAG'},
        items = [
            ('BONE', 'Bones', 'Bones', 1),
            ('MORPH', 'Morphs', 'Morphs', 2),
            ('MATERIAL', 'Materials', 'Materials', 4),
            ('DISPLAY', 'Display', 'Display frames', 8),
            ('PHYSICS', 'Physics', 'Rigidbodies and joints', 16),
            ('INFO', 'Information', 'Model name and comments', 32),
        ],
        default={'BONE', 'MORPH', 'MATERIAL', 'DISPLAY', 'PHYSICS',},
    )
    modes = bpy.props.EnumProperty(
        name='Modes',
        description='Select translation mode',
        options={'ENUM_FLAG'},
        items = [
            ('MMD', 'MMD Names', 'Fill MMD English names', 1),
            ('BLENDER', 'Blender Names', 'Translate blender names (experimental)', 2),
        ],
        default={'MMD'},
    )
    use_morph_prefix = bpy.props.BoolProperty(
        name='Use Morph Prefix',
        description='Add/remove prefix to English name of morph',
        default=False,
    )
    overwrite = bpy.props.BoolProperty(
        name='Overwrite',
        description='Overwrite a translated English name',
        default=False,
    )
    allow_fails = bpy.props.BoolProperty(
        name='Allow Fails',
        description='Allow incompletely translated names',
        default=False,
    )
    def invoke(self, context, event):
        # Show the property dialog before executing.
        vm = context.window_manager
        return vm.invoke_props_dialog(self)
    def execute(self, context):
        try:
            self.__translator = DictionaryEnum.get_translator(self.dictionary)
        except Exception as e:
            self.report({'ERROR'}, 'Failed to load dictionary: %s'%e)
            return {'CANCELLED'}
        obj = context.active_object
        root = mmd_model.Model.findRoot(obj)
        rig = mmd_model.Model(root)
        if 'MMD' in self.modes:
            # Dispatch to translate_bone/translate_morph/... per selected type.
            for i in self.types:
                getattr(self, 'translate_%s'%i.lower())(rig)
        if 'BLENDER' in self.modes:
            self.translate_blender_names(rig)
        # Untranslatable names are collected and dumped to a text datablock
        # so the user can fix them by hand.
        translator = self.__translator
        txt = translator.save_fails()
        if translator.fails:
            self.report({'WARNING'}, "Failed to translate %d names, see '%s' in text editor"%(len(translator.fails), txt.name))
        return {'FINISHED'}
    def translate(self, name_j, name_e):
        """Translate *name_j*; keep an already-translated *name_e* unless overwriting."""
        if not self.overwrite and name_e and self.__translator.is_translated(name_e):
            return name_e
        if self.allow_fails:
            # Discard the fallback so partial translations are accepted.
            name_e = None
        return self.__translator.translate(name_j, name_e)
    def translate_blender_names(self, rig):
        # Rename the actual Blender datablocks (experimental mode).
        if 'BONE' in self.types:
            for b in rig.armature().pose.bones:
                rig.renameBone(b.name, self.translate(b.name, b.name))
        if 'MORPH' in self.types:
            for i in (x for x in rig.meshes() if x.data.shape_keys):
                for kb in i.data.shape_keys.key_blocks:
                    kb.name = self.translate(kb.name, kb.name)
        if 'MATERIAL' in self.types:
            for m in (x for x in rig.materials() if x):
                m.name = self.translate(m.name, m.name)
        if 'DISPLAY' in self.types:
            for g in rig.armature().pose.bone_groups:
                g.name = self.translate(g.name, g.name)
        if 'PHYSICS' in self.types:
            for i in rig.rigidBodies():
                i.name = self.translate(i.name, i.name)
            for i in rig.joints():
                i.name = self.translate(i.name, i.name)
        if 'INFO' in self.types:
            objects = [rig.rootObject(), rig.armature()]
            objects.extend(rig.meshes())
            for i in objects:
                i.name = self.translate(i.name, i.name)
    def translate_info(self, rig):
        # Model name plus the comment text datablocks.
        mmd_root = rig.rootObject().mmd_root
        mmd_root.name_e = self.translate(mmd_root.name, mmd_root.name_e)
        comment_text = bpy.data.texts.get(mmd_root.comment_text, None)
        comment_e_text = bpy.data.texts.get(mmd_root.comment_e_text, None)
        if comment_text and comment_e_text:
            comment_e = self.translate(comment_text.as_string(), comment_e_text.as_string())
            comment_e_text.from_string(comment_e)
    def translate_bone(self, rig):
        bones = rig.armature().pose.bones
        for b in bones:
            # Shadow bones are internal helpers and keep their names.
            if b.is_mmd_shadow_bone:
                continue
            b.mmd_bone.name_e = self.translate(b.mmd_bone.name_j, b.mmd_bone.name_e)
    def translate_morph(self, rig):
        mmd_root = rig.rootObject().mmd_root
        # Per-category prefixes mirror MMD conventions (vertex morphs have none).
        attr_list = ('group', 'vertex', 'bone', 'uv', 'material')
        prefix_list = ('G_', '', 'B_', 'UV_', 'M_')
        for attr, prefix in zip(attr_list, prefix_list):
            for m in getattr(mmd_root, attr+'_morphs', []):
                m.name_e = self.translate(m.name, m.name_e)
                if not prefix:
                    continue
                # Add or strip the category prefix per user preference.
                if self.use_morph_prefix:
                    if not m.name_e.startswith(prefix):
                        m.name_e = prefix + m.name_e
                elif m.name_e.startswith(prefix):
                    m.name_e = m.name_e[len(prefix):]
    def translate_material(self, rig):
        for m in rig.materials():
            if m is None:
                continue
            m.mmd_material.name_e = self.translate(m.mmd_material.name_j, m.mmd_material.name_e)
    def translate_display(self, rig):
        mmd_root = rig.rootObject().mmd_root
        for f in mmd_root.display_item_frames:
            f.name_e = self.translate(f.name, f.name_e)
    def translate_physics(self, rig):
        for i in rig.rigidBodies():
            i.mmd_rigid.name_e = self.translate(i.mmd_rigid.name_j, i.mmd_rigid.name_e)
        for i in rig.joints():
            i.mmd_joint.name_e = self.translate(i.mmd_joint.name_j, i.mmd_joint.name_e)
3fe537543b76f0325353e51f67ba74f178cbe7eb | 4,902 | py | Python | supervisor/store/git.py | carver7/supervisor-master | e9802f92c9f77481276ed3c0d524427cc03e4271 | [
"Apache-2.0"
] | null | null | null | supervisor/store/git.py | carver7/supervisor-master | e9802f92c9f77481276ed3c0d524427cc03e4271 | [
"Apache-2.0"
] | null | null | null | supervisor/store/git.py | carver7/supervisor-master | e9802f92c9f77481276ed3c0d524427cc03e4271 | [
"Apache-2.0"
] | null | null | null | """Init file for Supervisor add-on Git."""
import asyncio
import functools as ft
import logging
from pathlib import Path
import shutil
import git
from ..const import ATTR_BRANCH, ATTR_URL, URL_HASSIO_ADDONS
from ..coresys import CoreSysAttributes
from ..validate import RE_REPOSITORY
from .utils import get_hash_from_repository
_LOGGER: logging.Logger = logging.getLogger(__name__)
class GitRepo(CoreSysAttributes):
    """Manage Add-on Git repository.

    All blocking GitPython calls are pushed onto the executor via
    ``sys_run_in_executor``; an asyncio lock serializes load/clone/pull.
    """
    def __init__(self, coresys, path, url):
        """Initialize Git base wrapper.

        *url* is parsed with RE_REPOSITORY into a dict holding the
        repository URL and, optionally, the branch.
        """
        self.coresys = coresys
        self.repo = None
        self.path = path
        self.lock = asyncio.Lock()
        self.data = RE_REPOSITORY.match(url).groupdict()
    @property
    def url(self):
        """Return repository URL."""
        return self.data[ATTR_URL]
    @property
    def branch(self):
        """Return repository branch (may be None if none was given)."""
        return self.data[ATTR_BRANCH]
    async def load(self):
        """Init Git add-on repository.

        Clones when the local directory is missing; otherwise opens the
        existing checkout. Returns True on success, False on failure
        (the broken checkout is removed on failure).
        """
        if not self.path.is_dir():
            return await self.clone()
        async with self.lock:
            try:
                _LOGGER.info("Load add-on %s repository", self.path)
                self.repo = await self.sys_run_in_executor(git.Repo, str(self.path))
            except (
                git.InvalidGitRepositoryError,
                git.NoSuchPathError,
                git.GitCommandError,
            ) as err:
                _LOGGER.error("Can't load %s repo: %s.", self.path, err)
                self._remove()
                return False
        return True
    async def clone(self):
        """Clone git add-on repository.

        Uses a shallow (depth 1) recursive clone; branch is passed only
        when one was parsed from the URL. Returns True/False.
        """
        async with self.lock:
            # Drop options whose value is None (e.g. no branch given).
            git_args = {
                attribute: value
                for attribute, value in (
                    ("recursive", True),
                    ("branch", self.branch),
                    ("depth", 1),
                    ("shallow-submodules", True),
                )
                if value is not None
            }
            try:
                _LOGGER.info("Clone add-on %s repository", self.url)
                self.repo = await self.sys_run_in_executor(
                    ft.partial(
                        git.Repo.clone_from, self.url, str(self.path), **git_args
                    )
                )
            except (
                git.InvalidGitRepositoryError,
                git.NoSuchPathError,
                git.GitCommandError,
            ) as err:
                _LOGGER.error("Can't clone %s repository: %s.", self.url, err)
                self._remove()
                return False
        return True
    async def pull(self):
        """Pull Git add-on repo.

        Shallow-fetches origin, hard-resets onto the fetched branch tip and
        cleans untracked files. Returns False immediately when another
        git task already holds the lock.
        """
        if self.lock.locked():
            _LOGGER.warning("It is already a task in progress")
            return False
        async with self.lock:
            _LOGGER.info("Update add-on %s repository", self.url)
            branch = self.repo.active_branch.name
            try:
                # Download data
                # (dict-splat needed because "update-shallow" is not a
                # valid Python keyword identifier)
                await self.sys_run_in_executor(
                    ft.partial(
                        self.repo.remotes.origin.fetch,
                        **{"update-shallow": True, "depth": 1},
                    )
                )
                # Jump on top of that
                await self.sys_run_in_executor(
                    ft.partial(self.repo.git.reset, f"origin/{branch}", hard=True)
                )
                # Cleanup old data
                await self.sys_run_in_executor(ft.partial(self.repo.git.clean, "-xdf"))
            except (
                git.InvalidGitRepositoryError,
                git.NoSuchPathError,
                git.GitCommandError,
            ) as err:
                _LOGGER.error("Can't update %s repo: %s.", self.url, err)
                return False
        return True
    def _remove(self):
        """Remove a repository checkout from disk (best effort)."""
        if not self.path.is_dir():
            return
        def log_err(funct, path, _):
            """Log error."""
            # rmtree error hook: log and continue rather than raising.
            _LOGGER.warning("Can't remove %s", path)
        shutil.rmtree(self.path, onerror=log_err)
class GitRepoHassIO(GitRepo):
    """Supervisor add-ons repository."""
    def __init__(self, coresys):
        """Initialize the built-in Supervisor add-on repository."""
        core_path = coresys.config.path_addons_core
        super().__init__(coresys, core_path, URL_HASSIO_ADDONS)
class GitRepoCustom(GitRepo):
    """Custom add-ons repository."""
    def __init__(self, coresys, url):
        """Initialize a custom Git add-on repository.

        The local checkout directory is derived from a hash of the URL.
        """
        repo_path = Path(coresys.config.path_addons_git, get_hash_from_repository(url))
        super().__init__(coresys, repo_path, url)
    def remove(self):
        """Remove a custom repository."""
        _LOGGER.info("Remove custom add-on repository %s", self.url)
        self._remove()
| 29.890244 | 87 | 0.537536 |
77897cdc3e2c134000eab8657886bdde184c4c27 | 183 | py | Python | app/config/prod.py | david-ellinger/todo-api-python | 4de61dce4213f64514da5f5f06dd9e7e53d3dbdc | [
"MIT"
] | null | null | null | app/config/prod.py | david-ellinger/todo-api-python | 4de61dce4213f64514da5f5f06dd9e7e53d3dbdc | [
"MIT"
] | null | null | null | app/config/prod.py | david-ellinger/todo-api-python | 4de61dce4213f64514da5f5f06dd9e7e53d3dbdc | [
"MIT"
] | null | null | null | from app.config.base import Config as BaseConfig
class Config(BaseConfig):
    """Production configuration overrides."""
    # Debugging must be off in production.
    DEBUG = False
    # NOTE(review): credentials are hard-coded here; presumably placeholders
    # overridden by deployment — confirm before shipping.
    SQLALCHEMY_DATABASE_URI = "postgresql+psycopg2://sa:password@postgres:5432/todo_db"
| 26.142857 | 87 | 0.775956 |
8bf857573457e9ddd2ba9c4e66e680a0ace73c27 | 41,992 | py | Python | net/views/image.py | bretmckee/astrometry.net | 49d083c46919e8f8fa00b3315fea76fe5b099228 | [
"Net-SNMP",
"Xnet"
] | null | null | null | net/views/image.py | bretmckee/astrometry.net | 49d083c46919e8f8fa00b3315fea76fe5b099228 | [
"Net-SNMP",
"Xnet"
] | null | null | null | net/views/image.py | bretmckee/astrometry.net | 49d083c46919e8f8fa00b3315fea76fe5b099228 | [
"Net-SNMP",
"Xnet"
] | 1 | 2021-03-28T07:52:40.000Z | 2021-03-28T07:52:40.000Z | from __future__ import print_function
import os
import math
import stat
import time
from datetime import datetime, timedelta
try:
# py3
from urllib.parse import urlencode
# from urllib.request import urlretrieve
except ImportError:
# py2
from urllib import urlencode
# from urllib import urlencode urlretrieve
if __name__ == '__main__':
os.environ['DJANGO_SETTINGS_MODULE'] = 'astrometry.net.settings'
import django
django.setup()
from django.http import HttpResponse, HttpResponseRedirect, HttpResponseBadRequest, QueryDict, StreamingHttpResponse
from django.shortcuts import get_object_or_404, redirect, render
from django.template import Context, RequestContext, loader
from django.contrib.auth.decorators import login_required
from django import forms
from django.contrib import messages
from astrometry.net.models import *
from astrometry.net import settings
from astrometry.net.log import *
from astrometry.net.tmpfile import *
from astrometry.net.sdss_image import plot_sdss_image
from astrometry.util import image2pnm
from astrometry.util.run_command import run_command
from astrometry.util.file import *
from astrometry.net.models import License
from astrometry.net.views.comment import *
from astrometry.net.views.license import *
from astrometry.net.util import get_page, get_session_form
from astrometry.net.util import NoBulletsRadioSelect
from astrometry.net.views.tag import TagForm, TagSearchForm
from astrometry.net.views.license import LicenseForm
from astrometry.net.views.enhance import *
import json
# repeat this import to override somebody else's import of the datetime module
from datetime import datetime, timedelta
class UserImageForm(forms.ModelForm):
    """Edit form for a UserImage; album choices are filled per user."""
    # Choices start empty and are populated in __init__ for the given user.
    album = forms.ChoiceField(choices=(), required=False)
    class Meta:
        model = UserImage
        # Fields managed elsewhere (ownership, tags, licensing, submission
        # bookkeeping, ...) are excluded from the user-editable form.
        exclude = (
            'image',
            'user',
            'tags',
            'flags',
            'original_file_name',
            'submission',
            'owner',
            'comment_receiver',
            'license',
            'sky_objects',
        )
        widgets = {
            'description': forms.Textarea(attrs={'cols':60,'rows':3}),
            'publicly_visible': NoBulletsRadioSelect(),
        }
    def __init__(self, user, *args, **kwargs):
        """Build the form, populating album choices for *user*.

        When editing an existing UserImage (passed as ``instance``), the
        album it already belongs to is preselected.
        """
        super(UserImageForm, self).__init__(*args, **kwargs)
        self.fields['album'].choices = [('', 'none')]
        self.fields['album'].initial = ''
        user_image = kwargs.get('instance')
        if user.is_authenticated:
            for album in Album.objects.filter(user=user).all():
                self.fields['album'].choices += [(album.id, album.title)]
                if user_image and user_image in album.user_images.all():
                    self.fields['album'].initial = album.id
def user_image(req, user_image_id=None):
    """Display a single UserImage page.

    Builds URLs for all available renderings (original, annotated, grid,
    SDSS/GALEX overlays, red/green, extraction), comment/tag forms, flags,
    and a WorldWideTelescope link when the image has a calibration.
    Non-public images are only shown to their owner.
    """
    uimage = get_object_or_404(UserImage, pk=user_image_id)
    # The "best" job determines which derived renderings exist.
    job = uimage.get_best_job()
    calib = None
    if job:
        calib = job.calibration
    #license_form = get_session_form(req.session, PartialLicenseForm)
    comment_form = get_session_form(req.session, PartialCommentForm)
    tag_form = get_session_form(req.session, TagForm)
    # Map of rendering name -> URL; '<name>_display' is the screen-sized
    # version, bare '<name>' the full-resolution one.
    images = {}
    dim = uimage.image.get_display_image(tempfiles=req.tempfiles)
    images['original_display'] = reverse('serve_image', kwargs={'id':dim.id})
    images['original'] = reverse('serve_image', kwargs={'id':uimage.image.id})
    image_type = 'original'
    if job:
        if job.calibration:
            images['annotated_display'] = reverse('annotated_image', kwargs={'jobid':job.id,'size':'display'})
            images['annotated'] = reverse('annotated_image', kwargs={'jobid':job.id,'size':'full'})
            images['grid_display'] = reverse('grid_image', kwargs={'jobid':job.id,'size':'display'})
            images['grid'] = reverse('grid_image', kwargs={'jobid':job.id,'size':'full'})
            images['sdss_display'] = reverse('sdss_image', kwargs={'calid':job.calibration.id,'size':'display'})
            images['sdss'] = reverse('sdss_image', kwargs={'calid':job.calibration.id,'size':'full'})
            images['galex_display'] = reverse('galex_image', kwargs={'calid':job.calibration.id,'size':'display'})
            images['galex'] = reverse('galex_image', kwargs={'calid':job.calibration.id,'size':'full'})
            images['redgreen_display'] = reverse('red_green_image', kwargs={'job_id':job.id,'size':'display'})
            images['redgreen'] = reverse('red_green_image', kwargs={'job_id':job.id,'size':'full'})
            #images['enhanced_display'] = reverse('enhanced_image', kwargs={'job_id':job.id,'size':'display'})
            #images['enhanced'] = reverse('enhanced_image', kwargs={'job_id':job.id,'size':'full'})
            # Calibrated images default to the annotated rendering.
            image_type = 'annotated'
        images['extraction_display'] = reverse('extraction_image', kwargs={'job_id':job.id,'size':'display'})
        images['extraction'] = reverse('extraction_image', kwargs={'job_id':job.id,'size':'full'})
    # The query string may override which rendering is shown first.
    image_type = req.GET.get('image', image_type)
    if image_type in images:
        display_url = images[image_type + '_display']
        fullsize_url = images[image_type]
    else:
        display_url=''
        fullsize_url=''
    flags = Flag.objects.all()
    # Flags this viewer has already applied to the image (None if anonymous).
    if req.user.is_authenticated:
        selected_flags = [flagged_ui.flag for flagged_ui in
            FlaggedUserImage.objects.filter(
                user_image=uimage,
                user=req.user,
            )
        ]
    else:
        selected_flags = None
    # Build the WorldWideTelescope "view this image" URL for calibrated shots.
    if job and job.calibration:
        parity = (calib.get_parity() < 0)
        wcs = calib.raw_tan
        # Prefer the SIP-tweaked solution when available.
        if calib.tweaked_tan is not None:
            wcs = calib.tweaked_tan
        imgurl = req.build_absolute_uri(images['original'])
        thumburl = req.build_absolute_uri(images['original_display'])
        fits = uimage.image.disk_file.is_fits_image()
        # WWT measures y from the opposite edge of the image.
        y = wcs.imageh - wcs.crpix2
        orient = wcs.get_orientation()
        logmsg('Parity', parity, 'FITS', fits, 'Orientation', orient)
        if parity:
            orient = 360. - orient
        wwturl = 'http://www.worldwidetelescope.org/wwtweb/ShowImage.aspx?reverseparity=%s&scale=%.6f&name=%s&imageurl=%s&credits=Astrometry.net+User+(All+Rights+Reserved)&creditsUrl=&ra=%.6f&dec=%.6f&x=%.1f&y=%.1f&rotation=%.2f&thumb=%s' % (parity, wcs.get_pixscale(), uimage.original_file_name, imgurl, wcs.crval1, wcs.crval2, wcs.crpix1, y, orient, thumburl)
    else:
        wwturl = None
    logmsg(uimage.get_absolute_url())
    context = {
        'request': req,
        'display_image': dim,
        'image': uimage,
        'job': job,
        'calib': calib,
        'comment_form': comment_form,
        #'license_form': license_form,
        'tag_form': tag_form,
        'images': json.dumps(images),
        'display_url': display_url,
        'fullsize_url': fullsize_url,
        'image_type': image_type,
        'flags': flags,
        'selected_flags': selected_flags,
        'wwt_url': wwturl,
    }
    # Public images, or the owner's own, get the full view page.
    if uimage.is_public() or (req.user.is_authenticated and uimage.user == req.user):
        template = 'user_image/view.html'
    #elif SharedHideable.objects.filter(shared_with=req.user.id, hideable=image).count():
    #    template = 'user_image/view.html'
    else:
        messages.error(req, "Sorry, you don't have permission to view this content.")
        template = 'user_image/permission_denied.html'
    return render(req, template, context)
@login_required
def edit(req, user_image_id=None):
    """Edit a UserImage's details: form fields, license, album membership
    and flags.  Only the submitting user may edit.

    GET renders the edit form; a valid POST applies the changes and
    redirects back to the image page.
    """
    user_image = get_object_or_404(UserImage, pk=user_image_id)
    if user_image.user != req.user:
        messages.error(req, "Sorry, you don't have permission to edit this content.")
        return render(req, 'user_image/permission_denied.html')
    if req.method == 'POST':
        image_form = UserImageForm(req.user, req.POST, instance=user_image)
        license_form = LicenseForm(req.POST)
        if image_form.is_valid() and license_form.is_valid():
            image_form.save()
            pro = get_user_profile(req.user)
            # License rows are shared: reuse an identical one if it exists.
            license,created = License.objects.get_or_create(
                default_license=pro.default_license,
                allow_commercial_use=license_form.cleaned_data['allow_commercial_use'],
                allow_modifications=license_form.cleaned_data['allow_modifications'],
            )
            user_image.license = license
            album_id = image_form.cleaned_data['album']
            # This user's albums that currently contain the image.
            albums = Album.objects.filter(user=req.user).filter(user_images__in=[user_image])
            if album_id == '':
                # Empty selection: remove user_image from all its albums.
                for album in albums:
                    album.user_images.remove(user_image)
            else:
                try:
                    # BUG FIX: the destination album used to be bound to the
                    # name 'album', which the removal loop below reassigned,
                    # so the image was re-added to the *old* album rather
                    # than the newly selected one.  Use a distinct name.
                    new_album = Album.objects.get(pk=int(album_id))
                    if new_album not in albums:
                        # move user_image to the newly selected album
                        for album in albums:
                            album.user_images.remove(user_image)
                        new_album.user_images.add(user_image)
                except Exception:
                    # Best-effort: an invalid album id leaves membership as-is.
                    pass
            selected_flags = req.POST.getlist('flags')
            user_image.update_flags(selected_flags, req.user)
            user_image.save()
            messages.success(req, 'Image details successfully updated.')
            return redirect(user_image)
        else:
            messages.error(req, 'Please fix the following errors:')
    else:
        image_form = UserImageForm(req.user, instance=user_image)
        license_form = LicenseForm(instance=user_image.license)
    flags = Flag.objects.all()
    # Flags this user has already applied to the image.
    selected_flags = [flagged_ui.flag for flagged_ui in
        FlaggedUserImage.objects.filter(
            user_image=user_image,
            user=req.user,
        )
    ]
    context = {
        'image_form': image_form,
        'license_form': license_form,
        'flags': flags,
        'selected_flags': selected_flags,
        'image': user_image,
    }
    return render(req, 'user_image/edit.html', context)
def serve_image(req, id=None):
    """Stream a stored Image's bytes to the client with caching headers.

    An optional 'filename' query parameter sets the Content-Disposition
    filename for downloads.
    """
    img = get_object_or_404(Image, pk=id)
    response = HttpResponse(content_type=img.get_mime_type())
    # Let clients cache the rendered image for one week.
    expires_at = datetime.now() + timedelta(days=7)
    response['Expires'] = time.asctime(expires_at.timetuple())
    modified_time = os.stat(img.disk_file.get_path())[stat.ST_MTIME]
    response['Last-Modified'] = time.asctime(time.gmtime(modified_time))
    filename = req.GET.get('filename')
    if filename is not None:
        response['Content-Disposition'] = 'filename=%s' % filename
    img.render(response, tempfiles=req.tempfiles)
    return response
def grid_image(req, jobid=None, size='full'):
    """Render the job's image with an RA/Dec coordinate grid overlaid.

    size='display' draws on the screen-sized image (scaling the WCS to
    match); anything else uses the full-resolution original.  Returns a
    JPEG HttpResponse.
    """
    from astrometry.plot.plotstuff import (Plotstuff,
                                           PLOTSTUFF_FORMAT_JPG,
                                           PLOTSTUFF_FORMAT_PPM,
                                           plotstuff_set_size_wcs,
                                           )
    job = get_object_or_404(Job, pk=jobid)
    ui = job.user_image
    img = ui.image
    if size == 'display':
        dimg = img.get_display_image(tempfiles=req.tempfiles)
        scale = float(dimg.width)/img.width
        img = dimg
    else:
        scale = 1.0
    wcsfn = job.get_wcs_file()
    pnmfn = img.get_pnm_path(tempfiles=req.tempfiles)
    outfn = get_temp_file(tempfiles=req.tempfiles)
    plot = Plotstuff()
    plot.wcs_file = wcsfn
    plot.outformat = PLOTSTUFF_FORMAT_JPG
    plot.outfn = outfn
    plot.scale_wcs(scale)
    plotstuff_set_size_wcs(plot.pargs)
    # plot image
    pimg = plot.image
    pimg.set_file(str(pnmfn))
    pimg.format = PLOTSTUFF_FORMAT_PPM
    plot.plot('image')
    grid = plot.grid
    ra,dec,radius = job.calibration.get_center_radecradius()
    # Candidate grid/label steps (degrees, or arcmin-ish at the low end).
    steps = np.array([ 0.02, 0.05, 0.1, 0.2, 0.5,
                       1., 2., 5., 10., 15., 30., 60. ])
    # Pick the Dec label step closest (in log space) to the field radius.
    istep = np.argmin(np.abs(np.log(radius) - np.log(steps)))
    grid.declabelstep = steps[istep]
    nd = plot.count_dec_labels()
    # If fewer than two labels would fit, fall back to a finer step.
    if nd < 2:
        istep = max(istep-1, 0)
        grid.declabelstep = steps[istep]
    grid.decstep = grid.declabelstep
    # First pass: opaque labelled Dec lines; second pass: fainter
    # unlabelled lines at half the spacing.
    plot.alpha = 1.
    plot.plot('grid')
    plot.alpha = 0.7
    grid.declabelstep = 0
    grid.decstep /= 2.
    plot.plot('grid')
    grid.decstep = 0
    # RA
    # RA spacing is widened by 1/cos(dec) so lines look even on the sky.
    cosdec = np.cos(np.deg2rad(dec))
    istep = np.argmin(np.abs(np.log(radius/cosdec) - np.log(steps)))
    grid.ralabelstep = steps[istep] #min(istep+1, len(steps)-1)]
    nra = plot.count_ra_labels()
    if nra < 2:
        istep = max(istep-1, 0)
        grid.ralabelstep = steps[istep]
    grid.rastep = grid.ralabelstep
    plot.alpha = 1.
    plot.plot('grid')
    plot.alpha = 0.7
    grid.ralabelstep = 0
    grid.rastep /= 2.
    plot.plot('grid')
    plot.write()
    res = HttpResponse(open(outfn, 'rb'))
    res['Content-Type'] = 'image/jpeg'
    return res
def annotated_image(req, jobid=None, size='full'):
    """Render the job's image with catalog annotations overlaid.

    Shells out to plotann.py, choosing which catalogs to draw (Abell,
    Henry Draper, Tycho-2, NGC, bright stars) from the field radius.
    Returns a JPEG HttpResponse, or an error page if the plot fails.
    """
    job = get_object_or_404(Job, pk=jobid)
    ui = job.user_image
    img = ui.image
    if size == 'display':
        dimg = img.get_display_image(tempfiles=req.tempfiles)
        scale = float(dimg.width)/img.width
        img = dimg
    else:
        scale = 1.0
    wcsfn = job.get_wcs_file()
    pnmfn = img.get_pnm_path(tempfiles=req.tempfiles)
    annfn = get_temp_file(tempfiles=req.tempfiles)
    #datadir = os.path.join(os.path.dirname(os.path.dirname(settings.WEB_DIR)), 'data')
    catdir = settings.CAT_DIR
    uzcfn = os.path.join(catdir, 'uzc2000.fits')
    abellfn = os.path.join(catdir, 'abell-all.fits')
    #hdfn = os.path.join(os.path.dirname(os.path.dirname(settings.WEB_DIR)),
    #'net', 'hd.fits')
    hdfn = settings.HENRY_DRAPER_CAT
    tycho2fn = settings.TYCHO2_KD
    rad = job.calibration.get_radius()
    #logmsg('pnm file: %s' % pnmfn)
    # Dense catalogs are only drawn on small fields (radius in degrees).
    args = ['plotann.py --no-grid --toy -10',
            '--scale %s' % (str(scale)),]
    #if rad < 10.:
    if rad < 1.:
        #args.append('--uzccat %s' % uzcfn)
        args.append('--abellcat %s' % abellfn)
        if hdfn:
            args.append('--hdcat %s' % hdfn)
        if rad < 0.25 and tycho2fn:
            args.append('--tycho2cat %s' % tycho2fn)
    #if rad > 20:
    if rad > 10:
        args.append('--no-ngc')
    if rad > 30:
        args.append('--no-bright')
    cmd = ' '.join(args + ['%s %s %s' % (wcsfn, pnmfn, annfn)])
    #cmd = 'plot-constellations -w %s -i %s -o %s -s %s -N -C -B -c' % (wcsfn, pnmfn, annfn, str(scale))
    import sys
    # (rtn,out,err) = run_command('which plotann.py; echo pyp $PYTHONPATH; echo path $PATH; echo llp $LD_LIBRARY_PATH; echo "set"; set')
    # return HttpResponse('which: ' + out + err + '<br>sys.path<br>' + '<br>'.join(sys.path) +
    #                     "<br>PATH " + os.environ['PATH'] +
    #                     "<br>LLP " + os.environ['LD_LIBRARY_PATH'] +
    #                     "<br>sys.path " + ':'.join(sys.path) +
    #                     "<br>cmd " + cmd)
    # Propagate this process's module search path so the subprocess can
    # import the astrometry packages.
    os.environ['PYTHONPATH'] = ':'.join(sys.path)
    logmsg('Running: ' + cmd)
    #logmsg('PYTHONPATH: ' + os.environ['PYTHONPATH'])
    #logmsg('PATH: ' + os.environ['PATH'])
    #(rtn,out,err) = run_command('which plotann.py')
    #logmsg('which plotann.py: ' + out)
    (rtn, out, err) = run_command(cmd)
    if rtn:
        logmsg('out: ' + out)
        logmsg('err: ' + err)
        return HttpResponse('plot failed: ' + err + "<br><pre>" + out + "</pre><br><pre>" + err + "</pre>")
    res = HttpResponse(open(annfn, 'rb'))
    #res['Content-Type'] = 'image/png'
    # plotann.py produces jpeg by default
    res['Content-Type'] = 'image/jpeg'
    return res
def onthesky_image(req, zoom=None, calid=None):
    """Plot a calibration's sky footprint at one of four zoom levels.

    Level 0 is an all-sky Aitoff plot; levels 1-3 zoom progressively
    further in, adding Henry Draper / Tycho-2 stars at the deepest levels.
    Returns a PNG HttpResponse, or an error for an unknown zoom.
    """
    from astrometry.net.views.onthesky import plot_aitoff_wcs_outline
    from astrometry.net.views.onthesky import plot_wcs_outline
    from astrometry.util import util as anutil

    cal = get_object_or_404(Calibration, pk=calid)
    wcsfn = cal.get_wcs_file()
    plotfn = get_temp_file(tempfiles=req.tempfiles)
    logmsg('onthesky_image: cal', cal, 'wcs', wcsfn, 'plot', plotfn)

    wcs = anutil.Tan(wcsfn, 0)
    level = int(zoom)
    if level == 0:
        # Draw the zoom-in inset only when the field is small enough.
        show_inset = wcs.radius() < 15.
        plot_aitoff_wcs_outline(wcs, plotfn, zoom=show_inset)
    elif level == 1:
        show_inset = wcs.radius() < 1.5
        plot_wcs_outline(wcs, plotfn, zoom=show_inset)
    elif level == 2:
        show_inset = wcs.radius() < 0.15
        plot_wcs_outline(wcs, plotfn, width=3.6, grid=1, zoom=show_inset,
                         zoomwidth=0.36, hd=True, hd_labels=False,
                         tycho2=False)
        # hd=True is too cluttered at this level
    elif level == 3:
        plot_wcs_outline(wcs, plotfn, width=0.36, grid=0.1, zoom=False,
                         hd=True, hd_labels=True, tycho2=True)
    else:
        return HttpResponse('invalid zoom')
    response = HttpResponse(open(plotfn, 'rb'))
    response['Content-Type'] = 'image/png'
    return response
def galex_image(req, calid=None, size='full'):
    """Serve a GALEX sky image warped to this calibration's WCS.

    Rendered plots are cached in CachedFile, keyed by (size, calibration
    id), so each one is only generated once.  Returns a PNG HttpResponse.
    """
    from astrometry.util import util as anutil
    from astrometry.plot import plotstuff as ps
    from astrometry.net.galex_jpegs import plot_into_wcs
    cal = get_object_or_404(Calibration, pk=calid)
    key = 'galex_size%s_cal%i' % (size, cal.id)
    df = CachedFile.get(key)
    if df is None:
        # Cache miss: render the plot, then store it under the key.
        wcsfn = cal.get_wcs_file()
        plotfn = get_temp_file(tempfiles=req.tempfiles)
        if size == 'display':
            image = cal.job.user_image
            dimg = image.image.get_display_image(tempfiles=req.tempfiles)
            scale = float(dimg.width)/image.image.width
        else:
            scale = 1.0
        # logmsg('WCS filename:', wcsfn)
        # logmsg('Plot filename:', plotfn)
        # logmsg('Basedir:', settings.GALEX_JPEG_DIR)
        # logmsg('Scale:', scale)
        plot_into_wcs(wcsfn, plotfn, basedir=settings.GALEX_JPEG_DIR, scale=scale)
        # cache
        logmsg('Caching key "%s"' % key)
        df = CachedFile.add(key, plotfn)
    else:
        logmsg('Cache hit for key "%s"' % key)
    res = HttpResponse(open(df.get_path(), 'rb'))
    res['Content-Type'] = 'image/png'
    return res
def sdss_image(req, calid=None, size='full'):
    """Serve an SDSS sky image matching this calibration's WCS.

    On a cache miss this redirects to the legacysurvey.org cutout service
    (the local render-and-cache path is commented out below, so in
    practice most requests redirect).  A cache hit streams the stored
    JPEG.
    """
    cal = get_object_or_404(Calibration, pk=calid)
    key = 'sdss_size%s_cal%i' % (size, cal.id)
    df = CachedFile.get(key)
    if df is None:
        wcsfn = cal.get_wcs_file()
        from astrometry.util.util import Tan
        wcs = Tan(wcsfn)
        if size == 'display':
            image = cal.job.user_image
            dimg = image.image.get_display_image(tempfiles=req.tempfiles)
            scale = float(dimg.width)/image.image.width
            # Scale the WCS so the cutout matches the display image size.
            wcs = wcs.scale(scale)
        else:
            scale = 1.0
        # Encode the (possibly rescaled) TAN WCS as cutout-service params.
        urlargs = urlencode(dict(crval1='%.6f' % wcs.crval[0],
                                 crval2='%.6f' % wcs.crval[1],
                                 crpix1='%.2f' % wcs.crpix[0],
                                 crpix2='%.2f' % wcs.crpix[1],
                                 cd11='%.6g' % wcs.cd[0],
                                 cd12='%.6g' % wcs.cd[1],
                                 cd21='%.6g' % wcs.cd[2],
                                 cd22='%.6g' % wcs.cd[3],
                                 imagew='%i' % int(wcs.imagew),
                                 imageh='%i' % int(wcs.imageh)))
        url = 'http://legacysurvey.org/viewer/cutout-wcs/?layer=sdssco&' + urlargs
        return HttpResponseRedirect(url)
        #print('Retrieving:', url)
        #f = urlopen(url)
        #plotfn = get_temp_file()
        #plotfn, headers = urlretrieve(url, plotfn)
        #print('Headers:', headers)
        #plot_sdss_image(wcsfn, plotfn, scale)
        # cache
        # logmsg('Caching key "%s"' % key)
        # df = CachedFile.add(key, plotfn)
    else:
        logmsg('Cache hit for key "%s" -> %s' % (key, df.get_path()))
        f = open(df.get_path() , 'rb')
    res = HttpResponse(f)
    res['Content-Type'] = 'image/jpeg'
    return res
def red_green_image(req, job_id=None, size='full'):
    """Overlay detected image sources (red) and reference catalog stars
    (green) on the job's image.

    size='display' draws on the screen-sized image; anything else uses
    the full-resolution original.  Returns a streaming PNG response, or a
    plain "plot failed" page on error.
    """
    from astrometry.plot.plotstuff import (Plotstuff,
                                           PLOTSTUFF_FORMAT_PNG,
                                           PLOTSTUFF_FORMAT_PPM,
                                           #plotstuff_set_size_wcs,
                                           )
    job = get_object_or_404(Job, pk=job_id)
    ui = job.user_image
    sub = ui.submission
    img = ui.image
    if size == 'display':
        dimg = img.get_display_image(tempfiles=req.tempfiles)
        scale = float(dimg.width)/img.width
        img = dimg
    else:
        scale = 1.0
    axyfn = job.get_axy_file()
    wcsfn = job.get_wcs_file()
    rdlsfn = job.get_rdls_file()
    pnmfn = img.get_pnm_path(tempfiles=req.tempfiles)
    exfn = get_temp_file(tempfiles=req.tempfiles)
    try:
        plot = Plotstuff()
        plot.wcs_file = wcsfn
        plot.outformat = PLOTSTUFF_FORMAT_PNG
        plot.outfn = exfn
        plot.scale_wcs(scale)
        plot.set_size_from_wcs()
        #plotstuff_set_size_wcs(plot.pargs)
        # plot image
        pimg = plot.image
        pimg.set_file(str(pnmfn))
        pimg.format = PLOTSTUFF_FORMAT_PPM
        plot.color = 'white'
        plot.alpha = 1.
        # SExtractor output uses different column names in the xy file.
        if sub.use_sextractor:
            xy = plot.xy
            xy.xcol = 'X_IMAGE'
            xy.ycol = 'Y_IMAGE'
        plot.plot('image')
        # plot red: the detected sources from the axy file
        xy = plot.xy
        if hasattr(img, 'sourcelist'):
            # set xy offsets for source lists
            fits = img.sourcelist.get_fits_table(tempfiles=req.tempfiles)
            #xy.xoff = int(fits.x.min())
            #xy.yoff = int(fits.y.min())
            xy.xoff = 0.
            xy.yoff = 0.
        xy.set_filename(str(axyfn))
        xy.scale = scale
        plot.color = 'red'
        xy.nobjs = 200
        plot.lw = 2.
        plot.markersize = 6
        plot.plot('xy')
        # plot green: reference catalog stars from the rdls file
        rd = plot.radec
        rd.set_filename(str(rdlsfn))
        plot.color = 'green'
        plot.markersize = 4
        plot.plot('radec')
        plot.write()
    except Exception:
        # BUG FIX: this was a bare 'except:', which also swallowed
        # SystemExit/KeyboardInterrupt and discarded the error entirely.
        # Catch Exception and log the traceback, as extraction_image() does.
        import traceback
        traceback.print_exc()
        return HttpResponse("plot failed")
    res = StreamingHttpResponse(open(exfn, 'rb'))
    res['Content-Type'] = 'image/png'
    return res
def extraction_image(req, job_id=None, size='full'):
    """Render the job's image with its extracted sources marked.

    Sources are drawn in three brightness tiers: the 50 brightest with
    large opaque markers, the next 200 slightly fainter, and the next 250
    fainter still.  Returns a PNG HttpResponse, or "plot failed" on error.
    """
    from astrometry.plot.plotstuff import (Plotstuff,
                                           PLOTSTUFF_FORMAT_PNG,
                                           PLOTSTUFF_FORMAT_PPM)
    job = get_object_or_404(Job, pk=job_id)
    ui = job.user_image
    sub = ui.submission
    img = ui.image
    if size == 'display':
        dimg = img.get_display_image(tempfiles=req.tempfiles)
        scale = float(dimg.width)/img.width
        img = dimg
    else:
        scale = 1.0
    axyfn = job.get_axy_file()
    pnmfn = img.get_pnm_path(tempfiles=req.tempfiles)
    exfn = get_temp_file(tempfiles=req.tempfiles)
    try:
        plot = Plotstuff()
        plot.size = [img.width, img.height]
        plot.outformat = PLOTSTUFF_FORMAT_PNG
        plot.outfn = exfn
        # plot image
        pimg = plot.image
        pimg.set_file(str(pnmfn))
        pimg.format = PLOTSTUFF_FORMAT_PPM
        plot.plot('image')
        # plot sources
        xy = plot.xy
        if hasattr(img, 'sourcelist'):
            # set xy offsets for source lists
            fits = img.sourcelist.get_fits_table(tempfiles=req.tempfiles)
            #xy.xoff = int(fits.x.min())
            #xy.yoff = int(fits.y.min())
            xy.xoff = xy.yoff = 1.
        # SExtractor output uses different column names in the xy file.
        if sub.use_sextractor:
            xy.xcol = 'X_IMAGE'
            xy.ycol = 'Y_IMAGE'
        xy.set_filename(str(axyfn))
        xy.scale = scale
        plot.color = 'red'
        # plot 50 brightest
        xy.firstobj = 0
        xy.nobjs = 50
        plot.lw = 2.
        plot.markersize = 6
        plot.plot('xy')
        # plot 200 other next brightest sources
        xy.firstobj = 50
        xy.nobjs = 250
        plot.alpha = 0.9
        plot.lw = 1.
        plot.markersize = 4
        plot.plot('xy')
        # plot 250 other next brightest sources
        xy.firstobj = 250
        xy.nobjs = 500
        plot.alpha = 0.5
        plot.lw = 1.
        plot.markersize = 3
        plot.plot('xy')
        plot.write()
    # NOTE(review): bare 'except' also catches SystemExit/KeyboardInterrupt;
    # narrowing to 'except Exception' would be safer.
    except:
        import traceback
        traceback.print_exc()
        return HttpResponse("plot failed")
    res = HttpResponse(open(exfn, 'rb'))
    res['Content-Type'] = 'image/png'
    return res
# 2MASS:
# Has a SIA service, but does not make mosaics.
# Documented here:
# http://irsa.ipac.caltech.edu/applications/2MASS/IM/docs/siahelp.html
# Eg:
# http://irsa.ipac.caltech.edu/cgi-bin/2MASS/IM/nph-im_sia?POS=187.03125,16.9577633&SIZE=1&type=ql
# WISE:
# -IPAC/IRSA claims to have VO services
# -elaborate javascripty interface
class ShowImagesForm(forms.Form):
    """Status filters for image listings; each checkbox auto-submits the form."""
    calibrated = forms.BooleanField(
        required=False, initial=True,
        widget=forms.CheckboxInput(attrs={'onClick': 'this.form.submit();'}))
    processing = forms.BooleanField(
        required=False, initial=False,
        widget=forms.CheckboxInput(attrs={'onClick': 'this.form.submit();'}))
    failed = forms.BooleanField(
        required=False, initial=False,
        widget=forms.CheckboxInput(attrs={'onClick': 'this.form.submit();'}))
def index(req, images=None,
          template_name='user_image/index.html', context=None):
    """Paginated image listing with calibrated/processing/failed filters.

    Args:
        req: the HttpRequest.
        images: optional UserImage queryset; defaults to the public images
            visible to req.user.
        template_name: template to render.
        context: optional dict of extra template context entries.
    """
    # BUG FIX: 'context' previously defaulted to a shared mutable dict
    # ({}), which Python evaluates once at definition time, so entries
    # leaked between requests that omitted the argument.
    if context is None:
        context = {}
    if images is None:
        images = UserImage.objects.public_only(req.user)
    form_data = req.GET.copy()
    if not (req.GET.get('calibrated')
            or req.GET.get('processing')
            or req.GET.get('failed')):
        # No status box checked at all: default to showing calibrated.
        form_data['calibrated'] = 'on'
    form = ShowImagesForm(form_data)
    calibrated = True
    processing = False
    failed = False
    if form.is_valid():
        calibrated = form.cleaned_data.get('calibrated')
        processing = form.cleaned_data.get('processing')
        failed = form.cleaned_data.get('failed')
    # Job status codes used as filters: 'S' <-> calibrated,
    # 'F' <-> failed, '' <-> still processing.
    stats = ['S', 'F', '']
    if calibrated is False:
        stats.remove('S')
    if processing is False:
        stats.remove('')
    if failed is False:
        stats.remove('F')
    if len(stats) < 3:
        images = images.filter(jobs__status__in=stats)
    #print 'index 1:', images.query
    # the public_only() view already sorts them
    #images = images.order_by('-submission__submitted_on')
    #print 'index 2:', images.query
    page_number = req.GET.get('page', 1)
    page = get_page(images, 27, page_number)
    context.update({
        'image_page': page,
        'show_images_form': form,
    })
    return render(req, template_name, context)
def index_tag(req):
    """List public images filtered by a tag query.

    With 'exact' set, a case-insensitive lookup is tried first, falling
    back on a case-sensitive disambiguation when several tags match;
    otherwise a substring (icontains) match is used.
    """
    images = UserImage.objects.public_only(req.user)
    #print 'index_tag 1:', images.query
    form = TagSearchForm(req.GET)
    tag = None
    if form.is_valid():
        query = form.cleaned_data.get('query')
        exact = form.cleaned_data.get('exact')
        if query:
            if exact:
                try:
                    tags = Tag.objects.filter(text__iexact=query)
                    # NOTE(review): if no tag matches, tags[0] below raises
                    # IndexError, which this try only catches as
                    # Tag.DoesNotExist -- verify.
                    if tags.count() > 1:
                        # More than one match: do case-sensitive query
                        ctags = Tag.objects.filter(text=query)
                        # note, 'text' is the primary key, so >1 shouldn't be possible
                        if len(ctags) == 1:
                            tag = ctags[0]
                        else:
                            # Uh, multiple case-insensitive matches but no case-sens
                            # matches.  Arbitrarily choose first case-insens
                            tag = tags[0]
                    else:
                        tag = tags[0]
                    images = images.filter(tags=tag)
                except Tag.DoesNotExist:
                    images = UserImage.objects.none()
            else:
                images = images.filter(tags__text__icontains=query)
            images = images.distinct()
    #print 'index_tag 2:', images.query
    context = {
        'tag_search_form': form,
        'tag': tag,
    }
    return index(req, images, 'user_image/index_tag.html', context)
class LocationSearchForm(forms.Form):
    """Search-by-location parameters: RA, Dec and radius, all in degrees."""
    ra = forms.FloatField(widget=forms.TextInput(attrs={'size': '5'}))
    dec = forms.FloatField(widget=forms.TextInput(attrs={'size': '5'}))
    radius = forms.FloatField(widget=forms.TextInput(attrs={'size': '5'}))
def index_location(req):
    """List public images whose calibration center lies within a given
    RA/Dec/radius cone (all inputs in degrees)."""
    images = UserImage.objects.public_only(req.user)
    form = LocationSearchForm(req.GET)
    if form.is_valid():
        ra = form.cleaned_data.get('ra', 0)
        dec = form.cleaned_data.get('dec', 0)
        radius = form.cleaned_data.get('radius', 0)
        if ra and dec and radius:
            # Convert the search center to a unit vector and compare
            # squared 3-D distance against the radius (in radians) via a
            # raw SQL WHERE clause on the Calibration table.
            ra *= math.pi/180
            dec *= math.pi/180
            tempr = math.cos(dec)
            x = tempr*math.cos(ra)
            y = tempr*math.sin(ra)
            z = math.sin(dec)
            r = radius/180*math.pi
            # HACK - there's probably a better way to do this..?
            where = ('(x-(%(x)f))*(x-(%(x)f))+(y-(%(y)f))*(y-(%(y)f))+(z-(%(z)f))*(z-(%(z)f)) < (%(r)f)*(%(r)f)'
                     % dict(x=x,y=y,z=z,r=r))
            # Also require the calibration's own radius to fit in the cone.
            where2 = '(r <= %f)' % r
            cals = Calibration.objects.extra(where=[where,where2])
            images = images.filter(jobs__calibration__in=cals)
            images = images.distinct()
    context = {
        'location_search_form': form,
    }
    return index(req, images, 'user_image/index_location.html', context)
def index_nearby(req, user_image_id=None):
    """List images whose sky positions neighbour the given image."""
    center_image = get_object_or_404(UserImage, pk=user_image_id)
    neighbours = center_image.get_neighbouring_user_images()
    return index(req, neighbours, 'user_image/index_nearby.html',
                 {'image': center_image})
def index_recent(req):
    """Show the nine most recent visible images."""
    recent = UserImage.objects.all_visible()[:9]  # already submission-ordered
    return index(req, recent, template_name='user_image/index_recent.html')
def index_all(req):
    """Show every visible image, newest submissions first."""
    everything = UserImage.objects.all_visible().order_by('-submission__submitted_on')
    return index(req, everything, template_name='user_image/index_all.html')
def index_user(req, user_id=None):
    """Show one user's visible images, newest submissions first."""
    display_user = get_object_or_404(User, pk=user_id)
    user_images = display_user.user_images.all_visible() \
                              .order_by('-submission__submitted_on')
    return index(req, user_images,
                 template_name='user_image/index_user.html',
                 context={'display_user': display_user})
def index_by_user(req):
    """Render the browse-images-by-user page listing all visible users."""
    # TODO: make this ordering case insensitive
    users = User.objects.all_visible().order_by('profile__display_name', 'id')
    return render(req, 'user_image/index_by_user.html', {'users': users})
def index_album(req, album_id=None):
    """Show the visible images belonging to one album."""
    album = get_object_or_404(Album, pk=album_id)
    visible = album.user_images.all_visible()
    return index(req, visible,
                 template_name='user_image/index_album.html',
                 context={'album': album})
def image_set(req, category, id):
    """Render an image-set page for a user, album, or tag.

    Unknown categories fall back to 'user'.
    """
    models_by_category = {
        'user': User,
        'album': Album,
        'tag': Tag,
    }
    if category not in models_by_category:
        category = 'user'  # default category
    owner = get_object_or_404(models_by_category[category], pk=id)
    titles = {
        'user': 'Submitted by User %s' % owner.pk,
        'album': 'Album: %s' % owner.pk,
        'tag': 'Tag: %s' % owner.pk,
    }
    context = {
        # pass the bound queryset method; the template invokes it
        'images': owner.user_images.all,
        'image_set_title': titles[category],
    }
    return render(req, 'user_image/image_set.html', context)
def wcs_file(req, jobid=None):
    """Download the WCS FITS file produced by a job."""
    job = get_object_or_404(Job, pk=jobid)
    response = HttpResponse(open(job.get_wcs_file(), 'rb'))
    response['Content-Type'] = 'application/fits'
    response['Content-Disposition'] = 'attachment; filename=wcs.fits'
    return response
def rdls_file(req, jobid=None):
    """Download the reference-star RA/Dec list (rdls) FITS file for a job."""
    job = get_object_or_404(Job, pk=jobid)
    response = HttpResponse(open(job.get_rdls_file(), 'rb'))
    response['Content-Type'] = 'application/fits'
    response['Content-Disposition'] = 'attachment; filename=rdls.fits'
    return response
def axy_file(req, jobid=None):
    """Download the detected-source (axy) FITS file for a job."""
    job = get_object_or_404(Job, pk=jobid)
    response = HttpResponse(open(job.get_axy_file(), 'rb'))
    response['Content-Type'] = 'application/fits'
    response['Content-Disposition'] = 'attachment; filename=axy.fits'
    return response
def corr_file(req, jobid=None):
    """Download the source/reference correspondences (corr) FITS file."""
    job = get_object_or_404(Job, pk=jobid)
    response = HttpResponse(open(job.get_corr_file(), 'rb'))
    response['Content-Type'] = 'application/fits'
    response['Content-Disposition'] = 'attachment; filename=corr.fits'
    return response
def new_fits_file(req, jobid=None):
    """Download the original image as FITS with the solved WCS headers
    merged in.

    Non-FITS uploads are first converted via image2pnm.py + an-pnmtofits;
    the external 'new-wcs' tool then merges the WCS cards into the header.
    """
    job = get_object_or_404(Job, pk=jobid)
    wcsfn = job.get_wcs_file()
    img = job.user_image.image
    df = img.disk_file
    infn = df.get_path()
    if df.is_fits_image():
        fitsinfn = infn
    else:
        ## FIXME -- could convert other formats to FITS...
        pnmfn = get_temp_file(tempfiles=req.tempfiles)
        fitsinfn = get_temp_file(tempfiles=req.tempfiles)
        cmd = 'image2pnm.py -i %s -o %s && an-pnmtofits %s > %s' % (infn, pnmfn, pnmfn, fitsinfn)
        logmsg('Running: ' + cmd)
        (rtn, out, err) = run_command(cmd)
        if rtn:
            logmsg('out: ' + out)
            logmsg('err: ' + err)
            return HttpResponse('image2pnm.py failed: out ' + out + ', err ' + err)
    outfn = get_temp_file(tempfiles=req.tempfiles)
    # Merge the solved WCS into the FITS header (-d deletes existing cards).
    cmd = 'new-wcs -i %s -w %s -o %s -d' % (fitsinfn, wcsfn, outfn)
    logmsg('Running: ' + cmd)
    (rtn, out, err) = run_command(cmd)
    if rtn:
        logmsg('out: ' + out)
        logmsg('err: ' + err)
        return HttpResponse('plot failed: out ' + out + ', err ' + err)
    res = HttpResponse(open(outfn, 'rb'))
    res['Content-Type'] = 'application/fits'
    res['Content-Length'] = file_size(outfn)
    res['Content-Disposition'] = 'attachment; filename=new-image.fits'
    return res
def kml_file(req, jobid=None):
    """Download a KMZ (zipped KML + warped PNG) of the solved image,
    produced by the external wcs2kml tool."""
    #return HttpResponse('KMZ requests are off for now. Post at https://groups.google.com/forum/#!forum/astrometry for help.')
    import tempfile
    import PIL.Image
    job = get_object_or_404(Job, pk=jobid)
    wcsfn = job.get_wcs_file()
    img = job.user_image.image
    df = img.disk_file
    pnmfn = img.get_pnm_path(tempfiles=req.tempfiles)
    imgfn = get_temp_file(tempfiles=req.tempfiles)
    # wcs2kml wants a PNG input; convert the PNM via PIL.
    image = PIL.Image.open(pnmfn)
    image.save(imgfn, 'PNG')
    # Work in a scratch dir so the zip gets flat member names; cleaned up
    # later via req.tempdirs.
    dirnm = tempfile.mkdtemp()
    req.tempdirs.append(dirnm)
    warpedimgfn = 'image.png'
    kmlfn = 'doc.kml'
    outfn = get_temp_file(tempfiles=req.tempfiles)
    cmd = ('cd %(dirnm)s'
           '; %(wcs2kml)s '
           '--input_image_origin_is_upper_left '
           '--fitsfile=%(wcsfn)s '
           '--imagefile=%(imgfn)s '
           '--kmlfile=%(kmlfn)s '
           '--outfile=%(warpedimgfn)s '
           '; zip -j - %(warpedimgfn)s %(kmlfn)s > %(outfn)s ' %
           dict(dirnm=dirnm, wcsfn=wcsfn, imgfn=imgfn, kmlfn=kmlfn,
                wcs2kml=settings.WCS2KML,
                warpedimgfn=warpedimgfn, outfn=outfn))
    logmsg('Running: ' + cmd)
    (rtn, out, err) = run_command(cmd)
    if rtn:
        logmsg('out: ' + out)
        logmsg('err: ' + err)
        return HttpResponse('kml generation failed: ' + err)
    res = HttpResponse(open(outfn, 'rb'))
    res['Content-Type'] = 'application/x-zip-compressed'
    res['Content-Length'] = file_size(outfn)
    res['Content-Disposition'] = 'attachment; filename=image.kmz'
    return res
class ImageSearchForm(forms.Form):
    """Combined image search form: by tag, user, sky location, or a
    nearby image, plus calibrated/processing/failed status filters."""
    SEARCH_CATEGORIES = (
        ('tag', 'By Tag'),
        ('user', 'By User'),
        ('location', 'By Location'),
        ('image', 'By Image'),
    )
    search_category = forms.ChoiceField(choices=SEARCH_CATEGORIES,
                                        initial='tag', required=False,
                                        widget=forms.HiddenInput())
    tags = forms.CharField(required=False,
                           widget=forms.TextInput(attrs={'autocomplete': 'off'}))
    user = forms.CharField(required=False,
                           widget=forms.TextInput(attrs={'autocomplete': 'off'}))
    image = forms.IntegerField(required=False, widget=forms.HiddenInput())
    ra = forms.FloatField(required=False,
                          widget=forms.TextInput(attrs={'size': '5'}))
    dec = forms.FloatField(required=False,
                           widget=forms.TextInput(attrs={'size': '5'}))
    radius = forms.FloatField(required=False,
                              widget=forms.TextInput(attrs={'size': '5'}))
    calibrated = forms.BooleanField(initial=True, required=False)
    processing = forms.BooleanField(initial=False, required=False)
    failed = forms.BooleanField(initial=False, required=False)

    def clean(self):
        """Default the search category to 'tag' when none was submitted."""
        if not self.cleaned_data.get('search_category'):
            self.cleaned_data['search_category'] = 'tag'
        return self.cleaned_data
def unhide(req, user_image_id):
    """Make a hidden image visible again; only the owner may do so.

    Non-owners are silently redirected back to the image page.
    """
    image = get_object_or_404(UserImage, pk=user_image_id)
    viewer_owns_image = req.user.is_authenticated and req.user == image.user
    if viewer_owns_image:
        image.unhide()
    return redirect('user_image', user_image_id)
def hide(req, user_image_id):
    """Hide an image from public listings; only the owner may do so.

    Non-owners are silently redirected back to the image page.
    """
    image = get_object_or_404(UserImage, pk=user_image_id)
    viewer_owns_image = req.user.is_authenticated and req.user == image.user
    if viewer_owns_image:
        image.hide()
    return redirect('user_image', user_image_id)
def search(req):
    """Unified image search page: by tag, user, sky location, or nearby
    image, with calibrated/processing/failed status filters applied on top.
    """
    if req.GET:
        form_data = req.GET.copy()
        if not (req.GET.get('calibrated')
                or req.GET.get('processing')
                or req.GET.get('failed')):
            # No status checkbox submitted: default to calibrated only.
            form_data['calibrated'] = 'on'
    else:
        form_data = None
    form = ImageSearchForm(form_data)
    context = {}
    images = UserImage.objects.all_visible()
    page_number = req.GET.get('page',1)
    category = 'tag'
    calibrated = True
    processing = False
    failed = False
    if form.is_valid():
        calibrated = form.cleaned_data.get('calibrated')
        processing = form.cleaned_data.get('processing')
        failed = form.cleaned_data.get('failed')
        category = form.cleaned_data.get('search_category');
        if category == 'tag':
            tags = form.cleaned_data.get('tags','')
            if tags.strip():
                images = UserImage.objects.none()
                tag_objs = []
                # Comma-separated tag list; deduplicate before querying.
                tags = [t.strip() for t in tags.split(',')]
                tags = list(set(tags)) # remove duplicate tags
                images = UserImage.objects.all_visible().filter(tags__text__in=tags).distinct()
                tag_objs = Tag.objects.filter(text__in=tags)
                context['tags'] = tag_objs
        elif category == 'user':
            username = form.cleaned_data.get('user','')
            if username.strip():
                user = User.objects.filter(profile__display_name=username)[:1]
                images = UserImage.objects.none()
                if len(user) > 0:
                    images = UserImage.objects.all_visible().filter(user=user)
                    context['display_user'] = user[0]
                else:
                    # No exact match: offer users whose names share the prefix.
                    context['display_users'] = User.objects.filter(profile__display_name__startswith=username)[:5]
        elif category == 'location':
            ra = form.cleaned_data.get('ra', 0)
            dec = form.cleaned_data.get('dec', 0)
            radius = form.cleaned_data.get('radius', 0)
            if ra and dec and radius:
                # Cone search: convert the center to a unit vector and
                # compare squared 3-D distance in a raw SQL WHERE clause
                # (same approach as index_location()).
                ra *= math.pi/180
                dec *= math.pi/180
                tempr = math.cos(dec)
                x = tempr*math.cos(ra)
                y = tempr*math.sin(ra)
                z = math.sin(dec)
                r = radius/180*math.pi
                # HACK - there's probably a better way to do this..?
                where = ('(x-(%(x)f))*(x-(%(x)f))+(y-(%(y)f))*(y-(%(y)f))+(z-(%(z)f))*(z-(%(z)f)) < (%(r)f)*(%(r)f)'
                         % dict(x=x,y=y,z=z,r=r))
                where2= '(r <= %f)' % r
                cals = Calibration.objects.extra(where=[where,where2])
                images = UserImage.objects.filter(jobs__calibration__in=cals)
        elif category == 'image':
            image_id = form.cleaned_data.get('image')
            if image_id:
                image = get_object_or_404(UserImage, pk=image_id)
                context['image'] = image
                images = image.get_neighbouring_user_images(limit=None)
    # Apply the status filters by exclusion ('S'=calibrated, 'F'=failed,
    # NULL=still processing).
    if calibrated is False:
        images = images.exclude(jobs__status='S')
    if processing is False:
        images = images.exclude(jobs__status__isnull=True)
    if failed is False:
        images = images.exclude(jobs__status='F')
    page = get_page(images, 4*5, page_number)
    context.update({'form': form,
                    'search_category': category,
                    'image_page': page})
    return render(req, 'user_image/search.html', context)
if __name__ == '__main__':
    # Developer scratch code: times a neighbouring-images lookup and then
    # (unreachably, past the sys.exit) exercises a view through the Django
    # test client.  Not used in production.
    # class Duck(object):
    #     pass
    # req = Duck()
    # onthesky_image(req, zoom=0, calid=1)
    loc = SkyLocation()
    loc.nside = 16
    loc.healpix = 889
    import time
    t0 = time.time()
    locs = loc.get_neighbouring_user_images()
    t1 = time.time()
    locs = locs[:6]
    t2 = time.time()
    print(len(locs), 'locations found')
    t3 = time.time()
    print('get_neighbouring_user_image:', t1-t0)
    print('limit:', t2-t1)
    print('count:', t3-t2)
    import sys
    sys.exit(0)
    # Everything below is dead code kept as a second debugging snippet.
    from django.test import Client
    c = Client()
    #r = c.get('/user_images/2676353')
    #r = c.get('/extraction_image_full/4005556')
    r = c.get('/red_green_image_display/4515804')
    #print(r)
    with open('out.html', 'wb') as f:
        for x in r:
            f.write(x)
| 35.556308 | 361 | 0.588588 |
152ab154bcc7233b46d915a9ccf8cc32792ecd7a | 2,497 | py | Python | src/prefect/utilities/aws.py | andykawabata/prefect | a11061c19847beeea26616ccaf4b404ad939676b | [
"ECL-2.0",
"Apache-2.0"
] | 2 | 2020-09-28T16:24:02.000Z | 2020-10-08T17:08:19.000Z | src/prefect/utilities/aws.py | andykawabata/prefect | a11061c19847beeea26616ccaf4b404ad939676b | [
"ECL-2.0",
"Apache-2.0"
] | 7 | 2021-06-28T20:33:49.000Z | 2022-02-27T10:58:04.000Z | src/prefect/utilities/aws.py | yalaudah/prefect | 2f7f92c39a4575119c3268b0415841c6aca5df60 | [
"Apache-2.0"
] | 2 | 2021-03-03T17:46:43.000Z | 2021-03-05T15:39:35.000Z | """
Utility functions for interacting with AWS.
"""
import prefect
import boto3
from typing import Any
def get_boto_client(
    resource: str, credentials: dict = None, use_session: bool = False, **kwargs: Any
) -> "boto3.client":
    """
    Utility function for loading boto3 client objects from a given set of credentials.

    Args:
        - resource (str): the name of the resource to retrieve a client for
        - credentials (dict, optional): a dictionary of AWS credentials used to
            initialize the Client; if not provided, will attempt to load the
            Client using ambient environment settings
        - use_session (bool, optional): a boolean specifying whether to load
            this client using a session or not; defaults to `False`
        - **kwargs (Any, optional): additional keyword arguments to pass to boto3

    Returns:
        - Client: an initialized and authenticated boto3 Client
    """
    if credentials:
        # Explicit credentials: ACCESS_KEY / SECRET_ACCESS_KEY are required,
        # SESSION_TOKEN is optional.
        aws_access_key = credentials["ACCESS_KEY"]
        aws_secret_access_key = credentials["SECRET_ACCESS_KEY"]
        aws_session_token = credentials.get("SESSION_TOKEN")
    else:
        # Fall back on credentials stored in the Prefect context's secrets.
        ctx_credentials = prefect.context.get("secrets", {}).get("AWS_CREDENTIALS", {})
        aws_access_key = ctx_credentials.get("ACCESS_KEY")
        aws_secret_access_key = ctx_credentials.get("SECRET_ACCESS_KEY")
        aws_session_token = ctx_credentials.get("SESSION_TOKEN")

    # Resolved credentials take precedence over kwarg-supplied values; pop the
    # kwarg variants so they are not passed to boto3 twice.  (This block was
    # previously duplicated across the session / non-session branches.)
    auth_kwargs = dict(
        aws_access_key_id=aws_access_key or kwargs.pop("aws_access_key_id", None),
        aws_secret_access_key=aws_secret_access_key
        or kwargs.pop("aws_secret_access_key", None),
        aws_session_token=aws_session_token or kwargs.pop("aws_session_token", None),
    )

    if use_session:
        # see https://boto3.amazonaws.com/v1/documentation/api/latest/guide/resources.html?#multithreading-multiprocessing # noqa
        session = boto3.session.Session()
        return session.client(resource, **auth_kwargs, **kwargs)
    return boto3.client(resource, **auth_kwargs, **kwargs)
| 39.015625 | 130 | 0.671205 |
5fc329cbe9ccad077c54b1d0d7f069c1860e130f | 18,056 | py | Python | google/cloud/sql/v1/sql-v1-py/google/cloud/sql/__init__.py | googleapis/googleapis-gen | d84824c78563d59b0e58d5664bfaa430e9ad7e7a | [
"Apache-2.0"
] | 7 | 2021-02-21T10:39:41.000Z | 2021-12-07T07:31:28.000Z | google/cloud/sql/v1/sql-v1-py/google/cloud/sql/__init__.py | googleapis/googleapis-gen | d84824c78563d59b0e58d5664bfaa430e9ad7e7a | [
"Apache-2.0"
] | 6 | 2021-02-02T23:46:11.000Z | 2021-11-15T01:46:02.000Z | google/cloud/sql/v1/sql-v1-py/google/cloud/sql/__init__.py | googleapis/googleapis-gen | d84824c78563d59b0e58d5664bfaa430e9ad7e7a | [
"Apache-2.0"
] | 4 | 2021-01-28T23:25:45.000Z | 2021-08-30T01:55:16.000Z | # -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from google.cloud.sql_v1.services.sql_backup_runs_service.client import SqlBackupRunsServiceClient
from google.cloud.sql_v1.services.sql_backup_runs_service.async_client import SqlBackupRunsServiceAsyncClient
from google.cloud.sql_v1.services.sql_connect_service.client import SqlConnectServiceClient
from google.cloud.sql_v1.services.sql_connect_service.async_client import SqlConnectServiceAsyncClient
from google.cloud.sql_v1.services.sql_databases_service.client import SqlDatabasesServiceClient
from google.cloud.sql_v1.services.sql_databases_service.async_client import SqlDatabasesServiceAsyncClient
from google.cloud.sql_v1.services.sql_flags_service.client import SqlFlagsServiceClient
from google.cloud.sql_v1.services.sql_flags_service.async_client import SqlFlagsServiceAsyncClient
from google.cloud.sql_v1.services.sql_instance_names_service.client import SqlInstanceNamesServiceClient
from google.cloud.sql_v1.services.sql_instance_names_service.async_client import SqlInstanceNamesServiceAsyncClient
from google.cloud.sql_v1.services.sql_instances_service.client import SqlInstancesServiceClient
from google.cloud.sql_v1.services.sql_instances_service.async_client import SqlInstancesServiceAsyncClient
from google.cloud.sql_v1.services.sql_operations_service.client import SqlOperationsServiceClient
from google.cloud.sql_v1.services.sql_operations_service.async_client import SqlOperationsServiceAsyncClient
from google.cloud.sql_v1.services.sql_ssl_certs_service.client import SqlSslCertsServiceClient
from google.cloud.sql_v1.services.sql_ssl_certs_service.async_client import SqlSslCertsServiceAsyncClient
from google.cloud.sql_v1.services.sql_tiers_service.client import SqlTiersServiceClient
from google.cloud.sql_v1.services.sql_tiers_service.async_client import SqlTiersServiceAsyncClient
from google.cloud.sql_v1.services.sql_users_service.client import SqlUsersServiceClient
from google.cloud.sql_v1.services.sql_users_service.async_client import SqlUsersServiceAsyncClient
from google.cloud.sql_v1.types.cloud_sql_backup_runs import BackupRun
from google.cloud.sql_v1.types.cloud_sql_backup_runs import BackupRunsListResponse
from google.cloud.sql_v1.types.cloud_sql_backup_runs import SqlBackupRunsDeleteRequest
from google.cloud.sql_v1.types.cloud_sql_backup_runs import SqlBackupRunsGetRequest
from google.cloud.sql_v1.types.cloud_sql_backup_runs import SqlBackupRunsInsertRequest
from google.cloud.sql_v1.types.cloud_sql_backup_runs import SqlBackupRunsListRequest
from google.cloud.sql_v1.types.cloud_sql_backup_runs import SqlBackupKind
from google.cloud.sql_v1.types.cloud_sql_backup_runs import SqlBackupRunStatus
from google.cloud.sql_v1.types.cloud_sql_backup_runs import SqlBackupRunType
from google.cloud.sql_v1.types.cloud_sql_connect import ConnectSettings
from google.cloud.sql_v1.types.cloud_sql_connect import GenerateEphemeralCertRequest
from google.cloud.sql_v1.types.cloud_sql_connect import GenerateEphemeralCertResponse
from google.cloud.sql_v1.types.cloud_sql_connect import GetConnectSettingsRequest
from google.cloud.sql_v1.types.cloud_sql_databases import DatabasesListResponse
from google.cloud.sql_v1.types.cloud_sql_databases import SqlDatabasesDeleteRequest
from google.cloud.sql_v1.types.cloud_sql_databases import SqlDatabasesGetRequest
from google.cloud.sql_v1.types.cloud_sql_databases import SqlDatabasesInsertRequest
from google.cloud.sql_v1.types.cloud_sql_databases import SqlDatabasesListRequest
from google.cloud.sql_v1.types.cloud_sql_databases import SqlDatabasesUpdateRequest
from google.cloud.sql_v1.types.cloud_sql_flags import Flag
from google.cloud.sql_v1.types.cloud_sql_flags import FlagsListResponse
from google.cloud.sql_v1.types.cloud_sql_flags import SqlFlagsListRequest
from google.cloud.sql_v1.types.cloud_sql_flags import SqlFlagType
from google.cloud.sql_v1.types.cloud_sql_instances import BinLogCoordinates
from google.cloud.sql_v1.types.cloud_sql_instances import CloneContext
from google.cloud.sql_v1.types.cloud_sql_instances import DatabaseInstance
from google.cloud.sql_v1.types.cloud_sql_instances import DemoteMasterContext
from google.cloud.sql_v1.types.cloud_sql_instances import FailoverContext
from google.cloud.sql_v1.types.cloud_sql_instances import InstancesCloneRequest
from google.cloud.sql_v1.types.cloud_sql_instances import InstancesDemoteMasterRequest
from google.cloud.sql_v1.types.cloud_sql_instances import InstancesExportRequest
from google.cloud.sql_v1.types.cloud_sql_instances import InstancesFailoverRequest
from google.cloud.sql_v1.types.cloud_sql_instances import InstancesImportRequest
from google.cloud.sql_v1.types.cloud_sql_instances import InstancesListResponse
from google.cloud.sql_v1.types.cloud_sql_instances import InstancesListServerCasResponse
from google.cloud.sql_v1.types.cloud_sql_instances import InstancesRestoreBackupRequest
from google.cloud.sql_v1.types.cloud_sql_instances import InstancesRotateServerCaRequest
from google.cloud.sql_v1.types.cloud_sql_instances import InstancesTruncateLogRequest
from google.cloud.sql_v1.types.cloud_sql_instances import OnPremisesConfiguration
from google.cloud.sql_v1.types.cloud_sql_instances import ReplicaConfiguration
from google.cloud.sql_v1.types.cloud_sql_instances import RestoreBackupContext
from google.cloud.sql_v1.types.cloud_sql_instances import RotateServerCaContext
from google.cloud.sql_v1.types.cloud_sql_instances import SqlExternalSyncSettingError
from google.cloud.sql_v1.types.cloud_sql_instances import SqlInstancesAddServerCaRequest
from google.cloud.sql_v1.types.cloud_sql_instances import SqlInstancesCloneRequest
from google.cloud.sql_v1.types.cloud_sql_instances import SqlInstancesCreateEphemeralCertRequest
from google.cloud.sql_v1.types.cloud_sql_instances import SqlInstancesDeleteRequest
from google.cloud.sql_v1.types.cloud_sql_instances import SqlInstancesDemoteMasterRequest
from google.cloud.sql_v1.types.cloud_sql_instances import SqlInstancesExportRequest
from google.cloud.sql_v1.types.cloud_sql_instances import SqlInstancesFailoverRequest
from google.cloud.sql_v1.types.cloud_sql_instances import SqlInstancesGetRequest
from google.cloud.sql_v1.types.cloud_sql_instances import SqlInstancesImportRequest
from google.cloud.sql_v1.types.cloud_sql_instances import SqlInstancesInsertRequest
from google.cloud.sql_v1.types.cloud_sql_instances import SqlInstancesListRequest
from google.cloud.sql_v1.types.cloud_sql_instances import SqlInstancesListServerCasRequest
from google.cloud.sql_v1.types.cloud_sql_instances import SqlInstancesPatchRequest
from google.cloud.sql_v1.types.cloud_sql_instances import SqlInstancesPromoteReplicaRequest
from google.cloud.sql_v1.types.cloud_sql_instances import SqlInstancesRescheduleMaintenanceRequest
from google.cloud.sql_v1.types.cloud_sql_instances import SqlInstancesRescheduleMaintenanceRequestBody
from google.cloud.sql_v1.types.cloud_sql_instances import SqlInstancesResetSslConfigRequest
from google.cloud.sql_v1.types.cloud_sql_instances import SqlInstancesRestartRequest
from google.cloud.sql_v1.types.cloud_sql_instances import SqlInstancesRestoreBackupRequest
from google.cloud.sql_v1.types.cloud_sql_instances import SqlInstancesRotateServerCaRequest
from google.cloud.sql_v1.types.cloud_sql_instances import SqlInstancesStartExternalSyncRequest
from google.cloud.sql_v1.types.cloud_sql_instances import SqlInstancesStartReplicaRequest
from google.cloud.sql_v1.types.cloud_sql_instances import SqlInstancesStopReplicaRequest
from google.cloud.sql_v1.types.cloud_sql_instances import SqlInstancesTruncateLogRequest
from google.cloud.sql_v1.types.cloud_sql_instances import SqlInstancesUpdateRequest
from google.cloud.sql_v1.types.cloud_sql_instances import SqlInstancesVerifyExternalSyncSettingsRequest
from google.cloud.sql_v1.types.cloud_sql_instances import SqlInstancesVerifyExternalSyncSettingsResponse
from google.cloud.sql_v1.types.cloud_sql_instances import SslCertsCreateEphemeralRequest
from google.cloud.sql_v1.types.cloud_sql_instances import TruncateLogContext
from google.cloud.sql_v1.types.cloud_sql_instances import SqlInstanceType
from google.cloud.sql_v1.types.cloud_sql_instances import SqlSuspensionReason
from google.cloud.sql_v1.types.cloud_sql_operations import OperationsListResponse
from google.cloud.sql_v1.types.cloud_sql_operations import SqlOperationsGetRequest
from google.cloud.sql_v1.types.cloud_sql_operations import SqlOperationsListRequest
from google.cloud.sql_v1.types.cloud_sql_resources import AclEntry
from google.cloud.sql_v1.types.cloud_sql_resources import ApiWarning
from google.cloud.sql_v1.types.cloud_sql_resources import BackupConfiguration
from google.cloud.sql_v1.types.cloud_sql_resources import BackupContext
from google.cloud.sql_v1.types.cloud_sql_resources import BackupRetentionSettings
from google.cloud.sql_v1.types.cloud_sql_resources import Database
from google.cloud.sql_v1.types.cloud_sql_resources import DatabaseFlags
from google.cloud.sql_v1.types.cloud_sql_resources import DemoteMasterConfiguration
from google.cloud.sql_v1.types.cloud_sql_resources import DemoteMasterMySqlReplicaConfiguration
from google.cloud.sql_v1.types.cloud_sql_resources import DenyMaintenancePeriod
from google.cloud.sql_v1.types.cloud_sql_resources import DiskEncryptionConfiguration
from google.cloud.sql_v1.types.cloud_sql_resources import DiskEncryptionStatus
from google.cloud.sql_v1.types.cloud_sql_resources import ExportContext
from google.cloud.sql_v1.types.cloud_sql_resources import ImportContext
from google.cloud.sql_v1.types.cloud_sql_resources import InsightsConfig
from google.cloud.sql_v1.types.cloud_sql_resources import InstanceReference
from google.cloud.sql_v1.types.cloud_sql_resources import IpConfiguration
from google.cloud.sql_v1.types.cloud_sql_resources import IpMapping
from google.cloud.sql_v1.types.cloud_sql_resources import LocationPreference
from google.cloud.sql_v1.types.cloud_sql_resources import MaintenanceWindow
from google.cloud.sql_v1.types.cloud_sql_resources import MySqlReplicaConfiguration
from google.cloud.sql_v1.types.cloud_sql_resources import MySqlSyncConfig
from google.cloud.sql_v1.types.cloud_sql_resources import Operation
from google.cloud.sql_v1.types.cloud_sql_resources import OperationError
from google.cloud.sql_v1.types.cloud_sql_resources import OperationErrors
from google.cloud.sql_v1.types.cloud_sql_resources import Settings
from google.cloud.sql_v1.types.cloud_sql_resources import SqlActiveDirectoryConfig
from google.cloud.sql_v1.types.cloud_sql_resources import SqlServerAuditConfig
from google.cloud.sql_v1.types.cloud_sql_resources import SqlServerDatabaseDetails
from google.cloud.sql_v1.types.cloud_sql_resources import SslCert
from google.cloud.sql_v1.types.cloud_sql_resources import SslCertDetail
from google.cloud.sql_v1.types.cloud_sql_resources import SyncFlags
from google.cloud.sql_v1.types.cloud_sql_resources import SqlAvailabilityType
from google.cloud.sql_v1.types.cloud_sql_resources import SqlBackendType
from google.cloud.sql_v1.types.cloud_sql_resources import SqlDatabaseVersion
from google.cloud.sql_v1.types.cloud_sql_resources import SqlDataDiskType
from google.cloud.sql_v1.types.cloud_sql_resources import SqlFileType
from google.cloud.sql_v1.types.cloud_sql_resources import SqlIpAddressType
from google.cloud.sql_v1.types.cloud_sql_resources import SqlPricingPlan
from google.cloud.sql_v1.types.cloud_sql_resources import SqlReplicationType
from google.cloud.sql_v1.types.cloud_sql_resources import SqlUpdateTrack
from google.cloud.sql_v1.types.cloud_sql_ssl_certs import SqlSslCertsDeleteRequest
from google.cloud.sql_v1.types.cloud_sql_ssl_certs import SqlSslCertsGetRequest
from google.cloud.sql_v1.types.cloud_sql_ssl_certs import SqlSslCertsInsertRequest
from google.cloud.sql_v1.types.cloud_sql_ssl_certs import SqlSslCertsListRequest
from google.cloud.sql_v1.types.cloud_sql_ssl_certs import SslCertsInsertRequest
from google.cloud.sql_v1.types.cloud_sql_ssl_certs import SslCertsInsertResponse
from google.cloud.sql_v1.types.cloud_sql_ssl_certs import SslCertsListResponse
from google.cloud.sql_v1.types.cloud_sql_tiers import SqlTiersListRequest
from google.cloud.sql_v1.types.cloud_sql_tiers import Tier
from google.cloud.sql_v1.types.cloud_sql_tiers import TiersListResponse
from google.cloud.sql_v1.types.cloud_sql_users import SqlServerUserDetails
from google.cloud.sql_v1.types.cloud_sql_users import SqlUsersDeleteRequest
from google.cloud.sql_v1.types.cloud_sql_users import SqlUsersInsertRequest
from google.cloud.sql_v1.types.cloud_sql_users import SqlUsersListRequest
from google.cloud.sql_v1.types.cloud_sql_users import SqlUsersUpdateRequest
from google.cloud.sql_v1.types.cloud_sql_users import User
from google.cloud.sql_v1.types.cloud_sql_users import UsersListResponse
# Explicit public API of the `google.cloud.sql` package: limits star-imports to
# the generated service clients (sync + async), the request/response message
# types, and the enums re-exported from `google.cloud.sql_v1` above.
__all__ = (
    'SqlBackupRunsServiceClient',
    'SqlBackupRunsServiceAsyncClient',
    'SqlConnectServiceClient',
    'SqlConnectServiceAsyncClient',
    'SqlDatabasesServiceClient',
    'SqlDatabasesServiceAsyncClient',
    'SqlFlagsServiceClient',
    'SqlFlagsServiceAsyncClient',
    'SqlInstanceNamesServiceClient',
    'SqlInstanceNamesServiceAsyncClient',
    'SqlInstancesServiceClient',
    'SqlInstancesServiceAsyncClient',
    'SqlOperationsServiceClient',
    'SqlOperationsServiceAsyncClient',
    'SqlSslCertsServiceClient',
    'SqlSslCertsServiceAsyncClient',
    'SqlTiersServiceClient',
    'SqlTiersServiceAsyncClient',
    'SqlUsersServiceClient',
    'SqlUsersServiceAsyncClient',
    'BackupRun',
    'BackupRunsListResponse',
    'SqlBackupRunsDeleteRequest',
    'SqlBackupRunsGetRequest',
    'SqlBackupRunsInsertRequest',
    'SqlBackupRunsListRequest',
    'SqlBackupKind',
    'SqlBackupRunStatus',
    'SqlBackupRunType',
    'ConnectSettings',
    'GenerateEphemeralCertRequest',
    'GenerateEphemeralCertResponse',
    'GetConnectSettingsRequest',
    'DatabasesListResponse',
    'SqlDatabasesDeleteRequest',
    'SqlDatabasesGetRequest',
    'SqlDatabasesInsertRequest',
    'SqlDatabasesListRequest',
    'SqlDatabasesUpdateRequest',
    'Flag',
    'FlagsListResponse',
    'SqlFlagsListRequest',
    'SqlFlagType',
    'BinLogCoordinates',
    'CloneContext',
    'DatabaseInstance',
    'DemoteMasterContext',
    'FailoverContext',
    'InstancesCloneRequest',
    'InstancesDemoteMasterRequest',
    'InstancesExportRequest',
    'InstancesFailoverRequest',
    'InstancesImportRequest',
    'InstancesListResponse',
    'InstancesListServerCasResponse',
    'InstancesRestoreBackupRequest',
    'InstancesRotateServerCaRequest',
    'InstancesTruncateLogRequest',
    'OnPremisesConfiguration',
    'ReplicaConfiguration',
    'RestoreBackupContext',
    'RotateServerCaContext',
    'SqlExternalSyncSettingError',
    'SqlInstancesAddServerCaRequest',
    'SqlInstancesCloneRequest',
    'SqlInstancesCreateEphemeralCertRequest',
    'SqlInstancesDeleteRequest',
    'SqlInstancesDemoteMasterRequest',
    'SqlInstancesExportRequest',
    'SqlInstancesFailoverRequest',
    'SqlInstancesGetRequest',
    'SqlInstancesImportRequest',
    'SqlInstancesInsertRequest',
    'SqlInstancesListRequest',
    'SqlInstancesListServerCasRequest',
    'SqlInstancesPatchRequest',
    'SqlInstancesPromoteReplicaRequest',
    'SqlInstancesRescheduleMaintenanceRequest',
    'SqlInstancesRescheduleMaintenanceRequestBody',
    'SqlInstancesResetSslConfigRequest',
    'SqlInstancesRestartRequest',
    'SqlInstancesRestoreBackupRequest',
    'SqlInstancesRotateServerCaRequest',
    'SqlInstancesStartExternalSyncRequest',
    'SqlInstancesStartReplicaRequest',
    'SqlInstancesStopReplicaRequest',
    'SqlInstancesTruncateLogRequest',
    'SqlInstancesUpdateRequest',
    'SqlInstancesVerifyExternalSyncSettingsRequest',
    'SqlInstancesVerifyExternalSyncSettingsResponse',
    'SslCertsCreateEphemeralRequest',
    'TruncateLogContext',
    'SqlInstanceType',
    'SqlSuspensionReason',
    'OperationsListResponse',
    'SqlOperationsGetRequest',
    'SqlOperationsListRequest',
    'AclEntry',
    'ApiWarning',
    'BackupConfiguration',
    'BackupContext',
    'BackupRetentionSettings',
    'Database',
    'DatabaseFlags',
    'DemoteMasterConfiguration',
    'DemoteMasterMySqlReplicaConfiguration',
    'DenyMaintenancePeriod',
    'DiskEncryptionConfiguration',
    'DiskEncryptionStatus',
    'ExportContext',
    'ImportContext',
    'InsightsConfig',
    'InstanceReference',
    'IpConfiguration',
    'IpMapping',
    'LocationPreference',
    'MaintenanceWindow',
    'MySqlReplicaConfiguration',
    'MySqlSyncConfig',
    'Operation',
    'OperationError',
    'OperationErrors',
    'Settings',
    'SqlActiveDirectoryConfig',
    'SqlServerAuditConfig',
    'SqlServerDatabaseDetails',
    'SslCert',
    'SslCertDetail',
    'SyncFlags',
    'SqlAvailabilityType',
    'SqlBackendType',
    'SqlDatabaseVersion',
    'SqlDataDiskType',
    'SqlFileType',
    'SqlIpAddressType',
    'SqlPricingPlan',
    'SqlReplicationType',
    'SqlUpdateTrack',
    'SqlSslCertsDeleteRequest',
    'SqlSslCertsGetRequest',
    'SqlSslCertsInsertRequest',
    'SqlSslCertsListRequest',
    'SslCertsInsertRequest',
    'SslCertsInsertResponse',
    'SslCertsListResponse',
    'SqlTiersListRequest',
    'Tier',
    'TiersListResponse',
    'SqlServerUserDetails',
    'SqlUsersDeleteRequest',
    'SqlUsersInsertRequest',
    'SqlUsersListRequest',
    'SqlUsersUpdateRequest',
    'User',
    'UsersListResponse',
)
| 54.715152 | 115 | 0.852847 |
d7fc502988b5bbc7543d990d08e7629334a29637 | 23,846 | py | Python | sdk/python/pulumi_azure/frontdoor/firewall_policy.py | davidobrien1985/pulumi-azure | 811beeea473bd798d77354521266a87a2fac5888 | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | sdk/python/pulumi_azure/frontdoor/firewall_policy.py | davidobrien1985/pulumi-azure | 811beeea473bd798d77354521266a87a2fac5888 | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | sdk/python/pulumi_azure/frontdoor/firewall_policy.py | davidobrien1985/pulumi-azure | 811beeea473bd798d77354521266a87a2fac5888 | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import json
import warnings
import pulumi
import pulumi.runtime
from typing import Union
from .. import utilities, tables
class FirewallPolicy(pulumi.CustomResource):
    """
    Manages an Azure Front Door Web Application Firewall Policy instance.

    NOTE: this class is generated by the Pulumi Terraform Bridge (tfgen) — see
    the file header — so the typed attributes below mirror the provider's
    output properties for the `azure:frontdoor/firewallPolicy` resource type.
    """
    # Output properties resolved by the Pulumi engine after deployment.
    custom_block_response_body: pulumi.Output[str]
    """
    If a `custom_rule` block's action type is `block`, this is the response body. The body must be specified in base64 encoding.
    """
    custom_block_response_status_code: pulumi.Output[float]
    """
    If a `custom_rule` block's action type is `block`, this is the response status code. Possible values are `200`, `403`, `405`, `406`, or `429`.
    """
    custom_rules: pulumi.Output[list]
    """
    One or more `custom_rule` blocks as defined below.

      * `action` (`str`) - The action to perform when the rule is matched. Possible values are `Allow`, `Block`, `Log`, or `Redirect`.
      * `enabled` (`bool`) - Is the rule is enabled or disabled? Defaults to `true`.
      * `matchConditions` (`list`) - One or more `match_condition` block defined below.
        * `matchValues` (`list`) - Up to `100` possible values to match.
        * `matchVariable` (`str`) - The request variable to compare with. Possible values are `Cookies`, `PostArgs`, `QueryString`, `RemoteAddr`, `RequestBody`, `RequestHeader`, `RequestMethod`, or `RequestUri`.
        * `negationCondition` (`bool`) - Should the result of the condition be negated.
        * `operator` (`str`) - Comparison type to use for matching with the variable value. Possible values are `Any`, `BeginsWith`, `Contains`, `EndsWith`, `Equal`, `GeoMatch`, `GreaterThan`, `GreaterThanOrEqual`, `IPMatch`, `LessThan`, `LessThanOrEqual` or `RegEx`.
        * `selector` (`str`) - Match against a specific key if the `match_variable` is `QueryString`, `PostArgs`, `RequestHeader` or `Cookies`.
        * `transforms` (`list`) - Up to `5` transforms to apply. Possible values are `Lowercase`, `RemoveNulls`, `Trim`, `Uppercase`, `URLDecode` or`URLEncode`.
      * `name` (`str`) - Gets name of the resource that is unique within a policy. This name can be used to access the resource.
      * `priority` (`float`) - The priority of the rule. Rules with a lower value will be evaluated before rules with a higher value. Defaults to `1`.
      * `rateLimitDurationInMinutes` (`float`) - The rate limit duration in minutes. Defaults to `1`.
      * `rateLimitThreshold` (`float`) - The rate limit threshold. Defaults to `10`.
      * `type` (`str`) - The type of rule. Possible values are `MatchRule` or `RateLimitRule`.
    """
    enabled: pulumi.Output[bool]
    """
    Is the policy a enabled state or disabled state. Defaults to `true`.
    """
    frontend_endpoint_ids: pulumi.Output[list]
    """
    the Frontend Endpoints associated with this Front Door Web Application Firewall policy.
    """
    location: pulumi.Output[str]
    """
    Resource location.
    """
    managed_rules: pulumi.Output[list]
    """
    One or more `managed_rule` blocks as defined below.

      * `exclusions` (`list`) - One or more `exclusion` blocks as defined below.
        * `matchVariable` (`str`) - The variable type to be excluded. Possible values are `QueryStringArgNames`, `RequestBodyPostArgNames`, `RequestCookieNames`, `RequestHeaderNames`.
        * `operator` (`str`) - Comparison operator to apply to the selector when specifying which elements in the collection this exclusion applies to. Possible values are: `Equals`, `Contains`, `StartsWith`, `EndsWith`, `EqualsAny`.
        * `selector` (`str`) - Selector for the value in the `match_variable` attribute this exclusion applies to.
      * `overrides` (`list`) - One or more `override` blocks as defined below.
        * `exclusions` (`list`) - One or more `exclusion` blocks as defined below.
          * `matchVariable` (`str`) - The variable type to be excluded. Possible values are `QueryStringArgNames`, `RequestBodyPostArgNames`, `RequestCookieNames`, `RequestHeaderNames`.
          * `operator` (`str`) - Comparison operator to apply to the selector when specifying which elements in the collection this exclusion applies to. Possible values are: `Equals`, `Contains`, `StartsWith`, `EndsWith`, `EqualsAny`.
          * `selector` (`str`) - Selector for the value in the `match_variable` attribute this exclusion applies to.
        * `ruleGroupName` (`str`) - The managed rule group to override.
        * `rules` (`list`) - One or more `rule` blocks as defined below. If none are specified, all of the rules in the group will be disabled.
          * `action` (`str`) - The action to be applied when the rule matches. Possible values are `Allow`, `Block`, `Log`, or `Redirect`.
          * `enabled` (`bool`) - Is the managed rule override enabled or disabled. Defaults to `false`
          * `exclusions` (`list`) - One or more `exclusion` blocks as defined below.
            * `matchVariable` (`str`) - The variable type to be excluded. Possible values are `QueryStringArgNames`, `RequestBodyPostArgNames`, `RequestCookieNames`, `RequestHeaderNames`.
            * `operator` (`str`) - Comparison operator to apply to the selector when specifying which elements in the collection this exclusion applies to. Possible values are: `Equals`, `Contains`, `StartsWith`, `EndsWith`, `EqualsAny`.
            * `selector` (`str`) - Selector for the value in the `match_variable` attribute this exclusion applies to.
          * `rule_id` (`str`) - Identifier for the managed rule.
      * `type` (`str`) - The name of the managed rule to use with this resource.
      * `version` (`str`) - The version on the managed rule to use with this resource.
    """
    mode: pulumi.Output[str]
    """
    The firewall policy mode. Possible values are `Detection`, `Prevention` and defaults to `Prevention`.
    """
    name: pulumi.Output[str]
    """
    The name of the policy. Changing this forces a new resource to be created.
    """
    redirect_url: pulumi.Output[str]
    """
    If action type is redirect, this field represents redirect URL for the client.
    """
    resource_group_name: pulumi.Output[str]
    """
    The name of the resource group. Changing this forces a new resource to be created.
    """
    tags: pulumi.Output[dict]
    """
    A mapping of tags to assign to the Web Application Firewall Policy.
    """
def __init__(__self__, resource_name, opts=None, custom_block_response_body=None, custom_block_response_status_code=None, custom_rules=None, enabled=None, managed_rules=None, mode=None, name=None, redirect_url=None, resource_group_name=None, tags=None, __props__=None, __name__=None, __opts__=None):
"""
Manages an Azure Front Door Web Application Firewall Policy instance.
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] custom_block_response_body: If a `custom_rule` block's action type is `block`, this is the response body. The body must be specified in base64 encoding.
:param pulumi.Input[float] custom_block_response_status_code: If a `custom_rule` block's action type is `block`, this is the response status code. Possible values are `200`, `403`, `405`, `406`, or `429`.
:param pulumi.Input[list] custom_rules: One or more `custom_rule` blocks as defined below.
:param pulumi.Input[bool] enabled: Is the policy a enabled state or disabled state. Defaults to `true`.
:param pulumi.Input[list] managed_rules: One or more `managed_rule` blocks as defined below.
:param pulumi.Input[str] mode: The firewall policy mode. Possible values are `Detection`, `Prevention` and defaults to `Prevention`.
:param pulumi.Input[str] name: The name of the policy. Changing this forces a new resource to be created.
:param pulumi.Input[str] redirect_url: If action type is redirect, this field represents redirect URL for the client.
:param pulumi.Input[str] resource_group_name: The name of the resource group. Changing this forces a new resource to be created.
:param pulumi.Input[dict] tags: A mapping of tags to assign to the Web Application Firewall Policy.
The **custom_rules** object supports the following:
* `action` (`pulumi.Input[str]`) - The action to perform when the rule is matched. Possible values are `Allow`, `Block`, `Log`, or `Redirect`.
* `enabled` (`pulumi.Input[bool]`) - Is the rule is enabled or disabled? Defaults to `true`.
* `matchConditions` (`pulumi.Input[list]`) - One or more `match_condition` block defined below.
* `matchValues` (`pulumi.Input[list]`) - Up to `100` possible values to match.
* `matchVariable` (`pulumi.Input[str]`) - The request variable to compare with. Possible values are `Cookies`, `PostArgs`, `QueryString`, `RemoteAddr`, `RequestBody`, `RequestHeader`, `RequestMethod`, or `RequestUri`.
* `negationCondition` (`pulumi.Input[bool]`) - Should the result of the condition be negated.
* `operator` (`pulumi.Input[str]`) - Comparison type to use for matching with the variable value. Possible values are `Any`, `BeginsWith`, `Contains`, `EndsWith`, `Equal`, `GeoMatch`, `GreaterThan`, `GreaterThanOrEqual`, `IPMatch`, `LessThan`, `LessThanOrEqual` or `RegEx`.
* `selector` (`pulumi.Input[str]`) - Match against a specific key if the `match_variable` is `QueryString`, `PostArgs`, `RequestHeader` or `Cookies`.
* `transforms` (`pulumi.Input[list]`) - Up to `5` transforms to apply. Possible values are `Lowercase`, `RemoveNulls`, `Trim`, `Uppercase`, `URLDecode` or`URLEncode`.
* `name` (`pulumi.Input[str]`) - Gets name of the resource that is unique within a policy. This name can be used to access the resource.
* `priority` (`pulumi.Input[float]`) - The priority of the rule. Rules with a lower value will be evaluated before rules with a higher value. Defaults to `1`.
* `rateLimitDurationInMinutes` (`pulumi.Input[float]`) - The rate limit duration in minutes. Defaults to `1`.
* `rateLimitThreshold` (`pulumi.Input[float]`) - The rate limit threshold. Defaults to `10`.
* `type` (`pulumi.Input[str]`) - The type of rule. Possible values are `MatchRule` or `RateLimitRule`.
The **managed_rules** object supports the following:
* `exclusions` (`pulumi.Input[list]`) - One or more `exclusion` blocks as defined below.
* `matchVariable` (`pulumi.Input[str]`) - The variable type to be excluded. Possible values are `QueryStringArgNames`, `RequestBodyPostArgNames`, `RequestCookieNames`, `RequestHeaderNames`.
* `operator` (`pulumi.Input[str]`) - Comparison operator to apply to the selector when specifying which elements in the collection this exclusion applies to. Possible values are: `Equals`, `Contains`, `StartsWith`, `EndsWith`, `EqualsAny`.
* `selector` (`pulumi.Input[str]`) - Selector for the value in the `match_variable` attribute this exclusion applies to.
* `overrides` (`pulumi.Input[list]`) - One or more `override` blocks as defined below.
* `exclusions` (`pulumi.Input[list]`) - One or more `exclusion` blocks as defined below.
* `matchVariable` (`pulumi.Input[str]`) - The variable type to be excluded. Possible values are `QueryStringArgNames`, `RequestBodyPostArgNames`, `RequestCookieNames`, `RequestHeaderNames`.
* `operator` (`pulumi.Input[str]`) - Comparison operator to apply to the selector when specifying which elements in the collection this exclusion applies to. Possible values are: `Equals`, `Contains`, `StartsWith`, `EndsWith`, `EqualsAny`.
* `selector` (`pulumi.Input[str]`) - Selector for the value in the `match_variable` attribute this exclusion applies to.
* `ruleGroupName` (`pulumi.Input[str]`) - The managed rule group to override.
* `rules` (`pulumi.Input[list]`) - One or more `rule` blocks as defined below. If none are specified, all of the rules in the group will be disabled.
* `action` (`pulumi.Input[str]`) - The action to be applied when the rule matches. Possible values are `Allow`, `Block`, `Log`, or `Redirect`.
* `enabled` (`pulumi.Input[bool]`) - Is the managed rule override enabled or disabled. Defaults to `false`
* `exclusions` (`pulumi.Input[list]`) - One or more `exclusion` blocks as defined below.
* `matchVariable` (`pulumi.Input[str]`) - The variable type to be excluded. Possible values are `QueryStringArgNames`, `RequestBodyPostArgNames`, `RequestCookieNames`, `RequestHeaderNames`.
* `operator` (`pulumi.Input[str]`) - Comparison operator to apply to the selector when specifying which elements in the collection this exclusion applies to. Possible values are: `Equals`, `Contains`, `StartsWith`, `EndsWith`, `EqualsAny`.
* `selector` (`pulumi.Input[str]`) - Selector for the value in the `match_variable` attribute this exclusion applies to.
* `rule_id` (`pulumi.Input[str]`) - Identifier for the managed rule.
* `type` (`pulumi.Input[str]`) - The name of the managed rule to use with this resource.
* `version` (`pulumi.Input[str]`) - The version on the managed rule to use with this resource.
"""
if __name__ is not None:
warnings.warn("explicit use of __name__ is deprecated", DeprecationWarning)
resource_name = __name__
if __opts__ is not None:
warnings.warn("explicit use of __opts__ is deprecated, use 'opts' instead", DeprecationWarning)
opts = __opts__
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = dict()
__props__['custom_block_response_body'] = custom_block_response_body
__props__['custom_block_response_status_code'] = custom_block_response_status_code
__props__['custom_rules'] = custom_rules
__props__['enabled'] = enabled
__props__['managed_rules'] = managed_rules
__props__['mode'] = mode
__props__['name'] = name
__props__['redirect_url'] = redirect_url
if resource_group_name is None:
raise TypeError("Missing required property 'resource_group_name'")
__props__['resource_group_name'] = resource_group_name
__props__['tags'] = tags
__props__['frontend_endpoint_ids'] = None
__props__['location'] = None
super(FirewallPolicy, __self__).__init__(
'azure:frontdoor/firewallPolicy:FirewallPolicy',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name, id, opts=None, custom_block_response_body=None, custom_block_response_status_code=None, custom_rules=None, enabled=None, frontend_endpoint_ids=None, location=None, managed_rules=None, mode=None, name=None, redirect_url=None, resource_group_name=None, tags=None):
"""
Get an existing FirewallPolicy resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param str id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] custom_block_response_body: If a `custom_rule` block's action type is `block`, this is the response body. The body must be specified in base64 encoding.
:param pulumi.Input[float] custom_block_response_status_code: If a `custom_rule` block's action type is `block`, this is the response status code. Possible values are `200`, `403`, `405`, `406`, or `429`.
:param pulumi.Input[list] custom_rules: One or more `custom_rule` blocks as defined below.
:param pulumi.Input[bool] enabled: Is the policy a enabled state or disabled state. Defaults to `true`.
:param pulumi.Input[list] frontend_endpoint_ids: the Frontend Endpoints associated with this Front Door Web Application Firewall policy.
:param pulumi.Input[str] location: Resource location.
:param pulumi.Input[list] managed_rules: One or more `managed_rule` blocks as defined below.
:param pulumi.Input[str] mode: The firewall policy mode. Possible values are `Detection`, `Prevention` and defaults to `Prevention`.
:param pulumi.Input[str] name: The name of the policy. Changing this forces a new resource to be created.
:param pulumi.Input[str] redirect_url: If action type is redirect, this field represents redirect URL for the client.
:param pulumi.Input[str] resource_group_name: The name of the resource group. Changing this forces a new resource to be created.
:param pulumi.Input[dict] tags: A mapping of tags to assign to the Web Application Firewall Policy.
The **custom_rules** object supports the following:
* `action` (`pulumi.Input[str]`) - The action to perform when the rule is matched. Possible values are `Allow`, `Block`, `Log`, or `Redirect`.
* `enabled` (`pulumi.Input[bool]`) - Is the rule is enabled or disabled? Defaults to `true`.
* `matchConditions` (`pulumi.Input[list]`) - One or more `match_condition` block defined below.
* `matchValues` (`pulumi.Input[list]`) - Up to `100` possible values to match.
* `matchVariable` (`pulumi.Input[str]`) - The request variable to compare with. Possible values are `Cookies`, `PostArgs`, `QueryString`, `RemoteAddr`, `RequestBody`, `RequestHeader`, `RequestMethod`, or `RequestUri`.
* `negationCondition` (`pulumi.Input[bool]`) - Should the result of the condition be negated.
* `operator` (`pulumi.Input[str]`) - Comparison type to use for matching with the variable value. Possible values are `Any`, `BeginsWith`, `Contains`, `EndsWith`, `Equal`, `GeoMatch`, `GreaterThan`, `GreaterThanOrEqual`, `IPMatch`, `LessThan`, `LessThanOrEqual` or `RegEx`.
* `selector` (`pulumi.Input[str]`) - Match against a specific key if the `match_variable` is `QueryString`, `PostArgs`, `RequestHeader` or `Cookies`.
* `transforms` (`pulumi.Input[list]`) - Up to `5` transforms to apply. Possible values are `Lowercase`, `RemoveNulls`, `Trim`, `Uppercase`, `URLDecode` or`URLEncode`.
* `name` (`pulumi.Input[str]`) - Gets name of the resource that is unique within a policy. This name can be used to access the resource.
* `priority` (`pulumi.Input[float]`) - The priority of the rule. Rules with a lower value will be evaluated before rules with a higher value. Defaults to `1`.
* `rateLimitDurationInMinutes` (`pulumi.Input[float]`) - The rate limit duration in minutes. Defaults to `1`.
* `rateLimitThreshold` (`pulumi.Input[float]`) - The rate limit threshold. Defaults to `10`.
* `type` (`pulumi.Input[str]`) - The type of rule. Possible values are `MatchRule` or `RateLimitRule`.
The **managed_rules** object supports the following:
* `exclusions` (`pulumi.Input[list]`) - One or more `exclusion` blocks as defined below.
* `matchVariable` (`pulumi.Input[str]`) - The variable type to be excluded. Possible values are `QueryStringArgNames`, `RequestBodyPostArgNames`, `RequestCookieNames`, `RequestHeaderNames`.
* `operator` (`pulumi.Input[str]`) - Comparison operator to apply to the selector when specifying which elements in the collection this exclusion applies to. Possible values are: `Equals`, `Contains`, `StartsWith`, `EndsWith`, `EqualsAny`.
* `selector` (`pulumi.Input[str]`) - Selector for the value in the `match_variable` attribute this exclusion applies to.
* `overrides` (`pulumi.Input[list]`) - One or more `override` blocks as defined below.
* `exclusions` (`pulumi.Input[list]`) - One or more `exclusion` blocks as defined below.
* `matchVariable` (`pulumi.Input[str]`) - The variable type to be excluded. Possible values are `QueryStringArgNames`, `RequestBodyPostArgNames`, `RequestCookieNames`, `RequestHeaderNames`.
* `operator` (`pulumi.Input[str]`) - Comparison operator to apply to the selector when specifying which elements in the collection this exclusion applies to. Possible values are: `Equals`, `Contains`, `StartsWith`, `EndsWith`, `EqualsAny`.
* `selector` (`pulumi.Input[str]`) - Selector for the value in the `match_variable` attribute this exclusion applies to.
* `ruleGroupName` (`pulumi.Input[str]`) - The managed rule group to override.
* `rules` (`pulumi.Input[list]`) - One or more `rule` blocks as defined below. If none are specified, all of the rules in the group will be disabled.
* `action` (`pulumi.Input[str]`) - The action to be applied when the rule matches. Possible values are `Allow`, `Block`, `Log`, or `Redirect`.
* `enabled` (`pulumi.Input[bool]`) - Is the managed rule override enabled or disabled. Defaults to `false`
* `exclusions` (`pulumi.Input[list]`) - One or more `exclusion` blocks as defined below.
* `matchVariable` (`pulumi.Input[str]`) - The variable type to be excluded. Possible values are `QueryStringArgNames`, `RequestBodyPostArgNames`, `RequestCookieNames`, `RequestHeaderNames`.
* `operator` (`pulumi.Input[str]`) - Comparison operator to apply to the selector when specifying which elements in the collection this exclusion applies to. Possible values are: `Equals`, `Contains`, `StartsWith`, `EndsWith`, `EqualsAny`.
* `selector` (`pulumi.Input[str]`) - Selector for the value in the `match_variable` attribute this exclusion applies to.
* `rule_id` (`pulumi.Input[str]`) - Identifier for the managed rule.
* `type` (`pulumi.Input[str]`) - The name of the managed rule to use with this resource.
* `version` (`pulumi.Input[str]`) - The version on the managed rule to use with this resource.
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = dict()
__props__["custom_block_response_body"] = custom_block_response_body
__props__["custom_block_response_status_code"] = custom_block_response_status_code
__props__["custom_rules"] = custom_rules
__props__["enabled"] = enabled
__props__["frontend_endpoint_ids"] = frontend_endpoint_ids
__props__["location"] = location
__props__["managed_rules"] = managed_rules
__props__["mode"] = mode
__props__["name"] = name
__props__["redirect_url"] = redirect_url
__props__["resource_group_name"] = resource_group_name
__props__["tags"] = tags
return FirewallPolicy(resource_name, opts=opts, __props__=__props__)
def translate_output_property(self, prop):
return tables._CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
def translate_input_property(self, prop):
return tables._SNAKE_TO_CAMEL_CASE_TABLE.get(prop) or prop
| 81.385666 | 303 | 0.68393 |
fe5804e8a5e3d01144b44b57782c5fea5b13dd95 | 8,697 | py | Python | data_tests/models.py | andrewbird2/django-data-tests | 9cc187d094e5e9ac18a41913861544a424c1eb9a | [
"MIT"
] | 2 | 2019-12-11T14:16:59.000Z | 2020-03-24T20:16:39.000Z | data_tests/models.py | andrewbird2/django-data-tests | 9cc187d094e5e9ac18a41913861544a424c1eb9a | [
"MIT"
] | null | null | null | data_tests/models.py | andrewbird2/django-data-tests | 9cc187d094e5e9ac18a41913861544a424c1eb9a | [
"MIT"
] | 1 | 2020-03-24T20:17:02.000Z | 2020-03-24T20:17:02.000Z | # -*- coding: utf-8 -*-
from functools import lru_cache
import logging
from django.apps import apps
from django.contrib.contenttypes import fields
from django.contrib.contenttypes.models import ContentType
from django.db import models, router
from django.urls import reverse
from model_utils.models import TimeStampedModel
from data_tests.constants import MAX_MESSAGE_LENGTH
logger = logging.getLogger(__name__)
# Memoized wrapper around Django's router.db_for_read so repeated lookups for
# the same model class do not re-run database-router resolution.
db_for_read = lru_cache()(router.db_for_read)
class TestMethod(models.Model):
    """
    A data test registered against a Django model.

    Each row links a test method (looked up by name on the model class) to
    that model's ContentType.  A test is either instance-level (run once per
    object) or class-level (run once, returning a queryset of failing
    objects, optionally paired with a message).
    """

    class Meta:
        # A model may register a given method name only once.
        unique_together = ('content_type', 'method_name')

    # Human-readable title for admin listings.
    title = models.CharField(max_length=256)
    # Name of the attribute on the model class implementing the test.
    method_name = models.CharField(max_length=256)
    content_type = models.ForeignKey(ContentType, on_delete=models.CASCADE, related_name='test_methods')
    # True when the test is class-level (returns failing objects in bulk).
    is_class_method = models.BooleanField()

    def __str__(self):
        return self.title

    def save(self, *args, **kwargs):
        # No extra behaviour today; kept as an explicit extension point.
        return super(TestMethod, self).save(*args, **kwargs)

    def model_class(self):
        """Return the Django model class this test is attached to."""
        return self.content_type.model_class()

    def method(self):
        """Return the test callable looked up on the model class."""
        return getattr(self.model_class(), self.method_name)

    def delete_stale_results(self):
        """Delete results whose target object reference has been cleared."""
        deleted, _ = self.test_results.filter(object_id__isnull=True).delete()
        if deleted:
            logger.info('Deleted {} stale test results'.format(deleted))

    def add_new_result_objects(self):
        """Create placeholder TestResult rows for objects not yet covered."""
        # _base_manager is used so objects hidden by a custom default manager
        # are still tested.
        all_object_pks = set(self.model_class()._base_manager.values_list('pk', flat=True))
        existing_test_results = set(self.test_results.values_list('object_id', flat=True))
        to_insert = []
        for new_id in all_object_pks - existing_test_results:
            to_insert.append(TestResult(test_method=self, content_type=self.content_type, object_id=new_id))
        TestResult.objects.bulk_create(to_insert)

    def _run_test_method_instance(self):
        # Instance-level tests are evaluated one result (object) at a time.
        for result in self.test_results.all():
            result.run_test_method()

    def class_method_result(self):
        """
        Invoke the class-level test and normalise its return value to a
        ``(failing_queryset, message)`` tuple; ``message`` is None when the
        test returned only a queryset.
        """
        assert self.is_class_method
        results = getattr(self.model_class(), self.method_name)()
        if type(results) is tuple:
            failing, message = results
        else:
            failing = results
            message = None
        return failing, message

    def _run_test_method_class(self):
        # Class-level tests update all results with two bulk UPDATE queries.
        try:
            qs_failing, message = self.class_method_result()
            # Update failing results
            self.test_results.filter(object_id__in=qs_failing.values_list('id', flat=True)).update(passed=False,
                                                                                                   message=message or '')
            # Update passing results
            self.test_results.exclude(object_id__in=qs_failing.values_list('id', flat=True)).update(passed=True,
                                                                                                    message='')
        except Exception as e:
            # A broken test should surface as a failure, not crash the run.
            self.test_results.update(passed=False, message="Test failed to run correctly! {}".format(str(e)))

    def run_test_method(self):
        """Synchronise result rows with current objects, then run the test."""
        logger.info('Running test: {} {}'.format(self.content_type, self))
        self.delete_stale_results()
        self.add_new_result_objects()
        if self.is_class_method:
            self._run_test_method_class()
        else:
            self._run_test_method_instance()
        results = self.test_results.all()
        logger.info('Test completed: {} successful, {} failing (of which {} are supposed to fail)\n'.format(
            results.filter(passed=True).count(),
            results.filter(passed=False).count(),
            results.filter(passed=False, xfail=True).count()
        ))

    @classmethod
    def rerun_tests_for_model(cls, model):
        """Re-run every registered test for the given model class."""
        ct = ContentType.objects.get(app_label=model._meta.app_label, model=model._meta.model_name)
        for test_method in cls.objects.filter(content_type=ct):
            test_method.run_test_method()

    @classmethod
    def rerun_all_tests(cls):
        """Re-run every registered test across all models."""
        for test_method in cls.objects.all():
            test_method.run_test_method()

    @classmethod
    def add_test_methods_for_content_type(cls, content_type):
        """Sync TestMethod rows with the tests registered for a content type."""
        # Imported locally to avoid a circular import at module load time --
        # TODO confirm; the registry module is not visible from here.
        from data_tests.registry import registry
        for method_name, defaults in registry[content_type].items():
            cls.objects.update_or_create(
                content_type=content_type,
                method_name=method_name,
                defaults=defaults
            )
class TestResult(TimeStampedModel):
    """
    The outcome of one TestMethod applied to one object.

    ``passed``/``message`` record the latest run; ``xfail`` and
    ``justification`` let a reviewer mark a failure as expected.
    """

    test_method = models.ForeignKey(TestMethod, on_delete=models.CASCADE, related_name='test_results')
    # Failure message from the last run, truncated to MAX_MESSAGE_LENGTH
    # ('' when the test passed).
    message = models.CharField(max_length=MAX_MESSAGE_LENGTH)
    passed = models.BooleanField(default=False)
    xfail = models.BooleanField(default=False, verbose_name="Supposed to fail")
    justification = models.CharField(blank=True, max_length=500)
    content_type = models.ForeignKey(ContentType, on_delete=models.CASCADE, related_name='test_results')
    # Nullable so the result row can outlive a deleted object (stale result).
    object_id = models.PositiveIntegerField(blank=True, null=True, db_index=True)

    def __str__(self):
        return str(self.test_method)

    class Meta:
        unique_together = ('test_method', 'object_id', 'content_type')

    @staticmethod
    @lru_cache()
    def get_model_class_from_content_type(content_type_id):
        """Cached equivalent of ``ContentType.model_class()`` keyed by id."""
        ct = ContentType.objects.get(id=content_type_id)
        return apps.get_model(ct.app_label, ct.model)

    def get_object(self):
        """Return the tested object, read from its routed database."""
        model_class = self.get_model_class_from_content_type(self.content_type_id)
        db_alias = db_for_read(model_class)
        manager = model_class._base_manager.using(db_alias)
        return manager.get(pk=self.object_id)

    @classmethod
    def get_test_results(cls, obj=None, qs=None):
        """Fetch results for one object, or for every object in ``qs``."""
        assert obj is not None or qs is not None # Note "assert obj or qs" does not work
        ids = [obj.id] if obj else qs.values_list('id', flat=True)
        class_type = type(obj) if obj else qs.model
        ct = ContentType.objects.get(app_label=class_type._meta.app_label, model=class_type._meta.model_name)
        return cls.objects.filter(content_type=ct, object_id__in=ids)

    def run_test_method(self):
        """Execute the linked test for this object and persist the outcome."""
        try:
            obj = self.get_object()
            method = self.test_method.method()
            if self.test_method.is_class_method:
                # Class-level test: the object fails if it is in the
                # returned failing queryset.
                qs_failing, message = self.test_method.class_method_result()
                if obj in qs_failing:
                    method_result = False, message
                else:
                    method_result = True
            else:
                method_result = method(obj)
        except Exception as e:
            # A broken test is recorded as a failure rather than raised.
            method_result = False, "Test failed to run correctly! {}".format(str(e))
        # Tests may return either a bare bool or a (passed, message) tuple.
        if type(method_result) is bool:
            self.passed = method_result
            self.message = ''
        else:
            passed, message = method_result
            self.passed = passed
            self.message = message[0:MAX_MESSAGE_LENGTH] if message else ''
        self.save()

    def object_admin_url(self):
        """Admin change-page URL for the tested object."""
        return reverse("admin:%s_%s_change" % (self.content_type.app_label, self.content_type.model),
                       args=(self.object_id,))

    def object_admin_hyperlink(self, text=None):
        """HTML anchor linking to the tested object's admin page."""
        return '<a href="%s">%s</a>' % (self.object_admin_url(), text or str(self))

    def test_result_admin_url(self):
        """Admin change-page URL for this TestResult row itself."""
        model = self._meta.model
        ct = ContentType.objects.get(app_label=model._meta.app_label, model=model._meta.model_name)
        return reverse("admin:%s_%s_change" % (ct.app_label, ct.model), args=(self.id,))

    def test_result_admin_hyperlink(self, text=None):
        """HTML anchor linking to this TestResult's admin page."""
        return '<a href="%s">%s</a>' % (self.test_result_admin_url(), text or str(self))

    @classmethod
    def test_results_for_object(cls, obj):
        """
        Return all results for ``obj``, creating any rows that are missing
        after syncing the registered test methods for its model.
        """
        model = obj._meta.model
        ct = ContentType.objects.get(app_label=model._meta.app_label, model=model._meta.model_name)
        TestMethod.add_test_methods_for_content_type(ct)
        missing_test_methods = TestMethod.objects.filter(content_type=ct).exclude(
            id__in=cls.objects.filter(content_type=ct,
                                      object_id=obj.pk).values_list(
                'test_method_id', flat=True))
        for test_method in missing_test_methods:
            cls.objects.create(content_type=ct, object_id=obj.pk, test_method=test_method)
        return cls.objects.filter(content_type=ct, object_id=obj.pk)

    @classmethod
    def rerun_tests_for_object(cls, obj):
        """Re-run every test for ``obj``."""
        for test_method in cls.test_results_for_object(obj):
            test_method.run_test_method()
| 40.078341 | 121 | 0.655398 |
fe5365ef750c75eae7654c5bc4be7e2d71092ee7 | 6,277 | py | Python | test_pyconvcli_internal_cli/test_pyconvcli.py | jlepinski/pyconvcli | 1b7c0f0ef44be6675b03f82ee9ba36ec38220473 | [
"Apache-2.0"
] | 4 | 2020-12-08T20:49:38.000Z | 2022-03-20T09:48:03.000Z | test_pyconvcli_internal_cli/test_pyconvcli.py | jlepinski/pyconvcli | 1b7c0f0ef44be6675b03f82ee9ba36ec38220473 | [
"Apache-2.0"
] | 1 | 2021-01-01T01:04:28.000Z | 2021-01-01T01:04:28.000Z | test_pyconvcli_internal_cli/test_pyconvcli.py | jlepinski/pyconvcli | 1b7c0f0ef44be6675b03f82ee9ba36ec38220473 | [
"Apache-2.0"
] | 1 | 2022-03-20T09:48:41.000Z | 2022-03-20T09:48:41.000Z | import unittest
from pyconvcli import PyConvCli
import os
import sys
from contextlib import redirect_stdout
from io import StringIO
import pkg_resources
from argparse import ArgumentError
class TestPyConvCli(unittest.TestCase):
    """
    End-to-end tests for PyConvCli argument parsing and command dispatch,
    driven by the ``test_pyconvcli_internal_cli`` fixture package.

    Each test fakes the command line via ``sys.argv`` and captures the CLI's
    stdout to assert on what the dispatched command printed.

    Bug fix: ``test_already_existing_at_root_path_as_callable`` was defined
    twice; the second definition silently shadowed (disabled) the first, so
    the ``here --ascii`` case was never exercised.  The second definition is
    renamed ``test_callable_route_without_params`` below.
    """

    def _make_cli(self):
        """Build a PyConvCli rooted at this test package."""
        return PyConvCli('test_pyconvcli_internal_cli',
                         os.path.dirname(os.path.realpath(__file__)),
                         'pyconvcli-test')

    def _run_cli(self, cli):
        """Run the CLI, returning everything it printed (stripped)."""
        captured = StringIO()
        with redirect_stdout(captured):
            cli.run()
        return captured.getvalue().strip()

    def test_update_parser_for_functions(self):
        sys.argv = ['test_pyconvcli_internal_cli', "here", 'custom', 'route']
        cli = self._make_cli()
        args, parsers = cli.parse_args()
        self.assertEqual(len(parsers['test_pyconvcli_internal_cli.here.custom.route']['callables']), 2)

    def test_groups_feature(self):
        sys.argv = ['test_pyconvcli_internal_cli', "here", 'custom', 'route']
        cli = self._make_cli()
        args, parsers = cli.parse_args()
        self.assertEqual(len(parsers['test_pyconvcli_internal_cli.here.custom.groups']['callables']['groupsCommand']['groups']), 2)

    def test_there_or_not_action_stored(self):
        sys.argv = ['test_pyconvcli_internal_cli', "there", "thereOrNotCommand", '--feature', '--notfeature']
        cli = self._make_cli()
        self.assertEqual(self._run_cli(cli), "feature:True,notfeature:False")
        # Re-running the same CLI with neither flag flips both defaults.
        sys.argv = ['pyconvcli-test', "there", "thereOrNotCommand"]
        self.assertEqual(self._run_cli(cli), "feature:False,notfeature:True")

    def test_already_existing_path_as_callable(self):
        sys.argv = ['test_pyconvcli_internal_cli', "here", "testing", '--ascii', '<()()()>']
        cli = self._make_cli()
        self.assertEqual(self._run_cli(cli), "ascii: '<()()()>'")

    def test_already_existing_at_root_path_as_callable(self):
        sys.argv = ['test_pyconvcli_internal_cli', "here", '--ascii', '<()()()>']
        cli = self._make_cli()
        self.assertEqual(self._run_cli(cli), "ascii: '<()()()>'")

    def test_callable_route_without_params(self):
        # Formerly the (duplicate) second definition of
        # test_already_existing_at_root_path_as_callable.
        sys.argv = ['test_pyconvcli_internal_cli', "there"]
        cli = self._make_cli()
        self.assertEqual(self._run_cli(cli), 'no params but I was called')

    def test_action_command(self):
        sys.argv = ['test_pyconvcli_internal_cli', "--version"]
        cli = self._make_cli()
        self.assertEqual(self._run_cli(cli), pkg_resources.get_distribution("pyconvcli").version)

    def test_2_narg_action_command(self):
        # Exactly two values are accepted for nargs=2...
        sys.argv = ['test_pyconvcli_internal_cli', "--nargs2test", '3', 'resd']
        cli = self._make_cli()
        self.assertEqual(self._run_cli(cli), str(['3', 'resd']))
        # ...while three values or one value exit with a parser error.
        sys.argv = ['test_pyconvcli_internal_cli', "--nargs2test", '3', 'resd', 'greens']
        with self.assertRaises(SystemExit):
            self._make_cli().run()
        sys.argv = ['test_pyconvcli_internal_cli', "--nargs2test", 'hello']
        with self.assertRaises(SystemExit):
            self._make_cli().run()

    def test_star_narg_action_command(self):
        cases = [
            ['3', 'resd'],
            ['3', 'resd', 'greens'],
            ['hello'],
            # With nargs='*' every following token is consumed, which also
            # demonstrates that no sub-command can follow the flag.
            ['hello', 'there'],
        ]
        for tail in cases:
            sys.argv = ['test_pyconvcli_internal_cli', "--nargsstartest"] + tail
            cli = self._make_cli()
            self.assertEqual(self._run_cli(cli), str(tail))
| 49.425197 | 131 | 0.665764 |
7ea5a587c89620d3eb45b1e977408b0414c80524 | 3,036 | py | Python | model/python/util/blocks.py | poojahira/nlu-winograd | 6ce7003b4cd800a4cdc4d75d4c577d1398f6c114 | [
"MIT"
] | 1 | 2018-12-22T08:34:02.000Z | 2018-12-22T08:34:02.000Z | model/python/util/blocks.py | poojahira/nlu-winograd | 6ce7003b4cd800a4cdc4d75d4c577d1398f6c114 | [
"MIT"
] | null | null | null | model/python/util/blocks.py | poojahira/nlu-winograd | 6ce7003b4cd800a4cdc4d75d4c577d1398f6c114 | [
"MIT"
] | 2 | 2018-04-18T04:13:43.000Z | 2020-01-30T23:33:15.000Z | """
Functions and components that can be slotted into tensorflow models.
TODO: Write functions for various types of attention.
"""
import tensorflow as tf
def length(sequence):
    """
    Compute the true (unpadded) length of each sequence plus a mask over the
    padded positions.

    Input of shape: (batch_size, max_seq_length, hidden_dim)
    Outputs:
        length: (batch_size) int32 tensor of true lengths
        mask:   (batch_size, max_seq_length, 1) float32, 1.0 where populated
    """
    # Padding entries are exactly zero, so sign(|x|) is 0 on pads, 1 elsewhere.
    nonzero = tf.sign(tf.abs(sequence))
    pad_mask = tf.cast(tf.expand_dims(nonzero, -1), tf.float32)
    true_len = tf.cast(tf.reduce_sum(nonzero, axis=1), tf.int32)
    return true_len, pad_mask
def biLSTM(inputs, dim, seq_len, name):
    """
    Bi-directional LSTM layer.

    Returns (hidden_states, cell_states), each a (forward, backward) tuple of
    tensors shaped (batch_size, max_seq_length, hidden_dim).
    """
    with tf.name_scope(name):
        # Separate variable scopes keep forward/backward weights distinct.
        with tf.variable_scope('forward' + name):
            fwd_cell = tf.contrib.rnn.LSTMCell(num_units=dim)
        with tf.variable_scope('backward' + name):
            bwd_cell = tf.contrib.rnn.LSTMCell(num_units=dim)
        outputs, final_states = tf.nn.bidirectional_dynamic_rnn(
            cell_fw=fwd_cell, cell_bw=bwd_cell, inputs=inputs,
            sequence_length=seq_len, dtype=tf.float32, scope=name)
    return outputs, final_states
def LSTM(inputs, dim, seq_len, name):
    """
    Uni-directional LSTM layer.

    Returns (hidden_states, cell_states); hidden states have shape
    (batch_size, max_seq_length, hidden_dim).
    """
    with tf.name_scope(name):
        lstm_cell = tf.contrib.rnn.LSTMCell(num_units=dim)
        outputs, final_states = tf.nn.dynamic_rnn(
            lstm_cell, inputs=inputs, sequence_length=seq_len,
            dtype=tf.float32, scope=name)
    return outputs, final_states
def last_output(output, true_length):
    """
    Select the last *real* hidden state from a dynamically unrolled RNN.

    output: (batch_size, max_seq_length, hidden_dim)
    true_length: (batch_size) true lengths, e.g. from length()
    Returns: (batch_size, hidden_dim)
    """
    max_len = int(output.get_shape()[1])
    # One-hot row per example marking position true_length-1, broadcast over
    # the hidden dimension.
    select = tf.one_hot(true_length - 1, max_len, on_value=1., off_value=0.)
    select = tf.expand_dims(select, -1)
    return tf.reduce_sum(tf.multiply(output, select), 1)
def masked_softmax(scores, mask):
    """
    Softmax over true sequence positions only (padding excluded).

    scores: (batch_size, max_seq_length, hidden_dim)
    mask: (batch_size, max_seq_length) as produced by length()
    """
    # Subtract the row max before exp for numerical stability, then zero out
    # padded positions so they get no probability mass.
    shifted = tf.subtract(scores, tf.reduce_max(scores, 1, keepdims=True))
    numerator = tf.exp(shifted) * mask
    denominator = tf.reduce_sum(numerator, 1, keepdims=True)
    return tf.div(numerator, denominator)
| 35.717647 | 174 | 0.708827 |
89a632749236c852dbd4a86c99ec662957c4f851 | 12,380 | py | Python | research/RetrievalRE/models/gpt2/tokenization_gpt2.py | zjunlp/PromptKG | 791bf82390eeadc30876d9f95e8dd26cd05de3dc | [
"MIT"
] | 11 | 2022-02-04T12:32:37.000Z | 2022-03-25T11:49:48.000Z | research/RetrievalRE/models/gpt2/tokenization_gpt2.py | zjunlp/PromptKG | 791bf82390eeadc30876d9f95e8dd26cd05de3dc | [
"MIT"
] | null | null | null | research/RetrievalRE/models/gpt2/tokenization_gpt2.py | zjunlp/PromptKG | 791bf82390eeadc30876d9f95e8dd26cd05de3dc | [
"MIT"
] | 4 | 2022-02-04T05:08:23.000Z | 2022-03-16T02:07:52.000Z | # coding=utf-8
# Copyright 2018 The Open AI Team Authors and The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tokenization classes for OpenAI GPT."""
import json
import os
from functools import lru_cache
from typing import TYPE_CHECKING, List, Optional, Tuple
import regex as re
from transformers.tokenization_utils import AddedToken, PreTrainedTokenizer
from transformers.utils import logging
if TYPE_CHECKING:
    # Imported only for type checking to avoid a runtime import cycle.
    from transformers.pipelines.conversational import Conversation

logger = logging.get_logger(__name__)

# File names the tokenizer expects inside a saved/pretrained directory.
VOCAB_FILES_NAMES = {
    "vocab_file": "vocab.json",
    "merges_file": "merges.txt",
}

# Download URLs for the vocabulary/merges files of published GPT-2 checkpoints.
PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "gpt2": "https://huggingface.co/gpt2/resolve/main/vocab.json",
        "gpt2-medium": "https://huggingface.co/gpt2-medium/resolve/main/vocab.json",
        "gpt2-large": "https://huggingface.co/gpt2-large/resolve/main/vocab.json",
        "gpt2-xl": "https://huggingface.co/gpt2-xl/resolve/main/vocab.json",
        "distilgpt2": "https://huggingface.co/distilgpt2/resolve/main/vocab.json",
    },
    "merges_file": {
        "gpt2": "https://huggingface.co/gpt2/resolve/main/merges.txt",
        "gpt2-medium": "https://huggingface.co/gpt2-medium/resolve/main/merges.txt",
        "gpt2-large": "https://huggingface.co/gpt2-large/resolve/main/merges.txt",
        "gpt2-xl": "https://huggingface.co/gpt2-xl/resolve/main/merges.txt",
        "distilgpt2": "https://huggingface.co/distilgpt2/resolve/main/merges.txt",
    },
}

# Maximum model input length (positional embedding count) per checkpoint.
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "gpt2": 1024,
    "gpt2-medium": 1024,
    "gpt2-large": 1024,
    "gpt2-xl": 1024,
    "distilgpt2": 1024,
}
@lru_cache()
def bytes_to_unicode():
    """
    Build the byte -> unicode-character table used by byte-level BPE.

    Every one of the 256 byte values is mapped to a printable unicode
    character: "visible" latin-1 bytes map to themselves, while the remaining
    (whitespace/control) bytes are remapped to code points 256 and above so
    the BPE code never has to handle them.  Computed once (lru_cache).
    """
    visible = (
        list(range(ord("!"), ord("~") + 1))
        + list(range(ord("¡"), ord("¬") + 1))
        + list(range(ord("®"), ord("ÿ") + 1))
    )
    # Visible bytes first (identity mapping), matching the original ordering.
    table = {code: chr(code) for code in visible}
    offset = 0
    for code in range(2 ** 8):
        if code not in table:
            # Shift non-printable bytes above the byte range.
            table[code] = chr(2 ** 8 + offset)
            offset += 1
    return table
def get_pairs(word):
    """
    Return the set of adjacent symbol pairs in a word.

    Word is represented as a tuple of symbols (symbols being variable-length
    strings).  Returns an empty set for words with fewer than two symbols
    (the original implementation raised IndexError on an empty word).
    """
    # zip pairs each symbol with its successor; < 2 symbols yields no pairs.
    return set(zip(word, word[1:]))
class GPT2Tokenizer(PreTrainedTokenizer):
"""
Construct a GPT-2 tokenizer. Based on byte-level Byte-Pair-Encoding.
This tokenizer has been trained to treat spaces like parts of the tokens (a bit like sentencepiece) so a word will
be encoded differently whether it is at the beginning of the sentence (without space) or not:
::
>>> from transformers import GPT2Tokenizer
>>> tokenizer = GPT2Tokenizer.from_pretrained("gpt2")
>>> tokenizer("Hello world")['input_ids']
[15496, 995]
>>> tokenizer(" Hello world")['input_ids']
[18435, 995]
You can get around that behavior by passing ``add_prefix_space=True`` when instantiating this tokenizer or when you
call it on some text, but since the model was not pretrained this way, it might yield a decrease in performance.
.. note::
When used with ``is_split_into_words=True``, this tokenizer will add a space before each word (even the first
one).
This tokenizer inherits from :class:`~transformers.PreTrainedTokenizer` which contains most of the main methods.
Users should refer to this superclass for more information regarding those methods.
Args:
vocab_file (:obj:`str`):
Path to the vocabulary file.
merges_file (:obj:`str`):
Path to the merges file.
errors (:obj:`str`, `optional`, defaults to :obj:`"replace"`):
Paradigm to follow when decoding bytes to UTF-8. See `bytes.decode
<https://docs.python.org/3/library/stdtypes.html#bytes.decode>`__ for more information.
unk_token (:obj:`str`, `optional`, defaults to :obj:`<|endoftext|>`):
The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this
token instead.
bos_token (:obj:`str`, `optional`, defaults to :obj:`<|endoftext|>`):
The beginning of sequence token.
eos_token (:obj:`str`, `optional`, defaults to :obj:`<|endoftext|>`):
The end of sequence token.
add_prefix_space (:obj:`bool`, `optional`, defaults to :obj:`False`):
Whether or not to add an initial space to the input. This allows to treat the leading word just as any
other word. (GPT2 tokenizer detect beginning of words by the preceding space).
"""
vocab_files_names = VOCAB_FILES_NAMES
pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
model_input_names = ["input_ids", "attention_mask"]
    def __init__(
        self,
        vocab_file,
        merges_file,
        errors="replace",
        unk_token="<|endoftext|>",
        bos_token="<|endoftext|>",
        eos_token="<|endoftext|>",
        add_prefix_space=False,
        **kwargs
    ):
        """
        Load the byte-level BPE vocabulary and merge ranks from disk.

        ``vocab_file`` is a JSON token->id mapping; ``merges_file`` lists one
        BPE merge per line (its first line is a header and is skipped).
        ``errors`` is the ``bytes.decode`` error policy used when mapping
        byte sequences back to text.
        """
        # Wrap plain-string special tokens so they are treated atomically.
        bos_token = AddedToken(bos_token, lstrip=False, rstrip=False) if isinstance(bos_token, str) else bos_token
        eos_token = AddedToken(eos_token, lstrip=False, rstrip=False) if isinstance(eos_token, str) else eos_token
        unk_token = AddedToken(unk_token, lstrip=False, rstrip=False) if isinstance(unk_token, str) else unk_token
        super().__init__(
            errors=errors,
            unk_token=unk_token,
            bos_token=bos_token,
            eos_token=eos_token,
            add_prefix_space=add_prefix_space,
            **kwargs,
        )
        # encoder: token -> id; decoder: id -> token.
        with open(vocab_file, encoding="utf-8") as vocab_handle:
            self.encoder = json.load(vocab_handle)
        self.decoder = {v: k for k, v in self.encoder.items()}
        self.errors = errors # how to handle errors in decoding
        # Byte <-> printable-unicode proxy tables for byte-level BPE.
        self.byte_encoder = bytes_to_unicode()
        self.byte_decoder = {v: k for k, v in self.byte_encoder.items()}
        # Merge ranks: merges earlier in the file have lower rank and are
        # applied first by bpe().
        with open(merges_file, encoding="utf-8") as merges_handle:
            bpe_merges = merges_handle.read().split("\n")[1:-1]
        bpe_merges = [tuple(merge.split()) for merge in bpe_merges]
        self.bpe_ranks = dict(zip(bpe_merges, range(len(bpe_merges))))
        # Memoization cache for bpe() results, keyed by token.
        self.cache = {}
        self.add_prefix_space = add_prefix_space
        # Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
        self.pat = re.compile(r"""'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+""")
@property
def vocab_size(self):
    """Size of the base vocabulary (added tokens excluded)."""
    return len(self.encoder)

def get_vocab(self):
    """Return the full token->id mapping, including added tokens."""
    return dict(self.encoder, **self.added_tokens_encoder)
def bpe(self, token):
    """Apply byte-pair-encoding merges to one pre-tokenized token.

    Repeatedly merges the adjacent symbol pair with the lowest (highest
    priority) rank in ``self.bpe_ranks`` until no known pair remains,
    then returns the resulting symbols joined by single spaces. Results
    are memoized in ``self.cache``.
    """
    if token in self.cache:
        return self.cache[token]
    word = tuple(token)
    pairs = get_pairs(word)

    if not pairs:
        # Single-character token: nothing to merge.
        return token

    while True:
        # Pick the highest-priority mergeable pair; unknown pairs rank inf.
        bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float("inf")))
        if bigram not in self.bpe_ranks:
            break
        first, second = bigram
        new_word = []
        i = 0
        # Rebuild the word, fusing every (first, second) occurrence.
        while i < len(word):
            try:
                j = word.index(first, i)
            except ValueError:
                new_word.extend(word[i:])
                break
            else:
                new_word.extend(word[i:j])
                i = j

            if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
                new_word.append(first + second)
                i += 2
            else:
                new_word.append(word[i])
                i += 1
        new_word = tuple(new_word)
        word = new_word
        if len(word) == 1:
            break
        else:
            pairs = get_pairs(word)
    word = " ".join(word)
    self.cache[token] = word
    return word
def _tokenize(self, text):
    """Split ``text`` with the GPT-2 pre-tokenization regex, byte-encode
    each piece, and expand it into BPE sub-word tokens."""
    bpe_tokens = []
    for piece in re.findall(self.pat, text):
        # Map every UTF-8 byte to a printable unicode character so BPE
        # never sees raw control bytes or spaces.
        encoded = "".join(self.byte_encoder[byte] for byte in piece.encode("utf-8"))
        bpe_tokens.extend(self.bpe(encoded).split(" "))
    return bpe_tokens
def _convert_token_to_id(self, token):
    """Converts a token (str) in an id using the vocab."""
    unk_id = self.encoder.get(self.unk_token)
    return self.encoder.get(token, unk_id)

def _convert_id_to_token(self, index):
    """Converts an index (integer) in a token (str) using the vocab."""
    return self.decoder.get(index)

def convert_tokens_to_string(self, tokens):
    """Converts a sequence of tokens (string) in a single string."""
    joined = "".join(tokens)
    raw = bytearray(self.byte_decoder[char] for char in joined)
    return raw.decode("utf-8", errors=self.errors)
def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str, str]:
    """Write the vocabulary JSON and BPE merges file into ``save_directory``.

    Returns the ``(vocab_file, merge_file)`` paths, or None (with an error
    logged) when ``save_directory`` is not an existing directory.
    """
    if not os.path.isdir(save_directory):
        logger.error(f"Vocabulary path ({save_directory}) should be a directory")
        return
    vocab_file = os.path.join(
        save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
    )
    merge_file = os.path.join(
        save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"]
    )

    with open(vocab_file, "w", encoding="utf-8") as f:
        f.write(json.dumps(self.encoder, ensure_ascii=False))

    index = 0
    with open(merge_file, "w", encoding="utf-8") as writer:
        writer.write("#version: 0.2\n")
        # Merges are written in rank order; a gap in the ranks means the
        # tokenizer is corrupted -- logged as a warning, not fatal.
        for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]):
            if index != token_index:
                logger.warning(
                    f"Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."
                    " Please check that the tokenizer is not corrupted!"
                )
                index = token_index
            writer.write(" ".join(bpe_tokens) + "\n")
            index += 1

    return vocab_file, merge_file
def prepare_for_tokenization(self, text, is_split_into_words=False, **kwargs):
    """Optionally prepend a space so the first word tokenizes like any other."""
    prepend_space = kwargs.pop("add_prefix_space", self.add_prefix_space)
    prepared = (" " + text) if (is_split_into_words or prepend_space) else text
    return (prepared, kwargs)
def _build_conversation_input_ids(self, conversation: "Conversation") -> List[int]:
    """Flatten a Conversation into input ids, appending EOS after each turn.

    If the result exceeds ``model_max_length``, only the most recent
    tokens are kept (truncation from the left).
    """
    input_ids = []
    for is_user, text in conversation.iter_texts():
        input_ids.extend(self.encode(text, add_special_tokens=False) + [self.eos_token_id])
    if len(input_ids) > self.model_max_length:
        input_ids = input_ids[-self.model_max_length :]
    return input_ids
| 39.935484 | 119 | 0.622859 |
cf329e5f66247bbca95e4bf363db6bddf61b921c | 1,046 | py | Python | DataAggregator/FIPS_Reference.py | Trigition/Village | bf22077f54e87be2dda1bf8984d0a62914e8e70f | [
"Apache-2.0"
] | 1 | 2017-05-17T15:28:52.000Z | 2017-05-17T15:28:52.000Z | DataAggregator/FIPS_Reference.py | Trigition/Village | bf22077f54e87be2dda1bf8984d0a62914e8e70f | [
"Apache-2.0"
] | null | null | null | DataAggregator/FIPS_Reference.py | Trigition/Village | bf22077f54e87be2dda1bf8984d0a62914e8e70f | [
"Apache-2.0"
# USPS state/territory abbreviation -> two-digit ANSI/FIPS state numeric code.
# Includes the territories AS, GU, PR and VI; note DC ("11") is not listed.
FIPS_Reference = {
    "AL": "01",
    "AK": "02",
    "AZ": "04",
    "AR": "05",
    "AS": "60",
    "CA": "06",
    "CO": "08",
    "CT": "09",
    "DE": "10",
    "FL": "12",
    "GA": "13",
    "GU": "66",
    "HI": "15",
    "ID": "16",
    "IL": "17",
    "IN": "18",
    "IA": "19",
    "KS": "20",
    "KY": "21",
    "LA": "22",
    "ME": "23",
    "MD": "24",
    "MA": "25",
    "MI": "26",
    "MN": "27",
    "MS": "28",
    "MO": "29",
    "MT": "30",
    # BUG FIX: Nebraska's FIPS code is 31, not 32 (32 is Nevada).
    "NE": "31",
    "NV": "32",
    "NH": "33",
    "NJ": "34",
    "NM": "35",
    "NY": "36",
    "NC": "37",
    "ND": "38",
    "OH": "39",
    "OK": "40",
    "OR": "41",
    "PA": "42",
    "RI": "44",
    "PR": "72",
    "SC": "45",
    "SD": "46",
    "TN": "47",
    "TX": "48",
    "UT": "49",
    "VT": "50",
    "VI": "78",
    "VA": "51",
    "WA": "53",
    "WV": "54",
    "WI": "55",
    "WY": "56",
}
| 18.350877 | 18 | 0.219885 |
21a202c23d1794f48aa97e31838a01e2dc2f489f | 26,346 | py | Python | hphp/hack/test/verify.py | the-storm/hhvm | 8c173160bb6b8e56277a04d00efe4c1a234b605c | [
"PHP-3.01",
"Zend-2.0"
] | null | null | null | hphp/hack/test/verify.py | the-storm/hhvm | 8c173160bb6b8e56277a04d00efe4c1a234b605c | [
"PHP-3.01",
"Zend-2.0"
] | 1 | 2021-04-19T09:53:42.000Z | 2021-04-19T09:53:42.000Z | hphp/hack/test/verify.py | isabella232/hhvm | 6345684c993c575db92cbe18cd739147bfa0296f | [
"PHP-3.01",
"Zend-2.0"
] | null | null | null | #!/usr/bin/env python3
# pyre-strict
import argparse
import difflib
import os
import os.path
import re
import shlex
import subprocess
import sys
from concurrent.futures import ThreadPoolExecutor
from dataclasses import dataclass
from typing import Callable, Dict, List, Optional, Tuple
from hphp.hack.test.parse_errors import Error, parse_errors, sprint_errors
# Module-level defaults; overwritten from CLI arguments in __main__ below.
max_workers = 48
verbose = False
dump_on_failure = False
batch_size = 500
@dataclass
class TestCase:
    """One test to run: input file, optional stdin payload, expected output."""

    # Path to the test's input file.
    file_path: str
    # Contents piped to the program's stdin, or None to pass the file name.
    input: Optional[str]
    # Expected output (read from the .exp file).
    expected: str
@dataclass
class Result:
    """Outcome of running a single TestCase."""

    test_case: TestCase
    # Raw output produced by the program under test.
    output: str
    is_failure: bool
"""
Per-test flags passed to test executable. Expected to be in a file with
same name as test, but with .flags extension.
"""
def compare_errors_by_line_no(
    errors_exp: List[Error], errors_out: List[Error]
) -> Tuple[List[Error], List[Error]]:
    """Merge-compare two line-sorted error lists by line number only.

    Returns (errors expected but not produced, errors produced but not
    expected).
    """
    missing: List[Error] = []
    unexpected: List[Error] = []
    idx_exp = 0
    idx_out = 0
    # Classic two-pointer walk over both sorted lists.
    while idx_out < len(errors_out) and idx_exp < len(errors_exp):
        line_out = errors_out[idx_out].message.position.line
        line_exp = errors_exp[idx_exp].message.position.line
        if line_out < line_exp:
            unexpected.append(errors_out[idx_out])
            idx_out += 1
        elif line_exp < line_out:
            missing.append(errors_exp[idx_exp])
            idx_exp += 1
        else:
            idx_out += 1
            idx_exp += 1
    # One side is exhausted; everything left on the other is a mismatch.
    missing.extend(errors_exp[idx_exp:])
    unexpected.extend(errors_out[idx_out:])
    return (missing, unexpected)
def compare_output_files_error_lines_only(
    file_out: str, file_exp: str
) -> Tuple[bool, str]:
    """Parse two error files and diff them by error line number only.

    Returns (failed, human-readable report). IO errors while reading
    either file downgrade to a warning instead of a failure.
    """
    out = ""
    failed = False
    try:
        errors_out = parse_errors(file_out)
        errors_exp = parse_errors(file_exp)
        (
            errors_in_exp_not_in_out,
            errors_in_out_not_in_exp,
        ) = compare_errors_by_line_no(errors_out=errors_out, errors_exp=errors_exp)
        failed = bool(errors_in_exp_not_in_out) or bool(errors_in_out_not_in_exp)
        # \033[93m / \033[0m wrap the headings in yellow for terminals.
        if errors_in_exp_not_in_out:
            out += f"""\033[93mExpected errors which were not produced:\033[0m
{sprint_errors(errors_in_exp_not_in_out)}
"""
        if errors_in_out_not_in_exp:
            out += f"""\033[93mProduced errors which were not expected:\033[0m
{sprint_errors(errors_in_out_not_in_exp)}
"""
    except IOError as e:
        out = f"Warning: {e}"
    return (failed, out)
def check_output_error_lines_only(
test: str, out_ext: str = ".out", exp_ext: str = ".exp"
) -> Tuple[bool, str]:
file_out = test + out_ext
file_exp = test + exp_ext
return compare_output_files_error_lines_only(file_out=file_out, file_exp=file_exp)
def get_test_flags(path: str) -> List[str]:
    """Per-test flags passed to the test executable.

    They are expected in a file with the same name as the test but a
    .flags extension; a missing file means no extra flags.
    """
    flags_path = os.path.splitext(path)[0] + ".flags"
    if not os.path.isfile(flags_path):
        return []
    with open(flags_path) as handle:
        return shlex.split(handle.read().strip())
def check_output(
    case: TestCase,
    out_extension: str,
    default_expect_regex: Optional[str],
    ignore_error_text: bool,
    only_compare_error_lines: bool,
) -> Result:
    """Compare a test's on-disk output file against its expectation.

    In error-lines-only mode the .out/.exp files are parsed as error
    lists; otherwise the raw output file is read and handed to
    ``check_result`` (a missing output file becomes a failure message).
    """
    if only_compare_error_lines:
        (failed, out) = check_output_error_lines_only(case.file_path)
        return Result(test_case=case, output=out, is_failure=failed)
    else:
        out_path = case.file_path + out_extension
        try:
            with open(out_path, "r") as f:
                output: str = f.read()
        except FileNotFoundError:
            out_path = os.path.realpath(out_path)
            output = "Output file " + out_path + " was not found!"
        return check_result(case, default_expect_regex, ignore_error_text, output)
def debug_cmd(cwd: str, cmd: List[str]) -> None:
    """Print the working directory and command when verbose mode is on."""
    if not verbose:
        return
    print("From directory", os.path.realpath(cwd))
    print("Executing", " ".join(cmd))
    print()
def run_batch_tests(
    test_cases: List[TestCase],
    program: str,
    default_expect_regex: Optional[str],
    ignore_error_text: bool,
    no_stderr: bool,
    force_color: bool,
    mode_flag: List[str],
    get_flags: Callable[[str], List[str]],
    out_extension: str,
    only_compare_error_lines: bool = False,
) -> List[Result]:
    """
    Run the program with batches of files and return a list of results.
    """
    # Each directory needs to be in a separate batch because flags are different
    # for each directory.
    # Compile a list of directories to test cases, and then
    dirs_to_files: Dict[str, List[TestCase]] = {}
    for case in test_cases:
        test_dir = os.path.dirname(case.file_path)
        dirs_to_files.setdefault(test_dir, []).append(case)

    # run a list of test cases.
    # The contract here is that the program will write to
    # filename.out_extension for each file, and we read that
    # for the output.
    def run(test_cases: List[TestCase]) -> List[Result]:
        if not test_cases:
            raise AssertionError()
        first_test = test_cases[0]
        test_dir = os.path.dirname(first_test.file_path)
        # Flags are per-directory, so any case in the batch is representative.
        flags = get_flags(test_dir)
        test_flags = get_test_flags(first_test.file_path)
        cmd = [program]
        cmd += mode_flag
        cmd += ["--batch-files", "--out-extension", out_extension]
        cmd += flags + test_flags
        cmd += [os.path.basename(case.file_path) for case in test_cases]
        debug_cmd(test_dir, cmd)
        env = os.environ.copy()
        env["FORCE_ERROR_COLOR"] = "true" if force_color else "false"
        try:
            return_code = subprocess.call(
                cmd,
                stderr=None if no_stderr else subprocess.STDOUT,
                cwd=test_dir,
                universal_newlines=True,
                env=env,
            )
        except subprocess.CalledProcessError:
            # we don't care about nonzero exit codes... for instance, type
            # errors cause hh_single_type_check to produce them
            return_code = None
        # -11 is 128 - SIGSEGV: surface segfaults loudly.
        if return_code == -11:
            print(
                "Segmentation fault while running the following command "
                + "from directory "
                + os.path.realpath(test_dir)
            )
            print(" ".join(cmd))
            print()
        results = []
        for case in test_cases:
            result = check_output(
                case,
                out_extension=out_extension,
                default_expect_regex=default_expect_regex,
                ignore_error_text=ignore_error_text,
                only_compare_error_lines=only_compare_error_lines,
            )
            results.append(result)
        return results

    # Create a list of batched cases.
    all_batched_cases: List[List[TestCase]] = []

    # For each directory, we split all the test cases
    # into chunks of batch_size. Then each of these lists
    # is a separate job for each thread in the threadpool.
    for cases in dirs_to_files.values():
        batched_cases: List[List[TestCase]] = [
            cases[i : i + batch_size] for i in range(0, len(cases), batch_size)
        ]
        all_batched_cases += batched_cases

    executor = ThreadPoolExecutor(max_workers=max_workers)
    futures = [executor.submit(run, test_batch) for test_batch in all_batched_cases]

    results = [future.result() for future in futures]
    # Flatten the list
    return [item for sublist in results for item in sublist]
def run_test_program(
    test_cases: List[TestCase],
    program: str,
    default_expect_regex: Optional[str],
    ignore_error_text: bool,
    no_stderr: bool,
    force_color: bool,
    mode_flag: List[str],
    get_flags: Callable[[str], List[str]],
    timeout: Optional[float] = None,
) -> List[Result]:
    """
    Run the program and return a list of results.
    """

    def run(test_case: TestCase) -> Result:
        test_dir, test_name = os.path.split(test_case.file_path)
        flags = get_flags(test_dir)
        test_flags = get_test_flags(test_case.file_path)
        cmd = [program]
        cmd += mode_flag
        # When input is supplied via stdin, the file name is omitted.
        if test_case.input is None:
            cmd.append(test_name)
        cmd += flags + test_flags
        debug_cmd(test_dir, cmd)
        env = os.environ.copy()
        env["FORCE_ERROR_COLOR"] = "true" if force_color else "false"
        try:
            output = subprocess.check_output(
                cmd,
                stderr=None if no_stderr else subprocess.STDOUT,
                cwd=test_dir,
                universal_newlines=True,
                # pyre-ignore
                input=test_case.input,
                timeout=timeout,
                errors="replace",
                env=env,
            )
        except subprocess.TimeoutExpired as e:
            output = "Timed out. " + str(e.output)
        except subprocess.CalledProcessError as e:
            # we don't care about nonzero exit codes... for instance, type
            # errors cause hh_single_type_check to produce them
            output = str(e.output)
        return check_result(test_case, default_expect_regex, ignore_error_text, output)

    executor = ThreadPoolExecutor(max_workers=max_workers)
    futures = [executor.submit(run, test_case) for test_case in test_cases]
    return [future.result() for future in futures]
def filter_ocaml_stacktrace(text: str) -> str:
    """take a string and remove all the lines that look like
    they're part of an OCaml stacktrace"""
    assert isinstance(text, str)
    # OCaml backtrace lines start with "Called from ..." / "Raised at ...".
    # The original `if drop: pass / else: append` loop is a filtering
    # comprehension with a tuple-argument startswith.
    kept = [
        line
        for line in text.splitlines()
        if not line.lstrip().startswith(("Called", "Raised"))
    ]
    return "\n".join(kept)
def filter_version_field(text: str) -> str:
    """given a string, remove the part that looks like the schema version"""
    assert isinstance(text, str)
    version_pattern = re.compile(r'"version":"\d{4}-\d{2}-\d{2}-\d{4}"')
    # Only the first occurrence is sanitised, matching count=1 semantics.
    return version_pattern.sub(r'"version":"sanitised"', text, count=1)
def filter_temp_hhi_path(text: str) -> str:
    """The .hhi files are stored in a temporary directory whose name
    changes every time. Normalise it.

    /tmp/ASjh5RoWbb/builtins_fb.hhi -> /tmp/hhi_dir/builtins_fb.hhi
    """
    hhi_pattern = re.compile(r"/tmp/[^/]*/([a-zA-Z0-9_]+\.hhi)")
    return hhi_pattern.sub(r"/tmp/hhi_dir/\1", text)
def compare_expected(expected: str, out: str) -> bool:
    """Exact equality is required only when either side is the
    "No errors" marker; any other pair is treated as matching (the
    caller only cares about error text in that case)."""
    sentinel = "No errors\n"
    if sentinel in (expected, out):
        return expected == out
    return True
def strip_lines(text: str) -> str:
    """Strip leading and trailing whitespace from every line of ``text``."""
    stripped = (line.strip() for line in text.splitlines())
    return "\n".join(stripped)
def check_result(
    test_case: TestCase,
    default_expect_regex: Optional[str],
    ignore_error_messages: bool,
    out: str,
) -> Result:
    """
    Check that the output of the test in :out corresponds to the expected
    output, or if a :default_expect_regex is provided,
    check that the output in :out contains the provided regex.
    """
    # Normalise both sides (per-line whitespace, schema version field,
    # temporary hhi directory) before any comparison.
    expected = filter_temp_hhi_path(
        filter_version_field(strip_lines(test_case.expected))
    )
    normalized_out = filter_temp_hhi_path(filter_version_field(strip_lines(out)))
    is_ok = (
        expected == normalized_out
        or (ignore_error_messages and compare_expected(expected, normalized_out))
        # Also accept output that matches once OCaml stacktrace lines are removed.
        or expected == filter_ocaml_stacktrace(normalized_out)
        or (
            default_expect_regex is not None
            and re.search(default_expect_regex, normalized_out) is not None
        )
    )
    return Result(test_case=test_case, output=out, is_failure=not is_ok)
def record_results(results: List[Result], out_ext: str) -> None:
    """Write each result's raw output next to its test file with ``out_ext``."""
    for result in results:
        out_path = result.test_case.file_path + out_ext
        with open(out_path, "wb") as handle:
            handle.write(result.output.encode("UTF-8"))
def find_in_ancestors_rec(dir: str, path: str) -> str:
    """Walk ``path`` upwards and return the first ancestor whose basename
    is ``dir``; raise when the filesystem root is reached first."""
    current = path
    while True:
        # Root (or empty path) reached without a match.
        if current == "" or os.path.dirname(current) == current:
            raise Exception("Could not find directory %s in ancestors." % dir)
        if os.path.basename(current) == dir:
            return current
        current = os.path.dirname(current)
def find_in_ancestors(dir: str, path: str) -> str:
    """Like ``find_in_ancestors_rec`` but re-raises with a message that
    also names the starting path."""
    try:
        found = find_in_ancestors_rec(dir, path)
    except Exception:
        raise Exception(
            "Could not find directory %s in ancestors of %s." % (dir, path)
        )
    return found
def get_exp_out_dirs(test_file: str) -> Tuple[str, str]:
    """Determine where .exp files live (source root) and where .out files
    go (build root).

    Honors HACK_SOURCE_ROOT/HACK_BUILD_ROOT when both are set; otherwise
    derives both roots from the test file's location inside fbcode.
    """
    if (
        os.environ.get("HACK_BUILD_ROOT") is not None
        and os.environ.get("HACK_SOURCE_ROOT") is not None
    ):
        exp_dir = os.environ["HACK_SOURCE_ROOT"]
        out_dir = os.environ["HACK_BUILD_ROOT"]
    else:
        fbcode = find_in_ancestors("fbcode", test_file)
        exp_dir = os.path.join(fbcode, "hphp", "hack")
        out_dir = os.path.dirname(find_in_ancestors("test", test_file))
    return exp_dir, out_dir
def report_failures(
    total: int,
    failures: List[Result],
    out_extension: str,
    expect_extension: str,
    fallback_expect_extension: Optional[str],
    no_copy: bool = False,
    only_compare_error_lines: bool = False,
) -> None:
    """Print failure details, record .out files, and emit the review.sh
    command line used to triage the failing tests."""
    if only_compare_error_lines:
        for failure in failures:
            # \033[95m.. \033[0m prints the file path in magenta.
            print(f"\033[95m{failure.test_case.file_path}\033[0m")
            print(failure.output)
            print()
    elif failures != []:
        record_results(failures, out_extension)
        if dump_on_failure:
            dump_failures(failures)

        fnames = [failure.test_case.file_path for failure in failures]
        print("To review the failures, use the following command: ")

        fallback_expect_ext_var = ""
        if fallback_expect_extension is not None:
            fallback_expect_ext_var = "FALLBACK_EXP_EXT=%s " % fallback_expect_extension

        first_test_file = os.path.realpath(failures[0].test_case.file_path)
        out_dir: str  # for Pyre
        (exp_dir, out_dir) = get_exp_out_dirs(first_test_file)
        output_dir_var = "SOURCE_ROOT=%s OUTPUT_ROOT=%s " % (exp_dir, out_dir)

        # Get a full path to 'review.sh' so this command be run
        # regardless of your current directory.
        review_script = os.path.join(
            os.path.dirname(os.path.realpath(__file__)), "review.sh"
        )
        if not os.path.isfile(review_script):
            review_script = "./hphp/hack/test/review.sh"

        def fname_map_var(f: str) -> str:
            return "hphp/hack/" + os.path.relpath(f, out_dir)

        print(
            "OUT_EXT=%s EXP_EXT=%s %s%sNO_COPY=%s %s %s"
            % (
                out_extension,
                expect_extension,
                fallback_expect_ext_var,
                output_dir_var,
                "true" if no_copy else "false",
                review_script,
                " ".join(map(fname_map_var, fnames)),
            )
        )
def dump_failures(failures: List[Result]) -> None:
    """Print expected output, actual output and an ndiff for each failure."""
    for failure in failures:
        want = failure.test_case.expected
        got = failure.output
        delta = difflib.ndiff(want.splitlines(True), got.splitlines(True))
        print("Details for the failed test %s:" % failure.test_case.file_path)
        print("\n>>>>> Expected output >>>>>>\n")
        print(want)
        print("\n===== Actual output ======\n")
        print(got)
        print("\n<<<<< End Actual output <<<<<<<\n")
        print("\n>>>>> Diff >>>>>>>\n")
        print("".join(delta))
        print("\n<<<<< End Diff <<<<<<<\n")
def get_hh_flags(test_dir: str) -> List[str]:
    """Read and shell-split the HH_FLAGS file in ``test_dir`` ([] if absent)."""
    flags_path = os.path.join(test_dir, "HH_FLAGS")
    if not os.path.isfile(flags_path):
        if verbose:
            print("No HH_FLAGS file found")
        return []
    with open(flags_path) as handle:
        return shlex.split(handle.read())
def files_with_ext(files: List[str], ext: str) -> List[str]:
    """
    Returns the set of filenames in :files that end in :ext

    Each returned entry is the filename with its extension removed;
    order follows the input list.
    """
    # Filtering comprehension replaces the manual append loop.
    return [
        prefix
        for prefix, suffix in map(os.path.splitext, files)
        if suffix == ext
    ]
def list_test_files(root: str, disabled_ext: str, test_ext: str) -> List[str]:
    """Recursively collect test files under ``root`` ending in ``test_ext``.

    A sibling file ``<name><disabled_ext>`` disables the test ``<name>``,
    and any directory literally named "disabled" is skipped entirely.
    """
    if os.path.isfile(root):
        if root.endswith(test_ext):
            return [root]
        else:
            return []
    elif os.path.isdir(root):
        result: List[str] = []
        children = os.listdir(root)
        # files_with_ext strips disabled_ext, leaving the disabled test names.
        disabled = files_with_ext(children, disabled_ext)
        for child in children:
            if child != "disabled" and child not in disabled:
                result.extend(
                    list_test_files(os.path.join(root, child), disabled_ext, test_ext)
                )
        return result
    elif os.path.islink(root):
        # Some editors create broken symlinks as part of their locking scheme,
        # so ignore those.
        return []
    else:
        raise Exception("Could not find test file or directory at %s" % root)
def get_content_(file_path: str, ext: str) -> str:
    """Return the contents of ``file_path + ext``; may raise FileNotFoundError."""
    with open(file_path + ext, "r") as handle:
        return handle.read()
def get_content(
    file_path: str, ext: str = "", fallback_ext: Optional[str] = None
) -> str:
    """Read ``file_path + ext``; on a missing file try ``fallback_ext``,
    and return "" when neither file exists."""
    try:
        return get_content_(file_path, ext)
    except FileNotFoundError:
        pass
    if fallback_ext is not None:
        try:
            return get_content_(file_path, fallback_ext)
        except FileNotFoundError:
            return ""
    return ""
def run_tests(
    files: List[str],
    expected_extension: str,
    fallback_expect_extension: Optional[str],
    out_extension: str,
    use_stdin: str,
    program: str,
    default_expect_regex: Optional[str],
    batch_mode: str,
    ignore_error_text: bool,
    no_stderr: bool,
    force_color: bool,
    mode_flag: List[str],
    get_flags: Callable[[str], List[str]],
    timeout: Optional[float] = None,
    only_compare_error_lines: bool = False,
) -> List[Result]:
    """Build TestCases for ``files``, run them (batched or one-by-one),
    report any failures, and sys.exit(1) if something failed."""
    # for each file, create a test case
    test_cases = [
        TestCase(
            file_path=file,
            expected=get_content(file, expected_extension, fallback_expect_extension),
            input=get_content(file) if use_stdin else None,
        )
        for file in files
    ]
    if batch_mode:
        results = run_batch_tests(
            test_cases,
            program,
            default_expect_regex,
            ignore_error_text,
            no_stderr,
            force_color,
            mode_flag,
            get_flags,
            out_extension,
            only_compare_error_lines,
        )
    else:
        results = run_test_program(
            test_cases,
            program,
            default_expect_regex,
            ignore_error_text,
            no_stderr,
            force_color,
            mode_flag,
            get_flags,
            timeout=timeout,
        )

    failures = [result for result in results if result.is_failure]

    num_results = len(results)
    if failures == []:
        print(
            "All tests in the suite passed! "
            "The number of tests that ran: %d\n" % num_results
        )
    else:
        print("The number of tests that failed: %d/%d\n" % (len(failures), num_results))
        # NOTE(review): this reads the module-level ``args`` (set in __main__)
        # rather than this function's own parameters -- fine for the CLI
        # entry point, surprising if run_tests is imported and called directly.
        report_failures(
            num_results,
            failures,
            args.out_extension,
            args.expect_extension,
            args.fallback_expect_extension,
            only_compare_error_lines=only_compare_error_lines,
        )
        sys.exit(1)  # this exit code fails the suite and lets Buck know

    return results
def run_idempotence_tests(
    results: List[Result],
    expected_extension: str,
    out_extension: str,
    program: str,
    default_expect_regex: Optional[str],
    mode_flag: List[str],
    get_flags: Callable[[str], List[str]],
) -> None:
    """Feed each passing test's output back to the program via stdin and
    require that the output is reproduced; sys.exit(1) on any mismatch."""
    idempotence_test_cases = [
        TestCase(
            file_path=result.test_case.file_path,
            expected=result.test_case.expected,
            input=result.output,
        )
        for result in results
    ]

    idempotence_results = run_test_program(
        idempotence_test_cases,
        program,
        default_expect_regex,
        False,
        False,
        False,
        mode_flag,
        get_flags,
    )

    num_idempotence_results = len(idempotence_results)

    idempotence_failures = [
        result for result in idempotence_results if result.is_failure
    ]

    if idempotence_failures == []:
        print(
            "All idempotence tests in the suite passed! The number of "
            "idempotence tests that ran: %d\n" % num_idempotence_results
        )
    else:
        print(
            "The number of idempotence tests that failed: %d/%d\n"
            % (len(idempotence_failures), num_idempotence_results)
        )
        report_failures(
            num_idempotence_results,
            idempotence_failures,
            out_extension + out_extension,  # e.g., *.out.out
            expected_extension,
            None,
            no_copy=True,
        )
        sys.exit(1)  # this exit code fails the suite and lets Buck know
def get_flags_cache(args_flags: List[str]) -> Callable[[str], List[str]]:
    """Build a flags getter that memoizes HH_FLAGS reads per directory and
    appends ``args_flags`` to every lookup."""
    hh_flags_by_dir: Dict[str, List[str]] = {}

    def get_flags(test_dir: str) -> List[str]:
        if test_dir not in hh_flags_by_dir:
            hh_flags_by_dir[test_dir] = get_hh_flags(test_dir)
        flags = hh_flags_by_dir[test_dir]
        if args_flags is not None:
            flags = flags + args_flags
        return flags

    return get_flags
def get_flags_dummy(args_flags: List[str]) -> Callable[[str], List[str]]:
    """Build a flags getter that ignores the directory entirely (used with
    --no-hh-flags) and always returns ``args_flags``."""

    def get_flags(_test_dir: str) -> List[str]:
        return args_flags

    return get_flags
if __name__ == "__main__":
    # CLI entry point: parse arguments, configure module globals, discover
    # test files, run the suite, and optionally run idempotence checks.
    parser = argparse.ArgumentParser()
    parser.add_argument("test_path", help="A file or a directory. ")
    parser.add_argument("--program", type=os.path.abspath)
    parser.add_argument("--out-extension", type=str, default=".out")
    parser.add_argument("--expect-extension", type=str, default=".exp")
    parser.add_argument("--fallback-expect-extension", type=str)
    parser.add_argument("--default-expect-regex", type=str)
    parser.add_argument("--in-extension", type=str, default=".php")
    parser.add_argument("--disabled-extension", type=str, default=".no_typecheck")
    parser.add_argument("--verbose", action="store_true")
    parser.add_argument(
        "--idempotence",
        action="store_true",
        help="Verify that the output passed to the program "
        "as input results in the same output.",
    )
    parser.add_argument("--max-workers", type=int, default="48")
    parser.add_argument(
        "--diff",
        action="store_true",
        help="On test failure, show the content of " "the files and a diff",
    )
    parser.add_argument("--mode-flag", type=str)
    parser.add_argument("--flags", nargs=argparse.REMAINDER)
    parser.add_argument(
        "--stdin", action="store_true", help="Pass test input file via stdin"
    )
    parser.add_argument(
        "--no-stderr",
        action="store_true",
        help="Do not include stderr output in the output file",
    )
    parser.add_argument(
        "--batch", action="store_true", help="Run tests in batches to the test program"
    )
    parser.add_argument(
        "--ignore-error-text",
        action="store_true",
        help="Do not compare error text when verifying output",
    )
    parser.add_argument(
        "--only-compare-error-lines",
        action="store_true",
        help="Does not care about exact expected error message, "
        "but only compare the error line numbers.",
    )
    parser.add_argument(
        "--timeout",
        type=int,
        help="Timeout in seconds for each test, in non-batch mode.",
    )
    parser.add_argument(
        "--force-color",
        action="store_true",
        help="Set the FORCE_ERROR_COLOR environment variable, "
        "which causes the test output to retain terminal escape codes.",
    )
    parser.add_argument(
        "--no-hh-flags", action="store_true", help="Do not read HH_FLAGS files"
    )
    parser.epilog = (
        "%s looks for a file named HH_FLAGS in the same directory"
        " as the test files it is executing. If found, the "
        "contents will be passed as arguments to "
        "<program> in addition to any arguments "
        "specified by --flags" % parser.prog
    )
    args: argparse.Namespace = parser.parse_args()

    # Rebind the module-level knobs the worker functions read.
    max_workers = args.max_workers
    verbose = args.verbose
    dump_on_failure = args.diff

    # Always dump failure details when running under Sandcastle CI.
    if os.getenv("SANDCASTLE") is not None:
        dump_on_failure = True

    if not os.path.isfile(args.program):
        raise Exception("Could not find program at %s" % args.program)

    files: List[str] = list_test_files(
        args.test_path, args.disabled_extension, args.in_extension
    )

    if len(files) == 0:
        raise Exception("Could not find any files to test in " + args.test_path)

    mode_flag: List[str] = [] if args.mode_flag is None else [args.mode_flag]
    get_flags: Callable[[str], List[str]] = (
        get_flags_dummy(args.flags) if args.no_hh_flags else get_flags_cache(args.flags)
    )

    results: List[Result] = run_tests(
        files,
        args.expect_extension,
        args.fallback_expect_extension,
        args.out_extension,
        args.stdin,
        args.program,
        args.default_expect_regex,
        args.batch,
        args.ignore_error_text,
        args.no_stderr,
        args.force_color,
        mode_flag,
        get_flags,
        timeout=args.timeout,
        only_compare_error_lines=args.only_compare_error_lines,
    )

    # Doesn't make sense to check failures for idempotence
    successes: List[Result] = [result for result in results if not result.is_failure]

    if args.idempotence and successes:
        run_idempotence_tests(
            successes,
            args.expect_extension,
            args.out_extension,
            args.program,
            args.default_expect_regex,
            mode_flag,
            get_flags,
        )
280612f1aebc0b511d88320c518a08e5b52d4333 | 1,979 | py | Python | spiderGUI.py | ConnorDFlynn/Group1PySpider | 8f2b8f161a544f59dcbdc3fbb572f4eb2b3be3b7 | [
"Apache-2.0"
] | null | null | null | spiderGUI.py | ConnorDFlynn/Group1PySpider | 8f2b8f161a544f59dcbdc3fbb572f4eb2b3be3b7 | [
"Apache-2.0"
] | null | null | null | spiderGUI.py | ConnorDFlynn/Group1PySpider | 8f2b8f161a544f59dcbdc3fbb572f4eb2b3be3b7 | [
"Apache-2.0"
] | null | null | null | __author__ = 'Connor'
__author__ = 'Alex'
import webbrowser
import subprocess
from tkinter import * #Importing Tkinter package
class spiderGUI:
    """Tiny Tkinter launcher window for PySpider.

    Shows a "Start Here!" button that opens the local WebUI and launches
    run.bat, plus a "Tutorial!" button that opens the online tutorial.
    """

    def __init__(self, master):
        url = "http://localhost:5000"  # WebUI local-host address
        url2 = "http://pyspider.readthedocs.org/en/master/tutorial/"  # Tutorial address

        def OpenUrl():
            # Open the WebUI, close the launcher, then start PySpider.
            try:
                webbrowser.open_new(url)  # Open WebUI URL
                master.destroy()  # Close window after click
            except:
                pass
            # NOTE(review): runs after the window is destroyed; relies on
            # run.bat being in the current working directory (Windows only).
            subprocess.Popen("run.bat")

        def OpenUrl2():
            try:
                webbrowser.open_new(url2)  # Open Tutorial URL
            except:
                pass

        def window():  # Window creation function
            labelfont = ('hel', 12, 'bold')  # font preferences for the header label
            label = Label(master, text='PySpider Start', bg='light green')  # header label
            label.config(font=labelfont)
            label.pack()
            # Button that starts the WebUI.
            button = Button(master, fg='red', text='Start Here!', command=OpenUrl, font='hel')
            button.pack(pady=10)
            # Button that opens the tutorial in the default browser.
            tutButton = Button(master, text='Tutorial!', command=OpenUrl2)
            tutButton.pack()

        window()  # Build the widgets as soon as the class is instantiated
# Build the root window, apply the launcher styling, and enter the Tk loop.
main = Tk()
main.geometry("350x150")  # initial window size (still resizable)
main.title("Welcome to PySpider!!!")
main.configure(bg='light green')
gui = spiderGUI(main)
main.mainloop()
1e0d268323b32c2c1ca8b4d1f3455b16bc50c804 | 207 | py | Python | contests/atcoder/abc179/abc179_c/main.py | conao3/coder | 2cdb610fec013da88a3470d460108e8a9b462445 | [
"CC0-1.0"
] | null | null | null | contests/atcoder/abc179/abc179_c/main.py | conao3/coder | 2cdb610fec013da88a3470d460108e8a9b462445 | [
"CC0-1.0"
] | null | null | null | contests/atcoder/abc179/abc179_c/main.py | conao3/coder | 2cdb610fec013da88a3470d460108e8a9b462445 | [
"CC0-1.0"
] | null | null | null | #!/usr/bin/env python3
from typing import *
# def solve(N: int) -> int:
def solve(N):
pass
def main():
N = int(input())
a = solve(N)
print(a)
if __name__ == '__main__':
main()
| 10.894737 | 27 | 0.536232 |
97e23a495bde4cb07a0f2a980e666be3fe8eb718 | 405 | py | Python | backend/kickOffProject/asgi.py | Dobro929/HSOS-SEP-PlantMap-2022 | 37a307c48c62ffe96672696f52a2ab2e420b102d | [
"BSD-3-Clause"
] | null | null | null | backend/kickOffProject/asgi.py | Dobro929/HSOS-SEP-PlantMap-2022 | 37a307c48c62ffe96672696f52a2ab2e420b102d | [
"BSD-3-Clause"
] | 2 | 2022-03-31T16:56:53.000Z | 2022-03-31T16:58:24.000Z | backend/kickOffProject/asgi.py | Dobro929/HSOS-SEP-PlantMap-2022 | 37a307c48c62ffe96672696f52a2ab2e420b102d | [
"BSD-3-Clause"
] | null | null | null | """
ASGI config for kickOffProject project.
It exposes the ASGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/4.0/howto/deployment/asgi/
"""
import os
from django.core.asgi import get_asgi_application
# Point Django at the project settings module before building the app.
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'kickOffProject.settings')

# Module-level ASGI callable that servers (uvicorn/daphne) import.
application = get_asgi_application()
| 23.823529 | 78 | 0.792593 |
55080f6578bd7eaa7571d068151d95a08103e643 | 6,240 | py | Python | src/dof/model/container.py | docker-forensics-toolkit/toolkit | c9bb176b15e26a82036d2292493a3ea789ed5144 | [
"Apache-2.0"
] | 39 | 2019-03-27T03:34:17.000Z | 2022-03-24T20:02:56.000Z | src/dof/model/container.py | docker-forensics-toolkit/toolkit | c9bb176b15e26a82036d2292493a3ea789ed5144 | [
"Apache-2.0"
] | 2 | 2019-04-07T11:57:38.000Z | 2021-01-23T14:06:30.000Z | src/dof/model/container.py | docker-forensics-toolkit/toolkit | c9bb176b15e26a82036d2292493a3ea789ed5144 | [
"Apache-2.0"
] | 7 | 2020-02-21T04:13:27.000Z | 2022-03-26T21:01:21.000Z | import collections
import json
from enum import Enum, auto
from pathlib import Path
import subprocess
from typing import List
from infrastructure.logging import trace
# A container volume mapping: host path (source) -> in-container path (destination).
Volume = collections.namedtuple('Volume', 'source destination')
class ConfigVersion(Enum):
    """Schema version of a Docker container's on-disk config file."""

    One = auto()
    Two = auto()
class Container:
def __init__(self, docker_home: Path, config_file: dict, config_version: ConfigVersion):
    """Wrap an already-parsed container config located under ``docker_home``."""
    self.docker_home = docker_home
    self.config_file = config_file
    self.config_version = config_version
def __eq__(self, other):
    """Containers are equal when their IDs match.

    Returns NotImplemented for non-Container operands so Python falls
    back to the other operand's comparison (or identity) instead of
    raising AttributeError on a missing ``id`` attribute.
    """
    if not isinstance(other, Container):
        return NotImplemented
    return self.id == other.id
@staticmethod
def from_v2_config(container_folder: Path):
    """Load config.v2.json and hostconfig.json from a container folder.

    Returns the container config dict with the host config merged in
    under the 'HostConfig' key (despite the class name, the return is a
    plain dict, not a Container instance).
    """
    container_file = container_folder / "config.v2.json"
    container_config = Container.__read_container_config(container_file)
    host_config = Container.__read_host_config(container_folder)
    container_config['HostConfig'] = host_config
    return container_config
@staticmethod
def __read_container_config(container_file):
    """Parse a container's config.v2.json and return it as a dict."""
    trace(f"Reading container config from: {str(container_file)}")
    with container_file.open() as handle:
        return json.load(handle)
@staticmethod
def __read_host_config(container_folder):
    """Parse a container folder's hostconfig.json and return it as a dict."""
    host_file = container_folder / "hostconfig.json"
    trace(f"Reading container host config from: {str(host_file)}")
    with host_file.open() as handle:
        return json.load(handle)
def mount_container_filesystem(self, container_mountpoint: Path, image_mountpoint: Path) -> Path:
"""Tries to mount the container filesystem using the 'mount' command."""
if self.storage_driver != "overlay2":
raise NotImplementedError("Mounting container filesystems is only supported for overlay2 storage driver")
command = ["mount", "-t", "overlay", "overlay", "-r", "-o",
f"lowerdir={self.image_layer_folders}:"
f"{self.container_layer_folder}",
str(container_mountpoint)]
trace(f'Running: {" ".join(command)}')
subprocess.check_call(command, cwd=str(self.storage_driver_folder))
for volume in self.volumes:
if volume.source:
volume_dest = container_mountpoint / volume.destination[1:]
command = ["mount", "--bind", str(image_mountpoint / volume.source[1:]), str(volume_dest)]
trace(f'Running: {" ".join(command)}')
subprocess.check_call(command)
print(f"Mounted volume {volume.destination}")
return container_mountpoint
@property
def id(self):
return self.config_file["ID"]
@property
def name(self):
return self.config_file["Name"]
@property
def container_folder(self):
return self.docker_home / "containers" / self.id
@property
def creation_date(self):
return self.config_file['Created']
@property
def state(self) -> str:
if self.config_file["State"]["Running"]:
return "running"
else:
return "dead"
@property
def restart_policy(self) -> str:
return self.config_file['HostConfig']['RestartPolicy']['Name']
@property
def entrypoint(self):
return self.config_file['Config']['Entrypoint']
@property
def command(self):
return self.config_file['Config']['Cmd']
@property
def container_layer_id(self) -> str:
with self._container_layer_id_file.open() as file:
return file.read()
@property
def storage_driver_folder(self) -> Path:
return self.docker_home / self.storage_driver
@property
def container_layer_folder(self) -> Path:
if self.storage_driver == "overlay2":
return self.storage_driver_folder / self.container_layer_id / "diff"
elif self.storage_driver == "aufs":
return self.storage_driver_folder / "diff" / self.container_layer_id
else:
raise NotImplemented(f"Unsupported storage driver {self.storage_driver}")
@property
def container_layer_work_folder(self) -> Path:
return self.storage_driver_folder / self.container_layer_id / "work"
@property
def image_layer_folders(self) -> str:
with (self.storage_driver_folder / self.container_layer_id / "lower").open() as lower_file:
return lower_file.read()
@property
def _container_layer_id_file(self) -> Path:
return self.docker_home / "image" / self.storage_driver / "layerdb" / "mounts" / self.id / "mount-id"
def get_path_to_logfile(self, image_mountpoint: Path) -> Path:
if self.logging_driver() == "json-file":
relative_logfile_path = self.config_file['LogPath'][1:]
return image_mountpoint / relative_logfile_path
elif self.logging_driver() == "local":
return self.container_folder / "local-logs" / "container.log"
@property
def storage_driver(self) -> str:
return self.config_file["Driver"]
@property
def ports(self) -> dict:
return self.config_file["NetworkSettings"]["Ports"]
@property
def image_tag(self) -> str:
return self.config_file["Config"]["Image"]
@property
def image_id(self) -> str:
return self.config_file["Image"]
@property
def volumes(self) -> List[Volume]:
volume_list = []
mount_points: dict = self.config_file['MountPoints']
for mountpoint, config in mount_points.items():
volume_list.append(Volume(source=config['Source'], destination=mountpoint))
return volume_list
def ports_to_string(self) -> str:
string = ""
for container_port, host_port_list in self.ports.items():
if not host_port_list:
string += "<none>"
else:
for mapping in host_port_list:
string += f"{mapping['HostIp']}:{mapping['HostPort']}"
string += "->"
string += container_port
return string
def logging_driver(self) -> str:
return self.config_file["HostConfig"]["LogConfig"]["Type"]
| 34.285714 | 117 | 0.642949 |
d0bab84d7e71b32b433ef3b8e3fc8e14230b81bc | 522 | py | Python | src/pwbus/transformations/_transform_json.py | fszostak/pwbus | 008893b039547eb886b04eea1b6177902b4d12ea | [
"MIT"
] | 1 | 2020-05-05T01:12:46.000Z | 2020-05-05T01:12:46.000Z | src/pwbus/transformations/_transform_json.py | fszostak/pwbus | 008893b039547eb886b04eea1b6177902b4d12ea | [
"MIT"
] | null | null | null | src/pwbus/transformations/_transform_json.py | fszostak/pwbus | 008893b039547eb886b04eea1b6177902b4d12ea | [
"MIT"
] | null | null | null | # PWBus - _TransformJSON Class
#:
#: maintainer: fabio.szostak@perfweb.com.br | Mon Nov 18 07:36:30 -03 2019
import traceback
from pwbus.commons.logging import *
from pwbus.transformations._transform import _Transform
class _TransformJSON(_Transform):
    """JSON payload transformation.

    Delegates to the string/JSON conversion helpers inherited from
    ``_Transform`` in both directions.
    """

    def parse_in(self, payload):
        """Deserialize an inbound JSON string payload into a dict."""
        return self.stringToJson(data=payload)

    def parse_out(self, payload):
        """Serialize an outbound dict payload into a JSON string."""
        return self.jsonToString(data=payload)
91e447f59899b898b3a85d8bdde00306d83e08b7 | 54,682 | py | Python | pybind/slxos/v17r_2_00/tm_state/__init__.py | extremenetworks/pybind | 44c467e71b2b425be63867aba6e6fa28b2cfe7fb | [
"Apache-2.0"
] | null | null | null | pybind/slxos/v17r_2_00/tm_state/__init__.py | extremenetworks/pybind | 44c467e71b2b425be63867aba6e6fa28b2cfe7fb | [
"Apache-2.0"
] | null | null | null | pybind/slxos/v17r_2_00/tm_state/__init__.py | extremenetworks/pybind | 44c467e71b2b425be63867aba6e6fa28b2cfe7fb | [
"Apache-2.0"
] | 1 | 2021-11-05T22:15:42.000Z | 2021-11-05T22:15:42.000Z |
from operator import attrgetter
import pyangbind.lib.xpathhelper as xpathhelper
from pyangbind.lib.yangtypes import RestrictedPrecisionDecimalType, RestrictedClassType, TypedListType
from pyangbind.lib.yangtypes import YANGBool, YANGListType, YANGDynClass, ReferenceType
from pyangbind.lib.base import PybindBase
from decimal import Decimal
from bitarray import bitarray
import __builtin__
import cngn_mon_dev
import cngn_mon_voq
import cngn_mon_del_pkt
import tmdevicestataggr
import non_empty_voq
import buf_pool_stats
import tmcpustatsslot
import tmcpustatsslotallgrp
import tm_top_discard_pkt_data
import tm_top_max_queue_depth_data
import tm_max_buff_util_data
import tmvoqstatistics
class tm_state(PybindBase):
"""
This class was auto-generated by the PythonClass plugin for PYANG
from YANG module brocade-sysdiag-operational - based on the path /tm-state. Each member element of
the container is represented as a class variable - with a specific
YANG type.
YANG Description: TM statistics
"""
__slots__ = ('_pybind_generated_by', '_path_helper', '_yang_name', '_rest_name', '_extmethods', '__cngn_mon_dev','__cngn_mon_voq','__cngn_mon_del_pkt','__tmdevicestataggr','__non_empty_voq','__buf_pool_stats','__tmcpustatsslot','__tmcpustatsslotallgrp','__tm_top_discard_pkt_data','__tm_top_max_queue_depth_data','__tm_max_buff_util_data','__tmvoqstatistics',)
_yang_name = 'tm-state'
_rest_name = 'tm-state'
_pybind_generated_by = 'container'
def __init__(self, *args, **kwargs):
path_helper_ = kwargs.pop("path_helper", None)
if path_helper_ is False:
self._path_helper = False
elif path_helper_ is not None and isinstance(path_helper_, xpathhelper.YANGPathHelper):
self._path_helper = path_helper_
elif hasattr(self, "_parent"):
path_helper_ = getattr(self._parent, "_path_helper", False)
self._path_helper = path_helper_
else:
self._path_helper = False
extmethods = kwargs.pop("extmethods", None)
if extmethods is False:
self._extmethods = False
elif extmethods is not None and isinstance(extmethods, dict):
self._extmethods = extmethods
elif hasattr(self, "_parent"):
extmethods = getattr(self._parent, "_extmethods", None)
self._extmethods = extmethods
else:
self._extmethods = False
self.__cngn_mon_del_pkt = YANGDynClass(base=cngn_mon_del_pkt.cngn_mon_del_pkt, is_container='container', presence=False, yang_name="cngn-mon-del-pkt", rest_name="cngn-mon-del-pkt", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'callpoint': u'sysdiag-cngn-mon-del-pkt', u'cli-suppress-show-path': None}}, namespace='urn:brocade.com:mgmt:brocade-sysdiag-operational', defining_module='brocade-sysdiag-operational', yang_type='container', is_config=False)
self.__tmdevicestataggr = YANGDynClass(base=YANGListType("slot tower",tmdevicestataggr.tmdevicestataggr, yang_name="tmdevicestataggr", rest_name="tmdevicestataggr", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='slot tower', extensions={u'tailf-common': {u'callpoint': u'sysdiag-tmdevicestataggr', u'cli-suppress-show-path': None}}), is_container='list', yang_name="tmdevicestataggr", rest_name="tmdevicestataggr", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'callpoint': u'sysdiag-tmdevicestataggr', u'cli-suppress-show-path': None}}, namespace='urn:brocade.com:mgmt:brocade-sysdiag-operational', defining_module='brocade-sysdiag-operational', yang_type='list', is_config=False)
self.__cngn_mon_dev = YANGDynClass(base=cngn_mon_dev.cngn_mon_dev, is_container='container', presence=False, yang_name="cngn-mon-dev", rest_name="cngn-mon-dev", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'callpoint': u'sysdiag-cngn-mon-dev', u'cli-suppress-show-path': None}}, namespace='urn:brocade.com:mgmt:brocade-sysdiag-operational', defining_module='brocade-sysdiag-operational', yang_type='container', is_config=False)
self.__tmcpustatsslot = YANGDynClass(base=YANGListType("slot_id cpugroup_id priority",tmcpustatsslot.tmcpustatsslot, yang_name="tmcpustatsslot", rest_name="tmcpustatsslot", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='slot-id cpugroup-id priority', extensions={u'tailf-common': {u'callpoint': u'sysdiag-tmcpustatsslot', u'cli-suppress-show-path': None}}), is_container='list', yang_name="tmcpustatsslot", rest_name="tmcpustatsslot", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'callpoint': u'sysdiag-tmcpustatsslot', u'cli-suppress-show-path': None}}, namespace='urn:brocade.com:mgmt:brocade-sysdiag-operational', defining_module='brocade-sysdiag-operational', yang_type='list', is_config=False)
self.__tmvoqstatistics = YANGDynClass(base=YANGListType("ing_slot ing_tower egr_voqid",tmvoqstatistics.tmvoqstatistics, yang_name="tmvoqstatistics", rest_name="tmvoqstatistics", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='ing-slot ing-tower egr-voqid', extensions={u'tailf-common': {u'callpoint': u'sysdiag-tmvoqstatistics', u'cli-suppress-show-path': None}}), is_container='list', yang_name="tmvoqstatistics", rest_name="tmvoqstatistics", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'callpoint': u'sysdiag-tmvoqstatistics', u'cli-suppress-show-path': None}}, namespace='urn:brocade.com:mgmt:brocade-sysdiag-operational', defining_module='brocade-sysdiag-operational', yang_type='list', is_config=False)
self.__tm_top_max_queue_depth_data = YANGDynClass(base=YANGListType("slot tower id",tm_top_max_queue_depth_data.tm_top_max_queue_depth_data, yang_name="tm-top-max-queue-depth-data", rest_name="tm-top-max-queue-depth-data", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='slot tower id', extensions={u'tailf-common': {u'callpoint': u'sysdiag-tm-top-max-queue-depth-data', u'cli-suppress-show-path': None}}), is_container='list', yang_name="tm-top-max-queue-depth-data", rest_name="tm-top-max-queue-depth-data", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'callpoint': u'sysdiag-tm-top-max-queue-depth-data', u'cli-suppress-show-path': None}}, namespace='urn:brocade.com:mgmt:brocade-sysdiag-operational', defining_module='brocade-sysdiag-operational', yang_type='list', is_config=False)
self.__buf_pool_stats = YANGDynClass(base=YANGListType("slot tower",buf_pool_stats.buf_pool_stats, yang_name="buf-pool-stats", rest_name="buf-pool-stats", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='slot tower', extensions={u'tailf-common': {u'callpoint': u'sysdiag-buf-pool-stats', u'cli-suppress-show-path': None}}), is_container='list', yang_name="buf-pool-stats", rest_name="buf-pool-stats", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'callpoint': u'sysdiag-buf-pool-stats', u'cli-suppress-show-path': None}}, namespace='urn:brocade.com:mgmt:brocade-sysdiag-operational', defining_module='brocade-sysdiag-operational', yang_type='list', is_config=False)
self.__tm_max_buff_util_data = YANGDynClass(base=YANGListType("slot tower",tm_max_buff_util_data.tm_max_buff_util_data, yang_name="tm-max-buff-util-data", rest_name="tm-max-buff-util-data", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='slot tower', extensions={u'tailf-common': {u'callpoint': u'sysdiag-tm-max-buff-util-data', u'cli-suppress-show-path': None}}), is_container='list', yang_name="tm-max-buff-util-data", rest_name="tm-max-buff-util-data", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'callpoint': u'sysdiag-tm-max-buff-util-data', u'cli-suppress-show-path': None}}, namespace='urn:brocade.com:mgmt:brocade-sysdiag-operational', defining_module='brocade-sysdiag-operational', yang_type='list', is_config=False)
self.__tmcpustatsslotallgrp = YANGDynClass(base=YANGListType("slot_id priority",tmcpustatsslotallgrp.tmcpustatsslotallgrp, yang_name="tmcpustatsslotallgrp", rest_name="tmcpustatsslotallgrp", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='slot-id priority', extensions={u'tailf-common': {u'callpoint': u'sysdiag-tmcpustatsslotallgrp', u'cli-suppress-show-path': None}}), is_container='list', yang_name="tmcpustatsslotallgrp", rest_name="tmcpustatsslotallgrp", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'callpoint': u'sysdiag-tmcpustatsslotallgrp', u'cli-suppress-show-path': None}}, namespace='urn:brocade.com:mgmt:brocade-sysdiag-operational', defining_module='brocade-sysdiag-operational', yang_type='list', is_config=False)
self.__non_empty_voq = YANGDynClass(base=YANGListType("slot tower",non_empty_voq.non_empty_voq, yang_name="non-empty-voq", rest_name="non-empty-voq", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='slot tower', extensions={u'tailf-common': {u'callpoint': u'sysdiag-non-empty-voq', u'cli-suppress-show-path': None}}), is_container='list', yang_name="non-empty-voq", rest_name="non-empty-voq", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'callpoint': u'sysdiag-non-empty-voq', u'cli-suppress-show-path': None}}, namespace='urn:brocade.com:mgmt:brocade-sysdiag-operational', defining_module='brocade-sysdiag-operational', yang_type='list', is_config=False)
self.__tm_top_discard_pkt_data = YANGDynClass(base=YANGListType("slot tower id",tm_top_discard_pkt_data.tm_top_discard_pkt_data, yang_name="tm-top-discard-pkt-data", rest_name="tm-top-discard-pkt-data", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='slot tower id', extensions={u'tailf-common': {u'callpoint': u'sysdiag-tm-top-discard-pkt-data', u'cli-suppress-show-path': None}}), is_container='list', yang_name="tm-top-discard-pkt-data", rest_name="tm-top-discard-pkt-data", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'callpoint': u'sysdiag-tm-top-discard-pkt-data', u'cli-suppress-show-path': None}}, namespace='urn:brocade.com:mgmt:brocade-sysdiag-operational', defining_module='brocade-sysdiag-operational', yang_type='list', is_config=False)
self.__cngn_mon_voq = YANGDynClass(base=cngn_mon_voq.cngn_mon_voq, is_container='container', presence=False, yang_name="cngn-mon-voq", rest_name="cngn-mon-voq", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'callpoint': u'sysdiag-cngn-mon-voq', u'cli-suppress-show-path': None}}, namespace='urn:brocade.com:mgmt:brocade-sysdiag-operational', defining_module='brocade-sysdiag-operational', yang_type='container', is_config=False)
load = kwargs.pop("load", None)
if args:
if len(args) > 1:
raise TypeError("cannot create a YANG container with >1 argument")
all_attr = True
for e in self._pyangbind_elements:
if not hasattr(args[0], e):
all_attr = False
break
if not all_attr:
raise ValueError("Supplied object did not have the correct attributes")
for e in self._pyangbind_elements:
nobj = getattr(args[0], e)
if nobj._changed() is False:
continue
setmethod = getattr(self, "_set_%s" % e)
if load is None:
setmethod(getattr(args[0], e))
else:
setmethod(getattr(args[0], e), load=load)
def _path(self):
if hasattr(self, "_parent"):
return self._parent._path()+[self._yang_name]
else:
return [u'tm-state']
def _rest_path(self):
if hasattr(self, "_parent"):
if self._rest_name:
return self._parent._rest_path()+[self._rest_name]
else:
return self._parent._rest_path()
else:
return [u'tm-state']
def _get_cngn_mon_dev(self):
"""
Getter method for cngn_mon_dev, mapped from YANG variable /tm_state/cngn_mon_dev (container)
YANG Description: TM discard pkt config
"""
return self.__cngn_mon_dev
def _set_cngn_mon_dev(self, v, load=False):
"""
Setter method for cngn_mon_dev, mapped from YANG variable /tm_state/cngn_mon_dev (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_cngn_mon_dev is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_cngn_mon_dev() directly.
YANG Description: TM discard pkt config
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=cngn_mon_dev.cngn_mon_dev, is_container='container', presence=False, yang_name="cngn-mon-dev", rest_name="cngn-mon-dev", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'callpoint': u'sysdiag-cngn-mon-dev', u'cli-suppress-show-path': None}}, namespace='urn:brocade.com:mgmt:brocade-sysdiag-operational', defining_module='brocade-sysdiag-operational', yang_type='container', is_config=False)
except (TypeError, ValueError):
raise ValueError({
'error-string': """cngn_mon_dev must be of a type compatible with container""",
'defined-type': "container",
'generated-type': """YANGDynClass(base=cngn_mon_dev.cngn_mon_dev, is_container='container', presence=False, yang_name="cngn-mon-dev", rest_name="cngn-mon-dev", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'callpoint': u'sysdiag-cngn-mon-dev', u'cli-suppress-show-path': None}}, namespace='urn:brocade.com:mgmt:brocade-sysdiag-operational', defining_module='brocade-sysdiag-operational', yang_type='container', is_config=False)""",
})
self.__cngn_mon_dev = t
if hasattr(self, '_set'):
self._set()
def _unset_cngn_mon_dev(self):
self.__cngn_mon_dev = YANGDynClass(base=cngn_mon_dev.cngn_mon_dev, is_container='container', presence=False, yang_name="cngn-mon-dev", rest_name="cngn-mon-dev", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'callpoint': u'sysdiag-cngn-mon-dev', u'cli-suppress-show-path': None}}, namespace='urn:brocade.com:mgmt:brocade-sysdiag-operational', defining_module='brocade-sysdiag-operational', yang_type='container', is_config=False)
def _get_cngn_mon_voq(self):
"""
Getter method for cngn_mon_voq, mapped from YANG variable /tm_state/cngn_mon_voq (container)
YANG Description: TM discard voq pkt config
"""
return self.__cngn_mon_voq
def _set_cngn_mon_voq(self, v, load=False):
"""
Setter method for cngn_mon_voq, mapped from YANG variable /tm_state/cngn_mon_voq (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_cngn_mon_voq is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_cngn_mon_voq() directly.
YANG Description: TM discard voq pkt config
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=cngn_mon_voq.cngn_mon_voq, is_container='container', presence=False, yang_name="cngn-mon-voq", rest_name="cngn-mon-voq", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'callpoint': u'sysdiag-cngn-mon-voq', u'cli-suppress-show-path': None}}, namespace='urn:brocade.com:mgmt:brocade-sysdiag-operational', defining_module='brocade-sysdiag-operational', yang_type='container', is_config=False)
except (TypeError, ValueError):
raise ValueError({
'error-string': """cngn_mon_voq must be of a type compatible with container""",
'defined-type': "container",
'generated-type': """YANGDynClass(base=cngn_mon_voq.cngn_mon_voq, is_container='container', presence=False, yang_name="cngn-mon-voq", rest_name="cngn-mon-voq", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'callpoint': u'sysdiag-cngn-mon-voq', u'cli-suppress-show-path': None}}, namespace='urn:brocade.com:mgmt:brocade-sysdiag-operational', defining_module='brocade-sysdiag-operational', yang_type='container', is_config=False)""",
})
self.__cngn_mon_voq = t
if hasattr(self, '_set'):
self._set()
def _unset_cngn_mon_voq(self):
self.__cngn_mon_voq = YANGDynClass(base=cngn_mon_voq.cngn_mon_voq, is_container='container', presence=False, yang_name="cngn-mon-voq", rest_name="cngn-mon-voq", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'callpoint': u'sysdiag-cngn-mon-voq', u'cli-suppress-show-path': None}}, namespace='urn:brocade.com:mgmt:brocade-sysdiag-operational', defining_module='brocade-sysdiag-operational', yang_type='container', is_config=False)
def _get_cngn_mon_del_pkt(self):
"""
Getter method for cngn_mon_del_pkt, mapped from YANG variable /tm_state/cngn_mon_del_pkt (container)
YANG Description: TM delete pkt config
"""
return self.__cngn_mon_del_pkt
def _set_cngn_mon_del_pkt(self, v, load=False):
"""
Setter method for cngn_mon_del_pkt, mapped from YANG variable /tm_state/cngn_mon_del_pkt (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_cngn_mon_del_pkt is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_cngn_mon_del_pkt() directly.
YANG Description: TM delete pkt config
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=cngn_mon_del_pkt.cngn_mon_del_pkt, is_container='container', presence=False, yang_name="cngn-mon-del-pkt", rest_name="cngn-mon-del-pkt", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'callpoint': u'sysdiag-cngn-mon-del-pkt', u'cli-suppress-show-path': None}}, namespace='urn:brocade.com:mgmt:brocade-sysdiag-operational', defining_module='brocade-sysdiag-operational', yang_type='container', is_config=False)
except (TypeError, ValueError):
raise ValueError({
'error-string': """cngn_mon_del_pkt must be of a type compatible with container""",
'defined-type': "container",
'generated-type': """YANGDynClass(base=cngn_mon_del_pkt.cngn_mon_del_pkt, is_container='container', presence=False, yang_name="cngn-mon-del-pkt", rest_name="cngn-mon-del-pkt", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'callpoint': u'sysdiag-cngn-mon-del-pkt', u'cli-suppress-show-path': None}}, namespace='urn:brocade.com:mgmt:brocade-sysdiag-operational', defining_module='brocade-sysdiag-operational', yang_type='container', is_config=False)""",
})
self.__cngn_mon_del_pkt = t
if hasattr(self, '_set'):
self._set()
def _unset_cngn_mon_del_pkt(self):
self.__cngn_mon_del_pkt = YANGDynClass(base=cngn_mon_del_pkt.cngn_mon_del_pkt, is_container='container', presence=False, yang_name="cngn-mon-del-pkt", rest_name="cngn-mon-del-pkt", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'callpoint': u'sysdiag-cngn-mon-del-pkt', u'cli-suppress-show-path': None}}, namespace='urn:brocade.com:mgmt:brocade-sysdiag-operational', defining_module='brocade-sysdiag-operational', yang_type='container', is_config=False)
def _get_tmdevicestataggr(self):
"""
Getter method for tmdevicestataggr, mapped from YANG variable /tm_state/tmdevicestataggr (list)
YANG Description: Get TM device stats from all towers and all slots
"""
return self.__tmdevicestataggr
def _set_tmdevicestataggr(self, v, load=False):
"""
Setter method for tmdevicestataggr, mapped from YANG variable /tm_state/tmdevicestataggr (list)
If this variable is read-only (config: false) in the
source YANG file, then _set_tmdevicestataggr is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_tmdevicestataggr() directly.
YANG Description: Get TM device stats from all towers and all slots
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=YANGListType("slot tower",tmdevicestataggr.tmdevicestataggr, yang_name="tmdevicestataggr", rest_name="tmdevicestataggr", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='slot tower', extensions={u'tailf-common': {u'callpoint': u'sysdiag-tmdevicestataggr', u'cli-suppress-show-path': None}}), is_container='list', yang_name="tmdevicestataggr", rest_name="tmdevicestataggr", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'callpoint': u'sysdiag-tmdevicestataggr', u'cli-suppress-show-path': None}}, namespace='urn:brocade.com:mgmt:brocade-sysdiag-operational', defining_module='brocade-sysdiag-operational', yang_type='list', is_config=False)
except (TypeError, ValueError):
raise ValueError({
'error-string': """tmdevicestataggr must be of a type compatible with list""",
'defined-type': "list",
'generated-type': """YANGDynClass(base=YANGListType("slot tower",tmdevicestataggr.tmdevicestataggr, yang_name="tmdevicestataggr", rest_name="tmdevicestataggr", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='slot tower', extensions={u'tailf-common': {u'callpoint': u'sysdiag-tmdevicestataggr', u'cli-suppress-show-path': None}}), is_container='list', yang_name="tmdevicestataggr", rest_name="tmdevicestataggr", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'callpoint': u'sysdiag-tmdevicestataggr', u'cli-suppress-show-path': None}}, namespace='urn:brocade.com:mgmt:brocade-sysdiag-operational', defining_module='brocade-sysdiag-operational', yang_type='list', is_config=False)""",
})
self.__tmdevicestataggr = t
if hasattr(self, '_set'):
self._set()
def _unset_tmdevicestataggr(self):
self.__tmdevicestataggr = YANGDynClass(base=YANGListType("slot tower",tmdevicestataggr.tmdevicestataggr, yang_name="tmdevicestataggr", rest_name="tmdevicestataggr", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='slot tower', extensions={u'tailf-common': {u'callpoint': u'sysdiag-tmdevicestataggr', u'cli-suppress-show-path': None}}), is_container='list', yang_name="tmdevicestataggr", rest_name="tmdevicestataggr", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'callpoint': u'sysdiag-tmdevicestataggr', u'cli-suppress-show-path': None}}, namespace='urn:brocade.com:mgmt:brocade-sysdiag-operational', defining_module='brocade-sysdiag-operational', yang_type='list', is_config=False)
def _get_non_empty_voq(self):
"""
Getter method for non_empty_voq, mapped from YANG variable /tm_state/non_empty_voq (list)
YANG Description: non empty voq's in the system
"""
return self.__non_empty_voq
def _set_non_empty_voq(self, v, load=False):
"""
Setter method for non_empty_voq, mapped from YANG variable /tm_state/non_empty_voq (list)
If this variable is read-only (config: false) in the
source YANG file, then _set_non_empty_voq is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_non_empty_voq() directly.
YANG Description: non empty voq's in the system
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=YANGListType("slot tower",non_empty_voq.non_empty_voq, yang_name="non-empty-voq", rest_name="non-empty-voq", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='slot tower', extensions={u'tailf-common': {u'callpoint': u'sysdiag-non-empty-voq', u'cli-suppress-show-path': None}}), is_container='list', yang_name="non-empty-voq", rest_name="non-empty-voq", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'callpoint': u'sysdiag-non-empty-voq', u'cli-suppress-show-path': None}}, namespace='urn:brocade.com:mgmt:brocade-sysdiag-operational', defining_module='brocade-sysdiag-operational', yang_type='list', is_config=False)
except (TypeError, ValueError):
raise ValueError({
'error-string': """non_empty_voq must be of a type compatible with list""",
'defined-type': "list",
'generated-type': """YANGDynClass(base=YANGListType("slot tower",non_empty_voq.non_empty_voq, yang_name="non-empty-voq", rest_name="non-empty-voq", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='slot tower', extensions={u'tailf-common': {u'callpoint': u'sysdiag-non-empty-voq', u'cli-suppress-show-path': None}}), is_container='list', yang_name="non-empty-voq", rest_name="non-empty-voq", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'callpoint': u'sysdiag-non-empty-voq', u'cli-suppress-show-path': None}}, namespace='urn:brocade.com:mgmt:brocade-sysdiag-operational', defining_module='brocade-sysdiag-operational', yang_type='list', is_config=False)""",
})
self.__non_empty_voq = t
if hasattr(self, '_set'):
self._set()
def _unset_non_empty_voq(self):
self.__non_empty_voq = YANGDynClass(base=YANGListType("slot tower",non_empty_voq.non_empty_voq, yang_name="non-empty-voq", rest_name="non-empty-voq", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='slot tower', extensions={u'tailf-common': {u'callpoint': u'sysdiag-non-empty-voq', u'cli-suppress-show-path': None}}), is_container='list', yang_name="non-empty-voq", rest_name="non-empty-voq", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'callpoint': u'sysdiag-non-empty-voq', u'cli-suppress-show-path': None}}, namespace='urn:brocade.com:mgmt:brocade-sysdiag-operational', defining_module='brocade-sysdiag-operational', yang_type='list', is_config=False)
  # --- buf-pool-stats: read-only YANG list keyed by "slot tower" (pyangbind-generated accessors) ---
  def _get_buf_pool_stats(self):
    """
    Getter method for buf_pool_stats, mapped from YANG variable /tm_state/buf_pool_stats (list)
    YANG Description: TM buf pool stats per slot
    """
    return self.__buf_pool_stats
  def _set_buf_pool_stats(self, v, load=False):
    """
    Setter method for buf_pool_stats, mapped from YANG variable /tm_state/buf_pool_stats (list)
    If this variable is read-only (config: false) in the
    source YANG file, then _set_buf_pool_stats is considered as a private
    method. Backends looking to populate this variable should
    do so via calling thisObj._set_buf_pool_stats() directly.
    YANG Description: TM buf pool stats per slot
    """
    if hasattr(v, "_utype"):
      v = v._utype(v)
    try:
      # Re-wrap the incoming value so it carries the YANG list metadata;
      # incompatible values raise TypeError/ValueError, handled below.
      t = YANGDynClass(v,base=YANGListType("slot tower",buf_pool_stats.buf_pool_stats, yang_name="buf-pool-stats", rest_name="buf-pool-stats", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='slot tower', extensions={u'tailf-common': {u'callpoint': u'sysdiag-buf-pool-stats', u'cli-suppress-show-path': None}}), is_container='list', yang_name="buf-pool-stats", rest_name="buf-pool-stats", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'callpoint': u'sysdiag-buf-pool-stats', u'cli-suppress-show-path': None}}, namespace='urn:brocade.com:mgmt:brocade-sysdiag-operational', defining_module='brocade-sysdiag-operational', yang_type='list', is_config=False)
    except (TypeError, ValueError):
      raise ValueError({
          'error-string': """buf_pool_stats must be of a type compatible with list""",
          'defined-type': "list",
          'generated-type': """YANGDynClass(base=YANGListType("slot tower",buf_pool_stats.buf_pool_stats, yang_name="buf-pool-stats", rest_name="buf-pool-stats", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='slot tower', extensions={u'tailf-common': {u'callpoint': u'sysdiag-buf-pool-stats', u'cli-suppress-show-path': None}}), is_container='list', yang_name="buf-pool-stats", rest_name="buf-pool-stats", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'callpoint': u'sysdiag-buf-pool-stats', u'cli-suppress-show-path': None}}, namespace='urn:brocade.com:mgmt:brocade-sysdiag-operational', defining_module='brocade-sysdiag-operational', yang_type='list', is_config=False)""",
        })
    self.__buf_pool_stats = t
    if hasattr(self, '_set'):
      self._set()
  def _unset_buf_pool_stats(self):
    # Reset to a fresh, empty generated list instance (the YANG default).
    self.__buf_pool_stats = YANGDynClass(base=YANGListType("slot tower",buf_pool_stats.buf_pool_stats, yang_name="buf-pool-stats", rest_name="buf-pool-stats", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='slot tower', extensions={u'tailf-common': {u'callpoint': u'sysdiag-buf-pool-stats', u'cli-suppress-show-path': None}}), is_container='list', yang_name="buf-pool-stats", rest_name="buf-pool-stats", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'callpoint': u'sysdiag-buf-pool-stats', u'cli-suppress-show-path': None}}, namespace='urn:brocade.com:mgmt:brocade-sysdiag-operational', defining_module='brocade-sysdiag-operational', yang_type='list', is_config=False)
  # --- tmcpustatsslot: read-only YANG list keyed by "slot-id cpugroup-id priority" (pyangbind-generated accessors) ---
  def _get_tmcpustatsslot(self):
    """
    Getter method for tmcpustatsslot, mapped from YANG variable /tm_state/tmcpustatsslot (list)
    YANG Description: TM voq stats for CPU port per slot
    """
    return self.__tmcpustatsslot
  def _set_tmcpustatsslot(self, v, load=False):
    """
    Setter method for tmcpustatsslot, mapped from YANG variable /tm_state/tmcpustatsslot (list)
    If this variable is read-only (config: false) in the
    source YANG file, then _set_tmcpustatsslot is considered as a private
    method. Backends looking to populate this variable should
    do so via calling thisObj._set_tmcpustatsslot() directly.
    YANG Description: TM voq stats for CPU port per slot
    """
    if hasattr(v, "_utype"):
      v = v._utype(v)
    try:
      # Re-wrap the incoming value so it carries the YANG list metadata;
      # incompatible values raise TypeError/ValueError, handled below.
      t = YANGDynClass(v,base=YANGListType("slot_id cpugroup_id priority",tmcpustatsslot.tmcpustatsslot, yang_name="tmcpustatsslot", rest_name="tmcpustatsslot", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='slot-id cpugroup-id priority', extensions={u'tailf-common': {u'callpoint': u'sysdiag-tmcpustatsslot', u'cli-suppress-show-path': None}}), is_container='list', yang_name="tmcpustatsslot", rest_name="tmcpustatsslot", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'callpoint': u'sysdiag-tmcpustatsslot', u'cli-suppress-show-path': None}}, namespace='urn:brocade.com:mgmt:brocade-sysdiag-operational', defining_module='brocade-sysdiag-operational', yang_type='list', is_config=False)
    except (TypeError, ValueError):
      raise ValueError({
          'error-string': """tmcpustatsslot must be of a type compatible with list""",
          'defined-type': "list",
          'generated-type': """YANGDynClass(base=YANGListType("slot_id cpugroup_id priority",tmcpustatsslot.tmcpustatsslot, yang_name="tmcpustatsslot", rest_name="tmcpustatsslot", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='slot-id cpugroup-id priority', extensions={u'tailf-common': {u'callpoint': u'sysdiag-tmcpustatsslot', u'cli-suppress-show-path': None}}), is_container='list', yang_name="tmcpustatsslot", rest_name="tmcpustatsslot", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'callpoint': u'sysdiag-tmcpustatsslot', u'cli-suppress-show-path': None}}, namespace='urn:brocade.com:mgmt:brocade-sysdiag-operational', defining_module='brocade-sysdiag-operational', yang_type='list', is_config=False)""",
        })
    self.__tmcpustatsslot = t
    if hasattr(self, '_set'):
      self._set()
  def _unset_tmcpustatsslot(self):
    # Reset to a fresh, empty generated list instance (the YANG default).
    self.__tmcpustatsslot = YANGDynClass(base=YANGListType("slot_id cpugroup_id priority",tmcpustatsslot.tmcpustatsslot, yang_name="tmcpustatsslot", rest_name="tmcpustatsslot", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='slot-id cpugroup-id priority', extensions={u'tailf-common': {u'callpoint': u'sysdiag-tmcpustatsslot', u'cli-suppress-show-path': None}}), is_container='list', yang_name="tmcpustatsslot", rest_name="tmcpustatsslot", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'callpoint': u'sysdiag-tmcpustatsslot', u'cli-suppress-show-path': None}}, namespace='urn:brocade.com:mgmt:brocade-sysdiag-operational', defining_module='brocade-sysdiag-operational', yang_type='list', is_config=False)
  # --- tmcpustatsslotallgrp: read-only YANG list keyed by "slot-id priority" (pyangbind-generated accessors) ---
  def _get_tmcpustatsslotallgrp(self):
    """
    Getter method for tmcpustatsslotallgrp, mapped from YANG variable /tm_state/tmcpustatsslotallgrp (list)
    YANG Description: TM voq stats for CPU port per slot for all CPU group
    """
    return self.__tmcpustatsslotallgrp
  def _set_tmcpustatsslotallgrp(self, v, load=False):
    """
    Setter method for tmcpustatsslotallgrp, mapped from YANG variable /tm_state/tmcpustatsslotallgrp (list)
    If this variable is read-only (config: false) in the
    source YANG file, then _set_tmcpustatsslotallgrp is considered as a private
    method. Backends looking to populate this variable should
    do so via calling thisObj._set_tmcpustatsslotallgrp() directly.
    YANG Description: TM voq stats for CPU port per slot for all CPU group
    """
    if hasattr(v, "_utype"):
      v = v._utype(v)
    try:
      # Re-wrap the incoming value so it carries the YANG list metadata;
      # incompatible values raise TypeError/ValueError, handled below.
      t = YANGDynClass(v,base=YANGListType("slot_id priority",tmcpustatsslotallgrp.tmcpustatsslotallgrp, yang_name="tmcpustatsslotallgrp", rest_name="tmcpustatsslotallgrp", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='slot-id priority', extensions={u'tailf-common': {u'callpoint': u'sysdiag-tmcpustatsslotallgrp', u'cli-suppress-show-path': None}}), is_container='list', yang_name="tmcpustatsslotallgrp", rest_name="tmcpustatsslotallgrp", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'callpoint': u'sysdiag-tmcpustatsslotallgrp', u'cli-suppress-show-path': None}}, namespace='urn:brocade.com:mgmt:brocade-sysdiag-operational', defining_module='brocade-sysdiag-operational', yang_type='list', is_config=False)
    except (TypeError, ValueError):
      raise ValueError({
          'error-string': """tmcpustatsslotallgrp must be of a type compatible with list""",
          'defined-type': "list",
          'generated-type': """YANGDynClass(base=YANGListType("slot_id priority",tmcpustatsslotallgrp.tmcpustatsslotallgrp, yang_name="tmcpustatsslotallgrp", rest_name="tmcpustatsslotallgrp", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='slot-id priority', extensions={u'tailf-common': {u'callpoint': u'sysdiag-tmcpustatsslotallgrp', u'cli-suppress-show-path': None}}), is_container='list', yang_name="tmcpustatsslotallgrp", rest_name="tmcpustatsslotallgrp", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'callpoint': u'sysdiag-tmcpustatsslotallgrp', u'cli-suppress-show-path': None}}, namespace='urn:brocade.com:mgmt:brocade-sysdiag-operational', defining_module='brocade-sysdiag-operational', yang_type='list', is_config=False)""",
        })
    self.__tmcpustatsslotallgrp = t
    if hasattr(self, '_set'):
      self._set()
  def _unset_tmcpustatsslotallgrp(self):
    # Reset to a fresh, empty generated list instance (the YANG default).
    self.__tmcpustatsslotallgrp = YANGDynClass(base=YANGListType("slot_id priority",tmcpustatsslotallgrp.tmcpustatsslotallgrp, yang_name="tmcpustatsslotallgrp", rest_name="tmcpustatsslotallgrp", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='slot-id priority', extensions={u'tailf-common': {u'callpoint': u'sysdiag-tmcpustatsslotallgrp', u'cli-suppress-show-path': None}}), is_container='list', yang_name="tmcpustatsslotallgrp", rest_name="tmcpustatsslotallgrp", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'callpoint': u'sysdiag-tmcpustatsslotallgrp', u'cli-suppress-show-path': None}}, namespace='urn:brocade.com:mgmt:brocade-sysdiag-operational', defining_module='brocade-sysdiag-operational', yang_type='list', is_config=False)
  # --- tm-top-discard-pkt-data: read-only YANG list keyed by "slot tower id" (pyangbind-generated accessors) ---
  def _get_tm_top_discard_pkt_data(self):
    """
    Getter method for tm_top_discard_pkt_data, mapped from YANG variable /tm_state/tm_top_discard_pkt_data (list)
    YANG Description: TM voq stats to get list of top discarded destination ports
    """
    return self.__tm_top_discard_pkt_data
  def _set_tm_top_discard_pkt_data(self, v, load=False):
    """
    Setter method for tm_top_discard_pkt_data, mapped from YANG variable /tm_state/tm_top_discard_pkt_data (list)
    If this variable is read-only (config: false) in the
    source YANG file, then _set_tm_top_discard_pkt_data is considered as a private
    method. Backends looking to populate this variable should
    do so via calling thisObj._set_tm_top_discard_pkt_data() directly.
    YANG Description: TM voq stats to get list of top discarded destination ports
    """
    if hasattr(v, "_utype"):
      v = v._utype(v)
    try:
      # Re-wrap the incoming value so it carries the YANG list metadata;
      # incompatible values raise TypeError/ValueError, handled below.
      t = YANGDynClass(v,base=YANGListType("slot tower id",tm_top_discard_pkt_data.tm_top_discard_pkt_data, yang_name="tm-top-discard-pkt-data", rest_name="tm-top-discard-pkt-data", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='slot tower id', extensions={u'tailf-common': {u'callpoint': u'sysdiag-tm-top-discard-pkt-data', u'cli-suppress-show-path': None}}), is_container='list', yang_name="tm-top-discard-pkt-data", rest_name="tm-top-discard-pkt-data", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'callpoint': u'sysdiag-tm-top-discard-pkt-data', u'cli-suppress-show-path': None}}, namespace='urn:brocade.com:mgmt:brocade-sysdiag-operational', defining_module='brocade-sysdiag-operational', yang_type='list', is_config=False)
    except (TypeError, ValueError):
      raise ValueError({
          'error-string': """tm_top_discard_pkt_data must be of a type compatible with list""",
          'defined-type': "list",
          'generated-type': """YANGDynClass(base=YANGListType("slot tower id",tm_top_discard_pkt_data.tm_top_discard_pkt_data, yang_name="tm-top-discard-pkt-data", rest_name="tm-top-discard-pkt-data", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='slot tower id', extensions={u'tailf-common': {u'callpoint': u'sysdiag-tm-top-discard-pkt-data', u'cli-suppress-show-path': None}}), is_container='list', yang_name="tm-top-discard-pkt-data", rest_name="tm-top-discard-pkt-data", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'callpoint': u'sysdiag-tm-top-discard-pkt-data', u'cli-suppress-show-path': None}}, namespace='urn:brocade.com:mgmt:brocade-sysdiag-operational', defining_module='brocade-sysdiag-operational', yang_type='list', is_config=False)""",
        })
    self.__tm_top_discard_pkt_data = t
    if hasattr(self, '_set'):
      self._set()
  def _unset_tm_top_discard_pkt_data(self):
    # Reset to a fresh, empty generated list instance (the YANG default).
    self.__tm_top_discard_pkt_data = YANGDynClass(base=YANGListType("slot tower id",tm_top_discard_pkt_data.tm_top_discard_pkt_data, yang_name="tm-top-discard-pkt-data", rest_name="tm-top-discard-pkt-data", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='slot tower id', extensions={u'tailf-common': {u'callpoint': u'sysdiag-tm-top-discard-pkt-data', u'cli-suppress-show-path': None}}), is_container='list', yang_name="tm-top-discard-pkt-data", rest_name="tm-top-discard-pkt-data", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'callpoint': u'sysdiag-tm-top-discard-pkt-data', u'cli-suppress-show-path': None}}, namespace='urn:brocade.com:mgmt:brocade-sysdiag-operational', defining_module='brocade-sysdiag-operational', yang_type='list', is_config=False)
  # --- tm-top-max-queue-depth-data: read-only YANG list keyed by "slot tower id" (pyangbind-generated accessors) ---
  def _get_tm_top_max_queue_depth_data(self):
    """
    Getter method for tm_top_max_queue_depth_data, mapped from YANG variable /tm_state/tm_top_max_queue_depth_data (list)
    YANG Description: TM voq stats to get list of top max queue depth
    """
    return self.__tm_top_max_queue_depth_data
  def _set_tm_top_max_queue_depth_data(self, v, load=False):
    """
    Setter method for tm_top_max_queue_depth_data, mapped from YANG variable /tm_state/tm_top_max_queue_depth_data (list)
    If this variable is read-only (config: false) in the
    source YANG file, then _set_tm_top_max_queue_depth_data is considered as a private
    method. Backends looking to populate this variable should
    do so via calling thisObj._set_tm_top_max_queue_depth_data() directly.
    YANG Description: TM voq stats to get list of top max queue depth
    """
    if hasattr(v, "_utype"):
      v = v._utype(v)
    try:
      # Re-wrap the incoming value so it carries the YANG list metadata;
      # incompatible values raise TypeError/ValueError, handled below.
      t = YANGDynClass(v,base=YANGListType("slot tower id",tm_top_max_queue_depth_data.tm_top_max_queue_depth_data, yang_name="tm-top-max-queue-depth-data", rest_name="tm-top-max-queue-depth-data", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='slot tower id', extensions={u'tailf-common': {u'callpoint': u'sysdiag-tm-top-max-queue-depth-data', u'cli-suppress-show-path': None}}), is_container='list', yang_name="tm-top-max-queue-depth-data", rest_name="tm-top-max-queue-depth-data", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'callpoint': u'sysdiag-tm-top-max-queue-depth-data', u'cli-suppress-show-path': None}}, namespace='urn:brocade.com:mgmt:brocade-sysdiag-operational', defining_module='brocade-sysdiag-operational', yang_type='list', is_config=False)
    except (TypeError, ValueError):
      raise ValueError({
          'error-string': """tm_top_max_queue_depth_data must be of a type compatible with list""",
          'defined-type': "list",
          'generated-type': """YANGDynClass(base=YANGListType("slot tower id",tm_top_max_queue_depth_data.tm_top_max_queue_depth_data, yang_name="tm-top-max-queue-depth-data", rest_name="tm-top-max-queue-depth-data", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='slot tower id', extensions={u'tailf-common': {u'callpoint': u'sysdiag-tm-top-max-queue-depth-data', u'cli-suppress-show-path': None}}), is_container='list', yang_name="tm-top-max-queue-depth-data", rest_name="tm-top-max-queue-depth-data", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'callpoint': u'sysdiag-tm-top-max-queue-depth-data', u'cli-suppress-show-path': None}}, namespace='urn:brocade.com:mgmt:brocade-sysdiag-operational', defining_module='brocade-sysdiag-operational', yang_type='list', is_config=False)""",
        })
    self.__tm_top_max_queue_depth_data = t
    if hasattr(self, '_set'):
      self._set()
  def _unset_tm_top_max_queue_depth_data(self):
    # Reset to a fresh, empty generated list instance (the YANG default).
    self.__tm_top_max_queue_depth_data = YANGDynClass(base=YANGListType("slot tower id",tm_top_max_queue_depth_data.tm_top_max_queue_depth_data, yang_name="tm-top-max-queue-depth-data", rest_name="tm-top-max-queue-depth-data", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='slot tower id', extensions={u'tailf-common': {u'callpoint': u'sysdiag-tm-top-max-queue-depth-data', u'cli-suppress-show-path': None}}), is_container='list', yang_name="tm-top-max-queue-depth-data", rest_name="tm-top-max-queue-depth-data", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'callpoint': u'sysdiag-tm-top-max-queue-depth-data', u'cli-suppress-show-path': None}}, namespace='urn:brocade.com:mgmt:brocade-sysdiag-operational', defining_module='brocade-sysdiag-operational', yang_type='list', is_config=False)
  # --- tm-max-buff-util-data: read-only YANG list keyed by "slot tower" (pyangbind-generated accessors) ---
  def _get_tm_max_buff_util_data(self):
    """
    Getter method for tm_max_buff_util_data, mapped from YANG variable /tm_state/tm_max_buff_util_data (list)
    YANG Description: Snapshot of max TM buffer utilzation
    """
    return self.__tm_max_buff_util_data
  def _set_tm_max_buff_util_data(self, v, load=False):
    """
    Setter method for tm_max_buff_util_data, mapped from YANG variable /tm_state/tm_max_buff_util_data (list)
    If this variable is read-only (config: false) in the
    source YANG file, then _set_tm_max_buff_util_data is considered as a private
    method. Backends looking to populate this variable should
    do so via calling thisObj._set_tm_max_buff_util_data() directly.
    YANG Description: Snapshot of max TM buffer utilzation
    """
    if hasattr(v, "_utype"):
      v = v._utype(v)
    try:
      # Re-wrap the incoming value so it carries the YANG list metadata;
      # incompatible values raise TypeError/ValueError, handled below.
      t = YANGDynClass(v,base=YANGListType("slot tower",tm_max_buff_util_data.tm_max_buff_util_data, yang_name="tm-max-buff-util-data", rest_name="tm-max-buff-util-data", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='slot tower', extensions={u'tailf-common': {u'callpoint': u'sysdiag-tm-max-buff-util-data', u'cli-suppress-show-path': None}}), is_container='list', yang_name="tm-max-buff-util-data", rest_name="tm-max-buff-util-data", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'callpoint': u'sysdiag-tm-max-buff-util-data', u'cli-suppress-show-path': None}}, namespace='urn:brocade.com:mgmt:brocade-sysdiag-operational', defining_module='brocade-sysdiag-operational', yang_type='list', is_config=False)
    except (TypeError, ValueError):
      raise ValueError({
          'error-string': """tm_max_buff_util_data must be of a type compatible with list""",
          'defined-type': "list",
          'generated-type': """YANGDynClass(base=YANGListType("slot tower",tm_max_buff_util_data.tm_max_buff_util_data, yang_name="tm-max-buff-util-data", rest_name="tm-max-buff-util-data", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='slot tower', extensions={u'tailf-common': {u'callpoint': u'sysdiag-tm-max-buff-util-data', u'cli-suppress-show-path': None}}), is_container='list', yang_name="tm-max-buff-util-data", rest_name="tm-max-buff-util-data", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'callpoint': u'sysdiag-tm-max-buff-util-data', u'cli-suppress-show-path': None}}, namespace='urn:brocade.com:mgmt:brocade-sysdiag-operational', defining_module='brocade-sysdiag-operational', yang_type='list', is_config=False)""",
        })
    self.__tm_max_buff_util_data = t
    if hasattr(self, '_set'):
      self._set()
  def _unset_tm_max_buff_util_data(self):
    # Reset to a fresh, empty generated list instance (the YANG default).
    self.__tm_max_buff_util_data = YANGDynClass(base=YANGListType("slot tower",tm_max_buff_util_data.tm_max_buff_util_data, yang_name="tm-max-buff-util-data", rest_name="tm-max-buff-util-data", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='slot tower', extensions={u'tailf-common': {u'callpoint': u'sysdiag-tm-max-buff-util-data', u'cli-suppress-show-path': None}}), is_container='list', yang_name="tm-max-buff-util-data", rest_name="tm-max-buff-util-data", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'callpoint': u'sysdiag-tm-max-buff-util-data', u'cli-suppress-show-path': None}}, namespace='urn:brocade.com:mgmt:brocade-sysdiag-operational', defining_module='brocade-sysdiag-operational', yang_type='list', is_config=False)
  # --- tmvoqstatistics: read-only YANG list keyed by "ing-slot ing-tower egr-voqid" (pyangbind-generated accessors) ---
  def _get_tmvoqstatistics(self):
    """
    Getter method for tmvoqstatistics, mapped from YANG variable /tm_state/tmvoqstatistics (list)
    YANG Description: Get TM VOQ statistics
    """
    return self.__tmvoqstatistics
  def _set_tmvoqstatistics(self, v, load=False):
    """
    Setter method for tmvoqstatistics, mapped from YANG variable /tm_state/tmvoqstatistics (list)
    If this variable is read-only (config: false) in the
    source YANG file, then _set_tmvoqstatistics is considered as a private
    method. Backends looking to populate this variable should
    do so via calling thisObj._set_tmvoqstatistics() directly.
    YANG Description: Get TM VOQ statistics
    """
    if hasattr(v, "_utype"):
      v = v._utype(v)
    try:
      # Re-wrap the incoming value so it carries the YANG list metadata;
      # incompatible values raise TypeError/ValueError, handled below.
      t = YANGDynClass(v,base=YANGListType("ing_slot ing_tower egr_voqid",tmvoqstatistics.tmvoqstatistics, yang_name="tmvoqstatistics", rest_name="tmvoqstatistics", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='ing-slot ing-tower egr-voqid', extensions={u'tailf-common': {u'callpoint': u'sysdiag-tmvoqstatistics', u'cli-suppress-show-path': None}}), is_container='list', yang_name="tmvoqstatistics", rest_name="tmvoqstatistics", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'callpoint': u'sysdiag-tmvoqstatistics', u'cli-suppress-show-path': None}}, namespace='urn:brocade.com:mgmt:brocade-sysdiag-operational', defining_module='brocade-sysdiag-operational', yang_type='list', is_config=False)
    except (TypeError, ValueError):
      raise ValueError({
          'error-string': """tmvoqstatistics must be of a type compatible with list""",
          'defined-type': "list",
          'generated-type': """YANGDynClass(base=YANGListType("ing_slot ing_tower egr_voqid",tmvoqstatistics.tmvoqstatistics, yang_name="tmvoqstatistics", rest_name="tmvoqstatistics", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='ing-slot ing-tower egr-voqid', extensions={u'tailf-common': {u'callpoint': u'sysdiag-tmvoqstatistics', u'cli-suppress-show-path': None}}), is_container='list', yang_name="tmvoqstatistics", rest_name="tmvoqstatistics", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'callpoint': u'sysdiag-tmvoqstatistics', u'cli-suppress-show-path': None}}, namespace='urn:brocade.com:mgmt:brocade-sysdiag-operational', defining_module='brocade-sysdiag-operational', yang_type='list', is_config=False)""",
        })
    self.__tmvoqstatistics = t
    if hasattr(self, '_set'):
      self._set()
  def _unset_tmvoqstatistics(self):
    # Reset to a fresh, empty generated list instance (the YANG default).
    self.__tmvoqstatistics = YANGDynClass(base=YANGListType("ing_slot ing_tower egr_voqid",tmvoqstatistics.tmvoqstatistics, yang_name="tmvoqstatistics", rest_name="tmvoqstatistics", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='ing-slot ing-tower egr-voqid', extensions={u'tailf-common': {u'callpoint': u'sysdiag-tmvoqstatistics', u'cli-suppress-show-path': None}}), is_container='list', yang_name="tmvoqstatistics", rest_name="tmvoqstatistics", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'callpoint': u'sysdiag-tmvoqstatistics', u'cli-suppress-show-path': None}}, namespace='urn:brocade.com:mgmt:brocade-sysdiag-operational', defining_module='brocade-sysdiag-operational', yang_type='list', is_config=False)
cngn_mon_dev = __builtin__.property(_get_cngn_mon_dev)
cngn_mon_voq = __builtin__.property(_get_cngn_mon_voq)
cngn_mon_del_pkt = __builtin__.property(_get_cngn_mon_del_pkt)
tmdevicestataggr = __builtin__.property(_get_tmdevicestataggr)
non_empty_voq = __builtin__.property(_get_non_empty_voq)
buf_pool_stats = __builtin__.property(_get_buf_pool_stats)
tmcpustatsslot = __builtin__.property(_get_tmcpustatsslot)
tmcpustatsslotallgrp = __builtin__.property(_get_tmcpustatsslotallgrp)
tm_top_discard_pkt_data = __builtin__.property(_get_tm_top_discard_pkt_data)
tm_top_max_queue_depth_data = __builtin__.property(_get_tm_top_max_queue_depth_data)
tm_max_buff_util_data = __builtin__.property(_get_tm_max_buff_util_data)
tmvoqstatistics = __builtin__.property(_get_tmvoqstatistics)
_pyangbind_elements = {'cngn_mon_dev': cngn_mon_dev, 'cngn_mon_voq': cngn_mon_voq, 'cngn_mon_del_pkt': cngn_mon_del_pkt, 'tmdevicestataggr': tmdevicestataggr, 'non_empty_voq': non_empty_voq, 'buf_pool_stats': buf_pool_stats, 'tmcpustatsslot': tmcpustatsslot, 'tmcpustatsslotallgrp': tmcpustatsslotallgrp, 'tm_top_discard_pkt_data': tm_top_discard_pkt_data, 'tm_top_max_queue_depth_data': tm_top_max_queue_depth_data, 'tm_max_buff_util_data': tm_max_buff_util_data, 'tmvoqstatistics': tmvoqstatistics, }
| 95.933333 | 918 | 0.76111 |
2d724b75e651c82f19e874c7b0d9aa84b5bd14f6 | 5,418 | py | Python | augraphy/augmentations/folding.py | RyonSayer/augraphy | be1e8dcf0f129ac3fc30ba1cad0d8de02443f67f | [
"MIT"
] | null | null | null | augraphy/augmentations/folding.py | RyonSayer/augraphy | be1e8dcf0f129ac3fc30ba1cad0d8de02443f67f | [
"MIT"
] | null | null | null | augraphy/augmentations/folding.py | RyonSayer/augraphy | be1e8dcf0f129ac3fc30ba1cad0d8de02443f67f | [
"MIT"
] | null | null | null | import random
import numpy as np
from augraphy.augmentations.lib import warp_fold_left_side
from augraphy.augmentations.lib import warp_fold_right_side
from augraphy.base.augmentation import Augmentation
class Folding(Augmentation):
"""Emulates folding effect from perspective transformation
:param fold_x: X coordinate of the folding effect.
:type fold_x: int, optional
:param fold_deviation: Deviation (in pixels) of provided X coordinate location.
:type fold_deviation: tuple, optional
:param fold count: Number of applied foldings
:type fold_count: int, optional
:param fold_noise: Level of noise added to folding area. Range from
value of 0 to 1.
:type fold_noise: float, optional
:param gradient_width: Tuple (min, max) Measure of the space affected
by fold prior to being warped (in units of
percentage of width of page)
:type gradient_width: tuple, optional
:param gradient_height: Tuple (min, max) Measure of depth of fold (unit
measured as percentage page height)
:type gradient_height: tuple, optional
:param p: The probability this Augmentation will be applied.
:type p: float, optional
"""
def __init__(
self,
fold_x=None,
fold_deviation=(0, 0),
fold_count=2,
fold_noise=0.1,
gradient_width=(0.1, 0.2),
gradient_height=(0.01, 0.02),
p=1,
):
super().__init__(p=p)
self.fold_x = fold_x
self.fold_deviation = fold_deviation
self.fold_count = fold_count
self.fold_noise = fold_noise
self.gradient_width = gradient_width
self.gradient_height = gradient_height
# Constructs a string representation of this Augmentation.
def __repr__(self):
return f"Folding(fold_x={self.fold_x}, fold_deviation={self.fold_deviation}, fold_count={self.fold_count}, fold_noise={self.fold_noise}, gradient_width={self.gradient_width}, gradient_height={self.gradient_height},p={self.p})"
# Apply perspective transform 2 times and get single folding effect
def apply_folding(
self,
img,
ysize,
xsize,
gradient_width,
gradient_height,
fold_noise,
):
min_fold_x = min(np.ceil(gradient_width[0] * xsize), xsize).astype("int")
max_fold_x = min(np.ceil(gradient_width[1] * xsize), xsize).astype("int")
fold_width_one_side = int(
random.randint(min_fold_x, max_fold_x) / 2,
) # folding width from left to center of folding, or from right to center of folding
# test for valid folding center line
if (xsize - fold_width_one_side - 1) < (fold_width_one_side + 1):
print("Folding augmentation is not applied, please increase image size")
return img
# center of folding
if self.fold_x is None:
fold_x = random.randint(
fold_width_one_side + 1,
xsize - fold_width_one_side - 1,
)
else:
deviation = random.randint(self.fold_deviation[0], self.fold_deviation[1]) * random.choice([-1, 1])
fold_x = min(max(self.fold_x + deviation, fold_width_one_side + 1), xsize - fold_width_one_side - 1)
fold_y_shift_min = min(np.ceil(gradient_height[0] * ysize), ysize).astype("int")
fold_y_shift_max = min(np.ceil(gradient_height[1] * ysize), ysize).astype("int")
fold_y_shift = random.randint(
fold_y_shift_min,
fold_y_shift_max,
) # y distortion in folding (support positive y value for now)
if (fold_width_one_side != 0) and (fold_y_shift != 0):
img_fold_l = warp_fold_left_side(
img,
ysize,
fold_noise,
fold_x,
fold_width_one_side,
fold_y_shift,
)
img_fold_r = warp_fold_right_side(
img_fold_l,
ysize,
fold_noise,
fold_x,
fold_width_one_side,
fold_y_shift,
)
return img_fold_r
else:
if fold_width_one_side == 0:
print(
"Folding augmentation is not applied, please increase gradient width or image size",
)
else:
print(
"Folding augmentation is not applied, please increase gradient height or image size",
)
return img
# Applies the Augmentation to input data.
def __call__(self, image, layer=None, force=False):
if force or self.should_run():
image = image.copy()
# get image dimension
if len(image.shape) > 2:
ysize, xsize, _ = image.shape
else:
ysize, xsize = image.shape
# apply folding multiple times
image_fold = image.copy()
for _ in range(self.fold_count):
image_fold = self.apply_folding(
image_fold,
ysize,
xsize,
self.gradient_width,
self.gradient_height,
self.fold_noise,
)
return image_fold
| 36.608108 | 234 | 0.587117 |
f66b3183f393220dd6616c5d48bba4a671025c5a | 17,700 | py | Python | clients/python/generated/pyaemcloudmanagerapi/api/repositories_api.py | shinesolutions/cloudmanager-api-clients | d73a25878f6cc57af954362ba8dccc90d54e6131 | [
"Apache-2.0"
] | 3 | 2020-06-23T05:31:52.000Z | 2020-11-26T05:34:57.000Z | clients/python/generated/pyaemcloudmanagerapi/api/repositories_api.py | shinesolutions/cloudmanager-api-clients | d73a25878f6cc57af954362ba8dccc90d54e6131 | [
"Apache-2.0"
] | 2 | 2021-01-21T01:19:54.000Z | 2021-12-09T22:30:22.000Z | clients/python/generated/pyaemcloudmanagerapi/api/repositories_api.py | shinesolutions/cloudmanager-api-clients | d73a25878f6cc57af954362ba8dccc90d54e6131 | [
"Apache-2.0"
] | 1 | 2020-11-18T11:48:13.000Z | 2020-11-18T11:48:13.000Z | # coding: utf-8
"""
Cloud Manager API
This API allows access to Cloud Manager programs, pipelines, and environments by an authorized technical account created through the Adobe I/O Console. The base url for this API is https://cloudmanager.adobe.io, e.g. to get the list of programs for an organization, you would make a GET request to https://cloudmanager.adobe.io/api/programs (with the correct set of headers as described below). This swagger file can be downloaded from https://raw.githubusercontent.com/AdobeDocs/cloudmanager-api-docs/master/swagger-specs/api.yaml. # noqa: E501
The version of the OpenAPI document: 1.0.0
Contact: opensource@shinesolutions.com
Generated by: https://openapi-generator.tech
"""
from __future__ import absolute_import
import re # noqa: F401
# python 2 and python 3 compatibility library
import six
from pyaemcloudmanagerapi.api_client import ApiClient
from pyaemcloudmanagerapi.exceptions import ( # noqa: F401
ApiTypeError,
ApiValueError
)
class RepositoriesApi(object):
    """NOTE: This class is auto generated by OpenAPI Generator
    Ref: https://openapi-generator.tech
    Do not edit the class manually.
    """
    def __init__(self, api_client=None):
        """Store the ApiClient used for every request; create a default one if none given."""
        if api_client is None:
            api_client = ApiClient()
        self.api_client = api_client
    def get_repositories(self, program_id, x_gw_ims_org_id, authorization, x_api_key, **kwargs): # noqa: E501
        """Lists Repositories # noqa: E501
        Lists all Repositories in an program # noqa: E501
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.get_repositories(program_id, x_gw_ims_org_id, authorization, x_api_key, async_req=True)
        >>> result = thread.get()
        :param async_req bool: execute request asynchronously
        :param str program_id: Identifier of the program (required)
        :param str x_gw_ims_org_id: IMS organization ID that the request is being made under. (required)
        :param str authorization: Bearer [token] - An access token for the technical account created through integration with Adobe IO (required)
        :param str x_api_key: IMS Client ID (API Key) which is subscribed to consume services on console.adobe.io (required)
        :param _preload_content: if False, the urllib3.HTTPResponse object will
                                 be returned without reading/decoding response
                                 data. Default is True.
        :param _request_timeout: timeout setting for this request. If one
                                 number provided, it will be total request
                                 timeout. It can also be a pair (tuple) of
                                 (connection, read) timeouts.
        :return: RepositoryList
                 If the method is called asynchronously,
                 returns the request thread.
        """
        # Thin wrapper: force _return_http_data_only so callers get only the
        # deserialized body instead of the (data, status, headers) tuple.
        kwargs['_return_http_data_only'] = True
        return self.get_repositories_with_http_info(program_id, x_gw_ims_org_id, authorization, x_api_key, **kwargs) # noqa: E501
    def get_repositories_with_http_info(self, program_id, x_gw_ims_org_id, authorization, x_api_key, **kwargs): # noqa: E501
        """Lists Repositories # noqa: E501
        Lists all Repositories in an program # noqa: E501
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.get_repositories_with_http_info(program_id, x_gw_ims_org_id, authorization, x_api_key, async_req=True)
        >>> result = thread.get()
        :param async_req bool: execute request asynchronously
        :param str program_id: Identifier of the program (required)
        :param str x_gw_ims_org_id: IMS organization ID that the request is being made under. (required)
        :param str authorization: Bearer [token] - An access token for the technical account created through integration with Adobe IO (required)
        :param str x_api_key: IMS Client ID (API Key) which is subscribed to consume services on console.adobe.io (required)
        :param _return_http_data_only: response data without head status code
                                       and headers
        :param _preload_content: if False, the urllib3.HTTPResponse object will
                                 be returned without reading/decoding response
                                 data. Default is True.
        :param _request_timeout: timeout setting for this request. If one
                                 number provided, it will be total request
                                 timeout. It can also be a pair (tuple) of
                                 (connection, read) timeouts.
        :return: tuple(RepositoryList, status_code(int), headers(HTTPHeaderDict))
                 If the method is called asynchronously,
                 returns the request thread.
        """
        # locals() must be captured before any other local is bound so it
        # contains exactly the method's arguments (plus 'self' and 'kwargs').
        local_var_params = locals()
        all_params = [
            'program_id',
            'x_gw_ims_org_id',
            'authorization',
            'x_api_key'
        ]
        all_params.extend(
            [
                'async_req',
                '_return_http_data_only',
                '_preload_content',
                '_request_timeout'
            ]
        )
        # Reject unknown keyword arguments, then flatten kwargs into the
        # captured parameter dict.
        for key, val in six.iteritems(local_var_params['kwargs']):
            if key not in all_params:
                raise ApiTypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method get_repositories" % key
                )
            local_var_params[key] = val
        del local_var_params['kwargs']
        # verify the required parameter 'program_id' is set
        if self.api_client.client_side_validation and ('program_id' not in local_var_params or # noqa: E501
                                                        local_var_params['program_id'] is None): # noqa: E501
            raise ApiValueError("Missing the required parameter `program_id` when calling `get_repositories`") # noqa: E501
        # verify the required parameter 'x_gw_ims_org_id' is set
        if self.api_client.client_side_validation and ('x_gw_ims_org_id' not in local_var_params or # noqa: E501
                                                        local_var_params['x_gw_ims_org_id'] is None): # noqa: E501
            raise ApiValueError("Missing the required parameter `x_gw_ims_org_id` when calling `get_repositories`") # noqa: E501
        # verify the required parameter 'authorization' is set
        if self.api_client.client_side_validation and ('authorization' not in local_var_params or # noqa: E501
                                                        local_var_params['authorization'] is None): # noqa: E501
            raise ApiValueError("Missing the required parameter `authorization` when calling `get_repositories`") # noqa: E501
        # verify the required parameter 'x_api_key' is set
        if self.api_client.client_side_validation and ('x_api_key' not in local_var_params or # noqa: E501
                                                        local_var_params['x_api_key'] is None): # noqa: E501
            raise ApiValueError("Missing the required parameter `x_api_key` when calling `get_repositories`") # noqa: E501
        # Assemble path/query/header/form parameters for the REST call.
        collection_formats = {}
        path_params = {}
        if 'program_id' in local_var_params:
            path_params['programId'] = local_var_params['program_id'] # noqa: E501
        query_params = []
        header_params = {}
        if 'x_gw_ims_org_id' in local_var_params:
            header_params['x-gw-ims-org-id'] = local_var_params['x_gw_ims_org_id'] # noqa: E501
        if 'authorization' in local_var_params:
            header_params['Authorization'] = local_var_params['authorization'] # noqa: E501
        if 'x_api_key' in local_var_params:
            header_params['x-api-key'] = local_var_params['x_api_key'] # noqa: E501
        form_params = []
        local_var_files = {}
        body_params = None
        # HTTP header `Accept`
        header_params['Accept'] = self.api_client.select_header_accept(
            ['application/json']) # noqa: E501
        # Authentication setting
        auth_settings = [] # noqa: E501
        # Delegate the actual HTTP round trip to the shared ApiClient.
        return self.api_client.call_api(
            '/api/program/{programId}/repositories', 'GET',
            path_params,
            query_params,
            header_params,
            body=body_params,
            post_params=form_params,
            files=local_var_files,
            response_type='RepositoryList', # noqa: E501
            auth_settings=auth_settings,
            async_req=local_var_params.get('async_req'),
            _return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
            _preload_content=local_var_params.get('_preload_content', True),
            _request_timeout=local_var_params.get('_request_timeout'),
            collection_formats=collection_formats)
    def get_repository(self, program_id, repository_id, x_gw_ims_org_id, authorization, x_api_key, **kwargs): # noqa: E501
        """Get Repository # noqa: E501
        Returns an repository by its id # noqa: E501
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.get_repository(program_id, repository_id, x_gw_ims_org_id, authorization, x_api_key, async_req=True)
        >>> result = thread.get()
        :param async_req bool: execute request asynchronously
        :param str program_id: Identifier of the program (required)
        :param str repository_id: Identifier of the repository (required)
        :param str x_gw_ims_org_id: IMS organization ID that the request is being made under. (required)
        :param str authorization: Bearer [token] - An access token for the technical account created through integration with Adobe IO (required)
        :param str x_api_key: IMS Client ID (API Key) which is subscribed to consume services on console.adobe.io (required)
        :param _preload_content: if False, the urllib3.HTTPResponse object will
                                 be returned without reading/decoding response
                                 data. Default is True.
        :param _request_timeout: timeout setting for this request. If one
                                 number provided, it will be total request
                                 timeout. It can also be a pair (tuple) of
                                 (connection, read) timeouts.
        :return: Repository
                 If the method is called asynchronously,
                 returns the request thread.
        """
        # Thin wrapper: force _return_http_data_only so callers get only the
        # deserialized body instead of the (data, status, headers) tuple.
        kwargs['_return_http_data_only'] = True
        return self.get_repository_with_http_info(program_id, repository_id, x_gw_ims_org_id, authorization, x_api_key, **kwargs) # noqa: E501
    def get_repository_with_http_info(self, program_id, repository_id, x_gw_ims_org_id, authorization, x_api_key, **kwargs): # noqa: E501
        """Get Repository # noqa: E501
        Returns an repository by its id # noqa: E501
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.get_repository_with_http_info(program_id, repository_id, x_gw_ims_org_id, authorization, x_api_key, async_req=True)
        >>> result = thread.get()
        :param async_req bool: execute request asynchronously
        :param str program_id: Identifier of the program (required)
        :param str repository_id: Identifier of the repository (required)
        :param str x_gw_ims_org_id: IMS organization ID that the request is being made under. (required)
        :param str authorization: Bearer [token] - An access token for the technical account created through integration with Adobe IO (required)
        :param str x_api_key: IMS Client ID (API Key) which is subscribed to consume services on console.adobe.io (required)
        :param _return_http_data_only: response data without head status code
                                       and headers
        :param _preload_content: if False, the urllib3.HTTPResponse object will
                                 be returned without reading/decoding response
                                 data. Default is True.
        :param _request_timeout: timeout setting for this request. If one
                                 number provided, it will be total request
                                 timeout. It can also be a pair (tuple) of
                                 (connection, read) timeouts.
        :return: tuple(Repository, status_code(int), headers(HTTPHeaderDict))
                 If the method is called asynchronously,
                 returns the request thread.
        """
        # locals() must be captured before any other local is bound so it
        # contains exactly the method's arguments (plus 'self' and 'kwargs').
        local_var_params = locals()
        all_params = [
            'program_id',
            'repository_id',
            'x_gw_ims_org_id',
            'authorization',
            'x_api_key'
        ]
        all_params.extend(
            [
                'async_req',
                '_return_http_data_only',
                '_preload_content',
                '_request_timeout'
            ]
        )
        # Reject unknown keyword arguments, then flatten kwargs into the
        # captured parameter dict.
        for key, val in six.iteritems(local_var_params['kwargs']):
            if key not in all_params:
                raise ApiTypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method get_repository" % key
                )
            local_var_params[key] = val
        del local_var_params['kwargs']
        # verify the required parameter 'program_id' is set
        if self.api_client.client_side_validation and ('program_id' not in local_var_params or # noqa: E501
                                                        local_var_params['program_id'] is None): # noqa: E501
            raise ApiValueError("Missing the required parameter `program_id` when calling `get_repository`") # noqa: E501
        # verify the required parameter 'repository_id' is set
        if self.api_client.client_side_validation and ('repository_id' not in local_var_params or # noqa: E501
                                                        local_var_params['repository_id'] is None): # noqa: E501
            raise ApiValueError("Missing the required parameter `repository_id` when calling `get_repository`") # noqa: E501
        # verify the required parameter 'x_gw_ims_org_id' is set
        if self.api_client.client_side_validation and ('x_gw_ims_org_id' not in local_var_params or # noqa: E501
                                                        local_var_params['x_gw_ims_org_id'] is None): # noqa: E501
            raise ApiValueError("Missing the required parameter `x_gw_ims_org_id` when calling `get_repository`") # noqa: E501
        # verify the required parameter 'authorization' is set
        if self.api_client.client_side_validation and ('authorization' not in local_var_params or # noqa: E501
                                                        local_var_params['authorization'] is None): # noqa: E501
            raise ApiValueError("Missing the required parameter `authorization` when calling `get_repository`") # noqa: E501
        # verify the required parameter 'x_api_key' is set
        if self.api_client.client_side_validation and ('x_api_key' not in local_var_params or # noqa: E501
                                                        local_var_params['x_api_key'] is None): # noqa: E501
            raise ApiValueError("Missing the required parameter `x_api_key` when calling `get_repository`") # noqa: E501
        # Assemble path/query/header/form parameters for the REST call.
        collection_formats = {}
        path_params = {}
        if 'program_id' in local_var_params:
            path_params['programId'] = local_var_params['program_id'] # noqa: E501
        if 'repository_id' in local_var_params:
            path_params['repositoryId'] = local_var_params['repository_id'] # noqa: E501
        query_params = []
        header_params = {}
        if 'x_gw_ims_org_id' in local_var_params:
            header_params['x-gw-ims-org-id'] = local_var_params['x_gw_ims_org_id'] # noqa: E501
        if 'authorization' in local_var_params:
            header_params['Authorization'] = local_var_params['authorization'] # noqa: E501
        if 'x_api_key' in local_var_params:
            header_params['x-api-key'] = local_var_params['x_api_key'] # noqa: E501
        form_params = []
        local_var_files = {}
        body_params = None
        # HTTP header `Accept`
        header_params['Accept'] = self.api_client.select_header_accept(
            ['application/json']) # noqa: E501
        # Authentication setting
        auth_settings = [] # noqa: E501
        # Delegate the actual HTTP round trip to the shared ApiClient.
        return self.api_client.call_api(
            '/api/program/{programId}/repository/{repositoryId}', 'GET',
            path_params,
            query_params,
            header_params,
            body=body_params,
            post_params=form_params,
            files=local_var_files,
            response_type='Repository', # noqa: E501
            auth_settings=auth_settings,
            async_req=local_var_params.get('async_req'),
            _return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
            _preload_content=local_var_params.get('_preload_content', True),
            _request_timeout=local_var_params.get('_request_timeout'),
            collection_formats=collection_formats)
| 53.636364 | 550 | 0.630395 |
e13f5dced0632b1dfa7c17cd70e5a09a41863015 | 865 | py | Python | test/test_encoding.py | codders/mitmproxy | 4f9deae8fc2b5f8b0519f82b1f3cdda6c115b475 | [
"MIT"
] | 3 | 2016-10-08T05:19:11.000Z | 2020-05-29T20:08:56.000Z | test/test_encoding.py | codders/mitmproxy | 4f9deae8fc2b5f8b0519f82b1f3cdda6c115b475 | [
"MIT"
] | null | null | null | test/test_encoding.py | codders/mitmproxy | 4f9deae8fc2b5f8b0519f82b1f3cdda6c115b475 | [
"MIT"
] | 1 | 2015-08-20T02:20:27.000Z | 2015-08-20T02:20:27.000Z | from libmproxy import encoding
def test_identity():
    """The identity encoding round-trips unchanged; unknown codecs yield None."""
    assert encoding.decode("identity", "string") == "string"
    assert encoding.encode("identity", "string") == "string"
    # unknown codec names are rejected rather than raising
    assert not encoding.encode("nonexistent", "string")
    # PEP 8: compare against None with `is`, not `==`
    assert encoding.decode("nonexistent encoding", "string") is None
def test_gzip():
    """gzip encode/decode round-trips; corrupt input decodes to None."""
    assert encoding.decode("gzip", encoding.encode("gzip", "string")) == "string"
    # PEP 8: compare against None with `is`, not `==`
    assert encoding.decode("gzip", "bogus") is None
def test_deflate():
    """deflate round-trips both with and without the zlib header/checksum."""
    assert encoding.decode("deflate", encoding.encode("deflate", "string")) == "string"
    # A raw deflate stream (2-byte zlib header and 4-byte checksum stripped)
    # must also decode -- some servers send it that way.
    assert encoding.decode("deflate", encoding.encode("deflate", "string")[2:-4]) == "string"
    # PEP 8: compare against None with `is`, not `==`
    assert encoding.decode("deflate", "bogus") is None
| 25.441176 | 68 | 0.559538 |
db1588c097751050abf6457a4816b1d57b2d4b01 | 364 | py | Python | db.py | nirbhay24/pythonRestApi | 03595d11181103cfddecf8925953aac2af4c4613 | [
"MIT"
] | null | null | null | db.py | nirbhay24/pythonRestApi | 03595d11181103cfddecf8925953aac2af4c4613 | [
"MIT"
] | null | null | null | db.py | nirbhay24/pythonRestApi | 03595d11181103cfddecf8925953aac2af4c4613 | [
"MIT"
] | 1 | 2019-01-09T16:35:40.000Z | 2019-01-09T16:35:40.000Z | #!flask/bin/python
import os

import pyodbc

# Azure SQL connection settings.  Values can be overridden through the
# environment; the in-source literals are kept only as fallbacks so existing
# deployments keep working.
# SECURITY(review): credentials committed to source control should be rotated
# and supplied exclusively via the DB_* environment variables.
server = os.environ.get('DB_SERVER', 'kms-db.database.windows.net')
database = os.environ.get('DB_NAME', 'kms_dev')
username = os.environ.get('DB_USER', 'kmsadmin@kms-db')
password = os.environ.get('DB_PASSWORD', 'Dotvik@98')
driver = '{SQL Server}'
# BUGFIX: the connection string used to contain PORT twice ("PORT=1433" and a
# mistyped "PORT=1443"); SQL Server's default port is 1433.
connectionString = 'DRIVER='+driver+';PORT=1433;SERVER='+server+';DATABASE='+database+';UID='+username+';PWD='+password
# Never echo the raw connection string -- it contains the password.
print(connectionString.replace(password, '********'))
cnxn = pyodbc.connect(connectionString)
38d921b9310db716abdea82726dc3758093340f9 | 920 | py | Python | 008-NoCorrectionBraking/008-NoCorrectionBraking.py | haruki-taka8/hello-python | c18419fd387240dde81e4df546a5e8a6d37d7164 | [
"Unlicense"
] | null | null | null | 008-NoCorrectionBraking/008-NoCorrectionBraking.py | haruki-taka8/hello-python | c18419fd387240dde81e4df546a5e8a6d37d7164 | [
"Unlicense"
] | null | null | null | 008-NoCorrectionBraking/008-NoCorrectionBraking.py | haruki-taka8/hello-python | c18419fd387240dde81e4df546a5e8a6d37d7164 | [
"Unlicense"
] | null | null | null | # 008-NoCorrectionBraking.py
# "No correction braking" is my best attempt at translating "ブレーキ込め直しなし"
# ブレーキ込め直しなし is an advanced train braking technique.
# Once the train driver applies brakes to a certain maximum level,
# they only release the brake gradually until the train stops.
# This is an example of no correction braking:
# 0 > 1 > 6 > 4 > 2 > 0
# ^ upon reaching the maximum level of 6, the braking level decreases only.
# This program reads in a LF-separated list of integers,
# and determine whether the array indicates a no correction braking.

def is_no_correction_braking(levels):
    """Return True when `levels` never increases again after its first decrease.

    The sequence is compared against an implicit starting level of 0,
    matching the original script's behaviour.
    """
    releasing = False
    previous = 0
    for level in levels:
        if releasing and level > previous:
            return False
        if not releasing and level < previous:
            releasing = True  # brake release has begun
        previous = level
    return True

def _read_levels():
    """Yield integers from stdin until EOF or a non-integer line."""
    while True:
        try:
            yield int(input())
        # BUGFIX: was a bare `except:`, which also swallowed
        # KeyboardInterrupt/SystemExit; only the expected end-of-input
        # conditions terminate the stream now.
        except (EOFError, ValueError):
            return

if __name__ == '__main__':
    print(is_no_correction_braking(_read_levels()))
| 26.285714 | 75 | 0.659783 |
d83a7e73f6c6b1a2247f1c15836210956a9a2e00 | 314 | py | Python | Add Digits.py | ChyavanKoushik/-My-Solutions-to-Leetcode-problems-using-Python-3 | b78779bd3f5313ab4752f9e9a23cb4a93805aff6 | [
"MIT"
] | 1 | 2020-07-11T15:10:19.000Z | 2020-07-11T15:10:19.000Z | Add Digits.py | ChyavanKoushik/-My-Solutions-to-Leetcode-problems-using-Python-3 | b78779bd3f5313ab4752f9e9a23cb4a93805aff6 | [
"MIT"
] | null | null | null | Add Digits.py | ChyavanKoushik/-My-Solutions-to-Leetcode-problems-using-Python-3 | b78779bd3f5313ab4752f9e9a23cb4a93805aff6 | [
"MIT"
] | null | null | null | class Solution:
def addDigits(self, num):
"""
:type num: int
:rtype: int
"""
lis = list(str(num))
while(len(lis)!=1):
summ = 0
for item in lis:
summ += int(item)
lis = list(str(summ))
return int(lis[0]) | 24.153846 | 33 | 0.414013 |
cf3a249490d91a26f7275cfd2b0a3474b83f292b | 3,730 | py | Python | project/test.py | ChakirBelhaj/python-for-text-analysis | 92320539a48d2a4ceea9dd55f4e17e5a39429508 | [
"Apache-2.0"
] | null | null | null | project/test.py | ChakirBelhaj/python-for-text-analysis | 92320539a48d2a4ceea9dd55f4e17e5a39429508 | [
"Apache-2.0"
] | null | null | null | project/test.py | ChakirBelhaj/python-for-text-analysis | 92320539a48d2a4ceea9dd55f4e17e5a39429508 | [
"Apache-2.0"
] | null | null | null | import pandas as pd
# Kickstarter funding prediction: load and clean the training/test CSVs,
# one-hot encode the categorical columns and tune a KNN classifier.
import seaborn as sns
import numpy as np
from scipy.stats import norm
from scipy import stats
from sklearn.preprocessing import StandardScaler
from sklearn.model_selection import train_test_split
# BUGFIX: GridSearchCV is used below but its import was commented out,
# which made the grid-search section crash with a NameError at runtime.
from sklearn.model_selection import GridSearchCV
from sklearn.model_selection import cross_val_score
from sklearn.linear_model import LinearRegression
from sklearn.neighbors import KNeighborsClassifier
from sklearn.feature_selection import RFE
from sklearn.metrics import classification_report
import warnings
warnings.filterwarnings('ignore')
# pd.set_option('display.max_rows', None)

# ---- training data ---------------------------------------------------------
trainData = pd.read_csv(r'./KS_model_training_data.csv', sep=',')
# drop leakage columns (only known after a campaign has finished)
trainData = trainData.drop(columns=['backers_count', 'converted_pledged_amount', 'pledged', 'usd_pledged'])
trainData.isnull().sum()  # NOTE(review): result discarded; kept for parity with the original
trainData = trainData.dropna()
np.where(trainData.applymap(lambda x: x == ''))  # NOTE(review): result discarded
trainData = trainData.head(100000)
trainData['created_at'] = pd.to_datetime(trainData['created_at'], unit='s')
trainData['created_month'] = trainData.created_at.apply(lambda x: x.month)
trainData['deadline'] = pd.to_datetime(trainData['deadline'], unit='s')
# derived features
trainData['created_year'] = trainData.created_at.apply(lambda x: x.year)
trainData['blurb_length'] = trainData['blurb'].str.len()
trainData['name_length'] = trainData['name'].str.len()
trainData.drop(['project_id', 'blurb', 'blurb_length', 'created_at', 'deadline', 'fx_rate', 'goal', 'launched_at', 'name', 'staff_pick',
               'location', 'project_url', 'reward_url', 'created_month', 'name_length'], 1, inplace=True)

# ---- test data --------------------------------------------------------------
testData = pd.read_csv(r'./KS_test_data.csv', sep=';')
np.where(testData.applymap(lambda x: x == ''))  # NOTE(review): result discarded
testData = testData.dropna()
testData['name_length'] = testData['name'].str.len()
testData['blurb_length'] = testData['blurb'].str.len()
testData['created_at'] = pd.to_datetime(testData['created_at'], unit='s')
testData['created_month'] = testData.created_at.apply(lambda x: x.month)
testData['created_year'] = testData.created_at.apply(lambda x: x.year)
testData.drop(['project_id', 'blurb', 'blurb_length', 'created_at', 'deadline', 'fx_rate', 'goal', 'launched_at', 'name', 'staff_pick',
               'location', 'project_url', 'reward_url', 'created_month', 'name_length'], 1, inplace=True)

# ---- encoding and model -----------------------------------------------------
categoriesToEncode = ['category', 'subcategory', 'currency', 'country']
trainDataHotEncoded = pd.get_dummies(trainData, prefix='category', columns=categoriesToEncode)
testDataHotEncoded = pd.get_dummies(testData, prefix='category', columns=categoriesToEncode)
y = trainDataHotEncoded['funded']
X = trainDataHotEncoded.drop('funded', axis=1)
# X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3)
knn = KNeighborsClassifier(n_neighbors=27)
cross_val_score(knn, X, y, cv=10)  # NOTE(review): scores discarded; kept for parity
# knn.fit(X_train,y_train)
# accuracy = knn.score(X_test, y_test)
# print(classification_report(y_test, knn.predict(X)))
# print("accuracy = " + str(round(100 * accuracy)) + "%")
print("starting")
# source
# https://medium.datadriveninvestor.com/k-nearest-neighbors-in-python-hyperparameters-tuning-716734bc557f
# List hyperparameters that we want to tune.
leaf_size = list(range(1, 50))
n_neighbors = list(range(1, 30))
p = [1, 2]
hyperparameters = dict(leaf_size=leaf_size, n_neighbors=n_neighbors, p=p)
# Create new KNN object
knn_2 = KNeighborsClassifier()
# Use GridSearch
clf = GridSearchCV(knn_2, hyperparameters, cv=10)
# Fit the model
best_model = clf.fit(X, y)
# Print the value of the best hyperparameters
print('Best leaf_size:', best_model.best_estimator_.get_params()['leaf_size'])
print('Best p:', best_model.best_estimator_.get_params()['p'])
print('Best n_neighbors:', best_model.best_estimator_.get_params()['n_neighbors'])
ef22d723fda63f489fc37c7848b70eefaf8ab287 | 7,026 | py | Python | tape/tasks/AbstractLanguageModelingTask.py | nickbhat/tape-1 | 96778d1a3bb35acf966c6b32b4df1d738fa19ffd | [
"MIT"
] | null | null | null | tape/tasks/AbstractLanguageModelingTask.py | nickbhat/tape-1 | 96778d1a3bb35acf966c6b32b4df1d738fa19ffd | [
"MIT"
] | null | null | null | tape/tasks/AbstractLanguageModelingTask.py | nickbhat/tape-1 | 96778d1a3bb35acf966c6b32b4df1d738fa19ffd | [
"MIT"
] | null | null | null | from typing import Tuple, List, Dict, Union
import os
from glob import glob
import pickle as pkl
import tensorflow as tf
import rinokeras as rk
from tape.losses import classification_loss_and_accuracy
from .Task import SequenceToSequenceClassificationTask
class AbstractLanguageModelingTask(SequenceToSequenceClassificationTask):
    """Language modeling over the Pfam corpus.

    Adds LM-specific metrics (ECE, perplexity) on top of the generic
    sequence-to-sequence classification loss and builds tf.data pipelines
    that hold out a fixed set of Pfam clans and families for evaluation.
    """
    def loss_function(self,
                      inputs: Dict[str, tf.Tensor],
                      outputs: Dict[str, tf.Tensor]) -> Tuple[tf.Tensor, Dict[str, tf.Tensor]]:
        """Return (loss, metrics) for one batch.

        Metrics: accuracy (under self.key_metric), ECE = exp(mean masked
        cross-entropy), and the masked mean of the per-position perplexity
        exp(entropy of the predictive distribution).
        """
        labels = inputs[self._label_name]
        logits = outputs[self._output_name]
        if self._mask_name != 'sequence_mask':
            mask = outputs[self._mask_name]
        else:
            # No model-provided mask: derive one from the sequence lengths.
            mask = rk.utils.convert_sequence_length_to_sequence_mask(
                labels, inputs['protein_length'])
        loss, accuracy = classification_loss_and_accuracy(
            labels, logits, mask)
        # "exponentiated cross entropy": exp of the (already mask-averaged) loss
        ece = tf.exp(loss)
        probs = tf.nn.softmax(logits)
        logp = tf.nn.log_softmax(logits)
        # per-position perplexity = exp(entropy of the predictive distribution)
        perplexity = tf.exp(-tf.reduce_sum(probs * logp, -1))
        # masked mean over positions; 1e-10 guards against an all-masked batch
        weights = tf.ones_like(perplexity) * tf.cast(mask, perplexity.dtype)
        perplexity = tf.reduce_sum(perplexity * weights) / (tf.reduce_sum(weights) + 1e-10)
        metrics = {self.key_metric: accuracy, 'ECE': ece, 'Perplexity': perplexity}
        return loss, metrics
    def get_train_files(self, data_folder) -> List[str]:
        """Return all Pfam training TFRecord shard paths under <data_folder>/pfam."""
        train_files = glob(os.path.join(data_folder, 'pfam', '*train_*[0-9].tfrecord'))
        if len(train_files) == 0:
            raise FileNotFoundError("No training TFrecord files found in directory")
        return train_files
    def get_valid_files(self, data_folder) -> List[str]:
        """Return all Pfam validation TFRecord shard paths under <data_folder>/pfam."""
        valid_files = glob(os.path.join(data_folder, 'pfam', '*valid_*[0-9].tfrecord'))
        if len(valid_files) == 0:
            raise FileNotFoundError("No validation TFrecord files found in directory")
        return valid_files
    def prepare_dataset(self, # type: ignore
                        filenames: tf.data.Dataset,
                        buckets: List[int],
                        batch_sizes: List[int],
                        shuffle: bool,
                        is_holdout: bool,
                        holdout_clans: set,
                        holdout_families: set) -> tf.data.Dataset:
        """Build a length-bucketed tf.data pipeline over TFRecord shards.

        When is_holdout is False, examples from the holdout clans/families
        are dropped; when True, ONLY those examples are kept.
        """
        def _check_membership(tensor, array):
            # py_func bridge: evaluates the Python `in` test on the tensor's
            # runtime value (TF1-style; tf.py_func is deprecated in TF2).
            iscontained = tf.py_func(lambda t: t in array, [tensor], tf.bool)
            iscontained.set_shape(())
            return iscontained
        def _filter_fn(example):
            is_holdout_example = \
                _check_membership(example['clan'], holdout_clans) | \
                _check_membership(example['family'], holdout_families)
            # XNOR: keep examples whose holdout status matches the requested split
            return ~ (is_holdout ^ is_holdout_example)
        def _load_records_and_preprocess(fname: tf.Tensor):
            dataset = tf.data.TFRecordDataset(fname)
            dataset = dataset.map(self._deserialization_func)
            # Hold out a prespecified set of families and clans
            dataset = dataset.filter(_filter_fn)
            return dataset
        # Read many shards in parallel; `sloppy` trades determinism for speed.
        dataset = filenames.apply(
            tf.data.experimental.parallel_interleave(
                _load_records_and_preprocess,
                sloppy=True,
                cycle_length=128,
                buffer_output_elements=32))
        dataset = dataset.shuffle(1024) if shuffle else dataset.prefetch(1024)
        # Batch sequences of similar length together to minimise padding.
        batch_fun = tf.data.experimental.bucket_by_sequence_length(
            lambda example: example['protein_length'],
            buckets,
            batch_sizes)
        dataset = dataset.apply(batch_fun)
        return dataset
    def get_data(self,
                 boundaries: Tuple[List[int], List[int]],
                 data_folder: str,
                 max_sequence_length: int = 100000,
                 add_cls_token: bool = False,
                 **kwargs) -> Tuple[tf.data.Dataset, tf.data.Dataset]:
        """Return (train, valid) datasets over the non-holdout Pfam data.

        NOTE(review): max_sequence_length and add_cls_token are accepted but
        never used here -- confirm whether callers rely on them.
        """
        fam_file = os.path.join(data_folder, 'pfam', 'pfam_fams.pkl')
        clan_file = os.path.join(data_folder, 'pfam', 'pfam_clans.pkl')
        _holdout_clans = ['CL0635', 'CL0624', 'CL0355', 'CL0100', 'CL0417', 'CL0630']
        _holdout_families = ['PF18346', 'PF14604', 'PF18697', 'PF03577', 'PF01112', 'PF03417']
        # Map the human-readable accession IDs to the integer IDs stored in
        # the TFRecords.
        with open(fam_file, 'rb') as f:
            fam_dict: Dict[str, int] = pkl.load(f)
        with open(clan_file, 'rb') as f:
            clan_dict: Dict[str, int] = pkl.load(f)
        holdout_clans = {clan_dict[k] for k in _holdout_clans}
        holdout_families = {fam_dict[k] for k in _holdout_families}
        print('Currently holding out the following families:', *_holdout_families, sep='\n-')
        print('Currently holding out the following clans: ', *_holdout_clans, sep='\n-')
        train_files = self.get_train_files(data_folder)
        valid_files = self.get_valid_files(data_folder)
        # A shard matching both globs is kept only in the validation set.
        train_files = [fname for fname in train_files if fname not in valid_files]
        train_filenames = tf.data.Dataset.from_tensor_slices(tf.constant(train_files))
        valid_filenames = tf.data.Dataset.from_tensor_slices(tf.constant(valid_files))
        buckets, batch_sizes = boundaries
        train_data = self.prepare_dataset(
            train_filenames, buckets, batch_sizes, shuffle=True, is_holdout=False,
            holdout_clans=holdout_clans, holdout_families=holdout_families)
        valid_data = self.prepare_dataset(
            valid_filenames, buckets, batch_sizes, shuffle=False, is_holdout=False,
            holdout_clans=holdout_clans, holdout_families=holdout_families)
        return train_data, valid_data
    def get_test_data(self,
                      boundaries: Tuple[List[int], List[int]],
                      datafile: Union[str, List[str]],
                      **kwargs) -> tf.data.Dataset:
        """Return a test dataset for the given TFRecord file(s).

        Files whose name contains 'valid' are treated as holdout evaluation
        data.  NOTE(review): the pfam mapping files are read from the
        hard-coded 'data/pfam/' prefix here, unlike get_data which honours
        its data_folder argument -- confirm this asymmetry is intended.
        """
        if isinstance(datafile, str):
            datafile = [datafile]
        if not all(map(os.path.exists, datafile)):
            raise FileNotFoundError(datafile)
        is_holdout = any(map(lambda fname: 'valid' in fname, datafile))
        fam_file = 'data/pfam/pfam_fams.pkl'
        clan_file = 'data/pfam/pfam_clans.pkl'
        _holdout_clans = ['CL0635', 'CL0624', 'CL0355', 'CL0100', 'CL0417', 'CL0630']
        _holdout_families = ['PF18346', 'PF14604', 'PF18697', 'PF03577', 'PF01112', 'PF03417']
        with open(fam_file, 'rb') as f:
            fam_dict: Dict[str, int] = pkl.load(f)
        with open(clan_file, 'rb') as f:
            clan_dict: Dict[str, int] = pkl.load(f)
        holdout_clans = {clan_dict[k] for k in _holdout_clans}
        holdout_families = {fam_dict[k] for k in _holdout_families}
        buckets, batch_sizes = boundaries
        filenames = tf.data.Dataset.from_tensor_slices(tf.constant(datafile))
        test_data = self.prepare_dataset(
            filenames, buckets, batch_sizes, shuffle=False, is_holdout=is_holdout,
            holdout_clans=holdout_clans, holdout_families=holdout_families)
        return test_data
| 41.087719 | 95 | 0.624395 |
0b1242bb3c7c7a1f5c7ed7b262f5a5737a2b3a33 | 1,671 | py | Python | Python/Tests/TestData/DebugAttach/Program.py | techkey/PTVS | 8355e67eedd8e915ca49bd38a2f36172696fd903 | [
"Apache-2.0"
] | 404 | 2019-05-07T02:21:57.000Z | 2022-03-31T17:03:04.000Z | Python/Tests/TestData/DebugAttach/Program.py | techkey/PTVS | 8355e67eedd8e915ca49bd38a2f36172696fd903 | [
"Apache-2.0"
] | 1,672 | 2019-05-06T21:09:38.000Z | 2022-03-31T23:16:04.000Z | Python/Tests/TestData/DebugAttach/Program.py | RaymonGulati1/PTVS | ee1d09f2a94be4e21016f7579205bb65ec82c616 | [
"Apache-2.0"
] | 186 | 2019-05-13T03:17:37.000Z | 2022-03-31T16:24:05.000Z | from threading import Thread
import threading
import time
from exceptions import KeyboardInterrupt
class C(object): pass  # trivial object type; instances are weak-referenceable
# NOTE(review): `global` at module scope is a no-op -- module-level names are
# already global.  thread_abort is the shared stop flag polled by the workers.
global thread_abort
thread_abort = False
def exception_storm():
    """Worker loop that generates a steady stream of internally-handled exceptions.

    Debugger test fixture: `x` is never inserted into the WeakValueDictionary,
    so every `__contains__` probe misses -- presumably triggering (and
    swallowing) an internal KeyError each time, which gives the attach tests
    a high exception rate to observe.  Runs until the module-global
    thread_abort flag is set.
    """
    my_name = threading.current_thread().name
    my_ident = threading.current_thread().ident
    i = 0
    from weakref import WeakValueDictionary
    d = WeakValueDictionary()
    k = C()
    v = C()
    d[k] = v
    x = C()  # never stored in d, so every lookup below misses
    while not thread_abort:
        d.__contains__(x)
        i += 1
        if i % 10000 == 0: print('{} [{}] processed {} exceptions'.format(my_name, my_ident, i))
    print("Exiting")
def lazy_sleeper(sleep_seconds=1):
    """Worker loop that mostly sleeps, reporting every 10 wake-ups.

    Runs until the module-global thread_abort flag is set; the flag is only
    checked between naps, so shutdown can lag by up to sleep_seconds.
    """
    my_name = threading.current_thread().name
    my_ident = threading.current_thread().ident
    i = 0
    while not thread_abort:
        time.sleep(sleep_seconds)
        i += 1
        if i % 10 == 0: print('{} [{}] woke up after {} naps'.format(my_name, my_ident, i*sleep_seconds))
def wait_for_threads(threads, timeout=10):
    """Join each thread with the given per-thread timeout, reporting the outcome."""
    for worker in threads:
        print('joining {} ...'.format(worker.name))
        worker.join(timeout)
        if worker.is_alive():
            print('\ttimed out joining {}'.format(worker.name))
        else:
            print('\t{} exited normally'.format(worker.name))
if __name__ == '__main__':
    # Spin up 20 exception-storm workers and 20 sleepers for the debugger
    # attach tests, then block until Ctrl+C sets the shared stop flag.
    # (Python 2 file: note `xrange` and the `exceptions` module import above.)
    threads = []
    for i in xrange(20):
        threads.append(Thread(target=exception_storm, name='Exceptions-{}'.format(i)))
        threads.append(Thread(target=lazy_sleeper, name='Sleeper-{}'.format(i)))
    for t in threads: t.start()
    try:
        while True:
            wait_for_threads(threads)
    except KeyboardInterrupt:
        # Module-level assignment rebinds the global flag the workers poll.
        thread_abort = True
        wait_for_threads(threads)
| 27.85 | 105 | 0.618791 |
5e9490b64546e921c59ffb9e80f8276b085e65e0 | 1,078 | py | Python | api.py | meantheory/rpn | 9092ac41ec21e8082d04d5a79a9586ece301c8da | [
"MIT"
] | null | null | null | api.py | meantheory/rpn | 9092ac41ec21e8082d04d5a79a9586ece301c8da | [
"MIT"
] | null | null | null | api.py | meantheory/rpn | 9092ac41ec21e8082d04d5a79a9586ece301c8da | [
"MIT"
] | null | null | null | from flask import Flask
from flask import request
from flask import jsonify
import rpncontroller
import shunt
app = Flask(__name__)
@app.route("/")
def hello():
    """Root endpoint: identify this service."""
    return jsonify(info="RPN API")
@app.route("/v1/rpn", methods=["POST"])
def v1rpnweb():
    """Evaluate an RPN expression posted as JSON: {"input": "..."}.

    Returns {"answer": ...} on success, or {"error": ...} with HTTP 400 for
    malformed expressions.  BUGFIX: a request without a JSON body used to
    fall through and implicitly return None, making Flask raise a 500
    ("view did not return a valid response"); it now returns a 400.
    """
    if not request.json:
        resp = jsonify(error='missing JSON body')
        resp.status_code = 400
        return resp
    rpninput = request.json.get('input')
    try:
        result = rpncontroller.dorpnalt(rpninput)
        resp = jsonify(answer=result)
    except IndexError:
        # stack underflow: too few operands for an operator
        resp = jsonify(error=rpncontroller.INDEX_ERROR)
        resp.status_code = 400
    except ValueError:
        # token that is neither a number nor a known operator
        resp = jsonify(error=rpncontroller.VALUE_ERROR)
        resp.status_code = 400
    return resp
@app.route("/v1/calculator", methods=["POST"])
def v1calculator():
    """Evaluate an infix expression posted as JSON: {"input": "..."}.

    Returns {"answer": ...} on success, or {"error": ...} with HTTP 400.
    BUGFIX: a request without a JSON body used to fall through and
    implicitly return None (a Flask 500); it now returns a 400.
    """
    if not request.json:
        resp = jsonify(error='missing JSON body')
        resp.status_code = 400
        return resp
    cinput = request.json.get('input')
    c = shunt.Calculator()
    try:
        result = c.calculate(cinput)
        resp = jsonify(answer=result)
    except Exception as e:
        # NOTE(review): broad catch keeps the API from 500ing on any parser
        # error; consider narrowing to the calculator's own exception types.
        resp = jsonify(error='%s' % e)
        resp.status_code = 400
    return resp
if __name__ == "__main__":
    #app.run()
    # SECURITY(review): debug mode enables the Werkzeug interactive debugger
    # (arbitrary code execution in the browser) and binding to 0.0.0.0
    # exposes it on every network interface -- never run this configuration
    # in production.
    app.debug = True
    app.run(host='0.0.0.0')
| 18.586207 | 50 | 0.681818 |
bddcae8efcee5f405c291f0fcfc15c90af99fdbf | 25,376 | py | Python | main.py | TRojaner2013/vaccipy | bbd9c1b5f5edba6d6d91422034fdffa28804538b | [
"MIT"
] | null | null | null | main.py | TRojaner2013/vaccipy | bbd9c1b5f5edba6d6d91422034fdffa28804538b | [
"MIT"
] | null | null | null | main.py | TRojaner2013/vaccipy | bbd9c1b5f5edba6d6d91422034fdffa28804538b | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
import argparse
import copy
import json
import os
from tools.exceptions import ValidationError, PushoverNotificationError, TelegramNotificationError
from tools.its import ImpfterminService
from tools.kontaktdaten import decode_wochentag, encode_wochentag, get_kontaktdaten, \
validate_kontaktdaten, validate_datum
from tools.utils import create_missing_dirs, get_current_version, \
get_latest_version, pushover_validation, remove_prefix, \
telegram_validation, unique, update_available
from tools.chromium_downloader import check_chromium, download_chromium, check_webdriver, download_webdriver, current_platform
PATH = os.path.dirname(os.path.realpath(__file__))
def update_kontaktdaten_interactive(
        known_kontaktdaten,
        command,
        configure_notifications,
        filepath=None):
    """
    Interactively prompt for any missing contact data and persist the
    result to *filepath* as JSON.

    :param known_kontaktdaten: contact data that is already known and
        must not be asked for again
    :param command: either "code" or "search"; determines which fields
        are required at all
    :param configure_notifications: only if True, the optional Pushover /
        Telegram notification setup is offered
    :param filepath: path of the JSON file the contact data is written to
    :return: the (possibly extended) contact-data dictionary
    """
    assert (command in ["code", "search"])
    # Fail early if the contact data passed in is already invalid.
    validate_kontaktdaten(known_kontaktdaten)
    kontaktdaten = copy.deepcopy(known_kontaktdaten)
    # NOTE(review): opening with 'w' truncates the file immediately; if the
    # user aborts the dialog, the previous contents are lost — TODO confirm.
    with open(filepath, 'w', encoding='utf-8') as file:
        if "plz_impfzentren" not in kontaktdaten:
            print(
                "Mit einem Code kann in mehreren Impfzentren gleichzeitig nach einem Termin gesucht werden.\n"
                "Eine Übersicht über die Gruppierung der Impfzentren findest du hier:\n"
                "https://github.com/iamnotturner/vaccipy/wiki/Ein-Code-fuer-mehrere-Impfzentren\n\n"
                "Trage nun die PLZ deines Impfzentrums ein. Für mehrere Impfzentren die PLZ's kommagetrennt nacheinander.\n"
                "Beispiel: 68163, 69124, 69469\n")
            input_kontaktdaten_key(kontaktdaten,
                                   ["plz_impfzentren"],
                                   "> PLZ's der Impfzentren: ",
                                   lambda x: unique([plz.strip() for plz in x.split(",")]))
            print()
        if "codes" not in kontaktdaten and command == "search":
            print(
                "Bitte gebe jetzt die Vermittlungscodes passend zu den ausgewählten Impfzentren ein.\n"
                "Beachte dabei, dass nur ein Vermittlungscode je Gruppierung benötigt wird.\n"
                "Weitere Infos: https://github.com/iamnotturner/vaccipy/wiki/Ein-Code-fuer-mehrere-Impfzentren\n\n"
                "Mehrere Vermittlungscodes müssen durch Kommas getrennt werden.\n"
                "Beispiel: ABCD-1234-EFGH, ABCD-4321-EFGH, 1234-56AB-CDEF\n")
            input_kontaktdaten_key(
                kontaktdaten, ["codes"], "> Vermittlungscodes: ",
                lambda x: unique([code.strip() for code in x.split(",")]))
            print()
        if "kontakt" not in kontaktdaten:
            kontaktdaten["kontakt"] = {}
        # Personal data is only needed for booking ("search"), not for
        # requesting a Vermittlungscode.
        if "anrede" not in kontaktdaten["kontakt"] and command == "search":
            input_kontaktdaten_key(
                kontaktdaten, ["kontakt", "anrede"], "> Anrede (Frau/Herr/Kind/Divers): ")
        if "vorname" not in kontaktdaten["kontakt"] and command == "search":
            input_kontaktdaten_key(
                kontaktdaten, ["kontakt", "vorname"], "> Vorname: ")
        if "nachname" not in kontaktdaten["kontakt"] and command == "search":
            input_kontaktdaten_key(
                kontaktdaten, ["kontakt", "nachname"], "> Nachname: ")
        if "strasse" not in kontaktdaten["kontakt"] and command == "search":
            input_kontaktdaten_key(
                kontaktdaten, ["kontakt", "strasse"], "> Strasse (ohne Hausnummer): ")
        if "hausnummer" not in kontaktdaten["kontakt"] and command == "search":
            input_kontaktdaten_key(
                kontaktdaten, ["kontakt", "hausnummer"], "> Hausnummer: ")
        if "plz" not in kontaktdaten["kontakt"] and command == "search":
            input_kontaktdaten_key(
                kontaktdaten, ["kontakt", "plz"], "> PLZ des Wohnorts: ")
        if "ort" not in kontaktdaten["kontakt"] and command == "search":
            input_kontaktdaten_key(
                kontaktdaten, ["kontakt", "ort"], "> Wohnort: ")
        if "phone" not in kontaktdaten["kontakt"]:
            # Normalize to the +49 prefix, dropping a leading national 0.
            input_kontaktdaten_key(
                kontaktdaten,
                ["kontakt", "phone"],
                "> Telefonnummer: +49",
                lambda x: x if x.startswith("+49") else f"+49{remove_prefix(x, '0')}")
        if "notificationChannel" not in kontaktdaten["kontakt"]:
            kontaktdaten["kontakt"]["notificationChannel"] = "email"
        if "notificationReceiver" not in kontaktdaten["kontakt"]:
            input_kontaktdaten_key(
                kontaktdaten, ["kontakt", "notificationReceiver"], "> Mail: ")
        if configure_notifications:
            if "notifications" not in kontaktdaten:
                kontaktdaten["notifications"] = {}
            if "pushover" not in kontaktdaten["notifications"]:
                # Loop until the Pushover setup is validated or skipped.
                while True:
                    kontaktdaten["notifications"]["pushover"] = {}
                    if input("> Benachtigung mit Pushover einrichten? (y/n): ").lower() != "n":
                        print()
                        input_kontaktdaten_key(
                            kontaktdaten, ["notifications", "pushover", "app_token"],
                            "> Geben Sie den Pushover APP Token ein: ")
                        input_kontaktdaten_key(
                            kontaktdaten, ["notifications", "pushover", "user_key"],
                            "> Geben Sie den Pushover User Key ein: ")
                        try:
                            validation_code = str(pushover_validation(kontaktdaten["notifications"]["pushover"]))
                        except PushoverNotificationError as exc:
                            print(f"Fehler: {exc}\nBitte versuchen Sie es erneut.")
                            continue
                        validation_input = input("Geben Sie den Validierungscode ein:").strip()
                        if validation_input == validation_code:
                            break
                        # Wrong code: discard the half-configured entry and retry.
                        del kontaktdaten["notifications"]["pushover"]
                        print("Validierung fehlgeschlagen.")
                        print()
                    else:
                        print()
                        break
            if "telegram" not in kontaktdaten["notifications"]:
                # Same validate-or-skip loop as for Pushover above.
                while True:
                    kontaktdaten["notifications"]["telegram"] = {}
                    if input("> Benachtigung mit Telegram einrichten? (y/n): ").lower() != "n":
                        print()
                        input_kontaktdaten_key(
                            kontaktdaten, ["notifications", "telegram", "api_token"],
                            "> Geben Sie den Telegram API Token ein: ")
                        input_kontaktdaten_key(
                            kontaktdaten, ["notifications", "telegram", "chat_id"],
                            "> Geben Sie die Telegram Chat ID ein: ")
                        try:
                            validation_code = str(telegram_validation(kontaktdaten["notifications"]["telegram"]))
                        except TelegramNotificationError as exc:
                            print(f"Fehler: {exc}\nBitte versuchen Sie es erneut.")
                            continue
                        validation_input = input("Geben Sie den Validierungscode ein:").strip()
                        if validation_input == validation_code:
                            break
                        del kontaktdaten["notifications"]["telegram"]
                        print("Validierung fehlgeschlagen.")
                        print()
                    else:
                        print()
                        break
        if "zeitrahmen" not in kontaktdaten and command == "search":
            kontaktdaten["zeitrahmen"] = {}
            if input("> Zeitrahmen festlegen? (y/n): ").lower() != "n":
                print()
                input_kontaktdaten_key(
                    kontaktdaten, ["zeitrahmen", "einhalten_bei"],
                    "> Für welchen Impftermin soll der Zeitrahmen gelten? (1/2/beide): ")
                input_kontaktdaten_key(
                    kontaktdaten, ["zeitrahmen", "von_datum"],
                    "> Von Datum (Leer lassen zum Überspringen): ",
                    lambda x: x if x else None) # map empty string to None
                input_kontaktdaten_key(
                    kontaktdaten, ["zeitrahmen", "bis_datum"],
                    "> Bis Datum (Leer lassen zum Überspringen): ",
                    lambda x: x if x else None) # map empty string to None
                input_kontaktdaten_key(
                    kontaktdaten, ["zeitrahmen", "von_uhrzeit"],
                    "> Von Uhrzeit (Leer lassen zum Überspringen): ",
                    lambda x: x if x else None) # map empty string to None
                input_kontaktdaten_key(
                    kontaktdaten, ["zeitrahmen", "bis_uhrzeit"],
                    "> Bis Uhrzeit (Leer lassen zum Überspringen): ",
                    lambda x: x if x else None) # map empty string to None
                print(
                    "Trage nun die Wochentage ein, an denen die ausgewählten Impftermine liegen dürfen.\n"
                    "Mehrere Wochentage können durch Komma getrennt werden.\n"
                    "Beispiel: Mo, Di, Mi, Do, Fr, Sa, So\n"
                    "Leer lassen, um alle Wochentage auszuwählen.")
                input_kontaktdaten_key(
                    kontaktdaten, ["zeitrahmen", "wochentage"],
                    "> Erlaubte Wochentage: ", parse_wochentage)
                print()
        json.dump(kontaktdaten, file, ensure_ascii=False, indent=4)
    return kontaktdaten
def parse_wochentage(string):
    """Parse a comma-separated list of weekday abbreviations.

    Returns the canonical, de-duplicated weekday names in weekday order,
    or None for an empty input. None leaves the key unset so the default
    applies: every weekday is permitted.
    """
    day_numbers = set()
    for token in string.split(","):
        token = token.strip()
        if token:  # skip empty entries, e.g. from trailing commas
            day_numbers.add(decode_wochentag(token))
    if not day_numbers:
        return None
    return [encode_wochentag(num) for num in sorted(day_numbers)]
def input_kontaktdaten_key(
        kontaktdaten,
        path,
        prompt,
        transformer=lambda x: x):
    """Prompt until a valid value for the nested key *path* is entered.

    The raw input is stripped, run through *transformer* and stored at
    *path* inside *kontaktdaten* — unless the transformer yields None,
    which means "leave the key unset". The prompt repeats as long as
    validate_kontaktdaten raises a ValidationError.
    """
    # Walk down to the dictionary that holds the final key.
    container = kontaktdaten
    *parents, leaf = path
    for part in parents:
        container = container[part]
    while True:
        raw = input(prompt).strip()
        try:
            transformed = transformer(raw)
            if transformed is not None:
                container[leaf] = transformed
            validate_kontaktdaten(kontaktdaten)
            return
        except ValidationError as error:
            print(f"\n{str(error)}\n")
def run_search_interactive(kontaktdaten_path, configure_notifications, check_delay):
    """
    Interactive setup for the appointment search:

    1. Optionally ask whether existing data from kontaktdaten.json
       should be loaded.
    2. Load the contact data from kontaktdaten.json.
    3. Interactively ask for any contact data that is still missing.
    4. Start the appointment search.

    :param kontaktdaten_path: path to the JSON file with contact data
    :param configure_notifications: passed through to
        update_kontaktdaten_interactive()
    :param check_delay: seconds to wait between two search attempts
    """
    print(
        "Bitte trage zunächst deinen Impfcode und deine Kontaktdaten ein.\n"
        f"Die Daten werden anschließend lokal in der Datei '{os.path.basename(kontaktdaten_path)}' abgelegt.\n"
        "Du musst sie zukünftig nicht mehr eintragen.\n")
    kontaktdaten = {}
    if os.path.isfile(kontaktdaten_path):
        daten_laden = input(
            f"> Sollen die vorhandenen Daten aus '{os.path.basename(kontaktdaten_path)}' geladen werden? (y/n): ").lower()
        # NOTE(review): the second .lower() below is redundant — the input
        # is already lower-cased above.
        if daten_laden.lower() != "n":
            kontaktdaten = get_kontaktdaten(kontaktdaten_path)
        print()
    kontaktdaten = update_kontaktdaten_interactive(
        kontaktdaten, "search", configure_notifications, kontaktdaten_path)
    return run_search(kontaktdaten, check_delay)
def run_search(kontaktdaten, check_delay):
    """
    Non-interactive appointment search.

    :param kontaktdaten: dictionary with the contact data
    :param check_delay: seconds to wait between two search attempts
    :raises ValueError: if required keys are missing from the dictionary
    """
    try:
        codes = kontaktdaten["codes"]
        # Warn if the legacy single-"plz" layout of kontaktdaten.json is
        # still in use, but keep supporting it.
        if kontaktdaten.get("plz"):
            print(
                "ACHTUNG: Du verwendest noch die alte Version der 'Kontaktdaten.json'!\n"
                "Lösche vor dem nächsten Ausführen die Datei und fülle die Kontaktdaten bitte erneut aus.\n")
            plz_impfzentren = [kontaktdaten.get("plz")]
        else:
            plz_impfzentren = kontaktdaten["plz_impfzentren"]
        kontakt = kontaktdaten["kontakt"]
        print(
            f"Kontaktdaten wurden geladen für: {kontakt['vorname']} {kontakt['nachname']}\n")
        # Notification settings are optional; everything else is required.
        notifications = kontaktdaten.get("notifications", {})
        zeitrahmen = kontaktdaten["zeitrahmen"]
    except KeyError as exc:
        raise ValueError(
            "Kontaktdaten konnten nicht aus 'kontaktdaten.json' geladen werden.\n"
            "Bitte überprüfe, ob sie im korrekten JSON-Format sind oder gebe "
            "deine Daten beim Programmstart erneut ein.\n") from exc
    ImpfterminService.terminsuche(
        codes=codes,
        plz_impfzentren=plz_impfzentren,
        kontakt=kontakt,
        notifications=notifications,
        zeitrahmen=zeitrahmen,
        check_delay=check_delay,
        PATH=PATH)
def gen_code_interactive(kontaktdaten_path):
    """
    Interactive setup for generating a Vermittlungscode:

    1. Optionally ask whether existing data from kontaktdaten.json
       should be loaded.
    2. Load the contact data from kontaktdaten.json.
    3. Interactively ask for the missing contact data needed for code
       generation.
    4. Generate the code.

    :param kontaktdaten_path: path to the JSON file with contact data
    """
    print(
        "Du kannst dir jetzt direkt einen Vermittlungscode erstellen.\n"
        "Dazu benötigst du eine Mailadresse, Telefonnummer und die PLZ deines Impfzentrums.\n"
        f"Die Daten werden anschließend lokal in der Datei '{os.path.basename(kontaktdaten_path)}' abgelegt.\n"
        "Du musst sie zukünftig nicht mehr eintragen.\n")
    kontaktdaten = {}
    if os.path.isfile(kontaktdaten_path):
        daten_laden = input(
            f"> Sollen die vorhandenen Daten aus '{os.path.basename(kontaktdaten_path)}' geladen werden (y/n)?: ").lower()
        if daten_laden.lower() != "n":
            kontaktdaten = get_kontaktdaten(kontaktdaten_path)
        print()
    # Notification setup (configure_notifications=False) is not offered
    # for plain code generation.
    kontaktdaten = update_kontaktdaten_interactive(
        kontaktdaten, "code", False, kontaktdaten_path)
    return gen_code(kontaktdaten)
def gen_code(kontaktdaten):
    """
    Generate a Vermittlungscode without interactively asking for contact
    data (the birth date is still prompted for).

    :param kontaktdaten: dictionary with the contact data
    :return: True if the code request succeeded, False otherwise
    :raises ValueError: if required keys are missing from the dictionary
    """
    try:
        plz_impfzentrum = kontaktdaten["plz_impfzentren"][0]
        mail = kontaktdaten["kontakt"]["notificationReceiver"]
        telefonnummer = kontaktdaten["kontakt"]["phone"]
        # Normalize the phone number to the +49 prefix.
        if not telefonnummer.startswith("+49"):
            telefonnummer = f"+49{remove_prefix(telefonnummer, '0')}"
    except KeyError as exc:
        raise ValueError(
            "Kontaktdaten konnten nicht aus 'kontaktdaten.json' geladen werden.\n"
            "Bitte überprüfe, ob sie im korrekten JSON-Format sind oder gebe "
            "deine Daten beim Programmstart erneut ein.\n") from exc
    its = ImpfterminService([], {}, PATH)
    print("\nBitte trage nachfolgend dein Geburtsdatum im Format DD.MM.YYYY ein.\n"
          "Beispiel: 02.03.1982\n")
    # Re-prompt until the birth date passes validation.
    while True:
        try:
            geburtsdatum = input("> Geburtsdatum: ")
            validate_datum(geburtsdatum)
            break
        except ValidationError as exc:
            print("Das Datum entspricht nicht dem richtigen Format (DD.MM.YYYY). "
                  "Bitte erneut versuchen.")
    # Request the code via selenium.
    try:
        if its.selenium_code_anfordern(mail, telefonnummer, plz_impfzentrum, geburtsdatum):
            return True
    except RuntimeError as exc:
        print(
            f"\nDie Code-Generierung war leider nicht erfolgreich:\n{str(exc)}")
        return False
    print("Die Code-Generierung war leider nicht erfolgreich.")
    return False
def subcommand_search(args):
    """Dispatch the `search` subcommand according to the CLI flags.

    --configure-only: only collect/store contact data, no search.
    --read-only: search with the stored data, never prompt.
    Default: interactive flow (prompt for missing data, then search).
    """
    if args.configure_only:
        update_kontaktdaten_interactive(
            get_kontaktdaten(args.file), "search", args.configure_notifications, args.file)
    elif args.read_only:
        run_search(get_kontaktdaten(args.file), check_delay=args.retry_sec)
    else:
        run_search_interactive(args.file, args.configure_notifications, check_delay=args.retry_sec)
def subcommand_code(args):
    """Dispatch the `code` subcommand according to the CLI flags.

    --configure-only: only collect/store contact data, no code request.
    --read-only: generate a code from stored data, never prompt.
    Default: interactive flow.
    """
    if args.configure_only:
        # BUGFIX: previously this was called with only three arguments, so
        # args.file landed in the configure_notifications parameter and
        # filepath stayed None — open(None, 'w') then crashed. Notification
        # setup is not offered for code generation (gen_code_interactive
        # also passes False), and the file path is forwarded explicitly.
        update_kontaktdaten_interactive(
            get_kontaktdaten(args.file), "code", False, args.file)
    elif args.read_only:
        gen_code(get_kontaktdaten(args.file))
    else:
        gen_code_interactive(args.file)
def subcommand_install_chromium():
    """Install a private Chromium build plus matching webdriver into the
    vaccipy folder, skipping whatever is already present."""
    # Mac_Arm currently not working
    if current_platform() == 'mac-arm':
        print('Zur Zeit kann keine eigene Chromium Instanz auf einem Mac M1 installiert werden.')
    else:
        if not check_chromium():
            download_chromium()
        else:
            print('Eigene Chromium Instanz bereits installiert.')
        # The webdriver is checked independently of the browser itself.
        if not check_webdriver():
            download_webdriver()
def validate_args(args):
    """
    Raises ValueError if args contain invalid settings.

    --configure-only and --read-only are mutually exclusive: the first
    forces interactive data entry, the second forbids it.
    """
    both_exclusive_flags = args.configure_only and args.read_only
    if both_exclusive_flags:
        raise ValueError(
            "--configure-only und --read-only kann nicht gleichzeitig verwendet werden")
def main():
    """Parse the CLI, then either run the chosen subcommand or fall back
    to the interactive menu loop when no subcommand was given."""
    create_missing_dirs(PATH)
    parser = argparse.ArgumentParser()
    subparsers = parser.add_subparsers(help="commands", dest="command")
    # Options shared by both subcommands live on a parent parser.
    base_subparser = argparse.ArgumentParser(add_help=False)
    base_subparser.add_argument(
        "-f",
        "--file",
        help="Pfad zur JSON-Datei für Kontaktdaten")
    base_subparser.add_argument(
        "-c",
        "--configure-only",
        action='store_true',
        help="Nur Kontaktdaten erfassen und in JSON-Datei abspeichern")
    base_subparser.add_argument(
        "-r",
        "--read-only",
        action='store_true',
        help="Es wird nicht nach fehlenden Kontaktdaten gefragt. Stattdessen wird ein Fehler angezeigt, falls benötigte Kontaktdaten in der JSON-Datei fehlen.")
    base_subparser.add_argument(
        "-n",
        "--configure-notifications",
        action='store_true',
        help="Gibt bei der Erfassung der Kontaktdaten die Möglichkeit, Benachrichtungen über Pushover und Telegram zu konfigurieren.")
    parser_search = subparsers.add_parser(
        "search", parents=[base_subparser], help="Termin suchen")
    parser_search.add_argument(
        "-s",
        "--retry-sec",
        type=int,
        default=60,
        help="Wartezeit zwischen zwei Versuchen (in Sekunden)")
    parser_code = subparsers.add_parser(
        "code",
        parents=[base_subparser],
        help="Vermittlungscode generieren")
    args = parser.parse_args()
    # Fill in defaults for attributes that only exist when a subcommand
    # (and its parent parser) was selected.
    if not hasattr(args, "file") or args.file is None:
        args.file = os.path.join(PATH, "data/kontaktdaten.json")
    if not hasattr(args, "configure_only"):
        args.configure_only = False
    if not hasattr(args, "read_only"):
        args.read_only = False
    if not hasattr(args, "retry_sec"):
        args.retry_sec = 60
    if not hasattr(args, "configure_notifications"):
        args.configure_notifications = False
    try:
        validate_args(args)
    except ValueError as exc:
        parser.error(str(exc))
        # parser.error terminates the program with status code 2.
    if args.command is not None:
        # Non-interactive mode: run exactly the requested subcommand.
        try:
            if args.command == "search":
                subcommand_search(args)
            elif args.command == "code":
                subcommand_code(args)
            else:
                assert False
        except ValidationError as exc:
            print(f"Fehler in {json.dumps(args.file)}:\n{str(exc)}")
    else:
        # Interactive menu loop; "x" toggles the extended-settings view.
        extended_settings = False
        while True:
            print(
                "Was möchtest du tun?\n"
                "[1] Termin suchen\n"
                "[2] Vermittlungscode generieren\n"
                "[3] Eigene Chromium Instanz im Vaccipy Ordner installieren\n"
                f"[x] Erweiterte Einstellungen {'verbergen' if extended_settings else 'anzeigen'}\n")
            if extended_settings:
                print(
                    f"[c] --configure-only {'de' if args.configure_only else ''}aktivieren\n"
                    f"[r] --read-only {'de' if args.read_only else ''}aktivieren\n"
                    "[s] --retry-sec setzen\n"
                    f"[n] --configure-notifications {'de' if args.configure_notifications else ''}aktivieren\n\n")
            option = input("> Option: ").lower()
            print()
            try:
                if option == "1":
                    subcommand_search(args)
                elif option == "2":
                    subcommand_code(args)
                elif option == "3":
                    subcommand_install_chromium()
                elif option == "x":
                    extended_settings = not extended_settings
                elif extended_settings and option == "c":
                    # Toggle on a copy first so validate_args can reject the
                    # combination without mutating the live args.
                    new_args = copy.copy(args)
                    new_args.configure_only = not new_args.configure_only
                    validate_args(new_args)
                    args = new_args
                    print(
                        f"--configure-only {'de' if not args.configure_only else ''}aktiviert.")
                elif extended_settings and option == "r":
                    new_args = copy.copy(args)
                    new_args.read_only = not new_args.read_only
                    validate_args(new_args)
                    args = new_args
                    print(
                        f"--read-only {'de' if not args.read_only else ''}aktiviert.")
                elif extended_settings and option == "s":
                    args.retry_sec = int(input("> --retry-sec="))
                    # Enforce a lower bound so the servers are not hammered.
                    if args.retry_sec<30:
                        print("[RETRY-SEC] Um die Server nicht übermäßig zu belasten, wurde der Wert auf 30 Sekunden gesetzt")
                        args.retry_sec = 30
                elif extended_settings and option == "n":
                    new_args = copy.copy(args)
                    new_args.configure_notifications = not new_args.configure_notifications
                    validate_args(new_args)
                    args = new_args
                    print(
                        f"--configure-notifications {'de' if not args.configure_notifications else ''}aktiviert.")
                else:
                    print("Falscheingabe! Bitte erneut versuchen.")
                print()
            except Exception as exc:
                # Keep the menu alive on any error (e.g. bad input, I/O).
                print(f"\nFehler:\n{str(exc)}\n")
if __name__ == "__main__":
print("""
_
(_)
__ __ __ _ ___ ___ _ _ __ _ _
\ \ / / / _` | / __| / __| | | | '_ \ | | | |
\ V / | (_| | | (__ | (__ | | | |_) | | |_| |
\_/ \__,_| \___| \___| |_| | .__/ \__, |
| | __/ |
|_| |___/
""")
# Auf aktuelle Version prüfen
try:
if not update_available():
print('Du verwendest die aktuellste Version von vaccipy: ' + get_current_version())
else:
print("Du verwendest eine alte Version von vaccipy.\n"
"Bitte installiere die aktuellste Version. Link zum Download:\n"
"https://github.com/iamnotturner/vaccipy/releases/tag/" + get_latest_version())
except:
print("vaccipy konnte nicht auf die neuste Version geprüft werden.")
print()
print("Automatische Terminbuchung für den Corona Impfterminservice\n")
print("Vor der Ausführung des Programms ist die Berechtigung zur Impfung zu prüfen.\n"
"Ob Anspruch auf eine Impfung besteht, kann hier nachgelesen werden:\n"
"https://www.impfterminservice.de/terminservice/faq\n")
main()
| 42.222962 | 160 | 0.593119 |
aea2be1e099438299b4f649004f61a3b825f6c35 | 3,573 | py | Python | mpm88_2phase.py | taichi-dev/mls_mpm_88_extensions | 01928b95bde17d27a288d040d32514a9b45986db | [
"MIT"
] | 2 | 2021-06-24T10:25:28.000Z | 2021-08-10T13:00:00.000Z | mpm88_2phase.py | taichi-dev/mls_mpm_88_extensions | 01928b95bde17d27a288d040d32514a9b45986db | [
"MIT"
] | null | null | null | mpm88_2phase.py | taichi-dev/mls_mpm_88_extensions | 01928b95bde17d27a288d040d32514a9b45986db | [
"MIT"
] | 1 | 2021-08-10T09:31:01.000Z | 2021-08-10T09:31:01.000Z | import taichi as ti
ti.init(arch=ti.gpu)
# Simulation resolution and time step.
n_particles = 16384
n_grid = 128
dx = 1 / n_grid          # grid cell size (domain is the unit square)
dt = 0.5e-4
# Per-particle volume and base mass (phase-0 mass; phase 1 is scaled by
# rho_ratio in the kernel).
p_vol = 1 / n_particles
p_mass0 = 1 / n_particles
gravity = 9.8
bound = 3                # number of boundary cells on each side
# Equation-of-state constants: cAir is presumably a speed of sound and
# RT the resulting pressure coefficient — TODO confirm the physics.
cAir = 20
RT = cAir**2
rho_ratio = 100          # density ratio between the two phases
E = RT * (rho_ratio - 1)
# Particle fields.
x = ti.Vector.field(2, float, n_particles)        # position
v = ti.Vector.field(2, float, n_particles)        # velocity
C = ti.Matrix.field(2, 2, float, n_particles)     # affine velocity matrix (APIC)
type_p = ti.field(ti.i32, n_particles)            # phase id: 0 or 1
color = ti.field(ti.i32, n_particles)             # GUI draw color
J = ti.field(float, n_particles)                  # volume ratio (det of deformation)
use_C = ti.field(ti.i32, ())                      # flag: accumulate C in G2P
# Background grid: momentum/velocity and mass.
grid_v = ti.Vector.field(2, float, (n_grid, n_grid))
grid_m = ti.field(float, (n_grid, n_grid))
@ti.kernel
def substep():
    # Clear the background grid.
    for i, j in grid_m:
        grid_v[i, j] = [0, 0]
        grid_m[i, j] = 0
    # Particle-to-grid (P2G): scatter mass and momentum with quadratic
    # B-spline weights over the 3x3 neighborhood.
    for p in x:
        Xp = x[p] / dx
        base = int(Xp - 0.5)
        fx = Xp - base
        # Phase-1 particles are rho_ratio times heavier than phase 0.
        p_mass = (1.0 + (rho_ratio - 1.0) * type_p[p]) * p_mass0
        w = [0.5 * (1.5 - fx)**2, 0.75 - (fx - 1)**2, 0.5 * (fx - 0.5)**2]
        # Isotropic pressure contribution; phase 1 gets an extra stiff
        # term driven by the volume change (J - 1).
        stress = p_vol * RT * dt * 4 / dx**2
        if type_p[p] == 1:
            stress += -dt * 4 * E * p_vol * (J[p] - 1) / dx**2
        mu = 0.1 # viscosity coefficient
        stressMu = -(C[p] + C[p].transpose()) * mu # viscous stress matrix
        stressMu *= dt * p_vol * 4 / dx**2
        affine = ti.Matrix([[stress, 0], [0, stress]
                            ]) + p_mass * C[p] + stressMu
        for i, j in ti.static(ti.ndrange(3, 3)):
            offset = ti.Vector([i, j])
            dpos = (offset - fx) * dx
            weight = w[i].x * w[j].y
            grid_v[base + offset] += weight * (p_mass * v[p] + affine @ dpos)
            grid_m[base + offset] += weight * p_mass
    # Grid update: normalize momentum to velocity, apply gravity and
    # enforce separating boundary conditions at the domain walls.
    for i, j in grid_m:
        if grid_m[i, j] > 0:
            grid_v[i, j] /= grid_m[i, j]
        grid_v[i, j].y -= dt * gravity
        if i < bound and grid_v[i, j].x < 0:
            grid_v[i, j].x = 0
        if i > n_grid - bound and grid_v[i, j].x > 0:
            grid_v[i, j].x = 0
        if j < bound and grid_v[i, j].y < 0:
            grid_v[i, j].y = 0
        if j > n_grid - bound and grid_v[i, j].y > 0:
            grid_v[i, j].y = 0
    # Grid-to-particle (G2P): gather velocity (and optionally the affine
    # matrix C), then advect and update the volume ratio J.
    for p in x:
        Xp = x[p] / dx
        base = int(Xp - 0.5)
        fx = Xp - base
        w = [0.5 * (1.5 - fx)**2, 0.75 - (fx - 1)**2, 0.5 * (fx - 0.5)**2]
        new_v = ti.Vector.zero(float, 2)
        new_C = ti.Matrix.zero(float, 2, 2)
        for i, j in ti.static(ti.ndrange(3, 3)):
            offset = ti.Vector([i, j])
            dpos = (offset - fx) * dx
            weight = w[i].x * w[j].y
            g_v = grid_v[base + offset]
            new_v += weight * g_v
            if use_C[None]:
                new_C += 4 * weight * g_v.outer_product(dpos) / dx**2
        v[p] = new_v
        x[p] += dt * v[p]
        J[p] *= 1 + dt * new_C.trace()
        C[p] = new_C
@ti.kernel
def init():
    # Scatter all particles uniformly inside [0.1, 0.9]^2, give them a
    # downward initial velocity and an undeformed volume ratio.
    for i in range(n_particles):
        x1 = ti.random()
        x2 = ti.random()
        x[i] = [x1 * 0.8 + 0.1, x2 * 0.8 + 0.1]
        v[i] = [0, -1]
        J[i] = 1
@ti.kernel
def change_type():
    # Convert every particle currently inside the square (0.2, 0.6)^2 to
    # phase 1 (the heavy phase), recolor it and reset its volume ratio.
    for i in range(n_particles):
        if x[i].x < 0.6 and x[i].x > 0.2 and x[i].y < 0.6 and x[i].y > 0.2:
            type_p[i] = 1
            color[i] = 0xCCCCCC
            J[i] = 1
init()
# Phase-0 particles start teal; phase-1 particles are recolored later.
for i in range(n_particles):
    color[i] = 0x068587
gui = ti.GUI('MPM88')
k = 0
while gui.running and k <= 20000:
    # 100 substeps per rendered frame.
    for s in range(100):
        substep()
    # After 50 frames, enable the affine (APIC) transfer and switch the
    # central block of particles to the second phase.
    if k == 50:
        use_C[None] = 1
        change_type()
    gui.clear(0x112F41)
    gui.circles(x.to_numpy(), radius=1.2, color=color.to_numpy())
    gui.show() #use gui.show(f'{k:06d}.png') to save pictures
    k += 1
| 28.814516 | 77 | 0.482228 |
277eb6fb4c3b367d31c8dd9e553ddea1e5a2febe | 5,809 | py | Python | tests/unit/api/mail/test_v0.py | ai-platform/amundsenfrontendlibrary | 6c95d5aa6acc9c85607b3453852cfb64cbdb712c | [
"Apache-2.0"
] | null | null | null | tests/unit/api/mail/test_v0.py | ai-platform/amundsenfrontendlibrary | 6c95d5aa6acc9c85607b3453852cfb64cbdb712c | [
"Apache-2.0"
] | null | null | null | tests/unit/api/mail/test_v0.py | ai-platform/amundsenfrontendlibrary | 6c95d5aa6acc9c85607b3453852cfb64cbdb712c | [
"Apache-2.0"
] | null | null | null | import unittest
from http import HTTPStatus
from typing import Dict, List
from flask import Response, jsonify, make_response
from amundsen_application import create_app
from amundsen_application.base.base_mail_client import BaseMailClient
local_app = create_app('amundsen_application.config.TestConfig', 'tests/templates')
class MockMailClient(BaseMailClient):
    """Test double for BaseMailClient that answers every send with a
    pre-configured HTTP status code and an empty JSON body."""
    def __init__(self, status_code: int, recipients: List = []) -> None:
        # Only the status code matters; `recipients` exists purely for
        # signature compatibility and is ignored.
        self.status_code = status_code
    def send_email(self,
                   sender: str = None,
                   recipients: List = [],
                   subject: str = None,
                   text: str = None,
                   html: str = None,
                   optional_data: Dict = {}) -> Response:
        """Pretend to send mail; reply with the canned status code."""
        empty_body = jsonify({})
        return make_response(empty_body, self.status_code)
class MockBadClient(BaseMailClient):
    """Misbehaving BaseMailClient stub: every send_email call raises,
    used to exercise the endpoint's error handling."""
    def __init__(self) -> None:
        # Nothing to configure — this client always fails.
        pass
    def send_email(self,
                   sender: str = None,
                   recipients: List = [],
                   subject: str = None,
                   text: str = None,
                   html: str = None,
                   optional_data: Dict = {}) -> Response:
        """Always fail, simulating a broken client implementation."""
        raise Exception('Bad client')
class MailTest(unittest.TestCase):
    """Tests for the /explore/api/mail/v0 feedback and notification
    endpoints, using the mock mail clients defined above.

    Fix applied: ``assertEquals`` is a long-deprecated alias that was
    removed in Python 3.12 — replaced with ``assertEqual`` throughout,
    matching the rest of the class.
    """
    def test_feedback_client_not_implemented(self) -> None:
        """
        Test mail client is not implemented, and endpoint should return appropriate code
        :return:
        """
        with local_app.test_client() as test:
            response = test.post('/explore/api/mail/v0/feedback', json={
                'rating': '10', 'comment': 'test'
            })
            self.assertEqual(response.status_code, HTTPStatus.NOT_IMPLEMENTED)
    def test_feedback_client_success(self) -> None:
        """
        Test mail client success
        :return:
        """
        local_app.config['MAIL_CLIENT'] = MockMailClient(status_code=200)
        with local_app.test_client() as test:
            response = test.post('/explore/api/mail/v0/feedback', json={
                'rating': '10', 'comment': 'test'
            })
            self.assertEqual(response.status_code, HTTPStatus.OK)
    def test_feedback_client_raise_exception(self) -> None:
        """
        Test failure due to incorrect implementation of base_mail_client
        :return:
        """
        local_app.config['MAIL_CLIENT'] = MockBadClient()
        with local_app.test_client() as test:
            response = test.post('/explore/api/mail/v0/feedback', json={
                'rating': '10', 'comment': 'test'
            })
            self.assertEqual(response.status_code, HTTPStatus.INTERNAL_SERVER_ERROR)
    def test_feedback_client_propagate_status_code(self) -> None:
        """
        Test that specific status codes returned from a custom mail client propagate,
        so that they may be appropriately logged and surfaced to the React application
        :return:
        """
        expected_code = HTTPStatus.BAD_REQUEST
        local_app.config['MAIL_CLIENT'] = MockMailClient(status_code=expected_code)
        with local_app.test_client() as test:
            response = test.post('/explore/api/mail/v0/feedback', json={
                'rating': '10', 'comment': 'test'
            })
            self.assertEqual(response.status_code, expected_code)
    # NOTE(review): only `import unittest` appears at the top of this file;
    # `unittest.mock` is resolvable here presumably because another imported
    # module loads it — confirm, or import `unittest.mock` explicitly.
    @unittest.mock.patch('amundsen_application.api.mail.v0.send_notification')
    def test_notification_endpoint_calls_send_notification(self, send_notification_mock) -> None:
        """
        Test that the endpoint calls send_notification with the correct information
        from the request json
        :return:
        """
        test_recipients = ['test@test.com']
        test_notification_type = 'added'
        test_options = {}
        with local_app.test_client() as test:
            test.post('/explore/api/mail/v0/notification', json={
                'recipients': test_recipients,
                'notificationType': test_notification_type,
                'options': test_options,
            })
            send_notification_mock.assert_called_with(
                notification_type=test_notification_type,
                options=test_options,
                recipients=test_recipients,
                sender=local_app.config['AUTH_USER_METHOD'](local_app).email
            )
    @unittest.mock.patch('amundsen_application.api.mail.v0.send_notification')
    def test_notification_endpoint_fails_missing_notification_type(self, send_notification_mock) -> None:
        """
        Test that the endpoint fails if notificationType is not provided in the
        request json
        :return:
        """
        test_recipients = ['test@test.com']
        test_sender = 'test2@test.com'
        test_options = {}
        with local_app.test_client() as test:
            response = test.post('/explore/api/mail/v0/notification', json={
                'recipients': test_recipients,
                'sender': test_sender,
                'options': test_options,
            })
            self.assertEqual(response.status_code, HTTPStatus.BAD_REQUEST)
            self.assertFalse(send_notification_mock.called)
    @unittest.mock.patch('amundsen_application.api.mail.v0.send_notification')
    def test_notification_endpoint_fails_with_exception(self, send_notification_mock) -> None:
        """
        Test that the endpoint returns 500 exception when error occurs
        and that send_notification is not called
        :return:
        """
        with local_app.test_client() as test:
            # generates error
            response = test.post('/explore/api/mail/v0/notification', json=None)
            self.assertEqual(response.status_code, HTTPStatus.INTERNAL_SERVER_ERROR)
            self.assertFalse(send_notification_mock.called)
| 38.986577 | 105 | 0.621105 |
32e9f8a724c9c34b3d436e3528e9ac85d9b94a0f | 3,779 | py | Python | A3/A3_V2/Server/ServerController.py | lingt-xyz/Data-Communications-Computer-Networks | 79490b7177010525976fcad5ca3aa846765d4b2b | [
"MIT"
] | 2 | 2019-11-07T20:31:30.000Z | 2022-02-10T20:24:24.000Z | A3/A3_V2/Server/ServerController.py | lingt-xyz/Data-Communications-Computer-Networks | 79490b7177010525976fcad5ca3aa846765d4b2b | [
"MIT"
] | null | null | null | A3/A3_V2/Server/ServerController.py | lingt-xyz/Data-Communications-Computer-Networks | 79490b7177010525976fcad5ca3aa846765d4b2b | [
"MIT"
] | null | null | null | import logging
from socket import *
from DealPackets.Packet import *
from DealPackets.packetConstructor import *
from Server.ServerWindow import *
from Client.ClientWindow import *
import const
class ReceiverController:
    """Server-side endpoint of the reliable-UDP protocol.

    Accepts a connection via a SYN / SYN-ACK handshake, then receives the
    request packet by packet through a ServerWindow, ACKing each packet.
    All traffic is relayed through the router at
    const.ROUTER_IP:const.ROUTER_PORT, never sent to the peer directly.

    Fixes applied: the class used ``self.__conn`` in two places although
    no attribute of that name is ever assigned (the socket lives in
    ``self.__socketRC``), and ``disConnect`` read
    ``self.__packetBuilder.__destinationAddress`` — a name mangled into a
    *ReceiverController* attribute, so it raised AttributeError.
    """
    address = None            # (ip, port) of the peer, learned from the first packet
    __socketRC = None         # UDP socket bound to const.SERVER_PORT
    __packetBuilder = None    # PacketConstructor targeting the peer
    def __init__(self):
        self.__routerAddr = (const.ROUTER_IP, const.ROUTER_PORT)
    def receiveMessage(self):
        """
        Receive message from the client
        """
        # First, connect
        if (self.buildConnection()):
            # Second, receive request
            window = ServerWindow()
            while not window.finished():
                p = self.getPacket()
                # TODO discard possible packet from handshake
                window.process(p)
                # TODO send ACK
                p = self.__packetBuilder.build(PACKET_TYPE_AK)
                # BUGFIX: was self.__conn.sendto — __conn never exists.
                # NOTE(review): this uses p.to_bytes() while sendPacket uses
                # packet.getBytes(); confirm which Packet API is correct.
                self.__socketRC.sendto(p.to_bytes(), self.__routerAddr)
            self.__socketRC.close()
            # Third, response
            # Fourth, Disconnect
            self.disConnect()
    def sendPacket(self, packetType, sequenceNumber, content=None):
        """Build a packet for the peer and send it via the router."""
        # Consistency: use logging like the rest of the class, not print().
        logging.debug("Sending packet type: " + str(packetType) + " with #" + str(sequenceNumber))
        packet = self.__packetBuilder.build(packetType, sequenceNumber, content)
        self.__socketRC.sendto(packet.getBytes(), self.__routerAddr)
    def getPacket(self, timeout=None):
        """Receive one packet (or None on timeout) and lazily create the
        PacketConstructor for the peer from the first packet seen."""
        self.__socketRC.settimeout(timeout)
        try:
            data, addr = self.__socketRC.recvfrom(PACKET_SIZE)
        # NOTE(review): this file does `from socket import *`, so the name
        # `socket` is the socket class unless a star-imported module also
        # exports the socket module — confirm `socket.timeout` resolves.
        except socket.timeout:
            return None
        pkt = Packet.from_bytes(data)
        logging.debug("Got packet type: {} with #{}".format(str(pkt.packet_type), str(pkt.seq_num)))
        if (self.__packetBuilder is None):
            self.address = (pkt.getDestinationAddress(), pkt.getDestinationPort())
            self.__packetBuilder = PacketConstructor(pkt.getDestinationAddress(), pkt.getDestinationPort())
        return pkt
    def buildConnection(self):
        """
        Three-way handshake
        """
        self.__socketRC = socket(AF_INET, SOCK_DGRAM)
        self.__socketRC.bind(('', const.SERVER_PORT))
        logging.info("Server is listening at {}:{}.".format(const.SERVER_IP, const.SERVER_PORT))
        packet = self.getPacket()
        # boolean if connection is built
        # TODO: if pkt type is syn, send ack syn, if already acked, return true
        if (packet.packet_type == PACKET_TYPE_SYN):
            addr = (packet.peer_ip_addr, packet.peer_port)
            self.sendPacket(PACKET_TYPE_SYN_AK, 0)
            # we can just ignore the comming ACK, because it could be lost but the sender would not deal with this case
            # but we do shuld be careful with the first packet when receiving the http request
            return True
        '''''
        packet = self.getPacket()
        if (packet.packet_type == PACKET_TYPE_AK):
            windowSize = int(packet.payload.rstrip())
            self.__window = ReceiverWindow(windowSize, self.sendPacket, self.getPacket)
            return True
        '''''
        return False
    def disConnect(self):
        """
        Close the connection.

        NOTE: the FIN/ACK exchange described by the original docstring
        ("Disconnecting: FIN, ACK, FIN, ACK") is not implemented; this
        only logs and closes the socket.
        """
        # BUGFIX: the peer address is cached in self.address; the old code
        # read name-mangled attributes off the PacketConstructor and crashed.
        if self.address is not None:
            logging.info("Disconnecting from {}:{}.".format(self.address[0], self.address[1]))
        # BUGFIX: was self.__conn.close() — __conn never exists. Closing an
        # already-closed socket (see receiveMessage) is harmless.
        self.__socketRC.close()
    '''''
    def getMessage(self):
        self.__socketRC = socket(AF_INET, SOCK_DGRAM)
        self.__socketRC.bind(('', self.__port))
        print("Listening")
        # Make sure we have some connection.
        if (self.buildConnection()):
            # TODO: if window not finished, keep doing till end, send ack pkt,
            return self.__window.getMessage()
    '''''
| 34.045045 | 137 | 0.622122 |
56c54c3a3ddc4bfec657215abcbba457b807abbd | 22,506 | py | Python | geotrek/maintenance/tests/test_views.py | billux/Geotrek-admin | cdc2749bb391b46a55d3870802d93f8a96db3740 | [
"BSD-2-Clause"
] | null | null | null | geotrek/maintenance/tests/test_views.py | billux/Geotrek-admin | cdc2749bb391b46a55d3870802d93f8a96db3740 | [
"BSD-2-Clause"
] | null | null | null | geotrek/maintenance/tests/test_views.py | billux/Geotrek-admin | cdc2749bb391b46a55d3870802d93f8a96db3740 | [
"BSD-2-Clause"
] | null | null | null | # -*- coding: utf-8 -*-
import os
from collections import OrderedDict
from unittest import skipIf
from django.conf import settings
from django.contrib.gis.geos import Point, LineString
from django.contrib.gis import gdal
from django.test import TestCase
import json
from geotrek.common.tests import CommonTest
from mapentity.serializers.shapefile import ZipShapeSerializer, shapefile_files
from geotrek.authent.factories import PathManagerFactory
from geotrek.core.factories import StakeFactory
from geotrek.core.helpers import TopologyHelper
from geotrek.common.factories import OrganismFactory
from geotrek.common.tests import TranslationResetMixin
from geotrek.maintenance.models import Intervention, InterventionStatus, Project
from geotrek.maintenance.views import ProjectFormatList
from geotrek.core.factories import (PathFactory, PathAggregationFactory,
TopologyFactory)
from geotrek.infrastructure.factories import InfrastructureFactory
from geotrek.signage.factories import SignageFactory
from geotrek.maintenance.factories import (InterventionFactory, InfrastructureInterventionFactory,
InterventionDisorderFactory, InterventionStatusFactory,
ProjectFactory, ContractorFactory, InterventionJobFactory,
SignageInterventionFactory)
class InterventionViewsTest(CommonTest):
model = Intervention
modelfactory = InterventionFactory
userfactory = PathManagerFactory
def get_bad_data(self):
return OrderedDict([
('name', ''),
('manday_set-TOTAL_FORMS', '0'),
('manday_set-INITIAL_FORMS', '1'),
('manday_set-MAX_NUM_FORMS', '0'),
]), u'This field is required.'
def get_good_data(self):
InterventionStatusFactory.create()
good_data = {
'name': 'test',
'date': '2012-08-23',
'disorders': InterventionDisorderFactory.create().pk,
'comments': '',
'slope': 0,
'area': 0,
'subcontract_cost': 0.0,
'stake': StakeFactory.create().pk,
'height': 0.0,
'project': '',
'width': 0.0,
'length': 0.0,
'status': InterventionStatus.objects.all()[0].pk,
'heliport_cost': 0.0,
'material_cost': 0.0,
'manday_set-TOTAL_FORMS': '2',
'manday_set-INITIAL_FORMS': '0',
'manday_set-MAX_NUM_FORMS': '',
'manday_set-0-nb_days': '48.75',
'manday_set-0-job': InterventionJobFactory.create().pk,
'manday_set-0-id': '',
'manday_set-0-DELETE': '',
'manday_set-1-nb_days': '12',
'manday_set-1-job': InterventionJobFactory.create().pk,
'manday_set-1-id': '',
'manday_set-1-DELETE': '',
}
if settings.TREKKING_TOPOLOGY_ENABLED:
path = PathFactory.create()
good_data['topology'] = '{"paths": [%s]}' % path.pk,
else:
good_data['topology'] = 'SRID=4326;POINT (5.1 6.6)'
return good_data
def test_creation_form_on_signage(self):
self.login()
if settings.TREKKING_TOPOLOGY_ENABLED:
signa = SignageFactory.create()
else:
signa = SignageFactory.create(geom='SRID=2154;POINT (700000 6600000)')
signage = u"%s" % signa
response = self.client.get(Intervention.get_add_url() + '?signage=%s' % signa.pk)
self.assertEqual(response.status_code, 200)
self.assertContains(response, signage)
form = response.context['form']
self.assertEqual(form.initial['signage'], signa)
# Should be able to save form successfully
data = self.get_good_data()
data['signage'] = signa.pk
response = self.client.post(Intervention.get_add_url() + '?signage=%s' % signa.pk, data)
self.assertEqual(response.status_code, 302)
def test_creation_form_on_signage_with_errors(self):
self.login()
if settings.TREKKING_TOPOLOGY_ENABLED:
signa = SignageFactory.create()
else:
signa = SignageFactory.create(geom='SRID=2154;POINT (700000 6600000)')
signage = u"%s" % signa
response = self.client.get(Intervention.get_add_url() + '?signage=%s' % signa.pk)
self.assertEqual(response.status_code, 200)
self.assertContains(response, signage)
form = response.context['form']
self.assertEqual(form.initial['signage'], signa)
data = self.get_good_data()
data['signage'] = signa.pk
# If form invalid, it should not fail
data.pop('status')
response = self.client.post(Intervention.get_add_url() + '?signage=%s' % signa.pk, data)
self.assertEqual(response.status_code, 200)
def test_update_form_on_signage(self):
self.login()
if settings.TREKKING_TOPOLOGY_ENABLED:
signa = SignageFactory.create()
else:
signa = SignageFactory.create(geom='SRID=2154;POINT (700000 6600000)')
signage = u"%s" % signa
intervention = InterventionFactory.create()
self.assertIsNone(intervention.signage)
intervention.set_topology(signa)
intervention.save()
self.assertIsNotNone(intervention.signage)
response = self.client.get(intervention.get_update_url())
self.assertEqual(response.status_code, 200)
self.assertContains(response, signage)
# Should be able to save form successfully
form = response.context['form']
data = form.initial
data['disorders'] = data['disorders'][0].pk
data['project'] = ''
data['signage'] = form.fields['signage'].initial.pk # because it is set after form init, not form.initial :(
data.update(**{
'manday_set-TOTAL_FORMS': '0',
'manday_set-INITIAL_FORMS': '0',
'manday_set-MAX_NUM_FORMS': '',
})
# Form URL is modified in form init
formurl = intervention.get_update_url() + '?signage=%s' % signa.pk
response = self.client.post(formurl, data)
self.assertEqual(response.status_code, 302)
def test_update_signage(self):
self.login()
target_year = 2017
if settings.TREKKING_TOPOLOGY_ENABLED:
intervention = SignageInterventionFactory.create()
else:
intervention = SignageInterventionFactory.create(geom='SRID=2154;POINT (700000 6600000)')
signa = intervention.signage
# Save infrastructure form
response = self.client.get(signa.get_update_url())
form = response.context['form']
data = form.initial
data['name'] = 'modified'
data['implantation_year'] = target_year
if settings.TREKKING_TOPOLOGY_ENABLED:
data['topology'] = '{"paths": [%s]}' % PathFactory.create().pk
else:
data['geom'] = 'SRID=4326;POINT (2.0 6.6)'
data['manager'] = OrganismFactory.create().pk
response = self.client.post(signa.get_update_url(), data)
self.assertEqual(response.status_code, 302)
# Check that intervention was not deleted (bug #783)
intervention.reload()
self.assertFalse(intervention.deleted)
self.assertEqual(intervention.signage.name, 'modified')
self.assertEqual(intervention.signage.implantation_year, target_year)
def test_creation_form_on_infrastructure(self):
self.login()
if settings.TREKKING_TOPOLOGY_ENABLED:
infra = InfrastructureFactory.create()
else:
infra = InfrastructureFactory.create(geom='SRID=2154;POINT (700000 6600000)')
infrastr = u"%s" % infra
response = self.client.get(Intervention.get_add_url() + '?infrastructure=%s' % infra.pk)
self.assertEqual(response.status_code, 200)
self.assertContains(response, infrastr)
form = response.context['form']
self.assertEqual(form.initial['infrastructure'], infra)
# Should be able to save form successfully
data = self.get_good_data()
data['infrastructure'] = infra.pk
response = self.client.post(Intervention.get_add_url() + '?infrastructure=%s' % infra.pk, data)
self.assertEqual(response.status_code, 302)
def test_creation_form_on_infrastructure_with_errors(self):
self.login()
if settings.TREKKING_TOPOLOGY_ENABLED:
infra = InfrastructureFactory.create()
else:
infra = InfrastructureFactory.create(geom='SRID=2154;POINT (700000 6600000)')
infrastr = u"%s" % infra
response = self.client.get(Intervention.get_add_url() + '?infrastructure=%s' % infra.pk)
self.assertEqual(response.status_code, 200)
self.assertContains(response, infrastr)
form = response.context['form']
self.assertEqual(form.initial['infrastructure'], infra)
data = self.get_good_data()
data['infrastructure'] = infra.pk
# If form invalid, it should not fail
data.pop('status')
response = self.client.post(Intervention.get_add_url() + '?infrastructure=%s' % infra.pk, data)
self.assertEqual(response.status_code, 200)
def test_update_form_on_infrastructure(self):
self.login()
if settings.TREKKING_TOPOLOGY_ENABLED:
infra = InfrastructureFactory.create()
else:
infra = InfrastructureFactory.create(geom='SRID=2154;POINT (700000 6600000)')
infrastr = u"%s" % infra
intervention = InterventionFactory.create()
intervention.set_topology(infra)
intervention.save()
response = self.client.get(intervention.get_update_url())
self.assertEqual(response.status_code, 200)
self.assertContains(response, infrastr)
# Should be able to save form successfully
form = response.context['form']
data = form.initial
data['disorders'] = data['disorders'][0].pk
data['project'] = ''
data['infrastructure'] = form.fields['infrastructure'].initial.pk # because it is set after form init, not form.initial :(
data.update(**{
'manday_set-TOTAL_FORMS': '0',
'manday_set-INITIAL_FORMS': '0',
'manday_set-MAX_NUM_FORMS': '',
})
# Form URL is modified in form init
formurl = intervention.get_update_url() + '?infrastructure=%s' % infra.pk
response = self.client.post(formurl, data)
self.assertEqual(response.status_code, 302)
def test_disorders_not_mandatory(self):
self.login()
data = self.get_good_data()
data.pop('disorders')
response = self.client.post(Intervention.get_add_url(), data)
self.assertEqual(response.status_code, 302)
def test_update_infrastructure(self):
self.login()
target_year = 2017
if settings.TREKKING_TOPOLOGY_ENABLED:
intervention = InfrastructureInterventionFactory.create()
else:
intervention = InfrastructureInterventionFactory.create(geom='SRID=2154;POINT (700000 6600000)')
infra = intervention.infrastructure
# Save infrastructure form
response = self.client.get(infra.get_update_url())
form = response.context['form']
data = form.initial
data['name'] = 'modified'
data['implantation_year'] = target_year
if settings.TREKKING_TOPOLOGY_ENABLED:
data['topology'] = '{"paths": [%s]}' % PathFactory.create().pk
else:
data['geom'] = 'SRID=4326;POINT (2.0 6.6)'
response = self.client.post(infra.get_update_url(), data)
self.assertEqual(response.status_code, 302)
# Check that intervention was not deleted (bug #783)
intervention.reload()
self.assertFalse(intervention.deleted)
self.assertEqual(intervention.infrastructure.name, 'modified')
self.assertEqual(intervention.infrastructure.implantation_year, target_year)
@skipIf(not settings.TREKKING_TOPOLOGY_ENABLED, 'Test with dynamic segmentation only')
def test_form_default_stake(self):
"""
Without segmentation dynamic we do not have paths so we can't put any stake by default coming from paths
"""
self.login()
good_data = self.get_good_data()
good_data['stake'] = ''
good_data['topology'] = """
{"offset":0,"positions":{"0":[0.8298653170816073,1],"2":[0,0.04593024777973237]},"paths":[%s,%s,%s]}
""" % (PathFactory.create().pk, PathFactory.create().pk, PathFactory.create().pk)
response = self.client.post(Intervention.get_add_url(), good_data)
self.assertEqual(response.status_code, 302)
response = self.client.get(response._headers['location'][1])
self.assertTrue('object' in response.context)
intervention = response.context['object']
self.assertFalse(intervention.stake is None)
def test_form_deleted_projects(self):
self.login()
p1 = ProjectFactory.create()
p2 = ProjectFactory.create()
i = InterventionFactory.create(project=p1)
response = self.client.get(i.get_update_url())
self.assertEqual(response.status_code, 200)
form = self.get_form(response)
projects = form.fields['project'].queryset.all()
self.assertItemsEqual(projects, [p1, p2])
p2.delete()
projects = form.fields['project'].queryset.all()
self.assertItemsEqual(projects, [p1])
def test_no_html_in_csv_infrastructure(self):
if settings.TREKKING_TOPOLOGY_ENABLED:
InfrastructureInterventionFactory.create()
else:
InfrastructureInterventionFactory.create(geom='SRID=2154;POINT (700000 6600000)')
super(InterventionViewsTest, self).test_no_html_in_csv()
def test_no_html_in_csv_signage(self):
if settings.TREKKING_TOPOLOGY_ENABLED:
SignageInterventionFactory.create()
else:
SignageInterventionFactory.create(geom='SRID=2154;POINT (700000 6600000)')
super(InterventionViewsTest, self).test_no_html_in_csv()
def test_structurerelated_not_loggedin(self):
# Test that it does not fail on update if not logged in
self.client.logout()
response = self.client.get(Intervention.get_add_url())
self.assertEqual(response.status_code, 302)
i = InterventionFactory.create()
response = self.client.get(i.get_update_url())
self.assertEqual(response.status_code, 302)
class ProjectViewsTest(CommonTest):
model = Project
modelfactory = ProjectFactory
userfactory = PathManagerFactory
def get_bad_data(self):
return OrderedDict([
('begin_year', ''),
('funding_set-TOTAL_FORMS', '0'),
('funding_set-INITIAL_FORMS', '1'),
('funding_set-MAX_NUM_FORMS', '0'),
]), u'This field is required.'
def get_good_data(self):
return {
'name': 'test',
'stake': '',
'type': '',
'domain': '',
'begin_year': '2010',
'end_year': '2012',
'constraints': '',
'global_cost': '12',
'comments': '',
'contractors': ContractorFactory.create().pk,
'project_owner': OrganismFactory.create().pk,
'project_manager': OrganismFactory.create().pk,
'funding_set-TOTAL_FORMS': '2',
'funding_set-INITIAL_FORMS': '0',
'funding_set-MAX_NUM_FORMS': '',
'funding_set-0-amount': '468.0',
'funding_set-0-organism': OrganismFactory.create().pk,
'funding_set-0-project': '',
'funding_set-0-id': '',
'funding_set-0-DELETE': '',
'funding_set-1-amount': '789',
'funding_set-1-organism': OrganismFactory.create().pk,
'funding_set-1-project': '',
'funding_set-1-id': '',
'funding_set-1-DELETE': ''
}
def _check_update_geom_permission(self, response):
pass
def test_project_layer(self):
self.login()
p1 = ProjectFactory.create()
ProjectFactory.create()
if settings.TREKKING_TOPOLOGY_ENABLED:
InterventionFactory.create(project=p1)
else:
InterventionFactory.create(project=p1, geom='SRID=2154;POINT (700000 6600000)')
# Check that only p1 is in geojson
response = self.client.get(self.model.get_layer_url())
self.assertEqual(response.status_code, 200)
geojson = json.loads(response.content)
features = geojson['features']
self.assertEqual(len(Project.objects.all()), 2)
self.assertEqual(len(features), 1)
self.assertEqual(features[0]['properties']['pk'], p1.pk)
def test_project_bbox_filter(self):
self.login()
p1 = ProjectFactory.create()
ProjectFactory.create()
ProjectFactory.create()
if settings.TREKKING_TOPOLOGY_ENABLED:
t = TopologyFactory.create()
else:
t = TopologyFactory.create(geom='SRID=2154;POINT (700000 6600000)')
InterventionFactory.create(project=p1, topology=t)
def jsonlist(bbox):
url = self.model.get_jsonlist_url() + bbox
response = self.client.get(url)
self.assertEqual(response.status_code, 200)
jsondict = json.loads(response.content)
return jsondict['aaData']
# Check that projects without interventions are always present
self.assertEqual(len(Project.objects.all()), 3)
self.assertEqual(len(jsonlist('')), 3)
self.assertEqual(len(jsonlist('?bbox=POLYGON((1%202%200%2C1%202%200%2C1%202%200%2C1%202%200%2C1%202%200))')), 2)
# Give a bbox that match intervention, and check that all 3 projects are back
bbox = '?bbox=POLYGON((2.9%2046.4%2C%203.1%2046.4%2C%203.1%2046.6%2C%202.9%2046.6%2C%202.9%2046.4))'
self.assertEqual(len(jsonlist(bbox)), 3)
def test_deleted_interventions(self):
project = ProjectFactory.create()
if settings.TREKKING_TOPOLOGY_ENABLED:
intervention = InterventionFactory.create()
else:
intervention = InterventionFactory.create(geom='SRID=2154;POINT (700000 6600000)')
project.interventions.add(intervention)
self.login()
response = self.client.get(project.get_detail_url())
self.assertEqual(response.status_code, 200)
self.assertContains(response, intervention.name)
intervention.delete()
response = self.client.get(project.get_detail_url())
self.assertEqual(response.status_code, 200)
self.assertNotContains(response, intervention.name)
@skipIf(not settings.TREKKING_TOPOLOGY_ENABLED, 'Test with dynamic segmentation only')
class ExportTest(TranslationResetMixin, TestCase):
def test_shape_mixed(self):
"""
Test that a project made of intervention of different geom create multiple files.
Check that those files are each of a different type (Point/LineString) and that
the project and the intervention are correctly referenced in it.
"""
# Create topology line
topo_line = TopologyFactory.create(no_path=True)
line = PathFactory.create(geom=LineString(Point(10, 10), Point(11, 10)))
PathAggregationFactory.create(topo_object=topo_line, path=line)
# Create a topology point
lng, lat = tuple(Point(1, 1, srid=settings.SRID).transform(settings.API_SRID, clone=True))
closest_path = PathFactory(geom=LineString(Point(0, 0), Point(1, 0), srid=settings.SRID))
topo_point = TopologyHelper._topologypoint(lng, lat, None).reload()
self.assertEquals(topo_point.paths.get(), closest_path)
# Create one intervention by geometry (point/linestring)
it_point = InterventionFactory.create(topology=topo_point)
it_line = InterventionFactory.create(topology=topo_line)
# reload
it_point = type(it_point).objects.get(pk=it_point.pk)
it_line = type(it_line).objects.get(pk=it_line.pk)
proj = ProjectFactory.create()
proj.interventions.add(it_point)
proj.interventions.add(it_line)
# instanciate the class based view 'abnormally' to use create_shape directly
# to avoid making http request, authent and reading from a zip
pfl = ZipShapeSerializer()
devnull = open(os.devnull, "wb")
pfl.serialize(Project.objects.all(), stream=devnull, delete=False,
fields=ProjectFormatList.columns)
self.assertEquals(len(pfl.layers), 2)
ds_point = gdal.DataSource(pfl.layers.values()[0])
layer_point = ds_point[0]
ds_line = gdal.DataSource(pfl.layers.values()[1])
layer_line = ds_line[0]
self.assertEquals(layer_point.geom_type.name, 'MultiPoint')
self.assertEquals(layer_line.geom_type.name, 'LineString')
for layer in [layer_point, layer_line]:
self.assertEquals(layer.srs.name, 'RGF93_Lambert_93')
self.assertItemsEqual(layer.fields, [
u'id', u'name', u'period', u'type', u'domain', u'constraint',
u'global_cos', u'interventi', u'interven_1', u'comments',
u'contractor', u'project_ow', u'project_ma', u'founders',
u'related_st', u'insertion_', u'update_dat',
u'cities', u'districts', u'restricted'
])
self.assertEquals(len(layer_point), 1)
self.assertEquals(len(layer_line), 1)
for feature in layer_point:
self.assertEquals(str(feature['id']), str(proj.pk))
self.assertEquals(len(feature.geom.geos), 1)
self.assertAlmostEqual(feature.geom.geos[0].x, it_point.geom.x)
self.assertAlmostEqual(feature.geom.geos[0].y, it_point.geom.y)
for feature in layer_line:
self.assertEquals(str(feature['id']), str(proj.pk))
self.assertTrue(feature.geom.geos.equals(it_line.geom))
# Clean-up temporary shapefiles
for layer_file in pfl.layers.values():
for subfile in shapefile_files(layer_file):
os.remove(subfile)
| 41.677778 | 131 | 0.637652 |
7ad60f33c471984e98038bf32e77d5a95f525844 | 15,009 | py | Python | db/test.py | sysu-team1/BackEnd | 4773545897fee3aa7a767cbe6d011372623e1e58 | [
"MIT"
] | 1 | 2019-11-19T09:08:50.000Z | 2019-11-19T09:08:50.000Z | db/test.py | sysu-team1/BackEnd | 4773545897fee3aa7a767cbe6d011372623e1e58 | [
"MIT"
] | null | null | null | db/test.py | sysu-team1/BackEnd | 4773545897fee3aa7a767cbe6d011372623e1e58 | [
"MIT"
] | null | null | null | from config import make_pattern
from .Accept import Accept, random_accepts
from .Answer import Answer
from .Organization import Organization, random_orgs
from .prepare import ALL_TAGS, QUESTIONNAIRE_INDEX, model_repr
from .Problem import Problem
from .Student import Student, random_stus
from .Task import Task, random_tasks
def test_json():
import time
import datetime
print(Student(openid=1000000, email='email1', password='password1'))
print(Organization(openid=2, email='email2', password='password2'))
print(Task(id=1, publish_id=2, publish_time=time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(time.time())), limit_num=10,
title='title1', content='word1, word2', tag='tag1 tag2'))
print(Problem(id=1, task_id=1, description='desc1',
all_answers='answer1;answer2;answer3'))
print(Accept(accept_id=1000000, task_id=1, accept_time=datetime.datetime.now(),
finish_time='2020-01-01 00:00:00')) # TODO now()
print(Answer(accept_id=1, problem_id=1, answer='answer1'))
def test_normal_crud(db_helper):
import random
stus = random_stus(100)
db_helper.save_all(stus)
db_helper.commit()
print('---------- 随机查询10位幸运学生(可能重复)')
for _ in range(10):
print(db_helper.query_student(openid=random.choice(stus).openid))
orgs = random_orgs(10)
db_helper.save_all(orgs)
db_helper.commit()
print('---------- 随机查询3个幸运组织(可能重复)')
for _ in range(3):
print(db_helper.query_oraganization(openid=random.choice(orgs).openid))
tasks = random_tasks(50, orgs, stus, db_helper)
print('---------- 查找某些任务的发布者(可能重复)')
sample_tasks = random.sample(tasks, 10)
for task in sample_tasks:
print(task.id, task.publish_id, db_helper.get_publisher(task))
print('---------- 根据时间查询10个幸运任务')
query_tasks_by_time = db_helper.get_task()
for task in query_tasks_by_time:
print(task)
print('---------- 根据时间与tag查询10个幸运任务')
query_tasks_by_tag = db_helper.get_task_by_tag(search_tag='tag1')
for task in query_tasks_by_tag:
print(task)
print('---------- 根据时间与text查询10个幸运任务')
query_tasks_by_content = db_helper.get_task_by_text(search_text='word1')
for task in query_tasks_by_content:
print(task)
print('---------- 查找某些表单')
form_tasks = list(filter(lambda task: task.tag.find(
ALL_TAGS[QUESTIONNAIRE_INDEX]) != -1, tasks))
sample_form_tasks = random.sample(form_tasks, min(10, len(form_tasks)))
for task in sample_form_tasks:
print(task)
for problem in task.problems:
print('\t', problem)
# random_tasks 中没有将 problems 赋给 task ,但这里自动获取了,因为 relationship
# 直接在 mysql 命令行中检查:
# select * from tasks where publish_id not in (select openid from students union select openid from organizations); -- 检查 publish_id 是否合法
# select * from tasks where publish_time != '2000-01-01 00:00:00' and publish_time >= now(); -- 检查 publish_time 是否合法
# select * from tasks where limit_time != '2000-01-01 00:00:00' and limit_time <= publish_time; -- 检查 limit_time 是否合法
# select * from tasks where limit_num != -1 and limit_num < 1; -- 检查 limit_num 是否合法
# select * from tasks where accept_num < 0 or accept_num > limit_num; -- 检查 accept_num 是否合法
accepts = random_accepts(300, tasks, stus, db_helper)
print('---------- 查找这些任务的接收者(可能重复)')
sample_accepts = random.sample(accepts, 10)
for accept in sample_accepts:
print(accept, accept.student)
print('---------- 查找某些表单的接受')
form_accepts = list(filter(lambda accept: accept.tag.find(
ALL_TAGS[QUESTIONNAIRE_INDEX]) != -1, accepts))
sample_form_accepts = random.sample(
form_accepts, min(10, len(form_accepts)))
for accept in sample_form_accepts:
print(accept)
for answer in accept.answers:
print('\t', answer)
# 直接走 mysql 命令行中检查:
# select * from accepts where accept_id not in (select openid from students); -- 检查 accept_id 是否合法
# select * from accepts where task_id not in (select id from tasks); -- 检查 task_id 是否合法
# select * from accepts where accept_time ??? -- 检查 accept_time 是否合法
# select * from accepts where finish_time ??? -- 检查 finish_time 是否合法
# 检查 problem TODO
# 检查 answer TODO
""" def test_none():
print('---------- stu._tasks && stu.get_tasks')
stu = Student.query.filter(Student.openid == 1000000).one_or_none()
print(stu, stu._tasks, stu.get_tasks()[0], sep='\n')
print('---------- org._tasks && org.get_tasks')
org = Organization.query.filter(Organization.openid == 1).one_or_none()
print(org, org._tasks, org.get_tasks()[0], sep='\n')
print('---------- task._publisher && task.get_publisher')
task = Task.query.filter(Task.id == 1).one_or_none()
print(task, task._publisher, task.get_publisher(), sep='\n')
print('---------- test')
print(stu._tasks[0], org._tasks[0], task._publisher, sep='\n') """
def test_time(db_helper, update_add_num):
print('---------- 根据时间加载最新10条')
orders = ['id', 'publish_time']
patterns = make_pattern(len(orders))
tasks = db_helper.get_task()
for task in tasks:
print(model_repr(task, patterns, orders))
num_tasks = len(tasks)
if num_tasks == update_add_num:
last_id = tasks[-1].id
print('---------- 根据时间加载后10条')
tasks = db_helper.get_task(last_id=last_id)
for task in tasks:
print(model_repr(task, patterns, orders))
elif num_tasks > update_add_num:
print('什么鬼!!! something wrong happen')
return
else:
print('not enough tasks')
print('---------- 根据tag加载最新10条')
orders = ['id', 'publish_time', 'tag']
patterns = make_pattern(len(orders))
tasks = db_helper.get_task_by_tag(ALL_TAGS[QUESTIONNAIRE_INDEX])
for task in tasks:
print(model_repr(task, patterns, orders))
num_tasks = len(tasks)
if num_tasks == update_add_num:
last_id = tasks[-1].id
print('---------- 根据tag加载后10条')
tasks = db_helper.get_task_by_tag(
ALL_TAGS[QUESTIONNAIRE_INDEX], last_id=last_id)
for task in tasks:
print(model_repr(task, patterns, orders))
elif num_tasks > update_add_num:
print('什么鬼!!! something wrong happen')
return
else:
print('not enough tasks')
print('---------- 根据text加载最新10条')
orders = ['id', 'publish_time', 'title', 'content']
patterns = make_pattern(len(orders))
tasks = db_helper.get_task_by_text('word2')
for task in tasks:
print(model_repr(task, patterns, orders))
num_tasks = len(tasks)
if num_tasks == update_add_num:
last_id = tasks[-1].id
print('---------- 根据text加载后10条')
tasks = db_helper.get_task_by_text('word2', last_id=last_id)
for task in tasks:
print(model_repr(task, patterns, orders))
elif num_tasks > update_add_num:
print('什么鬼!!! something wrong happen')
return
else:
print('not enough tasks')
test_publish_task(db_helper.get_all_publish_tasks, 4, 6,
'加载组织6发布的最新4条任务', '加载组织6发布的后4条任务')
def test_accetp_and_publish(db_helper, update_add_num):
openid = 6
update_add_num = 5
test_publish_task(db_helper.get_all_publish_tasks, openid, update_add_num,
'加载组织{}发布的所有的最新{}条任务'.format(openid, update_add_num),
'加载组织{}发布的所有的后{}条任务'.format(openid, update_add_num))
test_publish_task(db_helper.get_ongoing_publish_tasks, openid, update_add_num,
'加载组织{}发布的进行中的最新{}条任务'.format(openid, update_add_num),
'加载组织{}发布的进行中的后{}条任务'.format(openid, update_add_num))
test_publish_task(db_helper.get_finished_publish_tasks, openid, update_add_num,
'加载组织{}发布的已完成的最新{}条任务'.format(openid, update_add_num),
'加载组织{}发布的已完成的后{}条任务'.format(openid, update_add_num))
openid = 1000041
update_add_num = 5
test_accept_task(db_helper.get_all_accept_tasks, openid, update_add_num,
'加载学生{}接受的所有的最新{}条任务'.format(openid, update_add_num),
'加载学生{}接受的所有的后{}条任务'.format(openid, update_add_num))
test_accept_task(db_helper.get_ongoing_accept_tasks, openid, update_add_num,
'加载学生{}接受的进行中的最新{}条任务'.format(openid, update_add_num),
'加载学生{}接受的进行中的后{}条任务'.format(openid, update_add_num))
test_accept_task(db_helper.get_complete_accept_tasks, openid, update_add_num,
'加载学生{}接受的结束了的最新{}条任务'.format(openid, update_add_num),
'加载学生{}接受的结束了的后{}条任务'.format(openid, update_add_num))
test_accept_task(db_helper.get_finished_accept_tasks, openid, update_add_num,
'加载学生{}接受的已完成的最新{}条任务'.format(openid, update_add_num),
'加载学生{}接受的已完成的后{}条任务'.format(openid, update_add_num))
def test_publish_task(method, openid, update_add_num, message1, message2):
print('----------', message1)
orders = ['id', 'publish_time', 'limit_time']
patterns = make_pattern(len(orders))
tasks = method(openid=openid, length=update_add_num)
for task in tasks:
print(model_repr(task, patterns, orders))
num_tasks = len(tasks)
if num_tasks == update_add_num:
last_id = tasks[-1].id
print('----------', message2)
tasks = method(openid=openid, last_id=last_id, length=update_add_num)
for task in tasks:
print(model_repr(task, patterns, orders))
elif num_tasks > update_add_num:
print('---------- 什么鬼!!! something wrong happen')
return
else:
print('---------- not enough tasks')
def test_accept_task(method, openid, update_add_num, message1, message2):
print('----------', message1)
orders = ['id', 'publish_time', 'limit_time']
patterns = make_pattern(len(orders))
tasks, last_accept_time = method(openid=openid, length=update_add_num)
for task in tasks:
print(model_repr(task, patterns, orders))
num_tasks = len(tasks)
if num_tasks == update_add_num:
print('----------', message2)
tasks, last_accept_time = method(
openid=openid, last_accept_time=last_accept_time, length=update_add_num)
for task in tasks:
print(model_repr(task, patterns, orders))
elif num_tasks > update_add_num:
print('---------- 什么鬼!!! something wrong happen')
return
else:
print('---------- not enough tasks')
def test_create_student_and_organization(db_helper):
'''
# 只产生学生与组织
'''
stus = random_stus(10)
org = random_orgs(10)
db_helper.save_all(stus)
db_helper.save_all(org)
db_helper.commit()
def test_some_methods(db_helper, app, update_add_num):
# ---------- test save / query_student / query_oraganization / delete_all
# stu = Student(email='liangyy75@qq.com', password='liangyy75@pass', student_id='16340134', name='liangyy75', sex='男', collage='数据科学与计算机学院', grade=2016, edu_bg='本科')
# print(db_helper.save(stu))
# print(db_helper.query_student(openid=stu.openid))
# org = Organization(email='liangyy75@qq.com', password='liangyy75@pass', name='test org')
# db_helper.save(org)
# print(db_helper.query_oraganization(org.openid))
# db_helper.delete(stu)
# db_helper.delete_all([org])
# ---------- test delete
# print(db_helper.query_oraganization(openid=11))
# db_helper.delete(db_helper.query_oraganization(openid=11))
# print(db_helper.query_oraganization(openid=11))
# ---------- test update_student_or_organization
# print(db_helper.query_student(openid=app.config['SPLIT_STU_ORG'] + 10).sex)
# print(db_helper.update_student_or_organization(
# app.config['SPLIT_STU_ORG'] + 10, 'sex', '男'))
# print(db_helper.query_student(openid=app.config['SPLIT_STU_ORG'] + 10).sex)
# print(db_helper.update_student_or_organization(
# app.config['SPLIT_STU_ORG'] + 10, 'sex', '女'))
# print(db_helper.query_student(openid=app.config['SPLIT_STU_ORG'] + 10).sex)
# ---------- test has_accept / get_recipient / sign_up_true / accept_task
# recipients = db_helper.get_recipient(1, length=100)
# print(len(recipients))
# print(db_helper.has_accept(recipients[0].openid, 1))
# print(db_helper.sign_up_true(email='liangyy75@qq2.com', password='liangyy75@pass2', student_id='16340134', sex='男', collage='数据科学与计算机学院', grade=2016, name='liangyy75'))
# stu = Student.query.filter(Student.email == 'liangyy75@qq2.com').one_or_none()
# print(db_helper.has_accept(stu.openid, 1))
# print(db_helper.accept_task(stu.openid, 1))
# print(len(db_helper.get_recipient(1, length=100)))
# print(db_helper.has_accept(stu.openid, 1))
# print(db_helper.delete(stu))
# print(len(db_helper.get_recipient(1, length=100)))
# print(db_helper.has_accept(stu.openid, 1))
# ---------- test get_xxx_publish_task / get_xxx_accept_task / get_task_by_xxx
# test_time(db_helper, update_add_num)
# print('-' * 100)
# test_accetp_and_publish(db_helper, update_add_num)
# ---------- test get_publisher / get_recipient / get_all_problems / get_all_answers
# print(db_helper.get_task_by_id(1))
# print(db_helper.get_publisher(1))
# print(db_helper.get_publisher(1) == db_helper.get_task_by_id(1).publisher)
# print(len(db_helper.get_recipient(1, length=100)))
# print(len(db_helper.get_all_problems(1).split('^')))
# print(len(db_helper.get_all_answers(1)))
# print(sum(map(lambda item: len(item), db_helper.get_all_answers(1))))
# ---------- test finish_task / cancel_task / accept_task / has_accept
# print(db_helper.sign_up_true(email='liangyy75@qq2.com', password='liangyy75@pass2', student_id='16340134', sex='男', collage='数据科学与计算机学院', grade=2016, name='liangyy75'))
# stu = Student.query.filter(Student.email == 'liangyy75@qq2.com').one_or_none()
# print(db_helper.cancel_task(stu.openid, 1))
# print(db_helper.has_accept(stu.openid, 1))
# print(db_helper.accept_task(stu.openid, 1))
# print(db_helper.has_accept(stu.openid, 1))
# print(Accept.query.filter(Accept.accept_id == stu.openid).filter(
# Accept.task_id == 1).one_or_none().finish_time)
# print(db_helper.finish_task(stu.openid, 1))
# print(Accept.query.filter(Accept.accept_id == stu.openid).filter(
# Accept.task_id == 1).one_or_none().finish_time)
# db_helper.delete(stu)
# ---------- test charge / carry_over / cash_in
print(db_helper.query_oraganization(openid=1).cash)
print(db_helper.charge(1, 1000))
print(db_helper.query_oraganization(openid=1).cash)
print(db_helper.query_student(openid=app.config['SPLIT_STU_ORG']).cash)
print(db_helper.carry_over(source_id=1, target_id=app.config['SPLIT_STU_ORG'], money_num=1000))
print(db_helper.query_student(openid=app.config['SPLIT_STU_ORG']).cash)
print(db_helper.query_oraganization(openid=1).cash)
| 45.072072 | 174 | 0.663535 |
5bc26d90a5e4b80c29787a0dbe0ca98820c0a578 | 9,450 | py | Python | ml-agents-envs/mlagents_envs/communicator_objects/observation_pb2.py | netdrones/ml-agents | 7d7d6f149c92ea2067d7cea364d92c8c3b8db3f4 | [
"Apache-2.0"
] | 2 | 2021-08-10T21:39:09.000Z | 2021-08-11T09:46:03.000Z | ml-agents-envs/mlagents_envs/communicator_objects/observation_pb2.py | netdrones/ml-agents | 7d7d6f149c92ea2067d7cea364d92c8c3b8db3f4 | [
"Apache-2.0"
] | 1 | 2021-01-31T09:16:16.000Z | 2021-01-31T18:24:34.000Z | ml-agents-envs/mlagents_envs/communicator_objects/observation_pb2.py | netdrones/ml-agents | 7d7d6f149c92ea2067d7cea364d92c8c3b8db3f4 | [
"Apache-2.0"
] | 1 | 2021-01-31T09:02:17.000Z | 2021-01-31T09:02:17.000Z | # Generated by the protocol buffer compiler. DO NOT EDIT!
# source: mlagents_envs/communicator_objects/observation.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf.internal import enum_type_wrapper
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
from google.protobuf import descriptor_pb2
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
DESCRIPTOR = _descriptor.FileDescriptor(
name='mlagents_envs/communicator_objects/observation.proto',
package='communicator_objects',
syntax='proto3',
serialized_pb=_b('\n4mlagents_envs/communicator_objects/observation.proto\x12\x14\x63ommunicator_objects\"\x81\x03\n\x10ObservationProto\x12\r\n\x05shape\x18\x01 \x03(\x05\x12\x44\n\x10\x63ompression_type\x18\x02 \x01(\x0e\x32*.communicator_objects.CompressionTypeProto\x12\x19\n\x0f\x63ompressed_data\x18\x03 \x01(\x0cH\x00\x12\x46\n\nfloat_data\x18\x04 \x01(\x0b\x32\x30.communicator_objects.ObservationProto.FloatDataH\x00\x12\"\n\x1a\x63ompressed_channel_mapping\x18\x05 \x03(\x05\x12\x1c\n\x14\x64imension_properties\x18\x06 \x03(\x05\x12\x44\n\x10observation_type\x18\x07 \x01(\x0e\x32*.communicator_objects.ObservationTypeProto\x1a\x19\n\tFloatData\x12\x0c\n\x04\x64\x61ta\x18\x01 \x03(\x02\x42\x12\n\x10observation_data*)\n\x14\x43ompressionTypeProto\x12\x08\n\x04NONE\x10\x00\x12\x07\n\x03PNG\x10\x01*F\n\x14ObservationTypeProto\x12\x0b\n\x07\x44\x45\x46\x41ULT\x10\x00\x12\x08\n\x04GOAL\x10\x01\x12\n\n\x06REWARD\x10\x02\x12\x0b\n\x07MESSAGE\x10\x03\x42%\xaa\x02\"Unity.MLAgents.CommunicatorObjectsb\x06proto3')
)
_COMPRESSIONTYPEPROTO = _descriptor.EnumDescriptor(
name='CompressionTypeProto',
full_name='communicator_objects.CompressionTypeProto',
filename=None,
file=DESCRIPTOR,
values=[
_descriptor.EnumValueDescriptor(
name='NONE', index=0, number=0,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='PNG', index=1, number=1,
options=None,
type=None),
],
containing_type=None,
options=None,
serialized_start=466,
serialized_end=507,
)
_sym_db.RegisterEnumDescriptor(_COMPRESSIONTYPEPROTO)
CompressionTypeProto = enum_type_wrapper.EnumTypeWrapper(_COMPRESSIONTYPEPROTO)
_OBSERVATIONTYPEPROTO = _descriptor.EnumDescriptor(
name='ObservationTypeProto',
full_name='communicator_objects.ObservationTypeProto',
filename=None,
file=DESCRIPTOR,
values=[
_descriptor.EnumValueDescriptor(
name='DEFAULT', index=0, number=0,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='GOAL', index=1, number=1,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='REWARD', index=2, number=2,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='MESSAGE', index=3, number=3,
options=None,
type=None),
],
containing_type=None,
options=None,
serialized_start=509,
serialized_end=579,
)
_sym_db.RegisterEnumDescriptor(_OBSERVATIONTYPEPROTO)
ObservationTypeProto = enum_type_wrapper.EnumTypeWrapper(_OBSERVATIONTYPEPROTO)
# Module-level aliases for the enum values declared in the descriptors above
# (CompressionTypeProto: NONE/PNG; ObservationTypeProto: DEFAULT/GOAL/REWARD/MESSAGE).
NONE = 0
PNG = 1
DEFAULT = 0
GOAL = 1
REWARD = 2
MESSAGE = 3
_OBSERVATIONPROTO_FLOATDATA = _descriptor.Descriptor(
name='FloatData',
full_name='communicator_objects.ObservationProto.FloatData',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='data', full_name='communicator_objects.ObservationProto.FloatData.data', index=0,
number=1, type=2, cpp_type=6, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=419,
serialized_end=444,
)
_OBSERVATIONPROTO = _descriptor.Descriptor(
name='ObservationProto',
full_name='communicator_objects.ObservationProto',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='shape', full_name='communicator_objects.ObservationProto.shape', index=0,
number=1, type=5, cpp_type=1, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='compression_type', full_name='communicator_objects.ObservationProto.compression_type', index=1,
number=2, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='compressed_data', full_name='communicator_objects.ObservationProto.compressed_data', index=2,
number=3, type=12, cpp_type=9, label=1,
has_default_value=False, default_value=_b(""),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='float_data', full_name='communicator_objects.ObservationProto.float_data', index=3,
number=4, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='compressed_channel_mapping', full_name='communicator_objects.ObservationProto.compressed_channel_mapping', index=4,
number=5, type=5, cpp_type=1, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='dimension_properties', full_name='communicator_objects.ObservationProto.dimension_properties', index=5,
number=6, type=5, cpp_type=1, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='observation_type', full_name='communicator_objects.ObservationProto.observation_type', index=6,
number=7, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[_OBSERVATIONPROTO_FLOATDATA, ],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
_descriptor.OneofDescriptor(
name='observation_data', full_name='communicator_objects.ObservationProto.observation_data',
index=0, containing_type=None, fields=[]),
],
serialized_start=79,
serialized_end=464,
)
_OBSERVATIONPROTO_FLOATDATA.containing_type = _OBSERVATIONPROTO
_OBSERVATIONPROTO.fields_by_name['compression_type'].enum_type = _COMPRESSIONTYPEPROTO
_OBSERVATIONPROTO.fields_by_name['float_data'].message_type = _OBSERVATIONPROTO_FLOATDATA
_OBSERVATIONPROTO.fields_by_name['observation_type'].enum_type = _OBSERVATIONTYPEPROTO
_OBSERVATIONPROTO.oneofs_by_name['observation_data'].fields.append(
_OBSERVATIONPROTO.fields_by_name['compressed_data'])
_OBSERVATIONPROTO.fields_by_name['compressed_data'].containing_oneof = _OBSERVATIONPROTO.oneofs_by_name['observation_data']
_OBSERVATIONPROTO.oneofs_by_name['observation_data'].fields.append(
_OBSERVATIONPROTO.fields_by_name['float_data'])
_OBSERVATIONPROTO.fields_by_name['float_data'].containing_oneof = _OBSERVATIONPROTO.oneofs_by_name['observation_data']
DESCRIPTOR.message_types_by_name['ObservationProto'] = _OBSERVATIONPROTO
DESCRIPTOR.enum_types_by_name['CompressionTypeProto'] = _COMPRESSIONTYPEPROTO
DESCRIPTOR.enum_types_by_name['ObservationTypeProto'] = _OBSERVATIONTYPEPROTO
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
ObservationProto = _reflection.GeneratedProtocolMessageType('ObservationProto', (_message.Message,), dict(
FloatData = _reflection.GeneratedProtocolMessageType('FloatData', (_message.Message,), dict(
DESCRIPTOR = _OBSERVATIONPROTO_FLOATDATA,
__module__ = 'mlagents_envs.communicator_objects.observation_pb2'
# @@protoc_insertion_point(class_scope:communicator_objects.ObservationProto.FloatData)
))
,
DESCRIPTOR = _OBSERVATIONPROTO,
__module__ = 'mlagents_envs.communicator_objects.observation_pb2'
# @@protoc_insertion_point(class_scope:communicator_objects.ObservationProto)
))
_sym_db.RegisterMessage(ObservationProto)
_sym_db.RegisterMessage(ObservationProto.FloatData)
DESCRIPTOR.has_options = True
DESCRIPTOR._options = _descriptor._ParseOptions(descriptor_pb2.FileOptions(), _b('\252\002\"Unity.MLAgents.CommunicatorObjects'))
# @@protoc_insertion_point(module_scope)
| 41.447368 | 1,023 | 0.773545 |
2499f19b59a22b5ce3c91ac6c25a85f923639d73 | 347 | py | Python | test_calweek.py | yuridadt/week_calc | 97c553b61d635803f8ceb623d96ffbfe81f6f390 | [
"MIT"
] | null | null | null | test_calweek.py | yuridadt/week_calc | 97c553b61d635803f8ceb623d96ffbfe81f6f390 | [
"MIT"
] | null | null | null | test_calweek.py | yuridadt/week_calc | 97c553b61d635803f8ceb623d96ffbfe81f6f390 | [
"MIT"
] | null | null | null | import pytest
from calweek import CalWeek
def test_cal_week():
    """Exercise CalWeek's string form and +/- arithmetic across a year boundary."""
    week = CalWeek('2018-51')
    assert str(week) == '2018-51', 'Str representation failed'
    assert week + 2 == '2019-01', 'Add failed'
    assert week - 3 == '2018-48', 'Sub failed'
    assert CalWeek('2019-04') - week == 5, 'Sub failed'
test_cal_week()
| 23.133333 | 60 | 0.62536 |
74f54fa2924e6f3ae75a2187ad83bc0ed045e07a | 377 | py | Python | api/benchmarks/urls.py | OpenUpSA/salga-mobile-api | ef445c01404512a7b1c19912438a6f161a554c76 | [
"MIT"
] | null | null | null | api/benchmarks/urls.py | OpenUpSA/salga-mobile-api | ef445c01404512a7b1c19912438a6f161a554c76 | [
"MIT"
] | 4 | 2020-06-05T17:45:14.000Z | 2021-06-10T19:28:46.000Z | api/benchmarks/urls.py | OpenUpSA/salga-mobile-api | ef445c01404512a7b1c19912438a6f161a554c76 | [
"MIT"
] | 1 | 2020-06-22T02:09:25.000Z | 2020-06-22T02:09:25.000Z | from django.views.decorators.cache import cache_page
from django.conf.urls import url
from . import views
# Benchmark API routes; every response is cached for 600 s (10 minutes).
urlpatterns = [
    url(r'^mandates$',
        cache_page(600)(views.BenchmarkMandateView.as_view()),
        name='mandates'),
    # <indicator> captures a numeric indicator id and is passed to the view as a kwarg.
    url(r'^indicators/(?P<indicator>[\d]+)$',
        cache_page(600)(views.BenchmarkIndicatorView.as_view()),
        name='indicators'),
]
| 26.928571 | 64 | 0.67374 |
3510e0f9cb1784865a95ecdc443effad56ad29a3 | 3,411 | py | Python | fuzzers/011-ffconfig/generate.py | unixb0y/prjxray | 569a44d8beadb7cf6548f7437e46fb456bacdd90 | [
"ISC"
] | 1 | 2020-10-31T19:32:41.000Z | 2020-10-31T19:32:41.000Z | fuzzers/011-ffconfig/generate.py | unixb0y/prjxray | 569a44d8beadb7cf6548f7437e46fb456bacdd90 | [
"ISC"
] | null | null | null | fuzzers/011-ffconfig/generate.py | unixb0y/prjxray | 569a44d8beadb7cf6548f7437e46fb456bacdd90 | [
"ISC"
] | 1 | 2021-05-07T11:53:23.000Z | 2021-05-07T11:53:23.000Z | #!/usr/bin/env python3
'''
FDCE Primitive: D Flip-Flop with Clock Enable and Asynchronous Clear
FDPE Primitive: D Flip-Flop with Clock Enable and Asynchronous Preset
FDRE Primitive: D Flip-Flop with Clock Enable and Synchronous Reset
FDSE Primitive: D Flip-Flop with Clock Enable and Synchronous Set
LDCE Primitive: Transparent Data Latch with Asynchronous Clear and Gate Enable
LDPE Primitive: Transparent Data Latch with Asynchronous Preset and Gate Enable
'''
from prims import *
import sys, re
sys.path.append("../../../utils/")
from segmaker import segmaker
segmk = segmaker("design.bits")
def ones(l):
    """Return *l* with a "_1" variant inserted directly after each entry.

    E.g. ['A', 'B'] -> ['A', 'A_1', 'B', 'B_1'].  Pairs stay adjacent and the
    input order is preserved (unlike the discarded append-at-end / sorted
    variants this replaces).
    """
    ret = []
    for x in l:
        ret.extend((x, x + '_1'))
    return ret
def loadtop():
    """Parse top.txt into a dict keyed by site location.

    Expected CSV layout (one header row, then one row per primitive):
        i,prim,loc,bel,init
        0,FDPE,SLICE_X12Y100,C5FF,1
        1,FDPE,SLICE_X15Y100,A5FF,0

    Returns {loc: (i, prim, loc, bel, init)} with i and init converted to int
    (int() tolerates the trailing newline on the last field).
    """
    ret = {}
    # Context manager closes the handle; the original open() leaked it.
    with open('top.txt', 'r') as f:
        f.readline()  # skip the header row
        for l in f:
            i, prim, loc, bel, init = l.split(",")
            ret[loc] = (int(i), prim, loc, bel, int(init))
    return ret
# Ground-truth placement parsed from top.txt: loc -> (i, prim, loc, bel, init).
# NOTE(review): `top` is not referenced in the tag-extraction loop below --
# possibly vestigial; confirm before removing.
top = loadtop()
def vs2i(s):
    """Map a Verilog 1-bit literal ("1'b0" / "1'b1") to the integer 0 or 1."""
    lut = {"1'b0": 0, "1'b1": 1}
    return lut[s]
# Convert the per-BEL dump written by the Tcl flow (design.txt) into segmaker
# tags describing each flip-flop's configuration bits.
print("Loading tags from design.txt")
with open("design.txt", "r") as f:
    for line in f:
        '''
        puts $fp "$type $tile $grid_x $grid_y $ff $bel_type $used $usedstr"
        CLBLM_L CLBLM_L_X10Y137 30 13 SLICE_X13Y137/AFF REG_INIT 1 FDRE
        CLBLM_L CLBLM_L_X10Y137 30 13 SLICE_X12Y137/D5FF FF_INIT 0
        '''
        line = line.split()
        tile_type = line[0]
        tile_name = line[1]
        grid_x = line[2]
        grid_y = line[3]
        # Other code uses BEL name
        # SLICE_X12Y137/D5FF
        site_ff_name = line[4]
        site, ff_name = site_ff_name.split('/')
        ff_type = line[5]
        used = int(line[6])
        cel_prim = None
        cel_name = None
        # Columns 7..10 are only present when the BEL is actually used.
        if used:
            cel_name = line[7]
            # ex: FDCE
            cel_prim = line[8]
            # 1'b1
            # cinv = int(line[9][-1])
            cinv = int(line[9])
            init = vs2i(line[10])
            #init = int(line[10])
        # A B C D
        which = ff_name[0]
        # LUT6 vs LUT5 FF
        is5 = '5' in ff_name
        # NOTE(review): tile_type/tile_name/grid_x/grid_y/ff_type/which/cel_name
        # are parsed but unused below -- possibly kept for debugging; confirm.
        if used:
            # ZINI: tag carries the inverted INIT value (Z prefix => inversion).
            segmk.addtag(site, "%s.ZINI" % ff_name, 1 ^ init)
            # CLKINV turns out to be more complicated than originally thought:
            # the polarity is recorded directly for FFs, inverted for latches.
            if isff(cel_prim):
                segmk.addtag(site, "CLKINV", cinv)
            else:
                segmk.addtag(site, "CLKINV", 1 ^ cinv)
            # Synchronous vs asynchronous FF
            # Unlike most bits, shared between all CLB FFs
            segmk.addtag(site, "FFSYNC", cel_prim in ('FDSE', 'FDRE'))
            # Latch bit
            # Only applies to LUT6 (non-5) FF's
            if not is5:
                segmk.addtag(site, "LATCH", isl(cel_prim))
            '''
            On name:
            The primitives you listed have a control input to set the FF value to zero (clear/reset),
            the other three primitives have a control input that sets the FF value to one.
            Z => inversion
            '''
            segmk.addtag(
                site, "%s.ZRST" % ff_name,
                cel_prim in ('FDRE', 'FDCE', 'LDCE'))
segmk.compile()
segmk.write()
| 27.731707 | 101 | 0.555849 |
fd256c93827f2e9a10927f2eaa40c2d71542d62e | 2,823 | py | Python | tensorflow_federated/python/research/analytics/heavy_hitters/heavy_hitters_utils_test.py | houcharlie/federated-legacy | cb10a9cdcea33288f8113e7445782d21c8c65f81 | [
"Apache-2.0"
] | 5 | 2020-06-04T20:10:25.000Z | 2020-07-22T02:15:38.000Z | tensorflow_federated/python/research/analytics/heavy_hitters/heavy_hitters_utils_test.py | houcharlie/federated-legacy | cb10a9cdcea33288f8113e7445782d21c8c65f81 | [
"Apache-2.0"
] | 5 | 2020-07-20T13:39:12.000Z | 2020-08-27T18:00:56.000Z | tensorflow_federated/python/research/analytics/heavy_hitters/heavy_hitters_utils_test.py | houcharlie/federated-legacy | cb10a9cdcea33288f8113e7445782d21c8c65f81 | [
"Apache-2.0"
] | 1 | 2022-02-27T18:31:12.000Z | 2022-02-27T18:31:12.000Z | # Copyright 2020, The TensorFlow Federated Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import tensorflow as tf
from tensorflow_federated.python.research.analytics.heavy_hitters import heavy_hitters_utils as hh_utils
class HeavyHittersUtilsTest(tf.test.TestCase):
  """Tests for the top_k / precision / recall / f1_score helpers."""

  def test_top_k(self):
    tokens = ['a', 'a', 'a', 'b', 'b', 'c', 'c', 'd', 'e']
    # k=2 keeps ties: both count-2 items ('b' and 'c') are returned with 'a'.
    self.assertEqual(hh_utils.top_k(tokens, 1), {'a': 3})
    self.assertEqual(hh_utils.top_k(tokens, 2), {'a': 3, 'b': 2, 'c': 2})

  def test_precision(self):
    estimated = {'a': 3, 'b': 2, 'c': 1, 'd': 0}
    # (ground_truth, k, expected precision)
    cases = [
        ({'a': 3, 'b': 2, 'c': 1, 'd': 0}, 2, 1.0),
        ({'a': 3, 'c': 2, 'b': 1, 'd': 0}, 2, 0.5),
        ({'a': 3, 'c': 2, 'b': 1, 'd': 0}, 3, 1.0),
        ({'a': 3, 'd': 2, 'b': 2, 'c': 2}, 3, 1.0),
    ]
    for ground_truth, k, expected in cases:
      self.assertAlmostEqual(
          hh_utils.precision(ground_truth, estimated, k), expected)

  def test_recall(self):
    estimated = {'a': 3, 'b': 2, 'c': 1, 'd': 0}
    # (ground_truth, k, expected recall)
    cases = [
        ({'a': 3, 'b': 2, 'c': 1, 'd': 0}, 2, 1.0),
        ({'a': 3, 'c': 2, 'b': 1, 'd': 0}, 2, 0.5),
        ({'a': 3, 'c': 2, 'b': 1, 'd': 0}, 3, 1.0),
        ({'a': 3, 'd': 2, 'b': 2, 'c': 2}, 3, 0.75),
    ]
    for ground_truth, k, expected in cases:
      self.assertAlmostEqual(
          hh_utils.recall(ground_truth, estimated, k), expected)

  def test_f1_score(self):
    estimated = {'a': 3, 'b': 2, 'c': 1, 'd': 0}
    # (ground_truth, k, expected F1)
    cases = [
        ({'a': 3, 'b': 2, 'c': 1, 'd': 0}, 2, 1.0),
        ({'a': 3, 'c': 2, 'b': 1, 'd': 0}, 2, 0.5),
        ({'a': 3, 'c': 2, 'b': 1, 'd': 0}, 3, 1.0),
        ({'a': 3, 'd': 2, 'b': 2, 'c': 2}, 3, 0.85714285),
    ]
    for ground_truth, k, expected in cases:
      self.assertAlmostEqual(
          hh_utils.f1_score(ground_truth, estimated, k), expected)
# Delegate to TensorFlow's test runner so TF test setup/teardown applies.
if __name__ == '__main__':
  tf.test.main()
| 37.144737 | 104 | 0.634786 |
20729801dbd6c15d742917db4d9ee76d6ea2353d | 330 | py | Python | day00/ex05/kata04.py | d-r-e/Machine-Learning-Bootcamp | 618cad97c04d15fec6e8a371c526ad8e08cae35a | [
"MIT"
] | null | null | null | day00/ex05/kata04.py | d-r-e/Machine-Learning-Bootcamp | 618cad97c04d15fec6e8a371c526ad8e08cae35a | [
"MIT"
] | 6 | 2021-05-25T08:51:39.000Z | 2021-05-25T08:51:40.000Z | day00/ex05/kata04.py | d-r-e/Python-Bootcamp-42AI | 618cad97c04d15fec6e8a371c526ad8e08cae35a | [
"MIT"
] | null | null | null | #! /usr/bin/env python3
def main():
    """Print selected tuple fields: zero-padded ints, a rounded float, two sci-notation values."""
    # NOTE(review): `12345,67` makes this a 6-tuple whose final element (67) is
    # never printed -- possibly a typo for 12345.67; the printed output is the
    # same either way since format(12345, ".2e") == format(12345.67, ".2e").
    tup = (0, 4, 132.42222, 10000, 12345,67)
    day, ex, ratio, grade, avg = tup[:5]
    print("day_" + str(day).zfill(2), end=', ')
    print("ex_" + str(ex).zfill(2), end=' : ')
    print(str(round(ratio, 2)), end=', ')
    print(format(grade, ".2e"), end=', ')
    print(format(avg, ".2e"))
if __name__ == "__main__":
main() | 27.5 | 48 | 0.536364 |
240f0d1ddda01f9784e04444a869d2decd719d61 | 5,313 | py | Python | test/unit/postprocessing/test_threshold_optimizer_multiple_sensitive_features.py | bthng/fairlearn | 042d18c530c397191363b5cf57d7684d52cf817e | [
"MIT"
] | null | null | null | test/unit/postprocessing/test_threshold_optimizer_multiple_sensitive_features.py | bthng/fairlearn | 042d18c530c397191363b5cf57d7684d52cf817e | [
"MIT"
] | null | null | null | test/unit/postprocessing/test_threshold_optimizer_multiple_sensitive_features.py | bthng/fairlearn | 042d18c530c397191363b5cf57d7684d52cf817e | [
"MIT"
] | null | null | null | # Copyright (c) Microsoft Corporation and Fairlearn contributors.
# Licensed under the MIT License.
import numpy as np
import pandas as pd
from sklearn.metrics import balanced_accuracy_score, accuracy_score
from sklearn.linear_model import LinearRegression
from fairlearn.postprocessing import ThresholdOptimizer
from fairlearn.metrics import (
MetricFrame,
false_positive_rate,
false_negative_rate
)
# Metrics evaluated overall and per sensitive-feature group; the dict keys
# become the column names of the resulting MetricFrame.
fairness_metrics = {
    "accuracy": accuracy_score,
    "false_positive_rate": false_positive_rate,
    # Label counts per group; the prediction argument is deliberately ignored.
    "positive_count": lambda true, pred: np.sum(true),
    "false_negative_rate": false_negative_rate,
    "negative_count": lambda true, pred: np.sum(1-true),
    "balanced_accuracy": balanced_accuracy_score
}
def test_threshold_optimizer_multiple_sensitive_features():
    """Fitting on two sensitive-feature columns must match fitting on their
    precomputed string concatenation, both in MetricFrame results and in the
    internal interpolation dictionaries (regression test for bug #728)."""
    # Create sensitive features so that the third column is the first two combined.
    # Also, the name a2 is long since that caused bug #728.
    # The bug caused the merged names to get cut off, resulting in multiple groups
    # getting merged internally. To avoid that this test case checks even internal
    # representations.
    X = pd.DataFrame([
        [0, 4], [6, 2], [1, 3], [10, 5], [1, 7], [-2, 1], [3, 10], [14, 5],
        [1, 3], [1, 5], [1, 7], [-5, 9], [3, 13], [7, 1], [-8, 4], [9, 1]])
    y = pd.Series([0, 1, 1, 0, 0, 0, 1, 0, 0, 1, 0, 0, 1, 1, 1, 0])
    a1 = "a"
    a2 = "a very very very very very very very long group name"
    # Group names deliberately contain the characters the merge logic escapes.
    a3 = "a group name with commas ,, in , it"
    a4 = "a group name with backslashes \\ in \\\\ it"
    A = pd.DataFrame([[a1, a3, a1 + a3], [a1, a3, a1 + a3],
                      [a2, a3, a2 + a3], [a2, a3, a2 + a3], [a2, a3, a2 + a3], [a2, a3, a2 + a3],
                      [a2, a4, a2 + a4], [a2, a4, a2 + a4], [a2, a4, a2 + a4], [a2, a4, a2 + a4],
                      [a2, a4, a2 + a4], [a2, a4, a2 + a4], [a2, a4, a2 + a4], [a2, a4, a2 + a4],
                      [a1, a4, a1 + a4], [a1, a4, a1 + a4]],
                     columns=['SF1', 'SF2', 'SF1+2'])
    estimator = LinearRegression()
    estimator.fit(X, y)
    # Two identically configured optimizers: one sees the two raw SF columns,
    # the other sees the manually combined column.
    postprocess_est_multi = ThresholdOptimizer(
        estimator=estimator,
        constraints="demographic_parity",
        objective="accuracy_score",
        prefit=True
    )
    postprocess_est_combined = ThresholdOptimizer(
        estimator=estimator,
        constraints="demographic_parity",
        objective="accuracy_score",
        prefit=True
    )
    postprocess_est_multi.fit(X, y, sensitive_features=A.loc[:, ['SF1', 'SF2']])
    postprocess_est_combined.fit(X, y, sensitive_features=A.loc[:, 'SF1+2'])
    # Prediction set = 8 fresh rows prepended to the training data.
    X_test = pd.concat([
        pd.DataFrame([[5, 4], [7, 2], [0, 3], [1, 2], [-2, 9], [1, 1], [0, 5], [-3, 3]]),
        X])
    A_test = pd.concat([
        pd.DataFrame([[a1, a3, a1 + a3], [a1, a3, a1 + a3],
                      [a2, a3, a2 + a3], [a2, a3, a2 + a3],
                      [a2, a4, a2 + a4], [a2, a4, a2 + a4],
                      [a1, a4, a1 + a4], [a1, a4, a1 + a4]],
                     columns=['SF1', 'SF2', 'SF1+2']),
        A])
    y_test = pd.concat([pd.Series([0, 1, 0, 1, 0, 1, 0, 1]), y])
    # Fixed random_state so the randomized thresholding is reproducible.
    y_pred_multi = postprocess_est_multi.predict(
        X_test, sensitive_features=A_test.loc[:, ['SF1', 'SF2']], random_state=1)
    y_pred_combined = postprocess_est_combined.predict(
        X_test, sensitive_features=A_test.loc[:, 'SF1+2'], random_state=1)
    metricframe_multi = MetricFrame(
        fairness_metrics,
        y_test,
        y_pred_multi,
        sensitive_features=A_test.loc[:, ['SF1', 'SF2']]
    )
    metricframe_combined = MetricFrame(
        fairness_metrics,
        y_test,
        y_pred_combined,
        sensitive_features=A_test.loc[:, 'SF1+2']
    )
    # multi - names after escaping
    a3_escaped = a3.replace(',', '\\,')
    a4_escaped = a4.replace('\\', '\\\\')
    a13 = f"{a1},{a3_escaped}"
    a14 = f"{a1},{a4_escaped}"
    a23 = f"{a2},{a3_escaped}"
    a24 = f"{a2},{a4_escaped}"
    # Per-group metrics must agree between the concatenated key and the
    # (SF1, SF2) tuple index of the multi-column frame.
    assert (metricframe_combined.overall == metricframe_multi.overall).all()
    assert (metricframe_combined.by_group.loc[a1+a3] ==
            metricframe_multi.by_group.loc[(a1, a3)]).all()
    assert (metricframe_combined.by_group.loc[a2+a3] ==
            metricframe_multi.by_group.loc[(a2, a3)]).all()
    assert (metricframe_combined.by_group.loc[a1+a4] ==
            metricframe_multi.by_group.loc[(a1, a4)]).all()
    assert (metricframe_combined.by_group.loc[a2+a4] ==
            metricframe_multi.by_group.loc[(a2, a4)]).all()
    # comparing string representations of interpolation dicts is sufficient
    assert str(postprocess_est_combined.interpolated_thresholder_.interpolation_dict[a1+a3]) == \
        str(postprocess_est_multi.interpolated_thresholder_.interpolation_dict[a13])
    assert str(postprocess_est_combined.interpolated_thresholder_.interpolation_dict[a1+a4]) == \
        str(postprocess_est_multi.interpolated_thresholder_.interpolation_dict[a14])
    assert str(postprocess_est_combined.interpolated_thresholder_.interpolation_dict[a2+a3]) == \
        str(postprocess_est_multi.interpolated_thresholder_.interpolation_dict[a23])
    assert str(postprocess_est_combined.interpolated_thresholder_.interpolation_dict[a2+a4]) == \
        str(postprocess_est_multi.interpolated_thresholder_.interpolation_dict[a24])
| 42.166667 | 97 | 0.622624 |
e8cc56f26562298764147ef3d01135f02173bf5a | 23 | py | Python | spotlight/__init__.py | jordanwinemiller/spotlight | c3bae1130412238b968a6e80023f1cb6fbaaa5a7 | [
"MIT"
] | 42 | 2020-10-13T19:47:37.000Z | 2022-03-26T09:56:46.000Z | spotlight/__init__.py | jordanwinemiller/spotlight | c3bae1130412238b968a6e80023f1cb6fbaaa5a7 | [
"MIT"
] | 3 | 2020-01-29T16:37:50.000Z | 2022-02-04T12:01:51.000Z | spotlight/__init__.py | jordanwinemiller/spotlight | c3bae1130412238b968a6e80023f1cb6fbaaa5a7 | [
"MIT"
] | 5 | 2020-11-30T14:48:44.000Z | 2022-02-19T17:18:21.000Z | __version__ = 'v0.1.5'
| 11.5 | 22 | 0.652174 |
b8fac6278aea4ff61053316cf7c6403f0c40d6e8 | 957 | py | Python | utils/model.py | david8862/keras-CenterNet | e74b933f6dd5ffac04f2de3eb0d887742be8490f | [
"Apache-2.0"
] | 195 | 2019-11-10T10:50:22.000Z | 2022-03-18T01:37:05.000Z | utils/model.py | gellston/keras-CenterNet | 39cb123a94d7774490df28e637240de03577f912 | [
"Apache-2.0"
] | 57 | 2019-11-18T05:55:29.000Z | 2022-03-30T06:09:39.000Z | utils/model.py | gellston/keras-CenterNet | 39cb123a94d7774490df28e637240de03577f912 | [
"Apache-2.0"
] | 58 | 2019-11-11T08:13:56.000Z | 2022-03-14T09:49:47.000Z | """
Copyright 2017-2018 Fizyr (https://fizyr.com)
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
def freeze(model):
    """
    Mark every layer of `model` as non-trainable, in place.

    Frozen layers keep their current weights during subsequent training.
    The mutated model is also returned so calls can be chained.
    """
    for current_layer in model.layers:
        current_layer.trainable = False
    return model
| 31.9 | 87 | 0.745037 |
b84e14e2618c042a04491b43337c03f3a0ee0084 | 1,319 | py | Python | deoldify/dataset.py | Dakini/AnimeColorDeOldify | 5151e5fea87fec57366aed7afba54f743ffdfd10 | [
"MIT"
] | 115 | 2020-03-26T21:30:52.000Z | 2022-03-30T11:45:03.000Z | deoldify/dataset.py | A2K2005/AnimeColorDeOldify | 5151e5fea87fec57366aed7afba54f743ffdfd10 | [
"MIT"
] | 3 | 2020-04-02T18:26:06.000Z | 2022-03-12T00:20:11.000Z | deoldify/dataset.py | A2K2005/AnimeColorDeOldify | 5151e5fea87fec57366aed7afba54f743ffdfd10 | [
"MIT"
] | 18 | 2020-03-26T21:31:06.000Z | 2022-03-21T08:56:07.000Z | import fastai
from fastai import *
from fastai.core import *
from fastai.vision.transform import get_transforms
from fastai.vision.data import ImageImageList, ImageDataBunch, imagenet_stats
from .augs import noisify
def get_colorize_data(
    sz: int,
    bs: int,
    crappy_path: Path,
    good_path: Path,
    random_seed: int = None,
    keep_pct: float = 1.0,
    num_workers: int = 8,
    stats: tuple = imagenet_stats,
    xtra_tfms=None,
) -> ImageDataBunch:
    """Build a paired (degraded -> original) ImageDataBunch for colorization training.

    Args:
        sz: target image size after transforms.
        bs: batch size.
        crappy_path: root folder of the degraded input images.
        good_path: root folder of the ground-truth images; each input is paired
            with the file at the same relative path under this root.
        random_seed: seed for partial sampling and the 10% validation split
            (None = nondeterministic).
        keep_pct: fraction of the dataset to use.
        num_workers: dataloader worker processes.
        stats: normalization statistics (defaults to ImageNet stats).
        xtra_tfms: extra fastai transforms appended to the augmentation list.

    Returns:
        An ImageDataBunch with ``c = 3`` output channels, normalized on both
        inputs and targets.
    """
    # Default resolved inside the body: the original `xtra_tfms=[]` was a
    # shared mutable default argument.
    if xtra_tfms is None:
        xtra_tfms = []
    src = (
        ImageImageList.from_folder(crappy_path, convert_mode='RGB')
        .use_partial_data(sample_pct=keep_pct, seed=random_seed)
        .split_by_rand_pct(0.1, seed=random_seed)
    )
    data = (
        src.label_from_func(lambda x: good_path / x.relative_to(crappy_path))
        .transform(
            get_transforms(
                max_zoom=1.2, max_lighting=0.5, max_warp=0.25, xtra_tfms=xtra_tfms
            ),
            size=sz,
            tfm_y=True,
        )
        .databunch(bs=bs, num_workers=num_workers, no_check=True)
        .normalize(stats, do_y=True)
    )
    data.c = 3
    return data
def get_dummy_databunch(stats=imagenet_stats) -> ImageDataBunch:
    """Return a minimal 1x1 databunch backed by ./dummy/ for both inputs and targets."""
    dummy_root = Path('./dummy/')
    return get_colorize_data(
        sz=1,
        bs=1,
        crappy_path=dummy_root,
        good_path=dummy_root,
        stats=stats,
        keep_pct=0.001,
    )
| 26.918367 | 82 | 0.648976 |
83d41bbd2bab53a6c0c4bd99fc847a316235ddca | 1,202 | py | Python | video_tools/commands/compress.py | bertonha/viceo_compressor | 601f222a2d5aa67a03cd3f792ba293346386d25f | [
"MIT"
] | 2 | 2019-08-15T03:34:36.000Z | 2019-08-15T04:36:44.000Z | video_tools/commands/compress.py | bertonha/video_compressor | 601f222a2d5aa67a03cd3f792ba293346386d25f | [
"MIT"
] | null | null | null | video_tools/commands/compress.py | bertonha/video_compressor | 601f222a2d5aa67a03cd3f792ba293346386d25f | [
"MIT"
] | 1 | 2019-10-08T12:45:08.000Z | 2019-10-08T12:45:08.000Z | from pathlib import Path
import click
from video_tools.utils import (
call_ffmpeg,
delete_file,
filter_compressible_files,
generate_output_filename,
print_style,
)
# Root click group; the `compress` command below registers itself on it.
@click.group()
def compress_group():
    pass
@compress_group.command()
@click.option("-r", "--rotate", default=False, is_flag=True)
@click.option("-d", "--delete", default=False, is_flag=True)
@click.option("--resolution", default=None)
@click.argument("initial_path", type=click.Path(exists=True))
def compress(initial_path, delete, rotate, resolution):
    """Re-encode one video file, or every compressible file under a directory.

    Files whose encode fails are skipped; with --delete, each source is removed
    only after its compressed output exists.
    """
    root = Path(initial_path)
    if root.is_dir():
        targets = list(filter_compressible_files(root.glob("**/*.*")))
    else:
        targets = [root]
    total_files = len(targets)
    for index, in_file in enumerate(targets):
        out_file = generate_output_filename(in_file)
        print_style(f"Compressing file {index + 1} of {total_files}")
        print_style(f"Input file: {in_file}")
        print_style(f"Output file: {out_file}")
        try:
            call_ffmpeg(in_file, out_file, rotate, resolution)
        except Exception:
            # Best effort: one failed encode must not abort the whole batch.
            continue
        # Reached only when call_ffmpeg raised nothing (the original's `else`).
        if delete and out_file.exists():
            delete_file(in_file)
| 25.041667 | 69 | 0.650582 |
2e4a2603d1019c250d930cd9487d1f3fe115b911 | 7,881 | py | Python | libs/pyzmq/Leap_sample/Leap_sample.py | quanhua92/learning-notes | a9c50d3955c51bb58f4b012757c550b76c5309ef | [
"Apache-2.0"
] | null | null | null | libs/pyzmq/Leap_sample/Leap_sample.py | quanhua92/learning-notes | a9c50d3955c51bb58f4b012757c550b76c5309ef | [
"Apache-2.0"
] | null | null | null | libs/pyzmq/Leap_sample/Leap_sample.py | quanhua92/learning-notes | a9c50d3955c51bb58f4b012757c550b76c5309ef | [
"Apache-2.0"
] | null | null | null | import zmq
class SampleListener:
    """Minimal listener that pushes dummy frame data over a ZeroMQ PUSH socket."""

    # Both are populated by on_init(); class-level placeholders until then.
    context = None
    zmq_socket = None

    def on_init(self):
        """Create the ZeroMQ context and bind a PUSH socket on localhost:5557."""
        print("\nInitialized")
        self.context = zmq.Context()
        self.zmq_socket = self.context.socket(zmq.PUSH)
        self.zmq_socket.bind("tcp://127.0.0.1:5557")

    def on_frame(self):
        """Send one dummy message ("1,1,1\\n"), JSON-encoded, through the socket."""
        print("\nOn Frame")
        a = b = c = 1
        payload = ",".join((str(a), str(b), str(c))) + '\n'
        self.zmq_socket.send_json(payload)
def main():
    """Create a listener, open its socket, and push 100 sample frames."""
    listener = SampleListener()
    listener.on_init()
    for _ in range(100):
        listener.on_frame()


if __name__ == "__main__":
    main()
# import os, sys, thread, time
# import Leap
# import csv
# import zmq
# from Leap import CircleGesture, KeyTapGesture, ScreenTapGesture, SwipeGesture
# def write_csv(data):
# with open('output.csv','a') as fp:
# a = csv.writer(fp)
# a.writerows(data)
# class SampleListener(Leap.Listener):
# finger_names = ['Thumb', 'Index', 'Middle', 'Ring', 'Pinky']
# bone_names = ['Metarpal', 'Proximal', 'Intermediate', 'Distal']
# state_names = ['STATE_INVALID', 'STATE_START', 'STATE_UPDATE', 'STATE_END']
# context = None
# zmq_socket = None
# count = 0
# alpha = 0.5
# min_angle_PP_MC_index = 180
# max_angle_PP_MC_index = 0
# min_angle_MP_PP_index = 180
# max_angle_MP_PP_index = 0
# min_angle_DP_MP_index = 180
# max_angle_DP_MP_index = 0
# angle_PP_MC_index_1 = 0
# angle_PP_MC_index_0 = 0
# angle_MP_PP_index_1 = 0
# angle_MP_PP_index_0 = 0
# angle_DP_MP_index_1 = 0
# angle_DP_MP_index_0 = 0
# def on_init(self, controller):
# print "\nInitialized"
# self.context = zmq.Context()
# self.zmq_socket = self.context.socket(zmq.PUSH)
# self.zmq_socket.bind("tcp://127.0.0.1:5557")
# self.data=[0,0,0]
# def on_connect(self, controller):
# print "\nMotion Sensor Connected"
# controller.enable_gesture(Leap.Gesture.TYPE_CIRCLE);
# controller.enable_gesture(Leap.Gesture.TYPE_KEY_TAP);
# controller.enable_gesture(Leap.Gesture.TYPE_SCREEN_TAP);
# controller.enable_gesture(Leap.Gesture.TYPE_SWIPE);
# def on_disconnect(self, controller):
# print "\nMotion Sensor Disconnected"
# def on_exit(self, controller):
# print "Exit!"
# def on_frame(self, controller):
# ##ser = serial.Serial('COM4',1000000)
# frame = controller.frame()
# for hand in frame.hands:
# print "\ncount: ", SampleListener.count
# index_finger_list = hand.fingers.finger_type(Leap.Finger.TYPE_INDEX)
# index_finger = index_finger_list[0] #since there is only one per hand
# metacarpal_index = index_finger.bone(Leap.Bone.TYPE_METACARPAL)
# direction_metacarpal_index = metacarpal_index.direction
# ## print direction_metacarpal_index
# ## Angle of Proximal to Metacarpal
# proximal_index = index_finger.bone(Leap.Bone.TYPE_PROXIMAL)
# direction_proximal_index = proximal_index.direction
# ## print direction_proximal_index
# SampleListener.angle_PP_MC_index_1 = (direction_proximal_index.angle_to(direction_metacarpal_index) / 3.14 * 180)
# if (SampleListener.angle_PP_MC_index_1 < SampleListener.min_angle_PP_MC_index):
# SampleListener.min_angle_PP_MC_index = SampleListener.angle_PP_MC_index_1
# if (SampleListener.angle_PP_MC_index_1 > SampleListener.max_angle_PP_MC_index):
# SampleListener.max_angle_PP_MC_index = SampleListener.angle_PP_MC_index_1
# ## Angle of Intermediate to Proximal
# intermediate_index = index_finger.bone(Leap.Bone.TYPE_INTERMEDIATE)
# direction_intermediate_index = intermediate_index.direction
# ## print direction_proximal_index
# SampleListener.angle_MP_PP_index_1 = (direction_intermediate_index.angle_to(direction_proximal_index) / 3.14 * 180)
# if (SampleListener.angle_MP_PP_index_1 < SampleListener.min_angle_MP_PP_index):
# SampleListener.min_angle_MP_PP_index = SampleListener.angle_MP_PP_index_1
# if (SampleListener.angle_MP_PP_index_1 > SampleListener.max_angle_MP_PP_index):
# SampleListener.max_angle_MP_PP_index = SampleListener.angle_MP_PP_index_1
# ## Angle of Distal to Intermediate
# distal_index = index_finger.bone(Leap.Bone.TYPE_DISTAL)
# direction_distal_index = distal_index.direction
# ## print direction_distal_index
# SampleListener.angle_DP_MP_index_1 = (direction_distal_index.angle_to(direction_intermediate_index) / 3.14 * 180)
# if (SampleListener.angle_DP_MP_index_1 < SampleListener.min_angle_DP_MP_index):
# SampleListener.min_angle_DP_MP_index = SampleListener.angle_DP_MP_index_1
# if (SampleListener.angle_DP_MP_index_1 > SampleListener.max_angle_DP_MP_index):
# SampleListener.max_angle_DP_MP_index = SampleListener.angle_DP_MP_index_1
# ## Send index to Servo
# ##This segment is to filter noises with alpha index:
# angle_PP_MC_index = (SampleListener.alpha * SampleListener.angle_PP_MC_index_1 + (1 - SampleListener.alpha) * SampleListener.angle_PP_MC_index_0)
# angle_MP_PP_index = (SampleListener.alpha * SampleListener.angle_MP_PP_index_1 + (1 - SampleListener.alpha) * SampleListener.angle_MP_PP_index_0)
# angle_DP_MP_index = (SampleListener.alpha * SampleListener.angle_DP_MP_index_1 + (1 - SampleListener.alpha) * SampleListener.angle_DP_MP_index_0)
# ##print "PP_MC: " + str(round(angle_PP_MC_index, 2)) + " MP_PP: " + str(round(angle_MP_PP_index, 2)) + " DP_MP: " + str(round(angle_DP_MP_index, 2))
# a = round(angle_PP_MC_index*1.15, 2)
# b = round(angle_MP_PP_index, 2)
# c = round(angle_DP_MP_index, 2)
# self.data=[a,b,c]
# print self.data
# ## Writing 3 angle values to csv and txt
# fd = open('data_leapmotion.txt','a')
# fd.write(str(a)+","+str(b)+","+str(c)+'\n')
# fd.close()
# '''
# with open('data_leapmotion.txt', 'rb') as f_data, open('data_leapmotion.csv', 'wb') as f_output:
# csv_data = csv.reader(f_data)
# csv_output = csv.writer(f_output)
# csv_output.writerow(['PP_MC', 'MP_PP', 'DP_MP'])
# for row in csv_data:
# csv_output.writerow(row)
# '''
# # Start your result manager and workers before you start your producer
# work_message = str(a)+","+str(b)+","+str(c)+'\n'
# self.zmq_socket.send_json(work_message)
# time.sleep(0.05)
# SampleListener.angle_PP_MC_index_0 = SampleListener.angle_PP_MC_index_1
# SampleListener.angle_MP_PP_index_0 = SampleListener.angle_MP_PP_index_1
# SampleListener.angle_DP_MP_index_0 = SampleListener.angle_DP_MP_index_1
# SampleListener.count += 1
# def main():
# # Create a sample listener and controller
# listener = SampleListener()
# controller = Leap.Controller()
# # Have the sample listener receive events from the controller
# controller.add_listener(listener)
# # Keep this process running until Enter is pressed
# print "\nPress Enter to quit..."
# try:
# sys.stdin.readline()
# except KeyboardInterrupt:
# pass
# finally:
# # Remove the sample listener when done
# controller.remove_listener(listener)
# if __name__ == "__main__":
# main() | 42.370968 | 162 | 0.645984 |
6868ccb4108888ca08720bd241e8a0115ff8f7f7 | 8,718 | py | Python | DataStreamer/_Rtree.py | SanityLacking/Data-streamer | d9a051dd2c5d26aad58cceb9ea24a4e7cb0cf779 | [
"MIT"
] | 2 | 2020-03-01T11:12:18.000Z | 2021-11-25T04:03:26.000Z | DataStreamer/_Rtree.py | SanityLacking/DataStreamer | d9a051dd2c5d26aad58cceb9ea24a4e7cb0cf779 | [
"MIT"
] | null | null | null | DataStreamer/_Rtree.py | SanityLacking/DataStreamer | d9a051dd2c5d26aad58cceb9ea24a4e7cb0cf779 | [
"MIT"
] | null | null | null | '''
Rtree.py
Author: Yi Liu
Author: Yi Liu https://github.com/moment-of-peace/python-rtree
Modified by: Cailen Robertson https://github.com/SanityLacking
Classes representing parts of a r-tree
'''
import math
# a point in r-tree
class Point:
    """A 2-D point with an identifier, built from a (ident, x, y) triple."""

    def __init__(self, pointInfo):
        self.ident = pointInfo[0]
        self.x, self.y = pointInfo[1], pointInfo[2]

    def position(self, index):
        # Return the coordinate selected by index: 1 -> x, 2 -> y
        # (anything else falls through and returns None).
        if index == 2:
            return self.y
        if index == 1:
            return self.x
# r-tree node (extended by leaf and branch)
class Node:
    """Base r-tree node: tracks children, a bounding box, and its centre.

    ``range`` holds the MBR as [xmin, xmax, ymin, ymax]; ``centre`` holds
    the box midpoint [cx, cy].  Subclasses (Leaf, Branch) initialise both
    in their constructors before the first call to ``addChild``.
    """

    def __init__(self, Bvalue, level):
        self.childList = []
        self.range = []       # [xmin, xmax, ymin, ymax]; seeded by subclass
        self.centre = []      # [cx, cy]; seeded by subclass
        self.Bvalue = Bvalue  # max fan-out before a split is required
        self.paren = None     # parent node (name kept as-is for callers)
        self.level = level

    def addChild(self, child):
        """Append a child (Point or Node) and grow the bounding box."""
        self.childList.append(child)
        self.update(child)

    def update(self, child):
        """Extend the MBR for a new child and recompute the centre."""
        if isinstance(child, Point):
            # A point is a degenerate box.
            self.updateRange([child.x, child.x, child.y, child.y])
        elif isinstance(child, Node):
            self.updateRange(child.range)
        self.centre[0] = sum(self.range[0:2]) / 2
        self.centre[1] = sum(self.range[2:4]) / 2

    def updateRange(self, newRange):
        """Merge another [xmin, xmax, ymin, ymax] box into self.range."""
        # min/max replace the original element-wise if/assign pairs.
        self.range[0] = min(self.range[0], newRange[0])
        self.range[1] = max(self.range[1], newRange[1])
        self.range[2] = min(self.range[2], newRange[2])
        self.range[3] = max(self.range[3], newRange[3])

    def isOverFlow(self):
        """True when the node holds more than Bvalue children."""
        return len(self.childList) > self.Bvalue

    def disToCentre(self, point):
        """Euclidean distance from the node centre to ``point``."""
        return math.hypot(self.centre[0] - point.x, self.centre[1] - point.y)

    def getIncrease(self, point):
        """Perimeter growth needed for the MBR to cover ``point``."""
        result = 0
        # increase on x axis
        if point.x > self.range[1]:
            result += point.x - self.range[1]
        elif point.x < self.range[0]:
            result += self.range[0] - point.x
        # increase on y axis
        if point.y > self.range[3]:
            result += point.y - self.range[3]
        elif point.y < self.range[2]:
            result += self.range[2] - point.y
        return result

    def getPerimeter(self):
        """Width + height of the MBR (half the true perimeter)."""
        return self.range[1] - self.range[0] + self.range[3] - self.range[2]

    def split(self):
        # Abstract: overridden by Leaf and Branch.
        return None
# a leaf node which contains only points
class Leaf(Node):
    """Leaf node of the r-tree: its children are Points."""

    def __init__(self, Bvalue, level, point):
        super().__init__(Bvalue, level)
        # Seed the MBR and centre from the first point so later updates
        # only ever need to widen the box.
        self.range = [point.x, point.x, point.y, point.y]
        self.centre = [point.x, point.y]

    def split(self):
        """Split into two leaves, picking the axis (x or y) whose best
        partition yields the smaller total perimeter."""
        # sort by x coordinate
        self.sortChildren(1)
        nodes = self.getBestSplit()
        periSum = nodes[0].getPerimeter() + nodes[1].getPerimeter()
        # sort by y coordinate
        self.sortChildren(2)
        newNodes = self.getBestSplit()
        newSum = newNodes[0].getPerimeter() + newNodes[1].getPerimeter()
        if newSum < periSum:
            return newNodes
        return nodes

    def sortChildren(self, index):
        """Sort children in place by x (index 1) or y (index 2).

        Replaces the original O(n^2) exchange sort with the built-in
        stable O(n log n) sort; split results for tied coordinates may
        differ in child order but remain valid best splits.
        """
        self.childList.sort(key=lambda child: child.position(index))

    def getBestSplit(self):
        """Sweep every legal partition point of the (sorted) children and
        return the [left, right] Leaf pair with the minimal perimeter sum.

        Each side must receive at least floor(0.4 * Bvalue) children.
        """
        periSum = float('inf')
        nodes = []
        b = math.floor(0.4 * self.Bvalue)
        for i in range(b, len(self.childList) - b + 1):
            # MBR of the first i points.
            node1 = Leaf(self.Bvalue, 1, self.childList[0])
            node1.paren = self.paren
            for j in range(0, i):
                node1.addChild(self.childList[j])
            # MBR of the remaining points.
            node2 = Leaf(self.Bvalue, 1, self.childList[i])
            node2.paren = self.paren
            for j in range(i, len(self.childList)):
                node2.addChild(self.childList[j])
            # Keep the partition with the smallest combined perimeter.
            newSum = node1.getPerimeter() + node2.getPerimeter()
            if newSum < periSum:
                periSum = newSum
                nodes = [node1, node2]
        return nodes
# a branch node which contains only nodes
class Branch(Node):
    """Internal r-tree node: its children are Nodes (Leafs or Branches)."""

    def __init__(self, Bvalue, level, node):
        super().__init__(Bvalue, level)
        # Copy (not alias) the seed node's MBR and centre.
        self.range = node.range[:]
        self.centre = node.centre[:]

    def chooseChild(self, point):
        """Pick the child whose centre is closest to ``point``.

        When two candidates are within ~7% of each other (the 0.93/1.07
        band below), the markedly less-occupied child (>2x fewer children)
        is preferred instead of the strictly closer one.
        """
        result = None
        increase = None
        for child in self.childList:
            newIncrease = child.disToCentre(point)
            #newIncrease = child.getIncrease(point)
            if increase is None:  # was `== None`; identity test is correct here
                increase = newIncrease
                result = child
            elif increase != 0 and 0.93 < newIncrease / increase < 1.07:
                # Near-tie on distance: balance occupancy instead.
                if len(result.childList) / len(child.childList) > 2:
                    increase = newIncrease
                    result = child
            elif newIncrease < increase:
                increase = newIncrease
                result = child
        return result

    def split(self):
        """Split into two branches; try all four MBR edges as sort keys
        and keep the partition with the smallest perimeter sum."""
        # sort by xleft and get the sum of perimeter
        self.sortChildren(0)
        nodes = self.getBestSplit()
        periSum = nodes[0].getPerimeter() + nodes[1].getPerimeter()
        # sort by xright, ybottom, ytop respectively
        for i in range(1, 4):
            self.sortChildren(i)
            newNodes = self.getBestSplit()
            newSum = newNodes[0].getPerimeter() + newNodes[1].getPerimeter()
            if newSum < periSum:
                periSum = newSum
                nodes = newNodes
        # Re-parent the children of the two new branches.
        for node in nodes[0].childList:
            node.paren = nodes[0]
        for node in nodes[1].childList:
            node.paren = nodes[1]
        return nodes

    def sortChildren(self, index):
        """Sort children in place by self.range[index]
        (0=xmin, 1=xmax, 2=ymin, 3=ymax).

        Replaces the original O(n^2) exchange sort with the built-in
        stable O(n log n) sort.
        """
        self.childList.sort(key=lambda child: child.range[index])

    def getBestSplit(self):
        """Same partition sweep as Leaf.getBestSplit, producing Branches."""
        periSum = float('inf')
        nodes = []
        b = math.floor(0.4 * self.Bvalue)
        for i in range(b, len(self.childList) - b + 1):
            # MBR of the first i child boxes.
            node1 = Branch(self.Bvalue, self.level, self.childList[0])
            node1.paren = self.paren
            for j in range(0, i):
                node1.addChild(self.childList[j])
            # MBR of the remaining child boxes.
            node2 = Branch(self.Bvalue, self.level, self.childList[i])
            node2.paren = self.paren
            for j in range(i, len(self.childList)):
                node2.addChild(self.childList[j])
            # Keep the partition with the smallest combined perimeter.
            newSum = node1.getPerimeter() + node2.getPerimeter()
            if newSum < periSum:
                periSum = newSum
                nodes = [node1, node2]
        return nodes
| 34.872 | 95 | 0.563088 |
6979cbcd4e71e27d30aa2f87fef53d9341bb5645 | 237,603 | py | Python | buildscripts/cpplint.py | danx0r/mongo | 70d4944c235bcdf7fbbc63971099563d2af72956 | [
"Apache-2.0"
] | 72 | 2020-06-12T06:33:41.000Z | 2021-03-22T03:15:56.000Z | buildscripts/cpplint.py | danx0r/mongo | 70d4944c235bcdf7fbbc63971099563d2af72956 | [
"Apache-2.0"
] | 9 | 2020-07-02T09:36:49.000Z | 2021-03-25T23:54:00.000Z | buildscripts/cpplint.py | danx0r/mongo | 70d4944c235bcdf7fbbc63971099563d2af72956 | [
"Apache-2.0"
] | 14 | 2020-06-12T03:08:03.000Z | 2021-02-03T11:43:09.000Z | #!/usr/bin/env python
#
# Copyright (c) 2009 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Does google-lint on c++ files.
The goal of this script is to identify places in the code that *may*
be in non-compliance with google style. It does not attempt to fix
up these problems -- the point is to educate. It does also not
attempt to find all problems, or to ensure that everything it does
find is legitimately a problem.
In particular, we can get very confused by /* and // inside strings!
We do a small hack, which is to ignore //'s with "'s after them on the
same line, but it is far from perfect (in either direction).
"""
import codecs
import copy
import getopt
import math # for log
import os
import re
import sre_compile
import string
import sys
import unicodedata
_USAGE = """
Syntax: cpplint.py [--verbose=#] [--output=vs7] [--filter=-x,+y,...]
[--counting=total|toplevel|detailed] [--root=subdir]
[--linelength=digits]
<file> [file] ...
The style guidelines this tries to follow are those in
http://google-styleguide.googlecode.com/svn/trunk/cppguide.xml
Every problem is given a confidence score from 1-5, with 5 meaning we are
certain of the problem, and 1 meaning it could be a legitimate construct.
This will miss some errors, and is not a substitute for a code review.
To suppress false-positive errors of a certain category, add a
'NOLINT(category)' comment to the line. NOLINT or NOLINT(*)
suppresses errors of all categories on that line.
The files passed in will be linted; at least one file must be provided.
Default linted extensions are .cc, .cpp, .cu, .cuh and .h. Change the
extensions with the --extensions flag.
Flags:
output=vs7
By default, the output is formatted to ease emacs parsing. Visual Studio
compatible output (vs7) may also be used. Other formats are unsupported.
verbose=#
Specify a number 0-5 to restrict errors to certain verbosity levels.
filter=-x,+y,...
Specify a comma-separated list of category-filters to apply: only
error messages whose category names pass the filters will be printed.
(Category names are printed with the message and look like
"[whitespace/indent]".) Filters are evaluated left to right.
"-FOO" and "FOO" means "do not print categories that start with FOO".
"+FOO" means "do print categories that start with FOO".
Examples: --filter=-whitespace,+whitespace/braces
--filter=whitespace,runtime/printf,+runtime/printf_format
--filter=-,+build/include_what_you_use
To see a list of all the categories used in cpplint, pass no arg:
--filter=
counting=total|toplevel|detailed
The total number of errors found is always printed. If
'toplevel' is provided, then the count of errors in each of
the top-level categories like 'build' and 'whitespace' will
also be printed. If 'detailed' is provided, then a count
is provided for each category like 'build/class'.
root=subdir
The root directory used for deriving header guard CPP variable.
By default, the header guard CPP variable is calculated as the relative
path to the directory that contains .git, .hg, or .svn. When this flag
is specified, the relative path is calculated from the specified
directory. If the specified directory does not exist, this flag is
ignored.
Examples:
Assuming that src/.git exists, the header guard CPP variables for
src/chrome/browser/ui/browser.h are:
No flag => CHROME_BROWSER_UI_BROWSER_H_
--root=chrome => BROWSER_UI_BROWSER_H_
--root=chrome/browser => UI_BROWSER_H_
linelength=digits
This is the allowed line length for the project. The default value is
80 characters.
Examples:
--linelength=120
extensions=extension,extension,...
The allowed file extensions that cpplint will check
Examples:
--extensions=hpp,cpp
cpplint.py supports per-directory configurations specified in CPPLINT.cfg
files. CPPLINT.cfg file can contain a number of key=value pairs.
Currently the following options are supported:
set noparent
filter=+filter1,-filter2,...
exclude_files=regex
linelength=80
"set noparent" option prevents cpplint from traversing directory tree
upwards looking for more .cfg files in parent directories. This option
is usually placed in the top-level project directory.
The "filter" option is similar in function to --filter flag. It specifies
message filters in addition to the |_DEFAULT_FILTERS| and those specified
through --filter command-line flag.
"exclude_files" allows to specify a regular expression to be matched against
a file name. If the expression matches, the file is skipped and not run
through liner.
"linelength" allows to specify the allowed line length for the project.
CPPLINT.cfg has an effect on files in the same directory and all
sub-directories, unless overridden by a nested configuration file.
Example file:
filter=-build/include_order,+build/include_alpha
exclude_files=.*\.cc
The above example disables build/include_order warning and enables
build/include_alpha as well as excludes all .cc from being
processed by linter, in the current directory (where the .cfg
file is located) and all sub-directories.
"""
# We categorize each error message we print. Here are the categories.
# We want an explicit list so we can list them all in cpplint --filter=.
# If you add a new error message with a new category, add it to the list
# here! cpplint_unittest.py should tell you if you forget to do this.
_ERROR_CATEGORIES = [
'build/class',
'build/c++11',
'build/deprecated',
'build/endif_comment',
'build/explicit_make_pair',
'build/forward_decl',
'build/header_guard',
'build/include',
'build/include_alpha',
'build/include_order',
'build/include_what_you_use',
'build/namespaces',
'build/printf_format',
'build/storage_class',
'legal/copyright',
'mongo/polyfill',
'readability/alt_tokens',
'readability/braces',
'readability/casting',
'readability/check',
'readability/constructors',
'readability/fn_size',
'readability/function',
'readability/inheritance',
'readability/multiline_comment',
'readability/multiline_string',
'readability/namespace',
'readability/nolint',
'readability/nul',
'readability/streams',
'readability/todo',
'readability/utf8',
'runtime/arrays',
'runtime/casting',
'runtime/explicit',
'runtime/int',
'runtime/init',
'runtime/invalid_increment',
'runtime/member_string_references',
'runtime/memset',
'runtime/indentation_namespace',
'runtime/operator',
'runtime/printf',
'runtime/printf_format',
'runtime/references',
'runtime/string',
'runtime/threadsafe_fn',
'runtime/vlog',
'whitespace/blank_line',
'whitespace/braces',
'whitespace/comma',
'whitespace/comments',
'whitespace/empty_conditional_body',
'whitespace/empty_loop_body',
'whitespace/end_of_line',
'whitespace/ending_newline',
'whitespace/forcolon',
'whitespace/indent',
'whitespace/line_length',
'whitespace/newline',
'whitespace/operators',
'whitespace/parens',
'whitespace/semicolon',
'whitespace/tab',
'whitespace/todo'
]
# The default state of the category filter. This is overridden by the --filter=
# flag. By default all errors are on, so only add here categories that should be
# off by default (i.e., categories that must be enabled by the --filter= flags).
# All entries here should start with a '-' or '+', as in the --filter= flag.
_DEFAULT_FILTERS = ['-build/include_alpha']
# We used to check for high-bit characters, but after much discussion we
# decided those were OK, as long as they were in UTF-8 and didn't represent
# hard-coded international strings, which belong in a separate i18n file.
# C++ headers
_CPP_HEADERS = frozenset([
# Legacy
'algobase.h',
'algo.h',
'alloc.h',
'builtinbuf.h',
'bvector.h',
'complex.h',
'defalloc.h',
'deque.h',
'editbuf.h',
'fstream.h',
'function.h',
'hash_map',
'hash_map.h',
'hash_set',
'hash_set.h',
'hashtable.h',
'heap.h',
'indstream.h',
'iomanip.h',
'iostream.h',
'istream.h',
'iterator.h',
'list.h',
'map.h',
'multimap.h',
'multiset.h',
'ostream.h',
'pair.h',
'parsestream.h',
'pfstream.h',
'procbuf.h',
'pthread_alloc',
'pthread_alloc.h',
'rope',
'rope.h',
'ropeimpl.h',
'set.h',
'slist',
'slist.h',
'stack.h',
'stdiostream.h',
'stl_alloc.h',
'stl_relops.h',
'streambuf.h',
'stream.h',
'strfile.h',
'strstream.h',
'tempbuf.h',
'tree.h',
'type_traits.h',
'vector.h',
# 17.6.1.2 C++ library headers
'algorithm',
'array',
'atomic',
'bitset',
'chrono',
'codecvt',
'complex',
'condition_variable',
'deque',
'exception',
'forward_list',
'fstream',
'functional',
'future',
'initializer_list',
'iomanip',
'ios',
'iosfwd',
'iostream',
'istream',
'iterator',
'limits',
'list',
'locale',
'map',
'memory',
'mutex',
'new',
'numeric',
'ostream',
'queue',
'random',
'ratio',
'regex',
'set',
'sstream',
'stack',
'stdexcept',
'streambuf',
'string',
'strstream',
'system_error',
'thread',
'tuple',
'typeindex',
'typeinfo',
'type_traits',
'unordered_map',
'unordered_set',
'utility',
'valarray',
'vector',
# 17.6.1.2 C++ headers for C library facilities
'cassert',
'ccomplex',
'cctype',
'cerrno',
'cfenv',
'cfloat',
'cinttypes',
'ciso646',
'climits',
'clocale',
'cmath',
'csetjmp',
'csignal',
'cstdalign',
'cstdarg',
'cstdbool',
'cstddef',
'cstdint',
'cstdio',
'cstdlib',
'cstring',
'ctgmath',
'ctime',
'cuchar',
'cwchar',
'cwctype',
])
# These headers are excluded from [build/include] and [build/include_order]
# checks:
# - Anything not following google file name conventions (containing an
# uppercase character, such as Python.h or nsStringAPI.h, for example).
# - Lua headers.
_THIRD_PARTY_HEADERS_PATTERN = re.compile(
r'^(?:[^/]*[A-Z][^/]*\.h|lua\.h|lauxlib\.h|lualib\.h)$')
# Assertion macros. These are defined in base/logging.h and
# testing/base/gunit.h. Note that the _M versions need to come first
# for substring matching to work.
_CHECK_MACROS = [
'DCHECK', 'CHECK',
'EXPECT_TRUE_M', 'EXPECT_TRUE',
'ASSERT_TRUE_M', 'ASSERT_TRUE',
'EXPECT_FALSE_M', 'EXPECT_FALSE',
'ASSERT_FALSE_M', 'ASSERT_FALSE',
]
# Replacement macros for CHECK/DCHECK/EXPECT_TRUE/EXPECT_FALSE
_CHECK_REPLACEMENT = dict([(m, {}) for m in _CHECK_MACROS])
for op, replacement in [('==', 'EQ'), ('!=', 'NE'),
('>=', 'GE'), ('>', 'GT'),
('<=', 'LE'), ('<', 'LT')]:
_CHECK_REPLACEMENT['DCHECK'][op] = 'DCHECK_%s' % replacement
_CHECK_REPLACEMENT['CHECK'][op] = 'CHECK_%s' % replacement
_CHECK_REPLACEMENT['EXPECT_TRUE'][op] = 'EXPECT_%s' % replacement
_CHECK_REPLACEMENT['ASSERT_TRUE'][op] = 'ASSERT_%s' % replacement
_CHECK_REPLACEMENT['EXPECT_TRUE_M'][op] = 'EXPECT_%s_M' % replacement
_CHECK_REPLACEMENT['ASSERT_TRUE_M'][op] = 'ASSERT_%s_M' % replacement
for op, inv_replacement in [('==', 'NE'), ('!=', 'EQ'),
('>=', 'LT'), ('>', 'LE'),
('<=', 'GT'), ('<', 'GE')]:
_CHECK_REPLACEMENT['EXPECT_FALSE'][op] = 'EXPECT_%s' % inv_replacement
_CHECK_REPLACEMENT['ASSERT_FALSE'][op] = 'ASSERT_%s' % inv_replacement
_CHECK_REPLACEMENT['EXPECT_FALSE_M'][op] = 'EXPECT_%s_M' % inv_replacement
_CHECK_REPLACEMENT['ASSERT_FALSE_M'][op] = 'ASSERT_%s_M' % inv_replacement
# Alternative tokens and their replacements. For full list, see section 2.5
# Alternative tokens [lex.digraph] in the C++ standard.
#
# Digraphs (such as '%:') are not included here since it's a mess to
# match those on a word boundary.
_ALT_TOKEN_REPLACEMENT = {
'and': '&&',
'bitor': '|',
'or': '||',
'xor': '^',
'compl': '~',
'bitand': '&',
'and_eq': '&=',
'or_eq': '|=',
'xor_eq': '^=',
'not': '!',
'not_eq': '!='
}
# Compile regular expression that matches all the above keywords. The "[ =()]"
# bit is meant to avoid matching these keywords outside of boolean expressions.
#
# False positives include C-style multi-line comments and multi-line strings
# but those have always been troublesome for cpplint.
_ALT_TOKEN_REPLACEMENT_PATTERN = re.compile(
r'[ =()](' + ('|'.join(_ALT_TOKEN_REPLACEMENT.keys())) + r')(?=[ (]|$)')
# These constants define types of headers for use with
# _IncludeState.CheckNextIncludeOrder().
_C_SYS_HEADER = 1
_CPP_SYS_HEADER = 2
_LIKELY_MY_HEADER = 3
_POSSIBLE_MY_HEADER = 4
_OTHER_HEADER = 5
# These constants define the current inline assembly state
_NO_ASM = 0 # Outside of inline assembly block
_INSIDE_ASM = 1 # Inside inline assembly block
_END_ASM = 2 # Last line of inline assembly block
_BLOCK_ASM = 3 # The whole block is an inline assembly block
# Match start of assembly blocks
_MATCH_ASM = re.compile(r'^\s*(?:asm|_asm|__asm|__asm__)'
r'(?:\s+(volatile|__volatile__))?'
r'\s*[{(]')
_regexp_compile_cache = {}
# {str, set(int)}: a map from error categories to sets of linenumbers
# on which those errors are expected and should be suppressed.
_error_suppressions = {}
# The root directory used for deriving header guard CPP variable.
# This is set by --root flag.
_root = None
# The allowed line length of files.
# This is set by --linelength flag.
_line_length = 80
# The allowed extensions for file names
# This is set by --extensions flag.
_valid_extensions = set(['cc', 'h', 'cpp', 'cu', 'cuh'])
def ParseNolintSuppressions(filename, raw_line, linenum, error):
  """Records any NOLINT comment on this line in the global suppression map.

  NOLINTNEXTLINE suppresses the following line; NOLINT suppresses the
  current one.  A bare NOLINT or NOLINT(*) suppresses every category;
  NOLINT(category) suppresses just that category, and an unknown category
  is itself reported as an error.

  Args:
    filename: str, the name of the input file.
    raw_line: str, the line of input text, with comments.
    linenum: int, the number of the current line.
    error: function, an error handler.
  """
  matched = Search(r'\bNOLINT(NEXTLINE)?\b(\([^)]+\))?', raw_line)
  if not matched:
    return
  suppressed_line = linenum + 1 if matched.group(1) else linenum
  category = matched.group(2)
  if category in (None, '(*)'):  # => "suppress all"
    _error_suppressions.setdefault(None, set()).add(suppressed_line)
    return
  if category.startswith('(') and category.endswith(')'):
    category = category[1:-1]
    if category in _ERROR_CATEGORIES:
      _error_suppressions.setdefault(category, set()).add(suppressed_line)
    else:
      error(filename, linenum, 'readability/nolint', 5,
            'Unknown NOLINT error category: %s' % category)
def ResetNolintSuppressions():
  """Resets the set of NOLINT suppressions to empty."""
  # Called between input files so suppressions never leak across files.
  _error_suppressions.clear()
def IsErrorSuppressedByNolint(category, linenum):
  """Reports whether an error on this line was suppressed via NOLINT.

  Consults the global _error_suppressions map maintained by
  ParseNolintSuppressions/ResetNolintSuppressions.  A line is suppressed
  either for this specific category or via the catch-all (None) entry.

  Args:
    category: str, the category of the error.
    linenum: int, the current line number.
  Returns:
    bool, True iff the error should be suppressed due to a NOLINT comment.
  """
  if linenum in _error_suppressions.get(category, set()):
    return True
  return linenum in _error_suppressions.get(None, set())
def Match(pattern, s):
  """Matches the string against the pattern anchored at the start.

  Uses re.compile, which maintains its own cache of compiled patterns,
  instead of the deprecated internal sre_compile module plus a
  hand-rolled module-level cache.

  Args:
    pattern: regex pattern
    s: string to match
  Returns:
    A match object, or None if the pattern does not match at the start.
  """
  return re.compile(pattern).match(s)
def ReplaceAll(pattern, rep, s):
  """Replaces instances of pattern in a string with a replacement.

  Uses re.compile, which maintains its own cache of compiled patterns,
  instead of the deprecated internal sre_compile module plus a
  hand-rolled module-level cache.

  Args:
    pattern: regex pattern
    rep: replacement text
    s: search string

  Returns:
    string with replacements made (or original string if no replacements)
  """
  return re.compile(pattern).sub(rep, s)
def Search(pattern, s):
  """Searches the string for the pattern anywhere in the string.

  Uses re.compile, which maintains its own cache of compiled patterns,
  instead of the deprecated internal sre_compile module plus a
  hand-rolled module-level cache.

  Args:
    pattern: regex pattern
    s: string to search
  Returns:
    A match object for the first occurrence, or None if not found.
  """
  return re.compile(pattern).search(s)
class _IncludeState(object):
  """Tracks line numbers for includes, and the order in which includes appear.

  include_list contains list of lists of (header, line number) pairs.
  It's a lists of lists rather than just one flat list to make it
  easier to update across preprocessor boundaries.

  Call CheckNextIncludeOrder() once for each header in the file, passing
  in the type constants defined above. Calls in an illegal order will
  raise an _IncludeError with an appropriate error message.
  """
  # self._section will move monotonically through this set. If it ever
  # needs to move backwards, CheckNextIncludeOrder will raise an error.
  _INITIAL_SECTION = 0
  _MY_H_SECTION = 1
  _C_SECTION = 2
  _CPP_SECTION = 3
  _OTHER_H_SECTION = 4
  # Human-readable names for the _XXX_HEADER type constants (defined
  # elsewhere in this module); used only to build error messages.
  _TYPE_NAMES = {
      _C_SYS_HEADER: 'C system header',
      _CPP_SYS_HEADER: 'C++ system header',
      _LIKELY_MY_HEADER: 'header this file implements',
      _POSSIBLE_MY_HEADER: 'header this file may implement',
      _OTHER_HEADER: 'other header',
      }
  # Human-readable names for the _XXX_SECTION constants above; used only
  # to build error messages.
  _SECTION_NAMES = {
      _INITIAL_SECTION: "... nothing. (This can't be an error.)",
      _MY_H_SECTION: 'a header this file implements',
      _C_SECTION: 'C system header',
      _CPP_SECTION: 'C++ system header',
      _OTHER_H_SECTION: 'other header',
      }
  def __init__(self):
    # Outer list tracks preprocessor nesting; each inner list holds the
    # (header, line number) pairs seen in that preprocessor region.
    self.include_list = [[]]
    self.ResetSection('')
  def FindHeader(self, header):
    """Check if a header has already been included.

    Args:
      header: header to check.

    Returns:
      Line number of previous occurrence, or -1 if the header has not
      been seen before.
    """
    for section_list in self.include_list:
      for f in section_list:
        if f[0] == header:
          return f[1]
    return -1
  def ResetSection(self, directive):
    """Reset section checking for preprocessor directive.

    Args:
      directive: preprocessor directive (e.g. "if", "else").
    """
    # The name of the current section.
    self._section = self._INITIAL_SECTION
    # The path of last found header.
    self._last_header = ''
    # Update list of includes. Note that we never pop from the
    # include list.
    if directive in ('if', 'ifdef', 'ifndef'):
      self.include_list.append([])
    elif directive in ('else', 'elif'):
      self.include_list[-1] = []
  def SetLastHeader(self, header_path):
    """Records header_path as the most recently seen include."""
    self._last_header = header_path
  def CanonicalizeAlphabeticalOrder(self, header_path):
    """Returns a path canonicalized for alphabetical comparison.

    - replaces "-" with "_" so they both cmp the same.
    - removes '-inl' since we don't require them to be after the main header.
    - lowercase everything, just in case.

    Args:
      header_path: Path to be canonicalized.

    Returns:
      Canonicalized path.
    """
    return header_path.replace('-inl.h', '.h').replace('-', '_').lower()
  def IsInAlphabeticalOrder(self, clean_lines, linenum, header_path):
    """Check if a header is in alphabetical order with the previous header.

    Args:
      clean_lines: A CleansedLines instance containing the file.
      linenum: The number of the line to check.
      header_path: Canonicalized header to be checked.

    Returns:
      Returns true if the header is in alphabetical order.
    """
    # If previous section is different from current section, _last_header will
    # be reset to empty string, so it's always less than current header.
    #
    # If previous line was a blank line, assume that the headers are
    # intentionally sorted the way they are.
    if (self._last_header > header_path and
        not Match(r'^\s*$', clean_lines.elided[linenum - 1])):
      return False
    return True
  def CheckNextIncludeOrder(self, header_type):
    """Returns a non-empty error message if the next header is out of order.

    This function also updates the internal state to be ready to check
    the next include.

    Args:
      header_type: One of the _XXX_HEADER constants defined above.

    Returns:
      The empty string if the header is in the right order, or an
      error message describing what's wrong.
    """
    error_message = ('Found %s after %s' %
                     (self._TYPE_NAMES[header_type],
                      self._SECTION_NAMES[self._section]))
    last_section = self._section
    if header_type == _C_SYS_HEADER:
      if self._section <= self._C_SECTION:
        self._section = self._C_SECTION
      else:
        # Seen after a later section: out of order; reset ordering state.
        self._last_header = ''
        return error_message
    elif header_type == _CPP_SYS_HEADER:
      if self._section <= self._CPP_SECTION:
        self._section = self._CPP_SECTION
      else:
        self._last_header = ''
        return error_message
    elif header_type == _LIKELY_MY_HEADER:
      if self._section <= self._MY_H_SECTION:
        self._section = self._MY_H_SECTION
      else:
        self._section = self._OTHER_H_SECTION
    elif header_type == _POSSIBLE_MY_HEADER:
      if self._section <= self._MY_H_SECTION:
        self._section = self._MY_H_SECTION
      else:
        # This will always be the fallback because we're not sure
        # enough that the header is associated with this file.
        self._section = self._OTHER_H_SECTION
    else:
      assert header_type == _OTHER_HEADER
      self._section = self._OTHER_H_SECTION
    if last_section != self._section:
      self._last_header = ''
    return ''
class _CppLintState(object):
"""Maintains module-wide state.."""
def __init__(self):
self.verbose_level = 1 # global setting.
self.error_count = 0 # global count of reported errors
# filters to apply when emitting error messages
self.filters = _DEFAULT_FILTERS[:]
# backup of filter list. Used to restore the state after each file.
self._filters_backup = self.filters[:]
self.counting = 'total' # In what way are we counting errors?
self.errors_by_category = {} # string to int dict storing error counts
# output format:
# "emacs" - format that emacs can parse (default)
# "vs7" - format that Microsoft Visual Studio 7 can parse
self.output_format = 'emacs'
def SetOutputFormat(self, output_format):
"""Sets the output format for errors."""
self.output_format = output_format
def SetVerboseLevel(self, level):
"""Sets the module's verbosity, and returns the previous setting."""
last_verbose_level = self.verbose_level
self.verbose_level = level
return last_verbose_level
def SetCountingStyle(self, counting_style):
"""Sets the module's counting options."""
self.counting = counting_style
def SetFilters(self, filters):
"""Sets the error-message filters.
These filters are applied when deciding whether to emit a given
error message.
Args:
filters: A string of comma-separated filters (eg "+whitespace/indent").
Each filter should start with + or -; else we die.
Raises:
ValueError: The comma-separated filters did not all start with '+' or '-'.
E.g. "-,+whitespace,-whitespace/indent,whitespace/badfilter"
"""
# Default filters always have less priority than the flag ones.
self.filters = _DEFAULT_FILTERS[:]
self.AddFilters(filters)
def AddFilters(self, filters):
""" Adds more filters to the existing list of error-message filters. """
for filt in filters.split(','):
clean_filt = filt.strip()
if clean_filt:
self.filters.append(clean_filt)
for filt in self.filters:
if not (filt.startswith('+') or filt.startswith('-')):
raise ValueError('Every filter in --filters must start with + or -'
' (%s does not)' % filt)
def BackupFilters(self):
""" Saves the current filter list to backup storage."""
self._filters_backup = self.filters[:]
def RestoreFilters(self):
""" Restores filters previously backed up."""
self.filters = self._filters_backup[:]
def ResetErrorCounts(self):
"""Sets the module's error statistic back to zero."""
self.error_count = 0
self.errors_by_category = {}
def IncrementErrorCount(self, category):
"""Bumps the module's error statistic."""
self.error_count += 1
if self.counting in ('toplevel', 'detailed'):
if self.counting != 'detailed':
category = category.split('/')[0]
if category not in self.errors_by_category:
self.errors_by_category[category] = 0
self.errors_by_category[category] += 1
def PrintErrorCounts(self):
"""Print a summary of errors by category, and the total."""
for category, count in self.errors_by_category.iteritems():
sys.stderr.write('Category \'%s\' errors found: %d\n' %
(category, count))
sys.stderr.write('Total errors found: %d\n' % self.error_count)
# Singleton holding all module-wide lint state; accessed through the
# module-level helper functions rather than directly.
_cpplint_state = _CppLintState()
# Module-level convenience wrappers around the shared _cpplint_state
# singleton.  Callers use these helpers instead of touching the singleton.
def _OutputFormat():
  """Gets the module's output format."""
  return _cpplint_state.output_format
def _SetOutputFormat(output_format):
  """Sets the module's output format."""
  _cpplint_state.SetOutputFormat(output_format)
def _VerboseLevel():
  """Returns the module's verbosity setting."""
  return _cpplint_state.verbose_level
def _SetVerboseLevel(level):
  """Sets the module's verbosity, and returns the previous setting."""
  return _cpplint_state.SetVerboseLevel(level)
def _SetCountingStyle(level):
  """Sets the module's counting options."""
  _cpplint_state.SetCountingStyle(level)
def _Filters():
  """Returns the module's list of output filters, as a list."""
  return _cpplint_state.filters
def _SetFilters(filters):
  """Sets the module's error-message filters.

  These filters are applied when deciding whether to emit a given
  error message.

  Args:
    filters: A string of comma-separated filters (eg "whitespace/indent").
             Each filter should start with + or -; else we die.
  """
  _cpplint_state.SetFilters(filters)
def _AddFilters(filters):
  """Adds more filter overrides.

  Unlike _SetFilters, this function does not reset the current list of filters
  available.

  Args:
    filters: A string of comma-separated filters (eg "whitespace/indent").
             Each filter should start with + or -; else we die.
  """
  _cpplint_state.AddFilters(filters)
def _BackupFilters():
  """ Saves the current filter list to backup storage."""
  _cpplint_state.BackupFilters()
def _RestoreFilters():
  """ Restores filters previously backed up."""
  _cpplint_state.RestoreFilters()
class _FunctionState(object):
"""Tracks current function name and the number of lines in its body."""
_NORMAL_TRIGGER = 250 # for --v=0, 500 for --v=1, etc.
_TEST_TRIGGER = 400 # about 50% more than _NORMAL_TRIGGER.
def __init__(self):
self.in_a_function = False
self.lines_in_function = 0
self.current_function = ''
def Begin(self, function_name):
"""Start analyzing function body.
Args:
function_name: The name of the function being tracked.
"""
self.in_a_function = True
self.lines_in_function = 0
self.current_function = function_name
def Count(self):
"""Count line in current function body."""
if self.in_a_function:
self.lines_in_function += 1
def Check(self, error, filename, linenum):
"""Report if too many lines in function body.
Args:
error: The function to call with any errors found.
filename: The name of the current file.
linenum: The number of the line to check.
"""
if Match(r'T(EST|est)', self.current_function):
base_trigger = self._TEST_TRIGGER
else:
base_trigger = self._NORMAL_TRIGGER
trigger = base_trigger * 2**_VerboseLevel()
if self.lines_in_function > trigger:
error_level = int(math.log(self.lines_in_function / base_trigger, 2))
# 50 => 0, 100 => 1, 200 => 2, 400 => 3, 800 => 4, 1600 => 5, ...
if error_level > 5:
error_level = 5
error(filename, linenum, 'readability/fn_size', error_level,
'Small and focused functions are preferred:'
' %s has %d non-comment lines'
' (error triggered by exceeding %d lines).' % (
self.current_function, self.lines_in_function, trigger))
def End(self):
"""Stop analyzing function body."""
self.in_a_function = False
class _IncludeError(Exception):
"""Indicates a problem with the include order in a file."""
pass
class FileInfo(object):
  """Provides utility functions for filenames.

  FileInfo provides easy access to the components of a file's path
  relative to the project root.
  """
  def __init__(self, filename):
    # Path as given by the caller; may be relative or absolute.
    self._filename = filename
  def FullName(self):
    """Absolute path with Windows separators normalized to Unix '/'."""
    return os.path.abspath(self._filename).replace('\\', '/')
  def RepositoryName(self):
    """FullName after removing the local path to the repository.

    If we have a real absolute path name here we can try to do something smart:
    detecting the root of the checkout and truncating /path/to/checkout from
    the name so that we get header guards that don't include things like
    "C:\Documents and Settings\..." or "/home/username/..." in them and thus
    people on different computers who have checked the source out to different
    locations won't see bogus errors.
    """
    # NOTE(review): result depends on the on-disk checkout layout — the
    # presence of .svn/.git/.hg marker directories above the file.
    fullname = self.FullName()
    if os.path.exists(fullname):
      project_dir = os.path.dirname(fullname)
      if os.path.exists(os.path.join(project_dir, ".svn")):
        # If there's a .svn file in the current directory, we recursively look
        # up the directory tree for the top of the SVN checkout
        root_dir = project_dir
        one_up_dir = os.path.dirname(root_dir)
        while os.path.exists(os.path.join(one_up_dir, ".svn")):
          root_dir = os.path.dirname(root_dir)
          one_up_dir = os.path.dirname(one_up_dir)
        prefix = os.path.commonprefix([root_dir, project_dir])
        return fullname[len(prefix) + 1:]
      # Not SVN <= 1.6? Try to find a git, hg, or svn top level directory by
      # searching up from the current path.
      root_dir = os.path.dirname(fullname)
      # Stop at the filesystem root, where dirname(root) == root.
      while (root_dir != os.path.dirname(root_dir) and
             not os.path.exists(os.path.join(root_dir, ".git")) and
             not os.path.exists(os.path.join(root_dir, ".hg")) and
             not os.path.exists(os.path.join(root_dir, ".svn"))):
        root_dir = os.path.dirname(root_dir)
      if (os.path.exists(os.path.join(root_dir, ".git")) or
          os.path.exists(os.path.join(root_dir, ".hg")) or
          os.path.exists(os.path.join(root_dir, ".svn"))):
        prefix = os.path.commonprefix([root_dir, project_dir])
        return fullname[len(prefix) + 1:]
    # Don't know what to do; header guard warnings may be wrong...
    return fullname
  def Split(self):
    """Splits the file into the directory, basename, and extension.

    For 'chrome/browser/browser.cc', Split() would
    return ('chrome/browser', 'browser', '.cc')

    Returns:
      A tuple of (directory, basename, extension).
    """
    googlename = self.RepositoryName()
    project, rest = os.path.split(googlename)
    return (project,) + os.path.splitext(rest)
  def BaseName(self):
    """File base name - text after the final slash, before the final period."""
    return self.Split()[1]
  def Extension(self):
    """File extension - text following the final period."""
    return self.Split()[2]
  def NoExtension(self):
    """File has no source file extension."""
    return '/'.join(self.Split()[0:2])
  def IsSource(self):
    """File has a source file extension."""
    return self.Extension()[1:] in ('c', 'cc', 'cpp', 'cxx')
def _ShouldPrintError(category, confidence, linenum):
  """If confidence >= verbose, category passes filter and is not suppressed."""
  # Three independent reasons to stay silent: a NOLINT(category) comment on
  # the offending line, a confidence below the verbosity threshold, or an
  # active '-' filter that is not overridden by a later '+' filter.
  if IsErrorSuppressedByNolint(category, linenum):
    return False
  if confidence < _cpplint_state.verbose_level:
    return False
  is_filtered = False
  for one_filter in _Filters():
    sign, rule = one_filter[:1], one_filter[1:]
    if sign == '-':
      if category.startswith(rule):
        is_filtered = True
    elif sign == '+':
      if category.startswith(rule):
        is_filtered = False
    else:
      assert False  # should have been checked for in SetFilter.
  return not is_filtered
def Error(filename, linenum, category, confidence, message):
  """Logs the fact we've found a lint error.

  We log where the error was found, and also our confidence in the error,
  that is, how certain we are this is a legitimate style regression, and
  not a misidentification or a use that's sometimes justified.

  False positives can be suppressed by the use of "cpplint(category)"
  comments on the offending line.  These are parsed into _error_suppressions.

  Args:
    filename: The name of the file containing the error.
    linenum: The number of the line containing the error.
    category: A string used to describe the "category" this bug
      falls under: "whitespace", say, or "runtime".  Categories
      may have a hierarchy separated by slashes: "whitespace/indent".
    confidence: A number from 1-5 representing a confidence score for
      the error, with 5 meaning that we are certain of the problem,
      and 1 meaning that it could be a legitimate construct.
    message: The error message.
  """
  if not _ShouldPrintError(category, confidence, linenum):
    return
  _cpplint_state.IncrementErrorCount(category)
  # Pick the message template for the configured output format;
  # plain 'emacs' style is the default.
  templates = {
      'vs7': '%s(%s): %s [%s] [%d]\n',
      'eclipse': '%s:%s: warning: %s [%s] [%d]\n',
  }
  template = templates.get(_cpplint_state.output_format,
                           '%s:%s: %s [%s] [%d]\n')
  sys.stderr.write(template % (filename, linenum, message,
                               category, confidence))
# Matches standard C++ escape sequences per 2.13.2.3 of the C++ standard.
_RE_PATTERN_CLEANSE_LINE_ESCAPES = re.compile(
    r'\\([abfnrtv?"\\\']|\d+|x[0-9a-fA-F]+)')
# Match a single C style comment on the same line.
_RE_PATTERN_C_COMMENTS = r'/\*(?:[^*]|\*(?!/))*\*/'
# Matches multi-line C style comments.
# This RE is a little bit more complicated than one might expect, because we
# have to take care of space removals tools so we can handle comments inside
# statements better.
# The current rule is: We only clear spaces from both sides when we're at the
# end of the line. Otherwise, we try to remove spaces from the right side,
# if this doesn't work we try on left side but only if there's a non-character
# on the right.
# The four alternatives are tried in order: comment at end-of-line (strip
# both sides), comment followed by whitespace, whitespace + comment followed
# by a non-word character, and finally a bare comment.
_RE_PATTERN_CLEANSE_LINE_C_COMMENTS = re.compile(
    r'(\s*' + _RE_PATTERN_C_COMMENTS + r'\s*$|' +
    _RE_PATTERN_C_COMMENTS + r'\s+|' +
    r'\s+' + _RE_PATTERN_C_COMMENTS + r'(?=\W)|' +
    _RE_PATTERN_C_COMMENTS + r')')
def IsCppString(line):
  """Does line terminate so, that the next symbol is in string constant.

  This function does not consider single-line nor multi-line comments.

  Args:
    line: is a partial line of code starting from the 0..n.

  Returns:
    True, if next character appended to 'line' is inside a
    string constant.
  """
  # Neutralize escaped backslashes first so that \\" is not mistaken
  # for an escaped quote \".
  line = line.replace(r'\\', 'XX')
  # Quotes that actually open/close strings: all double quotes, minus
  # escaped ones, minus character literals like '"'.
  open_quotes = line.count('"') - line.count(r'\"') - line.count("'\"'")
  return open_quotes % 2 == 1
def CleanseRawStrings(raw_lines):
  """Removes C++11 raw strings from lines.

  Before:
    static const char kData[] = R"(
        multi-line string
        )";

  After:
    static const char kData[] = ""
        (replaced by blank line)
        "";

  Args:
    raw_lines: list of raw lines.

  Returns:
    list of lines with C++11 raw strings replaced by empty strings.
  """
  # 'delimiter' is non-None while we are inside a multi-line raw string;
  # it holds the exact closing token, e.g. ')foo"' for R"foo(...)foo".
  delimiter = None
  lines_without_raw_strings = []
  for line in raw_lines:
    if delimiter:
      # Inside a raw string, look for the end
      end = line.find(delimiter)
      if end >= 0:
        # Found the end of the string, match leading space for this
        # line and resume copying the original lines, and also insert
        # a "" on the last line.
        leading_space = Match(r'^(\s*)\S', line)
        line = leading_space.group(1) + '""' + line[end + len(delimiter):]
        delimiter = None
      else:
        # Haven't found the end yet, append a blank line.
        line = '""'
    # Look for beginning of a raw string, and replace them with
    # empty strings. This is done in a loop to handle multiple raw
    # strings on the same line.
    while delimiter is None:
      # Look for beginning of a raw string.
      # See 2.14.15 [lex.string] for syntax.
      matched = Match(r'^(.*)\b(?:R|u8R|uR|UR|LR)"([^\s\\()]*)\((.*)$', line)
      if matched:
        delimiter = ')' + matched.group(2) + '"'
        end = matched.group(3).find(delimiter)
        if end >= 0:
          # Raw string ended on same line
          line = (matched.group(1) + '""' +
                  matched.group(3)[end + len(delimiter):])
          delimiter = None
        else:
          # Start of a multi-line raw string
          line = matched.group(1) + '""'
      else:
        break
    lines_without_raw_strings.append(line)
  # TODO(unknown): if delimiter is not None here, we might want to
  # emit a warning for unterminated string.
  return lines_without_raw_strings
def FindNextMultiLineCommentStart(lines, lineix):
  """Find the beginning marker for a multiline comment.

  Returns the index of the first line at or after lineix that opens a
  '/*' comment which is NOT closed on the same line, or len(lines).
  """
  for ix in range(lineix, len(lines)):
    stripped = lines[ix].strip()
    # Only report a '/*' whose matching '*/' is not on this line.
    if stripped.startswith('/*') and stripped.find('*/', 2) < 0:
      return ix
  return len(lines)
def FindNextMultiLineCommentEnd(lines, lineix):
  """We are inside a comment, find the end marker.

  Returns the index of the first line at or after lineix ending with '*/',
  or len(lines) if the comment is never closed.
  """
  ix = lineix
  while ix < len(lines) and not lines[ix].strip().endswith('*/'):
    ix += 1
  return ix
def RemoveMultiLineCommentsFromRange(lines, begin, end):
  """Clears a range of lines for multi-line comments."""
  # '// dummy' keeps the lines non-empty, so we will not get
  # unnecessary blank line warnings later in the code.
  lines[begin:end] = ['// dummy'] * (end - begin)
def RemoveMultiLineComments(filename, lines, error):
  """Removes multiline (c-style) comments from lines."""
  ix = 0
  total = len(lines)  # replacements below preserve the line count
  while ix < total:
    begin = FindNextMultiLineCommentStart(lines, ix)
    if begin >= total:
      # No more multi-line comments in the file.
      return
    end = FindNextMultiLineCommentEnd(lines, begin)
    if end >= total:
      error(filename, begin + 1, 'readability/multiline_comment', 5,
            'Could not find end of multi-line comment')
      return
    RemoveMultiLineCommentsFromRange(lines, begin, end + 1)
    ix = end + 1
def CleanseComments(line):
  """Removes //-comments and single-line C-style /* */ comments.

  Args:
    line: A line of C++ source.

  Returns:
    The line with single-line comments removed.
  """
  pos = line.find('//')
  # Only treat '//' as a comment when it is not inside a string literal.
  if pos != -1 and not IsCppString(line[:pos]):
    line = line[:pos].rstrip()
  # Drop any /* ... */ comments that open and close on this line.
  return _RE_PATTERN_CLEANSE_LINE_C_COMMENTS.sub('', line)
class CleansedLines(object):
  """Holds 3 copies of all lines with different preprocessing applied to them.

  1) elided member contains lines without strings and comments,
  2) lines member contains lines without comments, and
  3) raw_lines member contains all the lines without processing.
  All these three members are of <type 'list'>, and of the same length.
  """
  def __init__(self, lines):
    self.elided = []
    self.lines = []
    self.raw_lines = lines
    self.num_lines = len(lines)
    # Raw strings are collapsed first so their contents cannot confuse
    # the comment and string strippers below.
    self.lines_without_raw_strings = CleanseRawStrings(lines)
    for linenum in range(len(self.lines_without_raw_strings)):
      self.lines.append(CleanseComments(
          self.lines_without_raw_strings[linenum]))
      elided = self._CollapseStrings(self.lines_without_raw_strings[linenum])
      self.elided.append(CleanseComments(elided))
  def NumLines(self):
    """Returns the number of lines represented."""
    return self.num_lines
  @staticmethod
  def _CollapseStrings(elided):
    """Collapses strings and chars on a line to simple "" or '' blocks.

    We nix strings first so we're not fooled by text like '"http://"'

    Args:
      elided: The line being processed.

    Returns:
      The line with collapsed strings.
    """
    # #include lines keep their quoted paths intact.
    if _RE_PATTERN_INCLUDE.match(elided):
      return elided
    # Remove escaped characters first to make quote/single quote collapsing
    # basic. Things that look like escaped characters shouldn't occur
    # outside of strings and chars.
    elided = _RE_PATTERN_CLEANSE_LINE_ESCAPES.sub('', elided)
    # Replace quoted strings and digit separators. Both single quotes
    # and double quotes are processed in the same loop, otherwise
    # nested quotes wouldn't work.
    collapsed = ''
    while True:
      # Find the first quote character
      match = Match(r'^([^\'"]*)([\'"])(.*)$', elided)
      if not match:
        collapsed += elided
        break
      head, quote, tail = match.groups()
      if quote == '"':
        # Collapse double quoted strings
        second_quote = tail.find('"')
        if second_quote >= 0:
          collapsed += head + '""'
          elided = tail[second_quote + 1:]
        else:
          # Unmatched double quote, don't bother processing the rest
          # of the line since this is probably a multiline string.
          collapsed += elided
          break
      else:
        # Found single quote, check nearby text to eliminate digit separators.
        #
        # There is no special handling for floating point here, because
        # the integer/fractional/exponent parts would all be parsed
        # correctly as long as there are digits on both sides of the
        # separator. So we are fine as long as we don't see something
        # like "0.'3" (gcc 4.9.0 will not allow this literal).
        if Search(r'\b(?:0[bBxX]?|[1-9])[0-9a-fA-F]*$', head):
          match_literal = Match(r'^((?:\'?[0-9a-zA-Z_])*)(.*)$', "'" + tail)
          collapsed += head + match_literal.group(1).replace("'", '')
          elided = match_literal.group(2)
        else:
          second_quote = tail.find('\'')
          if second_quote >= 0:
            collapsed += head + "''"
            elided = tail[second_quote + 1:]
          else:
            # Unmatched single quote
            collapsed += elided
            break
    return collapsed
def FindEndOfExpressionInLine(line, startpos, stack):
  """Find the position just after the end of current parenthesized expression.

  Args:
    line: a CleansedLines line.
    startpos: start searching at this position.
    stack: nesting stack at startpos.

  Returns:
    On finding matching end: (index just after matching end, None)
    On finding an unclosed expression: (-1, None)
    Otherwise: (-1, new stack at end of this line)
  """
  # range() instead of Python 2's xrange() so this also runs on Python 3;
  # on Python 2 the two iterate identically.
  for i in range(startpos, len(line)):
    char = line[i]
    if char in '([{':
      # Found start of parenthesized expression, push to expression stack
      stack.append(char)
    elif char == '<':
      # Found potential start of template argument list
      if i > 0 and line[i - 1] == '<':
        # Left shift operator
        if stack and stack[-1] == '<':
          stack.pop()
          if not stack:
            return (-1, None)
      elif i > 0 and Search(r'\boperator\s*$', line[0:i]):
        # operator<, don't add to stack
        continue
      else:
        # Tentative start of template argument list
        stack.append('<')
    elif char in ')]}':
      # Found end of parenthesized expression.
      #
      # If we are currently expecting a matching '>', the pending '<'
      # must have been an operator.  Remove them from expression stack.
      while stack and stack[-1] == '<':
        stack.pop()
      if not stack:
        return (-1, None)
      if ((stack[-1] == '(' and char == ')') or
          (stack[-1] == '[' and char == ']') or
          (stack[-1] == '{' and char == '}')):
        stack.pop()
        if not stack:
          return (i + 1, None)
      else:
        # Mismatched parentheses
        return (-1, None)
    elif char == '>':
      # Found potential end of template argument list.
      # Ignore "->" and operator functions
      if (i > 0 and
          (line[i - 1] == '-' or Search(r'\boperator\s*$', line[0:i - 1]))):
        continue
      # Pop the stack if there is a matching '<'.  Otherwise, ignore
      # this '>' since it must be an operator.
      if stack:
        if stack[-1] == '<':
          stack.pop()
          if not stack:
            return (i + 1, None)
    elif char == ';':
      # Found something that look like end of statements.  If we are currently
      # expecting a '>', the matching '<' must have been an operator, since
      # template argument list should not contain statements.
      while stack and stack[-1] == '<':
        stack.pop()
      if not stack:
        return (-1, None)
  # Did not find end of expression or unbalanced parentheses on this line
  return (-1, stack)
def CloseExpression(clean_lines, linenum, pos):
  """If input points to ( or { or [ or <, finds the position that closes it.

  If lines[linenum][pos] points to a '(' or '{' or '[' or '<', finds the
  linenum/pos that correspond to the closing of the expression.

  TODO(unknown): cpplint spends a fair bit of time matching parentheses.
  Ideally we would want to index all opening and closing parentheses once
  and have CloseExpression be just a simple lookup, but due to preprocessor
  tricks, this is not so easy.

  Args:
    clean_lines: A CleansedLines instance containing the file.
    linenum: The number of the line to check.
    pos: A position on the line.

  Returns:
    A tuple (line, linenum, pos) pointer *past* the closing brace, or
    (line, len(lines), -1) if we never find a close.  Note we ignore
    strings and comments when matching; and the line we return is the
    'cleansed' line at linenum.
  """
  line = clean_lines.elided[linenum]
  # Bail out if pos is not an opening bracket, or if the '<' is really
  # a shift or comparison operator.
  if line[pos] not in '({[<' or Match(r'<[<=]', line[pos:]):
    return (line, clean_lines.NumLines(), -1)
  # Scan the remainder of the starting line first.
  end_pos, pending = FindEndOfExpressionInLine(line, pos, [])
  if end_pos > -1:
    return (line, linenum, end_pos)
  # Keep scanning forward while brackets remain unmatched.
  while pending and linenum < clean_lines.NumLines() - 1:
    linenum += 1
    line = clean_lines.elided[linenum]
    end_pos, pending = FindEndOfExpressionInLine(line, 0, pending)
    if end_pos > -1:
      return (line, linenum, end_pos)
  # Ran out of lines without finding the matching close bracket.
  return (line, clean_lines.NumLines(), -1)
def FindStartOfExpressionInLine(line, endpos, stack):
  """Find position at the matching start of current expression.

  This is almost the reverse of FindEndOfExpressionInLine, but note
  that the input position and returned position differs by 1.

  Args:
    line: a CleansedLines line.
    endpos: start searching at this position.
    stack: nesting stack at endpos.

  Returns:
    On finding matching start: (index at matching start, None)
    On finding an unclosed expression: (-1, None)
    Otherwise: (-1, new stack at beginning of this line)
  """
  # Scan backwards from endpos toward the beginning of the line.
  i = endpos
  while i >= 0:
    char = line[i]
    if char in ')]}':
      # Found end of expression, push to expression stack
      stack.append(char)
    elif char == '>':
      # Found potential end of template argument list.
      #
      # Ignore it if it's a "->" or ">=" or "operator>"
      if (i > 0 and
          (line[i - 1] == '-' or
           Match(r'\s>=\s', line[i - 1:]) or
           Search(r'\boperator\s*$', line[0:i]))):
        i -= 1
      else:
        stack.append('>')
    elif char == '<':
      # Found potential start of template argument list
      if i > 0 and line[i - 1] == '<':
        # Left shift operator
        i -= 1
      else:
        # If there is a matching '>', we can pop the expression stack.
        # Otherwise, ignore this '<' since it must be an operator.
        if stack and stack[-1] == '>':
          stack.pop()
          if not stack:
            return (i, None)
    elif char in '([{':
      # Found start of expression.
      #
      # If there are any unmatched '>' on the stack, they must be
      # operators.  Remove those.
      while stack and stack[-1] == '>':
        stack.pop()
      if not stack:
        return (-1, None)
      if ((char == '(' and stack[-1] == ')') or
          (char == '[' and stack[-1] == ']') or
          (char == '{' and stack[-1] == '}')):
        stack.pop()
        if not stack:
          return (i, None)
      else:
        # Mismatched parentheses
        return (-1, None)
    elif char == ';':
      # Found something that look like end of statements.  If we are currently
      # expecting a '<', the matching '>' must have been an operator, since
      # template argument list should not contain statements.
      while stack and stack[-1] == '>':
        stack.pop()
      if not stack:
        return (-1, None)
    i -= 1
  return (-1, stack)
def ReverseCloseExpression(clean_lines, linenum, pos):
  """If input points to ) or } or ] or >, finds the position that opens it.

  If lines[linenum][pos] points to a ')' or '}' or ']' or '>', finds the
  linenum/pos that correspond to the opening of the expression.

  Args:
    clean_lines: A CleansedLines instance containing the file.
    linenum: The number of the line to check.
    pos: A position on the line.

  Returns:
    A tuple (line, linenum, pos) pointer *at* the opening brace, or
    (line, 0, -1) if we never find the matching opening brace.  Note
    we ignore strings and comments when matching; and the line we
    return is the 'cleansed' line at linenum.
  """
  line = clean_lines.elided[linenum]
  # Bail out unless pos sits on a closing bracket.
  if line[pos] not in ')}]>':
    return (line, 0, -1)
  # Scan the starting line first.
  open_pos, pending = FindStartOfExpressionInLine(line, pos, [])
  if open_pos > -1:
    return (line, linenum, open_pos)
  # Keep scanning backwards while brackets remain unmatched.
  while pending and linenum > 0:
    linenum -= 1
    line = clean_lines.elided[linenum]
    open_pos, pending = FindStartOfExpressionInLine(line, len(line) - 1,
                                                    pending)
    if open_pos > -1:
      return (line, linenum, open_pos)
  # Reached the beginning of the file without finding the opening brace.
  return (line, 0, -1)
def make_polyfill_regex():
  """Builds a regex matching std::/boost:: names that are banned in favor
  of the mongo::stdx:: polyfills (e.g. std::mutex, boost::thread).

  Returns:
    A compiled regex whose alternatives are 'std::<name>\\b' and
    'boost::<name>\\b' for every polyfilled name.
  """
  polyfill_required_names = [
      '_',
      'adopt_lock',
      'async',
      'chrono',
      'condition_variable',
      'condition_variable_any',
      'cv_status',
      'defer_lock',
      'future',
      'future_status',
      'launch',
      'lock_guard',
      'mutex',
      'notify_all_at_thread_exit',
      'packaged_task',
      'promise',
      'recursive_mutex',
      # Fixed: was 'shared_lock,' — the stray comma inside the string made
      # the pattern 'shared_lock,\b', so std::shared_lock was never flagged.
      'shared_lock',
      'shared_mutex',
      'shared_timed_mutex',
      'this_thread(?!::at_thread_exit)',
      'thread',
      'timed_mutex',
      'try_to_lock',
      'unique_lock',
      'unordered_map',
      'unordered_multimap',
      'unordered_multiset',
      'unordered_set',
  ]
  qualified_names = ['boost::' + name + "\\b" for name in polyfill_required_names]
  qualified_names.extend('std::' + name + "\\b" for name in polyfill_required_names)
  qualified_names_regex = '|'.join(qualified_names)
  return re.compile(qualified_names_regex)
# Compiled once at import time; used by CheckForMongoPolyfill below.
_RE_PATTERN_MONGO_POLYFILL=make_polyfill_regex()
def CheckForMongoPolyfill(filename, clean_lines, linenum, error):
  """Reports an error for any banned std::/boost:: name on the line.

  Args:
    filename: The name of the current file.
    clean_lines: A CleansedLines instance containing the file.
    linenum: The number of the line to check.
    error: The function to call with any errors found.
  """
  line = clean_lines.elided[linenum]
  if re.search(_RE_PATTERN_MONGO_POLYFILL, line):
    error(filename, linenum, 'mongodb/polyfill', 5,
          'Illegal use of banned name from std::/boost::, use mongo::stdx:: variant instead')
def CheckForMongoAtomic(filename, clean_lines, linenum, error):
  """Reports an error for any use of std::atomic on the line.

  MongoDB code must use AtomicWord<T> (or the other types from
  "mongo/platform/atomic_word.h") instead.
  """
  # 'std::atomic' contains no regex metacharacters, so a plain substring
  # test is equivalent to the regex search.
  if 'std::atomic' in clean_lines.elided[linenum]:
    error(filename, linenum, 'mongodb/stdatomic', 5,
          'Illegal use of prohibited std::atomic<T>, use AtomicWord<T> or other types '
          'from "mongo/platform/atomic_word.h"')
def CheckForMongoVolatile(filename, clean_lines, linenum, error):
  """Reports an error for uses of the volatile storage keyword.

  Lines containing '__asm__' are exempt, since inline-assembly statements
  legitimately use 'volatile'.
  """
  line = clean_lines.elided[linenum]
  if '__asm__' in line:
    return
  # '[^_]volatile' avoids matching identifiers such as '_volatile'.
  if re.search('[^_]volatile', line):
    error(filename, linenum, 'mongodb/volatile', 5,
          'Illegal use of the volatile storage keyword, use AtomicWord instead '
          'from "mongo/platform/atomic_word.h"')
def CheckForNonMongoAssert(filename, clean_lines, linenum, error):
  """Reports an error for calls to the bare assert() function.

  MongoDB code must use one of the assertion helpers from assert_utils.h.
  """
  # \b keeps this from firing on names like fassert( or my_assert(.
  if re.search(r'\bassert\s*\(', clean_lines.elided[linenum]):
    error(filename, linenum, 'mongodb/assert', 5,
          'Illegal use of the bare assert function, use a function from assert_utils.h instead.')
def CheckForCopyright(filename, lines, error):
  """Logs an error if no Copyright message appears at the top of the file.

  Args:
    filename: The name of the current file.
    lines: An array of strings; index 0 is a dummy entry, so the file's
      first real line is lines[1].
    error: The function to call with any errors found.
  """
  # We'll say it should occur by line 10. Don't forget there's a
  # dummy line at the front.
  # range() instead of Python 2's xrange() so this also runs on Python 3;
  # on Python 2 the two iterate identically.
  for line in range(1, min(len(lines), 11)):
    if re.search(r'Copyright', lines[line], re.I): break
  else:                       # means no copyright line was found
    error(filename, 0, 'legal/copyright', 5,
          'No copyright message found.  '
          'You should have a line: "Copyright [year] <Copyright Owner>"')
def GetIndentLevel(line):
  """Return the number of leading spaces in line.

  Args:
    line: A string to check.
  Returns:
    An integer count of leading spaces, possibly zero.
  """
  leading = Match(r'^( *)\S', line)
  # A blank/whitespace-only line has no \S match and counts as indent 0.
  return len(leading.group(1)) if leading else 0
def GetHeaderGuardCPPVariable(filename):
  """Returns the CPP variable that should be used as a header guard.

  Args:
    filename: The name of a C++ header file.
  Returns:
    The CPP variable that should be used as a header guard in the
    named file.
  """
  # Undo Emacs flymake's temporary renaming so the guard reflects the
  # real header name.
  for pattern, repl in ((r'_flymake\.h$', '.h'),
                        (r'/\.flymake/([^/]*)$', r'/\1')):
    filename = re.sub(pattern, repl, filename)
  rel_path = FileInfo(filename).RepositoryName()
  if _root:
    # Strip the configured root prefix, if any.
    rel_path = re.sub('^' + _root + os.sep, '', rel_path)
  return re.sub(r'[-./\s]', '_', rel_path).upper() + '_'
def CheckForHeaderGuard(filename, lines, error):
  """Checks that the file contains a header guard.
  Logs an error if no #ifndef header guard is present. For other
  headers, checks that the full pathname is used.
  Args:
    filename: The name of the C++ header file.
    lines: An array of strings, each representing a line of the file.
    error: The function to call with any errors found.
  """
  # Don't check for header guards if there are error suppression
  # comments somewhere in this file.
  #
  # Because this is silencing a warning for a nonexistent line, we
  # only support the very specific NOLINT(build/header_guard) syntax,
  # and not the general NOLINT or NOLINT(*) syntax.
  for i in lines:
    if Search(r'//\s*NOLINT\(build/header_guard\)', i):
      return
  # Expected guard token, derived from the file's repository path.
  cppvar = GetHeaderGuardCPPVariable(filename)
  # Guard tokens actually found while scanning the file.
  ifndef = None
  ifndef_linenum = 0
  define = None
  endif = None
  endif_linenum = 0
  for linenum, line in enumerate(lines):
    linesplit = line.split()
    if len(linesplit) >= 2:
      # find the first occurrence of #ifndef and #define, save arg
      if not ifndef and linesplit[0] == '#ifndef':
        # set ifndef to the header guard presented on the #ifndef line.
        ifndef = linesplit[1]
        ifndef_linenum = linenum
      if not define and linesplit[0] == '#define':
        define = linesplit[1]
    # find the last occurrence of #endif, save entire line
    if line.startswith('#endif'):
      endif = line
      endif_linenum = linenum
  if not ifndef:
    error(filename, 0, 'build/header_guard', 5,
          'No #ifndef header guard found, suggested CPP variable is: %s' %
          cppvar)
    return
  if not define:
    error(filename, 0, 'build/header_guard', 5,
          'No #define header guard found, suggested CPP variable is: %s' %
          cppvar)
    return
  # The guard should be PATH_FILE_H_, but we also allow PATH_FILE_H__
  # for backward compatibility.
  if ifndef != cppvar:
    error_level = 0
    if ifndef != cppvar + '_':
      error_level = 5
    # Re-parse NOLINT suppressions for the #ifndef line so a targeted
    # suppression placed there can silence this error.
    ParseNolintSuppressions(filename, lines[ifndef_linenum], ifndef_linenum,
                            error)
    error(filename, ifndef_linenum, 'build/header_guard', error_level,
          '#ifndef header guard has wrong style, please use: %s' % cppvar)
  if define != ifndef:
    error(filename, 0, 'build/header_guard', 5,
          '#ifndef and #define don\'t match, suggested CPP variable is: %s' %
          cppvar)
    return
  if endif != ('#endif // %s' % cppvar):
    error_level = 0
    if endif != ('#endif // %s' % (cppvar + '_')):
      error_level = 5
    # As above: allow a NOLINT suppression on the #endif line itself.
    ParseNolintSuppressions(filename, lines[endif_linenum], endif_linenum,
                            error)
    error(filename, endif_linenum, 'build/header_guard', error_level,
          '#endif line should be "#endif // %s"' % cppvar)
def CheckForBadCharacters(filename, lines, error):
  """Logs an error for each line containing bad characters.

  Two kinds of bad characters are flagged:
  1. Unicode replacement characters: these indicate that either the file
     contained invalid UTF-8 (likely) or Unicode replacement characters
     (which it shouldn't). Note that it's possible for this to throw off
     line numbering if the invalid UTF-8 occurred adjacent to a newline.
  2. NUL bytes, which are problematic for some tools.
  Args:
    filename: The name of the current file.
    lines: An array of strings, each representing a line of the file.
    error: The function to call with any errors found.
  """
  # (bad character, lint category, message) — checked in this order per line.
  bad_chars = (
      (u'\ufffd', 'readability/utf8',
       'Line contains invalid UTF-8 (or Unicode replacement character).'),
      ('\0', 'readability/nul', 'Line contains NUL byte.'),
  )
  for linenum, line in enumerate(lines):
    for char, category, message in bad_chars:
      if char in line:
        error(filename, linenum, category, 5, message)
def CheckForNewlineAtEOF(filename, lines, error):
  """Logs an error if there is no newline char at the end of the file.

  Args:
    filename: The name of the current file.
    lines: An array of strings, each representing a line of the file.
    error: The function to call with any errors found.
  """
  # lines was built by appending two newlines to the original text and
  # splitting on '\n', so a file that ends with a newline produces an
  # empty string at index -2. Anything else means the last real line
  # was unterminated (or the file was too short to contain one).
  ends_with_newline = len(lines) >= 3 and not lines[-2]
  if not ends_with_newline:
    error(filename, len(lines) - 2, 'whitespace/ending_newline', 5,
          'Could not find a newline character at the end of the file.')
def CheckForMultilineCommentsAndStrings(filename, clean_lines, linenum, error):
  """Logs an error if we see /* ... */ or "..." that extend past one line.

  /* ... */ comments are legit inside macros, for one line. Otherwise,
  we prefer // comments, so it's ok to warn about the other. Likewise,
  it's ok for strings to extend across multiple lines, as long as a line
  continuation character (backslash) terminates each line. Although not
  currently prohibited by the C++ style guide, it's ugly and unnecessary.
  We don't do well with either in this lint program, so we warn about both.
  Args:
    filename: The name of the current file.
    clean_lines: A CleansedLines instance containing the file.
    linenum: The number of the line to check.
    error: The function to call with any errors found.
  """
  # Remove all \\ (escaped backslashes) from the line. They are OK, and the
  # second (escaped) slash may trigger later \" detection erroneously.
  line = clean_lines.elided[linenum].replace('\\\\', '')
  comment_opens = line.count('/*')
  comment_closes = line.count('*/')
  if comment_opens > comment_closes:
    error(filename, linenum, 'readability/multiline_comment', 5,
          'Complex multi-line /*...*/-style comment found. '
          'Lint may give bogus warnings. '
          'Consider replacing these with //-style comments, '
          'with #if 0...#endif, '
          'or with more clearly structured multi-line comments.')
  unescaped_quotes = line.count('"') - line.count('\\"')
  if unescaped_quotes % 2 == 1:
    error(filename, linenum, 'readability/multiline_string', 5,
          'Multi-line string ("...") found. This lint script doesn\'t '
          'do well with such strings, and may give bogus warnings. '
          'Use C++11 raw strings or concatenation instead.')
# (non-threadsafe name, thread-safe alternative, validation pattern)
#
# The validation pattern is used to eliminate false positives such as:
# _rand(); // false positive due to substring match.
# ->rand(); // some member function rand().
# ACMRandom rand(seed); // some variable named rand.
# ISAACRandom rand(); // another variable named rand.
#
# Basically we require the return value of these functions to be used
# in some expression context on the same line by matching on some
# operator before the function name. This eliminates constructors and
# member function calls.
_UNSAFE_FUNC_PREFIX = r'(?:[-+*/=%^&|(<]\s*|>\s+)'
_THREADING_LIST = (
('asctime(', 'asctime_r(', _UNSAFE_FUNC_PREFIX + r'asctime\([^)]+\)'),
('ctime(', 'ctime_r(', _UNSAFE_FUNC_PREFIX + r'ctime\([^)]+\)'),
('getgrgid(', 'getgrgid_r(', _UNSAFE_FUNC_PREFIX + r'getgrgid\([^)]+\)'),
('getgrnam(', 'getgrnam_r(', _UNSAFE_FUNC_PREFIX + r'getgrnam\([^)]+\)'),
('getlogin(', 'getlogin_r(', _UNSAFE_FUNC_PREFIX + r'getlogin\(\)'),
('getpwnam(', 'getpwnam_r(', _UNSAFE_FUNC_PREFIX + r'getpwnam\([^)]+\)'),
('getpwuid(', 'getpwuid_r(', _UNSAFE_FUNC_PREFIX + r'getpwuid\([^)]+\)'),
('gmtime(', 'gmtime_r(', _UNSAFE_FUNC_PREFIX + r'gmtime\([^)]+\)'),
('localtime(', 'localtime_r(', _UNSAFE_FUNC_PREFIX + r'localtime\([^)]+\)'),
('rand(', 'rand_r(', _UNSAFE_FUNC_PREFIX + r'rand\(\)'),
('strtok(', 'strtok_r(',
_UNSAFE_FUNC_PREFIX + r'strtok\([^)]+\)'),
('ttyname(', 'ttyname_r(', _UNSAFE_FUNC_PREFIX + r'ttyname\([^)]+\)'),
)
def CheckPosixThreading(filename, clean_lines, linenum, error):
  """Checks for calls to thread-unsafe functions.

  Much code has been originally written without consideration of
  multi-threading. Also, engineers are relying on their old experience;
  they have learned posix before threading extensions were added. These
  tests guide the engineers to use thread-safe functions (when using
  posix directly).
  Args:
    filename: The name of the current file.
    clean_lines: A CleansedLines instance containing the file.
    linenum: The number of the line to check.
    error: The function to call with any errors found.
  """
  line = clean_lines.elided[linenum]
  for unsafe_func, safe_func, validation_pattern in _THREADING_LIST:
    # The validation pattern confirms this really is a call to the
    # unsafe function, not just a substring or variable name match.
    if not Search(validation_pattern, line):
      continue
    error(filename, linenum, 'runtime/threadsafe_fn', 2,
          'Consider using ' + safe_func +
          '...) instead of ' + unsafe_func +
          '...) for improved thread safety.')
def CheckVlogArguments(filename, clean_lines, linenum, error):
  """Checks that VLOG() is only used for defining a logging level.

  For example, VLOG(2) is correct. VLOG(INFO), VLOG(WARNING), VLOG(ERROR),
  and VLOG(FATAL) are not.
  Args:
    filename: The name of the current file.
    clean_lines: A CleansedLines instance containing the file.
    linenum: The number of the line to check.
    error: The function to call with any errors found.
  """
  if Search(r'\bVLOG\((INFO|ERROR|WARNING|DFATAL|FATAL)\)',
            clean_lines.elided[linenum]):
    error(filename, linenum, 'runtime/vlog', 5,
          'VLOG() should be used with numeric verbosity level. '
          'Use LOG() if you want symbolic severity levels.')
# Matches invalid increment: *count++, which moves pointer instead of
# incrementing a value.
_RE_PATTERN_INVALID_INCREMENT = re.compile(
r'^\s*\*\w+(\+\+|--);')
def CheckInvalidIncrement(filename, clean_lines, linenum, error):
  """Checks for invalid increment *count++.

  For example the following function:
    void increment_counter(int* count) {
      *count++;
    }
  is invalid, because it effectively does count++, moving pointer, and should
  be replaced with ++*count, (*count)++ or *count += 1.
  Args:
    filename: The name of the current file.
    clean_lines: A CleansedLines instance containing the file.
    linenum: The number of the line to check.
    error: The function to call with any errors found.
  """
  if _RE_PATTERN_INVALID_INCREMENT.match(clean_lines.elided[linenum]):
    error(filename, linenum, 'runtime/invalid_increment', 5,
          'Changing pointer instead of value (or unused value of operator*).')
def IsMacroDefinition(clean_lines, linenum):
  """Returns True if the line starts a #define or continues one via '\\'."""
  starts_define = Search(r'^#define', clean_lines[linenum])
  continues_previous = linenum > 0 and Search(r'\\$', clean_lines[linenum - 1])
  return bool(starts_define or continues_previous)
def IsForwardClassDeclaration(clean_lines, linenum):
  """Returns a match if the line is a (possibly templated) forward class declaration."""
  line = clean_lines[linenum]
  return Match(r'^\s*(\btemplate\b)*.*class\s+\w+;\s*$', line)
class _BlockInfo(object):
  """Tracks parsing state for a generic brace-delimited block of code."""
  def __init__(self, seen_open_brace):
    # True once the opening '{' of this block has been consumed.
    self.seen_open_brace = seen_open_brace
    # Running count of unbalanced '(' observed inside this block.
    self.open_parentheses = 0
    # Inline-assembly state, one of the module-level *_ASM constants.
    self.inline_asm = _NO_ASM
    # Generic blocks do not participate in namespace indentation checks.
    self.check_namespace_indentation = False
  def CheckBegin(self, filename, clean_lines, linenum, error):
    """Run checks that apply to text up to the opening brace.

    This is mostly for checking the text after the class identifier
    and the "{", usually where the base class is specified. For other
    blocks, there isn't much to check, so we always pass.
    Args:
      filename: The name of the current file.
      clean_lines: A CleansedLines instance containing the file.
      linenum: The number of the line to check.
      error: The function to call with any errors found.
    """
    pass
  def CheckEnd(self, filename, clean_lines, linenum, error):
    """Run checks that apply to text after the closing brace.

    This is mostly used for checking end of namespace comments.
    Args:
      filename: The name of the current file.
      clean_lines: A CleansedLines instance containing the file.
      linenum: The number of the line to check.
      error: The function to call with any errors found.
    """
    pass
  def IsBlockInfo(self):
    """Returns True only for direct _BlockInfo instances.

    Convenient for verifying that an object is an instance of
    _BlockInfo itself rather than one of the derived classes.
    Returns:
      True for this class, False for derived classes.
    """
    return self.__class__ == _BlockInfo
class _ExternCInfo(_BlockInfo):
  """Tracks state for an 'extern "C"' block."""
  def __init__(self):
    # extern "C" blocks are created with the opening brace already seen,
    # since they have no separate header portion to parse.
    _BlockInfo.__init__(self, True)
class _ClassInfo(_BlockInfo):
  """Stores information about a class."""
  def __init__(self, name, class_or_struct, clean_lines, linenum):
    """Records the class/struct header found at linenum.

    Args:
      name: The declared class name, as matched by the caller.
      class_or_struct: Either 'class' or 'struct'; selects default access.
      clean_lines: A CleansedLines instance containing the file.
      linenum: Line number of the declaration.
    """
    _BlockInfo.__init__(self, False)
    self.name = name
    self.starting_linenum = linenum
    self.is_derived = False
    self.check_namespace_indentation = True
    # structs default to public access; classes default to private.
    if class_or_struct == 'struct':
      self.access = 'public'
      self.is_struct = True
    else:
      self.access = 'private'
      self.is_struct = False
    # Remember initial indentation level for this class. Using raw_lines here
    # instead of elided to account for leading comments.
    self.class_indent = GetIndentLevel(clean_lines.raw_lines[linenum])
    # Try to find the end of the class. This will be confused by things like:
    # class A {
    # } *x = { ...
    #
    # But it's still good enough for CheckSectionSpacing.
    self.last_line = 0
    depth = 0
    for i in range(linenum, clean_lines.NumLines()):
      line = clean_lines.elided[i]
      depth += line.count('{') - line.count('}')
      if not depth:
        self.last_line = i
        break
  def CheckBegin(self, filename, clean_lines, linenum, error):
    """Marks this class as derived if its header contains a bare ':'."""
    # Look for a bare ':'
    if Search('(^|[^:]):($|[^:])', clean_lines.elided[linenum]):
      self.is_derived = True
  def CheckEnd(self, filename, clean_lines, linenum, error):
    """Checks that the closing brace aligns with the class declaration."""
    # Check that closing brace is aligned with beginning of the class.
    # Only do this if the closing brace is indented by only whitespaces.
    # This means we will not check single-line class definitions.
    indent = Match(r'^( *)\}', clean_lines.elided[linenum])
    if indent and len(indent.group(1)) != self.class_indent:
      if self.is_struct:
        parent = 'struct ' + self.name
      else:
        parent = 'class ' + self.name
      error(filename, linenum, 'whitespace/indent', 3,
            'Closing brace should be aligned with beginning of %s' % parent)
class _NamespaceInfo(_BlockInfo):
  """Stores information about a namespace."""
  def __init__(self, name, linenum):
    """Args:
      name: Namespace name, or None/'' for an anonymous namespace.
      linenum: Line number of the namespace declaration.
    """
    _BlockInfo.__init__(self, False)
    # Empty string denotes an anonymous namespace.
    self.name = name or ''
    self.starting_linenum = linenum
    self.check_namespace_indentation = True
  def CheckEnd(self, filename, clean_lines, linenum, error):
    """Check end of namespace comments."""
    line = clean_lines.raw_lines[linenum]
    # Check how many lines is enclosed in this namespace. Don't issue
    # warning for missing namespace comments if there aren't enough
    # lines. However, do apply checks if there is already an end of
    # namespace comment and it's incorrect.
    #
    # TODO(unknown): We always want to check end of namespace comments
    # if a namespace is large, but sometimes we also want to apply the
    # check if a short namespace contained nontrivial things (something
    # other than forward declarations). There is currently no logic on
    # deciding what these nontrivial things are, so this check is
    # triggered by namespace size only, which works most of the time.
    if (linenum - self.starting_linenum < 10
        and not Match(r'};*\s*(//|/\*).*\bnamespace\b', line)):
      return
    # Look for matching comment at end of namespace.
    #
    # Note that we accept C style "/* */" comments for terminating
    # namespaces, so that code that terminate namespaces inside
    # preprocessor macros can be cpplint clean.
    #
    # We also accept stuff like "// end of namespace <name>." with the
    # period at the end.
    #
    # Besides these, we don't accept anything else, otherwise we might
    # get false negatives when existing comment is a substring of the
    # expected namespace.
    if self.name:
      # Named namespace
      if not Match((r'};*\s*(//|/\*).*\bnamespace\s+' + re.escape(self.name) +
                    r'[\*/\.\\\s]*$'),
                   line):
        error(filename, linenum, 'readability/namespace', 5,
              'Namespace should be terminated with "// namespace %s"' %
              self.name)
    else:
      # Anonymous namespace
      if not Match(r'};*\s*(//|/\*).*\bnamespace[\*/\.\\\s]*$', line):
        # If "// namespace anonymous" or "// anonymous namespace (more text)",
        # mention "// anonymous namespace" as an acceptable form
        if Match(r'}.*\b(namespace anonymous|anonymous namespace)\b', line):
          error(filename, linenum, 'readability/namespace', 5,
                'Anonymous namespace should be terminated with "// namespace"'
                ' or "// anonymous namespace"')
        else:
          error(filename, linenum, 'readability/namespace', 5,
                'Anonymous namespace should be terminated with "// namespace"')
class _PreprocessorInfo(object):
"""Stores checkpoints of nesting stacks when #if/#else is seen."""
def __init__(self, stack_before_if):
# The entire nesting stack before #if
self.stack_before_if = stack_before_if
# The entire nesting stack up to #else
self.stack_before_else = []
# Whether we have already seen #else or #elif
self.seen_else = False
class NestingState(object):
  """Holds states related to parsing braces."""
  def __init__(self):
    # Stack for tracking all braces. An object is pushed whenever we
    # see a "{", and popped when we see a "}". Only 3 types of
    # objects are possible:
    # - _ClassInfo: a class or struct.
    # - _NamespaceInfo: a namespace.
    # - _BlockInfo: some other type of block.
    self.stack = []
    # Top of the previous stack before each Update().
    #
    # Because the nesting_stack is updated at the end of each line, we
    # had to do some convoluted checks to find out what is the current
    # scope at the beginning of the line. This check is simplified by
    # saving the previous top of nesting stack.
    #
    # We could save the full stack, but we only need the top. Copying
    # the full nesting stack would slow down cpplint by ~10%.
    self.previous_stack_top = []
    # Stack of _PreprocessorInfo objects.
    self.pp_stack = []
  def SeenOpenBrace(self):
    """Check if we have seen the opening brace for the innermost block.
    Returns:
      True if we have seen the opening brace, False if the innermost
      block is still expecting an opening brace.
    """
    return (not self.stack) or self.stack[-1].seen_open_brace
  def InNamespaceBody(self):
    """Check if we are currently one level inside a namespace body.
    Returns:
      True if top of the stack is a namespace block, False otherwise.
    """
    return self.stack and isinstance(self.stack[-1], _NamespaceInfo)
  def InExternC(self):
    """Check if we are currently one level inside an 'extern "C"' block.
    Returns:
      True if top of the stack is an extern block, False otherwise.
    """
    return self.stack and isinstance(self.stack[-1], _ExternCInfo)
  def InClassDeclaration(self):
    """Check if we are currently one level inside a class or struct declaration.
    Returns:
      True if top of the stack is a class/struct, False otherwise.
    """
    return self.stack and isinstance(self.stack[-1], _ClassInfo)
  def InAsmBlock(self):
    """Check if we are currently one level inside an inline ASM block.
    Returns:
      True if the top of the stack is a block containing inline ASM.
    """
    return self.stack and self.stack[-1].inline_asm != _NO_ASM
  def InTemplateArgumentList(self, clean_lines, linenum, pos):
    """Check if current position is inside template argument list.
    Args:
      clean_lines: A CleansedLines instance containing the file.
      linenum: The number of the line to check.
      pos: position just after the suspected template argument.
    Returns:
      True if (linenum, pos) is inside template arguments.
    """
    # Scan forward from (linenum, pos) until a token decides the question.
    while linenum < clean_lines.NumLines():
      # Find the earliest character that might indicate a template argument
      line = clean_lines.elided[linenum]
      match = Match(r'^[^{};=\[\]\.<>]*(.)', line[pos:])
      if not match:
        linenum += 1
        pos = 0
        continue
      token = match.group(1)
      pos += len(match.group(0))
      # These things do not look like template argument list:
      # class Suspect {
      # class Suspect x; }
      if token in ('{', '}', ';'): return False
      # These things look like template argument list:
      # template <class Suspect>
      # template <class Suspect = default_value>
      # template <class Suspect[]>
      # template <class Suspect...>
      if token in ('>', '=', '[', ']', '.'): return True
      # Check if token is an unmatched '<'.
      # If not, move on to the next character.
      if token != '<':
        pos += 1
        if pos >= len(line):
          linenum += 1
          pos = 0
        continue
      # We can't be sure if we just find a single '<', and need to
      # find the matching '>'.
      (_, end_line, end_pos) = CloseExpression(clean_lines, linenum, pos - 1)
      if end_pos < 0:
        # Not sure if template argument list or syntax error in file
        return False
      linenum = end_line
      pos = end_pos
    return False
  def UpdatePreprocessor(self, line):
    """Update preprocessor stack.
    We need to handle preprocessors due to classes like this:
      #ifdef SWIG
      struct ResultDetailsPageElementExtensionPoint {
      #else
      struct ResultDetailsPageElementExtensionPoint : public Extension {
      #endif
    We make the following assumptions (good enough for most files):
    - Preprocessor condition evaluates to true from #if up to first
      #else/#elif/#endif.
    - Preprocessor condition evaluates to false from #else/#elif up
      to #endif. We still perform lint checks on these lines, but
      these do not affect nesting stack.
    Args:
      line: current line to check.
    """
    if Match(r'^\s*#\s*(if|ifdef|ifndef)\b', line):
      # Beginning of #if block, save the nesting stack here. The saved
      # stack will allow us to restore the parsing state in the #else case.
      self.pp_stack.append(_PreprocessorInfo(copy.deepcopy(self.stack)))
    elif Match(r'^\s*#\s*(else|elif)\b', line):
      # Beginning of #else block
      if self.pp_stack:
        if not self.pp_stack[-1].seen_else:
          # This is the first #else or #elif block. Remember the
          # whole nesting stack up to this point. This is what we
          # keep after the #endif.
          self.pp_stack[-1].seen_else = True
          self.pp_stack[-1].stack_before_else = copy.deepcopy(self.stack)
        # Restore the stack to how it was before the #if
        self.stack = copy.deepcopy(self.pp_stack[-1].stack_before_if)
      else:
        # TODO(unknown): unexpected #else, issue warning?
        pass
    elif Match(r'^\s*#\s*endif\b', line):
      # End of #if or #else blocks.
      if self.pp_stack:
        # If we saw an #else, we will need to restore the nesting
        # stack to its former state before the #else, otherwise we
        # will just continue from where we left off.
        if self.pp_stack[-1].seen_else:
          # Here we can just use a shallow copy since we are the last
          # reference to it.
          self.stack = self.pp_stack[-1].stack_before_else
        # Drop the corresponding #if
        self.pp_stack.pop()
      else:
        # TODO(unknown): unexpected #endif, issue warning?
        pass
  # TODO(unknown): Update() is too long, but we will refactor later.
  def Update(self, filename, clean_lines, linenum, error):
    """Update nesting state with current line.
    Args:
      filename: The name of the current file.
      clean_lines: A CleansedLines instance containing the file.
      linenum: The number of the line to check.
      error: The function to call with any errors found.
    """
    line = clean_lines.elided[linenum]
    # Remember top of the previous nesting stack.
    #
    # The stack is always pushed/popped and not modified in place, so
    # we can just do a shallow copy instead of copy.deepcopy. Using
    # deepcopy would slow down cpplint by ~28%.
    if self.stack:
      self.previous_stack_top = self.stack[-1]
    else:
      self.previous_stack_top = None
    # Update pp_stack
    self.UpdatePreprocessor(line)
    # Count parentheses. This is to avoid adding struct arguments to
    # the nesting stack.
    if self.stack:
      inner_block = self.stack[-1]
      depth_change = line.count('(') - line.count(')')
      inner_block.open_parentheses += depth_change
      # Also check if we are starting or ending an inline assembly block.
      if inner_block.inline_asm in (_NO_ASM, _END_ASM):
        if (depth_change != 0 and
            inner_block.open_parentheses == 1 and
            _MATCH_ASM.match(line)):
          # Enter assembly block
          inner_block.inline_asm = _INSIDE_ASM
        else:
          # Not entering assembly block. If previous line was _END_ASM,
          # we will now shift to _NO_ASM state.
          inner_block.inline_asm = _NO_ASM
      elif (inner_block.inline_asm == _INSIDE_ASM and
            inner_block.open_parentheses == 0):
        # Exit assembly block
        inner_block.inline_asm = _END_ASM
    # Consume namespace declaration at the beginning of the line. Do
    # this in a loop so that we catch same line declarations like this:
    # namespace proto2 { namespace bridge { class MessageSet; } }
    while True:
      # Match start of namespace. The "\b\s*" below catches namespace
      # declarations even if it weren't followed by a whitespace, this
      # is so that we don't confuse our namespace checker. The
      # missing spaces will be flagged by CheckSpacing.
      namespace_decl_match = Match(r'^\s*namespace\b\s*([:\w]+)?(.*)$', line)
      if not namespace_decl_match:
        break
      new_namespace = _NamespaceInfo(namespace_decl_match.group(1), linenum)
      self.stack.append(new_namespace)
      line = namespace_decl_match.group(2)
      if line.find('{') != -1:
        new_namespace.seen_open_brace = True
        line = line[line.find('{') + 1:]
    # Look for a class declaration in whatever is left of the line
    # after parsing namespaces. The regexp accounts for decorated classes
    # such as in:
    # class LOCKABLE API Object {
    # };
    class_decl_match = Match(
        r'^(\s*(?:template\s*<[\w\s<>,:]*>\s*)?'
        r'(class|struct)\s+(?:[A-Z_]+\s+)*(\w+(?:::\w+)*))'
        r'(.*)$', line)
    if (class_decl_match and
        (not self.stack or self.stack[-1].open_parentheses == 0)):
      # We do not want to accept classes that are actually template arguments:
      # template <class Ignore1,
      # class Ignore2 = Default<Args>,
      # template <Args> class Ignore3>
      # void Function() {};
      #
      # To avoid template argument cases, we scan forward and look for
      # an unmatched '>'. If we see one, assume we are inside a
      # template argument list.
      end_declaration = len(class_decl_match.group(1))
      if not self.InTemplateArgumentList(clean_lines, linenum, end_declaration):
        self.stack.append(_ClassInfo(
            class_decl_match.group(3), class_decl_match.group(2),
            clean_lines, linenum))
      line = class_decl_match.group(4)
    # If we have not yet seen the opening brace for the innermost block,
    # run checks here.
    if not self.SeenOpenBrace():
      self.stack[-1].CheckBegin(filename, clean_lines, linenum, error)
    # Update access control if we are inside a class/struct
    if self.stack and isinstance(self.stack[-1], _ClassInfo):
      classinfo = self.stack[-1]
      access_match = Match(
          r'^(.*)\b(public|private|protected|signals)(\s+(?:slots\s*)?)?'
          r':(?:[^:]|$)',
          line)
      if access_match:
        classinfo.access = access_match.group(2)
        # Check that access keywords are indented +1 space. Skip this
        # check if the keywords are not preceded by whitespaces.
        indent = access_match.group(1)
        if (len(indent) != classinfo.class_indent + 1 and
            Match(r'^\s*$', indent)):
          if classinfo.is_struct:
            parent = 'struct ' + classinfo.name
          else:
            parent = 'class ' + classinfo.name
          slots = ''
          if access_match.group(3):
            slots = access_match.group(3)
          error(filename, linenum, 'whitespace/indent', 3,
                '%s%s: should be indented +1 space inside %s' % (
                    access_match.group(2), slots, parent))
    # Consume braces or semicolons from what's left of the line
    while True:
      # Match first brace, semicolon, or closed parenthesis.
      matched = Match(r'^[^{;)}]*([{;)}])(.*)$', line)
      if not matched:
        break
      token = matched.group(1)
      if token == '{':
        # If namespace or class hasn't seen a opening brace yet, mark
        # namespace/class head as complete. Push a new block onto the
        # stack otherwise.
        if not self.SeenOpenBrace():
          self.stack[-1].seen_open_brace = True
        elif Match(r'^extern\s*"[^"]*"\s*\{', line):
          self.stack.append(_ExternCInfo())
        else:
          self.stack.append(_BlockInfo(True))
          if _MATCH_ASM.match(line):
            self.stack[-1].inline_asm = _BLOCK_ASM
      elif token == ';' or token == ')':
        # If we haven't seen an opening brace yet, but we already saw
        # a semicolon, this is probably a forward declaration. Pop
        # the stack for these.
        #
        # Similarly, if we haven't seen an opening brace yet, but we
        # already saw a closing parenthesis, then these are probably
        # function arguments with extra "class" or "struct" keywords.
        # Also pop these stack for these.
        if not self.SeenOpenBrace():
          self.stack.pop()
      else:  # token == '}'
        # Perform end of block checks and pop the stack.
        if self.stack:
          self.stack[-1].CheckEnd(filename, clean_lines, linenum, error)
          self.stack.pop()
      # Continue scanning the remainder of the line.
      line = matched.group(2)
  def InnermostClass(self):
    """Get class info on the top of the stack.
    Returns:
      A _ClassInfo object if we are inside a class, or None otherwise.
    """
    # Walk the stack from the top down to find the nearest enclosing class.
    for i in range(len(self.stack), 0, -1):
      classinfo = self.stack[i - 1]
      if isinstance(classinfo, _ClassInfo):
        return classinfo
    return None
  def CheckCompletedBlocks(self, filename, error):
    """Checks that all classes and namespaces have been completely parsed.
    Call this when all lines in a file have been processed.
    Args:
      filename: The name of the current file.
      error: The function to call with any errors found.
    """
    # Note: This test can result in false positives if #ifdef constructs
    # get in the way of brace matching. See the testBuildClass test in
    # cpplint_unittest.py for an example of this.
    for obj in self.stack:
      if isinstance(obj, _ClassInfo):
        error(filename, obj.starting_linenum, 'build/class', 5,
              'Failed to find complete declaration of class %s' %
              obj.name)
      elif isinstance(obj, _NamespaceInfo):
        error(filename, obj.starting_linenum, 'build/namespaces', 5,
              'Failed to find complete declaration of namespace %s' %
              obj.name)
def CheckForNonStandardConstructs(filename, clean_lines, linenum,
                                  nesting_state, error):
  r"""Logs an error if we see certain non-ANSI constructs ignored by gcc-2.
  Complain about several constructs which gcc-2 accepts, but which are
  not standard C++. Warning about these in lint is one way to ease the
  transition to new compilers.
  - put storage class first (e.g. "static const" instead of "const static").
  - "%lld" instead of %qd" in printf-type functions.
  - "%1$d" is non-standard in printf-type functions.
  - "\%" is an undefined character escape sequence.
  - text after #endif is not allowed.
  - invalid inner-style forward declaration.
  - >? and <? operators, and their >?= and <?= cousins.
  Additionally, check for constructor/destructor style violations and reference
  members, as it is very convenient to do so while checking for
  gcc-2 compliance.
  Args:
    filename: The name of the current file.
    clean_lines: A CleansedLines instance containing the file.
    linenum: The number of the line to check.
    nesting_state: A NestingState instance which maintains information about
      the current stack of nested blocks being parsed.
    error: A callable to which errors are reported, which takes 4 arguments:
           filename, line number, error level, and message
  """
  # Remove comments from the line, but leave in strings for now.
  # (Strings are needed so the printf-format checks below can see them.)
  line = clean_lines.lines[linenum]
  if Search(r'printf\s*\(.*".*%[-+ ]?\d*q', line):
    error(filename, linenum, 'runtime/printf_format', 3,
          '%q in format strings is deprecated. Use %ll instead.')
  if Search(r'printf\s*\(.*".*%\d+\$', line):
    error(filename, linenum, 'runtime/printf_format', 2,
          '%N$ formats are unconventional. Try rewriting to avoid them.')
  # Remove escaped backslashes before looking for undefined escapes.
  line = line.replace('\\\\', '')
  if Search(r'("|\').*\\(%|\[|\(|{)', line):
    error(filename, linenum, 'build/printf_format', 3,
          '%, [, (, and { are undefined character escapes. Unescape them.')
  # For the rest, work with both comments and strings removed.
  line = clean_lines.elided[linenum]
  if Search(r'\b(const|volatile|void|char|short|int|long'
            r'|float|double|signed|unsigned'
            r'|schar|u?int8|u?int16|u?int32|u?int64)'
            r'\s+(register|static|extern|typedef)\b',
            line):
    error(filename, linenum, 'build/storage_class', 5,
          'Storage class (static, extern, typedef, etc) should be first.')
  if Match(r'\s*#\s*endif\s*[^/\s]+', line):
    error(filename, linenum, 'build/endif_comment', 5,
          'Uncommented text after #endif is non-standard. Use a comment.')
  if Match(r'\s*class\s+(\w+\s*::\s*)+\w+\s*;', line):
    error(filename, linenum, 'build/forward_decl', 5,
          'Inner-style forward declarations are invalid. Remove this line.')
  if Search(r'(\w+|[+-]?\d+(\.\d*)?)\s*(<|>)\?=?\s*(\w+|[+-]?\d+)(\.\d*)?',
            line):
    error(filename, linenum, 'build/deprecated', 3,
          '>? and <? (max and min) operators are non-standard and deprecated.')
  if Search(r'^\s*const\s*string\s*&\s*\w+\s*;', line):
    # TODO(unknown): Could it be expanded safely to arbitrary references,
    # without triggering too many false positives? The first
    # attempt triggered 5 warnings for mostly benign code in the regtest, hence
    # the restriction.
    # Here's the original regexp, for the reference:
    # type_name = r'\w+((\s*::\s*\w+)|(\s*<\s*\w+?\s*>))?'
    # r'\s*const\s*' + type_name + '\s*&\s*\w+\s*;'
    error(filename, linenum, 'runtime/member_string_references', 2,
          'const string& members are dangerous. It is much better to use '
          'alternatives, such as pointers or simple constants.')
  # Everything else in this function operates on class declarations.
  # Return early if the top of the nesting stack is not a class, or if
  # the class head is not completed yet.
  classinfo = nesting_state.InnermostClass()
  if not classinfo or not classinfo.seen_open_brace:
    return
  # The class may have been declared with namespace or classname qualifiers.
  # The constructor and destructor will not have those qualifiers.
  base_classname = classinfo.name.split('::')[-1]
  # Look for single-argument constructors that aren't marked explicit.
  # Technically a valid construct, but against style. Also look for
  # non-single-argument constructors which are also technically valid, but
  # strongly suggest something is wrong.
  # Matches "ClassName(args)" with optional inline/explicit qualifiers in
  # either order; group(1) captures "explicit", group(2) the argument text.
  explicit_constructor_match = Match(
      r'\s+(?:inline\s+)?(explicit\s+)?(?:inline\s+)?%s\s*'
      r'\(((?:[^()]|\([^()]*\))*)\)'
      % re.escape(base_classname),
      line)
  if explicit_constructor_match:
    is_marked_explicit = explicit_constructor_match.group(1)
    if not explicit_constructor_match.group(2):
      constructor_args = []
    else:
      constructor_args = explicit_constructor_match.group(2).split(',')
    # collapse arguments so that commas in template parameter lists and function
    # argument parameter lists don't split arguments in two
    # (an arg with unbalanced <> or () absorbs the following pieces).
    i = 0
    while i < len(constructor_args):
      constructor_arg = constructor_args[i]
      while (constructor_arg.count('<') > constructor_arg.count('>') or
             constructor_arg.count('(') > constructor_arg.count(')')):
        constructor_arg += ',' + constructor_args[i + 1]
        del constructor_args[i + 1]
      constructor_args[i] = constructor_arg
      i += 1
    defaulted_args = [arg for arg in constructor_args if '=' in arg]
    noarg_constructor = (not constructor_args or  # empty arg list
                         # 'void' arg specifier
                         (len(constructor_args) == 1 and
                          constructor_args[0].strip() == 'void'))
    onearg_constructor = ((len(constructor_args) == 1 and  # exactly one arg
                           not noarg_constructor) or
                          # all but at most one arg defaulted
                          (len(constructor_args) >= 1 and
                           not noarg_constructor and
                           len(defaulted_args) >= len(constructor_args) - 1))
    initializer_list_constructor = bool(
        onearg_constructor and
        Search(r'\bstd\s*::\s*initializer_list\b', constructor_args[0]))
    copy_constructor = bool(
        onearg_constructor and
        Match(r'(const\s+)?%s(\s*<[^>]*>)?(\s+const)?\s*(?:<\w+>\s*)?&'
              % re.escape(base_classname), constructor_args[0].strip()))
    # Copy and initializer_list constructors are exempt: the style guide
    # only requires 'explicit' on converting constructors.
    if (not is_marked_explicit and
        onearg_constructor and
        not initializer_list_constructor and
        not copy_constructor):
      if defaulted_args:
        error(filename, linenum, 'runtime/explicit', 5,
              'Constructors callable with one argument '
              'should be marked explicit.')
      else:
        error(filename, linenum, 'runtime/explicit', 5,
              'Single-parameter constructors should be marked explicit.')
    elif is_marked_explicit and not onearg_constructor:
      if noarg_constructor:
        error(filename, linenum, 'runtime/explicit', 5,
              'Zero-parameter constructors should not be marked explicit.')
      else:
        error(filename, linenum, 'runtime/explicit', 0,
              'Constructors that require multiple arguments '
              'should not be marked explicit.')
def CheckSpacingForFunctionCall(filename, clean_lines, linenum, error):
  """Checks for the correctness of various spacing around function calls.
  Args:
    filename: The name of the current file.
    clean_lines: A CleansedLines instance containing the file.
    linenum: The number of the line to check.
    error: The function to call with any errors found.
  """
  # Use the elided line so comment/string contents can't produce matches.
  line = clean_lines.elided[linenum]
  # Since function calls often occur inside if/for/while/switch
  # expressions - which have their own, more liberal conventions - we
  # first see if we should be looking inside such an expression for a
  # function call, to which we can apply more strict standards.
  fncall = line    # if there's no control flow construct, look at whole line
  for pattern in (r'\bif\s*\((.*)\)\s*{',
                  r'\bfor\s*\((.*)\)\s*{',
                  r'\bwhile\s*\((.*)\)\s*[{;]',
                  r'\bswitch\s*\((.*)\)\s*{'):
    match = Search(pattern, line)
    if match:
      fncall = match.group(1)    # look inside the parens for function calls
      break
  # Except in if/for/while/switch, there should never be space
  # immediately inside parens (eg "f( 3, 4 )"). We make an exception
  # for nested parens ( (a+b) + c ). Likewise, there should never be
  # a space before a ( when it's a function argument. I assume it's a
  # function argument when the char before the whitespace is legal in
  # a function name (alnum + _) and we're not starting a macro. Also ignore
  # pointers and references to arrays and functions coz they're too tricky:
  # we use a very simple way to recognize these:
  # " (something)(maybe-something)" or
  # " (something)(maybe-something," or
  # " (something)[something]"
  # Note that we assume the contents of [] to be short enough that
  # they'll never need to wrap.
  if (  # Ignore control structures.
      not Search(r'\b(if|for|while|switch|return|new|delete|catch|sizeof)\b',
                 fncall) and
      # Ignore pointers/references to functions.
      not Search(r' \([^)]+\)\([^)]*(\)|,$)', fncall) and
      # Ignore pointers/references to arrays.
      not Search(r' \([^)]+\)\[[^\]]+\]', fncall)):
    # (?!\s*\\$) below skips lines that end in a backslash continuation.
    if Search(r'\w\s*\(\s(?!\s*\\$)', fncall):      # a ( used for a fn call
      error(filename, linenum, 'whitespace/parens', 4,
            'Extra space after ( in function call')
    elif Search(r'\(\s+(?!(\s*\\)|\()', fncall):
      error(filename, linenum, 'whitespace/parens', 2,
            'Extra space after (')
    if (Search(r'\w\s+\(', fncall) and
        not Search(r'#\s*define|typedef|using\s+\w+\s*=', fncall) and
        not Search(r'\w\s+\((\w+::)*\*\w+\)\(', fncall)):
      # TODO(unknown): Space after an operator function seem to be a common
      # error, silence those for now by restricting them to highest verbosity.
      if Search(r'\boperator_*\b', line):
        error(filename, linenum, 'whitespace/parens', 0,
              'Extra space before ( in function call')
      else:
        error(filename, linenum, 'whitespace/parens', 4,
              'Extra space before ( in function call')
    # If the ) is followed only by a newline or a { + newline, assume it's
    # part of a control statement (if/while/etc), and don't complain
    if Search(r'[^)]\s+\)\s*[^{\s]', fncall):
      # If the closing parenthesis is preceded by only whitespaces,
      # try to give a more descriptive error message.
      if Search(r'^\s+\)', fncall):
        error(filename, linenum, 'whitespace/parens', 2,
              'Closing ) should be moved to the previous line')
      else:
        error(filename, linenum, 'whitespace/parens', 2,
              'Extra space before )')
def IsBlankLine(line):
  """Returns true if the given line is blank.
  We consider a line to be blank if the line is empty or consists of
  only white spaces.
  Args:
    line: A line of a string.
  Returns:
    True, if the given line is blank.
  """
  # An empty string strips to '', and so does a whitespace-only one.
  return line.strip() == ''
def CheckForNamespaceIndentation(filename, nesting_state, clean_lines, line,
                                 error):
  """Flags improperly indented items directly inside a namespace body.
  Detects whether the current line is the first item inside a namespace
  block (the innermost block has indentation checking enabled and the
  previously entered scope was that namespace), and if so delegates the
  actual indentation validation to CheckItemIndentationInNamespace.
  Args:
    filename: The name of the current file.
    nesting_state: A NestingState instance which maintains information about
      the current stack of nested blocks being parsed.
    clean_lines: A CleansedLines instance containing the file.
    line: The number of the line to check.
    error: The function to call with any errors found.
  """
  is_namespace_indent_item = (
      len(nesting_state.stack) > 1 and
      nesting_state.stack[-1].check_namespace_indentation and
      isinstance(nesting_state.previous_stack_top, _NamespaceInfo) and
      nesting_state.previous_stack_top == nesting_state.stack[-2])
  if ShouldCheckNamespaceIndentation(nesting_state, is_namespace_indent_item,
                                     clean_lines.elided, line):
    CheckItemIndentationInNamespace(filename, clean_lines.elided,
                                    line, error)
def CheckForFunctionLengths(filename, clean_lines, linenum,
                            function_state, error):
  """Reports for long function bodies.
  For an overview why this is done, see:
  http://google-styleguide.googlecode.com/svn/trunk/cppguide.xml#Write_Short_Functions
  Uses a simplistic algorithm assuming other style guidelines
  (especially spacing) are followed.
  Only checks unindented functions, so class members are unchecked.
  Trivial bodies are unchecked, so constructors with huge initializer lists
  may be missed.
  Blank/comment lines are not counted so as to avoid encouraging the removal
  of vertical space and comments just to get through a lint check.
  NOLINT *on the last line of a function* disables this check.
  Args:
    filename: The name of the current file.
    clean_lines: A CleansedLines instance containing the file.
    linenum: The number of the line to check.
    function_state: Current function name and lines in body so far.
    error: The function to call with any errors found.
  """
  lines = clean_lines.lines
  line = lines[linenum]
  joined_line = ''
  starting_func = False
  regexp = r'(\w(\w|::|\*|\&|\s)*)\('  # decls * & space::name( ...
  match_result = Match(regexp, line)
  if match_result:
    # If the name is all caps and underscores, figure it's a macro and
    # ignore it, unless it's TEST or TEST_F.
    function_name = match_result.group(1).split()[-1]
    if function_name == 'TEST' or function_name == 'TEST_F' or (
        not Match(r'[A-Z_]+$', function_name)):
      starting_func = True
  if starting_func:
    # Scan forward from the declaration to find either a trivial body /
    # declaration terminator (';' or '}') or the opening '{' of a real body.
    body_found = False
    for start_linenum in xrange(linenum, clean_lines.NumLines()):
      start_line = lines[start_linenum]
      joined_line += ' ' + start_line.lstrip()
      if Search(r'(;|})', start_line):  # Declarations and trivial functions
        body_found = True
        break                              # ... ignore
      elif Search(r'{', start_line):
        body_found = True
        function = Search(r'((\w|:)*)\(', line).group(1)
        if Match(r'TEST', function):    # Handle TEST... macros
          parameter_regexp = Search(r'(\(.*\))', joined_line)
          if parameter_regexp:             # Ignore bad syntax
            function += parameter_regexp.group(1)
        else:
          function += '()'
        function_state.Begin(function)
        break
    if not body_found:
      # No body for the function (or evidence of a non-function) was found.
      error(filename, linenum, 'readability/fn_size', 5,
            'Lint failed to find start of function body.')
  elif Match(r'^\}\s*$', line):  # function end
    function_state.Check(error, filename, linenum)
    function_state.End()
  elif not Match(r'^\s*$', line):
    function_state.Count()  # Count non-blank/non-comment lines.
# Matches the start of a "// TODO" comment.  Groups: (1) whitespace between
# "//" and "TODO", (2) the optional "(username)" part, (3) the character
# right after the colon; CheckComment uses these to validate spacing.
_RE_PATTERN_TODO = re.compile(r'^//(\s*)TODO(\(.+?\))?:?(\s|$)?')
def CheckComment(line, filename, linenum, next_line_start, error):
  """Checks for common mistakes in comments.
  Args:
    line: The line in question.
    filename: The name of the current file.
    linenum: The number of the line to check.
    next_line_start: The first non-whitespace column of the next line.
    error: The function to call with any errors found.
  """
  commentpos = line.find('//')
  if commentpos != -1:
    # Check if the // may be in quotes. If so, ignore it
    # Comparisons made explicit for clarity -- pylint: disable=g-explicit-bool-comparison
    # An even count of unescaped '"' before the // means we are outside
    # a string literal.
    if (line.count('"', 0, commentpos) -
        line.count('\\"', 0, commentpos)) % 2 == 0:   # not in quotes
      # Allow one space for new scopes, two spaces otherwise:
      if (not (Match(r'^.*{ *//', line) and next_line_start == commentpos) and
          ((commentpos >= 1 and
            line[commentpos-1] not in string.whitespace) or
           (commentpos >= 2 and
            line[commentpos-2] not in string.whitespace))):
        error(filename, linenum, 'whitespace/comments', 2,
              'At least two spaces is best between code and comments')
      # Checks for common mistakes in TODO comments.
      comment = line[commentpos:]
      match = _RE_PATTERN_TODO.match(comment)
      if match:
        # One whitespace is correct; zero whitespace is handled elsewhere.
        leading_whitespace = match.group(1)
        if len(leading_whitespace) > 1:
          error(filename, linenum, 'whitespace/todo', 2,
                'Too many spaces before TODO')
        username = match.group(2)
        if not username:
          error(filename, linenum, 'readability/todo', 2,
                'Missing username in TODO; it should look like '
                '"// TODO(my_username): Stuff."')
        middle_whitespace = match.group(3)
        # Comparisons made explicit for correctness -- pylint: disable=g-explicit-bool-comparison
        if middle_whitespace != ' ' and middle_whitespace != '':
          error(filename, linenum, 'whitespace/todo', 2,
                'TODO(my_username) should be followed by a space')
      # If the comment contains an alphanumeric character, there
      # should be a space somewhere between it and the //.
      if Match(r'//[^ ]*\w', comment):
        error(filename, linenum, 'whitespace/comments', 4,
              'Should have a space between // and comment')
def CheckAccess(filename, clean_lines, linenum, nesting_state, error):
  """Checks for improper use of DISALLOW* macros.
  Args:
    filename: The name of the current file.
    clean_lines: A CleansedLines instance containing the file.
    linenum: The number of the line to check.
    nesting_state: A NestingState instance which maintains information about
      the current stack of nested blocks being parsed.
    error: The function to call with any errors found.
  """
  # Work on the elided line so comments and strings cannot fake a match.
  elided_line = clean_lines.elided[linenum]
  macro_match = Match((r'\s*(DISALLOW_COPY_AND_ASSIGN|'
                       r'DISALLOW_IMPLICIT_CONSTRUCTORS)'), elided_line)
  if not macro_match:
    return
  stack = nesting_state.stack
  if not (stack and isinstance(stack[-1], _ClassInfo)):
    # Found DISALLOW* macro outside a class declaration, or perhaps it
    # was used inside a function when it should have been part of the
    # class declaration.  We could issue a warning here, but it
    # probably resulted in a compiler error already.
    return
  if stack[-1].access != 'private':
    error(filename, linenum, 'readability/constructors', 3,
          '%s must be in the private: section' % macro_match.group(1))
def CheckSpacing(filename, clean_lines, linenum, nesting_state, error):
  """Checks for the correctness of various spacing issues in the code.
  Things we check for: spaces around operators, spaces after
  if/for/while/switch, no spaces around parens in function calls, two
  spaces between code and comment, don't start a block with a blank
  line, don't end a function with a blank line, don't add a blank line
  after public/protected/private, don't have too many blank lines in a row.
  Args:
    filename: The name of the current file.
    clean_lines: A CleansedLines instance containing the file.
    linenum: The number of the line to check.
    nesting_state: A NestingState instance which maintains information about
      the current stack of nested blocks being parsed.
    error: The function to call with any errors found.
  """
  # Don't use "elided" lines here, otherwise we can't check commented lines.
  # Don't want to use "raw" either, because we don't want to check inside C++11
  # raw strings,
  raw = clean_lines.lines_without_raw_strings
  line = raw[linenum]
  # Before nixing comments, check if the line is blank for no good
  # reason. This includes the first line after a block is opened, and
  # blank lines at the end of a function (ie, right before a line like '}'
  #
  # Skip all the blank line checks if we are immediately inside a
  # namespace body. In other words, don't issue blank line warnings
  # for this block:
  #   namespace {
  #
  #   }
  #
  # A warning about missing end of namespace comments will be issued instead.
  #
  # Also skip blank line checks for 'extern "C"' blocks, which are formatted
  # like namespaces.
  if (IsBlankLine(line) and
      not nesting_state.InNamespaceBody() and
      not nesting_state.InExternC()):
    elided = clean_lines.elided
    prev_line = elided[linenum - 1]
    prevbrace = prev_line.rfind('{')
    # TODO(unknown): Don't complain if line before blank line, and line after,
    #                both start with alnums and are indented the same amount.
    #                This ignores whitespace at the start of a namespace block
    #                because those are not usually indented.
    if prevbrace != -1 and prev_line[prevbrace:].find('}') == -1:
      # OK, we have a blank line at the start of a code block. Before we
      # complain, we check if it is an exception to the rule: The previous
      # non-empty line has the parameters of a function header that are indented
      # 4 spaces (because they did not fit in a 80 column line when placed on
      # the same line as the function name). We also check for the case where
      # the previous line is indented 6 spaces, which may happen when the
      # initializers of a constructor do not fit into a 80 column line.
      exception = False
      if Match(r' {6}\w', prev_line):  # Initializer list?
        # We are looking for the opening column of initializer list, which
        # should be indented 4 spaces to cause 6 space indentation afterwards.
        search_position = linenum-2
        while (search_position >= 0
               and Match(r' {6}\w', elided[search_position])):
          search_position -= 1
        exception = (search_position >= 0
                     and elided[search_position][:5] == '    :')
      else:
        # Search for the function arguments or an initializer list. We use a
        # simple heuristic here: If the line is indented 4 spaces; and we have a
        # closing paren, without the opening paren, followed by an opening brace
        # or colon (for initializer lists) we assume that it is the last line of
        # a function header. If we have a colon indented 4 spaces, it is an
        # initializer list.
        exception = (Match(r' {4}\w[^\(]*\)\s*(const\s*)?(\{\s*$|:)',
                           prev_line)
                     or Match(r' {4}:', prev_line))
      if not exception:
        error(filename, linenum, 'whitespace/blank_line', 2,
              'Redundant blank line at the start of a code block '
              'should be deleted.')
    # Ignore blank lines at the end of a block in a long if-else
    # chain, like this:
    #   if (condition1) {
    #     // Something followed by a blank line
    #
    #   } else if (condition2) {
    #     // Something else
    #   }
    if linenum + 1 < clean_lines.NumLines():
      next_line = raw[linenum + 1]
      if (next_line
          and Match(r'\s*}', next_line)
          and next_line.find('} else ') == -1):
        error(filename, linenum, 'whitespace/blank_line', 3,
              'Redundant blank line at the end of a code block '
              'should be deleted.')
    matched = Match(r'\s*(public|protected|private):', prev_line)
    if matched:
      error(filename, linenum, 'whitespace/blank_line', 3,
            'Do not leave a blank line after "%s:"' % matched.group(1))
  # Next, check comments
  next_line_start = 0
  if linenum + 1 < clean_lines.NumLines():
    next_line = raw[linenum + 1]
    next_line_start = len(next_line) - len(next_line.lstrip())
  CheckComment(line, filename, linenum, next_line_start, error)
  # get rid of comments and strings
  line = clean_lines.elided[linenum]
  # You shouldn't have spaces before your brackets, except maybe after
  # 'delete []' or 'return []() {};'
  if Search(r'\w\s+\[', line) and not Search(r'(?:delete|return)\s+\[', line):
    error(filename, linenum, 'whitespace/braces', 5,
          'Extra space before [')
  # In range-based for, we wanted spaces before and after the colon, but
  # not around "::" tokens that might appear.
  if (Search(r'for *\(.*[^:]:[^: ]', line) or
      Search(r'for *\(.*[^: ]:[^:]', line)):
    error(filename, linenum, 'whitespace/forcolon', 2,
          'Missing space around colon in range-based for loop')
def CheckOperatorSpacing(filename, clean_lines, linenum, error):
  """Checks for horizontal spacing around operators.
  Args:
    filename: The name of the current file.
    clean_lines: A CleansedLines instance containing the file.
    linenum: The number of the line to check.
    error: The function to call with any errors found.
  """
  line = clean_lines.elided[linenum]
  # Don't try to do spacing checks for operator methods. Do this by
  # replacing the troublesome characters with something else,
  # preserving column position for all other characters.
  #
  # The replacement is done repeatedly to avoid false positives from
  # operators that call operators.
  while True:
    match = Match(r'^(.*\boperator\b)(\S+)(\s*\(.*)$', line)
    if match:
      # Replace e.g. "operator==" with "operator__" (same width) so the
      # operator token itself is invisible to the checks below.
      line = match.group(1) + ('_' * len(match.group(2))) + match.group(3)
    else:
      break
  # We allow no-spaces around = within an if: "if ( (a=Foo()) == 0 )".
  # Otherwise not. Note we only check for non-spaces on *both* sides;
  # sometimes people put non-spaces on one side when aligning ='s among
  # many lines (not that this is behavior that I approve of...)
  if Search(r'[\w.]=[\w.]', line) and not Search(r'\b(if|while) ', line):
    error(filename, linenum, 'whitespace/operators', 4,
          'Missing spaces around =')
  # It's ok not to have spaces around binary operators like + - * /, but if
  # there's too little whitespace, we get concerned. It's hard to tell,
  # though, so we punt on this one for now. TODO.
  # You should always have whitespace around binary operators.
  #
  # Check <= and >= first to avoid false positives with < and >, then
  # check non-include lines for spacing around < and >.
  #
  # If the operator is followed by a comma, assume it's be used in a
  # macro context and don't do any checks. This avoids false
  # positives.
  #
  # Note that && is not included here. Those are checked separately
  # in CheckRValueReference
  match = Search(r'[^<>=!\s](==|!=|<=|>=|\|\|)[^<>=!\s,;\)]', line)
  if match:
    error(filename, linenum, 'whitespace/operators', 3,
          'Missing spaces around %s' % match.group(1))
  elif not Match(r'#.*include', line):
    # Look for < that is not surrounded by spaces. This is only
    # triggered if both sides are missing spaces, even though
    # technically should should flag if at least one side is missing a
    # space. This is done to avoid some false positives with shifts.
    match = Match(r'^(.*[^\s<])<[^\s=<,]', line)
    if match:
      # If the < has a matching > later on, it is a template bracket,
      # not a comparison, so only warn when no match is found.
      (_, _, end_pos) = CloseExpression(
          clean_lines, linenum, len(match.group(1)))
      if end_pos <= -1:
        error(filename, linenum, 'whitespace/operators', 3,
              'Missing spaces around <')
    # Look for > that is not surrounded by spaces. Similar to the
    # above, we only trigger if both sides are missing spaces to avoid
    # false positives with shifts.
    match = Match(r'^(.*[^-\s>])>[^\s=>,]', line)
    if match:
      (_, _, start_pos) = ReverseCloseExpression(
          clean_lines, linenum, len(match.group(1)))
      if start_pos <= -1:
        error(filename, linenum, 'whitespace/operators', 3,
              'Missing spaces around >')
  # We allow no-spaces around << when used like this: 10<<20, but
  # not otherwise (particularly, not when used as streams)
  #
  # We also allow operators following an opening parenthesis, since
  # those tend to be macros that deal with operators.
  match = Search(r'(operator|\S)(?:L|UL|ULL|l|ul|ull)?<<([^\s,=])', line)
  if (match and match.group(1) != '(' and
      not (match.group(1).isdigit() and match.group(2).isdigit()) and
      not (match.group(1) == 'operator' and match.group(2) == ';')):
    error(filename, linenum, 'whitespace/operators', 3,
          'Missing spaces around <<')
  # We allow no-spaces around >> for almost anything. This is because
  # C++11 allows ">>" to close nested templates, which accounts for
  # most cases when ">>" is not followed by a space.
  #
  # We still warn on ">>" followed by alpha character, because that is
  # likely due to ">>" being used for right shifts, e.g.:
  #   value >> alpha
  #
  # When ">>" is used to close templates, the alphanumeric letter that
  # follows would be part of an identifier, and there should still be
  # a space separating the template type and the identifier.
  #   type<type<type>> alpha
  match = Search(r'>>[a-zA-Z_]', line)
  if match:
    error(filename, linenum, 'whitespace/operators', 3,
          'Missing spaces around >>')
  # There shouldn't be space around unary operators
  match = Search(r'(!\s|~\s|[\s]--[\s;]|[\s]\+\+[\s;])', line)
  if match:
    error(filename, linenum, 'whitespace/operators', 4,
          'Extra space for operator %s' % match.group(1))
def CheckParenthesisSpacing(filename, clean_lines, linenum, error):
  """Checks for horizontal spacing around parentheses.
  Args:
    filename: The name of the current file.
    clean_lines: A CleansedLines instance containing the file.
    linenum: The number of the line to check.
    error: The function to call with any errors found.
  """
  line = clean_lines.elided[linenum]
  # No spaces after an if, while, switch, or for
  match = Search(r' (if\(|for\(|while\(|switch\()', line)
  if match:
    error(filename, linenum, 'whitespace/parens', 5,
          'Missing space before ( in %s' % match.group(1))
  # For if/for/while/switch, the left and right parens should be
  # consistent about how many spaces are inside the parens, and
  # there should either be zero or one spaces inside the parens.
  # We don't want: "if ( foo)" or "if ( foo )".
  # Exception: "for ( ; foo; bar)" and "for (foo; bar; )" are allowed.
  # Groups: (2) spaces after '(', (3) first non-space char, (4) spaces
  # before ')'.
  match = Search(r'\b(if|for|while|switch)\s*'
                 r'\(([ ]*)(.).*[^ ]+([ ]*)\)\s*{\s*$',
                 line)
  if match:
    if len(match.group(2)) != len(match.group(4)):
      if not (match.group(3) == ';' and
              len(match.group(2)) == 1 + len(match.group(4)) or
              not match.group(2) and Search(r'\bfor\s*\(.*; \)', line)):
        error(filename, linenum, 'whitespace/parens', 5,
              'Mismatching spaces inside () in %s' % match.group(1))
    if len(match.group(2)) not in [0, 1]:
      error(filename, linenum, 'whitespace/parens', 5,
            'Should have zero or one spaces inside ( and ) in %s' %
            match.group(1))
def CheckCommaSpacing(filename, clean_lines, linenum, error):
  """Checks for horizontal spacing near commas and semicolons.
  Args:
    filename: The name of the current file.
    clean_lines: A CleansedLines instance containing the file.
    linenum: The number of the line to check.
    error: The function to call with any errors found.
  """
  raw = clean_lines.lines_without_raw_strings
  line = clean_lines.elided[linenum]
  # You should always have a space after a comma (either as fn arg or operator)
  #
  # This does not apply when the non-space character following the
  # comma is another comma, since the only time when that happens is
  # for empty macro arguments.
  #
  # We run this check in two passes: first pass on elided lines to
  # verify that lines contain missing whitespaces, second pass on raw
  # lines to confirm that those missing whitespaces are not due to
  # elided comments.
  # ("operator,(" is rewritten to "F(" first so the comma in the operator
  # name itself is not flagged.)
  if (Search(r',[^,\s]', ReplaceAll(r'\boperator\s*,\s*\(', 'F(', line)) and
      Search(r',[^,\s]', raw[linenum])):
    error(filename, linenum, 'whitespace/comma', 3,
          'Missing space after ,')
  # You should always have a space after a semicolon
  # except for few corner cases
  # TODO(unknown): clarify if 'if (1) { return 1;}' is requires one more
  # space after ;
  if Search(r';[^\s};\\)/]', line):
    error(filename, linenum, 'whitespace/semicolon', 3,
          'Missing space after ;')
def CheckBracesSpacing(filename, clean_lines, linenum, error):
  """Checks for horizontal spacing near braces and semicolons.
  Args:
    filename: The name of the current file.
    clean_lines: A CleansedLines instance containing the file.
    linenum: The number of the line to check.
    error: The function to call with any errors found.
  """
  line = clean_lines.elided[linenum]
  # Except after an opening paren, or after another opening brace (in case of
  # an initializer list, for instance), you should have spaces before your
  # braces. And since you should never have braces at the beginning of a line,
  # this is an easy test.
  match = Match(r'^(.*[^ ({]){', line)
  if match:
    # Try a bit harder to check for brace initialization. This
    # happens in one of the following forms:
    #   Constructor() : initializer_list_{} { ... }
    #   Constructor{}.MemberFunction()
    #   Type variable{};
    #   FunctionCall(type{}, ...);
    #   LastArgument(..., type{});
    #   LOG(INFO) << type{} << " ...";
    #   map_of_type[{...}] = ...;
    #   ternary = expr ? new type{} : nullptr;
    #   OuterTemplate<InnerTemplateConstructor<Type>{}>
    #
    # We check for the character following the closing brace, and
    # silence the warning if it's one of those listed above, i.e.
    # "{.;,)<>]:".
    #
    # To account for nested initializer list, we allow any number of
    # closing braces up to "{;,)<". We can't simply silence the
    # warning on first sight of closing brace, because that would
    # cause false negatives for things that are not initializer lists.
    #   Silence this:         But not this:
    #     Outer{                if (...) {
    #       Inner{...}            if (...){ // Missing space before {
    #     };                    }
    #
    # There is a false negative with this approach if people inserted
    # spurious semicolons, e.g. "if (cond){};", but we will catch the
    # spurious semicolon with a separate check.
    (endline, endlinenum, endpos) = CloseExpression(
        clean_lines, linenum, len(match.group(1)))
    trailing_text = ''
    if endpos > -1:
      trailing_text = endline[endpos:]
    # Pull in up to two following lines, since the interesting character
    # may come after a line break.
    for offset in xrange(endlinenum + 1,
                         min(endlinenum + 3, clean_lines.NumLines() - 1)):
      trailing_text += clean_lines.elided[offset]
    if not Match(r'^[\s}]*[{.;,)<>\]:]', trailing_text):
      error(filename, linenum, 'whitespace/braces', 5,
            'Missing space before {')
  # Make sure '} else {' has spaces.
  if Search(r'}else', line):
    error(filename, linenum, 'whitespace/braces', 5,
          'Missing space before else')
  # You shouldn't have a space before a semicolon at the end of the line.
  # There's a special case for "for" since the style guide allows space before
  # the semicolon there.
  if Search(r':\s*;\s*$', line):
    error(filename, linenum, 'whitespace/semicolon', 5,
          'Semicolon defining empty statement. Use {} instead.')
  elif Search(r'^\s*;\s*$', line):
    error(filename, linenum, 'whitespace/semicolon', 5,
          'Line contains only semicolon. If this should be an empty statement, '
          'use {} instead.')
  elif (Search(r'\s+;\s*$', line) and
        not Search(r'\bfor\b', line)):
    error(filename, linenum, 'whitespace/semicolon', 5,
          'Extra space before last semicolon. If this should be an empty '
          'statement, use {} instead.')
def IsDecltype(clean_lines, linenum, column):
  """Check if the token ending on (linenum, column) is decltype().
  Args:
    clean_lines: A CleansedLines instance containing the file.
    linenum: the number of the line to check.
    column: end column of the token to check.
  Returns:
    True if this token is decltype() expression, False otherwise.
  """
  # Walk back to the "(" matching the ")" that ends at `column`, then see
  # whether the word "decltype" immediately precedes it.
  (text, _, open_col) = ReverseCloseExpression(clean_lines, linenum, column)
  if open_col < 0:
    return False
  return Search(r'\bdecltype\s*$', text[0:open_col]) is not None
def IsTemplateParameterList(clean_lines, linenum, column):
  """Check if the token ending on (linenum, column) is the end of template<>.

  Args:
    clean_lines: A CleansedLines instance containing the file.
    linenum: the number of the line to check.
    column: end column of the token to check.
  Returns:
    True if this token is end of a template parameter list, False otherwise.
  """
  # Find the matching opening angle bracket.  If the text right before it
  # ends with the "template" keyword, this is a template parameter list.
  (_, open_line, open_pos) = ReverseCloseExpression(
      clean_lines, linenum, column)
  if open_pos <= -1:
    return False
  return bool(Search(r'\btemplate\s*$',
                     clean_lines.elided[open_line][0:open_pos]))
def IsRValueType(clean_lines, nesting_state, linenum, column):
  """Check if the token ending on (linenum, column) is a type.

  Assumes that text to the right of the column is "&&" or a function
  name.

  Args:
    clean_lines: A CleansedLines instance containing the file.
    nesting_state: A NestingState instance which maintains information about
                   the current stack of nested blocks being parsed.
    linenum: the number of the line to check.
    column: end column of the token to check.
  Returns:
    True if this token is a type, False if we are not sure.
  """
  prefix = clean_lines.elided[linenum][0:column]

  # Get one word to the left.  If we failed to do so, this is most
  # likely not a type, since it's unlikely that the type name and "&&"
  # would be split across multiple lines.
  match = Match(r'^(.*)(\b\w+|[>*)&])\s*$', prefix)
  if not match:
    return False

  # Check text following the token.  If it's "&&>" or "&&," or "&&...", it's
  # most likely a rvalue reference used inside a template.
  suffix = clean_lines.elided[linenum][column:]
  if Match(r'&&\s*(?:[>,]|\.\.\.)', suffix):
    return True

  # Check for simple type and end of templates:
  #   int&& variable
  #   vector<int>&& variable
  #
  # Because this function is called recursively, we also need to
  # recognize pointer and reference types:
  #   int* Function()
  #   int& Function()
  if match.group(2) in ['char', 'char16_t', 'char32_t', 'wchar_t', 'bool',
                        'short', 'int', 'long', 'signed', 'unsigned',
                        'float', 'double', 'void', 'auto', '>', '*', '&']:
    return True

  # If we see a close parenthesis, look for decltype on the other side.
  # decltype would unambiguously identify a type, anything else is
  # probably a parenthesized expression and not a type.
  if match.group(2) == ')':
    return IsDecltype(
        clean_lines, linenum, len(match.group(1)) + len(match.group(2)) - 1)

  # Check for casts and cv-qualifiers.
  #   match.group(1)  remainder
  #   --------------  ---------
  #   const_cast<     type&&
  #   const           type&&
  #   type            const&&
  if Search(r'\b(?:const_cast\s*<|static_cast\s*<|dynamic_cast\s*<|'
            r'reinterpret_cast\s*<|\w+\s)\s*$',
            match.group(1)):
    return True

  # Look for a preceding symbol that might help differentiate the context.
  # These are the cases that would be ambiguous:
  #   match.group(1)  remainder
  #   --------------  ---------
  #   Call         ( expression &&
  #   Declaration  ( type&&
  #   sizeof       ( type&&
  #   if           ( expression &&
  #   while        ( expression &&
  #   for          ( type&&
  #   for(         ; expression &&
  #   statement    ; type&&
  #   block        { type&&
  #   constructor  { expression &&
  start = linenum
  line = match.group(1)
  match_symbol = None
  while start >= 0:
    # We want to skip over identifiers and commas to get to a symbol.
    # Commas are skipped so that we can find the opening parenthesis
    # for function parameter lists.
    match_symbol = Match(r'^(.*)([^\w\s,])[\w\s,]*$', line)
    if match_symbol:
      break
    start -= 1
    line = clean_lines.elided[start]

  if not match_symbol:
    # Probably the first statement in the file is an rvalue reference
    return True

  if match_symbol.group(2) == '}':
    # Found closing brace, probably an indication of this:
    #   block{} type&&
    return True

  if match_symbol.group(2) == ';':
    # Found semicolon, probably one of these:
    #   for(; expression &&
    #   statement; type&&

    # Look for the previous 'for(' in the previous lines.
    before_text = match_symbol.group(1)
    for i in xrange(start - 1, max(start - 6, 0), -1):
      before_text = clean_lines.elided[i] + before_text
    if Search(r'for\s*\([^{};]*$', before_text):
      # This is the condition inside a for-loop
      return False

    # Did not find a for-init-statement before this semicolon, so this
    # is probably a new statement and not a condition.
    return True

  if match_symbol.group(2) == '{':
    # Found opening brace, probably one of these:
    #   block{ type&& = ... ; }
    #   constructor{ expression && expression }

    # Look for a closing brace or a semicolon.  If we see a semicolon
    # first, this is probably a rvalue reference.
    line = clean_lines.elided[start][0:len(match_symbol.group(1)) + 1]
    end = start
    depth = 1
    while True:
      for ch in line:
        if ch == ';':
          return True
        elif ch == '{':
          depth += 1
        elif ch == '}':
          depth -= 1
          if depth == 0:
            return False
      end += 1
      if end >= clean_lines.NumLines():
        break
      line = clean_lines.elided[end]
    # Incomplete program?
    return False

  if match_symbol.group(2) == '(':
    # Opening parenthesis.  Need to check what's to the left of the
    # parenthesis.  Look back one extra line for additional context.
    #
    # (Fixed: a stray re-assignment of before_text used to discard the
    # previous-line context immediately after it was collected.)
    before_text = match_symbol.group(1)
    if linenum > 1:
      before_text = clean_lines.elided[linenum - 1] + before_text

    # Patterns that are likely to be types:
    #   [](type&&
    #   for (type&&
    #   sizeof(type&&
    #   operator=(type&&
    #
    if Search(r'(?:\]|\bfor|\bsizeof|\boperator\s*\S+\s*)\s*$', before_text):
      return True

    # Patterns that are likely to be expressions:
    #   if (expression &&
    #   while (expression &&
    #   : initializer(expression &&
    #   , initializer(expression &&
    #   ( FunctionCall(expression &&
    #   + FunctionCall(expression &&
    #   + (expression &&
    #
    # The last '+' represents operators such as '+' and '-'.
    if Search(r'(?:\bif|\bwhile|[-+=%^(<!?:,&*]\s*)$', before_text):
      return False

    # Something else.  Check that tokens to the left look like
    #   return_type function_name
    match_func = Match(r'^(.*)\s+\w(?:\w|::)*(?:<[^<>]*>)?\s*$',
                       match_symbol.group(1))
    if match_func:
      # Check for constructors, which don't have return types.
      if Search(r'\b(?:explicit|inline)$', match_func.group(1)):
        return True
      implicit_constructor = Match(r'\s*(\w+)\((?:const\s+)?(\w+)', prefix)
      if (implicit_constructor and
          implicit_constructor.group(1) == implicit_constructor.group(2)):
        return True
      return IsRValueType(clean_lines, nesting_state, linenum,
                          len(match_func.group(1)))

    # Nothing before the function name.  If this is inside a block scope,
    # this is probably a function call.
    return not (nesting_state.previous_stack_top and
                nesting_state.previous_stack_top.IsBlockInfo())

  if match_symbol.group(2) == '>':
    # Possibly a closing bracket, check that what's on the other side
    # looks like the start of a template.
    return IsTemplateParameterList(
        clean_lines, start, len(match_symbol.group(1)))

  # Some other symbol, usually something like "a=b&&c".  This is most
  # likely not a type.
  return False
def IsDeletedOrDefault(clean_lines, linenum):
  """Check if current constructor or operator is deleted or default.

  Args:
    clean_lines: A CleansedLines instance containing the file.
    linenum: The number of the line to check.
  Returns:
    True if this is a deleted or default constructor.
  """
  # Locate the parameter list, then look for "= delete" or "= default"
  # right after its matching closing parenthesis.
  paren_start = clean_lines.elided[linenum].find('(')
  if paren_start >= 0:
    (end_line, _, paren_end) = CloseExpression(
        clean_lines, linenum, paren_start)
    if paren_end >= 0:
      return Match(r'\s*=\s*(?:delete|default)\b', end_line[paren_end:])
  return False
def IsRValueAllowed(clean_lines, linenum):
  """Check if RValue reference is allowed on a particular line.

  Args:
    clean_lines: A CleansedLines instance containing the file.
    linenum: The number of the line to check.
  Returns:
    True if line is within the region where RValue references are allowed.
  """
  # Allow region marked by PUSH/POP macros.  Scan upward for the nearest
  # marker: a POP above us means we are outside any allowed region; a PUSH
  # means we are inside one, provided the next marker below us is the
  # matching POP.
  for i in xrange(linenum, 0, -1):
    line = clean_lines.elided[i]
    if Match(r'GOOGLE_ALLOW_RVALUE_REFERENCES_(?:PUSH|POP)', line):
      if not line.endswith('PUSH'):
        return False
      for j in xrange(linenum, clean_lines.NumLines(), 1):
        line = clean_lines.elided[j]
        if Match(r'GOOGLE_ALLOW_RVALUE_REFERENCES_(?:PUSH|POP)', line):
          return line.endswith('POP')
  # Allow operator= that is deleted or defaulted.
  line = clean_lines.elided[linenum]
  if Search(r'\boperator\s*=\s*\(', line):
    return IsDeletedOrDefault(clean_lines, linenum)
  # Allow constructors that are deleted or defaulted.  Three shapes are
  # recognized: out-of-line "Class::Class(", "explicit"/"inline" prefixed,
  # and a bare "Name(" whose previous line ends a statement or scope.
  match = Match(r'\s*([\w<>]+)\s*::\s*([\w<>]+)\s*\(', line)
  if match and match.group(1) == match.group(2):
    return IsDeletedOrDefault(clean_lines, linenum)
  if Search(r'\b(?:explicit|inline)\s+[\w<>]+\s*\(', line):
    return IsDeletedOrDefault(clean_lines, linenum)
  if Match(r'\s*[\w<>]+\s*\(', line):
    # 'ReturnType' is a placeholder that fails the blank/terminator test
    # below when there is no previous line to inspect.
    previous_line = 'ReturnType'
    if linenum > 0:
      previous_line = clean_lines.elided[linenum - 1]
    if Match(r'^\s*$', previous_line) or Search(r'[{}:;]\s*$', previous_line):
      return IsDeletedOrDefault(clean_lines, linenum)
  return False
def CheckRValueReference(filename, clean_lines, linenum, nesting_state, error):
  """Check for rvalue references.

  Args:
    filename: The name of the current file.
    clean_lines: A CleansedLines instance containing the file.
    linenum: The number of the line to check.
    nesting_state: A NestingState instance which maintains information about
                   the current stack of nested blocks being parsed.
    error: The function to call with any errors found.
  """
  # Find lines missing spaces around &&.
  # TODO(unknown): currently we don't check for rvalue references
  # with spaces surrounding the && to avoid false positives with
  # boolean expressions.
  line = clean_lines.elided[linenum]
  match = Match(r'^(.*\S)&&', line) or Match(r'(.*)&&\S', line)
  if not match:
    return
  if '(&&)' in line or Search(r'\boperator\s*$', match.group(1)):
    return

  # Either poorly formed && or an rvalue reference, check the context
  # to get a more accurate error message.  Mostly we want to determine
  # if what's to the left of "&&" is a type or not.
  and_pos = len(match.group(1))
  if not IsRValueType(clean_lines, nesting_state, linenum, and_pos):
    error(filename, linenum, 'whitespace/operators', 3,
          'Missing spaces around &&')
  elif not IsRValueAllowed(clean_lines, linenum):
    error(filename, linenum, 'build/c++11', 3,
          'RValue references are an unapproved C++ feature.')
def CheckSectionSpacing(filename, clean_lines, class_info, linenum, error):
  """Checks for additional blank line issues related to sections.

  Currently the only thing checked here is blank line before protected/private.

  Args:
    filename: The name of the current file.
    clean_lines: A CleansedLines instance containing the file.
    class_info: A _ClassInfo objects.
    linenum: The number of the line to check.
    error: The function to call with any errors found.
  """
  # Skip checks if the class is small, where small means 25 lines or less.
  # 25 lines seems like a good cutoff since that's the usual height of
  # terminals, and any class that can't fit in one screen can't really
  # be considered "small".
  #
  # Also skip checks if we are on the first line.  This accounts for
  # classes that look like
  #   class Foo { public: ... };
  #
  # If we didn't find the end of the class, last_line would be zero,
  # and the check will be skipped by the first condition.
  if (class_info.last_line - class_info.starting_linenum <= 24 or
      linenum <= class_info.starting_linenum):
    return

  matched = Match(r'\s*(public|protected|private):', clean_lines.lines[linenum])
  if matched:
    # Issue warning if the line before public/protected/private was
    # not a blank line, but don't do this if the previous line contains
    # "class" or "struct".  This can happen two ways:
    #  - We are at the beginning of the class.
    #  - We are forward-declaring an inner class that is semantically
    #    private, but needed to be public for implementation reasons.
    # Also ignores cases where the previous line ends with a backslash as can be
    # common when defining classes in C macros.
    prev_line = clean_lines.lines[linenum - 1]
    if (not IsBlankLine(prev_line) and
        not Search(r'\b(class|struct)\b', prev_line) and
        not Search(r'\\$', prev_line)):
      # Try a bit harder to find the beginning of the class.  This is to
      # account for multi-line base-specifier lists, e.g.:
      #   class Derived
      #       : public Base {
      end_class_head = class_info.starting_linenum
      for i in range(class_info.starting_linenum, linenum):
        if Search(r'\{\s*$', clean_lines.lines[i]):
          end_class_head = i
          break
      # Only warn once the access specifier is clearly past the class head
      # (i.e. not the first line inside the opening brace).
      if end_class_head < linenum - 1:
        error(filename, linenum, 'whitespace/blank_line', 3,
              '"%s:" should be preceded by a blank line' % matched.group(1))
def GetPreviousNonBlankLine(clean_lines, linenum):
  """Return the most recent non-blank line and its line number.

  Args:
    clean_lines: A CleansedLines instance containing the file contents.
    linenum: The number of the line to check.
  Returns:
    A tuple with two elements.  The first element is the contents of the last
    non-blank line before the current line, or the empty string if this is the
    first non-blank line.  The second is the line number of that line, or -1
    if this is the first non-blank line.
  """
  # Walk backwards from the line just above us until we hit content.
  for candidate in xrange(linenum - 1, -1, -1):
    text = clean_lines.elided[candidate]
    if not IsBlankLine(text):
      return (text, candidate)
  return ('', -1)
def CheckBraces(filename, clean_lines, linenum, error):
  """Looks for misplaced braces (e.g. at the end of line).

  Args:
    filename: The name of the current file.
    clean_lines: A CleansedLines instance containing the file.
    linenum: The number of the line to check.
    error: The function to call with any errors found.
  """
  line = clean_lines.elided[linenum]        # get rid of comments and strings

  if Match(r'\s*{\s*$', line):
    # We allow an open brace to start a line in the case where someone is using
    # braces in a block to explicitly create a new scope, which is commonly used
    # to control the lifetime of stack-allocated variables.  Braces are also
    # used for brace initializers inside function calls.  We don't detect this
    # perfectly: we just don't complain if the last non-whitespace character on
    # the previous non-blank line is ',', ';', ':', '(', '{', or '}', or if the
    # previous line starts a preprocessor block.
    prevline = GetPreviousNonBlankLine(clean_lines, linenum)[0]
    if (not Search(r'[,;:}{(]\s*$', prevline) and
        not Match(r'\s*#', prevline)):
      error(filename, linenum, 'whitespace/braces', 4,
            '{ should almost always be at the end of the previous line')

  # An else clause should be on the same line as the preceding closing brace.
  if Match(r'\s*else\b\s*(?:if\b|\{|$)', line):
    prevline = GetPreviousNonBlankLine(clean_lines, linenum)[0]
    if Match(r'\s*}\s*$', prevline):
      error(filename, linenum, 'whitespace/newline', 4,
            'An else should appear on the same line as the preceding }')

  # If braces come on one side of an else, they should be on both.
  # However, we have to worry about "else if" that spans multiple lines!
  if Search(r'else if\s*\(', line):       # could be multi-line if
    brace_on_left = bool(Search(r'}\s*else if\s*\(', line))
    # find the ( after the if
    pos = line.find('else if')
    pos = line.find('(', pos)
    if pos > 0:
      (endline, _, endpos) = CloseExpression(clean_lines, linenum, pos)
      brace_on_right = endline[endpos:].find('{') != -1
      if brace_on_left != brace_on_right:    # must be brace after if
        error(filename, linenum, 'readability/braces', 5,
              'If an else has a brace on one side, it should have it on both')
  elif Search(r'}\s*else[^{]*$', line) or Match(r'[^}]*else\s*{', line):
    error(filename, linenum, 'readability/braces', 5,
          'If an else has a brace on one side, it should have it on both')

  # Likewise, an else should never have the else clause on the same line
  if Search(r'\belse [^\s{]', line) and not Search(r'\belse if\b', line):
    error(filename, linenum, 'whitespace/newline', 4,
          'Else clause should never be on same line as else (use 2 lines)')

  # In the same way, a do/while should never be on one line
  if Match(r'\s*do [^\s{]', line):
    error(filename, linenum, 'whitespace/newline', 4,
          'do/while clauses should not be on a single line')

  # Check single-line if/else bodies. The style guide says 'curly braces are not
  # required for single-line statements'. We additionally allow multi-line,
  # single statements, but we reject anything with more than one semicolon in
  # it. This means that the first semicolon after the if should be at the end of
  # its line, and the line after that should have an indent level equal to or
  # lower than the if. We also check for ambiguous if/else nesting without
  # braces.
  if_else_match = Search(r'\b(if\s*\(|else\b)', line)
  if if_else_match and not Match(r'\s*#', line):
    if_indent = GetIndentLevel(line)
    endline, endlinenum, endpos = line, linenum, if_else_match.end()
    if_match = Search(r'\bif\s*\(', line)
    if if_match:
      # This could be a multiline if condition, so find the end first.
      pos = if_match.end() - 1
      (endline, endlinenum, endpos) = CloseExpression(clean_lines, linenum, pos)
    # Check for an opening brace, either directly after the if or on the next
    # line. If found, this isn't a single-statement conditional.
    if (not Match(r'\s*{', endline[endpos:])
        and not (Match(r'\s*$', endline[endpos:])
                 and endlinenum < (len(clean_lines.elided) - 1)
                 and Match(r'\s*{', clean_lines.elided[endlinenum + 1]))):
      # Scan forward for the first semicolon of the (possibly multi-line)
      # statement body; everything up to it is treated as one statement.
      while (endlinenum < len(clean_lines.elided)
             and ';' not in clean_lines.elided[endlinenum][endpos:]):
        endlinenum += 1
        endpos = 0
      if endlinenum < len(clean_lines.elided):
        endline = clean_lines.elided[endlinenum]
        # We allow a mix of whitespace and closing braces (e.g. for one-liner
        # methods) and a single \ after the semicolon (for macros)
        endpos = endline.find(';')
        if not Match(r';[\s}]*(\\?)$', endline[endpos:]):
          # Semicolon isn't the last character, there's something trailing.
          # Output a warning if the semicolon is not contained inside
          # a lambda expression.
          if not Match(r'^[^{};]*\[[^\[\]]*\][^{}]*\{[^{}]*\}\s*\)*[;,]\s*$',
                       endline):
            error(filename, linenum, 'readability/braces', 4,
                  'If/else bodies with multiple statements require braces')
        elif endlinenum < len(clean_lines.elided) - 1:
          # Make sure the next line is dedented
          next_line = clean_lines.elided[endlinenum + 1]
          next_indent = GetIndentLevel(next_line)
          # With ambiguous nested if statements, this will error out on the
          # if that *doesn't* match the else, regardless of whether it's the
          # inner one or outer one.
          if (if_match and Match(r'\s*else\b', next_line)
              and next_indent != if_indent):
            error(filename, linenum, 'readability/braces', 4,
                  'Else clause should be indented at the same level as if. '
                  'Ambiguous nested if/else chains require braces.')
          elif next_indent > if_indent:
            error(filename, linenum, 'readability/braces', 4,
                  'If/else bodies with multiple statements require braces')
def CheckTrailingSemicolon(filename, clean_lines, linenum, error):
  """Looks for redundant trailing semicolon.

  Args:
    filename: The name of the current file.
    clean_lines: A CleansedLines instance containing the file.
    linenum: The number of the line to check.
    error: The function to call with any errors found.
  """

  line = clean_lines.elided[linenum]

  # Block bodies should not be followed by a semicolon.  Due to C++11
  # brace initialization, there are more places where semicolons are
  # required than not, so we use a whitelist approach to check these
  # rather than a blacklist.  These are the places where "};" should
  # be replaced by just "}":
  # 1. Some flavor of block following closing parenthesis:
  #    for (;;) {};
  #    while (...) {};
  #    switch (...) {};
  #    Function(...) {};
  #    if (...) {};
  #    if (...) else if (...) {};
  #
  # 2. else block:
  #    if (...) else {};
  #
  # 3. const member function:
  #    Function(...) const {};
  #
  # 4. Block following some statement:
  #    x = 42;
  #    {};
  #
  # 5. Block at the beginning of a function:
  #    Function(...) {
  #      {};
  #    }
  #
  #    Note that naively checking for the preceding "{" will also match
  #    braces inside multi-dimensional arrays, but this is fine since
  #    that expression will not contain semicolons.
  #
  # 6. Block following another block:
  #    while (true) {}
  #    {};
  #
  # 7. End of namespaces:
  #    namespace {};
  #
  #    These semicolons seems far more common than other kinds of
  #    redundant semicolons, possibly due to people converting classes
  #    to namespaces.  For now we do not warn for this case.
  #
  # Try matching case 1 first.
  match = Match(r'^(.*\)\s*)\{', line)
  if match:
    # Matched closing parenthesis (case 1).  Check the token before the
    # matching opening parenthesis, and don't warn if it looks like a
    # macro.  This avoids these false positives:
    #  - macro that defines a base class
    #  - multi-line macro that defines a base class
    #  - macro that defines the whole class-head
    #
    # But we still issue warnings for macros that we know are safe to
    # warn, specifically:
    #  - TEST, TEST_F, TEST_P, MATCHER, MATCHER_P
    #  - TYPED_TEST
    #  - INTERFACE_DEF
    #  - EXCLUSIVE_LOCKS_REQUIRED, SHARED_LOCKS_REQUIRED, LOCKS_EXCLUDED:
    #
    # We implement a whitelist of safe macros instead of a blacklist of
    # unsafe macros, even though the latter appears less frequently in
    # google code and would have been easier to implement.  This is because
    # the downside for getting the whitelist wrong means some extra
    # semicolons, while the downside for getting the blacklist wrong
    # would result in compile errors.
    #
    # In addition to macros, we also don't want to warn on compound
    # literals and lambdas.
    closing_brace_pos = match.group(1).rfind(')')
    opening_parenthesis = ReverseCloseExpression(
        clean_lines, linenum, closing_brace_pos)
    if opening_parenthesis[2] > -1:
      line_prefix = opening_parenthesis[0][0:opening_parenthesis[2]]
      macro = Search(r'\b([A-Z_]+)\s*$', line_prefix)
      func = Match(r'^(.*\])\s*$', line_prefix)
      if ((macro and
           macro.group(1) not in (
               'TEST', 'TEST_F', 'MATCHER', 'MATCHER_P', 'TYPED_TEST',
               'EXCLUSIVE_LOCKS_REQUIRED', 'SHARED_LOCKS_REQUIRED',
               'LOCKS_EXCLUDED', 'INTERFACE_DEF')) or
          (func and not Search(r'\boperator\s*\[\s*\]', func.group(1))) or
          Search(r'\s+=\s*$', line_prefix)):
        match = None
    # A ']' at the end of the line before the opening parenthesis suggests
    # a multi-line lambda introducer, which should not be warned on either.
    if (match and
        opening_parenthesis[1] > 1 and
        Search(r'\]\s*$', clean_lines.elided[opening_parenthesis[1] - 1])):
      # Multi-line lambda-expression
      match = None

  else:
    # Try matching cases 2-3.
    match = Match(r'^(.*(?:else|\)\s*const)\s*)\{', line)
    if not match:
      # Try matching cases 4-6.  These are always matched on separate lines.
      #
      # Note that we can't simply concatenate the previous line to the
      # current line and do a single match, otherwise we may output
      # duplicate warnings for the blank line case:
      #   if (cond) {
      #     // blank line
      #   }
      prevline = GetPreviousNonBlankLine(clean_lines, linenum)[0]
      if prevline and Search(r'[;{}]\s*$', prevline):
        match = Match(r'^(\s*)\{', line)

  # Check matching closing brace
  if match:
    (endline, endlinenum, endpos) = CloseExpression(
        clean_lines, linenum, len(match.group(1)))
    if endpos > -1 and Match(r'^\s*;', endline[endpos:]):
      # Current {} pair is eligible for semicolon check, and we have found
      # the redundant semicolon, output warning here.
      #
      # Note: because we are scanning forward for opening braces, and
      # outputting warnings for the matching closing brace, if there are
      # nested blocks with trailing semicolons, we will get the error
      # messages in reversed order.
      error(filename, endlinenum, 'readability/braces', 4,
            "You don't need a ; after a }")
def CheckEmptyBlockBody(filename, clean_lines, linenum, error):
  """Look for empty loop/conditional body with only a single semicolon.

  Args:
    filename: The name of the current file.
    clean_lines: A CleansedLines instance containing the file.
    linenum: The number of the line to check.
    error: The function to call with any errors found.
  """

  # Search for loop keywords at the beginning of the line.  Because only
  # whitespaces are allowed before the keywords, this will also ignore most
  # do-while-loops, since those lines should start with closing brace.
  #
  # We also check "if" blocks here, since an empty conditional block
  # is likely an error.
  line = clean_lines.elided[linenum]
  keyword = Match(r'\s*(for|while|if)\s*\(', line)
  if not keyword:
    return

  # Find the end of the (possibly multi-line) condition expression.
  (end_line, end_linenum, end_pos) = CloseExpression(
      clean_lines, linenum, line.find('('))

  # Output warning if what follows the condition expression is a semicolon.
  # No warning for all other cases, including whitespace or newline, since we
  # have a separate check for semicolons preceded by whitespace.
  if end_pos < 0 or not Match(r';', end_line[end_pos:]):
    return
  if keyword.group(1) == 'if':
    error(filename, end_linenum, 'whitespace/empty_conditional_body', 5,
          'Empty conditional bodies should use {}')
  else:
    error(filename, end_linenum, 'whitespace/empty_loop_body', 5,
          'Empty loop bodies should use {} or continue')
def FindCheckMacro(line):
  """Find a replaceable CHECK-like macro.

  Args:
    line: line to search on.
  Returns:
    (macro name, start position), or (None, -1) if no replaceable
    macro is found.
  """
  for candidate in _CHECK_MACROS:
    if candidate not in line:
      continue
    # The substring test above is only a cheap filter.  Do a regular
    # expression match to make sure that we are matching the expected
    # CHECK macro, as opposed to some other macro that happens to
    # contain the CHECK substring.
    matched = Match(r'^(.*\b' + candidate + r'\s*)\(', line)
    if matched:
      return (candidate, len(matched.group(1)))
  return (None, -1)
def CheckCheck(filename, clean_lines, linenum, error):
  """Checks the use of CHECK and EXPECT macros.

  Args:
    filename: The name of the current file.
    clean_lines: A CleansedLines instance containing the file.
    linenum: The number of the line to check.
    error: The function to call with any errors found.
  """

  # Decide the set of replacement macros that should be suggested
  lines = clean_lines.elided
  (check_macro, start_pos) = FindCheckMacro(lines[linenum])
  if not check_macro:
    return

  # Find end of the boolean expression by matching parentheses
  (last_line, end_line, end_pos) = CloseExpression(
      clean_lines, linenum, start_pos)
  if end_pos < 0:
    return

  # If the check macro is followed by something other than a
  # semicolon, assume users will log their own custom error messages
  # and don't suggest any replacements.
  if not Match(r'\s*;', last_line[end_pos:]):
    return

  # Collect the full macro argument text, concatenating the interior of
  # multi-line invocations into a single string.
  if linenum == end_line:
    expression = lines[linenum][start_pos + 1:end_pos - 1]
  else:
    expression = lines[linenum][start_pos + 1:]
    for i in xrange(linenum + 1, end_line):
      expression += lines[i]
    expression += last_line[0:end_pos - 1]

  # Parse expression so that we can take parentheses into account.
  # This avoids false positives for inputs like "CHECK((a < 4) == b)",
  # which is not replaceable by CHECK_LE.
  lhs = ''
  rhs = ''
  operator = None
  while expression:
    matched = Match(r'^\s*(<<|<<=|>>|>>=|->\*|->|&&|\|\||'
                    r'==|!=|>=|>|<=|<|\()(.*)$', expression)
    if matched:
      token = matched.group(1)
      if token == '(':
        # Parenthesized operand
        expression = matched.group(2)
        (end, _) = FindEndOfExpressionInLine(expression, 0, ['('])
        if end < 0:
          return  # Unmatched parenthesis
        lhs += '(' + expression[0:end]
        expression = expression[end:]
      elif token in ('&&', '||'):
        # Logical and/or operators.  This means the expression
        # contains more than one term, for example:
        #   CHECK(42 < a && a < b);
        #
        # These are not replaceable with CHECK_LE, so bail out early.
        return
      elif token in ('<<', '<<=', '>>', '>>=', '->*', '->'):
        # Non-relational operator
        lhs += token
        expression = matched.group(2)
      else:
        # Relational operator
        operator = token
        rhs = matched.group(2)
        break
    else:
      # Unparenthesized operand.  Instead of appending to lhs one character
      # at a time, we do another regular expression match to consume several
      # characters at once if possible.  Trivial benchmark shows that this
      # is more efficient when the operands are longer than a single
      # character, which is generally the case.
      matched = Match(r'^([^-=!<>()&|]+)(.*)$', expression)
      if not matched:
        matched = Match(r'^(\s*\S)(.*)$', expression)
        if not matched:
          break
      lhs += matched.group(1)
      expression = matched.group(2)

  # Only apply checks if we got all parts of the boolean expression
  if not (lhs and operator and rhs):
    return

  # Check that rhs do not contain logical operators.  We already know
  # that lhs is fine since the loop above parses out && and ||.
  if rhs.find('&&') > -1 or rhs.find('||') > -1:
    return

  # At least one of the operands must be a constant literal.  This is
  # to avoid suggesting replacements for unprintable things like
  # CHECK(variable != iterator)
  #
  # The following pattern matches decimal, hex integers, strings, and
  # characters (in that order).
  lhs = lhs.strip()
  rhs = rhs.strip()
  match_constant = r'^([-+]?(\d+|0[xX][0-9a-fA-F]+)[lLuU]{0,3}|".*"|\'.*\')$'
  if Match(match_constant, lhs) or Match(match_constant, rhs):
    # Note: since we know both lhs and rhs, we can provide a more
    # descriptive error message like:
    #   Consider using CHECK_EQ(x, 42) instead of CHECK(x == 42)
    # Instead of:
    #   Consider using CHECK_EQ instead of CHECK(a == b)
    #
    # We are still keeping the less descriptive message because if lhs
    # or rhs gets long, the error message might become unreadable.
    error(filename, linenum, 'readability/check', 2,
          'Consider using %s instead of %s(a %s b)' % (
              _CHECK_REPLACEMENT[check_macro][operator],
              check_macro, operator))
def CheckAltTokens(filename, clean_lines, linenum, error):
  """Check alternative keywords being used in boolean expressions.

  Args:
    filename: The name of the current file.
    clean_lines: A CleansedLines instance containing the file.
    linenum: The number of the line to check.
    error: The function to call with any errors found.
  """
  line = clean_lines.elided[linenum]

  # Skip preprocessor lines entirely.
  if Match(r'^\s*#', line):
    return

  # Last ditch effort to avoid multi-line comments.  This will not help
  # if the comment started before the current line or ended after the
  # current line, but it catches most of the false positives.  At least,
  # it provides a way to workaround this warning for people who use
  # multi-line comments in preprocessor macros.
  #
  # TODO(unknown): remove this once cpplint has better support for
  # multi-line comments.
  if '/*' in line or '*/' in line:
    return

  for alt_match in _ALT_TOKEN_REPLACEMENT_PATTERN.finditer(line):
    token = alt_match.group(1)
    error(filename, linenum, 'readability/alt_tokens', 2,
          'Use operator %s instead of %s' % (
              _ALT_TOKEN_REPLACEMENT[token], token))
def GetLineWidth(line):
  """Determines the width of the line in column positions.

  Args:
    line: A string, which may be a Unicode string.
  Returns:
    The width of the line in column positions, accounting for Unicode
    combining characters and wide characters.
  """
  # The `unicode` builtin only exists on Python 2; on Python 3 every str
  # is already a unicode string.  Resolve the text type locally so the
  # function works on both interpreters instead of raising NameError on
  # Python 3 (Python 2 behavior is unchanged).
  try:
    text_type = unicode          # Python 2
  except NameError:
    text_type = str              # Python 3
  if isinstance(line, text_type):
    width = 0
    for uc in unicodedata.normalize('NFC', line):
      # East Asian Wide and Fullwidth characters occupy two columns;
      # combining characters occupy none; everything else one.
      if unicodedata.east_asian_width(uc) in ('W', 'F'):
        width += 2
      elif not unicodedata.combining(uc):
        width += 1
    return width
  else:
    # Byte strings (Python 2 str): assume one column per byte.
    return len(line)
def CheckStyle(filename, clean_lines, linenum, file_extension, nesting_state,
               error):
  """Checks rules from the 'C++ style rules' section of cppguide.html.

  Most of these rules are hard to test (naming, comment style), but we
  do what we can.  In particular we check for 2-space indents, line lengths,
  tab usage, spaces inside code, etc.

  Args:
    filename: The name of the current file.
    clean_lines: A CleansedLines instance containing the file.
    linenum: The number of the line to check.
    file_extension: The extension (without the dot) of the filename.
    nesting_state: A NestingState instance which maintains information about
                   the current stack of nested blocks being parsed.
    error: The function to call with any errors found.
  """
  # Don't use "elided" lines here, otherwise we can't check commented lines.
  # Don't want to use "raw" either, because we don't want to check inside C++11
  # raw strings,
  raw_lines = clean_lines.lines_without_raw_strings
  line = raw_lines[linenum]
  if line.find('\t') != -1:
    error(filename, linenum, 'whitespace/tab', 1,
          'Tab found; better to use spaces')
  # One or three blank spaces at the beginning of the line is weird; it's
  # hard to reconcile that with 2-space indents.
  # NOTE: here are the conditions rob pike used for his tests.  Mine aren't
  # as sophisticated, but it may be worth becoming so: RLENGTH==initial_spaces
  # if(RLENGTH > 20) complain = 0;
  # if(match($0, " +(error|private|public|protected):")) complain = 0;
  # if(match(prev, "&& *$")) complain = 0;
  # if(match(prev, "\\|\\| *$")) complain = 0;
  # if(match(prev, "[\",=><] *$")) complain = 0;
  # if(match($0, " <<")) complain = 0;
  # if(match(prev, " +for \\(")) complain = 0;
  # if(prevodd && match(prevprev, " +for \\(")) complain = 0;
  scope_or_label_pattern = r'\s*\w+\s*:\s*\\?$'
  # NOTE(review): this classinfo value is never read; it is recomputed
  # near the bottom of the function before use.
  classinfo = nesting_state.InnermostClass()
  initial_spaces = 0
  cleansed_line = clean_lines.elided[linenum]
  # Count the run of leading spaces to detect odd (1 or 3) indents below.
  while initial_spaces < len(line) and line[initial_spaces] == ' ':
    initial_spaces += 1
  if line and line[-1].isspace():
    error(filename, linenum, 'whitespace/end_of_line', 4,
          'Line ends in whitespace. Consider deleting these extra spaces.')
  # There are certain situations we allow one space, notably for
  # section labels, and also lines containing multi-line raw strings.
  elif ((initial_spaces == 1 or initial_spaces == 3) and
        not Match(scope_or_label_pattern, cleansed_line) and
        not (clean_lines.raw_lines[linenum] != line and
             Match(r'^\s*""', line))):
    error(filename, linenum, 'whitespace/indent', 3,
          'Weird number of spaces at line-start. '
          'Are you using a 2-space indent?')
  # Check if the line is a header guard: guard lines are exempt from the
  # line-length check below.
  is_header_guard = False
  if file_extension == 'h':
    cppvar = GetHeaderGuardCPPVariable(filename)
    if (line.startswith('#ifndef %s' % cppvar) or
        line.startswith('#define %s' % cppvar) or
        line.startswith('#endif // %s' % cppvar)):
      is_header_guard = True
  # #include lines and header guards can be long, since there's no clean way to
  # split them.
  #
  # URLs can be long too.  It's possible to split these, but it makes them
  # harder to cut&paste.
  #
  # The "$Id:...$" comment may also get very long without it being the
  # developers fault.
  if (not line.startswith('#include') and not is_header_guard and
      not Match(r'^\s*//.*http(s?)://\S*$', line) and
      not Match(r'^// \$Id:.*#[0-9]+ \$$', line)):
    line_width = GetLineWidth(line)
    # Allow 25% slack before escalating to the harsher "very rarely" message.
    extended_length = int((_line_length * 1.25))
    if line_width > extended_length:
      error(filename, linenum, 'whitespace/line_length', 4,
            'Lines should very rarely be longer than %i characters' %
            extended_length)
    elif line_width > _line_length:
      error(filename, linenum, 'whitespace/line_length', 2,
            'Lines should be <= %i characters long' % _line_length)
  if (cleansed_line.count(';') > 1 and
      # for loops are allowed two ;'s (and may run over two lines).
      cleansed_line.find('for') == -1 and
      (GetPreviousNonBlankLine(clean_lines, linenum)[0].find('for') == -1 or
       GetPreviousNonBlankLine(clean_lines, linenum)[0].find(';') != -1) and
      # It's ok to have many commands in a switch case that fits in 1 line
      not ((cleansed_line.find('case ') != -1 or
            cleansed_line.find('default:') != -1) and
           cleansed_line.find('break;') != -1)):
    error(filename, linenum, 'whitespace/newline', 0,
          'More than one command on the same line')
  # Some more style checks
  CheckBraces(filename, clean_lines, linenum, error)
  CheckTrailingSemicolon(filename, clean_lines, linenum, error)
  CheckEmptyBlockBody(filename, clean_lines, linenum, error)
  CheckAccess(filename, clean_lines, linenum, nesting_state, error)
  CheckSpacing(filename, clean_lines, linenum, nesting_state, error)
  CheckOperatorSpacing(filename, clean_lines, linenum, error)
  CheckParenthesisSpacing(filename, clean_lines, linenum, error)
  CheckCommaSpacing(filename, clean_lines, linenum, error)
  CheckBracesSpacing(filename, clean_lines, linenum, error)
  CheckSpacingForFunctionCall(filename, clean_lines, linenum, error)
  CheckRValueReference(filename, clean_lines, linenum, nesting_state, error)
  CheckCheck(filename, clean_lines, linenum, error)
  CheckAltTokens(filename, clean_lines, linenum, error)
  classinfo = nesting_state.InnermostClass()
  if classinfo:
    CheckSectionSpacing(filename, clean_lines, classinfo, linenum, error)
# Matches an #include line; group 1 is the opening delimiter ('<' or '"'),
# group 2 is the header path.
_RE_PATTERN_INCLUDE = re.compile(r'^\s*#\s*include\s*([<"])([^>"]*)[>"].*$')
# Matches the first component of a filename delimited by -s and _s. That is:
#  _RE_FIRST_COMPONENT.match('foo').group(0) == 'foo'
#  _RE_FIRST_COMPONENT.match('foo.cc').group(0) == 'foo'
#  _RE_FIRST_COMPONENT.match('foo-bar_baz.cc').group(0) == 'foo'
#  _RE_FIRST_COMPONENT.match('foo_bar-baz.cc').group(0) == 'foo'
_RE_FIRST_COMPONENT = re.compile(r'^[^-_.]+')
def _DropCommonSuffixes(filename):
"""Drops common suffixes like _test.cc or -inl.h from filename.
For example:
>>> _DropCommonSuffixes('foo/foo-inl.h')
'foo/foo'
>>> _DropCommonSuffixes('foo/bar/foo.cc')
'foo/bar/foo'
>>> _DropCommonSuffixes('foo/foo_internal.h')
'foo/foo'
>>> _DropCommonSuffixes('foo/foo_unusualinternal.h')
'foo/foo_unusualinternal'
Args:
filename: The input filename.
Returns:
The filename with the common suffix removed.
"""
for suffix in ('test.cc', 'regtest.cc', 'unittest.cc',
'inl.h', 'impl.h', 'internal.h'):
if (filename.endswith(suffix) and len(filename) > len(suffix) and
filename[-len(suffix) - 1] in ('-', '_')):
return filename[:-len(suffix) - 1]
return os.path.splitext(filename)[0]
def _IsTestFilename(filename):
"""Determines if the given filename has a suffix that identifies it as a test.
Args:
filename: The input filename.
Returns:
True if 'filename' looks like a test, False otherwise.
"""
if (filename.endswith('_test.cc') or
filename.endswith('_unittest.cc') or
filename.endswith('_regtest.cc')):
return True
else:
return False
def _ClassifyInclude(fileinfo, include, is_system):
  """Figures out what kind of header 'include' is.

  Args:
    fileinfo: The current file cpplint is running over. A FileInfo instance.
    include: The path to a #included file.
    is_system: True if the #include used <> rather than "".

  Returns:
    One of the _XXX_HEADER constants.

  For example:
    >>> _ClassifyInclude(FileInfo('foo/foo.cc'), 'stdio.h', True)
    _C_SYS_HEADER
    >>> _ClassifyInclude(FileInfo('foo/foo.cc'), 'string', True)
    _CPP_SYS_HEADER
    >>> _ClassifyInclude(FileInfo('foo/foo.cc'), 'foo/foo.h', False)
    _LIKELY_MY_HEADER
    >>> _ClassifyInclude(FileInfo('foo/foo_unknown_extension.cc'),
    ...                  'bar/foo_other_ext.h', False)
    _POSSIBLE_MY_HEADER
    >>> _ClassifyInclude(FileInfo('foo/foo.cc'), 'foo/bar.h', False)
    _OTHER_HEADER
  """
  # System headers split into C vs C++ based on the known C++ header list
  # (_CPP_HEADERS covers everything not already handled above).
  if is_system:
    if include in _CPP_HEADERS:
      return _CPP_SYS_HEADER
    return _C_SYS_HEADER

  # A quoted include that shares the target's basename (after dropping
  # common suffixes) and lives in the same directory -- or in the sibling
  # 'public' directory -- is most likely the target's own header.
  target_dir, target_base = os.path.split(
      _DropCommonSuffixes(fileinfo.RepositoryName()))
  include_dir, include_base = os.path.split(_DropCommonSuffixes(include))
  if target_base == include_base:
    if (include_dir == target_dir or
        include_dir == os.path.normpath(target_dir + '/../public')):
      return _LIKELY_MY_HEADER

  # If only the first dash/underscore-delimited component matches, the
  # target may be implementing the include, so it is allowed to come
  # first -- but we never complain if it is absent.
  target_first = _RE_FIRST_COMPONENT.match(target_base)
  include_first = _RE_FIRST_COMPONENT.match(include_base)
  if (target_first and include_first and
      target_first.group(0) == include_first.group(0)):
    return _POSSIBLE_MY_HEADER

  return _OTHER_HEADER
def CheckIncludeLine(filename, clean_lines, linenum, include_state, error):
  """Check rules that are applicable to #include lines.

  Strings on #include lines are NOT removed from elided line, to make
  certain tasks easier.  However, to prevent false positives, checks
  applicable to #include lines in CheckLanguage must be put here.

  Args:
    filename: The name of the current file.
    clean_lines: A CleansedLines instance containing the file.
    linenum: The number of the line to check.
    include_state: An _IncludeState instance in which the headers are inserted.
    error: The function to call with any errors found.
  """
  fileinfo = FileInfo(filename)
  line = clean_lines.lines[linenum]
  # "include" should use the new style "foo/bar.h" instead of just "bar.h"
  # Only do this check if the included header follows google naming
  # conventions.  If not, assume that it's a 3rd party API that
  # requires special include conventions.
  #
  # We also make an exception for Lua headers, which follow google
  # naming convention but not the include convention.
  match = Match(r'#include\s*"([^/]+\.h)"', line)
  if match and not _THIRD_PARTY_HEADERS_PATTERN.match(match.group(1)):
    error(filename, linenum, 'build/include', 4,
          'Include the directory when naming .h files')
  # we shouldn't include a file more than once. actually, there are a
  # handful of instances where doing so is okay, but in general it's
  # not.
  match = _RE_PATTERN_INCLUDE.search(line)
  if match:
    include = match.group(2)
    is_system = (match.group(1) == '<')
    duplicate_line = include_state.FindHeader(include)
    if duplicate_line >= 0:
      error(filename, linenum, 'build/include', 4,
            '"%s" already included at %s:%s' %
            (include, filename, duplicate_line))
    elif not _THIRD_PARTY_HEADERS_PATTERN.match(include):
      include_state.include_list[-1].append((include, linenum))
      # We want to ensure that headers appear in the right order:
      # 1) for foo.cc, foo.h  (preferred location)
      # 2) c system files
      # 3) cpp system files
      # 4) for foo.cc, foo.h  (deprecated location)
      # 5) other google headers
      #
      # We classify each include statement as one of those 5 types
      # using a number of techniques. The include_state object keeps
      # track of the highest type seen, and complains if we see a
      # lower type after that.
      error_message = include_state.CheckNextIncludeOrder(
          _ClassifyInclude(fileinfo, include, is_system))
      if error_message:
        error(filename, linenum, 'build/include_order', 4,
              '%s. Should be: %s.h, c system, c++ system, other.' %
              (error_message, fileinfo.BaseName()))
      # Alphabetical order is checked against the canonicalized name so
      # that related headers (e.g. "-inl.h") sort next to their base.
      canonical_include = include_state.CanonicalizeAlphabeticalOrder(include)
      if not include_state.IsInAlphabeticalOrder(
          clean_lines, linenum, canonical_include):
        error(filename, linenum, 'build/include_alpha', 4,
              'Include "%s" not in alphabetical order' % include)
      include_state.SetLastHeader(canonical_include)
  # Look for any of the stream classes that are part of standard C++.
  match = _RE_PATTERN_INCLUDE.match(line)
  if match:
    include = match.group(2)
    if Match(r'(f|ind|io|i|o|parse|pf|stdio|str|)?stream$', include):
      # Many unit tests use cout, so we exempt them.
      if not _IsTestFilename(filename):
        # Suggest a different header for ostream
        if include == 'ostream':
          error(filename, linenum, 'readability/streams', 3,
                'For logging, include "base/logging.h" instead of <ostream>.')
        else:
          error(filename, linenum, 'readability/streams', 3,
                'Streams are highly discouraged.')
def _GetTextInside(text, start_pattern):
r"""Retrieves all the text between matching open and close parentheses.
Given a string of lines and a regular expression string, retrieve all the text
following the expression and between opening punctuation symbols like
(, [, or {, and the matching close-punctuation symbol. This properly nested
occurrences of the punctuations, so for the text like
printf(a(), b(c()));
a call to _GetTextInside(text, r'printf\(') will return 'a(), b(c())'.
start_pattern must match string having an open punctuation symbol at the end.
Args:
text: The lines to extract text. Its comments and strings must be elided.
It can be single line and can span multiple lines.
start_pattern: The regexp string indicating where to start extracting
the text.
Returns:
The extracted text.
None if either the opening string or ending punctuation could not be found.
"""
# TODO(unknown): Audit cpplint.py to see what places could be profitably
# rewritten to use _GetTextInside (and use inferior regexp matching today).
# Give opening punctuations to get the matching close-punctuations.
matching_punctuation = {'(': ')', '{': '}', '[': ']'}
closing_punctuation = set(matching_punctuation.itervalues())
# Find the position to start extracting text.
match = re.search(start_pattern, text, re.M)
if not match: # start_pattern not found in text.
return None
start_position = match.end(0)
assert start_position > 0, (
'start_pattern must ends with an opening punctuation.')
assert text[start_position - 1] in matching_punctuation, (
'start_pattern must ends with an opening punctuation.')
# Stack of closing punctuations we expect to have in text after position.
punctuation_stack = [matching_punctuation[text[start_position - 1]]]
position = start_position
while punctuation_stack and position < len(text):
if text[position] == punctuation_stack[-1]:
punctuation_stack.pop()
elif text[position] in closing_punctuation:
# A closing punctuation without matching opening punctuations.
return None
elif text[position] in matching_punctuation:
punctuation_stack.append(matching_punctuation[text[position]])
position += 1
if punctuation_stack:
# Opening punctuations left without matching close-punctuations.
return None
# punctuations match.
return text[start_position:position - 1]
# Patterns for matching call-by-reference parameters.
#
# Supports nested templates up to 2 levels deep using this messy pattern:
#   < (?: < (?: < [^<>]*
#               >
#           |   [^<>] )*
#         >
#     |   [^<>] )*
#   >
_RE_PATTERN_IDENT = r'[_a-zA-Z]\w*'  # =~ [[:alpha:]][[:alnum:]]*
# Matches a (possibly const/qualified, possibly templated) C++ type name.
_RE_PATTERN_TYPE = (
    r'(?:const\s+)?(?:typename\s+|class\s+|struct\s+|union\s+|enum\s+)?'
    r'(?:\w|'
    r'\s*<(?:<(?:<[^<>]*>|[^<>])*>|[^<>])*>|'
    r'::)+')
# A call-by-reference parameter ends with '& identifier'.
_RE_PATTERN_REF_PARAM = re.compile(
    r'(' + _RE_PATTERN_TYPE + r'(?:\s*(?:\bconst\b|[*]))*\s*'
    r'&\s*' + _RE_PATTERN_IDENT + r')\s*(?:=[^,()]+)?[,)]')
# A call-by-const-reference parameter either ends with 'const& identifier'
# or looks like 'const type& identifier' when 'type' is atomic.
_RE_PATTERN_CONST_REF_PARAM = (
    r'(?:.*\s*\bconst\s*&\s*' + _RE_PATTERN_IDENT +
    r'|const\s+' + _RE_PATTERN_TYPE + r'\s*&\s*' + _RE_PATTERN_IDENT + r')')
def CheckLanguage(filename, clean_lines, linenum, file_extension,
                  include_state, nesting_state, error):
  """Checks rules from the 'C++ language rules' section of cppguide.html.

  Some of these rules are hard to test (function overloading, using
  uint32 inappropriately), but we do the best we can.

  Args:
    filename: The name of the current file.
    clean_lines: A CleansedLines instance containing the file.
    linenum: The number of the line to check.
    file_extension: The extension (without the dot) of the filename.
    include_state: An _IncludeState instance in which the headers are inserted.
    nesting_state: A NestingState instance which maintains information about
                   the current stack of nested blocks being parsed.
    error: The function to call with any errors found.
  """
  # If the line is empty or consists of entirely a comment, no need to
  # check it.
  line = clean_lines.elided[linenum]
  if not line:
    return
  match = _RE_PATTERN_INCLUDE.search(line)
  if match:
    CheckIncludeLine(filename, clean_lines, linenum, include_state, error)
    return
  # Reset include state across preprocessor directives.  This is meant
  # to silence warnings for conditional includes.
  match = Match(r'^\s*#\s*(if|ifdef|ifndef|elif|else|endif)\b', line)
  if match:
    include_state.ResetSection(match.group(1))
  # Make Windows paths like Unix.
  # NOTE(review): 'fullname' is not used anywhere below in this function;
  # it appears to be a leftover from an earlier version -- confirm before
  # removing.
  fullname = os.path.abspath(filename).replace('\\', '/')
  # Perform other checks now that we are sure that this is not an include line
  CheckCasts(filename, clean_lines, linenum, error)
  CheckGlobalStatic(filename, clean_lines, linenum, error)
  CheckPrintf(filename, clean_lines, linenum, error)
  if file_extension == 'h':
    # TODO(unknown): check that 1-arg constructors are explicit.
    #                How to tell it's a constructor?
    #                (handled in CheckForNonStandardConstructs for now)
    # TODO(unknown): check that classes declare or disable copy/assign
    #                (level 1 error)
    pass
  # Check if people are using the verboten C basic types.  The only exception
  # we regularly allow is "unsigned short port" for port.
  if Search(r'\bshort port\b', line):
    if not Search(r'\bunsigned short port\b', line):
      error(filename, linenum, 'runtime/int', 4,
            'Use "unsigned short" for ports, not "short"')
  else:
    match = Search(r'\b(short|long(?! +double)|long long)\b', line)
    if match:
      error(filename, linenum, 'runtime/int', 4,
            'Use int16/int64/etc, rather than the C type %s' % match.group(1))
  # Check if some verboten operator overloading is going on
  # TODO(unknown): catch out-of-line unary operator&:
  #   class X {};
  #   int operator&(const X& x) { return 42; }  // unary operator&
  # The trick is it's hard to tell apart from binary operator&:
  #   class Y { int operator&(const Y& x) { return 23; } }; // binary operator&
  if Search(r'\boperator\s*&\s*\(\s*\)', line):
    error(filename, linenum, 'runtime/operator', 4,
          'Unary operator& is dangerous. Do not use it.')
  # Check for suspicious usage of "if" like
  # } if (a == b) {
  if Search(r'\}\s*if\s*\(', line):
    error(filename, linenum, 'readability/braces', 4,
          'Did you mean "else if"? If not, start a new line for "if".')
  # Check for potential format string bugs like printf(foo).
  # We constrain the pattern not to pick things like DocidForPrintf(foo).
  # Not perfect but it can catch printf(foo.c_str()) and printf(foo->c_str())
  # TODO(unknown): Catch the following case. Need to change the calling
  # convention of the whole function to process multiple line to handle it.
  #   printf(
  #       boy_this_is_a_really_long_variable_that_cannot_fit_on_the_prev_line);
  printf_args = _GetTextInside(line, r'(?i)\b(string)?printf\s*\(')
  if printf_args:
    match = Match(r'([\w.\->()]+)$', printf_args)
    if match and match.group(1) != '__VA_ARGS__':
      function_name = re.search(r'\b((?:string)?printf)\s*\(',
                                line, re.I).group(1)
      error(filename, linenum, 'runtime/printf', 4,
            'Potential format string bug. Do %s("%%s", %s) instead.'
            % (function_name, match.group(1)))
  # Check for potential memset bugs like memset(buf, sizeof(buf), 0).
  match = Search(r'memset\s*\(([^,]*),\s*([^,]*),\s*0\s*\)', line)
  if match and not Match(r"^''|-?[0-9]+|0x[0-9A-Fa-f]$", match.group(2)):
    error(filename, linenum, 'runtime/memset', 4,
          'Did you mean "memset(%s, 0, %s)"?'
          % (match.group(1), match.group(2)))
  if Search(r'\busing namespace\b', line):
    error(filename, linenum, 'build/namespaces', 5,
          'Do not use namespace using-directives. '
          'Use using-declarations instead.')
  # Detect variable-length arrays.
  match = Match(r'\s*(.+::)?(\w+) [a-z]\w*\[(.+)];', line)
  if (match and match.group(2) != 'return' and match.group(2) != 'delete' and
      match.group(3).find(']') == -1):
    # Split the size using space and arithmetic operators as delimiters.
    # If any of the resulting tokens are not compile time constants then
    # report the error.
    tokens = re.split(r'\s|\+|\-|\*|\/|<<|>>]', match.group(3))
    is_const = True
    skip_next = False
    for tok in tokens:
      if skip_next:
        skip_next = False
        continue
      if Search(r'sizeof\(.+\)', tok): continue
      if Search(r'arraysize\(\w+\)', tok): continue
      tok = tok.lstrip('(')
      tok = tok.rstrip(')')
      if not tok: continue
      if Match(r'\d+', tok): continue
      if Match(r'0[xX][0-9a-fA-F]+', tok): continue
      if Match(r'k[A-Z0-9]\w*', tok): continue
      if Match(r'(.+::)?k[A-Z0-9]\w*', tok): continue
      if Match(r'(.+::)?[A-Z][A-Z0-9_]*', tok): continue
      # A catch all for tricky sizeof cases, including 'sizeof expression',
      # 'sizeof(*type)', 'sizeof(const type)', 'sizeof(struct StructName)'
      # requires skipping the next token because we split on ' ' and '*'.
      if tok.startswith('sizeof'):
        skip_next = True
        continue
      is_const = False
      break
    if not is_const:
      error(filename, linenum, 'runtime/arrays', 1,
            'Do not use variable-length arrays. Use an appropriately named '
            "('k' followed by CamelCase) compile-time constant for the size.")
  # If DISALLOW_COPY_AND_ASSIGN DISALLOW_IMPLICIT_CONSTRUCTORS is present,
  # then it should be the last thing in the class declaration.
  match = Match(
      (r'\s*'
       r'(DISALLOW_(COPY_AND_ASSIGN|IMPLICIT_CONSTRUCTORS))'
       r'\(.*\);$'),
      line)
  if match and linenum + 1 < clean_lines.NumLines():
    next_line = clean_lines.elided[linenum + 1]
    # We allow some, but not all, declarations of variables to be present
    # in the statement that defines the class.  The [\w\*,\s]* fragment of
    # the regular expression below allows users to declare instances of
    # the class or pointers to instances, but not less common types such
    # as function pointers or arrays.  It's a tradeoff between allowing
    # reasonable code and avoiding trying to parse more C++ using regexps.
    if not Search(r'^\s*}[\w\*,\s]*;', next_line):
      error(filename, linenum, 'readability/constructors', 3,
            match.group(1) + ' should be the last thing in the class')
  # Check for use of unnamed namespaces in header files.  Registration
  # macros are typically OK, so we allow use of "namespace {" on lines
  # that end with backslashes.
  if (file_extension == 'h'
      and Search(r'\bnamespace\s*{', line)
      and line[-1] != '\\'):
    error(filename, linenum, 'build/namespaces', 4,
          'Do not use unnamed namespaces in header files. See '
          'http://google-styleguide.googlecode.com/svn/trunk/cppguide.xml#Namespaces'
          ' for more information.')
def CheckGlobalStatic(filename, clean_lines, linenum, error):
  """Check for unsafe global or static objects.

  Args:
    filename: The name of the current file.
    clean_lines: A CleansedLines instance containing the file.
    linenum: The number of the line to check.
    error: The function to call with any errors found.
  """
  line = clean_lines.elided[linenum]

  # Join with the next line so that declarations spanning two lines are
  # still matched, but only when this line is clearly unfinished.
  if linenum + 1 < clean_lines.NumLines() and not Search(r'[;({]', line):
    line += clean_lines.elided[linenum + 1].strip()

  # Static/global STL strings are dangerous: C++ does not guarantee that
  # globals with constructors are initialized before the first access.
  string_decl = Match(
      r'((?:|static +)(?:|const +))string +([a-zA-Z0-9_:]+)\b(.*)',
      line)
  # Remove false positives:
  # - String pointers (as opposed to values):
  #     string *pointer / const string *pointer / string const *pointer /
  #     string *const pointer
  # - Functions and template specializations:
  #     string Function<Type>(... / string Class<Type>::Method(...
  # - Operators.  These are matched separately because operator names
  #   cross non-word boundaries, and trying to match both operators
  #   and functions at the same time would decrease accuracy of
  #   matching identifiers.
  #     string Class::operator*()
  if (string_decl and
      not Search(r'\bstring\b(\s+const)?\s*\*\s*(const\s+)?\w', line) and
      not Search(r'\boperator\W', line) and
      not Match(r'\s*(<.*>)?(::[a-zA-Z0-9_]+)*\s*\(([^"]|$)',
                string_decl.group(3))):
    error(filename, linenum, 'runtime/string', 4,
          'For a static/global string constant, use a C style string '
          'instead: "%schar %s[]".' %
          (string_decl.group(1), string_decl.group(2)))

  # Catch 'member_(member_)' style self-initialization.
  if Search(r'\b([A-Za-z0-9_]*_)\(\1\)', line):
    error(filename, linenum, 'runtime/init', 4,
          'You seem to be initializing a member variable with itself.')
def CheckPrintf(filename, clean_lines, linenum, error):
  """Check for printf related issues.

  Args:
    filename: The name of the current file.
    clean_lines: A CleansedLines instance containing the file.
    linenum: The number of the line to check.
    error: The function to call with any errors found.
  """
  line = clean_lines.elided[linenum]

  # The second argument to snprintf should not be a numeric literal,
  # except 0 (the size-computation idiom).
  snprintf_size = Search(r'snprintf\s*\(([^,]*),\s*([0-9]*)\s*,', line)
  if snprintf_size and snprintf_size.group(2) != '0':
    # If 2nd arg is zero, snprintf is used to calculate size.
    error(filename, linenum, 'runtime/printf', 3,
          'If you can, use sizeof(%s) instead of %s as the 2nd arg '
          'to snprintf.' % (snprintf_size.group(1), snprintf_size.group(2)))

  # sprintf is unbounded and therefore always flagged.
  if Search(r'\bsprintf\s*\(', line):
    error(filename, linenum, 'runtime/printf', 5,
          'Never use sprintf. Use snprintf instead.')

  # strcpy/strcat are also unbounded copies.
  unsafe_copy = Search(r'\b(strcpy|strcat)\s*\(', line)
  if unsafe_copy:
    error(filename, linenum, 'runtime/printf', 4,
          'Almost always, snprintf is better than %s' % unsafe_copy.group(1))
def IsDerivedFunction(clean_lines, linenum):
  """Check if current line contains an inherited function.

  Args:
    clean_lines: A CleansedLines instance containing the file.
    linenum: The number of the line to check.

  Returns:
    True if current line contains a function with "override"
    virt-specifier.
  """
  # Scan back a few lines for start of current function.  range() is used
  # instead of the Python-2-only xrange() (NameError on Python 3); the
  # range holds at most 10 elements, so materializing it is cheap.
  for i in range(linenum, max(-1, linenum - 10), -1):
    match = Match(r'^([^()]*\w+)\(', clean_lines.elided[i])
    if match:
      # Look for "override" after the matching closing parenthesis.
      line, _, closing_paren = CloseExpression(
          clean_lines, i, len(match.group(1)))
      return (closing_paren >= 0 and
              Search(r'\boverride\b', line[closing_paren:]))
  return False
def IsInitializerList(clean_lines, linenum):
  """Check if current line is inside constructor initializer list.

  Args:
    clean_lines: A CleansedLines instance containing the file.
    linenum: The number of the line to check.

  Returns:
    True if current line appears to be inside constructor initializer
    list, False otherwise.
  """
  # Scan backwards from the current line.  range() is used instead of the
  # Python-2-only xrange() (NameError on Python 3); the loop terminates
  # at the first brace or semicolon, so the scan stays short in practice.
  for i in range(linenum, 1, -1):
    line = clean_lines.elided[i]
    if i == linenum:
      # Ignore a trailing '{' on the current line: it is the opening of
      # the constructor body, not part of the initializer list.
      remove_function_body = Match(r'^(.*)\{\s*$', line)
      if remove_function_body:
        line = remove_function_body.group(1)
    if Search(r'\s:\s*\w+[({]', line):
      # A lone colon tend to indicate the start of a constructor
      # initializer list.  It could also be a ternary operator, which
      # also tend to appear in constructor initializer lists as
      # opposed to parameter lists.
      return True
    if Search(r'\}\s*,\s*$', line):
      # A closing brace followed by a comma is probably the end of a
      # brace-initialized member in constructor initializer list.
      return True
    if Search(r'[{};]\s*$', line):
      # Found one of the following:
      # - A closing brace or semicolon, probably the end of the previous
      #   function.
      # - An opening brace, probably the start of current class or namespace.
      #
      # Current line is probably not inside an initializer list since
      # we saw one of those things without seeing the starting colon.
      return False
  # Got to the beginning of the file without seeing the start of
  # constructor initializer list.
  return False
def CheckForNonConstReference(filename, clean_lines, linenum,
                              nesting_state, error):
  """Check for non-const references.

  Separate from CheckLanguage since it scans backwards from current
  line, instead of scanning forward.

  Args:
    filename: The name of the current file.
    clean_lines: A CleansedLines instance containing the file.
    linenum: The number of the line to check.
    nesting_state: A NestingState instance which maintains information about
                   the current stack of nested blocks being parsed.
    error: The function to call with any errors found.
  """
  # Do nothing if there is no '&' on current line.
  line = clean_lines.elided[linenum]
  if '&' not in line:
    return
  # If a function is inherited, current function doesn't have much of
  # a choice, so any non-const references should not be blamed on
  # derived function.
  if IsDerivedFunction(clean_lines, linenum):
    return
  # Long type names may be broken across multiple lines, usually in one
  # of these forms:
  #   LongType
  #       ::LongTypeContinued &identifier
  #   LongType::
  #       LongTypeContinued &identifier
  #   LongType<
  #       ...>::LongTypeContinued &identifier
  #
  # If we detected a type split across two lines, join the previous
  # line to current line so that we can match const references
  # accordingly.
  #
  # Note that this only scans back one line, since scanning back
  # arbitrary number of lines would be expensive.  If you have a type
  # that spans more than 2 lines, please use a typedef.
  if linenum > 1:
    previous = None
    if Match(r'\s*::(?:[\w<>]|::)+\s*&\s*\S', line):
      # previous_line\n + ::current_line
      previous = Search(r'\b((?:const\s*)?(?:[\w<>]|::)+[\w<>])\s*$',
                        clean_lines.elided[linenum - 1])
    elif Match(r'\s*[a-zA-Z_]([\w<>]|::)+\s*&\s*\S', line):
      # previous_line::\n + current_line
      previous = Search(r'\b((?:const\s*)?(?:[\w<>]|::)+::)\s*$',
                        clean_lines.elided[linenum - 1])
    if previous:
      line = previous.group(1) + line.lstrip()
    else:
      # Check for templated parameter that is split across multiple lines
      endpos = line.rfind('>')
      if endpos > -1:
        (_, startline, startpos) = ReverseCloseExpression(
            clean_lines, linenum, endpos)
        if startpos > -1 and startline < linenum:
          # Found the matching < on an earlier line, collect all
          # pieces up to current line.  range() replaces the
          # Python-2-only xrange() (NameError on Python 3).
          line = ''
          for i in range(startline, linenum + 1):
            line += clean_lines.elided[i].strip()
  # Check for non-const references in function parameters.  A single '&' may
  # found in the following places:
  #   inside expression: binary & for bitwise AND
  #   inside expression: unary & for taking the address of something
  #   inside declarators: reference parameter
  # We will exclude the first two cases by checking that we are not inside a
  # function body, including one that was just introduced by a trailing '{'.
  # TODO(unknown): Doesn't account for 'catch(Exception& e)' [rare].
  if (nesting_state.previous_stack_top and
      not (isinstance(nesting_state.previous_stack_top, _ClassInfo) or
           isinstance(nesting_state.previous_stack_top, _NamespaceInfo))):
    # Not at toplevel, not within a class, and not within a namespace
    return
  # Avoid initializer lists.  We only need to scan back from the
  # current line for something that starts with ':'.
  #
  # We don't need to check the current line, since the '&' would
  # appear inside the second set of parentheses on the current line as
  # opposed to the first set.
  if linenum > 0:
    for i in range(linenum - 1, max(0, linenum - 10), -1):
      previous_line = clean_lines.elided[i]
      if not Search(r'[),]\s*$', previous_line):
        break
      if Match(r'^\s*:\s+\S', previous_line):
        return
  # Avoid preprocessors
  if Search(r'\\\s*$', line):
    return
  # Avoid constructor initializer lists
  if IsInitializerList(clean_lines, linenum):
    return
  # We allow non-const references in a few standard places, like functions
  # called "swap()" or iostream operators like "<<" or ">>".  Do not check
  # those function parameters.
  #
  # We also accept & in static_assert, which looks like a function but
  # it's actually a declaration expression.
  whitelisted_functions = (r'(?:[sS]wap(?:<\w:+>)?|'
                           r'operator\s*[<>][<>]|'
                           r'static_assert|COMPILE_ASSERT'
                           r')\s*\(')
  if Search(whitelisted_functions, line):
    return
  elif not Search(r'\S+\([^)]*$', line):
    # Don't see a whitelisted function on this line.  Actually we
    # didn't see any function name on this line, so this is likely a
    # multi-line parameter list.  Try a bit harder to catch this case.
    for i in range(2):
      if (linenum > i and
          Search(whitelisted_functions, clean_lines.elided[linenum - i - 1])):
        return

  decls = ReplaceAll(r'{[^}]*}', ' ', line)  # exclude function body
  for parameter in re.findall(_RE_PATTERN_REF_PARAM, decls):
    if not Match(_RE_PATTERN_CONST_REF_PARAM, parameter):
      error(filename, linenum, 'runtime/references', 2,
            'Is this a non-const reference? '
            'If so, make const or use a pointer: ' +
            ReplaceAll(' *<', '<', parameter))
def CheckCasts(filename, clean_lines, linenum, error):
  """Various cast related checks.

  Flags deprecated C-style casts on primitive types, delegates other
  C-style pointer casts to CheckCStyleCast (recommending static_cast,
  const_cast or reinterpret_cast), and warns about taking the address of
  a cast expression, whose result may be a temporary.

  Args:
    filename: The name of the current file.
    clean_lines: A CleansedLines instance containing the file.
    linenum: The number of the line to check.
    error: The function to call with any errors found.
  """
  line = clean_lines.elided[linenum]
  # Check to see if they're using an conversion function cast.
  # I just try to capture the most common basic types, though there are more.
  # Parameterless conversion functions, such as bool(), are allowed as they are
  # probably a member operator declaration or default constructor.
  match = Search(
      r'(\bnew\s+|\S<\s*(?:const\s+)?)?\b'
      r'(int|float|double|bool|char|int32|uint32|int64|uint64)'
      r'(\([^)].*)', line)
  # Inside MOCK_METHOD-style macros (see ExpectingFunctionArgs) a
  # "type(args)" sequence is a function type, not a cast.
  expecting_function = ExpectingFunctionArgs(clean_lines, linenum)
  if match and not expecting_function:
    matched_type = match.group(2)
    # matched_new_or_template is used to silence two false positives:
    # - New operators
    # - Template arguments with function types
    #
    # For template arguments, we match on types immediately following
    # an opening bracket without any spaces.  This is a fast way to
    # silence the common case where the function type is the first
    # template argument.  False negative with less-than comparison is
    # avoided because those operators are usually followed by a space.
    #
    #   function<double(double)>   // bracket + no space = false positive
    #   value < double(42)         // bracket + space = true positive
    matched_new_or_template = match.group(1)
    # Avoid arrays by looking for brackets that come after the closing
    # parenthesis.
    if Match(r'\([^()]+\)\s*\[', match.group(3)):
      return
    # Other things to ignore:
    # - Function pointers
    # - Casts to pointer types
    # - Placement new
    # - Alias declarations
    matched_funcptr = match.group(3)
    if (matched_new_or_template is None and
        not (matched_funcptr and
             (Match(r'\((?:[^() ]+::\s*\*\s*)?[^() ]+\)\s*\(',
                    matched_funcptr) or
              matched_funcptr.startswith('(*)'))) and
        not Match(r'\s*using\s+\S+\s*=\s*' + matched_type, line) and
        not Search(r'new\(\S+\)\s*' + matched_type, line)):
      error(filename, linenum, 'readability/casting', 4,
            'Using deprecated casting style. '
            'Use static_cast<%s>(...) instead' %
            matched_type)
  if not expecting_function:
    # Primitive-type C casts, e.g. (int)x.
    CheckCStyleCast(filename, clean_lines, linenum, 'static_cast',
                    r'\((int|float|double|bool|char|u?int(16|32|64))\)', error)
  # This doesn't catch all cases. Consider (const char * const)"hello".
  #
  # (char *) "foo" should always be a const_cast (reinterpret_cast won't
  # compile).
  if CheckCStyleCast(filename, clean_lines, linenum, 'const_cast',
                     r'\((char\s?\*+\s?)\)\s*"', error):
    pass
  else:
    # Check pointer casts for other than string constants
    CheckCStyleCast(filename, clean_lines, linenum, 'reinterpret_cast',
                    r'\((\w+\s?\*+\s?)\)', error)
  # In addition, we look for people taking the address of a cast.  This
  # is dangerous -- casts can assign to temporaries, so the pointer doesn't
  # point where you think.
  #
  # Some non-identifier character is required before the '&' for the
  # expression to be recognized as a cast.  These are casts:
  #   expression = &static_cast<int*>(temporary());
  #   function(&(int*)(temporary()));
  #
  # This is not a cast:
  #   reference_type&(int* function_param);
  match = Search(
      r'(?:[^\w]&\(([^)]+)\)[\w(])|'
      r'(?:[^\w]&(static|dynamic|down|reinterpret)_cast\b)', line)
  if match and match.group(1) != '*':
    # Try a better error message when the & is bound to something
    # dereferenced by the casted pointer, as opposed to the casted
    # pointer itself.
    parenthesis_error = False
    match = Match(r'^(.*&(?:static|dynamic|down|reinterpret)_cast\b)<', line)
    if match:
      # Walk matched angle brackets / parentheses across lines to find
      # what follows the closing ')' of the cast.
      _, y1, x1 = CloseExpression(clean_lines, linenum, len(match.group(1)))
      if x1 >= 0 and clean_lines.elided[y1][x1] == '(':
        _, y2, x2 = CloseExpression(clean_lines, y1, x1)
        if x2 >= 0:
          extended_line = clean_lines.elided[y2][x2:]
          if y2 < clean_lines.NumLines() - 1:
            extended_line += clean_lines.elided[y2 + 1]
          # '->' or '[' right after the cast means the address binds to a
          # dereferenced member/element, not to the cast result itself.
          if Match(r'\s*(?:->|\[)', extended_line):
            parenthesis_error = True
    if parenthesis_error:
      error(filename, linenum, 'readability/casting', 4,
            ('Are you taking an address of something dereferenced '
             'from a cast?  Wrapping the dereferenced expression in '
             'parentheses will make the binding more obvious'))
    else:
      error(filename, linenum, 'runtime/casting', 4,
            ('Are you taking an address of a cast?  '
             'This is dangerous: could be a temp var.  '
             'Take the address before doing the cast, rather than after'))
def CheckCStyleCast(filename, clean_lines, linenum, cast_type, pattern, error):
  """Checks for a C-style cast by looking for the pattern.

  Args:
    filename: The name of the current file.
    clean_lines: A CleansedLines instance containing the file.
    linenum: The number of the line to check.
    cast_type: The string for the C++ cast to recommend.  This is either
      reinterpret_cast, static_cast, or const_cast, depending.
    pattern: The regular expression used to find C-style casts.
    error: The function to call with any errors found.

  Returns:
    True if an error was emitted.
    False otherwise.
  """
  line = clean_lines.elided[linenum]
  match = Search(pattern, line)
  if not match:
    return False
  # Exclude lines with keywords that tend to look like casts
  context = line[0:match.start(1) - 1]
  if Match(r'.*\b(?:sizeof|alignof|alignas|[_A-Z][_A-Z0-9]*)\s*$', context):
    return False
  # Try expanding current context to see if we one level of
  # parentheses inside a macro.
  if linenum > 0:
    # Prepend up to 5 previous lines so a macro invocation opened earlier
    # is visible in `context`.
    for i in xrange(linenum - 1, max(0, linenum - 5), -1):
      context = clean_lines.elided[i] + context
  if Match(r'.*\b[_A-Z][_A-Z0-9]*\s*\((?:\([^()]*\)|[^()])*$', context):
    return False
  # operator++(int) and operator--(int)
  if context.endswith(' operator++') or context.endswith(' operator--'):
    return False
  # A single unnamed argument for a function tends to look like old
  # style cast.  If we see those, don't issue warnings for deprecated
  # casts, instead issue warnings for unnamed arguments where
  # appropriate.
  #
  # These are things that we want warnings for, since the style guide
  # explicitly require all parameters to be named:
  #   Function(int);
  #   Function(int) {
  #   ConstMember(int) const;
  #   ConstMember(int) const {
  #   ExceptionMember(int) throw (...);
  #   ExceptionMember(int) throw (...) {
  #   PureVirtual(int) = 0;
  #
  # These are functions of some sort, where the compiler would be fine
  # if they had named parameters, but people often omit those
  # identifiers to reduce clutter:
  #   (FunctionPointer)(int);
  #   (FunctionPointer)(int) = value;
  #   Function((function_pointer_arg)(int))
  #   Function((function_pointer_arg)(int), int param)
  #   <TemplateArgument(int)>;
  #   <(FunctionPointerTemplateArgument)(int)>;
  remainder = line[match.end(0):]
  if Match(r'^\s*(?:;|const\b|throw\b|final\b|override\b|[=>{),])',
           remainder):
    # Looks like an unnamed parameter.
    # Don't warn on any kind of template arguments.
    if Match(r'^\s*>', remainder):
      return False
    # Don't warn on assignments to function pointers, but keep warnings for
    # unnamed parameters to pure virtual functions.  Note that this pattern
    # will also pass on assignments of "0" to function pointers, but the
    # preferred values for those would be "nullptr" or "NULL".
    # NOTE(review): r'^\s=' matches exactly ONE whitespace character before
    # '=', so only " = value;" spacing is recognized -- presumably
    # intentional for the common formatting, but confirm against upstream.
    matched_zero = Match(r'^\s=\s*(\S+)\s*;', remainder)
    if matched_zero and matched_zero.group(1) != '0':
      return False
    # Don't warn on function pointer declarations.  For this we need
    # to check what came before the "(type)" string.
    if Match(r'.*\)\s*$', line[0:match.start(0)]):
      return False
    # Don't warn if the parameter is named with block comments, e.g.:
    #  Function(int /*unused_param*/);
    raw_line = clean_lines.raw_lines[linenum]
    if '/*' in raw_line:
      return False
    # Passed all filters, issue warning here.
    error(filename, linenum, 'readability/function', 3,
          'All parameters should be named in a function')
    return True
  # At this point, all that should be left is actual casts.
  error(filename, linenum, 'readability/casting', 4,
        'Using C-style cast. Use %s<%s>(...) instead' %
        (cast_type, match.group(1)))
  return True
def ExpectingFunctionArgs(clean_lines, linenum):
  """Checks whether function type arguments are expected at this line.

  Inside MOCK_METHOD-style macros and std::function template arguments, a
  "type(args)" sequence is a function type, not a C-style cast.

  Args:
    clean_lines: A CleansedLines instance containing the file.
    linenum: The number of the line to check.

  Returns:
    True if the line at 'linenum' is inside something that expects arguments
    of function types.
  """
  current = clean_lines.elided[linenum]
  # A MOCK_METHOD macro opening on the current line.
  direct = Match(r'^\s*MOCK_(CONST_)?METHOD\d+(_T)?\(', current)
  if direct:
    return direct
  if linenum < 2:
    return False
  # Otherwise the macro (or a std::function template) may have been opened
  # on one of the two preceding lines.
  previous = clean_lines.elided[linenum - 1]
  return (Match(r'^\s*MOCK_(?:CONST_)?METHOD\d+(?:_T)?\((?:\S+,)?\s*$',
                previous) or
          Match(r'^\s*MOCK_(?:CONST_)?METHOD\d+(?:_T)?\(\s*$',
                clean_lines.elided[linenum - 2]) or
          Search(r'\bstd::m?function\s*\<\s*$', previous))
# Table of C++ standard headers and the template names each one provides.
# Consumed below to build _re_pattern_templates, which drives
# CheckForIncludeWhatYouUse.
_HEADERS_CONTAINING_TEMPLATES = (
    ('<deque>', ('deque',)),
    ('<functional>', ('unary_function', 'binary_function',
                      'plus', 'minus', 'multiplies', 'divides', 'modulus',
                      'negate',
                      'equal_to', 'not_equal_to', 'greater', 'less',
                      'greater_equal', 'less_equal',
                      'logical_and', 'logical_or', 'logical_not',
                      'unary_negate', 'not1', 'binary_negate', 'not2',
                      'bind1st', 'bind2nd',
                      'pointer_to_unary_function',
                      'pointer_to_binary_function',
                      'ptr_fun',
                      'mem_fun_t', 'mem_fun', 'mem_fun1_t', 'mem_fun1_ref_t',
                      'mem_fun_ref_t',
                      'const_mem_fun_t', 'const_mem_fun1_t',
                      'const_mem_fun_ref_t', 'const_mem_fun1_ref_t',
                      'mem_fun_ref',
                     )),
    ('<limits>', ('numeric_limits',)),
    ('<list>', ('list',)),
    ('<map>', ('map', 'multimap',)),
    ('<memory>', ('allocator',)),
    ('<queue>', ('queue', 'priority_queue',)),
    ('<set>', ('set', 'multiset',)),
    ('<stack>', ('stack',)),
    ('<string>', ('char_traits', 'basic_string',)),
    ('<utility>', ('pair',)),
    ('<vector>', ('vector',)),
    # gcc extensions.
    # Note: std::hash is their hash, ::hash is our hash
    ('<hash_map>', ('hash_map', 'hash_multimap',)),
    ('<hash_set>', ('hash_set', 'hash_multiset',)),
    ('<slist>', ('slist',)),
    )

# Matches the bare word 'string'; used to suggest #include <string>.
_RE_PATTERN_STRING = re.compile(r'\bstring\b')

# (compiled pattern, display name, required header) triples for <algorithm>
# functions, built once at import time.
_re_pattern_algorithm_header = []
for _template in ('copy', 'max', 'min', 'min_element', 'sort', 'swap',
                  'transform'):
  # Match max<type>(..., ...), max(..., ...), but not foo->max, foo.max or
  # type::max().
  _re_pattern_algorithm_header.append(
      (re.compile(r'[^>.]\b' + _template + r'(<.*?>)?\([^\)]'),
       _template,
       '<algorithm>'))

# Same triple shape as above, expanded from _HEADERS_CONTAINING_TEMPLATES:
# matches e.g. 'vector<' and maps it to '<vector>'.
_re_pattern_templates = []
for _header, _templates in _HEADERS_CONTAINING_TEMPLATES:
  for _template in _templates:
    _re_pattern_templates.append(
        (re.compile(r'(\<|\b)' + _template + r'\s*\<'),
         _template + '<>',
         _header))
def FilesBelongToSameModule(filename_cc, filename_h):
  """Determine whether a .cc file and a header form one module.

  Two files count as the same module when, after stripping the test
  suffixes ('_unittest', '_test'), the '-inl' header suffix, and any
  '/public/' or '/internal/' path components, the .cc path ends with the
  header path.  So foo.h, foo-inl.h, foo.cc, foo_test.cc and
  foo_unittest.cc in one directory all belong to the same module.

  When the .cc path is longer than the header path (e.g. an absolute path
  '/absolute/path/to/base/sysinfo.cc' including 'base/sysinfo.h'), the
  extra leading portion is returned as well so the caller can use it as a
  prefix to open the header file; real include paths are not available in
  this context, so this is guesswork.

  Known bugs: tools/base/bar.cc and base/bar.h are treated as one module
  by this heuristic, which causes occasional false positives.

  Args:
    filename_cc: is the path for the .cc file
    filename_h: is the path for the header path

  Returns:
    Tuple with a bool and a string:
      bool: True if filename_cc and filename_h belong to the same module.
      string: the additional prefix needed to open the header file.
  """
  if not filename_cc.endswith('.cc'):
    return (False, '')

  # Canonicalize the implementation file: drop the extension, any test
  # suffix, and visibility path components.
  filename_cc = filename_cc[:-len('.cc')]
  for test_suffix in ('_unittest', '_test'):
    if filename_cc.endswith(test_suffix):
      filename_cc = filename_cc[:-len(test_suffix)]
      break
  filename_cc = filename_cc.replace('/public/', '/')
  filename_cc = filename_cc.replace('/internal/', '/')

  if not filename_h.endswith('.h'):
    return (False, '')

  # Canonicalize the header the same way, also dropping an '-inl' suffix.
  filename_h = filename_h[:-len('.h')]
  if filename_h.endswith('-inl'):
    filename_h = filename_h[:-len('-inl')]
  filename_h = filename_h.replace('/public/', '/')
  filename_h = filename_h.replace('/internal/', '/')

  same_module = filename_cc.endswith(filename_h)
  common_path = filename_cc[:-len(filename_h)] if same_module else ''
  return same_module, common_path
def UpdateIncludeState(filename, include_dict, io=codecs):
  """Fill up the include_dict with new includes found from the file.

  Args:
    filename: the name of the header to read.
    include_dict: a dictionary in which the headers are inserted, mapping
        each included header to the first line number where it appears.
    io: The io factory to use to read the file. Provided for testability.

  Returns:
    True if the file was opened and scanned (its includes, if any, were
    added). False otherwise.
  """
  headerfile = None
  try:
    headerfile = io.open(filename, 'r', 'utf8', 'replace')
  except IOError:
    return False
  # Fix: the original implementation never closed the file handle,
  # leaking one descriptor per scanned header.  Always close it.
  try:
    linenum = 0
    for line in headerfile:
      linenum += 1
      clean_line = CleanseComments(line)
      match = _RE_PATTERN_INCLUDE.search(clean_line)
      if match:
        include = match.group(2)
        # setdefault: record only the first occurrence of each include.
        include_dict.setdefault(include, linenum)
  finally:
    headerfile.close()
  return True
def CheckForIncludeWhatYouUse(filename, clean_lines, include_state, error,
                              io=codecs):
  """Reports for missing stl includes.

  This function will output warnings to make sure you are including the headers
  necessary for the stl containers and functions that you use. We only give one
  reason to include a header. For example, if you use both equal_to<> and
  less<> in a .h file, only one (the latter in the file) of these will be
  reported as a reason to include the <functional>.

  Args:
    filename: The name of the current file.
    clean_lines: A CleansedLines instance containing the file.
    include_state: An _IncludeState instance.
    error: The function to call with any errors found.
    io: The IO factory to use to read the header file. Provided for unittest
        injection.
  """
  # Map of header name to (line number, template entity) that requires it,
  # e.g. { '<functional>': (1219, 'less<>') }.
  required = {}

  for linenum in xrange(clean_lines.NumLines()):
    line = clean_lines.elided[linenum]
    if not line or line[0] == '#':
      continue

    # String is special -- it is a non-templatized type in STL.
    matched = _RE_PATTERN_STRING.search(line)
    if matched:
      # Don't warn about strings in non-STL namespaces:
      # (We check only the first match per line; good enough.)
      prefix = line[:matched.start()]
      if prefix.endswith('std::') or not prefix.endswith('::'):
        required['<string>'] = (linenum, 'string')

    for pattern, template, header in _re_pattern_algorithm_header:
      if pattern.search(line):
        required[header] = (linenum, template)

    # The following check is just a speed up, no semantics are changed.
    if '<' not in line:  # Reduces the cpu time usage by skipping lines.
      continue

    for pattern, template, header in _re_pattern_templates:
      if pattern.search(line):
        required[header] = (linenum, template)

  # The policy is that if you #include something in foo.h you don't need to
  # include it again in foo.cc. Here, we will look at possible includes.
  # Let's flatten the include_state include_list and copy it into a dictionary.
  include_dict = dict([item for sublist in include_state.include_list
                       for item in sublist])

  # Did we find the header for this file (if any) and successfully load it?
  header_found = False

  # Use the absolute path so that matching works properly.
  abs_filename = FileInfo(filename).FullName()

  # For Emacs's flymake.
  # If cpplint is invoked from Emacs's flymake, a temporary file is generated
  # by flymake and that file name might end with '_flymake.cc'. In that case,
  # restore original file name here so that the corresponding header file can
  # be found.
  # e.g. If the file name is 'foo_flymake.cc', we should search for 'foo.h'
  # instead of 'foo_flymake.h'
  abs_filename = re.sub(r'_flymake\.cc$', '.cc', abs_filename)

  # include_dict is modified during iteration (UpdateIncludeState inserts
  # into it), so iterate over a real copy of the keys.  Fix: dict.keys()
  # only returns a copy on Python 2; on Python 3 it is a live view, so
  # materialize it with list() (harmless on Python 2, required on 3).
  header_keys = list(include_dict.keys())
  for header in header_keys:
    (same_module, common_path) = FilesBelongToSameModule(abs_filename, header)
    fullpath = common_path + header
    if same_module and UpdateIncludeState(fullpath, include_dict, io):
      header_found = True

  # If we can't find the header file for a .cc, assume it's because we don't
  # know where to look. In that case we'll give up as we're not sure they
  # didn't include it in the .h file.
  # TODO(unknown): Do a better job of finding .h files so we are confident that
  # not having the .h file means there isn't one.
  if filename.endswith('.cc') and not header_found:
    return

  # All the lines have been processed, report the errors found.
  for required_header_unstripped in required:
    template = required[required_header_unstripped][1]
    if required_header_unstripped.strip('<>"') not in include_dict:
      error(filename, required[required_header_unstripped][0],
            'build/include_what_you_use', 4,
            'Add #include ' + required_header_unstripped + ' for ' + template)
# Matches make_pair with explicit template arguments, e.g. 'make_pair<int,'.
_RE_PATTERN_EXPLICIT_MAKEPAIR = re.compile(r'\bmake_pair\s*<')


def CheckMakePairUsesDeduction(filename, clean_lines, linenum, error):
  """Check that make_pair's template arguments are deduced.

  G++ 4.6 in C++11 mode fails badly if make_pair's template arguments are
  specified explicitly, and such use isn't intended in any case.

  Args:
    filename: The name of the current file.
    clean_lines: A CleansedLines instance containing the file.
    linenum: The number of the line to check.
    error: The function to call with any errors found.
  """
  if not _RE_PATTERN_EXPLICIT_MAKEPAIR.search(clean_lines.elided[linenum]):
    return
  error(filename, linenum, 'build/explicit_make_pair',
        4,  # 4 = high confidence
        'For C++11-compatibility, omit template arguments from make_pair'
        ' OR use pair directly OR if appropriate, construct a pair directly')
def CheckDefaultLambdaCaptures(filename, clean_lines, linenum, error):
  """Flag default lambda captures ("[=]" or "[&]"), which are unapproved.

  Args:
    filename: The name of the current file.
    clean_lines: A CleansedLines instance containing the file.
    linenum: The number of the line to check.
    error: The function to call with any errors found.
  """
  line = clean_lines.elided[linenum]
  # A lambda introducer specifies a default capture if it starts with "[="
  # or if it starts with "[&" _not_ followed by an identifier.
  introducer = Match(r'^(.*)\[\s*(?:=|&[^\w])', line)
  if not introducer:
    return
  # A real lambda continues past the introducer with a parenthesized
  # lambda-declarator or a brace-enclosed compound-statement; anything
  # else (e.g. array subscripts) is not a lambda.
  line, _, pos = CloseExpression(clean_lines, linenum, len(introducer.group(1)))
  if pos < 0 or not Match(r'^\s*[{(]', line[pos:]):
    return
  error(filename, linenum, 'build/c++11',
        4,  # 4 = high confidence
        'Default lambda captures are an unapproved C++ feature.')
def CheckRedundantVirtual(filename, clean_lines, linenum, error):
  """Check if line contains a redundant "virtual" function-specifier.

  A declaration marked "override" or "final" is necessarily virtual, so an
  explicit "virtual" on the same declaration is redundant.

  Args:
    filename: The name of the current file.
    clean_lines: A CleansedLines instance containing the file.
    linenum: The number of the line to check.
    error: The function to call with any errors found.
  """
  # Look for "virtual" on current line.
  line = clean_lines.elided[linenum]
  virtual = Match(r'^(.*\bvirtual\b)', line)
  if not virtual: return
  # Look for the next opening parenthesis.  This is the start of the
  # parameter list (possibly on the next line shortly after virtual).
  # TODO(unknown): doesn't work if there are virtual functions with
  # decltype() or other things that use parentheses, but csearch suggests
  # that this is rare.
  end_col = -1
  end_line = -1
  start_col = len(virtual.group(1))
  # Scan at most 3 lines starting at "virtual" for the '(' of the
  # parameter list.
  for start_line in xrange(linenum, min(linenum + 3, clean_lines.NumLines())):
    line = clean_lines.elided[start_line][start_col:]
    parameter_list = Match(r'^([^(]*)\(', line)
    if parameter_list:
      # Match parentheses to find the end of the parameter list
      (_, end_line, end_col) = CloseExpression(
          clean_lines, start_line, start_col + len(parameter_list.group(1)))
      break
    # After the first line, continue scanning from column 0.
    start_col = 0
  if end_col < 0:
    return  # Couldn't find end of parameter list, give up
  # Look for "override" or "final" after the parameter list
  # (possibly on the next few lines).
  for i in xrange(end_line, min(end_line + 3, clean_lines.NumLines())):
    line = clean_lines.elided[i][end_col:]
    match = Search(r'\b(override|final)\b', line)
    if match:
      error(filename, linenum, 'readability/inheritance', 4,
            ('"virtual" is redundant since function is '
             'already declared as "%s"' % match.group(1)))
    # Set end_col to check whole lines after we are done with the
    # first line.
    end_col = 0
    # A non-identifier character at end of line means the declaration is
    # over; stop scanning.
    if Search(r'[^\w]\s*$', line):
      break
def CheckRedundantOverrideOrFinal(filename, clean_lines, linenum, error):
  """Check if line contains a redundant "override" or "final" virt-specifier.

  "final" already implies that a function overrides, so writing both
  specifiers on one declaration is redundant.

  Args:
    filename: The name of the current file.
    clean_lines: A CleansedLines instance containing the file.
    linenum: The number of the line to check.
    error: The function to call with any errors found.
  """
  line = clean_lines.elided[linenum]
  has_override = Search(r'\boverride\b', line)
  has_final = Search(r'\bfinal\b', line)
  if has_override and has_final:
    error(filename, linenum, 'readability/inheritance', 4,
          ('"override" is redundant since function is '
           'already declared as "final"'))
# Returns true if we are at a new block, and it is directly
# inside of a namespace.
def IsBlockInNameSpace(nesting_state, is_forward_declaration):
  """Checks that the new block is directly in a namespace.

  Args:
    nesting_state: The _NestingState object that contains info about our state.
    is_forward_declaration: If the class is a forward declared class.
  Returns:
    Whether or not the new block is directly in a namespace.
  """
  stack = nesting_state.stack
  if is_forward_declaration:
    # A forward declaration only needs the innermost scope to be a namespace.
    return len(stack) >= 1 and isinstance(stack[-1], _NamespaceInfo)

  # A full block needs a parent namespace and an indentation-checked scope
  # on top of it.
  return (len(stack) > 1 and
          stack[-1].check_namespace_indentation and
          isinstance(stack[-2], _NamespaceInfo))
def ShouldCheckNamespaceIndentation(nesting_state, is_namespace_indent_item,
                                    raw_lines_no_comments, linenum):
  """This method determines if we should apply our namespace indentation check.

  Args:
    nesting_state: The current nesting state.
    is_namespace_indent_item: If we just put a new class on the stack, True.
      If the top of the stack is not a class, or we did not recently
      add the class, False.
    raw_lines_no_comments: The lines without the comments.
    linenum: The current line number we are processing.

  Returns:
    True if we should apply our namespace indentation check. Currently, it
    only works for classes and namespaces inside of a namespace.
  """
  forward_declaration = IsForwardClassDeclaration(raw_lines_no_comments,
                                                  linenum)
  # Only a freshly pushed class/namespace or a forward declaration is a
  # candidate for the check.
  if not is_namespace_indent_item and not forward_declaration:
    return False

  # Macro bodies are exempt from the namespace indentation rule.
  if IsMacroDefinition(raw_lines_no_comments, linenum):
    return False

  return IsBlockInNameSpace(nesting_state, forward_declaration)
# Call this method if the line is directly inside of a namespace.
# If the line above is blank (excluding comments) or the start of
# an inner namespace, it cannot be indented.
def CheckItemIndentationInNamespace(filename, raw_lines_no_comments, linenum,
                                    error):
  """Warn when a line directly inside a namespace is indented."""
  item_line = raw_lines_no_comments[linenum]
  if not Match(r'^\s+', item_line):
    return
  error(filename, linenum, 'runtime/indentation_namespace', 4,
        'Do not indent within a namespace')
def ProcessLine(filename, file_extension, clean_lines, line,
                include_state, function_state, nesting_state, error,
                extra_check_functions=[]):
  """Processes a single line in the file.

  Args:
    filename: Filename of the file that is being processed.
    file_extension: The extension (dot not included) of the file.
    clean_lines: An array of strings, each representing a line of the file,
                 with comments stripped.
    line: Number of line being processed.
    include_state: An _IncludeState instance in which the headers are inserted.
    function_state: A _FunctionState instance which counts function lines, etc.
    nesting_state: A NestingState instance which maintains information about
                   the current stack of nested blocks being parsed.
    error: A callable to which errors are reported, which takes 4 arguments:
           filename, line number, error level, and message
    extra_check_functions: An array of additional check functions that will be
                           run on each source line. Each function takes 4
                           arguments: filename, clean_lines, line, error
  """
  # NOTE(review): extra_check_functions has a mutable default argument; it
  # is only iterated below, never mutated, so this is currently harmless.
  raw_lines = clean_lines.raw_lines
  # Bookkeeping first: NOLINT suppressions and the nesting stack must be
  # current before any check runs.
  ParseNolintSuppressions(filename, raw_lines[line], line, error)
  nesting_state.Update(filename, clean_lines, line, error)
  CheckForNamespaceIndentation(filename, nesting_state, clean_lines, line,
                               error)
  # Project-specific (Mongo*) checks.
  CheckForMongoPolyfill(filename, clean_lines, line, error)
  CheckForMongoAtomic(filename, clean_lines, line, error)
  CheckForMongoVolatile(filename, clean_lines, line, error)
  CheckForNonMongoAssert(filename, clean_lines, line, error)
  # Inline assembly is exempt from the remaining style checks.
  if nesting_state.InAsmBlock(): return
  CheckForFunctionLengths(filename, clean_lines, line, function_state, error)
  CheckForMultilineCommentsAndStrings(filename, clean_lines, line, error)
  CheckStyle(filename, clean_lines, line, file_extension, nesting_state, error)
  CheckLanguage(filename, clean_lines, line, file_extension, include_state,
                nesting_state, error)
  CheckForNonConstReference(filename, clean_lines, line, nesting_state, error)
  CheckForNonStandardConstructs(filename, clean_lines, line,
                                nesting_state, error)
  CheckVlogArguments(filename, clean_lines, line, error)
  CheckPosixThreading(filename, clean_lines, line, error)
  CheckInvalidIncrement(filename, clean_lines, line, error)
  CheckMakePairUsesDeduction(filename, clean_lines, line, error)
  CheckDefaultLambdaCaptures(filename, clean_lines, line, error)
  CheckRedundantVirtual(filename, clean_lines, line, error)
  CheckRedundantOverrideOrFinal(filename, clean_lines, line, error)
  # Caller-supplied checks run last.
  for check_fn in extra_check_functions:
    check_fn(filename, clean_lines, line, error)
def FlagCxx11Features(filename, clean_lines, linenum, error):
  """Flag those c++11 features that we only allow in certain places.

  Args:
    filename: The name of the current file.
    clean_lines: A CleansedLines instance containing the file.
    linenum: The number of the line to check.
    error: The function to call with any errors found.
  """
  line = clean_lines.elided[linenum]

  # Flag unapproved C++11 headers.
  unapproved_headers = ('cfenv',
                        'condition_variable',
                        'fenv.h',
                        'future',
                        'mutex',
                        'thread',
                        'chrono',
                        'ratio',
                        'regex',
                        'system_error')
  include = Match(r'\s*#\s*include\s+[<"]([^<"]+)[">]', line)
  if include and include.group(1) in unapproved_headers:
    error(filename, linenum, 'build/c++11', 5,
          '<%s> is an unapproved C++11 header.' % include.group(1))

  # The only place where we need to worry about C++11 keywords and library
  # features in preprocessor directives is in macro definitions.
  if Match(r'\s*#', line) and not Match(r'\s*#\s*define\b', line):
    return

  # These are classes and free functions.  The classes are always
  # mentioned as std::*, but we only catch the free functions if
  # they're not found by ADL.  They're alphabetical by header.
  for top_name in ('alignment_of',   # type_traits
                   'aligned_union',  # type_traits
                   'forward'):       # utility
    if Search(r'\bstd::%s\b' % top_name, line):
      error(filename, linenum, 'build/c++11', 5,
            ('std::%s is an unapproved C++11 class or function. Send c-style '
             'an example of where it would make your code more readable, and '
             'they may let you use it.') % top_name)
def ProcessFileData(filename, file_extension, lines, error,
                    extra_check_functions=[]):
  """Performs lint checks and reports any errors to the given error function.

  Args:
    filename: Filename of the file that is being processed.
    file_extension: The extension (dot not included) of the file.
    lines: An array of strings, each representing a line of the file, with the
           last element being empty if the file is terminated with a newline.
    error: A callable to which errors are reported, which takes 4 arguments:
           filename, line number, error level, and message
    extra_check_functions: An array of additional check functions that will be
                           run on each source line. Each function takes 4
                           arguments: filename, clean_lines, line, error
  """
  # NOTE(review): extra_check_functions has a mutable default argument; it
  # is only forwarded to ProcessLine, never mutated, so currently harmless.
  # Pad the line array so line numbers and list indices both start at 1;
  # the trailing marker keeps lookahead checks from running off the end.
  lines = (['// marker so line numbers and indices both start at 1'] + lines +
           ['// marker so line numbers end in a known way'])
  include_state = _IncludeState()
  function_state = _FunctionState()
  nesting_state = NestingState()
  ResetNolintSuppressions()
  CheckForCopyright(filename, lines, error)
  # Header files additionally get an include-guard check.
  if file_extension == 'h':
    CheckForHeaderGuard(filename, lines, error)
  RemoveMultiLineComments(filename, lines, error)
  clean_lines = CleansedLines(lines)
  # Per-line checks run on the cleansed lines.
  for line in xrange(clean_lines.NumLines()):
    ProcessLine(filename, file_extension, clean_lines, line,
                include_state, function_state, nesting_state, error,
                extra_check_functions)
    FlagCxx11Features(filename, clean_lines, line, error)
  nesting_state.CheckCompletedBlocks(filename, error)
  CheckForIncludeWhatYouUse(filename, clean_lines, include_state, error)
  # We check here rather than inside ProcessLine so that we see raw
  # lines rather than "cleaned" lines.
  CheckForBadCharacters(filename, lines, error)
  CheckForNewlineAtEOF(filename, lines, error)
def ProcessConfigOverrides(filename):
  """Loads the configuration files and processes the config overrides.

  Walks from the file's directory up to the filesystem root, reading any
  CPPLINT.cfg found along the way.  Recognized options: 'set noparent'
  (stop walking upward), 'filter=...', 'exclude_files=<regex>' (matched
  against the path component being processed), and 'linelength=<int>'.

  Args:
    filename: The name of the file being processed by the linter.

  Returns:
    False if the current |filename| should not be processed further.
  """

  abs_filename = os.path.abspath(filename)
  cfg_filters = []
  keep_looking = True
  while keep_looking:
    abs_path, base_name = os.path.split(abs_filename)
    if not base_name:
      break  # Reached the root directory.

    cfg_file = os.path.join(abs_path, "CPPLINT.cfg")
    abs_filename = abs_path
    if not os.path.isfile(cfg_file):
      continue

    try:
      with open(cfg_file) as file_handle:
        for line in file_handle:
          line, _, _ = line.partition('#')  # Remove comments.
          if not line.strip():
            continue

          name, _, val = line.partition('=')
          name = name.strip()
          val = val.strip()
          if name == 'set noparent':
            keep_looking = False
          elif name == 'filter':
            cfg_filters.append(val)
          elif name == 'exclude_files':
            # When matching exclude_files pattern, use the base_name of
            # the current file name or the directory name we are processing.
            # For example, if we are checking for lint errors in /foo/bar/baz.cc
            # and we found the .cfg file at /foo/CPPLINT.cfg, then the config
            # file's "exclude_files" filter is meant to be checked against "bar"
            # and not "baz" nor "bar/baz.cc".
            if base_name:
              pattern = re.compile(val)
              if pattern.match(base_name):
                sys.stderr.write('Ignoring "%s": file excluded by "%s". '
                                 'File path component "%s" matches '
                                 'pattern "%s"\n' %
                                 (filename, cfg_file, base_name, val))
                return False
          elif name == 'linelength':
            global _line_length
            try:
              _line_length = int(val)
            except ValueError:
              # Fix: include the trailing newline every other stderr message
              # in this function has, so messages don't run together.
              sys.stderr.write('Line length must be numeric.\n')
          else:
            sys.stderr.write(
                'Invalid configuration option (%s) in file %s\n' %
                (name, cfg_file))

    except IOError:
      sys.stderr.write(
          "Skipping config file '%s': Can't open for reading\n" % cfg_file)
      keep_looking = False

  # Apply all the accumulated filters in reverse order (top-level directory
  # config options having the least priority).
  # Fix: renamed loop variable so it no longer shadows the builtin filter().
  for cfg_filter in reversed(cfg_filters):
    _AddFilters(cfg_filter)

  return True
def ProcessFile(filename, vlevel, extra_check_functions=None):
  """Does google-lint on a single file.

  Args:
    filename: The name of the file to parse.
    vlevel: The level of errors to report.  Every error of confidence
            >= verbose_level will be reported.  0 is a good default.
    extra_check_functions: An array of additional check functions that will be
                           run on each source line. Each function takes 4
                           arguments: filename, clean_lines, line, error.
                           Defaults to no extra checks.
  """
  # Bug fix: the default used to be a mutable list literal, which is shared
  # across calls; use None and create a fresh list per call instead.
  if extra_check_functions is None:
    extra_check_functions = []

  _SetVerboseLevel(vlevel)
  _BackupFilters()

  if not ProcessConfigOverrides(filename):
    _RestoreFilters()
    return

  lf_lines = []
  crlf_lines = []
  try:
    # Support the UNIX convention of using "-" for stdin.  Note that
    # we are not opening the file with universal newline support
    # (which codecs doesn't support anyway), so the resulting lines do
    # contain trailing '\r' characters if we are reading a file that
    # has CRLF endings.
    # If after the split a trailing '\r' is present, it is removed
    # below.
    if filename == '-':
      lines = codecs.StreamReaderWriter(sys.stdin,
                                        codecs.getreader('utf8'),
                                        codecs.getwriter('utf8'),
                                        'replace').read().split('\n')
    else:
      # Bug fix: close the file handle deterministically instead of leaking
      # it until garbage collection.
      with codecs.open(filename, 'r', 'utf8', 'replace') as target_file:
        lines = target_file.read().split('\n')

    # Remove trailing '\r'.
    # The -1 accounts for the extra trailing blank line we get from split()
    for linenum in range(len(lines) - 1):
      if lines[linenum].endswith('\r'):
        lines[linenum] = lines[linenum].rstrip('\r')
        crlf_lines.append(linenum + 1)
      else:
        lf_lines.append(linenum + 1)
  except IOError:
    sys.stderr.write(
        "Skipping input '%s': Can't open for reading\n" % filename)
    _RestoreFilters()
    return

  # Note, if no dot is found, this will give the entire filename as the ext.
  file_extension = filename[filename.rfind('.') + 1:]

  # When reading from stdin, the extension is unknown, so no cpplint tests
  # should rely on the extension.
  if filename != '-' and file_extension not in _valid_extensions:
    sys.stderr.write('Ignoring %s; not a valid file name '
                     '(%s)\n' % (filename, ', '.join(_valid_extensions)))
  else:
    ProcessFileData(filename, file_extension, lines, Error,
                    extra_check_functions)

    # If end-of-line sequences are a mix of LF and CR-LF, issue
    # warnings on the lines with CR.
    #
    # Don't issue any warnings if all lines are uniformly LF or CR-LF,
    # since critique can handle these just fine, and the style guide
    # doesn't dictate a particular end of line sequence.
    #
    # We can't depend on os.linesep to determine what the desired
    # end-of-line sequence should be, since that will return the
    # server-side end-of-line sequence.
    if lf_lines and crlf_lines:
      # Warn on every line with CR.  An alternative approach might be to
      # check whether the file is mostly CRLF or just LF, and warn on the
      # minority, we bias toward LF here since most tools prefer LF.
      for linenum in crlf_lines:
        Error(filename, linenum, 'whitespace/newline', 1,
              'Unexpected \\r (^M) found; better to use only \\n')

  _RestoreFilters()
def PrintUsage(message):
  """Prints a brief usage string and exits, optionally with an error message.

  Args:
    message: The optional error message.
  """
  sys.stderr.write(_USAGE)
  # Without a message, exit with a plain non-zero status; with one, let
  # sys.exit print the fatal-error text and set a non-zero status.
  if not message:
    sys.exit(1)
  sys.exit('\nFATAL ERROR: ' + message)
def PrintCategories():
  """Prints a list of all the error-categories used by error messages.

  These are the categories used to filter messages via --filter.
  """
  # Build all category lines first so a single write emits the full list.
  category_lines = ['  %s\n' % cat for cat in _ERROR_CATEGORIES]
  sys.stderr.write(''.join(category_lines))
  sys.exit(0)
def ParseArguments(args):
  """Parses the command line arguments.

  This may set the output format and verbosity level as side-effects.

  Args:
    args: The command line arguments:

  Returns:
    The list of filenames to lint.
  """
  try:
    (opts, filenames) = getopt.getopt(args, '', ['help', 'output=', 'verbose=',
                                                 'counting=',
                                                 'filter=',
                                                 'root=',
                                                 'linelength=',
                                                 'extensions='])
  except getopt.GetoptError:
    PrintUsage('Invalid arguments.')

  # Start from the current global settings so unspecified options keep
  # their existing values.
  verbosity = _VerboseLevel()
  output_format = _OutputFormat()
  filters = ''
  counting_style = ''

  for (opt, val) in opts:
    if opt == '--help':
      PrintUsage(None)
    elif opt == '--output':
      if val not in ('emacs', 'vs7', 'eclipse'):
        PrintUsage('The only allowed output formats are emacs, vs7 and eclipse.')
      output_format = val
    elif opt == '--verbose':
      verbosity = int(val)
    elif opt == '--filter':
      filters = val
      # An empty --filter is a request to list the available categories.
      if not filters:
        PrintCategories()
    elif opt == '--counting':
      if val not in ('total', 'toplevel', 'detailed'):
        PrintUsage('Valid counting options are total, toplevel, and detailed')
      counting_style = val
    elif opt == '--root':
      global _root
      _root = val
    elif opt == '--linelength':
      global _line_length
      try:
        _line_length = int(val)
      except ValueError:
        PrintUsage('Line length must be digits.')
    elif opt == '--extensions':
      global _valid_extensions
      try:
        _valid_extensions = set(val.split(','))
      except ValueError:
        # Bug fix: corrected "seperated" typo in the user-facing message.
        PrintUsage('Extensions must be comma separated list.')

  if not filenames:
    PrintUsage('No files were specified.')

  _SetOutputFormat(output_format)
  _SetVerboseLevel(verbosity)
  _SetFilters(filters)
  _SetCountingStyle(counting_style)

  return filenames
def main():
  """Command-line entry point: lints every file named on argv.

  Exits with status 1 when any lint error was reported, 0 otherwise.
  """
  filenames = ParseArguments(sys.argv[1:])
  # Change stderr to write with replacement characters so we don't die
  # if we try to print something containing non-ASCII characters.
  sys.stderr = codecs.StreamReaderWriter(sys.stderr,
                                         codecs.getreader('utf8'),
                                         codecs.getwriter('utf8'),
                                         'replace')
  _cpplint_state.ResetErrorCounts()
  for filename in filenames:
    ProcessFile(filename, _cpplint_state.verbose_level)
  # Print aggregate error counts, then exit non-zero iff errors were found.
  _cpplint_state.PrintErrorCounts()
  sys.exit(_cpplint_state.error_count > 0)
if __name__ == '__main__':
  main()
| 38.236724 | 97 | 0.652025 |
e1ad786574e503b51fe78f121e61c54f60bd1177 | 2,331 | py | Python | test/test_dynsys_equil.py | artemyk/dynpy | c2914ac315083ad76707a7fcb2c8800a2ec52944 | [
"BSD-2-Clause"
] | 6 | 2015-03-05T04:13:22.000Z | 2021-09-24T02:46:09.000Z | test/test_dynsys_equil.py | artemyk/dynpy | c2914ac315083ad76707a7fcb2c8800a2ec52944 | [
"BSD-2-Clause"
] | null | null | null | test/test_dynsys_equil.py | artemyk/dynpy | c2914ac315083ad76707a7fcb2c8800a2ec52944 | [
"BSD-2-Clause"
] | 6 | 2015-03-05T04:16:06.000Z | 2021-07-03T13:26:26.000Z | import numpy as np
import scipy.sparse as ss
import dynpy
from dynpy.mx import DenseMatrix, SparseMatrix
from dynpy.graphdynamics import RandomWalkerEnsemble
# Test graph: the karate-club network bundled with dynpy.
kc = dynpy.sample_nets.karateclub_net
# Initial distribution: all probability mass placed on node 5.
initState = np.zeros(kc.shape[0], 'float')
initState[ 5 ] = 1
def very_close(mx1, mx2):
    """Return True when every entry of mx1 - mx2 lies within +/- 1e-5."""
    diff = mx1 - mx2
    # Sparse matrices expose max/min as methods; dense arrays go through
    # the numpy module functions.
    if ss.issparse(mx1):
        return diff.max() < 1e-5 and diff.min() > -1e-5
    return np.max(diff) < 1e-5 and np.min(diff) > -1e-5
def test_dense_discrete_equil_vs_iter():
    """Dense discrete-time walker: 100 iterations should reach equilibrium."""
    walker = RandomWalkerEnsemble(graph=kc, issparse=False)
    iterated = walker.iterate(initState, max_time = 100)
    equilibrium = walker.get_equilibrium_distribution()
    assert very_close(iterated, equilibrium)
def test_dense_continuous_vs_discrete():
    """Dense walkers: continuous- and discrete-time equilibria must agree."""
    discrete = RandomWalkerEnsemble(graph=kc, issparse=False)
    continuous = RandomWalkerEnsemble(graph=kc, discrete_time=False, issparse=False)
    e_discrete = discrete.get_equilibrium_distribution()
    e_continuous = continuous.get_equilibrium_distribution()
    assert very_close(e_continuous, e_discrete)
def test_dense_continuous_equil_vs_iter():
    """Dense continuous-time walker: iteration should converge to equilibrium."""
    walker = RandomWalkerEnsemble(graph=kc, discrete_time=False, issparse=False)
    iterated = walker.iterate(initState, max_time = 100)
    equilibrium = walker.get_equilibrium_distribution()
    assert very_close(equilibrium, iterated)
def test_sparse_discrete_equil_vs_iter():
    """Sparse discrete-time walker: iteration should converge to equilibrium."""
    walker = RandomWalkerEnsemble(graph=kc, issparse=True)
    iterated = walker.iterate(initState, max_time = 100)
    equilibrium = walker.get_equilibrium_distribution()
    assert very_close(iterated, equilibrium)
def test_sparse_continuous_vs_discrete():
    # Sparse continuous time
    # NOTE(review): rw1 is built with issparse=False, so the sparse
    # continuous-time walker is compared against a *dense* discrete-time
    # reference.  Presumably intentional (the equilibrium should not depend
    # on the storage format), but confirm it was not meant to be True, as
    # in the other sparse tests.
    rw1 = RandomWalkerEnsemble(graph=kc, issparse=False)
    rw2 = RandomWalkerEnsemble(graph=kc, discrete_time=False, issparse=True)
    e2 = rw1.get_equilibrium_distribution()
    e2ct = rw2.get_equilibrium_distribution()
    assert( very_close(e2ct , e2) )
def test_sparse_continuous_equil_vs_iter():
    """Sparse continuous-time walker: iteration should converge to equilibrium."""
    walker = RandomWalkerEnsemble(graph=kc, discrete_time=False, issparse=True)
    iterated = walker.iterate(initState, max_time = 100)
    equilibrium = walker.get_equilibrium_distribution()
    assert very_close(equilibrium, iterated)
| 35.861538 | 81 | 0.688117 |
29003b3570cc956b82fc4cabcf96213297708b20 | 425 | py | Python | fundamentos/007_opAritm.py | daldantas/Curso-Python | 96d09393b1ad278990156906e965c0d7adb4f4aa | [
"MIT"
] | null | null | null | fundamentos/007_opAritm.py | daldantas/Curso-Python | 96d09393b1ad278990156906e965c0d7adb4f4aa | [
"MIT"
] | null | null | null | fundamentos/007_opAritm.py | daldantas/Curso-Python | 96d09393b1ad278990156906e965c0d7adb4f4aa | [
"MIT"
] | null | null | null | n1 = int(input('Digite um número: '))
# Read the second integer operand (the first, n1, is read just above).
n2 = int(input('Digite outro: '))
# One variable per arithmetic operator applied to the pair (n1, n2).
s = n1 + n2    # sum
m = n1 * n2    # multiplication
d = n1 / n2    # true division (float result)
di = n1 // n2  # floor division
e = n1 ** n2   # exponentiation
r = n1 % n2    # remainder (modulo)
# Banner strings for the framed report printed below.
top = ' Início '
bot = ' Fim '
print('\n Operadores Aritméticos \n')
# '{:=^30}' centers the banner text inside a 30-character line of '='.
print('{:=^30}'.format(top))
print('\n Soma: {} \n Mult: {} \n div : {:.2f} \n divi: {} \n expo: {}'.format(s, m, d, di, e), end='')
print('\n rest: {} \n'.format(r))
print('{:=^30}'.format(bot))
44dd3e225e19be4b88c2bc3b2fd809bbb71e9200 | 291 | py | Python | projects/shixiseng/shixiseng/pipelines.py | 18839782321/lyf.github.io | 12c065f03d69dd7d99af27ded6c216f1954f8c7f | [
"MIT"
] | 13 | 2020-07-15T03:26:33.000Z | 2022-01-23T01:59:14.000Z | projects/shixiseng/shixiseng/pipelines.py | 18839782321/lyf.github.io | 12c065f03d69dd7d99af27ded6c216f1954f8c7f | [
"MIT"
] | 5 | 2021-03-31T20:07:10.000Z | 2022-03-02T14:54:19.000Z | projects/shixiseng/shixiseng/pipelines.py | 18839782321/lyf.github.io | 12c065f03d69dd7d99af27ded6c216f1954f8c7f | [
"MIT"
] | 5 | 2020-08-13T16:09:14.000Z | 2021-12-16T08:26:57.000Z | # -*- coding: utf-8 -*-
# Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: https://docs.scrapy.org/en/latest/topics/item-pipeline.html
class ShixisengPipeline(object):
    """Scrapy item pipeline; currently passes every item through unchanged."""
    def process_item(self, item, spider):
        # No transformation or persistence yet; returning the item keeps it
        # flowing to any later pipelines.
        return item
| 24.25 | 66 | 0.714777 |
56126d78c4b6490ff1dce3638014bc41b6ffb056 | 1,749 | py | Python | src/main/resources/pydev_tunnel/tunnel.py | gdlg/k8s-debugger-pycharm-pluggin | 30354f8e6ce3f979650c032e485137ec3f113a2c | [
"Apache-2.0"
] | null | null | null | src/main/resources/pydev_tunnel/tunnel.py | gdlg/k8s-debugger-pycharm-pluggin | 30354f8e6ce3f979650c032e485137ec3f113a2c | [
"Apache-2.0"
] | null | null | null | src/main/resources/pydev_tunnel/tunnel.py | gdlg/k8s-debugger-pycharm-pluggin | 30354f8e6ce3f979650c032e485137ec3f113a2c | [
"Apache-2.0"
] | null | null | null | # Copyright 2021 Grégoire Payen de La Garanderie. Use of this source code is governed by the Apache 2.0 license that can be found in the LICENSE file.
from dispatcher import Dispatcher
from pipe_client_server import PipeClientServer
from pydev_server_monitor import PydevServerMonitor
import sys
import subprocess
import os
import logging
# True when invoked with arguments ("<local_port> <worker command...>");
# with no arguments the script runs as the remote end of the tunnel.
is_local = len(sys.argv) > 1
# Log to stderr: in remote mode stdin/stdout carry the pipe protocol, so
# they must stay free of log output.
handler = logging.StreamHandler(sys.stderr)
handler.setLevel(logging.DEBUG)
format_header = "local" if is_local else "remote"
formatter = logging.Formatter('%(asctime)s - '+format_header+' %(name)s - %(levelname)s - %(message)s')
handler.setFormatter(formatter)
logger = logging.getLogger()
logger.addHandler(handler)
logger.setLevel(logging.DEBUG)
if is_local:
    # Local connection worker.
    #
    # Start the child connection (the remote), establish the pipe between
    # the parent and child process, then add a monitor for the local Pydev
    # server.
    local_port = sys.argv[1]
    worker_command = sys.argv[2:]
    child = subprocess.Popen(worker_command, stdin=subprocess.PIPE, stdout=subprocess.PIPE)
    dispatcher = Dispatcher(auto_stop=True)
    dispatcher.add_processor(PipeClientServer(dispatcher, child.stdout, child.stdin))
    server_monitor = PydevServerMonitor(dispatcher, local_port)
    # Only monitor the Pydev server if its socket is actually accepting
    # connections.
    if server_monitor.is_socket_alive():
        dispatcher.add_server_monitor(server_monitor)
else:
    # Remote connection worker.
    #
    # Establish the pipe between the parent and child process.
    dispatcher = Dispatcher(auto_stop=False)
    dispatcher.add_processor(PipeClientServer(dispatcher, sys.stdin, sys.stdout))
    child = None
# Finally, start the main loop; blocks until the dispatcher stops.
dispatcher.dispatch_loop()
# Clean up the child process (local mode only).
if child is not None:
    child.terminate()
    child.wait()
| 30.684211 | 150 | 0.760435 |
284e7be9f732aa086a029b5b9d8bca68acb295fa | 476 | py | Python | plotly/validators/layout/annotation/_valign.py | faezs/plotly.py | 6009b5b9c746e5d2a2849ad255a4eb234b551ed7 | [
"MIT"
] | 2 | 2020-03-24T11:41:14.000Z | 2021-01-14T07:59:43.000Z | plotly/validators/layout/annotation/_valign.py | faezs/plotly.py | 6009b5b9c746e5d2a2849ad255a4eb234b551ed7 | [
"MIT"
] | null | null | null | plotly/validators/layout/annotation/_valign.py | faezs/plotly.py | 6009b5b9c746e5d2a2849ad255a4eb234b551ed7 | [
"MIT"
] | 4 | 2019-06-03T14:49:12.000Z | 2022-01-06T01:05:12.000Z | import _plotly_utils.basevalidators
class ValignValidator(_plotly_utils.basevalidators.EnumeratedValidator):
    """Validator for the 'valign' property of layout.annotation.

    Accepts one of the enumerated values 'top', 'middle' or 'bottom'.
    """
    def __init__(
        self, plotly_name='valign', parent_name='layout.annotation', **kwargs
    ):
        # Delegate to the enumerated-value validator with the fixed set of
        # allowed vertical alignments.
        super(ValignValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            edit_type='arraydraw',
            role='style',
            values=['top', 'middle', 'bottom'],
            **kwargs
        )
1991565e0567d2f07f55f0dafb344e8eb52facd1 | 3,068 | py | Python | tukey/dashboards/settings/project/forms.py | Li-Ko/tukey_portal | 8dc395ef1a1ebaa806d23c88ce51460e6c202921 | [
"Apache-2.0"
] | null | null | null | tukey/dashboards/settings/project/forms.py | Li-Ko/tukey_portal | 8dc395ef1a1ebaa806d23c88ce51460e6c202921 | [
"Apache-2.0"
] | null | null | null | tukey/dashboards/settings/project/forms.py | Li-Ko/tukey_portal | 8dc395ef1a1ebaa806d23c88ce51460e6c202921 | [
"Apache-2.0"
] | null | null | null | # vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2012 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Copyright 2012 Nebula, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import logging
from django.conf import settings
from django import shortcuts
from django.utils.translation import ugettext_lazy as _
from horizon import exceptions
from horizon import forms
from horizon import messages
from openstack_dashboard import api
LOG = logging.getLogger(__name__)
class DownloadOpenRCForm(forms.SelfHandlingForm):
tenant = forms.ChoiceField(label=_("Select a Project"))
def __init__(self, request, *args, **kwargs):
super(DownloadOpenRCForm, self).__init__(request, *args, **kwargs)
# Populate tenant choices
tenant_choices = []
try:
tenants = api.tenant_list(request)
except:
tenants = []
exceptions.handle(request, _("Unable to retrieve project list."))
for tenant in tenants:
if tenant.enabled:
tenant_choices.append((tenant.id, tenant.name))
self.fields['tenant'].choices = tenant_choices
def handle(self, request, data):
try:
tenant_id = data['tenant']
tenant_name = dict(self.fields['tenant'].choices)[tenant_id]
keystone_url = api.url_for(request,
'identity',
endpoint_type='publicURL')
for old, new in settings.API_ENDPOINT_REPLACEMENTS:
keystone_url = keystone_url.replace(old, new)
context = {'user': request.user,
'auth_url': keystone_url,
'tenant_id': tenant_id,
'tenant_name': tenant_name}
response = shortcuts.render(request,
'settings/project/openrc.sh.template',
context,
content_type="text/plain")
response['Content-Disposition'] = 'attachment; filename=openrc.sh'
response['Content-Length'] = str(len(response.content))
return response
except Exception, e:
LOG.exception("Exception in DownloadOpenRCForm.")
messages.error(request, _('Error Downloading RC File: %s') % e)
return shortcuts.redirect(request.build_absolute_uri())
| 36.963855 | 78 | 0.625163 |
1d5110749d9c4dbf759343e9bd66c51bd31713d7 | 6,176 | py | Python | sdk/storage/azure-mgmt-storage/azure/mgmt/storage/v2019_06_01/operations/_usages_operations.py | vincenttran-msft/azure-sdk-for-python | 348b56f9f03eeb3f7b502eed51daf494ffff874d | [
"MIT"
] | 1 | 2022-03-09T08:59:13.000Z | 2022-03-09T08:59:13.000Z | sdk/storage/azure-mgmt-storage/azure/mgmt/storage/v2019_06_01/operations/_usages_operations.py | vincenttran-msft/azure-sdk-for-python | 348b56f9f03eeb3f7b502eed51daf494ffff874d | [
"MIT"
] | null | null | null | sdk/storage/azure-mgmt-storage/azure/mgmt/storage/v2019_06_01/operations/_usages_operations.py | vincenttran-msft/azure-sdk-for-python | 348b56f9f03eeb3f7b502eed51daf494ffff874d | [
"MIT"
] | 1 | 2022-03-04T06:21:56.000Z | 2022-03-04T06:21:56.000Z | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
import functools
from typing import Any, Callable, Dict, Generic, Iterable, Optional, TypeVar
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.paging import ItemPaged
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpResponse
from azure.core.rest import HttpRequest
from azure.core.tracing.decorator import distributed_trace
from azure.mgmt.core.exceptions import ARMErrorFormat
from msrest import Serializer
from .. import models as _models
from .._vendor import _convert_request, _format_url_section
T = TypeVar('T')
# Optional callback applied to each deserialized response before returning.
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
# Module-level serializer used only for URL/query/header formatting, so
# client-side validation is disabled.
_SERIALIZER = Serializer()
_SERIALIZER.client_side_validation = False
def build_list_by_location_request(
    subscription_id: str,
    location: str,
    **kwargs: Any
) -> HttpRequest:
    """Builds the GET request for listing Storage usages in a location.

    :param subscription_id: The target Azure subscription id.
    :param location: The location of the Azure Storage resource.
    :return: An HttpRequest with the formatted URL, api-version query
        parameter and Accept header set; remaining kwargs are forwarded.
    """
    api_version = "2019-06-01"
    accept = "application/json"
    # Construct URL
    url = kwargs.pop("template_url", '/subscriptions/{subscriptionId}/providers/Microsoft.Storage/locations/{location}/usages')
    path_format_arguments = {
        "subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str', min_length=1),
        "location": _SERIALIZER.url("location", location, 'str'),
    }
    url = _format_url_section(url, **path_format_arguments)
    # Construct parameters
    query_parameters = kwargs.pop("params", {})  # type: Dict[str, Any]
    query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
    # Construct headers
    header_parameters = kwargs.pop("headers", {})  # type: Dict[str, Any]
    header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
    return HttpRequest(
        method="GET",
        url=url,
        params=query_parameters,
        headers=header_parameters,
        **kwargs
    )
class UsagesOperations(object):
    """UsagesOperations operations.
    You should not instantiate this class directly. Instead, you should create a Client instance that
    instantiates it for you and attaches it as an attribute.
    :ivar models: Alias to model classes used in this operation group.
    :type models: ~azure.mgmt.storage.v2019_06_01.models
    :param client: Client for service requests.
    :param config: Configuration of service client.
    :param serializer: An object model serializer.
    :param deserializer: An object model deserializer.
    """
    models = _models
    def __init__(self, client, config, serializer, deserializer):
        self._client = client
        self._serialize = serializer
        self._deserialize = deserializer
        self._config = config
    @distributed_trace
    def list_by_location(
        self,
        location: str,
        **kwargs: Any
    ) -> Iterable["_models.UsageListResult"]:
        """Gets the current usage count and the limit for the resources of the location under the
        subscription.
        :param location: The location of the Azure Storage resource.
        :type location: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: An iterator like instance of either UsageListResult or the result of cls(response)
        :rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.storage.v2019_06_01.models.UsageListResult]
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.UsageListResult"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        # Builds either the initial request or a follow-up request from the
        # service-provided nextLink URL.
        def prepare_request(next_link=None):
            if not next_link:
                request = build_list_by_location_request(
                    subscription_id=self._config.subscription_id,
                    location=location,
                    template_url=self.list_by_location.metadata['url'],
                )
                request = _convert_request(request)
                request.url = self._client.format_url(request.url)
            else:
                request = build_list_by_location_request(
                    subscription_id=self._config.subscription_id,
                    location=location,
                    template_url=next_link,
                )
                request = _convert_request(request)
                request.url = self._client.format_url(request.url)
                request.method = "GET"
            return request
        # Deserializes one page; returns (next_link, iterator-of-items) as
        # expected by ItemPaged (this API has no continuation token).
        def extract_data(pipeline_response):
            deserialized = self._deserialize("UsageListResult", pipeline_response)
            list_of_elem = deserialized.value
            if cls:
                list_of_elem = cls(list_of_elem)
            return None, iter(list_of_elem)
        # Runs one page request through the pipeline and maps HTTP errors.
        def get_next(next_link=None):
            request = prepare_request(next_link)
            pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
            response = pipeline_response.http_response
            if response.status_code not in [200]:
                map_error(status_code=response.status_code, response=response, error_map=error_map)
                raise HttpResponseError(response=response, error_format=ARMErrorFormat)
            return pipeline_response
        return ItemPaged(
            get_next, extract_data
        )
    list_by_location.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.Storage/locations/{location}/usages'}  # type: ignore
2d793fe298b33e659b3568937d000255d835b32c | 12,212 | py | Python | pylot/perception/segmentation/segmented_frame.py | mageofboy/pylot | c3154dc24c9429b9916274894c72ef92e03c946d | [
"Apache-2.0"
] | 231 | 2019-06-05T00:22:00.000Z | 2022-03-28T06:15:00.000Z | pylot/perception/segmentation/segmented_frame.py | mageofboy/pylot | c3154dc24c9429b9916274894c72ef92e03c946d | [
"Apache-2.0"
] | 108 | 2019-06-27T16:28:01.000Z | 2022-03-28T19:14:18.000Z | pylot/perception/segmentation/segmented_frame.py | mageofboy/pylot | c3154dc24c9429b9916274894c72ef92e03c946d | [
"Apache-2.0"
] | 80 | 2019-06-07T01:08:13.000Z | 2022-03-28T01:44:42.000Z | import os
import PIL.Image as Image
import cv2
import numpy as np
import pylot.utils
from pylot.perception.detection.utils import BoundingBox2D
from skimage import measure
# Semantic label id -> human-readable class name (CARLA semantic ids).
CITYSCAPES_LABELS = {
    0: "unlabeled",
    1: "building",
    2: "fence",
    3: "other",
    4: "person",
    5: "pole",
    6: "road_line",
    7: "road",
    8: "sidewalk",
    9: "vegetation",
    10: "car",
    11: "wall",
    12: "traffic_sign",
    13: "sky",
    14: "ground",
    15: "bridge",
    16: "rail_track",
    17: "guard_rail",
    18: "traffic_light",
    19: "static",
    20: "dynamic",
    21: "water",
    22: "terrain",
}
# Cityscapes palette: semantic label id -> [R, G, B] display color.
CITYSCAPES_CLASSES = {
    0: [0, 0, 0],  # None
    1: [70, 70, 70],  # Buildings
    2: [190, 153, 153],  # Fences
    3: [72, 0, 90],  # Other
    4: [220, 20, 60],  # Pedestrians
    5: [153, 153, 153],  # Poles
    6: [157, 234, 50],  # RoadLines
    7: [128, 64, 128],  # Roads
    8: [244, 35, 232],  # Sidewalks
    9: [107, 142, 35],  # Vegetation
    10: [0, 0, 255],  # Vehicles
    11: [102, 102, 156],  # Walls
    12: [220, 220, 0],  # TrafficSigns
    13: [70, 130, 180],  # Sky
    14: [81, 0, 81],  # Ground
    15: [150, 100, 100],  # Bridge
    16: [230, 150, 140],  # RailTrack
    17: [180, 165, 180],  # GuardRail
    18: [250, 170, 30],  # TrafficLight
    19: [110, 190, 160],  # Static
    20: [170, 120, 50],  # Dynamic
    21: [45, 60, 150],  # Water
    22: [145, 170, 100]  # Terrain
}
# XXX(ionel): Note! These classes do not cover all
# the classes from CITYSCAPES. Hence, we can't compare segmentation
# outputs to ground truth.
class SegmentedFrame(object):
    """Stores a semantically segmented frame.

    Args:
        frame: A numpy array storing the segmented frame.
        encoding (:obj:`str`): The encoding of the frame (simulator | cityscapes). # noqa: E501
        camera_setup (:py:class:`~pylot.drivers.sensor_setup.SegmentedCameraSetup`): # noqa: E501
            The camera setup used by the sensor that generated this frame.

    Attributes:
        frame: A numpy array storing the segmented frame.
        encoding (:obj:`str`): The encoding of the frame (simulator | cityscapes). # noqa: E501
        camera_setup (:py:class:`~pylot.drivers.sensor_setup.SegmentedCameraSetup`): # noqa: E501
            The camera setup used by the sensor that generated this frame.
    """
    def __init__(self, frame, encoding, camera_setup):
        if encoding == 'simulator' or encoding == 'cityscapes':
            self._frame = frame
            self.encoding = encoding
            self.camera_setup = camera_setup
        else:
            raise ValueError(
                'Unexpected encoding {} for segmented frame'.format(encoding))
        # Lazily-built cache of per-class binary masks.
        self._class_masks = None

    @classmethod
    def from_simulator_image(cls, simulator_image, camera_setup):
        """Creates a pylot camera frame from a simulator frame.

        Note:
            This conversion is slow.

        Returns:
            :py:class:`.SegmentedFrame`: A segmented camera frame.
        """
        # Converts the array containing simulator semantic segmentation labels
        # to a 2D array containing the label of each pixel.
        from carla import Image
        if not isinstance(simulator_image, Image):
            raise ValueError('simulator_image should be of type Image')
        __frame = np.frombuffer(simulator_image.raw_data,
                                dtype=np.dtype("uint8"))
        __frame = np.reshape(
            __frame, (simulator_image.height, simulator_image.width, 4))
        # Channel 2 holds the semantic label in the simulator's BGRA layout.
        return cls(__frame[:, :, 2], 'simulator', camera_setup)

    def as_cityscapes_palette(self):
        """Returns the frame to the CARLA cityscapes pallete.

        Returns:
            A numpy array.
        """
        if self.encoding == 'cityscapes':
            return self._frame
        else:
            result = np.zeros((self._frame.shape[0], self._frame.shape[1], 3),
                              dtype=np.uint8)
            for key, value in CITYSCAPES_CLASSES.items():
                result[np.where(self._frame == key)] = value
            return result

    def as_numpy_array(self):
        """Returns the segmented frame as a numpy array."""
        return self._frame

    def transform_to_cityscapes(self):
        """Transforms the frame to a cityscapes frame."""
        self._frame = self.as_cityscapes_palette()
        self.encoding = 'cityscapes'

    def in_frame(self, point):
        """Checks if a point is within the frame."""
        return (0 <= point.x <= self.camera_setup.width
                and 0 <= point.y <= self.camera_setup.height)

    def get_traffic_sign_bounding_boxes(self, min_width=2, min_height=3):
        """Extracts traffic sign bounding boxes from the frame.

        Returns:
            list(:py:class:`~pylot.perception.detection.utils.BoundingBox2D`):
                Traffic sign bounding boxes.
        """
        assert self.encoding == 'simulator', \
            'Not implemented on cityscapes encoding'
        # Set the pixels we are interested in to True.
        traffic_signs_frame = self._get_traffic_sign_pixels()
        # Extracts bounding box from frame.
        bboxes = []
        # Labels the connected segmented pixels.
        map_labeled = measure.label(traffic_signs_frame, connectivity=1)
        # Extract the regions out of the labeled frames.
        for region in measure.regionprops(map_labeled):
            x_min = region.bbox[1]
            x_max = region.bbox[3]
            y_min = region.bbox[0]
            y_max = region.bbox[2]
            # Filter the bboxes that are extremely small.
            if x_max - x_min > min_width and y_max - y_min > min_height:
                bboxes.append(BoundingBox2D(x_min, x_max, y_min, y_max))
        return bboxes

    def _get_per_class_masks(self):
        """ Build a cache of class key to frame mask."""
        assert self.encoding == 'simulator', \
            'Not implemented on cityscapes encoding'
        if self._class_masks is not None:
            return self._class_masks
        else:
            self._class_masks = []
            for key, value in CITYSCAPES_CLASSES.items():
                mask = np.zeros((self._frame.shape[0], self._frame.shape[1]))
                mask[np.where(self._frame == key)] = 1
                self._class_masks.append(mask)
            return self._class_masks

    def compute_semantic_iou(self, other_frame):
        """Computes IoU for a segmented frame.

        Args:
            other_frame (:py:class:`.SegmentedFrame`): The frame for which to
                compute IoU.

        Returns:
            A tuple comprising of mIoU and a list of IoUs.
        """
        assert (self.encoding == 'cityscapes' and other_frame.encoding
                == 'cityscapes'), 'Not implemented on simulator encoding'
        iou = {}
        # NOTE(review): `frame == value` broadcasts the [R, G, B] triple per
        # channel, so matching is channel-wise rather than by exact color
        # triple -- presumably adequate given the distinct palette values,
        # but confirm before relying on exact IoU numbers.
        for key, value in CITYSCAPES_CLASSES.items():
            # Do not include None in the mIoU
            if key == 0:
                continue
            target = np.zeros((self._frame.shape[0], self._frame.shape[1], 3))
            prediction = np.zeros(
                (self._frame.shape[0], self._frame.shape[1], 3))
            target[np.where(self._frame == value)] = 1
            prediction[np.where(other_frame._frame == value)] = 1
            intersection = np.logical_and(target, prediction)
            union = np.logical_or(target, prediction)
            sum_intersection = np.sum(intersection)
            sum_union = np.sum(union)
            # Ignore non-existing classes.
            if sum_union > 0:
                iou[key] = float(sum_intersection) / float(sum_union)
        mean_iou = np.mean(list(iou.values()))
        return (mean_iou, iou)

    def compute_semantic_iou_using_masks(self, other_frame):
        """Computes IoU for a segmented frame.

        Computes IoU from per class image masks. This method is the fastest if
        the frames already have precomputed masks.

        Args:
            other_frame (:py:class:`.SegmentedFrame`): The frame for which to
                compute IoU.

        Returns:
            A tuple comprising of mIoU and a list of IoUs.
        """
        assert self.encoding == 'simulator' and \
            other_frame.encoding == 'simulator', \
            'Not implemented on cityscapes encoding'
        masks = self._get_per_class_masks()
        other_masks = other_frame._get_per_class_masks()
        iou = {}
        for i in range(1, len(CITYSCAPES_CLASSES)):
            intersection = np.logical_and(masks[i], other_masks[i])
            union = np.logical_or(masks[i], other_masks[i])
            sum_intersection = np.sum(intersection)
            sum_union = np.sum(union)
            # Ignore non-existing classes.
            if sum_union > 0:
                iou[i] = float(sum_intersection) / float(sum_union)
        mean_iou = np.mean(list(iou.values()))
        return (mean_iou, iou)

    def save_per_class_masks(self, data_path, timestamp):
        """Saves one PNG mask per semantic class under data_path."""
        assert self.encoding == 'simulator', \
            'Not implemented on cityscapes encoding'
        masks = self._get_per_class_masks()
        assert len(timestamp.coordinates) == 1
        for k, v in CITYSCAPES_LABELS.items():
            file_name = os.path.join(
                data_path, '{}-{}.png'.format(v, timestamp.coordinates[0]))
            img = Image.fromarray(masks[k])
            img = img.convert('RGB')
            img.save(file_name)

    def save(self, timestamp, data_path, file_base):
        """Saves the segmented frame to a file.

        Args:
            timestamp (:obj:`int`): Timestamp associated with the segmented
                frame.
            data_path (:obj:`str`): Path where to save the segmented frame.
            file_base (:obj:`str`): Base name of the file.
        """
        file_name = os.path.join(data_path,
                                 '{}-{}.png'.format(file_base, timestamp))
        img = Image.fromarray(self.as_cityscapes_palette())
        img.save(file_name)

    def visualize(self, pygame_display, timestamp=None):
        """Renders the frame (in cityscapes palette) on a pygame display."""
        import pygame
        cityscapes_frame = self.as_cityscapes_palette()
        if timestamp is not None:
            pylot.utils.add_timestamp(cityscapes_frame, timestamp)
        # pygame expects (width, height, channels) ordering.
        image_np = np.transpose(cityscapes_frame, (1, 0, 2))
        pygame.surfarray.blit_array(pygame_display, image_np)
        pygame.display.flip()

    def draw_box(self, start_point, end_point, color, thickness=3):
        """Draw a colored box defined by start_point, end_point."""
        start = (int(start_point.x), int(start_point.y))
        end = (int(end_point.x), int(end_point.y))
        cv2.rectangle(self._frame, start, end, color, thickness)

    def draw_point(self, point, color, r=3):
        """Draws a colored point on the segmented frame."""
        cv2.circle(self._frame, (int(point.x), int(point.y)), r, color, -1)

    def draw_text(self, point, text, color=(255, 255, 255)):
        """Draws text at the given point on the segmented frame."""
        cv2.putText(self._frame,
                    text, (int(point.x), int(point.y)),
                    cv2.FONT_HERSHEY_SIMPLEX,
                    0.5,
                    color,
                    thickness=1,
                    lineType=cv2.LINE_AA)

    def _get_traffic_sign_pixels(self):
        """Returns a frame with the traffic sign pixels set to True."""
        # Shape is height, width.
        # Bug fix: np.bool was deprecated in NumPy 1.20 and removed in 1.24;
        # the builtin bool is the supported spelling and is equivalent.
        traffic_signs_frame = np.zeros(
            (self._frame.shape[0], self._frame.shape[1]), dtype=bool)
        # 12 is the key for TrafficSigns segmentation in CARLA.
        # Apply mask to only select traffic signs and traffic lights.
        traffic_signs_frame[np.where(
            np.logical_or(self._frame == 12, self._frame == 18))] = True
        return traffic_signs_frame

    def __repr__(self):
        return 'SegmentedFrame(encoding: {}, camera_setup: {}, frame: {})'.format(  # noqa: E501
            self.encoding, self.camera_setup, self._frame)

    def __str__(self):
        return 'SegmentedFrame(encoding: {}, camera_setup: {}, frame: {})'.format(  # noqa: E501
            self.encoding, self.camera_setup, self._frame)
05fb7d6810b2bfcba4646e2e5d8ef12efa2fce49 | 4,819 | py | Python | app/api/v1/views/products_views.py | kwanj-k/storemanager-API | e51511545a717341a7b1eb100eb3eab625a8b011 | [
"MIT"
] | 1 | 2019-05-08T08:39:08.000Z | 2019-05-08T08:39:08.000Z | app/api/v1/views/products_views.py | kwanj-k/storemanager-API | e51511545a717341a7b1eb100eb3eab625a8b011 | [
"MIT"
] | 2 | 2019-10-21T17:56:01.000Z | 2019-10-29T07:36:39.000Z | app/api/v1/views/products_views.py | kwanj-k/storemanager-API | e51511545a717341a7b1eb100eb3eab625a8b011 | [
"MIT"
] | null | null | null | """
This file contains all the product related resources
"""
# Third party imports
from flask import request, json, abort
from flask_restplus import Resource
from flask_jwt_extended import jwt_required, get_jwt_identity
# Local application imports
from app.api.v1.models.products import Product
from app.api.v1.models.sales import Sale
from app.api.v1.models.db import Db
from app.api.v1.views.expect import ProductEtn, SaleEtn
from app.api.common.validators import product_validator, sales_validator, product_update_validator, admin_required
# Expected request payload model and API namespace shared by the product
# resources defined below.
new_p = ProductEtn().products
v1 = ProductEtn().v1
@v1.route('')
class Products(Resource):
    """Collection resource for products: create (admin only) and list."""

    @v1.doc(security='apikey')
    @jwt_required
    @admin_required
    @v1.expect(new_p)
    def post(self):
        """
        Add a product to the manager
        """
        json_data = request.get_json(force=True)
        product_validator(json_data)
        p = Db.get_product(json_data['name'])
        if p:
            msg = 'Product already exists.Update product inventory instead'
            abort(406, msg)
        # The product is attached to the store of the authenticated user.
        email = get_jwt_identity()
        user = Db.get_user(email=email)
        store_id = user.store_id
        new_product = Product(store_id, json_data['name'],
                              json_data['inventory'],
                              json_data['price'])
        Db.products.append(new_product)
        res = new_product.json_dump()
        return {"status": "Success!", "data": res}, 201

    @v1.doc(security='apikey')
    @jwt_required
    def get(self):
        """
        Get all products of the requester's store
        """
        email = get_jwt_identity()
        user = Db.get_user(email=email)
        store_id = user.store_id
        # Only products belonging to the requester's store are visible.
        res = [p.json_dump() for p in Db.products if p.store_id == store_id]
        if not res:
            # Bug fix: the previous check inspected the unfiltered global
            # product list, so a store with no products (while other stores
            # had some) received an empty 200 instead of the intended 404.
            return {"message": 'There are no products at this time'}, 404
        return res
# Expected request payload model for recording a sale.
new_s = SaleEtn().sales
@v1.route('<int:id>')
class Products1(Resource):
    """Single-product resource: sell, fetch, edit and delete by id."""

    @v1.doc(security='apikey')
    @jwt_required
    @v1.expect(new_s)
    def post(self, id):
        """
        Sell product
        """
        json_data = request.get_json(force=True)
        sales_validator(json_data)
        email = get_jwt_identity()
        user = Db.get_user(email=email)
        store_id = user.store_id
        number = json_data['number']
        product = Db.get_p_by_id(id)
        if product is None:
            return {"message": 'Product does not exist'}, 404
        if product.inventory < number:
            available = product.inventory
            msg = 'There are only {} {} available'.format(
                available, product.name)
            return abort(400, msg)
        amount = number * product.price
        new_sale = Sale(store_id, product.name, number, amount)
        Db.sales.append(new_sale)
        res1 = new_sale.json_dump()
        # Decrement the stock by the quantity sold.
        product.inventory -= number
        return {"status": "Success!", "data": res1}, 201

    @v1.doc(security='apikey')
    @jwt_required
    def get(self, id):
        """
        Get a specific product
        """
        email = get_jwt_identity()
        user = Db.get_user(email=email)
        store_id = user.store_id
        p = Db.get_p_by_id(id)
        # Bug fix: guard against a missing product. Previously a None
        # result raised AttributeError (HTTP 500) instead of a clean 404.
        if p is None or p.store_id != store_id:
            abort(404, 'Product does not exist')
        return {"status": "Success", "data": p.json_dump()}, 200

    @v1.doc(security='apikey')
    @jwt_required
    @admin_required
    @v1.expect(new_p)
    def put(self, id):
        """
        Edit a product
        """
        p = Db.get_p_by_id(id)
        email = get_jwt_identity()
        user = Db.get_user(email=email)
        store_id = user.store_id
        # Bug fix: None guard (see ``get``).
        if p is None or p.store_id != store_id:
            abort(404, 'Product does not exist')
        json_data = request.get_json(force=True)
        product_update_validator(json_data)
        name = json_data['name']
        inventory = json_data['inventory']
        price = json_data['price']
        # Only truthy fields are applied; falsy values leave the field as-is.
        if name:
            p.name = name
        if inventory:
            p.inventory = inventory
        if price:
            p.price = price
        return {"status": "Success!", "data": p.json_dump()}, 200

    @v1.doc(security='apikey')
    @jwt_required
    @admin_required
    def delete(self, id):
        """
        Delete a product
        """
        p = Db.get_p_by_id(id)
        email = get_jwt_identity()
        user = Db.get_user(email=email)
        store_id = user.store_id
        # Bug fix: None guard (see ``get``).
        if p is None or p.store_id != store_id:
            abort(404, 'Product does not exist')
        Db.products.remove(p)
        return {"status": "Deleted!", "data": p.json_dump()}, 200
| 29.746914 | 114 | 0.579581 |
94d2c8f36675dc7b164f9fa90fd6ac8073659521 | 2,398 | py | Python | tests/acceptance/test_accept_organization_invite.py | learninto/sentry | 4f9f564841498b3af49c1677d6b61f3e47b01923 | [
"BSD-3-Clause"
] | 1 | 2019-10-17T17:46:16.000Z | 2019-10-17T17:46:16.000Z | tests/acceptance/test_accept_organization_invite.py | learninto/sentry | 4f9f564841498b3af49c1677d6b61f3e47b01923 | [
"BSD-3-Clause"
] | null | null | null | tests/acceptance/test_accept_organization_invite.py | learninto/sentry | 4f9f564841498b3af49c1677d6b61f3e47b01923 | [
"BSD-3-Clause"
] | null | null | null | from __future__ import absolute_import
from django.db.models import F
from sentry.testutils import AcceptanceTestCase
from sentry.models import Organization, AuthProvider
class AcceptOrganizationInviteTest(AcceptanceTestCase):
    """Browser (acceptance) tests for the accept-organization-invite flow."""

    def setUp(self):
        super(AcceptOrganizationInviteTest, self).setUp()
        # A pending invite: the member row has an email but no user account.
        self.user = self.create_user("foo@example.com")
        self.org = self.create_organization(name="Rowdy Tiger", owner=None)
        self.team = self.create_team(organization=self.org, name="Mariachi Band")
        self.member = self.create_member(
            user=None,
            email="bar@example.com",
            organization=self.org,
            role="owner",
            teams=[self.team],
        )

    def test_invite_simple(self):
        # Authenticated user sees the "join organization" button.
        self.login_as(self.user)
        # get_invite_link() returns a full URL; keep only the path portion.
        self.browser.get(self.member.get_invite_link().split("/", 3)[-1])
        self.browser.wait_until('[data-test-id="accept-invite"]')
        self.browser.snapshot(name="accept organization invite")
        assert self.browser.element_exists('[aria-label="join-organization"]')

    def test_invite_not_authenticated(self):
        # Anonymous visitor is prompted to create an account instead.
        self.browser.get(self.member.get_invite_link().split("/", 3)[-1])
        self.browser.wait_until('[data-test-id="accept-invite"]')
        assert self.browser.element_exists('[aria-label="create-account"]')

    def test_invite_2fa_enforced_org(self):
        # Org enforcing 2FA: no warning while anonymous, warning once
        # logged in without 2FA configured.
        self.org.update(flags=F("flags").bitor(Organization.flags.require_2fa))
        self.browser.get(self.member.get_invite_link().split("/", 3)[-1])
        self.browser.wait_until('[data-test-id="accept-invite"]')
        assert not self.browser.element_exists_by_test_id("2fa-warning")
        self.login_as(self.user)
        self.org.update(flags=F("flags").bitor(Organization.flags.require_2fa))
        self.browser.get(self.member.get_invite_link().split("/", 3)[-1])
        self.browser.wait_until('[data-test-id="accept-invite"]')
        assert self.browser.element_exists_by_test_id("2fa-warning")

    def test_invite_sso_org(self):
        # Org with an SSO auth provider: the invite page suggests SSO login.
        AuthProvider.objects.create(organization=self.org, provider="google")
        self.browser.get(self.member.get_invite_link().split("/", 3)[-1])
        self.browser.wait_until('[data-test-id="accept-invite"]')
        assert self.browser.element_exists_by_test_id("suggests-sso")
        assert self.browser.element_exists('[aria-label="sso-login"]')
206de274cb191e233c0c0778f5599c0e785b757e | 327 | py | Python | tutorials/W0D4_Calculus/solutions/W0D4_Tutorial2_Solution_7cf110c5.py | eduardojdiniz/CompNeuro | 20269e66540dc4e802273735c97323020ee37406 | [
"CC-BY-4.0",
"BSD-3-Clause"
] | 2,294 | 2020-05-11T12:05:35.000Z | 2022-03-28T21:23:34.000Z | tutorials/W0D4_Calculus/solutions/W0D4_Tutorial2_Solution_7cf110c5.py | pellet/course-content | bb383857992469e0e7a9c36639ac0d05e842d9bd | [
"CC-BY-4.0",
"BSD-3-Clause"
] | 629 | 2020-05-11T15:42:26.000Z | 2022-03-29T12:23:35.000Z | tutorials/W0D4_Calculus/solutions/W0D4_Tutorial2_Solution_7cf110c5.py | pellet/course-content | bb383857992469e0e7a9c36639ac0d05e842d9bd | [
"CC-BY-4.0",
"BSD-3-Clause"
] | 917 | 2020-05-11T12:47:53.000Z | 2022-03-31T12:14:41.000Z |
"""
1. Negative values of alpha result in an exponential decrease to 0, a stable solution.
2. Positive values of alpha result in an exponential increase to infinity.
3. Alpha equal to 0 is a unique point, known as an equilibrium point, where
dp/dt = 0 and there is no change in population. This is known as a stable point.
""" | 46.714286 | 87 | 0.749235 |
3806c9e37b320bd5bb3ceb1bec44ca9bd4d9ae05 | 249 | py | Python | 326. Power of Three.py | XinchaoGou/MyLeetCode | bba0ab077374f7da2cb1a990266bc59fa7ddf23c | [
"MIT"
] | null | null | null | 326. Power of Three.py | XinchaoGou/MyLeetCode | bba0ab077374f7da2cb1a990266bc59fa7ddf23c | [
"MIT"
] | null | null | null | 326. Power of Three.py | XinchaoGou/MyLeetCode | bba0ab077374f7da2cb1a990266bc59fa7ddf23c | [
"MIT"
] | null | null | null | class Solution:
def isPowerOfThree(self, n: int) -> bool:
if n <= 0:
return False
while n != 1:
if n % 3 == 0:
n = n // 3
else:
return False
return True
| 22.636364 | 45 | 0.389558 |
592fb8de49db7c12cfcd882a1f9cf24bcebfc126 | 675 | py | Python | Cloud_Data_Warehousing/create_tables.py | bayatim/udacityDataEngineeringProjects | d3533eaec27c3b6af4d1f4b3e7bf385b3106121f | [
"MIT"
] | 1 | 2021-02-05T10:45:22.000Z | 2021-02-05T10:45:22.000Z | Cloud_Data_Warehousing/create_tables.py | bayatim/udacityDataEngineeringProjects | d3533eaec27c3b6af4d1f4b3e7bf385b3106121f | [
"MIT"
] | 6 | 2020-12-27T21:30:34.000Z | 2021-02-05T09:10:33.000Z | Cloud_Data_Warehousing/create_tables.py | bayatim/udacityDataEngineeringProjects | d3533eaec27c3b6af4d1f4b3e7bf385b3106121f | [
"MIT"
] | null | null | null | import configparser
import psycopg2
from sql_queries import create_table_queries, drop_table_queries
def drop_tables(cur, conn):
    """Execute every DROP statement in ``drop_table_queries``, committing
    after each one."""
    for statement in drop_table_queries:
        cur.execute(statement)
        conn.commit()
def create_tables(cur, conn):
    """Execute every CREATE statement in ``create_table_queries``,
    committing after each one."""
    for statement in create_table_queries:
        cur.execute(statement)
        conn.commit()
def main():
    """Connect to the database described in dwh.cfg, then drop and
    recreate all tables."""
    # Connection parameters come from the [CLUSTER] section of dwh.cfg;
    # the values are positional (host, dbname, user, password, port).
    config = configparser.ConfigParser()
    config.read('dwh.cfg')
    conn = psycopg2.connect("host={} dbname={} user={} password={} port={}".format(*config['CLUSTER'].values()))
    cur = conn.cursor()
    # Drop first so the script is idempotent.
    drop_tables(cur, conn)
    create_tables(cur, conn)
    conn.close()
if __name__ == "__main__":
main() | 21.09375 | 112 | 0.663704 |
fdc38cda6fc2284b0ee3702407d87bff909fa646 | 38,951 | py | Python | salt/modules/gpg.py | calinrailean/salt | 6283b51bb44b734a3db82caaae4f4c4e32a9b9b3 | [
"Apache-2.0"
] | 2 | 2018-11-08T02:59:24.000Z | 2021-01-04T00:30:50.000Z | salt/modules/gpg.py | calinrailean/salt | 6283b51bb44b734a3db82caaae4f4c4e32a9b9b3 | [
"Apache-2.0"
] | 4 | 2020-09-04T10:19:34.000Z | 2020-11-09T12:55:59.000Z | salt/modules/gpg.py | calinrailean/salt | 6283b51bb44b734a3db82caaae4f4c4e32a9b9b3 | [
"Apache-2.0"
] | 5 | 2017-06-16T23:48:13.000Z | 2021-04-08T17:43:48.000Z | # -*- coding: utf-8 -*-
'''
Manage a GPG keychains, add keys, create keys, retrieve keys from keyservers.
Sign, encrypt and sign plus encrypt text and files.
.. versionadded:: 2015.5.0
.. note::
The ``python-gnupg`` library and ``gpg`` binary are required to be
installed.
'''
# Import python libs
from __future__ import absolute_import, print_function, unicode_literals
import functools
import logging
import os
import re
import time
# Import salt libs
import salt.utils.files
import salt.utils.path
import salt.utils.stringutils
from salt.exceptions import SaltInvocationError
from salt.utils.versions import LooseVersion as _LooseVersion
# Import 3rd-party libs
from salt.ext import six
# Set up logging
log = logging.getLogger(__name__)
# Define the module's virtual name
__virtualname__ = 'gpg'
# Map GnuPG's single-letter trust codes (as emitted in key listings) to
# human-readable trust names.
LETTER_TRUST_DICT = {
    'e': 'Expired',
    'q': 'Unknown',
    'n': 'Not Trusted',
    'f': 'Fully Trusted',
    'm': 'Marginally Trusted',
    'u': 'Ultimately Trusted',
    '-': 'Unknown',
}
# Map Salt's trust-level argument names to the numeric ownertrust values
# understood by ``gpg --import-ownertrust``.
NUM_TRUST_DICT = {
    'expired': '1',
    'unknown': '2',
    'not_trusted': '3',
    'marginally': '4',
    'fully': '5',
    'ultimately': '6',
}
# Inverse mapping: numeric ownertrust value -> human-readable name.
INV_NUM_TRUST_DICT = {
    '1': 'Expired',
    '2': 'Unknown',
    '3': 'Not Trusted',
    '4': 'Marginally',
    '5': 'Fully Trusted',
    '6': 'Ultimately Trusted'
}
# Trust levels reported in signature verification results.
VERIFY_TRUST_LEVELS = {
    '0': 'Undefined',
    '1': 'Never',
    '2': 'Marginal',
    '3': 'Fully',
    '4': 'Ultimate'
}
GPG_1_3_1 = False
try:
import gnupg
HAS_GPG_BINDINGS = True
GPG_1_3_1 = _LooseVersion(gnupg.__version__) >= _LooseVersion('1.3.1')
except ImportError:
HAS_GPG_BINDINGS = False
def _gpg():
    '''
    Return the full path to the gpg executable, or None when it is not on
    the PATH.
    '''
    return salt.utils.path.which('gpg')
def __virtual__():
    '''
    Only load this module when both the gpg binary and the python-gnupg
    bindings are available.
    '''
    if not _gpg():
        return (False, 'The gpg execution module cannot be loaded: '
                'gpg binary is not in the path.')
    if not HAS_GPG_BINDINGS:
        return (False, 'The gpg execution module cannot be loaded; the '
                'gnupg python module is not installed.')
    return __virtualname__
def _get_user_info(user=None):
    '''
    Wrapper around the ``user.info`` Salt function.

    user
        Name of the user to look up. Defaults to the user the Salt
        process is running as (``config.option('user')``).

    Raises SaltInvocationError when the user does not exist, except for
    the special ``salt`` user which falls back to the running user.
    '''
    if not user:
        # Get user Salt is running as
        user = __salt__['config.option']('user')
    userinfo = __salt__['user.info'](user)
    if not userinfo:
        if user == 'salt':
            # Special case with `salt` user:
            # if it doesn't exist then fall back to user Salt running as
            userinfo = _get_user_info()
        else:
            raise SaltInvocationError('User {0} does not exist'.format(user))
    return userinfo
def _get_user_gnupghome(user):
    '''
    Return the default GnuPG home directory for ``user``.

    The special ``salt`` user keeps its keyring under the Salt
    configuration directory instead of a home directory.
    '''
    if user == 'salt':
        return os.path.join(__salt__['config.get']('config_dir'), 'gpgkeys')
    return os.path.join(_get_user_info(user)['home'], '.gnupg')
def _restore_ownership(func):
    '''
    Decorator: temporarily chown the target user's GnuPG home directory to
    the user running the Salt process so ``func`` can operate on it, then
    restore ownership to the target user afterwards.
    '''
    @functools.wraps(func)
    def func_wrapper(*args, **kwargs):
        '''
        Wrap gpg function calls to fix permissions
        '''
        user = kwargs.get('user')
        gnupghome = kwargs.get('gnupghome')
        if not gnupghome:
            gnupghome = _get_user_gnupghome(user)
        userinfo = _get_user_info(user)
        run_user = _get_user_info()
        if userinfo['uid'] != run_user['uid'] and os.path.exists(gnupghome):
            # Given user is different from one who runs Salt process,
            # need to fix ownership permissions for GnuPG home dir
            group = __salt__['file.gid_to_group'](run_user['gid'])
            for path in [gnupghome] + __salt__['file.find'](gnupghome):
                __salt__['file.chown'](path, run_user['name'], group)
        # Filter special kwargs (loader-injected ``__pub_*``-style keys)
        # so they are not passed through to the wrapped function.
        for key in list(kwargs):
            if key.startswith('__'):
                del kwargs[key]
        ret = func(*args, **kwargs)
        # Hand ownership back to the target user once the call completes.
        if userinfo['uid'] != run_user['uid']:
            group = __salt__['file.gid_to_group'](userinfo['gid'])
            for path in [gnupghome] + __salt__['file.find'](gnupghome):
                __salt__['file.chown'](path, user, group)
        return ret
    return func_wrapper
def _create_gpg(user=None, gnupghome=None):
    '''
    Build a ``gnupg.GPG`` instance for the given user / home directory.
    '''
    homedir = gnupghome if gnupghome else _get_user_gnupghome(user)
    if GPG_1_3_1:
        # python-gnupg >= 1.3.1 renamed the keyword argument to ``homedir``.
        return gnupg.GPG(homedir=homedir)  # pylint: disable=unexpected-keyword-arg
    return gnupg.GPG(gnupghome=homedir)
def _list_keys(user=None, gnupghome=None, secret=False):
    '''
    Return the raw key dicts from the user's keychain; secret keys when
    ``secret`` is True.
    '''
    return _create_gpg(user, gnupghome).list_keys(secret)
def _search_keys(text, keyserver, user=None):
    '''
    Query ``keyserver`` (when given) for keys matching ``text``.
    '''
    gpg = _create_gpg(user)
    if keyserver:
        return gpg.search_keys(text, keyserver)
    return gpg.search_keys(text)
def search_keys(text, keyserver=None, user=None):
    '''
    Search keys from keyserver

    text
        Text to search the keyserver for, e.g. email address, keyID or fingerprint.

    keyserver
        Keyserver to use for searching for GPG keys, defaults to pgp.mit.edu.

    user
        Which user's keychain to access, defaults to user Salt is running as.
        Passing the user as ``salt`` will set the GnuPG home directory to the
        ``/etc/salt/gpgkeys``.

    CLI Example:

    .. code-block:: bash

        salt '*' gpg.search_keys user@example.com
        salt '*' gpg.search_keys user@example.com keyserver=keyserver.ubuntu.com
        salt '*' gpg.search_keys user@example.com keyserver=keyserver.ubuntu.com user=username

    '''
    if GPG_1_3_1:
        raise SaltInvocationError('The search_keys function is not support with this version of python-gnupg.')
    results = []
    for raw in _search_keys(text, keyserver if keyserver else 'pgp.mit.edu', user):
        entry = {'keyid': raw['keyid'],
                 'uids': raw['uids']}
        # Optional fields: epoch timestamps are rendered as YYYY-MM-DD.
        if raw.get('expires'):
            entry['expires'] = time.strftime(
                '%Y-%m-%d', time.localtime(float(raw['expires'])))
        if raw.get('date'):
            entry['created'] = time.strftime(
                '%Y-%m-%d', time.localtime(float(raw['date'])))
        if raw.get('length'):
            entry['keyLength'] = raw['length']
        results.append(entry)
    return results
def list_keys(user=None, gnupghome=None):
    '''
    List keys in GPG keychain

    user
        Which user's keychain to access, defaults to user Salt is running as.
        Passing the user as ``salt`` will set the GnuPG home directory to the
        ``/etc/salt/gpgkeys``.

    gnupghome
        Specify the location where GPG keyring and related files are stored.

    CLI Example:

    .. code-block:: bash

        salt '*' gpg.list_keys

    '''
    results = []
    for raw in _list_keys(user, gnupghome):
        entry = {'keyid': raw['keyid'],
                 'fingerprint': raw['fingerprint'],
                 'uids': raw['uids']}
        # Optional fields: epoch timestamps rendered as YYYY-MM-DD; trust
        # letters mapped to readable names.
        if raw.get('expires'):
            entry['expires'] = time.strftime(
                '%Y-%m-%d', time.localtime(float(raw['expires'])))
        if raw.get('date'):
            entry['created'] = time.strftime(
                '%Y-%m-%d', time.localtime(float(raw['date'])))
        if raw.get('length'):
            entry['keyLength'] = raw['length']
        if raw.get('ownertrust'):
            entry['ownerTrust'] = LETTER_TRUST_DICT[raw['ownertrust']]
        if raw.get('trust'):
            entry['trust'] = LETTER_TRUST_DICT[raw['trust']]
        results.append(entry)
    return results
def list_secret_keys(user=None, gnupghome=None):
    '''
    List secret keys in GPG keychain

    user
        Which user's keychain to access, defaults to user Salt is running as.
        Passing the user as ``salt`` will set the GnuPG home directory to the
        ``/etc/salt/gpgkeys``.

    gnupghome
        Specify the location where GPG keyring and related files are stored.

    CLI Example:

    .. code-block:: bash

        salt '*' gpg.list_secret_keys

    '''
    results = []
    for raw in _list_keys(user, gnupghome, secret=True):
        entry = {'keyid': raw['keyid'],
                 'fingerprint': raw['fingerprint'],
                 'uids': raw['uids']}
        # Optional fields: epoch timestamps rendered as YYYY-MM-DD; trust
        # letters mapped to readable names.
        if raw.get('expires'):
            entry['expires'] = time.strftime(
                '%Y-%m-%d', time.localtime(float(raw['expires'])))
        if raw.get('date'):
            entry['created'] = time.strftime(
                '%Y-%m-%d', time.localtime(float(raw['date'])))
        if raw.get('length'):
            entry['keyLength'] = raw['length']
        if raw.get('ownertrust'):
            entry['ownerTrust'] = LETTER_TRUST_DICT[raw['ownertrust']]
        if raw.get('trust'):
            entry['trust'] = LETTER_TRUST_DICT[raw['trust']]
        results.append(entry)
    return results
@_restore_ownership
def create_key(key_type='RSA',
               key_length=1024,
               name_real='Autogenerated Key',
               name_comment='Generated by SaltStack',
               name_email=None,
               subkey_type=None,
               subkey_length=None,
               expire_date=None,
               use_passphrase=False,
               user=None,
               gnupghome=None):
    '''
    Create a key in the GPG keychain

    .. note::

        GPG key generation requires *a lot* of entropy and randomness.
        Difficult to do over a remote connection, consider having
        another process available which is generating randomness for
        the machine. Also especially difficult on virtual machines,
        consider the `rng-tools
        <http://www.gnu.org/software/hurd/user/tlecarrour/rng-tools.html>`_
        package.

        The create_key process takes awhile so increasing the timeout
        may be necessary, e.g. -t 15.

    key_type
        The type of the primary key to generate. It must be capable of signing.
        'RSA' or 'DSA'.

    key_length
        The length of the primary key in bits.

    name_real
        The real name of the user identity which is represented by the key.

    name_comment
        A comment to attach to the user id.

    name_email
        An email address for the user.

    subkey_type
        The type of the secondary key to generate.

    subkey_length
        The length of the secondary key in bits.

    expire_date
        The expiration date for the primary and any secondary key.
        You can specify an ISO date, A number of days/weeks/months/years,
        an epoch value, or 0 for a non-expiring key.

    use_passphrase
        Whether to use a passphrase with the signing key. Passphrase is
        received from Pillar (``gpg_passphrase``).

    user
        Which user's keychain to access, defaults to user Salt is running as.
        Passing the user as ``salt`` will set the GnuPG home directory to the
        ``/etc/salt/gpgkeys``.

    gnupghome
        Specify the location where GPG keyring and related files are stored.

    CLI Example:

    .. code-block:: bash

        salt -t 15 '*' gpg.create_key

    '''
    ret = {
        'res': True,
        'fingerprint': '',
        'message': ''
    }
    # Mandatory parameters for ``gen_key_input``; optional ones are added
    # below only when supplied, so python-gnupg applies its own defaults.
    create_params = {'key_type': key_type,
                     'key_length': key_length,
                     'name_real': name_real,
                     'name_comment': name_comment,
                     }
    gpg = _create_gpg(user, gnupghome)
    if name_email:
        create_params['name_email'] = name_email
    if subkey_type:
        create_params['subkey_type'] = subkey_type
    if subkey_length:
        create_params['subkey_length'] = subkey_length
    if expire_date:
        create_params['expire_date'] = expire_date
    if use_passphrase:
        # The passphrase is never passed on the CLI; it must live in Pillar.
        gpg_passphrase = __salt__['pillar.get']('gpg_passphrase')
        if not gpg_passphrase:
            ret['res'] = False
            ret['message'] = "gpg_passphrase not available in pillar."
            return ret
        else:
            create_params['passphrase'] = gpg_passphrase
    input_data = gpg.gen_key_input(**create_params)
    key = gpg.gen_key(input_data)
    # An empty fingerprint indicates generation failed.
    if key.fingerprint:
        ret['fingerprint'] = key.fingerprint
        ret['message'] = 'GPG key pair successfully generated.'
    else:
        ret['res'] = False
        ret['message'] = 'Unable to generate GPG key pair.'
    return ret
def delete_key(keyid=None,
               fingerprint=None,
               delete_secret=False,
               user=None,
               gnupghome=None):
    '''
    Delete a key from the GPG keychain

    keyid
        The keyid of the key to be deleted.

    fingerprint
        The fingerprint of the key to be deleted.

    delete_secret
        Whether to delete a corresponding secret key prior to deleting the public key.
        Secret keys must be deleted before deleting any corresponding public keys.

    user
        Which user's keychain to access, defaults to user Salt is running as.
        Passing the user as ``salt`` will set the GnuPG home directory to the
        ``/etc/salt/gpgkeys``.

    gnupghome
        Specify the location where GPG keyring and related files are stored.

    CLI Example:

    .. code-block:: bash

        salt '*' gpg.delete_key keyid=3FAD9F1E

        salt '*' gpg.delete_key fingerprint=53C96788253E58416D20BCD352952C84C3252192

        salt '*' gpg.delete_key keyid=3FAD9F1E user=username

        salt '*' gpg.delete_key keyid=3FAD9F1E user=username delete_secret=True

    '''
    ret = {
        'res': True,
        'message': ''
    }
    # keyid and fingerprint are mutually exclusive; exactly one is required.
    if fingerprint and keyid:
        ret['res'] = False
        ret['message'] = 'Only specify one argument, fingerprint or keyid'
        return ret
    if not fingerprint and not keyid:
        ret['res'] = False
        ret['message'] = 'Required argument, fingerprint or keyid'
        return ret
    gpg = _create_gpg(user, gnupghome)
    key = get_key(keyid, fingerprint, user)
    if key:
        fingerprint = key['fingerprint']
        skey = get_secret_key(keyid, fingerprint, user)
        if skey and not delete_secret:
            ret['res'] = False
            ret['message'] = 'Secret key exists, delete first or pass delete_secret=True.'
            return ret
        elif skey and delete_secret and six.text_type(gpg.delete_keys(fingerprint, True)) == 'ok':
            # Delete the secret key
            ret['message'] = 'Secret key for {0} deleted\n'.format(fingerprint)
        # NOTE(review): if the secret-key deletion above fails, execution
        # still falls through and attempts to delete the public key —
        # confirm this best-effort behavior is intended.
        # Delete the public key
        if six.text_type(gpg.delete_keys(fingerprint)) == 'ok':
            ret['message'] += 'Public key for {0} deleted'.format(fingerprint)
        ret['res'] = True
        return ret
    else:
        ret['res'] = False
        ret['message'] = 'Key not available in keychain.'
        return ret
def get_key(keyid=None, fingerprint=None, user=None, gnupghome=None):
    '''
    Get a key from the GPG keychain

    keyid
        The key ID (short or long) of the key to be retrieved.

    fingerprint
        The fingerprint of the key to be retrieved.

    user
        Which user's keychain to access, defaults to user Salt is running as.
        Passing the user as ``salt`` will set the GnuPG home directory to the
        ``/etc/salt/gpgkeys``.

    gnupghome
        Specify the location where GPG keyring and related files are stored.

    CLI Example:

    .. code-block:: bash

        salt '*' gpg.get_key keyid=3FAD9F1E

        salt '*' gpg.get_key fingerprint=53C96788253E58416D20BCD352952C84C3252192

        salt '*' gpg.get_key keyid=3FAD9F1E user=username

    '''
    tmp = {}
    # NOTE(review): ``tmp`` is shared across loop iterations; if more than
    # one key matched, optional fields from an earlier match could persist
    # into the result — confirm the single-match assumption.
    for _key in _list_keys(user, gnupghome):
        # Match on fingerprint, the full keyid, or ``keyid[8:]`` —
        # presumably the 8-character short id of a 16-character long id.
        if (_key['fingerprint'] == fingerprint or
                _key['keyid'] == keyid or
                _key['keyid'][8:] == keyid):
            tmp['keyid'] = _key['keyid']
            tmp['fingerprint'] = _key['fingerprint']
            tmp['uids'] = _key['uids']
            expires = _key.get('expires', None)
            date = _key.get('date', None)
            length = _key.get('length', None)
            owner_trust = _key.get('ownertrust', None)
            trust = _key.get('trust', None)
            if expires:
                tmp['expires'] = time.strftime('%Y-%m-%d',
                                               time.localtime(float(_key['expires'])))
            if date:
                tmp['created'] = time.strftime('%Y-%m-%d',
                                               time.localtime(float(_key['date'])))
            if length:
                tmp['keyLength'] = _key['length']
            if owner_trust:
                tmp['ownerTrust'] = LETTER_TRUST_DICT[_key['ownertrust']]
            if trust:
                tmp['trust'] = LETTER_TRUST_DICT[_key['trust']]
    # Returns False (not None) when no key matched.
    if not tmp:
        return False
    else:
        return tmp
def get_secret_key(keyid=None, fingerprint=None, user=None, gnupghome=None):
    '''
    Get a secret key from the GPG keychain

    keyid
        The key ID (short or long) of the key to be retrieved.

    fingerprint
        The fingerprint of the key to be retrieved.

    user
        Which user's keychain to access, defaults to user Salt is running as.
        Passing the user as ``salt`` will set the GnuPG home directory to the
        ``/etc/salt/gpgkeys``.

    gnupghome
        Specify the location where GPG keyring and related files are stored.

    CLI Example:

    .. code-block:: bash

        salt '*' gpg.get_secret_key keyid=3FAD9F1E

        salt '*' gpg.get_secret_key fingerprint=53C96788253E58416D20BCD352952C84C3252192

        salt '*' gpg.get_secret_key keyid=3FAD9F1E user=username

    '''
    tmp = {}
    # Same matching logic as ``get_key`` but over the secret keyring.
    for _key in _list_keys(user, gnupghome, secret=True):
        if (_key['fingerprint'] == fingerprint or
                _key['keyid'] == keyid or
                _key['keyid'][8:] == keyid):
            tmp['keyid'] = _key['keyid']
            tmp['fingerprint'] = _key['fingerprint']
            tmp['uids'] = _key['uids']
            expires = _key.get('expires', None)
            date = _key.get('date', None)
            length = _key.get('length', None)
            owner_trust = _key.get('ownertrust', None)
            trust = _key.get('trust', None)
            if expires:
                tmp['expires'] = time.strftime('%Y-%m-%d',
                                               time.localtime(float(_key['expires'])))
            if date:
                tmp['created'] = time.strftime('%Y-%m-%d',
                                               time.localtime(float(_key['date'])))
            if length:
                tmp['keyLength'] = _key['length']
            if owner_trust:
                tmp['ownerTrust'] = LETTER_TRUST_DICT[_key['ownertrust']]
            if trust:
                tmp['trust'] = LETTER_TRUST_DICT[_key['trust']]
    # Returns False (not None) when no key matched.
    if not tmp:
        return False
    else:
        return tmp
@_restore_ownership
def import_key(text=None,
               filename=None,
               user=None,
               gnupghome=None):
    r'''
    Import a key from text or file

    text
        The text containing to import.

    filename
        The filename containing the key to import.

    user
        Which user's keychain to access, defaults to user Salt is running as.
        Passing the user as ``salt`` will set the GnuPG home directory to the
        ``/etc/salt/gpgkeys``.

    gnupghome
        Specify the location where GPG keyring and related files are stored.

    CLI Example:

    .. code-block:: bash

        salt '*' gpg.import_key text='-----BEGIN PGP PUBLIC KEY BLOCK-----\n ... -----END PGP PUBLIC KEY BLOCK-----'
        salt '*' gpg.import_key filename='/path/to/public-key-file'

    '''
    ret = {
        'res': True,
        'message': ''
    }
    gpg = _create_gpg(user, gnupghome)
    if not text and not filename:
        raise SaltInvocationError('filename or text must be passed.')
    if filename:
        try:
            with salt.utils.files.flopen(filename, 'rb') as _fp:
                text = salt.utils.stringutils.to_unicode(_fp.read())
        except IOError:
            raise SaltInvocationError('filename does not exist.')
    imported_data = gpg.import_keys(text)
    # Normalize the result fields across python-gnupg versions so a single
    # message chain below handles both APIs.
    if GPG_1_3_1:
        counts = imported_data.counts
        imported = counts.get('imported') or counts.get('imported_rsa')
        unchanged = counts.get('unchanged')
        not_imported = counts.get('not_imported')
        total = counts.get('count')
    else:
        imported = imported_data.imported or imported_data.imported_rsa
        unchanged = imported_data.unchanged
        not_imported = imported_data.not_imported
        total = imported_data.count
    if imported:
        ret['message'] = 'Successfully imported key(s).'
    elif unchanged:
        ret['message'] = 'Key(s) already exist in keychain.'
    elif not_imported:
        ret['res'] = False
        ret['message'] = 'Unable to import key.'
    elif not total:
        ret['res'] = False
        ret['message'] = 'Unable to import key.'
    return ret
def export_key(keyids=None, secret=False, user=None, gnupghome=None):
    '''
    Export a key from the GPG keychain

    keyids
        The key ID(s) of the key(s) to be exported. Can be specified as a comma
        separated string or a list. Anything which GnuPG itself accepts to
        identify a key - for example, the key ID or the fingerprint could be
        used.

    secret
        Export the secret key identified by the ``keyids`` information passed.

    user
        Which user's keychain to access, defaults to user Salt is running as.
        Passing the user as ``salt`` will set the GnuPG home directory to the
        ``/etc/salt/gpgkeys``.

    gnupghome
        Specify the location where GPG keyring and related files are stored.

    CLI Example:

    .. code-block:: bash

        salt '*' gpg.export_key keyids=3FAD9F1E

        salt '*' gpg.export_key keyids=3FAD9F1E secret=True

        salt '*' gpg.export_key keyids="['3FAD9F1E','3FBD8F1E']" user=username

    '''
    # A comma separated string is normalized into a list of key ids.
    key_list = keyids.split(',') if isinstance(keyids, six.string_types) else keyids
    return _create_gpg(user, gnupghome).export_keys(key_list, secret)
@_restore_ownership
def receive_keys(keyserver=None, keys=None, user=None, gnupghome=None):
    '''
    Receive key(s) from keyserver and add them to keychain

    keyserver
        Keyserver to use for searching for GPG keys, defaults to pgp.mit.edu

    keys
        The keyID(s) to retrieve from the keyserver. Can be specified as a comma
        separated string or a list.

    user
        Which user's keychain to access, defaults to user Salt is running as.
        Passing the user as ``salt`` will set the GnuPG home directory to the
        ``/etc/salt/gpgkeys``.

    gnupghome
        Specify the location where GPG keyring and related files are stored.

    CLI Example:

    .. code-block:: bash

        salt '*' gpg.receive_keys keys='3FAD9F1E'

        salt '*' gpg.receive_keys keys="['3FAD9F1E','3FBD9F2E']"

        salt '*' gpg.receive_keys keys=3FAD9F1E user=username

    '''
    ret = {
        'res': True,
        'changes': {},
        'message': []
    }
    gpg = _create_gpg(user, gnupghome)
    if not keyserver:
        keyserver = 'pgp.mit.edu'
    # Accept a comma separated string as well as a list of key ids.
    if isinstance(keys, six.string_types):
        keys = keys.split(',')
    recv_data = gpg.recv_keys(keyserver, *keys)
    # Per python-gnupg, each result dict carries either an 'ok' status
    # ('1' = imported, '0' = already present) or a 'problem' entry.
    for result in recv_data.results:
        if 'ok' in result:
            if result['ok'] == '1':
                ret['message'].append('Key {0} added to keychain'.format(result['fingerprint']))
            elif result['ok'] == '0':
                ret['message'].append('Key {0} already exists in keychain'.format(result['fingerprint']))
        elif 'problem' in result:
            ret['message'].append('Unable to add key to keychain')
    return ret
def trust_key(keyid=None,
              fingerprint=None,
              trust_level=None,
              user=None):
    '''
    Set the trust level for a key in GPG keychain

    keyid
        The keyid of the key to set the trust level for.

    fingerprint
        The fingerprint of the key to set the trust level for.

    trust_level
        The trust level to set for the specified key, must be one
        of the following:
        expired, unknown, not_trusted, marginally, fully, ultimately

    user
        Which user's keychain to access, defaults to user Salt is running as.
        Passing the user as ``salt`` will set the GnuPG home directory to the
        ``/etc/salt/gpgkeys``.

    CLI Example:

    .. code-block:: bash

        salt '*' gpg.trust_key keyid='3FAD9F1E' trust_level='marginally'
        salt '*' gpg.trust_key fingerprint='53C96788253E58416D20BCD352952C84C3252192' trust_level='not_trusted'
        salt '*' gpg.trust_key keys=3FAD9F1E trust_level='ultimately' user='username'
    '''
    ret = {
        'res': True,
        'message': ''
    }

    # Human-readable trust levels accepted on the CLI; they are mapped to
    # GnuPG's numeric ownertrust values via NUM_TRUST_DICT (module level,
    # outside this chunk).
    _VALID_TRUST_LEVELS = ['expired', 'unknown',
                           'not_trusted', 'marginally',
                           'fully', 'ultimately']

    # keyid and fingerprint are mutually exclusive ways to select the key.
    if fingerprint and keyid:
        ret['res'] = False
        ret['message'] = 'Only specify one argument, fingerprint or keyid'
        return ret

    if not fingerprint:
        if keyid:
            # Resolve the keyid to a fingerprint via the local keychain.
            key = get_key(keyid, user=user)
            if key:
                if 'fingerprint' not in key:
                    ret['res'] = False
                    ret['message'] = 'Fingerprint not found for keyid {0}'.format(keyid)
                    return ret
                fingerprint = key['fingerprint']
            else:
                ret['res'] = False
                ret['message'] = 'KeyID {0} not in GPG keychain'.format(keyid)
                return ret
        else:
            ret['res'] = False
            ret['message'] = 'Required argument, fingerprint or keyid'
            return ret

    if trust_level not in _VALID_TRUST_LEVELS:
        # NOTE(review): this error path returns a plain string instead of the
        # usual result dict -- kept as-is since callers may depend on it.
        return 'ERROR: Valid trust levels - {0}'.format(','.join(_VALID_TRUST_LEVELS))

    # gpg --import-ownertrust expects "FINGERPRINT:NUMERIC_LEVEL" lines on
    # stdin.
    stdin = '{0}:{1}\n'.format(fingerprint, NUM_TRUST_DICT[trust_level])
    cmd = [_gpg(), '--import-ownertrust']
    _user = user
    if user == 'salt':
        # The special "salt" user keeps its keyring under the Salt config
        # directory and runs the command as root.
        homeDir = os.path.join(__salt__['config.get']('config_dir'), 'gpgkeys')
        cmd.extend(['--homedir', homeDir])
        _user = 'root'
    res = __salt__['cmd.run_all'](cmd,
                                  stdin=stdin,
                                  runas=_user,
                                  python_shell=False)

    if not res['retcode'] == 0:
        ret['res'] = False
        ret['message'] = res['stderr']
    else:
        if res['stderr']:
            # gpg reports the numeric trust value(s) on stderr; two digits
            # indicate a change of trust, otherwise a fresh assignment.
            _match = re.findall(r'\d', res['stderr'])
            if len(_match) == 2:
                ret['fingerprint'] = fingerprint
                ret['message'] = 'Changing ownership trust from {0} to {1}.'.format(
                    INV_NUM_TRUST_DICT[_match[0]],
                    INV_NUM_TRUST_DICT[_match[1]]
                )
            else:
                # NOTE(review): assumes at least one digit is present in
                # stderr here; an empty match would raise IndexError --
                # confirm against gpg's output format.
                ret['fingerprint'] = fingerprint
                ret['message'] = 'Setting ownership trust to {0}.'.format(INV_NUM_TRUST_DICT[_match[0]])
        else:
            ret['message'] = res['stderr']
    return ret
def sign(user=None,
         keyid=None,
         text=None,
         filename=None,
         output=None,
         use_passphrase=False,
         gnupghome=None):
    '''
    Sign message or file

    user
        Which user's keychain to access, defaults to user Salt is running as.
        Passing the user as ``salt`` will set the GnuPG home directory to the
        ``/etc/salt/gpgkeys``.

    keyid
        The keyid of the key to set the trust level for, defaults to
        first key in the secret keyring.

    text
        The text to sign.

    filename
        The filename to sign.

    output
        The filename where the signed file will be written, default is standard out.

    use_passphrase
        Whether to use a passphrase with the signing key. Passphrase is received
        from Pillar.

    gnupghome
        Specify the location where GPG keyring and related files are stored.

    CLI Example:

    .. code-block:: bash

        salt '*' gpg.sign text='Hello there. How are you?'
        salt '*' gpg.sign filename='/path/to/important.file'
        salt '*' gpg.sign filename='/path/to/important.file' use_passphrase=True
    '''
    gpg = _create_gpg(user, gnupghome)
    if use_passphrase:
        gpg_passphrase = __salt__['pillar.get']('gpg_passphrase')
        if not gpg_passphrase:
            raise SaltInvocationError('gpg_passphrase not available in pillar.')
    else:
        gpg_passphrase = None

    # python-gnupg renamed the key-selection keyword from ``keyid`` to
    # ``default_key`` in 1.3.1; pick the right one for the installed version.
    gnupg_version = _LooseVersion(gnupg.__version__)
    if text:
        if gnupg_version >= _LooseVersion('1.3.1'):
            signed_data = gpg.sign(text, default_key=keyid, passphrase=gpg_passphrase)
        else:
            signed_data = gpg.sign(text, keyid=keyid, passphrase=gpg_passphrase)
    elif filename:
        with salt.utils.files.flopen(filename, 'rb') as _fp:
            # BUG FIX: the previous code called gpg.sign(text, ...) in the
            # >=1.3.1 branch here, signing the (None) ``text`` argument
            # instead of the opened file. Sign the file stream in both
            # version branches.
            if gnupg_version >= _LooseVersion('1.3.1'):
                signed_data = gpg.sign_file(_fp, default_key=keyid, passphrase=gpg_passphrase)
            else:
                signed_data = gpg.sign_file(_fp, keyid=keyid, passphrase=gpg_passphrase)
        if output:
            # Persist the signed payload to the requested output file.
            with salt.utils.files.flopen(output, 'wb') as fout:
                fout.write(salt.utils.stringutils.to_bytes(signed_data.data))
    else:
        raise SaltInvocationError('filename or text must be passed.')
    return signed_data.data
def verify(text=None,
           user=None,
           filename=None,
           gnupghome=None,
           signature=None):
    '''
    Verify a message or file

    text
        The text to verify.

    filename
        The filename to verify.

    user
        Which user's keychain to access, defaults to user Salt is running as.
        Passing the user as ``salt`` will set the GnuPG home directory to the
        ``/etc/salt/gpgkeys``.

    gnupghome
        Specify the location where GPG keyring and related files are stored.

    signature
        Specify the filename of a detached signature.

        .. versionadded:: 2018.3.0

    CLI Example:

    .. code-block:: bash

        salt '*' gpg.verify text='Hello there. How are you?'
        salt '*' gpg.verify filename='/path/to/important.file'
        salt '*' gpg.verify filename='/path/to/important.file' signature='/path/to/important.file.sig'
    '''
    # BUG FIX: ``gnupghome`` was accepted but never forwarded, so
    # verification always ran against the default keyring location.
    # (The docstring example also advertised a nonexistent use_passphrase
    # parameter; replaced with a detached-signature example.)
    gpg = _create_gpg(user, gnupghome)

    if text:
        verified = gpg.verify(text)
    elif filename:
        if signature:
            # need to call with fopen instead of flopen due to:
            # https://bitbucket.org/vinay.sajip/python-gnupg/issues/76/verify_file-closes-passed-file-handle
            with salt.utils.files.fopen(signature, 'rb') as _fp:
                verified = gpg.verify_file(_fp, filename)
        else:
            with salt.utils.files.flopen(filename, 'rb') as _fp:
                verified = gpg.verify_file(_fp)
    else:
        raise SaltInvocationError('filename or text must be passed.')

    ret = {}
    # trust_level is None when the signature could not be verified at all.
    if verified.trust_level is not None:
        ret['res'] = True
        ret['username'] = verified.username
        ret['key_id'] = verified.key_id
        ret['trust_level'] = VERIFY_TRUST_LEVELS[six.text_type(verified.trust_level)]
        ret['message'] = 'The signature is verified.'
    else:
        ret['res'] = False
        ret['message'] = 'The signature could not be verified.'
    return ret
def encrypt(user=None,
            recipients=None,
            text=None,
            filename=None,
            output=None,
            sign=None,
            use_passphrase=False,
            gnupghome=None,
            bare=False):
    '''
    Encrypt a message or file

    user
        Which user's keychain to access, defaults to user Salt is running as.
        Passing the user as ``salt`` will set the GnuPG home directory to the
        ``/etc/salt/gpgkeys``.

    recipients
        The fingerprints for those recipient whom the data is being encrypted for.

    text
        The text to encrypt.

    filename
        The filename to encrypt.

    output
        The filename where the signed file will be written, default is standard out.

    sign
        Whether to sign, in addition to encrypt, the data. ``True`` to use
        default key or fingerprint to specify a different key to sign with.

    use_passphrase
        Whether to use a passphrase with the signing key. Passphrase is received
        from Pillar.

    gnupghome
        Specify the location where GPG keyring and related files are stored.

    bare
        If ``True``, return the (armored) encrypted block as a string without
        the standard comment/res dict.

    CLI Example:

    .. code-block:: bash

        salt '*' gpg.encrypt text='Hello there. How are you?'
        salt '*' gpg.encrypt filename='/path/to/important.file'
        salt '*' gpg.encrypt filename='/path/to/important.file' use_passphrase=True
    '''
    ret = {
        'res': True,
        'comment': ''
    }
    gpg = _create_gpg(user, gnupghome)

    if use_passphrase:
        # The pillar value is a dict wrapping the actual passphrase string.
        gpg_passphrase = __salt__['pillar.get']('gpg_passphrase')
        if not gpg_passphrase:
            raise SaltInvocationError('gpg_passphrase not available in pillar.')
        gpg_passphrase = gpg_passphrase['gpg_passphrase']
    else:
        gpg_passphrase = None

    if text:
        # NOTE(review): the text branch does not forward ``sign`` or
        # ``output`` -- presumably an oversight; confirm before relying on
        # signing/output for text input.
        result = gpg.encrypt(text, recipients, passphrase=gpg_passphrase)
    elif filename:
        # GPG_1_3_1 is a module-level flag describing the installed
        # python-gnupg generation (set elsewhere in this module).
        if GPG_1_3_1:
            # This version does not allow us to encrypt using the
            # file stream # have to read in the contents and encrypt.
            with salt.utils.files.flopen(filename, 'rb') as _fp:
                _contents = salt.utils.stringutils.to_unicode(_fp.read())
            result = gpg.encrypt(_contents, recipients, passphrase=gpg_passphrase, output=output)
        else:
            # This version allows encrypting the file stream
            with salt.utils.files.flopen(filename, 'rb') as _fp:
                if output:
                    result = gpg.encrypt_file(_fp, recipients, passphrase=gpg_passphrase, output=output, sign=sign)
                else:
                    result = gpg.encrypt_file(_fp, recipients, passphrase=gpg_passphrase, sign=sign)
    else:
        raise SaltInvocationError('filename or text must be passed.')

    if result.ok:
        if not bare:
            if output:
                ret['comment'] = 'Encrypted data has been written to {0}'.format(output)
            else:
                ret['comment'] = result.data
        else:
            # bare=True returns the raw armored block instead of a dict.
            ret = result.data
    else:
        if not bare:
            ret['res'] = False
            ret['comment'] = '{0}.\nPlease check the salt-minion log.'.format(result.status)
        else:
            ret = False
        log.error(result.stderr)

    return ret
def decrypt(user=None,
            text=None,
            filename=None,
            output=None,
            use_passphrase=False,
            gnupghome=None,
            bare=False):
    '''
    Decrypt a message or file

    user
        Which user's keychain to access, defaults to user Salt is running as.
        Passing the user as ``salt`` will set the GnuPG home directory to the
        ``/etc/salt/gpgkeys``.

    text
        The encrypted text to decrypt.

    filename
        The encrypted filename to decrypt.

    output
        The filename where the decrypted data will be written, default is standard out.

    use_passphrase
        Whether to use a passphrase with the signing key. Passphrase is received
        from Pillar.

    gnupghome
        Specify the location where GPG keyring and related files are stored.

    bare
        If ``True``, return the (armored) decrypted block as a string without the
        standard comment/res dict.

    CLI Example:

    .. code-block:: bash

        salt '*' gpg.decrypt filename='/path/to/important.file.gpg'
        salt '*' gpg.decrypt filename='/path/to/important.file.gpg' use_passphrase=True
    '''
    ret = {
        'res': True,
        'comment': ''
    }
    gpg = _create_gpg(user, gnupghome)

    # Resolve the passphrase from pillar when requested; pillar stores it as
    # a dict wrapping the actual string.
    gpg_passphrase = None
    if use_passphrase:
        gpg_passphrase = __salt__['pillar.get']('gpg_passphrase')
        if not gpg_passphrase:
            raise SaltInvocationError('gpg_passphrase not available in pillar.')
        gpg_passphrase = gpg_passphrase['gpg_passphrase']

    if text:
        result = gpg.decrypt(text, passphrase=gpg_passphrase)
    elif filename:
        with salt.utils.files.flopen(filename, 'rb') as _fp:
            decrypt_kwargs = {'passphrase': gpg_passphrase}
            if output:
                decrypt_kwargs['output'] = output
            result = gpg.decrypt_file(_fp, **decrypt_kwargs)
    else:
        raise SaltInvocationError('filename or text must be passed.')

    # Failure path: log and report (or return False in bare mode).
    if not result.ok:
        log.error(result.stderr)
        if bare:
            return False
        ret['res'] = False
        ret['comment'] = '{0}.\nPlease check the salt-minion log.'.format(result.status)
        return ret

    # Success path.
    if bare:
        return result.data
    if output:
        ret['comment'] = 'Decrypted data has been written to {0}'.format(output)
    else:
        ret['comment'] = result.data
    return ret
| 30.24146 | 116 | 0.5834 |
77bdbda17f70664c988db525fb48e7555147e0d1 | 857 | py | Python | Interesting Python Questions/merge sort.py | liu-yunfei/Python | 314cdc98f32f4f0de2c0904279865b944e34dd75 | [
"MIT"
] | 1 | 2020-10-08T09:29:59.000Z | 2020-10-08T09:29:59.000Z | Interesting Python Questions/merge sort.py | liu-yunfei/Python | 314cdc98f32f4f0de2c0904279865b944e34dd75 | [
"MIT"
] | 1 | 2021-01-30T12:04:51.000Z | 2021-01-30T12:05:37.000Z | Interesting Python Questions/merge sort.py | liu-yunfei/Python | 314cdc98f32f4f0de2c0904279865b944e34dd75 | [
"MIT"
] | null | null | null | def merge(lefthalf, righthalf):
answerList = []
while len(lefthalf) > 0 and len(righthalf) > 0:
if lefthalf[0] < righthalf[0]:
answerList += [lefthalf[0]]
lefthalf = lefthalf[1:]
else:
answerList += [righthalf[0]]
righthalf = righthalf[1:]
for i in lefthalf:
answerList += [i]
for j in righthalf:
answerList += [j]
return answerList
def mergesort(x):
    """Return a new list with the elements of *x* sorted via merge sort."""
    # Lists of zero or one element are already sorted.
    if len(x) <= 1:
        return x
    mid = len(x) // 2
    return merge(mergesort(x[:mid]), mergesort(x[mid:]))
def main():
    """Demo driver: sort a sample list, then merge two sorted lists."""
    sample = [10, 5, 2, 9, 6, 3, 4, 8, 1, 7]
    print(mergesort(sample))
    sorted_a = [1, 4, 6, 10]
    sorted_b = [5, 7, 8, 11, 12]
    print(merge(sorted_a, sorted_b))


main()
| 23.805556 | 52 | 0.492415 |
6fba46e83f779618b4e8f5e7c2cb668565f49a2a | 2,528 | py | Python | hsaudiotag/id3v1.py | kaisenlinux/hsaudiotag | 394bc166f51caa6f9ac333fe1a3d662f411d5716 | [
"BSD-3-Clause"
] | null | null | null | hsaudiotag/id3v1.py | kaisenlinux/hsaudiotag | 394bc166f51caa6f9ac333fe1a3d662f411d5716 | [
"BSD-3-Clause"
] | 8 | 2015-02-14T05:29:45.000Z | 2015-03-09T13:49:28.000Z | src/hsaudiotag/id3v1.py | jmtchllrx/pyMuse | d85ca140ad572695349c03ee4cd0a8e5f960a045 | [
"MIT"
] | null | null | null | # Created By: Virgil Dupras
# Created On: 2004/12/07
# Copyright 2010 Hardcoded Software (http://www.hardcoded.net)
# This software is licensed under the "BSD" License as described in the "LICENSE" file,
# which should be included with this package. The terms are also available at
# http://www.hardcoded.net/licenses/bsd_license
import struct
from .util import FileOrPath
from .genres import genre_by_index
# Values stored in Id3v1.version to distinguish the two tag flavors
# (0 is used when no tag was found).
TAG_VERSION_1_0 = 1
TAG_VERSION_1_1 = 2
# id3v1 on-disk layout: byte offsets within the trailing 128-byte tag.
#id3v1 specs
#0-2:"TAG"
#3-32:Title
#33-62:Artist
#63-92:Album
#93-96:Year
#97-126:Comment
#127:Genre
def _arrange_id3_field(raw_field):
"""Format the read field properly
This function takes only the part of the string before the first \0 char.
After this, it checks if the string has to be converted to unicode and convert it if it indeed does.
"""
decoded = str(raw_field, 'iso8859-1')
result = decoded.split('\0')
if len(result) > 0:
result = result[0].rstrip().replace('\n', ' ').replace('\r', ' ')
else:
result = ''
return result
class Id3v1(object):
    """Parser for the optional 128-byte ID3v1 tag at the end of an MP3 file.

    All attributes default to empty/zero; when no tag is found ``size``
    stays 0 and ``exists`` is False.
    """

    def __init__(self, infile):
        """Read and parse the tag from *infile* (a path or file object)."""
        # TAG_VERSION_1_0 / TAG_VERSION_1_1, or 0 when no tag was found.
        self.version = 0
        # Tag size in bytes: 128 when a tag was parsed, otherwise 0.
        self.size = 0
        self.title = ''
        self.artist = ''
        self.album = ''
        self.year = ''
        self.genre = ''
        self.comment = ''
        # Track number (ID3v1.1 only); 0 when absent.
        self.track = 0
        with FileOrPath(infile) as fp:
            self._read_file(fp)

    def _read_file(self, fp):
        """Seek to the last 128 bytes of the file and try to parse a tag."""
        fp.seek(0, 2)
        position = fp.tell()
        # Files shorter than 128 bytes cannot contain an ID3v1 tag.
        if position and position >= 128:
            fp.seek(-128, 2)
            self._read_tag(fp.read(128))

    def _read_tag(self, data):
        """Parse the raw 128-byte block; no-op unless it starts with b'TAG'."""
        if data[0:3] != b'TAG':
            return
        #check if the comment field contains track info
        # ID3v1.1 shortens the comment to 28 bytes and stores the track in
        # byte 126, with byte 125 set to NUL (or a space by sloppy taggers).
        if ((data[125] == 0) and (data[126] != 0)) or ((data[125] == 0x20) and (data[126] != 0x20)):
            #We have a v1.1
            self.version = TAG_VERSION_1_1
            # Track numbers are clamped to the two-digit range.
            self.track = min(data[126], 99)
            self.comment = _arrange_id3_field(data[97:125])
        else:
            self.version = TAG_VERSION_1_0
            self.track = 0
            self.comment = _arrange_id3_field(data[97:127])
        self.title = _arrange_id3_field(data[3:33])
        self.artist = _arrange_id3_field(data[33:63])
        self.album = _arrange_id3_field(data[63:93])
        self.year = _arrange_id3_field(data[93:97])
        # The final byte is a genre index resolved via the genres table.
        genre = data[127]
        self.genre = genre_by_index(genre)
        self.size = 128

    @property
    def exists(self):
        """True when an ID3v1 tag was found in the file."""
        return self.size > 0
| 29.395349 | 104 | 0.599288 |
7fdeb0e2dfc8a0d11711fef3e805482d346c616d | 5,782 | py | Python | sfaira/unit_tests/tests_by_submodule/data/dataset/test_dataset.py | johnmous/sfaira | c50240a74530e614ab7681bf9c63b04cb815b361 | [
"BSD-3-Clause"
] | null | null | null | sfaira/unit_tests/tests_by_submodule/data/dataset/test_dataset.py | johnmous/sfaira | c50240a74530e614ab7681bf9c63b04cb815b361 | [
"BSD-3-Clause"
] | null | null | null | sfaira/unit_tests/tests_by_submodule/data/dataset/test_dataset.py | johnmous/sfaira | c50240a74530e614ab7681bf9c63b04cb815b361 | [
"BSD-3-Clause"
] | null | null | null | import numpy as np
import os
import pytest
import sfaira.versions.genomes
from sfaira.consts import AdataIdsSfaira
from sfaira.data import DatasetSuperGroup, DatasetInteractive
from sfaira.data import Universe
from sfaira.unit_tests.data_for_tests.loaders import RELEASE_HUMAN, PrepareData
from sfaira.unit_tests.directories import DIR_TEMP, DIR_DATA_LOADERS_CACHE
def test_dsgs_instantiate():
    """Smoke test: the dataset Universe can be constructed from the cache dirs."""
    _ = Universe(data_path=DIR_DATA_LOADERS_CACHE, meta_path=DIR_DATA_LOADERS_CACHE, cache_path=DIR_DATA_LOADERS_CACHE)
def test_dsgs_crossref():
    """
    Tests if crossref attributes can be retrieved for all data loader entries with DOI journal defined.

    Attributes tested:

    - title
    """
    universe = Universe(data_path=DIR_DATA_LOADERS_CACHE, meta_path=DIR_DATA_LOADERS_CACHE,
                        cache_path=DIR_DATA_LOADERS_CACHE)
    for k, v in universe.datasets.items():
        # Accessing .title triggers the crossref lookup.
        title = v.title
        if title is None:
            # Entries without a journal DOI (or with a "no_doi" placeholder)
            # legitimately have no title.
            if v.doi_journal is not None and "no_doi" not in v.doi_journal:
                raise ValueError(f"did not retrieve title for data set {k} with DOI: {v.doi_journal}.")
@pytest.mark.parametrize("organ", ["lung"])
def test_dsgs_subset_dataset_wise(organ: str):
    """
    Tests if subsetting results only in datasets of the desired characteristics.
    """
    ds = PrepareData().prepare_dsg(load=False)
    ds.subset(key="organism", values=["Homo sapiens"])
    ds.subset(key="organ", values=[organ])
    ds.load()
    # Every dataset left after subsetting must match the requested criteria.
    for x in ds.dataset_groups:
        for k, v in x.datasets.items():
            assert v.organism == "Homo sapiens", v.organism
            # Organ match is checked via the ontology is_a relation, not by
            # plain string equality.
            assert v.ontology_container_sfaira.organ.is_a(query=v.organ, reference=organ), v.organ
def test_dsgs_config_write_load():
    """Round-trip a subset configuration through a CSV config file."""
    config_path = os.path.join(DIR_TEMP, "config.csv")
    original = PrepareData().prepare_dsg(load=False)
    original.subset(key="organism", values=["Homo sapiens"])
    original.subset(key="organ", values=["lung"])
    original.load()
    original.write_config(fn=config_path)
    # A fresh group restored from the config must select the same datasets.
    restored = PrepareData().prepare_dsg()
    restored.load_config(fn=config_path)
    assert np.all(original.ids == restored.ids)
def test_dsgs_adata():
    """Smoke test: per-dataset adata objects are exposed after loading a subset."""
    ds = PrepareData().prepare_dsg(load=False)
    ds.subset(key="organism", values=["Homo sapiens"])
    ds.subset(key="organ", values=["lung"])
    ds.load()
    _ = ds.adata_ls
def test_dsgs_load():
    """Smoke test: a subset dataset group loads without error."""
    ds = PrepareData().prepare_dsg(load=False)
    ds.subset(key="organism", values=["Homo sapiens"])
    ds.subset(key="organ", values=["lung"])
    ds.load()
@pytest.mark.parametrize("celltype", ["T cell"])
def test_dsgs_subset_cell_wise(celltype: str):
    """
    Tests if sub-setting results only in datasets of the desired characteristics.
    """
    organ = "lung"
    ds = PrepareData().prepare_dsg(load=False)
    ds.subset(key="organism", values=["Homo sapiens"])
    ds.subset(key="organ", values=[organ])
    ds.load()
    # Cell-wise subset: drop all cells not annotated as `celltype`.
    ds.subset_cells(key="cell_type", values=celltype)
    for x in ds.dataset_groups:
        for k, v in x.datasets.items():
            assert v.organism == "Homo sapiens", v.id
            assert v.ontology_container_sfaira.organ.is_a(query=v.organ, reference=organ), v.organ
            # Every remaining cell annotation must be the requested type or a
            # subtype of it in the cell type ontology.
            for y in np.unique(v.adata.obs[v._adata_ids.cell_type].values):
                assert v.ontology_container_sfaira.cell_type.is_a(query=y, reference=celltype), y
@pytest.mark.parametrize("match_to_release", [RELEASE_HUMAN, {"Homo sapiens": RELEASE_HUMAN}])
@pytest.mark.parametrize("remove_gene_version", [False, True])
@pytest.mark.parametrize("subset_genes_to_type", [None, "protein_coding"])
def test_dsgs_streamline_features(match_to_release: str, remove_gene_version: bool, subset_genes_to_type: str):
    """
    Tests that feature streamlining aligns each dataset's gene space with the
    reference genome annotation (all symbols, in reference order).
    """
    ds = PrepareData().prepare_dsg(load=False)
    ds.subset(key="organism", values=["Homo sapiens"])
    ds.subset(key="organ", values=["lung"])
    ds.load()
    ds.streamline_features(remove_gene_version=remove_gene_version, match_to_release=match_to_release,
                           subset_genes_to_type=subset_genes_to_type)
    # Build the expected gene space directly from the genome container;
    # match_to_release may be a release string or an organism->release dict.
    gc = sfaira.versions.genomes.GenomeContainer(
        organism="Homo Sapiens",
        release=match_to_release["Homo sapiens"] if isinstance(match_to_release, dict) else match_to_release)
    gc.set(biotype=subset_genes_to_type)
    for x in ds.datasets.values():
        assert x.adata.var["gene_symbol"].tolist() == gc.symbols
def test_dsg_load():
    """Smoke test: loading works when groups are wrapped in a DatasetSuperGroup."""
    ds = PrepareData().prepare_dsg(load=False)
    ds.subset(key="organism", values=["Homo sapiens"])
    ds.subset(key="organ", values=["lung"])
    ds = DatasetSuperGroup(dataset_groups=[ds])
    ds.load()
def test_dsg_adata():
    """Smoke test: a DatasetSuperGroup exposes a merged adata object."""
    ds = PrepareData().prepare_dsg(load=False)
    ds.subset(key="organism", values=["Homo sapiens"])
    ds.subset(key="organ", values=["lung"])
    ds = DatasetSuperGroup(dataset_groups=[ds])
    _ = ds.adata
def test_ds_interactive():
    """DatasetInteractive: feature/metadata streamlining on a user-supplied AnnData."""
    adata_ids = AdataIdsSfaira()
    # Prepare object:
    ds = PrepareData().prepare_dsg(load=False)
    ds.subset(key="doi_journal", values=["no_doi_mock1"])
    ds.load()
    adata = ds.adata_ls[0]
    di = DatasetInteractive(data=adata, feature_id_col="index")
    di.organism = "Homo sapiens"
    di.organ = "lung"
    di.cell_type_obs_key = "free_annotation"
    # Test that adata is accessible in non-streamlined object:
    _ = di.adata
    # Test streamlining:
    di.streamline_features(match_to_release=RELEASE_HUMAN)
    di.streamline_metadata(schema="sfaira")
    # Test entries in streamlined object:
    adata_di = di.adata
    # Cell type column (plus its ontology-ID companion) must be present.
    assert adata_ids.cell_type in adata_di.obs.columns
    assert adata_ids.cell_type + adata_ids.onto_id_suffix in adata_di.obs.columns
    assert adata_ids.organ in adata_di.uns.keys()
    # Annotations must be carried over from the user-provided column.
    assert np.all(adata_di.obs[adata_ids.cell_type].values == adata.obs["free_annotation"].values)
    assert adata_di.uns[adata_ids.organ] == "lung"
| 38.291391 | 119 | 0.70166 |
dec811c91954d823bdc8eb71faadd83662f1233e | 4,191 | py | Python | galaxiaclient/common/client.py | WiproOpenSourcePractice/galaxia | baa6ea0a2192625dce2df7daddb1d983520bb7ab | [
"Apache-2.0"
] | 25 | 2016-04-27T14:45:59.000Z | 2020-05-21T00:14:56.000Z | galaxiaclient/common/client.py | WiproOpenSource/galaxia | baa6ea0a2192625dce2df7daddb1d983520bb7ab | [
"Apache-2.0"
] | 62 | 2016-04-27T14:13:06.000Z | 2016-11-16T05:12:21.000Z | galaxiaclient/common/client.py | WiproOpenSource/galaxia | baa6ea0a2192625dce2df7daddb1d983520bb7ab | [
"Apache-2.0"
] | 20 | 2016-05-01T14:28:09.000Z | 2018-10-25T18:11:29.000Z | # Copyright 2016 - Wipro Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Interface to create a http client and invoke request to galaxia api
"""
import json
import requests
import requests.exceptions as req_except
def create_request_data(**kwargs):
    """Build the JSON payload for a galaxia API request.

    Keys whose name contains ``list`` (except ``metrics_list``) are treated
    as comma separated strings and converted to lists; ``metrics_list`` is
    converted to a list of ``{"name": <metric>}`` objects.

    :param kwargs: key/value pairs to serialize
    :return: JSON encoded request body
    """
    data = {}
    # dict.items() works on both Python 2 and 3; the original used the
    # Python-2-only iteritems().
    for key, value in kwargs.items():
        if "list" in key and value is not None and "metrics_list" not in key:
            data[key] = split_and_convert(key, value)
        elif "metrics_list" in key and value is not None:
            # Robustness fix: a None metrics_list previously raised
            # TypeError; it now falls through to the plain assignment.
            data[key] = [{'name': n} for n in value]
        else:
            data[key] = value
    return json.dumps(data)
def split_and_convert(key, value):
    """Split a comma separated string into a list of its items.

    :param key: parameter name (unused; kept for interface compatibility)
    :param value: comma separated string
    :return: list of substrings
    """
    # str.split already returns a list; the original element-copy loop was
    # redundant.
    return value.split(',')
def http_request(request_type, request_url, headers, payload):
    """Invoke an HTTP request against the galaxia API.

    :param request_type: one of GET, POST, PUT, DELETE
    :param request_url: HTTP request URL of the galaxia API
    :param headers: HTTP request headers
    :param payload: payload for the request
    :return: the requests response object ({} if no request was attempted)
    """
    resp = {}
    try:
        if request_type == 'POST':
            resp = requests.post(request_url, data=payload,
                                 headers=headers)
        if request_type == 'GET':
            resp = requests.get(request_url, params=payload,
                                headers=headers)
        if request_type == 'PUT':
            resp = requests.put(request_url, data=payload, headers=headers)
        if request_type == 'DELETE':
            resp = requests.delete(request_url, data=payload, headers=headers)
    except req_except.ConnectionError as ex:
        # BUG FIX: the previous handler also printed resp.status_code here,
        # but on a connection error no response exists (resp is still {}),
        # so that line raised AttributeError and masked the real failure.
        print("HTTP Client is unable to reach the service @ %s" % request_url)
        raise ex
    except req_except.HTTPError as ex:
        print("HTTP Client hit an unknown exception with error code %s "
              "and error %s" % (ex.errno, ex.strerror))
    except Exception as ex:
        # str(ex) replaces the Python-2-only ex.message attribute.
        print("HTTP Client hit an exception with error %s" % str(ex))
    parse_http_response(resp, request_url)
    return resp
def concatenate_url(endpoint, url):
    """Join the API endpoint and a resource path with a single slash.

    Example: ``http://localhost:port/v1`` + ``dashboard/`` gives
    ``http://localhost:port/v1/dashboard``.

    :param endpoint: base URL of the service
    :param url: target resource path
    :return: concatenated URL
    """
    trimmed_endpoint = endpoint.rstrip("/")
    trimmed_url = url.rstrip("/")
    return "{0}/{1}".format(trimmed_endpoint, trimmed_url)
def parse_http_response(resp, request_url):
    """Validate an HTTP response and raise on any error status code.

    The original used Python-2 print statements, which made the module
    unimportable under Python 3; the parenthesized single-argument form
    below behaves identically on both interpreters.

    :param resp: response object exposing ``status_code``
    :param request_url: URL used for the request (only for error messages)
    :raises Exception: for any status code other than 200
    """
    if resp.status_code == 200:
        pass
    elif resp.status_code == 404:
        print("Unable to reach the request resource @ %s" % (request_url))
        raise Exception
    elif resp.status_code == 401:
        print("Authentication failed for resource @ %s" % request_url)
        raise Exception
    elif resp.status_code == 403:
        print("Authorization failed for resource @ %s with http error code %s"
              % (request_url, resp.status_code))
        raise Exception
    elif resp.status_code == 408:
        print("Request timed out for resource @ %s with http error code %s"
              % (request_url, resp.status_code))
        raise Exception
    elif resp.status_code >= 500:
        print("Server failed to fullfil the request for resource @ %s with "
              "http error code %s" % (request_url, resp.status_code))
        raise Exception
    else:
        print("Unable to process the request for resource @ %s with http "
              "error code %s" % (request_url, resp.status_code))
        raise Exception
| 33.798387 | 80 | 0.642806 |
2b924f7e9a07dfd9ea3291847da4bc9afbb54764 | 17,764 | py | Python | st2common/st2common/services/action.py | avezraj/st2 | 519c7f6819e52fb289c440bb7d1df7b558bb9ed7 | [
"Apache-2.0"
] | null | null | null | st2common/st2common/services/action.py | avezraj/st2 | 519c7f6819e52fb289c440bb7d1df7b558bb9ed7 | [
"Apache-2.0"
] | null | null | null | st2common/st2common/services/action.py | avezraj/st2 | 519c7f6819e52fb289c440bb7d1df7b558bb9ed7 | [
"Apache-2.0"
] | null | null | null | # Copyright 2019 Extreme Networks, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
import six
from st2common import log as logging
from st2common.constants import action as action_constants
from st2common.exceptions import actionrunner as runner_exc
from st2common.exceptions import db as db_exc
from st2common.exceptions import trace as trace_exc
from st2common.persistence.liveaction import LiveAction
from st2common.persistence.execution import ActionExecution
from st2common.persistence.execution import ActionExecutionOutput
from st2common.models.db.execution import ActionExecutionOutputDB
from st2common.runners import utils as runners_utils
from st2common.services import executions
from st2common.services import trace as trace_service
from st2common.util import date as date_utils
from st2common.util import action_db as action_utils
from st2common.util import schema as util_schema
__all__ = [
'request',
'create_request',
'publish_request',
'is_action_canceled_or_canceling',
'request_pause',
'request_resume',
'store_execution_output_data',
]
LOG = logging.getLogger(__name__)
def _get_immutable_params(parameters):
if not parameters:
return []
return [k for k, v in six.iteritems(parameters) if v.get('immutable', False)]
def create_request(liveaction, action_db=None, runnertype_db=None):
    """
    Create an action execution.

    Validates the action and its parameters, persists the liveaction in
    ``requested`` state and creates the corresponding execution object;
    nothing is published to the message bus here (see ``publish_request``).

    :param action_db: Action model to operate one. If not provided, one is retrieved from the
                      database using values from "liveaction".
    :type action_db: :class:`ActionDB`

    :param runnertype_db: Runner model to operate one. If not provided, one is retrieved from the
                          database using values from "liveaction".
    :type runnertype_db: :class:`RunnerTypeDB`

    :return: (liveaction, execution)
    :rtype: tuple
    """
    # We import this here to avoid conflicts w/ runners that might import this
    # file since the runners don't have the config context by default.
    from st2common.metrics.base import get_driver

    # Use the user context from the parent action execution. Subtasks in a workflow
    # action can be invoked by a system user and so we want to use the user context
    # from the original workflow action.
    parent_context = executions.get_parent_context(liveaction) or {}
    parent_user = parent_context.get('user', None)

    if parent_user:
        liveaction.context['user'] = parent_user

    # Validate action
    if not action_db:
        action_db = action_utils.get_action_by_ref(liveaction.action)

    if not action_db:
        raise ValueError('Action "%s" cannot be found.' % liveaction.action)

    if not action_db.enabled:
        raise ValueError('Unable to execute. Action "%s" is disabled.' % liveaction.action)

    if not runnertype_db:
        runnertype_db = action_utils.get_runnertype_by_name(action_db.runner_type['name'])

    if not hasattr(liveaction, 'parameters'):
        liveaction.parameters = dict()

    # For consistency add pack to the context here in addition to RunnerContainer.dispatch() method
    liveaction.context['pack'] = action_db.pack

    # Validate action parameters.
    schema = util_schema.get_schema_for_action_parameters(action_db, runnertype_db)
    validator = util_schema.get_validator()
    util_schema.validate(liveaction.parameters, schema, validator, use_default=True,
                         allow_default_none=True)

    # validate that no immutable params are being overriden. Although possible to
    # ignore the override it is safer to inform the user to avoid surprises.
    immutables = _get_immutable_params(action_db.parameters)
    immutables.extend(_get_immutable_params(runnertype_db.runner_parameters))

    overridden_immutables = [p for p in six.iterkeys(liveaction.parameters) if p in immutables]

    if len(overridden_immutables) > 0:
        raise ValueError('Override of immutable parameter(s) %s is unsupported.'
                         % str(overridden_immutables))

    # Set notification settings for action.
    # XXX: There are cases when we don't want notifications to be sent for a particular
    # execution. So we should look at liveaction.parameters['notify']
    # and not set liveaction.notify.
    if not _is_notify_empty(action_db.notify):
        liveaction.notify = action_db.notify

    # Write to database and send to message queue.
    liveaction.status = action_constants.LIVEACTION_STATUS_REQUESTED
    liveaction.start_timestamp = date_utils.get_datetime_utc_now()

    # Set the "action_is_workflow" attribute
    liveaction.action_is_workflow = action_db.is_workflow()

    # Publish creation after both liveaction and actionexecution are created.
    liveaction = LiveAction.add_or_update(liveaction, publish=False)

    # Get trace_db if it exists. This could throw. If it throws, we have to cleanup
    # liveaction object so we don't see things in requested mode.
    trace_db = None
    try:
        _, trace_db = trace_service.get_trace_db_by_live_action(liveaction)
    except db_exc.coditationDBObjectNotFoundError as e:
        _cleanup_liveaction(liveaction)
        raise trace_exc.TraceNotFoundException(six.text_type(e))

    execution = executions.create_execution_object(liveaction=liveaction, action_db=action_db,
                                                   runnertype_db=runnertype_db, publish=False)

    if trace_db:
        # Link this execution into the existing trace.
        trace_service.add_or_update_given_trace_db(
            trace_db=trace_db,
            action_executions=[
                trace_service.get_trace_component_for_action_execution(execution, liveaction)
            ])

    # Record the requested status in metrics.
    get_driver().inc_counter('action.executions.%s' % (liveaction.status))

    return liveaction, execution
def publish_request(liveaction, execution):
    """
    Publish an action execution.

    Emits the "create" and "status" messages for the liveaction and the
    "create" message for the execution onto the message bus, then writes
    an audit record.

    :return: (liveaction, execution)
    :rtype: tuple
    """
    # Assume that this is a creation.
    LiveAction.publish_create(liveaction)
    LiveAction.publish_status(liveaction)
    ActionExecution.publish_create(execution)

    # TODO: This results in two queries, optimize it
    # extra = {'liveaction_db': liveaction, 'execution_db': execution}
    extra = {}
    LOG.audit('Action execution requested. LiveAction.id=%s, ActionExecution.id=%s' %
              (liveaction.id, execution.id), extra=extra)

    return liveaction, execution
def request(liveaction):
    """Create and publish an execution request; returns (liveaction, execution)."""
    liveaction, execution = publish_request(*create_request(liveaction))
    return liveaction, execution
def update_status(liveaction, new_status, result=None, publish=True):
    """Transition the liveaction (and its execution) to ``new_status``.

    No-op when the status is unchanged. Persists the liveaction first, then
    mirrors the change onto the execution, invokes the runner post-run hook
    for completed/paused states, and finally publishes the status change.

    :param liveaction: LiveActionDB to update.
    :param new_status: Target status constant.
    :param result: Optional result payload to store with the status.
    :param publish: When True, publish the status change on the message bus.
    :return: The updated liveaction.
    """
    if liveaction.status == new_status:
        return liveaction
    old_status = liveaction.status
    updates = {
        'liveaction_id': liveaction.id,
        'status': new_status,
        'result': result,
        'publish': False
    }
    # Stamp the end time only when entering a terminal state.
    if new_status in action_constants.LIVEACTION_COMPLETED_STATES:
        updates['end_timestamp'] = date_utils.get_datetime_utc_now()
    liveaction = action_utils.update_liveaction_status(**updates)
    action_execution = executions.update_execution(liveaction)
    msg = ('The status of action execution is changed from %s to %s. '
           '<LiveAction.id=%s, ActionExecution.id=%s>' % (old_status,
           new_status, liveaction.id, action_execution.id))
    extra = {
        'action_execution_db': action_execution,
        'liveaction_db': liveaction
    }
    LOG.audit(msg, extra=extra)
    LOG.info(msg)
    # Invoke post run if liveaction status is completed or paused.
    if (new_status in action_constants.LIVEACTION_COMPLETED_STATES or
            new_status == action_constants.LIVEACTION_STATUS_PAUSED):
        runners_utils.invoke_post_run(liveaction)
    if publish:
        LiveAction.publish_status(liveaction)
    return liveaction
def is_action_canceled_or_canceling(liveaction_id):
    """Return True when the liveaction is canceled or being canceled."""
    status = action_utils.get_liveaction_by_id(liveaction_id).status
    return status in (action_constants.LIVEACTION_STATUS_CANCELED,
                      action_constants.LIVEACTION_STATUS_CANCELING)
def is_action_paused_or_pausing(liveaction_id):
    """Return True when the liveaction is paused or being paused."""
    status = action_utils.get_liveaction_by_id(liveaction_id).status
    return status in (action_constants.LIVEACTION_STATUS_PAUSED,
                      action_constants.LIVEACTION_STATUS_PAUSING)
def request_cancellation(liveaction, requester):
    """
    Request cancellation of an action execution.

    :param liveaction: LiveAction to cancel.
    :param requester: Name of the user requesting the cancellation.
    :return: (liveaction, execution)
    :rtype: tuple
    """
    if liveaction.status == action_constants.LIVEACTION_STATUS_CANCELING:
        # Cancellation already in progress. Return the documented
        # (liveaction, execution) tuple; previously this branch returned a
        # bare liveaction, breaking callers that unpack the result.
        execution = ActionExecution.get(liveaction__id=str(liveaction.id))
        return (liveaction, execution)
    if liveaction.status not in action_constants.LIVEACTION_CANCELABLE_STATES:
        raise Exception(
            'Unable to cancel liveaction "%s" because it is already in a '
            'completed state.' % liveaction.id
        )
    result = {
        'message': 'Action canceled by user.',
        'user': requester
    }
    # Run cancelation sequence for liveaction that is in running state or
    # if the liveaction is operating under a workflow.
    # NOTE: LIVEACTION_STATUS_RUNNING is a single status value; the previous
    # "status in LIVEACTION_STATUS_RUNNING" performed a substring match
    # (e.g. "run" would match "running"), so compare with == instead.
    if ('parent' in liveaction.context or
            liveaction.status == action_constants.LIVEACTION_STATUS_RUNNING):
        status = action_constants.LIVEACTION_STATUS_CANCELING
    else:
        status = action_constants.LIVEACTION_STATUS_CANCELED
    liveaction = update_status(liveaction, status, result=result)
    execution = ActionExecution.get(liveaction__id=str(liveaction.id))
    return (liveaction, execution)
def request_pause(liveaction, requester):
    """
    Request pause for a running action execution.

    :return: (liveaction, execution)
    :rtype: tuple
    """
    # Pause is only meaningful for workflow runners; validate that first.
    action_db = action_utils.get_action_by_ref(liveaction.action)
    if not action_db:
        raise ValueError(
            'Unable to pause liveaction "%s" because the action "%s" '
            'is not found.' % (liveaction.id, liveaction.action)
        )
    runner_name = action_db.runner_type['name']
    if runner_name not in action_constants.WORKFLOW_RUNNER_TYPES:
        raise runner_exc.InvalidActionRunnerOperationError(
            'Unable to pause liveaction "%s" because it is not supported by the '
            '"%s" runner.' % (liveaction.id, runner_name)
        )
    already_pausing_or_paused = (action_constants.LIVEACTION_STATUS_PAUSING,
                                 action_constants.LIVEACTION_STATUS_PAUSED)
    if liveaction.status in already_pausing_or_paused:
        # Pause was already requested (or completed); just report state.
        return (liveaction, ActionExecution.get(liveaction__id=str(liveaction.id)))
    if liveaction.status != action_constants.LIVEACTION_STATUS_RUNNING:
        raise runner_exc.UnexpectedActionExecutionStatusError(
            'Unable to pause liveaction "%s" because it is not in a running state.'
            % liveaction.id
        )
    liveaction = update_status(liveaction, action_constants.LIVEACTION_STATUS_PAUSING)
    return (liveaction, ActionExecution.get(liveaction__id=str(liveaction.id)))
def request_resume(liveaction, requester):
    """
    Request resume for a paused action execution.

    :return: (liveaction, execution)
    :rtype: tuple
    """
    # Resume is only meaningful for workflow runners; validate that first.
    action_db = action_utils.get_action_by_ref(liveaction.action)
    if not action_db:
        raise ValueError(
            'Unable to resume liveaction "%s" because the action "%s" '
            'is not found.' % (liveaction.id, liveaction.action)
        )
    runner_name = action_db.runner_type['name']
    if runner_name not in action_constants.WORKFLOW_RUNNER_TYPES:
        raise runner_exc.InvalidActionRunnerOperationError(
            'Unable to resume liveaction "%s" because it is not supported by the '
            '"%s" runner.' % (liveaction.id, runner_name)
        )
    if liveaction.status in (action_constants.LIVEACTION_STATUS_RUNNING,
                             action_constants.LIVEACTION_STATUS_RESUMING):
        # Already running or resuming; just report state.
        return (liveaction, ActionExecution.get(liveaction__id=str(liveaction.id)))
    if liveaction.status != action_constants.LIVEACTION_STATUS_PAUSED:
        raise runner_exc.UnexpectedActionExecutionStatusError(
            'Unable to resume liveaction "%s" because it is in "%s" state and '
            'not in "paused" state.' % (liveaction.id, liveaction.status)
        )
    liveaction = update_status(liveaction, action_constants.LIVEACTION_STATUS_RESUMING)
    return (liveaction, ActionExecution.get(liveaction__id=str(liveaction.id)))
def get_parent_liveaction(liveaction_db):
    """Get the liveaction for the parent workflow

    Useful for finding the parent workflow. Pass in any LiveActionDB instance,
    and this function will return the liveaction of the parent workflow, or
    None when the liveaction has no parent in its context.

    :param liveaction_db: The LiveActionDB instance for which to find the parent.
    :rtype: LiveActionDB
    """
    parent_info = liveaction_db.context.get('parent')
    if not parent_info:
        return None
    parent_execution_db = ActionExecution.get(id=parent_info['execution_id'])
    return LiveAction.get(id=parent_execution_db.liveaction['id'])
def get_parent_execution(execution_db):
    """Get the action execution for the parent workflow

    Useful for finding the parent workflow. Pass in any ActionExecutionDB
    instance, and this function will return the action execution of the
    parent workflow, or None when there is no parent.

    :param execution_db: The ActionExecutionDB instance for which to find the parent.
    :rtype: ActionExecutionDB
    """
    parent_id = execution_db.parent
    return ActionExecution.get(id=parent_id) if parent_id else None
def get_root_liveaction(liveaction_db):
    """Ascend until the root liveaction is found

    Useful for finding an original parent workflow. Pass in any LiveActionDB
    instance, and this function will eventually return the top-most
    liveaction, even if the two are one and the same. Implemented as a loop
    rather than recursion; the result is identical.

    :param liveaction_db: The LiveActionDB instance for which to find the root parent.
    :rtype: LiveActionDB
    """
    current = liveaction_db
    while True:
        parent = get_parent_liveaction(current)
        if not parent:
            return current
        current = parent
def get_root_execution(execution_db):
    """Ascend until the root action execution is found

    Useful for finding an original parent workflow. Pass in any
    ActionExecutionDB instance, and this function will eventually return the
    top-most action execution, even if the two are one and the same.
    Implemented as a loop rather than recursion; the result is identical.

    :param execution_db: The ActionExecutionDB instance for which to find the root parent.
    :rtype: ActionExecutionDB
    """
    current = execution_db
    while True:
        parent = get_parent_execution(current)
        if not parent:
            return current
        current = parent
def store_execution_output_data(execution_db, action_db, data, output_type='output',
                                timestamp=None):
    """
    Store output from an execution as a new document in the collection.

    :param output_type: Category of the output (defaults to 'output').
    :param timestamp: Optional timestamp; defaults to the current UTC time.
    """
    output_db = ActionExecutionOutputDB(
        execution_id=str(execution_db.id),
        action_ref=action_db.ref,
        # Falls back to 'unknown' when the action has no runner_type name.
        runner_ref=getattr(action_db, 'runner_type', {}).get('name', 'unknown'),
        timestamp=timestamp or date_utils.get_datetime_utc_now(),
        output_type=output_type,
        data=data)
    return ActionExecutionOutput.add_or_update(output_db, publish=True,
                                               dispatch_trigger=False)
def is_children_active(liveaction_id):
    """Return True when any child execution of the given workflow is still active.

    Non-workflow executions have no children and always return False.
    """
    execution_db = ActionExecution.get(liveaction__id=str(liveaction_id))
    if execution_db.runner['name'] not in action_constants.WORKFLOW_RUNNER_TYPES:
        return False
    inactive_statuses = (
        action_constants.LIVEACTION_COMPLETED_STATES +
        [action_constants.LIVEACTION_STATUS_PAUSED, action_constants.LIVEACTION_STATUS_PENDING]
    )
    children_execution_dbs = ActionExecution.query(parent=str(execution_db.id))
    # A child is active when its status is not one of the inactive ones.
    return any(child_exec_db.status not in inactive_statuses
               for child_exec_db in children_execution_dbs)
def _cleanup_liveaction(liveaction):
    """Delete the given liveaction, logging (but not raising) any failure.

    Used to roll back a partially created request; cleanup failures must not
    mask the original error, so exceptions are swallowed after logging.
    """
    try:
        LiveAction.delete(liveaction)
    except Exception:
        # A bare "except:" would also swallow SystemExit/KeyboardInterrupt;
        # catching Exception keeps the best-effort semantics without that.
        LOG.exception('Failed cleaning up LiveAction: %s.', liveaction)
def _is_notify_empty(notify_db):
"""
notify_db is considered to be empty if notify_db is None and neither
of on_complete, on_success and on_failure have values.
"""
if not notify_db:
return True
return not (notify_db.on_complete or notify_db.on_success or notify_db.on_failure)
| 36.327198 | 99 | 0.715154 |
027f64020ccf8265a99855eed8c2ac923dafcfab | 9,893 | py | Python | analyse-classifications.py | zooniverse/comet-hunters-volcrowe-experiment | 570233a2595a06367f234766d37e25584fb213bb | [
"Apache-2.0"
] | null | null | null | analyse-classifications.py | zooniverse/comet-hunters-volcrowe-experiment | 570233a2595a06367f234766d37e25584fb213bb | [
"Apache-2.0"
] | null | null | null | analyse-classifications.py | zooniverse/comet-hunters-volcrowe-experiment | 570233a2595a06367f234766d37e25584fb213bb | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
__author__ = 'alex'
from collections import OrderedDict
from datetime import datetime, date, timedelta
import unicodecsv as csv
import sys
import os
import time
import json
import numpy
import matplotlib.pyplot as plt
import pickle
# Sessions whose classification counts fall at or below/above these cutoffs
# are discarded as outliers before averages are computed.
OUTLIER_LOW_CUTOFF = 1
OUTLIER_HIGH_CUTOFF = 100
NUMBER_OF_HISTOGRAM_BINS = 25
# Pass "skip" as the first CLI argument to reuse the pickled results of a
# previous run ('temp-data.p') instead of re-scanning the CSV.
if len(sys.argv) > 1 and sys.argv[1]=="skip":
    skip_analysis=True
else:
    skip_analysis=False
def restart_line():
    """Return the cursor to the start of the current terminal line."""
    out = sys.stdout
    out.write('\r')
    out.flush()
def get_user_session_classification_counts(classifications_by_user_session):
    """Map each user-session id to the number of classifications recorded for it."""
    return dict((session_id, len(ids))
                for session_id, ids in classifications_by_user_session.items())
def get_headers_with_indices(headers):
    """Render the CSV column headers as a numbered, human-readable listing."""
    lines = ["Available columns:\n"]
    for index, header in enumerate(headers):
        lines.append(" %s: %s\n" % (index, header))
    return "".join(lines)
def get_field_list(field_array, column_index):
    """Render the metadata field names found in the JSON column as a listing."""
    header = ("Available metadata fields found (in JSON string in column %s):\n"
              % column_index)
    return header + "".join(" %s\n" % field for field in field_array)
def getWeekNumber(dateString):
    """Return the ISO week number for a 'YYYY-MM-DD' date string."""
    year, month, day = map(int, dateString.split("-"))
    return date(year, month, day).isocalendar()[1]
def averageLen(lst):
    """Return the mean length of the items in lst, or 0 for an empty list."""
    if not lst:
        return 0
    return float(sum(len(item) for item in lst)) / len(lst)
def get_user_session_id(user_name, session):
    """Build the combined '<user>-<session>' key used to group classifications."""
    return "{0}-{1}".format(user_name, session)
def get_nice_now():
    """Return the current local time formatted as HH:MM:SS."""
    return "{0:%H:%M:%S}".format(datetime.now())
# Main analysis pass: stream the classifications CSV once, accumulating
# per-session, per-day, per-week and per-user tallies, then print summary
# statistics and pickle the results for later "skip" runs.
if not skip_analysis:
    print "\nScanning classifications CSV (started at %s)...\n" % get_nice_now()
    classifications_analysed = 0
    filename = 'data/comet-hunters-classifications.csv'
    # First pass just counts data rows (minus the header) for progress output.
    total = sum(1 for line in open(filename)) - 1
    classifications = csv.reader(open(filename, 'rU'), dialect=csv.excel_tab, delimiter=',', quotechar='"')
    headers = classifications.next()
    metadata_field_index = headers.index("metadata")
    user_name_field_index = headers.index("user_name")
    classification_id_field_index = headers.index("classification_id")
    print get_headers_with_indices(headers)
    metadata_fields = []
    classifications_by_user_session = {}
    skipped_due_to_no_session_set = 0
    classifications_by_day = {}
    classifications_by_week = {}
    classifications_by_user = {}
    users_by_day = {}
    users_by_week = {}
    print "Total classifications (data rows) in CSV: %s\n" % total
    for classification in classifications:
        skip_this_one = False
        classifications_analysed += 1
        # Progress indicator, refreshed in place every 1000 rows.
        if classifications_analysed % 1000 == 0:
            restart_line()
            pc = int(100*(float(classifications_analysed)/float(total)))
            sys.stdout.write("%s - %s classifications examined (%s%%)..." % (get_nice_now(), classifications_analysed, pc))
        if metadata_field_index > 0:
            metadata = json.loads(classification[metadata_field_index])
            for field in metadata:
                if field not in metadata_fields:
                    metadata_fields.append(field)
            if "session" in metadata:
                session_id = metadata["session"]
            else:
                skipped_due_to_no_session_set += 1
                skip_this_one = True
            if "finished_at" in metadata:
                # Only the date part (YYYY-MM-DD) of the timestamp is used.
                finished_at = metadata["finished_at"][:10]
                date_of_this_classification = finished_at
                if date_of_this_classification in classifications_by_day:
                    classifications_by_day[date_of_this_classification] += 1
                else:
                    classifications_by_day[date_of_this_classification] = 1
                weekNum = getWeekNumber(finished_at)
                if weekNum in classifications_by_week:
                    classifications_by_week[weekNum] += 1
                else:
                    classifications_by_week[weekNum] = 1
                if user_name_field_index > -1:
                    user_name = classification[user_name_field_index]
                    if date_of_this_classification in users_by_day:
                        if user_name not in users_by_day[date_of_this_classification]:
                            users_by_day[date_of_this_classification].append(user_name)
                    else:
                        users_by_day[date_of_this_classification] = [user_name]
                    if weekNum in users_by_week:
                        if user_name not in users_by_week[weekNum]:
                            users_by_week[weekNum].append(user_name)
                    else:
                        users_by_week[weekNum] = [user_name]
                    if user_name not in classifications_by_user:
                        classifications_by_user[user_name] = 1
                    else:
                        classifications_by_user[user_name] += 1
        if not skip_this_one and classification_id_field_index > -1:
            if user_name_field_index > -1:
                user_name = classification[user_name_field_index]
            user_session_id = get_user_session_id(user_name, session_id)
            if user_session_id not in classifications_by_user_session:
                classifications_by_user_session[user_session_id]=[]
            classification_id = classification[classification_id_field_index]
            classifications_by_user_session[user_session_id].append(classification_id)
    if classifications_analysed < total:
        sys.stdout.flush()
    print "\n\nProcessed a total of %s classifications (Finished at %s).\n" % (classifications_analysed, get_nice_now())
    print get_field_list(metadata_fields, metadata_field_index)
    skipped_pc = float(skipped_due_to_no_session_set)/float(total)
    print "Classifications skipped due to no session set: %s [%s%% of total]\n" % (skipped_due_to_no_session_set, skipped_pc)
    classification_session_counts = get_user_session_classification_counts(classifications_by_user_session)
    original_no_of_user_sessions = len(classification_session_counts)
    # Drop outlier sessions at both ends before computing averages.
    low_ones = 0
    for user_session_id in classification_session_counts.keys():
        if classification_session_counts[user_session_id] <= OUTLIER_LOW_CUTOFF:
            low_ones += 1
            del classification_session_counts[user_session_id]
    big_ones = 0
    for user_session_id in classification_session_counts.keys():
        if classification_session_counts[user_session_id] >= OUTLIER_HIGH_CUTOFF:
            big_ones += 1
            del classification_session_counts[user_session_id]
    average_classifications_per_user_session = numpy.mean(classification_session_counts.values())
    max_classifications_per_user_session = numpy.max(classification_session_counts.values())
    no_of_user_sessions = len(classification_session_counts)
    average_classifications_per_day = numpy.mean(classifications_by_day.values())
    average_classifications_per_week = numpy.mean(classifications_by_week.values())
    average_users_per_day = averageLen(users_by_day.values())
    average_users_per_week = averageLen(users_by_week.values())
    average_classifications_per_user = numpy.mean(classifications_by_user.values())
    print "Determined classification counts per user session for %s user sessions from an initial %s ..." % \
          (no_of_user_sessions, original_no_of_user_sessions)
    print " - %s had less than or equal to %s classification(s) and were deleted as outliers." % (low_ones,
                                                                                        OUTLIER_LOW_CUTOFF)
    print " - %s had equal to or more than %s classifications and were deleted as outliers." % (big_ones,
                                                                                        OUTLIER_HIGH_CUTOFF)
    print " - of those remaining, the maximum session length was %s." % max_classifications_per_user_session
    print " - of those remaining, the average session length was %s." % average_classifications_per_user_session
    print " - of those remaining, the average classifications per day was %s." % average_classifications_per_day
    print " - of those remaining, the average classifications per week was %s." % average_classifications_per_week
    print " - of those remaining, the average users per day was %s." % average_users_per_day
    print " - of those remaining, the average users per week was %s." % average_users_per_week
    print " - of those remaining, the average classifications per user was %s." % average_classifications_per_user
    print "\nSaving analysis to file..."
    # Cache the expensive results so later runs can pass "skip" and reuse them.
    pickle.dump([classification_session_counts,max_classifications_per_user_session], open('temp-data.p', 'wb'))
# If the analysis pass above was skipped (CLI arg "skip"), reload the pickled
# results from the previous run before plotting.
if 'classification_session_counts' not in vars() and 'max_classifications_per_user_session' not in vars():
    print "Loading analysis from last time..."
    [classification_session_counts, max_classifications_per_user_session] = pickle.load(open('temp-data.p', "rb"))
print "\nWriting histogram to graphs/session-length-histogram.png ..."
# Bin width chosen so the full range fits in NUMBER_OF_HISTOGRAM_BINS bins.
step = int(float(max_classifications_per_user_session) / float(NUMBER_OF_HISTOGRAM_BINS))
bins = numpy.arange(0, max_classifications_per_user_session, step)
session_lengths = classification_session_counts.values()
plt.hist(session_lengths, bins=bins)
plt.xticks(bins)
locs, labels = plt.xticks()
# Rotate the tick labels so the bin edges remain readable.
plt.setp(labels, rotation=90)
plt.xlabel('Session Length', fontsize=16)
plt.ylabel('Number of User Sessions of this Length', fontsize=16)
plt.savefig('graphs/session-length-histogram.png')
plt.clf()
plt.close()
#wrfile = open("output/subjects_activity.csv", 'w')
#writer = csv.writer(wrfile, delimiter=',', quoting=csv.QUOTE_NONNUMERIC, dialect='excel', encoding='utf-8')
#writer.writerow(["Subject ID", "First Classification ID", "First Classification Date", "Activity Days"])
# write the subject activity to a csv file
#for subject in subject_activity:
#    row = [subject, subject_activity[subject]["first_classification_id"],
#           subject_activity[subject]["first_classification_date"],
#           ','.join([i.strftime('%d-%b-%Y') for i in subject_activity[subject]["active_days"]])]
#    outrow = []
#    for el in row:
#        if isinstance(el, str):
#            outrow.append(unicode(el.decode('utf-8')))
#        else:
#            outrow.append(el)
#        writer.writerow(outrow)
#wrfile.close()
print "\nDone.\n"
fcfcd47b360bbfa240f2edf927bc9b684203539d | 10,361 | py | Python | pokedex/db/multilang.py | ymin1103/pokedex | 18925edcd3ad71dd912a94ce4b1bc1435943937a | [
"MIT"
] | 1,177 | 2015-01-09T02:17:26.000Z | 2022-03-30T05:29:06.000Z | pokedex/db/multilang.py | ymin1103/pokedex | 18925edcd3ad71dd912a94ce4b1bc1435943937a | [
"MIT"
] | 224 | 2015-01-13T04:44:52.000Z | 2022-03-28T18:16:35.000Z | pokedex/db/multilang.py | ymin1103/pokedex | 18925edcd3ad71dd912a94ce4b1bc1435943937a | [
"MIT"
] | 810 | 2015-01-14T22:40:39.000Z | 2022-03-23T16:24:54.000Z | from sqlalchemy.ext.associationproxy import association_proxy, AssociationProxy
from sqlalchemy.orm import Query, mapper, relationship, synonym
from sqlalchemy.orm.collections import attribute_mapped_collection
from sqlalchemy.orm.scoping import ScopedSession
from sqlalchemy.orm.session import Session, object_session
from sqlalchemy.schema import Column, ForeignKey, Table
from sqlalchemy.sql.expression import and_, bindparam, select, exists
from sqlalchemy.sql.operators import ColumnOperators
from sqlalchemy.types import Integer
from pokedex.db import markdown
class LocalAssociationProxy(AssociationProxy, ColumnOperators):
    """An association proxy for names in the default language

    Over the regular association_proxy, this provides sorting and filtering
    capabilities, implemented via SQL subqueries.
    """
    def __clause_element__(self):
        # Correlated subquery selecting this row's translated value in the
        # default language; the '_default_language_id' bindparam is filled in
        # at execution time (see MultilangQuery below).
        q = select([self.remote_attr])
        q = q.where(self.target_class.foreign_id == self.owning_class.id)
        q = q.where(self.target_class.local_language_id == bindparam('_default_language_id'))
        return q
    def operate(self, op, *other, **kwargs):
        # Same subquery as above, but with the comparison pushed inside it and
        # wrapped in EXISTS so the proxy can be used directly in filter().
        q = select([self.remote_attr])
        q = q.where(self.target_class.foreign_id == self.owning_class.id)
        q = q.where(self.target_class.local_language_id == bindparam('_default_language_id'))
        q = q.where(op(self.remote_attr, *other))
        return exists(q)
def _getset_factory_factory(column_name, string_getter):
    """Hello! I am a factory for creating getset_factory functions for SQLA.

    I exist to avoid the closure-in-a-loop problem: each call binds its own
    column_name/string_getter instead of sharing late-bound loop variables.
    """
    def getset_factory(underlying_type, instance):
        def getter(translations):
            if translations is None:
                return None
            text = getattr(translations, column_name)
            if text is None:
                return text
            # Run the raw text through string_getter (e.g. Markdown wrapping)
            # using the row's own session and language.
            session = object_session(translations)
            language = translations.local_language
            return string_getter(text, session, language)
        def setter(translations, value):
            # The string must be set on the Translation directly.
            raise AttributeError("Cannot set %s" % column_name)
        return getter, setter
    return getset_factory
def create_translation_table(_table_name, foreign_class, relation_name,
    language_class, relation_lazy='select', **kwargs):
    """Creates a table that represents some kind of data attached to the given
    foreign class, but translated across several languages.  Returns the new
    table's mapped class.  It won't be declarative, but it will have a
    `__table__` attribute so you can retrieve the Table object.
    `foreign_class` must have a `__singlename__`, currently only used to create
    the name of the foreign key column.
    Also supports the notion of a default language, which is attached to the
    session.  This is English by default, for historical and practical reasons.
    Usage looks like this:
        class Foo(Base): ...
        create_translation_table('foo_bars', Foo, 'bars',
            name = Column(...),
        )
        # Now you can do the following:
        foo.name
        foo.name_map['en']
        foo.foo_bars['en']
        foo.name_map['en'] = "new name"
        del foo.name_map['en']
        q.options(joinedload(Foo.bars_local))
        q.options(joinedload(Foo.bars))
    The following properties are added to the passed class:
    - `(relation_name)`, a relation to the new table.  It uses a dict-based
      collection class, where the keys are language identifiers and the values
      are rows in the created tables.
    - `(relation_name)_local`, a relation to the row in the new table that
      matches the current default language.
    - `(relation_name)_table`, the class created by this function.
    Note that these are distinct relations.  Even though the former necessarily
    includes the latter, SQLAlchemy doesn't treat them as linked; loading one
    will not load the other.  Modifying both within the same transaction has
    undefined behavior.
    For each column provided, the following additional attributes are added to
    Foo:
    - `(column)_map`, an association proxy onto `foo_bars`.
    - `(column)`, an association proxy onto `foo_bars_local`.
    Pardon the naming disparity, but the grammar suffers otherwise.
    Modifying these directly is not likely to be a good idea.
    For Markdown-formatted columns, `(column)_map` and `(column)` will give
    Markdown objects.
    """
    # n.b.: language_class only exists for the sake of tests, which sometimes
    # want to create tables entirely separate from the pokedex metadata
    foreign_key_name = foreign_class.__singlename__ + '_id'
    # Note: this is a plain (non-declarative) class; the mapper is wired up
    # manually below.
    Translations = type(_table_name, (object,), {
        '_language_identifier': association_proxy('local_language', 'identifier'),
        'relation_name': relation_name,
        '__tablename__': _table_name,
    })
    # Create the table object
    table = Table(_table_name, foreign_class.__table__.metadata,
        Column(foreign_key_name, Integer, ForeignKey(foreign_class.id),
            primary_key=True, nullable=False,
            doc=u"ID of the %s these texts relate to" % foreign_class.__singlename__),
        Column('local_language_id', Integer, ForeignKey(language_class.id),
            primary_key=True, nullable=False,
            doc=u"Language these texts are in"),
    )
    Translations.__table__ = table
    # Add ye columns
    # Column objects have a _creation_order attribute in ascending order; use
    # this to get the (unordered) kwargs sorted correctly
    kwitems = list(kwargs.items())
    kwitems.sort(key=lambda kv: kv[1]._creation_order)
    for name, column in kwitems:
        column.name = name
        table.append_column(column)
    # Construct ye mapper
    mapper(Translations, table, properties={
        'foreign_id': synonym(foreign_key_name),
        'local_language': relationship(language_class,
            primaryjoin=table.c.local_language_id == language_class.id,
            innerjoin=True),
    })
    # Add full-table relations to the original class
    # Foo.bars_table
    setattr(foreign_class, relation_name + '_table', Translations)
    # Foo.bars
    setattr(foreign_class, relation_name, relationship(Translations,
        primaryjoin=foreign_class.id == Translations.foreign_id,
        collection_class=attribute_mapped_collection('local_language'),
    ))
    # Foo.bars_local
    # This is a bit clever; it uses bindparam() to make the join clause
    # modifiable on the fly. db sessions know the current language and
    # populate the bindparam.
    # The 'dummy' value is to trick SQLA; without it, SQLA thinks this
    # bindparam is just its own auto-generated clause and everything gets
    # fucked up.
    local_relation_name = relation_name + '_local'
    setattr(foreign_class, local_relation_name, relationship(Translations,
        primaryjoin=and_(
            Translations.foreign_id == foreign_class.id,
            Translations.local_language_id == bindparam('_default_language_id',
                value='dummy', type_=Integer, required=True),
        ),
        foreign_keys=[Translations.foreign_id, Translations.local_language_id],
        uselist=False,
        lazy=relation_lazy,
    ))
    # Add per-column proxies to the original class
    for name, column in kwitems:
        getset_factory = None
        string_getter = column.info.get('string_getter')
        if string_getter:
            getset_factory = _getset_factory_factory(
                column.name, string_getter)
        # Class.(column) -- accessor for the default language's value
        setattr(foreign_class, name,
            LocalAssociationProxy(local_relation_name, name,
                    getset_factory=getset_factory))
        # Class.(column)_map -- accessor for the language dict
        # Need a custom creator since Translations doesn't have an init, and
        # these are passed as *args anyway
        def creator(language, value):
            row = Translations()
            row.local_language = language
            setattr(row, name, value)
            return row
        setattr(foreign_class, name + '_map',
            association_proxy(relation_name, name, creator=creator,
                    getset_factory=getset_factory))
    # Add to the list of translation classes
    foreign_class.translation_classes.append(Translations)
    # Done
    return Translations
class MultilangQuery(Query):
    """Query subclass that fills in the `_default_language_id` bindparam
    (used by the `*_local` relations and LocalAssociationProxy) from the
    session's default language just before execution."""
    def _execute_and_instances(self, *args, **kwargs):
        # Set _default_language_id param if it hasn't been set by the time the query is executed.
        # XXX This is really hacky and we should figure out a cleaner method.
        if '_default_language_id' not in self._params or self._params['_default_language_id'] == 'dummy':
            self._params = self._params.copy()
            self._params['_default_language_id'] = self.session.default_language_id
        return super(MultilangQuery, self)._execute_and_instances(*args, **kwargs)
class MultilangSession(Session):
    """A tiny Session subclass that adds support for a default language.

    Needs to be used with `MultilangScopedSession`, below.
    """
    # Class-level defaults, overridable per instance via __init__ kwargs.
    default_language_id = None
    markdown_extension_class = markdown.PokedexLinkExtension
    def __init__(self, *args, **kwargs):
        # Pop our custom kwargs before delegating the rest to Session.
        if 'default_language_id' in kwargs:
            self.default_language_id = kwargs.pop('default_language_id')
        markdown_extension_class = kwargs.pop('markdown_extension_class',
                self.markdown_extension_class)
        self.markdown_extension = markdown_extension_class(self)
        # Use MultilangQuery by default so `_default_language_id` is populated.
        kwargs.setdefault('query_cls', MultilangQuery)
        super(MultilangSession, self).__init__(*args, **kwargs)
class MultilangScopedSession(ScopedSession):
    """Dispatches language selection to the attached Session."""
    @property
    def default_language_id(self):
        """Passes the new default language id through to the current session.
        """
        return self.registry().default_language_id
    @default_language_id.setter
    def default_language_id(self, new):
        # registry() yields the thread-local MultilangSession instance.
        self.registry().default_language_id = new
    @property
    def markdown_extension(self):
        # Read-only view of the current session's markdown extension.
        return self.registry().markdown_extension
bde08d0a22c5fdda5f8578c6dc7d32acda03daa9 | 86 | py | Python | supstream/__version__.py | yaniv-aknin/django-static-upstream | 6b829317db73fa1ca567711a4f0ae9eef9c09d7b | [
"MIT"
] | 1 | 2015-11-08T16:08:07.000Z | 2015-11-08T16:08:07.000Z | supstream/__version__.py | yaniv-aknin/django-static-upstream | 6b829317db73fa1ca567711a4f0ae9eef9c09d7b | [
"MIT"
] | null | null | null | supstream/__version__.py | yaniv-aknin/django-static-upstream | 6b829317db73fa1ca567711a4f0ae9eef9c09d7b | [
"MIT"
] | null | null | null | __version__=(0,1,1)
__version_str__=".".join(str(element) for element in __version__)
| 28.666667 | 65 | 0.767442 |
5dec95e34b1de527ac8fec74d79b3e373e50808f | 287 | py | Python | test.py | Asif-ak/Web-Scrapping-BeautifulSoup | 29506481ecca4dd0d036b4ac58be2fbaa0878c00 | [
"Unlicense"
] | null | null | null | test.py | Asif-ak/Web-Scrapping-BeautifulSoup | 29506481ecca4dd0d036b4ac58be2fbaa0878c00 | [
"Unlicense"
] | null | null | null | test.py | Asif-ak/Web-Scrapping-BeautifulSoup | 29506481ecca4dd0d036b4ac58be2fbaa0878c00 | [
"Unlicense"
] | null | null | null | import requests, bs4
a=requests.get('http://www.tsinghua.edu.cn/publish/hyen/1694/2011/20110113141747864991749/20110113141747864991749_.html')
#print(a.text)
soup=bs4.BeautifulSoup(a.text,'lxml')
title=soup.select('div')
#print(title)
for titles in title:
print(titles.text) | 31.888889 | 122 | 0.756098 |
8a454f3fdddb88be2df6a26b9077ff32387ad36c | 5,324 | py | Python | tools/filters/gff_to_bed_converter.py | blankenberg/galaxy-data-resource | ca32a1aafd64948f489a4e5cf88096f32391b1d9 | [
"CC-BY-3.0"
] | 2 | 2016-02-23T00:09:14.000Z | 2019-02-11T07:48:44.000Z | tools/filters/gff_to_bed_converter.py | blankenberg/galaxy-data-resource | ca32a1aafd64948f489a4e5cf88096f32391b1d9 | [
"CC-BY-3.0"
] | 1 | 2015-02-21T18:48:19.000Z | 2015-02-27T15:50:32.000Z | tools/filters/gff_to_bed_converter.py | blankenberg/galaxy-data-resource | ca32a1aafd64948f489a4e5cf88096f32391b1d9 | [
"CC-BY-3.0"
] | 6 | 2015-05-27T13:09:50.000Z | 2019-02-11T07:48:46.000Z | #!/usr/bin/env python
import sys
from galaxy import eggs
from galaxy.datatypes.util.gff_util import parse_gff_attributes
assert sys.version_info[:2] >= ( 2, 4 )
def get_bed_line( chrom, name, strand, blocks ):
    """ Returns a BED line for given data.

    blocks is a non-empty list of (start, end) coordinate pairs. A single
    block produces a simple 6-column BED line; multiple blocks produce a
    full 12-column line with thick/block fields.
    """
    if len( blocks ) == 1:
        # Use simple BED format if there is only a single block:
        #   chrom, chromStart, chromEnd, name, score, strand
        #
        start, end = blocks[0]
        return "%s\t%i\t%i\t%s\t0\t%s\n" % ( chrom, start, end, name, strand )

    #
    # Build lists for transcript blocks' starts, sizes.
    #

    # Get transcript start, end. Using min()/max() instead of a sentinel
    # scan avoids sys.maxint, which was removed in Python 3.
    t_start = min( block_start for block_start, block_end in blocks )
    t_end = max( block_end for block_start, block_end in blocks )

    # Get block starts, sizes (relative to the transcript start).
    block_starts = []
    block_sizes = []
    for block_start, block_end in blocks:
        block_starts.append( str( block_start - t_start ) )
        block_sizes.append( str( block_end - block_start ) )

    #
    # Create BED entry.
    # Bed format: chrom, chromStart, chromEnd, name, score, strand, \
    #               thickStart, thickEnd, itemRgb, blockCount, blockSizes, blockStarts
    #
    # Render complete feature with thick blocks. There's no clear way to do this unless
    # we analyze the block names, but making everything thick makes more sense than
    # making everything thin.
    #
    return "%s\t%i\t%i\t%s\t0\t%s\t%i\t%i\t0\t%i\t%s\t%s\n" % \
        ( chrom, t_start, t_end, name, strand, t_start, t_end, len( block_starts ),
          ",".join( block_sizes ), ",".join( block_starts ) )
def __main__():
    """Convert a GFF file (argv[1]) to BED (argv[2]).

    Lines that share a ``transcript_id`` attribute are accumulated and
    emitted as a single multi-block BED entry; lines without a transcript
    id become stand-alone BED entries. Blank, comment, and malformed lines
    are counted and skipped.
    """
    input_name = sys.argv[1]
    output_name = sys.argv[2]
    skipped_lines = 0
    first_skipped_line = 0
    out = open( output_name, 'w' )
    i = 0
    # State for the transcript currently being accumulated.
    # (The original code initialised `cur_transcript_chrom` but assigned to a
    # misspelled `cur_transcript_chrome` everywhere else; unified here.)
    cur_transcript_chrom = None
    cur_transcript_id = None
    cur_transcript_strand = None
    cur_transcripts_blocks = [] # (start, end) for each block.
    # NOTE: open()/int()/print() replace the Python-2-only file()/long()/print
    # statement; behavior is unchanged on Python 2 and this now runs on Python 3.
    for i, line in enumerate( open( input_name ) ):
        line = line.rstrip( '\r\n' )
        if line and not line.startswith( '#' ):
            try:
                # GFF format: chrom source, name, chromStart, chromEnd, score, strand, attributes
                elems = line.split( '\t' )
                # GFF coordinates are 1-based inclusive; BED is 0-based half-open.
                start = str( int( elems[3] ) - 1 )
                coords = [ int( start ), int( elems[4] ) ]
                strand = elems[6]
                if strand not in ['+', '-']:
                    strand = '+'
                attributes = parse_gff_attributes( elems[8] )
                t_id = attributes.get( "transcript_id", None )

                if not t_id:
                    #
                    # No transcript ID, so write last transcript and write current line as its own line.
                    #

                    # Write previous transcript.
                    if cur_transcript_id:
                        # Write BED entry.
                        out.write( get_bed_line( cur_transcript_chrom, cur_transcript_id, cur_transcript_strand, cur_transcripts_blocks ) )
                        # Bug fix: reset state after flushing; previously the
                        # transcript was re-emitted for every subsequent line
                        # that lacked a transcript_id.
                        cur_transcript_chrom = None
                        cur_transcript_id = None
                        cur_transcript_strand = None
                        cur_transcripts_blocks = []

                    # Replace any spaces in the name with underscores so UCSC will not complain.
                    name = elems[2].replace(" ", "_")
                    out.write( get_bed_line( elems[0], name, strand, [ coords ] ) )
                    continue

                # There is a transcript ID, so process line at transcript level.
                if t_id == cur_transcript_id:
                    # Line is element of transcript and will be a block in the BED entry.
                    cur_transcripts_blocks.append( coords )
                    continue

                #
                # Line is part of new transcript; write previous transcript and start
                # new transcript.
                #

                # Write previous transcript.
                if cur_transcript_id:
                    # Write BED entry.
                    out.write( get_bed_line( cur_transcript_chrom, cur_transcript_id, cur_transcript_strand, cur_transcripts_blocks ) )

                # Start new transcript.
                cur_transcript_chrom = elems[0]
                cur_transcript_id = t_id
                cur_transcript_strand = strand
                cur_transcripts_blocks = []
                cur_transcripts_blocks.append( coords )
            except:
                # Deliberately broad: any malformed line is counted and skipped
                # rather than aborting the whole conversion.
                skipped_lines += 1
                if not first_skipped_line:
                    first_skipped_line = i + 1
        else:
            skipped_lines += 1
            if not first_skipped_line:
                first_skipped_line = i + 1

    # Write last transcript.
    if cur_transcript_id:
        # Write BED entry.
        out.write( get_bed_line( cur_transcript_chrom, cur_transcript_id, cur_transcript_strand, cur_transcripts_blocks ) )
    out.close()
    info_msg = "%i lines converted to BED.  " % ( i + 1 - skipped_lines )
    if skipped_lines > 0:
        info_msg += "Skipped %d blank/comment/invalid lines starting with line #%d." % ( skipped_lines, first_skipped_line )
    print(info_msg)

if __name__ == "__main__": __main__()
| 39.731343 | 140 | 0.552216 |
8866874dd908014dc19fe9495e196323ce8a3015 | 435 | py | Python | telegram_gcloner/utils/callback.py | renanjsilv/CloneBot_Heroku | fef15f3bc66c1fbbdd69df2cfb10461691d17dbe | [
"MIT"
] | null | null | null | telegram_gcloner/utils/callback.py | renanjsilv/CloneBot_Heroku | fef15f3bc66c1fbbdd69df2cfb10461691d17dbe | [
"MIT"
] | null | null | null | telegram_gcloner/utils/callback.py | renanjsilv/CloneBot_Heroku | fef15f3bc66c1fbbdd69df2cfb10461691d17dbe | [
"MIT"
] | null | null | null | #!/usr/bin/python3
# -*- coding: utf-8 -*-
import logging
from telegram.ext import CallbackContext
logger = logging.getLogger(__name__)
def callback_delete_message(context: CallbackContext):
    """Job callback that removes a previously sent Telegram message.

    Expects ``context.job.context`` to be a ``(chat_id, message_id)`` tuple.
    Deletion failures are logged and otherwise ignored (best effort).
    """
    chat_id, message_id = context.job.context
    try:
        context.bot.delete_message(chat_id=chat_id, message_id=message_id)
    except Exception as exc:
        logger.warning('não pode excluir mensagem {}: {}'.format(message_id, exc))
| 27.1875 | 80 | 0.724138 |
a15294df5789e3065ddb49b0439c31a92e918c6d | 1,627 | py | Python | backwork/backup.py | Bhaskers-Blu-Org1/backwork | b963845e1f17baa81e3697be838dc245dc6c0b48 | [
"Apache-2.0"
] | 2 | 2019-09-16T04:18:40.000Z | 2020-02-06T15:23:20.000Z | backwork/backup.py | IBM/backwork | b963845e1f17baa81e3697be838dc245dc6c0b48 | [
"Apache-2.0"
] | null | null | null | backwork/backup.py | IBM/backwork | b963845e1f17baa81e3697be838dc245dc6c0b48 | [
"Apache-2.0"
] | 2 | 2019-11-02T15:07:13.000Z | 2020-06-29T14:49:02.000Z | """Handle backup subcommand.
Backup commands should receive an argument specifying where to store the backup
file. It should also provide the options to archive, compress and timestamp the
backup into a single file to facilitate storage.
"""
import os
from .lib import utils
__all__ = ["parse_args", "backup", "BackupError"]
CURRENT_PATH = os.path.dirname(os.path.realpath(__file__))
ENGINES = utils.load_engines("backwork.backups")
def parse_args(subparsers):
    """Register the ``backup`` sub-command and every engine's own parser."""
    backup_parser = subparsers.add_parser("backup",
                                          description="""Perform database
                                          backups. Run `backwork backup
                                          {database_type} -h` for more details
                                          on each supported database.""")

    backup_parser.add_argument("-U", "--upload", action="store_true",
                               help="""output backup data to stdout to allow
                               piping it to an upload command""")

    # Each loaded engine contributes its own sub-parser under `backup`.
    engine_subparsers = backup_parser.add_subparsers(dest="type")
    for engine_class in ENGINES.values():
        engine_class.parse_args(engine_subparsers)
def backup(args, extra):
    """Invoke the backup method from the specified database type.

    Raises BackupError when ``args.type`` names no registered engine.
    """
    engine = ENGINES.get(args.type, None)

    if engine is None:
        # Bug fix: the message was previously passed logging-style, i.e.
        # BackupError("...%s...", args.type), leaving the '%s' unformatted.
        raise BackupError("Backup method '%s' not found." % args.type)

    engine(args, extra).backup()
class BackupError(Exception):
    """Exception raised by backup engines when a backup cannot be performed."""
| 36.977273 | 79 | 0.630608 |
97bbce257c198ea53a75f6da805d4fc7d54108a5 | 17,766 | py | Python | allel/stats/mendel.py | smbadiwe/scikit-allel | 4432362fc2dea5706ad358f6b4bab4186fb70a60 | [
"MIT"
] | 1 | 2020-06-29T15:29:05.000Z | 2020-06-29T15:29:05.000Z | allel/stats/mendel.py | smbadiwe/scikit-allel | 4432362fc2dea5706ad358f6b4bab4186fb70a60 | [
"MIT"
] | null | null | null | allel/stats/mendel.py | smbadiwe/scikit-allel | 4432362fc2dea5706ad358f6b4bab4186fb70a60 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
import numpy as np
from allel.compat import memoryview_safe
from allel.model.ndarray import GenotypeArray, HaplotypeArray
from allel.util import check_ploidy, check_min_samples, check_type, check_dtype
from allel.opt.stats import phase_progeny_by_transmission as _opt_phase_progeny_by_transmission, \
phase_parents_by_transmission as _opt_phase_parents_by_transmission
def mendel_errors(parent_genotypes, progeny_genotypes):
    """Locate genotype calls not consistent with Mendelian transmission of
    alleles.

    Parameters
    ----------
    parent_genotypes : array_like, int, shape (n_variants, 2, 2)
        Genotype calls for the two parents.
    progeny_genotypes : array_like, int, shape (n_variants, n_progeny, 2)
        Genotype calls for the progeny.

    Returns
    -------
    me : ndarray, int, shape (n_variants, n_progeny)
        Count of Mendel errors for each progeny genotype call.

    Examples
    --------
    The following are all consistent with Mendelian transmission. Note that a
    value of 0 is returned for missing calls::

        >>> import allel
        >>> import numpy as np
        >>> genotypes = np.array([
        ...     # aa x aa -> aa
        ...     [[0, 0], [0, 0], [0, 0], [-1, -1], [-1, -1], [-1, -1]],
        ...     [[1, 1], [1, 1], [1, 1], [-1, -1], [-1, -1], [-1, -1]],
        ...     [[2, 2], [2, 2], [2, 2], [-1, -1], [-1, -1], [-1, -1]],
        ...     # aa x ab -> aa or ab
        ...     [[0, 0], [0, 1], [0, 0], [0, 1], [-1, -1], [-1, -1]],
        ...     [[0, 0], [0, 2], [0, 0], [0, 2], [-1, -1], [-1, -1]],
        ...     [[1, 1], [0, 1], [1, 1], [0, 1], [-1, -1], [-1, -1]],
        ...     # aa x bb -> ab
        ...     [[0, 0], [1, 1], [0, 1], [-1, -1], [-1, -1], [-1, -1]],
        ...     [[0, 0], [2, 2], [0, 2], [-1, -1], [-1, -1], [-1, -1]],
        ...     [[1, 1], [2, 2], [1, 2], [-1, -1], [-1, -1], [-1, -1]],
        ...     # aa x bc -> ab or ac
        ...     [[0, 0], [1, 2], [0, 1], [0, 2], [-1, -1], [-1, -1]],
        ...     [[1, 1], [0, 2], [0, 1], [1, 2], [-1, -1], [-1, -1]],
        ...     # ab x ab -> aa or ab or bb
        ...     [[0, 1], [0, 1], [0, 0], [0, 1], [1, 1], [-1, -1]],
        ...     [[1, 2], [1, 2], [1, 1], [1, 2], [2, 2], [-1, -1]],
        ...     [[0, 2], [0, 2], [0, 0], [0, 2], [2, 2], [-1, -1]],
        ...     # ab x bc -> ab or ac or bb or bc
        ...     [[0, 1], [1, 2], [0, 1], [0, 2], [1, 1], [1, 2]],
        ...     [[0, 1], [0, 2], [0, 0], [0, 1], [0, 1], [1, 2]],
        ...     # ab x cd -> ac or ad or bc or bd
        ...     [[0, 1], [2, 3], [0, 2], [0, 3], [1, 2], [1, 3]],
        ... ])
        >>> me = allel.mendel_errors(genotypes[:, :2], genotypes[:, 2:])
        >>> me
        array([[0, 0, 0, 0],
               [0, 0, 0, 0],
               [0, 0, 0, 0],
               [0, 0, 0, 0],
               [0, 0, 0, 0],
               [0, 0, 0, 0],
               [0, 0, 0, 0],
               [0, 0, 0, 0],
               [0, 0, 0, 0],
               [0, 0, 0, 0],
               [0, 0, 0, 0],
               [0, 0, 0, 0],
               [0, 0, 0, 0],
               [0, 0, 0, 0],
               [0, 0, 0, 0],
               [0, 0, 0, 0],
               [0, 0, 0, 0]])

    The following are cases of 'non-parental' inheritance where one or two
    alleles are found in the progeny that are not present in either parent.
    Note that the number of errors may be 1 or 2 depending on the number of
    non-parental alleles::

        >>> genotypes = np.array([
        ...     # aa x aa -> ab or ac or bb or cc
        ...     [[0, 0], [0, 0], [0, 1], [0, 2], [1, 1], [2, 2]],
        ...     [[1, 1], [1, 1], [0, 1], [1, 2], [0, 0], [2, 2]],
        ...     [[2, 2], [2, 2], [0, 2], [1, 2], [0, 0], [1, 1]],
        ...     # aa x ab -> ac or bc or cc
        ...     [[0, 0], [0, 1], [0, 2], [1, 2], [2, 2], [2, 2]],
        ...     [[0, 0], [0, 2], [0, 1], [1, 2], [1, 1], [1, 1]],
        ...     [[1, 1], [0, 1], [1, 2], [0, 2], [2, 2], [2, 2]],
        ...     # aa x bb -> ac or bc or cc
        ...     [[0, 0], [1, 1], [0, 2], [1, 2], [2, 2], [2, 2]],
        ...     [[0, 0], [2, 2], [0, 1], [1, 2], [1, 1], [1, 1]],
        ...     [[1, 1], [2, 2], [0, 1], [0, 2], [0, 0], [0, 0]],
        ...     # ab x ab -> ac or bc or cc
        ...     [[0, 1], [0, 1], [0, 2], [1, 2], [2, 2], [2, 2]],
        ...     [[0, 2], [0, 2], [0, 1], [1, 2], [1, 1], [1, 1]],
        ...     [[1, 2], [1, 2], [0, 1], [0, 2], [0, 0], [0, 0]],
        ...     # ab x bc -> ad or bd or cd or dd
        ...     [[0, 1], [1, 2], [0, 3], [1, 3], [2, 3], [3, 3]],
        ...     [[0, 1], [0, 2], [0, 3], [1, 3], [2, 3], [3, 3]],
        ...     [[0, 2], [1, 2], [0, 3], [1, 3], [2, 3], [3, 3]],
        ...     # ab x cd -> ae or be or ce or de
        ...     [[0, 1], [2, 3], [0, 4], [1, 4], [2, 4], [3, 4]],
        ... ])
        >>> me = allel.mendel_errors(genotypes[:, :2], genotypes[:, 2:])
        >>> me
        array([[1, 1, 2, 2],
               [1, 1, 2, 2],
               [1, 1, 2, 2],
               [1, 1, 2, 2],
               [1, 1, 2, 2],
               [1, 1, 2, 2],
               [1, 1, 2, 2],
               [1, 1, 2, 2],
               [1, 1, 2, 2],
               [1, 1, 2, 2],
               [1, 1, 2, 2],
               [1, 1, 2, 2],
               [1, 1, 1, 2],
               [1, 1, 1, 2],
               [1, 1, 1, 2],
               [1, 1, 1, 1]])

    The following are cases of 'hemi-parental' inheritance, where progeny
    appear to have inherited two copies of an allele found only once in one of
    the parents::

        >>> genotypes = np.array([
        ...     # aa x ab -> bb
        ...     [[0, 0], [0, 1], [1, 1], [-1, -1]],
        ...     [[0, 0], [0, 2], [2, 2], [-1, -1]],
        ...     [[1, 1], [0, 1], [0, 0], [-1, -1]],
        ...     # ab x bc -> aa or cc
        ...     [[0, 1], [1, 2], [0, 0], [2, 2]],
        ...     [[0, 1], [0, 2], [1, 1], [2, 2]],
        ...     [[0, 2], [1, 2], [0, 0], [1, 1]],
        ...     # ab x cd -> aa or bb or cc or dd
        ...     [[0, 1], [2, 3], [0, 0], [1, 1]],
        ...     [[0, 1], [2, 3], [2, 2], [3, 3]],
        ... ])
        >>> me = allel.mendel_errors(genotypes[:, :2], genotypes[:, 2:])
        >>> me
        array([[1, 0],
               [1, 0],
               [1, 0],
               [1, 1],
               [1, 1],
               [1, 1],
               [1, 1],
               [1, 1]])

    The following are cases of 'uni-parental' inheritance, where progeny
    appear to have inherited both alleles from a single parent::

        >>> genotypes = np.array([
        ...     # aa x bb -> aa or bb
        ...     [[0, 0], [1, 1], [0, 0], [1, 1]],
        ...     [[0, 0], [2, 2], [0, 0], [2, 2]],
        ...     [[1, 1], [2, 2], [1, 1], [2, 2]],
        ...     # aa x bc -> aa or bc
        ...     [[0, 0], [1, 2], [0, 0], [1, 2]],
        ...     [[1, 1], [0, 2], [1, 1], [0, 2]],
        ...     # ab x cd -> ab or cd
        ...     [[0, 1], [2, 3], [0, 1], [2, 3]],
        ... ])
        >>> me = allel.mendel_errors(genotypes[:, :2], genotypes[:, 2:])
        >>> me
        array([[1, 1],
               [1, 1],
               [1, 1],
               [1, 1],
               [1, 1],
               [1, 1]])

    """

    # setup
    parent_genotypes = GenotypeArray(parent_genotypes)
    progeny_genotypes = GenotypeArray(progeny_genotypes)
    check_ploidy(parent_genotypes.ploidy, 2)
    check_ploidy(progeny_genotypes.ploidy, 2)

    # transform into per-call allele counts
    max_allele = max(parent_genotypes.max(), progeny_genotypes.max())
    parent_gc = parent_genotypes.to_allele_counts(max_allele=max_allele, dtype='i1')
    progeny_gc = progeny_genotypes.to_allele_counts(max_allele=max_allele, dtype='i1')

    # detect nonparental and hemiparental inheritance by comparing allele
    # counts between parents and progeny
    # clip(max=1) marks which alleles each parent carries at all; summing over
    # the two parents gives, per allele, the maximum number of copies a
    # progeny could legitimately inherit (one from each parent).
    max_progeny_gc = parent_gc.clip(max=1).sum(axis=1)
    # insert a progeny axis so this broadcasts against progeny_gc
    max_progeny_gc = max_progeny_gc[:, np.newaxis, :]
    # any excess copies in the progeny are Mendel errors
    me = (progeny_gc - max_progeny_gc).clip(min=0).sum(axis=2)

    # detect uniparental inheritance by finding cases where no alleles are
    # shared between parents, then comparing progeny allele counts to each
    # parent
    p1_gc = parent_gc[:, 0, np.newaxis, :]
    p2_gc = parent_gc[:, 1, np.newaxis, :]

    # find variants where parents don't share any alleles
    is_shared_allele = (p1_gc > 0) & (p2_gc > 0)
    no_shared_alleles = ~np.any(is_shared_allele, axis=2)

    # find calls where progeny genotype is identical to one or the other parent
    me[no_shared_alleles &
       (np.all(progeny_gc == p1_gc, axis=2) |
        np.all(progeny_gc == p2_gc, axis=2))] = 1

    # retrofit where either or both parent has a missing call
    # (errors cannot be assessed without both parental genotypes)
    me[np.any(parent_genotypes.is_missing(), axis=1)] = 0

    return me
# constants to represent inheritance states
# (values 1-7 match the coding documented in paint_transmission below)
INHERIT_UNDETERMINED = 0
INHERIT_PARENT1 = 1          # allele inherited from first parental haplotype
INHERIT_PARENT2 = 2          # allele inherited from second parental haplotype
INHERIT_NONSEG_REF = 3       # reference allele, carried by both parental haplotypes
INHERIT_NONSEG_ALT = 4       # non-reference allele, carried by both parental haplotypes
INHERIT_NONPARENTAL = 5      # allele not carried by either parental haplotype
INHERIT_PARENT_MISSING = 6   # either or both parental alleles missing
INHERIT_MISSING = 7          # progeny allele missing
def paint_transmission(parent_haplotypes, progeny_haplotypes):
    """Paint haplotypes inherited from a single diploid parent according to
    their allelic inheritance.

    Parameters
    ----------
    parent_haplotypes : array_like, int, shape (n_variants, 2)
        Both haplotypes from a single diploid parent.
    progeny_haplotypes : array_like, int, shape (n_variants, n_progeny)
        Haplotypes found in progeny of the given parent, inherited from the
        given parent. I.e., haplotypes from gametes of the given parent.

    Returns
    -------
    painting : ndarray, uint8, shape (n_variants, n_progeny)
        An array of integers coded as follows: 1 = allele inherited from
        first parental haplotype; 2 = allele inherited from second parental
        haplotype; 3 = reference allele, also carried by both parental
        haplotypes; 4 = non-reference allele, also carried by both parental
        haplotypes; 5 = non-parental allele; 6 = either or both parental
        alleles missing; 7 = missing allele; 0 = undetermined.

    Examples
    --------
    >>> import allel
    >>> haplotypes = allel.HaplotypeArray([
    ...     [0, 0, 0, 1, 2, -1],
    ...     [0, 1, 0, 1, 2, -1],
    ...     [1, 0, 0, 1, 2, -1],
    ...     [1, 1, 0, 1, 2, -1],
    ...     [0, 2, 0, 1, 2, -1],
    ...     [0, -1, 0, 1, 2, -1],
    ...     [-1, 1, 0, 1, 2, -1],
    ...     [-1, -1, 0, 1, 2, -1],
    ... ], dtype='i1')
    >>> painting = allel.paint_transmission(haplotypes[:, :2],
    ...                                     haplotypes[:, 2:])
    >>> painting
    array([[3, 5, 5, 7],
           [1, 2, 5, 7],
           [2, 1, 5, 7],
           [5, 4, 5, 7],
           [1, 5, 2, 7],
           [6, 6, 6, 7],
           [6, 6, 6, 7],
           [6, 6, 6, 7]], dtype=uint8)

    """

    # check inputs
    parent_haplotypes = HaplotypeArray(parent_haplotypes)
    progeny_haplotypes = HaplotypeArray(progeny_haplotypes)
    if parent_haplotypes.n_haplotypes != 2:
        raise ValueError('exactly two parental haplotypes should be provided')

    # convenience variables
    # np.newaxis keeps a trailing axis so each parent column broadcasts
    # against the (n_variants, n_progeny) progeny array
    parent1 = parent_haplotypes[:, 0, np.newaxis]
    parent2 = parent_haplotypes[:, 1, np.newaxis]
    progeny_is_missing = progeny_haplotypes < 0
    parent_is_missing = np.any(parent_haplotypes < 0, axis=1)
    # need this for broadcasting, but also need to retain original for later
    parent_is_missing_bc = parent_is_missing[:, np.newaxis]
    # view the two parental haplotypes as a single diploid genotype so the
    # hom/het predicates of GenotypeArray can be reused
    parent_diplotype = GenotypeArray(parent_haplotypes[:, np.newaxis, :])
    parent_is_hom_ref = parent_diplotype.is_hom_ref()
    parent_is_het = parent_diplotype.is_het()
    parent_is_hom_alt = parent_diplotype.is_hom_alt()

    # identify allele calls where inheritance can be determined
    is_callable = ~progeny_is_missing & ~parent_is_missing_bc
    is_callable_seg = is_callable & parent_is_het

    # main inheritance states
    inherit_parent1 = is_callable_seg & (progeny_haplotypes == parent1)
    inherit_parent2 = is_callable_seg & (progeny_haplotypes == parent2)
    nonseg_ref = (is_callable & parent_is_hom_ref & (progeny_haplotypes == parent1))
    nonseg_alt = (is_callable & parent_is_hom_alt & (progeny_haplotypes == parent1))
    nonparental = (
        is_callable & (progeny_haplotypes != parent1) & (progeny_haplotypes != parent2)
    )

    # record inheritance states
    # N.B., order in which these are set matters: later assignments overwrite
    # earlier ones, so the missing-data states take precedence over the
    # inheritance states derived from callable positions
    painting = np.zeros(progeny_haplotypes.shape, dtype='u1')
    painting[inherit_parent1] = INHERIT_PARENT1
    painting[inherit_parent2] = INHERIT_PARENT2
    painting[nonseg_ref] = INHERIT_NONSEG_REF
    painting[nonseg_alt] = INHERIT_NONSEG_ALT
    painting[nonparental] = INHERIT_NONPARENTAL
    painting[parent_is_missing] = INHERIT_PARENT_MISSING
    painting[progeny_is_missing] = INHERIT_MISSING

    return painting
def phase_progeny_by_transmission(g):
    """Phase progeny genotypes from a trio or cross using Mendelian
    transmission.

    Parameters
    ----------
    g : array_like, int, shape (n_variants, n_samples, 2)
        Genotype array, with parents as first two columns and progeny as
        remaining columns.

    Returns
    -------
    g : ndarray, int8, shape (n_variants, n_samples, 2)
        Genotype array with progeny phased where possible.

    Examples
    --------
    >>> import allel
    >>> g = allel.GenotypeArray([
    ...     [[0, 0], [0, 0], [0, 0]],
    ...     [[1, 1], [1, 1], [1, 1]],
    ...     [[0, 0], [1, 1], [0, 1]],
    ...     [[1, 1], [0, 0], [0, 1]],
    ...     [[0, 0], [0, 1], [0, 0]],
    ...     [[0, 0], [0, 1], [0, 1]],
    ...     [[0, 1], [0, 0], [0, 1]],
    ...     [[0, 1], [0, 1], [0, 1]],
    ...     [[0, 1], [1, 2], [0, 1]],
    ...     [[1, 2], [0, 1], [1, 2]],
    ...     [[0, 1], [2, 3], [0, 2]],
    ...     [[2, 3], [0, 1], [1, 3]],
    ...     [[0, 0], [0, 0], [-1, -1]],
    ...     [[0, 0], [0, 0], [1, 1]],
    ... ], dtype='i1')
    >>> g = allel.phase_progeny_by_transmission(g)
    >>> print(g.to_str(row_threshold=None))
    0/0 0/0 0|0
    1/1 1/1 1|1
    0/0 1/1 0|1
    1/1 0/0 1|0
    0/0 0/1 0|0
    0/0 0/1 0|1
    0/1 0/0 1|0
    0/1 0/1 0/1
    0/1 1/2 0|1
    1/2 0/1 2|1
    0/1 2/3 0|2
    2/3 0/1 3|1
    0/0 0/0 ./.
    0/0 0/0 1/1
    >>> g.is_phased
    array([[False, False,  True],
           [False, False,  True],
           [False, False,  True],
           [False, False,  True],
           [False, False,  True],
           [False, False,  True],
           [False, False,  True],
           [False, False, False],
           [False, False,  True],
           [False, False,  True],
           [False, False,  True],
           [False, False,  True],
           [False, False, False],
           [False, False, False]])

    """

    # setup
    # copy so the caller's array is never mutated; int8 presumably matches
    # what the compiled phasing routine expects — TODO confirm
    g = GenotypeArray(g, dtype='i1', copy=True)
    check_ploidy(g.ploidy, 2)
    check_min_samples(g.n_samples, 3)

    # run the phasing
    # N.B., a copy has already been made, so no need to make memoryview safe
    is_phased = _opt_phase_progeny_by_transmission(g.values)
    g.is_phased = np.asarray(is_phased).view(bool)

    # outputs
    return g
def phase_parents_by_transmission(g, window_size):
    """Phase parent genotypes from a trio or cross, given progeny genotypes
    already phased by Mendelian transmission.

    Parameters
    ----------
    g : GenotypeArray
        Genotype array, with parents as first two columns and progeny as
        remaining columns, where progeny genotypes are already phased.
    window_size : int
        Number of previous heterozygous sites to include when phasing each
        parent. A number somewhere between 10 and 100 may be appropriate,
        depending on levels of heterozygosity and quality of data.

    Returns
    -------
    g : GenotypeArray
        Genotype array with parents phased where possible.

    N.B., the input array is modified in place; the returned object is the
    same array that was passed in.
    """

    # setup
    check_type(g, GenotypeArray)
    check_dtype(g.values, 'i1')
    check_ploidy(g.ploidy, 2)
    if g.is_phased is None:
        raise ValueError('genotype array must first have progeny phased by transmission')
    check_min_samples(g.n_samples, 3)

    # run the phasing
    # replace the underlying buffers with memoryview-safe equivalents before
    # handing them to the compiled routine, which mutates them in place
    g._values = memoryview_safe(g.values)
    g._is_phased = memoryview_safe(g.is_phased)
    _opt_phase_parents_by_transmission(g.values, g.is_phased.view('u1'), window_size)

    # outputs
    return g
def phase_by_transmission(g, window_size, copy=True):
    """Phase genotypes in a trio or cross where possible using Mendelian
    transmission.

    Parameters
    ----------
    g : array_like, int, shape (n_variants, n_samples, 2)
        Genotype array, with parents as first two columns and progeny as
        remaining columns.
    window_size : int
        Number of previous heterozygous sites to include when phasing each
        parent. A number somewhere between 10 and 100 may be appropriate,
        depending on levels of heterozygosity and quality of data.
    copy : bool, optional
        If False, attempt to phase genotypes in-place. Note that this is
        only possible if the input array has int8 dtype, otherwise a copy is
        always made regardless of this parameter.

    Returns
    -------
    g : GenotypeArray
        Genotype array with progeny phased where possible.

    """

    # setup
    g = np.asarray(g, dtype='i1')
    g = GenotypeArray(g, copy=copy)
    g._values = memoryview_safe(g.values)
    check_ploidy(g.ploidy, 2)
    check_min_samples(g.n_samples, 3)

    # phase the progeny
    is_phased = _opt_phase_progeny_by_transmission(g.values)
    g.is_phased = np.asarray(is_phased).view(bool)

    # phase the parents
    # N.B., is_phased and g.is_phased share memory (view), so in-place
    # updates made by the parent-phasing routine propagate to g.is_phased
    _opt_phase_parents_by_transmission(g.values, is_phased, window_size)

    return g
| 36.782609 | 98 | 0.507205 |
38a8ec426f0282fdef51946b6d0014e942969be9 | 12,183 | py | Python | Value_Based/C51/C51_1dim/agent.py | kyunghoon-jung/MacaronRL | b95be35fc95be7eb5aede2315a714984b282587a | [
"MIT"
] | 20 | 2020-10-05T07:07:46.000Z | 2021-05-23T02:18:43.000Z | Value_Based/C51/C51_1dim/agent.py | kyunghoon-jung/RL_implementation | b95be35fc95be7eb5aede2315a714984b282587a | [
"MIT"
] | null | null | null | Value_Based/C51/C51_1dim/agent.py | kyunghoon-jung/RL_implementation | b95be35fc95be7eb5aede2315a714984b282587a | [
"MIT"
] | null | null | null | import gym
import numpy as np
import time
import os
import cv2
import matplotlib.pyplot as plt
from IPython.display import clear_output
import torch
import torch.nn as nn
import torch.optim as optim
import torch.nn.functional as F
from qnetwork import QNetwork
from replay_buffer import ReplayBuffer
import wandb
class Agent:
    """C51 (categorical DQN) agent.

    Maintains a behavioral network ``q_behave`` and a target network
    ``q_target`` that both output a categorical distribution over ``n_atoms``
    support points in [Vmin, Vmax]; Q-values are expectations under that
    distribution. Experience is stored in a uniform replay buffer and the
    target network is synchronised by hard or soft updates.
    """

    def __init__(self,
                 env: 'Environment',
                 input_dim: ('int: the width and height of pre-processed input image'),
                 num_frames: ('int: Total number of frames'),
                 eps_decay: ('float: Epsilon Decay_rate'),
                 gamma: ('float: Discount Factor'),
                 target_update_freq: ('int: Target Update Frequency (unit: frames)'),
                 current_update_freq: ('int: Behavioral Network Update Frequency (unit: frames'),
                 update_type: ('str: Update type for target network. Hard or Soft')='hard',
                 soft_update_tau: ('float: Soft update ratio')=None,
                 batch_size: ('int: Update batch size')=32,
                 buffer_size: ('int: Replay buffer size')=1000000,
                 update_start_buffer_size: ('int: Update starting buffer size')=50000,
                 learning_rate: ('float: Learning rate')=0.0004,
                 eps_min: ('float: Epsilon Min')=0.1,
                 eps_max: ('float: Epsilon Max')=1.0,
                 device_num: ('int: GPU device number')=0,
                 rand_seed: ('int: Random seed')=None,
                 plot_option: ('str: Plotting option')=False,
                 model_path: ('str: Model saving path')='./',
                 n_atoms: ('int: The number of atoms')=51,    # Variables for Categprocal
                 Vmax: ('int: The maximum Q value')=10,       # Variables for Categprocal
                 Vmin: ('int: The minimum Q value')=-10,      # Variables for Categprocal
                 ):
        self.action_dim = env.action_space.n
        self.device = torch.device(f'cuda:{device_num}' if torch.cuda.is_available() else 'cpu')
        self.model_path = model_path

        self.env = env
        self.input_dim = input_dim
        self.num_frames = num_frames
        self.epsilon = eps_max
        self.eps_decay = eps_decay
        self.eps_min = eps_min
        self.gamma = gamma
        self.target_update_freq = target_update_freq
        self.current_update_freq = current_update_freq
        self.update_cnt = 0
        self.update_type = update_type
        self.tau = soft_update_tau
        self.batch_size = batch_size
        self.buffer_size = buffer_size
        self.update_start = update_start_buffer_size
        self.seed = rand_seed
        self.plot_option = plot_option
        # Sentinel baselines so the first real episode always counts as a high.
        self.scores = [-100000]
        self.avg_scores = [-100000]

        # Variables for C51: fixed support of n_atoms points in [Vmin, Vmax].
        self.n_atoms = n_atoms
        self.Vmin = Vmin
        self.Vmax = Vmax
        self.dz = (Vmax - Vmin) / (n_atoms - 1)
        self.support = torch.linspace(Vmin, Vmax, n_atoms).to(self.device)
        # Pre-expanded support for batched expectation computations.
        self.expanded_support = self.support.expand((batch_size, self.action_dim, n_atoms)).to(self.device)

        self.q_behave = QNetwork(self.input_dim, self.action_dim, n_atoms=self.n_atoms).to(self.device)
        self.q_target = QNetwork(self.input_dim, self.action_dim, n_atoms=self.n_atoms).to(self.device)
        self.q_target.load_state_dict(self.q_behave.state_dict())
        self.q_target.eval()

        self.optimizer = optim.Adam(self.q_behave.parameters(), lr=learning_rate)

        self.memory = ReplayBuffer(self.buffer_size, self.input_dim, self.batch_size)

        if self.plot_option == 'wandb':
            wandb.watch(self.q_behave)

    def select_action(self, state: 'Must be pre-processed in the same way while updating current Q network. See def _compute_loss'):
        """Epsilon-greedy action selection; returns (Q estimates, action)."""
        if np.random.random() < self.epsilon:
            # Random action: no Q estimates available, return zeros as a placeholder.
            return np.zeros(self.action_dim), self.env.action_space.sample()
        else:
            with torch.no_grad():
                state = torch.FloatTensor(state).to(self.device).unsqueeze(0)
                # Categorical RL: Q(s,a) is the expectation of the distribution
                # over the support atoms.
                Expected_Qs = (self.q_behave(state)*self.expanded_support[0]).sum(2)
                action = Expected_Qs.argmax(1)
            return Expected_Qs.detach().cpu().numpy(), action.detach().item()

    def get_init_state(self):
        """Reset the environment and return the initial observation."""
        init_state = self.env.reset()
        for _ in range(0): # Random initial starting point (currently disabled: range(0)).
            action = self.env.action_space.sample()
            init_state, _, _, _ = self.env.step(action)
        return init_state

    def get_state(self, state, action):
        """Step the environment; returns (reward, next_state, done)."""
        next_state, reward, done, _ = self.env.step(action)
        return reward, next_state, done

    def store(self, state, action, reward, next_state, done):
        """Push one transition into the replay buffer."""
        self.memory.store(state, action, reward, next_state, done)

    def update_current_q_net(self):
        """Sample a batch and take one gradient step; returns the loss value."""
        batch = self.memory.batch_load()
        loss = self._compute_loss(batch)

        self.optimizer.zero_grad()
        loss.backward()
        self.optimizer.step()
        return loss.item()

    def target_soft_update(self):
        """Polyak-average behavioral weights into the target network."""
        for target_param, current_param in zip(self.q_target.parameters(), self.q_behave.parameters()):
            target_param.data.copy_(self.tau*current_param.data + (1.0-self.tau)*target_param.data)

    def target_hard_update(self):
        """Copy behavioral weights into the target network every target_update_freq calls."""
        self.update_cnt = (self.update_cnt+1) % self.target_update_freq
        if self.update_cnt==0:
            self.q_target.load_state_dict(self.q_behave.state_dict())

    def train(self):
        """Run the full training loop for num_frames frames."""
        tic = time.time()
        losses = []
        epsilons = []
        score = 0

        # Warm up: fill the replay buffer with epsilon-greedy experience
        # before any learning takes place.
        print("Storing initial buffer..")
        state = self.get_init_state()
        for frame_idx in range(1, self.update_start+1):
            _, action = self.select_action(state)
            reward, next_state, done = self.get_state(state, action)
            self.store(state, action, np.clip(reward, -1, 1), next_state, done)
            state = next_state
            if done: state = self.get_init_state()

        print("Done. Start learning..")
        history_store = []
        behave_update_cnt = 0
        for frame_idx in range(1, self.num_frames+1):
            Qs, action = self.select_action(state)
            reward, next_state, done = self.get_state(state, action)
            self.store(state, action, np.clip(reward, -1, 1), next_state, done)
            history_store.append([state, Qs, action, reward, next_state, done])
            loss = self.update_current_q_net()

            behave_update_cnt = (behave_update_cnt+1)%self.current_update_freq
            # Soft updates happen every frame; hard updates are additionally
            # gated by target_update_freq inside target_hard_update.
            if self.update_type=='soft': self.target_soft_update()
            elif behave_update_cnt == 0: self.target_hard_update()
            score += reward
            losses.append(loss)

            if done:
                self.model_save(frame_idx, score, self.scores, self.avg_scores, history_store, tic)
                epsilons.append(self.epsilon)
                self.plot_status(self.plot_option, frame_idx, score, self.scores, losses, epsilons)
                score = 0
                state = self.get_init_state()
                history_store = []
            else: state = next_state
            self._epsilon_step()

        print("Total training time: {}(hrs)".format((time.time()-tic)/3600))

    def _epsilon_step(self):
        ''' Epsilon decay control: larger epsilon decays faster. '''
        eps_decay_list = [self.eps_decay, self.eps_decay/2.5, self.eps_decay/3.5, self.eps_decay/5.5]

        if self.epsilon>0.30:
            self.epsilon = max(self.epsilon-eps_decay_list[0], 0.1)
        elif self.epsilon>0.25:
            self.epsilon = max(self.epsilon-eps_decay_list[1], 0.1)
        elif self.epsilon>0.17:
            # Bug fix: this threshold was 1.7, which is unreachable because
            # epsilon never exceeds eps_max (1.0); the /3.5 decay rate was dead
            # code. 0.17 continues the descending 0.30 > 0.25 > 0.17 ladder.
            self.epsilon = max(self.epsilon-eps_decay_list[2], 0.1)
        else:
            self.epsilon = max(self.epsilon-eps_decay_list[3], 0.1)

    def model_save(self, frame_idx, score, scores, avg_scores, history_store, tic):
        '''Save model weights and episode history on new highest / new best
        10-episode-average scores.'''
        if score > max(self.scores):
            torch.save(self.q_behave.state_dict(), self.model_path+'{}_Highest_Score_{}.pt'.format(frame_idx, score))
            training_time = round((time.time()-tic)/3600, 1)
            np.save(self.model_path+'{}_history_Highest_Score_{}_{}hrs.npy'.format(frame_idx, score, training_time), np.array(history_store, dtype=object))
            print("                    | Model saved. Highest score: {}, Training time: {}hrs".format(score, training_time), ' /'.join(os.getcwd().split('/')[-3:]))
        self.scores.append(score)
        if np.mean(self.scores[-10:]) > max(self.avg_scores):
            torch.save(self.q_behave.state_dict(), self.model_path+'{}_Avg_Score_{}.pt'.format(frame_idx, np.mean(self.scores[-10:])))
            training_time = round((time.time()-tic)/3600, 1)
            np.save(self.model_path+'{}_history_Score_{}_{}hrs.npy'.format(frame_idx, score, training_time), np.array(history_store, dtype=object))
            print("                    | Model saved. Recent scores: {}, Training time: {}hrs".format(np.round(self.scores[-10:],2), training_time), ' /'.join(os.getcwd().split('/')[-3:]))
            self.avg_scores.append(np.mean(self.scores[-10:]))

    def plot_status(self, is_plot, frame_idx, score, scores, losses, epsilons):
        """Dispatch progress reporting to inline matplotlib, wandb, or stdout."""
        if is_plot=='inline':
            self._plot_inline(frame_idx, scores, losses, epsilons)
        elif is_plot=='wandb':
            wandb.log({'Score': score, 'Number of frames': frame_idx, 'loss(10 frames avg)': np.mean(losses[-10:]), 'Epsilon': self.epsilon})
            print(score, end='\r')
        else:
            print(score, end='\r')

    def _compute_loss(self, batch: "Dictionary (S, A, R', S', Dones)"):
        """Categorical (C51) cross-entropy loss with a Double-DQN target.

        Projects the target distribution Tz = r + gamma*(1-done)*z onto the
        fixed support and minimises cross-entropy against the behavioral
        network's predicted distribution for the taken actions.
        """
        # If normalization is used, it must be applied to 'state' and 'next_state' here. ex) state/255
        states = torch.FloatTensor(batch['states']).to(self.device)
        next_states = torch.FloatTensor(batch['next_states']).to(self.device)
        actions = torch.LongTensor(batch['actions']).to(self.device)
        rewards = torch.FloatTensor(batch['rewards'].reshape(-1, 1)).to(self.device)
        dones = torch.FloatTensor(batch['dones'].reshape(-1, 1)).to(self.device)

        log_behave_Q_dist = self.q_behave(states)[range(self.batch_size), actions].log()
        with torch.no_grad():
            # Computing projected distribution for a categorical loss.
            # Action selection with the behavioral net, evaluation with the
            # target net (Double DQN).
            behave_next_Q_dist = self.q_behave(next_states)
            next_actions = torch.sum(behave_next_Q_dist*self.expanded_support, 2).argmax(1)
            target_next_Q_dist = self.q_target(next_states)[range(self.batch_size), next_actions]

            # Bellman-update the support and clamp back into [Vmin, Vmax].
            Tz = rewards + self.gamma*(1 - dones)*self.expanded_support[:,0]
            Tz.clamp_(self.Vmin, self.Vmax)
            # Fractional position on the support, with lower/upper atom indices.
            b = (Tz - self.Vmin) / self.dz
            l = b.floor().long()
            u = b.ceil().long()
            # Edge handling so probability mass is never dropped when b is integral.
            l[(l==u) & (u>0)] -= 1
            u[(u==0) & (l==0)] += 1

            # Flattened per-sample offsets for index_add_ on the flat view.
            batch_init_indices = torch.linspace(0, (self.batch_size-1)*self.n_atoms, self.batch_size).long().unsqueeze(1).expand(self.batch_size, self.n_atoms).to(self.device)

            # Distribute mass proportionally between the neighbouring atoms.
            proj_dist = torch.zeros(self.batch_size, self.n_atoms).to(self.device)
            proj_dist.view(-1).index_add_(0, (l+batch_init_indices).view(-1), (target_next_Q_dist*(u-b)).view(-1))
            proj_dist.view(-1).index_add_(0, (u+batch_init_indices).view(-1), (target_next_Q_dist*(b-l)).view(-1))

        loss = torch.sum(-proj_dist*log_behave_Q_dist, 1).mean()
        return loss

    def _plot_inline(self, frame_idx, scores, losses, epsilons):
        """Render score/loss/epsilon curves inline (Jupyter)."""
        clear_output(True)
        plt.figure(figsize=(20, 5), facecolor='w')
        plt.subplot(131)
        plt.title('frame %s. score: %s' % (frame_idx, np.mean(scores[-10:])))
        plt.plot(scores)
        plt.subplot(132)
        plt.title('loss')
        plt.plot(losses)
        plt.subplot(133)
        plt.title('epsilons')
        plt.plot(epsilons)
        plt.show()
| 46.678161 | 188 | 0.606747 |
1b7e99428936733bba947726f924d0f9881c3bfc | 2,478 | py | Python | adoctor-check-scheduler/adoctor_check_scheduler/tests/unit/test_retry_task.py | seandong37tt4qu/jeszhengq | 32b3737ab45e89e8c5b71cdce871cefd2c938fa8 | [
"MulanPSL-1.0"
] | null | null | null | adoctor-check-scheduler/adoctor_check_scheduler/tests/unit/test_retry_task.py | seandong37tt4qu/jeszhengq | 32b3737ab45e89e8c5b71cdce871cefd2c938fa8 | [
"MulanPSL-1.0"
] | null | null | null | adoctor-check-scheduler/adoctor_check_scheduler/tests/unit/test_retry_task.py | seandong37tt4qu/jeszhengq | 32b3737ab45e89e8c5b71cdce871cefd2c938fa8 | [
"MulanPSL-1.0"
] | null | null | null | #!/usr/bin/python3
# ******************************************************************************
# Copyright (c) Huawei Technologies Co., Ltd. 2021-2021. All rights reserved.
# licensed under the Mulan PSL v2.
# You can use this software according to the terms and conditions of the Mulan PSL v2.
# You may obtain a copy of Mulan PSL v2 at:
# http://license.coscl.org.cn/MulanPSL2
# THIS SOFTWARE IS PROVIDED ON AN 'AS IS' BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR FIT FOR A PARTICULAR
# PURPOSE.
# See the Mulan PSL v2 for more details.
# ******************************************************************************/
"""
Author: YangYunYi
Date: 2021/9/1 23:24
docs: test_retry_task.py
description: test retry task
"""
import unittest
from adoctor_check_scheduler.check_scheduler.retry_task import RetryTask
class TestRetryTask(unittest.TestCase):
    """Unit tests for RetryTask waiting / retry-budget bookkeeping."""

    def setUp(self) -> None:
        # One check item with a single KPI data source.
        self.check_item_list = [{
            "check_item": "check_item1",
            "data_list": [{
                "name": "node_cpu_seconds_total",
                "type": "kpi",
                "label": {
                    "cpu": "1",
                    "mode": "irq"
                }
            }],
            "condition": "$0>1",
            "plugin": "",
            "description": "aaa"
        }]
        self.host_list = [
            {"host_id": "11111", "public_ip": "90.90.64.65"},
            {"host_id": "22222", "public_ip": "90.90.64.64"},
            {"host_id": "33333", "public_ip": "90.90.64.66"},
        ]

    def _make_task(self):
        # All tests exercise an identically configured task.
        return RetryTask(11111, [111, 222], self.check_item_list, "admin", self.host_list)

    def test_is_waiting(self):
        task = self._make_task()
        self.assertFalse(task.is_waiting())
        task.try_again()
        self.assertTrue(task.is_waiting())

    def test_is_use_up(self):
        task = self._make_task()
        # The task is not exhausted until max_retry_num retries have been spent.
        for _ in range(RetryTask.max_retry_num):
            self.assertFalse(task.is_use_up())
            task.try_again()
        task.try_again()
        self.assertTrue(task.is_use_up())

    def test_try_again(self):
        task = self._make_task()
        task.try_again()
        # try_again stamps the enqueue time and bumps the retry counter.
        self.assertNotEqual(task._enqueue_time, -1)
        self.assertEqual(task._enqueue_count, 1)
| 37.545455 | 98 | 0.563358 |
68248412320226885e776942d137ad0a7618d086 | 1,165 | py | Python | crawl/config.py | YuanruiZJU/GerritCrawler | 416eb74f4f8f50a461f849e579c12e3eba870d8c | [
"MIT"
] | 2 | 2020-06-27T03:32:49.000Z | 2020-07-22T15:06:46.000Z | crawl/config.py | YuanruiZJU/GerritCrawler | 416eb74f4f8f50a461f849e579c12e3eba870d8c | [
"MIT"
] | null | null | null | crawl/config.py | YuanruiZJU/GerritCrawler | 416eb74f4f8f50a461f849e579c12e3eba870d8c | [
"MIT"
] | 1 | 2020-06-27T03:32:54.000Z | 2020-06-27T03:32:54.000Z | import os
# Root directory where crawled Gerrit data and logs are stored.
download_dir = 'E://GerritDownload/'

# number of code reviews fetched in each step
num_one_step = 100

# Gerrit REST API endpoints: every server shares the same /changes query
# string; only the base URL differs per project.
_QUERY = '/changes/?o=ALL_REVISIONS&o=ALL_FILES&o=ALL_COMMITS&o=MESSAGES&o=DETAILED_ACCOUNTS&n=%s' % num_one_step
_BASES = {
    'eclipse': 'https://git.eclipse.org/r',
    'aosp': 'https://android-review.googlesource.com',
    'libreoffice': 'https://gerrit.libreoffice.org',
    'openstack': 'https://review.openstack.org',
    'gerrit': 'https://review.gerrithub.io',
    'qt': 'https://codereview.qt-project.org',
}
urls = {project: base + _QUERY for project, base in _BASES.items()}


def log_path(project, status):
    """Return the crawler log file path for *project* and run *status*."""
    return os.path.join(download_dir, project, 'run' + status + '.log')
| 55.47619 | 158 | 0.761373 |
963a9133f25483a3dc7d23f6eea57554c04acff0 | 22,714 | py | Python | ventilation_wall_parameters.py | BRI-EES-House/03_ventilation_layer | 57e791dc94435392cf72e54152df38ee50d76ece | [
"MIT"
] | null | null | null | ventilation_wall_parameters.py | BRI-EES-House/03_ventilation_layer | 57e791dc94435392cf72e54152df38ee50d76ece | [
"MIT"
] | 2 | 2020-09-23T07:15:42.000Z | 2021-04-28T07:47:35.000Z | ventilation_wall_parameters.py | BRI-EES-House/03_ventilation_layer | 57e791dc94435392cf72e54152df38ee50d76ece | [
"MIT"
] | null | null | null | import itertools
import pandas as pd
import numpy as np
import global_number
import ventilation_wall as vw
import ventilation_wall_simplified as vws
import envelope_performance_factors as epf
import heat_transfer_coefficient as htc
class Log:
    # Minimal file-like object handed to np.seterrcall: numpy calls .write()
    # with the text of each floating-point error.
    def write(self, msg):
        """
        Handler used with ``np.seterrcall`` to record numpy error logs
        (prints the message to the console when an error occurs).
        :param msg: error message
        :return: None
        """
        print("LOG: %s" % msg)
def get_parameter_list() -> object:
    """Build the full factorial (Cartesian product) of all wall parameters.

    :return: list of tuples ordered as
        (theta_e, theta_r, j_surf, a_surf, C_1, C_2, l_h, l_w, l_d,
         angle, v_a, l_s, emissivity_1, emissivity_2)
    """
    def span(low, high):
        # Lower bound, midpoint and upper bound of an assumed parameter range.
        return np.array([low, np.median([low, high]), high], dtype=float)

    # Outdoor temperature: winter levels (-10..10 degC) and summer levels (25..35 degC).
    theta_e = np.array([-10.0, 0.0, 10.0, 25.0, 30.0, 35.0], dtype=float)
    # Room temperature: winter condition (20 degC) and summer condition (27 degC).
    theta_r = np.array([20.0, 27.0], dtype=float)
    axes = (
        theta_e,
        theta_r,
        span(0.0, 1000.0),   # solar irradiance on the outer surface, W/m2
        span(0.0, 1.0),      # solar absorptance of the outer surface
        span(0.5, 100.0),    # thermal conductance of the outer layer, W/(m2*K)
        span(0.1, 5.0),      # thermal conductance of the inner layer, W/(m2*K)
        span(3.0, 12.0),     # ventilation-layer length, m
        span(0.05, 10.0),    # ventilation-layer width, m
        span(0.05, 0.3),     # ventilation-layer depth, m
        span(0.0, 90.0),     # tilt angle, degree
        span(0.0, 1.0),      # mean air speed in the layer, m/s
        [0.45],              # batten/rafter spacing, m (fixed)
        [0.9],               # emissivity of surface 1 facing the layer (fixed)
        span(0.1, 0.9),      # emissivity of surface 2 facing the layer
    )
    return list(itertools.product(*axes))
def get_wall_status_data_by_detailed_calculation(calc_mode_h_cv: str, calc_mode_h_rv: str) -> pd.DataFrame:
    """
    Build the exhaustive (full factorial) parameter grid for a ventilated wall
    and return a DataFrame holding the detailed-model result for every case.
    :param calc_mode_h_cv: calculation mode for the convective heat transfer coefficient
    :param calc_mode_h_rv: calculation mode for the radiative heat transfer coefficient
    :return: DataFrame (one row per parameter combination, results appended as columns)
    """
    # Build the full factorial parameter table
    parameter_name = ['theta_e', 'theta_r', 'j_surf', 'a_surf', 'C_1', 'C_2', 'l_h', 'l_w', 'l_d', 'angle',
                      'v_a', 'l_s', 'emissivity_1', 'emissivity_2']
    df = pd.DataFrame(get_parameter_list(), columns=parameter_name)
    # Fixed surface heat transfer coefficients (outdoor / indoor)
    h_out = global_number.get_h_out()
    h_in = global_number.get_h_in()
    # Result arrays, one entry per row of df
    theta_sat = []  # sol-air (equivalent outdoor) temperature [degC]
    theta_out_surf = []  # outdoor-side surface temperature [degC]
    theta_1_surf = []  # temperature of surface 1 facing the air layer [degC]
    theta_2_surf = []  # temperature of surface 2 facing the air layer [degC]
    theta_in_surf = []  # room-side surface temperature [degC]
    theta_as_ave = []  # mean temperature of the ventilation air layer [degC]
    effective_emissivity = []  # effective emissivity of the air layer [-]
    h_cv = []  # convective heat transfer coefficient in the air layer [W/(m2*K)]
    h_rv = []  # radiative heat transfer coefficient in the air layer [W/(m2*K)]
    theta_as_e = []  # equivalent temperature of the air layer [degC]
    q_room_side = []  # room-side surface heat flux [W/m2]
    k_e = []  # correction factor for the equivalent U-value of the ventilated wall [-]
    heat_balance_0 = []  # heat balance at the outdoor-side surface [W/m2]
    heat_balance_1 = []  # heat balance at surface 1 of the air layer [W/m2]
    heat_balance_2 = []  # heat balance at surface 2 of the air layer [W/m2]
    heat_balance_3 = []  # heat balance at the room-side surface [W/m2]
    heat_balance_4 = []  # heat balance of the air inside the layer [W/m2]
    is_optimize_succeed = []  # whether the optimization terminated normally
    optimize_message = []  # optimizer termination message
    # Route numpy floating-point errors to the console logger
    log = Log()
    saved_handler = np.seterrcall(log)
    with np.errstate(all='log'):  # log (instead of raising) numeric errors inside this scope
        for row in df.itertuples():
            print(row[0])
            # Pack the row into the solver's parameter structure
            parms = (vw.Parameters(theta_e=row.theta_e,
                                   theta_r=row.theta_r,
                                   J_surf=row.j_surf,
                                   a_surf=row.a_surf,
                                   C_1=row.C_1,
                                   C_2=row.C_2,
                                   l_h=row.l_h,
                                   l_w=row.l_w,
                                   l_d=row.l_d,
                                   angle=row.angle,
                                   v_a=row.v_a,
                                   l_s=row.l_s,
                                   emissivity_1=row.emissivity_1,
                                   emissivity_2=row.emissivity_2))
            # Solve the detailed wall model for this case
            status = vw.get_wall_status_values(parms, calc_mode_h_cv, calc_mode_h_rv, h_out, h_in)
            theta_out_surf.append(status.matrix_temp[0])
            theta_1_surf.append(status.matrix_temp[1])
            theta_2_surf.append(status.matrix_temp[2])
            theta_in_surf.append(status.matrix_temp[3])
            theta_as_ave.append(status.matrix_temp[4])
            effective_emissivity.append(htc.effective_emissivity_parallel(emissivity_1=row.emissivity_1, emissivity_2=row.emissivity_2))
            h_cv.append(status.h_cv)
            h_rv.append(status.h_rv)
            # Equivalent temperature of the air layer
            theta_as_e_buf = epf.get_theata_as_e(status.matrix_temp[4], status.matrix_temp[1],
                                                 status.h_cv, status.h_rv)
            theta_as_e.append(theta_as_e_buf)
            # Sol-air (equivalent outdoor) temperature
            theta_sat_buf = epf.get_theta_SAT(row.theta_e, row.a_surf, row.j_surf, h_out)
            theta_sat.append(theta_sat_buf)
            # Correction factor for the equivalent U-value of the ventilated wall
            k_e.append(epf.get_k_e(theta_as_e_buf, row.theta_r, theta_sat_buf))
            # Room-side surface heat flux
            r_i_buf = epf.get_r_i(C_2=row.C_2)
            q_room_side.append(epf.get_heat_flow_room_side_by_vent_layer_heat_resistance(r_i=r_i_buf, theta_2=status.matrix_temp[2], theta_r=row.theta_r))
            # Heat balance of each node (diagnostics; presumably near zero on convergence — confirm against vw)
            heat_balance_0.append(status.matrix_heat_balance[0])
            heat_balance_1.append(status.matrix_heat_balance[1])
            heat_balance_2.append(status.matrix_heat_balance[2])
            heat_balance_3.append(status.matrix_heat_balance[3])
            heat_balance_4.append(status.matrix_heat_balance[4])
            # Optimizer diagnostics
            is_optimize_succeed.append(status.is_optimize_succeed)
            optimize_message.append(status.optimize_message)
    # Append the results to the DataFrame
    df['theta_sat'] = theta_sat
    df['theta_out_surf'] = theta_out_surf
    df['theta_1_surf'] = theta_1_surf
    df['theta_2_surf'] = theta_2_surf
    df['theta_in_surf'] = theta_in_surf
    df['theta_as_ave'] = theta_as_ave
    df['effective_emissivity'] = effective_emissivity
    df['h_cv'] = h_cv
    df['h_rv'] = h_rv
    df['theta_as_e'] = theta_as_e
    df['k_e'] = k_e
    df['q_room_side'] = q_room_side
    df['heat_balance_0'] = heat_balance_0
    df['heat_balance_1'] = heat_balance_1
    df['heat_balance_2'] = heat_balance_2
    df['heat_balance_3'] = heat_balance_3
    df['heat_balance_4'] = heat_balance_4
    df['is_optimize_succeed'] = is_optimize_succeed
    df['optimize_message'] = optimize_message
    return df
def get_wall_status_data_by_simplified_calculation_no_01() -> pd.DataFrame:
    """
    Build the exhaustive parameter grid for a ventilated wall and return a
    DataFrame with the results of simplified method No.1 (reduced matrix form).
    :return: DataFrame
    """
    # Build the full factorial parameter table
    parameter_name = ['theta_e', 'theta_r', 'j_surf', 'a_surf', 'C_1', 'C_2', 'l_h', 'l_w', 'l_d', 'angle',
                      'v_a', 'l_s', 'emissivity_1', 'emissivity_2']
    df = pd.DataFrame(get_parameter_list(), columns=parameter_name)
    # Fixed outdoor surface heat transfer coefficient
    h_out = global_number.get_h_out()
    # Result arrays, one entry per row of df
    theta_sat = []  # sol-air (equivalent outdoor) temperature [degC]
    theta_1_surf = []  # temperature of surface 1 facing the air layer [degC]
    theta_2_surf = []  # temperature of surface 2 facing the air layer [degC]
    theta_as_ave = []  # mean temperature of the ventilation air layer [degC]
    effective_emissivity = []  # effective emissivity of the air layer [-]
    h_cv = []  # convective heat transfer coefficient in the air layer [W/(m2*K)]
    h_rv = []  # radiative heat transfer coefficient in the air layer [W/(m2*K)]
    q_room_side = []  # room-side surface heat flux [W/m2]
    # Route numpy floating-point errors to the console logger
    log = Log()
    saved_handler = np.seterrcall(log)
    with np.errstate(all='log'):  # log (instead of raising) numeric errors inside this scope
        for row in df.itertuples():
            print(row[0])
            # Pack the row into the solver's parameter structure
            parms = (vw.Parameters(theta_e=row.theta_e,
                                   theta_r=row.theta_r,
                                   J_surf=row.j_surf,
                                   a_surf=row.a_surf,
                                   C_1=row.C_1,
                                   C_2=row.C_2,
                                   l_h=row.l_h,
                                   l_w=row.l_w,
                                   l_d=row.l_d,
                                   angle=row.angle,
                                   v_a=row.v_a,
                                   l_s=row.l_s,
                                   emissivity_1=row.emissivity_1,
                                   emissivity_2=row.emissivity_2))
            # State of the air layer; temps indexing (0: surface 1, 1: layer air, 2: surface 2)
            # is inferred from the appends below — TODO confirm against vws
            temps, h_cv_buf, h_rv_buf, r_i_buf = vws.get_vent_wall_temperature_by_simplified_calculation_no_01(parm=parms, h_out=h_out)
            theta_1_surf.append(temps[0])
            theta_2_surf.append(temps[2])
            theta_as_ave.append(temps[1])
            effective_emissivity.append(htc.effective_emissivity_parallel(emissivity_1=row.emissivity_1, emissivity_2=row.emissivity_2))
            h_cv.append(h_cv_buf)
            h_rv.append(h_rv_buf)
            # Sol-air (equivalent outdoor) temperature
            theta_sat_buf = epf.get_theta_SAT(theta_e=row.theta_e, a_surf=row.a_surf, j_surf=row.j_surf, h_out=h_out)
            theta_sat.append(theta_sat_buf)
            # Room-side surface heat flux
            q_room_side.append(epf.get_heat_flow_room_side_by_vent_layer_heat_resistance(r_i=r_i_buf, theta_2=temps[2], theta_r=row.theta_r))
    # Append the results to the DataFrame
    df['theta_sat'] = theta_sat
    df['theta_1_surf'] = theta_1_surf
    df['theta_2_surf'] = theta_2_surf
    df['theta_as_ave'] = theta_as_ave
    df['effective_emissivity'] = effective_emissivity
    df['h_cv'] = h_cv
    df['h_rv'] = h_rv
    df['q_room_side'] = q_room_side
    return df
def get_wall_status_data_by_simplified_calculation_no_02() -> pd.DataFrame:
    """
    Build the exhaustive parameter grid for a ventilated wall and return a
    DataFrame with the results of simplified method No.2 (closed-form equations).
    :return: DataFrame
    """
    # Build the full factorial parameter table
    parameter_name = ['theta_e', 'theta_r', 'j_surf', 'a_surf', 'C_1', 'C_2', 'l_h', 'l_w', 'l_d', 'angle',
                      'v_a', 'l_s', 'emissivity_1', 'emissivity_2']
    df = pd.DataFrame(get_parameter_list(), columns=parameter_name)
    # Fixed surface heat transfer coefficients (outdoor / indoor)
    h_out = global_number.get_h_out()
    h_in = global_number.get_h_in()
    # Result arrays, one entry per row of df
    theta_sat = []  # sol-air (equivalent outdoor) temperature [degC]
    theta_as_ave = []  # mean temperature of the ventilation air layer [degC]
    effective_emissivity = []  # effective emissivity of the air layer [-]
    h_cv = []  # convective heat transfer coefficient in the air layer [W/(m2*K)]
    h_rv = []  # radiative heat transfer coefficient in the air layer [W/(m2*K)]
    u_o = []  # heat transmission coefficient from outdoors to the air layer [W/(m2*K)]
    u_i = []  # heat transmission coefficient from the room to the air layer [W/(m2*K)]
    q_room_side = []  # room-side surface heat flux [W/m2]
    # Route numpy floating-point errors to the console logger
    log = Log()
    saved_handler = np.seterrcall(log)
    with np.errstate(all='log'):  # log (instead of raising) numeric errors inside this scope
        for row in df.itertuples():
            print(row[0])
            # Pack the row into the solver's parameter structure
            parms = (vw.Parameters(theta_e=row.theta_e,
                                   theta_r=row.theta_r,
                                   J_surf=row.j_surf,
                                   a_surf=row.a_surf,
                                   C_1=row.C_1,
                                   C_2=row.C_2,
                                   l_h=row.l_h,
                                   l_w=row.l_w,
                                   l_d=row.l_d,
                                   angle=row.angle,
                                   v_a=row.v_a,
                                   l_s=row.l_s,
                                   emissivity_1=row.emissivity_1,
                                   emissivity_2=row.emissivity_2))
            # Convective / radiative coefficients: a room temperature of 20 degC
            # marks the winter condition, 27 degC the summer one (see get_parameter_list)
            effective_emissivity_buf = htc.effective_emissivity_parallel(emissivity_1=row.emissivity_1, emissivity_2=row.emissivity_2)
            if parms.theta_r == 20.0:
                h_cv_buf = htc.convective_heat_transfer_coefficient_simplified_winter(v_a=row.v_a)
                h_rv_buf = htc.radiative_heat_transfer_coefficient_simplified_winter(
                    effective_emissivity=effective_emissivity_buf)
            else:
                h_cv_buf = htc.convective_heat_transfer_coefficient_simplified_summer(v_a=row.v_a)
                h_rv_buf = htc.radiative_heat_transfer_coefficient_simplified_summer(
                    effective_emissivity=effective_emissivity_buf)
            effective_emissivity.append(effective_emissivity_buf)
            h_cv.append(h_cv_buf)
            h_rv.append(h_rv_buf)
            # Mean temperature of the air layer
            theta_as_ave_buf, u_o_buf, u_i_buf = vws.get_vent_wall_temperature_by_simplified_calculation_no_02(parm=parms, h_out=h_out)
            theta_as_ave.append(theta_as_ave_buf)
            # Sol-air (equivalent outdoor) temperature
            theta_sat.append(epf.get_theta_SAT(row.theta_e, row.a_surf, row.j_surf, h_out))
            # Heat transmission coefficients outdoors-to-layer and room-to-layer
            u_o.append(u_o_buf)
            u_i.append(u_i_buf)
            # Room-side surface heat flux
            q_room_side.append(epf.get_heat_flow_room_side_by_vent_layer_heat_transfer_coeff(u_i=u_i_buf, theta_as_ave=theta_as_ave_buf, theta_r=row.theta_r))
    # Append the results to the DataFrame
    df['theta_sat'] = theta_sat
    df['theta_as_ave'] = theta_as_ave
    df['effective_emissivity'] = effective_emissivity
    df['h_cv'] = h_cv
    df['h_rv'] = h_rv
    df['u_o'] = u_o
    df['u_i'] = u_i
    df['q_room_side'] = q_room_side
    return df
def get_wall_status_data_by_simplified_calculation_no_03() -> pd.DataFrame:
    """
    Build the exhaustive parameter grid for a ventilated wall and return a
    DataFrame with the results of simplified method No.3 (room-side heat flux
    derived from the wall's corrected U-value and corrected solar heat gain).
    :return: DataFrame
    """
    # Build the full factorial parameter table
    parameter_name = ['theta_e', 'theta_r', 'j_surf', 'a_surf', 'C_1', 'C_2', 'l_h', 'l_w', 'l_d', 'angle',
                      'v_a', 'l_s', 'emissivity_1', 'emissivity_2']
    df = pd.DataFrame(get_parameter_list(), columns=parameter_name)
    # Fixed outdoor surface heat transfer coefficient
    h_out = global_number.get_h_out()
    # Result arrays, one entry per row of df
    theta_sat = []  # sol-air (equivalent outdoor) temperature [degC]
    h_cv = []  # convective heat transfer coefficient in the air layer [W/(m2*K)]
    h_rv = []  # radiative heat transfer coefficient in the air layer [W/(m2*K)]
    u_dash = []  # corrected heat transmission coefficient [W/(m2*K)]
    eta_dash = []  # corrected solar heat gain coefficient [-]
    q_room_side = []  # room-side surface heat flux [W/m2]
    # Route numpy floating-point errors to the console logger
    log = Log()
    saved_handler = np.seterrcall(log)
    with np.errstate(all='log'):  # log (instead of raising) numeric errors inside this scope
        for row in df.itertuples():
            print(row[0])
            # Pack the row into the solver's parameter structure
            parms = (vw.Parameters(theta_e=row.theta_e,
                                   theta_r=row.theta_r,
                                   J_surf=row.j_surf,
                                   a_surf=row.a_surf,
                                   C_1=row.C_1,
                                   C_2=row.C_2,
                                   l_h=row.l_h,
                                   l_w=row.l_w,
                                   l_d=row.l_d,
                                   angle=row.angle,
                                   v_a=row.v_a,
                                   l_s=row.l_s,
                                   emissivity_1=row.emissivity_1,
                                   emissivity_2=row.emissivity_2))
            # Sol-air (equivalent outdoor) temperature
            theta_sat.append(epf.get_theta_SAT(row.theta_e, row.a_surf, row.j_surf, h_out))
            # Convective/radiative coefficients, corrected U-value, corrected
            # solar heat gain coefficient and room-side heat flux in one call
            h_cv_buf, h_rv_buf, u_dash_buf, eta_dash_buf, q_room_side_buf \
                = vws.get_vent_wall_performance_factor_by_simplified_calculation_no_03(parm=parms, h_out=h_out)
            # Store the results
            h_cv.append(h_cv_buf)
            h_rv.append(h_rv_buf)
            u_dash.append(u_dash_buf)
            eta_dash.append(eta_dash_buf)
            q_room_side.append(q_room_side_buf)
    # Append the results to the DataFrame
    df['theta_sat'] = theta_sat
    df['h_cv'] = h_cv
    df['h_rv'] = h_rv
    df['u_dash'] = u_dash
    df['eta_dash'] = eta_dash
    df['q_room_side'] = q_room_side
    return df
def get_wall_status_data_by_simplified_calculation_no_04() -> pd.DataFrame:
    """
    Build the exhaustive parameter grid for a ventilated wall and return a
    DataFrame with the results of simplified method No.4 (a further
    simplification of method No.3).
    :return: DataFrame
    """
    # Build the full factorial parameter table
    parameter_name = ['theta_e', 'theta_r', 'j_surf', 'a_surf', 'C_1', 'C_2', 'l_h', 'l_w', 'l_d', 'angle',
                      'v_a', 'l_s', 'emissivity_1', 'emissivity_2']
    df = pd.DataFrame(get_parameter_list(), columns=parameter_name)
    # Fixed outdoor surface heat transfer coefficient
    h_out = global_number.get_h_out()
    # Result arrays, one entry per row of df
    theta_sat = []  # sol-air (equivalent outdoor) temperature [degC]
    h_cv = []  # convective heat transfer coefficient in the air layer [W/(m2*K)]
    h_rv = []  # radiative heat transfer coefficient in the air layer [W/(m2*K)]
    u_dash = []  # corrected heat transmission coefficient [W/(m2*K)]
    eta_dash = []  # corrected solar heat gain coefficient [-]
    q_room_side = []  # room-side surface heat flux [W/m2]
    # Route numpy floating-point errors to the console logger
    log = Log()
    saved_handler = np.seterrcall(log)
    with np.errstate(all='log'):  # log (instead of raising) numeric errors inside this scope
        for row in df.itertuples():
            print(row[0])
            # Pack the row into the solver's parameter structure
            parms = (vw.Parameters(theta_e=row.theta_e,
                                   theta_r=row.theta_r,
                                   J_surf=row.j_surf,
                                   a_surf=row.a_surf,
                                   C_1=row.C_1,
                                   C_2=row.C_2,
                                   l_h=row.l_h,
                                   l_w=row.l_w,
                                   l_d=row.l_d,
                                   angle=row.angle,
                                   v_a=row.v_a,
                                   l_s=row.l_s,
                                   emissivity_1=row.emissivity_1,
                                   emissivity_2=row.emissivity_2))
            # Sol-air (equivalent outdoor) temperature
            theta_sat.append(epf.get_theta_SAT(row.theta_e, row.a_surf, row.j_surf, h_out))
            # Convective/radiative coefficients, corrected U-value, corrected
            # solar heat gain coefficient and room-side heat flux in one call
            h_cv_buf, h_rv_buf, u_dash_buf, eta_dash_buf, q_room_side_buf \
                = vws.get_vent_wall_performance_factor_by_simplified_calculation_no_04(parm=parms, h_out=h_out)
            # Store the results
            h_cv.append(h_cv_buf)
            h_rv.append(h_rv_buf)
            u_dash.append(u_dash_buf)
            eta_dash.append(eta_dash_buf)
            q_room_side.append(q_room_side_buf)
    # Append the results to the DataFrame
    df['theta_sat'] = theta_sat
    df['h_cv'] = h_cv
    df['h_rv'] = h_rv
    df['u_dash'] = u_dash
    df['eta_dash'] = eta_dash
    df['q_room_side'] = q_room_side
    return df
def dump_csv_all_case_result():
    """Run every calculation variant over the full parameter grid and dump each result to CSV."""
    # (console label, callable producing the result DataFrame, output CSV path),
    # executed in order: detailed model, h_rv variants, h_cv variants, then
    # the four stand-alone simplified methods.
    jobs = [
        ("Detailed Calculation",
         lambda: get_wall_status_data_by_detailed_calculation("detailed", "detailed"),
         "wall_status_data_frame_detailed.csv"),
        ("Simplified Calculation: h_rv_winter",
         lambda: get_wall_status_data_by_detailed_calculation(calc_mode_h_cv="detailed", calc_mode_h_rv="simplified_winter"),
         "wall_status_data_frame_h_rv_simplified_winter.csv"),
        ("Simplified Calculation: h_rv_summer",
         lambda: get_wall_status_data_by_detailed_calculation(calc_mode_h_cv="detailed", calc_mode_h_rv="simplified_summer"),
         "wall_status_data_frame_h_rv_simplified_summer.csv"),
        ("Simplified Calculation: h_rv_zero",
         lambda: get_wall_status_data_by_detailed_calculation(calc_mode_h_cv="detailed", calc_mode_h_rv="simplified_zero"),
         "wall_status_data_frame_h_rv_simplified_zero.csv"),
        ("Simplified Calculation: h_rv_all_season",
         lambda: get_wall_status_data_by_detailed_calculation(calc_mode_h_cv="detailed", calc_mode_h_rv="simplified_all_season"),
         "wall_status_data_frame_h_rv_simplified_all_season.csv"),
        ("Simplified Calculation: h_cv_winter",
         lambda: get_wall_status_data_by_detailed_calculation(calc_mode_h_cv="simplified_winter", calc_mode_h_rv="detailed"),
         "wall_status_data_frame_h_cv_simplified_winter.csv"),
        ("Simplified Calculation: h_cv_summer",
         lambda: get_wall_status_data_by_detailed_calculation(calc_mode_h_cv="simplified_summer", calc_mode_h_rv="detailed"),
         "wall_status_data_frame_h_cv_simplified_summer.csv"),
        ("Simplified Calculation: h_cv_all_season",
         lambda: get_wall_status_data_by_detailed_calculation(calc_mode_h_cv="simplified_all_season", calc_mode_h_rv="detailed"),
         "wall_status_data_frame_h_cv_simplified_all_season.csv"),
        ("Simplified Calculation No.1",
         get_wall_status_data_by_simplified_calculation_no_01,
         "wall_status_data_frame_simplified_calculation_no01.csv"),
        ("Simplified Calculation No.2",
         get_wall_status_data_by_simplified_calculation_no_02,
         "wall_status_data_frame_simplified_calculation_no02.csv"),
        ("Simplified Calculation No.3",
         get_wall_status_data_by_simplified_calculation_no_03,
         "wall_status_data_frame_simplified_calculation_no03.csv"),
        ("Simplified Calculation No.4",
         get_wall_status_data_by_simplified_calculation_no_04,
         "wall_status_data_frame_simplified_calculation_no04.csv"),
    ]
    for label, build, csv_path in jobs:
        print(label)
        df = pd.DataFrame(build())
        df.to_csv(csv_path)
if __name__ == '__main__':
    # Run every calculation case over the full parameter grid and dump CSVs.
    dump_csv_all_case_result()
    # For debugging
    # dump_csv_all_case_result()
    # print(np.median([-20,40]))
| 40.344583 | 158 | 0.58224 |
886c7de79e0407af60f5b0ce3ef11869a60720b8 | 300 | py | Python | solutions/293_flip_game.py | YiqunPeng/leetcode_pro | 7e6376984f9baec49a5e827d98330fe3d1b656f0 | [
"MIT"
] | null | null | null | solutions/293_flip_game.py | YiqunPeng/leetcode_pro | 7e6376984f9baec49a5e827d98330fe3d1b656f0 | [
"MIT"
] | null | null | null | solutions/293_flip_game.py | YiqunPeng/leetcode_pro | 7e6376984f9baec49a5e827d98330fe3d1b656f0 | [
"MIT"
] | null | null | null | class Solution:
def generatePossibleNextMoves(self, s: str) -> List[str]:
"""Running Time: O(n) where n is length of s.
"""
res = []
for i in range(len(s) - 1):
if s[i] == s[i+1] == '+':
res.append(s[:i] + '--' + s[i+2:])
return res
| 30 | 61 | 0.45 |
0d116b3741b9a69060689cc52cc88d9093f50cda | 1,748 | py | Python | src/richie/plugins/simple_picture/cms_plugins.py | leduong/richie | bf7ed379b7e2528cd790dadcec10ac2656efd189 | [
"MIT"
] | 174 | 2018-04-14T23:36:01.000Z | 2022-03-10T09:27:01.000Z | src/richie/plugins/simple_picture/cms_plugins.py | leduong/richie | bf7ed379b7e2528cd790dadcec10ac2656efd189 | [
"MIT"
] | 631 | 2018-04-04T11:28:53.000Z | 2022-03-31T11:18:31.000Z | src/richie/plugins/simple_picture/cms_plugins.py | leduong/richie | bf7ed379b7e2528cd790dadcec10ac2656efd189 | [
"MIT"
] | 64 | 2018-06-27T08:35:01.000Z | 2022-03-10T09:27:43.000Z | """SimplePicture plugin for DjangoCMS."""
from django.utils.translation import gettext_lazy as _
from cms.plugin_base import CMSPluginBase
from cms.plugin_pool import plugin_pool
from djangocms_picture.models import Picture
from richie.apps.core.defaults import PLUGINS_GROUP
from .forms import SimplePictureForm
from .helpers import get_picture_info
@plugin_pool.register_plugin
class SimplePicturePlugin(CMSPluginBase):
    """
    CMSPlugin to easily add an image when all display options for the image are determined by
    code i.e via presets defined in the project settings or via rendering contexts in templates.
    """

    allow_children = False  # a plain image holds no nested plugins
    cache = True
    disable_child_plugins = True
    form = SimplePictureForm
    model = Picture  # reuses djangocms_picture's model for storage
    module = PLUGINS_GROUP
    name = _("Image")
    render_template = "richie/simple_picture/picture.html"
    render_plugin = True
    # Only the picture itself is editable; all display options come from presets.
    fieldsets = ((None, {"fields": ["picture"]}),)

    def render(self, context, instance, placeholder):
        """
        Compute thumbnails and populate the context with all the information necessary to display
        the image as defined in settings.

        The image is chosen in the following order:
        - preset matching a name passed in the context via the `picture_preset` parameter,
        - preset matching the name of the placeholder in which the plugin is being used,
        - fallback to the `default` preset.

        :param context: template context (may carry a ``picture_preset`` key)
        :param instance: the ``Picture`` model instance being rendered
        :param placeholder: name of the placeholder hosting the plugin
        :return: the updated template context
        """
        # Look for the name of a preset in the context and default to the name of the placeholder
        preset_name = context.get("picture_preset", placeholder)
        context["picture_info"] = get_picture_info(instance, preset_name)
        context["instance"] = instance
        return context
61f8bad6cb253d94d98c8d32c4ee06795f60e4f2 | 14 | py | Python | sampletest/__init__.py | lcskrishna/python-sample-project | 96976f63fc7f2956a0893a6d9a26894d1bfc7703 | [
"MIT"
] | null | null | null | sampletest/__init__.py | lcskrishna/python-sample-project | 96976f63fc7f2956a0893a6d9a26894d1bfc7703 | [
"MIT"
] | null | null | null | sampletest/__init__.py | lcskrishna/python-sample-project | 96976f63fc7f2956a0893a6d9a26894d1bfc7703 | [
"MIT"
] | null | null | null | import sample
| 7 | 13 | 0.857143 |
be338e71f57b849918f13881647c262cf4b08b6c | 1,223 | py | Python | coge/errors.py | asherkhb/PyCoGe_API | ad9e642399127187d9078585a4e65dd9df05f3f7 | [
"BSD-3-Clause"
] | null | null | null | coge/errors.py | asherkhb/PyCoGe_API | ad9e642399127187d9078585a4e65dd9df05f3f7 | [
"BSD-3-Clause"
] | null | null | null | coge/errors.py | asherkhb/PyCoGe_API | ad9e642399127187d9078585a4e65dd9df05f3f7 | [
"BSD-3-Clause"
] | null | null | null | import json
from datetime import datetime
class Error(Exception):
    """Base class for module exceptions."""
    pass


class AuthError(Error):
    """Exception raised when authentication is invalid, or fails.

    :param msg: human-readable description of the failure.
    """
    def __init__(self, msg):
        self.msg = msg


class InvalidResponseError(Error):
    """Exception raised when a request has an invalid response code.

    Extracts the error payload (``{"error": {<desc>: <msg>}}``) from the
    response body when present, then prints a timestamped error line.
    """
    def __init__(self, response):
        # Parse response.
        self.status_code = response.status_code
        try:
            self.data = json.loads(response.text)['error']
            # BUG FIX: dict views are not subscriptable in Python 3
            # (the old ``self.data.keys()[0]`` raised TypeError);
            # take the first key via an iterator instead.
            self.desc = next(iter(self.data))
            self.msg = self.data[self.desc]
        except (ValueError, KeyError, TypeError, StopIteration):
            # Body was not JSON, or did not carry the expected error payload.
            self.desc = "UNKNOWN RESPONSE"
            self.msg = response.text
        # Print error message.
        self.print_error()

    def print_error(self):
        """Log the parsed error with a timestamp to stdout."""
        print("[CoGe API] %s - ERROR - Invalid response (%d, %s): %s" %
              (datetime.now(), self.status_code, self.desc, self.msg))


class InvalidIDError(Error):
    """Exception raised when an ID cannot be converted to an integer."""
    def __init__(self, ids):
        self.ids = ids


class InvalidCogeObject(Error):
    """Exception raised for an invalid CoGe object."""
    def __init__(self, msg=''):
        self.msg = msg
e934a714b54580dd1b6de820150cd074fd9f5fa2 | 2,977 | py | Python | tests/unit/test_vowpal_wabbit.py | mlnethub/recommenders | a744690785455f75a633ce43b67d8ad5a79d5cd7 | [
"MIT"
] | 28 | 2021-11-12T08:26:40.000Z | 2022-03-27T07:21:24.000Z | tests/unit/test_vowpal_wabbit.py | awesomemachinelearning/recommenders | de3782cce370a446e14e6b47e87686867fb7e069 | [
"MIT"
] | 5 | 2021-11-10T02:58:32.000Z | 2022-03-21T16:13:11.000Z | tests/unit/test_vowpal_wabbit.py | awesomemachinelearning/recommenders | de3782cce370a446e14e6b47e87686867fb7e069 | [
"MIT"
] | 9 | 2021-11-03T07:14:47.000Z | 2022-02-22T13:42:04.000Z | # Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License.
import os
import pytest
from unittest import mock
import pandas as pd
from reco_utils.recommender.vowpal_wabbit.vw import VW
@pytest.fixture(scope="module")
def df():
    """Small user/item interaction frame shared by the tests in this module."""
    return pd.DataFrame(
        dict(user=[1, 3, 2], item=[8, 7, 7], rating=[1, 5, 3], timestamp=[1, 2, 3])
    )
@pytest.fixture(scope="function")
def model():
    """Fresh VW wrapper per test; deleted afterwards to release its resources."""
    model = VW(col_user="user", col_item="item", col_prediction="prediction", q="ui")
    yield model
    # Deleting the wrapper removes its temporary directory (see test_vw_init_del).
    del model
@pytest.mark.vw
def test_vw_init_del():
    """Constructing VW creates a temp dir; deleting the object removes it."""
    model = VW()
    tempdir = model.tempdir.name
    assert os.path.exists(tempdir)
    del model
    assert not os.path.exists(tempdir)
@pytest.mark.vw
def test_to_vw_cmd():
    """to_vw_cmd renders a params dict as a vw command line; quiet=False is dropped."""
    expected = [
        "vw",
        "-l",
        "0.1",
        "--l1",
        "0.2",
        "--loss_function",
        "logistic",
        "--holdout_off",
        "--rank",
        "3",
        "-t",
    ]
    params = dict(
        l=0.1,
        l1=0.2,
        loss_function="logistic",
        holdout_off=True,
        quiet=False,
        rank=3,
        t=True,
    )
    assert VW.to_vw_cmd(params=params) == expected
@pytest.mark.vw
def test_parse_train_cmd(model):
    """Training params: -f/-d are overridden with the model's own file paths."""
    expected = [
        "vw",
        "--loss_function",
        "logistic",
        "--oaa",
        "5",
        "-f",
        model.model_file,
        "-d",
        model.train_file,
    ]
    params = dict(loss_function="logistic", oaa=5, f="test", d="data", quiet=False)
    assert model.parse_train_params(params=params) == expected
@pytest.mark.vw
def test_parse_test_cmd(model):
    """Test params: oaa is dropped, file args point at the model's paths, -t is appended."""
    expected = [
        "vw",
        "--loss_function",
        "logistic",
        "-d",
        model.test_file,
        "--quiet",
        "-i",
        model.model_file,
        "-p",
        model.prediction_file,
        "-t",
    ]
    params = dict(
        loss_function="logistic", i="test", oaa=5, d="data", test_only=True, quiet=True
    )
    assert model.parse_test_params(params=params) == expected
@pytest.mark.vw
def test_to_vw_file(model, df):
    """to_vw_file writes one 'label index|user u |item i' line per interaction."""
    expected = ["1 0|user 1 |item 8", "5 1|user 3 |item 7", "3 2|user 2 |item 7"]
    model.to_vw_file(df, train=True)
    with open(model.train_file, "r") as f:
        assert f.read().splitlines() == expected
    del model
@pytest.mark.vw
def test_fit_and_predict(model, df):
    """predict reads the prediction file back and joins it onto the input frame."""
    # generate fake predictions
    with open(model.prediction_file, "w") as f:
        f.writelines(["1 0\n", "3 1\n", "5 2\n"])
    # patch subprocess call to vw
    with mock.patch("reco_utils.recommender.vowpal_wabbit.vw.run") as mock_run:
        model.fit(df)
        result = model.predict(df)
    expected = dict(
        user=dict(enumerate([1, 3, 2])),
        item=dict(enumerate([8, 7, 7])),
        rating=dict(enumerate([1, 5, 3])),
        timestamp=dict(enumerate([1, 2, 3])),
        prediction=dict(enumerate([1, 3, 5])),
    )
    assert result.to_dict() == expected
| 22.9 | 87 | 0.570709 |
132805389a7d908aca581aca55b9510dd6be8418 | 11,310 | py | Python | torchvision/models/googlenet.py | ZJUGuoShuai/vision | a9940fe4b2b63bd82a2f853616e00fd0bd112f9a | [
"BSD-3-Clause"
] | null | null | null | torchvision/models/googlenet.py | ZJUGuoShuai/vision | a9940fe4b2b63bd82a2f853616e00fd0bd112f9a | [
"BSD-3-Clause"
] | null | null | null | torchvision/models/googlenet.py | ZJUGuoShuai/vision | a9940fe4b2b63bd82a2f853616e00fd0bd112f9a | [
"BSD-3-Clause"
] | 1 | 2020-01-10T12:50:14.000Z | 2020-01-10T12:50:14.000Z | import warnings
from collections import namedtuple
from typing import Optional, Tuple, List, Callable, Any
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch import Tensor
from .._internally_replaced_utils import load_state_dict_from_url
__all__ = ["GoogLeNet", "googlenet", "GoogLeNetOutputs", "_GoogLeNetOutputs"]
# URL of the reference pretrained checkpoint.
model_urls = {
    # GoogLeNet ported from TensorFlow
    "googlenet": "https://download.pytorch.org/models/googlenet-1378be20.pth",
}
# Structured return type (main logits plus the two auxiliary-classifier logits).
GoogLeNetOutputs = namedtuple("GoogLeNetOutputs", ["logits", "aux_logits2", "aux_logits1"])
GoogLeNetOutputs.__annotations__ = {"logits": Tensor, "aux_logits2": Optional[Tensor], "aux_logits1": Optional[Tensor]}
# Script annotations failed with _GoogleNetOutputs = namedtuple ...
# _GoogLeNetOutputs set here for backwards compat
_GoogLeNetOutputs = GoogLeNetOutputs
def googlenet(pretrained: bool = False, progress: bool = True, **kwargs: Any) -> "GoogLeNet":
    r"""GoogLeNet (Inception v1) model architecture from
    `"Going Deeper with Convolutions" <http://arxiv.org/abs/1409.4842>`_.
    The required minimum input size of the model is 15x15.

    Args:
        pretrained (bool): If True, returns a model pre-trained on ImageNet
        progress (bool): If True, displays a progress bar of the download to stderr
        aux_logits (bool): If True, adds two auxiliary branches that can improve training.
            Default: *False* when pretrained is True otherwise *True*
        transform_input (bool): If True, preprocesses the input according to the method with which it
            was trained on ImageNet. Default: *False*
    """
    if not pretrained:
        return GoogLeNet(**kwargs)

    # Pretrained defaults: inputs are normalized the TF way and aux heads are off.
    kwargs.setdefault("transform_input", True)
    kwargs.setdefault("aux_logits", False)
    if kwargs["aux_logits"]:
        warnings.warn(
            "auxiliary heads in the pretrained googlenet model are NOT pretrained, "
            "so make sure to train them"
        )
    keep_aux = kwargs["aux_logits"]
    # The checkpoint contains aux-head weights, so build the model with aux heads
    # enabled and random init disabled, then load the state dict.
    kwargs["aux_logits"] = True
    kwargs["init_weights"] = False
    model = GoogLeNet(**kwargs)
    state_dict = load_state_dict_from_url(model_urls["googlenet"], progress=progress)
    model.load_state_dict(state_dict)
    if not keep_aux:
        # Strip the aux heads again when the caller did not ask for them.
        model.aux_logits = False
        model.aux1 = None  # type: ignore[assignment]
        model.aux2 = None  # type: ignore[assignment]
    return model
class GoogLeNet(nn.Module):
    """GoogLeNet (Inception v1) network with optional auxiliary classifiers.

    When ``aux_logits`` is True, two auxiliary heads (``aux1`` after
    inception4a, ``aux2`` after inception4d) produce extra logits that are
    only computed in training mode.
    """
    # Attributes TorchScript treats as compile-time constants.
    __constants__ = ["aux_logits", "transform_input"]
    def __init__(
        self,
        num_classes: int = 1000,
        aux_logits: bool = True,
        transform_input: bool = False,
        init_weights: Optional[bool] = None,
        blocks: Optional[List[Callable[..., nn.Module]]] = None,
    ) -> None:
        """Build the network.

        Args:
            num_classes: size of the final classification layer.
            aux_logits: if True, attach the two auxiliary heads.
            transform_input: if True, re-normalize inputs in ``_transform_input``.
            init_weights: if True, run truncated-normal init; None warns
                (deprecation path) and then behaves like True.
            blocks: optional ``[conv, inception, inception_aux]`` block classes
                to substitute for the defaults.
        """
        super(GoogLeNet, self).__init__()
        if blocks is None:
            blocks = [BasicConv2d, Inception, InceptionAux]
        if init_weights is None:
            warnings.warn(
                "The default weight initialization of GoogleNet will be changed in future releases of "
                "torchvision. If you wish to keep the old behavior (which leads to long initialization times"
                " due to scipy/scipy#11299), please set init_weights=True.",
                FutureWarning,
            )
            init_weights = True
        assert len(blocks) == 3
        conv_block = blocks[0]
        inception_block = blocks[1]
        inception_aux_block = blocks[2]
        self.aux_logits = aux_logits
        self.transform_input = transform_input
        # Stem: per the shape comments in _forward, 3x224x224 -> 192x28x28.
        self.conv1 = conv_block(3, 64, kernel_size=7, stride=2, padding=3)
        self.maxpool1 = nn.MaxPool2d(3, stride=2, ceil_mode=True)
        self.conv2 = conv_block(64, 64, kernel_size=1)
        self.conv3 = conv_block(64, 192, kernel_size=3, padding=1)
        self.maxpool2 = nn.MaxPool2d(3, stride=2, ceil_mode=True)
        # Inception stages; positional ints are the per-branch channel counts
        # (1x1, 3x3-reduce, 3x3, 5x5-reduce, 5x5, pool-proj).
        self.inception3a = inception_block(192, 64, 96, 128, 16, 32, 32)
        self.inception3b = inception_block(256, 128, 128, 192, 32, 96, 64)
        self.maxpool3 = nn.MaxPool2d(3, stride=2, ceil_mode=True)
        self.inception4a = inception_block(480, 192, 96, 208, 16, 48, 64)
        self.inception4b = inception_block(512, 160, 112, 224, 24, 64, 64)
        self.inception4c = inception_block(512, 128, 128, 256, 24, 64, 64)
        self.inception4d = inception_block(512, 112, 144, 288, 32, 64, 64)
        self.inception4e = inception_block(528, 256, 160, 320, 32, 128, 128)
        self.maxpool4 = nn.MaxPool2d(2, stride=2, ceil_mode=True)
        self.inception5a = inception_block(832, 256, 160, 320, 32, 128, 128)
        self.inception5b = inception_block(832, 384, 192, 384, 48, 128, 128)
        if aux_logits:
            # Aux heads tap the outputs of inception4a (512 ch) and 4d (528 ch).
            self.aux1 = inception_aux_block(512, num_classes)
            self.aux2 = inception_aux_block(528, num_classes)
        else:
            self.aux1 = None  # type: ignore[assignment]
            self.aux2 = None  # type: ignore[assignment]
        self.avgpool = nn.AdaptiveAvgPool2d((1, 1))
        self.dropout = nn.Dropout(0.2)
        self.fc = nn.Linear(1024, num_classes)
        if init_weights:
            self._initialize_weights()
    def _initialize_weights(self) -> None:
        # Truncated-normal init (clipped at +/-2 std) for conv/linear weights;
        # batch-norm layers start out as the identity transform.
        for m in self.modules():
            if isinstance(m, nn.Conv2d) or isinstance(m, nn.Linear):
                torch.nn.init.trunc_normal_(m.weight, mean=0.0, std=0.01, a=-2, b=2)
            elif isinstance(m, nn.BatchNorm2d):
                nn.init.constant_(m.weight, 1)
                nn.init.constant_(m.bias, 0)
    def _transform_input(self, x: Tensor) -> Tensor:
        # Per-channel re-normalization from (mean 0.5, std 0.5) preprocessing
        # to the ImageNet statistics (0.485/0.456/0.406, 0.229/0.224/0.225).
        if self.transform_input:
            x_ch0 = torch.unsqueeze(x[:, 0], 1) * (0.229 / 0.5) + (0.485 - 0.5) / 0.5
            x_ch1 = torch.unsqueeze(x[:, 1], 1) * (0.224 / 0.5) + (0.456 - 0.5) / 0.5
            x_ch2 = torch.unsqueeze(x[:, 2], 1) * (0.225 / 0.5) + (0.406 - 0.5) / 0.5
            x = torch.cat((x_ch0, x_ch1, x_ch2), 1)
        return x
    def _forward(self, x: Tensor) -> Tuple[Tensor, Optional[Tensor], Optional[Tensor]]:
        """Run the backbone; returns ``(logits, aux2, aux1)`` in that order.

        Aux outputs are None unless the corresponding head exists AND the
        module is in training mode.
        """
        # N x 3 x 224 x 224
        x = self.conv1(x)
        # N x 64 x 112 x 112
        x = self.maxpool1(x)
        # N x 64 x 56 x 56
        x = self.conv2(x)
        # N x 64 x 56 x 56
        x = self.conv3(x)
        # N x 192 x 56 x 56
        x = self.maxpool2(x)
        # N x 192 x 28 x 28
        x = self.inception3a(x)
        # N x 256 x 28 x 28
        x = self.inception3b(x)
        # N x 480 x 28 x 28
        x = self.maxpool3(x)
        # N x 480 x 14 x 14
        x = self.inception4a(x)
        # N x 512 x 14 x 14
        aux1: Optional[Tensor] = None
        if self.aux1 is not None:
            if self.training:
                aux1 = self.aux1(x)
        x = self.inception4b(x)
        # N x 512 x 14 x 14
        x = self.inception4c(x)
        # N x 512 x 14 x 14
        x = self.inception4d(x)
        # N x 528 x 14 x 14
        aux2: Optional[Tensor] = None
        if self.aux2 is not None:
            if self.training:
                aux2 = self.aux2(x)
        x = self.inception4e(x)
        # N x 832 x 14 x 14
        x = self.maxpool4(x)
        # N x 832 x 7 x 7
        x = self.inception5a(x)
        # N x 832 x 7 x 7
        x = self.inception5b(x)
        # N x 1024 x 7 x 7
        x = self.avgpool(x)
        # N x 1024 x 1 x 1
        x = torch.flatten(x, 1)
        # N x 1024
        x = self.dropout(x)
        x = self.fc(x)
        # N x 1000 (num_classes)
        return x, aux2, aux1
    @torch.jit.unused
    def eager_outputs(self, x: Tensor, aux2: Tensor, aux1: Optional[Tensor]) -> GoogLeNetOutputs:
        # Eager mode: return the namedtuple only while training with aux heads
        # enabled; otherwise just the main logits tensor.
        if self.training and self.aux_logits:
            return _GoogLeNetOutputs(x, aux2, aux1)
        else:
            return x  # type: ignore[return-value]
    def forward(self, x: Tensor) -> GoogLeNetOutputs:
        x = self._transform_input(x)
        # NOTE(review): _forward returns (x, aux2, aux1) but is unpacked here
        # as (x, aux1, aux2), so the two aux values travel under swapped local
        # names before being passed on as (x, aux2, aux1).  Confirm which head
        # actually lands in which GoogLeNetOutputs field before relying on
        # the field names downstream.
        x, aux1, aux2 = self._forward(x)
        aux_defined = self.training and self.aux_logits
        if torch.jit.is_scripting():
            # Scripted code cannot return a union type, so it always returns
            # the full tuple (with warning when the aux entries are None).
            if not aux_defined:
                warnings.warn("Scripted GoogleNet always returns GoogleNetOutputs Tuple")
            return GoogLeNetOutputs(x, aux2, aux1)
        else:
            return self.eager_outputs(x, aux2, aux1)
class Inception(nn.Module):
    """Inception block: four parallel branches concatenated along channels.

    Branches: 1x1 conv; 1x1 reduce -> 3x3 conv; 1x1 reduce -> "5x5" conv
    (really 3x3, see note in the code); 3x3 max-pool -> 1x1 projection.
    All branches preserve spatial size, so the output has
    ``ch1x1 + ch3x3 + ch5x5 + pool_proj`` channels.
    """

    def __init__(
        self,
        in_channels: int,
        ch1x1: int,
        ch3x3red: int,
        ch3x3: int,
        ch5x5red: int,
        ch5x5: int,
        pool_proj: int,
        conv_block: Optional[Callable[..., nn.Module]] = None,
    ) -> None:
        super().__init__()
        conv_block = BasicConv2d if conv_block is None else conv_block
        self.branch1 = conv_block(in_channels, ch1x1, kernel_size=1)
        self.branch2 = nn.Sequential(
            conv_block(in_channels, ch3x3red, kernel_size=1),
            conv_block(ch3x3red, ch3x3, kernel_size=3, padding=1),
        )
        self.branch3 = nn.Sequential(
            conv_block(in_channels, ch5x5red, kernel_size=1),
            # Here, kernel_size=3 instead of kernel_size=5 is a known bug.
            # Please see https://github.com/pytorch/vision/issues/906 for details.
            conv_block(ch5x5red, ch5x5, kernel_size=3, padding=1),
        )
        self.branch4 = nn.Sequential(
            nn.MaxPool2d(kernel_size=3, stride=1, padding=1, ceil_mode=True),
            conv_block(in_channels, pool_proj, kernel_size=1),
        )

    def _forward(self, x: Tensor) -> List[Tensor]:
        # One tensor per branch, in concatenation order.
        return [self.branch1(x), self.branch2(x), self.branch3(x), self.branch4(x)]

    def forward(self, x: Tensor) -> Tensor:
        return torch.cat(self._forward(x), 1)
class InceptionAux(nn.Module):
    """Auxiliary classifier head attached to an intermediate feature map.

    Pools to a fixed 4x4 grid, projects to 128 channels, then classifies
    through two fully connected layers with heavy (p=0.7) dropout.
    """

    def __init__(
        self, in_channels: int, num_classes: int, conv_block: Optional[Callable[..., nn.Module]] = None
    ) -> None:
        super().__init__()
        conv_block = BasicConv2d if conv_block is None else conv_block
        self.conv = conv_block(in_channels, 128, kernel_size=1)
        self.fc1 = nn.Linear(2048, 1024)  # 128 channels * 4 * 4 spatial = 2048
        self.fc2 = nn.Linear(1024, num_classes)

    def forward(self, x: Tensor) -> Tensor:
        # aux1: N x 512 x 14 x 14, aux2: N x 528 x 14 x 14
        out = F.adaptive_avg_pool2d(x, (4, 4))  # N x in_channels x 4 x 4
        out = self.conv(out)                    # N x 128 x 4 x 4
        out = torch.flatten(out, 1)             # N x 2048
        out = F.relu(self.fc1(out), inplace=True)  # N x 1024
        out = F.dropout(out, 0.7, training=self.training)
        return self.fc2(out)                    # N x num_classes
class BasicConv2d(nn.Module):
    """Convenience unit: bias-free Conv2d -> BatchNorm -> in-place ReLU."""

    def __init__(self, in_channels: int, out_channels: int, **kwargs: Any) -> None:
        super().__init__()
        # BatchNorm provides the affine shift, so a conv bias would be redundant.
        self.conv = nn.Conv2d(in_channels, out_channels, bias=False, **kwargs)
        self.bn = nn.BatchNorm2d(out_channels, eps=0.001)

    def forward(self, x: Tensor) -> Tensor:
        out = self.bn(self.conv(x))
        return F.relu(out, inplace=True)
| 37.081967 | 119 | 0.598497 |
641db5f5430965629c20df0486b90fa9ee0dc533 | 3,524 | py | Python | aoc_2021/day18.py | guido-weber/AoC_2021 | f5d41ea0600f702857b2c479a67f4f9578afb52b | [
"Unlicense"
] | null | null | null | aoc_2021/day18.py | guido-weber/AoC_2021 | f5d41ea0600f702857b2c479a67f4f9578afb52b | [
"Unlicense"
] | null | null | null | aoc_2021/day18.py | guido-weber/AoC_2021 | f5d41ea0600f702857b2c479a67f4f9578afb52b | [
"Unlicense"
] | null | null | null | import functools
from aoc_2021 import common
# Worked example ("homework assignment") from the day 18 puzzle statement;
# per the calls in __main__ below, part 1 expects 4140 and part 2 expects 3993.
test_input = """[[[0,[5,8]],[[1,7],[9,6]]],[[4,[1,2]],[[1,4],2]]]
[[[5,[2,8]],4],[5,[[9,9],0]]]
[6,[[[6,2],[5,6]],[[7,6],[4,7]]]]
[[[6,[0,7]],[0,9]],[4,[9,[9,0]]]]
[[[7,[6,4]],[3,[1,3]]],[[[5,5],1],9]]
[[6,[[7,3],[3,2]]],[[[3,8],[5,7]],4]]
[[[[5,4],[7,7]],8],[[8,3],8]]
[[9,3],[[9,9],[6,[4,9]]]]
[[2,[[7,7],7]],[[5,8],[[9,3],[0,2]]]]
[[[[5,2],5],[8,[3,7]]],[[5,[7,5]],[4,4]]]
"""
def parse_input(lines: list[str]):
    """Parse one snailfish number per input line.

    Each line is a nested-list literal such as ``[[1,2],3]``.  Uses
    :func:`ast.literal_eval` rather than ``eval`` so the puzzle input
    (untrusted file content) can never execute arbitrary code; for valid
    list literals the result is identical.

    Returns:
        list: one nested list (of ints) per line, in input order.
    """
    import ast  # local import keeps the module's top-level imports unchanged

    return [ast.literal_eval(line.strip()) for line in lines]
def magnitude(num):
    """Return the magnitude of a snailfish number.

    A regular number is its own magnitude; a pair scores three times the
    magnitude of its left element plus twice that of its right element,
    applied recursively.
    """
    if isinstance(num, int):
        return num
    return 3 * magnitude(num[0]) + 2 * magnitude(num[1])
def add_to_right(num, to_add: int):
    """Add ``to_add`` to the leftmost regular number inside ``num``.

    Returns ``(handled, new_num)``: ``handled`` is True once some regular
    number absorbed the addition; ``new_num`` is the (possibly rebuilt)
    snailfish number.
    """
    if isinstance(num, int):
        return True, num + to_add
    handled, replacement = add_to_right(num[0], to_add)
    if handled:
        return True, [replacement, num[1]]
    handled, replacement = add_to_right(num[1], to_add)
    if handled:
        return True, [num[0], replacement]
    return False, num
def add_to_left(num, to_add: int):
    """Add ``to_add`` to the rightmost regular number inside ``num``.

    Mirror image of :func:`add_to_right`: descends into the right child
    first.  Returns ``(handled, new_num)``.
    """
    if isinstance(num, int):
        return True, num + to_add
    handled, replacement = add_to_left(num[1], to_add)
    if handled:
        return True, [num[0], replacement]
    handled, replacement = add_to_left(num[0], to_add)
    if handled:
        return True, [replacement, num[1]]
    return False, num
@functools.singledispatch
def explode(num: int, _: int):
    """Base case: a regular number never explodes."""
    # Signature mirrors the list overload; depth is unused here.
    return False, num, None, None
@explode.register
def _(num: list, depth: int):
    """Explode the leftmost pair nested inside four pairs.

    ``depth`` counts from 1 at the root, so a pair at depth 5 explodes.
    Returns ``(exploded, replacement, carry_left, carry_right)``; a
    non-None carry still needs to be added to the nearest regular number
    on that side by an enclosing call.
    """
    if depth == 5:
        # The exploding pair is replaced by 0; its halves become carries.
        return True, 0, num[0], num[1]
    else:
        flag, repl, left, right = explode(num[0], depth + 1)
        if flag:
            if right is None:
                return True, [repl, num[1]], left, None
            else:
                # The right carry belongs to the leftmost regular number of
                # our right child; if absorbed, stop propagating it upward.
                added, a_repl = add_to_right(num[1], right)
                return True, [repl, a_repl], left, None if added else right
        flag, repl, left, right = explode(num[1], depth + 1)
        if flag:
            if left is None:
                return True, [num[0], repl], None, right
            else:
                # Symmetric: the left carry goes to the rightmost regular
                # number of our left child.
                added, a_repl = add_to_left(num[0], left)
                return True, [a_repl, repl], None if added else left, right
        return False, num, None, None
def split(num):
    """Split the leftmost regular number >= 10 into a pair.

    The pair is ``[n // 2, n - n // 2]`` (round down / round up).
    Returns ``(changed, new_num)``; ``changed`` is False when nothing
    needed splitting.
    """
    if isinstance(num, int):
        if num < 10:
            return False, num
        low = num // 2
        return True, [low, num - low]
    changed, replacement = split(num[0])
    if changed:
        return True, [replacement, num[1]]
    changed, replacement = split(num[1])
    if changed:
        return True, [num[0], replacement]
    return False, num
def sf_add(num1: list, num2: list):
    """Snailfish addition: pair the operands, then reduce to normal form.

    Reduction repeatedly applies the first applicable action — exploding
    has priority over splitting — until neither applies.
    """
    result = [num1, num2]
    while True:
        exploded, result, _, _ = explode(result, 1)
        if exploded:
            continue  # keep exploding before considering any split
        splitted, result = split(result)
        if not splitted:
            return result
def part1(lines):
    """Add every input number in order; return the final sum's magnitude."""
    return magnitude(functools.reduce(sf_add, parse_input(lines)))
def part2(lines):
    """Return the largest magnitude of any ordered pair-sum of two inputs.

    Snailfish addition is not commutative, so both (a, b) and (b, a) are
    tried for every distinct pair.
    """
    numbers = parse_input(lines)
    best = 0
    for i, first in enumerate(numbers):
        for j, second in enumerate(numbers):
            if i != j:
                best = max(best, magnitude(sf_add(first, second)))
    return best
if __name__ == "__main__":
    # Self-check: the final-sum example from the puzzle text must score 4140.
    x = magnitude(
        eval("[[[[6,6],[7,6]],[[7,7],[7,0]]],[[[7,7],[7,7]],[[7,8],[9,9]]]]")
    )
    assert x == 4140, x
    # Run both parts through the shared harness with the sample answers.
    common.run(18, test_input, 4140, test_input, 3993, part1, part2)
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.