hexsha stringlengths 40 40 | size int64 2 1.02M | ext stringclasses 10
values | lang stringclasses 1
value | max_stars_repo_path stringlengths 4 245 | max_stars_repo_name stringlengths 6 130 | max_stars_repo_head_hexsha stringlengths 40 40 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 4 245 | max_issues_repo_name stringlengths 6 130 | max_issues_repo_head_hexsha stringlengths 40 40 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 67k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 4 245 | max_forks_repo_name stringlengths 6 130 | max_forks_repo_head_hexsha stringlengths 40 40 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 2 1.02M | avg_line_length float64 1 417k | max_line_length int64 1 987k | alphanum_fraction float64 0 1 | content_no_comment stringlengths 0 1.01M | is_comment_constant_removed bool 1
class | is_sharp_comment_removed bool 1
class |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
f71503d83257c56d9a08f215294410fe3f0189c1 | 4,679 | py | Python | venv/Lib/site-packages/pyrogram/parser/markdown.py | D1ne2021/jjhhhjj | a090da30983b3ef276dfe4cef2ded4526f36002a | [
"MIT"
] | 2 | 2021-12-13T07:09:55.000Z | 2022-01-12T12:15:20.000Z | venv/Lib/site-packages/pyrogram/parser/markdown.py | hoangkiet1906/Botcie_ver1 | c133b915edde06dac690a7dc6ca160f6792fc4c8 | [
"MIT"
] | null | null | null | venv/Lib/site-packages/pyrogram/parser/markdown.py | hoangkiet1906/Botcie_ver1 | c133b915edde06dac690a7dc6ca160f6792fc4c8 | [
"MIT"
] | null | null | null | # Pyrogram - Telegram MTProto API Client Library for Python
# Copyright (C) 2017-2021 Dan <https://github.com/delivrance>
#
# This file is part of Pyrogram.
#
# Pyrogram is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Pyrogram is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with Pyrogram. If not, see <http://www.gnu.org/licenses/>.
import html
import re
from typing import Optional
import pyrogram
from . import utils
from .html import HTML
# Markdown delimiters recognized by Telegram-flavoured Markdown.
BOLD_DELIM = "**"
ITALIC_DELIM = "__"
UNDERLINE_DELIM = "--"
STRIKE_DELIM = "~~"
CODE_DELIM = "`"
PRE_DELIM = "```"
# Matches either one of the delimiters above or an inline [text](url) link.
# Each delimiter character is backslash-escaped, and PRE_DELIM ("```") is
# listed before CODE_DELIM ("`") so the alternation prefers the longer match.
MARKDOWN_RE = re.compile(r"({d})|\[(.+?)\]\((.+?)\)".format(
    d="|".join(
        ["".join(i) for i in [
            [rf"\{j}" for j in i]
            for i in [
                PRE_DELIM,
                CODE_DELIM,
                STRIKE_DELIM,
                UNDERLINE_DELIM,
                ITALIC_DELIM,
                BOLD_DELIM
            ]
        ]]
    )))
# Templates for the HTML the Markdown is rewritten into.
OPENING_TAG = "<{}>"
CLOSING_TAG = "</{}>"
URL_MARKUP = '<a href="{}">{}</a>'
# Inside these spans other formatting delimiters are ignored (see parse()).
FIXED_WIDTH_DELIMS = [CODE_DELIM, PRE_DELIM]
class Markdown:
    """Telegram-flavoured Markdown parser.

    Markdown delimiters are rewritten in place into the equivalent HTML
    tags and the result is handed to the :class:`HTML` parser.
    """
    def __init__(self, client: Optional["pyrogram.Client"]):
        # All heavy lifting is delegated to the HTML parser.
        self.html = HTML(client)
    async def parse(self, text: str, strict: bool = False):
        """Convert Markdown ``text`` by rewriting delimiters into HTML tags.

        The regex is evaluated once on the original text; each match is then
        substituted via ``utils.replace_once`` at its recorded start offset,
        which keeps earlier replacements from shifting later ones.
        When ``strict`` is True the input is HTML-escaped first so only
        Markdown (not raw HTML) is interpreted.
        """
        if strict:
            text = html.escape(text)
        delims = set()  # delimiters currently "open" (first occurrence seen)
        is_fixed_width = False
        for i, match in enumerate(re.finditer(MARKDOWN_RE, text)):
            start, _ = match.span()
            delim, text_url, url = match.groups()
            full = match.group(0)
            if delim in FIXED_WIDTH_DELIMS:
                # Entering or leaving a `...`/```...``` span.
                is_fixed_width = not is_fixed_width
            if is_fixed_width and delim not in FIXED_WIDTH_DELIMS:
                # Formatting delimiters inside code spans are left verbatim.
                continue
            if text_url:
                text = utils.replace_once(text, full, URL_MARKUP.format(url, text_url), start)
                continue
            if delim == BOLD_DELIM:
                tag = "b"
            elif delim == ITALIC_DELIM:
                tag = "i"
            elif delim == UNDERLINE_DELIM:
                tag = "u"
            elif delim == STRIKE_DELIM:
                tag = "s"
            elif delim == CODE_DELIM:
                tag = "code"
            elif delim == PRE_DELIM:
                tag = "pre"
            else:
                continue
            if delim not in delims:
                # First occurrence of a delimiter opens the tag...
                delims.add(delim)
                tag = OPENING_TAG.format(tag)
            else:
                # ...the second occurrence closes it.
                delims.remove(delim)
                tag = CLOSING_TAG.format(tag)
            text = utils.replace_once(text, delim, tag, start)
        return await self.html.parse(text)
    @staticmethod
    def unparse(text: str, entities: list):
        """Rebuild Markdown source from plain ``text`` plus its ``entities``.

        Works on the surrogate-expanded text (``utils.add_surrogates``) so
        the entity offsets line up with the character positions used here.
        """
        text = utils.add_surrogates(text)
        entities_offsets = []
        for entity in entities:
            entity_type = entity.type
            start = entity.offset
            end = start + entity.length
            if entity_type == "bold":
                start_tag = end_tag = BOLD_DELIM
            elif entity_type == "italic":
                start_tag = end_tag = ITALIC_DELIM
            elif entity_type == "underline":
                start_tag = end_tag = UNDERLINE_DELIM
            elif entity_type == "strikethrough":
                start_tag = end_tag = STRIKE_DELIM
            elif entity_type == "code":
                start_tag = end_tag = CODE_DELIM
            elif entity_type in ("pre", "blockquote"):
                start_tag = end_tag = PRE_DELIM
            elif entity_type == "text_link":
                url = entity.url
                start_tag = "["
                end_tag = f"]({url})"
            elif entity_type == "text_mention":
                user = entity.user
                start_tag = "["
                end_tag = f"](tg://user?id={user.id})"
            else:
                # Unknown entity types are silently skipped.
                continue
            entities_offsets.append((start_tag, start,))
            entities_offsets.append((end_tag, end,))
        # sorting by offset (desc)
        entities_offsets.sort(key=lambda x: -x[1])
        # Insert from the end toward the start so earlier offsets stay valid.
        for entity, offset in entities_offsets:
            text = text[:offset] + entity + text[offset:]
        return utils.remove_surrogates(text)
| 30.986755 | 94 | 0.551186 |
import html
import re
from typing import Optional
import pyrogram
from . import utils
from .html import HTML
# Telegram Markdown delimiters and the regex that finds them or [text](url)
# links. Delimiters are escaped; "```" precedes "`" so the longer one wins.
BOLD_DELIM = "**"
ITALIC_DELIM = "__"
UNDERLINE_DELIM = "--"
STRIKE_DELIM = "~~"
CODE_DELIM = "`"
PRE_DELIM = "```"
MARKDOWN_RE = re.compile(r"({d})|\[(.+?)\]\((.+?)\)".format(
    d="|".join(
        ["".join(i) for i in [
            [rf"\{j}" for j in i]
            for i in [
                PRE_DELIM,
                CODE_DELIM,
                STRIKE_DELIM,
                UNDERLINE_DELIM,
                ITALIC_DELIM,
                BOLD_DELIM
            ]
        ]]
    )))
# HTML fragments that Markdown delimiters get rewritten into.
OPENING_TAG = "<{}>"
CLOSING_TAG = "</{}>"
URL_MARKUP = '<a href="{}">{}</a>'
# Content inside these delimiters must not be further formatted.
FIXED_WIDTH_DELIMS = [CODE_DELIM, PRE_DELIM]
class Markdown:
    """Markdown parser: rewrites delimiters to HTML, then defers to HTML."""
    def __init__(self, client: Optional["pyrogram.Client"]):
        self.html = HTML(client)
    async def parse(self, text: str, strict: bool = False):
        """Rewrite Markdown markers in ``text`` to HTML tags and parse.

        Replacements use the match offsets from a single regex scan
        (``utils.replace_once`` with ``start``), so substitution order
        matters. With ``strict`` the input is HTML-escaped first.
        """
        if strict:
            text = html.escape(text)
        delims = set()  # delimiters seen once and awaiting their closer
        is_fixed_width = False  # True while inside a code/pre span
        for i, match in enumerate(re.finditer(MARKDOWN_RE, text)):
            start, _ = match.span()
            delim, text_url, url = match.groups()
            full = match.group(0)
            if delim in FIXED_WIDTH_DELIMS:
                is_fixed_width = not is_fixed_width
            if is_fixed_width and delim not in FIXED_WIDTH_DELIMS:
                # Skip formatting markers inside code spans.
                continue
            if text_url:
                # [text](url) -> <a href="url">text</a>
                text = utils.replace_once(text, full, URL_MARKUP.format(url, text_url), start)
                continue
            if delim == BOLD_DELIM:
                tag = "b"
            elif delim == ITALIC_DELIM:
                tag = "i"
            elif delim == UNDERLINE_DELIM:
                tag = "u"
            elif delim == STRIKE_DELIM:
                tag = "s"
            elif delim == CODE_DELIM:
                tag = "code"
            elif delim == PRE_DELIM:
                tag = "pre"
            else:
                continue
            if delim not in delims:
                # Odd occurrence: open the tag.
                delims.add(delim)
                tag = OPENING_TAG.format(tag)
            else:
                # Even occurrence: close it.
                delims.remove(delim)
                tag = CLOSING_TAG.format(tag)
            text = utils.replace_once(text, delim, tag, start)
        return await self.html.parse(text)
    @staticmethod
    def unparse(text: str, entities: list):
        """Insert Markdown delimiters back into ``text`` at entity offsets."""
        text = utils.add_surrogates(text)
        entities_offsets = []
        for entity in entities:
            entity_type = entity.type
            start = entity.offset
            end = start + entity.length
            if entity_type == "bold":
                start_tag = end_tag = BOLD_DELIM
            elif entity_type == "italic":
                start_tag = end_tag = ITALIC_DELIM
            elif entity_type == "underline":
                start_tag = end_tag = UNDERLINE_DELIM
            elif entity_type == "strikethrough":
                start_tag = end_tag = STRIKE_DELIM
            elif entity_type == "code":
                start_tag = end_tag = CODE_DELIM
            elif entity_type in ("pre", "blockquote"):
                start_tag = end_tag = PRE_DELIM
            elif entity_type == "text_link":
                url = entity.url
                start_tag = "["
                end_tag = f"]({url})"
            elif entity_type == "text_mention":
                user = entity.user
                start_tag = "["
                end_tag = f"](tg://user?id={user.id})"
            else:
                continue
            entities_offsets.append((start_tag, start,))
            entities_offsets.append((end_tag, end,))
        # Sort by offset descending: inserting back-to-front keeps the
        # remaining (smaller) offsets valid.
        entities_offsets.sort(key=lambda x: -x[1])
        for entity, offset in entities_offsets:
            text = text[:offset] + entity + text[offset:]
        return utils.remove_surrogates(text)
| true | true |
f715042ccd8dab4bb318453fc8081500dd54c9f3 | 6,397 | py | Python | python_toolbox/combi/perming/_variation_adding_mixin.py | hboshnak/python_toolbox | cb9ef64b48f1d03275484d707dc5079b6701ad0c | [
"MIT"
] | 119 | 2015-02-05T17:59:47.000Z | 2022-02-21T22:43:40.000Z | python_toolbox/combi/perming/_variation_adding_mixin.py | hboshnak/python_toolbox | cb9ef64b48f1d03275484d707dc5079b6701ad0c | [
"MIT"
] | 4 | 2019-04-24T14:01:14.000Z | 2020-05-21T12:03:29.000Z | python_toolbox/combi/perming/_variation_adding_mixin.py | hboshnak/python_toolbox | cb9ef64b48f1d03275484d707dc5079b6701ad0c | [
"MIT"
] | 14 | 2015-03-30T06:30:42.000Z | 2021-12-24T23:45:11.000Z | # Copyright 2009-2017 Ram Rachum.
# This program is distributed under the MIT license.
from python_toolbox import caching
from python_toolbox import sequence_tools
# (`PermSpace` exported to here from `perm_space.py` to avoid import loop.)
class _VariationAddingMixin:
    '''Mixin for `PermSpace` to add variations to a perm space.

    Each method returns a new `PermSpace` derived from this one with one
    more variation applied (rapplied, partialled, combination, dapplied,
    fixed, degreed or typed); the original space is never mutated.
    '''
    def get_rapplied(self, sequence):
        '''Get a version of this `PermSpace` that has a range of `sequence`.'''
        if self.is_rapplied:
            raise TypeError('This space is already rapplied, to rapply it to a '
                            'different sequence please use `.unrapplied` '
                            'first.')
        sequence = \
            sequence_tools.ensure_iterable_is_immutable_sequence(sequence)
        if len(sequence) != self.sequence_length:
            # The new range must have exactly as many items as the old one.
            raise Exception
        return PermSpace(
            sequence, n_elements=self.n_elements, domain=self.domain,
            # Fixed values are translated into the new sequence's items.
            fixed_map={key: sequence[value] for key, value in
                       self.fixed_map.items()},
            degrees=self.degrees, slice_=self.canonical_slice,
            is_combination=self.is_combination,
            perm_type=self.perm_type
        )
    # There's no `.get_recurrented` because we can't know which sequence you'd
    # want. If you want a recurrent perm space you need to use `.get_rapplied`
    # with a recurrent sequence.
    def get_partialled(self, n_elements):
        '''Get a partialled version of this `PermSpace`.'''
        if self.is_sliced:
            raise TypeError(
                "Can't get partial of sliced `PermSpace` directly, because "
                "the number of items would be different. Use `.unsliced` "
                "first."
            )
        return PermSpace(
            self.sequence, n_elements=n_elements, domain=self.domain,
            fixed_map=self.fixed_map, degrees=self.degrees, slice_=None,
            is_combination=self.is_combination,
            perm_type=self.perm_type
        )
    @caching.CachedProperty
    def combinationed(self):
        '''Get a combination version of this perm space.'''
        # Imported here (not at module level) to avoid an import loop.
        from .comb import Comb
        if self.is_sliced:
            raise TypeError(
                "Can't get a combinationed version of a sliced `PermSpace`"
                "directly, because the number of items would be different. "
                "Use `.unsliced` first."
            )
        if self.is_typed:
            raise TypeError(
                "Can't convert typed `PermSpace` directly to "
                "combinationed, because the perm class would not be a "
                "subclass of `Comb`."
            )
        if self.is_degreed:
            raise TypeError("Can't use degrees with combination spaces.")
        return PermSpace(
            self.sequence, n_elements=self.n_elements, domain=self.domain,
            fixed_map=self.fixed_map, is_combination=True,
            perm_type=Comb
        )
    def get_dapplied(self, domain):
        '''Get a version of this `PermSpace` that has a domain of `domain`.'''
        from . import variations
        if self.is_combination:
            # Combinations have no meaningful domain.
            raise variations.UnallowedVariationSelectionException(
                {variations.Variation.DAPPLIED: True,
                 variations.Variation.COMBINATION: True,}
            )
        domain = sequence_tools.ensure_iterable_is_immutable_sequence(domain)
        if len(domain) != self.n_elements:
            raise Exception
        return PermSpace(
            self.sequence, n_elements=self.n_elements, domain=domain,
            # Re-key the fixed map from positions to the new domain items.
            fixed_map={domain[key]: value for key, value in
                       self._undapplied_fixed_map},
            degrees=self.degrees, slice_=self.canonical_slice,
            is_combination=self.is_combination,
            perm_type=self.perm_type
        )
    def get_fixed(self, fixed_map):
        '''Get a fixed version of this `PermSpace`.'''
        if self.is_sliced:
            raise TypeError(
                "Can't be used on sliced perm spaces. Try "
                "`perm_space.unsliced.get_fixed(...)`. You may then re-slice "
                "the resulting space."
            )
        combined_fixed_map = dict(self.fixed_map)
        for key, value in fixed_map.items():
            if key in self.fixed_map:
                # A key may only be re-fixed to the same value.
                assert self.fixed_map[key] == value
            combined_fixed_map[key] = value
        return PermSpace(
            self.sequence, n_elements=self.n_elements, domain=self.domain,
            fixed_map=combined_fixed_map, degrees=self.degrees, slice_=None,
            is_combination=self.is_combination, perm_type=self.perm_type
        )
    def get_degreed(self, degrees):
        '''Get a version of this `PermSpace` restricted to certain degrees.'''
        from . import variations
        if self.is_sliced:
            raise TypeError(
                "Can't be used on sliced perm spaces. Try "
                "`perm_space.unsliced.get_degreed(...)`. You may then "
                "re-slice the resulting space."
            )
        if self.is_combination:
            raise variations.UnallowedVariationSelectionException(
                {variations.Variation.DEGREED: True,
                 variations.Variation.COMBINATION: True,}
            )
        degrees = sequence_tools.to_tuple(degrees, item_type=int)
        if not degrees:
            # Empty restriction: nothing to do.
            return self
        # If already degreed, only keep degrees present in both restrictions.
        degrees_to_use = \
            degrees if not self.is_degreed else set(degrees) & set(self.degrees)
        return PermSpace(
            self.sequence, n_elements=self.n_elements, domain=self.domain,
            fixed_map=self.fixed_map, degrees=degrees_to_use,
            is_combination=self.is_combination, perm_type=self.perm_type
        )
    # There's no `get_sliced` because slicing is done using Python's normal
    # slice notation, e.g. perm_space[4:-7].
    def get_typed(self, perm_type):
        '''
        Get a version of this `PermSpace` where perms are of a custom type.
        '''
        return PermSpace(
            self.sequence, n_elements=self.n_elements, domain=self.domain,
            fixed_map=self.fixed_map, degrees=self.degrees,
            slice_=self.canonical_slice, is_combination=self.is_combination,
            perm_type=perm_type
        )
| 41.00641 | 80 | 0.601688 |
from python_toolbox import caching
from python_toolbox import sequence_tools
class _VariationAddingMixin:
    '''Mixin letting a `PermSpace` spawn variations of itself.

    Every method builds and returns a brand-new `PermSpace`; the current
    space is left untouched.
    '''
    def get_rapplied(self, sequence):
        '''Return a copy of this space whose range is `sequence`.'''
        if self.is_rapplied:
            raise TypeError('This space is already rapplied, to rapply it to a '
                            'different sequence please use `.unrapplied` '
                            'first.')
        new_sequence = \
            sequence_tools.ensure_iterable_is_immutable_sequence(sequence)
        if len(new_sequence) != self.sequence_length:
            raise Exception
        # Map each fixed value through the new sequence.
        rapplied_fixed_map = {key: new_sequence[value]
                              for key, value in self.fixed_map.items()}
        return PermSpace(
            new_sequence, n_elements=self.n_elements, domain=self.domain,
            fixed_map=rapplied_fixed_map, degrees=self.degrees,
            slice_=self.canonical_slice, is_combination=self.is_combination,
            perm_type=self.perm_type
        )
    def get_partialled(self, n_elements):
        '''Return a partialled copy taking `n_elements` items per perm.'''
        if self.is_sliced:
            raise TypeError(
                "Can't get partial of sliced `PermSpace` directly, because "
                "the number of items would be different. Use `.unsliced` "
                "first."
            )
        return PermSpace(
            self.sequence, n_elements=n_elements, domain=self.domain,
            fixed_map=self.fixed_map, degrees=self.degrees, slice_=None,
            is_combination=self.is_combination, perm_type=self.perm_type
        )
    @caching.CachedProperty
    def combinationed(self):
        '''A combination (`Comb`-based) version of this perm space.'''
        from .comb import Comb
        if self.is_sliced:
            raise TypeError(
                "Can't get a combinationed version of a sliced `PermSpace`"
                "directly, because the number of items would be different. "
                "Use `.unsliced` first."
            )
        if self.is_typed:
            raise TypeError(
                "Can't convert typed `PermSpace` directly to "
                "combinationed, because the perm class would not be a "
                "subclass of `Comb`."
            )
        if self.is_degreed:
            raise TypeError("Can't use degrees with combination spaces.")
        return PermSpace(
            self.sequence, n_elements=self.n_elements, domain=self.domain,
            fixed_map=self.fixed_map, is_combination=True, perm_type=Comb
        )
    def get_dapplied(self, domain):
        '''Return a copy of this space using `domain` as its domain.'''
        from . import variations
        if self.is_combination:
            raise variations.UnallowedVariationSelectionException(
                {variations.Variation.DAPPLIED: True,
                 variations.Variation.COMBINATION: True,}
            )
        new_domain = \
            sequence_tools.ensure_iterable_is_immutable_sequence(domain)
        if len(new_domain) != self.n_elements:
            raise Exception
        # Re-key the undapplied fixed map through the new domain.
        dapplied_fixed_map = {new_domain[key]: value
                              for key, value in self._undapplied_fixed_map}
        return PermSpace(
            self.sequence, n_elements=self.n_elements, domain=new_domain,
            fixed_map=dapplied_fixed_map, degrees=self.degrees,
            slice_=self.canonical_slice, is_combination=self.is_combination,
            perm_type=self.perm_type
        )
    def get_fixed(self, fixed_map):
        '''Return a copy of this space with the extra fixed points given.'''
        if self.is_sliced:
            raise TypeError(
                "Can't be used on sliced perm spaces. Try "
                "`perm_space.unsliced.get_fixed(...)`. You may then re-slice "
                "the resulting space."
            )
        combined_fixed_map = dict(self.fixed_map)
        for fixed_key, fixed_value in fixed_map.items():
            if fixed_key in self.fixed_map:
                # Re-fixing an existing key must not contradict it.
                assert self.fixed_map[fixed_key] == fixed_value
            combined_fixed_map[fixed_key] = fixed_value
        return PermSpace(
            self.sequence, n_elements=self.n_elements, domain=self.domain,
            fixed_map=combined_fixed_map, degrees=self.degrees, slice_=None,
            is_combination=self.is_combination, perm_type=self.perm_type
        )
    def get_degreed(self, degrees):
        '''Return a copy of this space restricted to the given degrees.'''
        from . import variations
        if self.is_sliced:
            raise TypeError(
                "Can't be used on sliced perm spaces. Try "
                "`perm_space.unsliced.get_degreed(...)`. You may then "
                "re-slice the resulting space."
            )
        if self.is_combination:
            raise variations.UnallowedVariationSelectionException(
                {variations.Variation.DEGREED: True,
                 variations.Variation.COMBINATION: True,}
            )
        degrees = sequence_tools.to_tuple(degrees, item_type=int)
        if not degrees:
            return self
        if self.is_degreed:
            # Intersect with the existing degree restriction.
            degrees_to_use = set(degrees) & set(self.degrees)
        else:
            degrees_to_use = degrees
        return PermSpace(
            self.sequence, n_elements=self.n_elements, domain=self.domain,
            fixed_map=self.fixed_map, degrees=degrees_to_use,
            is_combination=self.is_combination, perm_type=self.perm_type
        )
    def get_typed(self, perm_type):
        '''Return a copy of this space whose perms are of `perm_type`.'''
        return PermSpace(
            self.sequence, n_elements=self.n_elements, domain=self.domain,
            fixed_map=self.fixed_map, degrees=self.degrees,
            slice_=self.canonical_slice, is_combination=self.is_combination,
            perm_type=perm_type
        )
| true | true |
f715048138799b0ac641454d95df68f3f905c56a | 239 | py | Python | Pwn/turbofastcrypto/bin/tfc.py | aliencaocao/Sieberrsec-CTF-3.0 | 9b27b11279a7529d3affd22bbd0399c22d24f977 | [
"Apache-2.0"
] | 7 | 2021-12-30T11:54:09.000Z | 2022-01-31T09:11:04.000Z | Pwn/turbofastcrypto/bin/tfc.py | aliencaocao/Sieberrsec-CTF-3.0 | 9b27b11279a7529d3affd22bbd0399c22d24f977 | [
"Apache-2.0"
] | 1 | 2022-01-31T09:04:16.000Z | 2022-01-31T09:04:16.000Z | Pwn/turbofastcrypto/bin/tfc.py | aliencaocao/Sieberrsec-CTF-3.0 | 9b27b11279a7529d3affd22bbd0399c22d24f977 | [
"Apache-2.0"
] | 3 | 2021-12-31T02:28:08.000Z | 2022-02-24T13:11:09.000Z | import turbofastcrypto # The source code for this module is only available for part 2 of this challenge :)
while 1:
plaintext = input('> ')
ciphertext = turbofastcrypto.encrypt(plaintext)
print('Encrypted: ' + str(ciphertext))
| 39.833333 | 106 | 0.723849 | import turbofastcrypto
# Encryption REPL: read a line, encrypt it with the (closed-source)
# turbofastcrypto module, and echo the ciphertext back forever.
while 1:
    plaintext = input('> ')
    ciphertext = turbofastcrypto.encrypt(plaintext)
    print('Encrypted: ' + str(ciphertext))
| true | true |
f715058418459dfa648e6522e744f2a5b97481cd | 1,225 | py | Python | rastervision/augmentor/augmentor_config.py | Yochengliu/raster-vision | f5badc387df86ce02d84e0e274a08026dbf65bd6 | [
"Apache-2.0"
] | 1 | 2019-12-10T13:37:39.000Z | 2019-12-10T13:37:39.000Z | rastervision/augmentor/augmentor_config.py | Yochengliu/raster-vision | f5badc387df86ce02d84e0e274a08026dbf65bd6 | [
"Apache-2.0"
] | null | null | null | rastervision/augmentor/augmentor_config.py | Yochengliu/raster-vision | f5badc387df86ce02d84e0e274a08026dbf65bd6 | [
"Apache-2.0"
] | null | null | null | from abc import abstractmethod
import rastervision as rv
from rastervision.core import (Config, ConfigBuilder)
class AugmentorConfig(Config):
    """Base configuration for data augmentors.

    Concrete subclasses implement `create_augmentor` to build the runtime
    augmentor object this configuration represents.
    """
    def __init__(self, augmentor_type):
        # Registry key identifying the concrete augmentor implementation.
        self.augmentor_type = augmentor_type
    @abstractmethod
    def create_augmentor(self):
        """Create the Augmentor that this configuration represents"""
        pass
    def to_builder(self, augmentor_type):
        # NOTE(review): the `augmentor_type` parameter is ignored here —
        # the lookup uses `self.augmentor_type`. Confirm callers before
        # changing the signature.
        return rv._registry.get_config_builder(rv.AUGMENTOR,
                                               self.augmentor_type)(self)
    @staticmethod
    def builder(augmentor_type):
        # Look up and instantiate the registered builder for this type.
        return rv._registry.get_config_builder(rv.AUGMENTOR, augmentor_type)()
    @staticmethod
    def from_proto(msg):
        """Creates an AugmentorConfig from the specified protobuf message
        """
        return rv._registry.get_config_builder(rv.AUGMENTOR, msg.augmentor_type)() \
                           .from_proto(msg) \
                           .build()
    def update_for_command(self, command_type, experiment_config, context=[]):
        # Generally augmentors do not have an affect on the IO.
        return (self, rv.core.CommandIODefinition())
class AugmentorConfigBuilder(ConfigBuilder):
    # Base class for augmentor config builders; adds no behavior of its
    # own beyond `ConfigBuilder`.
    pass
| 31.410256 | 84 | 0.663673 | from abc import abstractmethod
import rastervision as rv
from rastervision.core import (Config, ConfigBuilder)
class AugmentorConfig(Config):
    """Configuration describing a data augmentor.

    Subclasses know how to build the actual augmentor object via
    `create_augmentor`.
    """
    def __init__(self, augmentor_type):
        self.augmentor_type = augmentor_type
    @abstractmethod
    def create_augmentor(self):
        """Build the Augmentor this configuration represents."""
        pass
    def to_builder(self, augmentor_type):
        # The builder is resolved from this config's own type; the
        # parameter is accepted for interface compatibility only.
        builder_class = rv._registry.get_config_builder(rv.AUGMENTOR,
                                                        self.augmentor_type)
        return builder_class(self)
    @staticmethod
    def builder(augmentor_type):
        """Return a fresh builder for the given augmentor type."""
        builder_class = rv._registry.get_config_builder(rv.AUGMENTOR,
                                                        augmentor_type)
        return builder_class()
    @staticmethod
    def from_proto(msg):
        """Construct an AugmentorConfig from a protobuf message."""
        builder = rv._registry.get_config_builder(rv.AUGMENTOR,
                                                  msg.augmentor_type)()
        return builder.from_proto(msg).build()
    def update_for_command(self, command_type, experiment_config, context=[]):
        # Augmentors generally have no effect on command IO.
        return self, rv.core.CommandIODefinition()
class AugmentorConfigBuilder(ConfigBuilder):
    # Placeholder base for augmentor builders; no shared behavior yet.
    pass
| true | true |
f7150631edcb84ba360da036d61bdd309326a6e6 | 854 | py | Python | test/scenarios/driver/linode/molecule/default/tests/test_default.py | dericcrago/molecule | cb4dec0a7d4993395f123b2c9b0590d41e9dd557 | [
"MIT"
] | null | null | null | test/scenarios/driver/linode/molecule/default/tests/test_default.py | dericcrago/molecule | cb4dec0a7d4993395f123b2c9b0590d41e9dd557 | [
"MIT"
] | null | null | null | test/scenarios/driver/linode/molecule/default/tests/test_default.py | dericcrago/molecule | cb4dec0a7d4993395f123b2c9b0590d41e9dd557 | [
"MIT"
] | null | null | null | import os
import pytest
import testinfra.utils.ansible_runner
# Run these checks against every host in the Molecule-provided inventory.
testinfra_hosts = testinfra.utils.ansible_runner.AnsibleRunner(
    os.environ['MOLECULE_INVENTORY_FILE']
).get_hosts('all')
@pytest.mark.skip(reason='Scenario tests not implemented yet')
def test_hostname(host):
    """The instance should report the expected short hostname."""
    assert host.check_output('hostname -s') == 'instance'
@pytest.mark.skip(reason='Scenario tests not implemented yet')
def test_etc_molecule_directory(host):
    """/etc/molecule should be a root-owned directory with mode 0755."""
    molecule_dir = host.file('/etc/molecule')
    assert molecule_dir.is_directory
    assert molecule_dir.user == 'root'
    assert molecule_dir.group == 'root'
    assert molecule_dir.mode == 0o755
@pytest.mark.skip(reason='Scenario tests not implemented yet')
def test_etc_molecule_ansible_hostname_file(host):
    """/etc/molecule/instance should be a root-owned file with mode 0644."""
    hostname_file = host.file('/etc/molecule/instance')
    assert hostname_file.is_file
    assert hostname_file.user == 'root'
    assert hostname_file.group == 'root'
    assert hostname_file.mode == 0o644
| 24.4 | 63 | 0.723653 | import os
import pytest
import testinfra.utils.ansible_runner
# Target all hosts from the Molecule-generated Ansible inventory file.
testinfra_hosts = testinfra.utils.ansible_runner.AnsibleRunner(
    os.environ['MOLECULE_INVENTORY_FILE']
).get_hosts('all')
@pytest.mark.skip(reason='Scenario tests not implemented yet')
def test_hostname(host):
    """Verify the instance's short hostname."""
    assert 'instance' == host.check_output('hostname -s')
@pytest.mark.skip(reason='Scenario tests not implemented yet')
def test_etc_molecule_directory(host):
    """Verify ownership and permissions of the /etc/molecule directory."""
    f = host.file('/etc/molecule')
    assert f.is_directory
    assert f.user == 'root'
    assert f.group == 'root'
    assert f.mode == 0o755
@pytest.mark.skip(reason='Scenario tests not implemented yet')
def test_etc_molecule_ansible_hostname_file(host):
    """Verify ownership and permissions of /etc/molecule/instance."""
    f = host.file('/etc/molecule/instance')
    assert f.is_file
    assert f.user == 'root'
    assert f.group == 'root'
    assert f.mode == 0o644
| true | true |
f7150789857f831893207971e213c8b17f00080e | 54,478 | py | Python | megatron/arguments.py | deepakn94/Megatron-DeepSpeed | 541b967fbf9fd97ce090ca464ccd205b55aae59c | [
"MIT"
] | null | null | null | megatron/arguments.py | deepakn94/Megatron-DeepSpeed | 541b967fbf9fd97ce090ca464ccd205b55aae59c | [
"MIT"
] | null | null | null | megatron/arguments.py | deepakn94/Megatron-DeepSpeed | 541b967fbf9fd97ce090ca464ccd205b55aae59c | [
"MIT"
] | null | null | null | # coding=utf-8
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Megatron arguments."""
import argparse
import collections
import os
import re
import time
import torch
import deepspeed
from megatron.enums import PositionEmbeddingType
import megatron
from megatron.logging import log_levels
def parse_args(extra_args_provider=None, defaults={},
               ignore_unknown_args=False):
    """Parse all arguments.

    Args:
        extra_args_provider: optional callable that receives the parser and
            may register extra arguments before parsing.
        defaults: mapping of arg name -> default value, applied only to
            arguments the command line left as None. (NOTE(review): the
            shared `{}` default is only read, never mutated, so the mutable
            default is benign here.)
        ignore_unknown_args: if True, unknown arguments are ignored instead
            of raising.

    Returns:
        The fully validated and post-processed argparse namespace.
    """
    parser = argparse.ArgumentParser(description='Megatron-LM Arguments',
                                     allow_abbrev=False)
    # Standard arguments.
    parser = _add_network_size_args(parser)
    parser = _add_regularization_args(parser)
    parser = _add_training_args(parser)
    parser = _add_initialization_args(parser)
    parser = _add_learning_rate_args(parser)
    parser = _add_checkpointing_args(parser)
    parser = _add_mixed_precision_args(parser)
    parser = _add_distributed_args(parser)
    parser = _add_validation_args(parser)
    parser = _add_data_args(parser)
    parser = _add_autoresume_args(parser)
    parser = _add_biencoder_args(parser)
    parser = _add_vit_args(parser)
    parser = _add_logging_args(parser)
    parser = _add_zero_args(parser)
    parser = _add_memoryopt_args(parser)
    parser = _add_activation_checkpoint_args(parser)
    # Custom arguments.
    if extra_args_provider is not None:
        parser = extra_args_provider(parser)
    parser = deepspeed.add_config_arguments(parser)
    # Parse.
    if ignore_unknown_args:
        args, _ = parser.parse_known_args()
    else:
        args = parser.parse_args()
    # Distributed args (taken from the launcher's environment variables).
    args.rank = int(os.getenv('RANK', '0'))
    args.world_size = int(os.getenv("WORLD_SIZE", '1'))
    # Tensor model parallel size: cannot exceed world size.
    args.tensor_model_parallel_size = min(
        args.tensor_model_parallel_size, args.world_size)
    assert args.world_size % args.tensor_model_parallel_size == 0, 'world size'\
        ' ({}) is not divisible by tensor model parallel size ({})'.format(
            args.world_size, args.tensor_model_parallel_size)
    # Pipeline model parallel size.
    args.pipeline_model_parallel_size = min(
        args.pipeline_model_parallel_size,
        (args.world_size // args.tensor_model_parallel_size))
    # Checks.
    model_parallel_size = args.pipeline_model_parallel_size * \
                          args.tensor_model_parallel_size
    assert args.world_size % model_parallel_size == 0, 'world size is not'\
        ' divisible by tensor parallel size ({}) times pipeline parallel ' \
        'size ({})'.format(args.world_size, args.tensor_model_parallel_size,
                           args.pipeline_model_parallel_size)
    # Data parallelism fills whatever is left of the world size.
    args.data_parallel_size = args.world_size // model_parallel_size
    if args.rank == 0:
        print('using world size: {}, data-parallel-size: {}, '
              'tensor-model-parallel size: {}, '
              'pipeline-model-parallel size: {} '.format(
                  args.world_size, args.data_parallel_size,
                  args.tensor_model_parallel_size,
                  args.pipeline_model_parallel_size), flush=True)
    # --data-path and --train-weighted-splits-paths
    message = "Data loading Mode 1: --data-path and --split "\
              "and Mode 2: --(train|valid|test)-weighted-split-paths"\
              "are mutually exclusive i.e. cannot be set together."
    if args.data_path:
        assert args.train_weighted_split_paths is None, message
        setattr(args, "valid_weighted_split_names", None)
        setattr(args, "valid_weighted_split_weights", None)
        setattr(args, "valid_weighted_split_splits", None)
        setattr(args, "test_weighted_split_names", None)
        setattr(args, "test_weighted_split_weights", None)
        setattr(args, "test_weighted_split_splits", None)
        # args.split default value in the args is None it is set here in order
        # to check that it does not to overlap with the 2nd mode of data loading
        if args.split is None:
            args.split = "969, 30, 1"
    if args.train_weighted_split_paths or args.valid_weighted_split_paths or \
        args.test_weighted_split_paths:
        assert args.data_path is None and args.split is None, message
    # Deprecated arguments: fail loudly if the old spellings were used.
    assert args.batch_size is None, '--batch-size argument is no longer ' \
        'valid, use --micro-batch-size instead'
    del args.batch_size
    assert args.warmup is None, '--warmup argument is no longer valid, use ' \
        '--lr-warmup-fraction instead'
    del args.warmup
    assert args.model_parallel_size is None, '--model-parallel-size is no ' \
        'longer valid, use --tensor-model-parallel-size instead'
    del args.model_parallel_size
    # Set input defaults.
    for key in defaults:
        # For default to be valid, it should not be provided in the
        # arguments that are passed to the program. We check this by
        # ensuring the arg is set to None.
        if getattr(args, key) is not None:
            if args.rank == 0:
                print('WARNING: overriding default arguments for {key}:{v} \
                       with {key}:{v2}'.format(key=key, v=defaults[key],
                                               v2=getattr(args, key)),
                      flush=True)
        else:
            setattr(args, key, defaults[key])
    # Batch size.
    assert args.micro_batch_size is not None
    assert args.micro_batch_size > 0
    if args.global_batch_size is None:
        # Default: no gradient accumulation (one micro batch per DP rank).
        args.global_batch_size = args.micro_batch_size * args.data_parallel_size
        if args.rank == 0:
            print('setting global batch size to {}'.format(
                args.global_batch_size), flush=True)
    assert args.global_batch_size > 0
    if args.num_layers_per_virtual_pipeline_stage is not None:
        assert args.pipeline_model_parallel_size > 2, \
            'pipeline-model-parallel size should be greater than 2 with ' \
            'interleaved schedule'
        assert args.num_layers % args.num_layers_per_virtual_pipeline_stage == 0, \
            'number of layers is not divisible by number of layers per virtual ' \
            'pipeline stage'
        args.virtual_pipeline_model_parallel_size = \
            (args.num_layers // args.pipeline_model_parallel_size) // \
            args.num_layers_per_virtual_pipeline_stage
    else:
        args.virtual_pipeline_model_parallel_size = None
    # Parameters dtype: fp32 unless --fp16 or --bf16 is given (exclusive).
    args.params_dtype = torch.float
    if args.fp16:
        assert not args.bf16
        args.params_dtype = torch.half
    if args.bf16:
        assert not args.fp16
        args.params_dtype = torch.bfloat16
        # bfloat16 requires gradient accumulation and all-reduce to
        # be done in fp32.
        if not args.accumulate_allreduce_grads_in_fp32:
            args.accumulate_allreduce_grads_in_fp32 = True
            if args.rank == 0:
                print('accumulate and all-reduce gradients in fp32 for '
                      'bfloat16 data type.', flush=True)
    if args.rank == 0:
        print('using {} for parameters ...'.format(args.params_dtype),
              flush=True)
    # If we do accumulation and all-reduces in fp32, we need to have
    # local DDP and we should set the use-contiguous-buffers-in-ddp.
    if args.accumulate_allreduce_grads_in_fp32:
        assert args.DDP_impl == 'local'
        args.use_contiguous_buffers_in_ddp = True
    if args.dataloader_type is None:
        args.dataloader_type = 'single'
    # Consumed tokens/samples bookkeeping, starts at zero.
    args.consumed_train_samples = 0
    args.consumed_valid_samples = 0
    args.consumed_train_tokens = 0
    args.gigaflos_no_embeds = 0
    # Iteration-based training.
    if args.train_iters:
        # If we use iteration-based training, make sure the
        # sample-based options are off.
        assert args.train_samples is None, \
            'expected iteration-based training'
        assert args.lr_decay_samples is None, \
            'expected iteration-based learning rate decay'
        assert args.lr_warmup_samples == 0, \
            'expected iteration-based learning rate warmup'
        assert args.rampup_batch_size is None, \
            'expected no batch-size rampup for iteration-based training'
        if args.lr_warmup_fraction is not None:
            assert args.lr_warmup_iters == 0, \
                'can only specify one of lr-warmup-fraction and lr-warmup-iters'
    # Sample-based training.
    if args.train_samples:
        # If we use sample-based training, make sure the
        # iteration-based options are off.
        assert args.train_iters is None, \
            'expected sample-based training'
        assert args.lr_decay_iters is None, \
            'expected sample-based learning rate decay'
        assert args.lr_warmup_iters == 0, \
            'expected sample-based learnig rate warmup'
        if args.lr_warmup_fraction is not None:
            assert args.lr_warmup_samples == 0, \
                'can only specify one of lr-warmup-fraction ' \
                'and lr-warmup-samples'
    # Check required arguments.
    required_args = ['num_layers', 'hidden_size', 'num_attention_heads']
    for req_arg in required_args:
        _check_arg_is_not_none(args, req_arg)
    # Checks / derived defaults.
    if args.ffn_hidden_size is None:
        args.ffn_hidden_size = 4 * args.hidden_size
    if args.kv_channels is None:
        assert args.hidden_size % args.num_attention_heads == 0
        args.kv_channels = args.hidden_size // args.num_attention_heads
    # --seq-length and --encoder-seq-length are aliases; exactly one is set.
    if args.seq_length is not None:
        assert args.encoder_seq_length is None
        args.encoder_seq_length = args.seq_length
    else:
        assert args.encoder_seq_length is not None
        args.seq_length = args.encoder_seq_length
    # Position embeddings: only absolute/alibi use max-position-embeddings.
    if args.position_embedding_type == PositionEmbeddingType.absolute or args.position_embedding_type == PositionEmbeddingType.alibi:
        assert args.max_position_embeddings is not None
        if args.seq_length is not None:
            assert args.max_position_embeddings >= args.seq_length
        if args.decoder_seq_length is not None:
            assert args.max_position_embeddings >= args.decoder_seq_length
    else:
        assert args.max_position_embeddings is None
    if args.lr is not None:
        assert args.min_lr <= args.lr
    if args.save is not None:
        assert args.save_interval is not None
    # Mixed precision checks.
    if args.fp16_lm_cross_entropy:
        assert args.fp16, 'lm cross entropy in fp16 only support in fp16 mode.'
    if args.fp32_residual_connection:
        assert args.fp16 or args.bf16, \
            'residual connection in fp32 only supported when using fp16 or bf16.'
    # Activation checkpointing.
    if args.distribute_checkpointed_activations:
        assert args.checkpoint_activations, \
            'for distribute-checkpointed-activations to work you '\
            'need to enable checkpoint-activations'
    # Default: curriculum learning off here; may be enabled elsewhere
    # (presumably from the DeepSpeed config — confirm).
    args.curriculum_learning = False
    # Activation function
    if args.glu_activation is not None and args.bias_gelu_fusion:
        raise ValueError("if glu-activation is used, please set --no-bias-gelu-fusion")
    # Skip train iterations: parse "start-end" strings, sort, and merge
    # overlapping ranges into a deque of [start, end] pairs.
    if args.skip_train_iteration_range is not None:
        args.skip_train_iteration_range = [
            list(map(int, range_.split("-"))) for range_ in args.skip_train_iteration_range
        ]
        args.skip_train_iteration_range.sort()
        skip_train_iteration_range = collections.deque()
        for range_ in args.skip_train_iteration_range:
            if len(range_) == 2:
                start, end = range_
                assert end >= start, \
                    "end of skip range cannot be smaller than start of skip range"
                # merge overlapping intervals (e.g. 1-5 2-6 -> 1-6)
                if not skip_train_iteration_range:
                    skip_train_iteration_range.append([start, end])
                elif skip_train_iteration_range[-1][1] >= start:
                    skip_train_iteration_range[-1][1] = max(end, skip_train_iteration_range[-1][1])
                else:
                    skip_train_iteration_range.append([start, end])
            else:
                raise ValueError(
                    "skip train iterations should be specified as two numbers, i.e. start-end"
                )
        args.skip_train_iteration_range = skip_train_iteration_range
    if args.use_bnb_optimizer:
        # Probe for bitsandbytes availability early with a helpful message.
        try:
            import bitsandbytes as bnb
        except ModuleNotFoundError:
            raise ModuleNotFoundError("Please install bitsandbytes from https://github.com/facebookresearch/bitsandbytes.")
    _print_args(args)
    return args
def _print_args(args):
    """Pretty-print every parsed argument on rank 0.

    Each ``name ... value`` line is padded with dots and emitted in
    case-insensitive alphabetical order.  When ``args.log_path`` is set,
    the same lines are also written to a timestamped ``args_*.txt`` file
    inside that directory.
    """
    if args.rank != 0:
        return  # only the first rank reports
    print('------------------------ arguments ------------------------',
          flush=True)
    # Build "name ...... value" lines, then sort case-insensitively.
    entries = [
        ' {} {} {}'.format(name, '.' * (48 - len(name)), getattr(args, name))
        for name in vars(args)
    ]
    entries.sort(key=str.lower)
    if args.log_path is not None:
        # Mirror the report into a timestamped file under log_path.
        stamp = time.strftime("%Y-%m-%dT%H:%M:%S")
        with open(os.path.join(args.log_path, f'args_{stamp}.txt'), 'w') as handle:
            for line in entries:
                handle.write(line + "\n")
                print(line, flush=True)
    else:
        for line in entries:
            print(line, flush=True)
    print('-------------------- end of arguments ---------------------',
          flush=True)
def _check_arg_is_not_none(args, arg):
    """Assert that the attribute named *arg* on *args* is not None."""
    value = getattr(args, arg)
    assert value is not None, '{} argument is None'.format(arg)
def _add_network_size_args(parser):
    """Register arguments controlling model architecture dimensions.

    Covers layer/hidden/attention sizing, position-embedding choice,
    activation functions and logger verbosity.  Returns the parser so
    calls can be chained.
    """
    group = parser.add_argument_group(title='network size')
    group.add_argument('--num-layers', type=int, default=None,
                       help='Number of transformer layers.')
    group.add_argument('--hidden-size', type=int, default=None,
                       help='Transformer hidden size.')
    group.add_argument('--ffn-hidden-size', type=int, default=None,
                       help='Transformer Feed-Forward Network hidden size. '
                       'This is set to 4*hidden-size if not provided')
    group.add_argument('--num-attention-heads', type=int, default=None,
                       help='Number of transformer attention heads.')
    group.add_argument('--kv-channels', type=int, default=None,
                       help='Projection weights dimension in multi-head '
                       'attention. This is set to '
                       ' args.hidden_size // args.num_attention_heads '
                       'if not provided.')
    group.add_argument('--max-position-embeddings', type=int, default=None,
                       help='Maximum number of position embeddings to use. '
                       'This is the size of position embedding.')
    group.add_argument('--make-vocab-size-divisible-by', type=int, default=128,
                       help='Pad the vocab size to be divisible by this value. '
                       'This is added for computational efficiency reasons.')
    group.add_argument('--layernorm-epsilon', type=float, default=1e-5,
                       help='Layer norm epsilon.')
    group.add_argument('--apply-residual-connection-post-layernorm',
                       action='store_true',
                       help='If set, use original BERT residual connection '
                       'ordering.')
    group.add_argument('--embed-layernorm', action='store_true',
                       help='use layernorm for embedding')
    group.add_argument('--openai-gelu', action='store_true',
                       help='Use OpenAIs GeLU implementation. This option '
                       'should not be used unless for backward compatibility '
                       'reasons.')
    # NOTE(review): type=bool parses any non-empty string (including
    # "False") as True; consider a str2bool parser — confirm intended use.
    group.add_argument('--onnx-safe', type=bool, required=False,
                       help='Use workarounds for known problems with '
                       'Torch ONNX exporter')
    group.add_argument('--bert-no-binary-head', action='store_false',
                       help='Disable BERT binary head.',
                       dest='bert_binary_head')
    group.add_argument('--position-embedding-type', type=lambda x: PositionEmbeddingType[x],
                       choices=list(PositionEmbeddingType),
                       default=PositionEmbeddingType.absolute,
                       help='Define position embedding type ("absolute" | "rotary" | "alibi"). "absolute" by default.'
                       )
    group.add_argument('--glu-activation', type=str,
                       choices=megatron.model.glu_activations.GLU_ACTIVATIONS.keys(),
                       help='GLU activations to use.'
                       )
    group.add_argument('--kill-switch-path', type=str,
                       help='path to look for a kill switch, which if found will automatically exit the program'
                       )
    group.add_argument('--log-level', type=str, choices=list(log_levels.keys()),
                       help="Logger log level to use on the main process. Possible choices are the log levels as strings: 'debug', "
                       "'info', 'warning', 'error' and 'critical', plus a 'passive' level which doesn't set anything and lets the "
                       "application set the level."
                       )
    group.add_argument('--log-level-replica', type=str, choices=list(log_levels.keys()),
                       help="Logger log level to use on replicas. Same choices as ``log_level``"
                       )
    return parser
def _add_logging_args(parser):
    """Register logging / TensorBoard reporting arguments."""
    group = parser.add_argument_group(title='logging')
    group.add_argument('--log-params-norm', action='store_true',
                       help='If set, calculate and log parameters norm.')
    group.add_argument('--log-num-zeros-in-grad', action='store_true',
                       help='If set, calculate and log the number of zeros in gradient.')
    group.add_argument('--tensorboard-log-interval', type=int, default=1,
                       help='Report to tensorboard interval.')
    group.add_argument('--tensorboard-queue-size', type=int, default=1000,
                       help='Size of the tensorboard queue for pending events '
                       'and summaries before one of the ‘add’ calls forces a '
                       'flush to disk.')
    group.add_argument('--log-timers-to-tensorboard', action='store_true',
                       help='If set, write timers to tensorboard.')
    group.add_argument('--log-batch-size-to-tensorboard', action='store_true',
                       help='If set, write batch-size to tensorboard.')
    # The first (misspelled) flag is kept for backward compatibility with
    # existing launch scripts; the correctly spelled alias maps to the
    # same dest.
    group.add_argument('--no-log-learnig-rate-to-tensorboard',
                       '--no-log-learning-rate-to-tensorboard',
                       action='store_false',
                       help='Disable learning rate logging to tensorboard.',
                       dest='log_learning_rate_to_tensorboard')
    group.add_argument('--no-log-loss-scale-to-tensorboard',
                       action='store_false',
                       help='Disable loss-scale logging to tensorboard.',
                       dest='log_loss_scale_to_tensorboard')
    group.add_argument('--log-validation-ppl-to-tensorboard',
                       action='store_true',
                       help='If set, write validation perplexity to '
                       'tensorboard.')
    return parser
def _add_regularization_args(parser):
    """Register regularization and optimizer-hyperparameter arguments."""
    group = parser.add_argument_group(title='regularization')
    group.add_argument('--attention-dropout', type=float, default=0.1,
                       help='Post attention dropout probability.')
    group.add_argument('--hidden-dropout', type=float, default=0.1,
                       help='Dropout probability for hidden state transformer.')
    group.add_argument('--weight-decay', type=float, default=0.01,
                       help='Weight decay coefficient for L2 regularization.')
    group.add_argument('--clip-grad', type=float, default=1.0,
                       help='Gradient clipping based on global L2 norm.')
    group.add_argument('--adam-beta1', type=float, default=0.9,
                       help='First coefficient for computing running averages '
                       'of gradient and its square')
    group.add_argument('--adam-beta2', type=float, default=0.999,
                       help='Second coefficient for computing running averages '
                       'of gradient and its square')
    group.add_argument('--adam-eps', type=float, default=1e-08,
                       help='Term added to the denominator to improve '
                       'numerical stability')
    group.add_argument('--sgd-momentum', type=float, default=0.9,
                       help='Momentum factor for sgd')
    return parser
def _add_training_args(parser):
    """Register core training-loop arguments: batch sizing, activation
    checkpointing, run-duration limits, fused-kernel toggles and
    optimizer/dataloader selection.  Returns the parser for chaining."""
    group = parser.add_argument_group(title='training')
    # --- batch sizing ---
    group.add_argument('--micro-batch-size', type=int, default=None,
                       help='Batch size per model instance (local batch size). '
                       'Global batch size is local batch size times data '
                       'parallel size times number of micro batches.')
    # Deprecated alias of --micro-batch-size (rejected/remapped elsewhere).
    group.add_argument('--batch-size', type=int, default=None,
                       help='Old batch size parameter, do not use. '
                       'Use --micro-batch-size instead')
    group.add_argument('--global-batch-size', type=int, default=None,
                       help='Training batch size. If set, it should be a '
                       'multiple of micro-batch-size times data-parallel-size. '
                       'If this value is None, then '
                       'use micro-batch-size * data-parallel-size as the '
                       'global batch size. This choice will result in 1 for '
                       'number of micro-batches.')
    group.add_argument('--rampup-batch-size', nargs='*', default=None,
                       help='Batch size ramp up with the following values:'
                       ' --rampup-batch-size <start batch size> '
                       ' <batch size increment> '
                       ' <ramp-up samples> '
                       'For example: '
                       ' --rampup-batch-size 16 8 300000 '
                       ' --global-batch-size 1024 '
                       'will start with global batch size 16 and over '
                       ' (1024 - 16) / 8 = 126 intervals will increase '
                       'the batch size linearly to 1024. In each interval '
                       'we will use approximately 300000 / 126 = 2380 samples.')
    # --- activation checkpointing ---
    group.add_argument('--checkpoint-activations', action='store_true',
                       help='Checkpoint activation to allow for training '
                       'with larger models, sequences, and batch sizes.')
    group.add_argument('--distribute-checkpointed-activations',
                       action='store_true',
                       help='If set, distribute checkpointed activations '
                       'across model parallel group.')
    group.add_argument('--checkpoint-num-layers', type=int, default=1,
                       help='chunk size (number of layers) for checkpointing.')
    # --- run duration and exit conditions ---
    group.add_argument('--train-iters', type=int, default=None,
                       help='Total number of iterations to train over all '
                       'training runs. Note that either train-iters or '
                       'train-samples should be provided.')
    group.add_argument('--train-samples', type=int, default=None,
                       help='Total number of samples to train over all '
                       'training runs. Note that either train-iters or '
                       'train-samples should be provided.')
    group.add_argument('--train-tokens', type=int, default=None,
                       help='Total number of tokens to train over all '
                       'training runs.')
    group.add_argument('--log-interval', type=int, default=100,
                       help='Report loss and timing interval.')
    group.add_argument('--exit-interval', type=int, default=None,
                       help='Exit the program after the iteration is divisible '
                       'by this value.')
    group.add_argument('--exit-duration-in-mins', type=int, default=None,
                       help='Exit the program after this many minutes.')
    group.add_argument('--tensorboard-dir', type=str, default=None,
                       help='Write TensorBoard logs to this directory.')
    # --- fused-kernel toggles (enabled by default; flags disable them) ---
    group.add_argument('--no-masked-softmax-fusion',
                       action='store_false',
                       help='Disable fusion of query_key_value scaling, '
                       'masking, and softmax.',
                       dest='masked_softmax_fusion')
    group.add_argument('--no-bias-gelu-fusion', action='store_false',
                       help='Disable bias and gelu fusion.',
                       dest='bias_gelu_fusion')
    group.add_argument('--no-bias-dropout-fusion', action='store_false',
                       help='Disable bias and dropout fusion.',
                       dest='bias_dropout_fusion')
    # --- optimizer / dataloader selection ---
    group.add_argument('--optimizer', type=str, default='adam',
                       choices=['adam', 'sgd'],
                       help='Optimizer function')
    group.add_argument('--use-bnb-optimizer', action='store_true',
                       help='Use bitsandbytes optimizer for efficient training,'
                       'please refer https://github.com/facebookresearch/bitsandbytes.',
                       dest='use_bnb_optimizer')
    group.add_argument('--dataloader-type', type=str, default=None,
                       choices=['single', 'cyclic'],
                       help='Single pass vs multiple pass data loader')
    group.add_argument('--cpu-optimizer', action='store_true',
                       help='Run optimizer on CPU')
    group.add_argument('--cpu_torch_adam', action='store_true',
                       help='Use Torch Adam as optimizer on CPU.')
    group.add_argument('--codecarbon-dir', type=str, default=None,
                       help='Write CodeCarbon logs to this directory.')
    # NOTE(review): type=bool parses any non-empty string (including the
    # literal "False") as True — confirm whether a str2bool parser was meant.
    group.add_argument('--eval-only', type=bool, required=False,
                       help='If set to True, no train step will be performed.'
                       'and only the evaluation on the `valid` and `test` sets '
                       'will be performed' )
    # Raw "start-end" strings; parsed and merged later in parse_args.
    group.add_argument('--skip-train-iteration-range', type=str, nargs='+', default=None,
                       help='Iteration ranges to skip. The values are one or more dash-separated ranges. e.g., 101-200 251-300.')
    group.add_argument('--abort-on-unmet-fused-kernel-constraints', action='store_true',
                       help="If set to True, the program will abort if the constraints for loading a fused kernel aren't met")
    return parser
def _add_initialization_args(parser):
    """Register random-seeding and weight-initialization arguments."""
    group = parser.add_argument_group(title='initialization')
    group.add_argument(
        '--seed', type=int, default=1234,
        help='Random seed used for python, numpy, pytorch, and cuda.')
    group.add_argument(
        '--init-method-std', type=float, default=0.02,
        help='Standard deviation of the zero mean normal distribution used '
             'for weight initialization.')
    group.add_argument(
        '--init-method-xavier-uniform', action='store_true',
        help='Enable Xavier uniform parameter initialization')
    return parser
def _add_learning_rate_args(parser):
    """Register learning-rate schedule arguments (decay, warmup, min-lr)."""
    group = parser.add_argument_group(title='learning rate')
    group.add_argument('--lr', type=float, default=None,
                       help='Initial learning rate. Depending on decay style '
                       'and initial warmup, the learning rate at each '
                       'iteration would be different.')
    group.add_argument('--lr-decay-style', type=str, default='linear',
                       choices=['constant', 'linear', 'cosine'],
                       help='Learning rate decay function.')
    group.add_argument('--lr-decay-iters', type=int, default=None,
                       help='number of iterations to decay learning rate over,'
                       ' If None defaults to `--train-iters`')
    group.add_argument('--lr-decay-samples', type=int, default=None,
                       help='number of samples to decay learning rate over,'
                       ' If None defaults to `--train-samples`')
    group.add_argument('--lr-decay-tokens', type=int, default=None,
                       help='number of tokens to decay learning rate over,'
                       ' If not None will override iter/sample-based decay')
    group.add_argument('--lr-warmup-fraction', type=float, default=None,
                       help='fraction of lr-warmup-(iters/samples) to use '
                       'for warmup (as a float)')
    group.add_argument('--lr-warmup-iters', type=int, default=0,
                       help='number of iterations to linearly warmup '
                       'learning rate over.')
    group.add_argument('--lr-warmup-samples', type=int, default=0,
                       help='number of samples to linearly warmup '
                       'learning rate over.')
    # Deprecated: kept so old launch scripts get a clear message downstream.
    group.add_argument('--warmup', type=int, default=None,
                       help='Old lr warmup argument, do not use. Use one of '
                       'the --lr-warmup-* arguments above')
    group.add_argument('--min-lr', type=float, default=0.0,
                       help='Minimum value for learning rate. The scheduler '
                       'clips values below this threshold.')
    group.add_argument('--override-lr-scheduler', action='store_true',
                       help='Reset the values of the scheduler (learning rate, '
                       'warmup iterations, minimum learning rate, maximum '
                       'number of iterations, and decay style from input '
                       'arguments and ignore values from checkpoints. Note '
                       'that all the above values will be reset.')
    group.add_argument('--use-checkpoint-lr-scheduler', action='store_true',
                       help='Use checkpoint to set the values of the scheduler '
                       '(learning rate, warmup iterations, minimum learning '
                       'rate, maximum number of iterations, and decay style '
                       'from checkpoint and ignore input arguments.')
    return parser
def _add_checkpointing_args(parser):
    """Register checkpoint save/load arguments."""
    group = parser.add_argument_group(title='checkpointing')
    group.add_argument(
        '--save', type=str, default=None,
        help='Output directory to save checkpoints to.')
    group.add_argument(
        '--save-interval', type=int, default=None,
        help='Number of iterations between checkpoint saves.')
    group.add_argument(
        '--no-save-optim', action='store_true', default=None,
        help='Do not save current optimizer.')
    group.add_argument(
        '--no-save-rng', action='store_true', default=None,
        help='Do not save current rng state.')
    group.add_argument(
        '--load', type=str, default=None,
        help='Directory containing a model checkpoint.')
    group.add_argument(
        '--no-load-optim', action='store_true', default=None,
        help='Do not load optimizer when loading checkpoint.')
    group.add_argument(
        '--no-load-rng', action='store_true', default=None,
        help='Do not load rng state when loading checkpoint.')
    group.add_argument(
        '--finetune', action='store_true',
        help='Load model for finetuning. Do not load optimizer '
             'or rng state from checkpoint and set iteration to 0. '
             'Assumed when loading a release checkpoint.')
    return parser
def _add_mixed_precision_args(parser):
    """Register fp16/bf16 mixed-precision and loss-scaling arguments."""
    group = parser.add_argument_group(title='mixed precision')
    group.add_argument('--fp16', action='store_true',
                       help='Run model in fp16 mode.')
    group.add_argument('--bf16', action='store_true',
                       help='Run model in bfloat16 mode.')
    group.add_argument('--loss-scale', type=float, default=None,
                       help='Static loss scaling, positive power of 2 '
                       'values can improve fp16 convergence. If None, dynamic '
                       'loss scaling is used.')
    group.add_argument('--initial-loss-scale', type=float, default=2**32,
                       help='Initial loss-scale for dynamic loss scaling.')
    group.add_argument('--min-loss-scale', type=float, default=1.0,
                       help='Minimum loss scale for dynamic loss scale.')
    group.add_argument('--loss-scale-window', type=float, default=1000,
                       help='Window over which to raise/lower dynamic scale.')
    group.add_argument('--hysteresis', type=int, default=2,
                       help='hysteresis for dynamic loss scaling')
    group.add_argument('--fp32-residual-connection', action='store_true',
                       help='Move residual connections to fp32.')
    group.add_argument('--no-query-key-layer-scaling', action='store_false',
                       help='Do not scale Q * K^T by 1 / layer-number.',
                       dest='apply_query_key_layer_scaling')
    group.add_argument('--attention-softmax-in-fp32', action='store_true',
                       help='Run attention masking and softmax in fp32. '
                       'This flag is ignored unless '
                       '--no-query-key-layer-scaling is specified.')
    group.add_argument('--accumulate-allreduce-grads-in-fp32',
                       action='store_true',
                       help='Gradient accumulation and all-reduce in fp32.')
    group.add_argument('--fp16-lm-cross-entropy', action='store_true',
                       help='Move the cross entropy unreduced loss calculation '
                       'for lm head to fp16.')
    return parser
def _add_distributed_args(parser):
    """Register model/data-parallel topology and DDP arguments."""
    group = parser.add_argument_group(title='distributed')
    group.add_argument('--tensor-model-parallel-size', type=int, default=1,
                       help='Degree of tensor model parallelism.')
    group.add_argument('--pipeline-model-parallel-size', type=int, default=1,
                       help='Degree of pipeline model parallelism.')
    # Deprecated alias of --tensor-model-parallel-size.
    group.add_argument('--model-parallel-size', type=int, default=None,
                       help='Old model parallel argument, do not use. Use '
                       '--tensor-model-parallel-size instead.')
    group.add_argument('--num-layers-per-virtual-pipeline-stage', type=int, default=None,
                       help='Number of layers per virtual pipeline stage')
    group.add_argument('--distributed-backend', default='nccl',
                       choices=['nccl', 'gloo'],
                       help='Which backend to use for distributed training.')
    group.add_argument('--DDP-impl', default='local',
                       choices=['local', 'torch'],
                       help='which DistributedDataParallel implementation '
                       'to use.')
    group.add_argument('--use-contiguous-buffers-in-ddp', action='store_true',
                       help='If set, use contiguous buffer in DDP. Note that '
                       'this option only works with local DDP.' )
    group.add_argument('--no-scatter-gather-tensors-in-pipeline', action='store_false',
                       help='Use scatter/gather to optimize communication of tensors in pipeline',
                       dest='scatter_gather_tensors_in_pipeline')
    group.add_argument('--local_rank', type=int, default=None,
                       help='local rank passed from distributed launcher.')
    # NOTE(review): type=bool parses any non-empty string as True.
    group.add_argument('--lazy-mpu-init', type=bool, required=False,
                       help='If set to True, initialize_megatron() '
                       'skips DDP initialization and returns function to '
                       'complete it instead. Also turns on '
                       '--use-cpu-initialization flag. This is for '
                       'external DDP manager.' )
    group.add_argument('--use-cpu-initialization', action='store_true',
                       default=None, help='If set, affine parallel weights '
                       'initialization uses CPU' )
    return parser
def _add_validation_args(parser):
    """Register evaluation-frequency arguments."""
    group = parser.add_argument_group(title='validation')
    group.add_argument('--eval-iters', type=int, default=100,
                       help='Number of iterations to run for evaluation '
                       'validation/test for.')
    group.add_argument('--eval-interval', type=int, default=1000,
                       help='Interval between running evaluation on '
                       'validation set.')
    return parser
def _add_data_args(parser):
    """Register dataset and dataloader arguments.

    Two mutually exclusive data-specification styles are supported:
    option 1 (``--data-path`` + ``--split``) and option 2
    (the ``--{train,valid,test}-weighted-split-paths`` family, parsed by
    the custom argparse action below).
    """
    group = parser.add_argument_group(title='data and dataloader')
    # option 1 for data loading (mutually exclusive with option 2)
    group.add_argument('--data-path', nargs='*', default=None,
                       help='Path to the training dataset. Accepted format: '
                       '1) a single data path, 2) multiple datasets in the '
                       'form: dataset1-weight dataset1-path dataset2-weight '
                       'dataset2-path ...')
    group.add_argument('--split', type=str, default=None,
                       help='Comma-separated list of proportions for training,'
                       ' validation, and test split. For example the split '
                       '`90,5,5` will use 90%% of data for training, 5%% for '
                       'validation and 5%% for test.')
    # option 2 for data loading (mutually exclusive with option 1)
    # helper class to parse the --xxx-weighted-split-paths arguments;
    # note that it also sets the sibling weights/splits/names dests
    class parse_data_paths(argparse.Action):
        def __call__(self, parser, args, values, option_string=None):
            """Parse dataset groups of the form
            "GIVEN_NAME WEIGHT1 START:END PATH1, WEIGHT2 START:END PATH2"
            and store paths/weights/splits/names on the namespace."""
            if option_string == "--train-weighted-split-paths":
                # training accepts exactly one dataset group
                assert len(values) == 1, (
                    'Only 1 dataset group is allowed to be passed for the '
                    'argument --train-weighted-split-paths')
            # make sure each string is given in the correct format
            err_message = (
                'Each data group should be input on the following format '
                '"GIVEN_NAME WEIGHT1 START:END PATH1, WEIGHT2 START:END PATH2" '
                'where START < END')
            for v in values:
                # each prefix consists of several datasets separated by commas
                prefix = ":".join(v.split(":")[1:])  # remove GIVEN_NAME
                datasets = prefix.split(",")
                # check if each dataset is formatted like `WEIGHT START:END PATH`
                for d in datasets:
                    assert len(d.split()) == 3, err_message
                    start, end = d.split()[1].split(":")
                    assert float(start) < float(end), err_message
            names = [v.split(":")[0] for v in values]
            prefixes = [":".join(v.split(":")[1:]).strip() for v in values]
            weights = [[d.split()[0] for d in p.split(",")] for p in prefixes]
            splits = [[d.split()[1] for d in p.split(",")] for p in prefixes]
            paths = [[d.split()[2] for d in p.split(",")] for p in prefixes]
            # store the parsed pieces under sibling dests so downstream code
            # can read *_paths, *_weights, *_splits and *_names together
            setattr(args, self.dest, paths)
            setattr(args, self.dest.replace("paths", "weights"), weights)
            setattr(args, self.dest.replace("paths", "splits"), splits)
            setattr(args, self.dest.replace("paths", "names"), names)
    group.add_argument('--train-weighted-split-paths', nargs='*', default=None,
                       help='Weights, splits and paths to groups of datasets. '
                       'Accepted format: ONE dataset group could be '
                       'submitted in the following form between double quotes '
                       '"GIVEN_NAME WEIGHT1 START:END PATH1, WEIGHT2 START:END PATH2" '
                       'e.g.: "NAME_ABC: 0.6 0:0.6 A, 0.3 0:1 B, 0.1 0:1 C" '
                       'WEIGHT is used to up and down sample each dataset A,B,C in the group. '
                       'START:END indicates the split portion of the dataset',
                       action=parse_data_paths)
    group.add_argument('--valid-weighted-split-paths', nargs='*', default=None,
                       help='Weights, splits and paths to groups of datasets. '
                       'Accepted format: one or many dataset groups could be '
                       'submitted in the following form each between double quotes '
                       '"GIVEN_NAME WEIGHT1 START:END PATH1, WEIGHT2 START:END PATH2" '
                       'e.g.: "NAME_ABC: 0.6 0.6:0.8 A, 0.3 0:1 B, 0.1 0:1 C" '
                       '"NAME_CDE: 0.6 0.6:0.8 C, 0.3 0:1 D, 0.1 0:1 E" '
                       'validation will be run on each of those groups independently',
                       action=parse_data_paths)
    group.add_argument('--test-weighted-split-paths', nargs='*', default=None,
                       help='Weights, splits and paths to groups of datasets. '
                       'Accepted format: one or many dataset groups could be '
                       'submitted in the following form each between double quotes '
                       '"GIVEN_NAME WEIGHT1 START:END PATH1, WEIGHT2 START:END PATH2" '
                       'e.g.: "NAME_ABC: 0.6 0.6:0.8 A, 0.3 0:1 B, 0.1 0:1 C" '
                       '"NAME_CDE: 0.6 0.6:0.8 C, 0.3 0:1 D, 0.1 0:1 E" '
                       'test will be run on each of those groups independently',
                       action=parse_data_paths)
    group.add_argument('--log-path', type=str, default=None,
                       help='Path to the save arguments file.')
    group.add_argument('--vocab-file', type=str, default=None,
                       help='Path to the vocab file.')
    group.add_argument('--merge-file', type=str, default=None,
                       help='Path to the BPE merge file.')
    group.add_argument('--vocab-extra-ids', type=int, default=0,
                       help='Number of additional vocabulary tokens. '
                       'They are used for span masking in the T5 model')
    group.add_argument('--seq-length', type=int, default=None,
                       help='Maximum sequence length to process.')
    group.add_argument('--encoder-seq-length', type=int, default=None,
                       help='Maximum encoder sequence length to process. '
                       'This should be exclusive of --seq-length')
    group.add_argument('--decoder-seq-length', type=int, default=None,
                       help="Maximum decoder sequence length to process.")
    group.add_argument('--retriever-seq-length', type=int, default=256,
                       help='Maximum sequence length for the biencoder model '
                       ' for retriever')
    group.add_argument('--sample-rate', type=float, default=1.0,
                       help='sample rate for training data. Supposed to be 0 '
                       ' < sample_rate < 1')
    group.add_argument('--mask-prob', type=float, default=0.15,
                       help='Probability of replacing a token with mask.')
    group.add_argument('--short-seq-prob', type=float, default=0.1,
                       help='Probability of producing a short sequence.')
    group.add_argument('--mmap-warmup', action='store_true',
                       help='Warm up mmap files.')
    group.add_argument('--num-workers', type=int, default=2,
                       help="Dataloader number of workers.")
    group.add_argument('--tokenizer-type', type=str,
                       default=None,
                       choices=['BertWordPieceLowerCase',
                                'BertWordPieceCase',
                                'GPT2BPETokenizer',
                                'PretrainedFromHF'],
                       help='What type of tokenizer to use.')
    group.add_argument("--tokenizer-name-or-path", type=str, default=None,
                       help="Name or path of the huggingface tokenizer.")
    group.add_argument('--data-impl', type=str, default='infer',
                       choices=['lazy', 'cached', 'mmap', 'infer'],
                       help='Implementation of indexed datasets.')
    group.add_argument('--reset-position-ids', action='store_true',
                       help='Reset position ids after end-of-document token.')
    group.add_argument('--reset-attention-mask', action='store_true',
                       help='Reset self attention mask after '
                       'end-of-document token. Attention between tokens from different documents is null.')
    group.add_argument('--eod-mask-loss', action='store_true',
                       help='Mask loss for the end of document tokens.')
    group.add_argument('--loss-on-targets-only', action='store_true',
                       help='Mask loss on input sequence.')
    group.add_argument('--reweight-loss-based-on-position-frequency', action="store_true",
                       help='Some objectives require us to sample loss_mask. This might introduce bias towards '
                       'specific positions. This option tries to un-bias the loss by reweighting loss on specific '
                       'positions based on how frequently we train on that position. '
                       'This is mostly used for prefix_lm training')
    return parser
def _add_autoresume_args(parser):
    """Register ADLR-cluster auto-resume arguments."""
    group = parser.add_argument_group(title='autoresume')
    group.add_argument('--adlr-autoresume', action='store_true',
                       help='Enable autoresume on adlr cluster.')
    group.add_argument('--adlr-autoresume-interval', type=int, default=1000,
                       help='Intervals over which check for autoresume '
                       'termination signal')
    return parser
def _add_biencoder_args(parser):
    """Register biencoder (ICT/REALM retriever) arguments."""
    group = parser.add_argument_group(title='biencoder')
    # network size
    group.add_argument('--ict-head-size', type=int, default=None,
                       help='Size of block embeddings to be used in ICT and '
                       'REALM (paper default: 128)')
    group.add_argument('--biencoder-projection-dim', type=int, default=0,
                       help='Size of projection head used in biencoder (paper'
                       ' default: 128)')
    group.add_argument('--biencoder-shared-query-context-model', action='store_true',
                       help='Whether to share the parameters of the query '
                       'and context models or not')
    # checkpointing
    group.add_argument('--ict-load', type=str, default=None,
                       help='Directory containing an ICTBertModel checkpoint')
    group.add_argument('--bert-load', type=str, default=None,
                       help='Directory containing a BertModel checkpoint '
                       '(needed to start ICT and REALM)')
    # data
    group.add_argument('--titles-data-path', type=str, default=None,
                       help='Path to titles dataset used for ICT')
    group.add_argument('--query-in-block-prob', type=float, default=0.1,
                       help='Probability of keeping query in block for '
                       'ICT dataset')
    group.add_argument('--use-one-sent-docs', action='store_true',
                       help='Whether to use one sentence documents in ICT')
    group.add_argument('--evidence-data-path', type=str, default=None,
                       help='Path to Wikipedia Evidence from DPR paper')
    # training
    group.add_argument('--retriever-report-topk-accuracies', nargs='+', type=int,
                       default=[], help="Which top-k accuracies to report "
                       "(e.g. '1 5 20')")
    group.add_argument('--retriever-score-scaling', action='store_true',
                       help='Whether to scale retriever scores by inverse '
                       'square root of hidden size')
    # faiss index
    group.add_argument('--block-data-path', type=str, default=None,
                       help='Where to save/load BlockData to/from')
    group.add_argument('--embedding-path', type=str, default=None,
                       help='Where to save/load Open-Retrieval Embedding'
                       ' data to/from')
    # indexer
    group.add_argument('--indexer-batch-size', type=int, default=128,
                       help='How large of batches to use when doing indexing '
                       'jobs')
    group.add_argument('--indexer-log-interval', type=int, default=1000,
                       help='After how many batches should the indexer '
                       'report progress')
    return parser
def _add_vit_args(parser):
    """Register Vision Transformer (ViT) arguments."""
    group = parser.add_argument_group(title="vit")
    group.add_argument('--num-classes', type=int, default=1000,
                       help='num of classes in vision classification task')
    group.add_argument('--img-dim', type=int, default=224,
                       help='Image size for vision classification task')
    group.add_argument('--num-channels', type=int, default=3,
                       help='Number of channels in input image data')
    group.add_argument('--patch-dim', type=int, default=16,
                       help='patch dimension used in vit')
    return parser
def _add_zero_args(parser):
    """Register DeepSpeed ZeRO optimizer configuration arguments."""
    group = parser.add_argument_group('ZeRO configurations', 'configurations')
    # Defaults were floats (1.0 / 0.0) despite type=int; use ints so the
    # default and any parsed value share a type.
    group.add_argument("--zero-stage", type=int, default=1)
    group.add_argument('--zero-reduce-scatter', action='store_true',
                       help='Use reduce scatter if specified')
    # The historical (misspelled) flag is kept first for backward
    # compatibility; the correctly spelled alias maps to the same dest.
    group.add_argument('--zero-contigious-gradients',
                       '--zero-contiguous-gradients',
                       action='store_true',
                       help='Use contiguous memory optimization if specified')
    group.add_argument("--zero-reduce-bucket-size", type=int, default=0)
    group.add_argument("--zero-allgather-bucket-size", type=int, default=0)
    group.add_argument('--remote-device', type=str, default='none', choices=['none', 'cpu', 'nvme'],
                       help='Remote device for ZeRO-3 initialized parameters.')
    group.add_argument('--use-pin-memory', action='store_true',
                       help='Use pinned CPU memory for ZeRO-3 initialized model parameters.')
    return parser
def _add_memoryopt_args(parser):
    """Register memory-optimization arguments (scattering, splitting, tiling)."""
    group = parser.add_argument_group('Memory optimizations', 'configurations')
    group.add_argument(
        "--scattered-embeddings", action='store_true',
        help='Save memory by scattering embedding activations. '
             'Introduces dropout differences across MP configurations.')
    group.add_argument(
        "--split-transformers", action='store_true',
        help='Save memory by splitting transformer layers into two parts, '
             'allowing for more frequent activation checkpoint savings.')
    group.add_argument(
        "--memory-centric-tiled-linear", action="store_true",
        help='Save memory by tiling with deepspeed.zero.TiledLinear.')
    group.add_argument(
        "--tile-factor", type=int, default=1,
        help='Make all linear layers the same size of [hidden/tile_factor, hidden/tile_factor]. '
             'Must be enabled with --memory-centric-tiled-linear. '
             'Example A: if tile_factor=1, the qkv layer [hidden, 3* hidden] would be converted into [1,3] tiles of size [hidden,hidden]. '
             'Example B: if tile_factor=2, the intermediate layer [4*hidden, hidden] will be converted into [8, 2] tiles of size [hidden/2, hidden/2]. '
             'Default is 1.')
    return parser
def _add_activation_checkpoint_args(parser):
    """Register DeepSpeed activation-checkpointing arguments."""
    group = parser.add_argument_group('Activation Checkpointing',
                                      'Checkpointing Configurations')
    group.add_argument('--deepspeed-activation-checkpointing', action='store_true',
                       help='uses activation checkpointing from deepspeed')
    group.add_argument('--partition-activations', action='store_true',
                       help='partition Activations across GPUs before checkpointing.')
    # Historical misspelled flag kept first for backward compatibility;
    # the correctly spelled alias maps to the same dest.
    group.add_argument('--contigious-checkpointing', '--contiguous-checkpointing',
                       action='store_true',
                       help='Contiguous memory checkpointing for activations.')
    group.add_argument('--checkpoint-in-cpu', action='store_true',
                       help='Move the activation checkpoints to CPU.')
    group.add_argument('--synchronize-each-layer', action='store_true',
                       help='does a synchronize at the beginning and end of each checkpointed layer.')
    group.add_argument('--profile-backward', action='store_true',
                       help='Enables backward pass profiling for checkpointed layers.')
    return parser
| 52.28215 | 167 | 0.600169 |
import argparse
import collections
import os
import re
import time
import torch
import deepspeed
from megatron.enums import PositionEmbeddingType
import megatron
from megatron.logging import log_levels
def parse_args(extra_args_provider=None, defaults={},
               ignore_unknown_args=False):
    """Parse, validate, and post-process all Megatron command-line arguments.

    Builds the parser from the per-topic ``_add_*_args`` helpers, parses the
    command line, derives the distributed topology from RANK/WORLD_SIZE
    environment variables, applies ``defaults`` for options left unset, and
    cross-checks mutually exclusive and deprecated options.

    Args:
        extra_args_provider: optional callable ``parser -> parser`` used by
            applications to register extra arguments.
        defaults: mapping of argument name -> value; applied only when the
            argument is still None after parsing (an explicit CLI value wins).
            NOTE(review): mutable default argument; harmless here since it is
            only read, never mutated.
        ignore_unknown_args: when True, unknown options are ignored instead
            of raising.

    Returns:
        argparse.Namespace with validated arguments plus derived fields
        (e.g. ``data_parallel_size``, ``params_dtype``, consumed counters).
    """
    parser = argparse.ArgumentParser(description='Megatron-LM Arguments',
                                     allow_abbrev=False)
    # Register all standard argument groups.
    parser = _add_network_size_args(parser)
    parser = _add_regularization_args(parser)
    parser = _add_training_args(parser)
    parser = _add_initialization_args(parser)
    parser = _add_learning_rate_args(parser)
    parser = _add_checkpointing_args(parser)
    parser = _add_mixed_precision_args(parser)
    parser = _add_distributed_args(parser)
    parser = _add_validation_args(parser)
    parser = _add_data_args(parser)
    parser = _add_autoresume_args(parser)
    parser = _add_biencoder_args(parser)
    parser = _add_vit_args(parser)
    parser = _add_logging_args(parser)
    parser = _add_zero_args(parser)
    parser = _add_memoryopt_args(parser)
    parser = _add_activation_checkpoint_args(parser)
    # Application-specific arguments, then DeepSpeed's own config arguments.
    if extra_args_provider is not None:
        parser = extra_args_provider(parser)
    parser = deepspeed.add_config_arguments(parser)
    if ignore_unknown_args:
        args, _ = parser.parse_known_args()
    else:
        args = parser.parse_args()
    # Distributed topology comes from the launcher environment, not flags.
    args.rank = int(os.getenv('RANK', '0'))
    args.world_size = int(os.getenv("WORLD_SIZE", '1'))
    # Clamp tensor-parallel size to the world size, then check divisibility.
    args.tensor_model_parallel_size = min(
        args.tensor_model_parallel_size, args.world_size)
    assert args.world_size % args.tensor_model_parallel_size == 0, 'world size'\
        ' ({}) is not divisible by tensor model parallel size ({})'.format(
            args.world_size, args.tensor_model_parallel_size)
    # Pipeline-parallel size is bounded by what remains after tensor parallel.
    args.pipeline_model_parallel_size = min(
        args.pipeline_model_parallel_size,
        (args.world_size // args.tensor_model_parallel_size))
    model_parallel_size = args.pipeline_model_parallel_size * \
                          args.tensor_model_parallel_size
    assert args.world_size % model_parallel_size == 0, 'world size is not'\
        ' divisible by tensor parallel size ({}) times pipeline parallel ' \
        'size ({})'.format(args.world_size, args.tensor_model_parallel_size,
                           args.pipeline_model_parallel_size)
    # Data-parallel size is whatever is left over.
    args.data_parallel_size = args.world_size // model_parallel_size
    if args.rank == 0:
        print('using world size: {}, data-parallel-size: {}, '
              'tensor-model-parallel size: {}, '
              'pipeline-model-parallel size: {} '.format(
                  args.world_size, args.data_parallel_size,
                  args.tensor_model_parallel_size,
                  args.pipeline_model_parallel_size), flush=True)
    # Two mutually exclusive dataset modes: --data-path/--split (mode 1) vs
    # the --(train|valid|test)-weighted-split-paths family (mode 2).
    message = "Data loading Mode 1: --data-path and --split "\
              "and Mode 2: --(train|valid|test)-weighted-split-paths"\
              "are mutually exclusive i.e. cannot be set together."
    if args.data_path:
        assert args.train_weighted_split_paths is None, message
        # Mode 1: blank out the mode-2 derived attributes so downstream code
        # can test them uniformly.
        setattr(args, "valid_weighted_split_names", None)
        setattr(args, "valid_weighted_split_weights", None)
        setattr(args, "valid_weighted_split_splits", None)
        setattr(args, "test_weighted_split_names", None)
        setattr(args, "test_weighted_split_weights", None)
        setattr(args, "test_weighted_split_splits", None)
        if args.split is None:
            args.split = "969, 30, 1"
    if args.train_weighted_split_paths or args.valid_weighted_split_paths or \
        args.test_weighted_split_paths:
        assert args.data_path is None and args.split is None, message
    # Deprecated arguments: must not be set; remove them from the namespace.
    assert args.batch_size is None, '--batch-size argument is no longer ' \
        'valid, use --micro-batch-size instead'
    del args.batch_size
    assert args.warmup is None, '--warmup argument is no longer valid, use ' \
        '--lr-warmup-fraction instead'
    del args.warmup
    assert args.model_parallel_size is None, '--model-parallel-size is no ' \
        'longer valid, use --tensor-model-parallel-size instead'
    del args.model_parallel_size
    # Apply caller-supplied defaults only where the CLI left the value unset.
    for key in defaults:
        if getattr(args, key) is not None:
            if args.rank == 0:
                print('WARNING: overriding default arguments for {key}:{v} \
                       with {key}:{v2}'.format(key=key, v=defaults[key],
                                               v2=getattr(args, key)),
                      flush=True)
        else:
            setattr(args, key, defaults[key])
    # Batch-size bookkeeping: global batch defaults to micro * data-parallel.
    assert args.micro_batch_size is not None
    assert args.micro_batch_size > 0
    if args.global_batch_size is None:
        args.global_batch_size = args.micro_batch_size * args.data_parallel_size
        if args.rank == 0:
            print('setting global batch size to {}'.format(
                args.global_batch_size), flush=True)
    assert args.global_batch_size > 0
    # Interleaved (virtual) pipeline schedule sizing.
    if args.num_layers_per_virtual_pipeline_stage is not None:
        assert args.pipeline_model_parallel_size > 2, \
            'pipeline-model-parallel size should be greater than 2 with ' \
            'interleaved schedule'
        assert args.num_layers % args.num_layers_per_virtual_pipeline_stage == 0, \
            'number of layers is not divisible by number of layers per virtual ' \
            'pipeline stage'
        args.virtual_pipeline_model_parallel_size = \
            (args.num_layers // args.pipeline_model_parallel_size) // \
            args.num_layers_per_virtual_pipeline_stage
    else:
        args.virtual_pipeline_model_parallel_size = None
    # Parameter dtype: fp16 and bf16 are mutually exclusive; default fp32.
    args.params_dtype = torch.float
    if args.fp16:
        assert not args.bf16
        args.params_dtype = torch.half
    if args.bf16:
        assert not args.fp16
        args.params_dtype = torch.bfloat16
        # bf16 forces fp32 gradient accumulation/all-reduce.
        if not args.accumulate_allreduce_grads_in_fp32:
            args.accumulate_allreduce_grads_in_fp32 = True
            if args.rank == 0:
                print('accumulate and all-reduce gradients in fp32 for '
                      'bfloat16 data type.', flush=True)
    if args.rank == 0:
        print('using {} for parameters ...'.format(args.params_dtype),
              flush=True)
    # fp32 grad accumulation requires local DDP with contiguous buffers.
    if args.accumulate_allreduce_grads_in_fp32:
        assert args.DDP_impl == 'local'
        args.use_contiguous_buffers_in_ddp = True
    if args.dataloader_type is None:
        args.dataloader_type = 'single'
    # Progress counters, updated during training.
    args.consumed_train_samples = 0
    args.consumed_valid_samples = 0
    args.consumed_train_tokens = 0
    args.gigaflos_no_embeds = 0
    # Iteration-based and sample-based training are mutually exclusive.
    if args.train_iters:
        assert args.train_samples is None, \
            'expected iteration-based training'
        assert args.lr_decay_samples is None, \
            'expected iteration-based learning rate decay'
        assert args.lr_warmup_samples == 0, \
            'expected iteration-based learning rate warmup'
        assert args.rampup_batch_size is None, \
            'expected no batch-size rampup for iteration-based training'
        if args.lr_warmup_fraction is not None:
            assert args.lr_warmup_iters == 0, \
                'can only specify one of lr-warmup-fraction and lr-warmup-iters'
    if args.train_samples:
        assert args.train_iters is None, \
            'expected sample-based training'
        assert args.lr_decay_iters is None, \
            'expected sample-based learning rate decay'
        assert args.lr_warmup_iters == 0, \
            'expected sample-based learnig rate warmup'
        if args.lr_warmup_fraction is not None:
            assert args.lr_warmup_samples == 0, \
                'can only specify one of lr-warmup-fraction ' \
                'and lr-warmup-samples'
    # Architecture arguments that have no usable defaults.
    required_args = ['num_layers', 'hidden_size', 'num_attention_heads']
    for req_arg in required_args:
        _check_arg_is_not_none(args, req_arg)
    # Derived architecture defaults.
    if args.ffn_hidden_size is None:
        args.ffn_hidden_size = 4 * args.hidden_size
    if args.kv_channels is None:
        assert args.hidden_size % args.num_attention_heads == 0
        args.kv_channels = args.hidden_size // args.num_attention_heads
    # --seq-length and --encoder-seq-length are two names for the same thing;
    # exactly one must be given, and both attributes end up populated.
    if args.seq_length is not None:
        assert args.encoder_seq_length is None
        args.encoder_seq_length = args.seq_length
    else:
        assert args.encoder_seq_length is not None
        args.seq_length = args.encoder_seq_length
    # Position-embedding tables exist only for absolute/alibi embeddings.
    if args.position_embedding_type == PositionEmbeddingType.absolute or args.position_embedding_type == PositionEmbeddingType.alibi:
        assert args.max_position_embeddings is not None
        if args.seq_length is not None:
            assert args.max_position_embeddings >= args.seq_length
        if args.decoder_seq_length is not None:
            assert args.max_position_embeddings >= args.decoder_seq_length
    else:
        assert args.max_position_embeddings is None
    # Miscellaneous cross-option consistency checks.
    if args.lr is not None:
        assert args.min_lr <= args.lr
    if args.save is not None:
        assert args.save_interval is not None
    if args.fp16_lm_cross_entropy:
        assert args.fp16, 'lm cross entropy in fp16 only support in fp16 mode.'
    if args.fp32_residual_connection:
        assert args.fp16 or args.bf16, \
            'residual connection in fp32 only supported when using fp16 or bf16.'
    if args.distribute_checkpointed_activations:
        assert args.checkpoint_activations, \
            'for distribute-checkpointed-activations to work you '\
            'need to enable checkpoint-activations'
    args.curriculum_learning = False
    if args.glu_activation is not None and args.bias_gelu_fusion:
        raise ValueError("if glu-activation is used, please set --no-bias-gelu-fusion")
    # Normalize "start-end" skip ranges into a sorted deque of merged,
    # non-overlapping [start, end] pairs.
    if args.skip_train_iteration_range is not None:
        args.skip_train_iteration_range = [
            list(map(int, range_.split("-"))) for range_ in args.skip_train_iteration_range
        ]
        args.skip_train_iteration_range.sort()
        skip_train_iteration_range = collections.deque()
        for range_ in args.skip_train_iteration_range:
            if len(range_) == 2:
                start, end = range_
                assert end >= start, \
                    "end of skip range cannot be smaller than start of skip range"
                # merge overlapping ranges
                if not skip_train_iteration_range:
                    skip_train_iteration_range.append([start, end])
                elif skip_train_iteration_range[-1][1] >= start:
                    skip_train_iteration_range[-1][1] = max(end, skip_train_iteration_range[-1][1])
                else:
                    skip_train_iteration_range.append([start, end])
            else:
                raise ValueError(
                    "skip train iterations should be specified as two numbers, i.e. start-end"
                )
        args.skip_train_iteration_range = skip_train_iteration_range
    # Fail early if the optional bitsandbytes dependency is requested but absent.
    if args.use_bnb_optimizer:
        try:
            import bitsandbytes as bnb
        except ModuleNotFoundError:
            raise ModuleNotFoundError("Please install bitsandbytes from https://github.com/facebookresearch/bitsandbytes.")
    _print_args(args)
    return args
def _print_args(args):
if args.rank == 0:
print('------------------------ arguments ------------------------',
flush=True)
str_list = []
for arg in vars(args):
dots = '.' * (48 - len(arg))
str_list.append(' {} {} {}'.format(arg, dots, getattr(args, arg)))
if args.log_path is not None:
with open(os.path.join(args.log_path,f'args_{time.strftime("%Y-%m-%dT%H:%M:%S")}.txt'), 'w') as f:
for arg in sorted(str_list, key=lambda x: x.lower()):
f.write(arg+"\n")
print(arg, flush=True)
else:
for arg in sorted(str_list, key=lambda x: x.lower()):
print(arg, flush=True)
print('-------------------- end of arguments ---------------------',
flush=True)
def _check_arg_is_not_none(args, arg):
assert getattr(args, arg) is not None, '{} argument is None'.format(arg)
def _add_network_size_args(parser):
    """Register model-architecture options (layer count, hidden sizes,
    attention heads, position-embedding type, logging levels, ...).

    Returns the parser to allow chaining.
    """
    group = parser.add_argument_group(title='network size')
    group.add_argument('--num-layers', type=int, default=None,
                       help='Number of transformer layers.')
    # NOTE(review): "Tansformer" typo below is user-visible help text,
    # preserved here; fix separately if desired.
    group.add_argument('--hidden-size', type=int, default=None,
                       help='Tansformer hidden size.')
    group.add_argument('--ffn-hidden-size', type=int, default=None,
                       help='Transformer Feed-Forward Network hidden size. '
                       'This is set to 4*hidden-size if not provided')
    group.add_argument('--num-attention-heads', type=int, default=None,
                       help='Number of transformer attention heads.')
    group.add_argument('--kv-channels', type=int, default=None,
                       help='Projection weights dimension in multi-head '
                       'attention. This is set to '
                       ' args.hidden_size // args.num_attention_heads '
                       'if not provided.')
    group.add_argument('--max-position-embeddings', type=int, default=None,
                       help='Maximum number of position embeddings to use. '
                       'This is the size of position embedding.')
    group.add_argument('--make-vocab-size-divisible-by', type=int, default=128,
                       help='Pad the vocab size to be divisible by this value.'
                       'This is added for computational efficieny reasons.')
    group.add_argument('--layernorm-epsilon', type=float, default=1e-5,
                       help='Layer norm epsilon.')
    group.add_argument('--apply-residual-connection-post-layernorm',
                       action='store_true',
                       help='If set, use original BERT residula connection '
                       'ordering.')
    group.add_argument('--embed-layernorm', action='store_true',
                       help='use layernorm for embedding')
    group.add_argument('--openai-gelu', action='store_true',
                       help='Use OpenAIs GeLU implementation. This option'
                       'should not be used unless for backward compatibility'
                       'reasons.')
    group.add_argument('--onnx-safe', type=bool, required=False,
                       help='Use workarounds for known problems with '
                       'Torch ONNX exporter')
    # store_false with dest 'bert_binary_head': attribute defaults to True,
    # the flag turns it off.
    group.add_argument('--bert-no-binary-head', action='store_false',
                       help='Disable BERT binary head.',
                       dest='bert_binary_head')
    # The lambda converts the CLI string into a PositionEmbeddingType member
    # at parse time (raises KeyError on unknown names).
    group.add_argument('--position-embedding-type', type=lambda x: PositionEmbeddingType[x],
                       choices=list(PositionEmbeddingType),
                       default=PositionEmbeddingType.absolute,
                       help='Define position embedding type ("absolute" | "rotary" | "alibi"). "absolute" by default.'
                       )
    group.add_argument('--glu-activation', type=str,
                       choices=megatron.model.glu_activations.GLU_ACTIVATIONS.keys(),
                       help='GLU activations to use.'
                       )
    group.add_argument('--kill-switch-path', type=str,
                       help='path to look for a kill switch, which if found will automatically exit the program'
                       )
    group.add_argument('--log-level', type=str, choices=list(log_levels.keys()),
                       help="Logger log level to use on the main process. Possible choices are the log levels as strings: 'debug', "
                       "'info', 'warning', 'error' and 'critical', plus a 'passive' level which doesn't set anything and lets the "
                       "application set the level."
                       )
    group.add_argument('--log-level-replica', type=str, choices=list(log_levels.keys()),
                       help="Logger log level to use on replicas. Same choices as ``log_level``"
                       )
    return parser
def _add_logging_args(parser):
    """Register logging and TensorBoard reporting options.

    Returns the parser to allow chaining.
    """
    group = parser.add_argument_group(title='logging')
    group.add_argument('--log-params-norm', action='store_true',
                       help='If set, calculate and log parameters norm.')
    group.add_argument('--log-num-zeros-in-grad', action='store_true',
                       help='If set, calculate and log the number of zeros in gradient.')
    group.add_argument('--tensorboard-log-interval', type=int, default=1,
                       help='Report to tensorboard interval.')
    group.add_argument('--tensorboard-queue-size', type=int, default=1000,
                       help='Size of the tensorboard queue for pending events '
                       'and summaries before one of the ‘add’ calls forces a '
                       'flush to disk.')
    group.add_argument('--log-timers-to-tensorboard', action='store_true',
                       help='If set, write timers to tensorboard.')
    group.add_argument('--log-batch-size-to-tensorboard', action='store_true',
                       help='If set, write batch-size to tensorboard.')
    # The two "--no-*" flags use store_false with positive dest names, so the
    # attributes default to True and the flags disable the behavior.
    group.add_argument('--no-log-learnig-rate-to-tensorboard',
                       action='store_false',
                       help='Disable learning rate logging to tensorboard.',
                       dest='log_learning_rate_to_tensorboard')
    group.add_argument('--no-log-loss-scale-to-tensorboard',
                       action='store_false',
                       help='Disable loss-scale logging to tensorboard.',
                       dest='log_loss_scale_to_tensorboard')
    group.add_argument('--log-validation-ppl-to-tensorboard',
                       action='store_true',
                       help='If set, write validation perplexity to '
                       'tensorboard.')
    return parser
def _add_regularization_args(parser):
group = parser.add_argument_group(title='regularization')
group.add_argument('--attention-dropout', type=float, default=0.1,
help='Post attention dropout probability.')
group.add_argument('--hidden-dropout', type=float, default=0.1,
help='Dropout probability for hidden state transformer.')
group.add_argument('--weight-decay', type=float, default=0.01,
help='Weight decay coefficient for L2 regularization.')
group.add_argument('--clip-grad', type=float, default=1.0,
help='Gradient clipping based on global L2 norm.')
group.add_argument('--adam-beta1', type=float, default=0.9,
help='First coefficient for computing running averages '
'of gradient and its square')
group.add_argument('--adam-beta2', type=float, default=0.999,
help='Second coefficient for computing running averages '
'of gradient and its square')
group.add_argument('--adam-eps', type=float, default=1e-08,
help='Term added to the denominator to improve'
'numerical stability')
group.add_argument('--sgd-momentum', type=float, default=0.9,
help='Momentum factor for sgd')
return parser
def _add_training_args(parser):
    """Register core training-loop options: batch sizing and ramp-up,
    activation checkpointing, run length, kernel fusions and optimizer choice.

    Returns the parser to allow chaining.
    """
    group = parser.add_argument_group(title='training')
    group.add_argument('--micro-batch-size', type=int, default=None,
                       help='Batch size per model instance (local batch size). '
                       'Global batch size is local batch size times data '
                       'parallel size times number of micro batches.')
    # Deprecated; parse_args asserts it is unset and deletes it.
    group.add_argument('--batch-size', type=int, default=None,
                       help='Old batch size parameter, do not use. '
                       'Use --micro-batch-size instead')
    group.add_argument('--global-batch-size', type=int, default=None,
                       help='Training batch size. If set, it should be a '
                       'multiple of micro-batch-size times data-parallel-size. '
                       'If this value is None, then '
                       'use micro-batch-size * data-parallel-size as the '
                       'global batch size. This choice will result in 1 for '
                       'number of micro-batches.')
    group.add_argument('--rampup-batch-size', nargs='*', default=None,
                       help='Batch size ramp up with the following values:'
                       ' --rampup-batch-size <start batch size> '
                       ' <batch size increment> '
                       ' <ramp-up samples> '
                       'For example: '
                       ' --rampup-batch-size 16 8 300000 '
                       ' --global-batch-size 1024 '
                       'will start with global batch size 16 and over '
                       ' (1024 - 16) / 8 = 126 intervals will increase '
                       'the batch size linearly to 1024. In each interval '
                       'we will use approximately 300000 / 126 = 2380 samples.')
    group.add_argument('--checkpoint-activations', action='store_true',
                       help='Checkpoint activation to allow for training '
                       'with larger models, sequences, and batch sizes.')
    group.add_argument('--distribute-checkpointed-activations',
                       action='store_true',
                       help='If set, distribute checkpointed activations '
                       'across model parallel group.')
    group.add_argument('--checkpoint-num-layers', type=int, default=1,
                       help='chunk size (number of layers) for checkpointing.')
    # Exactly one of --train-iters / --train-samples must be given;
    # parse_args enforces the exclusivity.
    group.add_argument('--train-iters', type=int, default=None,
                       help='Total number of iterations to train over all '
                       'training runs. Note that either train-iters or '
                       'train-samples should be provided.')
    group.add_argument('--train-samples', type=int, default=None,
                       help='Total number of samples to train over all '
                       'training runs. Note that either train-iters or '
                       'train-samples should be provided.')
    group.add_argument('--train-tokens', type=int, default=None,
                       help='Total number of tokens to train over all '
                       'training runs.')
    group.add_argument('--log-interval', type=int, default=100,
                       help='Report loss and timing interval.')
    group.add_argument('--exit-interval', type=int, default=None,
                       help='Exit the program after the iteration is divisible '
                       'by this value.')
    group.add_argument('--exit-duration-in-mins', type=int, default=None,
                       help='Exit the program after this many minutes.')
    group.add_argument('--tensorboard-dir', type=str, default=None,
                       help='Write TensorBoard logs to this directory.')
    # The "--no-*-fusion" flags use store_false with positive dest names,
    # so fusions are enabled by default.
    group.add_argument('--no-masked-softmax-fusion',
                       action='store_false',
                       help='Disable fusion of query_key_value scaling, '
                       'masking, and softmax.',
                       dest='masked_softmax_fusion')
    group.add_argument('--no-bias-gelu-fusion', action='store_false',
                       help='Disable bias and gelu fusion.',
                       dest='bias_gelu_fusion')
    group.add_argument('--no-bias-dropout-fusion', action='store_false',
                       help='Disable bias and dropout fusion.',
                       dest='bias_dropout_fusion')
    group.add_argument('--optimizer', type=str, default='adam',
                       choices=['adam', 'sgd'],
                       help='Optimizer function')
    group.add_argument('--use-bnb-optimizer', action='store_true',
                       help='Use bitsandbytes optimizer for efficient training,'
                       'please refer https://github.com/facebookresearch/bitsandbytes.',
                       dest='use_bnb_optimizer')
    group.add_argument('--dataloader-type', type=str, default=None,
                       choices=['single', 'cyclic'],
                       help='Single pass vs multiple pass data loader')
    group.add_argument('--cpu-optimizer', action='store_true',
                       help='Run optimizer on CPU')
    group.add_argument('--cpu_torch_adam', action='store_true',
                       help='Use Torch Adam as optimizer on CPU.')
    group.add_argument('--codecarbon-dir', type=str, default=None,
                       help='Write CodeCarbon logs to this directory.')
    group.add_argument('--eval-only', type=bool, required=False,
                       help='If set to True, no train step will be performed.'
                       'and only the evaluation on the `valid` and `test` sets '
                       'will be performed' )
    group.add_argument('--skip-train-iteration-range', type=str, nargs='+', default=None,
                       help='Iteration ranges to skip. The values are one or more dash-separated ranges. e.g., 101-200 251-300.')
    group.add_argument('--abort-on-unmet-fused-kernel-constraints', action='store_true',
                       help="If set to True, the program will abort if the constraints for loading a fused kernel aren't met")
    return parser
def _add_initialization_args(parser):
group = parser.add_argument_group(title='initialization')
group.add_argument('--seed', type=int, default=1234,
help='Random seed used for python, numpy, '
'pytorch, and cuda.')
group.add_argument('--init-method-std', type=float, default=0.02,
help='Standard deviation of the zero mean normal '
'distribution used for weight initialization.')
group.add_argument('--init-method-xavier-uniform', action='store_true',
help='Enable Xavier uniform parameter initialization')
return parser
def _add_learning_rate_args(parser):
    """Register learning-rate schedule options (decay style/horizon, warmup,
    minimum LR, and checkpoint-vs-CLI scheduler precedence).

    Returns the parser to allow chaining.
    """
    group = parser.add_argument_group(title='learning rate')
    group.add_argument('--lr', type=float, default=None,
                       help='Initial learning rate. Depending on decay style '
                       'and initial warmup, the learing rate at each '
                       'iteration would be different.')
    group.add_argument('--lr-decay-style', type=str, default='linear',
                       choices=['constant', 'linear', 'cosine'],
                       help='Learning rate decay function.')
    # Decay horizon can be expressed in iterations, samples, or tokens;
    # parse_args checks consistency with the chosen training mode.
    group.add_argument('--lr-decay-iters', type=int, default=None,
                       help='number of iterations to decay learning rate over,'
                       ' If None defaults to `--train-iters`')
    group.add_argument('--lr-decay-samples', type=int, default=None,
                       help='number of samples to decay learning rate over,'
                       ' If None defaults to `--train-samples`')
    group.add_argument('--lr-decay-tokens', type=int, default=None,
                       help='number of tokens to decay learning rate over,'
                       ' If not None will override iter/sample-based decay')
    group.add_argument('--lr-warmup-fraction', type=float, default=None,
                       help='fraction of lr-warmup-(iters/samples) to use '
                       'for warmup (as a float)')
    group.add_argument('--lr-warmup-iters', type=int, default=0,
                       help='number of iterations to linearly warmup '
                       'learning rate over.')
    group.add_argument('--lr-warmup-samples', type=int, default=0,
                       help='number of samples to linearly warmup '
                       'learning rate over.')
    # Deprecated; parse_args asserts it is unset and deletes it.
    group.add_argument('--warmup', type=int, default=None,
                       help='Old lr warmup argument, do not use. Use one of the'
                       '--lr-warmup-* arguments above')
    group.add_argument('--min-lr', type=float, default=0.0,
                       help='Minumum value for learning rate. The scheduler'
                       'clip values below this threshold.')
    group.add_argument('--override-lr-scheduler', action='store_true',
                       help='Reset the values of the scheduler (learning rate,'
                       'warmup iterations, minimum learning rate, maximum '
                       'number of iterations, and decay style from input '
                       'arguments and ignore values from checkpoints. Note'
                       'that all the above values will be reset.')
    group.add_argument('--use-checkpoint-lr-scheduler', action='store_true',
                       help='Use checkpoint to set the values of the scheduler '
                       '(learning rate, warmup iterations, minimum learning '
                       'rate, maximum number of iterations, and decay style '
                       'from checkpoint and ignore input arguments.')
    return parser
def _add_checkpointing_args(parser):
group = parser.add_argument_group(title='checkpointing')
group.add_argument('--save', type=str, default=None,
help='Output directory to save checkpoints to.')
group.add_argument('--save-interval', type=int, default=None,
help='Number of iterations between checkpoint saves.')
group.add_argument('--no-save-optim', action='store_true', default=None,
help='Do not save current optimizer.')
group.add_argument('--no-save-rng', action='store_true', default=None,
help='Do not save current rng state.')
group.add_argument('--load', type=str, default=None,
help='Directory containing a model checkpoint.')
group.add_argument('--no-load-optim', action='store_true', default=None,
help='Do not load optimizer when loading checkpoint.')
group.add_argument('--no-load-rng', action='store_true', default=None,
help='Do not load rng state when loading checkpoint.')
group.add_argument('--finetune', action='store_true',
help='Load model for finetuning. Do not load optimizer '
'or rng state from checkpoint and set iteration to 0. '
'Assumed when loading a release checkpoint.')
return parser
def _add_mixed_precision_args(parser):
    """Register fp16/bf16 mode and dynamic loss-scaling options.

    Returns the parser to allow chaining.
    """
    group = parser.add_argument_group(title='mixed precision')
    # fp16 and bf16 are mutually exclusive; parse_args enforces this.
    group.add_argument('--fp16', action='store_true',
                       help='Run model in fp16 mode.')
    group.add_argument('--bf16', action='store_true',
                       help='Run model in bfloat16 mode.')
    group.add_argument('--loss-scale', type=float, default=None,
                       help='Static loss scaling, positive power of 2 '
                       'values can improve fp16 convergence. If None, dynamic'
                       'loss scaling is used.')
    group.add_argument('--initial-loss-scale', type=float, default=2**32,
                       help='Initial loss-scale for dynamic loss scaling.')
    group.add_argument('--min-loss-scale', type=float, default=1.0,
                       help='Minimum loss scale for dynamic loss scale.')
    group.add_argument('--loss-scale-window', type=float, default=1000,
                       help='Window over which to raise/lower dynamic scale.')
    group.add_argument('--hysteresis', type=int, default=2,
                       help='hysteresis for dynamic loss scaling')
    group.add_argument('--fp32-residual-connection', action='store_true',
                       help='Move residual connections to fp32.')
    # store_false with dest 'apply_query_key_layer_scaling': scaling is
    # enabled by default, the flag disables it.
    group.add_argument('--no-query-key-layer-scaling', action='store_false',
                       help='Do not scale Q * K^T by 1 / layer-number.',
                       dest='apply_query_key_layer_scaling')
    group.add_argument('--attention-softmax-in-fp32', action='store_true',
                       help='Run attention masking and softmax in fp32. '
                       'This flag is ignored unless '
                       '--no-query-key-layer-scaling is specified.')
    group.add_argument('--accumulate-allreduce-grads-in-fp32',
                       action='store_true',
                       help='Gradient accumulation and all-reduce in fp32.')
    group.add_argument('--fp16-lm-cross-entropy', action='store_true',
                       help='Move the cross entropy unreduced loss calculation'
                       'for lm head to fp16.')
    return parser
def _add_distributed_args(parser):
    """Register tensor/pipeline parallelism and DDP configuration options.

    Returns the parser to allow chaining.
    """
    group = parser.add_argument_group(title='distributed')
    group.add_argument('--tensor-model-parallel-size', type=int, default=1,
                       help='Degree of tensor model parallelism.')
    group.add_argument('--pipeline-model-parallel-size', type=int, default=1,
                       help='Degree of pipeline model parallelism.')
    # Deprecated; parse_args asserts it is unset and deletes it.
    group.add_argument('--model-parallel-size', type=int, default=None,
                       help='Old model parallel argument, do not use. Use '
                       '--tensor-model-parallel-size instead.')
    group.add_argument('--num-layers-per-virtual-pipeline-stage', type=int, default=None,
                       help='Number of layers per virtual pipeline stage')
    group.add_argument('--distributed-backend', default='nccl',
                       choices=['nccl', 'gloo'],
                       help='Which backend to use for distributed training.')
    group.add_argument('--DDP-impl', default='local',
                       choices=['local', 'torch'],
                       help='which DistributedDataParallel implementation '
                       'to use.')
    group.add_argument('--use-contiguous-buffers-in-ddp', action='store_true',
                       help='If set, use contiguous buffer in DDP. Note that '
                       'this option only works woth local DDP.' )
    group.add_argument('--no-scatter-gather-tensors-in-pipeline', action='store_false',
                       help='Use scatter/gather to optimize communication of tensors in pipeline',
                       dest='scatter_gather_tensors_in_pipeline')
    group.add_argument('--local_rank', type=int, default=None,
                       help='local rank passed from distributed launcher.')
    group.add_argument('--lazy-mpu-init', type=bool, required=False,
                       help='If set to True, initialize_megatron() '
                       'skips DDP initialization and returns function to '
                       'complete it instead.Also turns on '
                       '--use-cpu-initialization flag. This is for '
                       'external DDP manager.' )
    group.add_argument('--use-cpu-initialization', action='store_true',
                       default=None, help='If set, affine parallel weights '
                       'initialization uses CPU' )
    return parser
def _add_validation_args(parser):
group = parser.add_argument_group(title='validation')
group.add_argument('--eval-iters', type=int, default=100,
help='Number of iterations to run for evaluation'
'validation/test for.')
group.add_argument('--eval-interval', type=int, default=1000,
help='Interval between running evaluation on '
'validation set.')
return parser
def _add_data_args(parser):
group = parser.add_argument_group(title='data and dataloader')
group.add_argument('--data-path', nargs='*', default=None,
help='Path to the training dataset. Accepted format:'
'1) a single data path, 2) multiple datasets in the'
'form: dataset1-weight dataset1-path dataset2-weight '
'dataset2-path ...')
group.add_argument('--split', type=str, default=None,
help='Comma-separated list of proportions for training,'
' validation, and test split. For example the split '
'`90,5,5` will use 90%% of data for training, 5%% for '
'validation and 5%% for test.')
class parse_data_paths(argparse.Action):
def __call__(self, parser, args, values, option_string=None):
if option_string == "--train-weighted-split-paths":
assert len(values) == 1, 'Only 1 dataset group is allowed to'
err_message = 'Each data group should be input on the following format'
for v in values:
prefix = ":".join(v.split(":")[1:])
datasets = prefix.split(",")
for d in datasets:
assert len(d.split()) == 3, err_message
start, end = d.split()[1].split(":")
assert float(start) < float(end), err_message
names = [v.split(":")[0] for v in values]
prefixes = [":".join(v.split(":")[1:]).strip() for v in values]
weights = [[d.split()[0] for d in p.split(",")] for p in prefixes]
splits = [[d.split()[1] for d in p.split(",")] for p in prefixes]
paths = [[d.split()[2] for d in p.split(",")] for p in prefixes]
e("paths", "splits"), splits)
setattr(args, self.dest.replace("paths","names"), names)
group.add_argument('--train-weighted-split-paths', nargs='*', default=None,
help='Weights, splits and paths to groups of datasets'
'Accepted format: ONE dataset groups could be'
'submitted in the following form between double quotes'
'"GIVEN_NAME WEIGHT1 START:END PATH1, WEIGHT2 START:END PATH2"'
'e.g.: "NAME_ABC: 0.6 0:0.6 A, 0.3 0:1 B, 0.1 0:1 C" '
'WEIGHT is used to up and down sample each dataset A,B,C in the group'
'START:END indicates the split portion of the dataset',
action=parse_data_paths)
group.add_argument('--valid-weighted-split-paths', nargs='*', default=None,
help='Weights, splits and paths to groups of datasets'
'Accepted format: one or many dataset groups could be'
'submitted in the following form each between double quotes'
'"GIVEN_NAME WEIGHT1 START:END PATH1, WEIGHT2 START:END PATH2"'
'e.g.: "NAME_ABC: 0.6 0.6:0.8 A, 0.3 0:1 B, 0.1 0:1 C" '
'"NAME_CDE: 0.6 0.6:0.8 C, 0.3 0:1 D, 0.1 0:1 E" '
'validation will be run on each of those groups independently',
action=parse_data_paths)
group.add_argument('--test-weighted-split-paths', nargs='*', default=None,
help='Weights, splits and paths to groups of datasets'
'Accepted format: one or many dataset groups could be'
'submitted in the following form each between double quotes'
'"GIVEN_NAME WEIGHT1 START:END PATH1, WEIGHT2 START:END PATH2"'
'e.g.: "NAME_ABC: 0.6 0.6:0.8 A, 0.3 0:1 B, 0.1 0:1 C" '
'"NAME_CDE: 0.6 0.6:0.8 C, 0.3 0:1 D, 0.1 0:1 E" '
'test will be run on each of those groups independently',
action=parse_data_paths)
group.add_argument('--log-path', type=str, default=None,
help='Path to the save arguments file.')
group.add_argument('--vocab-file', type=str, default=None,
help='Path to the vocab file.')
group.add_argument('--merge-file', type=str, default=None,
help='Path to the BPE merge file.')
group.add_argument('--vocab-extra-ids', type=int, default=0,
help='Number of additional vocabulary tokens. '
'They are used for span masking in the T5 model')
group.add_argument('--seq-length', type=int, default=None,
help='Maximum sequence length to process.')
group.add_argument('--encoder-seq-length', type=int, default=None,
help='Maximum encoder sequence length to process.'
'This should be exclusive of --seq-length')
group.add_argument('--decoder-seq-length', type=int, default=None,
help="Maximum decoder sequence length to process.")
group.add_argument('--retriever-seq-length', type=int, default=256,
help='Maximum sequence length for the biencoder model '
' for retriever')
group.add_argument('--sample-rate', type=float, default=1.0,
help='sample rate for training data. Supposed to be 0 '
' < sample_rate < 1')
group.add_argument('--mask-prob', type=float, default=0.15,
help='Probability of replacing a token with mask.')
group.add_argument('--short-seq-prob', type=float, default=0.1,
help='Probability of producing a short sequence.')
group.add_argument('--mmap-warmup', action='store_true',
help='Warm up mmap files.')
group.add_argument('--num-workers', type=int, default=2,
help="Dataloader number of workers.")
group.add_argument('--tokenizer-type', type=str,
default=None,
choices=['BertWordPieceLowerCase',
'BertWordPieceCase',
'GPT2BPETokenizer',
'PretrainedFromHF'],
help='What type of tokenizer to use.')
group.add_argument("--tokenizer-name-or-path", type=str, default=None,
help="Name or path of the huggingface tokenizer.")
group.add_argument('--data-impl', type=str, default='infer',
choices=['lazy', 'cached', 'mmap', 'infer'],
help='Implementation of indexed datasets.')
group.add_argument('--reset-position-ids', action='store_true',
help='Reset posistion ids after end-of-document token.')
group.add_argument('--reset-attention-mask', action='store_true',
help='Reset self attention maske after '
'end-of-document token. Attention between tokens from different documents is null.')
group.add_argument('--eod-mask-loss', action='store_true',
help='Mask loss for the end of document tokens.')
group.add_argument('--loss-on-targets-only', action='store_true',
help='Mask loss on input sequence.')
group.add_argument('--reweight-loss-based-on-position-frequency', action="store_true",
help='Some objectives require us to sample loss_mask. This might introduce bias towards '
'specific positions. This option tries to un-bias the loss by reweighting loss on specific '
'positions based on how frequently we train on that position.'
'This is mostly used for prefix_lm training')
return parser
def _add_autoresume_args(parser):
group = parser.add_argument_group(title='autoresume')
group.add_argument('--adlr-autoresume', action='store_true',
help='Enable autoresume on adlr cluster.')
group.add_argument('--adlr-autoresume-interval', type=int, default=1000,
help='Intervals over which check for autoresume'
'termination signal')
return parser
def _add_biencoder_args(parser):
group = parser.add_argument_group(title='biencoder')
group.add_argument('--ict-head-size', type=int, default=None,
help='Size of block embeddings to be used in ICT and '
'REALM (paper default: 128)')
group.add_argument('--biencoder-projection-dim', type=int, default=0,
help='Size of projection head used in biencoder (paper'
' default: 128)')
group.add_argument('--biencoder-shared-query-context-model', action='store_true',
help='Whether to share the parameters of the query '
'and context models or not')
group.add_argument('--ict-load', type=str, default=None,
help='Directory containing an ICTBertModel checkpoint')
group.add_argument('--bert-load', type=str, default=None,
help='Directory containing an BertModel checkpoint '
'(needed to start ICT and REALM)')
group.add_argument('--titles-data-path', type=str, default=None,
help='Path to titles dataset used for ICT')
group.add_argument('--query-in-block-prob', type=float, default=0.1,
help='Probability of keeping query in block for '
'ICT dataset')
group.add_argument('--use-one-sent-docs', action='store_true',
help='Whether to use one sentence documents in ICT')
group.add_argument('--evidence-data-path', type=str, default=None,
help='Path to Wikipedia Evidence frm DPR paper')
group.add_argument('--retriever-report-topk-accuracies', nargs='+', type=int,
default=[], help="Which top-k accuracies to report "
"(e.g. '1 5 20')")
group.add_argument('--retriever-score-scaling', action='store_true',
help='Whether to scale retriever scores by inverse '
'square root of hidden size')
group.add_argument('--block-data-path', type=str, default=None,
help='Where to save/load BlockData to/from')
group.add_argument('--embedding-path', type=str, default=None,
help='Where to save/load Open-Retrieval Embedding'
' data to/from')
group.add_argument('--indexer-batch-size', type=int, default=128,
help='How large of batches to use when doing indexing '
'jobs')
group.add_argument('--indexer-log-interval', type=int, default=1000,
help='After how many batches should the indexer '
'report progress')
return parser
def _add_vit_args(parser):
group = parser.add_argument_group(title="vit")
group.add_argument('--num-classes', type=int, default=1000,
help='num of classes in vision classificaiton task')
group.add_argument('--img-dim', type=int, default=224,
help='Image size for vision classification task')
group.add_argument('--num-channels', type=int, default=3,
help='Number of channels in input image data')
group.add_argument('--patch-dim', type=int, default=16,
help='patch dimension used in vit')
return parser
def _add_zero_args(parser):
group = parser.add_argument_group('ZeRO configurations', 'configurations')
group.add_argument("--zero-stage", type=int, default=1.0)
group.add_argument('--zero-reduce-scatter', action='store_true',
help='Use reduce scatter if specified')
group.add_argument('--zero-contigious-gradients', action='store_true',
help='Use contigious memory optimizaiton if specified')
group.add_argument("--zero-reduce-bucket-size", type=int, default=0.0)
group.add_argument("--zero-allgather-bucket-size", type=int, default=0.0)
group.add_argument('--remote-device', type=str, default='none', choices=['none', 'cpu', 'nvme'],
help='Remote device for ZeRO-3 initialized parameters.')
group.add_argument('--use-pin-memory', action='store_true',
help='Use pinned CPU memory for ZeRO-3 initialized model parameters.')
return parser
def _add_memoryopt_args(parser):
group = parser.add_argument_group('Memory optimizations', 'configurations')
group.add_argument("--scattered-embeddings", action='store_true',
help='Save memory by scattering embedding activations. '
'Introduces dropout differences across MP configurations.')
group.add_argument("--split-transformers", action='store_true',
help='Save memory by splitting transformer layers into two parts, '
'allowing for more frequent activation checkpoint savings.')
group.add_argument("--memory-centric-tiled-linear", action="store_true",
help='Save memory by tiling with deepspeed.zero.TiledLinear.')
group.add_argument("--tile-factor", type=int, default=1,
help='Make all linear layers the same size of [hidden/tile_factor, hidden/tile_factor]. '
'Must be enabled with --memory-centric-tiled-linear. '
'Example A: if tile_factor=1, the qkv layer [hidden, 3* hidden] would be converted into [1,3] tiles of size [hidden,hidden]. '
'Example B: if tile_factor=2, the intermediate layer [4*hidden, hidden] will be converted into [8, 2] tiles of size [hidden/2, hidden/2]. '
'Default is 1.')
return parser
def _add_activation_checkpoint_args(parser):
group = parser.add_argument_group('Activation Checkpointing',
'Checkpointing Configurations')
group.add_argument('--deepspeed-activation-checkpointing', action='store_true',
help='uses activation checkpointing from deepspeed')
group.add_argument('--partition-activations', action='store_true',
help='partition Activations across GPUs before checkpointing.')
group.add_argument('--contigious-checkpointing', action='store_true',
help='Contigious memory checkpointing for activatoins.')
group.add_argument('--checkpoint-in-cpu', action='store_true',
help='Move the activation checkpoints to CPU.')
group.add_argument('--synchronize-each-layer', action='store_true',
help='does a synchronize at the beginning and end of each checkpointed layer.')
group.add_argument('--profile-backward', action='store_true',
help='Enables backward pass profiling for checkpointed layers.')
return parser
| true | true |
f71508036be54c36e5daf87e785d178e4ded75db | 3,571 | py | Python | openpyxlzip/packaging/tests/test_core.py | ankitJoshi03/openpyxlzip | f3b8aa2f80f9d8bc31ce5fcf05c822d88d2ff647 | [
"MIT"
] | null | null | null | openpyxlzip/packaging/tests/test_core.py | ankitJoshi03/openpyxlzip | f3b8aa2f80f9d8bc31ce5fcf05c822d88d2ff647 | [
"MIT"
] | null | null | null | openpyxlzip/packaging/tests/test_core.py | ankitJoshi03/openpyxlzip | f3b8aa2f80f9d8bc31ce5fcf05c822d88d2ff647 | [
"MIT"
] | null | null | null | # copyright openpyxlzip 2014
import datetime
import pytest
from openpyxlzip.tests.helper import compare_xml
from openpyxlzip.xml.constants import DCTERMS_PREFIX, DCTERMS_NS, XSI_NS
from openpyxlzip.xml.functions import (
fromstring,
tostring,
register_namespace,
NS_REGEX,
)
@pytest.fixture()
def SampleProperties():
    """Build a DocumentProperties instance with one known value per field.

    The attribute values here must stay in sync with the serialised XML
    expected by test_ctor and with the on-disk core.xml fixture.
    """
    from .. core import DocumentProperties
    props = DocumentProperties()
    sample_values = {
        "keywords": "one, two, three",
        "created": datetime.datetime(2010, 4, 1, 20, 30, 00),
        "modified": datetime.datetime(2010, 4, 5, 14, 5, 30),
        "lastPrinted": datetime.datetime(2014, 10, 14, 10, 30),
        "category": "The category",
        "contentStatus": "The status",
        "creator": 'TEST_USER',
        "lastModifiedBy": "SOMEBODY",
        "revision": "0",
        "version": "2.5",
        "description": "The description",
        "identifier": "The identifier",
        "language": "The language",
        "subject": "The subject",
        "title": "The title",
    }
    for attribute, value in sample_values.items():
        setattr(props, attribute, value)
    return props
def test_ctor(SampleProperties):
    """Serialising the sample properties yields the expected coreProperties XML."""
    # NOTE: the expected document must match the fixture values verbatim;
    # comparison goes through compare_xml, so formatting differences are
    # presumably tolerated -- the element/attribute content is what matters.
    expected = """
    <coreProperties
    xmlns="http://schemas.openxmlformats.org/package/2006/metadata/core-properties"
    xmlns:dc="http://purl.org/dc/elements/1.1/"
    xmlns:dcterms="http://purl.org/dc/terms/"
    xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance">
    <dc:creator>TEST_USER</dc:creator>
    <dc:title>The title</dc:title>
    <dc:description>The description</dc:description>
    <dc:subject>The subject</dc:subject>
    <dc:identifier>The identifier</dc:identifier>
    <dc:language>The language</dc:language>
    <dcterms:created xsi:type="dcterms:W3CDTF">2010-04-01T20:30:00Z</dcterms:created>
    <dcterms:modified xsi:type="dcterms:W3CDTF">2010-04-05T14:05:30Z</dcterms:modified>
    <lastModifiedBy>SOMEBODY</lastModifiedBy>
    <category>The category</category>
    <contentStatus>The status</contentStatus>
    <version>2.5</version>
    <revision>0</revision>
    <keywords>one, two, three</keywords>
    <lastPrinted>2014-10-14T10:30:00Z</lastPrinted>
    </coreProperties>
    """
    xml = tostring(SampleProperties.to_tree())
    diff = compare_xml(xml, expected)
    assert diff is None, diff
def test_from_tree(datadir, SampleProperties):
    """Parsing the on-disk core.xml round-trips to properties equal to the fixture."""
    datadir.chdir()
    with open("core.xml") as src:
        content = src.read()
    content = fromstring(content)
    # from_tree is a classmethod-style constructor on the fixture's class
    props = SampleProperties.from_tree(content)
    assert props == SampleProperties
def test_qualified_datetime():
    """A QualifiedDateTime serialises with the xsi:type="dcterms:W3CDTF" marker."""
    from ..core import QualifiedDateTime
    dt = QualifiedDateTime()
    tree = dt.to_tree("time", datetime.datetime(2015, 7, 20, 12, 30))
    xml = tostring(tree)
    # compare_xml tolerates whitespace around the W3CDTF text content
    expected = """
    <time xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:type="dcterms:W3CDTF">
      2015-07-20T12:30:00Z
    </time>"""
    diff = compare_xml(xml, expected)
    assert diff is None, diff
@pytest.fixture(params=['abc', 'dct', 'dcterms', 'xyz'])
def dcterms_prefix(request):
    """Temporarily register DCTERMS_NS under several candidate prefixes.

    Yields the prefix in effect; teardown restores the canonical
    DCTERMS_PREFIX mapping so later tests see the default registration.
    """
    candidate = request.param
    register_namespace(candidate, DCTERMS_NS)
    yield candidate
    # teardown: restore the canonical prefix for the dcterms namespace
    register_namespace(DCTERMS_PREFIX, DCTERMS_NS)
@pytest.mark.no_pypy
def test_qualified_datetime_ns(dcterms_prefix):
    """The serialised xsi:type prefix follows the prefix registered for DCTERMS_NS."""
    from ..core import QualifiedDateTime
    dt = QualifiedDateTime()
    tree = dt.to_tree("time", datetime.datetime(2015, 7, 20, 12, 30))
    xml = tostring(tree)  # serialise and re-parse so the QName resolves to plain text
    tree = fromstring(xml)
    xsi = tree.attrib["{%s}type" % XSI_NS]
    # xsi is e.g. "dcterms:W3CDTF"; only the prefix part is under test
    prefix = xsi.split(":")[0]
    assert prefix == dcterms_prefix
| 32.761468 | 91 | 0.681322 |
import datetime
import pytest
from openpyxlzip.tests.helper import compare_xml
from openpyxlzip.xml.constants import DCTERMS_PREFIX, DCTERMS_NS, XSI_NS
from openpyxlzip.xml.functions import (
fromstring,
tostring,
register_namespace,
NS_REGEX,
)
@pytest.fixture()
def SampleProperties():
from .. core import DocumentProperties
props = DocumentProperties()
props.keywords = "one, two, three"
props.created = datetime.datetime(2010, 4, 1, 20, 30, 00)
props.modified = datetime.datetime(2010, 4, 5, 14, 5, 30)
props.lastPrinted = datetime.datetime(2014, 10, 14, 10, 30)
props.category = "The category"
props.contentStatus = "The status"
props.creator = 'TEST_USER'
props.lastModifiedBy = "SOMEBODY"
props.revision = "0"
props.version = "2.5"
props.description = "The description"
props.identifier = "The identifier"
props.language = "The language"
props.subject = "The subject"
props.title = "The title"
return props
def test_ctor(SampleProperties):
expected = """
<coreProperties
xmlns="http://schemas.openxmlformats.org/package/2006/metadata/core-properties"
xmlns:dc="http://purl.org/dc/elements/1.1/"
xmlns:dcterms="http://purl.org/dc/terms/"
xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance">
<dc:creator>TEST_USER</dc:creator>
<dc:title>The title</dc:title>
<dc:description>The description</dc:description>
<dc:subject>The subject</dc:subject>
<dc:identifier>The identifier</dc:identifier>
<dc:language>The language</dc:language>
<dcterms:created xsi:type="dcterms:W3CDTF">2010-04-01T20:30:00Z</dcterms:created>
<dcterms:modified xsi:type="dcterms:W3CDTF">2010-04-05T14:05:30Z</dcterms:modified>
<lastModifiedBy>SOMEBODY</lastModifiedBy>
<category>The category</category>
<contentStatus>The status</contentStatus>
<version>2.5</version>
<revision>0</revision>
<keywords>one, two, three</keywords>
<lastPrinted>2014-10-14T10:30:00Z</lastPrinted>
</coreProperties>
"""
xml = tostring(SampleProperties.to_tree())
diff = compare_xml(xml, expected)
assert diff is None, diff
def test_from_tree(datadir, SampleProperties):
datadir.chdir()
with open("core.xml") as src:
content = src.read()
content = fromstring(content)
props = SampleProperties.from_tree(content)
assert props == SampleProperties
def test_qualified_datetime():
from ..core import QualifiedDateTime
dt = QualifiedDateTime()
tree = dt.to_tree("time", datetime.datetime(2015, 7, 20, 12, 30))
xml = tostring(tree)
expected = """
<time xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:type="dcterms:W3CDTF">
2015-07-20T12:30:00Z
</time>"""
diff = compare_xml(xml, expected)
assert diff is None, diff
@pytest.fixture(params=['abc', 'dct', 'dcterms', 'xyz'])
def dcterms_prefix(request):
register_namespace(request.param, DCTERMS_NS)
yield request.param
register_namespace(DCTERMS_PREFIX, DCTERMS_NS)
@pytest.mark.no_pypy
def test_qualified_datetime_ns(dcterms_prefix):
from ..core import QualifiedDateTime
dt = QualifiedDateTime()
tree = dt.to_tree("time", datetime.datetime(2015, 7, 20, 12, 30))
xml = tostring(tree)
tree = fromstring(xml)
xsi = tree.attrib["{%s}type" % XSI_NS]
prefix = xsi.split(":")[0]
assert prefix == dcterms_prefix
| true | true |
f7150806ca03c83bb4c23c630e2ac8e54993a8f6 | 223 | py | Python | gumo/task/bind.py | gumo-py/gumo-task | 412c8351da206299ad2785963c8e7e6c7117e75c | [
"MIT"
] | null | null | null | gumo/task/bind.py | gumo-py/gumo-task | 412c8351da206299ad2785963c8e7e6c7117e75c | [
"MIT"
] | 54 | 2019-08-08T02:08:15.000Z | 2022-02-11T02:55:47.000Z | gumo/task/bind.py | gumo-py/gumo-task | 412c8351da206299ad2785963c8e7e6c7117e75c | [
"MIT"
] | 1 | 2019-04-10T09:24:03.000Z | 2019-04-10T09:24:03.000Z | from gumo.task.application.repository import GumoTaskRepository
from gumo.task.infrastructure.repository import GumoTaskRepositoryImpl
def task_bind(binder):
    """Injector binder module: route GumoTaskRepository to its default implementation."""
    binder.bind(GumoTaskRepository, to=GumoTaskRepositoryImpl)
| 31.857143 | 70 | 0.856502 | from gumo.task.application.repository import GumoTaskRepository
from gumo.task.infrastructure.repository import GumoTaskRepositoryImpl
def task_bind(binder):
binder.bind(GumoTaskRepository, to=GumoTaskRepositoryImpl)
| true | true |
f715095c652e3a7692eb07b19d8b2822fddca9ac | 3,595 | py | Python | practice contest 2018/Palindrome/solution2.py | robingan7/ACSL | 3e1c35b0282e8317ff6820ae76ebcad6506a3b53 | [
"MIT"
] | 6 | 2018-11-05T22:59:42.000Z | 2021-09-13T05:43:08.000Z | practice contest 2018/Palindrome/solution2.py | robin-gan/ACSL | 3e1c35b0282e8317ff6820ae76ebcad6506a3b53 | [
"MIT"
] | null | null | null | practice contest 2018/Palindrome/solution2.py | robin-gan/ACSL | 3e1c35b0282e8317ff6820ae76ebcad6506a3b53 | [
"MIT"
] | 2 | 2020-01-12T17:46:37.000Z | 2021-09-13T05:29:38.000Z | def main():
global originalNum
global base
ipl=input("Enter the input")
originalNum=ipl[0:len(ipl)-3]
base=ipl[len(ipl)-2:]
def ABCD(num):
    """Return the decimal value of a single base-16 digit, as a string.

    Accepts the digit as a one-character string or an int
    (e.g. 'A' -> '10', '7' -> '7', 0 -> '0').
    """
    # int(..., 16) replaces the original 17-branch if-chain; it also
    # accepts lowercase hex digits, which the chain silently dropped.
    return str(int(str(num), 16))
def ABCD_reverse(num):
    """Return the base-16 digit character for a decimal value 0-15.

    Accepts the value as an int or decimal string
    (e.g. 10 -> 'A', '11' -> 'B', 5 -> '5').
    """
    # format(..., 'X') emits uppercase hex digits, matching the original
    # 17-branch lookup table exactly for 0-15.
    return format(int(num), 'X')
def edit(l):
    """Convert a list of per-digit values to digit characters and reverse.

    Used by ``add`` to turn its least-significant-first digit list into a
    most-significant-first number string.
    """
    digit_chars = "".join(ABCD_reverse(value) for value in l)
    return add_reverse(digit_chars)
def add(input_number):
    """Add a number (in the global ``base``) to its digit-reversal.

    Returns an int for base 10, and a digit string for every other base.
    ``reduncy`` is the per-position carry list (index i holds the carry
    into digit i); ``index`` collects result digits least-significant
    first and is converted back to a string by ``edit``.
    NOTE(review): ``sum1++intbase`` parses as ``sum1 + (+intbase)`` -- the
    doubled plus is a no-op kept from the original.
    """
    intbase=int(base)
    input_number=str(input_number)
    if(intbase==10):
        # decimal shortcut: plain integer arithmetic
        input_number=int(input_number)
        return input_number+int(add_reverse(str(input_number)))
    else:
        reverse=add_reverse(input_number)
        reduncy=[0]
        index=[]
        # digit-by-digit addition with explicit carry propagation
        for i3 in range(len(input_number)):
            sum1=int(ABCD(input_number[i3]))+int(ABCD(reverse[i3]))
            if(sum1>(intbase-1)):
                # overflow: subtract the base and carry one
                sum1=sum1-intbase+reduncy[i3]
                index.append(sum1)
                reduncy.append(1)
            elif(sum1==(intbase-1)):
                # borderline digit: outcome depends on the incoming carry
                sum1=sum1-intbase+reduncy[i3]
                if(sum1>=0):
                    index.append(sum1)
                    reduncy.append(1)
                else:
                    index.append(sum1++intbase-reduncy[i3])
                    reduncy.append(0)
            else:
                sum1=sum1+reduncy[i3]
                reduncy.append(0)
                index.append(sum1)
        if(reduncy[len(reduncy)-1]==1):
            # final carry produces one extra leading digit
            index.append(1)
        return edit(index)
def add_reverse(input_string):
    """Return the string form of ``input_string`` with its characters reversed."""
    # slice reversal replaces the original manual index-walking loop
    return str(input_string)[::-1]
def is_check(input_string):
    """Return True if ``input_string`` reads the same forwards and backwards.

    The first branch matches even lengths (front half vs reversed back
    half, equal-length slices).  The elif handles odd lengths: both
    slices then include the middle character, which compares against
    itself.  NOTE(review): the ``-0`` in the elif slice is a no-op kept
    from the original.
    """
    input_string=str(input_string)
    if(input_string[:int(len(input_string)/2)]==add_reverse(input_string[int(len(input_string)/2):])):
        return True
    elif(input_string[:(int(len(input_string)/2)+1)]==add_reverse(input_string[(int(len(input_string)/2)-0):])):
        return True
    else:
        return False
def execute():
    """Apply the reverse-and-add step to global ``originalNum`` until it
    becomes a palindrome, giving up after 10 iterations.

    Returns the palindrome string, or 'None,<last value>' if no
    palindrome was reached within the limit.
    """
    count=0
    intial_number=originalNum
    intial_number=str(intial_number)
    while(count<10 and is_check(intial_number)==False):
        intial_number=add(intial_number)
        is_check(intial_number)  # NOTE(review): result discarded -- this call has no effect
        count+=1
    if(is_check(intial_number)):
        return intial_number
    else:
        return 'None,'+str(intial_number)
main()  # read the input line and initialise the globals
print(execute())  # run the reverse-and-add search and print the result
| 21.526946 | 112 | 0.51516 | def main():
global originalNum
global base
ipl=input("Enter the input")
originalNum=ipl[0:len(ipl)-3]
base=ipl[len(ipl)-2:]
def ABCD(num):
num=str(num)
if(num=='1'):
return '1'
if(num=='2'):
return '2'
if(num=='3'):
return '3'
if(num=='4'):
return '4'
if(num=='5'):
return '5'
if(num=='6'):
return '6'
if(num=='7'):
return '7'
if(num=='8'):
return '8'
if(num=='9'):
return '9'
if(num=='A'):
return '10'
if(num=='B'):
return '11'
if(num=='C'):
return '12'
if(num=='D'):
return '13'
if(num=='E'):
return '14'
if(num=='F'):
return '15'
if(num=='0'):
return '0'
def ABCD_reverse(num):
num=int(num)
if(num==1):
return '1'
if(num==2):
return '2'
if(num==3):
return '3'
if(num==4):
return '4'
if(num==5):
return '5'
if(num==6):
return '6'
if(num==7):
return '7'
if(num==8):
return '8'
if(num==9):
return '9'
if(num==10):
return 'A'
if(num==11):
return 'B'
if(num==12):
return 'C'
if(num==13):
return 'D'
if(num==14):
return 'E'
if(num==15):
return 'F'
if(num==0):
return '0'
def edit(l):
l=list(l)
result=""
for i5 in range(len(l)):
result+=str(ABCD_reverse(l[i5]))
return add_reverse(result)
def add(input_number):
intbase=int(base)
input_number=str(input_number)
if(intbase==10):
input_number=int(input_number)
return input_number+int(add_reverse(str(input_number)))
else:
reverse=add_reverse(input_number)
reduncy=[0]
index=[]
for i3 in range(len(input_number)):
sum1=int(ABCD(input_number[i3]))+int(ABCD(reverse[i3]))
if(sum1>(intbase-1)):
sum1=sum1-intbase+reduncy[i3]
index.append(sum1)
reduncy.append(1)
elif(sum1==(intbase-1)):
sum1=sum1-intbase+reduncy[i3]
if(sum1>=0):
index.append(sum1)
reduncy.append(1)
else:
index.append(sum1++intbase-reduncy[i3])
reduncy.append(0)
else:
sum1=sum1+reduncy[i3]
reduncy.append(0)
index.append(sum1)
if(reduncy[len(reduncy)-1]==1):
index.append(1)
return edit(index)
def add_reverse(input_string):
input_string=str(input_string)
reverse_str=""
count=1
for i in range(len(input_string)):
reverse_str+=(input_string[len(input_string)-count])
count+=1
return reverse_str
def is_check(input_string):
input_string=str(input_string)
if(input_string[:int(len(input_string)/2)]==add_reverse(input_string[int(len(input_string)/2):])):
return True
elif(input_string[:(int(len(input_string)/2)+1)]==add_reverse(input_string[(int(len(input_string)/2)-0):])):
return True
else:
return False
def execute():
count=0
intial_number=originalNum
intial_number=str(intial_number)
while(count<10 and is_check(intial_number)==False):
intial_number=add(intial_number)
is_check(intial_number)
count+=1
if(is_check(intial_number)):
return intial_number
else:
return 'None,'+str(intial_number)
main()
print(execute())
| true | true |
f7150987acbe3caf5f386ef1600ec19dfb6f7681 | 10,064 | py | Python | WDL/runtime/download.py | TMiguelT/miniwdl | 5a7724fbf1cbe7bd3b4f251994c83820646ecd9d | [
"MIT"
] | null | null | null | WDL/runtime/download.py | TMiguelT/miniwdl | 5a7724fbf1cbe7bd3b4f251994c83820646ecd9d | [
"MIT"
] | null | null | null | WDL/runtime/download.py | TMiguelT/miniwdl | 5a7724fbf1cbe7bd3b4f251994c83820646ecd9d | [
"MIT"
] | null | null | null | """
Downloading input files from URIs, with plugin modules for different URI schemes
Download URI plugins are installed & registered using the setuptools entry point group
"miniwdl.plugin.file_download", with name equal to the URI scheme (e.g. "gs" or "s3").
The plugin entry point should be a context manager, which the runtime keeps open for the duration of
the download operation. Given the desired URI, it should quickly yield a tuple with:
1. source code of a WDL 1.0 task to perform the download
2. dict of Cromwell-style JSON inputs to give to the task
miniwdl then executes this specified operation, expecting it to produce an output "File file" with
the downloaded file. By doing the heavy lifting in a WDL task, the operation gets to inherit all
the functionality of miniwdl's task runtime, e.g. pulling docker image with binary dependencies,
resource scheduling & isolation, logging, error/signal handling, retry, etc.
The Python context manager itself might be used to obtain and manage the lifetime of any needed
security credentials.
"""
import os
import logging
import traceback
import tempfile
import hashlib
import importlib_metadata
from contextlib import ExitStack
from typing import Optional, List, Generator, Dict, Any, Tuple, Callable
from . import config
from .cache import CallCache
from .._util import compose_coroutines
from .._util import StructuredLogMessage as _
def _load(cfg: config.Loader):
    """Return the scheme -> downloader table, memoized on the cfg object."""
    cached = getattr(cfg, "_downloaders", None)
    if cached:
        return cached
    # built-in downloaders for public URI schemes
    table = {scheme: aria2c_downloader for scheme in ("https", "http", "ftp")}
    table["s3"] = awscli_downloader
    # plugins may add schemes (and may override the defaults above)
    table.update(config.load_plugins(cfg, "file_download"))
    setattr(cfg, "_downloaders", table)
    return table
def _downloader(
    cfg: config.Loader, uri: str,
) -> Optional[Callable[..., Generator[Dict[str, Any], Dict[str, Any], None]]]:
    """Return the downloader registered for uri's scheme, or None if there
    is no colon-delimited scheme or no downloader is registered for it."""
    _load(cfg)
    scheme, sep, _ = uri.partition(":")
    # reject URIs with no colon at all, or with an empty scheme (":...")
    if not (sep and scheme):
        return None
    return getattr(cfg, "_downloaders").get(scheme, None)
def able(cfg: config.Loader, uri: str) -> bool:
    """
    Returns True if uri appears to be a URI we know how to download,
    i.e. a downloader (built-in or plugin) is registered for its scheme
    """
    return _downloader(cfg, uri) is not None
def run(cfg: config.Loader, logger: logging.Logger, uri: str, **kwargs) -> str:
    """
    Download the URI and return the local filename.

    kwargs are passed through to ``run_local_task``, so ``run_dir`` and ``logger_prefix`` may be
    useful in particular.

    Raises DownloadFailed on any error (except Terminated, which is re-raised as-is).
    """
    from .error import RunFailed, DownloadFailed, Terminated, error_json
    from .task import run_local_task
    from .. import parse_document, values_from_json, values_to_json, Walker
    gen = _downloader(cfg, uri)
    assert gen
    try:
        # drive the downloader coroutine: it yields a WDL task + inputs to run
        with compose_coroutines([lambda kwargs: gen(cfg, logger, **kwargs)], {"uri": uri}) as cor:
            recv = next(cor)
            if "task_wdl" in recv:
                # parse & typecheck the downloader-provided WDL task
                task_wdl, inputs = (recv[k] for k in ["task_wdl", "inputs"])
                doc = parse_document(task_wdl, version="1.0")  # pyre-ignore
                assert len(doc.tasks) == 1 and not doc.workflow
                doc.typecheck()
                Walker.SetParents()(doc)
                task = doc.tasks[0]
                inputs = values_from_json(inputs, task.available_inputs)  # pyre-ignore
                subdir, outputs_env = run_local_task(
                    cfg, task, inputs, run_id=("download-" + task.name), **kwargs
                )
                # hand the task outputs back to the coroutine for post-processing
                recv = cor.send(
                    {"outputs": values_to_json(outputs_env), "dir": subdir}  # pyre-ignore
                )
            # the coroutine's final message must carry the downloaded file path
            ans = recv["outputs"]["file"]
            assert isinstance(ans, str) and os.path.isfile(ans)
            return ans
    except RunFailed as exn:
        # unwrap the task failure: propagate Terminated; wrap anything else
        if isinstance(exn.__cause__, Terminated):
            raise exn.__cause__ from None
        raise DownloadFailed(uri) from exn.__cause__
    except Exception as exn:
        logger.debug(traceback.format_exc())
        logger.error(_("downloader error", uri=uri, **error_json(exn)))
        raise DownloadFailed(uri) from exn
def run_cached(
    cfg, logger: logging.Logger, cache: CallCache, uri: str, run_dir: str, **kwargs
) -> Tuple[bool, str]:
    """
    Cached download logic: returns the file from the cache if available; otherwise, runs the
    download and puts it into the cache before returning

    Returns (was_cached, filename).
    """
    cached = cache.get_download(uri, logger=logger)
    if cached:
        return True, cached
    if not cfg["download_cache"].get_bool("put") or not cache.download_path(uri):
        # cache writing disabled, or URI not cacheable: plain uncached download
        return False, run(cfg, logger, uri, run_dir=run_dir, **kwargs)
    # run the download within the cache directory
    run_dir = os.path.join(cfg["download_cache"]["dir"], "ops")
    filename = run(cfg, logger, uri, run_dir=run_dir, **kwargs)
    return False, cache.put_download(uri, os.path.realpath(filename), logger=logger)
# WDL tasks for downloading a file based on its URI scheme
def aria2c_downloader(
    cfg: config.Loader, logger: logging.Logger, uri: str, **kwargs
) -> Generator[Dict[str, Any], Dict[str, Any], None]:
    """Built-in downloader for http/https/ftp URIs, running aria2c in docker.

    First yield hands the runtime a WDL task + inputs; the second yield
    passes the runtime's result (containing outputs.file) straight through.
    """
    wdl = r"""
    task aria2c {
        input {
            String uri
            Int connections = 10
        }
        command <<<
            set -euxo pipefail
            mkdir __out
            cd __out
            aria2c -x ~{connections} -s ~{connections} \
                --file-allocation=none --retry-wait=2 --stderr=true --enable-color=false \
                "~{uri}"
        >>>
        output {
            File file = glob("__out/*")[0]
        }
        runtime {
            cpu: 4
            memory: "1G"
            docker: "hobbsau/aria2"
        }
    }
    """
    recv = yield {"task_wdl": wdl, "inputs": {"uri": uri}}
    yield recv  # pyre-ignore
def awscli_downloader(
    cfg: config.Loader, logger: logging.Logger, uri: str, **kwargs
) -> Generator[Dict[str, Any], Dict[str, Any], None]:
    """Built-in downloader for s3:// URIs, running `aws s3 cp` in docker.

    If configured, host AWS credentials are obtained from boto3 and passed
    into the task through a self-destructing temporary file; otherwise the
    task falls back to instance-role or unsigned (public-object) requests.
    """
    # get AWS credentials from boto3 (unless prevented by configuration)
    host_aws_credentials = None
    if cfg["download_awscli"].get_bool("host_credentials"):
        try:
            import boto3  # pyre-fixme
            b3creds = boto3.session.Session().get_credentials()
            host_aws_credentials = "\n".join(
                f"export {k}='{v}'"
                for (k, v) in {
                    "AWS_ACCESS_KEY_ID": b3creds.access_key,
                    "AWS_SECRET_ACCESS_KEY": b3creds.secret_key,
                    "AWS_SESSION_TOKEN": b3creds.token,
                }.items()
                if v
            )
        except Exception:
            # best-effort: proceed without host credentials
            pass
    inputs = {"uri": uri}
    with ExitStack() as cleanup:
        if host_aws_credentials:
            # write credentials to temp file that'll self-destruct afterwards
            aws_credentials_file = cleanup.enter_context(
                tempfile.NamedTemporaryFile(
                    prefix=hashlib.sha256(host_aws_credentials.encode()).hexdigest(),
                    delete=True,
                    mode="w",
                )
            )
            print(host_aws_credentials, file=aws_credentials_file, flush=True)
            # make file group-readable to ensure it'll be usable if the docker image runs as non-root
            os.chmod(aws_credentials_file.name, os.stat(aws_credentials_file.name).st_mode | 0o40)
            inputs["aws_credentials"] = aws_credentials_file.name
            logger.getChild("awscli_downloader").info("loaded host AWS credentials")
        else:
            logger.getChild("awscli_downloader").info(
                "no AWS credentials available via host awscli/boto3; if needed, "
                "configure them and set [download_awscli] host_credentials=true. "
                "(On EC2: awscli might still assume role from instance metadata "
                "service.)"
            )
        wdl = r"""
        task aws_s3_cp {
            input {
                String uri
                File? aws_credentials
            }
            command <<<
                set -euo pipefail
                if [ -n "~{aws_credentials}" ]; then
                    source "~{aws_credentials}"
                fi
                args=""
                if ! aws sts get-caller-identity >&2 ; then
                    # no credentials or instance role; add --no-sign-request to allow requests for
                    # PUBLIC objects to proceed.
                    args="--no-sign-request"
                fi
                mkdir __out
                cd __out
                aws s3 cp $args "~{uri}" .
            >>>
            output {
                File file = glob("__out/*")[0]
            }
            runtime {
                cpu: 4
                memory: "1G"
                docker: "amazon/aws-cli"
            }
        }
        """
        # keep the credentials file alive (inside the ExitStack) until the
        # task has run; the second yield relays the runtime's result
        recv = yield {
            "task_wdl": wdl,
            "inputs": inputs,
        }
        yield recv  # pyre-ignore
def gsutil_downloader(
    cfg: config.Loader, logger: logging.Logger, uri: str, **kwargs
) -> Generator[Dict[str, Any], Dict[str, Any], None]:
    """
    Built-in downloader plugin for public gs:// URIs; registered by setup.cfg entry_points section
    TODO: adopt security credentials from runtime environment
    """
    if uri == "gs://8675309":
        # hook for test coverage of exception handler
        raise RuntimeError("don't change your number")
    wdl = r"""
    task gsutil_cp {
        input {
            String uri
        }
        command <<<
            set -euxo pipefail
            mkdir __out/
            gsutil -q cp "~{uri}" __out/
        >>>
        output {
            File file = glob("__out/*")[0]
        }
        runtime {
            cpu: 4
            memory: "1G"
            docker: "google/cloud-sdk:slim"
        }
    }
    """
    # first yield sends the task spec; the outer yield relays the runtime's result
    yield (yield {"task_wdl": wdl, "inputs": {"uri": uri}})  # pyre-ignore
| 34.465753 | 101 | 0.588335 | import os
import logging
import traceback
import tempfile
import hashlib
import importlib_metadata
from contextlib import ExitStack
from typing import Optional, List, Generator, Dict, Any, Tuple, Callable
from . import config
from .cache import CallCache
from .._util import compose_coroutines
from .._util import StructuredLogMessage as _
def _load(cfg: config.Loader):
table = getattr(cfg, "_downloaders", None)
if table:
return table
table = {
"https": aria2c_downloader,
"http": aria2c_downloader,
"ftp": aria2c_downloader,
"s3": awscli_downloader,
}
for plugin_name, plugin_fn in config.load_plugins(cfg, "file_download"):
table[plugin_name] = plugin_fn
setattr(cfg, "_downloaders", table)
return table
def _downloader(
cfg: config.Loader, uri: str,
) -> Optional[Callable[..., Generator[Dict[str, Any], Dict[str, Any], None]]]:
_load(cfg)
colon = uri.find(":")
if colon <= 0:
return None
scheme = uri[:colon]
return getattr(cfg, "_downloaders").get(scheme, None)
def able(cfg: config.Loader, uri: str) -> bool:
return _downloader(cfg, uri) is not None
def run(cfg: config.Loader, logger: logging.Logger, uri: str, **kwargs) -> str:
from .error import RunFailed, DownloadFailed, Terminated, error_json
from .task import run_local_task
from .. import parse_document, values_from_json, values_to_json, Walker
gen = _downloader(cfg, uri)
assert gen
try:
with compose_coroutines([lambda kwargs: gen(cfg, logger, **kwargs)], {"uri": uri}) as cor:
recv = next(cor)
if "task_wdl" in recv:
task_wdl, inputs = (recv[k] for k in ["task_wdl", "inputs"])
doc = parse_document(task_wdl, version="1.0")
assert len(doc.tasks) == 1 and not doc.workflow
doc.typecheck()
Walker.SetParents()(doc)
task = doc.tasks[0]
inputs = values_from_json(inputs, task.available_inputs)
subdir, outputs_env = run_local_task(
cfg, task, inputs, run_id=("download-" + task.name), **kwargs
)
recv = cor.send(
{"outputs": values_to_json(outputs_env), "dir": subdir}
)
ans = recv["outputs"]["file"]
assert isinstance(ans, str) and os.path.isfile(ans)
return ans
except RunFailed as exn:
if isinstance(exn.__cause__, Terminated):
raise exn.__cause__ from None
raise DownloadFailed(uri) from exn.__cause__
except Exception as exn:
logger.debug(traceback.format_exc())
logger.error(_("downloader error", uri=uri, **error_json(exn)))
raise DownloadFailed(uri) from exn
def run_cached(
cfg, logger: logging.Logger, cache: CallCache, uri: str, run_dir: str, **kwargs
) -> Tuple[bool, str]:
cached = cache.get_download(uri, logger=logger)
if cached:
return True, cached
if not cfg["download_cache"].get_bool("put") or not cache.download_path(uri):
return False, run(cfg, logger, uri, run_dir=run_dir, **kwargs)
run_dir = os.path.join(cfg["download_cache"]["dir"], "ops")
filename = run(cfg, logger, uri, run_dir=run_dir, **kwargs)
return False, cache.put_download(uri, os.path.realpath(filename), logger=logger)
def aria2c_downloader(
cfg: config.Loader, logger: logging.Logger, uri: str, **kwargs
) -> Generator[Dict[str, Any], Dict[str, Any], None]:
wdl = r"""
task aria2c {
input {
String uri
Int connections = 10
}
command <<<
set -euxo pipefail
mkdir __out
cd __out
aria2c -x ~{connections} -s ~{connections} \
--file-allocation=none --retry-wait=2 --stderr=true --enable-color=false \
"~{uri}"
>>>
output {
File file = glob("__out/*")[0]
}
runtime {
cpu: 4
memory: "1G"
docker: "hobbsau/aria2"
}
}
"""
recv = yield {"task_wdl": wdl, "inputs": {"uri": uri}}
yield recv
def awscli_downloader(
    cfg: config.Loader, logger: logging.Logger, uri: str, **kwargs
) -> Generator[Dict[str, Any], Dict[str, Any], None]:
    """Downloader plugin: fetch an s3:// *uri* via a WDL task running awscli.

    Optionally resolves the host's AWS credentials through boto3 and forwards
    them into the task as a sourceable file of shell exports.
    """

    def gather_host_credentials():
        # Resolve host AWS credentials via boto3 as shell export lines;
        # returns None when disabled, boto3 is missing, or resolution fails.
        if not cfg["download_awscli"].get_bool("host_credentials"):
            return None
        try:
            import boto3

            creds = boto3.session.Session().get_credentials()
            return "\n".join(
                f"export {k}='{v}'"
                for (k, v) in {
                    "AWS_ACCESS_KEY_ID": creds.access_key,
                    "AWS_SECRET_ACCESS_KEY": creds.secret_key,
                    "AWS_SESSION_TOKEN": creds.token,
                }.items()
                if v
            )
        except Exception:
            return None

    host_aws_credentials = gather_host_credentials()
    inputs = {"uri": uri}
    # ExitStack keeps the temp credentials file alive across the yields,
    # i.e. for the whole lifetime of the task run.
    with ExitStack() as cleanup:
        if host_aws_credentials:
            creds_file = cleanup.enter_context(
                tempfile.NamedTemporaryFile(
                    prefix=hashlib.sha256(host_aws_credentials.encode()).hexdigest(),
                    delete=True,
                    mode="w",
                )
            )
            print(host_aws_credentials, file=creds_file, flush=True)
            # make file group-readable to ensure it'll be usable if the docker image runs as non-root
            os.chmod(creds_file.name, os.stat(creds_file.name).st_mode | 0o40)
            inputs["aws_credentials"] = creds_file.name
            logger.getChild("awscli_downloader").info("loaded host AWS credentials")
        else:
            logger.getChild("awscli_downloader").info(
                "no AWS credentials available via host awscli/boto3; if needed, "
                "configure them and set [download_awscli] host_credentials=true. "
                "(On EC2: awscli might still assume role from instance metadata "
                "service.)"
            )
        wdl = r"""
        task aws_s3_cp {
            input {
                String uri
                File? aws_credentials
            }
            command <<<
                set -euo pipefail
                if [ -n "~{aws_credentials}" ]; then
                    source "~{aws_credentials}"
                fi
                args=""
                if ! aws sts get-caller-identity >&2 ; then
                    # no credentials or instance role; add --no-sign-request to allow requests for
                    # PUBLIC objects to proceed.
                    args="--no-sign-request"
                fi
                mkdir __out
                cd __out
                aws s3 cp $args "~{uri}" .
            >>>
            output {
                File file = glob("__out/*")[0]
            }
            runtime {
                cpu: 4
                memory: "1G"
                docker: "amazon/aws-cli"
            }
        }
        """
        recv = yield {
            "task_wdl": wdl,
            "inputs": inputs,
        }
        yield recv
def gsutil_downloader(
    cfg: config.Loader, logger: logging.Logger, uri: str, **kwargs
) -> Generator[Dict[str, Any], Dict[str, Any], None]:
    """Downloader plugin: fetch a gs:// *uri* via a WDL task running gsutil."""
    if uri == "gs://8675309":
        # deliberate test hook: this magic URI always fails
        raise RuntimeError("don't change your number")
    wdl = r"""
    task gsutil_cp {
        input {
            String uri
        }
        command <<<
            set -euxo pipefail
            mkdir __out/
            gsutil -q cp "~{uri}" __out/
        >>>
        output {
            File file = glob("__out/*")[0]
        }
        runtime {
            cpu: 4
            memory: "1G"
            docker: "google/cloud-sdk:slim"
        }
    }
    """
    task_results = yield {"task_wdl": wdl, "inputs": {"uri": uri}}
    yield task_results
| true | true |
f7150a970357699aca9cf473334a78e6e5582097 | 130 | py | Python | palindrome_string.py | arghya-007/collage_python_scripts | 5260c30b86209293ba8caa2fbb1d8afdf5230519 | [
"MIT"
] | 3 | 2020-09-24T18:45:56.000Z | 2020-10-02T02:28:42.000Z | palindrome_string.py | arghya-007/collage_python_scripts | 5260c30b86209293ba8caa2fbb1d8afdf5230519 | [
"MIT"
] | null | null | null | palindrome_string.py | arghya-007/collage_python_scripts | 5260c30b86209293ba8caa2fbb1d8afdf5230519 | [
"MIT"
] | null | null | null | s = input("Please enter your own String : ")
if(s == s[:: - 1]):
print("Palindrome")
else:
print("Not a Palindrome string") | 21.666667 | 44 | 0.607692 | s = input("Please enter your own String : ")
if(s == s[:: - 1]):
print("Palindrome")
else:
print("Not a Palindrome string") | true | true |
f7150d2bc449be689d702bd308ab77339d0f7e3d | 24,627 | py | Python | xalpha/multiple.py | wxw-matt/xalpha | b142a5daebac5f1129ead0553efcd40cd471190c | [
"MIT"
] | null | null | null | xalpha/multiple.py | wxw-matt/xalpha | b142a5daebac5f1129ead0553efcd40cd471190c | [
"MIT"
] | null | null | null | xalpha/multiple.py | wxw-matt/xalpha | b142a5daebac5f1129ead0553efcd40cd471190c | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""
module for mul and mulfix class: fund combination management
"""
import logging
import pandas as pd
from pyecharts import options as opts
from pyecharts.charts import Pie, ThemeRiver
from xalpha.cons import convert_date, myround, yesterdaydash, yesterdayobj
from xalpha.evaluate import evaluate
from xalpha.exceptions import FundTypeError, TradeBehaviorError
from xalpha.record import record, irecord
from xalpha.indicator import indicator
from xalpha.info import cashinfo, fundinfo, mfundinfo, get_fund_holdings
from xalpha.trade import (
bottleneck,
trade,
turnoverrate,
vtradevolume,
xirrcal,
itrade,
vtradecost,
)
from xalpha.universal import get_fund_type, ttjjcode, get_rt, get_industry_fromxq
import xalpha.universal as xu
logger = logging.getLogger(__name__)
class mul:
"""
multiple fund positions manage class
:param fundtradeobj: list of trade obj which you want to analyse together
:param status: the status table of trade, all code in this table would be considered.
one must provide one of the two paramters, if both are offered, status will be overlooked
可以是场内记账单 DataFrame,也可以是 record 对象。
:param istatus: 场内交易账单,也可以是 irecord 对象。
若提供,则场内外交易联合统计展示。该选项只保证 ``combsummary`` 方法可正常使用,不保证 ``mul`` 类的其他方法可用。
:param property: Dict[fundcode, property_number]. property number 的解释:
int. 1: 基金申购采取分位以后全舍而非四舍五入(这种基金是真实存在的==)。2:基金默认分红再投入(0 则是默认现金分红)。4:基金赎回按净值处理(暂时只支持货币基金,事实上无法精确支持按份额赎回的净值型基金)。将想要的性质数值相加即可,类似 *nix 上的 xwr 系统。
:param fetch: boolean, when open the fetch option, info class will try fetching from local files first in the init
:param save: boolean, when open the save option, info classes automatically save the class to files
:param path: string, the file path prefix of IO, or object or engine from sqlalchemy to connect sql database
:param form: string, the format of IO, options including: 'csv','sql'
"""
    def __init__(
        self,
        *fundtradeobj,
        status=None,
        istatus=None,
        property=None,
        fetch=False,
        save=False,
        path="",
        form="csv"
    ):
        """Assemble trade objects for every fund referenced by the arguments.

        Trade objects given positionally are kept as-is; remaining fund codes
        found in ``status`` (open-market record) and ``istatus`` (in-market
        record) are instantiated automatically.
        """
        # a record object carries both the status table and the property dict
        if isinstance(status, record):
            if not property:
                property = getattr(status, "property", {})
            status = status.status
        elif not property:
            property = {}
        # is_in: True once any in-market (itrade) position is present
        self.is_in = False
        if fundtradeobj:
            for t in fundtradeobj:
                if isinstance(t, itrade):
                    self.is_in = True
                    break
        else:
            fundtradeobj = []
        # warning: not a very good way to automatic generate these fund obj
        # because there might be some funds use round_down for share calculation, ie, label=2 must be given
        # unless you are sure corresponding funds are added to the droplist
        # NOTE(review): when trade objects are passed positionally AND status
        # introduces new codes, ``fundtradeobj`` is still a tuple here, so the
        # ``append`` below would raise AttributeError — confirm and fix.
        fundcodelist = [f.code for f in fundtradeobj]
        if status is not None:
            for code in status.columns:
                if code == "date":
                    continue
                # decode the bit-packed property number p = r + d + v:
                # r=1 round-down shares, d=2 reinvest dividends, v=4 redeem by net value
                if code in fundcodelist:
                    continue
                p = property.get(code, 0)
                round_label = p % 2
                dividend_label = ((p - round_label) / 2) % 2
                value_label = ((p - round_label - dividend_label) / 4) % 2
                try:
                    fundtradeobj.append(
                        trade(
                            fundinfo(
                                code,
                                round_label=round_label,
                                dividend_label=dividend_label,
                                fetch=fetch,
                                save=save,
                                path=path,
                                form=form,
                            ),
                            status,
                        )
                    )
                except FundTypeError:
                    # not an open-market fund: fall back to a money fund
                    fundtradeobj.append(
                        trade(
                            mfundinfo(
                                code,
                                round_label=round_label,
                                value_label=value_label,
                                fetch=fetch,
                                save=save,
                                path=path,
                                form=form,
                            ),
                            status,
                        )
                    )
        if istatus is not None:
            self.is_in = True
            if isinstance(istatus, irecord):
                istatus = istatus.status
            # codes starting with "#" are bookkeeping placeholders, skipped here
            for code in istatus.code.unique():
                if code not in fundcodelist and not code.startswith("#"):
                    fundtradeobj.append(itrade(code, istatus))
        self.fundtradeobj = tuple(fundtradeobj)
        self.totcftable = self._mergecftb()
def tot(self, prop="基金现值", date=yesterdayobj()):
"""
sum of all the values from one prop of fund daily report,
of coures many of the props make no sense to sum
:param prop: string defined in the daily report dict,
typical one is 'currentvalue' or 'originalpurchase'
"""
res = 0
for fund in self.fundtradeobj:
res += fund.dailyreport().iloc[0][prop]
return res
def combsummary(self, date=yesterdayobj()):
"""
brief report table of every funds and the combination investment
:param date: string or obj of date, show info of the date given
:returns: empty dict if nothing is remaining that date
dict of various data on the trade positions
"""
date = convert_date(date)
columns = [
"基金名称",
"基金代码",
"当日净值",
"单位成本",
"持有份额",
"基金现值",
"基金总申购",
"历史最大占用",
"基金持有成本",
"基金分红与赎回",
"换手率",
"基金收益总额",
"投资收益率",
]
summarydf = pd.DataFrame([], columns=columns)
for fund in self.fundtradeobj:
summarydf = summarydf.append(
fund.dailyreport(date), ignore_index=True, sort=True
)
tname = "总计"
tcode = "total"
tunitvalue = float("NaN")
tunitcost = float("NaN")
tholdshare = float("NaN")
tcurrentvalue = summarydf["基金现值"].sum()
tpurchase = summarydf["基金总申购"].sum()
tbtnk = bottleneck(self.totcftable[self.totcftable["date"] <= date])
tcost = summarydf["基金持有成本"].sum()
toutput = summarydf["基金分红与赎回"].sum()
tturnover = turnoverrate(self.totcftable[self.totcftable["date"] <= date], date)
# 计算的是总系统作为整体和外界的换手率,而非系统各成分之间的换手率
tearn = summarydf["基金收益总额"].sum()
trate = round(tearn / tbtnk * 100, 4)
trow = pd.DataFrame(
[
[
tname,
tcode,
tunitvalue,
tunitcost,
tholdshare,
tcurrentvalue,
tpurchase,
tbtnk,
tcost,
toutput,
tturnover,
tearn,
trate,
]
],
columns=columns,
)
summarydf = summarydf.append(trow, ignore_index=True, sort=True)
return summarydf[columns].sort_values(by="基金现值", ascending=False)
summary = combsummary
def _mergecftb(self):
"""
merge the different cftable for different funds into one table
"""
dtlist = []
for fund in self.fundtradeobj:
dtlist2 = []
for _, row in fund.cftable.iterrows():
dtlist2.append((row["date"], row["cash"]))
dtlist.extend(dtlist2)
nndtlist = set([item[0] for item in dtlist])
nndtlist = sorted(list(nndtlist), key=lambda x: x)
reslist = []
for date in nndtlist:
reslist.append(sum([item[1] for item in dtlist if item[0] == date]))
df = pd.DataFrame(data={"date": nndtlist, "cash": reslist})
df = df[df["cash"] != 0]
df = df.reset_index(drop=True)
return df
    def xirrrate(self, date=yesterdayobj(), startdate=None, guess=0.01):
        """
        xirr rate evaluation of the whole invest combination

        :param date: string or obj of datetime, the virtually sell-all date
        :param startdate: string or obj of datetime, the beginning date of calculation, default from first buy
        :param guess: float, initial guess handed to the XIRR solver
        """
        return xirrcal(self.totcftable, self.fundtradeobj, date, startdate, guess)
def evaluation(self, start=None):
"""
give the evaluation object to analysis funds properties themselves instead of trades
:returns: :class:`xalpha.evaluate.evaluate` object, with referenced funds the same as funds
we invested
"""
if self.is_in:
raise NotImplementedError()
case = evaluate(
*[fundtrade.aim for fundtrade in self.fundtradeobj], start=start
)
return case
def get_stock_holdings(
self, year=None, season=None, date=yesterdayobj(), threhold=100
):
"""
获取整个基金组合的底层股票持仓总和和细节,组合穿透
:param year: 基于的基金季报年份
:param season: 基于的基金季报季度
:param date: 默认昨天
:param threhold: 默认100。小于100元的底层股票将不在最后的结果中展示
:return: pd.DataFrame column: name, code, value, ratio
"""
d = {}
if year is None or season is None:
rd = convert_date(date) - pd.Timedelta(days=120)
if not year:
year = rd.year
if not season:
season = int((rd.month - 0.1) / 3) + 1
logger.debug("use %s, %s for fund report" % (year, season))
for f in self.fundtradeobj:
if isinstance(f, itrade):
if f.get_type() == "股票":
code = f.code
elif f.get_type() == "场内基金":
code = f.code[2:]
else:
continue
else:
code = f.code
value = f.briefdailyreport(date).get("currentvalue", 0)
if value > 0:
if code.startswith("SH") or code.startswith("SZ"):
stock = code
d[stock] = d.get(stock, 0) + value
elif code == "mf":
continue
else:
df = get_fund_holdings(code, year, season)
if df is None:
continue
for _, row in df.iterrows():
stock = row["code"]
stock = ttjjcode(stock)
d[stock] = d.get(stock, 0) + row["ratio"] / 100 * value
# print("%s has %s contribution from %s" %(stock, row["ratio"] / 100 * value, f.name))
l = []
for code, value in sorted(d.items(), key=lambda item: -item[1]):
if value >= threhold:
try:
name = get_rt(code)["name"]
except:
name = code
l.append([name, code, value])
fdf = pd.DataFrame(l, columns=["name", "code", "value"])
fdf["ratio"] = fdf["value"] / fdf["value"].sum()
return fdf
def get_portfolio(self, date=yesterdayobj()):
"""
获取基金组合底层资产大类配置的具体值
:param date:
:return: Dict[str, float]. stock,bond,cash 对应总值的字典
"""
d = {"stock": 0, "bond": 0, "cash": 0}
date = convert_date(date)
for f in self.fundtradeobj:
value = f.briefdailyreport(date).get("currentvalue", 0)
if value > 0:
if isinstance(f, itrade):
if f.get_type() == "股票":
d["stock"] += value
continue
elif f.get_type() in ["可转债", "债券"]:
d["bond"] += value
continue
elif f.get_type() == "货币基金":
d["cash"] += value
continue
elif f.get_type() == "场内基金":
code = f.code[2:]
else:
continue
else:
code = f.code
if code == "mf":
d["cash"] += value
continue
if get_fund_type(code) == "货币基金":
d["cash"] += value
continue
df = xu.get_daily("pt-F" + code, end=date.strftime("%Y%m%d"))
if df is None or len(df) == 0:
logger.warning("empty portfolio info for %s" % code)
row = df.iloc[-1]
if row["bond_ratio"] + row["stock_ratio"] < 10: # 联接基金
d["stock"] += (
(100 - row["bond_ratio"] - row["cash_ratio"]) * value / 100
)
d["bond"] += row["bond_ratio"] * value / 100
d["cash"] += row["cash_ratio"] * value / 100
else:
d["stock"] += row["stock_ratio"] * value / 100
d["bond"] += row["bond_ratio"] * value / 100
d["cash"] += row["cash_ratio"] * value / 100
return d
get_portfolio_holdings = get_portfolio
def get_industry(self, date=yesterdayobj()):
"""
获取基金组合持仓的行业占比信息,底层为非 A 股持仓的暂不支持
:param date:
:return: Dict
"""
# TODO: hard coded 一个字典来合并一些二级行业
d = {}
date = convert_date(date)
rd = date - pd.Timedelta(days=120)
year = rd.year
season = int((rd.month - 0.1) / 3) + 1
for f in self.fundtradeobj:
value = f.briefdailyreport(date).get("currentvalue", 0)
if value > 0:
if isinstance(f, itrade):
if f.get_type() == "股票":
industry = get_industry_fromxq(f.code).get("industryname", "")
if industry.strip():
d[industry] = d.get(industry, 0) + value
continue
elif f.get_type() in ["可转债", "债券", "货币基金"]:
# 现在简化实现可转债暂时不按正股记行业
continue
elif f.get_type() == "场内基金":
code = f.code[2:]
else:
continue
else:
code = f.code
if code == "mf":
continue
if get_fund_type(code) == "货币基金":
continue
## 以下为持有股票的基金处理
## fundinfo 有点浪费,不过简化实现暂时如此
fobj = fundinfo(code)
industry_dict = fobj.get_industry_holdings(year=year, season=season)
if industry_dict is None:
continue
## 这里行业占比需要做个 scaling
sv = sum([v for _, v in industry_dict.items()])
if sv < 1.0:
# 只有极少数持仓存在行业信息
continue
stock_ratio = fobj.get_portfolio_holdings(date.strftime("%Y%m%d"))[
"stock_ratio"
]
scale = stock_ratio / sv
print(scale)
for k, v in industry_dict.items():
if k.strip():
d[k] = d.get(k, 0) + value * v / 100 * scale
return d
get_industry_holdings = get_industry
def v_positions(self, date=yesterdayobj(), rendered=True):
"""
pie chart visualization of positions ratio in combination
"""
sdata = sorted(
[
(fob.name, fob.briefdailyreport(date).get("currentvalue", 0))
for fob in self.fundtradeobj
],
key=lambda x: x[1],
reverse=True,
)
pie = Pie()
pie.add(
series_name="总值占比",
data_pair=sdata,
label_opts=opts.LabelOpts(is_show=False, position="center"),
).set_global_opts(
legend_opts=opts.LegendOpts(
pos_left="left", type_="scroll", orient="vertical"
)
).set_series_opts(
tooltip_opts=opts.TooltipOpts(
trigger="item", formatter="{a} <br/>{b}: {c} ({d}%)"
),
)
if rendered:
return pie.render_notebook()
else:
return pie
def v_category_positions(self, date=yesterdayobj(), rendered=True):
"""
资产分类扇形图,按大类资产求和绘制
:param date:
:param rendered: bool. default true for notebook, for plain pyechart obj to return, set rendered=False
:return:
"""
d = {}
for f in self.fundtradeobj:
if isinstance(f, itrade):
t = f.get_type()
if t == "场内基金":
t = get_fund_type(f.code[2:])
elif f.code == "mf":
t = "货币基金"
else:
t = get_fund_type(f.code)
if t == "其他":
logger.warning(
"%s has category others which should be double checked" % f.code
)
d[t] = d.get(t, 0) + f.briefdailyreport(date).get("currentvalue", 0)
sdata = sorted([(k, round(v, 2)) for k, v in d.items()])
pie = Pie()
pie.add(
series_name="总值占比",
data_pair=sdata,
label_opts=opts.LabelOpts(is_show=False, position="center"),
).set_global_opts(
legend_opts=opts.LegendOpts(
pos_left="left", type_="scroll", orient="vertical"
)
).set_series_opts(
tooltip_opts=opts.TooltipOpts(
trigger="item", formatter="{a} <br/>{b}: {c} ({d}%)"
),
)
if rendered:
return pie.render_notebook()
else:
return pie
def v_positions_history(self, end=yesterdaydash(), rendered=True):
"""
river chart visulization of positions ratio history
use text size to avoid legend overlap in some sense, eg. legend_text_size=8
"""
start = self.totcftable.iloc[0].date
times = pd.date_range(start, end)
tdata = []
for date in times:
sdata = sorted(
[
(date, fob.briefdailyreport(date).get("currentvalue", 0), fob.name,)
for fob in self.fundtradeobj
],
key=lambda x: x[1],
reverse=True,
)
tdata.extend(sdata)
tr = ThemeRiver()
tr.add(
series_name=[foj.name for foj in self.fundtradeobj],
data=tdata,
label_opts=opts.LabelOpts(is_show=False),
singleaxis_opts=opts.SingleAxisOpts(type_="time", pos_bottom="10%"),
)
if rendered:
return tr.render_notebook()
else:
return tr
    def v_tradevolume(self, freq="D", rendered=True):
        """
        visualization on trade summary of the funds combination

        :param freq: one character string, frequency label, now supporting D for date,
            W for week and M for month, namely the trade volume is shown based on the time unit
        :param rendered: bool, True renders for notebook display; False returns the chart object
        :returns: ``pyecharts.Bar()``
        """
        return vtradevolume(self.totcftable, freq=freq, rendered=rendered)
class mulfix(mul, indicator):
"""
introduce cash to make a closed investment system, where netvalue analysis can be applied
namely the totcftable only has one row at the very beginning
:param fundtradeobj: trade obj to be include
:param status: status table, if no trade obj is provided, it will include all fund
based on code in status table
:param property: Dict[fundcode, property_number]. property number 的解释:
int. 1: 基金申购采取分位以后全舍而非四舍五入(这种基金是真实存在的==)。2:基金默认分红再投入(0 则是默认现金分红)。4:基金赎回按净值
:param fetch: boolean, when open the fetch option, info class will try fetching from local files first in the init
:param save: boolean, when open the save option, info classes automatically save the class to files
:param path: string, the file path prefix of IO, or object or engine from sqlalchemy to connect sql database
:param form: string, the format of IO, options including: 'csv','sql'
:param totmoney: positive float, the total money as the input at the beginning
:param cashobj: cashinfo object, which is designed to balance the cash in and out
"""
    def __init__(
        self,
        *fundtradeobj,
        status=None,
        istatus=None,
        property=None,
        fetch=False,
        save=False,
        path="",
        form="csv",
        totmoney=100000,
        cashobj=None
    ):
        """Close the investment system by pairing the funds with a virtual
        cash (money-fund) position worth ``totmoney`` at the beginning.

        :raises TradeBehaviorError: if ``totmoney`` is below the historical
            peak capital requirement of the combination.
        """
        super().__init__(
            *fundtradeobj,
            status=status,
            istatus=istatus,
            property=property,
            fetch=fetch,
            save=save,
            path=path,
            form=form
        )
        if cashobj is None:
            cashobj = cashinfo()
        self.totmoney = totmoney
        # virtual money-fund status table absorbing all residual cash flows
        nst = mulfix._vcash(totmoney, self.totcftable, cashobj)
        cashtrade = trade(cashobj, nst)
        self.cashobj = cashobj
        # tuple -> list -> tuple round-trip to append the cash position
        self.fundtradeobj = list(self.fundtradeobj)
        self.fundtradeobj.append(cashtrade)
        self.fundtradeobj = tuple(self.fundtradeobj)
        btnk = bottleneck(self.totcftable)
        if btnk > totmoney:
            raise TradeBehaviorError("the initial total cash is too low")
        # the closed system now has a single initial outflow of totmoney
        self.totcftable = pd.DataFrame(
            data={"date": [nst.iloc[0].date], "cash": [-totmoney]}
        )
@staticmethod
def _vcash(totmoney, totcftable, cashobj):
"""
return a virtue status table with a mf(cash) column based on the given tot money and cftable
"""
cashl = []
cashl.append(totmoney + totcftable.iloc[0].cash)
for i in range(len(totcftable) - 1):
date = totcftable.iloc[i + 1].date
delta = totcftable.iloc[i + 1].cash
if delta < 0:
cashl.append(
myround(
delta
/ cashobj.price[cashobj.price["date"] <= date].iloc[-1].netvalue
)
)
else:
cashl.append(delta)
datadict = {"date": totcftable.loc[:, "date"], "mf": cashl}
return pd.DataFrame(data=datadict)
def unitvalue(self, date=yesterdayobj()):
"""
:returns: float at unitvalue of the whole investment combination
"""
date = convert_date(date)
res = 0
for fund in self.fundtradeobj:
res += fund.briefdailyreport(date).get("currentvalue", 0)
return res / self.totmoney
def v_tradecost(self, threhold=0, date=yesterdayobj(), rendered=True):
if getattr(self, "price", None) is None:
raise ValueError("Please generate price table by ``bcmkset()`` first")
cftable = self.fundtradeobj[-1].cftable[1:]
cftable = cftable[abs(cftable["cash"]) > threhold]
cftable["cash"] = -cftable["cash"]
return vtradecost(self, cftable, end=date, rendered=rendered)
class imul(mul):
    def __init__(self, *fundtradeobj, status=None, istatus=None):
        """
        Analyzer for an in-market (exchange-traded) portfolio.

        :param fundtradeobj: itrade objects.
        :param status: in-market style status table (DataFrame) or an
            irecord object; ``istatus`` is accepted as an alias.
        """
        # bugfix: materialize as a list so positions from the status table can
        # be appended even when trade objects were passed positionally
        # (``fundtradeobj`` arrives as a tuple, which has no ``append``)
        fundtradeobj = list(fundtradeobj)
        if status is None:
            status = istatus
        if isinstance(status, irecord):
            status = status.status
        known_codes = {t.code for t in fundtradeobj}
        if status is not None:
            # codes starting with "#" are bookkeeping placeholders, skipped
            for code in status.code.unique():
                if code not in known_codes and not code.startswith("#"):
                    fundtradeobj.append(itrade(code, status))
        self.fundtradeobj = tuple(fundtradeobj)
        self.totcftable = self._mergecftb()
        self.is_in = True
# Capitalized aliases for the public classes.
Mul = mul
MulFix = mulfix
IMul = imul
| 36.538576 | 152 | 0.511999 |
import logging
import pandas as pd
from pyecharts import options as opts
from pyecharts.charts import Pie, ThemeRiver
from xalpha.cons import convert_date, myround, yesterdaydash, yesterdayobj
from xalpha.evaluate import evaluate
from xalpha.exceptions import FundTypeError, TradeBehaviorError
from xalpha.record import record, irecord
from xalpha.indicator import indicator
from xalpha.info import cashinfo, fundinfo, mfundinfo, get_fund_holdings
from xalpha.trade import (
bottleneck,
trade,
turnoverrate,
vtradevolume,
xirrcal,
itrade,
vtradecost,
)
from xalpha.universal import get_fund_type, ttjjcode, get_rt, get_industry_fromxq
import xalpha.universal as xu
logger = logging.getLogger(__name__)
class mul:
def __init__(
self,
*fundtradeobj,
status=None,
istatus=None,
property=None,
fetch=False,
save=False,
path="",
form="csv"
):
if isinstance(status, record):
if not property:
property = getattr(status, "property", {})
status = status.status
elif not property:
property = {}
self.is_in = False
if fundtradeobj:
for t in fundtradeobj:
if isinstance(t, itrade):
self.is_in = True
break
else:
fundtradeobj = []
fundcodelist = [f.code for f in fundtradeobj]
if status is not None:
for code in status.columns:
if code == "date":
continue
if code in fundcodelist:
continue
p = property.get(code, 0)
round_label = p % 2
dividend_label = ((p - round_label) / 2) % 2
value_label = ((p - round_label - dividend_label) / 4) % 2
try:
fundtradeobj.append(
trade(
fundinfo(
code,
round_label=round_label,
dividend_label=dividend_label,
fetch=fetch,
save=save,
path=path,
form=form,
),
status,
)
)
except FundTypeError:
fundtradeobj.append(
trade(
mfundinfo(
code,
round_label=round_label,
value_label=value_label,
fetch=fetch,
save=save,
path=path,
form=form,
),
status,
)
)
if istatus is not None:
self.is_in = True
if isinstance(istatus, irecord):
istatus = istatus.status
for code in istatus.code.unique():
if code not in fundcodelist and not code.startswith("#"):
fundtradeobj.append(itrade(code, istatus))
self.fundtradeobj = tuple(fundtradeobj)
self.totcftable = self._mergecftb()
def tot(self, prop="基金现值", date=yesterdayobj()):
res = 0
for fund in self.fundtradeobj:
res += fund.dailyreport().iloc[0][prop]
return res
def combsummary(self, date=yesterdayobj()):
date = convert_date(date)
columns = [
"基金名称",
"基金代码",
"当日净值",
"单位成本",
"持有份额",
"基金现值",
"基金总申购",
"历史最大占用",
"基金持有成本",
"基金分红与赎回",
"换手率",
"基金收益总额",
"投资收益率",
]
summarydf = pd.DataFrame([], columns=columns)
for fund in self.fundtradeobj:
summarydf = summarydf.append(
fund.dailyreport(date), ignore_index=True, sort=True
)
tname = "总计"
tcode = "total"
tunitvalue = float("NaN")
tunitcost = float("NaN")
tholdshare = float("NaN")
tcurrentvalue = summarydf["基金现值"].sum()
tpurchase = summarydf["基金总申购"].sum()
tbtnk = bottleneck(self.totcftable[self.totcftable["date"] <= date])
tcost = summarydf["基金持有成本"].sum()
toutput = summarydf["基金分红与赎回"].sum()
tturnover = turnoverrate(self.totcftable[self.totcftable["date"] <= date], date)
tearn = summarydf["基金收益总额"].sum()
trate = round(tearn / tbtnk * 100, 4)
trow = pd.DataFrame(
[
[
tname,
tcode,
tunitvalue,
tunitcost,
tholdshare,
tcurrentvalue,
tpurchase,
tbtnk,
tcost,
toutput,
tturnover,
tearn,
trate,
]
],
columns=columns,
)
summarydf = summarydf.append(trow, ignore_index=True, sort=True)
return summarydf[columns].sort_values(by="基金现值", ascending=False)
summary = combsummary
def _mergecftb(self):
dtlist = []
for fund in self.fundtradeobj:
dtlist2 = []
for _, row in fund.cftable.iterrows():
dtlist2.append((row["date"], row["cash"]))
dtlist.extend(dtlist2)
nndtlist = set([item[0] for item in dtlist])
nndtlist = sorted(list(nndtlist), key=lambda x: x)
reslist = []
for date in nndtlist:
reslist.append(sum([item[1] for item in dtlist if item[0] == date]))
df = pd.DataFrame(data={"date": nndtlist, "cash": reslist})
df = df[df["cash"] != 0]
df = df.reset_index(drop=True)
return df
def xirrrate(self, date=yesterdayobj(), startdate=None, guess=0.01):
return xirrcal(self.totcftable, self.fundtradeobj, date, startdate, guess)
def evaluation(self, start=None):
if self.is_in:
raise NotImplementedError()
case = evaluate(
*[fundtrade.aim for fundtrade in self.fundtradeobj], start=start
)
return case
def get_stock_holdings(
self, year=None, season=None, date=yesterdayobj(), threhold=100
):
d = {}
if year is None or season is None:
rd = convert_date(date) - pd.Timedelta(days=120)
if not year:
year = rd.year
if not season:
season = int((rd.month - 0.1) / 3) + 1
logger.debug("use %s, %s for fund report" % (year, season))
for f in self.fundtradeobj:
if isinstance(f, itrade):
if f.get_type() == "股票":
code = f.code
elif f.get_type() == "场内基金":
code = f.code[2:]
else:
continue
else:
code = f.code
value = f.briefdailyreport(date).get("currentvalue", 0)
if value > 0:
if code.startswith("SH") or code.startswith("SZ"):
stock = code
d[stock] = d.get(stock, 0) + value
elif code == "mf":
continue
else:
df = get_fund_holdings(code, year, season)
if df is None:
continue
for _, row in df.iterrows():
stock = row["code"]
stock = ttjjcode(stock)
d[stock] = d.get(stock, 0) + row["ratio"] / 100 * value
l = []
for code, value in sorted(d.items(), key=lambda item: -item[1]):
if value >= threhold:
try:
name = get_rt(code)["name"]
except:
name = code
l.append([name, code, value])
fdf = pd.DataFrame(l, columns=["name", "code", "value"])
fdf["ratio"] = fdf["value"] / fdf["value"].sum()
return fdf
def get_portfolio(self, date=yesterdayobj()):
d = {"stock": 0, "bond": 0, "cash": 0}
date = convert_date(date)
for f in self.fundtradeobj:
value = f.briefdailyreport(date).get("currentvalue", 0)
if value > 0:
if isinstance(f, itrade):
if f.get_type() == "股票":
d["stock"] += value
continue
elif f.get_type() in ["可转债", "债券"]:
d["bond"] += value
continue
elif f.get_type() == "货币基金":
d["cash"] += value
continue
elif f.get_type() == "场内基金":
code = f.code[2:]
else:
continue
else:
code = f.code
if code == "mf":
d["cash"] += value
continue
if get_fund_type(code) == "货币基金":
d["cash"] += value
continue
df = xu.get_daily("pt-F" + code, end=date.strftime("%Y%m%d"))
if df is None or len(df) == 0:
logger.warning("empty portfolio info for %s" % code)
row = df.iloc[-1]
if row["bond_ratio"] + row["stock_ratio"] < 10:
d["stock"] += (
(100 - row["bond_ratio"] - row["cash_ratio"]) * value / 100
)
d["bond"] += row["bond_ratio"] * value / 100
d["cash"] += row["cash_ratio"] * value / 100
else:
d["stock"] += row["stock_ratio"] * value / 100
d["bond"] += row["bond_ratio"] * value / 100
d["cash"] += row["cash_ratio"] * value / 100
return d
get_portfolio_holdings = get_portfolio
def get_industry(self, date=yesterdayobj()):
d = {}
date = convert_date(date)
rd = date - pd.Timedelta(days=120)
year = rd.year
season = int((rd.month - 0.1) / 3) + 1
for f in self.fundtradeobj:
value = f.briefdailyreport(date).get("currentvalue", 0)
if value > 0:
if isinstance(f, itrade):
if f.get_type() == "股票":
industry = get_industry_fromxq(f.code).get("industryname", "")
if industry.strip():
d[industry] = d.get(industry, 0) + value
continue
elif f.get_type() in ["可转债", "债券", "货币基金"]:
continue
elif f.get_type() == "场内基金":
code = f.code[2:]
else:
continue
else:
code = f.code
if code == "mf":
continue
if get_fund_type(code) == "货币基金":
continue
ndinfo(code)
industry_dict = fobj.get_industry_holdings(year=year, season=season)
if industry_dict is None:
continue
= sum([v for _, v in industry_dict.items()])
if sv < 1.0:
continue
stock_ratio = fobj.get_portfolio_holdings(date.strftime("%Y%m%d"))[
"stock_ratio"
]
scale = stock_ratio / sv
print(scale)
for k, v in industry_dict.items():
if k.strip():
d[k] = d.get(k, 0) + value * v / 100 * scale
return d
get_industry_holdings = get_industry
def v_positions(self, date=yesterdayobj(), rendered=True):
sdata = sorted(
[
(fob.name, fob.briefdailyreport(date).get("currentvalue", 0))
for fob in self.fundtradeobj
],
key=lambda x: x[1],
reverse=True,
)
pie = Pie()
pie.add(
series_name="总值占比",
data_pair=sdata,
label_opts=opts.LabelOpts(is_show=False, position="center"),
).set_global_opts(
legend_opts=opts.LegendOpts(
pos_left="left", type_="scroll", orient="vertical"
)
).set_series_opts(
tooltip_opts=opts.TooltipOpts(
trigger="item", formatter="{a} <br/>{b}: {c} ({d}%)"
),
)
if rendered:
return pie.render_notebook()
else:
return pie
def v_category_positions(self, date=yesterdayobj(), rendered=True):
d = {}
for f in self.fundtradeobj:
if isinstance(f, itrade):
t = f.get_type()
if t == "场内基金":
t = get_fund_type(f.code[2:])
elif f.code == "mf":
t = "货币基金"
else:
t = get_fund_type(f.code)
if t == "其他":
logger.warning(
"%s has category others which should be double checked" % f.code
)
d[t] = d.get(t, 0) + f.briefdailyreport(date).get("currentvalue", 0)
sdata = sorted([(k, round(v, 2)) for k, v in d.items()])
pie = Pie()
pie.add(
series_name="总值占比",
data_pair=sdata,
label_opts=opts.LabelOpts(is_show=False, position="center"),
).set_global_opts(
legend_opts=opts.LegendOpts(
pos_left="left", type_="scroll", orient="vertical"
)
).set_series_opts(
tooltip_opts=opts.TooltipOpts(
trigger="item", formatter="{a} <br/>{b}: {c} ({d}%)"
),
)
if rendered:
return pie.render_notebook()
else:
return pie
def v_positions_history(self, end=yesterdaydash(), rendered=True):
start = self.totcftable.iloc[0].date
times = pd.date_range(start, end)
tdata = []
for date in times:
sdata = sorted(
[
(date, fob.briefdailyreport(date).get("currentvalue", 0), fob.name,)
for fob in self.fundtradeobj
],
key=lambda x: x[1],
reverse=True,
)
tdata.extend(sdata)
tr = ThemeRiver()
tr.add(
series_name=[foj.name for foj in self.fundtradeobj],
data=tdata,
label_opts=opts.LabelOpts(is_show=False),
singleaxis_opts=opts.SingleAxisOpts(type_="time", pos_bottom="10%"),
)
if rendered:
return tr.render_notebook()
else:
return tr
def v_tradevolume(self, freq="D", rendered=True):
return vtradevolume(self.totcftable, freq=freq, rendered=rendered)
class mulfix(mul, indicator):
    """Multiple trade targets combined under a fixed total amount of money.

    Idle money not invested in the funds is modelled as being parked in a
    money-market fund (``cashobj``), so the combination behaves like one
    virtual fund whose unit value is measured against ``totmoney``.
    """

    def __init__(
        self,
        *fundtradeobj,
        status=None,
        istatus=None,
        property=None,
        fetch=False,
        save=False,
        path="",
        form="csv",
        totmoney=100000,
        cashobj=None
    ):
        """
        :param fundtradeobj: trade objects to be aggregated.
        :param status: status table, passed through to the :class:`mul` parent.
        :param totmoney: total amount of money in the fixed pool.
        :param cashobj: info object of the money-market fund used to park
            idle cash; defaults to ``cashinfo()``.
        """
        # NOTE: ``property`` shadows the builtin but is kept to preserve the
        # public keyword interface inherited from the parent class.
        super().__init__(
            *fundtradeobj,
            status=status,
            istatus=istatus,
            property=property,
            fetch=fetch,
            save=save,
            path=path,
            form=form
        )
        if cashobj is None:
            cashobj = cashinfo()
        self.totmoney = totmoney
        # Derive the virtual money-market cash flows implied by the trades.
        nst = mulfix._vcash(totmoney, self.totcftable, cashobj)
        cashtrade = trade(cashobj, nst)
        self.cashobj = cashobj
        self.fundtradeobj = list(self.fundtradeobj)
        self.fundtradeobj.append(cashtrade)
        self.fundtradeobj = tuple(self.fundtradeobj)
        btnk = bottleneck(self.totcftable)
        if btnk > totmoney:
            raise TradeBehaviorError("the initial total cash is too low")
        self.totcftable = pd.DataFrame(
            data={"date": [nst.iloc[0].date], "cash": [-totmoney]}
        )

    @staticmethod
    def _vcash(totmoney, totcftable, cashobj):
        """Build the virtual cash-flow table ("mf" column) of the money fund.

        Non-negative cash flows are kept as-is; negative ones (money moved
        into funds) are converted using the money fund's net value on (or
        before) the trade date.
        """
        cashl = []
        cashl.append(totmoney + totcftable.iloc[0].cash)
        for i in range(len(totcftable) - 1):
            date = totcftable.iloc[i + 1].date
            delta = totcftable.iloc[i + 1].cash
            if delta < 0:
                cashl.append(
                    myround(
                        delta
                        / cashobj.price[cashobj.price["date"] <= date].iloc[-1].netvalue
                    )
                )
            else:
                cashl.append(delta)
        datadict = {"date": totcftable.loc[:, "date"], "mf": cashl}
        return pd.DataFrame(data=datadict)

    def unitvalue(self, date=yesterdayobj()):
        """Unit value of the whole virtual fund relative to ``totmoney``.

        NOTE(review): the ``yesterdayobj()`` default is evaluated once at
        import time, so a long-running process keeps that stale default —
        kept for interface compatibility.
        """
        date = convert_date(date)
        res = 0
        for fund in self.fundtradeobj:
            res += fund.briefdailyreport(date).get("currentvalue", 0)
        return res / self.totmoney

    def v_tradecost(self, threhold=0, date=yesterdayobj(), rendered=True):
        """Visualize trade cost based on the money-market fund's cash flows.

        :param threhold: only keep trades whose absolute cash flow exceeds
            this value (parameter name kept for interface compatibility).
        """
        if getattr(self, "price", None) is None:
            raise ValueError("Please generate price table by ``bcmkset()`` first")
        cftable = self.fundtradeobj[-1].cftable[1:]
        # Copy before mutating: assigning into the filtered slice of the
        # trade object's cftable triggers pandas' SettingWithCopyWarning.
        cftable = cftable[abs(cftable["cash"]) > threhold].copy()
        cftable["cash"] = -cftable["cash"]
        return vtradecost(self, cftable, end=date, rendered=rendered)
class imul(mul):
    """Aggregation of :class:`itrade` objects driven by an irecord-style status table."""

    def __init__(self, *fundtradeobj, status=None, istatus=None):
        """
        :param fundtradeobj: trade objects to aggregate.
        :param status: status table (or an ``irecord`` object, whose
            ``.status`` attribute is used).
        :param istatus: fallback used when ``status`` is None.
        """
        # Bug fix: ``*fundtradeobj`` arrives as a tuple; the original code
        # only converted it to a list when it was empty, so appending new
        # trades below raised AttributeError whenever trades were passed
        # together with a status table containing new codes.
        fundtradeobj = list(fundtradeobj)
        if status is None:
            status = istatus
        if isinstance(status, irecord):
            status = status.status
        fundcodelist = [f.code for f in fundtradeobj]
        if status is not None:
            for code in status.code.unique():
                # Codes starting with "#" are skipped (presumably
                # placeholder/virtual rows — confirm against irecord docs).
                if code not in fundcodelist and not code.startswith("#"):
                    fundtradeobj.append(itrade(code, status))
        self.fundtradeobj = tuple(fundtradeobj)
        self.totcftable = self._mergecftb()
        self.is_in = True
# CamelCase aliases of the classes defined above.
Mul = mul
MulFix = mulfix
IMul = imul
| true | true |
f7150d4f3994a0060df418ddb3fbabd3267a1aec | 27,846 | py | Python | tensorflow/python/ops/op_def_library.py | smrutiranjans/tensorflow | d8e8b872eae63188c75046d5bb068e03a81b3f85 | [
"Apache-2.0"
] | null | null | null | tensorflow/python/ops/op_def_library.py | smrutiranjans/tensorflow | d8e8b872eae63188c75046d5bb068e03a81b3f85 | [
"Apache-2.0"
] | null | null | null | tensorflow/python/ops/op_def_library.py | smrutiranjans/tensorflow | d8e8b872eae63188c75046d5bb068e03a81b3f85 | [
"Apache-2.0"
] | 1 | 2020-03-08T13:12:13.000Z | 2020-03-08T13:12:13.000Z | # Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Class to hold a library of OpDefs and use it to create Brain operations."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import contextlib
import six
from tensorflow.core.framework import attr_value_pb2
from tensorflow.core.framework import op_def_pb2
from tensorflow.core.framework import tensor_pb2
from tensorflow.core.framework import tensor_shape_pb2
from tensorflow.core.framework import types_pb2
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.ops import constant_op
from tensorflow.python.platform import logging
from tensorflow.python.util import compat
def _Attr(op_def, name):
for attr in op_def.attr:
if attr.name == name:
return attr
raise TypeError("Inconsistent OpDef for '%s', missing attr '%s'" %
(op_def.name, name))
def _AttrValue(attr_protos, name):
if name in attr_protos:
return attr_protos[name]
raise TypeError("Inconsistent OpDef, missing attr '%s' from '%s'." %
(name, attr_protos))
def _SatisfiesTypeConstraint(dtype, attr_def):
if attr_def.HasField("allowed_values"):
allowed_list = attr_def.allowed_values.list.type
if dtype not in allowed_list:
raise TypeError(
"DataType %s for attr '%s' not in list of allowed values: %s" %
(dtypes.as_dtype(dtype).name, attr_def.name,
", ".join(dtypes.as_dtype(x).name for x in allowed_list)))
def _IsListParameter(arg):
if arg.number_attr:
return True
elif arg.type_list_attr:
return True
return False
def _NumTypeFields(arg):
  """Count how many of the mutually exclusive type fields are set on `arg`."""
  return (int(arg.type != types_pb2.DT_INVALID)
          + int(bool(arg.type_attr))
          + int(bool(arg.type_list_attr)))
def _IsListValue(v):
return isinstance(v, (list, tuple))
def _Flatten(l):
  """Flattens one level of nesting: [1, 2, [3, 4], [5]] -> [1, 2, 3, 4, 5]."""
  flat = []
  for x in l:
    # Inline the list/tuple check so one pass both tests and extends.
    if isinstance(x, (list, tuple)):
      flat.extend(x)
    else:
      flat.append(x)
  return flat
def _Restructure(l, structure):
"""Returns the elements of list l structured according to the given structure.
A structure is represented by a list whose elements are either
`None` or a non-negative integer. `None` corresponds to a single
element in the output list, and an integer N corresponds to a nested
list of length N.
The function returns a data structure whose shape is given by
`structure`, and whose elements are taken from `l`. If `structure`
is a singleton, the function returns the single data structure
implied by the 0th element of `structure`. For example:
_Restructure(["foo", "bar", "baz", "qux"], [None, 2, None])
-> ["foo", ["bar", "baz"], "qux"]
_Restructure(["foo"], [None]) -> "foo"
_Restructure(["foo"], [1]) -> ["foo"]
_Restructure([], [0]) -> []
Args:
l: A list.
structure: A list whose elements are either `None` or a non-negative
integer.
Returns:
The elements of `l`, restructured according to `structure`. If
`structure` is a list of length 1, this function returns the
single data structure implied by `structure[0]`.
"""
result = []
current_index = 0
for element in structure:
if element is None:
result.append(l[current_index])
current_index += 1
else:
result.append(l[current_index:current_index+element])
current_index += element
if len(result) == 1:
return result[0]
else:
return tuple(result)
def _MakeFloat(v, arg_name):
  """Coerce `v` to float, raising TypeError for non-real values."""
  if isinstance(v, compat.real_types):
    return float(v)
  raise TypeError("Expected float for argument '%s' not %s." %
                  (arg_name, repr(v)))
def _MakeInt(v, arg_name):
  """Coerce `v` to int; strings are rejected even when numeric-looking."""
  error = TypeError("Expected int for argument '%s' not %s." %
                    (arg_name, repr(v)))
  # Reject strings explicitly: int("3") would succeed but a string here
  # almost certainly indicates a caller error.
  if isinstance(v, six.string_types):
    raise error
  try:
    return int(v)
  except (ValueError, TypeError):
    raise error
def _MakeStr(v, arg_name):
  """Coerce `v` to bytes (unicode is encoded), rejecting non-strings."""
  if isinstance(v, compat.bytes_or_text_types):
    return compat.as_bytes(v)  # Convert unicode strings to bytes.
  raise TypeError("Expected string for argument '%s' not %s." %
                  (arg_name, repr(v)))
def _MakeBool(v, arg_name):
if not isinstance(v, bool):
raise TypeError("Expected bool for argument '%s' not %s." %
(arg_name, repr(v)))
return v
def _MakeType(v, attr_def):
  """Convert `v` to a DataType enum value, validating any allowed-values constraint."""
  try:
    dtype = dtypes.as_dtype(v)
  except TypeError:
    raise TypeError("Expected DataType for argument '%s' not %s." %
                    (attr_def.name, repr(v)))
  enum_value = dtype.as_datatype_enum
  _SatisfiesTypeConstraint(enum_value, attr_def)
  return enum_value
def _MakeShape(v, arg_name):
  """Convert `v` into a TensorShapeProto.

  Args:
    v: A TensorShapeProto, a list of ints, or a tensor_shape.TensorShape.
    arg_name: String, for error messages.

  Returns:
    A TensorShapeProto.
  """
  if not isinstance(v, tensor_shape_pb2.TensorShapeProto):
    return tensor_shape.as_shape(v).as_proto()
  # Warn once if any dimension of the proto carries a name.
  if any(d.name for d in v.dim):
    logging.warning("Warning: TensorShapeProto with a named dimension: %s",
                    str(v))
  return v
def _MakeTensor(v, arg_name):
  """Pass `v` through when it is already a TensorProto; otherwise fail."""
  if not isinstance(v, tensor_pb2.TensorProto):
    raise TypeError(
        "Don't know how to convert %s to a TensorProto for argument '%s'" %
        (repr(v), arg_name))
  return v
class _OpInfo(object):
  """All per-Op state we would like to precompute/validate."""

  def __init__(self, op_def):
    """Validates `op_def`'s arg/attr cross-references and stores it.

    Raises:
      TypeError: if an input/output arg does not have exactly one type
        field, or references an attr whose attr-type does not match its
        role (type_attr -> "type", type_list_attr -> "list(type)",
        number_attr -> "int").
    """
    self.op_def = op_def
    # TODO(josh11b): SWIG the ValidateOpDef() function from C++ and call it
    # here, instead of these checks.
    for arg in list(op_def.input_arg) + list(op_def.output_arg):
      num_type_fields = _NumTypeFields(arg)
      if num_type_fields != 1:
        raise TypeError("Arg '%s' of '%s' must have one type field not %d" %
                        (arg.name, op_def.name, num_type_fields))
      if arg.type_attr:
        attr_type = _Attr(op_def, arg.type_attr).type
        if attr_type != "type":
          raise TypeError("Attr '%s' of '%s' used as a type_attr "
                          "but has type %s" %
                          (arg.type_attr, op_def.name, attr_type))
      if arg.type_list_attr:
        attr_type = _Attr(op_def, arg.type_list_attr).type
        if attr_type != "list(type)":
          raise TypeError(
              "Attr '%s' of '%s' used as a type_list_attr but has type %s" %
              # Bug fix: report the offending type_list_attr (the original
              # mistakenly reported arg.type_attr here).
              (arg.type_list_attr, op_def.name, attr_type))
      if arg.number_attr:
        attr_type = _Attr(op_def, arg.number_attr).type
        if attr_type != "int":
          raise TypeError(
              "Attr '%s' of '%s' used as a number_attr but has type %s" %
              (arg.number_attr, op_def.name, attr_type))
# pylint: disable=g-doc-return-or-yield
@contextlib.contextmanager
def _MaybeColocateWith(inputs):
"""A context manager for (maybe) colocating with a list of input tensors.
Args:
inputs: A list of `Tensor` or `Operation` objects.
Returns:
A context manager.
"""
if not inputs:
yield
else:
# NOTE(mrry): The `ops.colocate_with()` function accepts only a single
# op or tensor, so we create one context manager per element in the list.
with ops.colocate_with(inputs[0]), _MaybeColocateWith(inputs[1:]):
yield
# pylint: enable=g-doc-return-or-yield
class OpDefLibrary(object):
  """Holds a collection of OpDefs, can add the corresponding Ops to a graph."""

  def __init__(self):
    # Maps registered op name -> _OpInfo.
    self._ops = {}

  def add_op(self, op_def):
    """Register an OpDef. May call apply_op with the name afterwards."""
    if not isinstance(op_def, op_def_pb2.OpDef):
      raise TypeError("%s is %s, not an op_def_pb2.OpDef" %
                      (op_def, type(op_def)))
    if not op_def.name:
      raise ValueError("%s missing name." % op_def)
    if op_def.name in self._ops:
      raise RuntimeError("Op name %s registered twice." % op_def.name)
    self._ops[op_def.name] = _OpInfo(op_def)

  def add_op_list(self, op_list):
    """Register the OpDefs from an OpList."""
    if not isinstance(op_list, op_def_pb2.OpList):
      raise TypeError("%s is %s, not an op_def_pb2.OpList" %
                      (op_list, type(op_list)))
    for op_def in op_list.op:
      self.add_op(op_def)

  def apply_op(self, op_type_name, name=None, **keywords):
    # pylint: disable=g-doc-args
    """Add a node invoking a registered Op to a graph.

    Config proto extensions must be provided via the 'ext' keyword argument.
    Example usage:
       # input1 and input2 can be Tensors or anything ops.convert_to_tensor()
       # will convert to a Tensor.
       op_def_library.apply_op("op", input1=input1, input2=input2)
       # Can specify a node name.
       op_def_library.apply_op("op", input1=input1, name="node_name")
       # Must use keyword arguments, with the names specified in the OpDef.
       op_def_library.apply_op("op", input_name=input, attr_name=attr)

    All attrs must either be inferred from an input or specified.
    (If inferred, the attr must not be specified.)  If an attr has a default
    value specified in the Op's OpDef, then you may pass None as the value
    of that attr to get the default.

    Args:
      op_type_name: string. Must match the name field of a registered Op.
      name: string. Optional name of the created op.
      **keywords: input Tensor and attr arguments specified by name,
        and optional parameters to pass when constructing the Operation.

    Returns:
      The Tensor(s) representing the output of the operation, or the Operation
      itself if there are no outputs.

    Raises:
      RuntimeError: On some errors.
      TypeError: On some errors.
      ValueError: On some errors.
    """
    op_info = self._ops.get(op_type_name, None)
    if op_info is None:
      raise RuntimeError("Unrecognized Op name " + op_type_name)
    op_def = op_info.op_def

    # Determine the graph context.
    try:
      # Need to flatten all the arguments into a list.
      # pylint: disable=protected-access
      g = ops._get_graph_from_inputs(_Flatten(keywords.values()))
      # pylint: enable=protected-access
    except AssertionError as e:
      # Bug fix: AssertionError has no `.message` attribute on Python 3;
      # str(e) works on both Python 2 and 3.
      raise RuntimeError(
          "Cannot determine graph for Op '%s' due to: %s"
          % (op_type_name, str(e)))

    # Default name if not specified.
    if name is None:
      name = op_type_name

    # Check for deprecation
    deprecation_version = op_def.deprecation.version
    if deprecation_version:
      producer = g.graph_def_versions.producer
      if producer >= deprecation_version:
        raise NotImplementedError(
            ("Op %s is not available in GraphDef version %d. "
             "It has been removed in version %d. %s.") %
            (op_type_name, producer, deprecation_version,
             op_def.deprecation.explanation))

    # Requires that op_def has passed validation (using the C++
    # ValidateOpDef() from ../framework/op_def_util.h).
    attrs = {}
    inputs = []
    input_types = []
    with g.as_default(), ops.name_scope(name) as scope:

      # Perform input type inference
      inferred_from = {}
      for input_arg in op_def.input_arg:
        input_name = input_arg.name
        if input_name in keywords:
          values = keywords.pop(input_name)
        elif input_name + "_" in keywords:
          # Handle the case where the name is a keyword or built-in
          # for Python so we use the name + _ instead.
          input_name += "_"
          values = keywords.pop(input_name)
        else:
          raise TypeError("No argument for input " + input_name)

        # Goals:
        # * Convert values to Tensors if it contains constants.
        # * Verify that values is a list if that matches the input_arg's
        #   type.
        # * If the input_arg's type is determined by attrs, either set
        #   those attrs and validate those attr values are legal (if
        #   they have not yet been set) or validate the input matches
        #   the type indicated by the attrs (if they have already been
        #   inferred via an earlier input).
        # * If the input_arg has an explicit type, make sure the input
        #   conforms.

        if _IsListParameter(input_arg):
          if not _IsListValue(values):
            raise TypeError(
                "Expected list for '%s' argument to '%s' Op, not %s." %
                (input_name, op_type_name, values))
          # In cases where we expect all elements of the list to have the
          # same dtype, try to cast non-Tensor elements to that type.
          dtype = None
          if input_arg.type != types_pb2.DT_INVALID:
            dtype = input_arg.type
          elif input_arg.number_attr:
            if input_arg.type_attr in attrs:
              dtype = attrs[input_arg.type_attr]
            else:
              for t in values:
                if isinstance(t, ops.Tensor):
                  dtype = t.dtype
                  break

          try:
            if not input_arg.is_ref and dtype:
              dtype = dtypes.as_dtype(dtype).base_dtype
            values = ops.convert_n_to_tensor(
                values, name=input_arg.name, dtype=dtype if dtype else None,
                as_ref=input_arg.is_ref)
          except (TypeError, ValueError):
            assert dtype is not None, "Should not fail if dtype is None"
            assert input_arg.number_attr, "Should be number_attr case"
            # What types does the conversion function think values have?
            values = ops.convert_n_to_tensor(values, as_ref=input_arg.is_ref)
            observed = ", ".join(v.dtype.base_dtype.name for v in values)
            prefix = (
                "Tensors in list passed to '%s' of '%s' Op have types [%s]" %
                (input_name, op_type_name, observed))
            if input_arg.type != types_pb2.DT_INVALID:
              raise TypeError("%s that do not match expected type %s." %
                              (prefix, dtype.name))
            elif input_arg.type_attr in attrs:
              raise TypeError("%s that do not match type %s inferred from "
                              "earlier arguments." %
                              (prefix, dtype.name))
            else:
              raise TypeError("%s that don't all match." % prefix)

          types = [x.dtype for x in values]
          inputs.extend(values)
        else:
          # In cases where we have an expected type, try to convert non-Tensor
          # arguments to that type.
          dtype = None
          if input_arg.type != types_pb2.DT_INVALID:
            dtype = input_arg.type
          elif input_arg.type_attr in attrs:
            dtype = attrs[input_arg.type_attr]
          try:
            values = ops.convert_to_tensor(
                values, name=input_arg.name, dtype=dtype,
                as_ref=input_arg.is_ref)
          except ValueError:
            # What type does convert_to_tensor think it has?
            observed = ops.convert_to_tensor(values,
                                             as_ref=input_arg.is_ref).dtype.name
            prefix = ("Input '%s' of '%s' Op has type %s that does not match" %
                      (input_name, op_type_name, observed))
            if input_arg.type != types_pb2.DT_INVALID:
              raise TypeError("%s expected type of %s." %
                              (prefix, dtypes.as_dtype(input_arg.type).name))
            else:
              raise TypeError(
                  "%s type %s of argument '%s'." %
                  (prefix, dtypes.as_dtype(attrs[input_arg.type_attr]).name,
                   inferred_from[input_arg.type_attr]))

          types = [values.dtype]
          inputs.append(values)
        base_types = [x.base_dtype for x in types]

        if input_arg.number_attr:
          # <number-attr> * <type> or <number-attr> * <type-attr>
          if input_arg.number_attr in attrs:
            if len(values) != attrs[input_arg.number_attr]:
              raise ValueError(
                  "List argument '%s' to '%s' Op with length %d must match "
                  "length %d of argument '%s'." %
                  (input_name, op_type_name, len(values),
                   attrs[input_arg.number_attr],
                   inferred_from[input_arg.number_attr]))
          else:
            attrs[input_arg.number_attr] = len(values)
            inferred_from[input_arg.number_attr] = input_name
            num_attr = _Attr(op_def, input_arg.number_attr)
            if num_attr.has_minimum and len(values) < num_attr.minimum:
              raise ValueError(
                  "List argument '%s' to '%s' Op with length %d shorter "
                  "than minimum length %d." %
                  (input_name, op_type_name, len(values), num_attr.minimum))
          # All tensors must have the same base type.
          if any([bt != base_types[0] for bt in base_types]):
            raise TypeError(
                "All tensors passed to '%s' of '%s' Op "
                "must have the same type." %
                (input_name, op_type_name))
          if input_arg.type != types_pb2.DT_INVALID:
            # <number-attr> * <type> case
            if base_types and base_types[0] != input_arg.type:
              assert False, "Unreachable"
          elif input_arg.type_attr in attrs:
            # <number-attr> * <type-attr> case, where <type-attr> already
            # has an inferred value.
            if base_types and base_types[0] != attrs[input_arg.type_attr]:
              assert False, "Unreachable"
          else:
            # <number-attr> * <type-attr> case, where we are now setting
            # the <type-attr> based on this input
            if not base_types:
              raise TypeError(
                  "Don't know how to infer type variable from empty input "
                  "list passed to input '%s' of '%s' Op." %
                  (input_name, op_type_name))
            attrs[input_arg.type_attr] = base_types[0]
            inferred_from[input_arg.type_attr] = input_name
            type_attr = _Attr(op_def, input_arg.type_attr)
            _SatisfiesTypeConstraint(base_types[0], type_attr)
        elif input_arg.type_attr:
          # <type-attr>
          attr_value = base_types[0]
          if input_arg.type_attr in attrs:
            if attrs[input_arg.type_attr] != attr_value:
              assert False, "Unreachable"
          else:
            for base_type in base_types:
              _SatisfiesTypeConstraint(base_type,
                                       _Attr(op_def, input_arg.type_attr))
            attrs[input_arg.type_attr] = attr_value
            inferred_from[input_arg.type_attr] = input_name
        elif input_arg.type_list_attr:
          # <type-list-attr>
          attr_value = base_types
          if input_arg.type_list_attr in attrs:
            if attrs[input_arg.type_list_attr] != attr_value:
              raise TypeError(
                  "Input '%s' of '%s' Op has type list of %s that does not "
                  "match type list %s of argument '%s'." %
                  (input_name, op_type_name,
                   ", ".join(dtypes.as_dtype(x).name for x in attr_value),
                   ", ".join(dtypes.as_dtype(x).name
                             for x in attrs[input_arg.type_list_attr]),
                   inferred_from[input_arg.type_list_attr]))
          else:
            for base_type in base_types:
              _SatisfiesTypeConstraint(base_type,
                                       _Attr(op_def, input_arg.type_list_attr))
            attrs[input_arg.type_list_attr] = attr_value
            inferred_from[input_arg.type_list_attr] = input_name
        else:
          # single Tensor with specified type
          if base_types[0] != input_arg.type:
            assert False, "Unreachable"

        if input_arg.is_ref:
          if not all(x.is_ref_dtype for x in types):
            raise TypeError(
                "Input '%s' of '%s' Op requires l-value input" %
                (input_name, op_type_name))
          input_types.extend(types)
        else:
          input_types.extend(base_types)

      # Process remaining attrs
      for attr in op_def.attr:
        # Skip attrs that have already had their values inferred
        if attr.name in attrs:
          if attr.name in keywords:
            raise TypeError(
                "Should not specify value for inferred attr '%s'." % attr.name)
          continue
        if attr.name in keywords:
          attrs[attr.name] = keywords.pop(attr.name)
        elif attr.name + "_" in keywords:
          # Attrs whose names match Python keywords have an extra '_'
          # appended, so we must check for that as well.
          attrs[attr.name] = keywords.pop(attr.name + "_")
        else:
          raise TypeError("No argument for attr " + attr.name)

      # Convert attr values to AttrValue protos.
      attr_protos = {}
      for attr_def in op_def.attr:
        key = attr_def.name
        value = attrs[key]
        attr_value = attr_value_pb2.AttrValue()
        if attr_def.HasField("default_value") and value is None:
          attr_value.CopyFrom(attr_def.default_value)
          attr_protos[key] = attr_value
          continue
        if attr_def.type.startswith("list("):
          if not _IsListValue(value):
            raise TypeError("Expected list for attr " + key)
          if attr_def.has_minimum:
            if len(value) < attr_def.minimum:
              raise ValueError("Attr '%s' of '%s' Op passed list of length %d "
                               "less than minimum %d." %
                               (key, op_type_name, len(value),
                                attr_def.minimum))
          attr_value.list.SetInParent()
        if attr_def.type == "string":
          attr_value.s = _MakeStr(value, key)
          if attr_def.HasField("allowed_values"):
            if attr_value.s not in attr_def.allowed_values.list.s:
              raise ValueError(
                  "Attr '%s' of '%s' Op passed string '%s' not in: \"%s\"." %
                  (key, op_type_name, compat.as_text(attr_value.s),
                   '", "'.join(map(compat.as_text,
                                   attr_def.allowed_values.list.s))))
        elif attr_def.type == "list(string)":
          attr_value.list.s.extend([_MakeStr(x, key) for x in value])
          if attr_def.HasField("allowed_values"):
            for x in attr_value.list.s:
              if x not in attr_def.allowed_values.list.s:
                raise ValueError(
                    "Attr '%s' of '%s' Op passed string '%s' not in: \"%s\"." %
                    (key, op_type_name, compat.as_text(x),
                     '", "'.join(map(compat.as_text,
                                     attr_def.allowed_values.list.s))))
        elif attr_def.type == "int":
          attr_value.i = _MakeInt(value, key)
          if attr_def.has_minimum:
            if attr_value.i < attr_def.minimum:
              raise ValueError(
                  "Attr '%s' of '%s' Op passed %d less than minimum %d." %
                  (key, op_type_name, attr_value.i, attr_def.minimum))
        elif attr_def.type == "list(int)":
          attr_value.list.i.extend([_MakeInt(x, key) for x in value])
        elif attr_def.type == "float":
          attr_value.f = _MakeFloat(value, key)
        elif attr_def.type == "list(float)":
          attr_value.list.f.extend([_MakeFloat(x, key) for x in value])
        elif attr_def.type == "bool":
          attr_value.b = _MakeBool(value, key)
        elif attr_def.type == "list(bool)":
          attr_value.list.b.extend([_MakeBool(x, key) for x in value])
        elif attr_def.type == "type":
          attr_value.type = _MakeType(value, attr_def)
        elif attr_def.type == "list(type)":
          attr_value.list.type.extend(
              [_MakeType(x, attr_def) for x in value])
        elif attr_def.type == "shape":
          attr_value.shape.CopyFrom(_MakeShape(value, key))
        elif attr_def.type == "list(shape)":
          attr_value.list.shape.extend(
              [_MakeShape(x, key) for x in value])
        elif attr_def.type == "tensor":
          attr_value.tensor.CopyFrom(_MakeTensor(value, key))
        elif attr_def.type == "list(tensor)":
          attr_value.list.tensor.extend(
              [_MakeTensor(x, key) for x in value])
        elif attr_def.type == "func":
          if not isinstance(value, compat.bytes_or_text_types):
            raise TypeError("Expects a string for the func name")
          attr_value.func.name = value
        else:
          raise TypeError("Unrecognized Attr type " + attr_def.type)

        attr_protos[key] = attr_value
      del attrs  # attrs is no longer authoritative, use attr_protos instead

      # Determine output types (possibly using attrs)
      output_types = []
      output_structure = []
      for arg in op_def.output_arg:
        types = []
        if arg.number_attr:
          n = _AttrValue(attr_protos, arg.number_attr).i
          if arg.type_attr:
            types = [_AttrValue(attr_protos, arg.type_attr).type] * n
          else:
            types = [arg.type] * n
          output_structure.append(n)
        elif arg.type_attr:
          t = _AttrValue(attr_protos, arg.type_attr)
          types = [t.type]
          output_structure.append(None)
        elif arg.type_list_attr:
          t = _AttrValue(attr_protos, arg.type_list_attr)
          types = t.list.type
          output_structure.append(len(t.list.type))
        else:
          types = [arg.type]
          output_structure.append(None)
        if arg.is_ref:
          types = [dtypes.as_dtype(x).as_ref for x in types]
        output_types.extend(types)

      if keywords:
        raise TypeError("apply_op() got unexpected keyword arguments: " +
                        ", ".join(sorted(keywords.keys())))

      # NOTE(mrry): We add an explicit colocation constraint between
      # the newly created op and any of its reference-typed inputs.
      must_colocate_inputs = [val for arg, val in zip(op_def.input_arg, inputs)
                              if arg.is_ref]
      with _MaybeColocateWith(must_colocate_inputs):
        # Add Op to graph
        if output_structure:
          op = g.create_op(op_type_name, inputs, output_types, name=scope,
                           input_types=input_types, attrs=attr_protos,
                           op_def=op_def)
          outputs = op.outputs
          return _Restructure(ops.convert_n_to_tensor(outputs),
                              output_structure)
        else:
          return g.create_op(op_type_name, inputs, output_types, name=scope,
                             input_types=input_types, attrs=attr_protos,
                             op_def=op_def)
| 39.666667 | 80 | 0.602097 |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import contextlib
import six
from tensorflow.core.framework import attr_value_pb2
from tensorflow.core.framework import op_def_pb2
from tensorflow.core.framework import tensor_pb2
from tensorflow.core.framework import tensor_shape_pb2
from tensorflow.core.framework import types_pb2
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.ops import constant_op
from tensorflow.python.platform import logging
from tensorflow.python.util import compat
def _Attr(op_def, name):
for attr in op_def.attr:
if attr.name == name:
return attr
raise TypeError("Inconsistent OpDef for '%s', missing attr '%s'" %
(op_def.name, name))
def _AttrValue(attr_protos, name):
if name in attr_protos:
return attr_protos[name]
raise TypeError("Inconsistent OpDef, missing attr '%s' from '%s'." %
(name, attr_protos))
def _SatisfiesTypeConstraint(dtype, attr_def):
if attr_def.HasField("allowed_values"):
allowed_list = attr_def.allowed_values.list.type
if dtype not in allowed_list:
raise TypeError(
"DataType %s for attr '%s' not in list of allowed values: %s" %
(dtypes.as_dtype(dtype).name, attr_def.name,
", ".join(dtypes.as_dtype(x).name for x in allowed_list)))
def _IsListParameter(arg):
if arg.number_attr:
return True
elif arg.type_list_attr:
return True
return False
def _NumTypeFields(arg):
  """Count how many of the mutually exclusive type fields are set on `arg`."""
  return (int(arg.type != types_pb2.DT_INVALID)
          + int(bool(arg.type_attr))
          + int(bool(arg.type_list_attr)))
def _IsListValue(v):
return isinstance(v, (list, tuple))
def _Flatten(l):
  """Flattens one level of nesting: [1, 2, [3, 4], [5]] -> [1, 2, 3, 4, 5]."""
  flat = []
  for x in l:
    # Inline the list/tuple check so one pass both tests and extends.
    if isinstance(x, (list, tuple)):
      flat.extend(x)
    else:
      flat.append(x)
  return flat
def _Restructure(l, structure):
result = []
current_index = 0
for element in structure:
if element is None:
result.append(l[current_index])
current_index += 1
else:
result.append(l[current_index:current_index+element])
current_index += element
if len(result) == 1:
return result[0]
else:
return tuple(result)
def _MakeFloat(v, arg_name):
  """Coerce `v` to float, raising TypeError for non-real values."""
  if isinstance(v, compat.real_types):
    return float(v)
  raise TypeError("Expected float for argument '%s' not %s." %
                  (arg_name, repr(v)))
def _MakeInt(v, arg_name):
  """Coerce `v` to int; strings are rejected even when numeric-looking."""
  error = TypeError("Expected int for argument '%s' not %s." %
                    (arg_name, repr(v)))
  # Reject strings explicitly: int("3") would succeed but a string here
  # almost certainly indicates a caller error.
  if isinstance(v, six.string_types):
    raise error
  try:
    return int(v)
  except (ValueError, TypeError):
    raise error
def _MakeStr(v, arg_name):
  """Coerce `v` to bytes (unicode is encoded), rejecting non-strings."""
  if isinstance(v, compat.bytes_or_text_types):
    return compat.as_bytes(v)  # Convert unicode strings to bytes.
  raise TypeError("Expected string for argument '%s' not %s." %
                  (arg_name, repr(v)))
def _MakeBool(v, arg_name):
if not isinstance(v, bool):
raise TypeError("Expected bool for argument '%s' not %s." %
(arg_name, repr(v)))
return v
def _MakeType(v, attr_def):
  """Convert `v` to a DataType enum value, validating any allowed-values constraint."""
  try:
    dtype = dtypes.as_dtype(v)
  except TypeError:
    raise TypeError("Expected DataType for argument '%s' not %s." %
                    (attr_def.name, repr(v)))
  enum_value = dtype.as_datatype_enum
  _SatisfiesTypeConstraint(enum_value, attr_def)
  return enum_value
def _MakeShape(v, arg_name):
  """Convert `v` (TensorShapeProto, list of ints, or TensorShape) into a TensorShapeProto."""
  if not isinstance(v, tensor_shape_pb2.TensorShapeProto):
    return tensor_shape.as_shape(v).as_proto()
  # Warn once if any dimension of the proto carries a name.
  if any(d.name for d in v.dim):
    logging.warning("Warning: TensorShapeProto with a named dimension: %s",
                    str(v))
  return v
def _MakeTensor(v, arg_name):
  """Pass `v` through when it is already a TensorProto; otherwise fail."""
  if not isinstance(v, tensor_pb2.TensorProto):
    raise TypeError(
        "Don't know how to convert %s to a TensorProto for argument '%s'" %
        (repr(v), arg_name))
  return v
class _OpInfo(object):
  """All per-Op state we would like to precompute/validate."""

  def __init__(self, op_def):
    """Validates `op_def`'s arg/attr cross-references and stores it.

    Raises:
      TypeError: if an input/output arg does not have exactly one type
        field, or references an attr whose attr-type does not match its
        role (type_attr -> "type", type_list_attr -> "list(type)",
        number_attr -> "int").
    """
    self.op_def = op_def
    # TODO(josh11b): SWIG the ValidateOpDef() function from C++ and call it
    # here, instead of these checks.
    for arg in list(op_def.input_arg) + list(op_def.output_arg):
      num_type_fields = _NumTypeFields(arg)
      if num_type_fields != 1:
        raise TypeError("Arg '%s' of '%s' must have one type field not %d" %
                        (arg.name, op_def.name, num_type_fields))
      if arg.type_attr:
        attr_type = _Attr(op_def, arg.type_attr).type
        if attr_type != "type":
          raise TypeError("Attr '%s' of '%s' used as a type_attr "
                          "but has type %s" %
                          (arg.type_attr, op_def.name, attr_type))
      if arg.type_list_attr:
        attr_type = _Attr(op_def, arg.type_list_attr).type
        if attr_type != "list(type)":
          raise TypeError(
              "Attr '%s' of '%s' used as a type_list_attr but has type %s" %
              # Bug fix: report the offending type_list_attr (the original
              # mistakenly reported arg.type_attr here).
              (arg.type_list_attr, op_def.name, attr_type))
      if arg.number_attr:
        attr_type = _Attr(op_def, arg.number_attr).type
        if attr_type != "int":
          raise TypeError(
              "Attr '%s' of '%s' used as a number_attr but has type %s" %
              (arg.number_attr, op_def.name, attr_type))
# pylint: disable=g-doc-return-or-yield
@contextlib.contextmanager
def _MaybeColocateWith(inputs):
if not inputs:
yield
else:
# NOTE(mrry): The `ops.colocate_with()` function accepts only a single
# op or tensor, so we create one context manager per element in the list.
with ops.colocate_with(inputs[0]), _MaybeColocateWith(inputs[1:]):
yield
# pylint: enable=g-doc-return-or-yield
class OpDefLibrary(object):
def __init__(self):
self._ops = {}
def add_op(self, op_def):
if not isinstance(op_def, op_def_pb2.OpDef):
raise TypeError("%s is %s, not an op_def_pb2.OpDef" %
(op_def, type(op_def)))
if not op_def.name:
raise ValueError("%s missing name." % op_def)
if op_def.name in self._ops:
raise RuntimeError("Op name %s registered twice." % op_def.name)
self._ops[op_def.name] = _OpInfo(op_def)
def add_op_list(self, op_list):
if not isinstance(op_list, op_def_pb2.OpList):
raise TypeError("%s is %s, not an op_def_pb2.OpList" %
(op_list, type(op_list)))
for op_def in op_list.op:
self.add_op(op_def)
def apply_op(self, op_type_name, name=None, **keywords):
# pylint: disable=g-doc-args
op_info = self._ops.get(op_type_name, None)
if op_info is None:
raise RuntimeError("Unrecognized Op name " + op_type_name)
op_def = op_info.op_def
# Determine the graph context.
try:
# Need to flatten all the arguments into a list.
# pylint: disable=protected-access
g = ops._get_graph_from_inputs(_Flatten(keywords.values()))
# pyline: enable=protected-access
except AssertionError as e:
raise RuntimeError(
"Cannot determine graph for Op '%s' due to: %s"
% (op_type_name, e.message))
# Default name if not specified.
if name is None:
name = op_type_name
# Check for deprecation
deprecation_version = op_def.deprecation.version
if deprecation_version:
producer = g.graph_def_versions.producer
if producer >= deprecation_version:
raise NotImplementedError(
("Op %s is not available in GraphDef version %d. "
"It has been removed in version %d. %s.") %
(op_type_name, producer, deprecation_version,
op_def.deprecation.explanation))
# Requires that op_def has passed validation (using the C++
# ValidateOpDef() from ../framework/op_def_util.h).
attrs = {}
inputs = []
input_types = []
with g.as_default(), ops.name_scope(name) as scope:
# Perform input type inference
inferred_from = {}
for input_arg in op_def.input_arg:
input_name = input_arg.name
if input_name in keywords:
values = keywords.pop(input_name)
elif input_name + "_" in keywords:
# Handle the case where the name is a keyword or built-in
# for Python so we use the name + _ instead.
input_name += "_"
values = keywords.pop(input_name)
else:
raise TypeError("No argument for input " + input_name)
# Goals:
# * Convert values to Tensors if it contains constants.
# * Verify that values is a list if that matches the input_arg's
# those attrs and validate those attr values are legal (if
# they have not yet been set) or validate the input matches
# the type indicated by the attrs (if they have already been
# inferred via an earlier input).
# * If the input_arg has an explicit type, make sure the input
# conforms.
if _IsListParameter(input_arg):
if not _IsListValue(values):
raise TypeError(
"Expected list for '%s' argument to '%s' Op, not %s." %
(input_name, op_type_name, values))
# In cases where we expect all elements of the list to have the
# same dtype, try to cast non-Tensor elements to that type.
dtype = None
if input_arg.type != types_pb2.DT_INVALID:
dtype = input_arg.type
elif input_arg.number_attr:
if input_arg.type_attr in attrs:
dtype = attrs[input_arg.type_attr]
else:
for t in values:
if isinstance(t, ops.Tensor):
dtype = t.dtype
break
try:
if not input_arg.is_ref and dtype:
dtype = dtypes.as_dtype(dtype).base_dtype
values = ops.convert_n_to_tensor(
values, name=input_arg.name, dtype=dtype if dtype else None,
as_ref=input_arg.is_ref)
except (TypeError, ValueError):
assert dtype is not None, "Should not fail if dtype is None"
assert input_arg.number_attr, "Should be number_attr case"
# What types does the conversion function think values have?
values = ops.convert_n_to_tensor(values, as_ref=input_arg.is_ref)
observed = ", ".join(v.dtype.base_dtype.name for v in values)
prefix = (
"Tensors in list passed to '%s' of '%s' Op have types [%s]" %
(input_name, op_type_name, observed))
if input_arg.type != types_pb2.DT_INVALID:
raise TypeError("%s that do not match expected type %s." %
(prefix, dtype.name))
elif input_arg.type_attr in attrs:
raise TypeError("%s that do not match type %s inferred from "
"earlier arguments." %
(prefix, dtype.name))
else:
raise TypeError("%s that don't all match." % prefix)
types = [x.dtype for x in values]
inputs.extend(values)
else:
dtype = None
if input_arg.type != types_pb2.DT_INVALID:
dtype = input_arg.type
elif input_arg.type_attr in attrs:
dtype = attrs[input_arg.type_attr]
try:
values = ops.convert_to_tensor(
values, name=input_arg.name, dtype=dtype,
as_ref=input_arg.is_ref)
except ValueError:
observed = ops.convert_to_tensor(values,
as_ref=input_arg.is_ref).dtype.name
prefix = ("Input '%s' of '%s' Op has type %s that does not match" %
(input_name, op_type_name, observed))
if input_arg.type != types_pb2.DT_INVALID:
raise TypeError("%s expected type of %s." %
(prefix, dtypes.as_dtype(input_arg.type).name))
else:
raise TypeError(
"%s type %s of argument '%s'." %
(prefix, dtypes.as_dtype(attrs[input_arg.type_attr]).name,
inferred_from[input_arg.type_attr]))
types = [values.dtype]
inputs.append(values)
base_types = [x.base_dtype for x in types]
if input_arg.number_attr:
if input_arg.number_attr in attrs:
if len(values) != attrs[input_arg.number_attr]:
raise ValueError(
"List argument '%s' to '%s' Op with length %d must match "
"length %d of argument '%s'." %
(input_name, op_type_name, len(values),
attrs[input_arg.number_attr],
inferred_from[input_arg.number_attr]))
else:
attrs[input_arg.number_attr] = len(values)
inferred_from[input_arg.number_attr] = input_name
num_attr = _Attr(op_def, input_arg.number_attr)
if num_attr.has_minimum and len(values) < num_attr.minimum:
raise ValueError(
"List argument '%s' to '%s' Op with length %d shorter "
"than minimum length %d." %
(input_name, op_type_name, len(values), num_attr.minimum))
if any([bt != base_types[0] for bt in base_types]):
raise TypeError(
"All tensors passed to '%s' of '%s' Op "
"must have the same type." %
(input_name, op_type_name))
if input_arg.type != types_pb2.DT_INVALID:
if base_types and base_types[0] != input_arg.type:
assert False, "Unreachable"
elif input_arg.type_attr in attrs:
if base_types and base_types[0] != attrs[input_arg.type_attr]:
assert False, "Unreachable"
else:
if not base_types:
raise TypeError(
"Don't know how to infer type variable from empty input "
"list passed to input '%s' of '%s' Op." %
(input_name, op_type_name))
attrs[input_arg.type_attr] = base_types[0]
inferred_from[input_arg.type_attr] = input_name
type_attr = _Attr(op_def, input_arg.type_attr)
_SatisfiesTypeConstraint(base_types[0], type_attr)
elif input_arg.type_attr:
# <type-attr>
attr_value = base_types[0]
if input_arg.type_attr in attrs:
if attrs[input_arg.type_attr] != attr_value:
assert False, "Unreachable"
else:
for base_type in base_types:
_SatisfiesTypeConstraint(base_type,
_Attr(op_def, input_arg.type_attr))
attrs[input_arg.type_attr] = attr_value
inferred_from[input_arg.type_attr] = input_name
elif input_arg.type_list_attr:
# <type-list-attr>
attr_value = base_types
if input_arg.type_list_attr in attrs:
if attrs[input_arg.type_list_attr] != attr_value:
raise TypeError(
"Input '%s' of '%s' Op has type list of %s that does not "
"match type list %s of argument '%s'." %
(input_name, op_type_name,
", ".join(dtypes.as_dtype(x).name for x in attr_value),
", ".join(dtypes.as_dtype(x).name
for x in attrs[input_arg.type_list_attr]),
inferred_from[input_arg.type_list_attr]))
else:
for base_type in base_types:
_SatisfiesTypeConstraint(base_type,
_Attr(op_def, input_arg.type_list_attr))
attrs[input_arg.type_list_attr] = attr_value
inferred_from[input_arg.type_list_attr] = input_name
else:
# single Tensor with specified type
if base_types[0] != input_arg.type:
assert False, "Unreachable"
if input_arg.is_ref:
if not all(x.is_ref_dtype for x in types):
raise TypeError(
"Input '%s' of '%s' Op requires l-value input" %
(input_name, op_type_name))
input_types.extend(types)
else:
input_types.extend(base_types)
# Process remaining attrs
for attr in op_def.attr:
# Skip attrs that have already had their values inferred
if attr.name in attrs:
if attr.name in keywords:
raise TypeError(
"Should not specify value for inferred attr '%s'." % attr.name)
continue
if attr.name in keywords:
attrs[attr.name] = keywords.pop(attr.name)
elif attr.name + "_" in keywords:
# Attrs whose names match Python keywords have an extra '_'
# appended, so we must check for that as well.
attrs[attr.name] = keywords.pop(attr.name + "_")
else:
raise TypeError("No argument for attr " + attr.name)
# Convert attr values to AttrValue protos.
attr_protos = {}
for attr_def in op_def.attr:
key = attr_def.name
value = attrs[key]
attr_value = attr_value_pb2.AttrValue()
if attr_def.HasField("default_value") and value is None:
attr_value.CopyFrom(attr_def.default_value)
attr_protos[key] = attr_value
continue
if attr_def.type.startswith("list("):
if not _IsListValue(value):
raise TypeError("Expected list for attr " + key)
if attr_def.has_minimum:
if len(value) < attr_def.minimum:
raise ValueError("Attr '%s' of '%s' Op passed list of length %d "
"less than minimum %d." %
(key, op_type_name, len(value),
attr_def.minimum))
attr_value.list.SetInParent()
if attr_def.type == "string":
attr_value.s = _MakeStr(value, key)
if attr_def.HasField("allowed_values"):
if attr_value.s not in attr_def.allowed_values.list.s:
raise ValueError(
"Attr '%s' of '%s' Op passed string '%s' not in: \"%s\"." %
(key, op_type_name, compat.as_text(attr_value.s),
'", "'.join(map(compat.as_text,
attr_def.allowed_values.list.s))))
elif attr_def.type == "list(string)":
attr_value.list.s.extend([_MakeStr(x, key) for x in value])
if attr_def.HasField("allowed_values"):
for x in attr_value.list.s:
if x not in attr_def.allowed_values.list.s:
raise ValueError(
"Attr '%s' of '%s' Op passed string '%s' not in: \"%s\"." %
(key, op_type_name, compat.as_text(x),
'", "'.join(map(compat.as_text,
attr_def.allowed_values.list.s))))
elif attr_def.type == "int":
attr_value.i = _MakeInt(value, key)
if attr_def.has_minimum:
if attr_value.i < attr_def.minimum:
raise ValueError(
"Attr '%s' of '%s' Op passed %d less than minimum %d." %
(key, op_type_name, attr_value.i, attr_def.minimum))
elif attr_def.type == "list(int)":
attr_value.list.i.extend([_MakeInt(x, key) for x in value])
elif attr_def.type == "float":
attr_value.f = _MakeFloat(value, key)
elif attr_def.type == "list(float)":
attr_value.list.f.extend([_MakeFloat(x, key) for x in value])
elif attr_def.type == "bool":
attr_value.b = _MakeBool(value, key)
elif attr_def.type == "list(bool)":
attr_value.list.b.extend([_MakeBool(x, key) for x in value])
elif attr_def.type == "type":
attr_value.type = _MakeType(value, attr_def)
elif attr_def.type == "list(type)":
attr_value.list.type.extend(
[_MakeType(x, attr_def) for x in value])
elif attr_def.type == "shape":
attr_value.shape.CopyFrom(_MakeShape(value, key))
elif attr_def.type == "list(shape)":
attr_value.list.shape.extend(
[_MakeShape(x, key) for x in value])
elif attr_def.type == "tensor":
attr_value.tensor.CopyFrom(_MakeTensor(value, key))
elif attr_def.type == "list(tensor)":
attr_value.list.tensor.extend(
[_MakeTensor(x, key) for x in value])
elif attr_def.type == "func":
if not isinstance(value, compat.bytes_or_text_types):
raise TypeError("Expects a string for the func name")
attr_value.func.name = value
else:
raise TypeError("Unrecognized Attr type " + attr_def.type)
attr_protos[key] = attr_value
del attrs # attrs is no longer authoritative, use attr_protos instead
# Determine output types (possibly using attrs)
output_types = []
output_structure = []
for arg in op_def.output_arg:
types = []
if arg.number_attr:
n = _AttrValue(attr_protos, arg.number_attr).i
if arg.type_attr:
types = [_AttrValue(attr_protos, arg.type_attr).type] * n
else:
types = [arg.type] * n
output_structure.append(n)
elif arg.type_attr:
t = _AttrValue(attr_protos, arg.type_attr)
types = [t.type]
output_structure.append(None)
elif arg.type_list_attr:
t = _AttrValue(attr_protos, arg.type_list_attr)
types = t.list.type
output_structure.append(len(t.list.type))
else:
types = [arg.type]
output_structure.append(None)
if arg.is_ref:
types = [dtypes.as_dtype(x).as_ref for x in types]
output_types.extend(types)
if keywords:
raise TypeError("apply_op() got unexpected keyword arguments: " +
", ".join(sorted(keywords.keys())))
# NOTE(mrry): We add an explicit colocation constraint between
# the newly created op and any of its reference-typed inputs.
must_colocate_inputs = [val for arg, val in zip(op_def.input_arg, inputs)
if arg.is_ref]
with _MaybeColocateWith(must_colocate_inputs):
# Add Op to graph
if output_structure:
op = g.create_op(op_type_name, inputs, output_types, name=scope,
input_types=input_types, attrs=attr_protos,
op_def=op_def)
outputs = op.outputs
return _Restructure(ops.convert_n_to_tensor(outputs),
output_structure)
else:
return g.create_op(op_type_name, inputs, output_types, name=scope,
input_types=input_types, attrs=attr_protos,
op_def=op_def)
| true | true |
f7150df7efc2173d6fc9c35645e25cb08e4e030d | 5,210 | py | Python | tests/components/freebox/test_config_flow.py | miccico/core | 14c205384171dee59c1a908f8449f9864778b2dc | [
"Apache-2.0"
] | 6 | 2017-08-02T19:26:39.000Z | 2020-03-14T22:47:41.000Z | tests/components/freebox/test_config_flow.py | miccico/core | 14c205384171dee59c1a908f8449f9864778b2dc | [
"Apache-2.0"
] | 54 | 2020-11-17T07:04:57.000Z | 2022-03-31T06:45:39.000Z | tests/components/freebox/test_config_flow.py | miccico/core | 14c205384171dee59c1a908f8449f9864778b2dc | [
"Apache-2.0"
] | 14 | 2018-08-19T16:28:26.000Z | 2021-09-02T18:26:53.000Z | """Tests for the Freebox config flow."""
from unittest.mock import AsyncMock, patch
from aiofreepybox.exceptions import (
AuthorizationError,
HttpRequestError,
InvalidTokenError,
)
import pytest
from homeassistant import data_entry_flow
from homeassistant.components.freebox.const import DOMAIN
from homeassistant.config_entries import SOURCE_DISCOVERY, SOURCE_IMPORT, SOURCE_USER
from homeassistant.const import CONF_HOST, CONF_PORT
from tests.common import MockConfigEntry
HOST = "myrouter.freeboxos.fr"
PORT = 1234
@pytest.fixture(name="connect")
def mock_controller_connect():
"""Mock a successful connection."""
with patch("homeassistant.components.freebox.router.Freepybox") as service_mock:
service_mock.return_value.open = AsyncMock()
service_mock.return_value.system.get_config = AsyncMock(
return_value={
"mac": "abcd",
"model_info": {"pretty_name": "Pretty Model"},
"firmware_version": "123",
}
)
service_mock.return_value.lan.get_hosts_list = AsyncMock()
service_mock.return_value.connection.get_status = AsyncMock()
service_mock.return_value.close = AsyncMock()
yield service_mock
async def test_user(hass):
"""Test user config."""
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": SOURCE_USER}
)
assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
assert result["step_id"] == "user"
# test with all provided
result = await hass.config_entries.flow.async_init(
DOMAIN,
context={"source": SOURCE_USER},
data={CONF_HOST: HOST, CONF_PORT: PORT},
)
assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
assert result["step_id"] == "link"
async def test_import(hass):
"""Test import step."""
result = await hass.config_entries.flow.async_init(
DOMAIN,
context={"source": SOURCE_IMPORT},
data={CONF_HOST: HOST, CONF_PORT: PORT},
)
assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
assert result["step_id"] == "link"
async def test_discovery(hass):
"""Test discovery step."""
result = await hass.config_entries.flow.async_init(
DOMAIN,
context={"source": SOURCE_DISCOVERY},
data={CONF_HOST: HOST, CONF_PORT: PORT},
)
assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
assert result["step_id"] == "link"
async def test_link(hass, connect):
"""Test linking."""
result = await hass.config_entries.flow.async_init(
DOMAIN,
context={"source": SOURCE_USER},
data={CONF_HOST: HOST, CONF_PORT: PORT},
)
result = await hass.config_entries.flow.async_configure(result["flow_id"], {})
assert result["type"] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY
assert result["result"].unique_id == HOST
assert result["title"] == HOST
assert result["data"][CONF_HOST] == HOST
assert result["data"][CONF_PORT] == PORT
async def test_abort_if_already_setup(hass):
"""Test we abort if component is already setup."""
MockConfigEntry(
domain=DOMAIN, data={CONF_HOST: HOST, CONF_PORT: PORT}, unique_id=HOST
).add_to_hass(hass)
# Should fail, same HOST (import)
result = await hass.config_entries.flow.async_init(
DOMAIN,
context={"source": SOURCE_IMPORT},
data={CONF_HOST: HOST, CONF_PORT: PORT},
)
assert result["type"] == data_entry_flow.RESULT_TYPE_ABORT
assert result["reason"] == "already_configured"
# Should fail, same HOST (flow)
result = await hass.config_entries.flow.async_init(
DOMAIN,
context={"source": SOURCE_USER},
data={CONF_HOST: HOST, CONF_PORT: PORT},
)
assert result["type"] == data_entry_flow.RESULT_TYPE_ABORT
assert result["reason"] == "already_configured"
async def test_on_link_failed(hass):
"""Test when we have errors during linking the router."""
result = await hass.config_entries.flow.async_init(
DOMAIN,
context={"source": SOURCE_USER},
data={CONF_HOST: HOST, CONF_PORT: PORT},
)
with patch(
"homeassistant.components.freebox.router.Freepybox.open",
side_effect=AuthorizationError(),
):
result = await hass.config_entries.flow.async_configure(result["flow_id"], {})
assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
assert result["errors"] == {"base": "register_failed"}
with patch(
"homeassistant.components.freebox.router.Freepybox.open",
side_effect=HttpRequestError(),
):
result = await hass.config_entries.flow.async_configure(result["flow_id"], {})
assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
assert result["errors"] == {"base": "cannot_connect"}
with patch(
"homeassistant.components.freebox.router.Freepybox.open",
side_effect=InvalidTokenError(),
):
result = await hass.config_entries.flow.async_configure(result["flow_id"], {})
assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
assert result["errors"] == {"base": "unknown"}
| 34.276316 | 86 | 0.673321 | from unittest.mock import AsyncMock, patch
from aiofreepybox.exceptions import (
AuthorizationError,
HttpRequestError,
InvalidTokenError,
)
import pytest
from homeassistant import data_entry_flow
from homeassistant.components.freebox.const import DOMAIN
from homeassistant.config_entries import SOURCE_DISCOVERY, SOURCE_IMPORT, SOURCE_USER
from homeassistant.const import CONF_HOST, CONF_PORT
from tests.common import MockConfigEntry
HOST = "myrouter.freeboxos.fr"
PORT = 1234
@pytest.fixture(name="connect")
def mock_controller_connect():
with patch("homeassistant.components.freebox.router.Freepybox") as service_mock:
service_mock.return_value.open = AsyncMock()
service_mock.return_value.system.get_config = AsyncMock(
return_value={
"mac": "abcd",
"model_info": {"pretty_name": "Pretty Model"},
"firmware_version": "123",
}
)
service_mock.return_value.lan.get_hosts_list = AsyncMock()
service_mock.return_value.connection.get_status = AsyncMock()
service_mock.return_value.close = AsyncMock()
yield service_mock
async def test_user(hass):
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": SOURCE_USER}
)
assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
assert result["step_id"] == "user"
result = await hass.config_entries.flow.async_init(
DOMAIN,
context={"source": SOURCE_USER},
data={CONF_HOST: HOST, CONF_PORT: PORT},
)
assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
assert result["step_id"] == "link"
async def test_import(hass):
result = await hass.config_entries.flow.async_init(
DOMAIN,
context={"source": SOURCE_IMPORT},
data={CONF_HOST: HOST, CONF_PORT: PORT},
)
assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
assert result["step_id"] == "link"
async def test_discovery(hass):
result = await hass.config_entries.flow.async_init(
DOMAIN,
context={"source": SOURCE_DISCOVERY},
data={CONF_HOST: HOST, CONF_PORT: PORT},
)
assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
assert result["step_id"] == "link"
async def test_link(hass, connect):
result = await hass.config_entries.flow.async_init(
DOMAIN,
context={"source": SOURCE_USER},
data={CONF_HOST: HOST, CONF_PORT: PORT},
)
result = await hass.config_entries.flow.async_configure(result["flow_id"], {})
assert result["type"] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY
assert result["result"].unique_id == HOST
assert result["title"] == HOST
assert result["data"][CONF_HOST] == HOST
assert result["data"][CONF_PORT] == PORT
async def test_abort_if_already_setup(hass):
MockConfigEntry(
domain=DOMAIN, data={CONF_HOST: HOST, CONF_PORT: PORT}, unique_id=HOST
).add_to_hass(hass)
result = await hass.config_entries.flow.async_init(
DOMAIN,
context={"source": SOURCE_IMPORT},
data={CONF_HOST: HOST, CONF_PORT: PORT},
)
assert result["type"] == data_entry_flow.RESULT_TYPE_ABORT
assert result["reason"] == "already_configured"
result = await hass.config_entries.flow.async_init(
DOMAIN,
context={"source": SOURCE_USER},
data={CONF_HOST: HOST, CONF_PORT: PORT},
)
assert result["type"] == data_entry_flow.RESULT_TYPE_ABORT
assert result["reason"] == "already_configured"
async def test_on_link_failed(hass):
result = await hass.config_entries.flow.async_init(
DOMAIN,
context={"source": SOURCE_USER},
data={CONF_HOST: HOST, CONF_PORT: PORT},
)
with patch(
"homeassistant.components.freebox.router.Freepybox.open",
side_effect=AuthorizationError(),
):
result = await hass.config_entries.flow.async_configure(result["flow_id"], {})
assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
assert result["errors"] == {"base": "register_failed"}
with patch(
"homeassistant.components.freebox.router.Freepybox.open",
side_effect=HttpRequestError(),
):
result = await hass.config_entries.flow.async_configure(result["flow_id"], {})
assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
assert result["errors"] == {"base": "cannot_connect"}
with patch(
"homeassistant.components.freebox.router.Freepybox.open",
side_effect=InvalidTokenError(),
):
result = await hass.config_entries.flow.async_configure(result["flow_id"], {})
assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
assert result["errors"] == {"base": "unknown"}
| true | true |
f7150e4ced48a3dd4e84f1e34c3cf0335508d142 | 620 | py | Python | PythonModulo1/ex028.py | BossNX/ExerciciosDePython | 27c79d284794f65f94d3a07de11429d665ec92da | [
"MIT"
] | null | null | null | PythonModulo1/ex028.py | BossNX/ExerciciosDePython | 27c79d284794f65f94d3a07de11429d665ec92da | [
"MIT"
] | null | null | null | PythonModulo1/ex028.py | BossNX/ExerciciosDePython | 27c79d284794f65f94d3a07de11429d665ec92da | [
"MIT"
] | null | null | null | from random import randint
import playsound
from time import sleep
print('-=-' * 20)
print('Vou pensar em um número entre 0 e 5. Tente advinhar... ')
print('-=-' * 20)
jogador = int(input('Em que número você pensou? '))
print('PROCESSANDO... ')
sleep(3)
computador = randint(0, 5)
if jogador == computador:
print('PARABÉNS! Você acertou! Eu escolhi {} e você escolheu {} também! '.format(computador, jogador))
playsound.playsound('ex028.mp3')
else:
print('VOCÊ ERRROU! Eu escolhi {} e você escolheu {}'.format(computador, jogador))
playsound.playsound('errou.mp3')
print('Foi muito bom jogar com você!')
| 34.444444 | 106 | 0.695161 | from random import randint
import playsound
from time import sleep
print('-=-' * 20)
print('Vou pensar em um número entre 0 e 5. Tente advinhar... ')
print('-=-' * 20)
jogador = int(input('Em que número você pensou? '))
print('PROCESSANDO... ')
sleep(3)
computador = randint(0, 5)
if jogador == computador:
print('PARABÉNS! Você acertou! Eu escolhi {} e você escolheu {} também! '.format(computador, jogador))
playsound.playsound('ex028.mp3')
else:
print('VOCÊ ERRROU! Eu escolhi {} e você escolheu {}'.format(computador, jogador))
playsound.playsound('errou.mp3')
print('Foi muito bom jogar com você!')
| true | true |
f7150fdd54d3ad81b16118068731af80a1829d37 | 5,393 | py | Python | picar.py | ElwinCabrera/picar | 975a5c49ea4c12a0dd8faefb4e0a405d902ccd62 | [
"MIT"
] | null | null | null | picar.py | ElwinCabrera/picar | 975a5c49ea4c12a0dd8faefb4e0a405d902ccd62 | [
"MIT"
] | null | null | null | picar.py | ElwinCabrera/picar | 975a5c49ea4c12a0dd8faefb4e0a405d902ccd62 | [
"MIT"
] | null | null | null | """
PI power
5V on pin
GND on pin
The GPIO mode is set to BCM
H-Bridge Motor Driver Pin Configuration
in1 -> BCM 05 (board pin 29 or GPIO 5)
in2 -> BCM 06 (board pin 31 or GPIO 6)
enable -> BCM 13 (board pin 33 or GPIO 13, PWM)
PCA9685 (16-Channel Servo Driver) Pin Configuration
SDA -> BCM 2 (board pin 3, GPIO 2)
SCL -> BCM 3 (board pin 5, GPIO 3)
VCC -> Board Pin 1 (3.3v)
GND -> Board Pin 9
HC-SR04 (Sonar Distance Sensor)
Trig -> BCM 23 (board pin 16 or GPIO 23)
Echo -> BCM 24 (board pin 18 or GPIO 24)
VCC -> Board Pin 17 (3.3v)
GND -> Board Pin 20
"""
from adafruit_servokit import ServoKit
from gpiozero import Motor, PWMOutputDevice
from time import sleep
from enum import Enum
class ServoCh(Enum):
STEERING = 0
CAM_PAN = 1
CAM_TILT = 2
TRIGHT_HYDR = 4
TLEFT_HYDR = 5
BRIGHT_HYDR = 6
BLEFT_HYDR = 7
class PiCar:
def __init__(self):
self.motorDriver = HBridgeMotorDriver(in1=5, in2=6, enable=13)
self.servoDiver = ServoDriver(sda=2, scl=3)
def f(self):
pass
class HBridgeMotorDriver:
def __init__(self, in1, in2, enable):
self.in1 = in1
self.in2 = in2
self.enable = enable # this gpio is pwm
self.pwmEnable = PWMOutputDevice(enable, frequency=100)
self.motor = Motor(forward=in1, backward=in2)
self.pwmEnable.on()
self.currSpeed = 0.0
# def start(self, startPWMDutyCycle: float = 1.0):
# self.pwmEnable.on()
# self.pwmEnable.value = startPWMDutyCycle
#
# def stop(self):
# self.pwmEnable.value = 0.0
# # self.pwmEnable.off()
def slowStart(self, accelRate: int = 1, perSec: float = 1, speedFrom: float = 0):
self.accelerate(rate=accelRate, perSec=perSec, speedFrom=speedFrom)
self.pwmEnable.value = 0.0
def slowStop(self, decelRate: int = 1, perSec: float = 1, speedFrom: float = 100):
self.decelerate(rate=decelRate, perSec=perSec, speedFrom=speedFrom)
self.pwmEnable.value = 0.0
# self.pwmEnable.off()
def accelerate(self, rate: int = 1, perSec: float = 1, speedFrom: float = 0, speedTo: float = 100):
if speedFrom < 0 or speedTo < 0:
# in physics its posible to have negative speed but lets keep it positive for now
print("one of the speed is negative")
return
if speedTo > speedFrom:
print("Cant accelerate to a speed less than the start speed, do you want to decelerate instead? ")
print("ERROR: accelerate Speed From: {} -> Speed To: {}".format(speedFrom, speedTo))
return
if rate < 0:
print("Cant accelerate at a negative rate, , do you want to decelerate instead?")
return
if rate == 0:
print("going constant speed")
return
if rate > 100:
rate = 100
print("Accelerating at a rate of {} unit/sec".format(rate))
for currRate in range(int(speedFrom), 101, rate):
dutyCycle = currRate / 100
self.pwmEnable.value = dutyCycle
currSpeed = currRate / perSec
print("Current Speed: {} unit/sec".format(currSpeed))
if currSpeed >= speedTo:
print("Accelerating stopped, speed limit of {} unit/sec reached".format(speedTo))
break
sleep(perSec)
def decelerate(self, rate: int = 1, perSec: float = 1, speedFrom: float = 100, speedTo: float = 0):
if speedFrom < 0 or speedTo < 0:
# in physics its posible to have negative speed but lets keep it positive for now
print("one of the speed is negative")
return
if speedTo > speedFrom:
print("Cant decelerate to a speed higher than the start speed, do you want to accelerate instead? ")
print("ERROR: Decelerate Speed From: {} -> Speed To: {}".format(speedFrom, speedTo))
return
if rate < 0:
rate *= -1
if rate == 0:
print("going constant speed")
return
if rate > 100:
rate = 100
print("Decelerating at a rate of {} unit/sec".format(rate))
for r in range(int(speedFrom), 101, rate):
currRate = speedFrom - r
dutyCycle = currRate / 100
self.pwmEnable.value = dutyCycle
currSpeed = currRate / perSec
print("Current Speed: {} unit/sec".format(currSpeed))
if currSpeed <= speedTo:
print("Accelerating stopped, speed limit of {} unit/sec reached".format(speedTo))
break
sleep(perSec)
def forward(self, pwmDutyCycle: float = 1.0):
self.motor.forward()
self.pwmEnable.value = pwmDutyCycle
def backward(self, pwmDutyCycle: float = 1.0):
# self.motor.backward(pwmDutyCycle)
self.motor.backward()
self.pwmEnable.value = pwmDutyCycle
def halt(self):
self.pwmEnable.off()
class ServoDriver:
def __init__(self, sda, scl):
self.sda = sda
self.scl = scl
# self.vccPin = 17
# self.gndPin = 20
self.kit = ServoKit(channels=16)
class DistanceSensor:
pass
if __name__ == "__main__":
try:
print("")
except KeyboardInterrupt:
print("Program Stopped via keyboard interrupt")
| 29.79558 | 112 | 0.597627 |
from adafruit_servokit import ServoKit
from gpiozero import Motor, PWMOutputDevice
from time import sleep
from enum import Enum
class ServoCh(Enum):
STEERING = 0
CAM_PAN = 1
CAM_TILT = 2
TRIGHT_HYDR = 4
TLEFT_HYDR = 5
BRIGHT_HYDR = 6
BLEFT_HYDR = 7
class PiCar:
def __init__(self):
self.motorDriver = HBridgeMotorDriver(in1=5, in2=6, enable=13)
self.servoDiver = ServoDriver(sda=2, scl=3)
def f(self):
pass
class HBridgeMotorDriver:
def __init__(self, in1, in2, enable):
self.in1 = in1
self.in2 = in2
self.enable = enable
self.pwmEnable = PWMOutputDevice(enable, frequency=100)
self.motor = Motor(forward=in1, backward=in2)
self.pwmEnable.on()
self.currSpeed = 0.0
lf, accelRate: int = 1, perSec: float = 1, speedFrom: float = 0):
self.accelerate(rate=accelRate, perSec=perSec, speedFrom=speedFrom)
self.pwmEnable.value = 0.0
def slowStop(self, decelRate: int = 1, perSec: float = 1, speedFrom: float = 100):
self.decelerate(rate=decelRate, perSec=perSec, speedFrom=speedFrom)
self.pwmEnable.value = 0.0
def accelerate(self, rate: int = 1, perSec: float = 1, speedFrom: float = 0, speedTo: float = 100):
if speedFrom < 0 or speedTo < 0:
print("one of the speed is negative")
return
if speedTo > speedFrom:
print("Cant accelerate to a speed less than the start speed, do you want to decelerate instead? ")
print("ERROR: accelerate Speed From: {} -> Speed To: {}".format(speedFrom, speedTo))
return
if rate < 0:
print("Cant accelerate at a negative rate, , do you want to decelerate instead?")
return
if rate == 0:
print("going constant speed")
return
if rate > 100:
rate = 100
print("Accelerating at a rate of {} unit/sec".format(rate))
for currRate in range(int(speedFrom), 101, rate):
dutyCycle = currRate / 100
self.pwmEnable.value = dutyCycle
currSpeed = currRate / perSec
print("Current Speed: {} unit/sec".format(currSpeed))
if currSpeed >= speedTo:
print("Accelerating stopped, speed limit of {} unit/sec reached".format(speedTo))
break
sleep(perSec)
def decelerate(self, rate: int = 1, perSec: float = 1, speedFrom: float = 100, speedTo: float = 0):
if speedFrom < 0 or speedTo < 0:
print("one of the speed is negative")
return
if speedTo > speedFrom:
print("Cant decelerate to a speed higher than the start speed, do you want to accelerate instead? ")
print("ERROR: Decelerate Speed From: {} -> Speed To: {}".format(speedFrom, speedTo))
return
if rate < 0:
rate *= -1
if rate == 0:
print("going constant speed")
return
if rate > 100:
rate = 100
print("Decelerating at a rate of {} unit/sec".format(rate))
for r in range(int(speedFrom), 101, rate):
currRate = speedFrom - r
dutyCycle = currRate / 100
self.pwmEnable.value = dutyCycle
currSpeed = currRate / perSec
print("Current Speed: {} unit/sec".format(currSpeed))
if currSpeed <= speedTo:
print("Accelerating stopped, speed limit of {} unit/sec reached".format(speedTo))
break
sleep(perSec)
def forward(self, pwmDutyCycle: float = 1.0):
self.motor.forward()
self.pwmEnable.value = pwmDutyCycle
def backward(self, pwmDutyCycle: float = 1.0):
self.motor.backward()
self.pwmEnable.value = pwmDutyCycle
def halt(self):
self.pwmEnable.off()
class ServoDriver:
def __init__(self, sda, scl):
self.sda = sda
self.scl = scl
self.kit = ServoKit(channels=16)
class DistanceSensor:
    """Placeholder for a distance-sensor driver; not implemented yet."""
    pass
if __name__ == "__main__":
    # Smoke-test entry point: currently only prints a blank line.
    # Ctrl-C is caught so an interactive run exits with a clean message.
    try:
        print("")
    except KeyboardInterrupt:
        print("Program Stopped via keyboard interrupt")
| true | true |
f7151043e58b5a9e39f8c3bb1497ebe774d3ce3c | 11,345 | py | Python | nltk/chat/zen.py | Pandinosaurus/nltk | 53dbaa5591003f6764a3d69834e92bc83e3a754c | [
"Apache-2.0"
] | 2 | 2021-12-06T04:28:18.000Z | 2021-12-20T03:33:00.000Z | nltk/chat/zen.py | Pandinosaurus/nltk | 53dbaa5591003f6764a3d69834e92bc83e3a754c | [
"Apache-2.0"
] | null | null | null | nltk/chat/zen.py | Pandinosaurus/nltk | 53dbaa5591003f6764a3d69834e92bc83e3a754c | [
"Apache-2.0"
] | 1 | 2021-12-29T16:44:59.000Z | 2021-12-29T16:44:59.000Z | # Natural Language Toolkit: Zen Chatbot
#
# Copyright (C) 2001-2021 NLTK Project
# Author: Amy Holland <amyrh@csse.unimelb.edu.au>
# URL: <http://nltk.org/>
# For license information, see LICENSE.TXT
"""
Zen Chatbot talks in gems of Zen wisdom.
This is a sample conversation with Zen Chatbot:
ZC: Welcome, my child.
me: Good afternoon.
ZC: Ask the question you have come to ask.
me: How can I achieve enlightenment?
ZC: How do you suppose?
me: Through meditation.
ZC: Form is emptiness, and emptiness form.
me: How can I empty my mind of worldly troubles?
ZC: Will an answer to that really help in your search for enlightenment?
me: Yes.
ZC: It is better to be right than to be certain.
me: I seek truth and wisdom.
ZC: The search for truth is a long journey.
me: Are you sure?
ZC: Maybe sure, maybe not sure.
The chatbot structure is based on that of chat.eliza. Thus, it uses
a translation table to convert from question to response
i.e. "I am" --> "you are"
Of course, since Zen Chatbot does not understand the meaning of any words,
responses are very limited. Zen Chatbot will usually answer very vaguely, or
respond to a question by asking a different question, in much the same way
as Eliza.
"""
from nltk.chat.util import Chat, reflections
# responses are matched top to bottom, so non-specific matches occur later
# for each match, a list of possible responses is provided
# responses are matched top to bottom, so non-specific matches occur later
# for each match, a list of possible responses is provided
responses = (
    # Zen Chatbot opens with the line "Welcome, my child." The usual
    # response will be a greeting problem: 'good' matches "good morning",
    # "good day" etc, but also "good grief!" and other sentences starting
    # with the word 'good' that may not be a greeting
    (
        r"(hello(.*))|(good [a-zA-Z]+)",
        (
            "The path to enlightenment is often difficult to see.",
            "Greetings. I sense your mind is troubled. Tell me of your troubles.",
            "Ask the question you have come to ask.",
            # typo fix: "englightenment" -> "enlightenment"
            "Hello. Do you seek enlightenment?",
        ),
    ),
    # "I need" and "I want" can be followed by a thing (eg 'help')
    # or an action (eg 'to see you')
    #
    # This is a problem with this style of response -
    # person:  "I need you"
    # chatbot: "me can be achieved by hard work and dedication of the mind"
    # i.e. 'you' is not really a thing that can be mapped this way, so this
    # interpretation only makes sense for some inputs
    #
    (
        r"i need (.*)",
        (
            "%1 can be achieved by hard work and dedication of the mind.",
            "%1 is not a need, but a desire of the mind. Clear your mind of such concerns.",
            "Focus your mind on%1, and you will find what you need.",
        ),
    ),
    (
        r"i want (.*)",
        (
            "Desires of the heart will distract you from the path to enlightenment.",
            "Will%1 help you attain enlightenment?",
            "Is%1 a desire of the mind, or of the heart?",
        ),
    ),
    # why questions are separated into three types:
    # "why..I"   e.g. "why am I here?" "Why do I like cake?"
    # "why..you" e.g. "why are you here?" "Why won't you tell me?"
    # "why..."   e.g. "Why is the sky blue?"
    # problems:
    #     person:  "Why can't you tell me?"
    #     chatbot: "Are you sure I tell you?"
    #     - this style works for positives (e.g. "why do you like cake?")
    #       but does not work for negatives (e.g. "why don't you like cake?")
    (r"why (.*) i (.*)\?", ("You%1%2?", "Perhaps you only think you%1%2")),
    (r"why (.*) you(.*)\?", ("Why%1 you%2?", "%2 I%1", "Are you sure I%2?")),
    (r"why (.*)\?", ("I cannot tell you why%1.", "Why do you think %1?")),
    # e.g. "are you listening?", "are you a duck"
    (
        r"are you (.*)\?",
        ("Maybe%1, maybe not%1.", "Whether I am%1 or not is God's business."),
    ),
    # e.g. "am I a duck?", "am I going to die?"
    (
        r"am i (.*)\?",
        ("Perhaps%1, perhaps not%1.", "Whether you are%1 or not is not for me to say."),
    ),
    # what questions, e.g. "what time is it?"
    # problems:
    #     person:  "What do you want?"
    #     chatbot: "Seek truth, not what do me want."
    (r"what (.*)\?", ("Seek truth, not what%1.", "What%1 should not concern you.")),
    # how questions, e.g. "how do you do?"
    (
        r"how (.*)\?",
        (
            "How do you suppose?",
            "Will an answer to that really help in your search for enlightenment?",
            "Ask yourself not how, but why.",
        ),
    ),
    # can questions, e.g. "can you run?", "can you come over here please?"
    (
        r"can you (.*)\?",
        (
            "I probably can, but I may not.",
            "Maybe I can%1, and maybe I cannot.",
            "I can do all, and I can do nothing.",
        ),
    ),
    # can questions, e.g. "can I have some cake?", "can I know truth?"
    (
        r"can i (.*)\?",
        (
            "You can%1 if you believe you can%1, and have a pure spirit.",
            "Seek truth and you will know if you can%1.",
        ),
    ),
    # e.g. "It is raining" - implies the speaker is certain of a fact
    (
        r"it is (.*)",
        (
            "How can you be certain that%1, when you do not even know yourself?",
            "Whether it is%1 or not does not change the way the world is.",
        ),
    ),
    # e.g. "is there a doctor in the house?"
    (
        r"is there (.*)\?",
        ("There is%1 if you believe there is.", "It is possible that there is%1."),
    ),
    # e.g. "is it possible?", "is this true?"
    (r"is(.*)\?", ("%1 is not relevant.", "Does this matter?")),
    # non-specific question
    (
        r"(.*)\?",
        (
            "Do you think %1?",
            "You seek the truth. Does the truth seek you?",
            "If you intentionally pursue the answers to your questions, the answers become hard to see.",
            "The answer to your question cannot be told. It must be experienced.",
        ),
    ),
    # expression of hate of form "I hate you" or "Kelly hates cheese"
    # NOTE(review): alternation precedence means only the first branch,
    # "(.*) (hate[s]?)", carries the surrounding (.*) groups; "dislike"
    # and "don't like" match bare -- verify the intended grouping.
    (
        r"(.*) (hate[s]?)|(dislike[s]?)|(don\'t like)(.*)",
        (
            "Perhaps it is not about hating %2, but about hate from within.",
            "Weeds only grow when we dislike them",
            "Hate is a very strong emotion.",
        ),
    ),
    # statement containing the word 'truth'
    (
        r"(.*) truth(.*)",
        (
            "Seek truth, and truth will seek you.",
            "Remember, it is not the spoon which bends - only yourself.",
            "The search for truth is a long journey.",
        ),
    ),
    # desire to do an action
    # e.g. "I want to go shopping"
    (
        r"i want to (.*)",
        ("You may %1 if your heart truly desires to.", "You may have to %1."),
    ),
    # desire for an object
    # e.g. "I want a pony"
    (
        r"i want (.*)",
        (
            "Does your heart truly desire %1?",
            "Is this a desire of the heart, or of the mind?",
        ),
    ),
    # e.g. "I can't wait" or "I can't do this"
    (
        r"i can\'t (.*)",
        (
            "What we can and can't do is a limitation of the mind.",
            "There are limitations of the body, and limitations of the mind.",
            "Have you tried to%1 with a clear mind?",
        ),
    ),
    # "I think.." indicates uncertainty. e.g. "I think so."
    # problem: exceptions...
    # e.g. "I think, therefore I am"
    (
        r"i think (.*)",
        (
            "Uncertainty in an uncertain world.",
            "Indeed, how can we be certain of anything in such uncertain times.",
            "Are you not, in fact, certain that%1?",
        ),
    ),
    # "I feel...emotions/sick/light-headed..."
    (
        r"i feel (.*)",
        (
            # BUG FIX: the first two strings were accidentally concatenated
            # (missing comma), yielding one response reading "...mind.What...".
            "Your body and your emotions are both symptoms of your mind.",
            "What do you believe is the root of such feelings?",
            "Feeling%1 can be a sign of your state-of-mind.",
        ),
    ),
    # exclamation mark indicating emotion
    # e.g. "Wow!" or "No!"
    (
        r"(.*)!",
        (
            "I sense that you are feeling emotional today.",
            "You need to calm your emotions.",
        ),
    ),
    # because [statement]
    # e.g. "because I said so"
    (
        r"because (.*)",
        (
            # typo fix: "knowning" -> "knowing"
            "Does knowing the reasons behind things help you to understand"
            " the things themselves?",
            "If%1, what else must be true?",
        ),
    ),
    # yes or no - raise an issue of certainty/correctness
    (
        r"(yes)|(no)",
        (
            "Is there certainty in an uncertain world?",
            "It is better to be right than to be certain.",
        ),
    ),
    # sentence containing word 'love'
    (
        r"(.*)love(.*)",
        (
            "Think of the trees: they let the birds perch and fly with no intention to call them when they come, and no longing for their return when they fly away. Let your heart be like the trees.",
            "Free love!",
        ),
    ),
    # sentence containing word 'understand'
    (
        r"(.*)understand(.*)",
        (
            "If you understand, things are just as they are;"
            " if you do not understand, things are just as they are.",
            "Imagination is more important than knowledge.",
        ),
    ),
    # 'I', 'me', 'my' - person is talking about themself.
    # this breaks down when words contain these - eg 'Thyme', 'Irish'
    (
        r"(.*)(me )|( me)|(my)|(mine)|(i)(.*)",
        (
            "'I', 'me', 'my'... these are selfish expressions.",
            "Have you ever considered that you might be a selfish person?",
            "Try to consider others, not just yourself.",
            "Think not just of yourself, but of others.",
        ),
    ),
    # 'you' starting a sentence
    # e.g. "you stink!"
    (
        r"you (.*)",
        ("My path is not of concern to you.", "I am but one, and you but one more."),
    ),
    # say goodbye with some extra Zen wisdom.
    (
        r"exit",
        (
            "Farewell. The obstacle is the path.",
            "Farewell. Life is a journey, not a destination.",
            # typo fix: "knowning" -> "knowing"
            "Good bye. We are cups, constantly and quietly being filled."
            "\nThe trick is knowing how to tip ourselves over and let the beautiful stuff out.",
        ),
    ),
    # fall through case -
    # when stumped, respond with generic zen wisdom
    #
    (
        r"(.*)",
        (
            "When you're enlightened, every word is wisdom.",
            "Random talk is useless.",
            "The reverse side also has a reverse side.",
            "Form is emptiness, and emptiness is form.",
            "I pour out a cup of water. Is the cup empty?",
        ),
    ),
)
zen_chatbot = Chat(responses, reflections)
def zen_chat():
    """Print the Zen Chatbot banner and start an interactive session."""
    width = 75
    banner = [
        "*" * width,
        "Zen Chatbot!".center(width),
        "*" * width,
        '"Look beyond mere words and letters - look into your mind"'.center(width),
        "* Talk your way to truth with Zen Chatbot.",
        "* Type 'quit' when you have had enough.",
        "*" * width,
        "Welcome, my child.",
    ]
    for line in banner:
        print(line)
    zen_chatbot.converse()
def demo():
    """Run the interactive Zen Chatbot demo."""
    zen_chat()
# Allow running this module directly as a script.
if __name__ == "__main__":
    demo()
| 34.378788 | 200 | 0.545439 |
from nltk.chat.util import Chat, reflections
responses = (
(
r"(hello(.*))|(good [a-zA-Z]+)",
(
"The path to enlightenment is often difficult to see.",
"Greetings. I sense your mind is troubled. Tell me of your troubles.",
"Ask the question you have come to ask.",
"Hello. Do you seek englightenment?",
),
),
(
r"i need (.*)",
(
"%1 can be achieved by hard work and dedication of the mind.",
"%1 is not a need, but a desire of the mind. Clear your mind of such concerns.",
"Focus your mind on%1, and you will find what you need.",
),
),
(
r"i want (.*)",
(
"Desires of the heart will distract you from the path to enlightenment.",
"Will%1 help you attain enlightenment?",
"Is%1 a desire of the mind, or of the heart?",
),
),
# "why..." e.g. "Why is the sky blue?"
# problems:
# person: "Why can't you tell me?"
(r"why (.*) i (.*)\?", ("You%1%2?", "Perhaps you only think you%1%2")),
(r"why (.*) you(.*)\?", ("Why%1 you%2?", "%2 I%1", "Are you sure I%2?")),
(r"why (.*)\?", ("I cannot tell you why%1.", "Why do you think %1?")),
# e.g. "are you listening?", "are you a duck"
(
r"are you (.*)\?",
("Maybe%1, maybe not%1.", "Whether I am%1 or not is God's business."),
),
(
r"am i (.*)\?",
("Perhaps%1, perhaps not%1.", "Whether you are%1 or not is not for me to say."),
),
(r"what (.*)\?", ("Seek truth, not what%1.", "What%1 should not concern you.")),
(
r"how (.*)\?",
(
"How do you suppose?",
"Will an answer to that really help in your search for enlightenment?",
"Ask yourself not how, but why.",
),
),
(
r"can you (.*)\?",
(
"I probably can, but I may not.",
"Maybe I can%1, and maybe I cannot.",
"I can do all, and I can do nothing.",
),
),
(
r"can i (.*)\?",
(
"You can%1 if you believe you can%1, and have a pure spirit.",
"Seek truth and you will know if you can%1.",
),
),
(
r"it is (.*)",
(
"How can you be certain that%1, when you do not even know yourself?",
"Whether it is%1 or not does not change the way the world is.",
),
),
(
r"is there (.*)\?",
("There is%1 if you believe there is.", "It is possible that there is%1."),
),
(r"is(.*)\?", ("%1 is not relevant.", "Does this matter?")),
(
r"(.*)\?",
(
"Do you think %1?",
"You seek the truth. Does the truth seek you?",
"If you intentionally pursue the answers to your questions, the answers become hard to see.",
"The answer to your question cannot be told. It must be experienced.",
),
),
(
r"(.*) (hate[s]?)|(dislike[s]?)|(don\'t like)(.*)",
(
"Perhaps it is not about hating %2, but about hate from within.",
"Weeds only grow when we dislike them",
"Hate is a very strong emotion.",
),
),
# statement containing the word 'truth'
(
r"(.*) truth(.*)",
(
"Seek truth, and truth will seek you.",
"Remember, it is not the spoon which bends - only yourself.",
"The search for truth is a long journey.",
),
),
# desire to do an action
# e.g. "I want to go shopping"
(
r"i want to (.*)",
("You may %1 if your heart truly desires to.", "You may have to %1."),
),
# desire for an object
# e.g. "I want a pony"
(
r"i want (.*)",
(
"Does your heart truly desire %1?",
"Is this a desire of the heart, or of the mind?",
),
),
# e.g. "I can't wait" or "I can't do this"
(
r"i can\'t (.*)",
(
"What we can and can't do is a limitation of the mind.",
"There are limitations of the body, and limitations of the mind.",
"Have you tried to%1 with a clear mind?",
),
),
# "I think.." indicates uncertainty. e.g. "I think so."
# problem: exceptions...
# e.g. "I think, therefore I am"
(
r"i think (.*)",
(
"Uncertainty in an uncertain world.",
"Indeed, how can we be certain of anything in such uncertain times.",
"Are you not, in fact, certain that%1?",
),
),
# "I feel...emotions/sick/light-headed..."
(
r"i feel (.*)",
(
"Your body and your emotions are both symptoms of your mind."
"What do you believe is the root of such feelings?",
"Feeling%1 can be a sign of your state-of-mind.",
),
),
# exclaimation mark indicating emotion
# e.g. "Wow!" or "No!"
(
r"(.*)!",
(
"I sense that you are feeling emotional today.",
"You need to calm your emotions.",
),
),
# because [statement]
# e.g. "because I said so"
(
r"because (.*)",
(
"Does knowning the reasons behind things help you to understand"
" the things themselves?",
"If%1, what else must be true?",
),
),
# yes or no - raise an issue of certainty/correctness
(
r"(yes)|(no)",
(
"Is there certainty in an uncertain world?",
"It is better to be right than to be certain.",
),
),
# sentence containing word 'love'
(
r"(.*)love(.*)",
(
"Think of the trees: they let the birds perch and fly with no intention to call them when they come, and no longing for their return when they fly away. Let your heart be like the trees.",
"Free love!",
),
),
# sentence containing word 'understand' - r
(
r"(.*)understand(.*)",
(
"If you understand, things are just as they are;"
" if you do not understand, things are just as they are.",
"Imagination is more important than knowledge.",
),
),
# 'I', 'me', 'my' - person is talking about themself.
# this breaks down when words contain these - eg 'Thyme', 'Irish'
(
r"(.*)(me )|( me)|(my)|(mine)|(i)(.*)",
(
"'I', 'me', 'my'... these are selfish expressions.",
"Have you ever considered that you might be a selfish person?",
"Try to consider others, not just yourself.",
"Think not just of yourself, but of others.",
),
),
# 'you' starting a sentence
# e.g. "you stink!"
(
r"you (.*)",
("My path is not of concern to you.", "I am but one, and you but one more."),
),
# say goodbye with some extra Zen wisdom.
(
r"exit",
(
"Farewell. The obstacle is the path.",
"Farewell. Life is a journey, not a destination.",
"Good bye. We are cups, constantly and quietly being filled."
"\nThe trick is knowning how to tip ourselves over and let the beautiful stuff out.",
),
),
# fall through case -
# when stumped, respond with generic zen wisdom
#
(
r"(.*)",
(
"When you're enlightened, every word is wisdom.",
"Random talk is useless.",
"The reverse side also has a reverse side.",
"Form is emptiness, and emptiness is form.",
"I pour out a cup of water. Is the cup empty?",
),
),
)
zen_chatbot = Chat(responses, reflections)
def zen_chat():
print("*" * 75)
print("Zen Chatbot!".center(75))
print("*" * 75)
print('"Look beyond mere words and letters - look into your mind"'.center(75))
print("* Talk your way to truth with Zen Chatbot.")
print("* Type 'quit' when you have had enough.")
print("*" * 75)
print("Welcome, my child.")
zen_chatbot.converse()
def demo():
zen_chat()
if __name__ == "__main__":
demo()
| true | true |
f71511f5483a56d525eed60470acbd3271e7bc13 | 4,890 | py | Python | doc/conf.py | kcleal/InSilicoSeqSplit | 3ed2881570b3984c82a6e56200c5e6d0f9067e59 | [
"MIT"
] | 109 | 2017-09-06T00:46:07.000Z | 2022-03-31T14:41:53.000Z | doc/conf.py | kcleal/InSilicoSeqSplit | 3ed2881570b3984c82a6e56200c5e6d0f9067e59 | [
"MIT"
] | 210 | 2016-11-16T21:04:37.000Z | 2022-03-25T16:37:05.000Z | doc/conf.py | kcleal/InSilicoSeqSplit | 3ed2881570b3984c82a6e56200c5e6d0f9067e59 | [
"MIT"
] | 31 | 2017-05-23T11:53:52.000Z | 2021-12-27T05:57:27.000Z | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# InSilicoSeq documentation build configuration file, created by
# sphinx-quickstart on Tue May 30 11:45:01 2017.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
# import os
# import sys
# sys.path.insert(0, os.path.abspath('.'))
from iss.version import __version__
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
    'sphinx.ext.autodoc',    # pull documentation from docstrings
    'sphinx.ext.napoleon',   # parse Google/NumPy-style docstrings
    'sphinx.ext.viewcode']   # add links to highlighted source code
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = 'InSilicoSeq'
copyright = '2017, Hadrien Gourle'
author = 'Hadrien Gourle'

# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
# BUG FIX: the old `__version__[:-2]` slice silently breaks for double-digit
# patch releases (e.g. "1.5.10" -> "1.5."); take the first two dotted
# components instead, which is equivalent for single-digit patch versions.
version = ".".join(__version__.split(".")[:2])
# The full version, including alpha/beta/rc tags.
release = __version__
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This patterns also effect to html_static_path and html_extra_path
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'alabaster'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# -- Options for HTMLHelp output ------------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = 'InSilicoSeqdoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'InSilicoSeq.tex', 'InSilicoSeq Documentation',
'Hadrien Gourlé', 'manual'),
]
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'insilicoseq', 'InSilicoSeq Documentation',
[author], 1)
]
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'InSilicoSeq', 'InSilicoSeq Documentation',
author, 'InSilicoSeq', 'One line description of project.',
'Miscellaneous'),
]
| 30.949367 | 79 | 0.684663 |
from iss.version import __version__
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.napoleon',
'sphinx.ext.viewcode']
templates_path = ['_templates']
source_suffix = '.rst'
master_doc = 'index'
project = 'InSilicoSeq'
copyright = '2017, Hadrien Gourle'
author = 'Hadrien Gourle'
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = __version__[:-2]
# The full version, including alpha/beta/rc tags.
release = __version__
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This patterns also effect to html_static_path and html_extra_path
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'alabaster'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# -- Options for HTMLHelp output ------------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = 'InSilicoSeqdoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'InSilicoSeq.tex', 'InSilicoSeq Documentation',
'Hadrien Gourlé', 'manual'),
]
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'insilicoseq', 'InSilicoSeq Documentation',
[author], 1)
]
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'InSilicoSeq', 'InSilicoSeq Documentation',
author, 'InSilicoSeq', 'One line description of project.',
'Miscellaneous'),
]
| true | true |
f71512a29b12796c7073840bf47222a9f69148b6 | 213 | py | Python | donations/payment_gateways/offline/constants.py | diffractive/newstream | cf1a1f230e18d01c63b50ab9d360aa44ac5a486f | [
"MIT"
] | 1 | 2020-05-03T12:33:42.000Z | 2020-05-03T12:33:42.000Z | donations/payment_gateways/offline/constants.py | diffractive/newstream | cf1a1f230e18d01c63b50ab9d360aa44ac5a486f | [
"MIT"
] | 14 | 2020-07-06T20:05:57.000Z | 2022-03-12T00:39:11.000Z | donations/payment_gateways/offline/constants.py | diffractive/newstream | cf1a1f230e18d01c63b50ab9d360aa44ac5a486f | [
"MIT"
] | null | null | null | from site_settings.models import GATEWAY_CAN_EDIT_SUBSCRIPTION, GATEWAY_CAN_TOGGLE_SUBSCRIPTION, GATEWAY_CAN_CANCEL_SUBSCRIPTION
API_CAPABILITIES = [GATEWAY_CAN_EDIT_SUBSCRIPTION, GATEWAY_CAN_CANCEL_SUBSCRIPTION] | 71 | 128 | 0.920188 | from site_settings.models import GATEWAY_CAN_EDIT_SUBSCRIPTION, GATEWAY_CAN_TOGGLE_SUBSCRIPTION, GATEWAY_CAN_CANCEL_SUBSCRIPTION
API_CAPABILITIES = [GATEWAY_CAN_EDIT_SUBSCRIPTION, GATEWAY_CAN_CANCEL_SUBSCRIPTION] | true | true |
f7151307628b5972cb55056043855b80acf50ddc | 16,890 | py | Python | var/spack/repos/builtin/packages/wrf/package.py | marcost2/spack | d23bb6b3af31186d3933b9946b5f4c5d97addf74 | [
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | null | null | null | var/spack/repos/builtin/packages/wrf/package.py | marcost2/spack | d23bb6b3af31186d3933b9946b5f4c5d97addf74 | [
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | null | null | null | var/spack/repos/builtin/packages/wrf/package.py | marcost2/spack | d23bb6b3af31186d3933b9946b5f4c5d97addf74 | [
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | null | null | null | # Copyright 2013-2022 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
import glob
import re
import time
from os.path import basename
from subprocess import PIPE, Popen
from sys import platform, stdout
from llnl.util import tty
from spack import *
# fcntl / O_NONBLOCK are POSIX-only, so only import them off Windows.
is_windows = platform == 'win32'
if not is_windows:
    # Used by setNonBlocking() below.
    from fcntl import F_GETFL, F_SETFL, fcntl
    from os import O_NONBLOCK
# Patterns for scraping WRF's interactive `./configure` menu output:
# a numbered platform line listing parallel build modes, e.g. "(dmpar)"
re_optline = re.compile(r'\s+[0-9]+\..*\((serial|smpar|dmpar|dm\+sm)\)\s+')
# the parallel-build-mode name inside parentheses
re_paroptname = re.compile(r'\((serial|smpar|dmpar|dm\+sm)\)')
# the numeric choice preceding each parenthesised mode
re_paroptnum = re.compile(r'\s+([0-9]+)\.\s+\(')
# the nesting-options line, of the form "(1=basic,2=...)"
re_nestline = re.compile(r'\(([0-9]+=[^)0-9]+)+\)')
# numeric nesting choice ("1=", "2=", ...)
re_nestoptnum = re.compile(r'([0-9]+)=')
# nesting option name following each "="
re_nestoptname = re.compile(r'=([^,)]+)')
def setNonBlocking(fd):
    """
    Set the given file descriptor to non-blocking
    Non-blocking pipes are not supported on windows
    """
    fcntl(fd, F_SETFL, fcntl(fd, F_GETFL) | O_NONBLOCK)
def collect_platform_options(stdoutpipe):
    """Parse configure's platform menu into {platform_line: {mode: number}}."""
    optiondict = {}
    for line in stdoutpipe.splitlines():
        if not re_optline.match(line):
            continue
        paropts = dict(
            zip(re_paroptname.findall(line), re_paroptnum.findall(line))
        )
        optiondict[re_optline.sub("", line).strip()] = paropts
    return optiondict
def collect_nesting_options(stdoutpipe):
    """Parse configure's nesting menu into {option_name: option_number}."""
    nestline = re_nestline.search(stdoutpipe)[0]
    numbers = re_nestoptnum.findall(nestline)
    names = [name.replace(" ", "_") for name in re_nestoptname.findall(nestline)]
    return dict(zip(names, numbers))
class Wrf(Package):
"""The Weather Research and Forecasting (WRF) Model
is a next-generation mesoscale numerical weather prediction system designed
for both atmospheric research and operational forecasting applications.
"""
homepage = "https://www.mmm.ucar.edu/weather-research-and-forecasting-model"
url = "https://github.com/wrf-model/WRF/archive/v4.2.tar.gz"
maintainers = ["MichaelLaufer", "ptooley"]
version("4.3.3", sha256='1b98b8673513f95716c7fc54e950dfebdb582516e22758cd94bc442bccfc0b86')
version("4.3.2", sha256='2c682da0cd0fd13f57d5125eef331f9871ec6a43d860d13b0c94a07fa64348ec')
version("4.3.1", sha256='6c9a69d05ee17d2c80b3699da173cfe6fdf65487db7587c8cc96bfa9ceafce87')
version("4.2", sha256="c39a1464fd5c439134bbd39be632f7ce1afd9a82ad726737e37228c6a3d74706")
version("4.0", sha256="9718f26ee48e6c348d8e28b8bc5e8ff20eafee151334b3959a11b7320999cf65")
version("3.9.1.1", sha256="a04f5c425bedd262413ec88192a0f0896572cc38549de85ca120863c43df047a", url="https://github.com/wrf-model/WRF/archive/V3.9.1.1.tar.gz")
resource(name='elec',
url='https://master.dl.sourceforge.net/project/wrfelec/WRFV3911_elec.beta_release.01.tgz',
sha256='eaaece04711a2883f39349f0857468b42af1a6f8d0985759ce5dfde4058316b4',
when='@3.9.1.1+elec',
destination='.'
)
variant(
"build_type",
default="dmpar",
values=("serial", "smpar", "dmpar", "dm+sm"),
)
variant(
"nesting",
default="basic",
values=("no_nesting", "basic", "preset_moves", "vortex_following"),
)
variant(
"compile_type",
default="em_real",
values=(
"em_real",
"em_quarter_ss",
"em_b_wave",
"em_les",
"em_heldsuarez",
"em_tropical_cyclone",
"em_hill2d_x",
"em_squall2d_x",
"em_squall2d_y",
"em_grav2d_x",
"em_seabreeze2d_x",
"em_scm_xy",
),
)
variant(
"pnetcdf",
default=True,
description="Parallel IO support through Pnetcdf library",
)
variant(
"elec",
default=False,
description="Compile support for the storm electrification package"
+ "for the WRF-ARW"
)
conflicts("@4.0:", when="+elec",
msg="WRF_ELEC is only supported in V3.9.1.1")
patch("patches/3.9/netcdf_backport.patch", when="@3.9.1.1")
patch("patches/3.9/tirpc_detect.patch", when="@3.9.1.1")
patch("patches/3.9/add_aarch64.patch", when="@3.9.1.1")
patch("patches/3.9/force_flags.patch", when="@3.9.1.1 %gcc@10:")
patch("patches/3.9/configure_aocc_2.3.patch", when="@3.9.1.1 %aocc@:2.4.0")
patch("patches/3.9/configure_aocc_3.0.patch", when="@3.9.1.1 %aocc@3.0.0")
patch("patches/3.9/configure_aocc_3.1.patch", when="@3.9.1.1 %aocc@3.1.0")
patch("patches/3.9/fujitsu.patch", when="@3.9.1.1 %fj")
patch("patches/3.9/add_elec_support.patch", when="@3.9.1.1+elec")
patch("patches/3.9/add_elec_changes.patch", when="@3.9.1.1+elec")
# These patches deal with netcdf & netcdf-fortran being two diff things
# Patches are based on:
# https://github.com/easybuilders/easybuild-easyconfigs/blob/master/easybuild/easyconfigs/w/WRF/WRF-3.5_netCDF-Fortran_separate_path.patch
patch("patches/4.0/arch.Config.pl.patch", when="@4.0")
patch("patches/4.0/arch.configure.defaults.patch", when="@4.0")
patch("patches/4.0/arch.conf_tokens.patch", when="@4.0")
patch("patches/4.0/arch.postamble.patch", when="@4.0")
patch("patches/4.0/configure.patch", when="@4.0")
patch("patches/4.0/external.io_netcdf.makefile.patch", when="@4.0")
patch("patches/4.0/Makefile.patch", when="@4.0")
patch("patches/4.0/tirpc_detect.patch", when="@4.0")
patch("patches/4.0/add_aarch64.patch", when="@4.0")
patch("patches/4.2/arch.Config.pl.patch", when="@4.2:")
patch("patches/4.2/arch.configure.defaults.patch", when="@4.2")
patch("patches/4.2/arch.conf_tokens.patch", when="@4.2:")
patch("patches/4.2/arch.postamble.patch", when="@4.2")
patch("patches/4.2/configure.patch", when="@4.2:")
patch("patches/4.2/external.io_netcdf.makefile.patch", when="@4.2:")
patch("patches/4.2/var.gen_be.Makefile.patch", when="@4.2:")
patch("patches/4.2/Makefile.patch", when="@4.2")
patch("patches/4.2/tirpc_detect.patch", when="@4.2")
patch("patches/4.2/add_aarch64.patch", when="@4.2:")
patch("patches/4.2/configure_aocc_2.3.patch", when="@4.2 %aocc@:2.4.0")
patch("patches/4.2/configure_aocc_3.0.patch", when="@4.2: %aocc@3.0.0:3.2.0")
patch("patches/4.2/hdf5_fix.patch", when="@4.2: %aocc")
patch("patches/4.2/derf_fix.patch", when="@4.2 %aocc")
# Various syntax fixes found by FPT tool
patch("https://github.com/wrf-model/WRF/commit/6502d5d9c15f5f9a652dec244cc12434af737c3c.patch?full_index=1",
sha256="c5162c23a132b377132924f8f1545313861c6cee5a627e9ebbdcf7b7b9d5726f", when="@4.2 %fj")
patch("patches/4.2/configure_fujitsu.patch", when="@4 %fj")
patch("patches/4.3/Makefile.patch", when="@4.3:")
patch("patches/4.3/arch.postamble.patch", when="@4.3:")
patch("patches/4.3/fujitsu.patch", when="@4.3: %fj")
# Syntax errors in physics routines
patch("https://github.com/wrf-model/WRF/commit/7c6fd575b7a8fe5715b07b38db160e606c302956.patch?full_index=1",
sha256="1ce97f4fd09e440bdf00f67711b1c50439ac27595ea6796efbfb32e0b9a1f3e4", when="@4.3.1")
patch("https://github.com/wrf-model/WRF/commit/238a7d219b7c8e285db28fe4f0c96ebe5068d91c.patch?full_index=1",
sha256="27c7268f6c84b884d21e4afad0bab8554b06961cf4d6bfd7d0f5a457dcfdffb1", when="@4.3.1")
depends_on("pkgconfig", type=("build"))
depends_on("libtirpc")
depends_on("mpi")
# According to:
# http://www2.mmm.ucar.edu/wrf/users/docs/user_guide_v4/v4.0/users_guide_chap2.html#_Required_Compilers_and_1
# Section: "Required/Optional Libraries to Download"
depends_on("parallel-netcdf", when="+pnetcdf")
depends_on("netcdf-c")
depends_on("netcdf-fortran")
depends_on("jasper")
depends_on("libpng")
depends_on("zlib")
depends_on("perl")
depends_on("jemalloc", when="%aocc")
# not sure if +fortran is required, but seems like a good idea
depends_on("hdf5+fortran+hl+mpi")
# build script use csh
depends_on("tcsh", type=("build"))
# time is not installed on all systems b/c bash provides it
# this fixes that for csh install scripts
depends_on("time", type=("build"))
depends_on("m4", type="build")
depends_on("libtool", type="build")
depends_on("boxmg4wrf", type="build", when="+elec")
depends_on("tar", type="build", when="+elec")
phases = ["configure", "build", "install"]
    def setup_run_environment(self, env):
        """Expose WRF's executables to users of this package at run time."""
        env.set("WRF_HOME", self.prefix)
        # WRF keeps its executables under main/ and tools/, not bin/.
        env.append_path("PATH", self.prefix.main)
        env.append_path("PATH", self.prefix.tools)
    def setup_build_environment(self, env):
        """Point WRF's configure script at Spack-provided dependencies.

        WRF discovers its libraries through environment variables
        (NETCDF, PNETCDF, PHDF5, JASPERINC/JASPERLIB, ...) rather than
        through conventional configure flags.
        """
        env.set("NETCDF", self.spec["netcdf-c"].prefix)
        if "+pnetcdf" in self.spec:
            env.set("PNETCDF", self.spec["parallel-netcdf"].prefix)
        # This gets used via the applied patch files
        env.set("NETCDFF", self.spec["netcdf-fortran"].prefix)
        env.set("PHDF5", self.spec["hdf5"].prefix)
        env.set("JASPERINC", self.spec["jasper"].prefix.include)
        env.set("JASPERLIB", self.spec["jasper"].prefix.lib)
        if self.spec.satisfies("%gcc@10:"):
            # GCC 10 made argument-mismatch / invalid-BOZ hard errors;
            # relax them so WRF's legacy Fortran still builds.
            args = "-w -O2 -fallow-argument-mismatch -fallow-invalid-boz"
            env.set("FCFLAGS", args)
            env.set("FFLAGS", args)
        if self.spec.satisfies("%aocc"):
            env.set("WRFIO_NCD_LARGE_FILE_SUPPORT", 1)
            env.set("HDF5", self.spec["hdf5"].prefix)
            # Ensure the AOCC compiler's own directory is on PATH.
            env.prepend_path('PATH', ancestor(self.compiler.cc))
        if self.spec.satisfies("+elec"):
            env.set("WRF_ELEC", 1)
            env.set("BOXMGLIBDIR", self.spec["boxmg4wrf"].prefix)
def patch(self):
# Let's not assume csh is intalled in bin
files = glob.glob("*.csh")
filter_file("^#!/bin/csh -f", "#!/usr/bin/env csh", *files)
filter_file("^#!/bin/csh", "#!/usr/bin/env csh", *files)
def answer_configure_question(self, outputbuf):
# Platform options question:
if "Please select from among the following" in outputbuf:
options = collect_platform_options(outputbuf)
comp_pair = "%s/%s" % (
basename(self.compiler.fc).split("-")[0],
basename(self.compiler.cc).split("-")[0],
)
compiler_matches = dict(
(x, y) for x, y in options.items() if comp_pair in x.lower()
)
if len(compiler_matches) > 1:
tty.warn("Found multiple potential build options")
try:
compiler_key = min(compiler_matches.keys(), key=len)
tty.warn("Selected build option %s." % compiler_key)
return (
"%s\n"
% compiler_matches[compiler_key][
self.spec.variants["build_type"].value
]
)
except KeyError:
InstallError(
"build_type %s unsupported for %s compilers"
% (self.spec.variants["build_type"].value, comp_pair)
)
if "Compile for nesting?" in outputbuf:
options = collect_nesting_options(outputbuf)
try:
return "%s\n" % options[self.spec.variants["nesting"].value]
except KeyError:
InstallError("Failed to parse correct nesting option")
def do_configure_fixup(self):
# Fix mpi compiler wrapper aliases
# In version 4.2 the file to be patched is called
# configure.defaults, while in earlier versions
# it's configure_new.defaults
if self.spec.satisfies("@3.9.1.1"):
config = FileFilter(join_path('arch', 'configure_new.defaults'))
else:
config = FileFilter(join_path('arch', 'configure.defaults'))
if self.spec.satisfies("@3.9.1.1 %gcc"):
config.filter(r'^DM_FC.*mpif90 -f90=\$\(SFC\)',
'DM_FC = {0}'.format(self.spec['mpi'].mpifc))
config.filter(r'^DM_CC.*mpicc -cc=\$\(SCC\)',
'DM_CC = {0}'.format(self.spec['mpi'].mpicc))
if self.spec.satisfies("%aocc"):
config.filter(
'^DM_FC.*mpif90 -DMPI2SUPPORT',
'DM_FC = {0}'.format(self.spec['mpi'].mpifc + ' -DMPI2_SUPPORT')
)
config.filter(
'^DM_.CC*mpicc -DMPI2SUPPORT',
'DM_CC = {0}'.format(self.spec['mpi'].mpicc) + ' -DMPI2_SUPPORT'
)
if self.spec.satisfies("@4.2: %intel"):
config.filter('^DM_FC.*mpif90',
'DM_FC = {0}'.format(self.spec['mpi'].mpifc))
config.filter('^DM_CC.*mpicc',
'DM_CC = {0}'.format(self.spec['mpi'].mpicc))
@run_before('configure')
def untar(self):
tar = which('tar')
tar('-xvf', 'WRFV3911_elec/elec.tgz')
    def configure(self, spec, prefix):
        """Drive WRF's interactive ./configure script.

        The script asks questions on stdin; we poll its stdout/stderr
        incrementally and reply via answer_configure_question().
        """
        # Remove broken default options...
        self.do_configure_fixup()
        if self.spec.compiler.name not in ["intel", "gcc", "aocc", "fj"]:
            raise InstallError(
                "Compiler %s not currently supported for WRF build."
                % self.spec.compiler.name
            )
        p = Popen("./configure", stdin=PIPE, stdout=PIPE, stderr=PIPE)
        if not is_windows:
            # Non-blocking reads let us poll both streams without deadlock.
            setNonBlocking(p.stdout)
            setNonBlocking(p.stderr)
        # Because of WRFs custom configure scripts that require interactive
        # input we need to parse and respond to questions. The details can
        # vary somewhat with the exact version, so try to detect and fail
        # gracefully on unexpected questions.
        stallcounter = 0
        outputbuf = ""
        while True:
            line = p.stderr.readline().decode()
            if not line:
                line = p.stdout.readline().decode()
            if not line:
                # No output: either the process exited, or it is waiting
                # on a question we did not recognize.
                if p.poll() is not None:
                    returncode = p.returncode
                    break
                if stallcounter > 300:
                    raise InstallError(
                        "Output stalled for 30s, presumably an "
                        "undetected question."
                    )
                time.sleep(0.1)  # Try to do a bit of rate limiting
                stallcounter += 1
                continue
            stdout.write(line)
            stallcounter = 0
            outputbuf += line
            if (
                "Enter selection" in outputbuf
                or "Compile for nesting" in outputbuf
            ):
                answer = self.answer_configure_question(outputbuf)
                p.stdin.write(answer.encode())
                p.stdin.flush()
                outputbuf = ""
        if returncode != 0:
            raise InstallError("Configure failed - unknown error")
@run_after("configure")
def patch_for_libmvec(self):
if self.spec.satisfies("@3.9.1.1 %aocc"):
fp = self.package_dir + "/patches/3.9/aocc_lmvec.patch"
which('patch')('-s', '-p1', '-i', '{0}'.format(fp), '-d', '.')
    def run_compile_script(self):
        """Run WRF's ./compile script through csh.

        Returns:
            bool: True iff the output contains WRF's success banner.
        """
        csh_bin = self.spec["tcsh"].prefix.bin.csh
        csh = Executable(csh_bin)
        if self.spec.satisfies("+elec"):
            # NOTE(review): elec builds are forced to -j 1 — presumably
            # not parallel-safe; confirm.
            num_jobs = str(1)
        else:
            # Limit parallelism to at most 10 jobs (WRF's own compile
            # script also caps the job count internally).
            num_jobs = str(min(int(make_jobs), 10))
        # Now run the compile script and track the output to check for
        # failure/success We need to do this because upstream use `make -i -k`
        # and the custom compile script will always return zero regardless of
        # success or failure
        result_buf = csh(
            "./compile",
            "-j",
            num_jobs,
            self.spec.variants["compile_type"].value,
            output=str,
            error=str
        )
        print(result_buf)
        if "Executables successfully built" in result_buf:
            return True
        return False
def build(self, spec, prefix):
result = self.run_compile_script()
if not result:
tty.warn(
"Compilation failed first time (WRF idiosyncrasies?) "
"- trying again..."
)
result = self.run_compile_script()
if not result:
raise InstallError(
"Compile failed. Check the output log for details."
)
    def install(self, spec, prefix):
        """Copy the entire build tree into the install prefix."""
        # Save all install files as many are needed for WPS and WRF runs
        install_tree(".", prefix)
| 39.555035 | 161 | 0.60148 |
import glob
import re
import time
from os.path import basename
from subprocess import PIPE, Popen
from sys import platform, stdout
from llnl.util import tty
from spack import *
# Windows lacks fcntl; guard the non-blocking-IO imports.
is_windows = platform == 'win32'
if not is_windows:
    from fcntl import F_GETFL, F_SETFL, fcntl
    from os import O_NONBLOCK
# Regexes for parsing WRF's interactive configure output.
# A platform option line carries numbered parallelism options such as
# "(serial)", "(smpar)", "(dmpar)", "(dm+sm)".
re_optline = re.compile(r'\s+[0-9]+\..*\((serial|smpar|dmpar|dm\+sm)\)\s+')
re_paroptname = re.compile(r'\((serial|smpar|dmpar|dm\+sm)\)')
re_paroptnum = re.compile(r'\s+([0-9]+)\.\s+\(')
# Nesting options appear as a parenthesized "number=name" list.
re_nestline = re.compile(r'\(([0-9]+=[^)0-9]+)+\)')
re_nestoptnum = re.compile(r'([0-9]+)=')
re_nestoptname = re.compile(r'=([^,)]+)')
def setNonBlocking(fd):
    """Switch the given file descriptor (or file object) to non-blocking mode."""
    current_flags = fcntl(fd, F_GETFL)
    fcntl(fd, F_SETFL, current_flags | O_NONBLOCK)
def collect_platform_options(stdoutpipe):
    """Map each platform description line to a {parallelism: option number} dict."""
    optiondict = {}
    for line in stdoutpipe.splitlines():
        if not re_optline.match(line):
            continue
        numbers = re_paroptnum.findall(line)
        names = re_paroptname.findall(line)
        # What remains after stripping the numbered options is the
        # platform/compiler description.
        platline = re_optline.sub("", line).strip()
        optiondict[platline] = dict(zip(names, numbers))
    return optiondict
def collect_nesting_options(stdoutpipe):
    """Map each nesting option name (spaces -> underscores) to its number."""
    nest_line = re_nestline.search(stdoutpipe)[0]
    numbers = re_nestoptnum.findall(nest_line)
    names = [name.replace(" ", "_")
             for name in re_nestoptname.findall(nest_line)]
    return dict(zip(names, numbers))
class Wrf(Package):
homepage = "https://www.mmm.ucar.edu/weather-research-and-forecasting-model"
url = "https://github.com/wrf-model/WRF/archive/v4.2.tar.gz"
maintainers = ["MichaelLaufer", "ptooley"]
version("4.3.3", sha256='1b98b8673513f95716c7fc54e950dfebdb582516e22758cd94bc442bccfc0b86')
version("4.3.2", sha256='2c682da0cd0fd13f57d5125eef331f9871ec6a43d860d13b0c94a07fa64348ec')
version("4.3.1", sha256='6c9a69d05ee17d2c80b3699da173cfe6fdf65487db7587c8cc96bfa9ceafce87')
version("4.2", sha256="c39a1464fd5c439134bbd39be632f7ce1afd9a82ad726737e37228c6a3d74706")
version("4.0", sha256="9718f26ee48e6c348d8e28b8bc5e8ff20eafee151334b3959a11b7320999cf65")
version("3.9.1.1", sha256="a04f5c425bedd262413ec88192a0f0896572cc38549de85ca120863c43df047a", url="https://github.com/wrf-model/WRF/archive/V3.9.1.1.tar.gz")
resource(name='elec',
url='https://master.dl.sourceforge.net/project/wrfelec/WRFV3911_elec.beta_release.01.tgz',
sha256='eaaece04711a2883f39349f0857468b42af1a6f8d0985759ce5dfde4058316b4',
when='@3.9.1.1+elec',
destination='.'
)
variant(
"build_type",
default="dmpar",
values=("serial", "smpar", "dmpar", "dm+sm"),
)
variant(
"nesting",
default="basic",
values=("no_nesting", "basic", "preset_moves", "vortex_following"),
)
variant(
"compile_type",
default="em_real",
values=(
"em_real",
"em_quarter_ss",
"em_b_wave",
"em_les",
"em_heldsuarez",
"em_tropical_cyclone",
"em_hill2d_x",
"em_squall2d_x",
"em_squall2d_y",
"em_grav2d_x",
"em_seabreeze2d_x",
"em_scm_xy",
),
)
variant(
"pnetcdf",
default=True,
description="Parallel IO support through Pnetcdf library",
)
variant(
"elec",
default=False,
description="Compile support for the storm electrification package"
+ "for the WRF-ARW"
)
conflicts("@4.0:", when="+elec",
msg="WRF_ELEC is only supported in V3.9.1.1")
patch("patches/3.9/netcdf_backport.patch", when="@3.9.1.1")
patch("patches/3.9/tirpc_detect.patch", when="@3.9.1.1")
patch("patches/3.9/add_aarch64.patch", when="@3.9.1.1")
patch("patches/3.9/force_flags.patch", when="@3.9.1.1 %gcc@10:")
patch("patches/3.9/configure_aocc_2.3.patch", when="@3.9.1.1 %aocc@:2.4.0")
patch("patches/3.9/configure_aocc_3.0.patch", when="@3.9.1.1 %aocc@3.0.0")
patch("patches/3.9/configure_aocc_3.1.patch", when="@3.9.1.1 %aocc@3.1.0")
patch("patches/3.9/fujitsu.patch", when="@3.9.1.1 %fj")
patch("patches/3.9/add_elec_support.patch", when="@3.9.1.1+elec")
patch("patches/3.9/add_elec_changes.patch", when="@3.9.1.1+elec")
patch("patches/4.0/arch.Config.pl.patch", when="@4.0")
patch("patches/4.0/arch.configure.defaults.patch", when="@4.0")
patch("patches/4.0/arch.conf_tokens.patch", when="@4.0")
patch("patches/4.0/arch.postamble.patch", when="@4.0")
patch("patches/4.0/configure.patch", when="@4.0")
patch("patches/4.0/external.io_netcdf.makefile.patch", when="@4.0")
patch("patches/4.0/Makefile.patch", when="@4.0")
patch("patches/4.0/tirpc_detect.patch", when="@4.0")
patch("patches/4.0/add_aarch64.patch", when="@4.0")
patch("patches/4.2/arch.Config.pl.patch", when="@4.2:")
patch("patches/4.2/arch.configure.defaults.patch", when="@4.2")
patch("patches/4.2/arch.conf_tokens.patch", when="@4.2:")
patch("patches/4.2/arch.postamble.patch", when="@4.2")
patch("patches/4.2/configure.patch", when="@4.2:")
patch("patches/4.2/external.io_netcdf.makefile.patch", when="@4.2:")
patch("patches/4.2/var.gen_be.Makefile.patch", when="@4.2:")
patch("patches/4.2/Makefile.patch", when="@4.2")
patch("patches/4.2/tirpc_detect.patch", when="@4.2")
patch("patches/4.2/add_aarch64.patch", when="@4.2:")
patch("patches/4.2/configure_aocc_2.3.patch", when="@4.2 %aocc@:2.4.0")
patch("patches/4.2/configure_aocc_3.0.patch", when="@4.2: %aocc@3.0.0:3.2.0")
patch("patches/4.2/hdf5_fix.patch", when="@4.2: %aocc")
patch("patches/4.2/derf_fix.patch", when="@4.2 %aocc")
patch("https://github.com/wrf-model/WRF/commit/6502d5d9c15f5f9a652dec244cc12434af737c3c.patch?full_index=1",
sha256="c5162c23a132b377132924f8f1545313861c6cee5a627e9ebbdcf7b7b9d5726f", when="@4.2 %fj")
patch("patches/4.2/configure_fujitsu.patch", when="@4 %fj")
patch("patches/4.3/Makefile.patch", when="@4.3:")
patch("patches/4.3/arch.postamble.patch", when="@4.3:")
patch("patches/4.3/fujitsu.patch", when="@4.3: %fj")
patch("https://github.com/wrf-model/WRF/commit/7c6fd575b7a8fe5715b07b38db160e606c302956.patch?full_index=1",
sha256="1ce97f4fd09e440bdf00f67711b1c50439ac27595ea6796efbfb32e0b9a1f3e4", when="@4.3.1")
patch("https://github.com/wrf-model/WRF/commit/238a7d219b7c8e285db28fe4f0c96ebe5068d91c.patch?full_index=1",
sha256="27c7268f6c84b884d21e4afad0bab8554b06961cf4d6bfd7d0f5a457dcfdffb1", when="@4.3.1")
depends_on("pkgconfig", type=("build"))
depends_on("libtirpc")
depends_on("mpi")
llel-netcdf", when="+pnetcdf")
depends_on("netcdf-c")
depends_on("netcdf-fortran")
depends_on("jasper")
depends_on("libpng")
depends_on("zlib")
depends_on("perl")
depends_on("jemalloc", when="%aocc")
depends_on("hdf5+fortran+hl+mpi")
depends_on("tcsh", type=("build"))
depends_on("time", type=("build"))
depends_on("m4", type="build")
depends_on("libtool", type="build")
depends_on("boxmg4wrf", type="build", when="+elec")
depends_on("tar", type="build", when="+elec")
phases = ["configure", "build", "install"]
def setup_run_environment(self, env):
env.set("WRF_HOME", self.prefix)
env.append_path("PATH", self.prefix.main)
env.append_path("PATH", self.prefix.tools)
def setup_build_environment(self, env):
env.set("NETCDF", self.spec["netcdf-c"].prefix)
if "+pnetcdf" in self.spec:
env.set("PNETCDF", self.spec["parallel-netcdf"].prefix)
env.set("NETCDFF", self.spec["netcdf-fortran"].prefix)
env.set("PHDF5", self.spec["hdf5"].prefix)
env.set("JASPERINC", self.spec["jasper"].prefix.include)
env.set("JASPERLIB", self.spec["jasper"].prefix.lib)
if self.spec.satisfies("%gcc@10:"):
args = "-w -O2 -fallow-argument-mismatch -fallow-invalid-boz"
env.set("FCFLAGS", args)
env.set("FFLAGS", args)
if self.spec.satisfies("%aocc"):
env.set("WRFIO_NCD_LARGE_FILE_SUPPORT", 1)
env.set("HDF5", self.spec["hdf5"].prefix)
env.prepend_path('PATH', ancestor(self.compiler.cc))
if self.spec.satisfies("+elec"):
env.set("WRF_ELEC", 1)
env.set("BOXMGLIBDIR", self.spec["boxmg4wrf"].prefix)
def patch(self):
files = glob.glob("*.csh")
filter_file("^#!/bin/csh -f", "#!/usr/bin/env csh", *files)
filter_file("^#!/bin/csh", "#!/usr/bin/env csh", *files)
def answer_configure_question(self, outputbuf):
# Platform options question:
if "Please select from among the following" in outputbuf:
options = collect_platform_options(outputbuf)
comp_pair = "%s/%s" % (
basename(self.compiler.fc).split("-")[0],
basename(self.compiler.cc).split("-")[0],
)
compiler_matches = dict(
(x, y) for x, y in options.items() if comp_pair in x.lower()
)
if len(compiler_matches) > 1:
tty.warn("Found multiple potential build options")
try:
compiler_key = min(compiler_matches.keys(), key=len)
tty.warn("Selected build option %s." % compiler_key)
return (
"%s\n"
% compiler_matches[compiler_key][
self.spec.variants["build_type"].value
]
)
except KeyError:
InstallError(
"build_type %s unsupported for %s compilers"
% (self.spec.variants["build_type"].value, comp_pair)
)
if "Compile for nesting?" in outputbuf:
options = collect_nesting_options(outputbuf)
try:
return "%s\n" % options[self.spec.variants["nesting"].value]
except KeyError:
InstallError("Failed to parse correct nesting option")
def do_configure_fixup(self):
# Fix mpi compiler wrapper aliases
# In version 4.2 the file to be patched is called
# configure.defaults, while in earlier versions
# it's configure_new.defaults
if self.spec.satisfies("@3.9.1.1"):
config = FileFilter(join_path('arch', 'configure_new.defaults'))
else:
config = FileFilter(join_path('arch', 'configure.defaults'))
if self.spec.satisfies("@3.9.1.1 %gcc"):
config.filter(r'^DM_FC.*mpif90 -f90=\$\(SFC\)',
'DM_FC = {0}'.format(self.spec['mpi'].mpifc))
config.filter(r'^DM_CC.*mpicc -cc=\$\(SCC\)',
'DM_CC = {0}'.format(self.spec['mpi'].mpicc))
if self.spec.satisfies("%aocc"):
config.filter(
'^DM_FC.*mpif90 -DMPI2SUPPORT',
'DM_FC = {0}'.format(self.spec['mpi'].mpifc + ' -DMPI2_SUPPORT')
)
config.filter(
'^DM_.CC*mpicc -DMPI2SUPPORT',
'DM_CC = {0}'.format(self.spec['mpi'].mpicc) + ' -DMPI2_SUPPORT'
)
if self.spec.satisfies("@4.2: %intel"):
config.filter('^DM_FC.*mpif90',
'DM_FC = {0}'.format(self.spec['mpi'].mpifc))
config.filter('^DM_CC.*mpicc',
'DM_CC = {0}'.format(self.spec['mpi'].mpicc))
@run_before('configure')
def untar(self):
tar = which('tar')
tar('-xvf', 'WRFV3911_elec/elec.tgz')
def configure(self, spec, prefix):
self.do_configure_fixup()
if self.spec.compiler.name not in ["intel", "gcc", "aocc", "fj"]:
raise InstallError(
"Compiler %s not currently supported for WRF build."
% self.spec.compiler.name
)
p = Popen("./configure", stdin=PIPE, stdout=PIPE, stderr=PIPE)
if not is_windows:
setNonBlocking(p.stdout)
setNonBlocking(p.stderr)
stallcounter = 0
outputbuf = ""
while True:
line = p.stderr.readline().decode()
if not line:
line = p.stdout.readline().decode()
if not line:
if p.poll() is not None:
returncode = p.returncode
break
if stallcounter > 300:
raise InstallError(
"Output stalled for 30s, presumably an "
"undetected question."
)
time.sleep(0.1)
stallcounter += 1
continue
stdout.write(line)
stallcounter = 0
outputbuf += line
if (
"Enter selection" in outputbuf
or "Compile for nesting" in outputbuf
):
answer = self.answer_configure_question(outputbuf)
p.stdin.write(answer.encode())
p.stdin.flush()
outputbuf = ""
if returncode != 0:
raise InstallError("Configure failed - unknown error")
@run_after("configure")
def patch_for_libmvec(self):
if self.spec.satisfies("@3.9.1.1 %aocc"):
fp = self.package_dir + "/patches/3.9/aocc_lmvec.patch"
which('patch')('-s', '-p1', '-i', '{0}'.format(fp), '-d', '.')
def run_compile_script(self):
csh_bin = self.spec["tcsh"].prefix.bin.csh
csh = Executable(csh_bin)
if self.spec.satisfies("+elec"):
num_jobs = str(1)
else:
num_jobs = str(min(int(make_jobs), 10))
result_buf = csh(
"./compile",
"-j",
num_jobs,
self.spec.variants["compile_type"].value,
output=str,
error=str
)
print(result_buf)
if "Executables successfully built" in result_buf:
return True
return False
def build(self, spec, prefix):
result = self.run_compile_script()
if not result:
tty.warn(
"Compilation failed first time (WRF idiosyncrasies?) "
"- trying again..."
)
result = self.run_compile_script()
if not result:
raise InstallError(
"Compile failed. Check the output log for details."
)
def install(self, spec, prefix):
install_tree(".", prefix)
| true | true |
f71513d9753f7f0ac33b142e11457712ae4430d5 | 6,367 | py | Python | robel/components/tracking/group_config.py | Del9fina/robel | 63dfac65932757134e5766f1e20a339efe281bc7 | [
"Apache-2.0"
] | 109 | 2019-08-29T22:55:41.000Z | 2022-03-19T18:26:37.000Z | robel/components/tracking/group_config.py | Del9fina/robel | 63dfac65932757134e5766f1e20a339efe281bc7 | [
"Apache-2.0"
] | 12 | 2019-11-14T05:16:00.000Z | 2021-02-21T07:49:32.000Z | robel/components/tracking/group_config.py | Del9fina/robel | 63dfac65932757134e5766f1e20a339efe281bc7 | [
"Apache-2.0"
] | 40 | 2019-09-29T06:50:44.000Z | 2022-03-19T18:34:20.000Z | # Copyright 2019 The ROBEL Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Configuration for a tracker component group."""
from typing import Iterable, Optional
import numpy as np
from transforms3d.euler import euler2mat, quat2euler
from transforms3d.quaternions import quat2mat
from robel.simulation.sim_scene import SimScene
class TrackerGroupConfig:
"""Group configuration for a TrackerComponent."""
    def __init__(self,
                 sim_scene: SimScene,
                 element_name: Optional[str] = None,
                 element_type: Optional[str] = None,
                 qpos_indices: Optional[Iterable[int]] = None,
                 qvel_indices: Optional[Iterable[int]] = None,
                 sim_observation_noise: Optional[float] = None):
        """Initializes a group configuration for a TrackerComponent.

        Args:
            sim_scene: The simulation, used for validation purposes.
            element_name: The name of the element to use for tracking in
                simulation.
            element_type: The type of the element as defined in the XML.
                Should be one of `site`, `body`, `geom`, or `joint`. If this is
                `joint`, `qpos_indices` and `qvel_indices` should be
                provided.
            qpos_indices: The indices into `MjData.qpos` to read for the
                joint element position and rotation.
            qvel_indices: The indices into `MjData.qvel` to read for the joint
                element velocity. This defaults to `qpos_indices`.
            sim_observation_noise: The range of the observation noise (in
                meters) to apply to the state in simulation.
        """
        self.element_type = element_type
        if self.element_type not in ['site', 'body', 'geom', 'joint']:
            raise ValueError('Unknown element type %s' % self.element_type)
        self.element_name = element_name
        self.element_id = None
        self.element_attr = None
        self.qpos_indices = None
        self.qvel_indices = None
        # True when the joint stores orientation as 3 Euler angles (6 qpos
        # values total) rather than as a quaternion (7 qpos values).
        self._is_euler = False
        if self.element_type == 'joint':
            if qpos_indices is None:
                raise ValueError('Must provided qpos_indices for joints.')
            # Ensure that the qpos indices are valid.
            nq = sim_scene.model.nq
            assert all(-nq <= i < nq for i in qpos_indices), \
                'All qpos indices must be in [-{}, {}]'.format(nq, nq - 1)
            self.qpos_indices = np.array(qpos_indices, dtype=int)
            if len(self.qpos_indices) == 6:
                self._is_euler = True
            elif len(self.qpos_indices) != 7:
                raise ValueError('qpos_indices must be 6 or 7 elements.')
            if qvel_indices is None:
                if not self._is_euler:
                    raise ValueError(
                        'qvel_indices must be provided for free joints.')
                # Euler-parameterized joints: qvel lines up with qpos.
                qvel_indices = qpos_indices
            # Ensure that the qvel indices are valid.
            nv = sim_scene.model.nv
            assert all(-nv <= i < nv for i in qvel_indices), \
                'All qvel indices must be in [-{}, {}]'.format(nv, nv - 1)
            self.qvel_indices = np.array(qvel_indices, dtype=int)
        else:
            # e.g. for 'site': element_attr(data, 'xpos') -> data.site_xpos.
            self.element_attr = (lambda obj, attr_name: getattr(
                obj, self.element_type + '_' + attr_name))
            self.element_id = self.element_attr(sim_scene.model, 'name2id')(
                element_name)
        self.sim_observation_noise = sim_observation_noise
    def get_pos(self, sim_scene: SimScene) -> np.ndarray:
        """Returns the cartesian position of the element."""
        if self.qpos_indices is not None:
            # Joint-tracked: the first three qpos values are the translation.
            return sim_scene.data.qpos[self.qpos_indices[:3]]
        return self.element_attr(sim_scene.data, 'xpos')[self.element_id, :]

    def get_rot(self, sim_scene: SimScene) -> np.ndarray:
        """Returns the (3x3) rotation matrix of the element."""
        if self.qpos_indices is not None:
            # Remaining qpos values hold the orientation (Euler or quat).
            qpos = sim_scene.data.qpos[self.qpos_indices[3:]]
            if self._is_euler:
                return euler2mat(*qpos, axes='rxyz')
            return quat2mat(qpos)
        return self.element_attr(sim_scene.data,
                                 'xmat')[self.element_id].reshape((3, 3))

    def get_vel(self, sim_scene: SimScene) -> np.ndarray:
        """Returns the cartesian velocity of the element."""
        if self.qvel_indices is not None:
            return sim_scene.data.qvel[self.qvel_indices[:3]]
        raise NotImplementedError('Cartesian velocity is not supported for ' +
                                  self.element_type)

    def get_angular_vel(self, sim_scene: SimScene) -> np.ndarray:
        """Returns the angular velocity (x, y, z) of the element."""
        if self.qvel_indices is not None:
            return sim_scene.data.qvel[self.qvel_indices[3:]]
        raise NotImplementedError('Angular velocity is not supported for ' +
                                  self.element_type)

    def set_pos(self, sim_scene: SimScene, pos: np.ndarray):
        """Sets the cartesian position of the element."""
        if self.qpos_indices is not None:
            # Allows partial updates (e.g. only x/y) via a shorter `pos`.
            sim_scene.data.qpos[self.qpos_indices[:len(pos)]] = pos
            return
        self.element_attr(sim_scene.model,
                          'pos')[self.element_id, :len(pos)] = pos
    def set_rot_quat(self, sim_scene: SimScene, quat: np.ndarray):
        """Sets the rotation of the element from a quaternion."""
        # (Docstring previously said "cartesian position" — copy-paste
        # error; this method writes the orientation.)
        if self.qpos_indices is not None:
            qpos = quat
            if self._is_euler:
                # Euler-parameterized joints store angles, so convert first.
                qpos = quat2euler(quat, axes='rxyz')
            sim_scene.data.qpos[self.qpos_indices[3:]] = qpos
            return
        self.element_attr(sim_scene.model, 'quat')[self.element_id, :] = quat
| 44.215278 | 79 | 0.613162 |
from typing import Iterable, Optional
import numpy as np
from transforms3d.euler import euler2mat, quat2euler
from transforms3d.quaternions import quat2mat
from robel.simulation.sim_scene import SimScene
class TrackerGroupConfig:
def __init__(self,
sim_scene: SimScene,
element_name: Optional[str] = None,
element_type: Optional[str] = None,
qpos_indices: Optional[Iterable[int]] = None,
qvel_indices: Optional[Iterable[int]] = None,
sim_observation_noise: Optional[float] = None):
self.element_type = element_type
if self.element_type not in ['site', 'body', 'geom', 'joint']:
raise ValueError('Unknown element type %s' % self.element_type)
self.element_name = element_name
self.element_id = None
self.element_attr = None
self.qpos_indices = None
self.qvel_indices = None
self._is_euler = False
if self.element_type == 'joint':
if qpos_indices is None:
raise ValueError('Must provided qpos_indices for joints.')
nq = sim_scene.model.nq
assert all(-nq <= i < nq for i in qpos_indices), \
'All qpos indices must be in [-{}, {}]'.format(nq, nq - 1)
self.qpos_indices = np.array(qpos_indices, dtype=int)
if len(self.qpos_indices) == 6:
self._is_euler = True
elif len(self.qpos_indices) != 7:
raise ValueError('qpos_indices must be 6 or 7 elements.')
if qvel_indices is None:
if not self._is_euler:
raise ValueError(
'qvel_indices must be provided for free joints.')
qvel_indices = qpos_indices
nv = sim_scene.model.nv
assert all(-nv <= i < nv for i in qvel_indices), \
'All qvel indices must be in [-{}, {}]'.format(nv, nv - 1)
self.qvel_indices = np.array(qvel_indices, dtype=int)
else:
self.element_attr = (lambda obj, attr_name: getattr(
obj, self.element_type + '_' + attr_name))
self.element_id = self.element_attr(sim_scene.model, 'name2id')(
element_name)
self.sim_observation_noise = sim_observation_noise
def get_pos(self, sim_scene: SimScene) -> np.ndarray:
if self.qpos_indices is not None:
return sim_scene.data.qpos[self.qpos_indices[:3]]
return self.element_attr(sim_scene.data, 'xpos')[self.element_id, :]
def get_rot(self, sim_scene: SimScene) -> np.ndarray:
if self.qpos_indices is not None:
qpos = sim_scene.data.qpos[self.qpos_indices[3:]]
if self._is_euler:
return euler2mat(*qpos, axes='rxyz')
return quat2mat(qpos)
return self.element_attr(sim_scene.data,
'xmat')[self.element_id].reshape((3, 3))
def get_vel(self, sim_scene: SimScene) -> np.ndarray:
if self.qvel_indices is not None:
return sim_scene.data.qvel[self.qvel_indices[:3]]
raise NotImplementedError('Cartesian velocity is not supported for ' +
self.element_type)
def get_angular_vel(self, sim_scene: SimScene) -> np.ndarray:
if self.qvel_indices is not None:
return sim_scene.data.qvel[self.qvel_indices[3:]]
raise NotImplementedError('Angular velocity is not supported for ' +
self.element_type)
def set_pos(self, sim_scene: SimScene, pos: np.ndarray):
if self.qpos_indices is not None:
sim_scene.data.qpos[self.qpos_indices[:len(pos)]] = pos
return
self.element_attr(sim_scene.model,
'pos')[self.element_id, :len(pos)] = pos
    def set_rot_quat(self, sim_scene: SimScene, quat: np.ndarray):
        """Sets the rotation of the element from a quaternion."""
        if self.qpos_indices is not None:
            qpos = quat
            if self._is_euler:
                # Euler-parameterized joints store angles, so convert first.
                qpos = quat2euler(quat, axes='rxyz')
            sim_scene.data.qpos[self.qpos_indices[3:]] = qpos
            return
        self.element_attr(sim_scene.model, 'quat')[self.element_id, :] = quat
| true | true |
f7151463007effd34f97b1411f3c8126fe6337ed | 26,377 | py | Python | geopandas/tests/test_geom_methods.py | raphacosta27/geopandas | 2c22a26bd40ec48536026b160c54c6fe523d22d7 | [
"BSD-3-Clause"
] | null | null | null | geopandas/tests/test_geom_methods.py | raphacosta27/geopandas | 2c22a26bd40ec48536026b160c54c6fe523d22d7 | [
"BSD-3-Clause"
] | null | null | null | geopandas/tests/test_geom_methods.py | raphacosta27/geopandas | 2c22a26bd40ec48536026b160c54c6fe523d22d7 | [
"BSD-3-Clause"
] | 2 | 2020-02-18T13:25:58.000Z | 2021-02-15T21:25:07.000Z | import string
import numpy as np
from numpy.testing import assert_array_equal
from pandas import DataFrame, MultiIndex, Series
from shapely.geometry import LinearRing, LineString, MultiPoint, Point, Polygon
from shapely.geometry.collection import GeometryCollection
from shapely.ops import unary_union
from geopandas import GeoDataFrame, GeoSeries
from geopandas.base import GeoPandasBase
from geopandas.tests.util import assert_geoseries_equal, geom_almost_equals, geom_equals
from pandas.testing import assert_frame_equal, assert_series_equal
import pytest
def assert_array_dtype_equal(a, b, *args, **kwargs):
    """Assert that ``a`` and ``b`` are equal element-wise and share a dtype."""
    left = np.asanyarray(a)
    right = np.asanyarray(b)
    # The dtypes must match exactly before the values are compared.
    assert left.dtype == right.dtype
    assert_array_equal(left, right, *args, **kwargs)
class TestGeomMethods:
    def setup_method(self):
        """Build the shared geometry fixtures used across the tests."""
        # Triangles and squares inside the unit box.
        self.t1 = Polygon([(0, 0), (1, 0), (1, 1)])
        self.t2 = Polygon([(0, 0), (1, 1), (0, 1)])
        self.t3 = Polygon([(2, 0), (3, 0), (3, 1)])
        self.sq = Polygon([(0, 0), (1, 0), (1, 1), (0, 1)])
        self.inner_sq = Polygon(
            [(0.25, 0.25), (0.75, 0.25), (0.75, 0.75), (0.25, 0.75)]
        )
        # A square with a smaller square hole punched into it.
        self.nested_squares = Polygon(self.sq.boundary, [self.inner_sq.boundary])
        self.p0 = Point(5, 5)
        self.p3d = Point(5, 5, 5)
        self.g0 = GeoSeries(
            [
                self.t1,
                self.t2,
                self.sq,
                self.inner_sq,
                self.nested_squares,
                self.p0,
                None,
            ]
        )
        self.g1 = GeoSeries([self.t1, self.sq])
        self.g2 = GeoSeries([self.sq, self.t1])
        self.g3 = GeoSeries([self.t1, self.t2])
        self.g3.crs = "epsg:4326"
        self.g4 = GeoSeries([self.t2, self.t1])
        self.g4.crs = "epsg:4326"
        self.g_3d = GeoSeries([self.p0, self.p3d])
        # Series containing empty / missing geometries.
        self.na = GeoSeries([self.t1, self.t2, Polygon()])
        self.na_none = GeoSeries([self.t1, None])
        # Same data as g1/g2 but with partially-overlapping string indices.
        self.a1 = self.g1.copy()
        self.a1.index = ["A", "B"]
        self.a2 = self.g2.copy()
        self.a2.index = ["B", "C"]
        # Real-world points (esb/sol: presumably Empire State Building and
        # Statue of Liberty) with a geographic CRS.
        self.esb = Point(-73.9847, 40.7484)
        self.sol = Point(-74.0446, 40.6893)
        self.landmarks = GeoSeries([self.esb, self.sol], crs="epsg:4326")
        self.l1 = LineString([(0, 0), (0, 1), (1, 1)])
        self.l2 = LineString([(0, 0), (1, 0), (1, 1), (0, 1)])
        self.g5 = GeoSeries([self.l1, self.l2])
        self.g6 = GeoSeries([self.p0, self.t3])
        self.empty = GeoSeries([])
        self.all_none = GeoSeries([None, None])
        self.empty_poly = Polygon()
        # Crossed lines
        self.l3 = LineString([(0, 0), (1, 1)])
        self.l4 = LineString([(0, 1), (1, 0)])
        self.crossed_lines = GeoSeries([self.l3, self.l4])
        # Placeholder for testing, will just drop in different geometries
        # when needed
        self.gdf1 = GeoDataFrame(
            {"geometry": self.g1, "col0": [1.0, 2.0], "col1": ["geo", "pandas"]}
        )
        self.gdf2 = GeoDataFrame(
            {"geometry": self.g1, "col3": [4, 5], "col4": ["rand", "string"]}
        )
        self.gdf3 = GeoDataFrame(
            {"geometry": self.g3, "col3": [4, 5], "col4": ["rand", "string"]}
        )
    def _test_unary_real(self, op, expected, a):
        """ Tests for 'area', 'length', 'is_valid', etc. """
        # Real-valued results compare as plain pandas Series.
        fcmp = assert_series_equal
        self._test_unary(op, expected, a, fcmp)

    def _test_unary_topological(self, op, expected, a):
        """Tests for unary attributes that yield geometries ('centroid', ...)."""
        if isinstance(expected, GeoPandasBase):
            fcmp = assert_geoseries_equal
        else:
            # Scalar geometry expected: fall back to shapely equality.
            def fcmp(a, b):
                assert a.equals(b)

        self._test_unary(op, expected, a, fcmp)
    def _test_binary_topological(self, op, expected, a, b, *args, **kwargs):
        """ Tests for 'intersection', 'union', 'symmetric_difference', etc. """
        if isinstance(expected, GeoPandasBase):
            fcmp = assert_geoseries_equal
        else:
            # Scalar geometry expected: compare with geometric equality.
            def fcmp(a, b):
                assert geom_equals(a, b)

        # Only re-test with a GeoDataFrame right-hand side when the right
        # operand is itself a geopandas object.
        if isinstance(b, GeoPandasBase):
            right_df = True
        else:
            right_df = False

        self._binary_op_test(op, expected, a, b, fcmp, True, right_df, *args, **kwargs)

    def _test_binary_real(self, op, expected, a, b, *args, **kwargs):
        """Tests for binary operations with real-valued results ('distance', ...)."""
        fcmp = assert_series_equal
        self._binary_op_test(op, expected, a, b, fcmp, True, False, *args, **kwargs)
    def _test_binary_operator(self, op, expected, a, b):
        """
        The operators only have GeoSeries on the left, but can have
        GeoSeries or GeoDataFrame on the right.
        If GeoDataFrame is on the left, geometry column is used.
        """
        if isinstance(expected, GeoPandasBase):
            fcmp = assert_geoseries_equal
        else:
            # Scalar geometry expected: compare with geometric equality.
            def fcmp(a, b):
                assert geom_equals(a, b)

        if isinstance(b, GeoPandasBase):
            right_df = True
        else:
            right_df = False

        # left_df=False: operators are not exercised with a GeoDataFrame
        # on the left-hand side.
        self._binary_op_test(op, expected, a, b, fcmp, False, right_df)
    def _binary_op_test(
        self, op, expected, left, right, fcmp, left_df, right_df, *args, **kwargs
    ):
        """
        This is a helper to call a function on GeoSeries and GeoDataFrame
        arguments. For example, 'intersection' is a member of both GeoSeries
        and GeoDataFrame and can take either GeoSeries or GeoDataFrame inputs.
        This function has the ability to test all four combinations of input
        types.

        Parameters
        ----------
        op : str
            The operation to be tested. e.g., 'intersection'
        expected :
            The expected result of the operation.
        left: GeoSeries
        right: GeoSeries
        fcmp: function
            Called with the result of the operation and expected. It should
            assert if the result is incorrect
        left_df: bool
            If the left input should also be called with a GeoDataFrame
        right_df: bool
            Indicates whether the right input should be called with a
            GeoDataFrame
        """

        def _make_gdf(s):
            # Wrap the GeoSeries in a GeoDataFrame with two dummy columns,
            # preserving index and CRS.
            n = len(s)
            col1 = string.ascii_lowercase[:n]
            col2 = range(n)

            return GeoDataFrame(
                {"geometry": s.values, "col1": col1, "col2": col2},
                index=s.index,
                crs=s.crs,
            )

        # Test GeoSeries.op(GeoSeries)
        result = getattr(left, op)(right, *args, **kwargs)
        fcmp(result, expected)

        if left_df:
            # Test GeoDataFrame.op(GeoSeries)
            gdf_left = _make_gdf(left)
            result = getattr(gdf_left, op)(right, *args, **kwargs)
            fcmp(result, expected)

        if right_df:
            # Test GeoSeries.op(GeoDataFrame)
            gdf_right = _make_gdf(right)
            result = getattr(left, op)(gdf_right, *args, **kwargs)
            fcmp(result, expected)

            if left_df:
                # Test GeoDataFrame.op(GeoDataFrame)
                result = getattr(gdf_left, op)(gdf_right, *args, **kwargs)
                fcmp(result, expected)
def _test_unary(self, op, expected, a, fcmp):
    """Read property *op* from a GeoSeries and from a GeoDataFrame."""
    # GeoSeries, (GeoSeries or geometry)
    fcmp(getattr(a, op), expected)

    # GeoDataFrame, (GeoSeries or geometry)
    frame = self.gdf1.set_geometry(a)
    fcmp(getattr(frame, op), expected)
# TODO reenable for all operations once we use pyproj > 2
# def test_crs_warning(self):
#     # operations on geometries should warn for different CRS
#     no_crs_g3 = self.g3.copy()
#     no_crs_g3.crs = None
#     with pytest.warns(UserWarning):
#         self._test_binary_topological('intersection', self.g3,
#                                       self.g3, no_crs_g3)

def test_intersection(self):
    """Intersection of overlapping geoms; mismatched indices warn."""
    self._test_binary_topological("intersection", self.t1, self.g1, self.g2)
    # operating against an empty series with different indices should warn
    with pytest.warns(UserWarning, match="The indices .+ different"):
        self._test_binary_topological(
            "intersection", self.all_none, self.g1, self.empty
        )

def test_union_series(self):
    """GeoSeries.union with a GeoSeries right-hand side."""
    self._test_binary_topological("union", self.sq, self.g1, self.g2)

def test_union_polygon(self):
    """GeoSeries.union with a single polygon right-hand side."""
    self._test_binary_topological("union", self.sq, self.g1, self.t2)

def test_symmetric_difference_series(self):
    """symmetric_difference with a GeoSeries right-hand side."""
    self._test_binary_topological("symmetric_difference", self.sq, self.g3, self.g4)

def test_symmetric_difference_poly(self):
    """symmetric_difference with a polygon; CRS is carried through."""
    expected = GeoSeries([GeometryCollection(), self.sq], crs=self.g3.crs)
    self._test_binary_topological(
        "symmetric_difference", expected, self.g3, self.t1
    )

def test_difference_series(self):
    """difference with a GeoSeries right-hand side."""
    expected = GeoSeries([GeometryCollection(), self.t2])
    self._test_binary_topological("difference", expected, self.g1, self.g2)

def test_difference_poly(self):
    """difference with a single polygon right-hand side."""
    expected = GeoSeries([self.t1, self.t1])
    self._test_binary_topological("difference", expected, self.g1, self.t2)
def test_geo_op_empty_result(self):
    """Ops with no geometric result yield an empty GeometryCollection."""
    l1 = LineString([(0, 0), (1, 1)])
    l2 = LineString([(2, 2), (3, 3)])
    expected = GeoSeries([GeometryCollection()])
    # binary geo resulting in empty geometry
    result = GeoSeries([l1]).intersection(l2)
    assert_geoseries_equal(result, expected)
    # binary geo empty result with right GeoSeries
    result = GeoSeries([l1]).intersection(GeoSeries([l2]))
    assert_geoseries_equal(result, expected)
    # unary geo resulting in empty geometry
    result = GeoSeries([GeometryCollection()]).convex_hull
    assert_geoseries_equal(result, expected)

def test_boundary(self):
    """Polygon boundaries are their exterior rings as LineStrings."""
    l1 = LineString([(0, 0), (1, 0), (1, 1), (0, 0)])
    l2 = LineString([(0, 0), (1, 0), (1, 1), (0, 1), (0, 0)])
    expected = GeoSeries([l1, l2], index=self.g1.index, crs=self.g1.crs)
    self._test_unary_topological("boundary", expected, self.g1)

def test_area(self):
    """Area of triangle/square; missing geometry produces NaN."""
    expected = Series(np.array([0.5, 1.0]), index=self.g1.index)
    self._test_unary_real("area", expected, self.g1)

    expected = Series(np.array([0.5, np.nan]), index=self.na_none.index)
    self._test_unary_real("area", expected, self.na_none)
def test_bounds(self):
    """``bounds`` returns a minx/miny/maxx/maxy DataFrame per geometry."""
    # Set columns to get the order right
    expected = DataFrame(
        {
            "minx": [0.0, 0.0],
            "miny": [0.0, 0.0],
            "maxx": [1.0, 1.0],
            "maxy": [1.0, 1.0],
        },
        index=self.g1.index,
        columns=["minx", "miny", "maxx", "maxy"],
    )

    result = self.g1.bounds
    assert_frame_equal(expected, result)

    gdf = self.gdf1.set_geometry(self.g1)
    result = gdf.bounds
    assert_frame_equal(expected, result)

def test_bounds_empty(self):
    # test bounds of empty GeoSeries
    # https://github.com/geopandas/geopandas/issues/1195
    s = GeoSeries([])
    result = s.bounds
    expected = DataFrame(
        columns=["minx", "miny", "maxx", "maxy"], index=s.index, dtype="float64"
    )
    assert_frame_equal(result, expected)

def test_unary_union(self):
    """``unary_union`` matches shapely's unary_union of the members."""
    p1 = self.t1
    p2 = Polygon([(2, 0), (3, 0), (3, 1)])
    expected = unary_union([p1, p2])
    g = GeoSeries([p1, p2])

    self._test_unary_topological("unary_union", expected, g)
def test_contains(self):
    """Element-wise ``contains`` against a single triangle."""
    expected = [True, False, True, False, False, False, False]
    assert_array_dtype_equal(expected, self.g0.contains(self.t1))

def test_length(self):
    """Perimeter length; missing geometry produces NaN."""
    expected = Series(np.array([2 + np.sqrt(2), 4]), index=self.g1.index)
    self._test_unary_real("length", expected, self.g1)

    expected = Series(np.array([2 + np.sqrt(2), np.nan]), index=self.na_none.index)
    self._test_unary_real("length", expected, self.na_none)

def test_crosses(self):
    """``crosses`` is False for these polygons, True for crossing lines."""
    expected = [False, False, False, False, False, False, False]
    assert_array_dtype_equal(expected, self.g0.crosses(self.t1))

    expected = [False, True]
    assert_array_dtype_equal(expected, self.crossed_lines.crosses(self.l3))

def test_disjoint(self):
    """Only the distant point is disjoint from the triangle."""
    expected = [False, False, False, False, False, True, False]
    assert_array_dtype_equal(expected, self.g0.disjoint(self.t1))

def test_relate(self):
    """DE-9IM relate strings; None geometries relate to None."""
    expected = Series(
        [
            "212101212",
            "212101212",
            "212FF1FF2",
            "2FFF1FFF2",
            "FF2F112F2",
            "FF0FFF212",
            None,
        ],
        index=self.g0.index,
    )
    assert_array_dtype_equal(expected, self.g0.relate(self.inner_sq))

    expected = Series(["FF0FFF212", None], index=self.g6.index)
    assert_array_dtype_equal(expected, self.g6.relate(self.na_none))

def test_distance(self):
    """Euclidean distance; NaN where either side is missing."""
    expected = Series(
        np.array([np.sqrt((5 - 1) ** 2 + (5 - 1) ** 2), np.nan]), self.na_none.index
    )
    assert_array_dtype_equal(expected, self.na_none.distance(self.p0))

    expected = Series(np.array([np.sqrt(4 ** 2 + 4 ** 2), np.nan]), self.g6.index)
    assert_array_dtype_equal(expected, self.g6.distance(self.na_none))

def test_intersects(self):
    """``intersects`` incl. empty series and empty-polygon operands."""
    expected = [True, True, True, True, True, False, False]
    assert_array_dtype_equal(expected, self.g0.intersects(self.t1))

    expected = [True, False]
    assert_array_dtype_equal(expected, self.na_none.intersects(self.t2))

    expected = np.array([], dtype=bool)
    assert_array_dtype_equal(expected, self.empty.intersects(self.t1))

    expected = np.array([], dtype=bool)
    assert_array_dtype_equal(expected, self.empty.intersects(self.empty_poly))

    expected = [False] * 7
    assert_array_dtype_equal(expected, self.g0.intersects(self.empty_poly))

def test_overlaps(self):
    """Only the triangles overlap the inner square."""
    expected = [True, True, False, False, False, False, False]
    assert_array_dtype_equal(expected, self.g0.overlaps(self.inner_sq))

    expected = [False, False]
    assert_array_dtype_equal(expected, self.g4.overlaps(self.t1))

def test_touches(self):
    """Only t2 touches t1 (shared edge, no interior overlap)."""
    expected = [False, True, False, False, False, False, False]
    assert_array_dtype_equal(expected, self.g0.touches(self.t1))

def test_within(self):
    """``within`` the triangle vs. within the enclosing square."""
    expected = [True, False, False, False, False, False, False]
    assert_array_dtype_equal(expected, self.g0.within(self.t1))

    expected = [True, True, True, True, True, False, False]
    assert_array_dtype_equal(expected, self.g0.within(self.sq))
def test_is_valid(self):
    """All fixture polygons are valid."""
    expected = Series(np.array([True] * len(self.g1)), self.g1.index)
    self._test_unary_real("is_valid", expected, self.g1)

def test_is_empty(self):
    """None of the fixture polygons are empty."""
    expected = Series(np.array([False] * len(self.g1)), self.g1.index)
    self._test_unary_real("is_empty", expected, self.g1)

def test_is_ring(self):
    """Exterior rings of simple polygons are rings."""
    expected = Series(np.array([True] * len(self.g1)), self.g1.index)
    self._test_unary_real("is_ring", expected, self.g1)

def test_is_simple(self):
    """The fixture polygons are simple (no self-intersection)."""
    expected = Series(np.array([True] * len(self.g1)), self.g1.index)
    self._test_unary_real("is_simple", expected, self.g1)

def test_has_z(self):
    """Only the 3-D point reports has_z."""
    expected = Series([False, True], self.g_3d.index)
    self._test_unary_real("has_z", expected, self.g_3d)

def test_xy_points(self):
    """``x``/``y`` accessors on a point GeoSeries."""
    expected_x = [-73.9847, -74.0446]
    expected_y = [40.7484, 40.6893]

    assert_array_dtype_equal(expected_x, self.landmarks.geometry.x)
    assert_array_dtype_equal(expected_y, self.landmarks.geometry.y)

def test_xy_polygons(self):
    # accessing x attribute in polygon geoseries should raise an error
    with pytest.raises(ValueError):
        _ = self.gdf1.geometry.x
    # and same for accessing y attribute in polygon geoseries
    with pytest.raises(ValueError):
        _ = self.gdf1.geometry.y
def test_centroid(self):
    """Centroid of a symmetric square is the origin."""
    polygon = Polygon([(-1, -1), (1, -1), (1, 1), (-1, 1)])
    point = Point(0, 0)
    polygons = GeoSeries([polygon for i in range(3)])
    points = GeoSeries([point for i in range(3)])
    assert_geoseries_equal(polygons.centroid, points)

def test_convex_hull(self):
    # the convex hull of a square should be the same as the square
    squares = GeoSeries([self.sq for i in range(3)])
    assert_geoseries_equal(squares, squares.convex_hull)

def test_exterior(self):
    """``exterior`` yields each polygon's boundary as a LinearRing."""
    exp_exterior = GeoSeries([LinearRing(p.boundary) for p in self.g3])
    for expected, computed in zip(exp_exterior, self.g3.exterior):
        assert computed.equals(expected)

def test_interiors(self):
    """``interiors`` is empty for solid polygons, rings for holes."""
    original = GeoSeries([self.t1, self.nested_squares])

    # This is a polygon with no interior.
    expected = []
    assert original.interiors[0] == expected
    # This is a polygon with an interior.
    expected = LinearRing(self.inner_sq.boundary)
    assert original.interiors[1][0].equals(expected)
def test_interpolate(self):
    """Point interpolation along lines, absolute and normalized."""
    expected = GeoSeries([Point(0.5, 1.0), Point(0.75, 1.0)])
    self._test_binary_topological(
        "interpolate", expected, self.g5, 0.75, normalized=True
    )

    expected = GeoSeries([Point(0.5, 1.0), Point(1.0, 0.5)])
    self._test_binary_topological("interpolate", expected, self.g5, 1.5)

def test_interpolate_distance_array(self):
    """Interpolation with per-geometry distances from an array."""
    expected = GeoSeries([Point(0.0, 0.75), Point(1.0, 0.5)])
    self._test_binary_topological(
        "interpolate", expected, self.g5, np.array([0.75, 1.5])
    )

    expected = GeoSeries([Point(0.5, 1.0), Point(0.0, 1.0)])
    self._test_binary_topological(
        "interpolate", expected, self.g5, np.array([0.75, 1.5]), normalized=True
    )

def test_interpolate_distance_wrong_length(self):
    """A distance array of the wrong length raises ValueError."""
    distances = np.array([1, 2, 3])
    with pytest.raises(ValueError):
        self.g5.interpolate(distances)

def test_interpolate_distance_wrong_index(self):
    """A distance Series with a mismatched index raises ValueError."""
    distances = Series([1, 2], index=[99, 98])
    with pytest.raises(ValueError):
        self.g5.interpolate(distances)

def test_project(self):
    """``project`` returns distances along each line to a point."""
    expected = Series([2.0, 1.5], index=self.g5.index)
    p = Point(1.0, 0.5)
    self._test_binary_real("project", expected, self.g5, p)

    expected = Series([1.0, 0.5], index=self.g5.index)
    self._test_binary_real("project", expected, self.g5, p, normalized=True)
def test_affine_transform(self):
    # 45 degree reflection matrix
    matrix = [0, 1, 1, 0, 0, 0]
    expected = self.g4

    res = self.g3.affine_transform(matrix)
    assert_geoseries_equal(expected, res)

def test_translate_tuple(self):
    """Translating one landmark onto the other, via series and frame."""
    trans = self.sol.x - self.esb.x, self.sol.y - self.esb.y
    assert self.landmarks.translate(*trans)[0].equals(self.sol)

    res = self.gdf1.set_geometry(self.landmarks).translate(*trans)[0]
    assert res.equals(self.sol)

def test_rotate(self):
    """Rotating by an angle then back is (almost) the identity."""
    angle = 98
    expected = self.g4

    o = Point(0, 0)
    res = self.g4.rotate(angle, origin=o).rotate(-angle, origin=o)
    assert geom_almost_equals(self.g4, res)

    res = self.gdf1.set_geometry(self.g4).rotate(angle, origin=Point(0, 0))
    assert geom_almost_equals(expected, res.rotate(-angle, origin=o))

def test_scale(self):
    """Scaling then applying the inverse scale restores the input."""
    expected = self.g4

    scale = 2.0, 1.0
    inv = tuple(1.0 / i for i in scale)

    o = Point(0, 0)
    res = self.g4.scale(*scale, origin=o).scale(*inv, origin=o)
    assert geom_almost_equals(expected, res)

    res = self.gdf1.set_geometry(self.g4).scale(*scale, origin=o)
    res = res.scale(*inv, origin=o)
    assert geom_almost_equals(expected, res)

def test_skew(self):
    """Skewing then un-skewing (x and y separately) is the identity."""
    expected = self.g4

    skew = 45.0
    o = Point(0, 0)

    # Test xs
    res = self.g4.skew(xs=skew, origin=o).skew(xs=-skew, origin=o)
    assert geom_almost_equals(expected, res)

    res = self.gdf1.set_geometry(self.g4).skew(xs=skew, origin=o)
    res = res.skew(xs=-skew, origin=o)
    assert geom_almost_equals(expected, res)

    # Test ys
    res = self.g4.skew(ys=skew, origin=o).skew(ys=-skew, origin=o)
    assert geom_almost_equals(expected, res)

    res = self.gdf1.set_geometry(self.g4).skew(ys=skew, origin=o)
    res = res.skew(ys=-skew, origin=o)
    assert geom_almost_equals(expected, res)
def test_buffer(self):
    """Point buffer at resolution 1 approximates a diamond."""
    original = GeoSeries([Point(0, 0)])
    expected = GeoSeries([Polygon(((5, 0), (0, -5), (-5, 0), (0, 5), (5, 0)))])
    calculated = original.buffer(5, resolution=1)
    assert geom_almost_equals(expected, calculated)

def test_buffer_args(self):
    """Keyword args are forwarded to shapely's ``buffer``."""
    args = dict(cap_style=3, join_style=2, mitre_limit=2.5)
    calculated_series = self.g0.buffer(10, **args)
    for original, calculated in zip(self.g0, calculated_series):
        if original is None:
            assert calculated is None
        else:
            expected = original.buffer(10, **args)
            assert calculated.equals(expected)

def test_buffer_distance_array(self):
    """Per-geometry buffer distances from an array."""
    original = GeoSeries([self.p0, self.p0])
    expected = GeoSeries(
        [
            Polygon(((6, 5), (5, 4), (4, 5), (5, 6), (6, 5))),
            Polygon(((10, 5), (5, 0), (0, 5), (5, 10), (10, 5))),
        ]
    )
    calculated = original.buffer(np.array([1, 5]), resolution=1)
    assert_geoseries_equal(calculated, expected, check_less_precise=True)

def test_buffer_distance_wrong_length(self):
    """A distance array of the wrong length raises ValueError."""
    original = GeoSeries([self.p0, self.p0])
    distances = np.array([1, 2, 3])
    with pytest.raises(ValueError):
        original.buffer(distances)

def test_buffer_distance_wrong_index(self):
    """A distance Series with a mismatched index raises ValueError."""
    original = GeoSeries([self.p0, self.p0], index=[0, 1])
    distances = Series(data=[1, 2], index=[99, 98])
    with pytest.raises(ValueError):
        original.buffer(distances)

def test_buffer_empty_none(self):
    """buffer(0) passes empty and missing geometries through."""
    p = Polygon([(0, 0), (0, 1), (1, 1), (1, 0)])
    s = GeoSeries([p, GeometryCollection(), None])
    result = s.buffer(0)
    assert_geoseries_equal(result, s)

    result = s.buffer(np.array([0, 0, 0]))
    assert_geoseries_equal(result, s)
def test_envelope(self):
    """Envelope of the triangles is the unit square; CRS preserved."""
    e = self.g3.envelope
    assert np.all(e.geom_equals(self.sq))
    assert isinstance(e, GeoSeries)
    assert self.g3.crs == e.crs

def test_total_bounds(self):
    """``total_bounds`` spans all geometries, on series and frame."""
    bbox = self.sol.x, self.sol.y, self.esb.x, self.esb.y
    assert isinstance(self.landmarks.total_bounds, np.ndarray)
    assert tuple(self.landmarks.total_bounds) == bbox

    df = GeoDataFrame(
        {"geometry": self.landmarks, "col1": range(len(self.landmarks))}
    )
    assert tuple(df.total_bounds) == bbox

def test_explode_geoseries(self):
    """``explode`` splits multi-parts into a MultiIndexed GeoSeries."""
    s = GeoSeries(
        [MultiPoint([(0, 0), (1, 1)]), MultiPoint([(2, 2), (3, 3), (4, 4)])]
    )
    s.index.name = "test_index_name"
    expected_index_name = ["test_index_name", None]
    index = [(0, 0), (0, 1), (1, 0), (1, 1), (1, 2)]
    expected = GeoSeries(
        [Point(0, 0), Point(1, 1), Point(2, 2), Point(3, 3), Point(4, 4)],
        index=MultiIndex.from_tuples(index, names=expected_index_name),
    )
    assert_geoseries_equal(expected, s.explode())
@pytest.mark.parametrize("index_name", [None, "test"])
def test_explode_geodataframe(self, index_name):
    """GeoDataFrame.explode repeats rows and builds a MultiIndex."""
    s = GeoSeries([MultiPoint([Point(1, 2), Point(2, 3)]), Point(5, 5)])
    df = GeoDataFrame({"col": [1, 2], "geometry": s})
    df.index.name = index_name

    test_df = df.explode()

    expected_s = GeoSeries([Point(1, 2), Point(2, 3), Point(5, 5)])
    expected_df = GeoDataFrame({"col": [1, 1, 2], "geometry": expected_s})
    expected_index = MultiIndex(
        [[0, 1], [0, 1]],  # levels
        [[0, 0, 1], [0, 1, 0]],  # labels/codes
        names=[index_name, None],
    )
    expected_df = expected_df.set_index(expected_index)
    assert_frame_equal(test_df, expected_df)
#
# Test '&', '|', '^', and '-'
# (the set-operator dunders are deprecated in favor of the named methods)
#
def test_intersection_operator(self):
    """'&' behaves like intersection but warns about deprecation."""
    with pytest.warns(DeprecationWarning):
        self._test_binary_operator("__and__", self.t1, self.g1, self.g2)
    # with GeoDataFrame on the left
    with pytest.warns(DeprecationWarning):
        self._test_binary_operator("__and__", self.t1, self.gdf1, self.g2)

def test_union_operator(self):
    """'|' behaves like union but warns about deprecation."""
    with pytest.warns(DeprecationWarning):
        self._test_binary_operator("__or__", self.sq, self.g1, self.g2)
    with pytest.warns(DeprecationWarning):
        self._test_binary_operator("__or__", self.sq, self.gdf1, self.g2)

def test_union_operator_polygon(self):
    """'|' with a bare polygon on the right."""
    with pytest.warns(DeprecationWarning):
        self._test_binary_operator("__or__", self.sq, self.g1, self.t2)
    with pytest.warns(DeprecationWarning):
        self._test_binary_operator("__or__", self.sq, self.gdf1, self.t2)

def test_symmetric_difference_operator(self):
    """'^' behaves like symmetric_difference but warns."""
    with pytest.warns(DeprecationWarning):
        self._test_binary_operator("__xor__", self.sq, self.g3, self.g4)
    with pytest.warns(DeprecationWarning):
        self._test_binary_operator("__xor__", self.sq, self.gdf3, self.g4)

def test_difference_series2(self):
    """'-' with a GeoSeries behaves like difference but warns."""
    expected = GeoSeries([GeometryCollection(), self.t2])
    with pytest.warns(DeprecationWarning):
        self._test_binary_operator("__sub__", expected, self.g1, self.g2)
    with pytest.warns(DeprecationWarning):
        self._test_binary_operator("__sub__", expected, self.gdf1, self.g2)

def test_difference_poly2(self):
    """'-' with a bare polygon behaves like difference but warns."""
    expected = GeoSeries([self.t1, self.t1])
    with pytest.warns(DeprecationWarning):
        self._test_binary_operator("__sub__", expected, self.g1, self.t2)
    with pytest.warns(DeprecationWarning):
        self._test_binary_operator("__sub__", expected, self.gdf1, self.t2)
| 37.627675 | 88 | 0.601812 | import string
import numpy as np
from numpy.testing import assert_array_equal
from pandas import DataFrame, MultiIndex, Series
from shapely.geometry import LinearRing, LineString, MultiPoint, Point, Polygon
from shapely.geometry.collection import GeometryCollection
from shapely.ops import unary_union
from geopandas import GeoDataFrame, GeoSeries
from geopandas.base import GeoPandasBase
from geopandas.tests.util import assert_geoseries_equal, geom_almost_equals, geom_equals
from pandas.testing import assert_frame_equal, assert_series_equal
import pytest
def assert_array_dtype_equal(a, b, *args, **kwargs):
a = np.asanyarray(a)
b = np.asanyarray(b)
assert a.dtype == b.dtype
assert_array_equal(a, b, *args, **kwargs)
class TestGeomMethods:
def setup_method(self):
self.t1 = Polygon([(0, 0), (1, 0), (1, 1)])
self.t2 = Polygon([(0, 0), (1, 1), (0, 1)])
self.t3 = Polygon([(2, 0), (3, 0), (3, 1)])
self.sq = Polygon([(0, 0), (1, 0), (1, 1), (0, 1)])
self.inner_sq = Polygon(
[(0.25, 0.25), (0.75, 0.25), (0.75, 0.75), (0.25, 0.75)]
)
self.nested_squares = Polygon(self.sq.boundary, [self.inner_sq.boundary])
self.p0 = Point(5, 5)
self.p3d = Point(5, 5, 5)
self.g0 = GeoSeries(
[
self.t1,
self.t2,
self.sq,
self.inner_sq,
self.nested_squares,
self.p0,
None,
]
)
self.g1 = GeoSeries([self.t1, self.sq])
self.g2 = GeoSeries([self.sq, self.t1])
self.g3 = GeoSeries([self.t1, self.t2])
self.g3.crs = "epsg:4326"
self.g4 = GeoSeries([self.t2, self.t1])
self.g4.crs = "epsg:4326"
self.g_3d = GeoSeries([self.p0, self.p3d])
self.na = GeoSeries([self.t1, self.t2, Polygon()])
self.na_none = GeoSeries([self.t1, None])
self.a1 = self.g1.copy()
self.a1.index = ["A", "B"]
self.a2 = self.g2.copy()
self.a2.index = ["B", "C"]
self.esb = Point(-73.9847, 40.7484)
self.sol = Point(-74.0446, 40.6893)
self.landmarks = GeoSeries([self.esb, self.sol], crs="epsg:4326")
self.l1 = LineString([(0, 0), (0, 1), (1, 1)])
self.l2 = LineString([(0, 0), (1, 0), (1, 1), (0, 1)])
self.g5 = GeoSeries([self.l1, self.l2])
self.g6 = GeoSeries([self.p0, self.t3])
self.empty = GeoSeries([])
self.all_none = GeoSeries([None, None])
self.empty_poly = Polygon()
self.l3 = LineString([(0, 0), (1, 1)])
self.l4 = LineString([(0, 1), (1, 0)])
self.crossed_lines = GeoSeries([self.l3, self.l4])
self.gdf1 = GeoDataFrame(
{"geometry": self.g1, "col0": [1.0, 2.0], "col1": ["geo", "pandas"]}
)
self.gdf2 = GeoDataFrame(
{"geometry": self.g1, "col3": [4, 5], "col4": ["rand", "string"]}
)
self.gdf3 = GeoDataFrame(
{"geometry": self.g3, "col3": [4, 5], "col4": ["rand", "string"]}
)
def _test_unary_real(self, op, expected, a):
fcmp = assert_series_equal
self._test_unary(op, expected, a, fcmp)
def _test_unary_topological(self, op, expected, a):
if isinstance(expected, GeoPandasBase):
fcmp = assert_geoseries_equal
else:
def fcmp(a, b):
assert a.equals(b)
self._test_unary(op, expected, a, fcmp)
def _test_binary_topological(self, op, expected, a, b, *args, **kwargs):
if isinstance(expected, GeoPandasBase):
fcmp = assert_geoseries_equal
else:
def fcmp(a, b):
assert geom_equals(a, b)
if isinstance(b, GeoPandasBase):
right_df = True
else:
right_df = False
self._binary_op_test(op, expected, a, b, fcmp, True, right_df, *args, **kwargs)
def _test_binary_real(self, op, expected, a, b, *args, **kwargs):
fcmp = assert_series_equal
self._binary_op_test(op, expected, a, b, fcmp, True, False, *args, **kwargs)
def _test_binary_operator(self, op, expected, a, b):
if isinstance(expected, GeoPandasBase):
fcmp = assert_geoseries_equal
else:
def fcmp(a, b):
assert geom_equals(a, b)
if isinstance(b, GeoPandasBase):
right_df = True
else:
right_df = False
self._binary_op_test(op, expected, a, b, fcmp, False, right_df)
def _binary_op_test(
self, op, expected, left, right, fcmp, left_df, right_df, *args, **kwargs
):
def _make_gdf(s):
n = len(s)
col1 = string.ascii_lowercase[:n]
col2 = range(n)
return GeoDataFrame(
{"geometry": s.values, "col1": col1, "col2": col2},
index=s.index,
crs=s.crs,
)
result = getattr(left, op)(right, *args, **kwargs)
fcmp(result, expected)
if left_df:
gdf_left = _make_gdf(left)
result = getattr(gdf_left, op)(right, *args, **kwargs)
fcmp(result, expected)
if right_df:
gdf_right = _make_gdf(right)
result = getattr(left, op)(gdf_right, *args, **kwargs)
fcmp(result, expected)
if left_df:
result = getattr(gdf_left, op)(gdf_right, *args, **kwargs)
fcmp(result, expected)
def _test_unary(self, op, expected, a, fcmp):
result = getattr(a, op)
fcmp(result, expected)
gdf = self.gdf1.set_geometry(a)
result = getattr(gdf, op)
fcmp(result, expected)
f):
self._test_binary_topological("intersection", self.t1, self.g1, self.g2)
with pytest.warns(UserWarning, match="The indices .+ different"):
self._test_binary_topological(
"intersection", self.all_none, self.g1, self.empty
)
def test_union_series(self):
self._test_binary_topological("union", self.sq, self.g1, self.g2)
def test_union_polygon(self):
self._test_binary_topological("union", self.sq, self.g1, self.t2)
def test_symmetric_difference_series(self):
self._test_binary_topological("symmetric_difference", self.sq, self.g3, self.g4)
def test_symmetric_difference_poly(self):
expected = GeoSeries([GeometryCollection(), self.sq], crs=self.g3.crs)
self._test_binary_topological(
"symmetric_difference", expected, self.g3, self.t1
)
def test_difference_series(self):
expected = GeoSeries([GeometryCollection(), self.t2])
self._test_binary_topological("difference", expected, self.g1, self.g2)
def test_difference_poly(self):
expected = GeoSeries([self.t1, self.t1])
self._test_binary_topological("difference", expected, self.g1, self.t2)
def test_geo_op_empty_result(self):
l1 = LineString([(0, 0), (1, 1)])
l2 = LineString([(2, 2), (3, 3)])
expected = GeoSeries([GeometryCollection()])
result = GeoSeries([l1]).intersection(l2)
assert_geoseries_equal(result, expected)
result = GeoSeries([l1]).intersection(GeoSeries([l2]))
assert_geoseries_equal(result, expected)
result = GeoSeries([GeometryCollection()]).convex_hull
assert_geoseries_equal(result, expected)
def test_boundary(self):
l1 = LineString([(0, 0), (1, 0), (1, 1), (0, 0)])
l2 = LineString([(0, 0), (1, 0), (1, 1), (0, 1), (0, 0)])
expected = GeoSeries([l1, l2], index=self.g1.index, crs=self.g1.crs)
self._test_unary_topological("boundary", expected, self.g1)
def test_area(self):
expected = Series(np.array([0.5, 1.0]), index=self.g1.index)
self._test_unary_real("area", expected, self.g1)
expected = Series(np.array([0.5, np.nan]), index=self.na_none.index)
self._test_unary_real("area", expected, self.na_none)
def test_bounds(self):
expected = DataFrame(
{
"minx": [0.0, 0.0],
"miny": [0.0, 0.0],
"maxx": [1.0, 1.0],
"maxy": [1.0, 1.0],
},
index=self.g1.index,
columns=["minx", "miny", "maxx", "maxy"],
)
result = self.g1.bounds
assert_frame_equal(expected, result)
gdf = self.gdf1.set_geometry(self.g1)
result = gdf.bounds
assert_frame_equal(expected, result)
def test_bounds_empty(self):
s = GeoSeries([])
result = s.bounds
expected = DataFrame(
columns=["minx", "miny", "maxx", "maxy"], index=s.index, dtype="float64"
)
assert_frame_equal(result, expected)
def test_unary_union(self):
p1 = self.t1
p2 = Polygon([(2, 0), (3, 0), (3, 1)])
expected = unary_union([p1, p2])
g = GeoSeries([p1, p2])
self._test_unary_topological("unary_union", expected, g)
def test_contains(self):
expected = [True, False, True, False, False, False, False]
assert_array_dtype_equal(expected, self.g0.contains(self.t1))
def test_length(self):
expected = Series(np.array([2 + np.sqrt(2), 4]), index=self.g1.index)
self._test_unary_real("length", expected, self.g1)
expected = Series(np.array([2 + np.sqrt(2), np.nan]), index=self.na_none.index)
self._test_unary_real("length", expected, self.na_none)
def test_crosses(self):
expected = [False, False, False, False, False, False, False]
assert_array_dtype_equal(expected, self.g0.crosses(self.t1))
expected = [False, True]
assert_array_dtype_equal(expected, self.crossed_lines.crosses(self.l3))
def test_disjoint(self):
expected = [False, False, False, False, False, True, False]
assert_array_dtype_equal(expected, self.g0.disjoint(self.t1))
def test_relate(self):
expected = Series(
[
"212101212",
"212101212",
"212FF1FF2",
"2FFF1FFF2",
"FF2F112F2",
"FF0FFF212",
None,
],
index=self.g0.index,
)
assert_array_dtype_equal(expected, self.g0.relate(self.inner_sq))
expected = Series(["FF0FFF212", None], index=self.g6.index)
assert_array_dtype_equal(expected, self.g6.relate(self.na_none))
def test_distance(self):
expected = Series(
np.array([np.sqrt((5 - 1) ** 2 + (5 - 1) ** 2), np.nan]), self.na_none.index
)
assert_array_dtype_equal(expected, self.na_none.distance(self.p0))
expected = Series(np.array([np.sqrt(4 ** 2 + 4 ** 2), np.nan]), self.g6.index)
assert_array_dtype_equal(expected, self.g6.distance(self.na_none))
def test_intersects(self):
expected = [True, True, True, True, True, False, False]
assert_array_dtype_equal(expected, self.g0.intersects(self.t1))
expected = [True, False]
assert_array_dtype_equal(expected, self.na_none.intersects(self.t2))
expected = np.array([], dtype=bool)
assert_array_dtype_equal(expected, self.empty.intersects(self.t1))
expected = np.array([], dtype=bool)
assert_array_dtype_equal(expected, self.empty.intersects(self.empty_poly))
expected = [False] * 7
assert_array_dtype_equal(expected, self.g0.intersects(self.empty_poly))
def test_overlaps(self):
expected = [True, True, False, False, False, False, False]
assert_array_dtype_equal(expected, self.g0.overlaps(self.inner_sq))
expected = [False, False]
assert_array_dtype_equal(expected, self.g4.overlaps(self.t1))
def test_touches(self):
expected = [False, True, False, False, False, False, False]
assert_array_dtype_equal(expected, self.g0.touches(self.t1))
def test_within(self):
expected = [True, False, False, False, False, False, False]
assert_array_dtype_equal(expected, self.g0.within(self.t1))
expected = [True, True, True, True, True, False, False]
assert_array_dtype_equal(expected, self.g0.within(self.sq))
def test_is_valid(self):
expected = Series(np.array([True] * len(self.g1)), self.g1.index)
self._test_unary_real("is_valid", expected, self.g1)
def test_is_empty(self):
expected = Series(np.array([False] * len(self.g1)), self.g1.index)
self._test_unary_real("is_empty", expected, self.g1)
def test_is_ring(self):
expected = Series(np.array([True] * len(self.g1)), self.g1.index)
self._test_unary_real("is_ring", expected, self.g1)
def test_is_simple(self):
expected = Series(np.array([True] * len(self.g1)), self.g1.index)
self._test_unary_real("is_simple", expected, self.g1)
def test_has_z(self):
expected = Series([False, True], self.g_3d.index)
self._test_unary_real("has_z", expected, self.g_3d)
def test_xy_points(self):
expected_x = [-73.9847, -74.0446]
expected_y = [40.7484, 40.6893]
assert_array_dtype_equal(expected_x, self.landmarks.geometry.x)
assert_array_dtype_equal(expected_y, self.landmarks.geometry.y)
def test_xy_polygons(self):
with pytest.raises(ValueError):
_ = self.gdf1.geometry.x
with pytest.raises(ValueError):
_ = self.gdf1.geometry.y
def test_centroid(self):
polygon = Polygon([(-1, -1), (1, -1), (1, 1), (-1, 1)])
point = Point(0, 0)
polygons = GeoSeries([polygon for i in range(3)])
points = GeoSeries([point for i in range(3)])
assert_geoseries_equal(polygons.centroid, points)
def test_convex_hull(self):
squares = GeoSeries([self.sq for i in range(3)])
assert_geoseries_equal(squares, squares.convex_hull)
def test_exterior(self):
exp_exterior = GeoSeries([LinearRing(p.boundary) for p in self.g3])
for expected, computed in zip(exp_exterior, self.g3.exterior):
assert computed.equals(expected)
def test_interiors(self):
original = GeoSeries([self.t1, self.nested_squares])
expected = []
assert original.interiors[0] == expected
expected = LinearRing(self.inner_sq.boundary)
assert original.interiors[1][0].equals(expected)
def test_interpolate(self):
expected = GeoSeries([Point(0.5, 1.0), Point(0.75, 1.0)])
self._test_binary_topological(
"interpolate", expected, self.g5, 0.75, normalized=True
)
expected = GeoSeries([Point(0.5, 1.0), Point(1.0, 0.5)])
self._test_binary_topological("interpolate", expected, self.g5, 1.5)
def test_interpolate_distance_array(self):
expected = GeoSeries([Point(0.0, 0.75), Point(1.0, 0.5)])
self._test_binary_topological(
"interpolate", expected, self.g5, np.array([0.75, 1.5])
)
expected = GeoSeries([Point(0.5, 1.0), Point(0.0, 1.0)])
self._test_binary_topological(
"interpolate", expected, self.g5, np.array([0.75, 1.5]), normalized=True
)
def test_interpolate_distance_wrong_length(self):
distances = np.array([1, 2, 3])
with pytest.raises(ValueError):
self.g5.interpolate(distances)
def test_interpolate_distance_wrong_index(self):
distances = Series([1, 2], index=[99, 98])
with pytest.raises(ValueError):
self.g5.interpolate(distances)
def test_project(self):
expected = Series([2.0, 1.5], index=self.g5.index)
p = Point(1.0, 0.5)
self._test_binary_real("project", expected, self.g5, p)
expected = Series([1.0, 0.5], index=self.g5.index)
self._test_binary_real("project", expected, self.g5, p, normalized=True)
def test_affine_transform(self):
matrix = [0, 1, 1, 0, 0, 0]
expected = self.g4
res = self.g3.affine_transform(matrix)
assert_geoseries_equal(expected, res)
def test_translate_tuple(self):
trans = self.sol.x - self.esb.x, self.sol.y - self.esb.y
assert self.landmarks.translate(*trans)[0].equals(self.sol)
res = self.gdf1.set_geometry(self.landmarks).translate(*trans)[0]
assert res.equals(self.sol)
def test_rotate(self):
angle = 98
expected = self.g4
o = Point(0, 0)
res = self.g4.rotate(angle, origin=o).rotate(-angle, origin=o)
assert geom_almost_equals(self.g4, res)
res = self.gdf1.set_geometry(self.g4).rotate(angle, origin=Point(0, 0))
assert geom_almost_equals(expected, res.rotate(-angle, origin=o))
def test_scale(self):
expected = self.g4
scale = 2.0, 1.0
inv = tuple(1.0 / i for i in scale)
o = Point(0, 0)
res = self.g4.scale(*scale, origin=o).scale(*inv, origin=o)
assert geom_almost_equals(expected, res)
res = self.gdf1.set_geometry(self.g4).scale(*scale, origin=o)
res = res.scale(*inv, origin=o)
assert geom_almost_equals(expected, res)
def test_skew(self):
expected = self.g4
skew = 45.0
o = Point(0, 0)
res = self.g4.skew(xs=skew, origin=o).skew(xs=-skew, origin=o)
assert geom_almost_equals(expected, res)
res = self.gdf1.set_geometry(self.g4).skew(xs=skew, origin=o)
res = res.skew(xs=-skew, origin=o)
assert geom_almost_equals(expected, res)
res = self.g4.skew(ys=skew, origin=o).skew(ys=-skew, origin=o)
assert geom_almost_equals(expected, res)
res = self.gdf1.set_geometry(self.g4).skew(ys=skew, origin=o)
res = res.skew(ys=-skew, origin=o)
assert geom_almost_equals(expected, res)
def test_buffer(self):
original = GeoSeries([Point(0, 0)])
expected = GeoSeries([Polygon(((5, 0), (0, -5), (-5, 0), (0, 5), (5, 0)))])
calculated = original.buffer(5, resolution=1)
assert geom_almost_equals(expected, calculated)
def test_buffer_args(self):
args = dict(cap_style=3, join_style=2, mitre_limit=2.5)
calculated_series = self.g0.buffer(10, **args)
for original, calculated in zip(self.g0, calculated_series):
if original is None:
assert calculated is None
else:
expected = original.buffer(10, **args)
assert calculated.equals(expected)
def test_buffer_distance_array(self):
original = GeoSeries([self.p0, self.p0])
expected = GeoSeries(
[
Polygon(((6, 5), (5, 4), (4, 5), (5, 6), (6, 5))),
Polygon(((10, 5), (5, 0), (0, 5), (5, 10), (10, 5))),
]
)
calculated = original.buffer(np.array([1, 5]), resolution=1)
assert_geoseries_equal(calculated, expected, check_less_precise=True)
def test_buffer_distance_wrong_length(self):
original = GeoSeries([self.p0, self.p0])
distances = np.array([1, 2, 3])
with pytest.raises(ValueError):
original.buffer(distances)
def test_buffer_distance_wrong_index(self):
original = GeoSeries([self.p0, self.p0], index=[0, 1])
distances = Series(data=[1, 2], index=[99, 98])
with pytest.raises(ValueError):
original.buffer(distances)
def test_buffer_empty_none(self):
p = Polygon([(0, 0), (0, 1), (1, 1), (1, 0)])
s = GeoSeries([p, GeometryCollection(), None])
result = s.buffer(0)
assert_geoseries_equal(result, s)
result = s.buffer(np.array([0, 0, 0]))
assert_geoseries_equal(result, s)
def test_envelope(self):
e = self.g3.envelope
assert np.all(e.geom_equals(self.sq))
assert isinstance(e, GeoSeries)
assert self.g3.crs == e.crs
def test_total_bounds(self):
bbox = self.sol.x, self.sol.y, self.esb.x, self.esb.y
assert isinstance(self.landmarks.total_bounds, np.ndarray)
assert tuple(self.landmarks.total_bounds) == bbox
df = GeoDataFrame(
{"geometry": self.landmarks, "col1": range(len(self.landmarks))}
)
assert tuple(df.total_bounds) == bbox
def test_explode_geoseries(self):
s = GeoSeries(
[MultiPoint([(0, 0), (1, 1)]), MultiPoint([(2, 2), (3, 3), (4, 4)])]
)
s.index.name = "test_index_name"
expected_index_name = ["test_index_name", None]
index = [(0, 0), (0, 1), (1, 0), (1, 1), (1, 2)]
expected = GeoSeries(
[Point(0, 0), Point(1, 1), Point(2, 2), Point(3, 3), Point(4, 4)],
index=MultiIndex.from_tuples(index, names=expected_index_name),
)
assert_geoseries_equal(expected, s.explode())
@pytest.mark.parametrize("index_name", [None, "test"])
def test_explode_geodataframe(self, index_name):
s = GeoSeries([MultiPoint([Point(1, 2), Point(2, 3)]), Point(5, 5)])
df = GeoDataFrame({"col": [1, 2], "geometry": s})
df.index.name = index_name
test_df = df.explode()
expected_s = GeoSeries([Point(1, 2), Point(2, 3), Point(5, 5)])
expected_df = GeoDataFrame({"col": [1, 1, 2], "geometry": expected_s})
expected_index = MultiIndex(
[[0, 1], [0, 1]],
[[0, 0, 1], [0, 1, 0]],
names=[index_name, None],
)
expected_df = expected_df.set_index(expected_index)
assert_frame_equal(test_df, expected_df)
    def test_intersection_operator(self):
        """`&` (deprecated alias for intersection) works on GeoSeries and GeoDataFrame."""
        with pytest.warns(DeprecationWarning):
            self._test_binary_operator("__and__", self.t1, self.g1, self.g2)
        with pytest.warns(DeprecationWarning):
            self._test_binary_operator("__and__", self.t1, self.gdf1, self.g2)
    def test_union_operator(self):
        """`|` (deprecated alias for union) works on GeoSeries and GeoDataFrame."""
        with pytest.warns(DeprecationWarning):
            self._test_binary_operator("__or__", self.sq, self.g1, self.g2)
        with pytest.warns(DeprecationWarning):
            self._test_binary_operator("__or__", self.sq, self.gdf1, self.g2)
    def test_union_operator_polygon(self):
        """`|` with a scalar polygon operand unions it against every element."""
        with pytest.warns(DeprecationWarning):
            self._test_binary_operator("__or__", self.sq, self.g1, self.t2)
        with pytest.warns(DeprecationWarning):
            self._test_binary_operator("__or__", self.sq, self.gdf1, self.t2)
    def test_symmetric_difference_operator(self):
        """`^` (deprecated alias for symmetric_difference) works on both containers."""
        with pytest.warns(DeprecationWarning):
            self._test_binary_operator("__xor__", self.sq, self.g3, self.g4)
        with pytest.warns(DeprecationWarning):
            self._test_binary_operator("__xor__", self.sq, self.gdf3, self.g4)
    def test_difference_series2(self):
        """`-` (deprecated alias for difference) against another series."""
        # g1 - g2: the first pair cancels to empty, the second leaves t2.
        expected = GeoSeries([GeometryCollection(), self.t2])
        with pytest.warns(DeprecationWarning):
            self._test_binary_operator("__sub__", expected, self.g1, self.g2)
        with pytest.warns(DeprecationWarning):
            self._test_binary_operator("__sub__", expected, self.gdf1, self.g2)
    def test_difference_poly2(self):
        """`-` (deprecated alias for difference) against a scalar polygon."""
        # Subtracting t2 from each element leaves t1 both times.
        expected = GeoSeries([self.t1, self.t1])
        with pytest.warns(DeprecationWarning):
            self._test_binary_operator("__sub__", expected, self.g1, self.t2)
        with pytest.warns(DeprecationWarning):
            self._test_binary_operator("__sub__", expected, self.gdf1, self.t2)
| true | true |
f71514719dd30abf00b523af63d90107b7beea30 | 111 | py | Python | src/routes/donate.py | BuildForSDG/team-247 | 4115c32078189c581a6155f57a3f321eebe361a8 | [
"MIT"
] | 1 | 2020-05-11T07:33:03.000Z | 2020-05-11T07:33:03.000Z | src/routes/donate.py | BuildForSDG/team-247 | 4115c32078189c581a6155f57a3f321eebe361a8 | [
"MIT"
] | 15 | 2020-05-03T10:44:22.000Z | 2021-05-11T12:05:39.000Z | src/routes/donate.py | BuildForSDG/team-247 | 4115c32078189c581a6155f57a3f321eebe361a8 | [
"MIT"
] | 5 | 2020-05-01T16:38:47.000Z | 2020-07-26T19:55:58.000Z | from flask import Blueprint, render_template
from src.extensions import db
from src.models import Donated
| 12.333333 | 44 | 0.810811 | from flask import Blueprint, render_template
from src.extensions import db
from src.models import Donated
| true | true |
f71514d7bb61d91d8c7004b1e432aad584aeea1a | 25,261 | py | Python | tf_agents/policies/tf_policy.py | moesio-f/agents | 53ce87c9203222585fdcd833e052fcdce1b6fa37 | [
"Apache-2.0"
] | null | null | null | tf_agents/policies/tf_policy.py | moesio-f/agents | 53ce87c9203222585fdcd833e052fcdce1b6fa37 | [
"Apache-2.0"
] | null | null | null | tf_agents/policies/tf_policy.py | moesio-f/agents | 53ce87c9203222585fdcd833e052fcdce1b6fa37 | [
"Apache-2.0"
] | null | null | null | # coding=utf-8
# Copyright 2020 The TF-Agents Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""TensorFlow Policies API."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import abc
from typing import Optional, Text, Sequence
import six
import tensorflow as tf
import tensorflow_probability as tfp
from tf_agents.distributions import reparameterized_sampling
from tf_agents.specs import tensor_spec
from tf_agents.trajectories import policy_step
from tf_agents.trajectories import time_step as ts
from tf_agents.trajectories import trajectory
from tf_agents.typing import types
from tf_agents.utils import common
from tf_agents.utils import nest_utils
tfd = tfp.distributions
@six.add_metaclass(abc.ABCMeta)
class TFPolicy(tf.Module):
  """Abstract base class for TF Policies.

  The Policy represents a mapping from `time_steps` recieved from the
  environment to `actions` that can be applied to the environment.

  Agents expose two policies. A `policy` meant for deployment and evaluation,
  and a `collect_policy` for collecting data from the environment. The
  `collect_policy` is usually stochastic for exploring the environment better
  and may log auxilliary information such as log probabilities required for
  training as well. `Policy` objects can also be created directly by the users
  without using an `Agent`.

  The main methods of TFPolicy are:

  * `action`: Maps a `time_step` from the environment to an action.
  * `distribution`: Maps a `time_step` to a distribution over actions.
  * `get_initial_state`: Generates the initial state for stateful policies, e.g.
      RNN/LSTM policies.

  Example usage:

  ```
  env = SomeTFEnvironment()
  policy = TFRandomPolicy(env.time_step_spec(), env.action_spec())
  # Or policy = agent.policy or agent.collect_policy

  policy_state = policy.get_initial_state(env.batch_size)
  time_step = env.reset()

  while not time_step.is_last():
    policy_step = policy.action(time_step, policy_state)
    time_step = env.step(policy_step.action)

    policy_state = policy_step.state
    # policy_step.info may contain side info for logging, such as action log
    # probabilities.
  ```

  Policies can be saved to disk as SavedModels (see policy_saver.py and
  policy_loader.py) or as TF Checkpoints.

  A `PyTFEagerPolicy` can be used to wrap a `TFPolicy` so that it works with
  `PyEnvironment`s.

  **NOTE**: For API consistency, subclasses are not allowed to override public
  methods of `TFPolicy` class. Instead, they may implement the protected methods
  including `_get_initial_state`, `_action`, and `_distribution`. This
  public-calls-private convention allowed this base class to do things like
  properly add `spec` and shape checks, which provide users an easier experience
  when debugging their environments and networks.

  For researchers, and those developing new Policies, the `TFPolicy` base class
  constructor also accept a `validate_args` parameter. If `False`, this
  disables all spec structure, dtype, and shape checks in the public methods of
  these classes. It allows algorithm developers to iterate and try different
  input and output structures without worrying about overly restrictive
  requirements, or input and output states being in a certain format. However,
  *disabling argument validation* can make it very hard to identify structural
  input or algorithmic errors; and should not be done for final, or
  production-ready, Policies. In addition to having implementations that may
  disagree with specs, this mean that the resulting Policy may no longer
  interact well with other parts of TF-Agents. Examples include impedance
  mismatches with Actor/Learner APIs, replay buffers, and the model export
  functionality in `PolicySaver.
  """

  # TODO(b/127327645) Remove this attribute.
  # This attribute allows subclasses to back out of automatic tf.function
  # attribute inside TF1 (for autodeps).
  _enable_functions = True

  def __init__(
      self,
      time_step_spec: ts.TimeStep,
      action_spec: types.NestedTensorSpec,
      policy_state_spec: types.NestedTensorSpec = (),
      info_spec: types.NestedTensorSpec = (),
      clip: bool = True,
      emit_log_probability: bool = False,
      automatic_state_reset: bool = True,
      observation_and_action_constraint_splitter: Optional[
          types.Splitter] = None,
      validate_args: bool = True,
      name: Optional[Text] = None):
    """Initialization of TFPolicy class.

    Args:
      time_step_spec: A `TimeStep` spec of the expected time_steps. Usually
        provided by the user to the subclass.
      action_spec: A nest of BoundedTensorSpec representing the actions. Usually
        provided by the user to the subclass.
      policy_state_spec: A nest of TensorSpec representing the policy_state.
        Provided by the subclass, not directly by the user.
      info_spec: A nest of TensorSpec representing the policy info. Provided by
        the subclass, not directly by the user.
      clip: Whether to clip actions to spec before returning them. Default
        True. Most policy-based algorithms (PCL, PPO, REINFORCE) use unclipped
        continuous actions for training.
      emit_log_probability: Emit log-probabilities of actions, if supported. If
        True, policy_step.info will have CommonFields.LOG_PROBABILITY set.
        Please consult utility methods provided in policy_step for setting and
        retrieving these. When working with custom policies, either provide a
        dictionary info_spec or a namedtuple with the field 'log_probability'.
      automatic_state_reset: If `True`, then `get_initial_policy_state` is used
        to clear state in `action()` and `distribution()` for for time steps
        where `time_step.is_first()`.
      observation_and_action_constraint_splitter: A function used to process
        observations with action constraints. These constraints can indicate,
        for example, a mask of valid/invalid actions for a given state of the
        environment. The function takes in a full observation and returns a
        tuple consisting of 1) the part of the observation intended as input to
        the network and 2) the constraint. An example
        `observation_and_action_constraint_splitter` could be as simple as: ```
        def observation_and_action_constraint_splitter(observation): return
        observation['network_input'], observation['constraint'] ```
        *Note*: when using `observation_and_action_constraint_splitter`, make
        sure the provided `q_network` is compatible with the network-specific
        half of the output of the
        `observation_and_action_constraint_splitter`. In particular,
        `observation_and_action_constraint_splitter` will be called on the
        observation before passing to the network. If
        `observation_and_action_constraint_splitter` is None, action
        constraints are not applied.
      validate_args: Python bool. Whether to verify inputs to, and outputs of,
        functions like `action` and `distribution` against spec structures,
        dtypes, and shapes.

        Research code may prefer to set this value to `False` to allow iterating
        on input and output structures without being hamstrung by overly
        rigid checking (at the cost of harder-to-debug errors).

        See also `TFAgent.validate_args`.
      name: A name for this module. Defaults to the class name.
    """
    super(TFPolicy, self).__init__(name=name)
    common.check_tf1_allowed()
    common.tf_agents_gauge.get_cell('TFAPolicy').set(True)
    # Enforce the public-calls-private contract described in the class docs.
    common.assert_members_are_not_overridden(base_cls=TFPolicy, instance=self)
    if not isinstance(time_step_spec, ts.TimeStep):
      raise ValueError(
          'The `time_step_spec` must be an instance of `TimeStep`, but is `{}`.'
          .format(type(time_step_spec)))

    # Normalize all user-provided specs into TensorSpecs.
    self._time_step_spec = tensor_spec.from_spec(time_step_spec)
    self._action_spec = tensor_spec.from_spec(action_spec)
    self._policy_state_spec = tensor_spec.from_spec(policy_state_spec)
    self._emit_log_probability = emit_log_probability
    self._validate_args = validate_args

    if emit_log_probability:
      # Augment the info spec with a per-action log-probability entry so that
      # `action`/`distribution` can report sampling log-probs.
      log_probability_spec = tensor_spec.BoundedTensorSpec(
          shape=(),
          dtype=tf.float32,
          maximum=0,
          minimum=-float('inf'),
          name='log_probability')
      log_probability_spec = tf.nest.map_structure(
          lambda _: log_probability_spec, action_spec)
      info_spec = policy_step.set_log_probability(
          info_spec, log_probability_spec)  # pytype: disable=wrong-arg-types

    self._info_spec = tensor_spec.from_spec(info_spec)
    self._setup_specs()
    self._clip = clip
    # In TF1 this wraps `_action` in a tf.function for autodeps; see
    # `_enable_functions` above.
    self._action_fn = common.function_in_tf1(experimental_relax_shapes=False)(
        self._action)
    self._automatic_state_reset = automatic_state_reset
    self._observation_and_action_constraint_splitter = (
        observation_and_action_constraint_splitter)

  def _setup_specs(self):
    # Derive the composite output specs from the individual specs set in
    # __init__.
    self._policy_step_spec = policy_step.PolicyStep(
        action=self._action_spec,
        state=self._policy_state_spec,
        info=self._info_spec)
    self._trajectory_spec = trajectory.from_transition(self._time_step_spec,
                                                       self._policy_step_spec,
                                                       self._time_step_spec)

  def variables(self) -> Sequence[tf.Variable]:
    """Returns the list of Variables that belong to the policy."""
    # Ignore self._variables() in favor of using tf.Module's tracking.
    return super(TFPolicy, self).variables

  @property
  def observation_and_action_constraint_splitter(self) -> types.Splitter:
    return self._observation_and_action_constraint_splitter

  @property
  def validate_args(self) -> bool:
    """Whether `action` & `distribution` validate input and output args."""
    return self._validate_args

  def get_initial_state(self,
                        batch_size: Optional[types.Int]) -> types.NestedTensor:
    """Returns an initial state usable by the policy.

    Args:
      batch_size: Tensor or constant: size of the batch dimension. Can be None
        in which case no dimensions gets added.

    Returns:
      A nested object of type `policy_state` containing properly
      initialized Tensors.
    """
    return self._get_initial_state(batch_size)

  def _maybe_reset_state(self, time_step, policy_state):
    """Replaces `policy_state` with zero state wherever `time_step` is first.

    Stateless policies (empty-tuple state) pass through unchanged.
    """
    if policy_state is ():  # pylint: disable=literal-comparison
      return policy_state

    batch_size = tf.compat.dimension_value(time_step.discount.shape[0])
    if batch_size is None:
      batch_size = tf.shape(time_step.discount)[0]

    # Make sure we call this with a kwarg as it may be wrapped in tf.function
    # which would expect a tensor if it was not a kwarg.
    zero_state = self.get_initial_state(batch_size=batch_size)
    condition = time_step.is_first()
    # When experience is a sequence we only reset automatically for the first
    # time_step in the sequence as we can't easily generalize how the policy is
    # unrolled over the sequence.
    if nest_utils.get_outer_rank(time_step, self._time_step_spec) > 1:
      condition = time_step.is_first()[:, 0, ...]
    return nest_utils.where(condition, zero_state, policy_state)

  def action(self,
             time_step: ts.TimeStep,
             policy_state: types.NestedTensor = (),
             seed: Optional[types.Seed] = None) -> policy_step.PolicyStep:
    """Generates next action given the time_step and policy_state.

    Args:
      time_step: A `TimeStep` tuple corresponding to `time_step_spec()`.
      policy_state: A Tensor, or a nested dict, list or tuple of Tensors
        representing the previous policy_state.
      seed: Seed to use if action performs sampling (optional).

    Returns:
      A `PolicyStep` named tuple containing:
        `action`: An action Tensor matching the `action_spec`.
        `state`: A policy state tensor to be fed into the next call to action.
        `info`: Optional side information such as action log probabilities.

    Raises:
      RuntimeError: If subclass __init__ didn't call super().__init__.
      ValueError or TypeError: If `validate_args is True` and inputs or
        outputs do not match `time_step_spec`, `policy_state_spec`,
        or `policy_step_spec`.
    """
    if self._enable_functions and getattr(self, '_action_fn', None) is None:
      raise RuntimeError(
          'Cannot find _action_fn.  Did %s.__init__ call super?' %
          type(self).__name__)
    if self._enable_functions:
      action_fn = self._action_fn
    else:
      action_fn = self._action

    if self._validate_args:
      time_step = nest_utils.prune_extra_keys(self._time_step_spec, time_step)
      policy_state = nest_utils.prune_extra_keys(
          self._policy_state_spec, policy_state)
      nest_utils.assert_same_structure(
          time_step,
          self._time_step_spec,
          message='time_step and time_step_spec structures do not match')
      # TODO(b/158804957): Use literal comparison because in some strange cases
      # (tf.function? autograph?) the expression "x not in (None, (), [])" gets
      # converted to a tensor.
      if not (policy_state is None or policy_state is () or policy_state is []):  # pylint: disable=literal-comparison
        nest_utils.assert_same_structure(
            policy_state,
            self._policy_state_spec,
            message=('policy_state and policy_state_spec '
                     'structures do not match'))

    if self._automatic_state_reset:
      policy_state = self._maybe_reset_state(time_step, policy_state)
    step = action_fn(time_step=time_step, policy_state=policy_state, seed=seed)

    def clip_action(action, action_spec):
      # Only bounded specs carry min/max values to clip against.
      if isinstance(action_spec, tensor_spec.BoundedTensorSpec):
        return common.clip_to_spec(action, action_spec)
      return action

    if self._validate_args:
      nest_utils.assert_same_structure(
          step.action, self._action_spec,
          message='action and action_spec structures do not match')

    if self._clip:
      clipped_actions = tf.nest.map_structure(clip_action,
                                              step.action,
                                              self._action_spec)
      step = step._replace(action=clipped_actions)

    if self._validate_args:
      nest_utils.assert_same_structure(
          step,
          self._policy_step_spec,
          message='action output and policy_step_spec structures do not match')

      def compare_to_spec(value, spec):
        return value.dtype.is_compatible_with(spec.dtype)

      compatibility = [
          compare_to_spec(v, s) for (v, s)
          in zip(tf.nest.flatten(step.action),
                 tf.nest.flatten(self.action_spec))]

      if not all(compatibility):
        get_dtype = lambda x: x.dtype
        action_dtypes = tf.nest.map_structure(get_dtype, step.action)
        spec_dtypes = tf.nest.map_structure(get_dtype, self.action_spec)

        raise TypeError('Policy produced an action with a dtype that doesn\'t '
                        'match its action_spec. Got action:\n  %s\n with '
                        'action_spec:\n  %s' % (action_dtypes, spec_dtypes))

    return step

  def distribution(
      self, time_step: ts.TimeStep, policy_state: types.NestedTensor = ()
  ) -> policy_step.PolicyStep:
    """Generates the distribution over next actions given the time_step.

    Args:
      time_step: A `TimeStep` tuple corresponding to `time_step_spec()`.
      policy_state: A Tensor, or a nested dict, list or tuple of Tensors
        representing the previous policy_state.

    Returns:
      A `PolicyStep` named tuple containing:

        `action`: A tf.distribution capturing the distribution of next actions.
        `state`: A policy state tensor for the next call to distribution.
        `info`: Optional side information such as action log probabilities.

    Raises:
      ValueError or TypeError: If `validate_args is True` and inputs or
        outputs do not match `time_step_spec`, `policy_state_spec`,
        or `policy_step_spec`.
    """
    if self._validate_args:
      time_step = nest_utils.prune_extra_keys(self._time_step_spec, time_step)
      policy_state = nest_utils.prune_extra_keys(
          self._policy_state_spec, policy_state)
      nest_utils.assert_same_structure(
          time_step,
          self._time_step_spec,
          message='time_step and time_step_spec structures do not match')
      nest_utils.assert_same_structure(
          policy_state,
          self._policy_state_spec,
          message='policy_state and policy_state_spec structures do not match')
    if self._automatic_state_reset:
      policy_state = self._maybe_reset_state(time_step, policy_state)
    step = self._distribution(time_step=time_step, policy_state=policy_state)
    if self.emit_log_probability:
      # This here is set only for compatibility with info_spec in constructor.
      info = policy_step.set_log_probability(
          step.info,
          tf.nest.map_structure(
              lambda _: tf.constant(0., dtype=tf.float32),
              policy_step.get_log_probability(self._info_spec)))
      step = step._replace(info=info)
    if self._validate_args:
      nest_utils.assert_same_structure(
          step,
          self._policy_step_spec,
          message=('distribution output and policy_step_spec structures '
                   'do not match'))
    return step

  def update(self,
             policy,
             tau: float = 1.0,
             tau_non_trainable: Optional[float] = None,
             sort_variables_by_name: bool = False) -> tf.Operation:
    """Update the current policy with another policy.

    This would include copying the variables from the other policy.

    Args:
      policy: Another policy it can update from.
      tau: A float scalar in [0, 1]. When tau is 1.0 (the default), we do a hard
        update. This is used for trainable variables.
      tau_non_trainable: A float scalar in [0, 1] for non_trainable variables.
        If None, will copy from tau.
      sort_variables_by_name: A bool, when True would sort the variables by name
        before doing the update.

    Returns:
      An TF op to do the update.
    """
    if self.variables():
      return common.soft_variables_update(
          policy.variables(),
          self.variables(),
          tau=tau,
          tau_non_trainable=tau_non_trainable,
          sort_variables_by_name=sort_variables_by_name)
    else:
      # Stateless policy: nothing to copy.
      return tf.no_op()

  @property
  def emit_log_probability(self) -> bool:
    """Whether this policy instance emits log probabilities or not."""
    return self._emit_log_probability

  @property
  def time_step_spec(self) -> ts.TimeStep:
    """Describes the `TimeStep` tensors returned by `step()`.

    Returns:
      A `TimeStep` namedtuple with `TensorSpec` objects instead of Tensors,
      which describe the shape, dtype and name of each tensor returned by
      `step()`.
    """
    return self._time_step_spec

  @property
  def action_spec(self) -> types.NestedTensorSpec:
    """Describes the TensorSpecs of the Tensors expected by `step(action)`.

    `action` can be a single Tensor, or a nested dict, list or tuple of
    Tensors.

    Returns:
      An single BoundedTensorSpec, or a nested dict, list or tuple of
      `BoundedTensorSpec` objects, which describe the shape and
      dtype of each Tensor expected by `step()`.
    """
    return self._action_spec

  @property
  def policy_state_spec(self) -> types.NestedTensorSpec:
    """Describes the Tensors expected by `step(_, policy_state)`.

    `policy_state` can be an empty tuple, a single Tensor, or a nested dict,
    list or tuple of Tensors.

    Returns:
      An single TensorSpec, or a nested dict, list or tuple of
      `TensorSpec` objects, which describe the shape and
      dtype of each Tensor expected by `step(_, policy_state)`.
    """
    return self._policy_state_spec

  @property
  def info_spec(self) -> types.NestedTensorSpec:
    """Describes the Tensors emitted as info by `action` and `distribution`.

    `info` can be an empty tuple, a single Tensor, or a nested dict,
    list or tuple of Tensors.

    Returns:
      An single TensorSpec, or a nested dict, list or tuple of
      `TensorSpec` objects, which describe the shape and
      dtype of each Tensor expected by `step(_, policy_state)`.
    """
    return self._info_spec

  @property
  def policy_step_spec(self) -> policy_step.PolicyStep:
    """Describes the output of `action()`.

    Returns:
      A nest of TensorSpec which describe the shape and dtype of each Tensor
      emitted by `action()`.
    """
    return self._policy_step_spec

  # TODO(kbanoop, ebrevdo): Should this be collect_data_spec to mirror agents?
  @property
  def trajectory_spec(self) -> trajectory.Trajectory:
    """Describes the Tensors written when using this policy with an environment.

    Returns:
      A `Trajectory` containing all tensor specs associated with the
      observation_spec, action_spec, policy_state_spec, and info_spec of
      this policy.
    """
    return self._trajectory_spec

  @property
  def collect_data_spec(self) -> trajectory.Trajectory:
    """Describes the Tensors written when using this policy with an environment.

    Returns:
      A nest of TensorSpec which describe the shape and dtype of each Tensor
      required to train the agent which generated this policy.
    """
    return self._trajectory_spec

  # Subclasses MAY optionally override _action.
  def _action(self, time_step: ts.TimeStep,
              policy_state: types.NestedTensor,
              seed: Optional[types.Seed] = None) -> policy_step.PolicyStep:
    """Implementation of `action`.

    Default implementation: sample from the distribution returned by
    `_distribution`, optionally recording per-action log-probabilities.

    Args:
      time_step: A `TimeStep` tuple corresponding to `time_step_spec()`.
      policy_state: A Tensor, or a nested dict, list or tuple of Tensors
        representing the previous policy_state.
      seed: Seed to use if action performs sampling (optional).

    Returns:
      A `PolicyStep` named tuple containing:
        `action`: An action Tensor matching the `action_spec`.
        `state`: A policy state tensor to be fed into the next call to action.
        `info`: Optional side information such as action log probabilities.
    """
    seed_stream = tfp.util.SeedStream(seed=seed, salt='tf_agents_tf_policy')
    distribution_step = self._distribution(time_step, policy_state)  # pytype: disable=wrong-arg-types
    actions = tf.nest.map_structure(
        lambda d: reparameterized_sampling.sample(d, seed=seed_stream()),
        distribution_step.action)
    info = distribution_step.info
    if self.emit_log_probability:
      try:
        log_probability = tf.nest.map_structure(lambda a, d: d.log_prob(a),
                                                actions,
                                                distribution_step.action)
        info = policy_step.set_log_probability(info, log_probability)
      except:
        # NOTE(review): bare except deliberately converts any failure in
        # log_prob (e.g. distributions without an analytic log-prob) into a
        # TypeError naming the policy class.
        raise TypeError('%s does not support emitting log-probabilities.' %
                        type(self).__name__)
    return distribution_step._replace(action=actions, info=info)

  ## Subclasses MUST implement these.
  def _distribution(
      self, time_step: ts.TimeStep,
      policy_state: types.NestedTensorSpec) -> policy_step.PolicyStep:
    """Implementation of `distribution`.

    Args:
      time_step: A `TimeStep` tuple corresponding to `time_step_spec()`.
      policy_state: A Tensor, or a nested dict, list or tuple of Tensors
        representing the previous policy_state.

    Returns:
      A `PolicyStep` named tuple containing:
        `action`: A (optionally nested) of tfp.distribution.Distribution
          capturing the distribution of next actions.
        `state`: A policy state tensor for the next call to distribution.
        `info`: Optional side information such as action log probabilities.
    """
    raise NotImplementedError()

  # Subclasses MAY optionally overwrite _get_initial_state.
  def _get_initial_state(self, batch_size: int) -> types.NestedTensor:
    """Returns the initial state of the policy network.

    Args:
      batch_size: A constant or Tensor holding the batch size. Can be None, in
        which case the state will not have a batch dimension added.

    Returns:
      A nest of zero tensors matching the spec of the policy network state.
    """
    return tensor_spec.zero_spec_nest(
        self._policy_state_spec,
        outer_dims=None if batch_size is None else [batch_size])
| 41.343699 | 118 | 0.702585 |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import abc
from typing import Optional, Text, Sequence
import six
import tensorflow as tf
import tensorflow_probability as tfp
from tf_agents.distributions import reparameterized_sampling
from tf_agents.specs import tensor_spec
from tf_agents.trajectories import policy_step
from tf_agents.trajectories import time_step as ts
from tf_agents.trajectories import trajectory
from tf_agents.typing import types
from tf_agents.utils import common
from tf_agents.utils import nest_utils
tfd = tfp.distributions
@six.add_metaclass(abc.ABCMeta)
class TFPolicy(tf.Module):
_enable_functions = True
def __init__(
self,
time_step_spec: ts.TimeStep,
action_spec: types.NestedTensorSpec,
policy_state_spec: types.NestedTensorSpec = (),
info_spec: types.NestedTensorSpec = (),
clip: bool = True,
emit_log_probability: bool = False,
automatic_state_reset: bool = True,
observation_and_action_constraint_splitter: Optional[
types.Splitter] = None,
validate_args: bool = True,
name: Optional[Text] = None):
super(TFPolicy, self).__init__(name=name)
common.check_tf1_allowed()
common.tf_agents_gauge.get_cell('TFAPolicy').set(True)
common.assert_members_are_not_overridden(base_cls=TFPolicy, instance=self)
if not isinstance(time_step_spec, ts.TimeStep):
raise ValueError(
'The `time_step_spec` must be an instance of `TimeStep`, but is `{}`.'
.format(type(time_step_spec)))
self._time_step_spec = tensor_spec.from_spec(time_step_spec)
self._action_spec = tensor_spec.from_spec(action_spec)
self._policy_state_spec = tensor_spec.from_spec(policy_state_spec)
self._emit_log_probability = emit_log_probability
self._validate_args = validate_args
if emit_log_probability:
log_probability_spec = tensor_spec.BoundedTensorSpec(
shape=(),
dtype=tf.float32,
maximum=0,
minimum=-float('inf'),
name='log_probability')
log_probability_spec = tf.nest.map_structure(
lambda _: log_probability_spec, action_spec)
info_spec = policy_step.set_log_probability(
info_spec, log_probability_spec)
self._info_spec = tensor_spec.from_spec(info_spec)
self._setup_specs()
self._clip = clip
self._action_fn = common.function_in_tf1(experimental_relax_shapes=False)(
self._action)
self._automatic_state_reset = automatic_state_reset
self._observation_and_action_constraint_splitter = (
observation_and_action_constraint_splitter)
def _setup_specs(self):
self._policy_step_spec = policy_step.PolicyStep(
action=self._action_spec,
state=self._policy_state_spec,
info=self._info_spec)
self._trajectory_spec = trajectory.from_transition(self._time_step_spec,
self._policy_step_spec,
self._time_step_spec)
def variables(self) -> Sequence[tf.Variable]:
return super(TFPolicy, self).variables
@property
def observation_and_action_constraint_splitter(self) -> types.Splitter:
return self._observation_and_action_constraint_splitter
@property
def validate_args(self) -> bool:
return self._validate_args
def get_initial_state(self,
batch_size: Optional[types.Int]) -> types.NestedTensor:
return self._get_initial_state(batch_size)
def _maybe_reset_state(self, time_step, policy_state):
if policy_state is (): # pylint: disable=literal-comparison
return policy_state
batch_size = tf.compat.dimension_value(time_step.discount.shape[0])
if batch_size is None:
batch_size = tf.shape(time_step.discount)[0]
# Make sure we call this with a kwarg as it may be wrapped in tf.function
# which would expect a tensor if it was not a kwarg.
zero_state = self.get_initial_state(batch_size=batch_size)
condition = time_step.is_first()
# When experience is a sequence we only reset automatically for the first
# time_step in the sequence as we can't easily generalize how the policy is
if nest_utils.get_outer_rank(time_step, self._time_step_spec) > 1:
condition = time_step.is_first()[:, 0, ...]
return nest_utils.where(condition, zero_state, policy_state)
def action(self,
           time_step: ts.TimeStep,
           policy_state: types.NestedTensor = (),
           seed: Optional[types.Seed] = None) -> policy_step.PolicyStep:
    """Generates an action (`PolicyStep`) for the given `time_step`.

    Optionally validates inputs/outputs against the specs, resets state on
    boundary steps, and clips actions to bounded action specs.

    Args:
      time_step: A `TimeStep`; must match `time_step_spec` when validating.
      policy_state: Nested policy state; `None`, `()` or `[]` mean "no state".
      seed: Optional seed forwarded to the underlying action function.

    Returns:
      A `PolicyStep` namedtuple of (action, state, info).

    Raises:
      RuntimeError: If `_action_fn` is missing (subclass skipped
        `super().__init__`).
      TypeError: If the produced action dtypes do not match `action_spec`.
    """
    if self._enable_functions and getattr(self, '_action_fn', None) is None:
        raise RuntimeError(
            'Cannot find _action_fn. Did %s.__init__ call super?' %
            type(self).__name__)
    if self._enable_functions:
        action_fn = self._action_fn
    else:
        action_fn = self._action
    if self._validate_args:
        time_step = nest_utils.prune_extra_keys(self._time_step_spec, time_step)
        policy_state = nest_utils.prune_extra_keys(
            self._policy_state_spec, policy_state)
        nest_utils.assert_same_structure(
            time_step,
            self._time_step_spec,
            message='time_step and time_step_spec structures do not match')
        # Skip the state-structure check when the caller passed an "empty"
        # state.  NOTE: the original tested `policy_state is []`, which is
        # always False (a fresh list literal is never identical to another
        # object); equality implements the evident intent.
        if not (policy_state is None or policy_state == () or
                policy_state == []):
            nest_utils.assert_same_structure(
                policy_state,
                self._policy_state_spec,
                message=('policy_state and policy_state_spec '
                         'structures do not match'))
    if self._automatic_state_reset:
        policy_state = self._maybe_reset_state(time_step, policy_state)
    step = action_fn(time_step=time_step, policy_state=policy_state, seed=seed)

    def clip_action(action, action_spec):
        # Only bounded specs impose limits; everything else passes through.
        if isinstance(action_spec, tensor_spec.BoundedTensorSpec):
            return common.clip_to_spec(action, action_spec)
        return action

    if self._validate_args:
        nest_utils.assert_same_structure(
            step.action, self._action_spec,
            message='action and action_spec structures do not match')
    if self._clip:
        clipped_actions = tf.nest.map_structure(clip_action,
                                                step.action,
                                                self._action_spec)
        step = step._replace(action=clipped_actions)
    if self._validate_args:
        nest_utils.assert_same_structure(
            step,
            self._policy_step_spec,
            message='action output and policy_step_spec structures do not match')

        def compare_to_spec(value, spec):
            return value.dtype.is_compatible_with(spec.dtype)

        compatibility = [
            compare_to_spec(v, s) for (v, s)
            in zip(tf.nest.flatten(step.action),
                   tf.nest.flatten(self.action_spec))]
        if not all(compatibility):
            get_dtype = lambda x: x.dtype
            action_dtypes = tf.nest.map_structure(get_dtype, step.action)
            spec_dtypes = tf.nest.map_structure(get_dtype, self.action_spec)
            raise TypeError('Policy produced an action with a dtype that doesn\'t '
                            'match its action_spec. Got action:\n %s\n with '
                            'action_spec:\n %s' % (action_dtypes, spec_dtypes))
    return step
def distribution(
    self, time_step: ts.TimeStep, policy_state: types.NestedTensor = ()
) -> policy_step.PolicyStep:
    """Returns the action distribution for `time_step` as a `PolicyStep`.

    Unlike `action()`, nothing is sampled here: `step.action` carries the
    nested distributions produced by the subclass's `_distribution()`.
    """
    if self._validate_args:
        # Drop keys not present in the specs, then verify structures match.
        time_step = nest_utils.prune_extra_keys(self._time_step_spec, time_step)
        policy_state = nest_utils.prune_extra_keys(
            self._policy_state_spec, policy_state)
        nest_utils.assert_same_structure(
            time_step,
            self._time_step_spec,
            message='time_step and time_step_spec structures do not match')
        nest_utils.assert_same_structure(
            policy_state,
            self._policy_state_spec,
            message='policy_state and policy_state_spec structures do not match')
    if self._automatic_state_reset:
        policy_state = self._maybe_reset_state(time_step, policy_state)
    step = self._distribution(time_step=time_step, policy_state=policy_state)
    if self.emit_log_probability:
        # This here is set only for compatibility with info_spec in constructor.
        info = policy_step.set_log_probability(
            step.info,
            tf.nest.map_structure(
                lambda _: tf.constant(0., dtype=tf.float32),
                policy_step.get_log_probability(self._info_spec)))
        step = step._replace(info=info)
    if self._validate_args:
        nest_utils.assert_same_structure(
            step,
            self._policy_step_spec,
            message=('distribution output and policy_step_spec structures '
                     'do not match'))
    return step
def update(self,
           policy,
           tau: float = 1.0,
           tau_non_trainable: Optional[float] = None,
           sort_variables_by_name: bool = False) -> tf.Operation:
    """Soft-updates this policy's variables toward `policy`'s variables.

    Delegates the interpolation to `common.soft_variables_update`
    (presumably target = tau * source + (1 - tau) * target -- confirm
    against that helper). Returns a no-op when this policy has no
    variables to update.
    """
    if self.variables():
        return common.soft_variables_update(
            policy.variables(),
            self.variables(),
            tau=tau,
            tau_non_trainable=tau_non_trainable,
            sort_variables_by_name=sort_variables_by_name)
    else:
        return tf.no_op()
@property
def emit_log_probability(self) -> bool:
    """Whether this policy adds log-probabilities to `info`."""
    return self._emit_log_probability

@property
def time_step_spec(self) -> ts.TimeStep:
    """Spec of the `TimeStep` inputs this policy accepts."""
    return self._time_step_spec

@property
def action_spec(self) -> types.NestedTensorSpec:
    """Spec of the actions this policy emits."""
    return self._action_spec

@property
def policy_state_spec(self) -> types.NestedTensorSpec:
    """Spec of the nested policy state."""
    return self._policy_state_spec

@property
def info_spec(self) -> types.NestedTensorSpec:
    """Spec of the `info` field of emitted `PolicyStep`s."""
    return self._info_spec

@property
def policy_step_spec(self) -> policy_step.PolicyStep:
    """Spec of the full `PolicyStep` (action, state, info)."""
    return self._policy_step_spec

# TODO(kbanoop, ebrevdo): Should this be collect_data_spec to mirror agents?
@property
def trajectory_spec(self) -> trajectory.Trajectory:
    """Spec of trajectories produced when following this policy."""
    return self._trajectory_spec

@property
def collect_data_spec(self) -> trajectory.Trajectory:
    """Alias of `trajectory_spec` (same underlying spec)."""
    return self._trajectory_spec
# Subclasses MAY optionally override _action.
def _action(self, time_step: ts.TimeStep,
            policy_state: types.NestedTensor,
            seed: Optional[types.Seed] = None) -> policy_step.PolicyStep:
    """Default action implementation: sample from `_distribution()`.

    Subclasses MAY optionally override this. Optionally attaches
    log-probabilities of the sampled actions to `info`.
    """
    seed_stream = tfp.util.SeedStream(seed=seed, salt='tf_agents_tf_policy')
    distribution_step = self._distribution(time_step, policy_state)  # pytype: disable=wrong-arg-types
    actions = tf.nest.map_structure(
        lambda d: reparameterized_sampling.sample(d, seed=seed_stream()),
        distribution_step.action)
    info = distribution_step.info
    if self.emit_log_probability:
        try:
            log_probability = tf.nest.map_structure(lambda a, d: d.log_prob(a),
                                                    actions,
                                                    distribution_step.action)
            info = policy_step.set_log_probability(info, log_probability)
        except Exception as e:
            # The original used a bare `except:`, which also swallows
            # KeyboardInterrupt / SystemExit; catch Exception and chain the
            # cause so the underlying failure stays visible.
            raise TypeError('%s does not support emitting log-probabilities.' %
                            type(self).__name__) from e
    return distribution_step._replace(action=actions, info=info)
## Subclasses MUST implement these.
def _distribution(
    self, time_step: ts.TimeStep,
    policy_state: types.NestedTensorSpec) -> policy_step.PolicyStep:
    """Abstract: return a `PolicyStep` whose `action` holds distributions.

    Subclasses MUST implement this.
    """
    raise NotImplementedError()
# Subclasses MAY optionally overwrite _get_initial_state.
def _get_initial_state(self, batch_size: int) -> types.NestedTensor:
    """Default initial state: zeros shaped like `policy_state_spec`.

    A `batch_size` of None yields an unbatched (no outer dims) state.
    """
    return tensor_spec.zero_spec_nest(
        self._policy_state_spec,
        outer_dims=None if batch_size is None else [batch_size])
| true | true |
f71514f0dad88cf3e04985d36511d73d2b429e0f | 2,737 | py | Python | salt/modules/freebsdkmod.py | abh/salt | e8870573a2d3eca1a7794ce8340797fa487de04d | [
"Apache-2.0"
] | 1 | 2017-09-09T11:21:13.000Z | 2017-09-09T11:21:13.000Z | salt/modules/freebsdkmod.py | abh/salt | e8870573a2d3eca1a7794ce8340797fa487de04d | [
"Apache-2.0"
] | null | null | null | salt/modules/freebsdkmod.py | abh/salt | e8870573a2d3eca1a7794ce8340797fa487de04d | [
"Apache-2.0"
] | null | null | null | '''
Module to manage FreeBSD kernel modules
'''
import os
def __virtual__():
    '''
    Salt loader hook: only load this module on FreeBSD systems.
    '''
    return 'kmod' if __grains__['kernel'] == 'FreeBSD' else False
def _new_mods(pre_mods, post_mods):
'''
Return a list of the new modules, pass an kldstat dict before running
modprobe and one after modprobe has run
'''
pre = set()
post = set()
for mod in pre_mods:
pre.add(mod['module'])
for mod in post_mods:
post.add(mod['module'])
return list(post - pre)
def _rm_mods(pre_mods, post_mods):
'''
Return a list of the new modules, pass an kldstat dict before running
modprobe and one after modprobe has run
'''
pre = set()
post = set()
for mod in pre_mods:
pre.add(mod['module'])
for mod in post_mods:
post.add(mod['module'])
return list(pre - post)
def available():
    '''
    Return a list of all available kernel modules

    CLI Example::

        salt '*' kmod.available
    '''
    ret = []
    # NOTE(review): the grep pattern '.ko$' is an unescaped regex -- the dot
    # matches any character, so names like 'fooXko' would also match; confirm
    # whether '\\.ko$' was intended.
    for path in __salt__['cmd.run']('ls /boot/kernel | grep .ko$').split('\n'):
        bpath = os.path.basename(path)
        comps = bpath.split('.')
        if 'ko' in comps:
            # This is a kernel module, return it without the .ko extension
            ret.append('.'.join(comps[:comps.index('ko')]))
    return ret
def check_available(mod):
    '''
    Check to see if the specified kernel module is available

    CLI Example::

        salt '*' kmod.check_available kvm
    '''
    # Simple membership test against the names reported by available().
    return mod in available()
def lsmod():
    '''
    Return a list of dicts describing the currently loaded kernel modules,
    parsed from ``kldstat`` output.

    CLI Example::

        salt '*' kmod.lsmod
    '''
    modules = []
    for line in __salt__['cmd.run']('kldstat').split('\n'):
        fields = line.split()
        # Skip rows too short to be a module entry, and the header line.
        if len(fields) <= 2 or fields[0] == 'Module':
            continue
        entry = {
            'module': fields[0],
            'size': fields[1],
            'depcount': fields[2],
            'deps': fields[3].split(',') if len(fields) > 3 else [],
        }
        modules.append(entry)
    return modules
def load(mod):
    '''
    Load the specified kernel module

    CLI Example::

        salt '*' kmod.load kvm
    '''
    # Diff the kldstat listing before and after kldload so the return value
    # reflects everything that actually got loaded (including dependencies).
    pre_mods = lsmod()
    __salt__['cmd.run_all']('kldload {0}'.format(mod))
    post_mods = lsmod()
    return _new_mods(pre_mods, post_mods)
def remove(mod):
    '''
    Remove the specified kernel module

    CLI Example::

        salt '*' kmod.remove kvm
    '''
    # Diff the kldstat listing before and after kldunload to report what
    # was actually removed.
    pre_mods = lsmod()
    __salt__['cmd.run_all']('kldunload {0}'.format(mod))
    post_mods = lsmod()
    return _rm_mods(pre_mods, post_mods)
| 21.896 | 79 | 0.569236 |
import os
def __virtual__():
return 'kmod' if __grains__['kernel'] == 'FreeBSD' else False
def _new_mods(pre_mods, post_mods):
pre = set()
post = set()
for mod in pre_mods:
pre.add(mod['module'])
for mod in post_mods:
post.add(mod['module'])
return list(post - pre)
def _rm_mods(pre_mods, post_mods):
pre = set()
post = set()
for mod in pre_mods:
pre.add(mod['module'])
for mod in post_mods:
post.add(mod['module'])
return list(pre - post)
def available():
ret = []
for path in __salt__['cmd.run']('ls /boot/kernel | grep .ko$').split('\n'):
bpath = os.path.basename(path)
comps = bpath.split('.')
if 'ko' in comps:
ret.append('.'.join(comps[:comps.index('ko')]))
return ret
def check_available(mod):
return mod in available()
def lsmod():
ret = []
for line in __salt__['cmd.run']('kldstat').split('\n'):
comps = line.split()
if not len(comps) > 2:
continue
if comps[0] == 'Module':
continue
mdat = {}
mdat['module'] = comps[0]
mdat['size'] = comps[1]
mdat['depcount'] = comps[2]
if len(comps) > 3:
mdat['deps'] = comps[3].split(',')
else:
mdat['deps'] = []
ret.append(mdat)
return ret
def load(mod):
pre_mods = lsmod()
__salt__['cmd.run_all']('kldload {0}'.format(mod))
post_mods = lsmod()
return _new_mods(pre_mods, post_mods)
def remove(mod):
pre_mods = lsmod()
__salt__['cmd.run_all']('kldunload {0}'.format(mod))
post_mods = lsmod()
return _rm_mods(pre_mods, post_mods)
| true | true |
f715150d07f9689d433e3b3d3176bd3af0b5ace6 | 72 | py | Python | run.py | priscillapepe/News-API | 7931edf4bac58fbc894f9007c91c0a55c480736d | [
"MIT"
] | null | null | null | run.py | priscillapepe/News-API | 7931edf4bac58fbc894f9007c91c0a55c480736d | [
"MIT"
] | null | null | null | run.py | priscillapepe/News-API | 7931edf4bac58fbc894f9007c91c0a55c480736d | [
"MIT"
] | null | null | null | # from app import app
# if __name__ == "__main__":
# app.run()
| 14.4 | 28 | 0.555556 | true | true | |
f71515bd965b1d9750e3c8f9bcbd37c39cad5398 | 679 | py | Python | aiogram/types/file.py | muhammedfurkan/aiogram | 692c1340b4dda556da640e5f9ea2200848c06840 | [
"MIT"
] | null | null | null | aiogram/types/file.py | muhammedfurkan/aiogram | 692c1340b4dda556da640e5f9ea2200848c06840 | [
"MIT"
] | 4 | 2020-11-04T15:55:55.000Z | 2020-11-08T21:36:02.000Z | aiogram/types/file.py | muhammedfurkan/aiogram | 692c1340b4dda556da640e5f9ea2200848c06840 | [
"MIT"
] | null | null | null | from . import base, fields, mixins
class File(base.TelegramObject, mixins.Downloadable):
    """
    This object represents a file ready to be downloaded.

    The file can be downloaded via the link
    https://api.telegram.org/file/bot<token>/<file_path>.
    It is guaranteed that the link will be valid for at least 1 hour.
    When the link expires, a new one can be requested by calling getFile.

    Maximum file size to download is 20 MB

    https://core.telegram.org/bots/api#file
    """
    # Identifier used to download or reuse this file.
    file_id: base.String = fields.Field()
    # Unique file identifier (see the Bot API reference linked above).
    file_unique_id: base.String = fields.Field()
    # File size, if known.
    file_size: base.Integer = fields.Field()
    # Path used to build the download URL shown in the class docstring.
    file_path: base.String = fields.Field()
| 30.863636 | 97 | 0.705449 | from . import base, fields, mixins
class File(base.TelegramObject, mixins.Downloadable):
file_id: base.String = fields.Field()
file_unique_id: base.String = fields.Field()
file_size: base.Integer = fields.Field()
file_path: base.String = fields.Field()
| true | true |
f7151637a20ed087dc8cfe8bbbaa192496fb0745 | 13,806 | py | Python | kinto/core/storage/__init__.py | taus-semmle/kinto | a4cd7c6413d1d7809fe02670c0224959390dc25d | [
"Apache-2.0"
] | null | null | null | kinto/core/storage/__init__.py | taus-semmle/kinto | a4cd7c6413d1d7809fe02670c0224959390dc25d | [
"Apache-2.0"
] | null | null | null | kinto/core/storage/__init__.py | taus-semmle/kinto | a4cd7c6413d1d7809fe02670c0224959390dc25d | [
"Apache-2.0"
] | null | null | null | import json
import logging
import random
import warnings
from collections import namedtuple
from pyramid.settings import asbool
import ujson
from kinto.core.decorators import deprecate_kwargs
from . import generators
class Missing:
    """Dummy value to represent a value that is completely absent from an object.

    Distinct from ``None`` (which is a real stored value). Handling these
    correctly is important for pagination.
    """
    pass

# Module-level sentinel instance shared by all callers.
MISSING = Missing()
logger = logging.getLogger(__name__)
Filter = namedtuple("Filter", ["field", "value", "operator"])
"""Filtering properties."""
Sort = namedtuple("Sort", ["field", "direction"])
"""Sorting properties."""
DEFAULT_ID_FIELD = "id"
DEFAULT_MODIFIED_FIELD = "last_modified"
DEFAULT_DELETED_FIELD = "deleted"
_HEARTBEAT_DELETE_RATE = 0.6
_HEARTBEAT_RESOURCE_NAME = "__heartbeat__"
_HEART_PARENT_ID = _HEARTBEAT_RESOURCE_NAME
_HEARTBEAT_OBJECT = {"__heartbeat__": True}
class StorageBase:
"""Storage abstraction used by resource views.
It is meant to be instantiated at application startup.
Any operation may raise a `HTTPServiceUnavailable` error if an error
occurs with the underlying service.
Configuration can be changed to choose which storage backend will
persist the objects.
:raises: :exc:`~pyramid:pyramid.httpexceptions.HTTPServiceUnavailable`
"""
id_generator = generators.UUID4()
"""Id generator used when no one is provided for create."""
def __init__(self, strict_json=True):
    """Pick the JSON (de)serializer: strict stdlib ``json`` or fast ``ujson``."""
    self.json = json if strict_json else ujson
def initialize_schema(self, dry_run=False):
"""Create every necessary objects (like tables or indices) in the
backend.
This is executed when the ``kinto migrate`` command is run.
:param bool dry_run: simulate instead of executing the operations.
"""
raise NotImplementedError
def flush(self, auth=None):
"""Remove **every** object from this storage.
"""
raise NotImplementedError
def resource_timestamp(self, resource_name, parent_id, auth=None):
"""Get the highest timestamp of every objects in this `resource_name` for
this `parent_id`.
.. note::
This should take deleted objects into account.
:param str resource_name: the resource name.
:param str parent_id: the resource parent.
:returns: the latest timestamp of the resource.
:rtype: int
"""
raise NotImplementedError
def create(
self,
resource_name,
parent_id,
obj,
id_generator=None,
id_field=DEFAULT_ID_FIELD,
modified_field=DEFAULT_MODIFIED_FIELD,
auth=None,
):
"""Create the specified `obj` in this `resource_name` for this `parent_id`.
Assign the id to the object, using the attribute
:attr:`kinto.core.resource.model.Model.id_field`.
.. note::
This will update the resource timestamp.
:raises: :exc:`kinto.core.storage.exceptions.UnicityError`
:param str resource_name: the resource name.
:param str parent_id: the resource parent.
:param dict obj: the object to create.
:returns: the newly created object.
:rtype: dict
"""
raise NotImplementedError
def get(
self,
resource_name,
parent_id,
object_id,
id_field=DEFAULT_ID_FIELD,
modified_field=DEFAULT_MODIFIED_FIELD,
auth=None,
):
"""Retrieve the object with specified `object_id`, or raise error
if not found.
:raises: :exc:`kinto.core.storage.exceptions.ObjectNotFoundError`
:param str resource_name: the resource name.
:param str parent_id: the resource parent.
:param str object_id: unique identifier of the object
:returns: the stored object.
:rtype: dict
"""
raise NotImplementedError
def update(
self,
resource_name,
parent_id,
object_id,
obj,
id_field=DEFAULT_ID_FIELD,
modified_field=DEFAULT_MODIFIED_FIELD,
auth=None,
):
"""Overwrite the `obj` with the specified `object_id`.
If the specified id is not found, the object is created with the
specified id.
.. note::
This will update the resource timestamp.
:param str resource_name: the resource name.
:param str parent_id: the resource parent.
:param str object_id: unique identifier of the object
:param dict obj: the object to update or create.
:returns: the updated object.
:rtype: dict
"""
raise NotImplementedError
def delete(
self,
resource_name,
parent_id,
object_id,
id_field=DEFAULT_ID_FIELD,
with_deleted=True,
modified_field=DEFAULT_MODIFIED_FIELD,
deleted_field=DEFAULT_DELETED_FIELD,
auth=None,
last_modified=None,
):
"""Delete the object with specified `object_id`, and raise error
if not found.
Deleted objects must be removed from the database, but their ids and
timestamps of deletion must be tracked for synchronization purposes.
(See :meth:`kinto.core.storage.StorageBase.get_all`)
.. note::
This will update the resource timestamp.
:raises: :exc:`kinto.core.storage.exceptions.ObjectNotFoundError`
:param str resource_name: the resource name.
:param str parent_id: the resource parent.
:param str object_id: unique identifier of the object
:param bool with_deleted: track deleted object with a tombstone
:returns: the deleted object, with minimal set of attributes.
:rtype: dict
"""
raise NotImplementedError
def delete_all(
self,
resource_name,
parent_id,
filters=None,
sorting=None,
pagination_rules=None,
limit=None,
id_field=DEFAULT_ID_FIELD,
with_deleted=True,
modified_field=DEFAULT_MODIFIED_FIELD,
deleted_field=DEFAULT_DELETED_FIELD,
auth=None,
):
"""Delete all objects in this `resource_name` for this `parent_id`.
:param str resource_name: the resource name.
:param str parent_id: the resource parent.
:param filters: Optionnally filter the objects to delete.
:type filters: list of :class:`kinto.core.storage.Filter`
:param sorting: Optionnally sort the objects by attribute.
Each sort instruction in this list refers to a field and a
direction (negative means descending). All sort instructions are
cumulative.
:type sorting: list of :class:`kinto.core.storage.Sort`
:param pagination_rules: Optionnally paginate the deletion of objects.
This list of rules aims to reduce the set of objects to the current
page. A rule is a list of filters (see `filters` parameter),
and all rules are combined using *OR*.
:type pagination_rules: list of list of
:class:`kinto.core.storage.Filter`
:param int limit: Optionnally limit the number of objects to be
deleted.
:param bool with_deleted: track deleted objects with a tombstone
:returns: the list of deleted objects, with minimal set of attributes.
:rtype: list
"""
raise NotImplementedError
def purge_deleted(
self,
resource_name,
parent_id,
before=None,
id_field=DEFAULT_ID_FIELD,
modified_field=DEFAULT_MODIFIED_FIELD,
auth=None,
):
"""Delete all deleted object tombstones in this `resource_name`
for this `parent_id`.
:param str resource_name: the resource name.
:param str parent_id: the resource parent.
:param int before: Optionnal timestamp to limit deletion (exclusive)
:returns: The number of deleted objects.
:rtype: int
"""
raise NotImplementedError
@deprecate_kwargs({"collection_id": "resource_name"})
def get_all(self, *args, **kwargs):
    """Legacy method to support code that relied on the old API where the storage's
    get_all() would return a tuple of (<list of objects paginated>, <count of all>).

    Since then, we're being more explicit and expecting the client to deliberately
    decide if they need a paginated list or a count.

    This method exists solely to make the transition easier.
    """
    warnings.warn("Use either self.list_all() or self.count_all()", DeprecationWarning)
    list_ = self.list_all(*args, **kwargs)
    # count_all() does not accept pagination/sorting arguments; strip them
    # before forwarding the remaining filters.
    kwargs.pop("pagination_rules", None)
    kwargs.pop("limit", None)
    kwargs.pop("sorting", None)
    kwargs.pop("include_deleted", None)
    count = self.count_all(*args, **kwargs)
    return (list_, count)
def list_all(
self,
resource_name,
parent_id,
filters=None,
sorting=None,
pagination_rules=None,
limit=None,
include_deleted=False,
id_field=DEFAULT_ID_FIELD,
modified_field=DEFAULT_MODIFIED_FIELD,
deleted_field=DEFAULT_DELETED_FIELD,
auth=None,
):
"""Retrieve all objects in this `resource_name` for this `parent_id`.
:param str resource_name: the resource name.
:param str parent_id: the resource parent, possibly
containing a wildcard '*'. (This can happen when
implementing "administrator" operations on a Resource,
for example, like ``kinto.plugins.accounts``.)
:param filters: Optionally filter the objects by their attribute.
Each filter in this list is a tuple of a field, a value and a
comparison (see `kinto.core.utils.COMPARISON`). All filters
are combined using *AND*.
:type filters: list of :class:`kinto.core.storage.Filter`
:param sorting: Optionnally sort the objects by attribute.
Each sort instruction in this list refers to a field and a
direction (negative means descending). All sort instructions are
cumulative.
:type sorting: list of :class:`kinto.core.storage.Sort`
:param pagination_rules: Optionnally paginate the list of objects.
This list of rules aims to reduce the set of objects to the current
page. A rule is a list of filters (see `filters` parameter),
and all rules are combined using *OR*.
:type pagination_rules: list of list of
:class:`kinto.core.storage.Filter`
:param int limit: Optionnally limit the number of objects to be
retrieved.
:param bool include_deleted: Optionnally include the deleted objects
that match the filters.
:returns: the limited list of objects of
matching objects in the resource (deleted ones excluded).
:rtype: list
"""
raise NotImplementedError
def count_all(
self,
resource_name,
parent_id,
filters=None,
id_field=DEFAULT_ID_FIELD,
modified_field=DEFAULT_MODIFIED_FIELD,
deleted_field=DEFAULT_DELETED_FIELD,
auth=None,
):
"""Return a count of all objects in this `resource_name` for this `parent_id`.
:param str resource_name: the resource name.
:param str parent_id: the parent resource, possibly
containing a wildcard '*'. (This can happen when
implementing "administrator" operations on a UserResource,
for example.)
:param filters: Optionally filter the objects by their attribute.
Each filter in this list is a tuple of a field, a value and a
comparison (see `kinto.core.utils.COMPARISON`). All filters
are combined using *AND*.
:type filters: list of :class:`kinto.core.storage.Filter`
:returns: the total number of matching objects in the resource (deleted ones excluded).
:rtype: int
"""
raise NotImplementedError
def collection_timestamp(self, collection_id, parent_id, auth=None):
    """Deprecated alias of :meth:`resource_timestamp` (collection -> resource rename)."""
    message = "`collection_timestamp()` is deprecated, use `resource_timestamp()` instead."
    warnings.warn(message, DeprecationWarning)
    return self.resource_timestamp(resource_name=collection_id, parent_id=parent_id, auth=auth)
def heartbeat(backend):
    """Build a heartbeat ``ping(request)`` callable bound to ``backend``."""
    def ping(request):
        """Test that storage is operational.

        :param request: current request object
        :type request: :class:`~pyramid:pyramid.request.Request`
        :returns: ``True`` is everything is ok, ``False`` otherwise.
        :rtype: bool
        """
        try:
            auth = request.headers.get("Authorization")
            storage_kw = dict(
                resource_name=_HEARTBEAT_RESOURCE_NAME, parent_id=_HEART_PARENT_ID, auth=auth
            )
            if asbool(request.registry.settings.get("readonly")):
                # Do not try to write in readonly mode.
                # NOTE(review): get_all() is deprecated in this class, so each
                # readonly heartbeat emits a DeprecationWarning; consider
                # list_all() instead.
                backend.get_all(**storage_kw)
            else:
                # Randomly alternate between the delete/purge path and the
                # create path (~60% deletes, per _HEARTBEAT_DELETE_RATE).
                if random.SystemRandom().random() < _HEARTBEAT_DELETE_RATE:
                    backend.delete_all(**storage_kw)
                    backend.purge_deleted(**storage_kw)  # Kinto/kinto#985
                else:
                    backend.create(obj=_HEARTBEAT_OBJECT, **storage_kw)
            return True
        except Exception:
            # Any backend failure marks the heartbeat as unhealthy.
            logger.exception("Heartbeat Error")
            return False
    return ping
| 33.028708 | 99 | 0.640519 | import json
import logging
import random
import warnings
from collections import namedtuple
from pyramid.settings import asbool
import ujson
from kinto.core.decorators import deprecate_kwargs
from . import generators
class Missing:
pass
MISSING = Missing()
logger = logging.getLogger(__name__)
Filter = namedtuple("Filter", ["field", "value", "operator"])
Sort = namedtuple("Sort", ["field", "direction"])
DEFAULT_ID_FIELD = "id"
DEFAULT_MODIFIED_FIELD = "last_modified"
DEFAULT_DELETED_FIELD = "deleted"
_HEARTBEAT_DELETE_RATE = 0.6
_HEARTBEAT_RESOURCE_NAME = "__heartbeat__"
_HEART_PARENT_ID = _HEARTBEAT_RESOURCE_NAME
_HEARTBEAT_OBJECT = {"__heartbeat__": True}
class StorageBase:
id_generator = generators.UUID4()
def __init__(self, strict_json=True):
if strict_json:
self.json = json
else:
self.json = ujson
def initialize_schema(self, dry_run=False):
raise NotImplementedError
def flush(self, auth=None):
raise NotImplementedError
def resource_timestamp(self, resource_name, parent_id, auth=None):
raise NotImplementedError
def create(
self,
resource_name,
parent_id,
obj,
id_generator=None,
id_field=DEFAULT_ID_FIELD,
modified_field=DEFAULT_MODIFIED_FIELD,
auth=None,
):
raise NotImplementedError
def get(
self,
resource_name,
parent_id,
object_id,
id_field=DEFAULT_ID_FIELD,
modified_field=DEFAULT_MODIFIED_FIELD,
auth=None,
):
raise NotImplementedError
def update(
self,
resource_name,
parent_id,
object_id,
obj,
id_field=DEFAULT_ID_FIELD,
modified_field=DEFAULT_MODIFIED_FIELD,
auth=None,
):
raise NotImplementedError
def delete(
self,
resource_name,
parent_id,
object_id,
id_field=DEFAULT_ID_FIELD,
with_deleted=True,
modified_field=DEFAULT_MODIFIED_FIELD,
deleted_field=DEFAULT_DELETED_FIELD,
auth=None,
last_modified=None,
):
raise NotImplementedError
def delete_all(
self,
resource_name,
parent_id,
filters=None,
sorting=None,
pagination_rules=None,
limit=None,
id_field=DEFAULT_ID_FIELD,
with_deleted=True,
modified_field=DEFAULT_MODIFIED_FIELD,
deleted_field=DEFAULT_DELETED_FIELD,
auth=None,
):
raise NotImplementedError
def purge_deleted(
self,
resource_name,
parent_id,
before=None,
id_field=DEFAULT_ID_FIELD,
modified_field=DEFAULT_MODIFIED_FIELD,
auth=None,
):
raise NotImplementedError
@deprecate_kwargs({"collection_id": "resource_name"})
def get_all(self, *args, **kwargs):
warnings.warn("Use either self.list_all() or self.count_all()", DeprecationWarning)
list_ = self.list_all(*args, **kwargs)
kwargs.pop("pagination_rules", None)
kwargs.pop("limit", None)
kwargs.pop("sorting", None)
kwargs.pop("include_deleted", None)
count = self.count_all(*args, **kwargs)
return (list_, count)
def list_all(
self,
resource_name,
parent_id,
filters=None,
sorting=None,
pagination_rules=None,
limit=None,
include_deleted=False,
id_field=DEFAULT_ID_FIELD,
modified_field=DEFAULT_MODIFIED_FIELD,
deleted_field=DEFAULT_DELETED_FIELD,
auth=None,
):
raise NotImplementedError
def count_all(
self,
resource_name,
parent_id,
filters=None,
id_field=DEFAULT_ID_FIELD,
modified_field=DEFAULT_MODIFIED_FIELD,
deleted_field=DEFAULT_DELETED_FIELD,
auth=None,
):
raise NotImplementedError
def collection_timestamp(self, collection_id, parent_id, auth=None):
message = "`collection_timestamp()` is deprecated, use `resource_timestamp()` instead."
warnings.warn(message, DeprecationWarning)
return self.resource_timestamp(resource_name=collection_id, parent_id=parent_id, auth=auth)
def heartbeat(backend):
def ping(request):
try:
auth = request.headers.get("Authorization")
storage_kw = dict(
resource_name=_HEARTBEAT_RESOURCE_NAME, parent_id=_HEART_PARENT_ID, auth=auth
)
if asbool(request.registry.settings.get("readonly")):
backend.get_all(**storage_kw)
else:
if random.SystemRandom().random() < _HEARTBEAT_DELETE_RATE:
backend.delete_all(**storage_kw)
backend.purge_deleted(**storage_kw) else:
backend.create(obj=_HEARTBEAT_OBJECT, **storage_kw)
return True
except Exception:
logger.exception("Heartbeat Error")
return False
return ping
| true | true |
f715167091a8b5611e5d6929e5426cf12480693e | 2,883 | py | Python | coovie2/coovie2.py | deshi-basara/coovie2 | 07351aa9cc132d1bd95b02d37fc9230cc9f81b2c | [
"MIT"
] | null | null | null | coovie2/coovie2.py | deshi-basara/coovie2 | 07351aa9cc132d1bd95b02d37fc9230cc9f81b2c | [
"MIT"
] | null | null | null | coovie2/coovie2.py | deshi-basara/coovie2 | 07351aa9cc132d1bd95b02d37fc9230cc9f81b2c | [
"MIT"
] | null | null | null |
import os
import click
from movie import Movie
from scan import Scan
from helper import Helper
@click.command()
@click.option('--endings',
              default='mp4, mkv',
              help='File-endings that are accepted as valid movie-files. ' +
                   'Default: [.mkv, .mp4]'
              )
@click.option('--size_limit',
              default="1500",
              help='Smaller files are excluded from search (in MegaBytes). ' +
                   "Default: 1500")
@click.argument('search_path', required=True)
def main(endings, size_limit, search_path):
    """Scan SEARCH_PATH recursively for movie files, fetch their IMDb
    ratings, and print the movies sorted by rating (best first)."""
    # initiate global function variables
    movie_list = []
    longest_title = 0  # widest title seen, used for column alignment
    # initiate options & arguments from cli
    movie_endings = tuple(endings.split(", "))
    movie_size_limit = int(size_limit) * 1024 * 1024  # MegaBytes
    # initiate needed objects
    scanner = Scan(movie_endings, movie_size_limit)
    helper = Helper()
    # look for all available files inside directory recursively
    for root, subs, files in os.walk(search_path):
        # do available files match a movie-file?
        for file in files:
            # is movie file?
            bool_movie = scanner.is_movie(file)
            if not bool_movie:
                continue
            # is large enough?
            movie_path = os.path.join(root, file)
            movie_folder = os.path.basename(root)
            bool_large = scanner.is_large(movie_path)
            if not bool_large:
                continue
            # is movie file and large enough, try to extract a valid movie name
            # NOTE(review): presumably extract_file_data returns a sequence
            # (title, year, ...) with -1 marking a failed field -- confirm
            # against scan.Scan.
            extracted_data = scanner.extract_file_data(file, movie_folder)
            # if movie has valid data, create a new movie object
            if -1 in extracted_data:
                print("Problem with: " + extracted_data[0] + " " +
                      str(extracted_data[1]))
            else:
                # data valid, create object and append it
                movie_object = Movie(
                    extracted_data[0],
                    extracted_data[1],
                    movie_path,
                    root
                )
                movie_list.append(movie_object)
                # does the current movie have the longest title?
                if longest_title < len(movie_object.title):
                    longest_title = len(movie_object.title)
    result_str = 'Movies counted: {number}'.format(number=len(movie_list))
    print(result_str)
    # try to fetch imdb rating for each movie-object
    for movie in movie_list:
        movie.fetch_rating()
        # is current movie in top 250
        movie.imdb_top = helper.is_imdb_top(movie)
    # sort movies by their rating and print them
    print("")
    movie_list.sort(key=lambda x: x.rating, reverse=True)
    for movie in movie_list:
        movie.print_data(longest_title)
if __name__ == '__main__':
main()
| 32.761364 | 79 | 0.589317 |
import os
import click
from movie import Movie
from scan import Scan
from helper import Helper
@click.command()
@click.option('--endings',
default='mp4, mkv',
help='File-endings that are accepted as valid movie-files. ' +
'Default: [.mkv, .mp4]'
)
@click.option('--size_limit',
default="1500",
help='Smaller files are excluded from search (in MegaBytes). ' +
"Default: 1500")
@click.argument('search_path', required=True)
def main(endings, size_limit, search_path):
movie_list = []
longest_title = 0
movie_endings = tuple(endings.split(", "))
movie_size_limit = int(size_limit) * 1024 * 1024
scanner = Scan(movie_endings, movie_size_limit)
helper = Helper()
for root, subs, files in os.walk(search_path):
for file in files:
bool_movie = scanner.is_movie(file)
if not bool_movie:
continue
movie_path = os.path.join(root, file)
movie_folder = os.path.basename(root)
bool_large = scanner.is_large(movie_path)
if not bool_large:
continue
extracted_data = scanner.extract_file_data(file, movie_folder)
if -1 in extracted_data:
print("Problem with: " + extracted_data[0] + " " +
str(extracted_data[1]))
else:
movie_object = Movie(
extracted_data[0],
extracted_data[1],
movie_path,
root
)
movie_list.append(movie_object)
if longest_title < len(movie_object.title):
longest_title = len(movie_object.title)
result_str = 'Movies counted: {number}'.format(number=len(movie_list))
print(result_str)
for movie in movie_list:
movie.fetch_rating()
movie.imdb_top = helper.is_imdb_top(movie)
print("")
movie_list.sort(key=lambda x: x.rating, reverse=True)
for movie in movie_list:
movie.print_data(longest_title)
if __name__ == '__main__':
main()
| true | true |
f715167252441cd29ac5fc75d9b88326376c06e6 | 1,381 | py | Python | strategic.py | rayanf/Liars-Dice | bf68d08eb2d48bbceca4c79a91c3b88054143305 | [
"MIT"
] | 1 | 2021-11-21T18:10:15.000Z | 2021-11-21T18:10:15.000Z | strategic.py | rayanf/Liars-Dice | bf68d08eb2d48bbceca4c79a91c3b88054143305 | [
"MIT"
] | null | null | null | strategic.py | rayanf/Liars-Dice | bf68d08eb2d48bbceca4c79a91c3b88054143305 | [
"MIT"
] | null | null | null | import math
class Player:
    """Liar's-dice bidding strategy based only on this player's own dice."""

    def __init__(self):
        """No state is held until initcards() is called."""
        pass

    def initcards(self, num1, num2, num3, num4, num_all):
        """Record how many of each face (1-4) this player holds.

        :param num1: count of face 1 in this player's hand (num2..num4 likewise)
        :param num_all: total number of dice in play across all players
        """
        self.numbers = [num1, num2, num3, num4]
        self.num_all = num_all
        # Most common face in our own hand (ties broken by the lowest face).
        self.common = self.numbers.index(max(self.numbers)) + 1

    def guess(self):
        """Estimate the total count of our most common face.

        Expected copies among all dice are num_all / 4, rounded to the
        nearest integer (ties round up), plus the copies we hold ourselves.
        """
        expected = self.num_all / 4
        rounded = math.floor(expected)
        if abs(math.ceil(expected) - expected) <= abs(rounded - expected):
            rounded = math.ceil(expected)
        return {self.common: rounded + max(self.numbers)}

    def play(self):
        """Return this player's bid as a {face: count} mapping."""
        return self.guess()
def play_one_round(cart_list, num_all):
    """Run one guessing round for the given hand and print the bid.

    :param cart_list: this player's card/die values (ints 1-4)
    :param num_all: total number of cards/dice in play
    """
    player = Player()
    player.initcards(cart_list.count(1),
                     cart_list.count(2),
                     cart_list.count(3),
                     cart_list.count(4),
                     num_all)
    try:
        player_guess = player.play()
        print(player_guess)
    # NOTE(review): this bare `except` swallows every error (including
    # KeyboardInterrupt) and retries via unbounded recursion; consider
    # catching a specific exception and looping instead.
    except:
        print('something wrong please try again')
        l, num_all = get_input()
        play_one_round(l, num_all)
def get_input():
    """Prompt for the player's cards and the total card count.

    Returns ``(cards, total)`` where ``cards`` is a list of ints parsed
    from the whitespace-separated first prompt.
    """
    tokens = input('list of my cart: ').split()
    total = int(input('number of all cart: '))
    cards = [int(token) for token in tokens]
    return cards, total
# Script entry point: read the hand interactively and play a single round.
if __name__ == '__main__':
    l, num_all = get_input()
    play_one_round(l,num_all)
| 23.016667 | 79 | 0.57929 | import math
class Player:
def __init__(self):
pass
def initcards(self,num1,num2,num3,num4,num_all):
self.numbers = [num1,num2,num3,num4]
self.num_all = num_all
self.common = self.numbers.index(max(self.numbers)) + 1
def guess(self):
prob = self.num_all / 4
ceil = math.ceil(prob)
floor = math.floor(prob)
prob = floor if abs(ceil - prob)> abs(floor - prob) else ceil
return {self.common :prob + max(self.numbers)}
def play(self):
guess_ansewr = self.guess()
return(guess_ansewr)
def play_one_round(cart_list,num_all):
player = Player()
player.initcards(cart_list.count(1),
cart_list.count(2),
cart_list.count(3),
cart_list.count(4),
num_all)
try:
player_guess = player.play()
print(player_guess)
except:
print('something wrong please try again')
l, num_all = get_input()
play_one_round(l,num_all)
def get_input():
l = input('list of my cart: ').split()
num_all = int(input('number of all cart: '))
l = list(map(int,l))
return l,num_all
if __name__ == '__main__':
l, num_all = get_input()
play_one_round(l,num_all)
| true | true |
f715167bc39c8d99f903da2fe8c83bd99f51806e | 20,547 | py | Python | purity_fb/purity_fb_1dot3/apis/file_systems_api.py | mabdelhafez/purity_fb_python_client | a9856875b3df43b4302a2e4addd1a6b71f51f5ce | [
"Apache-2.0"
] | null | null | null | purity_fb/purity_fb_1dot3/apis/file_systems_api.py | mabdelhafez/purity_fb_python_client | a9856875b3df43b4302a2e4addd1a6b71f51f5ce | [
"Apache-2.0"
] | null | null | null | purity_fb/purity_fb_1dot3/apis/file_systems_api.py | mabdelhafez/purity_fb_python_client | a9856875b3df43b4302a2e4addd1a6b71f51f5ce | [
"Apache-2.0"
] | null | null | null | # coding: utf-8
"""
Pure Storage FlashBlade REST 1.3 Python SDK
Pure Storage FlashBlade REST 1.3 Python SDK, developed by [Pure Storage, Inc](http://www.purestorage.com/). Documentations can be found at [purity-fb.readthedocs.io](http://purity-fb.readthedocs.io/).
OpenAPI spec version: 1.3
Contact: info@purestorage.com
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import sys
import os
import re
# python 2 and python 3 compatibility library
from six import iteritems
from ..configuration import Configuration
from ..api_client import ApiClient
class FileSystemsApi(object):
    """File-system operations of the FlashBlade REST 1.3 API.

    NOTE: This class mirrors the swagger-codegen generated client; the
    request plumbing shared by all endpoints is factored into ``_invoke``.
    Every public method is synchronous by default — pass a ``callback``
    keyword argument to receive the request thread instead.
    """

    # Keyword arguments accepted by every endpoint in addition to its own.
    _CONTROL_PARAMS = ('callback', '_return_http_data_only',
                       '_preload_content', '_request_timeout')

    # All optional query parameters understood by ``list_file_systems``,
    # in the order they are appended to the query string.
    _LIST_PARAMS = ('names', 'filter', 'sort', 'start', 'limit',
                    'token', 'total', 'total_only')

    def __init__(self, api_client=None):
        config = Configuration()
        if api_client:
            self.api_client = api_client
        else:
            # Fall back to the globally configured client, creating it lazily.
            if not config.api_client:
                config.api_client = ApiClient()
            self.api_client = config.api_client

    def _invoke(self, method, verb, response_type, params, accepted,
                required=(), query_names=(), csv_names=(), body_name=None):
        """Validate arguments and issue the ``/1.3/file-systems`` request.

        :param method: public method name, used in error messages
        :param verb: HTTP verb (``GET``/``POST``/``PATCH``/``DELETE``)
        :param response_type: deserialization target or ``None``
        :param params: positional arguments plus a ``'kwargs'`` entry
            holding the caller's keyword arguments
        :param accepted: keyword-argument names this endpoint understands
        :param required: names that must be present and non-``None``
        :param query_names: names forwarded as query parameters when set
        :param csv_names: query names serialized as comma-separated lists
        :param body_name: name of the parameter sent as the request body
        :raises TypeError: on an unexpected keyword argument
        :raises ValueError: when a required parameter is missing/None
        """
        keyword_args = params.pop('kwargs')
        for key, val in keyword_args.items():
            if key not in accepted and key not in self._CONTROL_PARAMS:
                raise TypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method %s" % (key, method))
            params[key] = val
        for name in required:
            if params.get(name) is None:
                raise ValueError(
                    "Missing the required parameter `%s` when calling "
                    "`%s`" % (name, method))
        query_params = []
        collection_formats = {}
        for name in query_names:
            if name in params:
                query_params.append((name, params[name]))
                if name in csv_names:
                    collection_formats[name] = 'csv'
        header_params = {
            'Accept': self.api_client.select_header_accept(
                ['application/json']),
            'Content-Type': self.api_client.select_header_content_type(
                ['application/json']),
        }
        body_params = params.get(body_name) if body_name else None
        return self.api_client.call_api(
            '/1.3/file-systems', verb,
            {},
            query_params,
            header_params,
            body=body_params,
            post_params=[],
            files={},
            response_type=response_type,
            auth_settings=['AuthTokenHeader'],
            callback=params.get('callback'),
            _return_http_data_only=params.get('_return_http_data_only'),
            _preload_content=params.get('_preload_content', True),
            _request_timeout=params.get('_request_timeout'),
            collection_formats=collection_formats)

    def create_file_systems(self, file_system, **kwargs):
        """Create a new file system.

        :param FileSystem file_system: the attribute map used to create
            the file system (required)
        :param callback: optional function invoked with the response;
            makes the call asynchronous and returns the request thread
        :return: FileSystemResponse
        """
        kwargs['_return_http_data_only'] = True
        return self.create_file_systems_with_http_info(file_system, **kwargs)

    def create_file_systems_with_http_info(self, file_system, **kwargs):
        """Create a new file system, returning the full HTTP response info."""
        return self._invoke(
            'create_file_systems', 'POST', 'FileSystemResponse',
            {'file_system': file_system, 'kwargs': kwargs},
            accepted=('file_system',),
            required=('file_system',),
            body_name='file_system')

    def delete_file_systems(self, name, **kwargs):
        """Delete a file system.

        :param str name: name of the file system to be deleted (required)
        :param callback: optional function invoked with the response;
            makes the call asynchronous and returns the request thread
        :return: None
        """
        kwargs['_return_http_data_only'] = True
        return self.delete_file_systems_with_http_info(name, **kwargs)

    def delete_file_systems_with_http_info(self, name, **kwargs):
        """Delete a file system, returning the full HTTP response info."""
        return self._invoke(
            'delete_file_systems', 'DELETE', None,
            {'name': name, 'kwargs': kwargs},
            accepted=('name',),
            required=('name',),
            query_names=('name',))

    def list_file_systems(self, **kwargs):
        """List file systems.

        :param list[str] names: a comma-separated list of resource names
        :param str filter: the filter to be used for query
        :param str sort: the way to order the results
        :param int start: start offset
        :param int limit: limit, should be >= 0
        :param str token: pagination token
        :param bool total: return a total object alongside the results
        :param bool total_only: return only the total object
        :param callback: optional function invoked with the response;
            makes the call asynchronous and returns the request thread
        :return: FileSystemResponse
        """
        kwargs['_return_http_data_only'] = True
        return self.list_file_systems_with_http_info(**kwargs)

    def list_file_systems_with_http_info(self, **kwargs):
        """List file systems, returning the full HTTP response info."""
        return self._invoke(
            'list_file_systems', 'GET', 'FileSystemResponse',
            {'kwargs': kwargs},
            accepted=self._LIST_PARAMS,
            query_names=self._LIST_PARAMS,
            csv_names=('names',))

    def update_file_systems(self, name, attributes, **kwargs):
        """Update an existing file system.

        :param str name: the name of the file system to be updated (required)
        :param FileSystem attributes: the new attributes; only modifiable
            fields may be used (required)
        :param callback: optional function invoked with the response;
            makes the call asynchronous and returns the request thread
        :return: FileSystemResponse
        """
        kwargs['_return_http_data_only'] = True
        return self.update_file_systems_with_http_info(
            name, attributes, **kwargs)

    def update_file_systems_with_http_info(self, name, attributes, **kwargs):
        """Update an existing file system, returning full HTTP response info."""
        return self._invoke(
            'update_file_systems', 'PATCH', 'FileSystemResponse',
            {'name': name, 'attributes': attributes, 'kwargs': kwargs},
            accepted=('name', 'attributes'),
            required=('name', 'attributes'),
            query_names=('name',),
            body_name='attributes')
| 41.847251 | 204 | 0.575607 |
from __future__ import absolute_import
import sys
import os
import re
from six import iteritems
from ..configuration import Configuration
from ..api_client import ApiClient
class FileSystemsApi(object):
def __init__(self, api_client=None):
config = Configuration()
if api_client:
self.api_client = api_client
else:
if not config.api_client:
config.api_client = ApiClient()
self.api_client = config.api_client
def create_file_systems(self, file_system, **kwargs):
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.create_file_systems_with_http_info(file_system, **kwargs)
else:
(data) = self.create_file_systems_with_http_info(file_system, **kwargs)
return data
def create_file_systems_with_http_info(self, file_system, **kwargs):
all_params = ['file_system']
all_params.append('callback')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method create_file_systems" % key
)
params[key] = val
del params['kwargs']
if ('file_system' not in params) or (params['file_system'] is None):
raise ValueError("Missing the required parameter `file_system` when calling `create_file_systems`")
collection_formats = {}
path_params = {}
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'file_system' in params:
body_params = params['file_system']
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json'])
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['application/json'])
auth_settings = ['AuthTokenHeader']
return self.api_client.call_api('/1.3/file-systems', 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='FileSystemResponse',
auth_settings=auth_settings,
callback=params.get('callback'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def delete_file_systems(self, name, **kwargs):
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.delete_file_systems_with_http_info(name, **kwargs)
else:
(data) = self.delete_file_systems_with_http_info(name, **kwargs)
return data
def delete_file_systems_with_http_info(self, name, **kwargs):
all_params = ['name']
all_params.append('callback')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method delete_file_systems" % key
)
params[key] = val
del params['kwargs']
if ('name' not in params) or (params['name'] is None):
raise ValueError("Missing the required parameter `name` when calling `delete_file_systems`")
collection_formats = {}
path_params = {}
query_params = []
if 'name' in params:
query_params.append(('name', params['name']))
header_params = {}
form_params = []
local_var_files = {}
body_params = None
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json'])
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['application/json'])
auth_settings = ['AuthTokenHeader']
return self.api_client.call_api('/1.3/file-systems', 'DELETE',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type=None,
auth_settings=auth_settings,
callback=params.get('callback'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def list_file_systems(self, **kwargs):
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.list_file_systems_with_http_info(**kwargs)
else:
(data) = self.list_file_systems_with_http_info(**kwargs)
return data
def list_file_systems_with_http_info(self, **kwargs):
all_params = ['names', 'filter', 'sort', 'start', 'limit', 'token', 'total', 'total_only']
all_params.append('callback')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method list_file_systems" % key
)
params[key] = val
del params['kwargs']
collection_formats = {}
path_params = {}
query_params = []
if 'names' in params:
query_params.append(('names', params['names']))
collection_formats['names'] = 'csv'
if 'filter' in params:
query_params.append(('filter', params['filter']))
if 'sort' in params:
query_params.append(('sort', params['sort']))
if 'start' in params:
query_params.append(('start', params['start']))
if 'limit' in params:
query_params.append(('limit', params['limit']))
if 'token' in params:
query_params.append(('token', params['token']))
if 'total' in params:
query_params.append(('total', params['total']))
if 'total_only' in params:
query_params.append(('total_only', params['total_only']))
header_params = {}
form_params = []
local_var_files = {}
body_params = None
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json'])
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['application/json'])
auth_settings = ['AuthTokenHeader']
return self.api_client.call_api('/1.3/file-systems', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='FileSystemResponse',
auth_settings=auth_settings,
callback=params.get('callback'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def update_file_systems(self, name, attributes, **kwargs):
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.update_file_systems_with_http_info(name, attributes, **kwargs)
else:
(data) = self.update_file_systems_with_http_info(name, attributes, **kwargs)
return data
def update_file_systems_with_http_info(self, name, attributes, **kwargs):
all_params = ['name', 'attributes']
all_params.append('callback')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method update_file_systems" % key
)
params[key] = val
del params['kwargs']
if ('name' not in params) or (params['name'] is None):
raise ValueError("Missing the required parameter `name` when calling `update_file_systems`")
if ('attributes' not in params) or (params['attributes'] is None):
raise ValueError("Missing the required parameter `attributes` when calling `update_file_systems`")
collection_formats = {}
path_params = {}
query_params = []
if 'name' in params:
query_params.append(('name', params['name']))
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'attributes' in params:
body_params = params['attributes']
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json'])
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['application/json'])
auth_settings = ['AuthTokenHeader']
return self.api_client.call_api('/1.3/file-systems', 'PATCH',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='FileSystemResponse',
auth_settings=auth_settings,
callback=params.get('callback'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
| true | true |
f71516f50db00136d8aa0339afcc29694d8dbf29 | 131 | py | Python | Exercises/Exercises Chapter 06/Question 01.py | tonysulfaro/CSE-231 | 0e3ff5422fe42624a90a17d7f33174346662a6fc | [
"MIT"
] | 2 | 2021-09-23T19:17:24.000Z | 2021-11-29T09:03:56.000Z | Exercises/Exercises Chapter 06/Question 01.py | tonysulfaro/CSE-231 | 0e3ff5422fe42624a90a17d7f33174346662a6fc | [
"MIT"
] | null | null | null | Exercises/Exercises Chapter 06/Question 01.py | tonysulfaro/CSE-231 | 0e3ff5422fe42624a90a17d7f33174346662a6fc | [
"MIT"
] | 1 | 2020-10-25T13:03:18.000Z | 2020-10-25T13:03:18.000Z | fp = open('test.txt')
output = ""
for line in fp:
line = line.strip()
line = line.replace(" ", "")
output+=line
print(output) | 18.714286 | 30 | 0.603053 | fp = open('test.txt')
output = ""
for line in fp:
line = line.strip()
line = line.replace(" ", "")
output+=line
print(output) | true | true |
f71517a589dede7e1d422b652aa255171a8a9b17 | 27,911 | py | Python | Lib/site-packages/plumber/tests/test_plumber.py | Dr8Ninja/ShareSpace | 7b445783a313cbdebb1938e824e98370a42def5f | [
"MIT"
] | 1 | 2022-02-10T03:44:55.000Z | 2022-02-10T03:44:55.000Z | Lib/site-packages/plumber/tests/test_plumber.py | Dr8Ninja/ShareSpace | 7b445783a313cbdebb1938e824e98370a42def5f | [
"MIT"
] | 2 | 2022-01-29T15:29:19.000Z | 2022-02-13T20:28:17.000Z | Lib/site-packages/plumber/tests/test_plumber.py | Dr8Ninja/ShareSpace | 7b445783a313cbdebb1938e824e98370a42def5f | [
"MIT"
] | null | null | null | from plumber import Behavior
from plumber import PlumbingCollision
from plumber import default
from plumber import finalize
from plumber import override
from plumber import plumb
from plumber import plumber
from plumber import plumbifexists
from plumber import plumbing
from plumber.behavior import behaviormetaclass
from plumber.compat import add_metaclass
from plumber.instructions import Instruction
from plumber.instructions import _implements
from plumber.instructions import payload
from plumber.instructions import plumb_str
from plumber.plumber import searchnameinbases
from zope.interface import Interface
from zope.interface import implementer
from zope.interface.interface import InterfaceClass
import inspect
import sys
if sys.version_info < (2, 7): # pragma: no cover
import unittest2 as unittest
else: # pragma: no cover
import unittest
class TestInstructions(unittest.TestCase):
def test_payload(self):
class Foo:
pass
self.assertTrue(payload(Instruction(Instruction(Foo))) is Foo)
def test_plumb_str(self):
leftdoc = """Left head
__plbnext__
Left tail
"""
rightdoc = """Right head
__plbnext__
Right tail
"""
self.assertEqual(plumb_str(leftdoc, rightdoc).split('\n'), [
'Left head',
'',
' Right head',
'',
' __plbnext__',
'',
' Right tail',
'',
' Left tail',
' '
])
leftdoc = """Left tail
"""
rightdoc = """Right tail
"""
self.assertEqual(plumb_str(leftdoc, rightdoc).split('\n'), [
'Right tail',
'',
'Left tail',
' '
])
class A:
pass
self.assertTrue(plumb_str(A, None) is A)
self.assertTrue(plumb_str(None, A) is A)
self.assertTrue(plumb_str(None, None) is None)
def test_instruction(self):
class Foo:
pass
self.assertTrue(Instruction(Foo).item is Foo)
self.assertTrue(Instruction(Foo).__name__ is None)
self.assertTrue(Instruction(Foo, name='foo').__name__ == 'foo')
self.assertRaises(
NotImplementedError,
lambda: Instruction(None) + 1
)
self.assertRaises(
NotImplementedError,
lambda: Instruction(None)(None)
)
def test_default(self):
# First default wins from left to right
def1 = default(1)
self.assertTrue(def1 + def1 is def1)
def2 = default(2)
self.assertTrue(def1 + def2 is def1)
self.assertTrue(def2 + def1 is def2)
# Override wins over default
ext3 = override(3)
self.assertTrue(def1 + ext3 is ext3)
# Finalize wins over default
fin4 = finalize(4)
self.assertTrue(def1 + fin4 is fin4)
# Adding with something else than default/override, raises
# ``PlumbingCollision``
err = None
try:
def1 + Instruction('foo')
except PlumbingCollision as e:
err = e
finally:
self.assertEqual(err.left.__class__.__name__, 'default')
self.assertEqual(err.left.payload, 1)
self.assertEqual(err.right.__class__.__name__, 'Instruction')
self.assertEqual(err.right.payload, 'foo')
def test_override(self):
# First override wins against following equal overrides and arbitrary
# defaults
ext1 = override(1)
self.assertTrue(ext1 + ext1 is ext1)
self.assertTrue(ext1 + override(1) is ext1)
self.assertTrue(ext1 + override(2) is ext1)
self.assertTrue(ext1 + default(2) is ext1)
fin3 = finalize(3)
self.assertTrue(ext1 + fin3 is fin3)
# Everything except default/override collides
err = None
try:
ext1 + Instruction(1)
except PlumbingCollision as e:
err = e
finally:
self.assertEqual(err.left.__class__.__name__, 'override')
self.assertEqual(err.left.payload, 1)
self.assertEqual(err.right.__class__.__name__, 'Instruction')
self.assertEqual(err.right.payload, 1)
def test_finalize(self):
# First override wins against following equal overrides and arbitrary
# defaults
fin1 = finalize(1)
self.assertTrue(fin1 + fin1 is fin1)
self.assertTrue(fin1 + finalize(1) is fin1)
self.assertTrue(fin1 + default(2) is fin1)
self.assertTrue(fin1 + override(2) is fin1)
# Two unequal finalize collide
err = None
try:
fin1 + finalize(2)
except PlumbingCollision as e:
err = e
finally:
self.assertEqual(err.left.__class__.__name__, 'finalize')
self.assertEqual(err.left.payload, 1)
self.assertEqual(err.right.__class__.__name__, 'finalize')
self.assertEqual(err.right.payload, 2)
# Everything except default/override collides
try:
fin1 + Instruction(1)
except PlumbingCollision as e:
err = e
finally:
self.assertEqual(err.left.__class__.__name__, 'finalize')
self.assertEqual(err.left.payload, 1)
self.assertEqual(err.right.__class__.__name__, 'Instruction')
self.assertEqual(err.right.payload, 1)
def test_plumb(self):
plb1 = plumb(1)
self.assertTrue(plb1 + plumb(1) is plb1)
err = None
try:
plb1 + Instruction(1)
except PlumbingCollision as e:
err = e
finally:
self.assertEqual(err.left.__class__.__name__, 'plumb')
self.assertEqual(err.left.payload, 1)
self.assertEqual(err.right.__class__.__name__, 'Instruction')
self.assertEqual(err.right.payload, 1)
try:
func_a = lambda x: None
prop_b = property(lambda x: None)
plumb(func_a) + plumb(prop_b)
except PlumbingCollision as e:
err = e
finally:
self.assertEqual(err.left.__class__.__name__, 'plumb')
self.assertEqual(err.left.payload, func_a)
self.assertEqual(err.right.__class__.__name__, 'plumb')
self.assertEqual(err.right.payload, prop_b)
try:
plumb(1) + plumb(2)
except PlumbingCollision as e:
err = e
finally:
self.assertEqual(err.left.__class__.__name__, 'plumb')
self.assertEqual(err.left.payload, 1)
self.assertEqual(err.right.__class__.__name__, 'plumb')
self.assertEqual(err.right.payload, 2)
    def test_implements(self):
        """``_implements`` instructions compare by their (sorted) interface
        name tuples and merge into the union; combining with any other
        instruction kind collides.
        """
        # classImplements interfaces
        foo = _implements(('foo',))
        self.assertTrue(foo == foo)
        self.assertTrue(foo + foo is foo)
        self.assertTrue(foo == _implements(('foo',)))
        self.assertTrue(foo != _implements(('bar',)))
        # equality ignores the order of interface names
        self.assertTrue(
            _implements(('foo', 'bar')) == _implements(('bar', 'foo'))
        )
        self.assertTrue(foo + _implements(('foo',)) is foo)
        bar = _implements(('bar',))
        foobar = foo + bar
        # the merged instruction carries the sorted union of both payloads
        self.assertEqual(foobar.__class__.__name__, '_implements')
        self.assertEqual(foobar.__name__, '__interfaces__')
        self.assertEqual(foobar.payload, ('bar', 'foo'))
        self.assertTrue(foo + bar == bar + foo)
        err = None
        try:
            foo + Instruction("bar")
        except PlumbingCollision as e:
            err = e
        finally:
            self.assertEqual(err.left.__class__.__name__, '_implements')
            self.assertEqual(err.left.__name__, '__interfaces__')
            self.assertEqual(err.left.payload, ('foo',))
            self.assertEqual(err.right.__class__.__name__, 'Instruction')
            self.assertEqual(err.right.payload, 'bar')
class TestBehavior(unittest.TestCase):
    """Tests for ``behaviormetaclass``."""

    def test_behaviormetaclass(self):
        """Only ``Behavior`` subclasses receive ``__plumbing_instructions__``."""
        # A plain class passes through the metaclass untouched.
        @add_metaclass(behaviormetaclass)
        class Plain(object):
            pass

        marker = getattr(Plain, '__plumbing_instructions__', 'No behavior')
        self.assertEqual(marker, 'No behavior')

        # A Behavior subclass gets equipped with the instructions attribute.
        @add_metaclass(behaviormetaclass)
        class WithBehavior(Behavior):
            pass

        marker = getattr(WithBehavior, '__plumbing_instructions__', None)
        self.assertEqual(marker and 'Behavior', 'Behavior')
class TestPlumber(unittest.TestCase):
    """Tests for the ``searchnameinbases`` helper."""

    def test_searchnameinbases(self):
        """Name lookup walks the entire chain of base classes."""
        class Root(object):
            foo = 1

        class Leaf(Root):
            pass

        # 'foo' is found via inheritance from Root; 'bar' exists nowhere.
        self.assertTrue(searchnameinbases('foo', (Leaf,)))
        self.assertFalse(searchnameinbases('bar', (Leaf,)))
class TestGlobalMetaclass(unittest.TestCase):
    """Python-2-only tests for a module-level ``__metaclass__ = plumber``
    declaration (see ``plumber.tests.globalmetaclass``).
    """

    @unittest.skipIf(
        sys.version_info[0] >= 3,
        '__metaclass__ attribute on module leven only works in python 2')
    def test_global_metaclass(self):
        # imported lazily so the py2-only module is not touched on py3
        from plumber.tests import globalmetaclass as gm
        # A zope.interface.Interface is not affected by the global
        # ``__metaclass__``.
        self.assertEqual(gm.IBehavior1.__class__, InterfaceClass)
        # A global meta-class declaration makes all classes at least new-style
        # classes, even when not subclassing subclasses
        self.assertEqual(gm.Foo.__class__, plumber)
        self.assertTrue(issubclass(gm.Foo, object))
        # If subclassing object, the global metaclass declaration is ignored::
        self.assertEqual(gm.ClassMaybeUsingAPlumbing.__class__, type)
        self.assertEqual(gm.ClassReallyUsingAPlumbing.__class__, plumber)
        self.assertTrue(issubclass(gm.ClassReallyUsingAPlumbing, object))
        self.assertTrue(
            gm.IBehavior1.implementedBy(gm.ClassReallyUsingAPlumbing)
        )
        # same expectations for the backward-compatibility spelling
        self.assertEqual(gm.BCClassReallyUsingAPlumbing.__class__, plumber)
        self.assertTrue(issubclass(gm.BCClassReallyUsingAPlumbing, object))
        self.assertTrue(
            gm.IBehavior1.implementedBy(gm.BCClassReallyUsingAPlumbing)
        )
class TestMetaclassHooks(unittest.TestCase):
    """Tests for ``plumber.metaclasshook`` registration and dispatch."""

    def test_metaclasshook(self):
        """A registered hook runs for every plumbing class and may mutate it;
        here it only acts on classes implementing ``IBehaviorInterface``.
        """
        class IBehaviorInterface(Interface):
            pass
        # registering via the decorator adds the hook to the global registry
        @plumber.metaclasshook
        def test_metclass_hook(cls, name, bases, dct):
            if not IBehaviorInterface.implementedBy(cls):
                return
            cls.hooked = True
        self.assertTrue(test_metclass_hook in plumber.__metaclass_hooks__)
        @implementer(IBehaviorInterface)
        class MetaclassConsideredBehavior(Behavior):
            pass
        @plumbing(MetaclassConsideredBehavior)
        class Plumbing(object):
            pass
        # the hook fired because the behavior provides the interface
        self.assertTrue(Plumbing.hooked)
        class BehaviorIgnoredByMetaclassHook(Behavior):
            pass
        @plumbing(BehaviorIgnoredByMetaclassHook)
        class Plumbing2(object):
            pass
        # hook returned early, so 'hooked' was never set on Plumbing2
        self.assertRaises(AttributeError, lambda: Plumbing2.hooked)
        # unregister to avoid leaking state into other tests
        plumber.__metaclass_hooks__.remove(test_metclass_hook)
class TestPlumberBasics(unittest.TestCase):
    """End-to-end smoke tests for the ``plumbing`` decorator."""

    def test_basics(self):
        """``default`` attributes/methods/properties from behaviors end up on
        the plumbing class, subclassing works normally, and the class records
        its ``__plumbing_stacks__`` bookkeeping.
        """
        class Behavior1(Behavior):
            a = default(True)
            @default
            def foo(self):
                return 42
        class Behavior2(Behavior):
            @default
            @property
            def bar(self):
                return 17
        Base = dict
        @plumbing(Behavior1, Behavior2)
        class Plumbing(Base):
            def foobar(self):
                return 5
        plb = Plumbing()
        self.assertTrue(plb.a)
        self.assertEqual(plb.foo(), 42)
        self.assertEqual(plb.bar, 17)
        self.assertEqual(plb.foobar(), 5)
        # dict base class functionality is preserved
        plb['a'] = 1
        self.assertEqual(plb['a'], 1)
        # ordinary subclassing of a plumbing class works as usual
        class Sub(Plumbing):
            a = 'Sub'
        self.assertEqual(Sub.a, 'Sub')
        self.assertEqual(Sub().foo(), 42)
        self.assertEqual(Sub().bar, 17)
        self.assertEqual(Sub().foobar(), 5)
        # inspect the bookkeeping the plumber left on the class
        stacks = Plumbing.__plumbing_stacks__
        self.assertEqual(len(stacks['history']), 5)
        stages = stacks['stages']
        self.assertEqual(sorted(list(stages.keys())), ['stage1', 'stage2'])
        stage_1 = stages['stage1']
        self.assertEqual(sorted(list(stage_1.keys())), ['a', 'bar', 'foo'])
        stage_2 = stages['stage2']
        self.assertEqual(sorted(list(stage_2.keys())), ['__interfaces__'])

    @unittest.skipIf(
        sys.version_info[0] >= 3,
        '__metaclass__ property only works in python 2')
    def test_bc_plumbing_py2(self):
        """Python 2 backward-compat spelling via ``__metaclass__`` and
        ``__plumbing__`` class attributes.
        """
        class Behavior1(Behavior):
            a = default(True)
        class BCPlumbing(object):
            __metaclass__ = plumber
            __plumbing__ = Behavior1
        plb = BCPlumbing()
        self.assertTrue(plb.a)
class TestPlumberStage1(unittest.TestCase):
    """Stage-1 resolution: precedence of ``finalize`` > ``override`` >
    ``default`` instructions, plus collision and inheritance behavior.
    """

    def test_finalize_instruction(self):
        """``finalize`` attributes from behaviors land on the plumbing class
        without disturbing attributes owned by the class or its base.
        """
        class Behavior1(Behavior):
            N = finalize('Behavior1')
        class Behavior2(Behavior):
            M = finalize('Behavior2')
        class Base(object):
            K = 'Base'
        @plumbing(Behavior1, Behavior2)
        class Plumbing(Base):
            L = 'Plumbing'
        res = list()
        for x in ['K', 'L', 'M', 'N']:
            res.append("%s from %s" % (x, getattr(Plumbing, x)))
        self.assertEqual(res, [
            'K from Base',
            'L from Plumbing',
            'M from Behavior2',
            'N from Behavior1',
        ])

    def test_finalize_collisions(self):
        """``finalize`` collides with attributes declared directly on the
        plumbing class and with another behavior's ``finalize``.
        """
        err = None
        # collision: finalize vs. attribute on the plumbing class itself
        class Behavior1(Behavior):
            O = finalize(False)
        try:
            @plumbing(Behavior1)
            class Plumbing(object):
                O = True
        except PlumbingCollision as e:
            err = e
        finally:
            self.assertEqual(err.left, 'Plumbing class')
            self.assertEqual(err.right.__parent__.__name__, 'Behavior1')
            self.assertEqual(err.right.__class__.__name__, 'finalize')
            self.assertEqual(err.right.__name__, 'O')
            self.assertFalse(err.right.payload)
        # same collision with a different attribute name
        class Behavior2(Behavior):
            P = finalize(False)
        try:
            @plumbing(Behavior2)
            class Plumbing(object):
                P = True
        except PlumbingCollision as e:
            err = e
        finally:
            self.assertEqual(err.left, 'Plumbing class')
            self.assertEqual(err.right.__parent__.__name__, 'Behavior2')
            self.assertEqual(err.right.__class__.__name__, 'finalize')
            self.assertEqual(err.right.__name__, 'P')
            self.assertFalse(err.right.payload)
        # collision: finalize vs. finalize from two different behaviors
        class Behavior3(Behavior):
            Q = finalize(False)
        class Behavior4(Behavior):
            Q = finalize(True)
        try:
            @plumbing(Behavior3, Behavior4)
            class Plumbing(object):
                pass
        except PlumbingCollision as e:
            err = e
        finally:
            self.assertEqual(err.left.__parent__.__name__, 'Behavior3')
            self.assertEqual(err.left.__class__.__name__, 'finalize')
            self.assertEqual(err.left.__name__, 'Q')
            self.assertFalse(err.left.payload)
            self.assertEqual(err.right.__parent__.__name__, 'Behavior4')
            self.assertEqual(err.right.__class__.__name__, 'finalize')
            self.assertEqual(err.right.__name__, 'Q')
            self.assertTrue(err.right.payload)

    def test_override_instruction(self):
        """``override`` beats base-class attributes but not attributes on the
        plumbing class itself; earlier behaviors win among overrides.
        """
        class Behavior1(Behavior):
            K = override('Behavior1')
            M = override('Behavior1')
        class Behavior2(Behavior):
            K = override('Behavior2')
            L = override('Behavior2')
            M = override('Behavior2')
        class Base(object):
            K = 'Base'
            L = 'Base'
            M = 'Base'
        @plumbing(Behavior1, Behavior2)
        class Plumbing(Base):
            K = 'Plumbing'
        res = list()
        for x in ['K', 'L', 'M']:
            res.append("%s from %s" % (x, getattr(Plumbing, x)))
        self.assertEqual(res, [
            'K from Plumbing',
            'L from Behavior2',
            'M from Behavior1'
        ])

    def test_default_instruction(self):
        """``default`` only fills attributes missing from the plumbing class
        and its bases; earlier behaviors win among defaults.
        """
        class Behavior1(Behavior):
            N = default('Behavior1')
        class Behavior2(Behavior):
            K = default('Behavior2')
            L = default('Behavior2')
            M = default('Behavior2')
            N = default('Behavior2')
        class Base(object):
            K = 'Base'
            L = 'Base'
        @plumbing(Behavior1, Behavior2)
        class Plumbing(Base):
            L = 'Plumbing'
        res = list()
        for x in ['K', 'L', 'M', 'N']:
            res.append("%s from %s" % (x, getattr(Plumbing, x)))
        self.assertEqual(res, [
            'K from Base',
            'L from Plumbing',
            'M from Behavior2',
            'N from Behavior1'
        ])

    def test_finalize_wins_over_override(self):
        """A ``finalize`` from any behavior beats ``override`` from another."""
        class Behavior1(Behavior):
            K = override('Behavior1')
            L = finalize('Behavior1')
        class Behavior2(Behavior):
            K = finalize('Behavior2')
            L = override('Behavior2')
        class Base(object):
            K = 'Base'
            L = 'Base'
        @plumbing(Behavior1, Behavior2)
        class Plumbing(Base):
            pass
        res = list()
        for x in ['K', 'L']:
            res.append("%s from %s" % (x, getattr(Plumbing, x)))
        self.assertEqual(res, [
            'K from Behavior2',
            'L from Behavior1'
        ])

    def test_finalize_wins_over_default(self):
        """A ``finalize`` from any behavior beats ``default`` from another."""
        class Behavior1(Behavior):
            K = default('Behavior1')
            L = finalize('Behavior1')
        class Behavior2(Behavior):
            K = finalize('Behavior2')
            L = default('Behavior2')
        class Base(object):
            K = 'Base'
            L = 'Base'
        @plumbing(Behavior1, Behavior2)
        class Plumbing(Base):
            pass
        res = list()
        for x in ['K', 'L']:
            res.append("%s from %s" % (x, getattr(Plumbing, x)))
        self.assertEqual(res, [
            'K from Behavior2',
            'L from Behavior1'
        ])

    def test_override_wins_over_default(self):
        """An ``override`` from any behavior beats ``default`` from another."""
        class Behavior1(Behavior):
            K = default('Behavior1')
            L = override('Behavior1')
        class Behavior2(Behavior):
            K = override('Behavior2')
            L = default('Behavior2')
        class Base(object):
            K = 'Base'
            L = 'Base'
        @plumbing(Behavior1, Behavior2)
        class Plumbing(Base):
            pass
        res = list()
        for x in ['K', 'L']:
            res.append("%s from %s" % (x, getattr(Plumbing, x)))
        self.assertEqual(res, [
            'K from Behavior2',
            'L from Behavior1'
        ])

    def test_subclassing_behaviors(self):
        """Ordinary class inheritance between behaviors applies before the
        plumber resolves instructions.
        """
        class Behavior1(Behavior):
            J = default('Behavior1')
            K = default('Behavior1')
            M = override('Behavior1')
        class Behavior2(Behavior1):
            # overrides ``J`` of ``Behavior1``
            J = default('Behavior2')
            L = default('Behavior2')
            # this one wins, even if ``M`` on superclass is ``override``
            # instruction due to ordinary inheritance behavior.
            M = default('Behavior2')
        @plumbing(Behavior2)
        class Plumbing(object):
            pass
        plb = Plumbing()
        self.assertEqual(plb.J, 'Behavior2')
        self.assertEqual(plb.K, 'Behavior1')
        self.assertEqual(plb.L, 'Behavior2')
        self.assertEqual(plb.M, 'Behavior2')
class TestPlumberStage2(unittest.TestCase):
    """Stage-2 resolution: ``plumb``/``plumbifexists`` method and property
    pipelines, docstring joining, ``__slots__`` support and zope.interface
    propagation.
    """

    def test_method_pipelines(self):
        """Plumbed methods wrap around the end point in behavior order:
        earlier behaviors form the outer layers of the pipeline.
        """
        res = list()
        class Behavior1(Behavior):
            @plumb
            def __getitem__(_next, self, key):
                res.append("Behavior1 start")
                key = key.lower()
                ret = _next(self, key)
                res.append("Behavior1 stop")
                return ret
        class Behavior2(Behavior):
            @plumb
            def __getitem__(_next, self, key):
                res.append("Behavior2 start")
                ret = 2 * _next(self, key)
                res.append("Behavior2 stop")
                return ret
        Base = dict
        @plumbing(Behavior1, Behavior2)
        class Plumbing(Base):
            pass
        plb = Plumbing()
        plb['abc'] = 6
        # Behavior1 lowercases the key, Behavior2 doubles the result
        self.assertEqual(plb['AbC'], 12)
        self.assertEqual(res, [
            'Behavior1 start',
            'Behavior2 start',
            'Behavior2 stop',
            'Behavior1 stop'
        ])

    def test_endpoint_not_exists(self):
        """``plumb`` requires an end point on the plumbing class or its
        bases; a missing one raises ``AttributeError`` at class creation.
        """
        err = None
        class Behavior1(Behavior):
            @plumb
            def foo(_next, self):
                pass  # pragma: no cover
        try:
            @plumbing(Behavior1)
            class Plumbing(object):
                pass
        except AttributeError as e:
            err = e
        finally:
            self.assertEqual(
                str(err),
                'type object \'Plumbing\' has no attribute \'foo\''
            )

    def test_plumb_if_exists(self):
        """``plumbifexists`` plumbs only when an end point exists and is
        silently skipped otherwise.
        """
        class Behavior1(Behavior):
            @plumbifexists
            def foo(_next, self):
                pass  # pragma: no cover
            @plumbifexists
            def bar(_next, self):
                return 2 * _next(self)
        @plumbing(Behavior1)
        class Plumbing(object):
            def bar(self):
                return 6
        self.assertFalse(hasattr(Plumbing, 'foo'))
        self.assertEqual(Plumbing().bar(), 12)

    def test_property_pipelines(self):
        """Properties can be plumbed, too; individual accessors may carry
        their own instructions (here an ``override`` setter).
        """
        class Behavior1(Behavior):
            @plumb
            @property
            def foo(_next, self):
                return 2 * _next(self)
        @plumbing(Behavior1)
        class Plumbing1(object):
            @property
            def foo(self):
                return 3
        plb = Plumbing1()
        self.assertEqual(plb.foo, 6)
        class Behavior2(Behavior):
            @plumb
            @property
            def foo(_next, self):
                return 2 * _next(self)
        class Behavior3(Behavior):
            def set_foo(self, value):
                self._foo = value
            foo = plumb(property(
                None,
                override(set_foo),
            ))
        @plumbing(Behavior2, Behavior3)
        class Plumbing2(object):
            @property
            def foo(self):
                return self._foo
        plb = Plumbing2()
        # setter contributed by Behavior3, getter pipeline doubles the value
        plb.foo = 4
        self.assertEqual(plb.foo, 8)

    def test_subclassing_behaviors(self):
        """A behavior subclass's pipeline element stacks on top of the one
        inherited from its behavior base class.
        """
        class Behavior1(Behavior):
            @plumb
            def foo(_next, self):
                return 'Behavior1 ' + _next(self)
            @plumb
            def bar(_next, self):
                return 'Behavior1 ' + _next(self)
        class Behavior2(Behavior1):
            @plumb
            def foo(_next, self):
                return 'Behavior2 ' + _next(self)
        @plumbing(Behavior2)
        class Plumbing(object):
            def foo(self):
                return 'foo'
            def bar(self):
                return 'bar'
        plb = Plumbing()
        self.assertEqual(plb.foo(), 'Behavior2 Behavior1 foo')
        self.assertEqual(plb.bar(), 'Behavior1 bar')

    def test_mixing_properties_and_methods(self):
        """Plumbing a method onto a property end point collides."""
        err = None
        class Behavior1(Behavior):
            @plumb
            def foo(_next, self):
                return _next(self)  # pragma: no cover
        try:
            @plumbing(Behavior1)
            class Plumbing(object):
                @property
                def foo(self):
                    return 5  # pragma: no cover
        except PlumbingCollision as e:
            err = e
        finally:
            self.assertEqual(err.left.__parent__.__name__, 'Behavior1')
            self.assertEqual(err.left.__class__.__name__, 'plumb')
            self.assertEqual(err.left.__name__, 'foo')
            self.assertEqual(err.left.payload.__name__, 'foo')
            self.assertEqual(err.right.__name__, 'Plumbing')
            self.assertTrue(inspect.isclass(err.right))

    def test_docstrings_joined(self):
        """Docstrings of class, methods and properties are concatenated
        along the pipeline, end point first.
        """
        class P1(Behavior):
            """P1
            """
            @plumb
            def foo(self):
                """P1.foo
                """
            bar = plumb(property(None, None, None, "P1.bar"))
        class P2(Behavior):
            @override
            def foo(self):
                """P2.foo
                """
            bar = plumb(property(None, None, None, "P2.bar"))
        @plumbing(P1, P2)
        class Plumbing(object):
            """Plumbing
            """
            bar = property(None, None, None, "Plumbing.bar")
        self.assertEqual(Plumbing.__doc__.strip(), 'Plumbing\n\nP1')
        self.assertEqual(Plumbing.foo.__doc__.strip(), 'P2.foo\n\nP1.foo')
        self.assertEqual(
            Plumbing.bar.__doc__.strip(),
            'Plumbing.bar\n\nP2.bar\n\nP1.bar'
        )

    def test_slots(self):
        """Plumbing classes keep working with ``__slots__`` on the class."""
        class P1(Behavior):
            @default
            def somewhing_which_writes_to_foo(self, foo_val):
                self.foo = foo_val
        @plumbing(P1)
        class WithSlots(object):
            __slots__ = 'foo'
        # the slot descriptor survives the plumbing machinery
        self.assertEqual(
            type(WithSlots.__dict__['foo']).__name__,
            'member_descriptor'
        )
        ob = WithSlots()
        ob.somewhing_which_writes_to_foo('foo')
        self.assertEqual(ob.foo, 'foo')

    def test_zope_interface(self):
        """Interfaces implemented by behaviors (and their behavior base
        classes) and by the plumbing's base class are all implemented by
        the resulting plumbing class and provided by its instances.
        """
        class IBase(Interface):
            pass
        @implementer(IBase)
        class Base(object):
            pass
        self.assertTrue(IBase.implementedBy(Base))
        class IBehavior1(Interface):
            pass
        @implementer(IBehavior1)
        class Behavior1(Behavior):
            blub = 1
        class IBehavior2Base(Interface):
            pass
        @implementer(IBehavior2Base)
        class Behavior2Base(Behavior):
            pass
        class IBehavior2(Interface):
            pass
        @implementer(IBehavior2)
        class Behavior2(Behavior2Base):
            pass
        self.assertTrue(IBehavior1.implementedBy(Behavior1))
        self.assertTrue(IBehavior2Base.implementedBy(Behavior2Base))
        self.assertTrue(IBehavior2Base.implementedBy(Behavior2))
        self.assertTrue(IBehavior2.implementedBy(Behavior2))
        class IPlumbingClass(Interface):
            pass
        @implementer(IPlumbingClass)
        @plumbing(Behavior1, Behavior2)
        class PlumbingClass(Base):
            pass
        # implementedBy: own interface, base's and all behaviors'
        self.assertTrue(IPlumbingClass.implementedBy(PlumbingClass))
        self.assertTrue(IBase.implementedBy(PlumbingClass))
        self.assertTrue(IBehavior1.implementedBy(PlumbingClass))
        self.assertTrue(IBehavior2.implementedBy(PlumbingClass))
        self.assertTrue(IBehavior2Base.implementedBy(PlumbingClass))
        plb = PlumbingClass()
        # providedBy on instances mirrors implementedBy on the class
        self.assertTrue(IPlumbingClass.providedBy(plb))
        self.assertTrue(IBase.providedBy(plb))
        self.assertTrue(IBehavior1.providedBy(plb))
        self.assertTrue(IBehavior2.providedBy(plb))
        self.assertTrue(IBehavior2Base.providedBy(plb))
# Allow running this test module directly as a script.
if __name__ == '__main__':
    unittest.main()  # pragma: no cover
| 30.076509 | 79 | 0.555802 | from plumber import Behavior
from plumber import PlumbingCollision
from plumber import default
from plumber import finalize
from plumber import override
from plumber import plumb
from plumber import plumber
from plumber import plumbifexists
from plumber import plumbing
from plumber.behavior import behaviormetaclass
from plumber.compat import add_metaclass
from plumber.instructions import Instruction
from plumber.instructions import _implements
from plumber.instructions import payload
from plumber.instructions import plumb_str
from plumber.plumber import searchnameinbases
from zope.interface import Interface
from zope.interface import implementer
from zope.interface.interface import InterfaceClass
import inspect
import sys
if sys.version_info < (2, 7):
import unittest2 as unittest
else:
import unittest
class TestInstructions(unittest.TestCase):
def test_payload(self):
class Foo:
pass
self.assertTrue(payload(Instruction(Instruction(Foo))) is Foo)
def test_plumb_str(self):
leftdoc = """Left head
__plbnext__
Left tail
"""
rightdoc = """Right head
__plbnext__
Right tail
"""
self.assertEqual(plumb_str(leftdoc, rightdoc).split('\n'), [
'Left head',
'',
' Right head',
'',
' __plbnext__',
'',
' Right tail',
'',
' Left tail',
' '
])
leftdoc = """Left tail
"""
rightdoc = """Right tail
"""
self.assertEqual(plumb_str(leftdoc, rightdoc).split('\n'), [
'Right tail',
'',
'Left tail',
' '
])
class A:
pass
self.assertTrue(plumb_str(A, None) is A)
self.assertTrue(plumb_str(None, A) is A)
self.assertTrue(plumb_str(None, None) is None)
def test_instruction(self):
class Foo:
pass
self.assertTrue(Instruction(Foo).item is Foo)
self.assertTrue(Instruction(Foo).__name__ is None)
self.assertTrue(Instruction(Foo, name='foo').__name__ == 'foo')
self.assertRaises(
NotImplementedError,
lambda: Instruction(None) + 1
)
self.assertRaises(
NotImplementedError,
lambda: Instruction(None)(None)
)
def test_default(self):
def1 = default(1)
self.assertTrue(def1 + def1 is def1)
def2 = default(2)
self.assertTrue(def1 + def2 is def1)
self.assertTrue(def2 + def1 is def2)
ext3 = override(3)
self.assertTrue(def1 + ext3 is ext3)
fin4 = finalize(4)
self.assertTrue(def1 + fin4 is fin4)
err = None
try:
def1 + Instruction('foo')
except PlumbingCollision as e:
err = e
finally:
self.assertEqual(err.left.__class__.__name__, 'default')
self.assertEqual(err.left.payload, 1)
self.assertEqual(err.right.__class__.__name__, 'Instruction')
self.assertEqual(err.right.payload, 'foo')
def test_override(self):
ext1 = override(1)
self.assertTrue(ext1 + ext1 is ext1)
self.assertTrue(ext1 + override(1) is ext1)
self.assertTrue(ext1 + override(2) is ext1)
self.assertTrue(ext1 + default(2) is ext1)
fin3 = finalize(3)
self.assertTrue(ext1 + fin3 is fin3)
err = None
try:
ext1 + Instruction(1)
except PlumbingCollision as e:
err = e
finally:
self.assertEqual(err.left.__class__.__name__, 'override')
self.assertEqual(err.left.payload, 1)
self.assertEqual(err.right.__class__.__name__, 'Instruction')
self.assertEqual(err.right.payload, 1)
def test_finalize(self):
fin1 = finalize(1)
self.assertTrue(fin1 + fin1 is fin1)
self.assertTrue(fin1 + finalize(1) is fin1)
self.assertTrue(fin1 + default(2) is fin1)
self.assertTrue(fin1 + override(2) is fin1)
err = None
try:
fin1 + finalize(2)
except PlumbingCollision as e:
err = e
finally:
self.assertEqual(err.left.__class__.__name__, 'finalize')
self.assertEqual(err.left.payload, 1)
self.assertEqual(err.right.__class__.__name__, 'finalize')
self.assertEqual(err.right.payload, 2)
try:
fin1 + Instruction(1)
except PlumbingCollision as e:
err = e
finally:
self.assertEqual(err.left.__class__.__name__, 'finalize')
self.assertEqual(err.left.payload, 1)
self.assertEqual(err.right.__class__.__name__, 'Instruction')
self.assertEqual(err.right.payload, 1)
def test_plumb(self):
plb1 = plumb(1)
self.assertTrue(plb1 + plumb(1) is plb1)
err = None
try:
plb1 + Instruction(1)
except PlumbingCollision as e:
err = e
finally:
self.assertEqual(err.left.__class__.__name__, 'plumb')
self.assertEqual(err.left.payload, 1)
self.assertEqual(err.right.__class__.__name__, 'Instruction')
self.assertEqual(err.right.payload, 1)
try:
func_a = lambda x: None
prop_b = property(lambda x: None)
plumb(func_a) + plumb(prop_b)
except PlumbingCollision as e:
err = e
finally:
self.assertEqual(err.left.__class__.__name__, 'plumb')
self.assertEqual(err.left.payload, func_a)
self.assertEqual(err.right.__class__.__name__, 'plumb')
self.assertEqual(err.right.payload, prop_b)
try:
plumb(1) + plumb(2)
except PlumbingCollision as e:
err = e
finally:
self.assertEqual(err.left.__class__.__name__, 'plumb')
self.assertEqual(err.left.payload, 1)
self.assertEqual(err.right.__class__.__name__, 'plumb')
self.assertEqual(err.right.payload, 2)
def test_implements(self):
foo = _implements(('foo',))
self.assertTrue(foo == foo)
self.assertTrue(foo + foo is foo)
self.assertTrue(foo == _implements(('foo',)))
self.assertTrue(foo != _implements(('bar',)))
self.assertTrue(
_implements(('foo', 'bar')) == _implements(('bar', 'foo'))
)
self.assertTrue(foo + _implements(('foo',)) is foo)
bar = _implements(('bar',))
foobar = foo + bar
self.assertEqual(foobar.__class__.__name__, '_implements')
self.assertEqual(foobar.__name__, '__interfaces__')
self.assertEqual(foobar.payload, ('bar', 'foo'))
self.assertTrue(foo + bar == bar + foo)
err = None
try:
foo + Instruction("bar")
except PlumbingCollision as e:
err = e
finally:
self.assertEqual(err.left.__class__.__name__, '_implements')
self.assertEqual(err.left.__name__, '__interfaces__')
self.assertEqual(err.left.payload, ('foo',))
self.assertEqual(err.right.__class__.__name__, 'Instruction')
self.assertEqual(err.right.payload, 'bar')
class TestBehavior(unittest.TestCase):
def test_behaviormetaclass(self):
@add_metaclass(behaviormetaclass)
class A(object):
pass
self.assertEqual(
getattr(A, '__plumbing_instructions__', 'No behavior'),
'No behavior'
)
@add_metaclass(behaviormetaclass)
class B(Behavior):
pass
self.assertEqual(
getattr(B, '__plumbing_instructions__', None) and 'Behavior',
'Behavior'
)
class TestPlumber(unittest.TestCase):
def test_searchnameinbases(self):
class A(object):
foo = 1
class B(A):
pass
self.assertTrue(searchnameinbases('foo', (B,)))
self.assertFalse(searchnameinbases('bar', (B,)))
class TestGlobalMetaclass(unittest.TestCase):
@unittest.skipIf(
sys.version_info[0] >= 3,
'__metaclass__ attribute on module leven only works in python 2')
def test_global_metaclass(self):
from plumber.tests import globalmetaclass as gm
self.assertEqual(gm.IBehavior1.__class__, InterfaceClass)
self.assertEqual(gm.Foo.__class__, plumber)
self.assertTrue(issubclass(gm.Foo, object))
self.assertEqual(gm.ClassMaybeUsingAPlumbing.__class__, type)
self.assertEqual(gm.ClassReallyUsingAPlumbing.__class__, plumber)
self.assertTrue(issubclass(gm.ClassReallyUsingAPlumbing, object))
self.assertTrue(
gm.IBehavior1.implementedBy(gm.ClassReallyUsingAPlumbing)
)
self.assertEqual(gm.BCClassReallyUsingAPlumbing.__class__, plumber)
self.assertTrue(issubclass(gm.BCClassReallyUsingAPlumbing, object))
self.assertTrue(
gm.IBehavior1.implementedBy(gm.BCClassReallyUsingAPlumbing)
)
class TestMetaclassHooks(unittest.TestCase):
def test_metaclasshook(self):
class IBehaviorInterface(Interface):
pass
@plumber.metaclasshook
def test_metclass_hook(cls, name, bases, dct):
if not IBehaviorInterface.implementedBy(cls):
return
cls.hooked = True
self.assertTrue(test_metclass_hook in plumber.__metaclass_hooks__)
@implementer(IBehaviorInterface)
class MetaclassConsideredBehavior(Behavior):
pass
@plumbing(MetaclassConsideredBehavior)
class Plumbing(object):
pass
self.assertTrue(Plumbing.hooked)
class BehaviorIgnoredByMetaclassHook(Behavior):
pass
@plumbing(BehaviorIgnoredByMetaclassHook)
class Plumbing2(object):
pass
self.assertRaises(AttributeError, lambda: Plumbing2.hooked)
plumber.__metaclass_hooks__.remove(test_metclass_hook)
class TestPlumberBasics(unittest.TestCase):
def test_basics(self):
class Behavior1(Behavior):
a = default(True)
@default
def foo(self):
return 42
class Behavior2(Behavior):
@default
@property
def bar(self):
return 17
Base = dict
@plumbing(Behavior1, Behavior2)
class Plumbing(Base):
def foobar(self):
return 5
plb = Plumbing()
self.assertTrue(plb.a)
self.assertEqual(plb.foo(), 42)
self.assertEqual(plb.bar, 17)
self.assertEqual(plb.foobar(), 5)
plb['a'] = 1
self.assertEqual(plb['a'], 1)
class Sub(Plumbing):
a = 'Sub'
self.assertEqual(Sub.a, 'Sub')
self.assertEqual(Sub().foo(), 42)
self.assertEqual(Sub().bar, 17)
self.assertEqual(Sub().foobar(), 5)
stacks = Plumbing.__plumbing_stacks__
self.assertEqual(len(stacks['history']), 5)
stages = stacks['stages']
self.assertEqual(sorted(list(stages.keys())), ['stage1', 'stage2'])
stage_1 = stages['stage1']
self.assertEqual(sorted(list(stage_1.keys())), ['a', 'bar', 'foo'])
stage_2 = stages['stage2']
self.assertEqual(sorted(list(stage_2.keys())), ['__interfaces__'])
@unittest.skipIf(
sys.version_info[0] >= 3,
'__metaclass__ property only works in python 2')
def test_bc_plumbing_py2(self):
class Behavior1(Behavior):
a = default(True)
class BCPlumbing(object):
__metaclass__ = plumber
__plumbing__ = Behavior1
plb = BCPlumbing()
self.assertTrue(plb.a)
class TestPlumberStage1(unittest.TestCase):
def test_finalize_instruction(self):
class Behavior1(Behavior):
N = finalize('Behavior1')
class Behavior2(Behavior):
M = finalize('Behavior2')
class Base(object):
K = 'Base'
@plumbing(Behavior1, Behavior2)
class Plumbing(Base):
L = 'Plumbing'
res = list()
for x in ['K', 'L', 'M', 'N']:
res.append("%s from %s" % (x, getattr(Plumbing, x)))
self.assertEqual(res, [
'K from Base',
'L from Plumbing',
'M from Behavior2',
'N from Behavior1',
])
def test_finalize_collisions(self):
err = None
class Behavior1(Behavior):
O = finalize(False)
try:
@plumbing(Behavior1)
class Plumbing(object):
O = True
except PlumbingCollision as e:
err = e
finally:
self.assertEqual(err.left, 'Plumbing class')
self.assertEqual(err.right.__parent__.__name__, 'Behavior1')
self.assertEqual(err.right.__class__.__name__, 'finalize')
self.assertEqual(err.right.__name__, 'O')
self.assertFalse(err.right.payload)
class Behavior2(Behavior):
P = finalize(False)
try:
@plumbing(Behavior2)
class Plumbing(object):
P = True
except PlumbingCollision as e:
err = e
finally:
self.assertEqual(err.left, 'Plumbing class')
self.assertEqual(err.right.__parent__.__name__, 'Behavior2')
self.assertEqual(err.right.__class__.__name__, 'finalize')
self.assertEqual(err.right.__name__, 'P')
self.assertFalse(err.right.payload)
class Behavior3(Behavior):
Q = finalize(False)
class Behavior4(Behavior):
Q = finalize(True)
try:
@plumbing(Behavior3, Behavior4)
class Plumbing(object):
pass
except PlumbingCollision as e:
err = e
finally:
self.assertEqual(err.left.__parent__.__name__, 'Behavior3')
self.assertEqual(err.left.__class__.__name__, 'finalize')
self.assertEqual(err.left.__name__, 'Q')
self.assertFalse(err.left.payload)
self.assertEqual(err.right.__parent__.__name__, 'Behavior4')
self.assertEqual(err.right.__class__.__name__, 'finalize')
self.assertEqual(err.right.__name__, 'Q')
self.assertTrue(err.right.payload)
def test_override_instruction(self):
class Behavior1(Behavior):
K = override('Behavior1')
M = override('Behavior1')
class Behavior2(Behavior):
K = override('Behavior2')
L = override('Behavior2')
M = override('Behavior2')
class Base(object):
K = 'Base'
L = 'Base'
M = 'Base'
@plumbing(Behavior1, Behavior2)
class Plumbing(Base):
K = 'Plumbing'
res = list()
for x in ['K', 'L', 'M']:
res.append("%s from %s" % (x, getattr(Plumbing, x)))
self.assertEqual(res, [
'K from Plumbing',
'L from Behavior2',
'M from Behavior1'
])
def test_default_instruction(self):
class Behavior1(Behavior):
N = default('Behavior1')
class Behavior2(Behavior):
K = default('Behavior2')
L = default('Behavior2')
M = default('Behavior2')
N = default('Behavior2')
class Base(object):
K = 'Base'
L = 'Base'
@plumbing(Behavior1, Behavior2)
class Plumbing(Base):
L = 'Plumbing'
res = list()
for x in ['K', 'L', 'M', 'N']:
res.append("%s from %s" % (x, getattr(Plumbing, x)))
self.assertEqual(res, [
'K from Base',
'L from Plumbing',
'M from Behavior2',
'N from Behavior1'
])
def test_finalize_wins_over_override(self):
class Behavior1(Behavior):
K = override('Behavior1')
L = finalize('Behavior1')
class Behavior2(Behavior):
K = finalize('Behavior2')
L = override('Behavior2')
class Base(object):
K = 'Base'
L = 'Base'
@plumbing(Behavior1, Behavior2)
class Plumbing(Base):
pass
res = list()
for x in ['K', 'L']:
res.append("%s from %s" % (x, getattr(Plumbing, x)))
self.assertEqual(res, [
'K from Behavior2',
'L from Behavior1'
])
def test_finalize_wins_over_default(self):
class Behavior1(Behavior):
K = default('Behavior1')
L = finalize('Behavior1')
class Behavior2(Behavior):
K = finalize('Behavior2')
L = default('Behavior2')
class Base(object):
K = 'Base'
L = 'Base'
@plumbing(Behavior1, Behavior2)
class Plumbing(Base):
pass
res = list()
for x in ['K', 'L']:
res.append("%s from %s" % (x, getattr(Plumbing, x)))
self.assertEqual(res, [
'K from Behavior2',
'L from Behavior1'
])
def test_override_wins_over_default(self):
class Behavior1(Behavior):
K = default('Behavior1')
L = override('Behavior1')
class Behavior2(Behavior):
K = override('Behavior2')
L = default('Behavior2')
class Base(object):
K = 'Base'
L = 'Base'
@plumbing(Behavior1, Behavior2)
class Plumbing(Base):
pass
res = list()
for x in ['K', 'L']:
res.append("%s from %s" % (x, getattr(Plumbing, x)))
self.assertEqual(res, [
'K from Behavior2',
'L from Behavior1'
])
def test_subclassing_behaviors(self):
class Behavior1(Behavior):
J = default('Behavior1')
K = default('Behavior1')
M = override('Behavior1')
class Behavior2(Behavior1):
J = default('Behavior2')
L = default('Behavior2')
M = default('Behavior2')
@plumbing(Behavior2)
class Plumbing(object):
pass
plb = Plumbing()
self.assertEqual(plb.J, 'Behavior2')
self.assertEqual(plb.K, 'Behavior1')
self.assertEqual(plb.L, 'Behavior2')
self.assertEqual(plb.M, 'Behavior2')
class TestPlumberStage2(unittest.TestCase):
def test_method_pipelines(self):
res = list()
class Behavior1(Behavior):
@plumb
def __getitem__(_next, self, key):
res.append("Behavior1 start")
key = key.lower()
ret = _next(self, key)
res.append("Behavior1 stop")
return ret
class Behavior2(Behavior):
@plumb
def __getitem__(_next, self, key):
res.append("Behavior2 start")
ret = 2 * _next(self, key)
res.append("Behavior2 stop")
return ret
Base = dict
@plumbing(Behavior1, Behavior2)
class Plumbing(Base):
pass
plb = Plumbing()
plb['abc'] = 6
self.assertEqual(plb['AbC'], 12)
self.assertEqual(res, [
'Behavior1 start',
'Behavior2 start',
'Behavior2 stop',
'Behavior1 stop'
])
def test_endpoint_not_exists(self):
err = None
class Behavior1(Behavior):
@plumb
def foo(_next, self):
pass
try:
@plumbing(Behavior1)
class Plumbing(object):
pass
except AttributeError as e:
err = e
finally:
self.assertEqual(
str(err),
'type object \'Plumbing\' has no attribute \'foo\''
)
def test_plumb_if_exists(self):
class Behavior1(Behavior):
@plumbifexists
def foo(_next, self):
pass
@plumbifexists
def bar(_next, self):
return 2 * _next(self)
@plumbing(Behavior1)
class Plumbing(object):
def bar(self):
return 6
self.assertFalse(hasattr(Plumbing, 'foo'))
self.assertEqual(Plumbing().bar(), 12)
def test_property_pipelines(self):
class Behavior1(Behavior):
@plumb
@property
def foo(_next, self):
return 2 * _next(self)
@plumbing(Behavior1)
class Plumbing1(object):
@property
def foo(self):
return 3
plb = Plumbing1()
self.assertEqual(plb.foo, 6)
class Behavior2(Behavior):
@plumb
@property
def foo(_next, self):
return 2 * _next(self)
class Behavior3(Behavior):
def set_foo(self, value):
self._foo = value
foo = plumb(property(
None,
override(set_foo),
))
@plumbing(Behavior2, Behavior3)
class Plumbing2(object):
@property
def foo(self):
return self._foo
plb = Plumbing2()
plb.foo = 4
self.assertEqual(plb.foo, 8)
def test_subclassing_behaviors(self):
class Behavior1(Behavior):
@plumb
def foo(_next, self):
return 'Behavior1 ' + _next(self)
@plumb
def bar(_next, self):
return 'Behavior1 ' + _next(self)
class Behavior2(Behavior1):
@plumb
def foo(_next, self):
return 'Behavior2 ' + _next(self)
@plumbing(Behavior2)
class Plumbing(object):
def foo(self):
return 'foo'
def bar(self):
return 'bar'
plb = Plumbing()
self.assertEqual(plb.foo(), 'Behavior2 Behavior1 foo')
self.assertEqual(plb.bar(), 'Behavior1 bar')
def test_mixing_properties_and_methods(self):
err = None
class Behavior1(Behavior):
@plumb
def foo(_next, self):
return _next(self)
try:
@plumbing(Behavior1)
class Plumbing(object):
@property
def foo(self):
return 5
except PlumbingCollision as e:
err = e
finally:
self.assertEqual(err.left.__parent__.__name__, 'Behavior1')
self.assertEqual(err.left.__class__.__name__, 'plumb')
self.assertEqual(err.left.__name__, 'foo')
self.assertEqual(err.left.payload.__name__, 'foo')
self.assertEqual(err.right.__name__, 'Plumbing')
self.assertTrue(inspect.isclass(err.right))
def test_docstrings_joined(self):
class P1(Behavior):
@plumb
def foo(self):
bar = plumb(property(None, None, None, "P1.bar"))
class P2(Behavior):
@override
def foo(self):
bar = plumb(property(None, None, None, "P2.bar"))
@plumbing(P1, P2)
class Plumbing(object):
bar = property(None, None, None, "Plumbing.bar")
self.assertEqual(Plumbing.__doc__.strip(), 'Plumbing\n\nP1')
self.assertEqual(Plumbing.foo.__doc__.strip(), 'P2.foo\n\nP1.foo')
self.assertEqual(
Plumbing.bar.__doc__.strip(),
'Plumbing.bar\n\nP2.bar\n\nP1.bar'
)
    def test_slots(self):
        # Plumbings must work on classes that use __slots__: the slot
        # descriptor survives and behaviors can write through it.
        class P1(Behavior):
            @default
            def somewhing_which_writes_to_foo(self, foo_val):
                self.foo = foo_val

        @plumbing(P1)
        class WithSlots(object):
            __slots__ = 'foo'

        self.assertEqual(
            type(WithSlots.__dict__['foo']).__name__,
            'member_descriptor'
        )
        ob = WithSlots()
        ob.somewhing_which_writes_to_foo('foo')
        self.assertEqual(ob.foo, 'foo')
    def test_zope_interface(self):
        # Interfaces declared on behaviors (and on their bases) must end up
        # implemented by the plumbing class and provided by its instances.
        class IBase(Interface):
            pass

        @implementer(IBase)
        class Base(object):
            pass

        self.assertTrue(IBase.implementedBy(Base))

        class IBehavior1(Interface):
            pass

        @implementer(IBehavior1)
        class Behavior1(Behavior):
            blub = 1

        class IBehavior2Base(Interface):
            pass

        @implementer(IBehavior2Base)
        class Behavior2Base(Behavior):
            pass

        class IBehavior2(Interface):
            pass

        @implementer(IBehavior2)
        class Behavior2(Behavior2Base):
            pass

        self.assertTrue(IBehavior1.implementedBy(Behavior1))
        self.assertTrue(IBehavior2Base.implementedBy(Behavior2Base))
        self.assertTrue(IBehavior2Base.implementedBy(Behavior2))
        self.assertTrue(IBehavior2.implementedBy(Behavior2))

        class IPlumbingClass(Interface):
            pass

        @implementer(IPlumbingClass)
        @plumbing(Behavior1, Behavior2)
        class PlumbingClass(Base):
            pass

        # The plumbing class implements its own interface, the base's, and
        # every interface from the behavior hierarchy.
        self.assertTrue(IPlumbingClass.implementedBy(PlumbingClass))
        self.assertTrue(IBase.implementedBy(PlumbingClass))
        self.assertTrue(IBehavior1.implementedBy(PlumbingClass))
        self.assertTrue(IBehavior2.implementedBy(PlumbingClass))
        self.assertTrue(IBehavior2Base.implementedBy(PlumbingClass))

        plb = PlumbingClass()
        self.assertTrue(IPlumbingClass.providedBy(plb))
        self.assertTrue(IBase.providedBy(plb))
        self.assertTrue(IBehavior1.providedBy(plb))
        self.assertTrue(IBehavior2.providedBy(plb))
        self.assertTrue(IBehavior2Base.providedBy(plb))
# Allow running this test module directly.
if __name__ == '__main__':
    unittest.main()
| true | true |
f71517dfa9159f9c8d86c55e1e9fd94923af99e2 | 2,797 | py | Python | trajectoryPlugin/collate.py | zhangyuwangumass/General-Data-Driven-Adaptive-Learning | 63c4ddef36b2b7bd7078cd9b431e3502c358915a | [
"MIT"
] | null | null | null | trajectoryPlugin/collate.py | zhangyuwangumass/General-Data-Driven-Adaptive-Learning | 63c4ddef36b2b7bd7078cd9b431e3502c358915a | [
"MIT"
] | null | null | null | trajectoryPlugin/collate.py | zhangyuwangumass/General-Data-Driven-Adaptive-Learning | 63c4ddef36b2b7bd7078cd9b431e3502c358915a | [
"MIT"
] | null | null | null | r""""Contains definitions of the methods used by the _DataLoaderIter workers to
collate samples fetched from dataset into Tensor(s).
These **needs** to be in global scope since Py2 doesn't support serializing
static methods.
"""
import torch
import re
from torch._six import container_abcs, string_classes, int_classes
_use_shared_memory = False
r"""Whether to use shared memory in default_collate"""

# Matches numpy dtype kind codes for bytes (S), str (a/U) and object (O)
# arrays, which cannot be converted to tensors by default_collate.
np_str_obj_array_pattern = re.compile(r'[SaUO]')

error_msg_fmt = "batch must contain tensors, numbers, dicts or lists; found {}"

# Maps numpy dtype names to the matching torch tensor constructors; used
# when collating zero-dimensional numpy scalars into a single tensor.
numpy_type_map = {
    'float64': torch.DoubleTensor,
    'float32': torch.FloatTensor,
    'float16': torch.HalfTensor,
    'int64': torch.LongTensor,
    'int32': torch.IntTensor,
    'int16': torch.ShortTensor,
    'int8': torch.CharTensor,
    'uint8': torch.ByteTensor,
}
def default_collate(batch):
    r"""Puts each data field into a tensor with outer dimension batch size"""
    # Dispatch on the type of the first element; the whole batch is assumed
    # to be homogeneous.
    elem_type = type(batch[0])
    if isinstance(batch[0], torch.Tensor):
        out = None
        if _use_shared_memory:
            # If we're in a background process, concatenate directly into a
            # shared memory tensor to avoid an extra copy
            numel = sum([x.numel() for x in batch])
            storage = batch[0].storage()._new_shared(numel)
            out = batch[0].new(storage)
        return torch.stack(batch, 0, out=out)
    elif elem_type.__module__ == 'numpy' and elem_type.__name__ != 'str_' \
            and elem_type.__name__ != 'string_':
        elem = batch[0]
        if elem_type.__name__ == 'ndarray':
            # array of string classes and object
            if np_str_obj_array_pattern.search(elem.dtype.str) is not None:
                raise TypeError(error_msg_fmt.format(elem.dtype))

            # Convert each ndarray to a tensor and recurse to stack them.
            return default_collate([torch.from_numpy(b) for b in batch])
        if elem.shape == ():  # scalars
            py_type = float if elem.dtype.name.startswith('float') else int
            return numpy_type_map[elem.dtype.name](list(map(py_type, batch)))
    elif isinstance(batch[0], float):
        return torch.tensor(batch, dtype=torch.float32)
    elif isinstance(batch[0], int_classes):
        return torch.tensor(batch)
    elif isinstance(batch[0], string_classes):
        # Strings are returned as-is (a list), not tensorized.
        return batch
    elif isinstance(batch[0], container_abcs.Mapping):
        # Collate each key across the batch, preserving the mapping shape.
        return {key: default_collate([d[key] for d in batch]) for key in batch[0]}
    elif isinstance(batch[0], tuple) and hasattr(batch[0], '_fields'):  # namedtuple
        return type(batch[0])(*(default_collate(samples) for samples in zip(*batch)))
    elif isinstance(batch[0], container_abcs.Sequence):
        # Transpose the batch of sequences and collate each position.
        transposed = zip(*batch)
        return [default_collate(samples) for samples in transposed]

    raise TypeError((error_msg_fmt.format(type(batch[0]))))
import torch
import re
from torch._six import container_abcs, string_classes, int_classes
# Module-level collate configuration (duplicate, comment-stripped copy of
# the block above).
_use_shared_memory = False

# numpy dtype kind codes that default_collate refuses to tensorize.
np_str_obj_array_pattern = re.compile(r'[SaUO]')

error_msg_fmt = "batch must contain tensors, numbers, dicts or lists; found {}"

# numpy dtype name -> torch tensor constructor, for numpy scalar batches.
numpy_type_map = {
    'float64': torch.DoubleTensor,
    'float32': torch.FloatTensor,
    'float16': torch.HalfTensor,
    'int64': torch.LongTensor,
    'int32': torch.IntTensor,
    'int16': torch.ShortTensor,
    'int8': torch.CharTensor,
    'uint8': torch.ByteTensor,
}
def default_collate(batch):
    """Collate a list of samples into a batch whose structure mirrors the
    first sample (tensor, numpy array/scalar, number, string, mapping,
    namedtuple or sequence)."""
    elem = batch[0]
    elem_type = type(elem)

    if isinstance(elem, torch.Tensor):
        out = None
        if _use_shared_memory:
            # Worker process: stack straight into shared memory so the main
            # process does not have to copy the result.
            total = sum(sample.numel() for sample in batch)
            shared_storage = elem.storage()._new_shared(total)
            out = elem.new(shared_storage)
        return torch.stack(batch, 0, out=out)

    if elem_type.__module__ == 'numpy' and elem_type.__name__ not in ('str_', 'string_'):
        if elem_type.__name__ == 'ndarray':
            # Arrays of strings or objects cannot be turned into tensors.
            if np_str_obj_array_pattern.search(elem.dtype.str) is not None:
                raise TypeError(error_msg_fmt.format(elem.dtype))
            return default_collate([torch.from_numpy(sample) for sample in batch])
        if elem.shape == ():
            # Zero-dimensional numpy scalars -> one tensor for the batch.
            py_type = float if elem.dtype.name.startswith('float') else int
            return numpy_type_map[elem.dtype.name](list(map(py_type, batch)))
    elif isinstance(elem, float):
        return torch.tensor(batch, dtype=torch.float32)
    elif isinstance(elem, int_classes):
        return torch.tensor(batch)
    elif isinstance(elem, string_classes):
        # Strings stay a plain list.
        return batch
    elif isinstance(elem, container_abcs.Mapping):
        return {key: default_collate([sample[key] for sample in batch]) for key in elem}
    elif isinstance(elem, tuple) and hasattr(elem, '_fields'):
        # namedtuple: collate field-wise and rebuild the same type.
        return type(elem)(*(default_collate(group) for group in zip(*batch)))
    elif isinstance(elem, container_abcs.Sequence):
        return [default_collate(group) for group in zip(*batch)]

    raise TypeError((error_msg_fmt.format(type(batch[0]))))
f715180246192055bfecfdc8fa0f2adb72606868 | 793 | py | Python | Day 9/Blind Auction.py | anti-batman/100-Days-of-Code | 2ba087a8eacd86f23104349f3044baf9965d5073 | [
"MIT"
] | 72 | 2021-02-20T06:00:46.000Z | 2022-03-29T21:54:01.000Z | Day 9/Blind Auction.py | anti-batman/100-Days-of-Code | 2ba087a8eacd86f23104349f3044baf9965d5073 | [
"MIT"
] | 2 | 2021-06-05T17:39:16.000Z | 2022-01-30T08:58:14.000Z | Day 9/Blind Auction.py | anti-batman/100-Days-of-Code | 2ba087a8eacd86f23104349f3044baf9965d5073 | [
"MIT"
] | 21 | 2021-04-03T09:59:48.000Z | 2022-01-30T20:24:43.000Z | from replit import clear
from art import logo

# Show the banner and initialise the auction state: a name->amount mapping
# of sealed bids and a flag that ends the input loop.
print(logo)
bids = {}
bidding_finished = False
def find_highest_bidder(bidding_record):
    """Announce the auction winner.

    Prints the bidder with the highest amount in ``bidding_record`` (a
    mapping of name -> bid) and returns the ``(name, amount)`` pair.
    Ties keep the bidder encountered first, matching the original scan.

    Returns None for an empty record instead of announcing an empty-name
    winner with a $0 bid, which the original loop did.
    """
    if not bidding_record:
        print("No bids were placed.")
        return None
    # max() with the dict's own lookup as key returns the first maximal
    # bidder in insertion order — same tie-breaking as a manual scan.
    winner = max(bidding_record, key=bidding_record.get)
    highest_bid = bidding_record[winner]
    print(f"The winner is {winner} with a bid of ${highest_bid}")
    return winner, highest_bid
# Collect sealed bids until the operator answers "no"; then reveal the
# winner. The screen is cleared between bidders so they cannot see each
# other's bids.
while not bidding_finished:
    name = input("What is your name?: ")
    price = int(input("What is your bid?: $"))
    bids[name] = price
    should_continue = input("Are there any other bidders? Type 'yes or 'no'.\n")
    if should_continue == "no":
        bidding_finished = True
        find_highest_bidder(bids)
    elif should_continue == "yes":
        # Hide the previous bid from the next bidder.
        clear()
from art import logo

# Duplicate, comment-stripped copy of the blind-auction script above.
print(logo)
bids = {}
bidding_finished = False


def find_highest_bidder(bidding_record):
    # Linear scan for the highest bid; ties keep the earlier bidder.
    highest_bid = 0
    winner = ""
    for bidder in bidding_record:
        bid_amount = bidding_record[bidder]
        if bid_amount > highest_bid:
            highest_bid = bid_amount
            winner = bidder
    print(f"The winner is {winner} with a bid of ${highest_bid}")


# Collect sealed bids until the operator answers "no".
while not bidding_finished:
    name = input("What is your name?: ")
    price = int(input("What is your bid?: $"))
    bids[name] = price
    should_continue = input("Are there any other bidders? Type 'yes or 'no'.\n")
    if should_continue == "no":
        bidding_finished = True
        find_highest_bidder(bids)
    elif should_continue == "yes":
        clear()
f715181158ed97a842f823c3209fa5647bf6dec5 | 246 | py | Python | lightning_plus/api_basebone/app/client_urls.py | twocucao/lightning-plus | e69c81da9c15fdfc37355e0362ff7ed804e94b2a | [
"MIT"
] | 1 | 2021-04-15T14:52:12.000Z | 2021-04-15T14:52:12.000Z | lightning_plus/api_basebone/app/client_urls.py | twocucao/lightning | e69c81da9c15fdfc37355e0362ff7ed804e94b2a | [
"MIT"
] | null | null | null | lightning_plus/api_basebone/app/client_urls.py | twocucao/lightning | e69c81da9c15fdfc37355e0362ff7ed804e94b2a | [
"MIT"
] | null | null | null | from lightning_plus.api_basebone.drf.routers import SimpleRouter
from .upload import views as upload_views
router = SimpleRouter(custom_base_name="basebone-app")
router.register("upload", upload_views.UploadViewSet)
urlpatterns = router.urls
| 24.6 | 64 | 0.829268 | from lightning_plus.api_basebone.drf.routers import SimpleRouter
from .upload import views as upload_views
router = SimpleRouter(custom_base_name="basebone-app")
router.register("upload", upload_views.UploadViewSet)
urlpatterns = router.urls
| true | true |
f715182910b4d2b719d57bc37051be59f816ba91 | 2,850 | py | Python | curriculum/envs/maze/maze_swim/swimmer_env.py | coco-robotics/rllab-curriculum | f55b50224fcf5a9a5c064542eb0850a966cab223 | [
"MIT"
] | 115 | 2017-12-06T16:31:10.000Z | 2022-03-01T13:13:55.000Z | curriculum/envs/maze/maze_swim/swimmer_env.py | coco-robotics/rllab-curriculum | f55b50224fcf5a9a5c064542eb0850a966cab223 | [
"MIT"
] | 21 | 2017-11-15T18:28:16.000Z | 2021-04-22T15:26:45.000Z | curriculum/envs/maze/maze_swim/swimmer_env.py | coco-robotics/rllab-curriculum | f55b50224fcf5a9a5c064542eb0850a966cab223 | [
"MIT"
] | 46 | 2017-12-22T22:26:01.000Z | 2022-02-17T06:34:15.000Z | from rllab.envs.base import Step
from rllab.misc.overrides import overrides
from rllab.envs.mujoco.mujoco_env import MujocoEnv
import numpy as np
from rllab.core.serializable import Serializable
from rllab.misc import logger
from rllab.misc import autoargs
from contextlib import contextmanager
class SwimmerEnv(MujocoEnv, Serializable):
    """Mujoco swimmer locomotion environment.

    The reward is the torso's forward (x) velocity minus a quadratic
    control cost scaled by ``ctrl_cost_coeff``. Episodes never terminate
    from within ``step``.
    """

    FILE = 'swimmer.xml'

    @autoargs.arg('ctrl_cost_coeff', type=float,
                  help='cost coefficient for controls')
    def __init__(
            self,
            ctrl_cost_coeff=1e-2,
            *args, **kwargs):
        self.ctrl_cost_coeff = ctrl_cost_coeff
        super(SwimmerEnv, self).__init__(*args, **kwargs)
        Serializable.quick_init(self, locals())

    def get_current_obs(self):
        # Joint positions and velocities plus the torso center of mass.
        return np.concatenate([
            self.model.data.qpos.flat,
            self.model.data.qvel.flat,
            self.get_body_com("torso").flat,
        ]).reshape(-1)

    def step(self, action):
        self.forward_dynamics(action)
        next_obs = self.get_current_obs()
        lb, ub = self.action_bounds
        scaling = (ub - lb) * 0.5
        # Quadratic action penalty, normalised by half the action range.
        ctrl_cost = 0.5 * self.ctrl_cost_coeff * np.sum(
            np.square(action / scaling))
        forward_reward = self.get_body_comvel("torso")[0]
        reward = forward_reward - ctrl_cost
        done = False
        return Step(next_obs, reward, done)

    # NOTE: a commented-out reset_mujoco override (writing the torso COM
    # from init_state) was removed here as dead code.
    @overrides  # ignoring the goal
    def reset(self, *args, **kwargs):
        return super(SwimmerEnv, self).reset(*args, **kwargs)  # passing in keyword arguments

    @overrides
    def log_diagnostics(self, paths):
        # Forward progress is the change in torso x-position, which is the
        # third-to-last entry of the observation vector.
        if len(paths) > 0:
            progs = [
                path["observations"][-1][-3] - path["observations"][0][-3]
                for path in paths
            ]
            logger.record_tabular('AverageForwardProgress', np.mean(progs))
            logger.record_tabular('MaxForwardProgress', np.max(progs))
            logger.record_tabular('MinForwardProgress', np.min(progs))
            logger.record_tabular('StdForwardProgress', np.std(progs))
        else:
            logger.record_tabular('AverageForwardProgress', np.nan)
            logger.record_tabular('MaxForwardProgress', np.nan)
            logger.record_tabular('MinForwardProgress', np.nan)
            logger.record_tabular('StdForwardProgress', np.nan)

    @contextmanager
    def set_kill_outside(self):
        # Temporarily flag the env (read by maze wrappers) so rollouts
        # terminate when the agent leaves the allowed region.
        self.kill_outside = True
        try:
            yield
        finally:
            self.kill_outside = False
from rllab.misc.overrides import overrides
from rllab.envs.mujoco.mujoco_env import MujocoEnv
import numpy as np
from rllab.core.serializable import Serializable
from rllab.misc import logger
from rllab.misc import autoargs
from contextlib import contextmanager
class SwimmerEnv(MujocoEnv, Serializable):
    # Duplicate, comment-stripped copy of the swimmer environment above.
    FILE = 'swimmer.xml'

    @autoargs.arg('ctrl_cost_coeff', type=float,
                  help='cost coefficient for controls')
    def __init__(
            self,
            ctrl_cost_coeff=1e-2,
            *args, **kwargs):
        self.ctrl_cost_coeff = ctrl_cost_coeff
        super(SwimmerEnv, self).__init__(*args, **kwargs)
        Serializable.quick_init(self, locals())

    def get_current_obs(self):
        # Joint state plus torso center of mass.
        return np.concatenate([
            self.model.data.qpos.flat,
            self.model.data.qvel.flat,
            self.get_body_com("torso").flat,
        ]).reshape(-1)

    def step(self, action):
        self.forward_dynamics(action)
        next_obs = self.get_current_obs()
        lb, ub = self.action_bounds
        scaling = (ub - lb) * 0.5
        # Reward = forward torso velocity - quadratic control cost.
        ctrl_cost = 0.5 * self.ctrl_cost_coeff * np.sum(
            np.square(action / scaling))
        forward_reward = self.get_body_comvel("torso")[0]
        reward = forward_reward - ctrl_cost
        done = False
        return Step(next_obs, reward, done)

    @overrides
    def reset(self, *args, **kwargs):
        return super(SwimmerEnv, self).reset(*args, **kwargs)

    @overrides
    def log_diagnostics(self, paths):
        # Forward progress: change of torso x-position over the rollout.
        if len(paths) > 0:
            progs = [
                path["observations"][-1][-3] - path["observations"][0][-3]
                for path in paths
            ]
            logger.record_tabular('AverageForwardProgress', np.mean(progs))
            logger.record_tabular('MaxForwardProgress', np.max(progs))
            logger.record_tabular('MinForwardProgress', np.min(progs))
            logger.record_tabular('StdForwardProgress', np.std(progs))
        else:
            logger.record_tabular('AverageForwardProgress', np.nan)
            logger.record_tabular('MaxForwardProgress', np.nan)
            logger.record_tabular('MinForwardProgress', np.nan)
            logger.record_tabular('StdForwardProgress', np.nan)

    @contextmanager
    def set_kill_outside(self):
        # Temporarily flag the env so rollouts terminate outside the maze.
        self.kill_outside = True
        try:
            yield
        finally:
            self.kill_outside = False
f71519f0a0fdeeeaa35b6e3d88c07e3139a2deb6 | 24,935 | py | Python | tests/core/test_task.py | dsaxton/prefect | 2b7e9c33cfeedebdb6ce3a8e468ac130c3a48bbf | [
"Apache-2.0"
] | null | null | null | tests/core/test_task.py | dsaxton/prefect | 2b7e9c33cfeedebdb6ce3a8e468ac130c3a48bbf | [
"Apache-2.0"
] | null | null | null | tests/core/test_task.py | dsaxton/prefect | 2b7e9c33cfeedebdb6ce3a8e468ac130c3a48bbf | [
"Apache-2.0"
] | null | null | null | import inspect
import logging
from datetime import timedelta
from typing import Any, Tuple
import pytest
import prefect
from prefect.core import Edge, Flow, Parameter, Task
from prefect.engine.cache_validators import all_inputs, duration_only, never_use
from prefect.engine.results import PrefectResult, LocalResult
from prefect.utilities.configuration import set_temporary_config
from prefect.configuration import process_task_defaults
from prefect.utilities.tasks import task
class AddTask(Task):
    # Minimal concrete Task used throughout these tests: adds its two
    # arguments, with ``y`` defaulting to 1.
    def run(self, x, y=1):
        return x + y
class TestCreateTask:
    """Test various Task constructors"""

    def test_create_task_with_no_args(self):
        """Tasks can be created with no arguments"""
        assert Task()

    def test_create_task_is_not_auto_generated(self):
        assert Task().auto_generated is False

    def test_create_task_with_name(self):
        t1 = Task()
        assert t1.name == "Task"

        t2 = Task(name="test")
        assert t2.name == "test"

    def test_create_task_with_cache_key(self):
        t1 = Task()
        assert t1.cache_key is None

        t2 = Task(cache_key="test")
        assert t2.cache_key == "test"

    def test_create_task_with_slug(self):
        t1 = Task()
        assert t1.slug is None

        t2 = Task(slug="test")
        assert t2.slug == "test"

    def test_create_task_with_max_retries(self):
        t1 = Task()
        assert t1.max_retries == 0

        t2 = Task(max_retries=5, retry_delay=timedelta(0))
        assert t2.max_retries == 5

        with set_temporary_config({"tasks.defaults.max_retries": 3}) as config:
            # Cover type casting of task defaults
            process_task_defaults(config)
            t3 = Task(retry_delay=timedelta(0))
            assert t3.max_retries == 3

    def test_create_task_with_retry_delay(self):
        t1 = Task(retry_delay=timedelta(seconds=30), max_retries=1)
        assert t1.retry_delay == timedelta(seconds=30)

        with set_temporary_config({"tasks.defaults.retry_delay": 3}) as config:
            # Cover type casting of task defaults
            process_task_defaults(config)
            t2 = Task(max_retries=1)
            assert t2.retry_delay == timedelta(seconds=3)

    def test_create_task_with_max_retries_and_no_retry_delay(self):
        with pytest.raises(ValueError):
            Task(max_retries=1, retry_delay=None)

    def test_create_task_with_retry_delay_and_no_max_retries(self):
        with pytest.raises(
            ValueError,
            match="A `max_retries` argument greater than 0 must be provided if specifying a retry delay",
        ):
            Task(retry_delay=timedelta(seconds=30))

    @pytest.mark.parametrize("max_retries", [None, 0, False])
    def test_create_task_with_retry_delay_and_invalid_max_retries(self, max_retries):
        with pytest.raises(
            ValueError,
            match="A `max_retries` argument greater than 0 must be provided if specifying a retry delay",
        ):
            Task(retry_delay=timedelta(seconds=30), max_retries=max_retries)

    def test_create_task_with_max_retry_override_to_0(self):
        with set_temporary_config(
            {"tasks.defaults.max_retries": 3, "tasks.defaults.retry_delay": 3}
        ) as config:
            process_task_defaults(config)
            t = Task(max_retries=0, retry_delay=None)
            assert t.max_retries == 0
            assert t.retry_delay is None

            # max_retries set to 0 will not pull retry_delay from the config
            process_task_defaults(config)
            t = Task(max_retries=0)
            assert t.max_retries == 0
            assert t.retry_delay is None

    def test_create_task_with_timeout(self):
        t1 = Task()
        assert t1.timeout is None

        # fractional-second timeouts are rejected
        with pytest.raises(TypeError):
            Task(timeout=0.5)

        t3 = Task(timeout=1)
        assert t3.timeout == 1

        with set_temporary_config({"tasks.defaults.timeout": 3}) as config:
            # Cover type casting of task defaults
            process_task_defaults(config)
            t4 = Task()
            assert t4.timeout == 3

        # timedeltas are converted to whole seconds; sub-second parts warn
        t4 = Task(timeout=timedelta(seconds=2))
        assert t4.timeout == 2

        with pytest.warns(UserWarning):
            t5 = Task(timeout=timedelta(seconds=3, milliseconds=1, microseconds=1))
        assert t5.timeout == 3

    def test_create_task_with_trigger(self):
        t1 = Task()
        assert t1.trigger is prefect.triggers.all_successful

        t2 = Task(trigger=prefect.triggers.all_failed)
        assert t2.trigger == prefect.triggers.all_failed

    def test_create_task_without_state_handler(self):
        assert Task().state_handlers == []

    @pytest.mark.parametrize("handlers", [[lambda *a: 1], [lambda *a: 1, lambda *a: 2]])
    def test_create_task_with_state_handler(self, handlers):
        assert Task(state_handlers=handlers).state_handlers == handlers

    def test_create_task_with_on_failure(self):
        t = Task(on_failure=lambda *args: None)
        assert len(t.state_handlers) == 1

    def test_create_task_illegal_handler(self):
        # state_handlers must be a list of callables, not a bare callable
        with pytest.raises(TypeError):
            Task(state_handlers=lambda *a: 1)

    def test_class_instantiation_rejects_varargs(self):
        with pytest.raises(ValueError):

            class VarArgsTask(Task):
                def run(self, x, *y):
                    pass

    # `mapped`, `upstream_tasks`, `flow` and `task_args` are reserved
    # keyword names of Task.__call__ / Task.map, so run() may not use them.
    def test_class_instantiation_rejects_mapped_kwarg(self):
        with pytest.raises(ValueError):

            class MappedTasks(Task):
                def run(self, x, mapped):
                    pass

        with pytest.raises(ValueError):

            class MappedTasks(Task):
                def run(self, x, mapped=None):
                    pass

    def test_class_instantiation_rejects_mapped_kwarg_decorator(self):
        with pytest.raises(ValueError):

            @task
            def run(x, mapped):
                pass

        with pytest.raises(ValueError):

            @task
            def run(x, mapped=None):
                pass

    def test_class_instantiation_rejects_upstream_tasks_kwarg(self):
        with pytest.raises(ValueError):

            class UpstreamTasks(Task):
                def run(self, x, upstream_tasks):
                    pass

        with pytest.raises(ValueError):

            class UpstreamTasks(Task):
                def run(self, x, upstream_tasks=None):
                    pass

    def test_class_instantiation_rejects_upstream_tasks_kwarg_decorator(self):
        with pytest.raises(ValueError):

            @task
            def run(x, upstream_tasks):
                pass

        with pytest.raises(ValueError):

            @task
            def run(x, upstream_tasks=None):
                pass

    def test_class_instantiation_rejects_flow_kwarg(self):
        with pytest.raises(ValueError):

            class FlowTasks(Task):
                def run(self, x, flow):
                    pass

        with pytest.raises(ValueError):

            class FlowTasks(Task):
                def run(self, x, flow=None):
                    pass

    def test_class_instantiation_rejects_flow_kwarg_decorator(self):
        with pytest.raises(ValueError):

            @task
            def run(x, flow):
                pass

        with pytest.raises(ValueError):

            @task
            def run(x, flow=None):
                pass

    def test_class_instantiation_rejects_task_args_kwarg(self):
        with pytest.raises(ValueError):

            class TaskArgs(Task):
                def run(self, x, task_args):
                    pass

        with pytest.raises(ValueError):

            class TaskArgs(Task):
                def run(self, x, task_args=None):
                    pass

    def test_class_instantiation_rejects_task_args_kwarg_decorator(self):
        with pytest.raises(ValueError):

            @task
            def run(x, task_args):
                pass

        with pytest.raises(ValueError):

            @task
            def run(x, task_args=None):
                pass

    def test_class_instantiation_raises_helpful_warning_for_unsupported_callables(self):
        with pytest.raises(ValueError, match="This function can not be inspected"):
            task(zip)

    def test_task_signature_generation(self):
        class Test(Task):
            def run(self, x: int, y: bool, z: int = 1, **kwargs):
                pass

        t = Test()

        sig = inspect.signature(t)
        # signature is a superset of the `run` method
        for k, p in inspect.signature(t.run).parameters.items():
            assert sig.parameters[k] == p
        # extra kwonly args to __call__ also in sig
        assert set(sig.parameters).issuperset(
            {"mapped", "task_args", "upstream_tasks", "flow"}
        )
        assert sig.return_annotation == "Task"

        # doesn't override class signature
        class_sig = inspect.signature(Test)
        assert "name" in class_sig.parameters

    def test_create_task_with_and_without_cache_for(self):
        t1 = Task()
        assert t1.cache_validator is never_use
        t2 = Task(cache_for=timedelta(days=1))
        assert t2.cache_validator is duration_only
        t3 = Task(cache_for=timedelta(days=1), cache_validator=all_inputs)
        assert t3.cache_validator is all_inputs

    def test_bad_cache_kwarg_combo(self):
        # a cache_validator without cache_for is meaningless and warns
        with pytest.warns(UserWarning, match=".*Task will not be cached.*"):
            Task(cache_validator=all_inputs)

    def test_create_task_with_and_without_result(self):
        t1 = Task()
        assert t1.result is None
        t2 = Task(result=PrefectResult())
        assert isinstance(t2.result, PrefectResult)

    def test_create_parameter_uses_prefect_result(self):
        p = Parameter("p")
        assert isinstance(p.result, PrefectResult)

    def test_create_task_with_and_without_checkpoint(self):
        t = Task()
        assert t.checkpoint is None

        s = Task(checkpoint=True)
        assert s.checkpoint is True

    def test_create_task_with_and_without_log_stdout(self):
        t = Task()
        assert t.log_stdout is False

        s = Task(log_stdout=True)
        assert s.log_stdout is True

    def test_create_task_with_task_run_name(self):
        # task_run_name accepts a plain string or a callable
        t1 = Task()
        assert t1.task_run_name is None
        t2 = Task(task_run_name="test")
        assert t2.task_run_name == "test"
        t2 = Task(task_run_name=lambda: 42)
        assert t2.task_run_name() == 42
def test_task_has_logger():
    # Every task exposes a logger namespaced under "prefect".
    t = Task()
    assert isinstance(t.logger, logging.Logger)
    assert t.logger.name == "prefect.Task"


def test_task_has_logger_with_informative_name():
    # The logger name reflects the task's custom name.
    t = Task(name="foo")
    assert isinstance(t.logger, logging.Logger)
    assert t.logger.name == "prefect.foo"


def test_task_produces_no_result():
    # The base Task.run is a no-op returning None.
    t = Task()
    assert t.run() is None


def test_task_is_not_iterable():
    t = Task()
    with pytest.raises(TypeError):
        list(t)
def test_tags_are_added_when_arguments_are_bound():
    # Context tags are merged with constructor tags when a task is bound
    # into a flow (both via .bind and via __call__).
    t1 = AddTask(tags=["math"])
    t2 = AddTask(tags=["math"])

    with prefect.context(tags=["test"]):
        with Flow(name="test"):
            t1.bind(1, 2)
            t3 = t2(1, 2)

    assert t1.tags == {"math", "test"}
    assert t3.tags == {"math", "test"}


def test_tags():
    # Tags must be a non-string iterable; duplicates collapse into a set
    # and tags from prefect.context are unioned in.
    t1 = Task()
    assert t1.tags == set()

    with pytest.raises(TypeError):
        Task(tags="test")

    t3 = Task(tags=["test", "test2", "test"])
    assert t3.tags == {"test", "test2"}

    with prefect.context(tags=["test"]):
        t4 = Task()
        assert t4.tags == {"test"}

    with prefect.context(tags=["test1", "test2"]):
        t5 = Task(tags=["test3"])
        assert t5.tags == {"test1", "test2", "test3"}
class TestInputsOutputs:
    # Verifies Task.inputs()/outputs() introspection for both subclassed
    # and decorator-created tasks, using the run() annotations.
    class add(Task):
        def run(self, x, y: int = 1) -> int:
            return x + y

    @task
    def mult(x, y: int = 1) -> int:
        return x * y

    def test_inputs(self):
        assert self.add().inputs() == dict(
            x=dict(type=Any, required=True, default=None),
            y=dict(type=int, required=False, default=1),
        )

    def test_inputs_task_decorator(self):
        with Flow("test"):
            assert self.mult(x=1).inputs() == dict(
                x=dict(type=Any, required=True, default=None),
                y=dict(type=int, required=False, default=1),
            )

    def test_outputs(self):
        assert self.add().outputs() == int

    def test_outputs_task_decorator(self):
        with Flow("test"):
            assert self.mult(x=1).outputs() == int
class TestTaskCopy:
    # Behavior of Task.copy(): identity, slugs, task_args overrides, and
    # result/target interaction.
    def test_copy_copies(self):
        class CopyTask(Task):
            class_attr = 42

            def __init__(self, instance_val, **kwargs):
                self.instance_val = instance_val
                super().__init__(**kwargs)

            def run(self, run_val):
                return (run_val, self.class_attr, self.instance_val)

        ct = CopyTask("username")
        other = ct.copy()
        # the copy is a distinct object with the same behavior
        assert isinstance(other, CopyTask)
        assert ct is not other
        assert hash(ct) != hash(other)
        assert ct != other
        assert other.run("pass") == ("pass", 42, "username")

    def test_copy_warns_if_dependencies_in_active_flow(self):
        t1 = Task()
        t2 = Task()

        with Flow(name="test") as flow:
            t1.set_dependencies(downstream_tasks=[t2])
            with pytest.warns(UserWarning, match="You are making a copy"):
                flow.add_task(t1.copy())

        with Flow(name="test") as flow:
            with pytest.warns(None) as rec:
                flow.add_task(t1.copy())
            # no dependencies in this flow
            assert len(rec) == 0

    def test_copy_changes_slug(self):
        t1 = Task(slug="test")
        t2 = t1.copy()
        assert t1.slug == "test"
        assert t1.slug != t2.slug

    def test_copy_accepts_task_args(self):
        t = Task()
        t2 = t.copy(name="new-task")
        t3 = t.copy(**{"max_retries": 4200})

        assert t2.name == "new-task"
        assert t3.max_retries == 4200

    def test_copy_accepts_slug_as_task_args(self):
        t = Task(slug="test")
        t2 = t.copy(slug="test-2")
        assert t.slug == "test"
        assert t2.slug == "test-2"

    def test_copy_appropriately_sets_result_target_if_target_provided(self):
        # https://github.com/PrefectHQ/prefect/issues/2588
        @task(target="target", result=LocalResult(dir="."))
        def X():
            pass

        @task
        def Y():
            pass

        with Flow("test"):
            x = X()
            y = Y(task_args=dict(target="target", result=LocalResult(dir=".")))

        assert x.result.location == "target"
        assert y.result.location == "target"
class TestDependencies:
    # set_upstream / set_downstream edge creation, with and without an
    # active flow context, including key/mapped edge properties.
    def test_set_downstream(self):
        f = Flow(name="test")
        t1 = Task()
        t2 = Task()
        t1.set_downstream(t2, flow=f)
        assert Edge(t1, t2) in f.edges

    def test_set_downstream_context(self):
        with Flow(name="test") as f:
            t1 = Task()
            t2 = Task()
            t1.set_downstream(t2)
            assert Edge(t1, t2) in f.edges

    def test_set_downstream_no_flow(self):
        # outside a flow context an explicit flow is mandatory
        t1 = Task()
        t2 = Task()
        with pytest.raises(ValueError, match="No Flow was passed"):
            t1.set_downstream(t2)

    @pytest.mark.parametrize(
        "props", [{"mapped": True}, {"key": "x"}, {"key": "x", "mapped": True}]
    )
    def test_set_downstream_with_properties(self, props):
        with Flow(name="test") as f:
            t1 = Task()
            t2 = Task()
            t1.set_downstream(t2, **props)
            assert Edge(t1, t2, **props) in f.edges

    def test_set_upstream(self):
        f = Flow(name="test")
        t1 = Task()
        t2 = Task()
        t2.set_upstream(t1, flow=f)
        assert Edge(t1, t2) in f.edges

    def test_set_upstream_context(self):
        with Flow(name="test") as f:
            t1 = Task()
            t2 = Task()
            t2.set_upstream(t1)
            assert Edge(t1, t2) in f.edges

    def test_set_upstream_no_flow(self):
        t1 = Task()
        t2 = Task()
        with pytest.raises(ValueError, match="No Flow was passed"):
            t2.set_upstream(t1)

    @pytest.mark.parametrize(
        "props", [{"mapped": True}, {"key": "x"}, {"key": "x", "mapped": True}]
    )
    def test_set_upstream_with_properties(self, props):
        with Flow(name="test") as f:
            t1 = Task()
            t2 = Task()
            t2.set_upstream(t1, **props)
            assert Edge(t1, t2, **props) in f.edges

    def test_set_dependencies_stream_allows_chaining(self):
        # the dependency setters return the task itself for fluent use
        t1 = Task()
        t2 = Task()
        t3 = Task()
        with Flow(name="test") as f:
            t1_result = t1()
            t2_result = t2()
            t3_result = t3()
            assert t1_result.set_downstream(t2_result) is t1_result
            assert t3_result.set_upstream(t2_result) is t3_result
            assert (
                t3_result.set_dependencies(f, upstream_tasks=[t1_result]) is t3_result
            )
class TestSerialization:
    # Round-tripping tasks and parameters through the serialization
    # schemas; subclasses deserialize as the base Task type.
    def test_serialization(self):
        t = Task(name="test")
        s = t.serialize()

        assert isinstance(s, dict)
        assert s["slug"] == t.slug
        assert s["type"] == "prefect.core.task.Task"
        assert s["name"] == t.name

    def test_subclass_serialization(self):
        class NewTask(Task):
            pass

        s = NewTask().serialize()

        assert isinstance(s, dict)
        assert s["type"].endswith(".NewTask")

    def test_deserialization(self):
        t = Task(name="test")
        s = t.serialize()
        t2 = prefect.serialization.task.TaskSchema().load(s)
        assert isinstance(t2, Task)
        assert t2.name == t.name

    def test_subclass_deserialization(self):
        class NewTask(Task):
            pass

        t = NewTask(name="test")
        s = t.serialize()
        t2 = prefect.serialization.task.TaskSchema().load(s)
        # the custom subclass is not reconstructed
        assert type(t2) is Task
        assert not isinstance(t2, NewTask)
        assert t2.name == t.name

    def test_parameter_serialization(self):
        p = Parameter(name="p")
        serialized = p.serialize()
        assert serialized["name"] == "p"
        assert serialized["default"] is None
        assert serialized["required"] is True

    def test_parameter_deserialization(self):
        p = Parameter(name="p")
        serialized = p.serialize()
        p2 = prefect.serialization.task.ParameterSchema().load(serialized)
        assert isinstance(p2, Parameter)
        assert p2.name == p.name
        assert p2.required == p.required
        assert p2.default == p.default
class TestTaskArgs:
    # The task_args keyword of __call__ / map overrides task attributes on
    # the bound copy.
    def test_task_args_raises_for_non_attrs(self):
        t = Task()
        with Flow(name="test"):
            with pytest.raises(AttributeError, match="foo"):
                t(task_args={"foo": "bar"})

    @pytest.mark.parametrize(
        "attr,val",
        [
            ("name", "foo-bar"),
            ("slug", "foo-bar"),
            ("max_retries", 4200),
            ("retry_delay", timedelta(seconds=1)),
            ("timeout", 12),
            ("skip_on_upstream_skip", False),
            ("cache_for", timedelta(seconds=1)),
        ],
    )
    def test_task_args_sets_new_attrs(self, attr, val):
        t = Task()
        with Flow(name="test") as f:
            t(task_args={attr: val})
        assert getattr(f.tasks.pop(), attr) == val

    @pytest.mark.parametrize(
        "attr,val",
        [
            ("name", "foo-bar"),
            ("slug", "foo-bar"),
            ("max_retries", 4200),
            ("retry_delay", timedelta(seconds=1)),
            ("timeout", 12),
            ("skip_on_upstream_skip", False),
            ("cache_for", timedelta(seconds=1)),
        ],
    )
    def test_task_args_sets_new_attrs_on_mapped_tasks(self, attr, val):
        t = Task()
        with Flow(name="test") as f:
            t.map(upstream_tasks=[1, 2, 3, 4], task_args={attr: val})
        tasks = f.get_tasks(name="Task")
        assert all(getattr(tt, attr) == val for tt in tasks)

    def test_tags_are_appended_to_when_updating_with_task_args(self):
        # tags given via task_args extend (not replace) existing tags
        t = AddTask(tags=["math"])

        with prefect.context(tags=["test"]):
            with Flow(name="test"):
                t2 = t(1, 2, task_args={"name": "test-tags", "tags": ["new-tag"]})

        assert t2.tags == {"math", "test", "new-tag"}

    def test_task_check_mapped_args_are_subscriptable_in_advance(self):
        # mapping over an unordered set is rejected up front
        t = Task()
        with pytest.raises(TypeError):
            with Flow(name="test"):
                t.map({1, 2, 3, 4})
class TestTaskNout:
    # `nout` controls tuple-unpacking of task results; it can be passed
    # explicitly or inferred from the run() return annotation.
    def test_nout_defaults_to_none(self):
        @task
        def test(self):
            pass

        assert test.nout is None

    def test_nout_provided_explicitly(self):
        @task(nout=2)
        def test(self):
            pass

        assert test.nout == 2

    @pytest.mark.parametrize(
        "ret_type, nout",
        [
            (int, None),
            (Tuple, None),
            (Tuple[()], 0),
            (Tuple[int, ...], None),  # variadic tuples have unknown length
            (Tuple[int, int], 2),
            (Tuple[int, float, str], 3),
        ],
    )
    def test_nout_inferred_from_signature(self, ret_type, nout):
        @task
        def test(a) -> ret_type:
            pass

        assert test.nout == nout

    def test_nout_none_not_iterable(self):
        @task
        def test(a):
            return a + 1, a - 1

        with Flow("test"):
            with pytest.raises(TypeError, match="Task is not iterable"):
                a, b = test(1)

    def test_nout_provided_is_iterable(self):
        @task(nout=2)
        def test(a):
            return a + 1, a - 1

        with Flow("test") as flow:
            a, b = test(1)

        res = flow.run()
        assert res.result[a].result == 2
        assert res.result[b].result == 0

    def test_nout_not_set_on_mapped_tasks(self):
        # nout does not apply to mapped results
        @task(nout=2)
        def test(a):
            return a + 1, a - 1

        with Flow("test"):
            with pytest.raises(TypeError, match="Task is not iterable"):
                a, b = test.map(range(10))
@pytest.mark.skip("Result handlers not yet deprecated")
def test_cache_options_show_deprecation():
with pytest.warns(
UserWarning, match=r"all cache_\* options on a Task will be deprecated*"
):
Task(cache_for=object())
with pytest.warns(
UserWarning, match=r"all cache_\* options on a Task will be deprecated*"
):
Task(cache_validator=object())
with pytest.warns(
UserWarning, match=r"all cache_\* options on a Task will be deprecated*"
):
Task(cache_key=object())
def test_passing_task_to_task_constructor_raises_helpful_warning():
class MyTask(Task):
def __init__(self, a, b, **kwargs):
self.a = a
self.b = b
super().__init__(**kwargs)
with Flow("test"):
a = Task()()
with pytest.warns(
UserWarning, match="A Task was passed as an argument to MyTask"
):
t = MyTask(1, a)()
# Warning doesn't stop normal operation
assert t.a == 1
assert t.b == a
def test_task_init_uses_reserved_attribute_raises_helpful_warning():
class MyTask(Task):
def __init__(self, **kwargs):
self.a = 1
self.target = "oh no!"
super().__init__(**kwargs)
with Flow("test"):
with pytest.warns(UserWarning, match="`MyTask` sets a `target` attribute"):
MyTask()
@pytest.mark.parametrize("use_function_task", [True, False])
def test_task_called_outside_flow_context_raises_helpful_error(use_function_task):
if use_function_task:
@prefect.task
def fn(x):
return x
else:
class Fn(Task):
def run(self, x):
return x
fn = Fn()
with pytest.raises(
ValueError,
match=f"Could not infer an active Flow context while creating edge to {fn}",
) as exc_info:
fn(1)
run_call = "`fn.run(...)`" if use_function_task else "`Fn(...).run(...)`"
assert (
"If you're trying to run this task outside of a Flow context, "
f"you need to call {run_call}" in str(exc_info)
)
def test_task_call_with_self_succeeds():
import dataclasses
@dataclasses.dataclass
class TestClass:
count: int
def increment(self):
self.count = self.count + 1
seconds_task = task(
TestClass.increment, target="{{task_slug}}_{{map_index}}", result=LocalResult()
)
initial = TestClass(count=0)
with Flow("test") as flow:
seconds_task(initial)
assert flow.run().is_successful()
| 29.404481 | 105 | 0.585121 | import inspect
import logging
from datetime import timedelta
from typing import Any, Tuple
import pytest
import prefect
from prefect.core import Edge, Flow, Parameter, Task
from prefect.engine.cache_validators import all_inputs, duration_only, never_use
from prefect.engine.results import PrefectResult, LocalResult
from prefect.utilities.configuration import set_temporary_config
from prefect.configuration import process_task_defaults
from prefect.utilities.tasks import task
class AddTask(Task):
def run(self, x, y=1):
return x + y
class TestCreateTask:
def test_create_task_with_no_args(self):
assert Task()
def test_create_task_is_not_auto_generated(self):
assert Task().auto_generated is False
def test_create_task_with_name(self):
t1 = Task()
assert t1.name == "Task"
t2 = Task(name="test")
assert t2.name == "test"
def test_create_task_with_cache_key(self):
t1 = Task()
assert t1.cache_key is None
t2 = Task(cache_key="test")
assert t2.cache_key == "test"
def test_create_task_with_slug(self):
t1 = Task()
assert t1.slug is None
t2 = Task(slug="test")
assert t2.slug == "test"
def test_create_task_with_max_retries(self):
t1 = Task()
assert t1.max_retries == 0
t2 = Task(max_retries=5, retry_delay=timedelta(0))
assert t2.max_retries == 5
with set_temporary_config({"tasks.defaults.max_retries": 3}) as config:
process_task_defaults(config)
t3 = Task(retry_delay=timedelta(0))
assert t3.max_retries == 3
def test_create_task_with_retry_delay(self):
t1 = Task(retry_delay=timedelta(seconds=30), max_retries=1)
assert t1.retry_delay == timedelta(seconds=30)
with set_temporary_config({"tasks.defaults.retry_delay": 3}) as config:
process_task_defaults(config)
t2 = Task(max_retries=1)
assert t2.retry_delay == timedelta(seconds=3)
def test_create_task_with_max_retries_and_no_retry_delay(self):
with pytest.raises(ValueError):
Task(max_retries=1, retry_delay=None)
def test_create_task_with_retry_delay_and_no_max_retries(self):
with pytest.raises(
ValueError,
match="A `max_retries` argument greater than 0 must be provided if specifying a retry delay",
):
Task(retry_delay=timedelta(seconds=30))
@pytest.mark.parametrize("max_retries", [None, 0, False])
def test_create_task_with_retry_delay_and_invalid_max_retries(self, max_retries):
with pytest.raises(
ValueError,
match="A `max_retries` argument greater than 0 must be provided if specifying a retry delay",
):
Task(retry_delay=timedelta(seconds=30), max_retries=max_retries)
def test_create_task_with_max_retry_override_to_0(self):
with set_temporary_config(
{"tasks.defaults.max_retries": 3, "tasks.defaults.retry_delay": 3}
) as config:
process_task_defaults(config)
t = Task(max_retries=0, retry_delay=None)
assert t.max_retries == 0
assert t.retry_delay is None
process_task_defaults(config)
t = Task(max_retries=0)
assert t.max_retries == 0
assert t.retry_delay is None
def test_create_task_with_timeout(self):
t1 = Task()
assert t1.timeout is None
with pytest.raises(TypeError):
Task(timeout=0.5)
t3 = Task(timeout=1)
assert t3.timeout == 1
with set_temporary_config({"tasks.defaults.timeout": 3}) as config:
process_task_defaults(config)
t4 = Task()
assert t4.timeout == 3
t4 = Task(timeout=timedelta(seconds=2))
assert t4.timeout == 2
with pytest.warns(UserWarning):
t5 = Task(timeout=timedelta(seconds=3, milliseconds=1, microseconds=1))
assert t5.timeout == 3
def test_create_task_with_trigger(self):
t1 = Task()
assert t1.trigger is prefect.triggers.all_successful
t2 = Task(trigger=prefect.triggers.all_failed)
assert t2.trigger == prefect.triggers.all_failed
def test_create_task_without_state_handler(self):
assert Task().state_handlers == []
@pytest.mark.parametrize("handlers", [[lambda *a: 1], [lambda *a: 1, lambda *a: 2]])
def test_create_task_with_state_handler(self, handlers):
assert Task(state_handlers=handlers).state_handlers == handlers
def test_create_task_with_on_failure(self):
t = Task(on_failure=lambda *args: None)
assert len(t.state_handlers) == 1
def test_create_task_illegal_handler(self):
with pytest.raises(TypeError):
Task(state_handlers=lambda *a: 1)
def test_class_instantiation_rejects_varargs(self):
with pytest.raises(ValueError):
class VarArgsTask(Task):
def run(self, x, *y):
pass
def test_class_instantiation_rejects_mapped_kwarg(self):
with pytest.raises(ValueError):
class MappedTasks(Task):
def run(self, x, mapped):
pass
with pytest.raises(ValueError):
class MappedTasks(Task):
def run(self, x, mapped=None):
pass
def test_class_instantiation_rejects_mapped_kwarg_decorator(self):
with pytest.raises(ValueError):
@task
def run(x, mapped):
pass
with pytest.raises(ValueError):
@task
def run(x, mapped=None):
pass
def test_class_instantiation_rejects_upstream_tasks_kwarg(self):
with pytest.raises(ValueError):
class UpstreamTasks(Task):
def run(self, x, upstream_tasks):
pass
with pytest.raises(ValueError):
class UpstreamTasks(Task):
def run(self, x, upstream_tasks=None):
pass
def test_class_instantiation_rejects_upstream_tasks_kwarg_decorator(self):
with pytest.raises(ValueError):
@task
def run(x, upstream_tasks):
pass
with pytest.raises(ValueError):
@task
def run(x, upstream_tasks=None):
pass
def test_class_instantiation_rejects_flow_kwarg(self):
with pytest.raises(ValueError):
class FlowTasks(Task):
def run(self, x, flow):
pass
with pytest.raises(ValueError):
class FlowTasks(Task):
def run(self, x, flow=None):
pass
def test_class_instantiation_rejects_flow_kwarg_decorator(self):
with pytest.raises(ValueError):
@task
def run(x, flow):
pass
with pytest.raises(ValueError):
@task
def run(x, flow=None):
pass
def test_class_instantiation_rejects_task_args_kwarg(self):
with pytest.raises(ValueError):
class TaskArgs(Task):
def run(self, x, task_args):
pass
with pytest.raises(ValueError):
class TaskArgs(Task):
def run(self, x, task_args=None):
pass
def test_class_instantiation_rejects_task_args_kwarg_decorator(self):
with pytest.raises(ValueError):
@task
def run(x, task_args):
pass
with pytest.raises(ValueError):
@task
def run(x, task_args=None):
pass
def test_class_instantiation_raises_helpful_warning_for_unsupported_callables(self):
with pytest.raises(ValueError, match="This function can not be inspected"):
task(zip)
def test_task_signature_generation(self):
class Test(Task):
def run(self, x: int, y: bool, z: int = 1, **kwargs):
pass
t = Test()
sig = inspect.signature(t)
for k, p in inspect.signature(t.run).parameters.items():
assert sig.parameters[k] == p
assert set(sig.parameters).issuperset(
{"mapped", "task_args", "upstream_tasks", "flow"}
)
assert sig.return_annotation == "Task"
class_sig = inspect.signature(Test)
assert "name" in class_sig.parameters
def test_create_task_with_and_without_cache_for(self):
t1 = Task()
assert t1.cache_validator is never_use
t2 = Task(cache_for=timedelta(days=1))
assert t2.cache_validator is duration_only
t3 = Task(cache_for=timedelta(days=1), cache_validator=all_inputs)
assert t3.cache_validator is all_inputs
def test_bad_cache_kwarg_combo(self):
with pytest.warns(UserWarning, match=".*Task will not be cached.*"):
Task(cache_validator=all_inputs)
def test_create_task_with_and_without_result(self):
t1 = Task()
assert t1.result is None
t2 = Task(result=PrefectResult())
assert isinstance(t2.result, PrefectResult)
def test_create_parameter_uses_prefect_result(self):
p = Parameter("p")
assert isinstance(p.result, PrefectResult)
def test_create_task_with_and_without_checkpoint(self):
t = Task()
assert t.checkpoint is None
s = Task(checkpoint=True)
assert s.checkpoint is True
def test_create_task_with_and_without_log_stdout(self):
t = Task()
assert t.log_stdout is False
s = Task(log_stdout=True)
assert s.log_stdout is True
def test_create_task_with_task_run_name(self):
t1 = Task()
assert t1.task_run_name is None
t2 = Task(task_run_name="test")
assert t2.task_run_name == "test"
t2 = Task(task_run_name=lambda: 42)
assert t2.task_run_name() == 42
def test_task_has_logger():
t = Task()
assert isinstance(t.logger, logging.Logger)
assert t.logger.name == "prefect.Task"
def test_task_has_logger_with_informative_name():
t = Task(name="foo")
assert isinstance(t.logger, logging.Logger)
assert t.logger.name == "prefect.foo"
def test_task_produces_no_result():
t = Task()
assert t.run() is None
def test_task_is_not_iterable():
t = Task()
with pytest.raises(TypeError):
list(t)
def test_tags_are_added_when_arguments_are_bound():
t1 = AddTask(tags=["math"])
t2 = AddTask(tags=["math"])
with prefect.context(tags=["test"]):
with Flow(name="test"):
t1.bind(1, 2)
t3 = t2(1, 2)
assert t1.tags == {"math", "test"}
assert t3.tags == {"math", "test"}
def test_tags():
t1 = Task()
assert t1.tags == set()
with pytest.raises(TypeError):
Task(tags="test")
t3 = Task(tags=["test", "test2", "test"])
assert t3.tags == {"test", "test2"}
with prefect.context(tags=["test"]):
t4 = Task()
assert t4.tags == {"test"}
with prefect.context(tags=["test1", "test2"]):
t5 = Task(tags=["test3"])
assert t5.tags == {"test1", "test2", "test3"}
class TestInputsOutputs:
class add(Task):
def run(self, x, y: int = 1) -> int:
return x + y
@task
def mult(x, y: int = 1) -> int:
return x * y
def test_inputs(self):
assert self.add().inputs() == dict(
x=dict(type=Any, required=True, default=None),
y=dict(type=int, required=False, default=1),
)
def test_inputs_task_decorator(self):
with Flow("test"):
assert self.mult(x=1).inputs() == dict(
x=dict(type=Any, required=True, default=None),
y=dict(type=int, required=False, default=1),
)
def test_outputs(self):
assert self.add().outputs() == int
def test_outputs_task_decorator(self):
with Flow("test"):
assert self.mult(x=1).outputs() == int
class TestTaskCopy:
def test_copy_copies(self):
class CopyTask(Task):
class_attr = 42
def __init__(self, instance_val, **kwargs):
self.instance_val = instance_val
super().__init__(**kwargs)
def run(self, run_val):
return (run_val, self.class_attr, self.instance_val)
ct = CopyTask("username")
other = ct.copy()
assert isinstance(other, CopyTask)
assert ct is not other
assert hash(ct) != hash(other)
assert ct != other
assert other.run("pass") == ("pass", 42, "username")
def test_copy_warns_if_dependencies_in_active_flow(self):
t1 = Task()
t2 = Task()
with Flow(name="test") as flow:
t1.set_dependencies(downstream_tasks=[t2])
with pytest.warns(UserWarning, match="You are making a copy"):
flow.add_task(t1.copy())
with Flow(name="test") as flow:
with pytest.warns(None) as rec:
flow.add_task(t1.copy())
# no dependencies in this flow
assert len(rec) == 0
def test_copy_changes_slug(self):
t1 = Task(slug="test")
t2 = t1.copy()
assert t1.slug == "test"
assert t1.slug != t2.slug
def test_copy_accepts_task_args(self):
t = Task()
t2 = t.copy(name="new-task")
t3 = t.copy(**{"max_retries": 4200})
assert t2.name == "new-task"
assert t3.max_retries == 4200
def test_copy_accepts_slug_as_task_args(self):
t = Task(slug="test")
t2 = t.copy(slug="test-2")
assert t.slug == "test"
assert t2.slug == "test-2"
def test_copy_appropriately_sets_result_target_if_target_provided(self):
# https://github.com/PrefectHQ/prefect/issues/2588
@task(target="target", result=LocalResult(dir="."))
def X():
pass
@task
def Y():
pass
with Flow("test"):
x = X()
y = Y(task_args=dict(target="target", result=LocalResult(dir=".")))
assert x.result.location == "target"
assert y.result.location == "target"
class TestDependencies:
def test_set_downstream(self):
f = Flow(name="test")
t1 = Task()
t2 = Task()
t1.set_downstream(t2, flow=f)
assert Edge(t1, t2) in f.edges
def test_set_downstream_context(self):
with Flow(name="test") as f:
t1 = Task()
t2 = Task()
t1.set_downstream(t2)
assert Edge(t1, t2) in f.edges
def test_set_downstream_no_flow(self):
t1 = Task()
t2 = Task()
with pytest.raises(ValueError, match="No Flow was passed"):
t1.set_downstream(t2)
@pytest.mark.parametrize(
"props", [{"mapped": True}, {"key": "x"}, {"key": "x", "mapped": True}]
)
def test_set_downstream_with_properties(self, props):
with Flow(name="test") as f:
t1 = Task()
t2 = Task()
t1.set_downstream(t2, **props)
assert Edge(t1, t2, **props) in f.edges
def test_set_upstream(self):
f = Flow(name="test")
t1 = Task()
t2 = Task()
t2.set_upstream(t1, flow=f)
assert Edge(t1, t2) in f.edges
def test_set_upstream_context(self):
with Flow(name="test") as f:
t1 = Task()
t2 = Task()
t2.set_upstream(t1)
assert Edge(t1, t2) in f.edges
def test_set_upstream_no_flow(self):
t1 = Task()
t2 = Task()
with pytest.raises(ValueError, match="No Flow was passed"):
t2.set_upstream(t1)
@pytest.mark.parametrize(
"props", [{"mapped": True}, {"key": "x"}, {"key": "x", "mapped": True}]
)
def test_set_upstream_with_properties(self, props):
with Flow(name="test") as f:
t1 = Task()
t2 = Task()
t2.set_upstream(t1, **props)
assert Edge(t1, t2, **props) in f.edges
def test_set_dependencies_stream_allows_chaining(self):
t1 = Task()
t2 = Task()
t3 = Task()
with Flow(name="test") as f:
t1_result = t1()
t2_result = t2()
t3_result = t3()
assert t1_result.set_downstream(t2_result) is t1_result
assert t3_result.set_upstream(t2_result) is t3_result
assert (
t3_result.set_dependencies(f, upstream_tasks=[t1_result]) is t3_result
)
class TestSerialization:
def test_serialization(self):
t = Task(name="test")
s = t.serialize()
assert isinstance(s, dict)
assert s["slug"] == t.slug
assert s["type"] == "prefect.core.task.Task"
assert s["name"] == t.name
def test_subclass_serialization(self):
class NewTask(Task):
pass
s = NewTask().serialize()
assert isinstance(s, dict)
assert s["type"].endswith(".NewTask")
def test_deserialization(self):
t = Task(name="test")
s = t.serialize()
t2 = prefect.serialization.task.TaskSchema().load(s)
assert isinstance(t2, Task)
assert t2.name == t.name
def test_subclass_deserialization(self):
class NewTask(Task):
pass
t = NewTask(name="test")
s = t.serialize()
t2 = prefect.serialization.task.TaskSchema().load(s)
assert type(t2) is Task
assert not isinstance(t2, NewTask)
assert t2.name == t.name
def test_parameter_serialization(self):
p = Parameter(name="p")
serialized = p.serialize()
assert serialized["name"] == "p"
assert serialized["default"] is None
assert serialized["required"] is True
def test_parameter_deserialization(self):
p = Parameter(name="p")
serialized = p.serialize()
p2 = prefect.serialization.task.ParameterSchema().load(serialized)
assert isinstance(p2, Parameter)
assert p2.name == p.name
assert p2.required == p.required
assert p2.default == p.default
class TestTaskArgs:
def test_task_args_raises_for_non_attrs(self):
t = Task()
with Flow(name="test"):
with pytest.raises(AttributeError, match="foo"):
t(task_args={"foo": "bar"})
@pytest.mark.parametrize(
"attr,val",
[
("name", "foo-bar"),
("slug", "foo-bar"),
("max_retries", 4200),
("retry_delay", timedelta(seconds=1)),
("timeout", 12),
("skip_on_upstream_skip", False),
("cache_for", timedelta(seconds=1)),
],
)
def test_task_args_sets_new_attrs(self, attr, val):
t = Task()
with Flow(name="test") as f:
t(task_args={attr: val})
assert getattr(f.tasks.pop(), attr) == val
@pytest.mark.parametrize(
"attr,val",
[
("name", "foo-bar"),
("slug", "foo-bar"),
("max_retries", 4200),
("retry_delay", timedelta(seconds=1)),
("timeout", 12),
("skip_on_upstream_skip", False),
("cache_for", timedelta(seconds=1)),
],
)
def test_task_args_sets_new_attrs_on_mapped_tasks(self, attr, val):
t = Task()
with Flow(name="test") as f:
t.map(upstream_tasks=[1, 2, 3, 4], task_args={attr: val})
tasks = f.get_tasks(name="Task")
assert all(getattr(tt, attr) == val for tt in tasks)
def test_tags_are_appended_to_when_updating_with_task_args(self):
t = AddTask(tags=["math"])
with prefect.context(tags=["test"]):
with Flow(name="test"):
t2 = t(1, 2, task_args={"name": "test-tags", "tags": ["new-tag"]})
assert t2.tags == {"math", "test", "new-tag"}
def test_task_check_mapped_args_are_subscriptable_in_advance(self):
t = Task()
with pytest.raises(TypeError):
with Flow(name="test"):
t.map({1, 2, 3, 4})
class TestTaskNout:
def test_nout_defaults_to_none(self):
@task
def test(self):
pass
assert test.nout is None
def test_nout_provided_explicitly(self):
@task(nout=2)
def test(self):
pass
assert test.nout == 2
@pytest.mark.parametrize(
"ret_type, nout",
[
(int, None),
(Tuple, None),
(Tuple[()], 0),
(Tuple[int, ...], None),
(Tuple[int, int], 2),
(Tuple[int, float, str], 3),
],
)
def test_nout_inferred_from_signature(self, ret_type, nout):
@task
def test(a) -> ret_type:
pass
assert test.nout == nout
def test_nout_none_not_iterable(self):
@task
def test(a):
return a + 1, a - 1
with Flow("test"):
with pytest.raises(TypeError, match="Task is not iterable"):
a, b = test(1)
def test_nout_provided_is_iterable(self):
@task(nout=2)
def test(a):
return a + 1, a - 1
with Flow("test") as flow:
a, b = test(1)
res = flow.run()
assert res.result[a].result == 2
assert res.result[b].result == 0
def test_nout_not_set_on_mapped_tasks(self):
@task(nout=2)
def test(a):
return a + 1, a - 1
with Flow("test"):
with pytest.raises(TypeError, match="Task is not iterable"):
a, b = test.map(range(10))
@pytest.mark.skip("Result handlers not yet deprecated")
def test_cache_options_show_deprecation():
with pytest.warns(
UserWarning, match=r"all cache_\* options on a Task will be deprecated*"
):
Task(cache_for=object())
with pytest.warns(
UserWarning, match=r"all cache_\* options on a Task will be deprecated*"
):
Task(cache_validator=object())
with pytest.warns(
UserWarning, match=r"all cache_\* options on a Task will be deprecated*"
):
Task(cache_key=object())
def test_passing_task_to_task_constructor_raises_helpful_warning():
class MyTask(Task):
def __init__(self, a, b, **kwargs):
self.a = a
self.b = b
super().__init__(**kwargs)
with Flow("test"):
a = Task()()
with pytest.warns(
UserWarning, match="A Task was passed as an argument to MyTask"
):
t = MyTask(1, a)()
# Warning doesn't stop normal operation
assert t.a == 1
assert t.b == a
def test_task_init_uses_reserved_attribute_raises_helpful_warning():
class MyTask(Task):
def __init__(self, **kwargs):
self.a = 1
self.target = "oh no!"
super().__init__(**kwargs)
with Flow("test"):
with pytest.warns(UserWarning, match="`MyTask` sets a `target` attribute"):
MyTask()
@pytest.mark.parametrize("use_function_task", [True, False])
def test_task_called_outside_flow_context_raises_helpful_error(use_function_task):
if use_function_task:
@prefect.task
def fn(x):
return x
else:
class Fn(Task):
def run(self, x):
return x
fn = Fn()
with pytest.raises(
ValueError,
match=f"Could not infer an active Flow context while creating edge to {fn}",
) as exc_info:
fn(1)
run_call = "`fn.run(...)`" if use_function_task else "`Fn(...).run(...)`"
assert (
"If you're trying to run this task outside of a Flow context, "
f"you need to call {run_call}" in str(exc_info)
)
def test_task_call_with_self_succeeds():
import dataclasses
@dataclasses.dataclass
class TestClass:
count: int
def increment(self):
self.count = self.count + 1
seconds_task = task(
TestClass.increment, target="{{task_slug}}_{{map_index}}", result=LocalResult()
)
initial = TestClass(count=0)
with Flow("test") as flow:
seconds_task(initial)
assert flow.run().is_successful()
| true | true |
f7151a63c16bac48a603b6c0bc9d747a9402ec51 | 5,462 | py | Python | openstates/openstates-master/openstates/wv/actions.py | Jgorsick/Advocacy_Angular | 8906af3ba729b2303880f319d52bce0d6595764c | [
"CC-BY-4.0"
] | null | null | null | openstates/openstates-master/openstates/wv/actions.py | Jgorsick/Advocacy_Angular | 8906af3ba729b2303880f319d52bce0d6595764c | [
"CC-BY-4.0"
] | null | null | null | openstates/openstates-master/openstates/wv/actions.py | Jgorsick/Advocacy_Angular | 8906af3ba729b2303880f319d52bce0d6595764c | [
"CC-BY-4.0"
] | null | null | null | '''
'''
import re
from billy.scrape.actions import Rule, BaseCategorizer
committees = [
u"Veterans' Affairs",
u'Agriculture and Agri-business Committee',
u'Agriculture',
u'Banking and Insurance',
u'Banking',
u'Children, Juveniles and Other Issues',
u'Constitutional Revision',
u'Council of Finance and Administration',
u'Economic Development and Small Business',
u'Economic Development',
u'Education Accountability',
u'Education',
u'Employee Suggestion Award Board',
u'Energy, Industry and Labor',
u'Energy, Industry and Labor/Economic Development and Small Business',
u'Enrolled Bills',
u'Equal Pay Commission',
u'Finance',
u'Forest Management Review Commission',
u'Government and Finance',
u'Government Operations',
u'Government Organization',
u'Health and Human Resources Accountability',
u'Health and Human Resources',
u'Health',
u'Homeland Security',
u'House Rules',
u'House Select Committee on Redistricting',
u'Infrastructure',
u'Insurance',
u'Intern Committee',
u'Interstate Cooperation',
u'Judiciary',
u'Law Institute',
u'Minority Issues',
u'Natural Resources',
u'Outcomes-Based Funding Models in Higher Education',
u'Parks, Recreation and Natural Resources',
u'PEIA, Seniors and Long Term Care',
u'Pensions and Retirement',
u'Political Subdivisions',
u'Post Audits',
u'Regional Jail and Correctional Facility Authority',
u'Roads and Transportation',
u'Rule-Making Review Committee',
u'Senior Citizen Issues',
u'Special Investigations',
u'Technology',
u'Veterans Affairs',
u'Veterans Affairs/ Homeland Security',
u'Water Resources',
u'Workforce Investment for Economic Development',
]
committees_rgx = '(%s)' % '|'.join(sorted(committees, key=len, reverse=True))
rules = (
Rule(['Communicated to Senate', 'Senate received',
'Ordered to Senate'], actor='upper'),
Rule(['Communicated to House', 'House received',
'Ordered to House'], actor='lower'),
Rule('Read 1st time', 'bill:reading:1'),
Rule('Read 2nd time', 'bill:reading:2'),
Rule('Read 3rd time', 'bill:reading:3'),
Rule('Filed for introduction', 'bill:filed'),
Rule('^Introduced in', 'bill:introduced'),
Rule(['Passed Senate', 'Passed House'], 'bill:passed'),
Rule(['Reported do pass', 'With amendment, do pass'], 'committee:passed'),
Rule([u', but first to .+?; then (?P<committees>[^;]+)',
u'To (?P<committees>.+?) then']),
Rule(u'(?i)voice vote', voice_vote=True),
Rule([u'Amendment rejected'], [u'amendment:failed']),
Rule([u'To Governor'], [u'governor:received']),
Rule([u'Passed House'], [u'bill:passed']),
Rule([u'Read 2nd time'], [u'bill:reading:2']),
Rule([u', but first to (?P<committees>[^;]+)', u'Rejected'], []),
Rule([u'Approved by Governor \d{1,2}/\d{1,2}/\d{1,2}$'], [u'governor:signed']),
Rule([u'^Introduced'], [u'bill:introduced']),
Rule([u'To .+? then (?P<committees>.+)'], []),
Rule([u'^Filed for intro'], [u'bill:filed']),
Rule([u'(?i)referred to (?P<committees>.+)'], [u'committee:referred']),
Rule(u'Senator (?P<legislators>.+? )requests '
u'to be removed as sponsor of bill'),
Rule([u'To House (?P<committees>[A-Z].+)'], [u'committee:referred']),
Rule([u'Passed Senate'], [u'bill:passed']),
Rule([u'(?i)committed to (?P<committees>.+?) on'], []),
Rule([u'Vetoed by Governor'], [u'governor:vetoed']),
Rule([u'(?i)House concurred in senate amendment'], []),
Rule([u'Be rejected'], [u'bill:failed']),
Rule([u'To .+? then (?P<committees>.+) then',
u'reading to (?P<committees>.+)']),
Rule([u'Adopted by'], [u'bill:passed']),
Rule([u'House appointed conferees: (?P<legislators>.+)'], []),
Rule([u'Read 3rd time'], [u'bill:reading:3']),
Rule([u'Be adopted$'], [u'bill:passed']),
Rule([u'(?i)originating in (House|Senate) (?P<committees>.+)',
u'(?i)to house (?P<committees>.+)']),
Rule([u'Read 1st time'], [u'bill:reading:1']),
Rule([u'To .+? then .+? then (?P<committees>.+)']),
Rule(r'To %s' % committees_rgx, 'committee:referred')
)
class Categorizer(BaseCategorizer):
rules = rules
def categorize(self, text):
'''Wrap categorize and add boilerplate committees.
'''
attrs = BaseCategorizer.categorize(self, text)
committees = attrs['committees']
for committee in re.findall(committees_rgx, text, re.I):
if committee not in committees:
committees.append(committee)
return attrs
def post_categorize(self, attrs):
res = set()
if 'legislators' in attrs:
for text in attrs['legislators']:
rgx = r'(,\s+(?![a-z]\.)|\s+and\s+)'
legs = re.split(rgx, text)
legs = filter(lambda x: x not in [', ', ' and '], legs)
res |= set(legs)
attrs['legislators'] = list(res)
res = set()
if 'committees' in attrs:
for text in attrs['committees']:
# Strip stuff like "Rules on 1st reading"
for text in text.split('then'):
text = re.sub(r' on .+', '', text)
text = text.strip()
res.add(text)
attrs['committees'] = list(res)
return attrs
| 36.413333 | 83 | 0.598499 | import re
from billy.scrape.actions import Rule, BaseCategorizer
committees = [
u"Veterans' Affairs",
u'Agriculture and Agri-business Committee',
u'Agriculture',
u'Banking and Insurance',
u'Banking',
u'Children, Juveniles and Other Issues',
u'Constitutional Revision',
u'Council of Finance and Administration',
u'Economic Development and Small Business',
u'Economic Development',
u'Education Accountability',
u'Education',
u'Employee Suggestion Award Board',
u'Energy, Industry and Labor',
u'Energy, Industry and Labor/Economic Development and Small Business',
u'Enrolled Bills',
u'Equal Pay Commission',
u'Finance',
u'Forest Management Review Commission',
u'Government and Finance',
u'Government Operations',
u'Government Organization',
u'Health and Human Resources Accountability',
u'Health and Human Resources',
u'Health',
u'Homeland Security',
u'House Rules',
u'House Select Committee on Redistricting',
u'Infrastructure',
u'Insurance',
u'Intern Committee',
u'Interstate Cooperation',
u'Judiciary',
u'Law Institute',
u'Minority Issues',
u'Natural Resources',
u'Outcomes-Based Funding Models in Higher Education',
u'Parks, Recreation and Natural Resources',
u'PEIA, Seniors and Long Term Care',
u'Pensions and Retirement',
u'Political Subdivisions',
u'Post Audits',
u'Regional Jail and Correctional Facility Authority',
u'Roads and Transportation',
u'Rule-Making Review Committee',
u'Senior Citizen Issues',
u'Special Investigations',
u'Technology',
u'Veterans Affairs',
u'Veterans Affairs/ Homeland Security',
u'Water Resources',
u'Workforce Investment for Economic Development',
]
committees_rgx = '(%s)' % '|'.join(sorted(committees, key=len, reverse=True))
rules = (
Rule(['Communicated to Senate', 'Senate received',
'Ordered to Senate'], actor='upper'),
Rule(['Communicated to House', 'House received',
'Ordered to House'], actor='lower'),
Rule('Read 1st time', 'bill:reading:1'),
Rule('Read 2nd time', 'bill:reading:2'),
Rule('Read 3rd time', 'bill:reading:3'),
Rule('Filed for introduction', 'bill:filed'),
Rule('^Introduced in', 'bill:introduced'),
Rule(['Passed Senate', 'Passed House'], 'bill:passed'),
Rule(['Reported do pass', 'With amendment, do pass'], 'committee:passed'),
Rule([u', but first to .+?; then (?P<committees>[^;]+)',
u'To (?P<committees>.+?) then']),
Rule(u'(?i)voice vote', voice_vote=True),
Rule([u'Amendment rejected'], [u'amendment:failed']),
Rule([u'To Governor'], [u'governor:received']),
Rule([u'Passed House'], [u'bill:passed']),
Rule([u'Read 2nd time'], [u'bill:reading:2']),
Rule([u', but first to (?P<committees>[^;]+)', u'Rejected'], []),
Rule([u'Approved by Governor \d{1,2}/\d{1,2}/\d{1,2}$'], [u'governor:signed']),
Rule([u'^Introduced'], [u'bill:introduced']),
Rule([u'To .+? then (?P<committees>.+)'], []),
Rule([u'^Filed for intro'], [u'bill:filed']),
Rule([u'(?i)referred to (?P<committees>.+)'], [u'committee:referred']),
Rule(u'Senator (?P<legislators>.+? )requests '
u'to be removed as sponsor of bill'),
Rule([u'To House (?P<committees>[A-Z].+)'], [u'committee:referred']),
Rule([u'Passed Senate'], [u'bill:passed']),
Rule([u'(?i)committed to (?P<committees>.+?) on'], []),
Rule([u'Vetoed by Governor'], [u'governor:vetoed']),
Rule([u'(?i)House concurred in senate amendment'], []),
Rule([u'Be rejected'], [u'bill:failed']),
Rule([u'To .+? then (?P<committees>.+) then',
u'reading to (?P<committees>.+)']),
Rule([u'Adopted by'], [u'bill:passed']),
Rule([u'House appointed conferees: (?P<legislators>.+)'], []),
Rule([u'Read 3rd time'], [u'bill:reading:3']),
Rule([u'Be adopted$'], [u'bill:passed']),
Rule([u'(?i)originating in (House|Senate) (?P<committees>.+)',
u'(?i)to house (?P<committees>.+)']),
Rule([u'Read 1st time'], [u'bill:reading:1']),
Rule([u'To .+? then .+? then (?P<committees>.+)']),
Rule(r'To %s' % committees_rgx, 'committee:referred')
)
class Categorizer(BaseCategorizer):
rules = rules
def categorize(self, text):
attrs = BaseCategorizer.categorize(self, text)
committees = attrs['committees']
for committee in re.findall(committees_rgx, text, re.I):
if committee not in committees:
committees.append(committee)
return attrs
def post_categorize(self, attrs):
res = set()
if 'legislators' in attrs:
for text in attrs['legislators']:
rgx = r'(,\s+(?![a-z]\.)|\s+and\s+)'
legs = re.split(rgx, text)
legs = filter(lambda x: x not in [', ', ' and '], legs)
res |= set(legs)
attrs['legislators'] = list(res)
res = set()
if 'committees' in attrs:
for text in attrs['committees']:
# Strip stuff like "Rules on 1st reading"
for text in text.split('then'):
text = re.sub(r' on .+', '', text)
text = text.strip()
res.add(text)
attrs['committees'] = list(res)
return attrs
| true | true |
f7151a7c2677a8ca25b3ce9f5abd7ef3436c4d4b | 576 | py | Python | demo/django/tutorial/polls/admin.py | sirex/htsql | 52275f6a584b412c109822d2ed2a5e69ac522cdf | [
"Apache-2.0"
] | 15 | 2020-02-11T11:24:34.000Z | 2022-03-03T20:46:34.000Z | demo/django/tutorial/polls/admin.py | sirex/htsql | 52275f6a584b412c109822d2ed2a5e69ac522cdf | [
"Apache-2.0"
] | 1 | 2020-02-13T14:08:34.000Z | 2020-02-13T14:16:04.000Z | demo/django/tutorial/polls/admin.py | sirex/htsql | 52275f6a584b412c109822d2ed2a5e69ac522cdf | [
"Apache-2.0"
] | 2 | 2020-02-13T14:10:06.000Z | 2021-02-25T04:36:05.000Z | from polls.models import Poll, Choice
from django.contrib import admin
class ChoiceInline(admin.TabularInline):
    # Edit Choice rows inline (tabular layout) on the Poll change page.
    model = Choice
    # Offer three blank extra forms for adding new choices.
    extra = 3
class PollAdmin(admin.ModelAdmin):
    # Split the edit form into two sections; the date section starts
    # collapsed.
    fieldsets = [
        (None, {'fields': ['question']}),
        ('Date information', {'fields': ['pub_date'], 'classes': ['collapse']}),
    ]
    # Edit the poll's choices directly on the poll page.
    inlines = [ChoiceInline]
    # Change-list configuration: columns, sidebar filter, search box and
    # date drill-down navigation.
    list_display = ('question', 'pub_date', 'was_published_recently')
    list_filter = ['pub_date']
    search_fields = ['question']
    date_hierarchy = 'pub_date'
# Register Poll with the customised admin options above.
admin.site.register(Poll, PollAdmin)
| 28.8 | 80 | 0.645833 | from polls.models import Poll, Choice
from django.contrib import admin
class ChoiceInline(admin.TabularInline):
    # Inline tabular editor for Choice rows on the Poll change page;
    # three blank extra forms are shown.
    model = Choice
    extra = 3
class PollAdmin(admin.ModelAdmin):
    # Two form sections; the date section is collapsed by default.
    fieldsets = [
        (None, {'fields': ['question']}),
        ('Date information', {'fields': ['pub_date'], 'classes': ['collapse']}),
    ]
    # Choices editable inline on the poll page.
    inlines = [ChoiceInline]
    # Change-list: columns, filter sidebar, search box, date drill-down.
    list_display = ('question', 'pub_date', 'was_published_recently')
    list_filter = ['pub_date']
    search_fields = ['question']
    date_hierarchy = 'pub_date'
# Register Poll with the customised admin options above.
admin.site.register(Poll, PollAdmin)
| true | true |
f7151aa262aee8fc1d6bd3aea5334c778feb0cc4 | 219 | py | Python | parsyfiles/profiling/exec_on_test_by_type.py | smarie/python-simple-file-collection-parsing-framework | 344b37e1151e8d4e7c2ee49ae09d6568715ae64e | [
"BSD-3-Clause"
] | null | null | null | parsyfiles/profiling/exec_on_test_by_type.py | smarie/python-simple-file-collection-parsing-framework | 344b37e1151e8d4e7c2ee49ae09d6568715ae64e | [
"BSD-3-Clause"
] | null | null | null | parsyfiles/profiling/exec_on_test_by_type.py | smarie/python-simple-file-collection-parsing-framework | 344b37e1151e8d4e7c2ee49ae09d6568715ae64e | [
"BSD-3-Clause"
] | null | null | null | import os
import pytest
# Absolute directory containing this script, so the test module can be
# located regardless of the current working directory.
THIS_DIR = os.path.dirname(os.path.abspath(__file__))
if __name__ == '__main__':
    # For profiling purposes: run the by-type parsing tests under pytest.
    # pytest.main() expects a list of arguments; passing a bare string was
    # deprecated and then removed in modern pytest releases.
    pytest.main([os.path.join(THIS_DIR, '../tests/test_parsyfiles_by_type.py')])
| 21.9 | 78 | 0.721461 | import os
import pytest
# Absolute directory containing this script (independent of the cwd).
THIS_DIR = os.path.dirname(os.path.abspath(__file__))
if __name__ == '__main__':
    # Run the by-type parsing test module under pytest (used for profiling).
    # pytest.main() takes a list of CLI arguments; the bare-string form was
    # removed in modern pytest releases.
    pytest.main([os.path.join(THIS_DIR, '../tests/test_parsyfiles_by_type.py')])
| true | true |
f7151ade5974d9fd42771cf7639194622d837538 | 5,015 | py | Python | src/phoebe_shelves_clt/manage.py | anthony-agbay/owl_shelves_clt | da09e1579f8d134a585b50de2f8da38c889c23b9 | [
"MIT"
] | 1 | 2021-05-04T03:06:13.000Z | 2021-05-04T03:06:13.000Z | src/phoebe_shelves_clt/manage.py | anthony-agbay/phoebe-shelves-clt | da09e1579f8d134a585b50de2f8da38c889c23b9 | [
"MIT"
] | null | null | null | src/phoebe_shelves_clt/manage.py | anthony-agbay/phoebe-shelves-clt | da09e1579f8d134a585b50de2f8da38c889c23b9 | [
"MIT"
] | null | null | null | """ Launching point and supporting functions for database management tools.
This module serves as the launching point for the database management tools.
Backend-specific implementations are located within their specific modules and
common functions and methods are included in this file.
"""
import numpy as np
from typing import Tuple, Dict
from phoebe_shelves_clt.csv_backend import manage_csv
from phoebe_shelves_clt.sql_backend import manage_sql
from phoebe_shelves_clt.utils import data_model
from phoebe_shelves_clt.utils import sql_api
def prompt_for_rating(prompt: str):
    """Ask the user for a 1-5 rating on the command line.

    Args:
        prompt: Text shown to the user for the initial prompt.

    Returns:
        int | float: The chosen rating as an int, or ``numpy.nan`` when the
        user leaves the answer blank.
    """
    valid_replies = {"", "1", "2", "3", "4", "5"}
    reply = input(prompt)
    # Keep re-asking until the answer is a digit 1-5 or an empty string.
    while reply not in valid_replies:
        reply = input("Choose an integer between 1 and 5 or leave blank: ")
    return int(reply) if reply != "" else np.nan
def prompt_for_title(backend: str, *args) -> Tuple[str, Dict[str, int]]:
    """ Prompt for a title from the books table and return the title and ID

    Prompts the user to provide a title and returns the title and ID of any
    books that match the title *exactly*.

    Args:
        backend: Backend to use

    Positional Args:
        (CSVDataModel): Current instance of the CSV backend database
        (psycopg2.connection): Connection to the PostgreSQL database

    Returns:
        A tuple containing the following:
            title: Title of the book provided by the user
            title_results: Dictionary mapping possible titles to their ID's
    """
    title = input("Please enter the book title: ")
    if backend == "csv":
        title_results = args[0].get_books_dict(title)
    else:
        # Double any single quotes so a title containing an apostrophe
        # (e.g. "Ender's Game") no longer breaks the statement; this also
        # closes the obvious injection hole of interpolating raw user
        # input into the query. A parameterized query via sql_api would
        # be better still if its API supports it.
        safe_title = title.replace("'", "''")
        query = f"SELECT title, id FROM books WHERE title ILIKE '{safe_title}'"
        title_results = dict(sql_api.execute_query(args[0], query,
                                                   "to_list"))  # type: ignore
    return (title, title_results)
def prompt_for_author(backend: str, *args) -> Tuple[str, Dict]:
    """ Prompt for an author from the authors table and return the name and ID

    Prompts the user to provide an author's last name and returns the names
    and ID's of possible matches based on the last name.

    Args:
        backend: Backend to use

    Positional Args:
        (CSVDataModel): Current instance of the CSV backend database
        (psycopg2.connection): Connection to the PostgreSQL database

    Returns:
        A tuple containing the following:
            last_name: Last name provided by the user
            author_results: Dictionary mapping possible authors to their ID's
    """
    last_name = input("Please enter the author's last name: ")
    if backend == "csv":
        author_results = args[0].get_authors_dict(last_name)
    else:
        # NOTE(review): raw user input is .format()-ed into the
        # 'author_filter' query template (template not visible here) --
        # presumably the same quoting/SQL-injection problem as the title
        # query; verify the template and switch to a parameterized query.
        author_query = (sql_api.read_query('author_filter').format(last_name))
        author_results = dict(sql_api.execute_query(args[0], author_query,
                                                    "to_list"))  # type: ignore
    return(last_name, author_results)
def prompt_for_genre(backend: str, *args) -> Tuple[str, Dict]:
    """ Prompt for a genre from the genres table and return the name and ID

    Prompts user to enter a genre name. It then retrieves the potential
    matching options for further processing.

    Args:
        backend: Backend to use

    Positional Args:
        (CSVDataModel): Current instance of the CSV backend database
        (psycopg2.connection): Connection to the PostgreSQL database

    Returns:
        A tuple containing the following:
            genre_name: Genre name provided by the user
            genre_results: Dictionary mapping possible genres to their ID's
    """
    genre_name = input("Please enter the genre name: ")
    if backend == "csv":
        genre_results = args[0].get_genres_dict(genre_name)
    else:
        # Double single quotes so names with apostrophes can't terminate
        # the string early (breaking the query / enabling injection).
        safe_name = genre_name.replace("'", "''")
        genre_query = f"SELECT name, id from genres where name ilike '{safe_name}'"
        genre_results = dict(sql_api.execute_query(args[0], genre_query,
                                                   "to_list"))  # type: ignore
    return (genre_name, genre_results)
def manage_module(backend: str, db_select: str, mode: str, **kwargs):
    """ Launch management workflows for either backend

    Launch the management workflows for either the CSV or SQL backends

    Args:
        backend: Backend to use
        db_select: Database to manage
        mode: Management mode

    Keyword Args:
        data_directory (string): Path to CSV backend data directory
        sql_configs (Dict): SQL server configurations
    """
    if backend == "csv":
        # CSV backend needs its data model instantiated from disk first.
        model = data_model.CSVDataModel(kwargs["data_directory"])
        manage_csv.main(db_select, mode, model)
    else:
        manage_sql.main(db_select, mode, kwargs["sql_configs"])
import numpy as np
from typing import Tuple, Dict
from phoebe_shelves_clt.csv_backend import manage_csv
from phoebe_shelves_clt.sql_backend import manage_sql
from phoebe_shelves_clt.utils import data_model
from phoebe_shelves_clt.utils import sql_api
def prompt_for_rating(prompt: str):
    """Prompt the user for an integer rating from 1-5; return the rating as
    an int, or ``numpy.nan`` when the user submits an empty string."""
    rating = input(prompt)
    # Re-prompt until the reply is a digit 1-5 or blank.
    while rating not in {"", "1", "2", "3", "4", "5"}:
        rating = input("Choose an integer between 1 and 5 or leave blank: ")
    rating = int(rating) if rating != "" else np.nan
    return(rating)
def prompt_for_title(backend: str, *args) -> Tuple[str, Dict[str, int]]:
    """Prompt for a book title; return it with a dict of exact matches to IDs.

    args[0] is the CSVDataModel ("csv" backend) or an open psycopg2
    connection (SQL backend).

    NOTE(review): the SQL branch interpolates raw user input into the
    statement -- a title containing a single quote breaks the query, and
    this is an SQL-injection vector; should be escaped or parameterized.
    """
    title = input("Please enter the book title: ")
    if backend == "csv":
        title_results = args[0].get_books_dict(title)
    else:
        query = f"SELECT title, id FROM books WHERE title ILIKE '{title}'"
        title_results = dict(sql_api.execute_query(args[0], query,
                                                   "to_list"))
    return(title, title_results)
def prompt_for_author(backend: str, *args) -> Tuple[str, Dict]:
    """Prompt for an author's last name; return it with a dict of candidate
    author names mapped to their IDs.

    args[0] is the CSVDataModel ("csv" backend) or an open psycopg2
    connection (SQL backend).

    NOTE(review): raw user input is .format()-ed into the 'author_filter'
    query template -- presumably the same quoting/injection problem as the
    title query; verify the template.
    """
    last_name = input("Please enter the author's last name: ")
    if backend == "csv":
        author_results = args[0].get_authors_dict(last_name)
    else:
        author_query = (sql_api.read_query('author_filter').format(last_name))
        author_results = dict(sql_api.execute_query(args[0], author_query,
                                                    "to_list")) # type: ignore
    return(last_name, author_results)
def prompt_for_genre(backend: str, *args) -> Tuple[str, Dict]:
    """Prompt for a genre name; return it with a dict of candidate genre
    names mapped to their IDs.

    args[0] is the CSVDataModel ("csv" backend) or an open psycopg2
    connection (SQL backend).

    NOTE(review): raw user input is interpolated into the SQL statement --
    quoting/injection hazard, as with prompt_for_title.
    """
    genre_name = input("Please enter the genre name: ")
    if backend == "csv":
        genre_results = args[0].get_genres_dict(genre_name)
    else:
        genre_query = f"SELECT name, id from genres where name ilike '{genre_name}'"
        genre_results = dict(sql_api.execute_query(args[0], genre_query,
                                                   "to_list")) # type: ignore
    return(genre_name, genre_results)
def manage_module(backend: str, db_select: str, mode: str, **kwargs):
    """Dispatch the management workflow to the CSV or SQL backend.

    Keyword args: ``data_directory`` (CSV backend) or ``sql_configs`` (SQL
    backend).
    """
    if backend == "csv":
        # CSV backend needs its data model instantiated from disk first.
        model = data_model.CSVDataModel(kwargs["data_directory"])
        manage_csv.main(db_select, mode, model)
    else:
        manage_sql.main(db_select, mode, kwargs["sql_configs"])
f7151b2281b1a36bdf79beb5dc8c7718f7a9c8cb | 4,165 | py | Python | ansible/roles/relay/files/fanout/hls.py | fkusei/cm | 7a6b20f5c57bb90a3568cbbf67d72f3a41721c89 | [
"MIT"
] | null | null | null | ansible/roles/relay/files/fanout/hls.py | fkusei/cm | 7a6b20f5c57bb90a3568cbbf67d72f3a41721c89 | [
"MIT"
] | null | null | null | ansible/roles/relay/files/fanout/hls.py | fkusei/cm | 7a6b20f5c57bb90a3568cbbf67d72f3a41721c89 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
import os
import time
import itertools
import contextlib
import fanout_utils
def fanout_hls(context):
    # Record when this run started; the timestamp namespaces the segment
    # filenames so a restart doesn't collide with old segments.
    # NOTE(review): `context` appears to be a fanout_utils mapping type
    # whose `+=` merges a dict in place -- confirm against fanout_utils
    # before refactoring.
    context += {
        "starttime": int(time.time()),
    }
    # Start from a clean output directory.
    cleanup(context)
    context += calculate_map_and_varmap(context)
    generate_master_playlists(context)
    fanout(context)
    print("Cleaning up")
    # Remove segments/playlists again once ffmpeg has exited.
    cleanup(context)
def cleanup(c):
    """Ensure the stream's output directory exists, then delete any
    leftover segment (.ts) and playlist (.m3u8) files from earlier runs."""
    with contextlib.suppress(FileExistsError):
        os.mkdir(os.path.join(c.hls_write_path, c.stream))
    stale_patterns = (
        os.path.join(c.hls_write_path, c.stream, "*.ts"),
        os.path.join(c.hls_write_path, "%s/*.m3u8" % c.stream),
        os.path.join(c.hls_write_path, "%s_*.m3u8" % c.stream),
    )
    with contextlib.suppress(FileNotFoundError):
        for pattern in stale_patterns:
            fanout_utils.remove_glob(pattern)
def calculate_map_and_varmap(c):
    """Build the ffmpeg ``-map`` options and the matching ``var_stream_map``
    entries for the configured video/audio tracks.

    HD video plus the native audio track is always emitted; SD, slides and
    up to two translation audio tracks are appended only when configured.
    Audio-only variants are numbered starting right after the video variants.
    """
    first_audio_stream_index = len(c.video_tracks)
    maps = []
    varmaps = []
    # HD video + native audio is always present.
    maps.append("-map 0:v:0 -map 0:a:0")
    varmaps.append("v:0,a:0")
    if 'SD' in c.video_tracks:
        # SD video + native audio.
        maps.append("-map 0:v:1 -map 0:a:0")
        varmaps.append("v:1,a:1")
    if 'Slides' in c.video_tracks:
        # Slides video + native audio.
        maps.append("-map 0:v:2 -map 0:a:0")
        varmaps.append("v:2,a:2")
    if 'Translated' in c.audio_tracks:
        # First translation track (audio only).
        maps.append("-map 0:a:1")
        varmaps.append("a:%d" % first_audio_stream_index)
    if 'Translated-2' in c.audio_tracks:
        # Second translation track (audio only).
        maps.append("-map 0:a:2")
        varmaps.append("a:%d" % (first_audio_stream_index + 1))
    return {
        "maps": maps,
        "varmaps": varmaps,
        "first_audio_stream_index": first_audio_stream_index,
    }
def generate_master_playlists(c):
    """Write one HLS master playlist per (video_track, audio_track) pair.

    Each playlist marks the chosen pair as DEFAULT and only advertises
    variants of equal or lower quality, so a viewer who picked e.g. SD does
    not get switched up to HD.
    """
    for video_track, audio_track in itertools.product(c.video_tracks, c.audio_tracks):
        # `c + {...}` yields a new context with the pair merged in for the
        # template below.
        playlist_context = c + {
            "video_track": video_track,
            "audio_track": audio_track,
        }
        master_playlist = fanout_utils.format_and_strip(playlist_context, """
            #EXTM3U
            #EXT-X-VERSION:3
            #EXT-X-MEDIA:TYPE=AUDIO,GROUP-ID="audio",NAME="Untranslated",DEFAULT={{ 'YES' if audio_track == 'Native' else 'NO' }}
            {% if 'Translated' in audio_tracks %}
            #EXT-X-MEDIA:TYPE=AUDIO,GROUP-ID="audio",NAME="Translation 1",DEFAULT={{ 'YES' if audio_track == 'Translated' else 'NO' }},URI="{{ stream }}/chunks_{{ first_audio_stream_index+0 }}.m3u8"
            {% endif %}
            {% if 'Translated-2' in audio_tracks %}
            #EXT-X-MEDIA:TYPE=AUDIO,GROUP-ID="audio",NAME="Translation 2",DEFAULT={{ 'YES' if audio_track == 'Translated-2' else 'NO' }},URI="{{ stream }}/chunks_{{ first_audio_stream_index+1 }}.m3u8"
            {% endif %}
            {% if video_track in ['HD'] %}
            #EXT-X-STREAM-INF:PROGRAM-ID=1,BANDWIDTH=5000000,RESOLUTION=1920x1080,CODECS="avc1.4d0028,mp4a.40.2",AUDIO="audio"
            {{ stream }}/chunks_0.m3u8
            {% endif %}
            {% if 'SD' in video_tracks and video_track in ['HD', 'SD'] %}
            #EXT-X-STREAM-INF:PROGRAM-ID=1,BANDWIDTH=800000,RESOLUTION=1024x576,CODECS="avc1.4d0028,mp4a.40.2",AUDIO="audio"
            {{ stream }}/chunks_1.m3u8
            {% endif %}
            {% if 'Slides' in video_tracks and video_track in ['HD', 'SD', 'Slides'] %}
            #EXT-X-STREAM-INF:PROGRAM-ID=1,BANDWIDTH=100000,RESOLUTION=1024x576,CODECS="avc1.4d0028,mp4a.40.2",AUDIO="audio"
            {{ stream }}/chunks_2.m3u8
            {% endif %}
        """)
        # e.g. "<stream>_native_hd.m3u8" in the HLS output root.
        master_playlist_file = os.path.join(
            c.hls_write_path,
            "%s_%s_%s.m3u8" % (c.stream, audio_track.lower(), video_track.lower())
        )
        print("Writing Master Playlist-File %s" % master_playlist_file)
        with open(master_playlist_file, "w") as f:
            f.write(master_playlist)
def fanout(c):
    """Run ffmpeg to repackage the pull stream into HLS variant playlists.

    Codecs are copied (no transcoding); each ``-map`` entry from
    calculate_map_and_varmap() becomes one variant, named via
    ``var_stream_map``. Blocks until ffmpeg exits.
    """
    command = fanout_utils.format_and_strip(c, """
        ffmpeg -v warning -nostats -nostdin -y -analyzeduration 3000000
            -i {{ pull_url }}
            -c:v copy
            -c:a copy
            {{ maps | join("\n\t") }}
            -hls_time 6
            -hls_list_size 200
            -hls_segment_filename "{{ hls_write_path }}/{{ stream }}/{{ starttime }}-%d_%v.ts"
            -hls_flags +delete_segments+omit_endlist+independent_segments
            -var_stream_map '{{ varmaps | join(" ") }}'
            "{{ hls_write_path }}/{{ stream }}/chunks_%v.m3u8"
    """)
    fanout_utils.call(command)
if __name__ == "__main__":
    # Stand-alone entry point: parse CLI options and hand the per-stream
    # fanout callback to the shared main loop.
    parser = fanout_utils.setup_argparse(name="hls")
    parser.add_argument('--hls_write_path', metavar='PATH', type=str,
                        help='Path to write the HLS-Pieces and Master-Playlists to')
    args = parser.parse_args()
    # NOTE(review): "calback" looks misspelled but is presumably the keyword
    # fanout_utils.mainloop expects -- don't rename without changing it there.
    fanout_utils.mainloop(name="hls", transcoding_stream="h264", calback=fanout_hls, args=args)
| 28.141892 | 188 | 0.693637 |
import os
import time
import itertools
import contextlib
import fanout_utils
def fanout_hls(context):
    """Full HLS fanout pass for one stream: clean the output directory,
    compute the ffmpeg mappings, write master playlists, run ffmpeg, and
    clean up again afterwards."""
    # Timestamp namespaces segment filenames across restarts. `+=` merges a
    # dict into the fanout_utils context object.
    context += {
        "starttime": int(time.time()),
    }
    cleanup(context)
    context += calculate_map_and_varmap(context)
    generate_master_playlists(context)
    fanout(context)
    print("Cleaning up")
    cleanup(context)
def cleanup(c):
    """Ensure the stream's output directory exists, then remove leftover
    segment (.ts) and playlist (.m3u8) files from earlier runs."""
    with contextlib.suppress(FileExistsError):
        os.mkdir(os.path.join(c.hls_write_path, c.stream))
    with contextlib.suppress(FileNotFoundError):
        fanout_utils.remove_glob(os.path.join(
            c.hls_write_path, c.stream, "*.ts"))
        fanout_utils.remove_glob(os.path.join(
            c.hls_write_path, "%s/*.m3u8" % c.stream))
        fanout_utils.remove_glob(os.path.join(
            c.hls_write_path, "%s_*.m3u8" % c.stream))
def calculate_map_and_varmap(c):
    """Derive the ffmpeg -map options plus matching var_stream_map entries
    from the configured video/audio tracks; audio-only variants start at
    index len(video_tracks)."""
    first_audio_stream_index = len(c.video_tracks)
    maps = ["-map 0:v:0 -map 0:a:0"]  # HD video + native audio (always on)
    varmaps = ["v:0,a:0"]
    if 'SD' in c.video_tracks:
        maps += ["-map 0:v:1 -map 0:a:0"]  # SD + native audio
        varmaps += ["v:1,a:1"]
    if 'Slides' in c.video_tracks:
        maps += ["-map 0:v:2 -map 0:a:0"]  # slides + native audio
        varmaps += ["v:2,a:2"]
    if 'Translated' in c.audio_tracks:
        maps += ["-map 0:a:1"]  # first translation (audio only)
        varmaps += ["a:%d" % (first_audio_stream_index+0)]
    if 'Translated-2' in c.audio_tracks:
        maps += ["-map 0:a:2"]  # second translation (audio only)
        varmaps += ["a:%d" % (first_audio_stream_index+1)]
    return {
        "maps": maps,
        "varmaps": varmaps,
        "first_audio_stream_index": first_audio_stream_index,
    }
def generate_master_playlists(c):
    """Write one HLS master playlist per (video_track, audio_track) pair;
    each marks the chosen pair as DEFAULT and only lists variants of equal
    or lower quality."""
    for video_track, audio_track in itertools.product(c.video_tracks, c.audio_tracks):
        playlist_context = c + {
            "video_track": video_track,
            "audio_track": audio_track,
        }
        master_playlist = fanout_utils.format_and_strip(playlist_context, """
            #EXTM3U
            #EXT-X-VERSION:3
            #EXT-X-MEDIA:TYPE=AUDIO,GROUP-ID="audio",NAME="Untranslated",DEFAULT={{ 'YES' if audio_track == 'Native' else 'NO' }}
            {% if 'Translated' in audio_tracks %}
            #EXT-X-MEDIA:TYPE=AUDIO,GROUP-ID="audio",NAME="Translation 1",DEFAULT={{ 'YES' if audio_track == 'Translated' else 'NO' }},URI="{{ stream }}/chunks_{{ first_audio_stream_index+0 }}.m3u8"
            {% endif %}
            {% if 'Translated-2' in audio_tracks %}
            #EXT-X-MEDIA:TYPE=AUDIO,GROUP-ID="audio",NAME="Translation 2",DEFAULT={{ 'YES' if audio_track == 'Translated-2' else 'NO' }},URI="{{ stream }}/chunks_{{ first_audio_stream_index+1 }}.m3u8"
            {% endif %}
            {% if video_track in ['HD'] %}
            #EXT-X-STREAM-INF:PROGRAM-ID=1,BANDWIDTH=5000000,RESOLUTION=1920x1080,CODECS="avc1.4d0028,mp4a.40.2",AUDIO="audio"
            {{ stream }}/chunks_0.m3u8
            {% endif %}
            {% if 'SD' in video_tracks and video_track in ['HD', 'SD'] %}
            #EXT-X-STREAM-INF:PROGRAM-ID=1,BANDWIDTH=800000,RESOLUTION=1024x576,CODECS="avc1.4d0028,mp4a.40.2",AUDIO="audio"
            {{ stream }}/chunks_1.m3u8
            {% endif %}
            {% if 'Slides' in video_tracks and video_track in ['HD', 'SD', 'Slides'] %}
            #EXT-X-STREAM-INF:PROGRAM-ID=1,BANDWIDTH=100000,RESOLUTION=1024x576,CODECS="avc1.4d0028,mp4a.40.2",AUDIO="audio"
            {{ stream }}/chunks_2.m3u8
            {% endif %}
        """)
        master_playlist_file = os.path.join(
            c.hls_write_path,
            "%s_%s_%s.m3u8" % (c.stream, audio_track.lower(), video_track.lower())
        )
        print("Writing Master Playlist-File %s" % master_playlist_file)
        with open(master_playlist_file, "w") as f:
            f.write(master_playlist)
def fanout(c):
    """Run ffmpeg to repackage the pull stream into HLS (codec copy, one
    variant per -map entry, named via var_stream_map); blocks until ffmpeg
    exits."""
    command = fanout_utils.format_and_strip(c, """
        ffmpeg -v warning -nostats -nostdin -y -analyzeduration 3000000
            -i {{ pull_url }}
            -c:v copy
            -c:a copy
            {{ maps | join("\n\t") }}
            -hls_time 6
            -hls_list_size 200
            -hls_segment_filename "{{ hls_write_path }}/{{ stream }}/{{ starttime }}-%d_%v.ts"
            -hls_flags +delete_segments+omit_endlist+independent_segments
            -var_stream_map '{{ varmaps | join(" ") }}'
            "{{ hls_write_path }}/{{ stream }}/chunks_%v.m3u8"
    """)
    fanout_utils.call(command)
if __name__ == "__main__":
    # CLI entry point: parse options and hand the fanout callback to the
    # shared main loop ("calback" is the keyword the loop expects as-is).
    parser = fanout_utils.setup_argparse(name="hls")
    parser.add_argument('--hls_write_path', metavar='PATH', type=str,
                        help='Path to write the HLS-Pieces and Master-Playlists to')
    args = parser.parse_args()
    fanout_utils.mainloop(name="hls", transcoding_stream="h264", calback=fanout_hls, args=args)
| true | true |
f7151b5dd7ec24f50098503c2316a5b09f21b826 | 1,104 | py | Python | tests/test_httpclient.py | singulret/pyrabbit | b7efb24e9f1da5ad903e6b8699f807a144acb1a0 | [
"BSD-3-Clause"
] | 41 | 2015-01-27T15:10:28.000Z | 2021-11-03T17:57:49.000Z | tests/test_httpclient.py | singulret/pyrabbit | b7efb24e9f1da5ad903e6b8699f807a144acb1a0 | [
"BSD-3-Clause"
] | 34 | 2015-01-21T17:11:00.000Z | 2022-01-07T15:21:36.000Z | tests/test_httpclient.py | singulret/pyrabbit | b7efb24e9f1da5ad903e6b8699f807a144acb1a0 | [
"BSD-3-Clause"
] | 58 | 2015-01-28T19:23:43.000Z | 2022-03-20T08:14:05.000Z | try:
import unittest2 as unittest
except ImportError:
import unittest
import sys
sys.path.append('..')
from pyrabbit import http
class TestHTTPClient(unittest.TestCase):
    """
    Except for the init test, these are largely functional tests that
    require a RabbitMQ management API to be available on localhost
    """
    # Default guest credentials of a stock RabbitMQ management plugin.
    testhost = 'localhost:15672'
    testuser = 'guest'
    testpass = 'guest'

    def setUp(self):
        # Fresh client for each test.
        self.c = http.HTTPClient(self.testhost, self.testuser, self.testpass)

    def test_client_init(self):
        # The constructor returns an HTTPClient instance.
        c = http.HTTPClient(self.testhost, self.testuser, self.testpass)
        self.assertIsInstance(c, http.HTTPClient)

    def test_client_init_sets_credentials(self):
        # Credentials end up on the client's auth object.
        self.assertEqual(self.c.auth.username, self.testuser)
        self.assertEqual(self.c.auth.password, self.testpass)

    def test_client_init_sets_default_timeout(self):
        # When no timeout is given, the client defaults to 5 seconds.
        self.assertEqual(self.c.timeout, 5)

    def test_client_init_with_timeout(self):
        # The fourth positional argument overrides the default timeout.
        c = http.HTTPClient(self.testhost, self.testuser, self.testpass, 1)
        self.assertEqual(c.timeout, 1)
| 28.307692 | 77 | 0.707428 | try:
import unittest2 as unittest
except ImportError:
import unittest
import sys
sys.path.append('..')
from pyrabbit import http
class TestHTTPClient(unittest.TestCase):
    """Tests for pyrabbit's HTTPClient constructor: credential wiring and
    timeout defaults/overrides (uses RabbitMQ's default guest credentials)."""
    testhost = 'localhost:15672'
    testuser = 'guest'
    testpass = 'guest'
    def setUp(self):
        # Fresh client for each test.
        self.c = http.HTTPClient(self.testhost, self.testuser, self.testpass)
    def test_client_init(self):
        c = http.HTTPClient(self.testhost, self.testuser, self.testpass)
        self.assertIsInstance(c, http.HTTPClient)
    def test_client_init_sets_credentials(self):
        self.assertEqual(self.c.auth.username, self.testuser)
        self.assertEqual(self.c.auth.password, self.testpass)
    def test_client_init_sets_default_timeout(self):
        # Default timeout is expected to be 5 seconds.
        self.assertEqual(self.c.timeout, 5)
    def test_client_init_with_timeout(self):
        # Fourth positional argument overrides the default timeout.
        c = http.HTTPClient(self.testhost, self.testuser, self.testpass, 1)
        self.assertEqual(c.timeout, 1)
| true | true |
f7151c247a299ed10ae06ffcbdf0a28cce6a04c2 | 18,329 | py | Python | src/olympia/editors/views_themes.py | mstriemer/olympia | 2e700c20e0a8ed3f0dd389d1521c3798bf7ed7f7 | [
"BSD-3-Clause"
] | 1 | 2020-04-07T07:21:25.000Z | 2020-04-07T07:21:25.000Z | src/olympia/editors/views_themes.py | mstriemer/olympia | 2e700c20e0a8ed3f0dd389d1521c3798bf7ed7f7 | [
"BSD-3-Clause"
] | null | null | null | src/olympia/editors/views_themes.py | mstriemer/olympia | 2e700c20e0a8ed3f0dd389d1521c3798bf7ed7f7 | [
"BSD-3-Clause"
] | 2 | 2018-03-04T00:11:22.000Z | 2019-12-14T09:45:55.000Z | import datetime
import json
from django.conf import settings
from django.core.exceptions import ObjectDoesNotExist
from django.db.models import Q
from django.forms.formsets import formset_factory
from django.shortcuts import get_object_or_404, redirect
from django.utils.datastructures import MultiValueDictKeyError
from django.utils.translation import ugettext as _, ungettext as ngettext
from olympia import amo
from olympia.constants import editors as rvw
from olympia.access import acl
from olympia.addons.models import Addon, Persona
from olympia.amo.decorators import json_view, post_required
from olympia.amo.urlresolvers import reverse
from olympia.amo.utils import paginate, render
from olympia.devhub.models import ActivityLog
from olympia.editors import forms
from olympia.editors.models import RereviewQueueTheme, ReviewerScore, ThemeLock
from olympia.editors.views import base_context as context
from olympia.search.views import name_only_query
from olympia.zadmin.decorators import admin_required
from .decorators import personas_reviewer_required
QUEUE_PER_PAGE = 100
@personas_reviewer_required
def home(request):
    """Landing page of the theme-review tools: the reviewer's recent review
    totals plus the current size of every queue they may see."""
    logs = ActivityLog.objects
    ctx = context(
        reviews_total=logs.total_reviews(theme=True)[:5],
        reviews_monthly=logs.monthly_reviews(theme=True)[:5],
        queue_counts=queue_counts_themes(request))
    return render(request, 'editors/themes/home.html', ctx)
def queue_counts_themes(request, type=None):
    """Return the number of themes sitting in each review queue.

    The flagged/rereview counts are included only for reviewers allowed the
    'SeniorPersonasTools' action.

    Args:
        request: current request (used for the permission check).
        type: optional filter -- a single queue name (string) to return just
            that bare count, or a list of queue names to restrict the dict.
            Defaults to None (all visible queues). Note: the original body
            referenced the *builtin* ``type`` here (a parameter lost in a
            refactor), which made the filtering dead code; restoring the
            parameter with a None default keeps existing callers unchanged.

    Returns:
        dict mapping queue name to count, or a bare int when ``type`` is a
        single queue name.
    """
    counts = {
        'themes': Persona.objects.no_cache()
        .filter(addon__status=amo.STATUS_PENDING)
        .count(),
    }
    if acl.action_allowed(request, 'SeniorPersonasTools', 'View'):
        counts.update({
            'flagged_themes': (Persona.objects.no_cache()
                               .filter(addon__status=amo.STATUS_REVIEW_PENDING)
                               .count()),
            'rereview_themes': RereviewQueueTheme.objects.count()
        })
    # A single queue name returns just that count.
    if isinstance(type, basestring):
        return counts[type]
    rv = {}
    for k, v in counts.items():
        if not isinstance(type, list) or k in type:
            rv[k] = v
    return rv
@personas_reviewer_required
def themes_list(request, flagged=False, rereview=False):
    """Themes queue in list format.

    ``flagged`` shows themes awaiting moderation (STATUS_REVIEW_PENDING),
    ``rereview`` shows re-uploaded themes, otherwise the pending queue is
    shown. Results are paginated (``per_page`` GET param, default
    QUEUE_PER_PAGE).
    """
    themes = []
    if flagged:
        # TODO (ngoke): rename to STATUS_FLAGGED.
        themes = Addon.objects.filter(status=amo.STATUS_REVIEW_PENDING,
                                      type=amo.ADDON_PERSONA,
                                      persona__isnull=False)
    elif rereview:
        # Rereview rows wrap a theme; surface the underlying addon.
        themes = [
            rqt.theme.addon for rqt in
            RereviewQueueTheme.objects.select_related('theme__addon')]
    else:
        themes = Addon.objects.filter(status=amo.STATUS_PENDING,
                                      type=amo.ADDON_PERSONA,
                                      persona__isnull=False)
    search_form = forms.ThemeSearchForm(request.GET)
    per_page = request.GET.get('per_page', QUEUE_PER_PAGE)
    pager = paginate(request, themes, per_page)
    return render(request, 'editors/themes/queue_list.html', context(
        **{'addons': pager.object_list,
           'flagged': flagged,
           'pager': pager,
           'rereview': rereview,
           'theme_search_form': search_form,
           'statuses': dict((k, unicode(v)) for k, v in
                            amo.STATUS_CHOICES_API.items()),
           'tab': ('rereview_themes' if rereview else
                   'flagged_themes' if flagged else 'pending_themes')}))
def _themes_queue(request, flagged=False, rereview=False):
    """Themes queue in interactive format.

    Checks themes out to the current reviewer (via _get_themes) and renders
    the interactive review page with one ThemeReviewForm per theme.
    """
    themes = _get_themes(request, request.user, flagged=flagged,
                         rereview=rereview)
    ThemeReviewFormset = formset_factory(forms.ThemeReviewForm)
    # One form per theme; rereview rows are unwrapped to their theme's id.
    formset = ThemeReviewFormset(
        initial=[{'theme': _rereview_to_theme(rereview, theme).id} for theme
                 in themes])
    return render(request, 'editors/themes/queue.html', context(
        **{'actions': get_actions_json(),
           'formset': formset,
           'flagged': flagged,
           'reject_reasons': rvw.THEME_REJECT_REASONS,
           'rereview': rereview,
           'reviewable': True,
           'theme_formsets': zip(themes, formset),
           'theme_count': len(themes),
           'tab': (
               'flagged' if flagged else
               'rereview' if rereview else 'pending')}))
def _get_themes(request, reviewer, flagged=False, rereview=False):
    """Check out themes.

    Tops the reviewer up to rvw.THEME_INITIAL_LOCKS checked-out themes by
    creating ThemeLock rows, refreshing expiries on locks already held and,
    when the unlocked pool is empty, stealing locks whose expiry has passed.

    :param flagged: Flagged themes (amo.STATUS_REVIEW_PENDING)
    :param rereview: Re-uploaded themes (RereviewQueueTheme)
    """
    num = 0
    themes = []
    locks = []
    status = (amo.STATUS_REVIEW_PENDING if flagged else
              amo.STATUS_PUBLIC if rereview else amo.STATUS_PENDING)
    if rereview:
        # Rereview themes.
        num, themes, locks = _get_rereview_themes(reviewer)
    else:
        # Pending and flagged themes.
        locks = ThemeLock.objects.no_cache().filter(
            reviewer=reviewer, theme__addon__status=status)
        num, themes = _calc_num_themes_checkout(locks)
        if themes:
            # Reviewer already holds a full batch -- just return it.
            return themes
        themes = Persona.objects.no_cache().filter(
            addon__status=status, themelock=None)
    # Don't allow self-reviews.
    if (not settings.ALLOW_SELF_REVIEWS and
            not acl.action_allowed(request, 'Admin', '%')):
        if rereview:
            themes = themes.exclude(theme__addon__addonuser__user=reviewer)
        else:
            themes = themes.exclude(addon__addonuser__user=reviewer)
    # Check out themes by setting lock.
    themes = list(themes)[:num]
    expiry = get_updated_expiry()
    for theme in themes:
        ThemeLock.objects.create(theme=_rereview_to_theme(rereview, theme),
                                 reviewer=reviewer, expiry=expiry)
    # Empty pool? Go look for some expired locks.
    if not themes:
        expired_locks = ThemeLock.objects.filter(
            expiry__lte=datetime.datetime.now(),
            theme__addon__status=status)[:rvw.THEME_INITIAL_LOCKS]
        # Steal expired locks.
        for lock in expired_locks:
            lock.reviewer = reviewer
            lock.expiry = expiry
            lock.save()
        if expired_locks:
            locks = expired_locks
    if rereview:
        return (RereviewQueueTheme.objects.no_cache()
                .filter(theme__themelock__reviewer=reviewer)
                .exclude(theme__addon__status=amo.STATUS_REJECTED))
    # New theme locks may have been created, grab all reviewer's themes again.
    return [lock.theme for lock in locks]
@json_view
@personas_reviewer_required
def themes_search(request):
    """JSON search endpoint for the theme queues.

    Searches themes by name in Elasticsearch, restricted to the queue named
    by the ``queue_type`` field ('rereview', 'flagged', or pending by
    default), returning at most 100 results. When the form does not
    validate this falls through and implicitly returns None.
    """
    search_form = forms.ThemeSearchForm(request.GET)
    if search_form.is_valid():
        q = search_form.cleaned_data['q']
        rereview = search_form.cleaned_data['queue_type'] == 'rereview'
        flagged = search_form.cleaned_data['queue_type'] == 'flagged'
        # ES query on name.
        themes = Addon.search().filter(type=amo.ADDON_PERSONA)
        if rereview:
            themes = themes.filter(has_theme_rereview=True)
        else:
            themes = themes.filter(status=(amo.STATUS_REVIEW_PENDING if flagged
                                           else amo.STATUS_PENDING),
                                   has_theme_rereview=False)
        themes = themes.query(or_=name_only_query(q))[:100]
        now = datetime.datetime.now()
        reviewers = []
        # For each hit, surface the email of the reviewer holding a live
        # (unexpired) lock, or '' when the theme is unclaimed.
        for theme in themes:
            try:
                themelock = theme.persona.themelock
                if themelock.expiry > now:
                    reviewers.append(themelock.reviewer.email)
                else:
                    reviewers.append('')
            except ObjectDoesNotExist:
                reviewers.append('')
        themes = list(themes.values_dict('name', 'slug', 'status'))
        for theme, reviewer in zip(themes, reviewers):
            # Collapse single value fields from a list.
            theme['id'] = theme['id'][0]
            theme['slug'] = theme['slug'][0]
            theme['status'] = theme['status'][0]
            # Dehydrate.
            theme['reviewer'] = reviewer
        return {'objects': themes, 'meta': {'total_count': len(themes)}}
@personas_reviewer_required
def themes_queue(request):
    """Interactive queue of pending themes; remembers itself as the
    post-commit redirect target."""
    redirect_url = reverse('editors.themes.queue_themes')
    request.session['theme_redirect_url'] = redirect_url
    return _themes_queue(request)
@admin_required(theme_reviewers=True)
def themes_queue_flagged(request):
    """Interactive queue of flagged themes (senior reviewers only);
    remembers itself as the post-commit redirect target."""
    redirect_url = reverse('editors.themes.queue_flagged')
    request.session['theme_redirect_url'] = redirect_url
    return _themes_queue(request, flagged=True)
@admin_required(theme_reviewers=True)
def themes_queue_rereview(request):
    """Interactive queue of re-uploaded themes (senior reviewers only);
    remembers itself as the post-commit redirect target."""
    redirect_url = reverse('editors.themes.queue_rereview')
    request.session['theme_redirect_url'] = redirect_url
    return _themes_queue(request, rereview=True)
def _rereview_to_theme(rereview, theme):
"""
Follows foreign key of RereviewQueueTheme object to theme if in rereview
queue.
"""
if rereview:
return theme.theme
return theme
def _calc_num_themes_checkout(locks):
    """Decide how many more themes the reviewer should check out.

    Returns a ``(num_to_checkout, themes)`` pair: when the reviewer holds
    fewer than rvw.THEME_INITIAL_LOCKS locks the shortfall is returned with
    an empty theme list; otherwise the held locks get a fresh expiry and
    their themes are returned with a shortfall of zero.
    """
    held = locks.count()
    shortfall = rvw.THEME_INITIAL_LOCKS - held
    if shortfall > 0:
        # Not enough checked out yet -- caller should grab more from the pool.
        return shortfall, []
    # Reviewer already holds a full batch; just refresh the lock expiries.
    locks.update(expiry=get_updated_expiry())
    return 0, [lock.theme for lock in locks]
def _get_rereview_themes(reviewer):
    """Check out re-uploaded themes.

    Returns ``(num, themes, locks)``: how many more themes to check out,
    the queryset of unlocked, non-rejected rereview rows to pull from, and
    the reviewer's current locks (with refreshed expiries when already
    holding a full batch).
    """
    locks = (ThemeLock.objects.select_related().no_cache()
             .filter(reviewer=reviewer,
                     theme__rereviewqueuetheme__isnull=False)
             .exclude(theme__addon__status=amo.STATUS_REJECTED))
    num, updated_locks = _calc_num_themes_checkout(locks)
    if updated_locks:
        locks = updated_locks
    themes = (RereviewQueueTheme.objects.no_cache()
              .filter(theme__addon__isnull=False, theme__themelock=None)
              .exclude(theme__addon__status=amo.STATUS_REJECTED))
    return num, themes, locks
@post_required
@personas_reviewer_required
def themes_commit(request):
    """Process a submitted batch of theme reviews.

    Saves every valid form whose theme the current reviewer holds a lock
    on, tallies the reviewer points earned, flashes a success message and
    redirects back to the queue the review started from.
    """
    ThemeReviewFormset = formset_factory(forms.ThemeReviewForm)
    formset = ThemeReviewFormset(request.POST)
    scores = []
    for form in formset:
        try:
            # Only commit reviews for themes this reviewer has locked.
            lock = ThemeLock.objects.filter(
                theme_id=form.data[form.prefix + '-theme'],
                reviewer=request.user)
        except MultiValueDictKeyError:
            # Address off-by-one error caused by management form.
            continue
        if lock and form.is_valid():
            # form.save() performs the review and returns the points earned.
            scores.append(form.save())
    # Success message.
    points = sum(scores)
    success = ngettext(
        # L10n: {0} is the number of reviews. {1} is the points just earned.
        # L10n: {2} is the total number of points the reviewer has overall.
        '{0} theme review successfully processed (+{1} points, {2} total).',
        '{0} theme reviews successfully processed (+{1} points, {2} total).',
        len(scores)).format(len(scores), points,
                            ReviewerScore.get_total(request.user))
    amo.messages.success(request, success)
    if 'theme_redirect_url' in request.session:
        return redirect(request.session['theme_redirect_url'])
    else:
        return redirect(reverse('editors.themes.queue_themes'))
@personas_reviewer_required
def release_locks(request):
    """Drop every ThemeLock held by the current reviewer so those themes
    return to the shared pool, then bounce back to the queue listing."""
    ThemeLock.objects.filter(reviewer=request.user).delete()
    msg = _('Your theme locks have successfully been released. '
            'Other reviewers may now review those released themes. '
            'You may have to refresh the page to see the changes reflected in '
            'the table below.')
    amo.messages.success(request, msg)
    return redirect(reverse('editors.themes.list'))
@personas_reviewer_required
def themes_single(request, slug):
    """
    Like a detail page, manually review a single theme if it is pending
    and isn't locked.
    """
    reviewer = request.user
    reviewable = True
    # Don't review an already reviewed theme.
    theme = get_object_or_404(Persona, addon__slug=slug)
    if (theme.addon.status != amo.STATUS_PENDING and
            not theme.rereviewqueuetheme_set.all()):
        reviewable = False
    # NOTE(review): the `else` below pairs with this self-review check, so
    # a self-authored theme skips the whole locking branch while a theme
    # already marked unreviewable above still gets locked -- looks
    # intentional-ish but worth confirming.
    if (not settings.ALLOW_SELF_REVIEWS and
            not acl.action_allowed(request, 'Admin', '%') and
            theme.addon.has_author(request.user)):
        reviewable = False
    else:
        # Don't review a locked theme (that's not locked to self).
        try:
            lock = theme.themelock
            if (lock.reviewer.id != reviewer.id and
                    lock.expiry > datetime.datetime.now()):
                reviewable = False
            elif (lock.reviewer.id != reviewer.id and
                    lock.expiry < datetime.datetime.now()):
                # Steal expired lock.
                lock.reviewer = reviewer
                lock.expiry = get_updated_expiry()
                lock.save()
            else:
                # Update expiry.
                lock.expiry = get_updated_expiry()
                lock.save()
        except ThemeLock.DoesNotExist:
            # Create lock if not created.
            ThemeLock.objects.create(theme=theme, reviewer=reviewer,
                                     expiry=get_updated_expiry())
    ThemeReviewFormset = formset_factory(forms.ThemeReviewForm)
    formset = ThemeReviewFormset(initial=[{'theme': theme.id}])
    # Since we started the review on the single page, we want to return to the
    # single page rather than get shot back to the queue.
    request.session['theme_redirect_url'] = reverse('editors.themes.single',
                                                    args=[theme.addon.slug])
    rereview = (theme.rereviewqueuetheme_set.all()[0] if
                theme.rereviewqueuetheme_set.exists() else None)
    return render(request, 'editors/themes/single.html', context(
        **{'formset': formset,
           'theme': rereview if rereview else theme,
           'theme_formsets': zip([rereview if rereview else theme], formset),
           'theme_reviews': paginate(request, ActivityLog.objects.filter(
               action=amo.LOG.THEME_REVIEW.id,
               _arguments__contains=theme.addon.id)),
           'actions': get_actions_json(),
           'theme_count': 1,
           'rereview': rereview,
           'reviewable': reviewable,
           'reject_reasons': rvw.THEME_REJECT_REASONS,
           'action_dict': rvw.REVIEW_ACTIONS,
           'tab': ('flagged' if theme.addon.status == amo.STATUS_REVIEW_PENDING
                   else 'rereview' if rereview else 'pending')}))
@personas_reviewer_required
def themes_logs(request):
    """Display a filterable log of past theme reviews."""
    params = request.GET.copy()
    # With no explicit window, default the start to the first day of the
    # current month.
    if not params.get('start') and not params.get('end'):
        today = datetime.date.today()
        params['start'] = datetime.date(today.year, today.month, 1)
    form = forms.ReviewThemeLogForm(params)
    theme_logs = ActivityLog.objects.filter(action=amo.LOG.THEME_REVIEW.id)
    if form.is_valid():
        cleaned = form.cleaned_data
        if cleaned.get('start'):
            theme_logs = theme_logs.filter(created__gte=cleaned['start'])
        if cleaned.get('end'):
            theme_logs = theme_logs.filter(created__lte=cleaned['end'])
        if cleaned.get('search'):
            term = cleaned['search']
            # Match the free-text term against the log details or the
            # reviewer's display name / username.
            matcher = (Q(_details__icontains=term) |
                       Q(user__display_name__icontains=term) |
                       Q(user__username__icontains=term))
            theme_logs = theme_logs.filter(matcher).distinct()
    pager = paginate(request, theme_logs, 30)
    data = context(form=form, pager=pager,
                   ACTION_DICT=rvw.REVIEW_ACTIONS,
                   REJECT_REASONS=rvw.THEME_REJECT_REASONS, tab='themes')
    return render(request, 'editors/themes/logs.html', data)
@admin_required(theme_reviewers=True)
def deleted_themes(request):
    """List deleted personas, filterable by deletion window and name."""
    data = request.GET.copy()
    # `unfiltered` bypasses the default manager so deleted add-ons show up.
    deleted = Addon.unfiltered.filter(type=amo.ADDON_PERSONA,
                                      status=amo.STATUS_DELETED)
    # With no explicit window, default to the start of the current month.
    if not data.get('start') and not data.get('end'):
        today = datetime.date.today()
        data['start'] = datetime.date(today.year, today.month, 1)
    form = forms.DeletedThemeLogForm(data)
    if form.is_valid():
        data = form.cleaned_data
        if data.get('start'):
            deleted = deleted.filter(modified__gte=data['start'])
        if data.get('end'):
            deleted = deleted.filter(modified__lte=data['end'])
        if data.get('search'):
            term = data['search']
            # Match against any localized name string of the add-on.
            deleted = deleted.filter(
                Q(name__localized_string__icontains=term))
    return render(request, 'editors/themes/deleted.html', {
        'form': form,
        'pager': paginate(request, deleted.order_by('-modified'), 30),
        'tab': 'deleted'
    })
@personas_reviewer_required
def themes_history(request, username):
    """Show one reviewer's theme-review history (defaults to current user)."""
    if not username:
        username = request.user.username
    return render(request, 'editors/themes/history.html', context(
        **{'theme_reviews':
            paginate(request, ActivityLog.objects.filter(
                action=amo.LOG.THEME_REVIEW.id, user__username=username), 20),
           'user_history': True,
           'username': username,
           'reject_reasons': rvw.THEME_REJECT_REASONS,
           'action_dict': rvw.REVIEW_ACTIONS}))
def get_actions_json():
    """JSON-encode the reviewer action constants for the client-side UI."""
    actions = {
        'moreinfo': rvw.ACTION_MOREINFO,
        'flag': rvw.ACTION_FLAG,
        'duplicate': rvw.ACTION_DUPLICATE,
        'reject': rvw.ACTION_REJECT,
        'approve': rvw.ACTION_APPROVE,
    }
    return json.dumps(actions)
def get_updated_expiry():
    """Return the datetime at which a freshly-touched theme lock expires."""
    lifetime = datetime.timedelta(minutes=rvw.THEME_LOCK_EXPIRY)
    return datetime.datetime.now() + lifetime
| 36.29505 | 79 | 0.634132 | import datetime
import json
from django.conf import settings
from django.core.exceptions import ObjectDoesNotExist
from django.db.models import Q
from django.forms.formsets import formset_factory
from django.shortcuts import get_object_or_404, redirect
from django.utils.datastructures import MultiValueDictKeyError
from django.utils.translation import ugettext as _, ungettext as ngettext
from olympia import amo
from olympia.constants import editors as rvw
from olympia.access import acl
from olympia.addons.models import Addon, Persona
from olympia.amo.decorators import json_view, post_required
from olympia.amo.urlresolvers import reverse
from olympia.amo.utils import paginate, render
from olympia.devhub.models import ActivityLog
from olympia.editors import forms
from olympia.editors.models import RereviewQueueTheme, ReviewerScore, ThemeLock
from olympia.editors.views import base_context as context
from olympia.search.views import name_only_query
from olympia.zadmin.decorators import admin_required
from .decorators import personas_reviewer_required
QUEUE_PER_PAGE = 100
@personas_reviewer_required
def home(request):
    """Theme-review dashboard: reviewer leaderboards plus queue counts."""
    data = context(
        reviews_total=ActivityLog.objects.total_reviews(theme=True)[:5],
        reviews_monthly=ActivityLog.objects.monthly_reviews(theme=True)[:5],
        queue_counts=queue_counts_themes(request)
    )
    return render(request, 'editors/themes/home.html', data)
def queue_counts_themes(request, type=None, **kw):
    """Count themes awaiting review, keyed by queue name.

    The body reads a ``type`` filter (single queue name or list of queue
    names), but the parameter had been lost from the signature, so the code
    silently consulted the ``type`` builtin instead.  Restoring
    ``type=None`` keeps the old behaviour for existing
    ``queue_counts_themes(request)`` callers (all queues returned) while
    making the filtering reachable again.  ``**kw`` is accepted and ignored
    for call-site compatibility.

    Returns a dict of queue name -> count, a single count when ``type`` is
    a string, or the subset of queues named in a ``type`` list.
    """
    counts = {
        'themes': Persona.objects.no_cache()
        .filter(addon__status=amo.STATUS_PENDING)
        .count(),
    }
    # Senior reviewers additionally see the flagged and re-review queues.
    if acl.action_allowed(request, 'SeniorPersonasTools', 'View'):
        counts.update({
            'flagged_themes': (Persona.objects.no_cache()
                               .filter(addon__status=amo.STATUS_REVIEW_PENDING)
                               .count()),
            'rereview_themes': RereviewQueueTheme.objects.count()
        })
    rv = {}
    # `basestring` is deliberate: this module is Python 2 (see `unicode`
    # usage elsewhere in the file).
    if isinstance(type, basestring):
        return counts[type]
    for k, v in counts.items():
        if not isinstance(type, list) or k in type:
            rv[k] = v
    return rv
@personas_reviewer_required
def themes_list(request, flagged=False, rereview=False):
    """Non-interactive listing of a theme queue (pending/flagged/re-review)."""
    themes = []
    if flagged:
        themes = Addon.objects.filter(status=amo.STATUS_REVIEW_PENDING,
                                      type=amo.ADDON_PERSONA,
                                      persona__isnull=False)
    elif rereview:
        themes = [
            rqt.theme.addon for rqt in
            RereviewQueueTheme.objects.select_related('theme__addon')]
    else:
        themes = Addon.objects.filter(status=amo.STATUS_PENDING,
                                      type=amo.ADDON_PERSONA,
                                      persona__isnull=False)
    search_form = forms.ThemeSearchForm(request.GET)
    per_page = request.GET.get('per_page', QUEUE_PER_PAGE)
    pager = paginate(request, themes, per_page)
    return render(request, 'editors/themes/queue_list.html', context(
        **{'addons': pager.object_list,
           'flagged': flagged,
           'pager': pager,
           'rereview': rereview,
           'theme_search_form': search_form,
           # `unicode` -> Python 2 module; statuses rendered client-side.
           'statuses': dict((k, unicode(v)) for k, v in
                            amo.STATUS_CHOICES_API.items()),
           'tab': ('rereview_themes' if rereview else
                   'flagged_themes' if flagged else 'pending_themes')}))
def _themes_queue(request, flagged=False, rereview=False):
    """Render a review queue page after checking themes out to the reviewer.

    `flagged` selects the flagged queue, `rereview` the re-review queue;
    with neither set the plain pending queue is shown.
    """
    themes = _get_themes(request, request.user, flagged=flagged,
                         rereview=rereview)
    ThemeReviewFormset = formset_factory(forms.ThemeReviewForm)
    formset = ThemeReviewFormset(
        initial=[{'theme': _rereview_to_theme(rereview, theme).id} for theme
                 in themes])
    return render(request, 'editors/themes/queue.html', context(
        **{'actions': get_actions_json(),
           'formset': formset,
           'flagged': flagged,
           'reject_reasons': rvw.THEME_REJECT_REASONS,
           'rereview': rereview,
           'reviewable': True,
           'theme_formsets': zip(themes, formset),
           'theme_count': len(themes),
           'tab': (
               'flagged' if flagged else
               'rereview' if rereview else 'pending')}))
def _get_themes(request, reviewer, flagged=False, rereview=False):
    """Check themes out to `reviewer` (via ThemeLock rows) and return them.

    Returns the reviewer's already-locked themes when enough are held;
    otherwise locks fresh unlocked themes, and as a last resort steals
    locks whose expiry has passed.
    """
    num = 0
    themes = []
    locks = []
    status = (amo.STATUS_REVIEW_PENDING if flagged else
              amo.STATUS_PUBLIC if rereview else amo.STATUS_PENDING)
    if rereview:
        num, themes, locks = _get_rereview_themes(reviewer)
    else:
        locks = ThemeLock.objects.no_cache().filter(
            reviewer=reviewer, theme__addon__status=status)
        num, themes = _calc_num_themes_checkout(locks)
        if themes:
            return themes
    # Candidates: unlocked themes in the right status.
    themes = Persona.objects.no_cache().filter(
        addon__status=status, themelock=None)
    if (not settings.ALLOW_SELF_REVIEWS and
            not acl.action_allowed(request, 'Admin', '%')):
        # Non-admins must not review their own themes.
        if rereview:
            themes = themes.exclude(theme__addon__addonuser__user=reviewer)
        else:
            themes = themes.exclude(addon__addonuser__user=reviewer)
    # Check out themes by setting lock.
    themes = list(themes)[:num]
    expiry = get_updated_expiry()
    for theme in themes:
        ThemeLock.objects.create(theme=_rereview_to_theme(rereview, theme),
                                 reviewer=reviewer, expiry=expiry)
    # Empty pool? Go look for some expired locks.
    if not themes:
        expired_locks = ThemeLock.objects.filter(
            expiry__lte=datetime.datetime.now(),
            theme__addon__status=status)[:rvw.THEME_INITIAL_LOCKS]
        # Steal expired locks.
        for lock in expired_locks:
            lock.reviewer = reviewer
            lock.expiry = expiry
            lock.save()
        if expired_locks:
            locks = expired_locks
    if rereview:
        return (RereviewQueueTheme.objects.no_cache()
                .filter(theme__themelock__reviewer=reviewer)
                .exclude(theme__addon__status=amo.STATUS_REJECTED))
    # New theme locks may have been created, grab all reviewer's themes again.
    return [lock.theme for lock in locks]
@json_view
@personas_reviewer_required
def themes_search(request):
    """JSON name-search over a theme queue, annotated with lock holders.

    Implicitly returns None (empty response) when the form is invalid.
    """
    search_form = forms.ThemeSearchForm(request.GET)
    if search_form.is_valid():
        q = search_form.cleaned_data['q']
        rereview = search_form.cleaned_data['queue_type'] == 'rereview'
        flagged = search_form.cleaned_data['queue_type'] == 'flagged'
        # ES-backed search, scoped to the queue being browsed.
        themes = Addon.search().filter(type=amo.ADDON_PERSONA)
        if rereview:
            themes = themes.filter(has_theme_rereview=True)
        else:
            themes = themes.filter(status=(amo.STATUS_REVIEW_PENDING if flagged
                                           else amo.STATUS_PENDING),
                                   has_theme_rereview=False)
        themes = themes.query(or_=name_only_query(q))[:100]
        now = datetime.datetime.now()
        reviewers = []
        # Record who currently holds an unexpired lock on each theme
        # ('' when unlocked or the lock expired).
        for theme in themes:
            try:
                themelock = theme.persona.themelock
                if themelock.expiry > now:
                    reviewers.append(themelock.reviewer.email)
                else:
                    reviewers.append('')
            except ObjectDoesNotExist:
                reviewers.append('')
        # values_dict returns list-wrapped fields; unwrap for the client.
        themes = list(themes.values_dict('name', 'slug', 'status'))
        for theme, reviewer in zip(themes, reviewers):
            theme['id'] = theme['id'][0]
            theme['slug'] = theme['slug'][0]
            theme['status'] = theme['status'][0]
            theme['reviewer'] = reviewer
        return {'objects': themes, 'meta': {'total_count': len(themes)}}
@personas_reviewer_required
def themes_queue(request):
    """Render the pending-themes review queue."""
    # Remember where to send the reviewer back after committing reviews.
    redirect_url = reverse('editors.themes.queue_themes')
    request.session['theme_redirect_url'] = redirect_url
    return _themes_queue(request)
@admin_required(theme_reviewers=True)
def themes_queue_flagged(request):
    """Render the flagged-themes review queue (senior reviewers only)."""
    redirect_url = reverse('editors.themes.queue_flagged')
    request.session['theme_redirect_url'] = redirect_url
    return _themes_queue(request, flagged=True)
@admin_required(theme_reviewers=True)
def themes_queue_rereview(request):
    """Render the re-review themes queue (senior reviewers only)."""
    redirect_url = reverse('editors.themes.queue_rereview')
    request.session['theme_redirect_url'] = redirect_url
    return _themes_queue(request, rereview=True)
def _rereview_to_theme(rereview, theme):
if rereview:
return theme.theme
return theme
def _calc_num_themes_checkout(locks):
    """Given the reviewer's current locks, decide how many more to check out.

    Returns (number of additional themes to lock, already-locked themes).
    When the reviewer already holds a full batch, every lock's expiry is
    refreshed and the locked themes are returned instead.
    """
    current_num = locks.count()
    if current_num < rvw.THEME_INITIAL_LOCKS:
        return rvw.THEME_INITIAL_LOCKS - current_num, []
    else:
        # Update the expiry on the whole batch in one query, then hand the
        # locked themes back.
        locks.update(expiry=get_updated_expiry())
        return 0, [lock.theme for lock in locks]
def _get_rereview_themes(reviewer):
    """Re-review variant of the checkout bookkeeping.

    Returns (number still to lock, unlocked re-review candidates, the
    reviewer's current locks).
    """
    locks = (ThemeLock.objects.select_related().no_cache()
             .filter(reviewer=reviewer,
                     theme__rereviewqueuetheme__isnull=False)
             .exclude(theme__addon__status=amo.STATUS_REJECTED))
    num, updated_locks = _calc_num_themes_checkout(locks)
    if updated_locks:
        locks = updated_locks
    # Unlocked candidates still needing a re-review.
    themes = (RereviewQueueTheme.objects.no_cache()
              .filter(theme__addon__isnull=False, theme__themelock=None)
              .exclude(theme__addon__status=amo.STATUS_REJECTED))
    return num, themes, locks
@post_required
@personas_reviewer_required
def themes_commit(request):
    """Persist a batch of theme reviews posted from a queue page."""
    ThemeReviewFormset = formset_factory(forms.ThemeReviewForm)
    formset = ThemeReviewFormset(request.POST)
    scores = []
    for form in formset:
        try:
            # Only process forms whose theme is still locked to this
            # reviewer.
            lock = ThemeLock.objects.filter(
                theme_id=form.data[form.prefix + '-theme'],
                reviewer=request.user)
        except MultiValueDictKeyError:
            # Incomplete form row in the POST data; skip it.
            continue
        if lock and form.is_valid():
            # form.save() returns the points awarded for this review.
            scores.append(form.save())
    points = sum(scores)
    success = ngettext(
        '{0} theme review successfully processed (+{1} points, {2} total).',
        '{0} theme reviews successfully processed (+{1} points, {2} total).',
        len(scores)).format(len(scores), points,
                            ReviewerScore.get_total(request.user))
    amo.messages.success(request, success)
    # Return to wherever the review session started (queue or single page).
    if 'theme_redirect_url' in request.session:
        return redirect(request.session['theme_redirect_url'])
    else:
        return redirect(reverse('editors.themes.queue_themes'))
@personas_reviewer_required
def release_locks(request):
    """Drop all of the current reviewer's theme locks so others can review."""
    ThemeLock.objects.filter(reviewer=request.user).delete()
    amo.messages.success(
        request,
        _('Your theme locks have successfully been released. '
          'Other reviewers may now review those released themes. '
          'You may have to refresh the page to see the changes reflected in '
          'the table below.'))
    return redirect(reverse('editors.themes.list'))
@personas_reviewer_required
def themes_single(request, slug):
    """Review a single theme outside the queue flow, locking it first."""
    reviewer = request.user
    reviewable = True
    # Don't review an already reviewed theme.
    theme = get_object_or_404(Persona, addon__slug=slug)
    if (theme.addon.status != amo.STATUS_PENDING and
            not theme.rereviewqueuetheme_set.all()):
        reviewable = False
    # NOTE(review): this second `if` is not an `elif`, so its else-branch
    # runs (and may take a lock) even when the check above already set
    # reviewable = False — presumably intentional, but worth confirming.
    if (not settings.ALLOW_SELF_REVIEWS and
            not acl.action_allowed(request, 'Admin', '%') and
            theme.addon.has_author(request.user)):
        reviewable = False
    else:
        # Don't review a locked theme (that's not locked to self).
        try:
            lock = theme.themelock
            if (lock.reviewer.id != reviewer.id and
                    lock.expiry > datetime.datetime.now()):
                reviewable = False
            elif (lock.reviewer.id != reviewer.id and
                  lock.expiry < datetime.datetime.now()):
                # Steal expired lock.
                lock.reviewer = reviewer
                lock.expiry = get_updated_expiry()
                lock.save()
            else:
                # Update expiry.
                lock.expiry = get_updated_expiry()
                lock.save()
        except ThemeLock.DoesNotExist:
            # Create lock if not created.
            ThemeLock.objects.create(theme=theme, reviewer=reviewer,
                                     expiry=get_updated_expiry())
    ThemeReviewFormset = formset_factory(forms.ThemeReviewForm)
    formset = ThemeReviewFormset(initial=[{'theme': theme.id}])
    # Since we started the review on the single page, we want to return to the
    # single page rather than get shot back to the queue.
    request.session['theme_redirect_url'] = reverse('editors.themes.single',
                                                    args=[theme.addon.slug])
    rereview = (theme.rereviewqueuetheme_set.all()[0] if
                theme.rereviewqueuetheme_set.exists() else None)
    return render(request, 'editors/themes/single.html', context(
        **{'formset': formset,
           'theme': rereview if rereview else theme,
           'theme_formsets': zip([rereview if rereview else theme], formset),
           'theme_reviews': paginate(request, ActivityLog.objects.filter(
               action=amo.LOG.THEME_REVIEW.id,
               _arguments__contains=theme.addon.id)),
           'actions': get_actions_json(),
           'theme_count': 1,
           'rereview': rereview,
           'reviewable': reviewable,
           'reject_reasons': rvw.THEME_REJECT_REASONS,
           'action_dict': rvw.REVIEW_ACTIONS,
           'tab': ('flagged' if theme.addon.status == amo.STATUS_REVIEW_PENDING
                   else 'rereview' if rereview else 'pending')}))
@personas_reviewer_required
def themes_logs(request):
    """Paginated, filterable log of past theme reviews."""
    data = request.GET.copy()
    # Default the window start to the first of the current month.
    if not data.get('start') and not data.get('end'):
        today = datetime.date.today()
        data['start'] = datetime.date(today.year, today.month, 1)
    form = forms.ReviewThemeLogForm(data)
    theme_logs = ActivityLog.objects.filter(action=amo.LOG.THEME_REVIEW.id)
    if form.is_valid():
        data = form.cleaned_data
        if data.get('start'):
            theme_logs = theme_logs.filter(created__gte=data['start'])
        if data.get('end'):
            theme_logs = theme_logs.filter(created__lte=data['end'])
        if data.get('search'):
            term = data['search']
            # Free-text match on log details or reviewer name/username.
            theme_logs = theme_logs.filter(
                Q(_details__icontains=term) |
                Q(user__display_name__icontains=term) |
                Q(user__username__icontains=term)).distinct()
    pager = paginate(request, theme_logs, 30)
    data = context(form=form, pager=pager,
                   ACTION_DICT=rvw.REVIEW_ACTIONS,
                   REJECT_REASONS=rvw.THEME_REJECT_REASONS, tab='themes')
    return render(request, 'editors/themes/logs.html', data)
@admin_required(theme_reviewers=True)
def deleted_themes(request):
    """List deleted personas, filterable by deletion window and name."""
    data = request.GET.copy()
    # `unfiltered` manager includes deleted add-ons.
    deleted = Addon.unfiltered.filter(type=amo.ADDON_PERSONA,
                                      status=amo.STATUS_DELETED)
    if not data.get('start') and not data.get('end'):
        today = datetime.date.today()
        data['start'] = datetime.date(today.year, today.month, 1)
    form = forms.DeletedThemeLogForm(data)
    if form.is_valid():
        data = form.cleaned_data
        if data.get('start'):
            deleted = deleted.filter(modified__gte=data['start'])
        if data.get('end'):
            deleted = deleted.filter(modified__lte=data['end'])
        if data.get('search'):
            term = data['search']
            deleted = deleted.filter(
                Q(name__localized_string__icontains=term))
    return render(request, 'editors/themes/deleted.html', {
        'form': form,
        'pager': paginate(request, deleted.order_by('-modified'), 30),
        'tab': 'deleted'
    })
@personas_reviewer_required
def themes_history(request, username):
    """Review history for one reviewer; falls back to the current user."""
    if not username:
        username = request.user.username
    return render(request, 'editors/themes/history.html', context(
        **{'theme_reviews':
            paginate(request, ActivityLog.objects.filter(
                action=amo.LOG.THEME_REVIEW.id, user__username=username), 20),
           'user_history': True,
           'username': username,
           'reject_reasons': rvw.THEME_REJECT_REASONS,
           'action_dict': rvw.REVIEW_ACTIONS}))
def get_actions_json():
    """Serialize the reviewer action constants to JSON for the front-end."""
    mapping = {
        'moreinfo': rvw.ACTION_MOREINFO,
        'flag': rvw.ACTION_FLAG,
        'duplicate': rvw.ACTION_DUPLICATE,
        'reject': rvw.ACTION_REJECT,
        'approve': rvw.ACTION_APPROVE,
    }
    return json.dumps(mapping)
def get_updated_expiry():
    """Compute the new expiry timestamp for a theme lock."""
    window = datetime.timedelta(minutes=rvw.THEME_LOCK_EXPIRY)
    return datetime.datetime.now() + window
| true | true |
f7151d1d59556e1af3df7bc93bfb1d6c1c861363 | 1,217 | py | Python | django_2gis_maps/tests/test_widget.py | NursErgesh/django_2gis_maps | 42f561519eeb769c8713fdb0cd394313a657eb9f | [
"MIT"
] | 7 | 2018-07-30T03:20:33.000Z | 2020-09-15T08:20:31.000Z | django_2gis_maps/tests/test_widget.py | NursErgesh/django-2gis-maps | 42f561519eeb769c8713fdb0cd394313a657eb9f | [
"MIT"
] | 4 | 2020-04-13T11:22:57.000Z | 2020-09-16T00:24:54.000Z | django_2gis_maps/tests/test_widget.py | NursErgesh/django-2gis-maps | 42f561519eeb769c8713fdb0cd394313a657eb9f | [
"MIT"
] | 2 | 2018-07-29T17:55:12.000Z | 2020-09-16T05:41:12.000Z | from django import test
from django.conf import settings
from django_2gis_maps.widgets import DoubleGisMapsAddressWidget
class WidgetTests(test.TestCase):
    """Unit tests for DoubleGisMapsAddressWidget rendering and media."""
    def test_render_returns_xxxxxxx(self):
        # Rendering with a value emits the text input plus the map canvas
        # placeholder markup.
        widget = DoubleGisMapsAddressWidget()
        results = widget.render('name', 'value', attrs={'a1': 1, 'a2': 2})
        expected = '<input a1="1" a2="2" name="name" type="text" value="value" />'
        expected += '<div class="map_canvas_wrapper">'
        expected += '<div id="map_canvas"></div></div>'
        self.assertHTMLEqual(expected, results)
    def test_render_returns_blank_for_value_when_none(self):
        # A None value should simply omit the value attribute.
        widget = DoubleGisMapsAddressWidget()
        results = widget.render('name', None, attrs={'a1': 1, 'a2': 2})
        expected = '<input a1="1" a2="2" name="name" type="text" />'
        expected += '<div class="map_canvas_wrapper">'
        expected += '<div id="map_canvas"></div></div>'
        self.assertHTMLEqual(expected, results)
    def test_maps_js_uses_api_key(self):
        # The widget's second media script must be the 2GIS loader URL.
        widget = DoubleGisMapsAddressWidget()
        django_2gis_maps_js = "https://maps.api.2gis.ru/2.0/loader.js?pkg=full&skin=dark"
        self.assertEqual(django_2gis_maps_js, widget.Media().js[1])
| 45.074074 | 89 | 0.665571 | from django import test
from django.conf import settings
from django_2gis_maps.widgets import DoubleGisMapsAddressWidget
class WidgetTests(test.TestCase):
    """Rendering/media tests for the 2GIS maps address widget."""
    def test_render_returns_xxxxxxx(self):
        # Input with value plus the map placeholder markup is rendered.
        widget = DoubleGisMapsAddressWidget()
        results = widget.render('name', 'value', attrs={'a1': 1, 'a2': 2})
        expected = '<input a1="1" a2="2" name="name" type="text" value="value" />'
        expected += '<div class="map_canvas_wrapper">'
        expected += '<div id="map_canvas"></div></div>'
        self.assertHTMLEqual(expected, results)
    def test_render_returns_blank_for_value_when_none(self):
        # None value: no value attribute in the rendered input.
        widget = DoubleGisMapsAddressWidget()
        results = widget.render('name', None, attrs={'a1': 1, 'a2': 2})
        expected = '<input a1="1" a2="2" name="name" type="text" />'
        expected += '<div class="map_canvas_wrapper">'
        expected += '<div id="map_canvas"></div></div>'
        self.assertHTMLEqual(expected, results)
    def test_maps_js_uses_api_key(self):
        # The loader script URL is exposed as the second media JS entry.
        widget = DoubleGisMapsAddressWidget()
        django_2gis_maps_js = "https://maps.api.2gis.ru/2.0/loader.js?pkg=full&skin=dark"
        self.assertEqual(django_2gis_maps_js, widget.Media().js[1])
| true | true |
f7151d81ac3753cf1b8aa541756a774f0dd9b255 | 198 | py | Python | Harvard's CS50/ints.py | RichelleT/Python | 87aff2392964ca5630ffa44225f9e13d040cdd91 | [
"MIT"
] | 1 | 2019-03-04T05:43:35.000Z | 2019-03-04T05:43:35.000Z | Harvard's CS50/ints.py | RichelleT/Python | 87aff2392964ca5630ffa44225f9e13d040cdd91 | [
"MIT"
] | null | null | null | Harvard's CS50/ints.py | RichelleT/Python | 87aff2392964ca5630ffa44225f9e13d040cdd91 | [
"MIT"
] | null | null | null | from cs50 import get_int
# Prompt for two integers, then show each basic arithmetic operation.
x = get_int("x: ")
y = get_int("y: ")
for label, value in (("+", x + y), ("-", x - y), ("*", x * y),
                     ("/", x / y), ("mod", x % y)):
    print(f"x {label} y = {value}")
| 16.5 | 27 | 0.464646 | from cs50 import get_int
# Read two integers from the user.
x = get_int("x: ")
y = get_int("y: ")
# Print the basic arithmetic operations on them.
print(f"x + y = {x + y}")
print(f"x - y = {x - y}")
print(f"x * y = {x * y}")
print(f"x / y = {x / y}")  # true division; raises ZeroDivisionError if y == 0
print(f"x mod y = {x % y}")
| true | true |
f7151db4fc3ad6adda43fc7246c135eb4b5779ae | 1,370 | py | Python | cornflow-server/migrations/versions/a472b5ad50b7_.py | ggsdc/corn | 4c17c46a70f95b8882bcb6a55ef7daa1f69e0456 | [
"MIT"
] | 2 | 2020-07-09T20:58:47.000Z | 2020-07-20T20:40:46.000Z | cornflow-server/migrations/versions/a472b5ad50b7_.py | baobabsoluciones/cornflow | bd7cae22107e5fe148704d5f41d4f58f9c410b40 | [
"Apache-2.0"
] | 2 | 2022-03-31T08:42:10.000Z | 2022-03-31T12:05:23.000Z | cornflow-server/migrations/versions/a472b5ad50b7_.py | ggsdc/corn | 4c17c46a70f95b8882bcb6a55ef7daa1f69e0456 | [
"MIT"
] | null | null | null | """
Modified state columns in executions table
Revision ID: a472b5ad50b7
Revises: e1a50dae1ac9
Create Date: 2021-01-21 13:25:45.815775
"""
import sqlalchemy as sa
from alembic import op
# TODO: import DEFAULT EXECUTION CODE HERE
# revision identifiers, used by Alembic.
revision = "a472b5ad50b7"
down_revision = "e1a50dae1ac9"
branch_labels = None
depends_on = None
def upgrade():
    """Add `state`/`state_message` to `executions`; drop `finished`."""
    # ### commands auto generated by Alembic - please adjust! ###
    # New integer status column; defaults to 0 for existing rows.
    op.add_column(
        "executions",
        sa.Column(
            "state",
            sa.SmallInteger(),
            nullable=False,
            server_default=sa.text(str(0)),
        ),
    )
    op.add_column("executions", sa.Column("state_message", sa.TEXT(), nullable=True))
    # workaround to make migration work in sqlite:
    with op.batch_alter_table("executions") as batch_op:
        batch_op.drop_column("finished")
    # ### end Alembic commands ###
def downgrade():
    """Revert: drop `state`/`state_message` and restore `finished`."""
    # Use batch mode for the column drops so the downgrade also works on
    # SQLite (ALTER TABLE ... DROP COLUMN is unsupported there), matching
    # the workaround already applied in upgrade().  On other backends
    # batch_alter_table is a plain pass-through.
    with op.batch_alter_table("executions") as batch_op:
        batch_op.drop_column("state_message")
        batch_op.drop_column("state")
    op.add_column(
        "executions",
        sa.Column(
            "finished",
            sa.BOOLEAN(),
            server_default=sa.text("false"),
            autoincrement=False,
            nullable=False,
        ),
    )
| 24.909091 | 85 | 0.621898 | import sqlalchemy as sa
from alembic import op
revision = "a472b5ad50b7"
down_revision = "e1a50dae1ac9"
branch_labels = None
depends_on = None
def upgrade():
0)),
),
)
op.add_column("executions", sa.Column("state_message", sa.TEXT(), nullable=True))
with op.batch_alter_table("executions") as batch_op:
batch_op.drop_column("finished")
e"),
autoincrement=False,
nullable=False,
),
)
| true | true |
f7151e12353ec6f42deedb97b338a0018cfc050e | 6,304 | py | Python | sdk/python/pulumi_gcp/compute/route.py | stack72/pulumi-gcp | e63e4ed3129fe8e64e4869f4839ba2b20f57cb57 | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | sdk/python/pulumi_gcp/compute/route.py | stack72/pulumi-gcp | e63e4ed3129fe8e64e4869f4839ba2b20f57cb57 | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | sdk/python/pulumi_gcp/compute/route.py | stack72/pulumi-gcp | e63e4ed3129fe8e64e4869f4839ba2b20f57cb57 | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import json
import warnings
import pulumi
import pulumi.runtime
from .. import utilities, tables
class Route(pulumi.CustomResource):
description: pulumi.Output[str]
dest_range: pulumi.Output[str]
name: pulumi.Output[str]
network: pulumi.Output[str]
next_hop_gateway: pulumi.Output[str]
next_hop_instance: pulumi.Output[str]
next_hop_instance_zone: pulumi.Output[str]
"""
(Optional when `next_hop_instance` is
specified) The zone of the instance specified in
`next_hop_instance`. Omit if `next_hop_instance` is specified as
a URL.
"""
next_hop_ip: pulumi.Output[str]
next_hop_network: pulumi.Output[str]
next_hop_vpn_tunnel: pulumi.Output[str]
priority: pulumi.Output[float]
project: pulumi.Output[str]
"""
The ID of the project in which the resource belongs.
If it is not provided, the provider project is used.
"""
self_link: pulumi.Output[str]
"""
The URI of the created resource.
"""
tags: pulumi.Output[list]
def __init__(__self__, resource_name, opts=None, description=None, dest_range=None, name=None, network=None, next_hop_gateway=None, next_hop_instance=None, next_hop_instance_zone=None, next_hop_ip=None, next_hop_vpn_tunnel=None, priority=None, project=None, tags=None, __name__=None, __opts__=None):
"""
Represents a Route resource.
A route is a rule that specifies how certain packets should be handled by
the virtual network. Routes are associated with virtual machines by tag,
and the set of routes for a particular virtual machine is called its
routing table. For each packet leaving a virtual machine, the system
searches that virtual machine's routing table for a single best matching
route.
Routes match packets by destination IP address, preferring smaller or more
specific ranges over larger ones. If there is a tie, the system selects
the route with the smallest priority value. If there is still a tie, it
uses the layer three and four packet headers to select just one of the
remaining matching routes. The packet is then forwarded as specified by
the next_hop field of the winning route -- either to another virtual
machine destination, a virtual machine gateway or a Compute
Engine-operated gateway. Packets that do not match any route in the
sending virtual machine's routing table will be dropped.
A Route resource must have exactly one specification of either
nextHopGateway, nextHopInstance, nextHopIp, or nextHopVpnTunnel.
To get more information about Route, see:
* [API documentation](https://cloud.google.com/compute/docs/reference/rest/v1/routes)
* How-to Guides
* [Using Routes](https://cloud.google.com/vpc/docs/using-routes)
<div class = "oics-button" style="float: right; margin: 0 0 -15px">
<a href="https://console.cloud.google.com/cloudshell/open?cloudshell_git_repo=https%3A%2F%2Fgithub.com%2Fterraform-google-modules%2Fdocs-examples.git&cloudshell_working_dir=route_basic&cloudshell_image=gcr.io%2Fgraphite-cloud-shell-images%2Fterraform%3Alatest&open_in_editor=main.tf&cloudshell_print=.%2Fmotd&cloudshell_tutorial=.%2Ftutorial.md" target="_blank">
<img alt="Open in Cloud Shell" src="//gstatic.com/cloudssh/images/open-btn.svg" style="max-height: 44px; margin: 32px auto; max-width: 100%;">
</a>
</div>
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] next_hop_instance_zone: (Optional when `next_hop_instance` is
specified) The zone of the instance specified in
`next_hop_instance`. Omit if `next_hop_instance` is specified as
a URL.
:param pulumi.Input[str] project: The ID of the project in which the resource belongs.
If it is not provided, the provider project is used.
"""
if __name__ is not None:
warnings.warn("explicit use of __name__ is deprecated", DeprecationWarning)
resource_name = __name__
if __opts__ is not None:
warnings.warn("explicit use of __opts__ is deprecated, use 'opts' instead", DeprecationWarning)
opts = __opts__
if not resource_name:
raise TypeError('Missing resource name argument (for URN creation)')
if not isinstance(resource_name, str):
raise TypeError('Expected resource name to be a string')
if opts and not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
__props__ = dict()
__props__['description'] = description
if dest_range is None:
raise TypeError('Missing required property dest_range')
__props__['dest_range'] = dest_range
__props__['name'] = name
if network is None:
raise TypeError('Missing required property network')
__props__['network'] = network
__props__['next_hop_gateway'] = next_hop_gateway
__props__['next_hop_instance'] = next_hop_instance
__props__['next_hop_instance_zone'] = next_hop_instance_zone
__props__['next_hop_ip'] = next_hop_ip
__props__['next_hop_vpn_tunnel'] = next_hop_vpn_tunnel
__props__['priority'] = priority
__props__['project'] = project
__props__['tags'] = tags
__props__['next_hop_network'] = None
__props__['self_link'] = None
super(Route, __self__).__init__(
'gcp:compute/route:Route',
resource_name,
__props__,
opts)
    def translate_output_property(self, prop):
        # Map provider camelCase property names back to Python snake_case;
        # unknown names pass through unchanged.
        return tables._CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
    def translate_input_property(self, prop):
        # Map Python snake_case property names to the provider's camelCase;
        # unknown names pass through unchanged.
        return tables._SNAKE_TO_CAMEL_CASE_TABLE.get(prop) or prop
| 43.777778 | 372 | 0.682265 |
import json
import warnings
import pulumi
import pulumi.runtime
from .. import utilities, tables
class Route(pulumi.CustomResource):
description: pulumi.Output[str]
dest_range: pulumi.Output[str]
name: pulumi.Output[str]
network: pulumi.Output[str]
next_hop_gateway: pulumi.Output[str]
next_hop_instance: pulumi.Output[str]
next_hop_instance_zone: pulumi.Output[str]
next_hop_ip: pulumi.Output[str]
next_hop_network: pulumi.Output[str]
next_hop_vpn_tunnel: pulumi.Output[str]
priority: pulumi.Output[float]
project: pulumi.Output[str]
self_link: pulumi.Output[str]
tags: pulumi.Output[list]
    def __init__(__self__, resource_name, opts=None, description=None, dest_range=None, name=None, network=None, next_hop_gateway=None, next_hop_instance=None, next_hop_instance_zone=None, next_hop_ip=None, next_hop_vpn_tunnel=None, priority=None, project=None, tags=None, __name__=None, __opts__=None):
        """Register a GCP Compute Route resource with the Pulumi engine.

        `dest_range` and `network` are required.  `__name__`/`__opts__`
        are deprecated aliases for `resource_name`/`opts`.
        """
        # Legacy argument aliases: warn and fold into the new names.
        if __name__ is not None:
            warnings.warn("explicit use of __name__ is deprecated", DeprecationWarning)
            resource_name = __name__
        if __opts__ is not None:
            warnings.warn("explicit use of __opts__ is deprecated, use 'opts' instead", DeprecationWarning)
            opts = __opts__
        if not resource_name:
            raise TypeError('Missing resource name argument (for URN creation)')
        if not isinstance(resource_name, str):
            raise TypeError('Expected resource name to be a string')
        if opts and not isinstance(opts, pulumi.ResourceOptions):
            raise TypeError('Expected resource options to be a ResourceOptions instance')
        # Collect the input properties understood by the provider.
        __props__ = dict()
        __props__['description'] = description
        if dest_range is None:
            raise TypeError('Missing required property dest_range')
        __props__['dest_range'] = dest_range
        __props__['name'] = name
        if network is None:
            raise TypeError('Missing required property network')
        __props__['network'] = network
        __props__['next_hop_gateway'] = next_hop_gateway
        __props__['next_hop_instance'] = next_hop_instance
        __props__['next_hop_instance_zone'] = next_hop_instance_zone
        __props__['next_hop_ip'] = next_hop_ip
        __props__['next_hop_vpn_tunnel'] = next_hop_vpn_tunnel
        __props__['priority'] = priority
        __props__['project'] = project
        __props__['tags'] = tags
        # Output-only properties start as None until the provider fills them.
        __props__['next_hop_network'] = None
        __props__['self_link'] = None
        super(Route, __self__).__init__(
            'gcp:compute/route:Route',
            resource_name,
            __props__,
            opts)
    def translate_output_property(self, prop):
        # camelCase (provider) -> snake_case (Python); identity fallback.
        return tables._CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
    def translate_input_property(self, prop):
        # snake_case (Python) -> camelCase (provider); identity fallback.
        return tables._SNAKE_TO_CAMEL_CASE_TABLE.get(prop) or prop
| true | true |
f7151ede1353f7e6906097e8ade4346f2390410d | 3,022 | py | Python | code/main.py | ynandwan/step-function-approximator | 7f4a59841d6d938e0cc97e726ce6ba6b65a6267f | [
"MIT"
] | null | null | null | code/main.py | ynandwan/step-function-approximator | 7f4a59841d6d938e0cc97e726ce6ba6b65a6267f | [
"MIT"
] | null | null | null | code/main.py | ynandwan/step-function-approximator | 7f4a59841d6d938e0cc97e726ce6ba6b65a6267f | [
"MIT"
] | null | null | null | from __future__ import print_function
import os
import utils
import argparse
import point
import one_step_approximator
from IPython.core.debugger import Pdb
MAX = float('inf')
def print_output(output, output_file):
    """Write the step-function solution: a count line, then one "x y" per step.

    When `output_file` is the empty string, write to stdout instead.
    The original version leaked the file handle if a print raised; the
    try/finally guarantees it is closed.
    """
    fh = None if output_file == '' else open(output_file, 'w')
    try:
        print(len(output), file=fh)  # file=None means stdout
        for mp in output:
            print(mp[0], mp[1], file=fh)
    finally:
        if fh:
            fh.close()
def main(input_file,output_file):
    """Fit the best k-step function to the input points via dynamic programming.

    Reads (k, error type, points) from `input_file`, builds a DP table where
    row i holds the best error of approximating the suffix starting at each
    point with i+1 steps, then backtracks and writes the (x, y) breakpoints.
    """
    #Pdb().set_trace()
    k,error_type,points = utils.read_input(input_file)
    error_fn = utils.get_error_fn(error_type)
    ssa = one_step_approximator.get_one_step_approximator(error_type, points)
    n = len(points)
    if k >= n:
        # More steps than points: reproduce every point exactly.
        output = [(p.x,p.y) for p in points]
        print_output(output,output_file)
        return
    #base case -
    #size of error - table k x n
    error_table = []
    back_pointers = []
    last_error_row = [0]*n
    this_back_pointers = [-1]*n
    # Base case: approximate each suffix [j, n-1] with a single step.
    for j in range(k-1,n):
        last_error_row[j],this_back_pointers[j] = ssa.get_approximation(j,n-1)
    #Pdb().set_trace()
    back_pointers.append(this_back_pointers)
    for i in range(k-1):
        step_no = i+2
        this_error_row = [0]*n
        this_back_pointers = [-1]*n
        #at step i: fill the row for approximations using `step_no` steps
        for j in range(k-step_no,n):
            #num_points_on_right = n-j
            if (n-j) == step_no:
                # Exactly one point per remaining step: zero error.
                this_error_row[j] = 0
                this_back_pointers[j] = (points[j].y,j+1)
                break
            current_min = MAX
            current_min_index = -1
            current_ssay = -1
            # Try every split point l: one step over [j, l-1], best
            # (step_no - 1)-step solution over [l, n-1].
            for l in range(j+1,n-i):
                this_ssa_e,this_ssa_y = ssa.get_approximation(j,l-1)
                this_score = ssa.combine(last_error_row[l], this_ssa_e)
                if this_score < current_min:
                    current_min = this_score
                    current_min_index = l
                    current_ssay = this_ssa_y
            this_error_row[j] = current_min
            this_back_pointers[j] = (current_ssay, current_min_index)
            if step_no == k:
                # Final row: only index 0 is needed for backtracking.
                break
        last_error_row = this_error_row
        back_pointers.append(this_back_pointers)
    # Backtrack from index 0 of the k-step row, emitting one (x, y) per step.
    output = []
    current_x_ind = 0
    current_back_pointer = back_pointers[-1][current_x_ind]
    for i in range(k-2,-1,-1):
        output.append((points[current_x_ind].x, current_back_pointer[0]))
        current_x_ind = current_back_pointer[1]
        current_back_pointer = back_pointers[i][current_x_ind]
    # The base-case row stores the final step's y directly (not a tuple).
    output.append((points[current_x_ind].x, current_back_pointer))
    print_output(output,output_file)
if __name__ == '__main__':
    # CLI entry point: read the problem from --input_file and write the
    # k-step approximation to --output_file (stdout when empty).
    arg_parser = argparse.ArgumentParser()
    arg_parser.add_argument('--input_file', type=str, default='input.txt',
                            help='input_file_name')
    arg_parser.add_argument('--output_file', default='',
                            help='output written in output file')
    cli_args = arg_parser.parse_args()
    main(cli_args.input_file, cli_args.output_file)
| 31.479167 | 91 | 0.603905 | from __future__ import print_function
import os
import utils
import argparse
import point
import one_step_approximator
from IPython.core.debugger import Pdb
MAX = float('inf')
def print_output(output,output_file):
if output_file == '':
fh = None
else:
fh = open(output_file,'w')
print(len(output),file=fh)
for mp in output:
print(mp[0],mp[1] ,file=fh)
if fh:
fh.close()
def main(input_file,output_file):
k,error_type,points = utils.read_input(input_file)
error_fn = utils.get_error_fn(error_type)
ssa = one_step_approximator.get_one_step_approximator(error_type, points)
n = len(points)
if k >= n:
output = [(p.x,p.y) for p in points]
print_output(output,output_file)
return
error_table = []
back_pointers = []
last_error_row = [0]*n
this_back_pointers = [-1]*n
for j in range(k-1,n):
last_error_row[j],this_back_pointers[j] = ssa.get_approximation(j,n-1)
back_pointers.append(this_back_pointers)
for i in range(k-1):
step_no = i+2
this_error_row = [0]*n
this_back_pointers = [-1]*n
for j in range(k-step_no,n):
if (n-j) == step_no:
this_error_row[j] = 0
this_back_pointers[j] = (points[j].y,j+1)
break
current_min = MAX
current_min_index = -1
current_ssay = -1
for l in range(j+1,n-i):
this_ssa_e,this_ssa_y = ssa.get_approximation(j,l-1)
this_score = ssa.combine(last_error_row[l], this_ssa_e)
if this_score < current_min:
current_min = this_score
current_min_index = l
current_ssay = this_ssa_y
this_error_row[j] = current_min
this_back_pointers[j] = (current_ssay, current_min_index)
if step_no == k:
break
last_error_row = this_error_row
back_pointers.append(this_back_pointers)
output = []
current_x_ind = 0
current_back_pointer = back_pointers[-1][current_x_ind]
for i in range(k-2,-1,-1):
output.append((points[current_x_ind].x, current_back_pointer[0]))
current_x_ind = current_back_pointer[1]
current_back_pointer = back_pointers[i][current_x_ind]
output.append((points[current_x_ind].x, current_back_pointer))
print_output(output,output_file)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--input_file',help='input_file_name',type=str,default='input.txt')
parser.add_argument('--output_file',help='output written in output file',default='')
args = parser.parse_args()
main(args.input_file, args.output_file)
| true | true |
f71521568544b636c6596c9aea9ff6e5391602ad | 27,240 | py | Python | src/geopackage/_wkb.py | karmic-creditor/pygeopackage | 13366d54f80bd827b84c6538b9b08b6656111ef4 | [
"Apache-2.0"
] | null | null | null | src/geopackage/_wkb.py | karmic-creditor/pygeopackage | 13366d54f80bd827b84c6538b9b08b6656111ef4 | [
"Apache-2.0"
] | null | null | null | src/geopackage/_wkb.py | karmic-creditor/pygeopackage | 13366d54f80bd827b84c6538b9b08b6656111ef4 | [
"Apache-2.0"
] | null | null | null | """
This code has been a variation of geomet: https://github.com/geomet/geomet
It has been modified under the Apache 2.0 license to fit the needs of the
Esri JSON specificaction as defined here: https://developers.arcgis.com/documentation/common-data-types/geometry-objects.htm
"""
import binascii
import struct
from ._utils import block_splitter
from ._utils import take
from ._utils import as_bin_str
from ._utils import flatten_multi_dim
from itertools import chain
#: '\x00': The first byte of any WKB string. Indicates big endian byte
#: ordering for the data.
BIG_ENDIAN = b'\x00'
#: '\x01': The first byte of any WKB string. Indicates little endian byte
#: ordering for the data.
LITTLE_ENDIAN = b'\x01'
#: High byte in a 4-byte geometry type field to indicate that a 4-byte SRID
#: field follows.
SRID_FLAG = b'\x20'
#: Mapping of GeoJSON geometry types to the "2D" 4-byte binary string
#: representation for WKB. "2D" indicates that the geometry is 2-dimensional,
#: X and Y components.
#: NOTE: Byte ordering is big endian.
WKB_2D = {
'Point': b'\x00\x00\x00\x01',
'LineString': b'\x00\x00\x00\x02',
'Polygon': b'\x00\x00\x00\x03',
'MultiPoint': b'\x00\x00\x00\x04',
'MultiLineString': b'\x00\x00\x00\x05',
'MultiPolygon': b'\x00\x00\x00\x06',
'GeometryCollection': b'\x00\x00\x00\x07',
}
#: Mapping of GeoJSON geometry types to the "Z" 4-byte binary string
#: representation for WKB. "Z" indicates that the geometry is 3-dimensional,
#: with X, Y, and Z components.
#: NOTE: Byte ordering is big endian.
WKB_Z = {
'Point': b'\x00\x00\x03\xe9',
'LineString': b'\x00\x00\x03\xea',
'Polygon': b'\x00\x00\x03\xeb',
'MultiPoint': b'\x00\x00\x03\xec',
'MultiLineString': b'\x00\x00\x03\xed',
'MultiPolygon': b'\x00\x00\x03\xee',
'GeometryCollection': b'\x00\x00\x03\xef',
}
#: Mapping of GeoJSON geometry types to the "M" 4-byte binary string
#: representation for WKB. "M" indicates that the geometry is 2-dimensional,
#: with X, Y, and M ("Measure") components.
#: NOTE: Byte ordering is big endian.
WKB_M = {
'Point': b'\x00\x00\x07\xd1',
'LineString': b'\x00\x00\x07\xd2',
'Polygon': b'\x00\x00\x07\xd3',
'MultiPoint': b'\x00\x00\x07\xd4',
'MultiLineString': b'\x00\x00\x07\xd5',
'MultiPolygon': b'\x00\x00\x07\xd6',
'GeometryCollection': b'\x00\x00\x07\xd7',
}
#: Mapping of GeoJSON geometry types to the "ZM" 4-byte binary string
#: representation for WKB. "ZM" indicates that the geometry is 4-dimensional,
#: with X, Y, Z, and M ("Measure") components.
#: NOTE: Byte ordering is big endian.
WKB_ZM = {
'Point': b'\x00\x00\x0b\xb9',
'LineString': b'\x00\x00\x0b\xba',
'Polygon': b'\x00\x00\x0b\xbb',
'MultiPoint': b'\x00\x00\x0b\xbc',
'MultiLineString': b'\x00\x00\x0b\xbd',
'MultiPolygon': b'\x00\x00\x0b\xbe',
'GeometryCollection': b'\x00\x00\x0b\xbf',
}
#: Mapping of dimension types to maps of GeoJSON geometry type -> 4-byte binary
#: string representation for WKB.
_WKB = {
'2D': WKB_2D,
'Z': WKB_Z,
'M': WKB_M,
'ZM': WKB_ZM,
}
#: Mapping from binary geometry type (as a 4-byte binary string) to GeoJSON
#: geometry type.
#: NOTE: Byte ordering is big endian.
_BINARY_TO_GEOM_TYPE = dict(
chain(*((reversed(x) for x in wkb_map.items())
for wkb_map in _WKB.values()))
)
_INT_TO_DIM_LABEL = {2: '2D', 3: 'Z', 4: 'ZM'}
def _get_geom_type(type_bytes):
    """Resolve a 4-byte WKB type field to its GeoJSON geometry label.

    :param type_bytes:
        4-byte big-endian WKB type field.  The high byte may carry the
        EWKB "has SRID" flag (0x20); when present it is stripped.
    :returns:
        3-tuple of (geometry type label or ``None`` if unknown,
        normalized type bytes with the SRID flag removed, bool
        indicating whether the SRID flag was set).

    >>> # Z Point, with SRID flag
    >>> _get_geom_type(b'\\x20\\x00\\x03\\xe9') == (
    ...     'Point', b'\\x00\\x00\\x03\\xe9', True)
    True
    >>> # 2D MultiLineString, without SRID flag
    >>> _get_geom_type(b'\\x00\\x00\\x00\\x05') == (
    ...     'MultiLineString', b'\\x00\\x00\\x00\\x05', False)
    True
    """
    has_srid = bytes([type_bytes[0]]) == SRID_FLAG
    if has_srid:
        # Normalize: replace the flag byte with a null byte so the
        # lookup key matches the flag-free table entries.
        normalized = as_bin_str(b'\x00' + type_bytes[1:])
    else:
        normalized = as_bin_str(type_bytes)
    return _BINARY_TO_GEOM_TYPE.get(normalized), normalized, has_srid
def dump(obj, dest_file):
    """Serialize an EsriJSON-like `dict` to WKB and write it to *dest_file*.

    :param dict obj:
        Geometry dictionary using Esri JSON style keys ('x'/'y',
        'paths', 'rings' or 'points').
    :param dest_file:
        Open and writable file-like object.
    """
    wkb = dumps(obj)
    dest_file.write(wkb)
def load(source_file, wkid=4326):
    """Read WKB bytes from *source_file* and return an EsriJSON `dict`.

    :param source_file:
        Open and readable file-like object containing WKB.
    :param int wkid:
        Spatial-reference id attached to the result (default 4326).
    :returns:
        EsriJSON `dict` for the geometry read from the file.
    """
    raw = source_file.read()
    return loads(raw, wkid=wkid)
def dumps(obj, big_endian=False):
    """
    Dump an EsriJSON-like `dict` to a WKB string.

    The geometry type is inferred from the keys present in ``obj``
    ('point'/'x' -> Point, 'paths' -> MultiLineString, 'rings' ->
    MultiPolygon, 'points' -> MultiPoint).

    :param dict obj:
        EsriJSON-like `dict` object.
    :param bool big_endian:
        Defaults to `False`. If `True`, data values in the generated WKB
        will be represented using big endian byte order. Else, little
        endian.
    :raises ValueError:
        If no known geometry key is present in ``obj``, or the inferred
        type has no serializer.
    :returns:
        A WKB binary string representing of the ``obj``.
    """
    def lu_geom(ks):
        # Key checks preserved in the original order.
        if 'point' in ks:
            return "Point"
        elif 'paths' in ks:
            return "MultiLineString"
        elif 'x' in ks:
            return "Point"
        elif 'rings' in ks:
            return "MultiPolygon"
        elif 'points' in ks:
            return "MultiPoint"
        return None

    geom_type = lu_geom(obj.keys())
    if geom_type is None:
        # Fail early with a clear message instead of the confusing
        # "Unsupported geometry type 'None'" the fall-through produced.
        raise ValueError(
            "Cannot infer geometry type from keys: %s" % sorted(obj.keys()))
    meta = obj.get('meta', {})
    exporter = _dumps_registry.get(geom_type)
    if exporter is None:
        _unsupported_geom_type(geom_type)
    return exporter(obj, big_endian, meta)
def loads(string, wkid=4326):
    """
    Construct an EsriJSON `dict` from WKB (`string`).

    :param str string:
        WKB byte string.
    :param int wkid:
        The srid of the coordinate system. The default is 4326.
    :raises ValueError:
        If the endianness byte is neither 0x00 nor 0x01, or the geometry
        type is unsupported.
    """
    string = iter(string)
    endianness = as_bin_str(take(1, string))
    if endianness == BIG_ENDIAN:
        big_endian = True
    elif endianness == LITTLE_ENDIAN:
        big_endian = False
    else:
        # BUGFIX: `endianness` is already a bytes object; the original
        # called .encode() on it, raising AttributeError before the
        # intended ValueError could be reported.
        raise ValueError("Invalid endian byte: '0x%s'. Expected 0x00 or 0x01"
                         % binascii.hexlify(endianness).decode())
    endian_token = '>' if big_endian else '<'
    # Normalize the 4-byte type field to big endian before lookup.
    type_bytes = as_bin_str(take(4, string))
    if not big_endian:
        type_bytes = type_bytes[::-1]
    geom_type, type_bytes, has_srid = _get_geom_type(type_bytes)
    srid = None
    if has_srid:
        # EWKB: a 4-byte SRID field follows the type field.
        srid_field = as_bin_str(take(4, string))
        [srid] = struct.unpack('%si' % endian_token, srid_field)
    # The remaining bytes are the coordinate payload.
    data_bytes = string
    importer = _loads_registry_esri.get(geom_type)
    if importer is None:
        _unsupported_geom_type(geom_type)
    data_bytes = iter(data_bytes)
    result = importer(big_endian, type_bytes, data_bytes, wkid)
    if has_srid:
        # Attach the SRID both as meta and as a GeoJSON-style crs member.
        result['meta'] = {'srid': int(srid)}
        result['crs'] = {
            'type': 'name',
            'properties': {'name': 'EPSG%s' % srid},
        }
    return result
def _unsupported_geom_type(geom_type):
raise ValueError("Unsupported geometry type '%s'" % geom_type)
def _header_bytefmt_byteorder(geom_type, num_dims, big_endian, meta=None):
    """
    Utility function to get the WKB header (endian byte + type header),
    byte format string, and byte order string.

    :param str geom_type: GeoJSON geometry type label (e.g. 'Point').
    :param int num_dims: number of dimensions (2, 3 or 4).
    :param bool big_endian: big endian byte order when `True`.
    :param dict meta: optional metadata; an 'srid' entry triggers the
        EWKB SRID flag and a 4-byte SRID field in the header.
    :raises ValueError: if ``num_dims`` is not 2, 3 or 4.
    :returns: 3-tuple of (header bytes, struct format for one vertex,
        byte-order token '>' or '<').
    """
    # Tolerate a None meta instead of crashing on .get (resolves the
    # old "dont default meta to none" TODO).
    meta = meta or {}
    dim = _INT_TO_DIM_LABEL.get(num_dims)
    if dim is None:
        # Previously this silently fell through to a KeyError below.
        raise ValueError('Invalid number of dimensions: %s' % num_dims)
    type_byte_str = _WKB[dim][geom_type]
    srid = meta.get('srid')
    if srid is not None:
        # Set the EWKB SRID flag in the high byte of the type field.
        type_byte_str = SRID_FLAG + type_byte_str[1:]
    if big_endian:
        header = BIG_ENDIAN
        byte_fmt = b'>'
        byte_order = '>'
    else:
        header = LITTLE_ENDIAN
        byte_fmt = b'<'
        byte_order = '<'
        # Little endian: the type bytes are stored reversed.
        type_byte_str = type_byte_str[::-1]
    header += type_byte_str
    if srid is not None:
        srid = int(srid)
        header += struct.pack('%si' % byte_order, srid)
    byte_fmt += b'd' * num_dims
    return header, byte_fmt, byte_order
def _dump_point(obj, big_endian, meta):
    """Serialize an EsriJSON point `dict` to a WKB byte string.

    :param dict obj:
        EsriJSON point with 'x' and 'y' keys.
    :param bool big_endian:
        Byte order for the generated WKB.
    :param dict meta:
        Optional metadata; an 'srid' entry produces an EWKB header
        carrying that spatial reference id.
    :returns:
        A WKB binary string representing the point.
    """
    vertex = (obj['x'], obj['y'])
    header, vert_fmt, _ = _header_bytefmt_byteorder(
        'Point', len(vertex), big_endian, meta
    )
    return header + struct.pack(vert_fmt, *vertex)
def _dump_linestring(obj, big_endian, meta):
    """Serialize a GeoJSON-style linestring `dict` to WKB.

    Input parameters and output are analogous to :func:`_dump_point`;
    vertices are read from ``obj['coordinates']``.
    """
    verts = obj['coordinates']
    # Dimensionality (2D/Z/ZM) is inferred from the first vertex.
    num_dims = len(verts[0])
    header, vert_fmt, byte_order = _header_bytefmt_byteorder(
        'LineString', num_dims, big_endian, meta
    )
    parts = [header, struct.pack('%sl' % byte_order, len(verts))]
    parts.extend(struct.pack(vert_fmt, *v) for v in verts)
    return b''.join(parts)
def _dump_polygon(obj, big_endian, meta):
    """Serialize a GeoJSON-style polygon `dict` to WKB.

    Input parameters and output are analogous to :func:`_dump_point`;
    ``obj['coordinates']`` is a list of rings, each a list of vertices.
    """
    rings = obj['coordinates']
    # Dimensionality is inferred from the first vertex of the first ring.
    num_dims = len(rings[0][0])
    header, vert_fmt, byte_order = _header_bytefmt_byteorder(
        'Polygon', num_dims, big_endian, meta
    )
    int_fmt = '%sl' % byte_order
    parts = [header, struct.pack(int_fmt, len(rings))]
    for ring in rings:
        parts.append(struct.pack(int_fmt, len(ring)))
        parts.extend(struct.pack(vert_fmt, *v) for v in ring)
    return b''.join(parts)
def _dump_multipoint(obj, big_endian, meta):
    """Serialize an EsriJSON multipoint `dict` to WKB.

    Input parameters and output are analogous to :func:`_dump_point`;
    vertices are read from ``obj['points']``.  Per the WKB layout, each
    member point carries its own endian byte and type header.
    """
    verts = obj['points']
    num_dims = len(verts[0])
    header, vert_fmt, byte_order = _header_bytefmt_byteorder(
        'MultiPoint', num_dims, big_endian, meta
    )
    inner_type = _WKB[_INT_TO_DIM_LABEL.get(num_dims)]['Point']
    if big_endian:
        inner_header = BIG_ENDIAN + inner_type
    else:
        inner_header = LITTLE_ENDIAN + inner_type[::-1]
    parts = [header, struct.pack('%sl' % byte_order, len(verts))]
    for v in verts:
        parts.append(inner_header)
        parts.append(struct.pack(vert_fmt, *v))
    return b''.join(parts)
def _dump_multilinestring(obj, big_endian, meta):
    """Serialize an EsriJSON multilinestring `dict` to WKB.

    Input parameters and output are analogous to :func:`_dump_point`;
    paths are read from ``obj['paths']``.  Per the WKB layout, each
    member linestring carries its own endian byte and type header.
    """
    paths = obj['paths']
    num_dims = len(paths[0][0])
    header, vert_fmt, byte_order = _header_bytefmt_byteorder(
        'MultiLineString', num_dims, big_endian, meta
    )
    inner_type = _WKB[_INT_TO_DIM_LABEL.get(num_dims)]['LineString']
    if big_endian:
        inner_header = BIG_ENDIAN + inner_type
    else:
        inner_header = LITTLE_ENDIAN + inner_type[::-1]
    int_fmt = '%sl' % byte_order
    parts = [header, struct.pack(int_fmt, len(paths))]
    for path in paths:
        parts.append(inner_header)
        parts.append(struct.pack(int_fmt, len(path)))
        parts.extend(struct.pack(vert_fmt, *v) for v in path)
    return b''.join(parts)
def _dump_multipolygon(obj, big_endian, meta):
    """Serialize an EsriJSON polygon `dict` (rings) as a WKB multipolygon.

    Input parameters and output are analogous to :func:`_dump_point`.
    The Esri 'rings' array is wrapped as a single-polygon multipolygon.
    """
    polygons = [obj['rings']]
    num_dims = len(polygons[0][0][0])
    header, vert_fmt, byte_order = _header_bytefmt_byteorder(
        'MultiPolygon', num_dims, big_endian, meta
    )
    inner_type = _WKB[_INT_TO_DIM_LABEL.get(num_dims)]['Polygon']
    if big_endian:
        inner_header = BIG_ENDIAN + inner_type
    else:
        inner_header = LITTLE_ENDIAN + inner_type[::-1]
    int_fmt = '%sl' % byte_order
    parts = [header, struct.pack(int_fmt, len(polygons))]
    for polygon in polygons:
        # Each member polygon carries its own endian byte and type header.
        parts.append(inner_header)
        parts.append(struct.pack(int_fmt, len(polygon)))
        for ring in polygon:
            parts.append(struct.pack(int_fmt, len(ring)))
            parts.extend(struct.pack(vert_fmt, *v) for v in ring)
    return b''.join(parts)
def _dump_geometrycollection(obj, big_endian, meta):
    """Serialize a GeoJSON-style GeometryCollection `dict` to WKB.

    The collection's dimensionality is inferred from its first member
    geometry; all members are assumed to share it.

    :raises ValueError: if the first member's WKB type header is not
        recognized.
    """
    # TODO: handle empty collections
    geoms = obj['geometries']
    first_geom = geoms[0]
    rest = geoms[1:]
    first_wkb = dumps(first_geom, big_endian=big_endian)
    first_type = first_wkb[1:5]
    if not big_endian:
        first_type = first_type[::-1]
    if first_type in WKB_2D.values():
        num_dims = 2
    elif first_type in WKB_Z.values():
        num_dims = 3
    elif first_type in WKB_M.values():
        # BUGFIX: XYM members previously matched no branch, leaving
        # num_dims unbound (NameError).  Treat M as the 3-component
        # case; NOTE(review) the collection header will then carry the
        # Z label rather than M — confirm desired behavior.
        num_dims = 3
    elif first_type in WKB_ZM.values():
        num_dims = 4
    else:
        raise ValueError('Unrecognized WKB type header: %r' % first_type)
    wkb_string, byte_fmt, byte_order = _header_bytefmt_byteorder(
        'GeometryCollection', num_dims, big_endian, meta
    )
    # Append the number of member geometries, then each member's WKB.
    wkb_string += struct.pack('%sl' % byte_order, len(geoms))
    wkb_string += first_wkb
    for geom in rest:
        wkb_string += dumps(geom, big_endian=big_endian)
    return wkb_string
def _load_point_esri(big_endian, type_bytes, data_bytes, wkid):
    """
    Convert byte data for a Point to an EsriJSON `dict`.

    :param bool big_endian:
        If `True`, interpret the ``data_bytes`` in big endian order, else
        little endian.
    :param str type_bytes:
        4-byte integer (as a binary string) indicating the geometry type
        (Point) and the dimensions (2D, Z, M or ZM). For consistency,
        these bytes are expected to always be in big endian order,
        regardless of the value of ``big_endian``.
    :param str data_bytes:
        Coordinate data in a binary string (consumed as an iterator).
    :param int wkid:
        Spatial-reference id attached to the result.
    :returns:
        EsriJSON `dict` representing the Point geometry.  Note that
        only 'x' and 'y' are emitted; any Z/M components read from the
        input are dropped.
    """
    endian_token = '>' if big_endian else '<'
    if type_bytes == WKB_2D['Point']:
        coords = struct.unpack('%sdd' % endian_token,
                               as_bin_str(take(16, data_bytes)))
    elif type_bytes == WKB_Z['Point']:
        coords = struct.unpack('%sddd' % endian_token,
                               as_bin_str(take(24, data_bytes)))
    elif type_bytes == WKB_M['Point']:
        # NOTE: The use of XYM types geometries is quite rare. In the interest
        # of removing ambiguity, we will treat all XYM geometries as XYZM when
        # generate the GeoJSON. A default Z value of `0.0` will be given in
        # this case.
        coords = list(struct.unpack('%sddd' % endian_token,
                                    as_bin_str(take(24, data_bytes))))
        coords.insert(2, 0.0)
    elif type_bytes == WKB_ZM['Point']:
        coords = struct.unpack('%sdddd' % endian_token,
                               as_bin_str(take(32, data_bytes)))
    return { 'x': coords[0], 'y': coords[1],
            "spatialReference" : {'wkid' : wkid}}
def _load_linestring_esri(big_endian, type_bytes, data_bytes, wkid):
    """Convert WKB LineString payload bytes to an EsriJSON polyline `dict`.

    Parameters mirror :func:`_load_point_esri`.  ``data_bytes`` is
    consumed sequentially, so statement order here is load-bearing.
    XYM input gets a default Z of 0.0 inserted (see _load_point_esri).
    """
    endian_token = '>' if big_endian else '<'
    is_m = False
    # Determine dimensionality from the (big-endian) type field.
    if type_bytes in WKB_2D.values():
        num_dims = 2
    elif type_bytes in WKB_Z.values():
        num_dims = 3
    elif type_bytes in WKB_M.values():
        num_dims = 3
        is_m = True
    elif type_bytes in WKB_ZM.values():
        num_dims = 4
    coords = []
    # 4-byte vertex count precedes the coordinate data.
    [num_verts] = struct.unpack('%sl' % endian_token,
                                as_bin_str(take(4, data_bytes)))
    while True:
        # Each vertex is num_dims consecutive 8-byte doubles.
        vert_wkb = as_bin_str(take(8 * num_dims, data_bytes))
        fmt = '%s' + 'd' * num_dims
        vert = list(struct.unpack(fmt % endian_token, vert_wkb))
        if is_m:
            vert.insert(2, 0.0)
        coords.append(vert)
        if len(coords) == num_verts:
            break
    # Esri polylines use 'paths': a list of vertex lists.
    return dict(paths=[list(coords)], spatialReference={'wkid' : wkid})
def _load_polygon_esri(big_endian, type_bytes, data_bytes, wkid):
    """Convert WKB Polygon payload bytes to an EsriJSON polygon `dict`.

    Parameters mirror :func:`_load_point_esri`.  ``data_bytes`` is
    consumed sequentially, so statement order here is load-bearing.
    XYM input gets a default Z of 0.0 inserted (see _load_point_esri).
    """
    endian_token = '>' if big_endian else '<'
    data_bytes = iter(data_bytes)
    is_m = False
    # Determine dimensionality from the (big-endian) type field.
    if type_bytes in WKB_2D.values():
        num_dims = 2
    elif type_bytes in WKB_Z.values():
        num_dims = 3
    elif type_bytes in WKB_M.values():
        num_dims = 3
        is_m = True
    elif type_bytes in WKB_ZM.values():
        num_dims = 4
    coords = []
    # 4-byte ring count precedes the ring data.
    [num_rings] = struct.unpack('%sl' % endian_token,
                                as_bin_str(take(4, data_bytes)))
    while True:
        ring = []
        # Each ring: 4-byte vertex count, then that many vertices.
        [num_verts] = struct.unpack('%sl' % endian_token,
                                    as_bin_str(take(4, data_bytes)))
        verts_wkb = as_bin_str(take(8 * num_verts * num_dims, data_bytes))
        # Regroup the raw bytes into 8-byte doubles, then into vertices.
        verts = block_splitter(verts_wkb, 8)
        verts = (b''.join(bytes([y]) for y in x) for x in verts)
        for vert_wkb in block_splitter(verts, num_dims):
            values = [struct.unpack('%sd' % endian_token, x)[0]
                      for x in vert_wkb]
            if is_m:
                values.insert(2, 0.0)
            ring.append(values)
        coords.append(ring)
        if len(coords) == num_rings:
            break
    return dict(rings=coords, spatialReference={'wkid' : wkid})
def _load_multipoint_esri(big_endian, type_bytes, data_bytes, wkid):
    """Convert WKB MultiPoint payload bytes to an EsriJSON `dict`.

    Parameters mirror :func:`_load_point_esri`.  Each member point in
    the WKB stream carries its own endian byte and type header, which
    are validated against the collection's dimensionality.
    """
    endian_token = '>' if big_endian else '<'
    data_bytes = iter(data_bytes)
    is_m = False
    # Determine dimensionality from the (big-endian) type field.
    if type_bytes in WKB_2D.values():
        num_dims = 2
    elif type_bytes in WKB_Z.values():
        num_dims = 3
    elif type_bytes in WKB_M.values():
        num_dims = 3
        is_m = True
    elif type_bytes in WKB_ZM.values():
        num_dims = 4
    if is_m:
        dim = 'M'
    else:
        dim = _INT_TO_DIM_LABEL[num_dims]
    coords = []
    # 4-byte point count precedes the member points.
    [num_points] = struct.unpack('%sl' % endian_token,
                                 as_bin_str(take(4, data_bytes)))
    while True:
        # Per-member header: 1 endian byte + 4 type bytes.
        point_endian = as_bin_str(take(1, data_bytes))
        point_type = as_bin_str(take(4, data_bytes))
        values = struct.unpack('%s%s' % (endian_token, 'd' * num_dims),
                               as_bin_str(take(8 * num_dims, data_bytes)))
        values = list(values)
        if is_m:
            values.insert(2, 0.0)
        # Sanity-check the member header against the collection header.
        if big_endian:
            assert point_endian == BIG_ENDIAN
            assert point_type == _WKB[dim]['Point']
        else:
            assert point_endian == LITTLE_ENDIAN
            assert point_type[::-1] == _WKB[dim]['Point']
        coords.append(list(values))
        if len(coords) == num_points:
            break
    return dict(points=coords, spatialReference={'wkid' : wkid})
def _load_multilinestring_esri(big_endian, type_bytes, data_bytes, wkid):
    """Convert WKB MultiLineString payload bytes to an EsriJSON `dict`.

    Parameters mirror :func:`_load_point_esri`.  Each member linestring
    carries its own endian byte and type header, which are validated
    against the collection's dimensionality.
    """
    endian_token = '>' if big_endian else '<'
    data_bytes = iter(data_bytes)
    is_m = False
    # Determine dimensionality from the (big-endian) type field.
    if type_bytes in WKB_2D.values():
        num_dims = 2
    elif type_bytes in WKB_Z.values():
        num_dims = 3
    elif type_bytes in WKB_M.values():
        num_dims = 3
        is_m = True
    elif type_bytes in WKB_ZM.values():
        num_dims = 4
    if is_m:
        dim = 'M'
    else:
        dim = _INT_TO_DIM_LABEL[num_dims]
    # 4-byte linestring count precedes the member linestrings.
    [num_ls] = struct.unpack('%sl' % endian_token,
                             as_bin_str(take(4, data_bytes)))
    coords = []
    while True:
        # Per-member header: 1 endian byte + 4 type bytes; validate it.
        ls_endian = as_bin_str(take(1, data_bytes))
        ls_type = as_bin_str(take(4, data_bytes))
        if big_endian:
            assert ls_endian == BIG_ENDIAN
            assert ls_type == _WKB[dim]['LineString']
        else:
            assert ls_endian == LITTLE_ENDIAN
            assert ls_type[::-1] == _WKB[dim]['LineString']
        [num_verts] = struct.unpack('%sl' % endian_token,
                                    as_bin_str(take(4, data_bytes)))
        # All vertex doubles for this member are read in one unpack.
        num_values = num_dims * num_verts
        values = struct.unpack(endian_token + 'd' * num_values,
                               as_bin_str(take(8 * num_values, data_bytes)))
        values = list(block_splitter(values, num_dims))
        if is_m:
            for v in values:
                v.insert(2, 0.0)
        coords.append(values)
        if len(coords) == num_ls:
            break
    return dict(paths=coords, spatialReference={'wkid' : wkid})
def _load_multipolygon_esri(big_endian, type_bytes, data_bytes, wkid):
    """Convert WKB MultiPolygon payload bytes to an EsriJSON `dict`.

    Parameters mirror :func:`_load_point_esri`.  Each member polygon
    carries its own endian byte and type header, which are validated
    against the collection's dimensionality.  Only the first ring of
    each polygon is kept in the returned 'rings' list (see the final
    return expression).
    """
    endian_token = '>' if big_endian else '<'
    is_m = False
    # Determine dimensionality from the (big-endian) type field.
    if type_bytes in WKB_2D.values():
        num_dims = 2
    elif type_bytes in WKB_Z.values():
        num_dims = 3
    elif type_bytes in WKB_M.values():
        num_dims = 3
        is_m = True
    elif type_bytes in WKB_ZM.values():
        num_dims = 4
    if is_m:
        dim = 'M'
    else:
        dim = _INT_TO_DIM_LABEL[num_dims]
    # 4-byte polygon count precedes the member polygons.
    [num_polys] = struct.unpack('%sl' % endian_token,
                                as_bin_str(take(4, data_bytes)))
    coords = []
    while True:
        polygon = []
        # Per-member header: 1 endian byte + 4 type bytes; validate it.
        poly_endian = as_bin_str(take(1, data_bytes))
        poly_type = as_bin_str(take(4, data_bytes))
        if big_endian:
            assert poly_endian == BIG_ENDIAN
            assert poly_type == _WKB[dim]['Polygon']
        else:
            assert poly_endian == LITTLE_ENDIAN
            assert poly_type[::-1] == _WKB[dim]['Polygon']
        [num_rings] = struct.unpack('%sl' % endian_token,
                                    as_bin_str(take(4, data_bytes)))
        for _ in range(num_rings):
            ring = []
            [num_verts] = struct.unpack('%sl' % endian_token,
                                        as_bin_str(take(4, data_bytes)))
            for _ in range(num_verts):
                vert_wkb = as_bin_str(take(8 * num_dims, data_bytes))
                fmt = '%s' + 'd' * num_dims
                vert = list(struct.unpack(fmt % endian_token, vert_wkb))
                if is_m:
                    vert.insert(2, 0.0)
                ring.append(vert)
            polygon.append(ring)
        coords.append(polygon)
        if len(coords) == num_polys:
            break
    # NOTE(review): coord[0] keeps only the exterior ring of each member
    # polygon — interior rings (holes) are dropped.  Confirm intended.
    return dict(rings=[coord[0] for coord in coords],
                spatialReference={'wkid' : wkid})
def _check_dimensionality(geom, num_dims):
def first_geom(gc):
for g in gc['geometries']:
if not g['type'] == 'GeometryCollection':
return g
first_vert = {
'Point': lambda x: x['coordinates'],
'LineString': lambda x: x['coordinates'][0],
'Polygon': lambda x: x['coordinates'][0][0],
'MultiLineString': lambda x: x['coordinates'][0][0],
'MultiPolygon': lambda x: x['coordinates'][0][0][0],
'GeometryCollection': first_geom,
}
if not len(first_vert[geom['type']](geom)) == num_dims:
error = 'Cannot mix dimensionality in a geometry'
raise Exception(error)
def _load_geometrycollection(big_endian, type_bytes, data_bytes):
    """Convert WKB GeometryCollection payload bytes to a GeoJSON `dict`.

    Each member geometry is parsed recursively via :func:`loads` and
    checked to share the collection's dimensionality.  Unlike the
    ``_load_*_esri`` loaders, this returns a GeoJSON-style dict and
    takes no ``wkid`` parameter.
    """
    endian_token = '>' if big_endian else '<'
    is_m = False
    # Determine dimensionality from the (big-endian) type field.
    # NOTE(review): an unrecognized type_bytes leaves num_dims unbound.
    if type_bytes in WKB_2D.values():
        num_dims = 2
    elif type_bytes in WKB_Z.values():
        num_dims = 3
    elif type_bytes in WKB_M.values():
        num_dims = 3
        is_m = True
    elif type_bytes in WKB_ZM.values():
        num_dims = 4
    geometries = []
    # 4-byte geometry count precedes the member geometries.
    [num_geoms] = struct.unpack('%sl' % endian_token,
                                as_bin_str(take(4, data_bytes)))
    while True:
        # Each member is a complete WKB value (endian byte included).
        geometry = loads(data_bytes)
        if is_m:
            # M members are normalized to 4 components by the loaders.
            _check_dimensionality(geometry, 4)
        else:
            _check_dimensionality(geometry, num_dims)
        # TODO(LB): Add type assertions for the geometry; collections should
        # not mix 2d, 3d, 4d, etc.
        geometries.append(geometry)
        if len(geometries) == num_geoms:
            break
    return dict(type='GeometryCollection', geometries=geometries)
#: Dispatch table: geometry type label -> WKB serializer.
_dumps_registry = {
    'Point': _dump_point,
    'LineString': _dump_linestring,
    'Polygon': _dump_polygon,
    'MultiPoint': _dump_multipoint,
    'MultiLineString': _dump_multilinestring,
    'MultiPolygon': _dump_multipolygon,
    'GeometryCollection': _dump_geometrycollection,
}
#: Dispatch table: geometry type label -> EsriJSON deserializer.
#: NOTE(review): 'GeometryCollection' has no entry here, so `loads`
#: raises for collection input — confirm whether that is intended.
_loads_registry_esri = {
    'Point': _load_point_esri,
    'LineString': _load_linestring_esri,
    'Polygon': _load_polygon_esri,
    'MultiPoint': _load_multipoint_esri,
    'MultiLineString': _load_multilinestring_esri,
    'MultiPolygon': _load_multipolygon_esri
}
| 31.310345 | 124 | 0.615932 | import binascii
import struct
from ._utils import block_splitter
from ._utils import take
from ._utils import as_bin_str
from ._utils import flatten_multi_dim
from itertools import chain
BIG_ENDIAN = b'\x00'
LITTLE_ENDIAN = b'\x01'
SRID_FLAG = b'\x20'
WKB_2D = {
'Point': b'\x00\x00\x00\x01',
'LineString': b'\x00\x00\x00\x02',
'Polygon': b'\x00\x00\x00\x03',
'MultiPoint': b'\x00\x00\x00\x04',
'MultiLineString': b'\x00\x00\x00\x05',
'MultiPolygon': b'\x00\x00\x00\x06',
'GeometryCollection': b'\x00\x00\x00\x07',
}
WKB_Z = {
'Point': b'\x00\x00\x03\xe9',
'LineString': b'\x00\x00\x03\xea',
'Polygon': b'\x00\x00\x03\xeb',
'MultiPoint': b'\x00\x00\x03\xec',
'MultiLineString': b'\x00\x00\x03\xed',
'MultiPolygon': b'\x00\x00\x03\xee',
'GeometryCollection': b'\x00\x00\x03\xef',
}
WKB_M = {
'Point': b'\x00\x00\x07\xd1',
'LineString': b'\x00\x00\x07\xd2',
'Polygon': b'\x00\x00\x07\xd3',
'MultiPoint': b'\x00\x00\x07\xd4',
'MultiLineString': b'\x00\x00\x07\xd5',
'MultiPolygon': b'\x00\x00\x07\xd6',
'GeometryCollection': b'\x00\x00\x07\xd7',
}
WKB_ZM = {
'Point': b'\x00\x00\x0b\xb9',
'LineString': b'\x00\x00\x0b\xba',
'Polygon': b'\x00\x00\x0b\xbb',
'MultiPoint': b'\x00\x00\x0b\xbc',
'MultiLineString': b'\x00\x00\x0b\xbd',
'MultiPolygon': b'\x00\x00\x0b\xbe',
'GeometryCollection': b'\x00\x00\x0b\xbf',
}
_WKB = {
'2D': WKB_2D,
'Z': WKB_Z,
'M': WKB_M,
'ZM': WKB_ZM,
}
_BINARY_TO_GEOM_TYPE = dict(
chain(*((reversed(x) for x in wkb_map.items())
for wkb_map in _WKB.values()))
)
_INT_TO_DIM_LABEL = {2: '2D', 3: 'Z', 4: 'ZM'}
def _get_geom_type(type_bytes):
high_byte = type_bytes[0]
high_byte = bytes([high_byte])
has_srid = high_byte == b'\x20'
if has_srid:
type_bytes = as_bin_str(b'\x00' + type_bytes[1:])
else:
type_bytes = as_bin_str(type_bytes)
geom_type = _BINARY_TO_GEOM_TYPE.get(type_bytes)
return geom_type, type_bytes, has_srid
def dump(obj, dest_file):
dest_file.write(dumps(obj))
def load(source_file, wkid=4326):
return loads(source_file.read(), wkid=wkid)
def dumps(obj, big_endian=False):
def lu_geom(ks):
if 'point' in ks:
return "Point"
elif 'paths' in ks:
return "MultiLineString"
elif 'x' in ks:
return "Point"
elif 'rings' in ks:
return "MultiPolygon"
elif 'points' in ks:
return "MultiPoint"
geom_type = lu_geom(obj.keys())
meta = obj.get('meta', {})
exporter = _dumps_registry.get(geom_type)
if exporter is None:
_unsupported_geom_type(geom_type)
return exporter(obj, big_endian, meta)
def loads(string, wkid=4326):
string = iter(string)
endianness = as_bin_str(take(1, string))
if endianness == BIG_ENDIAN:
big_endian = True
elif endianness == LITTLE_ENDIAN:
big_endian = False
else:
raise ValueError("Invalid endian byte: '0x%s'. Expected 0x00 or 0x01"
% binascii.hexlify(endianness.encode()).decode())
endian_token = '>' if big_endian else '<'
type_bytes = as_bin_str(take(4, string))
if not big_endian:
type_bytes = type_bytes[::-1]
geom_type, type_bytes, has_srid = _get_geom_type(type_bytes)
srid = None
if has_srid:
srid_field = as_bin_str(take(4, string))
[srid] = struct.unpack('%si' % endian_token, srid_field)
registry_esri.get(geom_type)
if importer is None:
_unsupported_geom_type(geom_type)
data_bytes = iter(data_bytes)
result = importer(big_endian, type_bytes, data_bytes, wkid)
if has_srid:
# As mentioned in the docstring above, includeEsriJSONpproaches to
# indicating the SRID.
result['meta'] = {'srid': int(srid)}
result['crs'] = {
'type': 'name',
'properties': {'name': 'EPSG%s' % srid},
}
return result
def _unsupported_geom_type(geom_type):
raise ValueError("Unsupported geometry type '%s'" % geom_type)
# TODO: dont default meta to none
def _header_bytefmt_byteorder(geom_type, num_dims, big_endian, meta=None):
dim = _INT_TO_DIM_LABEL.get(num_dims)
if dim is None:
pass # TODO: raise
type_byte_str = _WKB[dim][geom_type]
srid = meta.get('srid')
if srid is not None:
# Add the srid flag
type_byte_str = SRID_FLAG + type_byte_str[1:]
if big_endian:
header = BIG_ENDIAN
byte_fmt = b'>'
byte_order = '>'
else:
header = LITTLE_ENDIAN
byte_fmt = b'<'
byte_order = '<'
# reverse the byte ordering for little endian
type_byte_str = type_byte_str[::-1]
header += type_byte_str
if srid is not None:
srid = int(srid)
if big_endian:
srid_header = struct.pack('>i', srid)
else:
srid_header = struct.pack('<i', srid)
header += srid_header
byte_fmt += b'd' * num_dims
return header, byte_fmt, byte_order
def _dump_point(obj, big_endian, meta):
coords = [obj['x'], obj['y']]
num_dims = len(coords)
wkb_string, byte_fmt, _ = _header_bytefmt_byteorder(
'Point', num_dims, big_endian, meta
)
wkb_string += struct.pack(byte_fmt, *coords)
return wkb_string
def _dump_linestring(obj, big_endian, meta):
coords = obj['coordinates']
vertex = coords[0]
# Infer the number of dimensions from the first vertex
num_dims = len(vertex)
wkb_string, byte_fmt, byte_order = _header_bytefmt_byteorder(
'LineString', num_dims, big_endian, meta
)
# append number of vertices in linestring
wkb_string += struct.pack('%sl' % byte_order, len(coords))
for vertex in coords:
wkb_string += struct.pack(byte_fmt, *vertex)
return wkb_string
def _dump_polygon(obj, big_endian, meta):
    """Serialize a Polygon (list of rings in ``coordinates``) to WKB bytes."""
    rings = obj['coordinates']
    # dimensionality is inferred from the first vertex of the first ring
    ndims = len(rings[0][0])
    out, vert_fmt, order = _header_bytefmt_byteorder(
        'Polygon', ndims, big_endian, meta
    )
    # ring count, then for each ring: vertex count + packed vertices
    out += struct.pack('%sl' % order, len(rings))
    for ring in rings:
        out += struct.pack('%sl' % order, len(ring))
        for vert in ring:
            out += struct.pack(vert_fmt, *vert)
    return out
def _dump_multipoint(obj, big_endian, meta):
    """Serialize an EsriJSON multipoint (``points`` key) to WKB bytes."""
    verts = obj['points']
    ndims = len(verts[0])
    out, vert_fmt, order = _header_bytefmt_byteorder(
        'MultiPoint', ndims, big_endian, meta
    )
    # every member point carries its own endianness byte + type bytes
    point_type = _WKB[_INT_TO_DIM_LABEL.get(ndims)]['Point']
    if big_endian:
        point_header = BIG_ENDIAN + point_type
    else:
        point_header = LITTLE_ENDIAN + point_type[::-1]
    out += struct.pack('%sl' % order, len(verts))
    for vert in verts:
        out += point_header
        out += struct.pack(vert_fmt, *vert)
    return out
def _dump_multilinestring(obj, big_endian, meta):
    """Serialize an EsriJSON polyline (``paths`` key) to WKB bytes."""
    paths = obj['paths']
    ndims = len(paths[0][0])
    out, vert_fmt, order = _header_bytefmt_byteorder(
        'MultiLineString', ndims, big_endian, meta
    )
    # every member linestring repeats its own endianness + type header
    ls_type = _WKB[_INT_TO_DIM_LABEL.get(ndims)]['LineString']
    if big_endian:
        ls_header = BIG_ENDIAN + ls_type
    else:
        ls_header = LITTLE_ENDIAN + ls_type[::-1]
    # linestring count, then for each: header, vertex count, vertices
    out += struct.pack('%sl' % order, len(paths))
    for path in paths:
        out += ls_header
        out += struct.pack('%sl' % order, len(path))
        for vert in path:
            out += struct.pack(vert_fmt, *vert)
    return out
def _dump_multipolygon(obj, big_endian, meta):
    """Serialize EsriJSON ``rings`` as a one-polygon WKB MultiPolygon."""
    # EsriJSON stores a single set of rings; wrapping it in a list makes
    # it one polygon inside the multipolygon
    polygons = [obj['rings']]
    ndims = len(polygons[0][0][0])
    out, vert_fmt, order = _header_bytefmt_byteorder(
        'MultiPolygon', ndims, big_endian, meta
    )
    # every member polygon repeats its own endianness + type header
    poly_type = _WKB[_INT_TO_DIM_LABEL.get(ndims)]['Polygon']
    if big_endian:
        poly_header = BIG_ENDIAN + poly_type
    else:
        poly_header = LITTLE_ENDIAN + poly_type[::-1]
    # polygon count, then per polygon: header, ring count, rings
    out += struct.pack('%sl' % order, len(polygons))
    for polygon in polygons:
        out += poly_header
        out += struct.pack('%sl' % order, len(polygon))
        for ring in polygon:
            out += struct.pack('%sl' % order, len(ring))
            for vert in ring:
                out += struct.pack(vert_fmt, *vert)
    return out
def _dump_geometrycollection(obj, big_endian, meta):
    """
    Serialize a GeometryCollection to WKB bytes.

    The dimensionality (2d/3d/4d) of the whole collection is inferred by
    dumping the first member geometry and inspecting its WKB type bytes.

    :raises ValueError: if the collection is empty (nothing to sample the
        dimensionality from) or the sampled type bytes are unrecognized.
    """
    geoms = obj['geometries']
    if not geoms:
        # Previously raised an opaque IndexError on geoms[0].
        raise ValueError('Cannot dump an empty GeometryCollection')
    first_geom = geoms[0]
    rest = geoms[1:]

    first_wkb = dumps(first_geom, big_endian=big_endian)
    first_type = first_wkb[1:5]
    if not big_endian:
        first_type = first_type[::-1]

    if first_type in WKB_2D.values():
        num_dims = 2
    elif first_type in WKB_Z.values():
        num_dims = 3
    elif first_type in WKB_ZM.values():
        num_dims = 4
    else:
        # Previously fell through and crashed later with a NameError on
        # num_dims; raise a clear error instead.
        raise ValueError('Unrecognized geometry type bytes: %r' % first_type)

    wkb_string, byte_fmt, byte_order = _header_bytefmt_byteorder(
        'GeometryCollection', num_dims, big_endian, meta
    )
    # append the number of geometries
    wkb_string += struct.pack('%sl' % byte_order, len(geoms))
    # first member was already dumped while sampling dimensionality
    wkb_string += first_wkb
    for geom in rest:
        wkb_string += dumps(geom, big_endian=big_endian)
    return wkb_string
def _load_point_esri(big_endian, type_bytes, data_bytes, wkid):
    """Decode WKB Point payload bytes into an EsriJSON point dict.

    ``data_bytes`` is a byte iterator positioned just past the WKB header;
    ``wkid`` is echoed back as the output's spatialReference.
    """
    endian_token = '>' if big_endian else '<'
    if type_bytes == WKB_2D['Point']:
        # XY: two 8-byte doubles
        coords = struct.unpack('%sdd' % endian_token,
                               as_bin_str(take(16, data_bytes)))
    elif type_bytes == WKB_Z['Point']:
        # XYZ: three 8-byte doubles
        coords = struct.unpack('%sddd' % endian_token,
                               as_bin_str(take(24, data_bytes)))
    elif type_bytes == WKB_M['Point']:
        # NOTE: The use of XYM types geometries is quite rare. In the interest
        # of removing ambiguity, we will treat all XYM geometries as XYZM when
        # generate the GeoJSON. A default Z value of `0.0` will be given in
        # this case.
        coords = list(struct.unpack('%sddd' % endian_token,
                                    as_bin_str(take(24, data_bytes))))
        coords.insert(2, 0.0)
    elif type_bytes == WKB_ZM['Point']:
        # XYZM: four 8-byte doubles
        coords = struct.unpack('%sdddd' % endian_token,
                               as_bin_str(take(32, data_bytes)))
    # NOTE(review): only x and y make it into the EsriJSON output; z/m are
    # read (advancing the iterator) but dropped — confirm this is intended.
    return { 'x': coords[0], 'y': coords[1],
             "spatialReference" : {'wkid' : wkid}}
def _load_linestring_esri(big_endian, type_bytes, data_bytes, wkid):
    """Decode WKB LineString payload bytes into an EsriJSON polyline.

    XYM data is promoted to XYZM with a default Z of 0.0.
    """
    endian_token = '>' if big_endian else '<'
    is_m = False
    # derive the dimensionality from the WKB type bytes
    if type_bytes in WKB_2D.values():
        num_dims = 2
    elif type_bytes in WKB_Z.values():
        num_dims = 3
    elif type_bytes in WKB_M.values():
        num_dims = 3
        is_m = True
    elif type_bytes in WKB_ZM.values():
        num_dims = 4

    coords = []
    # a 4-byte vertex count precedes the packed doubles
    [num_verts] = struct.unpack('%sl' % endian_token,
                                as_bin_str(take(4, data_bytes)))
    while True:
        vert_wkb = as_bin_str(take(8 * num_dims, data_bytes))
        fmt = '%s' + 'd' * num_dims
        vert = list(struct.unpack(fmt % endian_token, vert_wkb))
        if is_m:
            # XYM -> XYZM: inject a default Z of 0.0
            vert.insert(2, 0.0)
        coords.append(vert)
        if len(coords) == num_verts:
            break
    return dict(paths=[list(coords)], spatialReference={'wkid' : wkid})
def _load_polygon_esri(big_endian, type_bytes, data_bytes, wkid):
    """Decode WKB Polygon payload bytes into an EsriJSON polygon dict."""
    endian_token = '>' if big_endian else '<'
    data_bytes = iter(data_bytes)
    is_m = False
    # derive the dimensionality from the WKB type bytes
    if type_bytes in WKB_2D.values():
        num_dims = 2
    elif type_bytes in WKB_Z.values():
        num_dims = 3
    elif type_bytes in WKB_M.values():
        num_dims = 3
        is_m = True
    elif type_bytes in WKB_ZM.values():
        num_dims = 4

    coords = []
    # a 4-byte ring count precedes the ring data
    [num_rings] = struct.unpack('%sl' % endian_token,
                                as_bin_str(take(4, data_bytes)))
    while True:
        ring = []
        # each ring: 4-byte vertex count, then the packed doubles
        [num_verts] = struct.unpack('%sl' % endian_token,
                                    as_bin_str(take(4, data_bytes)))
        verts_wkb = as_bin_str(take(8 * num_verts * num_dims, data_bytes))
        verts = block_splitter(verts_wkb, 8)
        # regroup each 8-byte chunk back into a bytes object
        verts = (b''.join(bytes([y]) for y in x) for x in verts)
        for vert_wkb in block_splitter(verts, num_dims):
            values = [struct.unpack('%sd' % endian_token, x)[0]
                      for x in vert_wkb]
            if is_m:
                # XYM -> XYZM: inject a default Z of 0.0
                values.insert(2, 0.0)
            ring.append(values)
        coords.append(ring)
        if len(coords) == num_rings:
            break
    return dict(rings=coords, spatialReference={'wkid' : wkid})
def _load_multipoint_esri(big_endian, type_bytes, data_bytes, wkid):
    """Decode WKB MultiPoint payload bytes into an EsriJSON multipoint."""
    endian_token = '>' if big_endian else '<'
    data_bytes = iter(data_bytes)
    is_m = False
    # derive the dimensionality from the WKB type bytes
    if type_bytes in WKB_2D.values():
        num_dims = 2
    elif type_bytes in WKB_Z.values():
        num_dims = 3
    elif type_bytes in WKB_M.values():
        num_dims = 3
        is_m = True
    elif type_bytes in WKB_ZM.values():
        num_dims = 4

    if is_m:
        dim = 'M'
    else:
        dim = _INT_TO_DIM_LABEL[num_dims]

    coords = []
    # a 4-byte point count precedes the member points
    [num_points] = struct.unpack('%sl' % endian_token,
                                 as_bin_str(take(4, data_bytes)))
    while True:
        # every member point repeats a 1-byte endianness + 4-byte type header
        point_endian = as_bin_str(take(1, data_bytes))
        point_type = as_bin_str(take(4, data_bytes))
        values = struct.unpack('%s%s' % (endian_token, 'd' * num_dims),
                               as_bin_str(take(8 * num_dims, data_bytes)))
        values = list(values)
        if is_m:
            # XYM -> XYZM: inject a default Z of 0.0
            values.insert(2, 0.0)

        # sanity-check the per-point headers against the outer endianness
        if big_endian:
            assert point_endian == BIG_ENDIAN
            assert point_type == _WKB[dim]['Point']
        else:
            assert point_endian == LITTLE_ENDIAN
            assert point_type[::-1] == _WKB[dim]['Point']

        coords.append(list(values))
        if len(coords) == num_points:
            break

    return dict(points=coords, spatialReference={'wkid' : wkid})
def _load_multilinestring_esri(big_endian, type_bytes, data_bytes, wkid):
    """Decode WKB MultiLineString payload into EsriJSON ``paths``."""
    endian_token = '>' if big_endian else '<'
    data_bytes = iter(data_bytes)
    is_m = False
    # derive the dimensionality from the WKB type bytes
    if type_bytes in WKB_2D.values():
        num_dims = 2
    elif type_bytes in WKB_Z.values():
        num_dims = 3
    elif type_bytes in WKB_M.values():
        num_dims = 3
        is_m = True
    elif type_bytes in WKB_ZM.values():
        num_dims = 4

    if is_m:
        dim = 'M'
    else:
        dim = _INT_TO_DIM_LABEL[num_dims]

    # a 4-byte linestring count precedes the member linestrings
    [num_ls] = struct.unpack('%sl' % endian_token,
                             as_bin_str(take(4, data_bytes)))
    coords = []
    while True:
        # every member linestring repeats a 1-byte endianness + type header
        ls_endian = as_bin_str(take(1, data_bytes))
        ls_type = as_bin_str(take(4, data_bytes))
        if big_endian:
            assert ls_endian == BIG_ENDIAN
            assert ls_type == _WKB[dim]['LineString']
        else:
            assert ls_endian == LITTLE_ENDIAN
            assert ls_type[::-1] == _WKB[dim]['LineString']
        [num_verts] = struct.unpack('%sl' % endian_token,
                                    as_bin_str(take(4, data_bytes)))
        num_values = num_dims * num_verts
        values = struct.unpack(endian_token + 'd' * num_values,
                               as_bin_str(take(8 * num_values, data_bytes)))
        # regroup the flat double sequence into per-vertex chunks
        values = list(block_splitter(values, num_dims))
        if is_m:
            # XYM -> XYZM: inject a default Z of 0.0 for each vertex
            for v in values:
                v.insert(2, 0.0)
        coords.append(values)
        if len(coords) == num_ls:
            break
    return dict(paths=coords, spatialReference={'wkid' : wkid})
def _load_multipolygon_esri(big_endian, type_bytes, data_bytes, wkid):
    """Decode WKB MultiPolygon payload into EsriJSON ``rings``."""
    endian_token = '>' if big_endian else '<'
    is_m = False
    # derive the dimensionality from the WKB type bytes
    if type_bytes in WKB_2D.values():
        num_dims = 2
    elif type_bytes in WKB_Z.values():
        num_dims = 3
    elif type_bytes in WKB_M.values():
        num_dims = 3
        is_m = True
    elif type_bytes in WKB_ZM.values():
        num_dims = 4

    if is_m:
        dim = 'M'
    else:
        dim = _INT_TO_DIM_LABEL[num_dims]

    # a 4-byte polygon count precedes the member polygons
    [num_polys] = struct.unpack('%sl' % endian_token,
                                as_bin_str(take(4, data_bytes)))

    coords = []
    while True:
        polygon = []
        # every member polygon repeats a 1-byte endianness + type header
        poly_endian = as_bin_str(take(1, data_bytes))
        poly_type = as_bin_str(take(4, data_bytes))
        if big_endian:
            assert poly_endian == BIG_ENDIAN
            assert poly_type == _WKB[dim]['Polygon']
        else:
            assert poly_endian == LITTLE_ENDIAN
            assert poly_type[::-1] == _WKB[dim]['Polygon']
        [num_rings] = struct.unpack('%sl' % endian_token,
                                    as_bin_str(take(4, data_bytes)))
        for _ in range(num_rings):
            ring = []
            [num_verts] = struct.unpack('%sl' % endian_token,
                                        as_bin_str(take(4, data_bytes)))
            for _ in range(num_verts):
                vert_wkb = as_bin_str(take(8 * num_dims, data_bytes))
                fmt = '%s' + 'd' * num_dims
                vert = list(struct.unpack(fmt % endian_token, vert_wkb))
                if is_m:
                    # XYM -> XYZM: inject a default Z of 0.0
                    vert.insert(2, 0.0)
                ring.append(vert)
            polygon.append(ring)
        coords.append(polygon)
        if len(coords) == num_polys:
            break

    # NOTE(review): only the first ring of each polygon survives into the
    # EsriJSON output — confirm whether interior rings are intentionally
    # dropped here.
    return dict(rings=[coord[0] for coord in coords],
                spatialReference={'wkid' : wkid})
def _check_dimensionality(geom, num_dims):
def first_geom(gc):
for g in gc['geometries']:
if not g['type'] == 'GeometryCollection':
return g
first_vert = {
'Point': lambda x: x['coordinates'],
'LineString': lambda x: x['coordinates'][0],
'Polygon': lambda x: x['coordinates'][0][0],
'MultiLineString': lambda x: x['coordinates'][0][0],
'MultiPolygon': lambda x: x['coordinates'][0][0][0],
'GeometryCollection': first_geom,
}
if not len(first_vert[geom['type']](geom)) == num_dims:
error = 'Cannot mix dimensionality in a geometry'
raise Exception(error)
def _load_geometrycollection(big_endian, type_bytes, data_bytes):
    """Decode WKB GeometryCollection payload into a GeoJSON dict."""
    endian_token = '>' if big_endian else '<'
    is_m = False
    # derive the collection's dimensionality from the WKB type bytes
    if type_bytes in WKB_2D.values():
        num_dims = 2
    elif type_bytes in WKB_Z.values():
        num_dims = 3
    elif type_bytes in WKB_M.values():
        num_dims = 3
        is_m = True
    elif type_bytes in WKB_ZM.values():
        num_dims = 4

    geometries = []
    # a 4-byte geometry count precedes the member geometries
    [num_geoms] = struct.unpack('%sl' % endian_token,
                                as_bin_str(take(4, data_bytes)))
    while True:
        geometry = loads(data_bytes)
        if is_m:
            # XYM members are promoted to XYZM, hence 4 expected dims
            _check_dimensionality(geometry, 4)
        else:
            _check_dimensionality(geometry, num_dims)
        # TODO(LB): Add type assertions for the geometry; collections should
        # not mix 2d, 3d, 4d, etc.
        geometries.append(geometry)
        if len(geometries) == num_geoms:
            break
    return dict(type='GeometryCollection', geometries=geometries)
# Dispatch table: geometry type name -> WKB serializer function.
_dumps_registry = {
    'Point': _dump_point,
    'LineString': _dump_linestring,
    'Polygon': _dump_polygon,
    'MultiPoint': _dump_multipoint,
    'MultiLineString': _dump_multilinestring,
    'MultiPolygon': _dump_multipolygon,
    'GeometryCollection': _dump_geometrycollection,
}
# Dispatch table: geometry type name -> EsriJSON deserializer function.
# NOTE(review): unlike _dumps_registry there is no 'GeometryCollection'
# entry here — confirm collections are intentionally unsupported for Esri.
_loads_registry_esri = {
    'Point': _load_point_esri,
    'LineString': _load_linestring_esri,
    'Polygon': _load_polygon_esri,
    'MultiPoint': _load_multipoint_esri,
    'MultiLineString': _load_multilinestring_esri,
    'MultiPolygon': _load_multipolygon_esri
}
| true | true |
f715225dc353f1b54d4a6b1bdb2fd62dea0595db | 1,881 | py | Python | alipay/aop/api/domain/ContentPrizeInfoModel.py | articuly/alipay-sdk-python-all | 0259cd28eca0f219b97dac7f41c2458441d5e7a6 | [
"Apache-2.0"
] | null | null | null | alipay/aop/api/domain/ContentPrizeInfoModel.py | articuly/alipay-sdk-python-all | 0259cd28eca0f219b97dac7f41c2458441d5e7a6 | [
"Apache-2.0"
] | null | null | null | alipay/aop/api/domain/ContentPrizeInfoModel.py | articuly/alipay-sdk-python-all | 0259cd28eca0f219b97dac7f41c2458441d5e7a6 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import simplejson as json
from alipay.aop.api.constant.ParamConstants import *
class ContentPrizeInfoModel(object):
    """Alipay API value object describing one prize: id, logo and name."""

    # attribute names shared by the (de)serialization helpers below
    _FIELDS = ('prize_id', 'prize_logo', 'prize_name')

    def __init__(self):
        self._prize_id = None
        self._prize_logo = None
        self._prize_name = None

    @property
    def prize_id(self):
        return self._prize_id

    @prize_id.setter
    def prize_id(self, value):
        self._prize_id = value

    @property
    def prize_logo(self):
        return self._prize_logo

    @prize_logo.setter
    def prize_logo(self, value):
        self._prize_logo = value

    @property
    def prize_name(self):
        return self._prize_name

    @prize_name.setter
    def prize_name(self, value):
        self._prize_name = value

    def to_alipay_dict(self):
        """Serialize the truthy fields into a plain dict for the API payload."""
        params = dict()
        for key in self._FIELDS:
            value = getattr(self, key)
            if not value:
                continue
            if hasattr(value, 'to_alipay_dict'):
                params[key] = value.to_alipay_dict()
            else:
                params[key] = value
        return params

    @staticmethod
    def from_alipay_dict(d):
        """Build a model from a response dict; returns None for falsy input."""
        if not d:
            return None
        o = ContentPrizeInfoModel()
        for key in ContentPrizeInfoModel._FIELDS:
            if key in d:
                setattr(o, key, d[key])
        return o
| 26.492958 | 71 | 0.585327 |
import simplejson as json
from alipay.aop.api.constant.ParamConstants import *
class ContentPrizeInfoModel(object):
    """Alipay API value object for a prize: id, logo URL and display name."""

    def __init__(self):
        # all fields default to None until assigned via the properties
        self._prize_id = None
        self._prize_logo = None
        self._prize_name = None

    @property
    def prize_id(self):
        return self._prize_id

    @prize_id.setter
    def prize_id(self, value):
        self._prize_id = value

    @property
    def prize_logo(self):
        return self._prize_logo

    @prize_logo.setter
    def prize_logo(self, value):
        self._prize_logo = value

    @property
    def prize_name(self):
        return self._prize_name

    @prize_name.setter
    def prize_name(self, value):
        self._prize_name = value

    def to_alipay_dict(self):
        """Serialize non-empty fields to a plain dict for the API payload."""
        params = dict()
        if self.prize_id:
            # nested models serialize themselves; scalars pass through
            if hasattr(self.prize_id, 'to_alipay_dict'):
                params['prize_id'] = self.prize_id.to_alipay_dict()
            else:
                params['prize_id'] = self.prize_id
        if self.prize_logo:
            if hasattr(self.prize_logo, 'to_alipay_dict'):
                params['prize_logo'] = self.prize_logo.to_alipay_dict()
            else:
                params['prize_logo'] = self.prize_logo
        if self.prize_name:
            if hasattr(self.prize_name, 'to_alipay_dict'):
                params['prize_name'] = self.prize_name.to_alipay_dict()
            else:
                params['prize_name'] = self.prize_name
        return params

    @staticmethod
    def from_alipay_dict(d):
        """Build a model from a response dict; returns None for falsy input."""
        if not d:
            return None
        o = ContentPrizeInfoModel()
        if 'prize_id' in d:
            o.prize_id = d['prize_id']
        if 'prize_logo' in d:
            o.prize_logo = d['prize_logo']
        if 'prize_name' in d:
            o.prize_name = d['prize_name']
        return o
| true | true |
f71522b5c9555ec99171925ed4d27c211020afac | 28,605 | py | Python | pandas/io/common.py | stragu/pandas | b8890eb33b40993da00656f16c65070c42429f0d | [
"PSF-2.0",
"Apache-2.0",
"BSD-3-Clause-No-Nuclear-License-2014",
"MIT",
"MIT-0",
"ECL-2.0",
"BSD-3-Clause"
] | null | null | null | pandas/io/common.py | stragu/pandas | b8890eb33b40993da00656f16c65070c42429f0d | [
"PSF-2.0",
"Apache-2.0",
"BSD-3-Clause-No-Nuclear-License-2014",
"MIT",
"MIT-0",
"ECL-2.0",
"BSD-3-Clause"
] | null | null | null | pandas/io/common.py | stragu/pandas | b8890eb33b40993da00656f16c65070c42429f0d | [
"PSF-2.0",
"Apache-2.0",
"BSD-3-Clause-No-Nuclear-License-2014",
"MIT",
"MIT-0",
"ECL-2.0",
"BSD-3-Clause"
] | null | null | null | """Common IO api utilities"""
from __future__ import annotations
import bz2
import codecs
from collections import abc
import dataclasses
import gzip
from io import BufferedIOBase, BytesIO, RawIOBase, StringIO, TextIOWrapper
import mmap
import os
from typing import IO, Any, AnyStr, Dict, List, Mapping, Optional, Tuple, Union, cast
from urllib.parse import (
urljoin,
urlparse as parse_url,
uses_netloc,
uses_params,
uses_relative,
)
import warnings
import zipfile
from pandas._typing import (
Buffer,
CompressionDict,
CompressionOptions,
FileOrBuffer,
FilePathOrBuffer,
StorageOptions,
)
from pandas.compat import get_lzma_file, import_lzma
from pandas.compat._optional import import_optional_dependency
from pandas.core.dtypes.common import is_file_like
lzma = import_lzma()
_VALID_URLS = set(uses_relative + uses_netloc + uses_params)
_VALID_URLS.discard("")
@dataclasses.dataclass
class IOArgs:
    """
    Return value of io/common.py:_get_filepath_or_buffer.

    Note (copy&past from io/parsers):
    filepath_or_buffer can be Union[FilePathOrBuffer, s3fs.S3File, gcsfs.GCSFile]
    though mypy handling of conditional imports is difficult.
    See https://github.com/python/mypy/issues/1297
    """

    filepath_or_buffer: FileOrBuffer  # resolved path/handle after URL & fsspec handling
    encoding: str  # normalized encoding name (lowercase, '-' separators)
    mode: str  # possibly binary-forced mode for URL/fsspec inputs
    compression: CompressionDict  # normalized {'method': ..., **extra_args}
    should_close: bool = False  # True when _get_filepath_or_buffer opened the handle
@dataclasses.dataclass
class IOHandles:
    """
    Container for the buffers produced by io/common.py:get_handle.

    Usable as a context manager: leaving the ``with`` block closes every
    buffer that get_handle created, while user-supplied buffers survive
    (a wrapping TextIOWrapper is flushed and detached instead of closed).

    handle: The file handle to be used.
    created_handles: All file handles that are created by get_handle
    is_wrapped: Whether a TextIOWrapper needs to be detached.
    """

    handle: Buffer
    compression: CompressionDict
    created_handles: List[Buffer] = dataclasses.field(default_factory=list)
    is_wrapped: bool = False
    is_mmap: bool = False

    def close(self) -> None:
        """
        Close every buffer created by get_handle.

        A TextIOWrapper layered over a user-created buffer is flushed and
        detached (not closed), so the user's underlying buffer stays open.
        """
        if self.is_wrapped:
            assert isinstance(self.handle, TextIOWrapper)
            self.handle.flush()
            self.handle.detach()
            self.created_handles.remove(self.handle)
        try:
            for created in self.created_handles:
                created.close()
        except (OSError, ValueError):
            # best-effort cleanup: a handle may already be closed/invalid
            pass
        self.created_handles = []
        self.is_wrapped = False

    def __enter__(self) -> IOHandles:
        return self

    def __exit__(self, *args: Any) -> None:
        self.close()
def is_url(url) -> bool:
    """
    Check whether ``url`` is a string carrying a recognized URL protocol.

    Parameters
    ----------
    url : str or unicode

    Returns
    -------
    isurl : bool
        If `url` has a valid protocol return True otherwise False.
    """
    return isinstance(url, str) and parse_url(url).scheme in _VALID_URLS
def _expand_user(filepath_or_buffer: FileOrBuffer[AnyStr]) -> FileOrBuffer[AnyStr]:
"""
Return the argument with an initial component of ~ or ~user
replaced by that user's home directory.
Parameters
----------
filepath_or_buffer : object to be converted if possible
Returns
-------
expanded_filepath_or_buffer : an expanded filepath or the
input if not expandable
"""
if isinstance(filepath_or_buffer, str):
return os.path.expanduser(filepath_or_buffer)
return filepath_or_buffer
def validate_header_arg(header) -> None:
    """
    Reject boolean ``header`` arguments with a helpful TypeError.

    ``header=True``/``False`` is ambiguous; callers must pass ``None``,
    an int, or a list-like of ints instead.
    """
    if not isinstance(header, bool):
        return
    raise TypeError(
        "Passing a bool to header is invalid. Use header=None for no header or "
        "header=int or list-like of ints to specify "
        "the row(s) making up the column names"
    )
def stringify_path(
    filepath_or_buffer: FilePathOrBuffer[AnyStr],
    convert_file_like: bool = False,
) -> FileOrBuffer[AnyStr]:
    """
    Attempt to convert a path-like object to a string.

    Parameters
    ----------
    filepath_or_buffer : object to be converted
    convert_file_like : bool, default False
        Whether file-like objects should also go through the conversion
        (used when only a filename needs to be sniffed, e.g. by
        infer_compression).

    Returns
    -------
    str_filepath_or_buffer : maybe a string version of the object

    Notes
    -----
    Objects supporting the fspath protocol (python 3.6+) are coerced
    according to its __fspath__ method.

    Any other object is passed through unchanged, which includes bytes,
    strings, buffers, or anything else that's not even path-like.
    """
    if not convert_file_like and is_file_like(filepath_or_buffer):
        # GH 38125: some fsspec objects implement os.PathLike but have
        # already opened a file; converting them here would open it again
        return cast(FileOrBuffer[AnyStr], filepath_or_buffer)

    if isinstance(filepath_or_buffer, os.PathLike):
        filepath_or_buffer = filepath_or_buffer.__fspath__()
    return _expand_user(filepath_or_buffer)
def urlopen(*args, **kwargs):
    """
    Thin wrapper around ``urllib.request.urlopen``.

    The import happens inside the function because ``urllib.request``
    pulls in a large part of the stdlib; deferring it keeps pandas'
    import time down.
    """
    from urllib.request import urlopen as _urlopen

    return _urlopen(*args, **kwargs)
def is_fsspec_url(url: FilePathOrBuffer) -> bool:
    """
    Return True when ``url`` looks like something fsspec can handle.

    Non-strings and plain HTTP(S) URLs (which are handled by urllib
    instead) are excluded; anything else with a ``://`` separator counts.
    """
    if not isinstance(url, str):
        return False
    if url.startswith(("http://", "https://")):
        return False
    return "://" in url
def _get_filepath_or_buffer(
    filepath_or_buffer: FilePathOrBuffer,
    encoding: str = "utf-8",
    compression: CompressionOptions = None,
    mode: str = "r",
    storage_options: StorageOptions = None,
) -> IOArgs:
    """
    If the filepath_or_buffer is a url, translate and return the buffer.
    Otherwise passthrough.

    Parameters
    ----------
    filepath_or_buffer : a url, filepath (str, py.path.local or pathlib.Path),
                         or buffer
    compression : {{'gzip', 'bz2', 'zip', 'xz', None}}, optional
    encoding : the encoding to use to decode bytes, default is 'utf-8'
    mode : str, optional
    storage_options : dict, optional
        Extra options that make sense for a particular storage connection, e.g.
        host, port, username, password, etc., if using a URL that will
        be parsed by ``fsspec``, e.g., starting "s3://", "gcs://". An error
        will be raised if providing this argument with a local path or
        a file-like buffer. See the fsspec and backend storage implementation
        docs for the set of allowed keys and values

        .. versionadded:: 1.2.0

    .. versionchanged:: 1.2.0

      Returns the dataclass IOArgs.
    """
    filepath_or_buffer = stringify_path(filepath_or_buffer)

    # handle compression dict
    compression_method, compression = get_compression_method(compression)
    compression_method = infer_compression(filepath_or_buffer, compression_method)

    # GH21227 internal compression is not used for non-binary handles.
    if compression_method and hasattr(filepath_or_buffer, "write") and "b" not in mode:
        warnings.warn(
            "compression has no effect when passing a non-binary object as input.",
            RuntimeWarning,
            stacklevel=2,
        )
        compression_method = None

    compression = dict(compression, method=compression_method)

    # uniform encoding names
    if encoding is not None:
        encoding = encoding.replace("_", "-").lower()

    # bz2 and xz do not write the byte order mark for utf-16 and utf-32
    # print a warning when writing such files
    if (
        "w" in mode
        and compression_method in ["bz2", "xz"]
        and encoding in ["utf-16", "utf-32"]
    ):
        warnings.warn(
            f"{compression} will not write the byte order mark for {encoding}",
            UnicodeWarning,
        )

    # Use binary mode when converting path-like objects to file-like objects (fsspec)
    # except when text mode is explicitly requested. The original mode is returned if
    # fsspec is not used.
    fsspec_mode = mode
    if "t" not in fsspec_mode and "b" not in fsspec_mode:
        fsspec_mode += "b"

    if isinstance(filepath_or_buffer, str) and is_url(filepath_or_buffer):
        # TODO: fsspec can also handle HTTP via requests, but leaving this
        # unchanged. using fsspec appears to break the ability to infer if the
        # server responded with gzipped data
        storage_options = storage_options or {}

        # waiting until now for importing to match intended lazy logic of
        # urlopen function defined elsewhere in this module
        import urllib.request

        # assuming storage_options is to be interpreted as headers
        req_info = urllib.request.Request(filepath_or_buffer, headers=storage_options)
        with urlopen(req_info) as req:
            content_encoding = req.headers.get("Content-Encoding", None)
            if content_encoding == "gzip":
                # Override compression based on Content-Encoding header
                compression = {"method": "gzip"}
            # the full response is downloaded up front so the connection can
            # be released before returning
            reader = BytesIO(req.read())
        return IOArgs(
            filepath_or_buffer=reader,
            encoding=encoding,
            compression=compression,
            should_close=True,
            mode=fsspec_mode,
        )

    if is_fsspec_url(filepath_or_buffer):
        assert isinstance(
            filepath_or_buffer, str
        )  # just to appease mypy for this branch
        # two special-case s3-like protocols; these have special meaning in Hadoop,
        # but are equivalent to just "s3" from fsspec's point of view
        # cc #11071
        if filepath_or_buffer.startswith("s3a://"):
            filepath_or_buffer = filepath_or_buffer.replace("s3a://", "s3://")
        if filepath_or_buffer.startswith("s3n://"):
            filepath_or_buffer = filepath_or_buffer.replace("s3n://", "s3://")
        fsspec = import_optional_dependency("fsspec")

        # If botocore is installed we fallback to reading with anon=True
        # to allow reads from public buckets
        err_types_to_retry_with_anon: List[Any] = []
        try:
            import_optional_dependency("botocore")
            from botocore.exceptions import ClientError, NoCredentialsError

            err_types_to_retry_with_anon = [
                ClientError,
                NoCredentialsError,
                PermissionError,
            ]
        except ImportError:
            pass

        try:
            file_obj = fsspec.open(
                filepath_or_buffer, mode=fsspec_mode, **(storage_options or {})
            ).open()
        # GH 34626 Reads from Public Buckets without Credentials needs anon=True
        except tuple(err_types_to_retry_with_anon):
            if storage_options is None:
                storage_options = {"anon": True}
            else:
                # don't mutate user input.
                storage_options = dict(storage_options)
                storage_options["anon"] = True
            file_obj = fsspec.open(
                filepath_or_buffer, mode=fsspec_mode, **(storage_options or {})
            ).open()

        return IOArgs(
            filepath_or_buffer=file_obj,
            encoding=encoding,
            compression=compression,
            should_close=True,
            mode=fsspec_mode,
        )
    elif storage_options:
        # storage_options only make sense for the URL/fsspec branches above
        raise ValueError(
            "storage_options passed with file object or non-fsspec file path"
        )

    if isinstance(filepath_or_buffer, (str, bytes, mmap.mmap)):
        # local path (or raw bytes/mmap): nothing to open here
        return IOArgs(
            filepath_or_buffer=_expand_user(filepath_or_buffer),
            encoding=encoding,
            compression=compression,
            should_close=False,
            mode=mode,
        )

    if not is_file_like(filepath_or_buffer):
        msg = f"Invalid file path or buffer object type: {type(filepath_or_buffer)}"
        raise ValueError(msg)

    # user-supplied file-like object: pass through, caller must not close it
    return IOArgs(
        filepath_or_buffer=filepath_or_buffer,
        encoding=encoding,
        compression=compression,
        should_close=False,
        mode=mode,
    )
def file_path_to_url(path: str) -> str:
"""
converts an absolute native path to a FILE URL.
Parameters
----------
path : a path in native format
Returns
-------
a valid FILE URL
"""
# lazify expensive import (~30ms)
from urllib.request import pathname2url
return urljoin("file:", pathname2url(path))
# Canonical file-name extension for each supported compression method.
_compression_to_extension = {"gzip": ".gz", "bz2": ".bz2", "zip": ".zip", "xz": ".xz"}
def get_compression_method(
compression: CompressionOptions,
) -> Tuple[Optional[str], CompressionDict]:
"""
Simplifies a compression argument to a compression method string and
a mapping containing additional arguments.
Parameters
----------
compression : str or mapping
If string, specifies the compression method. If mapping, value at key
'method' specifies compression method.
Returns
-------
tuple of ({compression method}, Optional[str]
{compression arguments}, Dict[str, Any])
Raises
------
ValueError on mapping missing 'method' key
"""
compression_method: Optional[str]
if isinstance(compression, Mapping):
compression_args = dict(compression)
try:
compression_method = compression_args.pop("method")
except KeyError as err:
raise ValueError("If mapping, compression must have key 'method'") from err
else:
compression_args = {}
compression_method = compression
return compression_method, compression_args
def infer_compression(
    filepath_or_buffer: FilePathOrBuffer, compression: Optional[str]
) -> Optional[str]:
    """
    Get the compression method for filepath_or_buffer. If compression='infer',
    the inferred compression method is returned. Otherwise, the input
    compression method is returned unchanged, unless it's invalid, in which
    case an error is raised.

    Parameters
    ----------
    filepath_or_buffer : str or file handle
        File path or object.
    compression : {'infer', 'gzip', 'bz2', 'zip', 'xz', None}
        If 'infer' and `filepath_or_buffer` is path-like, then detect
        compression from the following extensions: '.gz', '.bz2', '.zip',
        or '.xz' (otherwise no compression).

    Returns
    -------
    string or None

    Raises
    ------
    ValueError on invalid compression specified.
    """
    if compression is None:
        return None

    if compression == "infer":
        # convert path-like objects (e.g. pathlib.Path) to a plain string
        filepath_or_buffer = stringify_path(filepath_or_buffer, convert_file_like=True)
        if not isinstance(filepath_or_buffer, str):
            # a buffer carries no filename to sniff; assume no compression
            return None

        # sniff the compression method from the filename/URL extension
        lowered = filepath_or_buffer.lower()
        for method, extension in _compression_to_extension.items():
            if lowered.endswith(extension):
                return method
        return None

    # an explicit method was given; verify it is one we support
    if compression in _compression_to_extension:
        return compression

    # https://github.com/python/mypy/issues/5492
    # Unsupported operand types for + ("List[Optional[str]]" and "List[str]")
    valid = ["infer", None] + sorted(_compression_to_extension)  # type: ignore[operator]
    raise ValueError(
        f"Unrecognized compression type: {compression}\n"
        f"Valid compression types are {valid}"
    )
def get_handle(
path_or_buf: FilePathOrBuffer,
mode: str,
encoding: Optional[str] = None,
compression: CompressionOptions = None,
memory_map: bool = False,
is_text: bool = True,
errors: Optional[str] = None,
storage_options: StorageOptions = None,
) -> IOHandles:
"""
Get file handle for given path/buffer and mode.
Parameters
----------
path_or_buf : str or file handle
File path or object.
mode : str
Mode to open path_or_buf with.
encoding : str or None
Encoding to use.
compression : str or dict, default None
If string, specifies compression mode. If dict, value at key 'method'
specifies compression mode. Compression mode must be one of {'infer',
'gzip', 'bz2', 'zip', 'xz', None}. If compression mode is 'infer'
and `filepath_or_buffer` is path-like, then detect compression from
the following extensions: '.gz', '.bz2', '.zip', or '.xz' (otherwise
no compression). If dict and compression mode is one of
{'zip', 'gzip', 'bz2'}, or inferred as one of the above,
other entries passed as additional compression options.
.. versionchanged:: 1.0.0
May now be a dict with key 'method' as compression mode
and other keys as compression options if compression
mode is 'zip'.
.. versionchanged:: 1.1.0
Passing compression options as keys in dict is now
supported for compression modes 'gzip' and 'bz2' as well as 'zip'.
memory_map : boolean, default False
See parsers._parser_params for more information.
is_text : boolean, default True
Whether the type of the content passed to the file/buffer is string or
bytes. This is not the same as `"b" not in mode`. If a string content is
passed to a binary file/buffer, a wrapper is inserted.
errors : str, default 'strict'
Specifies how encoding and decoding errors are to be handled.
See the errors argument for :func:`open` for a full list
of options.
storage_options: StorageOptions = None
Passed to _get_filepath_or_buffer
.. versionchanged:: 1.2.0
Returns the dataclass IOHandles
"""
# Windows does not default to utf-8. Set to utf-8 for a consistent behavior
encoding_passed, encoding = encoding, encoding or "utf-8"
# read_csv does not know whether the buffer is opened in binary/text mode
if _is_binary_mode(path_or_buf, mode) and "b" not in mode:
mode += "b"
# open URLs
ioargs = _get_filepath_or_buffer(
path_or_buf,
encoding=encoding,
compression=compression,
mode=mode,
storage_options=storage_options,
)
handle = ioargs.filepath_or_buffer
handles: List[Buffer]
# memory mapping needs to be the first step
handle, memory_map, handles = _maybe_memory_map(
handle, memory_map, ioargs.encoding, ioargs.mode, errors
)
is_path = isinstance(handle, str)
compression_args = dict(ioargs.compression)
compression = compression_args.pop("method")
if compression:
# compression libraries do not like an explicit text-mode
ioargs.mode = ioargs.mode.replace("t", "")
# GZ Compression
if compression == "gzip":
if is_path:
assert isinstance(handle, str)
handle = gzip.GzipFile(
filename=handle,
mode=ioargs.mode,
**compression_args,
)
else:
handle = gzip.GzipFile(
fileobj=handle, # type: ignore[arg-type]
mode=ioargs.mode,
**compression_args,
)
# BZ Compression
elif compression == "bz2":
handle = bz2.BZ2File(
handle, # type: ignore[arg-type]
mode=ioargs.mode,
**compression_args,
)
# ZIP Compression
elif compression == "zip":
handle = _BytesZipFile(handle, ioargs.mode, **compression_args)
if handle.mode == "r":
handles.append(handle)
zip_names = handle.namelist()
if len(zip_names) == 1:
handle = handle.open(zip_names.pop())
elif len(zip_names) == 0:
raise ValueError(f"Zero files found in ZIP file {path_or_buf}")
else:
raise ValueError(
"Multiple files found in ZIP file. "
f"Only one file per ZIP: {zip_names}"
)
# XZ Compression
elif compression == "xz":
handle = get_lzma_file(lzma)(handle, ioargs.mode)
# Unrecognized Compression
else:
msg = f"Unrecognized compression type: {compression}"
raise ValueError(msg)
assert not isinstance(handle, str)
handles.append(handle)
elif isinstance(handle, str):
# Check whether the filename is to be opened in binary mode.
# Binary mode does not support 'encoding' and 'newline'.
if ioargs.encoding and "b" not in ioargs.mode:
if errors is None and encoding_passed is None:
# ignore errors when no encoding is specified
errors = "replace"
# Encoding
handle = open(
handle,
ioargs.mode,
encoding=ioargs.encoding,
errors=errors,
newline="",
)
else:
# Binary mode
handle = open(handle, ioargs.mode)
handles.append(handle)
# Convert BytesIO or file objects passed with an encoding
is_wrapped = False
if is_text and (compression or _is_binary_mode(handle, ioargs.mode)):
handle = TextIOWrapper(
handle, # type: ignore[arg-type]
encoding=ioargs.encoding,
errors=errors,
newline="",
)
handles.append(handle)
# only marked as wrapped when the caller provided a handle
is_wrapped = not (
isinstance(ioargs.filepath_or_buffer, str) or ioargs.should_close
)
handles.reverse() # close the most recently added buffer first
if ioargs.should_close:
assert not isinstance(ioargs.filepath_or_buffer, str)
handles.append(ioargs.filepath_or_buffer)
assert not isinstance(handle, str)
return IOHandles(
handle=handle,
created_handles=handles,
is_wrapped=is_wrapped,
is_mmap=memory_map,
compression=ioargs.compression,
)
# error: Definition of "__exit__" in base class "ZipFile" is incompatible with
# definition in base class "BytesIO" [misc]
# error: Definition of "__enter__" in base class "ZipFile" is incompatible with
# definition in base class "BytesIO" [misc]
# error: Definition of "__enter__" in base class "ZipFile" is incompatible with
# definition in base class "BinaryIO" [misc]
# error: Definition of "__enter__" in base class "ZipFile" is incompatible with
# definition in base class "IO" [misc]
# error: Definition of "read" in base class "ZipFile" is incompatible with
# definition in base class "BytesIO" [misc]
# error: Definition of "read" in base class "ZipFile" is incompatible with
# definition in base class "IO" [misc]
class _BytesZipFile(zipfile.ZipFile, BytesIO): # type: ignore[misc]
"""
Wrapper for standard library class ZipFile and allow the returned file-like
handle to accept byte strings via `write` method.
BytesIO provides attributes of file-like object and ZipFile.writestr writes
bytes strings into a member of the archive.
"""
# GH 17778
def __init__(
self,
file: FilePathOrBuffer,
mode: str,
archive_name: Optional[str] = None,
**kwargs,
):
mode = mode.replace("b", "")
self.archive_name = archive_name
self.multiple_write_buffer: Optional[Union[StringIO, BytesIO]] = None
kwargs_zip: Dict[str, Any] = {"compression": zipfile.ZIP_DEFLATED}
kwargs_zip.update(kwargs)
super().__init__(file, mode, **kwargs_zip) # type: ignore[arg-type]
def write(self, data):
# buffer multiple write calls, write on flush
if self.multiple_write_buffer is None:
self.multiple_write_buffer = (
BytesIO() if isinstance(data, bytes) else StringIO()
)
self.multiple_write_buffer.write(data)
def flush(self) -> None:
# write to actual handle and close write buffer
if self.multiple_write_buffer is None or self.multiple_write_buffer.closed:
return
# ZipFile needs a non-empty string
archive_name = self.archive_name or self.filename or "zip"
with self.multiple_write_buffer:
super().writestr(archive_name, self.multiple_write_buffer.getvalue())
def close(self):
self.flush()
super().close()
@property
def closed(self):
return self.fp is None
class _MMapWrapper(abc.Iterator):
"""
Wrapper for the Python's mmap class so that it can be properly read in
by Python's csv.reader class.
Parameters
----------
f : file object
File object to be mapped onto memory. Must support the 'fileno'
method or have an equivalent attribute
"""
def __init__(self, f: IO):
self.attributes = {}
for attribute in ("seekable", "readable", "writeable"):
if not hasattr(f, attribute):
continue
self.attributes[attribute] = getattr(f, attribute)()
self.mmap = mmap.mmap(f.fileno(), 0, access=mmap.ACCESS_READ)
def __getattr__(self, name: str):
if name in self.attributes:
return lambda: self.attributes[name]
return getattr(self.mmap, name)
def __iter__(self) -> _MMapWrapper:
return self
def __next__(self) -> str:
newbytes = self.mmap.readline()
# readline returns bytes, not str, but Python's CSV reader
# expects str, so convert the output to str before continuing
newline = newbytes.decode("utf-8")
# mmap doesn't raise if reading past the allocated
# data but instead returns an empty string, so raise
# if that is returned
if newline == "":
raise StopIteration
return newline
def _maybe_memory_map(
handle: FileOrBuffer,
memory_map: bool,
encoding: str,
mode: str,
errors: Optional[str],
) -> Tuple[FileOrBuffer, bool, List[Buffer]]:
"""Try to memory map file/buffer."""
handles: List[Buffer] = []
memory_map &= hasattr(handle, "fileno") or isinstance(handle, str)
if not memory_map:
return handle, memory_map, handles
# need to open the file first
if isinstance(handle, str):
if encoding and "b" not in mode:
# Encoding
handle = open(handle, mode, encoding=encoding, errors=errors, newline="")
else:
# Binary mode
handle = open(handle, mode)
handles.append(handle)
try:
wrapped = cast(mmap.mmap, _MMapWrapper(handle)) # type: ignore[arg-type]
handle.close()
handles.remove(handle)
handles.append(wrapped)
handle = wrapped
except Exception:
# we catch any errors that may have occurred
# because that is consistent with the lower-level
# functionality of the C engine (pd.read_csv), so
# leave the file handler as is then
memory_map = False
return handle, memory_map, handles
def file_exists(filepath_or_buffer: FilePathOrBuffer) -> bool:
"""Test whether file exists."""
exists = False
filepath_or_buffer = stringify_path(filepath_or_buffer)
if not isinstance(filepath_or_buffer, str):
return exists
try:
exists = os.path.exists(filepath_or_buffer)
# gh-5874: if the filepath is too long will raise here
except (TypeError, ValueError):
pass
return exists
def _is_binary_mode(handle: FilePathOrBuffer, mode: str) -> bool:
"""Whether the handle is opened in binary mode"""
# classes that expect string but have 'b' in mode
text_classes = (codecs.StreamReaderWriter,)
if isinstance(handle, text_classes):
return False
# classes that expect bytes
binary_classes = (BufferedIOBase, RawIOBase)
return isinstance(handle, binary_classes) or "b" in getattr(handle, "mode", mode)
| 32.917146 | 87 | 0.635833 | from __future__ import annotations
import bz2
import codecs
from collections import abc
import dataclasses
import gzip
from io import BufferedIOBase, BytesIO, RawIOBase, StringIO, TextIOWrapper
import mmap
import os
from typing import IO, Any, AnyStr, Dict, List, Mapping, Optional, Tuple, Union, cast
from urllib.parse import (
urljoin,
urlparse as parse_url,
uses_netloc,
uses_params,
uses_relative,
)
import warnings
import zipfile
from pandas._typing import (
Buffer,
CompressionDict,
CompressionOptions,
FileOrBuffer,
FilePathOrBuffer,
StorageOptions,
)
from pandas.compat import get_lzma_file, import_lzma
from pandas.compat._optional import import_optional_dependency
from pandas.core.dtypes.common import is_file_like
lzma = import_lzma()
_VALID_URLS = set(uses_relative + uses_netloc + uses_params)
_VALID_URLS.discard("")
@dataclasses.dataclass
class IOArgs:
filepath_or_buffer: FileOrBuffer
encoding: str
mode: str
compression: CompressionDict
should_close: bool = False
@dataclasses.dataclass
class IOHandles:
handle: Buffer
compression: CompressionDict
created_handles: List[Buffer] = dataclasses.field(default_factory=list)
is_wrapped: bool = False
is_mmap: bool = False
def close(self) -> None:
if self.is_wrapped:
assert isinstance(self.handle, TextIOWrapper)
self.handle.flush()
self.handle.detach()
self.created_handles.remove(self.handle)
try:
for handle in self.created_handles:
handle.close()
except (OSError, ValueError):
pass
self.created_handles = []
self.is_wrapped = False
def __enter__(self) -> IOHandles:
return self
def __exit__(self, *args: Any) -> None:
self.close()
def is_url(url) -> bool:
if not isinstance(url, str):
return False
return parse_url(url).scheme in _VALID_URLS
def _expand_user(filepath_or_buffer: FileOrBuffer[AnyStr]) -> FileOrBuffer[AnyStr]:
if isinstance(filepath_or_buffer, str):
return os.path.expanduser(filepath_or_buffer)
return filepath_or_buffer
def validate_header_arg(header) -> None:
if isinstance(header, bool):
raise TypeError(
"Passing a bool to header is invalid. Use header=None for no header or "
"header=int or list-like of ints to specify "
"the row(s) making up the column names"
)
def stringify_path(
filepath_or_buffer: FilePathOrBuffer[AnyStr],
convert_file_like: bool = False,
) -> FileOrBuffer[AnyStr]:
if not convert_file_like and is_file_like(filepath_or_buffer):
return cast(FileOrBuffer[AnyStr], filepath_or_buffer)
if isinstance(filepath_or_buffer, os.PathLike):
filepath_or_buffer = filepath_or_buffer.__fspath__()
return _expand_user(filepath_or_buffer)
def urlopen(*args, **kwargs):
import urllib.request
return urllib.request.urlopen(*args, **kwargs)
def is_fsspec_url(url: FilePathOrBuffer) -> bool:
return (
isinstance(url, str)
and "://" in url
and not url.startswith(("http://", "https://"))
)
def _get_filepath_or_buffer(
filepath_or_buffer: FilePathOrBuffer,
encoding: str = "utf-8",
compression: CompressionOptions = None,
mode: str = "r",
storage_options: StorageOptions = None,
) -> IOArgs:
filepath_or_buffer = stringify_path(filepath_or_buffer)
compression_method, compression = get_compression_method(compression)
compression_method = infer_compression(filepath_or_buffer, compression_method)
if compression_method and hasattr(filepath_or_buffer, "write") and "b" not in mode:
warnings.warn(
"compression has no effect when passing a non-binary object as input.",
RuntimeWarning,
stacklevel=2,
)
compression_method = None
compression = dict(compression, method=compression_method)
if encoding is not None:
encoding = encoding.replace("_", "-").lower()
if (
"w" in mode
and compression_method in ["bz2", "xz"]
and encoding in ["utf-16", "utf-32"]
):
warnings.warn(
f"{compression} will not write the byte order mark for {encoding}",
UnicodeWarning,
)
fsspec_mode = mode
if "t" not in fsspec_mode and "b" not in fsspec_mode:
fsspec_mode += "b"
if isinstance(filepath_or_buffer, str) and is_url(filepath_or_buffer):
storage_options = storage_options or {}
import urllib.request
req_info = urllib.request.Request(filepath_or_buffer, headers=storage_options)
with urlopen(req_info) as req:
content_encoding = req.headers.get("Content-Encoding", None)
if content_encoding == "gzip":
compression = {"method": "gzip"}
reader = BytesIO(req.read())
return IOArgs(
filepath_or_buffer=reader,
encoding=encoding,
compression=compression,
should_close=True,
mode=fsspec_mode,
)
if is_fsspec_url(filepath_or_buffer):
assert isinstance(
filepath_or_buffer, str
)
# cc #11071
if filepath_or_buffer.startswith("s3a://"):
filepath_or_buffer = filepath_or_buffer.replace("s3a://", "s3://")
if filepath_or_buffer.startswith("s3n://"):
filepath_or_buffer = filepath_or_buffer.replace("s3n://", "s3://")
fsspec = import_optional_dependency("fsspec")
# If botocore is installed we fallback to reading with anon=True
# to allow reads from public buckets
err_types_to_retry_with_anon: List[Any] = []
try:
import_optional_dependency("botocore")
from botocore.exceptions import ClientError, NoCredentialsError
err_types_to_retry_with_anon = [
ClientError,
NoCredentialsError,
PermissionError,
]
except ImportError:
pass
try:
file_obj = fsspec.open(
filepath_or_buffer, mode=fsspec_mode, **(storage_options or {})
).open()
# GH 34626 Reads from Public Buckets without Credentials needs anon=True
except tuple(err_types_to_retry_with_anon):
if storage_options is None:
storage_options = {"anon": True}
else:
# don't mutate user input.
storage_options = dict(storage_options)
storage_options["anon"] = True
file_obj = fsspec.open(
filepath_or_buffer, mode=fsspec_mode, **(storage_options or {})
).open()
return IOArgs(
filepath_or_buffer=file_obj,
encoding=encoding,
compression=compression,
should_close=True,
mode=fsspec_mode,
)
elif storage_options:
raise ValueError(
"storage_options passed with file object or non-fsspec file path"
)
if isinstance(filepath_or_buffer, (str, bytes, mmap.mmap)):
return IOArgs(
filepath_or_buffer=_expand_user(filepath_or_buffer),
encoding=encoding,
compression=compression,
should_close=False,
mode=mode,
)
if not is_file_like(filepath_or_buffer):
msg = f"Invalid file path or buffer object type: {type(filepath_or_buffer)}"
raise ValueError(msg)
return IOArgs(
filepath_or_buffer=filepath_or_buffer,
encoding=encoding,
compression=compression,
should_close=False,
mode=mode,
)
def file_path_to_url(path: str) -> str:
from urllib.request import pathname2url
return urljoin("file:", pathname2url(path))
_compression_to_extension = {"gzip": ".gz", "bz2": ".bz2", "zip": ".zip", "xz": ".xz"}
def get_compression_method(
compression: CompressionOptions,
) -> Tuple[Optional[str], CompressionDict]:
compression_method: Optional[str]
if isinstance(compression, Mapping):
compression_args = dict(compression)
try:
compression_method = compression_args.pop("method")
except KeyError as err:
raise ValueError("If mapping, compression must have key 'method'") from err
else:
compression_args = {}
compression_method = compression
return compression_method, compression_args
def infer_compression(
filepath_or_buffer: FilePathOrBuffer, compression: Optional[str]
) -> Optional[str]:
if compression is None:
return None
if compression == "infer":
filepath_or_buffer = stringify_path(filepath_or_buffer, convert_file_like=True)
if not isinstance(filepath_or_buffer, str):
return None
for compression, extension in _compression_to_extension.items():
if filepath_or_buffer.lower().endswith(extension):
return compression
return None
if compression in _compression_to_extension:
return compression
# https://github.com/python/mypy/issues/5492
# Unsupported operand types for + ("List[Optional[str]]" and "List[str]")
valid = ["infer", None] + sorted(
_compression_to_extension
) # type: ignore[operator]
msg = (
f"Unrecognized compression type: {compression}\n"
f"Valid compression types are {valid}"
)
raise ValueError(msg)
def get_handle(
path_or_buf: FilePathOrBuffer,
mode: str,
encoding: Optional[str] = None,
compression: CompressionOptions = None,
memory_map: bool = False,
is_text: bool = True,
errors: Optional[str] = None,
storage_options: StorageOptions = None,
) -> IOHandles:
# Windows does not default to utf-8. Set to utf-8 for a consistent behavior
encoding_passed, encoding = encoding, encoding or "utf-8"
# read_csv does not know whether the buffer is opened in binary/text mode
if _is_binary_mode(path_or_buf, mode) and "b" not in mode:
mode += "b"
# open URLs
ioargs = _get_filepath_or_buffer(
path_or_buf,
encoding=encoding,
compression=compression,
mode=mode,
storage_options=storage_options,
)
handle = ioargs.filepath_or_buffer
handles: List[Buffer]
# memory mapping needs to be the first step
handle, memory_map, handles = _maybe_memory_map(
handle, memory_map, ioargs.encoding, ioargs.mode, errors
)
is_path = isinstance(handle, str)
compression_args = dict(ioargs.compression)
compression = compression_args.pop("method")
if compression:
# compression libraries do not like an explicit text-mode
ioargs.mode = ioargs.mode.replace("t", "")
# GZ Compression
if compression == "gzip":
if is_path:
assert isinstance(handle, str)
handle = gzip.GzipFile(
filename=handle,
mode=ioargs.mode,
**compression_args,
)
else:
handle = gzip.GzipFile(
fileobj=handle, # type: ignore[arg-type]
mode=ioargs.mode,
**compression_args,
)
# BZ Compression
elif compression == "bz2":
handle = bz2.BZ2File(
handle, # type: ignore[arg-type]
mode=ioargs.mode,
**compression_args,
)
# ZIP Compression
elif compression == "zip":
handle = _BytesZipFile(handle, ioargs.mode, **compression_args)
if handle.mode == "r":
handles.append(handle)
zip_names = handle.namelist()
if len(zip_names) == 1:
handle = handle.open(zip_names.pop())
elif len(zip_names) == 0:
raise ValueError(f"Zero files found in ZIP file {path_or_buf}")
else:
raise ValueError(
"Multiple files found in ZIP file. "
f"Only one file per ZIP: {zip_names}"
)
# XZ Compression
elif compression == "xz":
handle = get_lzma_file(lzma)(handle, ioargs.mode)
# Unrecognized Compression
else:
msg = f"Unrecognized compression type: {compression}"
raise ValueError(msg)
assert not isinstance(handle, str)
handles.append(handle)
elif isinstance(handle, str):
# Check whether the filename is to be opened in binary mode.
# Binary mode does not support 'encoding' and 'newline'.
if ioargs.encoding and "b" not in ioargs.mode:
if errors is None and encoding_passed is None:
# ignore errors when no encoding is specified
errors = "replace"
# Encoding
handle = open(
handle,
ioargs.mode,
encoding=ioargs.encoding,
errors=errors,
newline="",
)
else:
# Binary mode
handle = open(handle, ioargs.mode)
handles.append(handle)
# Convert BytesIO or file objects passed with an encoding
is_wrapped = False
if is_text and (compression or _is_binary_mode(handle, ioargs.mode)):
handle = TextIOWrapper(
handle, # type: ignore[arg-type]
encoding=ioargs.encoding,
errors=errors,
newline="",
)
handles.append(handle)
# only marked as wrapped when the caller provided a handle
is_wrapped = not (
isinstance(ioargs.filepath_or_buffer, str) or ioargs.should_close
)
handles.reverse() # close the most recently added buffer first
if ioargs.should_close:
assert not isinstance(ioargs.filepath_or_buffer, str)
handles.append(ioargs.filepath_or_buffer)
assert not isinstance(handle, str)
return IOHandles(
handle=handle,
created_handles=handles,
is_wrapped=is_wrapped,
is_mmap=memory_map,
compression=ioargs.compression,
)
# error: Definition of "__exit__" in base class "ZipFile" is incompatible with
# definition in base class "BytesIO" [misc]
# error: Definition of "__enter__" in base class "ZipFile" is incompatible with
# definition in base class "BytesIO" [misc]
# error: Definition of "__enter__" in base class "ZipFile" is incompatible with
# definition in base class "BinaryIO" [misc]
# error: Definition of "__enter__" in base class "ZipFile" is incompatible with
# definition in base class "IO" [misc]
# error: Definition of "read" in base class "ZipFile" is incompatible with
# definition in base class "BytesIO" [misc]
# error: Definition of "read" in base class "ZipFile" is incompatible with
# definition in base class "IO" [misc]
class _BytesZipFile(zipfile.ZipFile, BytesIO): # type: ignore[misc]
# GH 17778
def __init__(
self,
file: FilePathOrBuffer,
mode: str,
archive_name: Optional[str] = None,
**kwargs,
):
mode = mode.replace("b", "")
self.archive_name = archive_name
self.multiple_write_buffer: Optional[Union[StringIO, BytesIO]] = None
kwargs_zip: Dict[str, Any] = {"compression": zipfile.ZIP_DEFLATED}
kwargs_zip.update(kwargs)
super().__init__(file, mode, **kwargs_zip) # type: ignore[arg-type]
def write(self, data):
# buffer multiple write calls, write on flush
if self.multiple_write_buffer is None:
self.multiple_write_buffer = (
BytesIO() if isinstance(data, bytes) else StringIO()
)
self.multiple_write_buffer.write(data)
def flush(self) -> None:
# write to actual handle and close write buffer
if self.multiple_write_buffer is None or self.multiple_write_buffer.closed:
return
# ZipFile needs a non-empty string
archive_name = self.archive_name or self.filename or "zip"
with self.multiple_write_buffer:
super().writestr(archive_name, self.multiple_write_buffer.getvalue())
def close(self):
self.flush()
super().close()
@property
def closed(self):
return self.fp is None
class _MMapWrapper(abc.Iterator):
def __init__(self, f: IO):
self.attributes = {}
for attribute in ("seekable", "readable", "writeable"):
if not hasattr(f, attribute):
continue
self.attributes[attribute] = getattr(f, attribute)()
self.mmap = mmap.mmap(f.fileno(), 0, access=mmap.ACCESS_READ)
def __getattr__(self, name: str):
if name in self.attributes:
return lambda: self.attributes[name]
return getattr(self.mmap, name)
def __iter__(self) -> _MMapWrapper:
return self
def __next__(self) -> str:
newbytes = self.mmap.readline()
# readline returns bytes, not str, but Python's CSV reader
newline = newbytes.decode("utf-8")
# data but instead returns an empty string, so raise
# if that is returned
if newline == "":
raise StopIteration
return newline
def _maybe_memory_map(
handle: FileOrBuffer,
memory_map: bool,
encoding: str,
mode: str,
errors: Optional[str],
) -> Tuple[FileOrBuffer, bool, List[Buffer]]:
handles: List[Buffer] = []
memory_map &= hasattr(handle, "fileno") or isinstance(handle, str)
if not memory_map:
return handle, memory_map, handles
# need to open the file first
if isinstance(handle, str):
if encoding and "b" not in mode:
# Encoding
handle = open(handle, mode, encoding=encoding, errors=errors, newline="")
else:
# Binary mode
handle = open(handle, mode)
handles.append(handle)
try:
wrapped = cast(mmap.mmap, _MMapWrapper(handle)) # type: ignore[arg-type]
handle.close()
handles.remove(handle)
handles.append(wrapped)
handle = wrapped
except Exception:
# we catch any errors that may have occurred
# because that is consistent with the lower-level
# functionality of the C engine (pd.read_csv), so
# leave the file handler as is then
memory_map = False
return handle, memory_map, handles
def file_exists(filepath_or_buffer: FilePathOrBuffer) -> bool:
exists = False
filepath_or_buffer = stringify_path(filepath_or_buffer)
if not isinstance(filepath_or_buffer, str):
return exists
try:
exists = os.path.exists(filepath_or_buffer)
# gh-5874: if the filepath is too long will raise here
except (TypeError, ValueError):
pass
return exists
def _is_binary_mode(handle: FilePathOrBuffer, mode: str) -> bool:
# classes that expect string but have 'b' in mode
text_classes = (codecs.StreamReaderWriter,)
if isinstance(handle, text_classes):
return False
# classes that expect bytes
binary_classes = (BufferedIOBase, RawIOBase)
return isinstance(handle, binary_classes) or "b" in getattr(handle, "mode", mode)
| true | true |
f715230aa02adaf67d5dae2703f0b43403c478f7 | 4,874 | py | Python | pyhanabi/common_utils/helper.py | ravihammond/hanabi-convention-adaptation | 5dafa91742de8e8d5810e8213e0e2771818b2f54 | [
"MIT"
] | 1 | 2022-03-24T19:41:22.000Z | 2022-03-24T19:41:22.000Z | pyhanabi/common_utils/helper.py | ravihammond/hanabi-convention-adaptation | 5dafa91742de8e8d5810e8213e0e2771818b2f54 | [
"MIT"
] | null | null | null | pyhanabi/common_utils/helper.py | ravihammond/hanabi-convention-adaptation | 5dafa91742de8e8d5810e8213e0e2771818b2f54 | [
"MIT"
] | null | null | null | # Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
import os
import random
import numpy as np
import torch
from torch import nn
from typing import Dict
def to_device(data, device):
    """Recursively move tensors to `device`, preserving dict/list structure.

    Args:
        data: a torch.Tensor, or an arbitrarily nested dict/list of them.
        device: target device (e.g. "cpu", "cuda:0", or a torch.device).

    Returns:
        The same structure with every tensor moved to `device`.
        Non-tensor leaves (ints, strings, None, ...) are returned unchanged.
        (The original implementation silently returned None for them.)
    """
    if isinstance(data, torch.Tensor):
        return data.to(device)
    if isinstance(data, dict):
        return {k: to_device(v, device) for k, v in data.items()}
    if isinstance(data, list):
        return [to_device(v, device) for v in data]
    # leaf of an unsupported type: pass it through untouched
    return data
def get_all_files(root, file_extension, contain=None):
    """Recursively collect file paths under `root`.

    Args:
        root: directory to walk.
        file_extension: keep only files ending with this suffix;
            if None, the extension filter is skipped.
        contain: if given, keep only paths containing this substring.
            Note: with an extension the substring is matched against the
            full path; without one it is matched against the filename only
            (kept for backward compatibility).

    Returns:
        List of matching file paths (os.walk order).
    """
    files = []
    for folder, _, fs in os.walk(root):
        for f in fs:
            path = os.path.join(folder, f)
            if file_extension is not None:
                if f.endswith(file_extension):
                    if contain is None or contain in path:
                        files.append(path)
            else:
                # bug fix: original crashed with TypeError when both
                # file_extension and contain were None ("None in f")
                if contain is None or contain in f:
                    files.append(path)
    return files
def flatten(s):
    """Return a flat list containing every non-list leaf of nested list `s`."""
    result = []
    # depth-first traversal with an explicit stack (left-to-right order)
    pending = list(reversed(s))
    while pending:
        item = pending.pop()
        if isinstance(item, list):
            pending.extend(reversed(item))
        else:
            result.append(item)
    return result
def moving_average(data, period):
    """Smooth `data` (a list) with a window of size `period`, edge-padded
    so the output has the same length as the input."""
    # replicate edge values before convolving
    head = [data[0]] * (period // 2)
    tail = data[-period // 2 + 1 :]
    padded = head + data + tail
    kernel = np.ones(period) / period
    return np.convolve(padded, kernel, mode="valid")
def mem2str(num_bytes):
    """Format a non-negative byte count as a human-readable string
    (GB / MB / KB with 3 decimals, or raw bytes below 1 KiB)."""
    assert num_bytes >= 0
    # check the largest unit first
    for power, suffix in ((30, "GB"), (20, "MB"), (10, "KB")):
        threshold = 2 ** power
        if num_bytes >= threshold:
            return "%.3f %s" % (float(num_bytes) / threshold, suffix)
    return "%d bytes" % num_bytes
def sec2str(seconds):
    """Format a duration in seconds as "<H>H <MM>M <SS>S".

    Hours are not wrapped at 24 (e.g. 90061 -> "25H 01M 01S").
    The original had a redundant `% (24 * 3600)` step that had no effect
    on the result; this version uses divmod directly.
    """
    seconds = int(seconds)
    hour, rem = divmod(seconds, 3600)
    minutes, seconds = divmod(rem, 60)
    return "%dH %02dM %02dS" % (hour, minutes, seconds)
def num2str(n):
    """Format a number compactly: plain below 1e3, else "K"/"M" suffixed
    with up to 3 decimals (trailing zeros stripped).

    Bug fix: the original applied `.rstrip("0")` to the plain `str(n)`
    branch as well, so e.g. num2str(100) returned "1" instead of "100".
    Stripping now only applies to the %.3f-formatted K/M values.
    """
    if n < 1e3:
        return str(n)
    if n < 1e6:
        value, unit = n / 1e3, "K"
    else:
        value, unit = n / 1e6, "M"
    s = ("%.3f" % value).rstrip("0").rstrip(".")
    return s + unit
def get_mem_usage():
    """Return a one-line summary of current virtual-memory usage
    (available / used / free), formatted via mem2str."""
    import psutil

    mem = psutil.virtual_memory()
    parts = [
        "available: %s" % mem2str(mem.available),
        "used: %s" % mem2str(mem.used),
        "free: %s" % mem2str(mem.free),
    ]
    return ", ".join(parts)
def flatten_first2dim(batch):
    """Merge the first two dimensions of every tensor in `batch`
    (a tensor or a dict of tensors); asserts on other types."""
    if isinstance(batch, dict):
        return {key: flatten_first2dim(value) for key, value in batch.items()}
    if isinstance(batch, torch.Tensor):
        trailing = batch.size()[2:]
        return batch.view(-1, *trailing)
    assert False, "unsupported type: %s" % type(batch)
def _tensor_slice(t, dim, b, e):
    """Slice tensor `t` over [b, e) along dim 0, 1 or 2; raises ValueError
    for any other dim."""
    if dim not in (0, 1, 2):
        raise ValueError("unsupported %d in tensor_slice" % dim)
    # build an index tuple: full slices up to `dim`, then the [b:e) window
    index = (slice(None),) * dim + (slice(b, e),)
    return t[index]
def tensor_slice(t, dim, b, e):
    """Slice tensors (or dicts of tensors) over [b, e) along `dim`,
    returning contiguous results; asserts on other types."""
    if isinstance(t, torch.Tensor):
        return _tensor_slice(t, dim, b, e).contiguous()
    if isinstance(t, dict):
        return {key: tensor_slice(value, dim, b, e) for key, value in t.items()}
    assert False, "Error: unsupported type: %s" % (type(t))
def tensor_index(t, dim, i):
    """Select index `i` along `dim` of tensors (or dicts of tensors),
    dropping that dimension; asserts on other types."""
    if isinstance(t, torch.Tensor):
        return _tensor_slice(t, dim, i, i + 1).squeeze(dim).contiguous()
    if isinstance(t, dict):
        return {key: tensor_index(value, dim, i) for key, value in t.items()}
    assert False, "Error: unsupported type: %s" % (type(t))
def one_hot(x, n):
    """Convert a (batch, 1) long tensor of class indices into a
    (batch, n) float one-hot tensor on the same device."""
    assert x.dim() == 2 and x.size(1) == 1
    result = torch.zeros(x.size(0), n, device=x.device)
    rows = torch.arange(x.size(0), device=x.device)
    result[rows, x.squeeze(1)] = 1
    return result
def set_all_seeds(rand_seed):
    """Seed python, numpy, torch-CPU and torch-CUDA RNGs.

    Each generator gets a distinct offset of `rand_seed` so the streams
    are decorrelated (same offsets as before: +0, +1, +2, +3).
    """
    seeders = (random.seed, np.random.seed, torch.manual_seed, torch.cuda.manual_seed)
    for offset, seeder in enumerate(seeders):
        seeder(rand_seed + offset)
def weights_init(m):
    """Orthogonally initialize Linear/Conv2d weights; print a notice for
    any other module type (intended for use with `net.apply`)."""
    if isinstance(m, (nn.Linear, nn.Conv2d)):
        nn.init.orthogonal_(m.weight.data)
    else:
        print("%s is not custom-initialized." % m.__class__)
def init_net(net, net_file):
    """Initialize `net` from a checkpoint path, or randomly (via
    `weights_init`) when `net_file` is falsy."""
    if not net_file:
        net.apply(weights_init)
        return
    net.load_state_dict(torch.load(net_file))
def count_output_size(input_shape, model):
    """Return the flattened output element count of `model` for an input
    of shape `input_shape`, measured with a dummy forward pass."""
    dummy_input = torch.FloatTensor(*input_shape)
    return model.forward(dummy_input).view(-1).size()[0]
| 26.48913 | 77 | 0.586992 |
import os
import random
import numpy as np
import torch
from torch import nn
from typing import Dict
def to_device(data, device):
if isinstance(data, torch.Tensor):
return data.to(device)
elif isinstance(data, dict):
return {k: to_device(v, device) for k, v in data.items()}
elif isinstance(data, list):
return [to_device(v, device) for v in data]
def get_all_files(root, file_extension, contain=None):
files = []
for folder, _, fs in os.walk(root):
for f in fs:
if file_extension is not None:
if f.endswith(file_extension):
if contain is None or contain in os.path.join(folder, f):
files.append(os.path.join(folder, f))
else:
if contain in f:
files.append(os.path.join(folder, f))
return files
def flatten(s):
if s == []:
return s
if isinstance(s[0], list):
return flatten(s[0]) + flatten(s[1:])
return s[:1] + flatten(s[1:])
def moving_average(data, period):
left_pad = [data[0] for _ in range(period // 2)]
right_pad = data[-period // 2 + 1 :]
data = left_pad + data + right_pad
weights = np.ones(period) / period
return np.convolve(data, weights, mode="valid")
def mem2str(num_bytes):
assert num_bytes >= 0
if num_bytes >= 2 ** 30:
val = float(num_bytes) / (2 ** 30)
result = "%.3f GB" % val
elif num_bytes >= 2 ** 20:
val = float(num_bytes) / (2 ** 20)
result = "%.3f MB" % val
elif num_bytes >= 2 ** 10:
val = float(num_bytes) / (2 ** 10)
result = "%.3f KB" % val
else:
result = "%d bytes" % num_bytes
return result
def sec2str(seconds):
    """Format a duration in seconds as 'xH yyM zzS' (hours are not capped at 24)."""
    total = int(seconds)
    minutes, secs = divmod(total, 60)
    hours, minutes = divmod(minutes, 60)
    return "%dH %02dM %02dS" % (hours, minutes, secs)
def num2str(n):
    """Compact human-readable number: < 1e3 verbatim, then K/M with up to 3 decimals.

    BUGFIX: trailing-zero stripping was applied to the plain (< 1e3) branch too,
    mangling round numbers (e.g. num2str(100) returned '1'); the stripping now
    only applies to the K/M decimal strings, where it is intended.
    """
    if n < 1e3:
        return str(n)
    if n < 1e6:
        value, unit = n / 1e3, "K"
    else:
        value, unit = n / 1e6, "M"
    s = ("%.3f" % value).rstrip("0").rstrip(".")
    return s + unit
def get_mem_usage():
    """Snapshot system virtual-memory stats as 'available: X, used: Y, free: Z'."""
    import psutil

    mem = psutil.virtual_memory()
    fields = [
        "available: %s" % mem2str(mem.available),
        "used: %s" % mem2str(mem.used),
        "free: %s" % mem2str(mem.free),
    ]
    return ", ".join(fields)
def flatten_first2dim(batch):
    """Merge the first two dimensions of a tensor (or of each tensor in a dict)."""
    if isinstance(batch, torch.Tensor):
        trailing = batch.size()[2:]
        return batch.view(-1, *trailing)
    if isinstance(batch, dict):
        return {key: flatten_first2dim(value) for key, value in batch.items()}
    assert False, "unsupported type: %s" % type(batch)
def _tensor_slice(t, dim, b, e):
if dim == 0:
return t[b:e]
elif dim == 1:
return t[:, b:e]
elif dim == 2:
return t[:, :, b:e]
else:
raise ValueError("unsupported %d in tensor_slice" % dim)
def tensor_slice(t, dim, b, e):
    """Apply _tensor_slice to a tensor (or every value of a dict), returning contiguous results."""
    if isinstance(t, torch.Tensor):
        return _tensor_slice(t, dim, b, e).contiguous()
    if isinstance(t, dict):
        return {key: tensor_slice(value, dim, b, e) for key, value in t.items()}
    assert False, "Error: unsupported type: %s" % (type(t))
def tensor_index(t, dim, i):
    """Select index `i` along `dim` (0..2) and drop that dimension.

    Works on a single tensor or recursively on a dict of tensors; results are
    made contiguous.
    """
    if isinstance(t, dict):
        return {key: tensor_index(t[key], dim, i) for key in t}
    elif isinstance(t, torch.Tensor):
        # slice [i, i+1) then squeeze the singleton dim away
        return _tensor_slice(t, dim, i, i + 1).squeeze(dim).contiguous()
    else:
        assert False, "Error: unsupported type: %s" % (type(t))
def one_hot(x, n):
    """Convert an (N, 1) integer index tensor into an (N, n) float one-hot matrix."""
    assert x.dim() == 2 and x.size(1) == 1
    encoded = torch.zeros(x.size(0), n, device=x.device)
    return encoded.scatter_(1, x, 1)
def set_all_seeds(rand_seed):
    """Seed python, numpy and torch (CPU + CUDA) RNGs from one base seed.

    Distinct offsets keep the four generators from being trivially correlated.
    """
    random.seed(rand_seed)
    np.random.seed(rand_seed + 1)
    torch.manual_seed(rand_seed + 2)
    torch.cuda.manual_seed(rand_seed + 3)
def weights_init(m):
    """Per-module init hook for net.apply(): orthogonal init for Linear/Conv2d weights."""
    if isinstance(m, nn.Linear) or isinstance(m, nn.Conv2d):
        nn.init.orthogonal_(m.weight.data)
    else:
        # other module types keep their default initialization
        print("%s is not custom-initialized." % m.__class__)
def init_net(net, net_file):
    """Initialize `net` from a checkpoint when `net_file` is set, else apply weights_init."""
    if not net_file:
        net.apply(weights_init)
    else:
        net.load_state_dict(torch.load(net_file))
def count_output_size(input_shape, model):
    """Number of scalar elements in the model's output for an input of `input_shape`."""
    probe = torch.FloatTensor(*input_shape)
    flat_output = model.forward(probe).view(-1)
    return flat_output.size()[0]
| true | true |
f715230f4f2ce5d2a6cd06dbc6f0136712d5c553 | 4,871 | py | Python | pymetalog/pdf_quantile_functions.py | gboehl/pymetalog | bcc1bfbf658f44f48d63a594d2b9de8b700a11a7 | [
"MIT"
] | null | null | null | pymetalog/pdf_quantile_functions.py | gboehl/pymetalog | bcc1bfbf658f44f48d63a594d2b9de8b700a11a7 | [
"MIT"
] | null | null | null | pymetalog/pdf_quantile_functions.py | gboehl/pymetalog | bcc1bfbf658f44f48d63a594d2b9de8b700a11a7 | [
"MIT"
] | null | null | null | import numpy as np
from .support import pdfMetalog, quantileMetalog
def pdf_quantile_builder(temp, y, term_limit, bounds, boundedness):
    """Build the metalog pdf/quantile arrays from fitted `a` coefficients.

    Args:
        temp: array of `a` coefficients found by fitting the metalog.
        y: array of cumulative-probability grid points.
        term_limit: number of metalog terms used in the fit (range [3, 30]).
        bounds: [lower, upper] bounds used by bounded metalog forms.
        boundedness: metalog form -- 'u', 'sl' (lower), 'su' (upper) or 'b' (both).

    Returns:
        dict with keys:
            'm': pdf values (0 inserted/appended at bounded ends),
            'M': quantile values (bounds inserted/appended at bounded ends),
            'y': probability grid, extended with 0/1 to mirror the pdf edits,
            'valid': 'yes' if every pdf value is >= 0, else 'no'.
    """
    q_dict = {}

    # pdf evaluated at every grid point y[0..len-1]
    m = pdfMetalog(temp, y[0], term_limit, bounds=bounds, boundedness=boundedness)
    for j in range(2, len(y) + 1):
        tempPDF = pdfMetalog(
            temp, y[j - 1], term_limit, bounds=bounds, boundedness=boundedness
        )
        m = np.append(m, tempPDF)

    # quantile evaluated at every grid point.
    # BUGFIX: the first quantile was computed at y[1], duplicating the second
    # grid point and skipping y[0], leaving M misaligned with m and y. Use y[0].
    M = quantileMetalog(temp, y[0], term_limit, bounds=bounds, boundedness=boundedness)
    for j in range(2, len(y) + 1):
        tempQant = quantileMetalog(
            temp, y[j - 1], term_limit, bounds=bounds, boundedness=boundedness
        )
        M = np.append(M, tempQant)

    # pad the pdf with zeros and the quantiles with the bounds at bounded ends
    if boundedness == "sl":
        m = np.append(0, m)
        M = np.append(bounds[0], M)
    if boundedness == "su":
        m = np.append(m, 0)
        M = np.append(M, bounds[1])
    if boundedness == "b":
        m = np.append(0, m)
        m = np.append(m, 0)
        M = np.append(bounds[0], M)
        M = np.append(M, bounds[1])

    # extend the probability grid to match the padded arrays
    if boundedness == "sl":
        y = np.append(0, y)
    if boundedness == "su":
        y = np.append(y, 1)
    if boundedness == "b":
        y = np.append(0, y)
        y = np.append(y, 1)

    q_dict["m"] = m
    q_dict["M"] = M
    q_dict["y"] = y

    # PDF validation: a valid metalog has no negative density anywhere
    q_dict["valid"] = pdfMetalogValidation(q_dict["m"])
    return q_dict
def pdfMetalogValidation(x):
    """Return 'yes' when every metalog pdf value in x is non-negative, otherwise 'no'."""
    return "yes" if np.min(x) >= 0 else "no"
| 39.282258 | 133 | 0.583453 | import numpy as np
from .support import pdfMetalog, quantileMetalog
def pdf_quantile_builder(temp, y, term_limit, bounds, boundedness):
    """Build metalog pdf (m) and quantile (M) arrays from fitted `a` coefficients,
    padding both (plus the probability grid y) at bounded ends, and flag validity.
    """
    q_dict = {}
    # pdf at each grid point y[0..len-1]
    m = pdfMetalog(temp, y[0], term_limit, bounds=bounds, boundedness=boundedness)
    for j in range(2, len(y) + 1):
        tempPDF = pdfMetalog(
            temp, y[j - 1], term_limit, bounds=bounds, boundedness=boundedness
        )
        m = np.append(m, tempPDF)
    # NOTE(review): the first quantile uses y[1] while the pdf uses y[0]; this
    # duplicates y[1] and skips y[0] -- looks like an off-by-one, confirm upstream.
    M = quantileMetalog(temp, y[1], term_limit, bounds=bounds, boundedness=boundedness)
    for j in range(2, len(y) + 1):
        tempQant = quantileMetalog(
            temp, y[j - 1], term_limit, bounds=bounds, boundedness=boundedness
        )
        M = np.append(M, tempQant)
    # pad the pdf with zeros / the quantiles with the bounds at bounded ends
    if boundedness == "sl":
        m = np.append(0, m)
        M = np.append(bounds[0], M)
    if boundedness == "su":
        m = np.append(m, 0)
        M = np.append(M, bounds[1])
    if boundedness == "b":
        m = np.append(0, m)
        m = np.append(m, 0)
        M = np.append(bounds[0], M)
        M = np.append(M, bounds[1])
    # Add y values for bounded models so the grid matches the padded arrays
    if boundedness == "sl":
        y = np.append(0, y)
    if boundedness == "su":
        y = np.append(y, 1)
    if boundedness == "b":
        y = np.append(0, y)
        y = np.append(y, 1)
    q_dict["m"] = m
    q_dict["M"] = M
    q_dict["y"] = y
    # PDF validation: 'yes' iff no negative density anywhere
    q_dict["valid"] = pdfMetalogValidation(q_dict["m"])
    return q_dict
def pdfMetalogValidation(x):
    """'yes' if the pdf array has no negative values, else 'no'."""
    if np.min(x) < 0:
        return "no"
    return "yes"
| true | true |
f7152491c656ec2239d0ca0d5473ee941e003d64 | 24,833 | py | Python | airbyte-integrations/connectors/source-s3/source_s3/source_files_abstract/stream.py | Mu-L/airbyte | d6c684b3e495f1cb5c08d94e57ab55288ce47ea6 | [
"MIT"
] | 1 | 2022-02-02T20:42:41.000Z | 2022-02-02T20:42:41.000Z | airbyte-integrations/connectors/source-s3/source_s3/source_files_abstract/stream.py | Mu-L/airbyte | d6c684b3e495f1cb5c08d94e57ab55288ce47ea6 | [
"MIT"
] | 2 | 2021-09-30T16:58:58.000Z | 2021-11-26T17:58:59.000Z | airbyte-integrations/connectors/source-s3/source_s3/source_files_abstract/stream.py | Mu-L/airbyte | d6c684b3e495f1cb5c08d94e57ab55288ce47ea6 | [
"MIT"
] | 1 | 2022-03-18T21:58:33.000Z | 2022-03-18T21:58:33.000Z | #
# Copyright (c) 2021 Airbyte, Inc., all rights reserved.
#
import concurrent
import json
from abc import ABC, abstractmethod
from copy import deepcopy
from datetime import datetime
from functools import lru_cache
from operator import itemgetter
from traceback import format_exc
from typing import Any, Iterable, Iterator, List, Mapping, MutableMapping, Optional, Tuple, Union
from airbyte_cdk.logger import AirbyteLogger
from airbyte_cdk.models.airbyte_protocol import SyncMode
from airbyte_cdk.sources.streams import Stream
from wcmatch.glob import GLOBSTAR, SPLIT, globmatch
from .formats.csv_parser import CsvParser
from .formats.parquet_parser import ParquetParser
JSON_TYPES = ["string", "number", "integer", "object", "array", "boolean", "null"]
LOGGER = AirbyteLogger()
class ConfigurationError(Exception):
    """Client mis-configured: raised when user-supplied connector config fails validation."""
class FileStream(Stream, ABC):
@property
def fileformatparser_map(self):
"""Mapping where every key is equal 'filetype' and values are corresponding parser classes."""
return {
"csv": CsvParser,
"parquet": ParquetParser,
}
# TODO: make these user configurable in spec.json
ab_additional_col = "_ab_additional_properties"
ab_last_mod_col = "_ab_source_file_last_modified"
ab_file_name_col = "_ab_source_file_url"
airbyte_columns = [ab_additional_col, ab_last_mod_col, ab_file_name_col]
datetime_format_string = "%Y-%m-%dT%H:%M:%S%z"
def __init__(self, dataset: str, provider: dict, format: dict, path_pattern: str, schema: str = None):
"""
:param dataset: table name for this stream
:param provider: provider specific mapping as described in spec.json
:param format: file format specific mapping as described in spec.json
:param path_pattern: glob-style pattern for file-matching (https://facelessuser.github.io/wcmatch/glob/)
:param schema: JSON-syntax user provided schema, defaults to None
"""
self.dataset = dataset
self._path_pattern = path_pattern
self._provider = provider
self._format = format
self._schema = {}
if schema:
self._schema = self._parse_user_input_schema(schema)
self.master_schema = None
LOGGER.info(f"initialised stream with format: {format}")
@staticmethod
def _parse_user_input_schema(schema: str) -> Mapping[str, str]:
"""
If the user provided a schema, we run this method to convert to a python dict and verify it
This verifies:
- that the provided string is valid JSON
- that it is a key:value map with no nested values (objects or arrays)
- that all values in the map correspond to a JsonSchema datatype
If this passes, we are confident that the user-provided schema is valid and will work as expected with the rest of the code
:param schema: JSON-syntax user provided schema
:raises ConfigurationError: if any of the verification steps above fail
:return: the input schema (json string) as a python dict
"""
try:
py_schema = json.loads(schema)
except json.decoder.JSONDecodeError as err:
error_msg = f"Failed to parse schema {repr(err)}\n{schema}\n{format_exc()}"
raise ConfigurationError(error_msg) from err
# enforce all keys and values are of type string as required (i.e. no nesting)
if not all([isinstance(k, str) and isinstance(v, str) for k, v in py_schema.items()]):
raise ConfigurationError("Invalid schema provided, all column names and datatypes must be in string format")
# enforce all values (datatypes) are valid JsonSchema datatypes
if not all([datatype in JSON_TYPES for datatype in py_schema.values()]):
raise ConfigurationError(f"Invalid schema provided, datatypes must each be one of {JSON_TYPES}")
return py_schema
@property
def name(self) -> str:
return self.dataset
@property
def primary_key(self) -> Optional[Union[str, List[str], List[List[str]]]]:
return None
@property
def fileformatparser_class(self) -> type:
"""
:return: reference to the relevant fileformatparser class e.g. CsvParser
"""
filetype = self._format.get("filetype")
file_reader = self.fileformatparser_map.get(self._format.get("filetype"))
if not file_reader:
raise RuntimeError(
f"Detected mismatched file format '{filetype}'. Available values: '{list( self.fileformatparser_map.keys())}''."
)
return file_reader
@property
@abstractmethod
def storagefile_class(self) -> type:
"""
Override this to point to the relevant provider-specific StorageFile class e.g. S3File
:return: reference to relevant class
"""
@abstractmethod
def filepath_iterator() -> Iterator[str]:
"""
Provider-specific method to iterate through bucket/container/etc. and yield each full filepath.
This should supply the 'url' to use in StorageFile(). This is possibly better described as blob or file path.
e.g. for AWS: f"s3://{aws_access_key_id}:{aws_secret_access_key}@{self.url}" <- self.url is what we want to yield here
:yield: url filepath to use in StorageFile()
"""
def pattern_matched_filepath_iterator(self, filepaths: Iterable[str]) -> Iterator[str]:
"""
iterates through iterable filepaths and yields only those filepaths that match user-provided path patterns
:param filepaths: filepath_iterator(), this is a param rather than method reference in order to unit test this
:yield: url filepath to use in StorageFile(), if matching on user-provided path patterns
"""
for filepath in filepaths:
if globmatch(filepath, self._path_pattern, flags=GLOBSTAR | SPLIT):
yield filepath
@lru_cache(maxsize=None)
def get_time_ordered_filepaths(self) -> Iterable[Tuple[datetime, str]]:
"""
Iterates through pattern_matched_filepath_iterator(), acquiring last_modified property of each file to return in time ascending order.
Uses concurrent.futures to thread this asynchronously in order to improve performance when there are many files (network I/O)
Caches results after first run of method to avoid repeating network calls as this is used more than once
:return: list in time-ascending order
"""
def get_storagefile_with_lastmod(filepath: str) -> Tuple[datetime, str]:
fc = self.storagefile_class(filepath, self._provider)
return (fc.last_modified, filepath)
storagefiles = []
# use concurrent future threads to parallelise grabbing last_modified from all the files
# TODO: don't hardcode max_workers like this
with concurrent.futures.ThreadPoolExecutor(max_workers=64) as executor:
filepath_gen = self.pattern_matched_filepath_iterator(self.filepath_iterator())
futures = [executor.submit(get_storagefile_with_lastmod, fp) for fp in filepath_gen]
for future in concurrent.futures.as_completed(futures):
# this will failfast on any errors
storagefiles.append(future.result())
# The array storagefiles contain tuples of (last_modified, filepath), so sort by last_modified
return sorted(storagefiles, key=itemgetter(0))
def _get_schema_map(self) -> Mapping[str, Any]:
if self._schema != {}:
return_schema = deepcopy(self._schema)
else: # we have no provided schema or schema state from a previous incremental run
return_schema = self._get_master_schema()
return_schema[self.ab_additional_col] = "object"
return_schema[self.ab_last_mod_col] = "string"
return_schema[self.ab_file_name_col] = "string"
return return_schema
def get_json_schema(self) -> Mapping[str, Any]:
"""
:return: the JSON schema representing this stream.
"""
# note: making every non-airbyte column nullable for compatibility
# TODO: ensure this behaviour still makes sense as we add new file formats
properties = {}
for column, typ in self._get_schema_map().items():
properties[column] = {"type": ["null", typ]} if column not in self.airbyte_columns else {"type": typ}
properties[self.ab_last_mod_col]["format"] = "date-time"
return {"type": "object", "properties": properties}
def _get_master_schema(self, min_datetime: datetime = None) -> Mapping[str, Any]:
"""
In order to auto-infer a schema across many files and/or allow for additional properties (columns),
we need to determine the superset of schemas across all relevant files.
This method iterates through get_time_ordered_filepaths() obtaining the inferred schema (process implemented per file format),
to build up this superset schema (master_schema).
This runs datatype checks to Warn or Error if we find incompatible schemas (e.g. same column is 'date' in one file but 'float' in another).
This caches the master_schema after first run in order to avoid repeated compute and network calls to infer schema on all files.
:param min_datetime: if passed, will only use files with last_modified >= this to determine master schema
:raises RuntimeError: if we find datatype mismatches between files or between a file and schema state (provided or from previous inc. batch)
:return: A dict of the JSON schema representing this stream.
"""
# TODO: could implement a (user-beware) 'lazy' mode that skips schema checking to improve performance
# TODO: could utilise min_datetime to add a start_date parameter in spec for user
if self.master_schema is None:
master_schema = deepcopy(self._schema)
file_reader = self.fileformatparser_class(self._format)
for last_mod, filepath in self.get_time_ordered_filepaths():
# skip this file if it's earlier than min_datetime
if (min_datetime is not None) and (last_mod < min_datetime):
continue
storagefile = self.storagefile_class(filepath, self._provider)
with storagefile.open(file_reader.is_binary) as f:
this_schema = file_reader.get_inferred_schema(f)
if this_schema == master_schema:
continue # exact schema match so go to next file
# creates a superset of columns retaining order of master_schema with any additional columns added to end
column_superset = list(master_schema.keys()) + [c for c in this_schema.keys() if c not in master_schema.keys()]
# this compares datatype of every column that the two schemas have in common
for col in column_superset:
if (col in master_schema.keys()) and (col in this_schema.keys()) and (master_schema[col] != this_schema[col]):
# if this column exists in a provided schema or schema state, we'll WARN here rather than throw an error
# this is to allow more leniency as we may be able to coerce this datatype mismatch on read according to provided schema state
# if not, then the read will error anyway
if col in self._schema.keys():
LOGGER.warn(
f"Detected mismatched datatype on column '{col}', in file '{storagefile.url}'. "
+ f"Should be '{master_schema[col]}', but found '{this_schema[col]}'. "
+ f"Airbyte will attempt to coerce this to {master_schema[col]} on read."
)
# else we're inferring the schema (or at least this column) from scratch and therefore throw an error on mismatching datatypes
else:
raise RuntimeError(
f"Detected mismatched datatype on column '{col}', in file '{storagefile.url}'. "
+ f"Should be '{master_schema[col]}', but found '{this_schema[col]}'."
)
# missing columns in this_schema doesn't affect our master_schema so we don't check for it here
# add to master_schema any columns from this_schema that aren't already present
for col, datatype in this_schema.items():
if col not in master_schema.keys():
master_schema[col] = datatype
LOGGER.info(f"determined master schema: {master_schema}")
self.master_schema = master_schema
return self.master_schema
def stream_slices(
self, sync_mode: SyncMode, cursor_field: List[str] = None, stream_state: Mapping[str, Any] = None
) -> Iterable[Optional[Mapping[str, Any]]]:
"""
This builds full-refresh stream_slices regardless of sync_mode param.
For full refresh, 1 file == 1 stream_slice.
The structure of a stream slice is [ {file}, ... ].
In incremental mode, a stream slice may have more than one file so we mirror that format here.
Incremental stream_slices are implemented in the IncrementalFileStream child class.
"""
# TODO: this could be optimised via concurrent reads, however we'd lose chronology and need to deal with knock-ons of that
# we could do this concurrently both full and incremental by running batches in parallel
# and then incrementing the cursor per each complete batch
for last_mod, filepath in self.get_time_ordered_filepaths():
storagefile = self.storagefile_class(filepath, self._provider)
yield [{"unique_url": storagefile.url, "last_modified": last_mod, "storagefile": storagefile}]
def _match_target_schema(self, record: Mapping[str, Any], target_columns: List) -> Mapping[str, Any]:
"""
This method handles missing or additional fields in each record, according to the provided target_columns.
All missing fields are added, with a value of None (null)
All additional fields are packed into the _ab_additional_properties object column
We start off with a check to see if we're already lined up to target in order to avoid unnecessary iterations (useful if many columns)
:param record: json-like representation of a data row {column:value}
:param target_columns: list of column names to mutate this record into (obtained via self._get_schema_map().keys() as of now)
:return: mutated record with columns lining up to target_columns
"""
compare_columns = [c for c in target_columns if c not in [self.ab_last_mod_col, self.ab_file_name_col]]
# check if we're already matching to avoid unnecessary iteration
if set(list(record.keys()) + [self.ab_additional_col]) == set(compare_columns):
record[self.ab_additional_col] = {}
return record
# missing columns
for c in [col for col in compare_columns if col != self.ab_additional_col]:
if c not in record.keys():
record[c] = None
# additional columns
record[self.ab_additional_col] = {c: deepcopy(record[c]) for c in record.keys() if c not in compare_columns}
for c in record[self.ab_additional_col].keys():
del record[c]
return record
def _add_extra_fields_from_map(self, record: Mapping[str, Any], extra_map: Mapping[str, Any]) -> Mapping[str, Any]:
"""
Simple method to take a mapping of columns:values and add them to the provided record
:param record: json-like representation of a data row {column:value}
:param extra_map: map of additional columns and values to add
:return: mutated record with additional fields
"""
for key, value in extra_map.items():
record[key] = value
return record
def _read_from_slice(
self,
file_reader,
stream_slice: Mapping[str, Any],
stream_state: Mapping[str, Any] = None,
) -> Iterable[Mapping[str, Any]]:
"""
Uses provider-relevant StorageFile to open file and then iterates through stream_records() using format-relevant AbstractFileParser.
Records are mutated on the fly using _match_target_schema() and _add_extra_fields_from_map() to achieve desired final schema.
Since this is called per stream_slice, this method works for both full_refresh and incremental.
"""
# TODO: read all files in a stream_slice concurrently
for file_info in stream_slice:
with file_info["storagefile"].open(file_reader.is_binary) as f:
# TODO: make this more efficient than mutating every record one-by-one as they stream
for record in file_reader.stream_records(f):
schema_matched_record = self._match_target_schema(record, list(self._get_schema_map().keys()))
complete_record = self._add_extra_fields_from_map(
schema_matched_record,
{
self.ab_last_mod_col: datetime.strftime(file_info["last_modified"], self.datetime_format_string),
self.ab_file_name_col: file_info["unique_url"],
},
)
yield complete_record
LOGGER.info("finished reading a stream slice")
# Always return an empty generator just in case no records were ever yielded
yield from []
def read_records(
self,
sync_mode: SyncMode,
cursor_field: List[str] = None,
stream_slice: Mapping[str, Any] = None,
stream_state: Mapping[str, Any] = None,
) -> Iterable[Mapping[str, Any]]:
"""
The heavy lifting sits in _read_from_slice() which is full refresh / incremental agnostic
"""
stream_slice = stream_slice if stream_slice is not None else []
file_reader = self.fileformatparser_class(self._format, self._get_master_schema())
yield from self._read_from_slice(file_reader, stream_slice)
class IncrementalFileStream(FileStream, ABC):
# TODO: ideally want to checkpoint after every file or stream slice rather than N records
state_checkpoint_interval = None
@property
def cursor_field(self) -> str:
"""
:return: The name of the cursor field.
"""
return self.ab_last_mod_col
def _get_datetime_from_stream_state(self, stream_state: Mapping[str, Any] = None) -> datetime:
"""if no state, we default to 1970-01-01 in order to pick up all files present."""
if stream_state is not None and self.cursor_field in stream_state.keys():
return datetime.strptime(stream_state[self.cursor_field], self.datetime_format_string)
else:
return datetime.strptime("1970-01-01T00:00:00+0000", self.datetime_format_string)
def get_updated_state(self, current_stream_state: MutableMapping[str, Any], latest_record: Mapping[str, Any]) -> Mapping[str, Any]:
"""
Inspects the latest record extracted from the data source and the current state object and return an updated state object.
In the case where current_stream_state is null, we default to 1970-01-01 in order to pick up all files present.
We also save the schema into the state here so that we can use it on future incremental batches, allowing for additional/missing columns.
:param current_stream_state: The stream's current state object
:param latest_record: The latest record extracted from the stream
:return: An updated state object
"""
state_dict = {}
current_parsed_datetime = self._get_datetime_from_stream_state(current_stream_state)
latest_record_datetime = datetime.strptime(
latest_record.get(self.cursor_field, "1970-01-01T00:00:00+0000"), self.datetime_format_string
)
state_dict[self.cursor_field] = datetime.strftime(max(current_parsed_datetime, latest_record_datetime), self.datetime_format_string)
state_dict["schema"] = self._get_schema_map()
return state_dict
def stream_slices(
self, sync_mode: SyncMode, cursor_field: List[str] = None, stream_state: Mapping[str, Any] = None
) -> Iterable[Optional[Mapping[str, Any]]]:
"""
Builds either full_refresh or incremental stream_slices based on sync_mode.
An incremental stream_slice is a group of all files with the exact same last_modified timestamp.
This ensures we only update the cursor state to a given timestamp after ALL files with that timestamp have been successfully read.
Slight nuance: as we iterate through get_time_ordered_filepaths(),
we yield the stream_slice containing file(s) up to and EXcluding the file on the current iteration.
The stream_slice is then cleared (if we yielded it) and this iteration's file appended to the (next) stream_slice
"""
if sync_mode == SyncMode.full_refresh:
yield from super().stream_slices(sync_mode=sync_mode, cursor_field=cursor_field, stream_state=stream_state)
else:
# if necessary and present, let's update this object's schema attribute to the schema stored in state
# TODO: ideally we could do this on __init__ but I'm not sure that's possible without breaking from cdk style implementation
if self._schema == {} and stream_state is not None and "schema" in stream_state.keys():
self._schema = stream_state["schema"]
# logic here is to bundle all files with exact same last modified timestamp together in each slice
prev_file_last_mod = None # init variable to hold previous iterations last modified
stream_slice = []
for last_mod, filepath in self.get_time_ordered_filepaths():
# skip this file if last_mod is earlier than our cursor value from state
if (
stream_state is not None
and self.cursor_field in stream_state.keys()
and last_mod <= self._get_datetime_from_stream_state(stream_state)
):
continue
storagefile = self.storagefile_class(filepath, self._provider)
# check if this storagefile belongs in the next slice, if so yield the current slice before this file
if (prev_file_last_mod is not None) and (last_mod != prev_file_last_mod):
yield stream_slice
stream_slice.clear()
# now we either have an empty stream_slice or a stream_slice that this file shares a last modified with, so append it
stream_slice.append({"unique_url": storagefile.url, "last_modified": last_mod, "storagefile": storagefile})
# update our prev_file_last_mod to the current one for next iteration
prev_file_last_mod = last_mod
# now yield the final stream_slice. This is required because our loop only yields the slice previous to its current iteration.
if len(stream_slice) > 0:
yield stream_slice
# in case we have no files
yield from [None]
def read_records(
self,
sync_mode: SyncMode,
cursor_field: List[str] = None,
stream_slice: Mapping[str, Any] = None,
stream_state: Mapping[str, Any] = None,
) -> Iterable[Mapping[str, Any]]:
"""
The heavy lifting sits in _read_from_slice() which is full refresh / incremental agnostic.
We override this for incremental so we can pass our minimum datetime from state into _get_master_schema().
This means we only parse the schema of new files on incremental runs rather than all files in the bucket.
"""
if sync_mode == SyncMode.full_refresh:
yield from super().read_records(sync_mode, cursor_field, stream_slice, stream_state)
else:
stream_slice = stream_slice if stream_slice is not None else []
file_reader = self.fileformatparser_class(
self._format, self._get_master_schema(self._get_datetime_from_stream_state(stream_state))
)
yield from self._read_from_slice(file_reader, stream_slice)
| 51.951883 | 150 | 0.663432 |
import concurrent
import json
from abc import ABC, abstractmethod
from copy import deepcopy
from datetime import datetime
from functools import lru_cache
from operator import itemgetter
from traceback import format_exc
from typing import Any, Iterable, Iterator, List, Mapping, MutableMapping, Optional, Tuple, Union
from airbyte_cdk.logger import AirbyteLogger
from airbyte_cdk.models.airbyte_protocol import SyncMode
from airbyte_cdk.sources.streams import Stream
from wcmatch.glob import GLOBSTAR, SPLIT, globmatch
from .formats.csv_parser import CsvParser
from .formats.parquet_parser import ParquetParser
JSON_TYPES = ["string", "number", "integer", "object", "array", "boolean", "null"]
LOGGER = AirbyteLogger()
class ConfigurationError(Exception):
    """Client mis-configured"""
class FileStream(Stream, ABC):
    """Abstract base class for streams that read records out of files in a blob store.

    Concrete subclasses supply the storage-specific pieces (:attr:`storagefile_class`
    and :meth:`filepath_iterator`); this class implements glob-based path filtering,
    cross-file schema inference, and per-record schema conformance.
    """

    @property
    def fileformatparser_map(self):
        """Mapping of the supported config ``filetype`` values to their parser classes."""
        return {
            "csv": CsvParser,
            "parquet": ParquetParser,
        }

    # Airbyte-injected columns appended to every record / schema.
    ab_additional_col = "_ab_additional_properties"
    ab_last_mod_col = "_ab_source_file_last_modified"
    ab_file_name_col = "_ab_source_file_url"
    airbyte_columns = [ab_additional_col, ab_last_mod_col, ab_file_name_col]
    datetime_format_string = "%Y-%m-%dT%H:%M:%S%z"

    def __init__(self, dataset: str, provider: dict, format: dict, path_pattern: str, schema: str = None):
        """
        :param dataset: name of this stream
        :param provider: provider-specific connection config
        :param format: file-format config; must contain a "filetype" key
        :param path_pattern: wcmatch glob pattern used to select files
        :param schema: optional user-provided JSON string mapping column name -> JSON type
        """
        self.dataset = dataset
        self._path_pattern = path_pattern
        self._provider = provider
        self._format = format
        self._schema = {}
        if schema:
            self._schema = self._parse_user_input_schema(schema)
        # lazily inferred on first use; see _get_master_schema()
        self.master_schema = None
        LOGGER.info(f"initialised stream with format: {format}")

    @staticmethod
    def _parse_user_input_schema(schema: str) -> Mapping[str, str]:
        """Parse and validate a user-provided JSON schema string.

        :raises ConfigurationError: if the JSON is malformed, or any column name or
            datatype is not a string, or any datatype is not a valid JSON type
        :return: mapping of column name -> JSON type
        """
        try:
            py_schema = json.loads(schema)
        except json.decoder.JSONDecodeError as err:
            error_msg = f"Failed to parse schema {repr(err)}\n{schema}\n{format_exc()}"
            raise ConfigurationError(error_msg) from err
        # enforce all keys and values are strings
        if not all([isinstance(k, str) and isinstance(v, str) for k, v in py_schema.items()]):
            raise ConfigurationError("Invalid schema provided, all column names and datatypes must be in string format")
        # enforce all datatypes are valid JSON schema types
        if not all([datatype in JSON_TYPES for datatype in py_schema.values()]):
            raise ConfigurationError(f"Invalid schema provided, datatypes must each be one of {JSON_TYPES}")
        return py_schema

    @property
    def name(self) -> str:
        """Stream name as it appears in the catalog."""
        return self.dataset

    @property
    def primary_key(self) -> Optional[Union[str, List[str], List[List[str]]]]:
        # file-based streams have no natural primary key
        return None

    @property
    def fileformatparser_class(self) -> type:
        """Parser class for the configured filetype.

        :raises RuntimeError: if the configured filetype is not supported
        """
        filetype = self._format.get("filetype")
        # reuse the already-fetched value rather than reading the config twice
        file_reader = self.fileformatparser_map.get(filetype)
        if not file_reader:
            raise RuntimeError(
                f"Detected mismatched file format '{filetype}'. Available values: '{list(self.fileformatparser_map.keys())}''."
            )
        return file_reader

    @property
    @abstractmethod
    def storagefile_class(self) -> type:
        """The provider-specific StorageFile class used to open and stat files."""

    @abstractmethod
    def filepath_iterator(self) -> Iterator[str]:
        """Iterate over every file path visible to this stream (before pattern filtering)."""

    def pattern_matched_filepath_iterator(self, filepaths: Iterable[str]) -> Iterator[str]:
        """Yield only the paths that match the configured glob pattern."""
        for filepath in filepaths:
            if globmatch(filepath, self._path_pattern, flags=GLOBSTAR | SPLIT):
                yield filepath

    @lru_cache(maxsize=None)
    def get_time_ordered_filepaths(self) -> Iterable[Tuple[datetime, str]]:
        """Return (last_modified, filepath) tuples for all matching files, oldest first.

        Cached per instance because listing and stat-ing every file is expensive.
        NOTE(review): lru_cache on an instance method keeps the instance alive for
        the cache's lifetime — acceptable here as streams live for the whole sync.
        """

        def get_storagefile_with_lastmod(filepath: str) -> Tuple[datetime, str]:
            fc = self.storagefile_class(filepath, self._provider)
            return (fc.last_modified, filepath)

        storagefiles = []
        # use concurrent future threads to parallelise grabbing last_modified from all the files
        # TODO: don't hardcode max_workers like this
        with concurrent.futures.ThreadPoolExecutor(max_workers=64) as executor:
            filepath_gen = self.pattern_matched_filepath_iterator(self.filepath_iterator())
            futures = [executor.submit(get_storagefile_with_lastmod, fp) for fp in filepath_gen]
            for future in concurrent.futures.as_completed(futures):
                # future.result() re-raises any exception from the worker (fail fast)
                storagefiles.append(future.result())
        # sort oldest first so incremental syncs advance chronologically
        return sorted(storagefiles, key=itemgetter(0))

    def _get_schema_map(self) -> Mapping[str, Any]:
        """Return the column -> JSON type map in use, including Airbyte's extra columns."""
        if self._schema != {}:
            return_schema = deepcopy(self._schema)
        else:
            # no user-provided schema (or schema state), so infer from the files
            return_schema = self._get_master_schema()

        return_schema[self.ab_additional_col] = "object"
        return_schema[self.ab_last_mod_col] = "string"
        return_schema[self.ab_file_name_col] = "string"
        return return_schema

    def get_json_schema(self) -> Mapping[str, Any]:
        """Build the stream's JSON schema; user columns are nullable, Airbyte columns are not."""
        properties = {}
        for column, typ in self._get_schema_map().items():
            properties[column] = {"type": ["null", typ]} if column not in self.airbyte_columns else {"type": typ}
        properties[self.ab_last_mod_col]["format"] = "date-time"
        return {"type": "object", "properties": properties}

    def _get_master_schema(self, min_datetime: datetime = None) -> Mapping[str, Any]:
        """Infer a single schema across all (optionally time-filtered) files.

        Files are visited oldest-first and each file's inferred schema is merged
        into a growing superset. A datatype conflict on a user-provided column is
        only a warning (we attempt coercion on read); otherwise it is a hard error.

        :param min_datetime: if provided, files modified before this are skipped
        :raises RuntimeError: on a datatype mismatch for a non-user-provided column
        """
        # only calculate once per instance
        if self.master_schema is None:
            master_schema = deepcopy(self._schema)

            file_reader = self.fileformatparser_class(self._format)

            for last_mod, filepath in self.get_time_ordered_filepaths():
                # skip files older than the requested lower bound
                if (min_datetime is not None) and (last_mod < min_datetime):
                    continue

                storagefile = self.storagefile_class(filepath, self._provider)
                with storagefile.open(file_reader.is_binary) as f:
                    this_schema = file_reader.get_inferred_schema(f)

                if this_schema == master_schema:
                    continue  # exact schema match so go to next file

                # creates a superset of columns retaining order of master_schema with any additional columns added to end
                column_superset = list(master_schema.keys()) + [c for c in this_schema.keys() if c not in master_schema.keys()]
                # this compares datatype of every column that the two schemas have in common
                for col in column_superset:
                    if (col in master_schema.keys()) and (col in this_schema.keys()) and (master_schema[col] != this_schema[col]):
                        # if this column exists in a provided schema or schema state, we'll WARN here rather than throw an error
                        if col in self._schema.keys():
                            LOGGER.warn(
                                f"Detected mismatched datatype on column '{col}', in file '{storagefile.url}'. "
                                + f"Should be '{master_schema[col]}', but found '{this_schema[col]}'. "
                                + f"Airbyte will attempt to coerce this to {master_schema[col]} on read."
                            )
                        else:
                            raise RuntimeError(
                                f"Detected mismatched datatype on column '{col}', in file '{storagefile.url}'. "
                                + f"Should be '{master_schema[col]}', but found '{this_schema[col]}'."
                            )

                # missing columns in this_schema doesn't affect our master_schema so we don't check for it here
                # add to master_schema any columns from this_schema that aren't already present
                for col, datatype in this_schema.items():
                    if col not in master_schema.keys():
                        master_schema[col] = datatype

            LOGGER.info(f"determined master schema: {master_schema}")
            self.master_schema = master_schema

        return self.master_schema

    def stream_slices(
        self, sync_mode: SyncMode, cursor_field: List[str] = None, stream_state: Mapping[str, Any] = None
    ) -> Iterable[Optional[Mapping[str, Any]]]:
        """Yield one slice (a single-element list of file-info dicts) per file, oldest first."""
        # we could do this concurrently both full and incremental by running batches in parallel
        # and then incrementing the cursor per each complete batch
        for last_mod, filepath in self.get_time_ordered_filepaths():
            storagefile = self.storagefile_class(filepath, self._provider)
            yield [{"unique_url": storagefile.url, "last_modified": last_mod, "storagefile": storagefile}]

    def _match_target_schema(self, record: Mapping[str, Any], target_columns: List) -> Mapping[str, Any]:
        """Conform *record* to *target_columns* in place.

        Missing target columns are added with value ``None``; columns not in the
        target schema are moved under the ``_ab_additional_properties`` object.
        """
        compare_columns = [c for c in target_columns if c not in [self.ab_last_mod_col, self.ab_file_name_col]]
        # check if we're already matching to avoid unnecessary iteration
        if set(list(record.keys()) + [self.ab_additional_col]) == set(compare_columns):
            record[self.ab_additional_col] = {}
            return record
        # add any missing expected columns as None
        for c in [col for col in compare_columns if col != self.ab_additional_col]:
            if c not in record.keys():
                record[c] = None
        # relocate unexpected columns into the additional-properties object
        record[self.ab_additional_col] = {c: deepcopy(record[c]) for c in record.keys() if c not in compare_columns}
        for c in record[self.ab_additional_col].keys():
            del record[c]
        return record

    def _add_extra_fields_from_map(self, record: Mapping[str, Any], extra_map: Mapping[str, Any]) -> Mapping[str, Any]:
        """Set each key/value from *extra_map* onto *record* (in place) and return it."""
        for key, value in extra_map.items():
            record[key] = value
        return record

    def _read_from_slice(
        self,
        file_reader,
        stream_slice: Mapping[str, Any],
        stream_state: Mapping[str, Any] = None,
    ) -> Iterable[Mapping[str, Any]]:
        """Read all records from every file in *stream_slice*, conforming each to the
        stream schema and stamping on the source-file metadata columns."""
        for file_info in stream_slice:
            with file_info["storagefile"].open(file_reader.is_binary) as f:
                for record in file_reader.stream_records(f):
                    schema_matched_record = self._match_target_schema(record, list(self._get_schema_map().keys()))
                    complete_record = self._add_extra_fields_from_map(
                        schema_matched_record,
                        {
                            self.ab_last_mod_col: datetime.strftime(file_info["last_modified"], self.datetime_format_string),
                            self.ab_file_name_col: file_info["unique_url"],
                        },
                    )
                    yield complete_record
        LOGGER.info("finished reading a stream slice")
        yield from []

    def read_records(
        self,
        sync_mode: SyncMode,
        cursor_field: List[str] = None,
        stream_slice: Mapping[str, Any] = None,
        stream_state: Mapping[str, Any] = None,
    ) -> Iterable[Mapping[str, Any]]:
        """Read records from the given slice using the configured file-format parser."""
        stream_slice = stream_slice if stream_slice is not None else []
        file_reader = self.fileformatparser_class(self._format, self._get_master_schema())
        yield from self._read_from_slice(file_reader, stream_slice)
class IncrementalFileStream(FileStream, ABC):
    """FileStream with incremental sync support, cursored on the injected
    ``_ab_source_file_last_modified`` column."""

    # no mid-slice checkpointing; state advances per completed slice
    state_checkpoint_interval = None

    @property
    def cursor_field(self) -> str:
        """Cursor is the injected source-file last-modified column."""
        return self.ab_last_mod_col

    def _get_datetime_from_stream_state(self, stream_state: Mapping[str, Any] = None) -> datetime:
        """Return the cursor datetime from state, or the epoch when no usable state exists."""
        if stream_state is not None and self.cursor_field in stream_state.keys():
            return datetime.strptime(stream_state[self.cursor_field], self.datetime_format_string)
        else:
            # defaulting to epoch ensures every file is considered on a first sync
            return datetime.strptime("1970-01-01T00:00:00+0000", self.datetime_format_string)

    def get_updated_state(self, current_stream_state: MutableMapping[str, Any], latest_record: Mapping[str, Any]) -> Mapping[str, Any]:
        """Advance the cursor to max(current state, latest record) and persist the schema map."""
        state_dict = {}
        current_parsed_datetime = self._get_datetime_from_stream_state(current_stream_state)
        latest_record_datetime = datetime.strptime(
            latest_record.get(self.cursor_field, "1970-01-01T00:00:00+0000"), self.datetime_format_string
        )
        state_dict[self.cursor_field] = datetime.strftime(max(current_parsed_datetime, latest_record_datetime), self.datetime_format_string)
        # persisting the schema keeps columns stable across incremental runs
        state_dict["schema"] = self._get_schema_map()
        return state_dict

    def stream_slices(
        self, sync_mode: SyncMode, cursor_field: List[str] = None, stream_state: Mapping[str, Any] = None
    ) -> Iterable[Optional[Mapping[str, Any]]]:
        """Yield slices of files to read.

        Full refresh delegates to the parent (one file per slice). Incremental
        bundles all files that share an identical last_modified timestamp into
        one slice, so the cursor only advances once a whole timestamp group is read.
        """
        if sync_mode == SyncMode.full_refresh:
            yield from super().stream_slices(sync_mode=sync_mode, cursor_field=cursor_field, stream_state=stream_state)

        else:
            # if no user schema, adopt the schema persisted in state (if any)
            if self._schema == {} and stream_state is not None and "schema" in stream_state.keys():
                self._schema = stream_state["schema"]

            prev_file_last_mod = None  # last_modified of the previous iteration's file
            stream_slice = []

            for last_mod, filepath in self.get_time_ordered_filepaths():
                # skip files already synced according to state
                if (
                    stream_state is not None
                    and self.cursor_field in stream_state.keys()
                    and last_mod <= self._get_datetime_from_stream_state(stream_state)
                ):
                    continue

                storagefile = self.storagefile_class(filepath, self._provider)
                # a new timestamp starts a new slice: emit the accumulated one first
                # NOTE(review): .clear() mutates the just-yielded list; safe only
                # because the consumer finishes with a slice before requesting the
                # next one — confirm, or consider rebinding to a fresh list
                if (prev_file_last_mod is not None) and (last_mod != prev_file_last_mod):
                    yield stream_slice
                    stream_slice.clear()

                stream_slice.append({"unique_url": storagefile.url, "last_modified": last_mod, "storagefile": storagefile})
                prev_file_last_mod = last_mod

            # emit the final group (the loop only yields the group before the current one)
            if len(stream_slice) > 0:
                yield stream_slice

            # ensure at least one (empty) slice is produced when there are no files
            yield from [None]

    def read_records(
        self,
        sync_mode: SyncMode,
        cursor_field: List[str] = None,
        stream_slice: Mapping[str, Any] = None,
        stream_state: Mapping[str, Any] = None,
    ) -> Iterable[Mapping[str, Any]]:
        """Read records from a slice; incremental mode infers the master schema only
        from files newer than the state cursor."""
        if sync_mode == SyncMode.full_refresh:
            yield from super().read_records(sync_mode, cursor_field, stream_slice, stream_state)

        else:
            stream_slice = stream_slice if stream_slice is not None else []
            file_reader = self.fileformatparser_class(
                self._format, self._get_master_schema(self._get_datetime_from_stream_state(stream_state))
            )
            yield from self._read_from_slice(file_reader, stream_slice)
| true | true |
f715250228b280bbd2a5350070a71e1887f4c22e | 36,247 | py | Python | lib/python3.7/site-packages/boltons/funcutils.py | nguyentranhoan/uit-mobile | 8546312b01373d94cf00c64f7eacb769e0f4ccce | [
"BSD-3-Clause"
] | null | null | null | lib/python3.7/site-packages/boltons/funcutils.py | nguyentranhoan/uit-mobile | 8546312b01373d94cf00c64f7eacb769e0f4ccce | [
"BSD-3-Clause"
] | null | null | null | lib/python3.7/site-packages/boltons/funcutils.py | nguyentranhoan/uit-mobile | 8546312b01373d94cf00c64f7eacb769e0f4ccce | [
"BSD-3-Clause"
] | null | null | null | # -*- coding: utf-8 -*-
"""Python's built-in :mod:`functools` module builds several useful
utilities on top of Python's first-class function
support. ``funcutils`` generally stays in the same vein, adding to and
correcting Python's standard metaprogramming facilities.
"""
from __future__ import print_function
import sys
import re
import inspect
import functools
import itertools
from types import MethodType, FunctionType
# Probe for py2's xrange to decide which major version we are running under.
try:
    xrange
    make_method = MethodType
except NameError:
    # Python 3
    make_method = lambda desc, obj, obj_type: MethodType(desc, obj)
    basestring = (str, bytes)  # Python 3 compat
    _IS_PY2 = False
else:
    _IS_PY2 = True


# inspect.iscoroutinefunction only exists on 3.5+; fall back to "never a coroutine".
try:
    _inspect_iscoroutinefunction = inspect.iscoroutinefunction
except AttributeError:
    # Python 3.4
    _inspect_iscoroutinefunction = lambda func: False


# Prefer a real sentinel (repr-friendly, pickle-safe) when typeutils is available.
try:
    from boltons.typeutils import make_sentinel
    NO_DEFAULT = make_sentinel(var_name='NO_DEFAULT')
except ImportError:
    NO_DEFAULT = object()


_IS_PY35 = sys.version_info >= (3, 5)
if not _IS_PY35:
    # py35+ wants you to use signature instead, but
    # inspect_formatargspec is way simpler for what it is. Copied the
    # vendoring approach from alembic:
    # https://github.com/sqlalchemy/alembic/blob/4cdad6aec32b4b5573a2009cc356cb4b144bd359/alembic/util/compat.py#L92
    from inspect import formatargspec as inspect_formatargspec
else:
    from inspect import formatannotation

    def inspect_formatargspec(
            args, varargs=None, varkw=None, defaults=None,
            kwonlyargs=(), kwonlydefaults={}, annotations={},
            formatarg=str,
            formatvarargs=lambda name: '*' + name,
            formatvarkw=lambda name: '**' + name,
            formatvalue=lambda value: '=' + repr(value),
            formatreturns=lambda text: ' -> ' + text,
            formatannotation=formatannotation):
        """Copy formatargspec from python 3.7 standard library.

        Python 3 has deprecated formatargspec and requested that Signature
        be used instead, however this requires a full reimplementation
        of formatargspec() in terms of creating Parameter objects and such.
        Instead of introducing all the object-creation overhead and having
        to reinvent from scratch, just copy their compatibility routine.
        """

        def formatargandannotation(arg):
            # render "name" or "name: annotation"
            result = formatarg(arg)
            if arg in annotations:
                result += ': ' + formatannotation(annotations[arg])
            return result

        specs = []
        if defaults:
            # index of the first positional arg that has a default value
            firstdefault = len(args) - len(defaults)
        for i, arg in enumerate(args):
            spec = formatargandannotation(arg)
            if defaults and i >= firstdefault:
                spec = spec + formatvalue(defaults[i - firstdefault])
            specs.append(spec)

        if varargs is not None:
            specs.append(formatvarargs(formatargandannotation(varargs)))
        else:
            if kwonlyargs:
                # bare "*" separator is required when kw-only args exist but no *args
                specs.append('*')

        if kwonlyargs:
            for kwonlyarg in kwonlyargs:
                spec = formatargandannotation(kwonlyarg)
                if kwonlydefaults and kwonlyarg in kwonlydefaults:
                    spec += formatvalue(kwonlydefaults[kwonlyarg])
                specs.append(spec)

        if varkw is not None:
            specs.append(formatvarkw(formatargandannotation(varkw)))

        result = '(' + ', '.join(specs) + ')'
        if 'return' in annotations:
            result += formatreturns(formatannotation(annotations['return']))
        return result
def get_module_callables(mod, ignore=None):
    """Returns two maps of (*types*, *funcs*) from *mod*, optionally
    ignoring based on the :class:`bool` return value of the *ignore*
    callable. *mod* can be a string name of a module in
    :data:`sys.modules` or the module instance itself.
    """
    if isinstance(mod, basestring):
        mod = sys.modules[mod]

    types, funcs = {}, {}
    for attr_name in dir(mod):
        # let the caller filter names out before we even touch them
        if ignore and ignore(attr_name):
            continue
        try:
            attr = getattr(mod, attr_name)
        except Exception:
            # some attributes raise on access (e.g. misbehaving properties)
            continue
        try:
            defined_in = attr.__module__
        except AttributeError:
            continue
        # only collect objects actually defined in this module, not re-exports
        if defined_in != mod.__name__:
            continue
        if isinstance(attr, type):
            bucket = types
        elif callable(attr):
            bucket = funcs
        else:
            continue
        bucket[attr_name] = attr
    return types, funcs
def mro_items(type_obj):
    """Takes a type and returns an iterator over all class variables
    throughout the type hierarchy (respecting the MRO).

    >>> sorted(set([k for k, v in mro_items(int) if not k.startswith('__') and 'bytes' not in k and not callable(v)]))
    ['denominator', 'imag', 'numerator', 'real']
    """
    # TODO: handle slots?
    # walk the MRO in order, yielding each class's own namespace entries
    for klass in type_obj.__mro__:
        for pair in klass.__dict__.items():
            yield pair
def dir_dict(obj, raise_exc=False):
    """Return a dictionary of attribute names to values for a given
    object. Unlike ``obj.__dict__``, this function returns all
    attributes on the object, including ones on parent classes.
    """
    # TODO: separate function for handling descriptors on types?
    result = {}
    for attr_name in dir(obj):
        try:
            value = getattr(obj, attr_name)
        except Exception:
            # attribute access can raise (broken descriptors, etc.);
            # silently skip unless the caller asked us to propagate
            if raise_exc:
                raise
        else:
            result[attr_name] = value
    return result
def copy_function(orig, copy_dict=True):
    """Create and return a shallow duplicate of a function, sharing its
    code object, globals, closure, and defaults.

    >>> func = lambda: func
    >>> func() is func
    True
    >>> func_copy = copy_function(func)
    >>> func_copy() is func
    True
    >>> func_copy is not func
    True

    Args:
        orig (function): The function to be copied. Must be a
            function, not just any method or callable.
        copy_dict (bool): Also copy any attributes set on the function
            instance. Defaults to ``True``.
    """
    kwargs = {
        'name': orig.__name__,
        'argdefs': getattr(orig, "__defaults__", None),
        'closure': getattr(orig, "__closure__", None),
    }
    duplicate = FunctionType(orig.__code__, orig.__globals__, **kwargs)
    if copy_dict:
        # carry over any custom attributes set on the original
        duplicate.__dict__.update(orig.__dict__)
    return duplicate
def partial_ordering(cls):
    """Class decorator, similar to :func:`functools.total_ordering`,
    except it is used to define `partial orderings`_ (i.e., it is
    possible that *x* is neither greater than, equal to, or less than
    *y*). It assumes the presence of the ``__le__()`` and ``__ge__()``
    method, but nothing else. It will not override any existing
    additional comparison methods.

    .. _partial orderings: https://en.wikipedia.org/wiki/Partially_ordered_set

    >>> @partial_ordering
    ... class MySet(set):
    ...     def __le__(self, other):
    ...         return self.issubset(other)
    ...     def __ge__(self, other):
    ...         return self.issuperset(other)
    ...
    >>> a = MySet([1,2,3])
    >>> b = MySet([1,2])
    >>> c = MySet([1,2,4])
    >>> b < a
    True
    >>> b > a
    False
    >>> b < c
    True
    >>> a < c
    False
    >>> c > a
    False
    """
    # each derived comparison is expressed in terms of __le__/__ge__ only
    def __lt__(self, other):
        return self <= other and not self >= other

    def __gt__(self, other):
        return self >= other and not self <= other

    def __eq__(self, other):
        return self >= other and self <= other

    derived = (('__lt__', __lt__), ('__gt__', __gt__), ('__eq__', __eq__))
    for dunder_name, method in derived:
        # only fill the gap; never clobber an existing comparison method
        if not hasattr(cls, dunder_name):
            setattr(cls, dunder_name, method)
    return cls
class InstancePartial(functools.partial):
    """:class:`functools.partial` is a huge convenience for anyone
    working with Python's great first-class functions. It allows
    developers to curry arguments and incrementally create simpler
    callables for a variety of use cases.

    Unfortunately there's one big gap in its usefulness:
    methods. Partials just don't get bound as methods and
    automatically handed a reference to ``self``. The
    ``InstancePartial`` type remedies this by inheriting from
    :class:`functools.partial` and implementing the necessary
    descriptor protocol. There are no other differences in
    implementation or usage. :class:`CachedInstancePartial`, below,
    has the same ability, but is slightly more efficient.

    """

    def __get__(self, obj, obj_type):
        # descriptor hook: bind this partial as a method on each access
        return make_method(self, obj, obj_type)
class CachedInstancePartial(functools.partial):
    """The ``CachedInstancePartial`` is virtually the same as
    :class:`InstancePartial`, adding support for method-usage to
    :class:`functools.partial`, except that upon first access, it
    caches the bound method on the associated object, speeding it up
    for future accesses, and bringing the method call overhead to
    about the same as non-``partial`` methods.

    See the :class:`InstancePartial` docstring for more details.
    """

    def __get__(self, obj, obj_type):
        # These assignments could've been in __init__, but there was
        # no simple way to do it without breaking one of PyPy or Py3.
        self.__name__ = None
        self.__doc__ = self.func.__doc__
        self.__module__ = self.func.__module__

        # discover the attribute name this partial is bound under by
        # scanning the class hierarchy for ourselves
        name = self.__name__
        if name is None:
            for k, v in mro_items(obj_type):
                if v is self:
                    self.__name__ = name = k
        if obj is None:
            # accessed on the class: just bind without caching
            return make_method(self, obj, obj_type)
        try:
            # since this is a data descriptor, this block
            # is probably only hit once (per object)
            return obj.__dict__[name]
        except KeyError:
            # cache the bound method on the instance for fast future access
            obj.__dict__[name] = ret = make_method(self, obj, obj_type)
            return ret
# Convenience alias: the cached variant is the recommended default partial type.
partial = CachedInstancePartial
def format_invocation(name='', args=(), kwargs=None):
    """Given a name, positional arguments, and keyword arguments, format
    a basic Python-style function call.

    >>> print(format_invocation('func', args=(1, 2), kwargs={'c': 3}))
    func(1, 2, c=3)
    >>> print(format_invocation('a_func', args=(1,)))
    a_func(1)
    >>> print(format_invocation('kw_func', kwargs=[('a', 1), ('b', 2)]))
    kw_func(a=1, b=2)
    """
    if not kwargs:
        kwargs = {}
    # kwargs may be a mapping or an already-ordered sequence of pairs
    pairs = kwargs.items() if isinstance(kwargs, dict) else kwargs

    parts = [repr(a) for a in args]
    parts.extend('%s=%r' % pair for pair in pairs)
    return '%s(%s)' % (name, ', '.join(parts))
def format_exp_repr(obj, pos_names, req_names=None, opt_names=None, opt_key=None):
    """Render an expression-style repr of an object, based on attribute
    names, which are assumed to line up with arguments to an initializer.

    >>> class Flag(object):
    ...    def __init__(self, length, width, depth=None):
    ...        self.length = length
    ...        self.width = width
    ...        self.depth = depth
    ...

    That's our Flag object, here are some example reprs for it:

    >>> flag = Flag(5, 10)
    >>> print(format_exp_repr(flag, ['length', 'width'], [], ['depth']))
    Flag(5, 10)
    >>> flag2 = Flag(5, 15, 2)
    >>> print(format_exp_repr(flag2, ['length'], ['width', 'depth']))
    Flag(5, width=15, depth=2)

    By picking the pos_names, req_names, opt_names, and opt_key, you
    can fine-tune how you want the repr to look.

    Args:
       obj (object): The object whose type name will be used and
          attributes will be checked
       pos_names (list): Required list of attribute names which will be
          rendered as positional arguments in the output repr.
       req_names (list): List of attribute names which will always
          appear in the keyword arguments in the output repr. Defaults to None.
       opt_names (list): List of attribute names which may appear in
          the keyword arguments in the output repr, provided they pass
          the *opt_key* check. Defaults to None.
       opt_key (callable): A function or callable which checks whether
          an opt_name should be in the repr. Defaults to a
          ``None``-check.

    """
    type_name = obj.__class__.__name__
    req_names = req_names or []
    opt_names = opt_names or []
    if opt_key is None:
        # by default, optional attrs are omitted when they are None
        opt_key = lambda v: v is None
    assert callable(opt_key)

    # dedupe req + opt names while preserving their order
    seen = set()
    kw_names = []
    for attr_name in req_names + opt_names:
        if attr_name not in seen:
            seen.add(attr_name)
            kw_names.append(attr_name)

    pos_vals = [getattr(obj, attr_name, None) for attr_name in pos_names]
    kw_pairs = []
    for attr_name in kw_names:
        val = getattr(obj, attr_name, None)
        # optional attrs are skipped when opt_key says so
        if attr_name in opt_names and opt_key(val):
            continue
        kw_pairs.append((attr_name, val))

    return format_invocation(type_name, pos_vals, kw_pairs)
def format_nonexp_repr(obj, req_names=None, opt_names=None, opt_key=None):
    """Format a non-expression-style repr

    Some object reprs look like object instantiation, e.g., App(r=[], mw=[]).

    This makes sense for smaller, lower-level objects whose state
    roundtrips. But a lot of objects contain values that don't
    roundtrip, like types and functions.

    For those objects, there is the non-expression style repr, which
    mimic's Python's default style to make a repr like so:

    >>> class Flag(object):
    ...    def __init__(self, length, width, depth=None):
    ...        self.length = length
    ...        self.width = width
    ...        self.depth = depth
    ...
    >>> flag = Flag(5, 10)
    >>> print(format_nonexp_repr(flag, ['length', 'width'], ['depth']))
    <Flag length=5 width=10>

    If no attributes are specified or set, utilizes the id, not unlike Python's
    built-in behavior.

    >>> print(format_nonexp_repr(flag))
    <Flag id=...>
    """
    type_name = obj.__class__.__name__
    req_names = req_names or []
    opt_names = opt_names or []
    if opt_key is None:
        # by default, optional attrs are omitted when they are None
        opt_key = lambda v: v is None
    assert callable(opt_key)

    # dedupe req + opt names while preserving their order
    seen = set()
    ordered_names = []
    for attr_name in req_names + opt_names:
        if attr_name not in seen:
            seen.add(attr_name)
            ordered_names.append(attr_name)

    labels = []
    for attr_name in ordered_names:
        val = getattr(obj, attr_name, None)
        if attr_name in opt_names and opt_key(val):
            continue
        labels.append('%s=%r' % (attr_name, val))
    if not labels:
        # nothing to show: fall back to the id, like Python's default repr
        labels = ['id=%s' % id(obj)]
    return '<%s %s>' % (type_name, ' '.join(labels))
# # #
# # # Function builder
# # #
def wraps(func, injected=None, expected=None, **kw):
    """Modeled after the built-in :func:`functools.wraps`, this function is
    used to make your decorator's wrapper functions reflect the
    wrapped function's:

      * Name
      * Documentation
      * Module
      * Signature

    The built-in :func:`functools.wraps` copies the first three, but
    does not copy the signature. This version of ``wraps`` can copy
    the inner function's signature exactly, allowing seamless usage
    and :mod:`introspection <inspect>`. Usage is identical to the
    built-in version::

        >>> from boltons.funcutils import wraps
        >>>
        >>> def print_return(func):
        ...     @wraps(func)
        ...     def wrapper(*args, **kwargs):
        ...         ret = func(*args, **kwargs)
        ...         print(ret)
        ...         return ret
        ...     return wrapper
        ...
        >>> @print_return
        ... def example():
        ...     '''docstring'''
        ...     return 'example return value'
        >>>
        >>> val = example()
        example return value
        >>> example.__name__
        'example'
        >>> example.__doc__
        'docstring'

    In addition, the boltons version of wraps supports modifying the
    outer signature based on the inner signature. By passing a list of
    *injected* argument names, those arguments will be removed from
    the outer wrapper's signature, allowing your decorator to provide
    arguments that aren't passed in.

    Args:

        func (function): The callable whose attributes are to be copied.
        injected (list): An optional list of argument names which
            should not appear in the new wrapper's signature.
        expected (list): An optional list of argument names (or (name,
            default) pairs) representing new arguments introduced by
            the wrapper (the opposite of *injected*). See
            :meth:`FunctionBuilder.add_arg()` for more details.
        update_dict (bool): Whether to copy other, non-standard
            attributes of *func* over to the wrapper. Defaults to True.
        inject_to_varkw (bool): Ignore missing arguments when a
            ``**kwargs``-type catch-all is present. Defaults to True.

    For more in-depth wrapping of functions, see the
    :class:`FunctionBuilder` type, on which wraps was built.
    """
    # normalize *injected* to a list of argument names
    if injected is None:
        injected = []
    elif isinstance(injected, basestring):
        injected = [injected]
    else:
        injected = list(injected)

    expected_items = _parse_wraps_expected(expected)

    if isinstance(func, (classmethod, staticmethod)):
        raise TypeError('wraps does not support wrapping classmethods and'
                        ' staticmethods, change the order of wrapping to'
                        ' wrap the underlying function: %r'
                        % (getattr(func, '__func__', None),))

    update_dict = kw.pop('update_dict', True)
    inject_to_varkw = kw.pop('inject_to_varkw', True)
    if kw:
        raise TypeError('unexpected kwargs: %r' % kw.keys())

    # build the wrapper's signature from the wrapped function's,
    # minus injected args, plus expected args
    fb = FunctionBuilder.from_func(func)
    for arg in injected:
        try:
            fb.remove_arg(arg)
        except MissingArgument:
            if inject_to_varkw and fb.varkw is not None:
                continue  # keyword arg will be caught by the varkw
            raise

    for arg, default in expected_items:
        fb.add_arg(arg, default)  # may raise ExistingArgument

    # the generated outer function simply forwards to the decorator's wrapper
    if fb.is_async:
        fb.body = 'return await _call(%s)' % fb.get_invocation_str()
    else:
        fb.body = 'return _call(%s)' % fb.get_invocation_str()

    def wrapper_wrapper(wrapper_func):
        execdict = dict(_call=wrapper_func, _func=func)
        fully_wrapped = fb.get_func(execdict, with_dict=update_dict)
        fully_wrapped.__wrapped__ = func  # ref to the original function (#115)

        return fully_wrapped

    return wrapper_wrapper
def _parse_wraps_expected(expected):
# expected takes a pretty powerful argument, it's processed
# here. admittedly this would be less trouble if I relied on
# OrderedDict (there's an impl of that in the commit history if
# you look
if expected is None:
expected = []
elif isinstance(expected, basestring):
expected = [(expected, NO_DEFAULT)]
expected_items = []
try:
expected_iter = iter(expected)
except TypeError as e:
raise ValueError('"expected" takes string name, sequence of string names,'
' iterable of (name, default) pairs, or a mapping of '
' {name: default}, not %r (got: %r)' % (expected, e))
for argname in expected_iter:
if isinstance(argname, basestring):
# dict keys and bare strings
try:
default = expected[argname]
except TypeError:
default = NO_DEFAULT
else:
# pairs
try:
argname, default = argname
except (TypeError, ValueError):
raise ValueError('"expected" takes string name, sequence of string names,'
' iterable of (name, default) pairs, or a mapping of '
' {name: default}, not %r')
if not isinstance(argname, basestring):
raise ValueError('all "expected" argnames must be strings, not %r' % (argname,))
expected_items.append((argname, default))
return expected_items
class FunctionBuilder(object):
"""The FunctionBuilder type provides an interface for programmatically
creating new functions, either based on existing functions or from
scratch.
Values are passed in at construction or set as attributes on the
instance. For creating a new function based of an existing one,
see the :meth:`~FunctionBuilder.from_func` classmethod. At any
point, :meth:`~FunctionBuilder.get_func` can be called to get a
newly compiled function, based on the values configured.
>>> fb = FunctionBuilder('return_five', doc='returns the integer 5',
... body='return 5')
>>> f = fb.get_func()
>>> f()
5
>>> fb.varkw = 'kw'
>>> f_kw = fb.get_func()
>>> f_kw(ignored_arg='ignored_val')
5
Note that function signatures themselves changed quite a bit in
Python 3, so several arguments are only applicable to
FunctionBuilder in Python 3. Except for *name*, all arguments to
the constructor are keyword arguments.
Args:
name (str): Name of the function.
doc (str): `Docstring`_ for the function, defaults to empty.
module (str): Name of the module from which this function was
imported. Defaults to None.
body (str): String version of the code representing the body
of the function. Defaults to ``'pass'``, which will result
in a function which does nothing and returns ``None``.
args (list): List of argument names, defaults to empty list,
denoting no arguments.
varargs (str): Name of the catch-all variable for positional
arguments. E.g., "args" if the resultant function is to have
``*args`` in the signature. Defaults to None.
varkw (str): Name of the catch-all variable for keyword
arguments. E.g., "kwargs" if the resultant function is to have
``**kwargs`` in the signature. Defaults to None.
defaults (tuple): A tuple containing default argument values for
those arguments that have defaults.
kwonlyargs (list): Argument names which are only valid as
keyword arguments. **Python 3 only.**
kwonlydefaults (dict): A mapping, same as normal *defaults*,
but only for the *kwonlyargs*. **Python 3 only.**
annotations (dict): Mapping of type hints and so
forth. **Python 3 only.**
filename (str): The filename that will appear in
tracebacks. Defaults to "boltons.funcutils.FunctionBuilder".
indent (int): Number of spaces with which to indent the
function *body*. Values less than 1 will result in an error.
dict (dict): Any other attributes which should be added to the
functions compiled with this FunctionBuilder.
All of these arguments are also made available as attributes which
can be mutated as necessary.
.. _Docstring: https://en.wikipedia.org/wiki/Docstring#Python
"""
if _IS_PY2:
_argspec_defaults = {'args': list,
'varargs': lambda: None,
'varkw': lambda: None,
'defaults': lambda: None}
@classmethod
def _argspec_to_dict(cls, f):
args, varargs, varkw, defaults = inspect.getargspec(f)
return {'args': args,
'varargs': varargs,
'varkw': varkw,
'defaults': defaults}
else:
_argspec_defaults = {'args': list,
'varargs': lambda: None,
'varkw': lambda: None,
'defaults': lambda: None,
'kwonlyargs': list,
'kwonlydefaults': dict,
'annotations': dict}
@classmethod
def _argspec_to_dict(cls, f):
argspec = inspect.getfullargspec(f)
return dict((attr, getattr(argspec, attr))
for attr in cls._argspec_defaults)
_defaults = {'doc': str,
'dict': dict,
'is_async': lambda: False,
'module': lambda: None,
'body': lambda: 'pass',
'indent': lambda: 4,
"annotations": dict,
'filename': lambda: 'boltons.funcutils.FunctionBuilder'}
_defaults.update(_argspec_defaults)
_compile_count = itertools.count()
def __init__(self, name, **kw):
self.name = name
for a, default_factory in self._defaults.items():
val = kw.pop(a, None)
if val is None:
val = default_factory()
setattr(self, a, val)
if kw:
raise TypeError('unexpected kwargs: %r' % kw.keys())
return
# def get_argspec(self): # TODO
if _IS_PY2:
def get_sig_str(self, with_annotations=True):
"""Return function signature as a string.
with_annotations is ignored on Python 2. On Python 3 signature
will omit annotations if it is set to False.
"""
return inspect_formatargspec(self.args, self.varargs,
self.varkw, [])
def get_invocation_str(self):
return inspect_formatargspec(self.args, self.varargs,
self.varkw, [])[1:-1]
else:
def get_sig_str(self, with_annotations=True):
"""Return function signature as a string.
with_annotations is ignored on Python 2. On Python 3 signature
will omit annotations if it is set to False.
"""
if with_annotations:
annotations = self.annotations
else:
annotations = {}
return inspect_formatargspec(self.args,
self.varargs,
self.varkw,
[],
self.kwonlyargs,
{},
annotations)
_KWONLY_MARKER = re.compile(r"""
\* # a star
\s* # followed by any amount of whitespace
, # followed by a comma
\s* # followed by any amount of whitespace
""", re.VERBOSE)
def get_invocation_str(self):
kwonly_pairs = None
formatters = {}
if self.kwonlyargs:
kwonly_pairs = dict((arg, arg)
for arg in self.kwonlyargs)
formatters['formatvalue'] = lambda value: '=' + value
sig = inspect_formatargspec(self.args,
self.varargs,
self.varkw,
[],
kwonly_pairs,
kwonly_pairs,
{},
**formatters)
sig = self._KWONLY_MARKER.sub('', sig)
return sig[1:-1]
@classmethod
def from_func(cls, func):
"""Create a new FunctionBuilder instance based on an existing
function. The original function will not be stored or
modified.
"""
# TODO: copy_body? gonna need a good signature regex.
# TODO: might worry about __closure__?
if not callable(func):
raise TypeError('expected callable object, not %r' % (func,))
kwargs = {'name': func.__name__,
'doc': func.__doc__,
'module': func.__module__,
'annotations': getattr(func, "__annotations__", {}),
'dict': getattr(func, '__dict__', {})}
kwargs.update(cls._argspec_to_dict(func))
if _inspect_iscoroutinefunction(func):
kwargs['is_async'] = True
return cls(**kwargs)
def get_func(self, execdict=None, add_source=True, with_dict=True):
"""Compile and return a new function based on the current values of
the FunctionBuilder.
Args:
execdict (dict): The dictionary representing the scope in
which the compilation should take place. Defaults to an empty
dict.
add_source (bool): Whether to add the source used to a
special ``__source__`` attribute on the resulting
function. Defaults to True.
with_dict (bool): Add any custom attributes, if
applicable. Defaults to True.
To see an example of usage, see the implementation of
:func:`~boltons.funcutils.wraps`.
"""
execdict = execdict or {}
body = self.body or self._default_body
tmpl = 'def {name}{sig_str}:'
tmpl += '\n{body}'
if self.is_async:
tmpl = 'async ' + tmpl
body = _indent(self.body, ' ' * self.indent)
name = self.name.replace('<', '_').replace('>', '_') # lambdas
src = tmpl.format(name=name, sig_str=self.get_sig_str(with_annotations=False),
doc=self.doc, body=body)
self._compile(src, execdict)
func = execdict[name]
func.__name__ = self.name
func.__doc__ = self.doc
func.__defaults__ = self.defaults
if not _IS_PY2:
func.__kwdefaults__ = self.kwonlydefaults
func.__annotations__ = self.annotations
if with_dict:
func.__dict__.update(self.dict)
func.__module__ = self.module
# TODO: caller module fallback?
if add_source:
func.__source__ = src
return func
def get_defaults_dict(self):
"""Get a dictionary of function arguments with defaults and the
respective values.
"""
ret = dict(reversed(list(zip(reversed(self.args),
reversed(self.defaults or [])))))
kwonlydefaults = getattr(self, 'kwonlydefaults', None)
if kwonlydefaults:
ret.update(kwonlydefaults)
return ret
def get_arg_names(self, only_required=False):
arg_names = tuple(self.args) + tuple(getattr(self, 'kwonlyargs', ()))
if only_required:
defaults_dict = self.get_defaults_dict()
arg_names = tuple([an for an in arg_names if an not in defaults_dict])
return arg_names
if _IS_PY2:
def add_arg(self, arg_name, default=NO_DEFAULT):
"Add an argument with optional *default* (defaults to ``funcutils.NO_DEFAULT``)."
if arg_name in self.args:
raise ExistingArgument('arg %r already in func %s arg list' % (arg_name, self.name))
self.args.append(arg_name)
if default is not NO_DEFAULT:
self.defaults = (self.defaults or ()) + (default,)
return
else:
def add_arg(self, arg_name, default=NO_DEFAULT, kwonly=False):
"""Add an argument with optional *default* (defaults to
``funcutils.NO_DEFAULT``). Pass *kwonly=True* to add a
keyword-only argument
"""
if arg_name in self.args:
raise ExistingArgument('arg %r already in func %s arg list' % (arg_name, self.name))
if arg_name in self.kwonlyargs:
raise ExistingArgument('arg %r already in func %s kwonly arg list' % (arg_name, self.name))
if not kwonly:
self.args.append(arg_name)
if default is not NO_DEFAULT:
self.defaults = (self.defaults or ()) + (default,)
else:
self.kwonlyargs.append(arg_name)
if default is not NO_DEFAULT:
self.kwonlydefaults[arg_name] = default
return
    def remove_arg(self, arg_name):
        """Remove an argument from this FunctionBuilder's argument list. The
        resulting function will have one less argument per call to
        this function.
        Args:
            arg_name (str): The name of the argument to remove.
        Raises a :exc:`ValueError` if the argument is not present.
        """
        args = self.args
        d_dict = self.get_defaults_dict()
        try:
            args.remove(arg_name)
        except ValueError:
            # not a positional arg; try the keyword-only list (py3)
            try:
                self.kwonlyargs.remove(arg_name)
            except (AttributeError, ValueError):
                # py2, or py3 and missing from both
                exc = MissingArgument('arg %r not found in %s argument list:'
                                      ' %r' % (arg_name, self.name, args))
                exc.arg_name = arg_name
                raise exc
            else:
                # drop its default too, if it had one
                self.kwonlydefaults.pop(arg_name, None)
        else:
            # removed a positional arg: rebuild the positional defaults
            # tuple from the surviving args that still have defaults
            d_dict.pop(arg_name, None)
            self.defaults = tuple([d_dict[a] for a in args if a in d_dict])
        return
def _compile(self, src, execdict):
filename = ('<%s-%d>'
% (self.filename, next(self._compile_count),))
try:
code = compile(src, filename, 'single')
exec(code, execdict)
except Exception:
raise
return execdict
class MissingArgument(ValueError):
    """Raised when an argument to remove is not in the argument list."""
    pass
class ExistingArgument(ValueError):
    """Raised when adding an argument name that is already present."""
    pass
def _indent(text, margin, newline='\n', key=bool):
    """Prefix each line of *text* with *margin*; lines for which *key(line)*
    is falsy (blank lines, by default) are left untouched.

    Based on boltons.strutils.indent.
    """
    out = []
    for line in text.splitlines():
        out.append(margin + line if key(line) else line)
    return newline.join(out)
try:
from functools import total_ordering # 2.7+
except ImportError:
# python 2.6
def total_ordering(cls):
"""Class decorator that fills in missing comparators/ordering
methods. Backport of :func:`functools.total_ordering` to work
with Python 2.6.
Code from http://code.activestate.com/recipes/576685/
"""
convert = {
'__lt__': [
('__gt__',
lambda self, other: not (self < other or self == other)),
('__le__',
lambda self, other: self < other or self == other),
('__ge__',
lambda self, other: not self < other)],
'__le__': [
('__ge__',
lambda self, other: not self <= other or self == other),
('__lt__',
lambda self, other: self <= other and not self == other),
('__gt__',
lambda self, other: not self <= other)],
'__gt__': [
('__lt__',
lambda self, other: not (self > other or self == other)),
('__ge__',
lambda self, other: self > other or self == other),
('__le__',
lambda self, other: not self > other)],
'__ge__': [
('__le__',
lambda self, other: (not self >= other) or self == other),
('__gt__',
lambda self, other: self >= other and not self == other),
('__lt__',
lambda self, other: not self >= other)]
}
roots = set(dir(cls)) & set(convert)
if not roots:
raise ValueError('must define at least one ordering operation:'
' < > <= >=')
root = max(roots) # prefer __lt__ to __le__ to __gt__ to __ge__
for opname, opfunc in convert[root]:
if opname not in roots:
opfunc.__name__ = opname
opfunc.__doc__ = getattr(int, opname).__doc__
setattr(cls, opname, opfunc)
return cls
# end funcutils.py
| 36.539315 | 118 | 0.585648 |
from __future__ import print_function
import sys
import re
import inspect
import functools
import itertools
from types import MethodType, FunctionType
try:
xrange
make_method = MethodType
except NameError:
make_method = lambda desc, obj, obj_type: MethodType(desc, obj)
basestring = (str, bytes)
_IS_PY2 = False
else:
_IS_PY2 = True
try:
_inspect_iscoroutinefunction = inspect.iscoroutinefunction
except AttributeError:
_inspect_iscoroutinefunction = lambda func: False
try:
from boltons.typeutils import make_sentinel
NO_DEFAULT = make_sentinel(var_name='NO_DEFAULT')
except ImportError:
NO_DEFAULT = object()
_IS_PY35 = sys.version_info >= (3, 5)
if not _IS_PY35:
from inspect import formatargspec as inspect_formatargspec
else:
from inspect import formatannotation
def inspect_formatargspec(
args, varargs=None, varkw=None, defaults=None,
kwonlyargs=(), kwonlydefaults={}, annotations={},
formatarg=str,
formatvarargs=lambda name: '*' + name,
formatvarkw=lambda name: '**' + name,
formatvalue=lambda value: '=' + repr(value),
formatreturns=lambda text: ' -> ' + text,
formatannotation=formatannotation):
"""Copy formatargspec from python 3.7 standard library.
Python 3 has deprecated formatargspec and requested that Signature
be used instead, however this requires a full reimplementation
of formatargspec() in terms of creating Parameter objects and such.
Instead of introducing all the object-creation overhead and having
to reinvent from scratch, just copy their compatibility routine.
"""
def formatargandannotation(arg):
result = formatarg(arg)
if arg in annotations:
result += ': ' + formatannotation(annotations[arg])
return result
specs = []
if defaults:
firstdefault = len(args) - len(defaults)
for i, arg in enumerate(args):
spec = formatargandannotation(arg)
if defaults and i >= firstdefault:
spec = spec + formatvalue(defaults[i - firstdefault])
specs.append(spec)
if varargs is not None:
specs.append(formatvarargs(formatargandannotation(varargs)))
else:
if kwonlyargs:
specs.append('*')
if kwonlyargs:
for kwonlyarg in kwonlyargs:
spec = formatargandannotation(kwonlyarg)
if kwonlydefaults and kwonlyarg in kwonlydefaults:
spec += formatvalue(kwonlydefaults[kwonlyarg])
specs.append(spec)
if varkw is not None:
specs.append(formatvarkw(formatargandannotation(varkw)))
result = '(' + ', '.join(specs) + ')'
if 'return' in annotations:
result += formatreturns(formatannotation(annotations['return']))
return result
def get_module_callables(mod, ignore=None):
    """Return two dicts, ``(types, funcs)``, of the callables defined in *mod*.

    *mod* may be a module object or the name of a module already present
    in ``sys.modules``. *ignore*, if given, is a predicate on attribute
    names; matching names are skipped.
    """
    if isinstance(mod, basestring):
        mod = sys.modules[mod]
    types, funcs = {}, {}
    for attr_name in dir(mod):
        if ignore and ignore(attr_name):
            continue
        try:
            attr = getattr(mod, attr_name)
        except Exception:
            continue
        try:
            attr_mod_name = attr.__module__
        except AttributeError:
            continue
        # keep only objects actually defined in this module, not imports
        if attr_mod_name != mod.__name__:
            continue
        if isinstance(attr, type):
            types[attr_name] = attr
        elif callable(attr):
            funcs[attr_name] = attr
    return types, funcs
def mro_items(type_obj):
    """Yield (name, value) pairs from every class dict along *type_obj*'s MRO."""
    for cls in type_obj.__mro__:
        for pair in cls.__dict__.items():
            yield pair
def dir_dict(obj, raise_exc=False):
    """Return ``{name: value}`` for every name in ``dir(obj)``.

    Attributes whose lookup raises are skipped, unless *raise_exc* is
    True, in which case the exception propagates.
    """
    ret = {}
    for name in dir(obj):
        try:
            value = getattr(obj, name)
        except Exception:
            if raise_exc:
                raise
        else:
            ret[name] = value
    return ret
def copy_function(orig, copy_dict=True):
    """Return a shallow copy of function *orig*.

    The copy shares code, globals, defaults, and closure with the
    original; when *copy_dict* is True, ``__dict__`` attributes are
    copied over as well.
    """
    defaults = getattr(orig, "__defaults__", None)
    closure = getattr(orig, "__closure__", None)
    dup = FunctionType(orig.__code__, orig.__globals__,
                       name=orig.__name__, argdefs=defaults,
                       closure=closure)
    if copy_dict:
        dup.__dict__.update(orig.__dict__)
    return dup
def partial_ordering(cls):
    """Class decorator deriving ``__lt__``/``__gt__``/``__eq__`` from
    ``__le__`` and ``__ge__``, for any of the three not already present
    on *cls*.
    """
    def derived_lt(self, other):
        return self <= other and not self >= other

    def derived_gt(self, other):
        return self >= other and not self <= other

    def derived_eq(self, other):
        return self >= other and self <= other

    for name, impl in (('__lt__', derived_lt),
                       ('__gt__', derived_gt),
                       ('__eq__', derived_eq)):
        if not hasattr(cls, name):
            setattr(cls, name, impl)
    return cls
class InstancePartial(functools.partial):
    """A :class:`functools.partial` that also acts as a descriptor, so it
    binds like a plain function when stored as a class attribute.
    """
    def __get__(self, obj, obj_type):
        # delegate binding to the version-appropriate MethodType shim
        return make_method(self, obj, obj_type)
class CachedInstancePartial(functools.partial):
    """Like :class:`functools.partial`, but binds as a method on instance
    access and caches the bound method in the instance's ``__dict__``.
    """
    def __get__(self, obj, obj_type):
        # no simple way to do it without breaking one of PyPy or Py3.
        self.__name__ = None
        self.__doc__ = self.func.__doc__
        self.__module__ = self.func.__module__
        # discover the attribute name this partial is stored under by
        # scanning the owner's MRO for this exact object
        name = self.__name__
        if name is None:
            for k, v in mro_items(obj_type):
                if v is self:
                    self.__name__ = name = k
        if obj is None:
            # class-level access: no instance to cache on
            return make_method(self, obj, obj_type)
        try:
            # since this is a data descriptor, this block
            # is probably only hit once (per object)
            return obj.__dict__[name]
        except KeyError:
            obj.__dict__[name] = ret = make_method(self, obj, obj_type)
            return ret
partial = CachedInstancePartial
def format_invocation(name='', args=(), kwargs=None):
    """Render a call expression like ``name(1, 'a', k=2)``.

    *kwargs* may be a mapping or an iterable of (key, value) pairs.
    """
    if not kwargs:
        kwargs = {}
    pairs = kwargs.items() if isinstance(kwargs, dict) else kwargs
    parts = [repr(a) for a in args]
    parts.extend('%s=%r' % (k, v) for k, v in pairs)
    return '%s(%s)' % (name, ', '.join(parts))
def format_exp_repr(obj, pos_names, req_names=None, opt_names=None, opt_key=None):
    """Render an expression-style repr, e.g. ``Point(1, 2, label='a')``.

    *pos_names* become positional arguments; *req_names* always appear
    as keywords; *opt_names* appear only when *opt_key* (default:
    "value is not None") considers the value worth showing.
    """
    class_name = obj.__class__.__name__
    req_names = req_names or []
    opt_names = opt_names or []
    seen = set()
    kw_names = []
    for name in req_names + opt_names:
        if name not in seen:
            seen.add(name)
            kw_names.append(name)
    if opt_key is None:
        opt_key = lambda v: v is None
    assert callable(opt_key)
    pos_vals = [getattr(obj, name, None) for name in pos_names]
    kw_items = []
    for name in kw_names:
        val = getattr(obj, name, None)
        if name in opt_names and opt_key(val):
            continue
        kw_items.append((name, val))
    return format_invocation(class_name, pos_vals, kw_items)
def format_nonexp_repr(obj, req_names=None, opt_names=None, opt_key=None):
    """Render an angle-bracket repr, e.g. ``<Point x=1 y=2>``.

    *req_names* always appear; *opt_names* appear only when *opt_key*
    (default: "value is not None") deems the value interesting. Falls
    back to ``<ClassName id=...>`` when nothing else would be shown.
    """
    class_name = obj.__class__.__name__
    req_names = req_names or []
    opt_names = opt_names or []
    seen = set()
    names = []
    for name in req_names + opt_names:
        if name not in seen:
            seen.add(name)
            names.append(name)
    if opt_key is None:
        opt_key = lambda v: v is None
    assert callable(opt_key)
    labels = []
    for name in names:
        val = getattr(obj, name, None)
        if name in opt_names and opt_key(val):
            continue
        labels.append('%s=%r' % (name, val))
    if not labels:
        labels = ['id=%s' % id(obj)]
    return '<%s %s>' % (class_name, ' '.join(labels))
# # #
# # # Function builder
# # #
def wraps(func, injected=None, expected=None, **kw):
    """Decorator factory that, unlike :func:`functools.wraps`, makes the
    wrapper match *func*'s full signature.

    *injected* names argument(s) to hide from the visible signature (the
    wrapper supplies them itself); *expected* adds (name, default)
    arguments. Extra keyword options: ``update_dict`` (copy ``__dict__``
    onto the wrapper, default True) and ``inject_to_varkw`` (tolerate
    injected names that would be absorbed by ``**kwargs``, default True).
    """
    if injected is None:
        injected = []
    elif isinstance(injected, basestring):
        injected = [injected]
    else:
        injected = list(injected)
    expected_items = _parse_wraps_expected(expected)
    if isinstance(func, (classmethod, staticmethod)):
        raise TypeError('wraps does not support wrapping classmethods and'
                        ' staticmethods, change the order of wrapping to'
                        ' wrap the underlying function: %r'
                        % (getattr(func, '__func__', None),))
    update_dict = kw.pop('update_dict', True)
    inject_to_varkw = kw.pop('inject_to_varkw', True)
    if kw:
        raise TypeError('unexpected kwargs: %r' % kw.keys())
    fb = FunctionBuilder.from_func(func)
    for arg in injected:
        try:
            fb.remove_arg(arg)
        except MissingArgument:
            if inject_to_varkw and fb.varkw is not None:
                continue  # keyword arg will be caught by the varkw
            raise
    for arg, default in expected_items:
        fb.add_arg(arg, default)  # may raise ExistingArgument
    # the generated wrapper simply forwards its arguments to _call
    if fb.is_async:
        fb.body = 'return await _call(%s)' % fb.get_invocation_str()
    else:
        fb.body = 'return _call(%s)' % fb.get_invocation_str()
    def wrapper_wrapper(wrapper_func):
        execdict = dict(_call=wrapper_func, _func=func)
        fully_wrapped = fb.get_func(execdict, with_dict=update_dict)
        fully_wrapped.__wrapped__ = func  # ref to the original function (#115)
        return fully_wrapped
    return wrapper_wrapper
def _parse_wraps_expected(expected):
    """Normalize the *expected* argument of :func:`wraps` into a list of
    (name, default) pairs.

    *expected* may be a single name, a sequence of names, an iterable of
    (name, default) pairs, or a ``{name: default}`` mapping. Bare names
    get ``NO_DEFAULT`` as their default.
    """
    if expected is None:
        expected = []
    elif isinstance(expected, basestring):
        expected = [(expected, NO_DEFAULT)]
    expected_items = []
    try:
        expected_iter = iter(expected)
    except TypeError as e:
        raise ValueError('"expected" takes string name, sequence of string names,'
                         ' iterable of (name, default) pairs, or a mapping of '
                         ' {name: default}, not %r (got: %r)' % (expected, e))
    for argname in expected_iter:
        if isinstance(argname, basestring):
            # dict keys and bare strings
            try:
                default = expected[argname]
            except TypeError:
                default = NO_DEFAULT
        else:
            # (name, default) pairs
            try:
                argname, default = argname
            except (TypeError, ValueError):
                # bugfix: the offending item was never interpolated, so the
                # message used to contain a literal '%r'
                raise ValueError('"expected" takes string name, sequence of string names,'
                                 ' iterable of (name, default) pairs, or a mapping of '
                                 ' {name: default}, not %r' % (argname,))
        if not isinstance(argname, basestring):
            raise ValueError('all "expected" argnames must be strings, not %r' % (argname,))
        expected_items.append((argname, default))
    return expected_items
class FunctionBuilder(object):
if _IS_PY2:
_argspec_defaults = {'args': list,
'varargs': lambda: None,
'varkw': lambda: None,
'defaults': lambda: None}
@classmethod
def _argspec_to_dict(cls, f):
args, varargs, varkw, defaults = inspect.getargspec(f)
return {'args': args,
'varargs': varargs,
'varkw': varkw,
'defaults': defaults}
else:
_argspec_defaults = {'args': list,
'varargs': lambda: None,
'varkw': lambda: None,
'defaults': lambda: None,
'kwonlyargs': list,
'kwonlydefaults': dict,
'annotations': dict}
@classmethod
def _argspec_to_dict(cls, f):
argspec = inspect.getfullargspec(f)
return dict((attr, getattr(argspec, attr))
for attr in cls._argspec_defaults)
_defaults = {'doc': str,
'dict': dict,
'is_async': lambda: False,
'module': lambda: None,
'body': lambda: 'pass',
'indent': lambda: 4,
"annotations": dict,
'filename': lambda: 'boltons.funcutils.FunctionBuilder'}
_defaults.update(_argspec_defaults)
_compile_count = itertools.count()
def __init__(self, name, **kw):
self.name = name
for a, default_factory in self._defaults.items():
val = kw.pop(a, None)
if val is None:
val = default_factory()
setattr(self, a, val)
if kw:
raise TypeError('unexpected kwargs: %r' % kw.keys())
return
# def get_argspec(self): # TODO
if _IS_PY2:
def get_sig_str(self, with_annotations=True):
return inspect_formatargspec(self.args, self.varargs,
self.varkw, [])
def get_invocation_str(self):
return inspect_formatargspec(self.args, self.varargs,
self.varkw, [])[1:-1]
else:
def get_sig_str(self, with_annotations=True):
"""Return function signature as a string.
with_annotations is ignored on Python 2. On Python 3 signature
will omit annotations if it is set to False.
"""
if with_annotations:
annotations = self.annotations
else:
annotations = {}
return inspect_formatargspec(self.args,
self.varargs,
self.varkw,
[],
self.kwonlyargs,
{},
annotations)
_KWONLY_MARKER = re.compile(r"""
\* # a star
\s* # followed by any amount of whitespace
, # followed by a comma
\s* # followed by any amount of whitespace
""", re.VERBOSE)
def get_invocation_str(self):
kwonly_pairs = None
formatters = {}
if self.kwonlyargs:
kwonly_pairs = dict((arg, arg)
for arg in self.kwonlyargs)
formatters['formatvalue'] = lambda value: '=' + value
sig = inspect_formatargspec(self.args,
self.varargs,
self.varkw,
[],
kwonly_pairs,
kwonly_pairs,
{},
**formatters)
sig = self._KWONLY_MARKER.sub('', sig)
return sig[1:-1]
@classmethod
def from_func(cls, func):
# TODO: copy_body? gonna need a good signature regex.
# TODO: might worry about __closure__?
if not callable(func):
raise TypeError('expected callable object, not %r' % (func,))
kwargs = {'name': func.__name__,
'doc': func.__doc__,
'module': func.__module__,
'annotations': getattr(func, "__annotations__", {}),
'dict': getattr(func, '__dict__', {})}
kwargs.update(cls._argspec_to_dict(func))
if _inspect_iscoroutinefunction(func):
kwargs['is_async'] = True
return cls(**kwargs)
def get_func(self, execdict=None, add_source=True, with_dict=True):
execdict = execdict or {}
body = self.body or self._default_body
tmpl = 'def {name}{sig_str}:'
tmpl += '\n{body}'
if self.is_async:
tmpl = 'async ' + tmpl
body = _indent(self.body, ' ' * self.indent)
name = self.name.replace('<', '_').replace('>', '_') # lambdas
src = tmpl.format(name=name, sig_str=self.get_sig_str(with_annotations=False),
doc=self.doc, body=body)
self._compile(src, execdict)
func = execdict[name]
func.__name__ = self.name
func.__doc__ = self.doc
func.__defaults__ = self.defaults
if not _IS_PY2:
func.__kwdefaults__ = self.kwonlydefaults
func.__annotations__ = self.annotations
if with_dict:
func.__dict__.update(self.dict)
func.__module__ = self.module
# TODO: caller module fallback?
if add_source:
func.__source__ = src
return func
def get_defaults_dict(self):
ret = dict(reversed(list(zip(reversed(self.args),
reversed(self.defaults or [])))))
kwonlydefaults = getattr(self, 'kwonlydefaults', None)
if kwonlydefaults:
ret.update(kwonlydefaults)
return ret
def get_arg_names(self, only_required=False):
arg_names = tuple(self.args) + tuple(getattr(self, 'kwonlyargs', ()))
if only_required:
defaults_dict = self.get_defaults_dict()
arg_names = tuple([an for an in arg_names if an not in defaults_dict])
return arg_names
if _IS_PY2:
def add_arg(self, arg_name, default=NO_DEFAULT):
if arg_name in self.args:
raise ExistingArgument('arg %r already in func %s arg list' % (arg_name, self.name))
self.args.append(arg_name)
if default is not NO_DEFAULT:
self.defaults = (self.defaults or ()) + (default,)
return
else:
def add_arg(self, arg_name, default=NO_DEFAULT, kwonly=False):
"""Add an argument with optional *default* (defaults to
``funcutils.NO_DEFAULT``). Pass *kwonly=True* to add a
keyword-only argument
"""
if arg_name in self.args:
raise ExistingArgument('arg %r already in func %s arg list' % (arg_name, self.name))
if arg_name in self.kwonlyargs:
raise ExistingArgument('arg %r already in func %s kwonly arg list' % (arg_name, self.name))
if not kwonly:
self.args.append(arg_name)
if default is not NO_DEFAULT:
self.defaults = (self.defaults or ()) + (default,)
else:
self.kwonlyargs.append(arg_name)
if default is not NO_DEFAULT:
self.kwonlydefaults[arg_name] = default
return
def remove_arg(self, arg_name):
args = self.args
d_dict = self.get_defaults_dict()
try:
args.remove(arg_name)
except ValueError:
try:
self.kwonlyargs.remove(arg_name)
except (AttributeError, ValueError):
# py2, or py3 and missing from both
exc = MissingArgument('arg %r not found in %s argument list:'
' %r' % (arg_name, self.name, args))
exc.arg_name = arg_name
raise exc
else:
self.kwonlydefaults.pop(arg_name, None)
else:
d_dict.pop(arg_name, None)
self.defaults = tuple([d_dict[a] for a in args if a in d_dict])
return
def _compile(self, src, execdict):
filename = ('<%s-%d>'
% (self.filename, next(self._compile_count),))
try:
code = compile(src, filename, 'single')
exec(code, execdict)
except Exception:
raise
return execdict
class MissingArgument(ValueError):
    """Raised when an argument to remove is not in the argument list."""
    pass
class ExistingArgument(ValueError):
    """Raised when adding an argument name that already exists."""
    pass
def _indent(text, margin, newline='\n', key=bool):
    """Prepend *margin* to each line of *text* for which *key(line)* is
    truthy (by default, every non-empty line)."""
    result = []
    for line in text.splitlines():
        result.append(margin + line if key(line) else line)
    return newline.join(result)
try:
from functools import total_ordering # 2.7+
except ImportError:
# python 2.6
def total_ordering(cls):
"""Class decorator that fills in missing comparators/ordering
methods. Backport of :func:`functools.total_ordering` to work
with Python 2.6.
Code from http://code.activestate.com/recipes/576685/
"""
convert = {
'__lt__': [
('__gt__',
lambda self, other: not (self < other or self == other)),
('__le__',
lambda self, other: self < other or self == other),
('__ge__',
lambda self, other: not self < other)],
'__le__': [
('__ge__',
lambda self, other: not self <= other or self == other),
('__lt__',
lambda self, other: self <= other and not self == other),
('__gt__',
lambda self, other: not self <= other)],
'__gt__': [
('__lt__',
lambda self, other: not (self > other or self == other)),
('__ge__',
lambda self, other: self > other or self == other),
('__le__',
lambda self, other: not self > other)],
'__ge__': [
('__le__',
lambda self, other: (not self >= other) or self == other),
('__gt__',
lambda self, other: self >= other and not self == other),
('__lt__',
lambda self, other: not self >= other)]
}
roots = set(dir(cls)) & set(convert)
if not roots:
raise ValueError('must define at least one ordering operation:'
' < > <= >=')
root = max(roots) # prefer __lt__ to __le__ to __gt__ to __ge__
for opname, opfunc in convert[root]:
if opname not in roots:
opfunc.__name__ = opname
opfunc.__doc__ = getattr(int, opname).__doc__
setattr(cls, opname, opfunc)
return cls
# end funcutils.py
| true | true |
f7152509f06d29375bdf82999b5fe6fe5470fe85 | 10,642 | py | Python | deslib/dcs/a_posteriori.py | vishalbelsare/DESlib | 64260ae7c6dd745ef0003cc6322c9f829c807708 | [
"BSD-3-Clause"
] | 310 | 2019-01-02T12:33:03.000Z | 2022-03-30T08:35:24.000Z | deslib/dcs/a_posteriori.py | vishalbelsare/DESlib | 64260ae7c6dd745ef0003cc6322c9f829c807708 | [
"BSD-3-Clause"
] | 95 | 2019-01-12T03:34:32.000Z | 2022-02-22T18:35:46.000Z | deslib/dcs/a_posteriori.py | vishalbelsare/DESlib | 64260ae7c6dd745ef0003cc6322c9f829c807708 | [
"BSD-3-Clause"
] | 51 | 2018-12-29T13:21:06.000Z | 2022-03-25T22:56:27.000Z | # coding=utf-8
# Author: Rafael Menelau Oliveira e Cruz <rafaelmenelau@gmail.com>
#
# License: BSD 3 clause
import numpy as np
from deslib.dcs.base import BaseDCS
class APosteriori(BaseDCS):
    """A Posteriori Dynamic classifier selection.
    The A Posteriori method uses the probability of correct classification of a
    given base classifier :math:`c_{i}` for each neighbor :math:`x_{k}` with
    respect to a single class. Consider a classifier :math:`c_{i}` that assigns
    a test sample to class :math:`w_{l}`. Then, only the samples belonging to
    class :math:`w_{l}` are taken into account during the competence level
    estimates. Base classifiers with a higher probability of correct
    classification have a higher competence level. Moreover, the method also
    weights the influence of each neighbor :math:`x_{k}` according to its
    Euclidean distance to the query sample. The closest neighbors have a higher
    influence on the competence level estimate. In cases where no sample in the
    region of competence belongs to the predicted class, :math:`w_{l}`, the
    competence level estimate of the base classifier is equal to zero.
    A single classifier is selected only if its competence level is
    significantly higher than that of the other base classifiers in the pool
    (higher than a pre-defined threshold). Otherwise, all classifiers in the
    pool are combined using the majority voting rule. The selection methodology
    can be modified by modifying the hyper-parameter selection_method.
    Parameters
    ----------
    pool_classifiers : list of classifiers (Default = None)
        The generated_pool of classifiers trained for the corresponding
        classification problem. Each base classifier should support the method
        "predict" and "predict_proba". If None, then the pool of classifiers is
        a bagging classifier.
    k : int (Default = 7)
        Number of neighbors used to estimate the competence of the base
        classifiers.
    DFP : Boolean (Default = False)
        Determines if the dynamic frienemy pruning is applied.
    with_IH : Boolean (Default = False)
        Whether the hardness level of the region of competence is used to
        decide between using the DS algorithm or the KNN for classification of
        a given query sample.
    safe_k : int (default = None)
        The size of the indecision region.
    IH_rate : float (default = 0.3)
        Hardness threshold. If the hardness level of the competence region is
        lower than the IH_rate the KNN classifier is used. Otherwise, the DS
        algorithm is used for classification.
    selection_method : String (Default = "diff")
        Determines which method is used to select the base classifier after
        the competences are estimated.
    diff_thresh : float (Default = 0.1)
        Threshold to measure the difference between the competence level of the
        base classifiers for the random and diff selection schemes. If the
        difference is lower than the threshold, their performance are
        considered equivalent.
    random_state : int, RandomState instance or None, optional (default=None)
        If int, random_state is the seed used by the random number generator;
        If RandomState instance, random_state is the random number generator;
        If None, the random number generator is the RandomState instance used
        by `np.random`.
    knn_classifier : {'knn', 'faiss', None} (Default = 'knn')
        The algorithm used to estimate the region of competence:
        - 'knn' will use :class:`KNeighborsClassifier` from sklearn
          :class:`KNNE` available on `deslib.utils.knne`
        - 'faiss' will use Facebook's Faiss similarity search through the
          class :class:`FaissKNNClassifier`
        - None, will use sklearn :class:`KNeighborsClassifier`.
    knne : bool (Default=False)
        Whether to use K-Nearest Neighbor Equality (KNNE) for the region
        of competence estimation.
    DSEL_perc : float (Default = 0.5)
        Percentage of the input data used to fit DSEL.
        Note: This parameter is only used if the pool of classifier is None or
        unfitted.
    n_jobs : int, default=-1
        The number of parallel jobs to run. None means 1 unless in
        a joblib.parallel_backend context. -1 means using all processors.
        Doesn’t affect fit method.
    References
    ----------
    G. Giacinto and F. Roli, Methods for Dynamic Classifier Selection
    10th Int. Conf. on Image Anal. and Proc., Venice, Italy (1999), 659-664.
    Ko, Albert HR, Robert Sabourin, and Alceu Souza Britto Jr. "From dynamic
    classifier selection to dynamic ensemble selection."
    Pattern Recognition 41.5 (2008): 1718-1731.
    Britto, Alceu S., Robert Sabourin, and Luiz ES Oliveira. "Dynamic selection
    of classifiers—a comprehensive review."
    Pattern Recognition 47.11 (2014): 3665-3680.
    R. M. O. Cruz, R. Sabourin, and G. D. Cavalcanti, “Dynamic classifier
    selection: Recent advances and perspectives,”
    Information Fusion, vol. 41, pp. 195 – 216, 2018.
    """
    def __init__(self, pool_classifiers=None, k=7, DFP=False, with_IH=False,
                 safe_k=None, IH_rate=0.30, selection_method='diff',
                 diff_thresh=0.1, random_state=None, knn_classifier='knn',
                 knne=False, DSEL_perc=0.5, n_jobs=-1):
        super(APosteriori, self).__init__(pool_classifiers=pool_classifiers,
                                          k=k, DFP=DFP, with_IH=with_IH,
                                          safe_k=safe_k, IH_rate=IH_rate,
                                          selection_method=selection_method,
                                          diff_thresh=diff_thresh,
                                          knn_classifier=knn_classifier,
                                          random_state=random_state,
                                          knne=knne,
                                          DSEL_perc=DSEL_perc, n_jobs=n_jobs)
    def fit(self, X, y):
        """Prepare the DS model by setting the KNN algorithm and
        pre-processing the information required to apply the DS
        method.
        Parameters
        ----------
        X : array of shape (n_samples, n_features)
            Data used to fit the model.
        y : array of shape (n_samples)
            class labels of each example in X.
        Returns
        -------
        self
        """
        super(APosteriori, self).fit(X, y)
        # A Posteriori requires class-probability estimates, so every base
        # classifier must implement predict_proba
        self._check_predict_proba()
        self.dsel_scores_ = self._predict_proba_base(self.DSEL_data_)
        return self
    def estimate_competence(self, competence_region, distances,
                            predictions=None):
        """Estimate the competence of each base classifier :math:`c_{i}` for
        the classification of the query sample using the A Posteriori method.
        The competence level is estimated based on the probability of correct
        classification of the base classifier :math:`c_{i}`, for each neighbor
        :math:`x_{k}` belonging to a specific class :math:`w_{l}`.
        In this case, :math:`w_{l}` is the class predicted by the base
        classifier :math:`c_{i}`, for the query sample. This method also
        weights the influence of each training sample according to its
        Euclidean distance to the query instance. The closest samples have a
        higher influence in the computation of the competence level. The
        competence level estimate is represented by the following equation:
        .. math:: \\delta_{i,j} = \\frac{\\sum_{\\mathbf{x}_{k} \\in
            \\omega_{l}}P(\\omega_{l} \\mid \\mathbf{x}_{k}, c_{i} )W_{k}}
            {\\sum_{k = 1}^{K}P(\\omega_{l} \\mid \\mathbf{x}_{k}, c_{i} )W_{k}}
        where :math:`\\delta_{i,j}` represents the competence level of
        :math:`c_{i}` for the classification of query.
        Parameters
        ----------
        competence_region : array of shape (n_samples, n_neighbors)
            Indices of the k nearest neighbors.
        distances : array of shape (n_samples, n_neighbors)
            Distances from the k nearest neighbors to the query.
        predictions : array of shape (n_samples, n_classifiers)
            Predictions of the base classifiers for the test examples.
        Returns
        -------
        competences : array of shape (n_samples, n_classifiers)
            Competence level estimated for each base classifier and test
            example.
        """
        # Guarantee that these arrays are view as a 2D array for the case where
        # a single test sample is passed down.
        predictions = np.atleast_2d(predictions)
        # NOTE(review): this modifies the caller's ``distances`` array in
        # place (zero distances become 1e-10 to avoid division by zero) —
        # confirm callers do not reuse the array afterwards.
        distances[distances == 0] = 1e-10
        # Weight each neighbor by the inverse of its distance
        dists_normalized = 1.0 / distances
        # Expanding the dimensions of the predictions and target arrays in
        # order to compare both.
        predictions_3d = np.expand_dims(predictions, axis=1)
        target_3d = self.DSEL_target_[competence_region, np.newaxis]
        # Create a mask to remove the neighbors belonging to a different class
        # than the predicted by the base classifier
        mask = (predictions_3d != target_3d)
        # Broadcast the distance array to the same shape as the pre-processed
        # information for future calculations
        dists_normalized = np.repeat(np.expand_dims(dists_normalized, axis=2),
                                     self.n_classifiers_, axis=2)
        # Probability each base classifier assigned to the true class of each
        # neighbor, weighted by the inverse-distance factors
        scores_target = self.dsel_scores_[competence_region, :,
                                          self.DSEL_target_[competence_region]]
        scores_target_norm = scores_target * dists_normalized
        # Create masked arrays to remove samples with different label in the
        # calculations
        masked_preprocessed = np.ma.MaskedArray(scores_target_norm, mask=mask)
        masked_dist = np.ma.MaskedArray(dists_normalized, mask=mask)
        # Consider only the neighbor samples where the predicted label is
        # equal to the neighbor label
        competences_masked = np.ma.sum(masked_preprocessed,
                                       axis=1) / np.ma.sum(masked_dist, axis=1)
        # Fill 0 to the masked values in the resulting array (when no neighbors
        # belongs to the class predicted by the corresponding base classifier)
        competences = np.ma.filled(competences_masked, 0)
        return competences
| 44.157676 | 79 | 0.652603 |
import numpy as np
from deslib.dcs.base import BaseDCS
class APosteriori(BaseDCS):
    """A Posteriori dynamic classifier selection.

    Competence of each base classifier is estimated from the posterior
    probability it assigns to the predicted class over the query's region
    of competence, weighted by inverse distance to each neighbor and
    restricted to neighbors whose label equals the classifier's prediction.
    """
    def __init__(self, pool_classifiers=None, k=7, DFP=False, with_IH=False,
                 safe_k=None, IH_rate=0.30, selection_method='diff',
                 diff_thresh=0.1, random_state=None, knn_classifier='knn',
                 knne=False, DSEL_perc=0.5, n_jobs=-1):
        # Pure pass-through: all configuration is handled by BaseDCS.
        super(APosteriori, self).__init__(pool_classifiers=pool_classifiers,
                                          k=k, DFP=DFP, with_IH=with_IH,
                                          safe_k=safe_k, IH_rate=IH_rate,
                                          selection_method=selection_method,
                                          diff_thresh=diff_thresh,
                                          knn_classifier=knn_classifier,
                                          random_state=random_state,
                                          knne=knne,
                                          DSEL_perc=DSEL_perc, n_jobs=n_jobs)
    def fit(self, X, y):
        """Fit the DS model and cache each base classifier's posterior
        scores on the DSEL data.

        Requires base classifiers implementing predict_proba (enforced by
        _check_predict_proba).  Returns self.
        """
        super(APosteriori, self).fit(X, y)
        self._check_predict_proba()
        self.dsel_scores_ = self._predict_proba_base(self.DSEL_data_)
        return self
    def estimate_competence(self, competence_region, distances,
                            predictions=None):
        """Estimate the competence of each base classifier per query.

        competence_region : indices of the k nearest DSEL neighbors.
        distances : distances from the k nearest neighbors to the query.
        predictions : base-classifier predictions for the test examples.
        Returns an array of shape (n_samples, n_classifiers).
        """
        # Guarantee that these arrays are viewed as 2D for the case where
        # a single test sample is passed down.
        predictions = np.atleast_2d(predictions)
        # Avoid division by zero when a neighbor coincides with the query.
        distances[distances == 0] = 1e-10
        # Normalize the distances: closer neighbors weigh more.
        dists_normalized = 1.0 / distances
        # Expand dims so predictions can be compared against the labels of
        # each neighbor in the competence region.
        predictions_3d = np.expand_dims(predictions, axis=1)
        target_3d = self.DSEL_target_[competence_region, np.newaxis]
        # Mask neighbors belonging to a different class than the one
        # predicted by the base classifier.
        mask = (predictions_3d != target_3d)
        # Broadcast the distance weights across all classifiers.
        dists_normalized = np.repeat(np.expand_dims(dists_normalized, axis=2),
                                     self.n_classifiers_, axis=2)
        # Posterior each classifier assigned to every neighbor's true label.
        scores_target = self.dsel_scores_[competence_region, :,
                                          self.DSEL_target_[competence_region]]
        scores_target_norm = scores_target * dists_normalized
        # Masked arrays drop the disagreeing neighbors from both sums.
        masked_preprocessed = np.ma.MaskedArray(scores_target_norm, mask=mask)
        masked_dist = np.ma.MaskedArray(dists_normalized, mask=mask)
        # Distance-weighted average of posteriors over agreeing neighbors.
        competences_masked = np.ma.sum(masked_preprocessed,
                                       axis=1) / np.ma.sum(masked_dist, axis=1)
        # Fill 0 where no neighbor matches the predicted class (the whole
        # row was masked out).
        competences = np.ma.filled(competences_masked, 0)
        return competences
| true | true |
f71525eb6d02c2219a98e137bacd9ff85536c953 | 838 | py | Python | eventmanager/contacts/migrations/0001_initial.py | jasham/event2Backend | 54e9945676458231cacb6fb8ad62a757a9547b63 | [
"MIT"
] | null | null | null | eventmanager/contacts/migrations/0001_initial.py | jasham/event2Backend | 54e9945676458231cacb6fb8ad62a757a9547b63 | [
"MIT"
] | null | null | null | eventmanager/contacts/migrations/0001_initial.py | jasham/event2Backend | 54e9945676458231cacb6fb8ad62a757a9547b63 | [
"MIT"
] | 1 | 2020-04-27T06:45:18.000Z | 2020-04-27T06:45:18.000Z | # Generated by Django 2.2.5 on 2020-03-08 07:42
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    # Initial schema for the contacts app (generated by Django 2.2.5).
    initial = True
    dependencies = [
        # The Leads model must exist before Contacts can reference it.
        ('leads', '__first__'),
    ]
    operations = [
        migrations.CreateModel(
            name='Contacts',
            fields=[
                # Payment status stored as a Yes/No string, defaulting to No.
                ('paid', models.CharField(choices=[('Yes', 'Yes'), ('No', 'No')], default='No', max_length=50)),
                ('amount', models.IntegerField()),
                # Natural key used as the table's primary key.
                ('unique_number', models.IntegerField(primary_key=True, serialize=False)),
                ('is_valid', models.BooleanField(default=True)),
                # Each contact is derived from exactly one lead; deleting
                # the lead cascades to the contact.
                ('leads_data', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='leads.Leads')),
            ],
        ),
    ]
| 31.037037 | 114 | 0.559666 |
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    # First migration of the contacts app: creates the Contacts model.
    initial = True
    dependencies = [
        ('leads', '__first__'),
    ]
    operations = [
        migrations.CreateModel(
            name='Contacts',
            fields=[
                ('paid', models.CharField(choices=[('Yes', 'Yes'), ('No', 'No')], default='No', max_length=50)),
                ('amount', models.IntegerField()),
                # Acts as the primary key instead of an auto id.
                ('unique_number', models.IntegerField(primary_key=True, serialize=False)),
                ('is_valid', models.BooleanField(default=True)),
                ('leads_data', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='leads.Leads')),
            ],
        ),
    ]
| true | true |
f71526b15e9dfaa84b99bbeb8e8827713bb52184 | 38,532 | py | Python | topaz/utils/regexp.py | mswart/topaz | 4bc02d6f4bf29c20f045223ecb6ae8a5cc9df2ae | [
"BSD-3-Clause"
] | null | null | null | topaz/utils/regexp.py | mswart/topaz | 4bc02d6f4bf29c20f045223ecb6ae8a5cc9df2ae | [
"BSD-3-Clause"
] | null | null | null | topaz/utils/regexp.py | mswart/topaz | 4bc02d6f4bf29c20f045223ecb6ae8a5cc9df2ae | [
"BSD-3-Clause"
] | null | null | null | import sys
from rpython.rlib.listsort import make_timsort_class
from rpython.rlib.objectmodel import specialize
from rpython.rlib.rstring import StringBuilder
from rpython.rlib.rsre.rsre_core import (
OPCODE_LITERAL, OPCODE_LITERAL_IGNORE, OPCODE_SUCCESS, OPCODE_ASSERT,
OPCODE_MARK, OPCODE_REPEAT, OPCODE_ANY, OPCODE_ANY_ALL, OPCODE_MAX_UNTIL,
OPCODE_MIN_UNTIL, OPCODE_GROUPREF, OPCODE_AT, OPCODE_BRANCH, OPCODE_RANGE,
OPCODE_JUMP, OPCODE_ASSERT_NOT, OPCODE_CATEGORY, OPCODE_FAILURE, OPCODE_IN,
OPCODE_NEGATE
)
from rpython.rlib.rsre.rsre_char import MAXREPEAT as MAX_REPEAT
# Pattern-level option flags (Ruby regexp modifiers).
IGNORE_CASE = 1 << 0
EXTENDED = 1 << 1
DOT_ALL = 1 << 2
ONCE = 1 << 3
FIXED_ENCODING = 1 << 4
NO_ENCODING = 1 << 5
# Modifier letter -> flag bit ("e"/"s"/"u" all force a fixed encoding).
OPTIONS_MAP = {
    "i": IGNORE_CASE,
    "x": EXTENDED,
    "m": DOT_ALL,
    "o": ONCE,
    "u": FIXED_ENCODING,
    "n": NO_ENCODING,
    "e": FIXED_ENCODING,
    "s": FIXED_ENCODING,
}
# Order in which modifier letters are rendered when formatting flags.
FLAGS_MAP = [
    ("m", DOT_ALL),
    ("i", IGNORE_CASE),
    ("x", EXTENDED),
]
# Characters with special meaning outside a character class.
SPECIAL_CHARS = "()|?*+{^$.[\\#"
# Single-letter escapes mapping to control characters.
CHARACTER_ESCAPES = {
    "a": "\a",
    "b": "\b",
    "f": "\f",
    "n": "\n",
    "r": "\r",
    "t": "\t",
    "v": "\v",
}
# Position codes for zero-width assertions, emitted as operands of
# OPCODE_AT (presumably mirroring rsre's AT_* codes -- TODO confirm).
AT_BEGINNING = 0
AT_BEGINNING_LINE = 1
AT_BEGINNING_STRING = 2
AT_BOUNDARY = 3
AT_NON_BOUNDARY = 4
AT_END = 5
AT_END_LINE = 6
AT_END_STRING = 7
AT_LOC_BOUNDARY = 8
AT_LOC_NON_BOUNDARY = 9
AT_UNI_BOUNDARY = 10
AT_UNI_NON_BOUNDARY = 11
# Category codes for character classes, emitted as operands of
# OPCODE_CATEGORY (presumably mirroring rsre's codes -- TODO confirm).
CATEGORY_DIGIT = 0
CATEGORY_NOT_DIGIT = 1
CATEGORY_SPACE = 2
CATEGORY_NOT_SPACE = 3
CATEGORY_WORD = 4
CATEGORY_NOT_WORD = 5
CATEGORY_LINEBREAK = 6
CATEGORY_NOT_LINEBREAK = 7
CATEGORY_LOC_WORD = 8
CATEGORY_LOC_NOT_WORD = 9
CATEGORY_UNI_DIGIT = 10
CATEGORY_UNI_NOT_DIGIT = 11
CATEGORY_UNI_SPACE = 12
CATEGORY_UNI_NOT_SPACE = 13
CATEGORY_UNI_WORD = 14
CATEGORY_UNI_NOT_WORD = 15
CATEGORY_UNI_LINEBREAK = 16
CATEGORY_UNI_NOT_LINEBREAK = 17
class UnscopedFlagSet(Exception):
    """Raised when flags that only make sense globally are encountered."""
    def __init__(self, global_flags):
        Exception.__init__(self)
        # Flags that must be applied to the whole pattern.
        self.global_flags = global_flags
class RegexpError(Exception):
    """User-visible error raised for a malformed regular expression."""
    pass
class ParseError(Exception):
    """Internal signal: the current parse attempt failed; callers catch
    it and backtrack to re-parse the text differently."""
    pass
class Source(object):
    """Cursor over the pattern string.

    In extended (/x) mode ``ignore_space`` is set and the cursor skips
    whitespace and "#"-to-end-of-line comments before every read.
    """
    def __init__(self, s):
        self.pos = 0
        self.s = s
        self.ignore_space = False
    def at_end(self):
        """Return True if only ignorable text remains before the end."""
        s = self.s
        pos = self.pos
        if self.ignore_space:
            # Scan past whitespace/comments without moving self.pos.
            while True:
                if pos >= len(s):
                    break
                elif s[pos].isspace():
                    pos += 1
                elif s[pos] == "#":
                    pos = s.find("\n", pos)
                    if pos < 0:
                        pos = len(s)
                else:
                    break
        return pos >= len(s)
    def get(self):
        """Consume and return the next significant character ("" at end)."""
        s = self.s
        pos = self.pos
        if self.ignore_space:
            while True:
                if pos >= len(s):
                    return ""
                elif s[pos].isspace():
                    pos += 1
                elif s[pos] == "#":
                    pos = s.find("\n", pos)
                    if pos < 0:
                        pos = len(s)
                else:
                    break
        try:
            ch = s[pos]
            self.pos = pos + 1
            return ch
        except IndexError:
            # Past the end: leave the cursor where the skip loop stopped.
            self.pos = pos
            return ""
        except ValueError:
            self.pos = len(s)
            return ""
    def match(self, substr):
        """Try to consume *substr*; return whether it matched.

        The cursor only advances on success.
        """
        s = self.s
        pos = self.pos
        if self.ignore_space:
            # Compare character by character, skipping ignorable text
            # before each one.
            for c in substr:
                while True:
                    if pos >= len(s):
                        return False
                    elif s[pos].isspace():
                        pos += 1
                    elif s[pos] == "#":
                        pos = s.find("\n", pos)
                        if pos < 0:
                            pos = len(s)
                    else:
                        break
                if s[pos] != c:
                    return False
                pos += 1
            self.pos = pos
            return True
        else:
            if pos + len(substr) <= len(s):
                matches = True
                for i in xrange(len(substr)):
                    if s[pos + i] != substr[i]:
                        matches = False
            else:
                matches = False
            if not matches:
                return False
            self.pos = pos + len(substr)
            return True
    def expect(self, substr):
        """Consume *substr* or raise RegexpError if it is not next."""
        if not self.match(substr):
            raise RegexpError("Missing %s" % substr)
class Info(object):
    """Parse-time bookkeeping: pattern flags and capture-group state."""
    # Lifecycle states for a capture group.
    OPEN = 0
    CLOSED = 1
    def __init__(self, flags):
        self.flags = flags
        self.group_count = 0
        self.used_groups = {}
        self.group_state = {}
        # name -> group number, and the reverse mapping.
        self.group_index = {}
        self.group_name = {}
        self.named_lists_used = {}
        self.defined_groups = {}
        self.group_offsets = []
    def new_group(self, name=None):
        """Allocate (or reuse, for a known name) a group number and mark
        it OPEN."""
        if name in self.group_index:
            group = self.group_index[name]
        else:
            # Skip numbers already taken by named groups.
            while True:
                self.group_count += 1
                if name is None or self.group_count not in self.group_name:
                    break
            group = self.group_count
            if name is not None:
                self.group_index[name] = group
                self.group_name[group] = name
        self.used_groups[group] = None
        self.group_state[group] = self.OPEN
        return group
    def close_group(self, group, hidden=False):
        """Mark *group* CLOSED.  Hidden (synthetic) groups bump the
        running offset used to map user-visible group numbers."""
        last_group_offset = self.group_offsets[-1] if self.group_offsets else 0
        if hidden:
            last_group_offset += 1
        self.group_offsets.append(last_group_offset)
        self.group_state[group] = self.CLOSED
    def normalize_group(self, name):
        """Map a group reference (digit string or name) to its number."""
        if name.isdigit():
            return int(name)
        else:
            return self.group_index[name]
    def is_open_group(self, name):
        group = self.normalize_group(name)
        return group in self.group_state and self.group_state[group] == self.OPEN
BaseSorter = make_timsort_class()
class BranchSorter(BaseSorter):
    """Timsort specialization ordering (key, branches) pairs by the
    first-seen order of their key, as recorded in the *order* dict."""
    def __init__(self, items, order):
        BaseSorter.__init__(self, items)
        self.order = order
    def lt(self, a, b):
        return self.order[a[0]] < self.order[b[0]]
class CompilerContext(object):
    """Accumulates rsre opcodes into a flat code buffer.

    Exposes the small emit/tell/patch interface the AST nodes use to
    write opcodes and back-patch jump offsets once targets are known.
    """
    def __init__(self):
        # Flat list of integer opcodes/operands, in emission order.
        self.data = []
    def emit(self, opcode):
        """Append one opcode (or operand) to the buffer."""
        self.data.append(opcode)
    def tell(self):
        """Return the current write position (index of the next emit)."""
        return len(self.data)
    def patch(self, pos, value):
        """Overwrite the value at *pos* -- used to back-fill offsets."""
        self.data[pos] = value
    def build(self):
        """Return a shallow copy of the finished code listing."""
        return list(self.data)
class Counts(object):
    """Repeat bounds parsed from a quantifier (?, *, +, {m,n})."""
    def __init__(self, min_count, max_count=MAX_REPEAT, limited_quantifier=False):
        self.min_count = min_count
        self.max_count = max_count
        # True for explicit {m,n} quantifiers; a trailing "+" after one of
        # those is a nested repeat, not a possessive quantifier.
        self.limited_quantifier = limited_quantifier
class RegexpBase(object):
    """Base class for all parsed regexp AST nodes.

    Every node carries three shared flags: whether it matches positively,
    whether it compares case-insensitively, and whether it is zero-width.
    """
    _attrs_ = ["positive", "case_insensitive", "zerowidth"]
    def __init__(self, positive=True, case_insensitive=False, zerowidth=False):
        self.positive = positive
        self.case_insensitive = case_insensitive
        self.zerowidth = zerowidth
    @specialize.argtype(1, 2, 3)
    def with_flags(self, positive=None, case_insensitive=None, zerowidth=None):
        """Return a copy with the given flags replaced, or self when
        nothing changes (nodes are treated as immutable)."""
        positive = positive if positive is not None else self.positive
        case_insensitive = case_insensitive if case_insensitive is not None else self.case_insensitive
        zerowidth = zerowidth if zerowidth is not None else self.zerowidth
        if (positive == self.positive and
            case_insensitive == self.case_insensitive and
            zerowidth == self.zerowidth):
            return self
        return self.rebuild(positive, case_insensitive, zerowidth)
class Character(RegexpBase):
    """A single literal code point."""
    def __init__(self, value, case_insensitive=False, positive=True, zerowidth=False):
        RegexpBase.__init__(self, case_insensitive=case_insensitive, positive=positive, zerowidth=zerowidth)
        # The code point this node matches.
        self.value = value
    def rebuild(self, positive, case_insensitive, zerowidth):
        return Character(self.value, positive=positive, case_insensitive=case_insensitive, zerowidth=zerowidth)
    def getwidth(self):
        # A literal always consumes exactly one character.
        return 1, 1
    def fix_groups(self):
        pass
    def optimize(self, info, in_set=False):
        return self
    def can_be_affix(self):
        return True
    def is_empty(self):
        return False
    def compile(self, ctx):
        ctx.emit(OPCODE_LITERAL_IGNORE if self.case_insensitive else OPCODE_LITERAL)
        ctx.emit(self.value)
class Any(RegexpBase):
    """"." -- any character except newline (OPCODE_ANY)."""
    def is_empty(self):
        return False
    def fix_groups(self):
        pass
    def optimize(self, info, in_set=False):
        return self
    def compile(self, ctx):
        ctx.emit(OPCODE_ANY)
class AnyAll(RegexpBase):
    """"." under DOT_ALL -- any character including newline."""
    def is_empty(self):
        return False
    def fix_groups(self):
        pass
    def optimize(self, info, in_set=False):
        return self
    def compile(self, ctx):
        ctx.emit(OPCODE_ANY_ALL)
class ZeroWidthBase(RegexpBase):
    """Base for assertions that consume no input."""
    def fix_groups(self):
        pass
    def optimize(self, info, in_set=False):
        return self
class AtPosition(ZeroWidthBase):
    """Zero-width position assertion (anchors, word boundaries, ...)."""
    def __init__(self, code):
        ZeroWidthBase.__init__(self)
        # One of the AT_* codes; emitted as the operand of OPCODE_AT.
        self.code = code
    def can_be_affix(self):
        return True
    def compile(self, ctx):
        ctx.emit(OPCODE_AT)
        ctx.emit(self.code)
class Property(RegexpBase):
    """A character category such as digit or word (CATEGORY_* code)."""
    def __init__(self, value, positive=True, case_insensitive=False, zerowidth=False):
        RegexpBase.__init__(self, positive=positive, case_insensitive=case_insensitive, zerowidth=zerowidth)
        # One of the CATEGORY_* codes.
        self.value = value
    def rebuild(self, positive, case_insensitive, zerowidth):
        return Property(self.value, positive, case_insensitive, zerowidth)
    def getwidth(self):
        return 1, 1
    def is_empty(self):
        return False
    def fix_groups(self):
        pass
    def optimize(self, info, in_set=False):
        return self
    def can_be_affix(self):
        return True
    def compile(self, ctx):
        ctx.emit(OPCODE_CATEGORY)
        ctx.emit(self.value)
class Range(RegexpBase):
    """An inclusive code-point range inside a character class."""
    def __init__(self, lower, upper, positive=True, case_insensitive=False, zerowidth=False):
        RegexpBase.__init__(self, positive=positive, case_insensitive=case_insensitive, zerowidth=zerowidth)
        self.lower = lower
        self.upper = upper
    def rebuild(self, positive, case_insensitive, zerowidth):
        return Range(self.lower, self.upper, positive, case_insensitive, zerowidth)
    def fix_groups(self):
        pass
    def optimize(self, info, in_set=False):
        return self
    def can_be_affix(self):
        return True
    def compile(self, ctx):
        if not self.positive:
            ctx.emit(OPCODE_NEGATE)
        ctx.emit(OPCODE_RANGE)
        ctx.emit(self.lower)
        ctx.emit(self.upper)
class Sequence(RegexpBase):
    """Concatenation: items matched one after another."""
    def __init__(self, items):
        RegexpBase.__init__(self)
        self.items = items
    def is_empty(self):
        # Empty iff every child is empty.
        for item in self.items:
            if not item.is_empty():
                return False
        return True
    def fix_groups(self):
        for item in self.items:
            item.fix_groups()
    def optimize(self, info, in_set=False):
        # Optimize children and flatten nested sequences.
        items = []
        for item in self.items:
            item = item.optimize(info)
            if isinstance(item, Sequence):
                items.extend(item.items)
            else:
                items.append(item)
        return make_sequence(items)
    def compile(self, ctx):
        for item in self.items:
            item.compile(ctx)
class Branch(RegexpBase):
    """An alternation node: one of several subpatterns may match.

    ``optimize`` performs the standard alternation rewrites: flattening
    nested branches, hoisting common prefixes/suffixes, merging branches
    that start with the same literal, and collapsing runs of one-character
    branches into character sets.
    """
    def __init__(self, branches):
        RegexpBase.__init__(self)
        self.branches = branches
    def fix_groups(self):
        for b in self.branches:
            b.fix_groups()
    def is_empty(self):
        for b in self.branches:
            if not b.is_empty():
                return False
        return True
    def _flatten_branches(self, info, branches):
        # Inline nested Branch nodes so optimization sees one flat list.
        new_branches = []
        for b in branches:
            b = b.optimize(info)
            if isinstance(b, Branch):
                new_branches.extend(b.branches)
            else:
                new_branches.append(b)
        return new_branches
    def _split_common_prefix(self, info, branches):
        # View every branch as a list of items.
        alternatives = []
        for b in branches:
            if isinstance(b, Sequence):
                alternatives.append(b.items)
            else:
                alternatives.append([b])
        max_count = sys.maxint
        for a in alternatives:
            max_count = min(max_count, len(a))
        prefix = alternatives[0]
        pos = 0
        end_pos = max_count
        # Extend the prefix only while EVERY alternative agrees with it.
        # BUG FIX: this previously tested whether ANY alternative matched
        # (``[None for a in alternatives if a[pos] == prefix[pos]]``),
        # which is always truthy because alternatives[0] is the prefix
        # itself, so non-common items were hoisted out of the alternation.
        while (pos < end_pos and prefix[pos].can_be_affix() and
                not [a for a in alternatives if a[pos] != prefix[pos]]):
            pos += 1
        if pos == 0:
            return [], branches
        new_branches = []
        for a in alternatives:
            new_branches.append(make_sequence(a[pos:]))
        return prefix[:pos], new_branches
    def _split_common_suffix(self, info, branches):
        alternatives = []
        for b in branches:
            if isinstance(b, Sequence):
                alternatives.append(b.items)
            else:
                alternatives.append([b])
        max_count = sys.maxint
        for a in alternatives:
            max_count = min(max_count, len(a))
        suffix = alternatives[0]
        pos = -1
        end_pos = -1 - max_count
        # Same "all alternatives must agree" rule as the prefix split
        # (same ANY-vs-ALL fix as above).
        while (pos > end_pos and suffix[pos].can_be_affix() and
                not [a for a in alternatives if a[pos] != suffix[pos]]):
            pos -= 1
        count = -1 - pos
        if count == 0:
            return [], branches
        new_branches = []
        for a in alternatives:
            end = len(a) - count
            assert end >= 0
            new_branches.append(make_sequence(a[:end]))
        start = len(suffix) - count
        assert start >= 0
        return suffix[start:], new_branches
    def _is_simple_character(self, c):
        return isinstance(c, Character) and c.positive and not c.case_insensitive
    def _flush_char_prefix(self, info, prefixed, order, new_branches):
        # Emit the queued branches grouped by their first literal.
        if not prefixed:
            return
        items = prefixed.items()
        sorter = BranchSorter(items, order)
        sorter.sort()
        for value, branches in items:
            if len(branches) == 1:
                new_branches.append(make_sequence(branches[0]))
            else:
                subbranches = []
                optional = False
                for b in branches:
                    if len(b) > 1:
                        subbranches.append(make_sequence(b[1:]))
                    elif not optional:
                        # At most one empty tail: the shared literal alone.
                        subbranches.append(Sequence([]))
                        optional = True
                sequence = Sequence([Character(value), Branch(subbranches)])
                new_branches.append(sequence.optimize(info))
        prefixed.clear()
        order.clear()
    def _merge_common_prefixes(self, info, branches):
        # Group consecutive branches starting with the same literal.
        prefixed = {}
        order = {}
        new_branches = []
        for b in branches:
            if self._is_simple_character(b):
                assert isinstance(b, Character)
                prefixed.setdefault(b.value, []).append([b])
                order.setdefault(b.value, len(order))
            elif isinstance(b, Sequence) and b.items and self._is_simple_character(b.items[0]):
                item = b.items[0]
                assert isinstance(item, Character)
                prefixed.setdefault(item.value, []).append(b.items)
                order.setdefault(item.value, len(order))
            else:
                # Non-literal branch: flush queued work to preserve order.
                self._flush_char_prefix(info, prefixed, order, new_branches)
                new_branches.append(b)
        self._flush_char_prefix(info, prefixed, order, new_branches)
        return new_branches
    def _flush_set_members(self, info, items, case_insensitive, new_branches):
        if not items:
            return
        if len(items) == 1:
            [item] = items.keys()
        else:
            item = SetUnion(info, items.keys()).optimize(info)
        new_branches.append(item.with_flags(case_insensitive=case_insensitive))
        items.clear()
    def _reduce_to_set(self, info, branches):
        # Collapse runs of single-character/property branches into sets.
        new_branches = []
        items = {}
        case_insensitive = False
        for b in branches:
            if isinstance(b, Character) or isinstance(b, Property) or isinstance(b, SetBase):
                if b.case_insensitive != case_insensitive:
                    self._flush_set_members(info, items, case_insensitive, new_branches)
                    case_insensitive = b.case_insensitive
                items[b.with_flags(case_insensitive=False)] = False
            else:
                self._flush_set_members(info, items, case_insensitive, new_branches)
                new_branches.append(b)
        self._flush_set_members(info, items, case_insensitive, new_branches)
        return new_branches
    def optimize(self, info, in_set=False):
        branches = self._flatten_branches(info, self.branches)
        prefix, branches = self._split_common_prefix(info, branches)
        suffix, branches = self._split_common_suffix(info, branches)
        branches = self._merge_common_prefixes(info, branches)
        branches = self._reduce_to_set(info, branches)
        if len(branches) > 1:
            sequence = prefix + [Branch(branches)] + suffix
        else:
            sequence = prefix + branches + suffix
        return make_sequence(sequence)
    def compile(self, ctx):
        ctx.emit(OPCODE_BRANCH)
        tail = []
        for b in self.branches:
            # Placeholder patched with this alternative's skip offset.
            pos = ctx.tell()
            ctx.emit(0)
            b.compile(ctx)
            ctx.emit(OPCODE_JUMP)
            tail.append(ctx.tell())
            ctx.emit(0)
            ctx.patch(pos, ctx.tell() - pos)
        # A zero offset terminates the list of alternatives.
        ctx.emit(0)
        for t in tail:
            # Each alternative's jump lands just past the whole branch.
            ctx.patch(t, ctx.tell() - t)
class BaseRepeat(RegexpBase):
    """Shared state/compilation for greedy and lazy repeats."""
    def __init__(self, subpattern, min_count, max_count):
        RegexpBase.__init__(self)
        self.subpattern = subpattern
        self.min_count = min_count
        self.max_count = max_count
    def fix_groups(self):
        self.subpattern.fix_groups()
    def is_empty(self):
        return self.subpattern.is_empty()
    def compile(self, ctx):
        ctx.emit(OPCODE_REPEAT)
        # Placeholder patched with the body length below.
        pos = ctx.tell()
        ctx.emit(0)
        ctx.emit(self.min_count)
        ctx.emit(self.max_count)
        self.subpattern.compile(ctx)
        ctx.patch(pos, ctx.tell() - pos)
        # MAX_UNTIL (greedy) or MIN_UNTIL (lazy), set by the subclass.
        ctx.emit(self.UNTIL_OPCODE)
class GreedyRepeat(BaseRepeat):
    """Repeat matching as much as possible ("*", "+", "{m,n}")."""
    UNTIL_OPCODE = OPCODE_MAX_UNTIL
    def can_be_affix(self):
        return True
    def optimize(self, info, in_set=False):
        subpattern = self.subpattern.optimize(info)
        return GreedyRepeat(subpattern, self.min_count, self.max_count)
class LazyRepeat(BaseRepeat):
    """Repeat matching as little as possible ("*?", "+?", "{m,n}?")."""
    UNTIL_OPCODE = OPCODE_MIN_UNTIL
    def optimize(self, info, in_set=False):
        subpattern = self.subpattern.optimize(info)
        return LazyRepeat(subpattern, self.min_count, self.max_count)
class LookAround(RegexpBase):
    """Zero-width lookahead/lookbehind assertion."""
    def __init__(self, subpattern, behind, positive):
        RegexpBase.__init__(self, positive=positive)
        self.subpattern = subpattern
        # True for lookbehind, False for lookahead.
        self.behind = behind
    def fix_groups(self):
        self.subpattern.fix_groups()
    def can_be_affix(self):
        return self.subpattern.can_be_affix()
    def optimize(self, info, in_set=False):
        return LookAround(self.subpattern.optimize(info), self.behind, self.positive)
    def compile(self, ctx):
        ctx.emit(OPCODE_ASSERT if self.positive else OPCODE_ASSERT_NOT)
        pos = ctx.tell()
        ctx.emit(0)
        if self.behind:
            # The engine needs a fixed width to step back by.
            lo, hi = self.subpattern.getwidth()
            if lo != hi:
                raise RegexpError("look-behind requires fixed-width pattern")
            ctx.emit(lo)
        else:
            ctx.emit(0)
        self.subpattern.compile(ctx)
        ctx.emit(OPCODE_SUCCESS)
        ctx.patch(pos, ctx.tell() - pos)
class Group(RegexpBase):
    """A capturing group wrapping *subpattern*."""
    def __init__(self, info, group, subpattern):
        RegexpBase.__init__(self)
        self.info = info
        self.group = group
        self.subpattern = subpattern
    def fix_groups(self):
        self.info.defined_groups[self.group] = self
        self.subpattern.fix_groups()
    def can_be_affix(self):
        return False
    def optimize(self, info, in_set=False):
        return Group(self.info, self.group, self.subpattern.optimize(info))
    def is_empty(self):
        return False
    def compile(self, ctx):
        # MARK opcodes record the span: slots 2k / 2k+1 hold start / end.
        ctx.emit(OPCODE_MARK)
        ctx.emit((self.group - 1) * 2)
        self.subpattern.compile(ctx)
        ctx.emit(OPCODE_MARK)
        ctx.emit((self.group - 1) * 2 + 1)
class RefGroup(RegexpBase):
    """A back-reference to a previously matched group."""
    def __init__(self, info, group, case_insensitive=False):
        RegexpBase.__init__(self, case_insensitive=case_insensitive)
        self.info = info
        self.group = group
    def fix_groups(self):
        if not 1 <= self.group <= self.info.group_count:
            raise RegexpError("unknown group")
    def optimize(self, info, in_set=False):
        return self
    def compile(self, ctx):
        # No case-insensitive groupref opcode is emitted here.
        assert not self.case_insensitive
        ctx.emit(OPCODE_GROUPREF)
        ctx.emit(self.group - 1)
class SetBase(RegexpBase):
    """Base for character-class nodes built from "[...]"."""
    def __init__(self, info, items, positive=True, case_insensitive=False, zerowidth=False):
        RegexpBase.__init__(self, positive=positive, case_insensitive=case_insensitive, zerowidth=zerowidth)
        self.info = info
        self.items = items
    def is_empty(self):
        return False
    def can_be_affix(self):
        return True
    def fix_groups(self):
        pass
class SetUnion(SetBase):
    """Union of set members: matches any one of its items."""
    def optimize(self, info, in_set=False):
        items = []
        for item in self.items:
            item = item.optimize(info, in_set=True)
            # Flatten positive nested unions.
            if isinstance(item, SetUnion) and item.positive:
                items.extend(item.items)
            else:
                items.append(item)
        if len(items) == 1 and not isinstance(items[0], Range):
            # A one-item union is the item itself, with polarity folded in.
            return items[0].with_flags(
                positive=items[0].positive == self.positive,
                case_insensitive=self.case_insensitive,
                zerowidth=self.zerowidth
            ).optimize(info, in_set=in_set)
        return SetUnion(self.info, items, positive=self.positive, case_insensitive=self.case_insensitive, zerowidth=self.zerowidth)
    def rebuild(self, positive, case_insensitive, zerowidth):
        return SetUnion(self.info, self.items, positive, case_insensitive, zerowidth).optimize(self.info)
    def compile(self, ctx):
        ctx.emit(OPCODE_IN)
        # Placeholder patched with the skip offset once members are emitted.
        pos = ctx.tell()
        ctx.emit(0)
        if not self.positive:
            ctx.emit(OPCODE_NEGATE)
        for item in self.items:
            item.compile(ctx)
        ctx.emit(OPCODE_FAILURE)
        ctx.patch(pos, ctx.tell() - pos)
class SetIntersection(SetBase):
    """Intersection of set operands ("&&" inside a class)."""
    def rebuild(self, positive, case_insensitive, zerowidth):
        return SetIntersection(self.info, self.items, positive=positive, case_insensitive=case_insensitive, zerowidth=zerowidth)
    def optimize(self, info, in_set=False):
        items = []
        for item in self.items:
            item = item.optimize(info, in_set=True)
            if isinstance(item, SetIntersection) and item.positive:
                items.extend(item.items)
            else:
                items.append(item)
        if len(items) == 1:
            return items[0].with_flags(
                case_insensitive=self.case_insensitive,
                zerowidth=self.zerowidth,
            ).optimize(info, in_set)
        return SetIntersection(info, items)
    def compile(self, ctx):
        # A && B compiles as lookaheads for all but the last operand,
        # which is matched (and consumed) normally.
        Sequence([
            LookAround(item, behind=False, positive=True)
            for item in self.items[:-1]
        ] + [self.items[-1]]).compile(ctx)
# Zero-width assertions reachable via escapes (\A, \z, \b, \B).
POSITION_ESCAPES = {
    "A": AtPosition(AT_BEGINNING_STRING),
    "z": AtPosition(AT_END_STRING),
    "b": AtPosition(AT_BOUNDARY),
    "B": AtPosition(AT_NON_BOUNDARY),
}
# Character-class escapes (\d, \w).
CHARSET_ESCAPES = {
    "d": Property(CATEGORY_DIGIT),
    "w": Property(CATEGORY_WORD),
}
# Property names accepted by [:name:] and \p{name}.
PROPERTIES = {
    "digit": CATEGORY_DIGIT,
    "alnum": CATEGORY_WORD,
}
def make_character(info, value, in_set=False):
    """Build a Character node for code point *value*.

    Inside a set the /i flag is applied to the whole set by the caller,
    so set members are created case-sensitive here.
    """
    if in_set:
        return Character(value)
    return Character(value, case_insensitive=info.flags & IGNORE_CASE)
def make_sequence(items):
    """Wrap *items* in a Sequence node, collapsing a one-element list to
    the element itself."""
    return items[0] if len(items) == 1 else Sequence(items)
def make_atomic(info, subpattern):
    """Wrap *subpattern* so it matches atomically (no backtracking into
    it): a capturing lookahead whose result is immediately re-matched by
    a back-reference.  The synthetic group is closed as hidden so the
    user-visible group numbering is unaffected.
    """
    group = info.new_group()
    info.close_group(group, hidden=True)
    return Sequence([
        LookAround(Group(info, group, subpattern), behind=False, positive=True),
        RefGroup(info, group),
    ])
def make_ref_group(info, name):
    """Build a back-reference node, honouring the pattern's /i flag."""
    return RefGroup(info, name, case_insensitive=info.flags & IGNORE_CASE)
def _parse_pattern(source, info):
    """Parse a full alternation: sequence ("|" sequence)*.

    Each alternative starts from the same set of used groups; afterwards
    the union of every alternative's groups becomes visible.
    """
    previous_groups = info.used_groups.copy()
    branches = [_parse_sequence(source, info)]
    all_groups = info.used_groups
    while source.match("|"):
        info.used_groups = previous_groups.copy()
        branches.append(_parse_sequence(source, info))
        all_groups.update(info.used_groups)
    info.used_groups = all_groups
    if len(branches) == 1:
        return branches[0]
    return Branch(branches)
def _parse_sequence(source, info):
    """Parse a run of consecutive items, stopping at "|", ")" or the end
    of the pattern (where _parse_item yields a falsy result)."""
    items = []
    while True:
        node = _parse_item(source, info)
        if not node:
            break
        items.append(node)
    return make_sequence(items)
def _parse_item(source, info):
    """Parse one element plus an optional quantifier suffix."""
    element = _parse_element(source, info)
    counts = _parse_quantifier(source, info)
    if counts is not None:
        min_count, max_count = counts.min_count, counts.max_count
        # Repeating an empty element (or exactly once) is the element.
        if element.is_empty() or min_count == max_count == 1:
            return element
        if source.match("?"):
            # Lazy quantifier: *?, +?, {m,n}?
            return LazyRepeat(element, min_count, max_count)
        elif source.match("+"):
            if counts.limited_quantifier:
                # "{m,n}+" is the counted repeat, repeated again.
                return GreedyRepeat(GreedyRepeat(element, min_count, max_count), 1, MAX_REPEAT)
            else:
                # Possessive quantifier (*+, ++): atomic greedy repeat.
                return make_atomic(info, GreedyRepeat(element, min_count, max_count))
        else:
            return GreedyRepeat(element, min_count, max_count)
    return element
def _parse_element(source, info):
    """Parse a single pattern element (literal, group, class, anchor...).

    Returns None at "|" or ")" so the caller can handle alternation or
    the end of a group.
    """
    here = source.pos
    ch = source.get()
    if ch in SPECIAL_CHARS:
        if ch in ")|":
            # End of this sequence; let the caller see the delimiter.
            source.pos = here
            return None
        elif ch == "\\":
            return _parse_escape(source, info, in_set=False)
        elif ch == "(":
            element = _parse_paren(source, info)
            # NOTE(review): a comment group "(?#...)" makes _parse_paren
            # return None, which falls through and returns None here,
            # ending the sequence -- verify mid-pattern comments work.
            if element is not None:
                return element
        elif ch == ".":
            if info.flags & DOT_ALL:
                return AnyAll()
            else:
                return Any()
        elif ch == "[":
            return _parse_set(source, info)
        elif ch == "^":
            # NOTE(review): "^" compiles to beginning-of-STRING here
            # (AT_BEGINNING_LINE exists but is unused) -- confirm this is
            # the intended anchor semantics.
            return AtPosition(AT_BEGINNING_STRING)
        elif ch == "$":
            # NOTE(review): likewise "$" compiles to end-of-string.
            return AtPosition(AT_END_STRING)
        elif ch == "{":
            # NOTE(review): a quantifier directly after the brace is
            # rejected ("nothing to repeat") rather than applied to the
            # literal brace; otherwise "{" is a literal.
            here2 = source.pos
            counts = _parse_quantifier(source, info)
            if counts is not None:
                raise RegexpError("nothing to repeat")
            source.pos = here2
            return make_character(info, ord(ch[0]))
        elif ch in "?*+":
            raise RegexpError("nothing to repeat")
        else:
            return make_character(info, ord(ch[0]))
    else:
        return make_character(info, ord(ch[0]))
def _parse_quantifier(source, info):
    """Parse a quantifier, or return None (restoring the cursor)."""
    while True:
        here = source.pos
        if source.match("?"):
            return Counts(0, 1)
        elif source.match("*"):
            return Counts(0)
        elif source.match("+"):
            return Counts(1)
        elif source.match("{"):
            # "{" might be a literal brace; only treat it as a quantifier
            # if it parses as one.
            try:
                return _parse_limited_quantifier(source)
            except ParseError:
                pass
        elif source.match("(?#"):
            # Skip comments between an element and its quantifier.
            _parse_comment(source)
            continue
        break
    source.pos = here
    return None
def _parse_paren(source, info):
    """Parse a group after its "(" : capture, named capture, lookaround,
    atomic group, comment, or a non-capturing/flags group.

    Returns None for comment groups "(?#...)".
    """
    if source.match("?"):
        if source.match("<"):
            if source.match("="):
                # (?<=...) positive lookbehind.
                return _parse_lookaround(source, info, behind=True, positive=True)
            elif source.match("!"):
                # (?<!...) negative lookbehind.
                return _parse_lookaround(source, info, behind=True, positive=False)
            # (?<name>...) named capture group.
            name = _parse_name(source)
            group = info.new_group(name)
            source.expect(">")
            saved_flags = info.flags
            saved_ignore = source.ignore_space
            try:
                subpattern = _parse_pattern(source, info)
            finally:
                # Scoped flags do not leak out of the group.
                source.ignore_space = saved_ignore
                info.flags = saved_flags
            source.expect(")")
            info.close_group(group)
            return Group(info, group, subpattern)
        elif source.match("="):
            # (?=...) positive lookahead.
            return _parse_lookaround(source, info, behind=False, positive=True)
        elif source.match("!"):
            # (?!...) negative lookahead.
            return _parse_lookaround(source, info, behind=False, positive=False)
        elif source.match("#"):
            # (?#...) comment: contributes nothing to the pattern.
            _parse_comment(source)
            return
        elif source.match(">"):
            # (?>...) atomic group.
            return _parse_atomic(source, info)
        elif source.match(":"):
            # (?:...) plain non-capturing group.
            subpattern = _parse_pattern(source, info)
            source.expect(")")
            return subpattern
        elif source.match("-") or source.match("m") or source.match("i") or source.match("x"):
            # TODO: parse the inline flags via _parse_plain_flags(source).
            # NOTE(review): groups like "(?i:...)" currently parse but
            # their flags are silently ignored.
            subpattern = _parse_pattern(source, info)
            source.expect(")")
            return subpattern
        else:
            raise RegexpError("undefined group option")
    # Plain "(...)": a numbered capture group.
    group = info.new_group()
    saved_flags = info.flags
    saved_ignore = source.ignore_space
    try:
        subpattern = _parse_pattern(source, info)
    finally:
        source.ignore_space = saved_ignore
        info.flags = saved_flags
    source.expect(")")
    info.close_group(group)
    return Group(info, group, subpattern)
def _parse_atomic(source, info):
    """Parse the body of an atomic group "(?>...)" up to its ")"."""
    saved_flags = info.flags
    saved_ignore = source.ignore_space
    try:
        subpattern = _parse_pattern(source, info)
    finally:
        source.ignore_space = saved_ignore
        info.flags = saved_flags
    source.expect(")")
    return make_atomic(info, subpattern)
def _parse_set(source, info):
    """Parse a character class "[...]" (after the opening bracket).

    Extended-mode whitespace skipping is disabled inside the class.
    """
    saved_ignore = source.ignore_space
    source.ignore_space = False
    # A leading "^" negates the whole class.
    negate = source.match("^")
    try:
        item = _parse_set_intersect(source, info)
        source.expect("]")
    finally:
        source.ignore_space = saved_ignore
    if negate:
        item = item.with_flags(positive=not item.positive)
    return item.with_flags(case_insensitive=info.flags & IGNORE_CASE)
def _parse_set_intersect(source, info):
    """Parse "&&"-separated set operands into a SetIntersection node.

    A single operand is returned unwrapped.
    """
    first = _parse_set_implicit_union(source, info)
    operands = [first]
    while source.match("&&"):
        operands.append(_parse_set_implicit_union(source, info))
    if len(operands) > 1:
        return SetIntersection(info, operands)
    return first
def _parse_set_implicit_union(source, info):
    """Parse juxtaposed set members up to "]" or "&&"."""
    items = [_parse_set_member(source, info)]
    while True:
        here = source.pos
        if source.match("]") or source.match("&&"):
            # Terminators belong to the caller.
            source.pos = here
            break
        items.append(_parse_set_member(source, info))
    if len(items) == 1 and not isinstance(items[0], Range):
        return items[0]
    return SetUnion(info, items)
def _parse_set_member(source, info):
    """Parse one member, folding "a-z" into a Range when well-formed."""
    start = _parse_set_item(source, info)
    if (not isinstance(start, Character) or not start.positive or
            not source.match("-")):
        return start
    # A "-" just before "]" is a literal dash.
    here = source.pos
    if source.match("]"):
        source.pos = here
        return SetUnion(info, [start, Character(ord("-"))])
    end = _parse_set_item(source, info)
    if not isinstance(end, Character) or not end.positive:
        # The endpoint is not a plain character: dash is a literal.
        return SetUnion(info, [start, Character(ord("-")), end])
    if start.value > end.value:
        raise RegexpError("bad character range")
    if start.value == end.value:
        return start
    return Range(start.value, end.value)
def _parse_set_item(source, info):
    """Parse one atom inside a class: escape, POSIX class, nested set, or
    a literal character."""
    if source.match("\\"):
        return _parse_escape(source, info, in_set=True)
    here = source.pos
    if source.match("[:"):
        try:
            return _parse_posix_class(source, info)
        except ParseError:
            # Not a POSIX class after all: re-parse "[:" literally.
            source.pos = here
    if source.match("["):
        # Nested character class.
        negate = source.match("^")
        item = _parse_set_intersect(source, info)
        source.expect("]")
        if negate:
            item = item.with_flags(positive=not item.positive)
        return item
    ch = source.get()
    if not ch:
        raise RegexpError("bad set")
    return Character(ord(ch[0]))
def _parse_escape(source, info, in_set):
    """Parse the character after a backslash, inside or outside a set."""
    # The escaped character itself is read verbatim, even in /x mode.
    saved_ignore = source.ignore_space
    source.ignore_space = False
    ch = source.get()
    source.ignore_space = saved_ignore
    if not ch:
        raise RegexpError("bad escape")
    if ch == "g" and not in_set:
        # \g<name> back-reference; fall back to a literal "g" on failure.
        here = source.pos
        try:
            return _parse_group_ref(source, info)
        except RegexpError:
            source.pos = here
            return make_character(info, ord(ch[0]), in_set)
    elif ch == "G" and not in_set:
        # \G: match-start anchor.
        return AtPosition(AT_BEGINNING)
    elif ch in "pP":
        return _parse_property(source, info, ch == "p", in_set)
    elif ch.isalpha():
        if not in_set:
            if ch in POSITION_ESCAPES:
                return POSITION_ESCAPES[ch]
        if ch in CHARSET_ESCAPES:
            return CHARSET_ESCAPES[ch]
        elif ch in CHARACTER_ESCAPES:
            return Character(ord(CHARACTER_ESCAPES[ch]))
        # Unknown letter escapes are the letter itself.
        return make_character(info, ord(ch[0]), in_set)
    elif ch.isdigit():
        return _parse_numeric_escape(source, info, ch, in_set)
    else:
        return make_character(info, ord(ch[0]), in_set)
def _parse_lookaround(source, info, behind, positive):
    """Parse the body of a lookaround group up to its ")"."""
    saved_flags = info.flags
    saved_ignore = source.ignore_space
    try:
        subpattern = _parse_pattern(source, info)
    finally:
        source.ignore_space = saved_ignore
        info.flags = saved_flags
    source.expect(")")
    return LookAround(subpattern, behind=behind, positive=positive)
def _parse_limited_quantifier(source):
    """Parse the body of a "{m}", "{m,}", "{,n}" or "{m,n}" quantifier.

    Raises ParseError when the text is not a valid counted quantifier
    (the caller backtracks and treats "{" as a literal), and RegexpError
    for counts that are inconsistent or out of range.
    """
    min_count = _parse_count(source)
    ch = source.get()
    if ch == ",":
        max_count = _parse_count(source)
        if not source.match("}"):
            raise ParseError
        # Missing bounds default to 0 / unlimited.
        min_count = int(min_count) if min_count else 0
        max_count = int(max_count) if max_count else MAX_REPEAT
        if min_count > max_count:
            # Fixed error-message typo ("gereater" -> "greater").
            raise RegexpError("min repeat greater than max repeat")
        if max_count > MAX_REPEAT:
            raise RegexpError("repeat count too big")
        return Counts(min_count, max_count, limited_quantifier=True)
    if ch != "}":
        raise ParseError
    if not min_count:
        # "{}" is not a quantifier.
        raise ParseError
    min_count = int(min_count)
    if min_count > MAX_REPEAT:
        raise RegexpError("repeat count too big")
    return Counts(min_count, min_count, limited_quantifier=True)
def _parse_count(source):
    """Consume a (possibly empty) run of digits and return it as a string."""
    digits = StringBuilder(2)
    while True:
        mark = source.pos
        ch = source.get()
        if not ch.isdigit():
            # Push back whatever ended the digit run.
            source.pos = mark
            return digits.build()
        digits.append(ch)
def _parse_comment(source):
while True:
ch = source.get()
if ch == ")":
break
elif not ch:
break
def _parse_name(source):
    """Read a group name, stopping before ")" or ">" (the terminator is
    pushed back for the caller to consume)."""
    name = StringBuilder(5)
    while True:
        mark = source.pos
        ch = source.get()
        if not ch:
            break
        if ch in ")>":
            source.pos = mark
            break
        name.append(ch)
    return name.build()
def _parse_plain_flags(source):
    """Read inline-flag letters up to the ":" that terminates them.

    Returns the flag characters as a string.

    BUG FIX: the original looped forever when the pattern ended before a
    ":" was found -- an exhausted source keeps returning "" which was
    appended indefinitely.  We now also stop at end of input.
    """
    b = StringBuilder(4)
    while True:
        ch = source.get()
        if ch == ":" or not ch:
            break
        b.append(ch)
    return b.build()
def _parse_group_ref(source, info):
    """Parse a "\\g<name>" back-reference (the "g" is already consumed).

    Raises RegexpError when the reference names a group that is still
    being parsed (a reference into an open group is meaningless).
    """
    source.expect("<")
    group_name = _parse_name(source)
    source.expect(">")
    if info.is_open_group(group_name):
        raise RegexpError("can't refer to an open group")
    group = info.normalize_group(group_name)
    return make_ref_group(info, group)
def _parse_property(source, info, positive, in_set):
    """Parse a "\\p{name}" / "\\P{name}" character property.

    Falls back to a literal "p"/"P" character (rewinding the source)
    when the braces are malformed or the property name is unknown.
    """
    start = source.pos
    if source.match("{"):
        negate = source.match("^")
        _, prop = _parse_property_name(source)
        if source.match("}") and prop in PROPERTIES:
            # positive != negate: "\P{x}" and "\p{^x}" both invert once.
            return Property(PROPERTIES[prop], positive != negate)
    source.pos = start
    literal = "p" if positive else "P"
    return make_character(info, ord(literal), in_set)
def _parse_property_name(source):
    """Read an alphanumeric property name.

    Returns the name twice -- callers expect a (raw, normalized) pair,
    and this parser performs no normalization.
    """
    chars = StringBuilder(5)
    while True:
        mark = source.pos
        ch = source.get()
        if not ch.isalnum():
            source.pos = mark
            break
        chars.append(ch)
    value = chars.build()
    return value, value
def _parse_numeric_escape(source, info, ch, in_set):
    # Octal escapes and numeric back-references ("\1".."\9") are not
    # supported by this parser yet; _parse_escape routes digits here, so
    # any pattern using them fails loudly rather than mis-compiling.
    raise NotImplementedError("_parse_numeric_escape")
def _parse_posix_class(source, info):
    """Parse a POSIX character class such as "[:digit:]" or "[:^digit:]".

    The opening "[:" has already been consumed.  Raises ParseError (so
    the caller, _parse_set_item, can rewind and treat the text as
    literal characters) when the class is unterminated or names an
    unknown property.
    """
    negate = source.match("^")
    _, name = _parse_property_name(source)
    if not source.match(":]"):
        raise ParseError
    if name not in PROPERTIES:
        # BUG FIX: an unknown class name used to escape as a raw
        # KeyError; signal ParseError so the caller's rewind path runs.
        raise ParseError
    # BUG FIX: the "^" negation flag was previously passed directly as
    # the *positive* argument, inverting the meaning of "[:^name:]".
    return Property(PROPERTIES[name], not negate)
def _compile_no_cache(pattern, flags):
    """Compile *pattern* into an rsre opcode list, bypassing the cache.

    Returns a 6-tuple: (code, flags, group_count, group_index,
    index_group, group_offsets).

    NOTE(review): this is RPython / Python 2 code (dict.iteritems below)
    -- do not "modernize" it without porting the whole module.
    """
    source = Source(pattern)
    if flags & EXTENDED:
        # Free-spacing mode: whitespace and "#" comments are skipped.
        source.ignore_space = True
    info = Info(flags)
    parsed = _parse_pattern(source, info)
    if not source.at_end():
        raise RegexpError("trailing characters in pattern")
    parsed.fix_groups()
    parsed = parsed.optimize(info)
    ctx = CompilerContext()
    parsed.compile(ctx)
    ctx.emit(OPCODE_SUCCESS)
    code = ctx.build()
    # Invert the name->number mapping for number->name lookups.
    index_group = {}
    for n, v in info.group_index.iteritems():
        index_group[v] = n
    return code, info.flags, info.group_count, info.group_index, index_group, info.group_offsets
def compile(cache, pattern, flags=0):
    """Return the compiled form of *pattern*, memoized through *cache*.

    The cache is populated only on a miss; the stored value is whatever
    _compile_no_cache produced for this (pattern, flags) pair.
    """
    if not cache.contains(pattern, flags):
        compiled = _compile_no_cache(pattern, flags)
        cache.set(pattern, flags, compiled)
    return cache.get(pattern, flags)
| 29.190909 | 131 | 0.595116 | import sys
from rpython.rlib.listsort import make_timsort_class
from rpython.rlib.objectmodel import specialize
from rpython.rlib.rstring import StringBuilder
from rpython.rlib.rsre.rsre_core import (
OPCODE_LITERAL, OPCODE_LITERAL_IGNORE, OPCODE_SUCCESS, OPCODE_ASSERT,
OPCODE_MARK, OPCODE_REPEAT, OPCODE_ANY, OPCODE_ANY_ALL, OPCODE_MAX_UNTIL,
OPCODE_MIN_UNTIL, OPCODE_GROUPREF, OPCODE_AT, OPCODE_BRANCH, OPCODE_RANGE,
OPCODE_JUMP, OPCODE_ASSERT_NOT, OPCODE_CATEGORY, OPCODE_FAILURE, OPCODE_IN,
OPCODE_NEGATE
)
from rpython.rlib.rsre.rsre_char import MAXREPEAT as MAX_REPEAT
IGNORE_CASE = 1 << 0
EXTENDED = 1 << 1
DOT_ALL = 1 << 2
ONCE = 1 << 3
FIXED_ENCODING = 1 << 4
NO_ENCODING = 1 << 5
OPTIONS_MAP = {
"i": IGNORE_CASE,
"x": EXTENDED,
"m": DOT_ALL,
"o": ONCE,
"u": FIXED_ENCODING,
"n": NO_ENCODING,
"e": FIXED_ENCODING,
"s": FIXED_ENCODING,
}
FLAGS_MAP = [
("m", DOT_ALL),
("i", IGNORE_CASE),
("x", EXTENDED),
]
SPECIAL_CHARS = "()|?*+{^$.[\\#"
CHARACTER_ESCAPES = {
"a": "\a",
"b": "\b",
"f": "\f",
"n": "\n",
"r": "\r",
"t": "\t",
"v": "\v",
}
AT_BEGINNING = 0
AT_BEGINNING_LINE = 1
AT_BEGINNING_STRING = 2
AT_BOUNDARY = 3
AT_NON_BOUNDARY = 4
AT_END = 5
AT_END_LINE = 6
AT_END_STRING = 7
AT_LOC_BOUNDARY = 8
AT_LOC_NON_BOUNDARY = 9
AT_UNI_BOUNDARY = 10
AT_UNI_NON_BOUNDARY = 11
CATEGORY_DIGIT = 0
CATEGORY_NOT_DIGIT = 1
CATEGORY_SPACE = 2
CATEGORY_NOT_SPACE = 3
CATEGORY_WORD = 4
CATEGORY_NOT_WORD = 5
CATEGORY_LINEBREAK = 6
CATEGORY_NOT_LINEBREAK = 7
CATEGORY_LOC_WORD = 8
CATEGORY_LOC_NOT_WORD = 9
CATEGORY_UNI_DIGIT = 10
CATEGORY_UNI_NOT_DIGIT = 11
CATEGORY_UNI_SPACE = 12
CATEGORY_UNI_NOT_SPACE = 13
CATEGORY_UNI_WORD = 14
CATEGORY_UNI_NOT_WORD = 15
CATEGORY_UNI_LINEBREAK = 16
CATEGORY_UNI_NOT_LINEBREAK = 17
class UnscopedFlagSet(Exception):
    """Raised when inline flags must apply to the whole pattern, not a group."""
    def __init__(self, global_flags):
        Exception.__init__(self)
        # Flag bits that must be applied globally before re-parsing.
        self.global_flags = global_flags
class RegexpError(Exception):
    """User-visible error: the pattern is syntactically or semantically invalid."""
    pass
class ParseError(Exception):
    """Internal backtracking signal: the current sub-parse does not apply here."""
    pass
class Source(object):
    """A cursor over the pattern string being parsed.

    ``pos`` is the current read position.  When ``ignore_space`` is set
    (EXTENDED / free-spacing mode), whitespace and "#"-to-end-of-line
    comments are transparently skipped by all read operations.
    """
    def __init__(self, s):
        self.pos = 0
        self.s = s
        self.ignore_space = False
    def at_end(self):
        # True when only ignorable characters (if any) remain.
        s = self.s
        pos = self.pos
        if self.ignore_space:
            while True:
                if pos >= len(s):
                    break
                elif s[pos].isspace():
                    pos += 1
                elif s[pos] == "#":
                    # Skip a comment running to the end of the line.
                    pos = s.find("\n", pos)
                    if pos < 0:
                        pos = len(s)
                else:
                    break
        return pos >= len(s)
    def get(self):
        # Return the next significant character and advance, or return
        # "" (without raising) when the input is exhausted.
        s = self.s
        pos = self.pos
        if self.ignore_space:
            while True:
                if pos >= len(s):
                    return ""
                elif s[pos].isspace():
                    pos += 1
                elif s[pos] == "#":
                    pos = s.find("\n", pos)
                    if pos < 0:
                        pos = len(s)
                else:
                    break
        try:
            ch = s[pos]
            self.pos = pos + 1
            return ch
        except IndexError:
            # Past the end: keep pos where the scan stopped.
            self.pos = pos
            return ""
        except ValueError:
            self.pos = len(s)
            return ""
    def match(self, substr):
        # Consume *substr* if it appears next and return True; otherwise
        # leave pos untouched and return False.
        s = self.s
        pos = self.pos
        if self.ignore_space:
            # Each character of substr may be preceded by ignorable text.
            for c in substr:
                while True:
                    if pos >= len(s):
                        return False
                    elif s[pos].isspace():
                        pos += 1
                    elif s[pos] == "#":
                        pos = s.find("\n", pos)
                        if pos < 0:
                            pos = len(s)
                    else:
                        break
                if s[pos] != c:
                    return False
                pos += 1
            self.pos = pos
            return True
        else:
            # NOTE: py2/RPython xrange; plain character-by-character compare.
            if pos + len(substr) <= len(s):
                matches = True
                for i in xrange(len(substr)):
                    if s[pos + i] != substr[i]:
                        matches = False
            else:
                matches = False
            if not matches:
                return False
            self.pos = pos + len(substr)
            return True
    def expect(self, substr):
        # Like match(), but failure is a syntax error in the pattern.
        if not self.match(substr):
            raise RegexpError("Missing %s" % substr)
class Info(object):
    """Bookkeeping shared across a single pattern parse/compile.

    Tracks the current flag state, group numbering and naming, and which
    groups are still open (being parsed) versus closed.
    """
    # States for group_state values.
    OPEN = 0
    CLOSED = 1
    def __init__(self, flags):
        self.flags = flags
        self.group_count = 0
        # Groups referenced so far on the current alternation branch.
        self.used_groups = {}
        # group number -> OPEN/CLOSED
        self.group_state = {}
        # name -> group number, and its inverse.
        self.group_index = {}
        self.group_name = {}
        self.named_lists_used = {}
        self.defined_groups = {}
        # Per closed group: running count of hidden groups so far
        # (see close_group / make_atomic).
        self.group_offsets = []
    def new_group(self, name=None):
        # Reuse the number of an already-declared named group; otherwise
        # allocate the next free number (when naming a group, skip
        # numbers already bound to some other name).
        if name in self.group_index:
            group = self.group_index[name]
        else:
            while True:
                self.group_count += 1
                if name is None or self.group_count not in self.group_name:
                    break
            group = self.group_count
        if name is not None:
            self.group_index[name] = group
            self.group_name[group] = name
        self.used_groups[group] = None
        self.group_state[group] = self.OPEN
        return group
    def close_group(self, group, hidden=False):
        # Hidden groups (synthesized, e.g. by make_atomic) bump the
        # running offset so visible group numbering can be adjusted.
        last_group_offset = self.group_offsets[-1] if self.group_offsets else 0
        if hidden:
            last_group_offset += 1
        self.group_offsets.append(last_group_offset)
        self.group_state[group] = self.CLOSED
    def normalize_group(self, name):
        # Accept either a group number ("12") or a declared group name.
        if name.isdigit():
            return int(name)
        else:
            return self.group_index[name]
    def is_open_group(self, name):
        group = self.normalize_group(name)
        return group in self.group_state and self.group_state[group] == self.OPEN
BaseSorter = make_timsort_class()
class BranchSorter(BaseSorter):
    """Timsort specialization ordering branch items by a precomputed rank.

    ``order`` maps a branch's leading value to its first-appearance
    index, so sorting preserves original encounter order of prefixes.
    """
    def __init__(self, items, order):
        BaseSorter.__init__(self, items)
        self.order = order
    def lt(self, a, b):
        # Compare by the rank of each item's leading value.
        return self.order[a[0]] < self.order[b[0]]
class CompilerContext(object):
    """Accumulates the opcode stream produced while compiling a pattern."""
    def __init__(self):
        self.data = []
    def emit(self, opcode):
        """Append one opcode/operand word to the stream."""
        self.data.append(opcode)
    def tell(self):
        """Current stream length; recorded as a patch location for jumps."""
        return len(self.data)
    def patch(self, pos, value):
        """Back-fill a previously emitted placeholder (e.g. a jump offset)."""
        self.data[pos] = value
    def build(self):
        """Return a copy of the finished opcode list."""
        return list(self.data)
class Counts(object):
    """Repetition bounds for a quantifier.

    ``limited_quantifier`` is True for explicit "{m,n}" counts, which
    interact differently with a following possessive "+" (see _parse_item).
    """
    def __init__(self, min_count, max_count=MAX_REPEAT, limited_quantifier=False):
        self.min_count = min_count
        self.max_count = max_count
        self.limited_quantifier = limited_quantifier
class RegexpBase(object):
_attrs_ = ["positive", "case_insensitive", "zerowidth"]
def __init__(self, positive=True, case_insensitive=False, zerowidth=False):
self.positive = positive
self.case_insensitive = case_insensitive
self.zerowidth = zerowidth
@specialize.argtype(1, 2, 3)
def with_flags(self, positive=None, case_insensitive=None, zerowidth=None):
positive = positive if positive is not None else self.positive
case_insensitive = case_insensitive if case_insensitive is not None else self.case_insensitive
zerowidth = zerowidth if zerowidth is not None else self.zerowidth
if (positive == self.positive and
case_insensitive == self.case_insensitive and
zerowidth == self.zerowidth):
return self
return self.rebuild(positive, case_insensitive, zerowidth)
class Character(RegexpBase):
def __init__(self, value, case_insensitive=False, positive=True, zerowidth=False):
RegexpBase.__init__(self, case_insensitive=case_insensitive, positive=positive, zerowidth=zerowidth)
self.value = value
def rebuild(self, positive, case_insensitive, zerowidth):
return Character(self.value, positive=positive, case_insensitive=case_insensitive, zerowidth=zerowidth)
def getwidth(self):
return 1, 1
def fix_groups(self):
pass
def optimize(self, info, in_set=False):
return self
def can_be_affix(self):
return True
def is_empty(self):
return False
def compile(self, ctx):
ctx.emit(OPCODE_LITERAL_IGNORE if self.case_insensitive else OPCODE_LITERAL)
ctx.emit(self.value)
class Any(RegexpBase):
def is_empty(self):
return False
def fix_groups(self):
pass
def optimize(self, info, in_set=False):
return self
def compile(self, ctx):
ctx.emit(OPCODE_ANY)
class AnyAll(RegexpBase):
def is_empty(self):
return False
def fix_groups(self):
pass
def optimize(self, info, in_set=False):
return self
def compile(self, ctx):
ctx.emit(OPCODE_ANY_ALL)
class ZeroWidthBase(RegexpBase):
def fix_groups(self):
pass
def optimize(self, info, in_set=False):
return self
class AtPosition(ZeroWidthBase):
def __init__(self, code):
ZeroWidthBase.__init__(self)
self.code = code
def can_be_affix(self):
return True
def compile(self, ctx):
ctx.emit(OPCODE_AT)
ctx.emit(self.code)
class Property(RegexpBase):
def __init__(self, value, positive=True, case_insensitive=False, zerowidth=False):
RegexpBase.__init__(self, positive=positive, case_insensitive=case_insensitive, zerowidth=zerowidth)
self.value = value
def rebuild(self, positive, case_insensitive, zerowidth):
return Property(self.value, positive, case_insensitive, zerowidth)
def getwidth(self):
return 1, 1
def is_empty(self):
return False
def fix_groups(self):
pass
def optimize(self, info, in_set=False):
return self
def can_be_affix(self):
return True
def compile(self, ctx):
ctx.emit(OPCODE_CATEGORY)
ctx.emit(self.value)
class Range(RegexpBase):
def __init__(self, lower, upper, positive=True, case_insensitive=False, zerowidth=False):
RegexpBase.__init__(self, positive=positive, case_insensitive=case_insensitive, zerowidth=zerowidth)
self.lower = lower
self.upper = upper
def rebuild(self, positive, case_insensitive, zerowidth):
return Range(self.lower, self.upper, positive, case_insensitive, zerowidth)
def fix_groups(self):
pass
def optimize(self, info, in_set=False):
return self
def can_be_affix(self):
return True
def compile(self, ctx):
if not self.positive:
ctx.emit(OPCODE_NEGATE)
ctx.emit(OPCODE_RANGE)
ctx.emit(self.lower)
ctx.emit(self.upper)
class Sequence(RegexpBase):
def __init__(self, items):
RegexpBase.__init__(self)
self.items = items
def is_empty(self):
for item in self.items:
if not item.is_empty():
return False
return True
def fix_groups(self):
for item in self.items:
item.fix_groups()
def optimize(self, info, in_set=False):
items = []
for item in self.items:
item = item.optimize(info)
if isinstance(item, Sequence):
items.extend(item.items)
else:
items.append(item)
return make_sequence(items)
def compile(self, ctx):
for item in self.items:
item.compile(ctx)
class Branch(RegexpBase):
def __init__(self, branches):
RegexpBase.__init__(self)
self.branches = branches
def fix_groups(self):
for b in self.branches:
b.fix_groups()
def is_empty(self):
for b in self.branches:
if not b.is_empty():
return False
return True
def _flatten_branches(self, info, branches):
new_branches = []
for b in branches:
b = b.optimize(info)
if isinstance(b, Branch):
new_branches.extend(b.branches)
else:
new_branches.append(b)
return new_branches
def _split_common_prefix(self, info, branches):
alternatives = []
for b in branches:
if isinstance(b, Sequence):
alternatives.append(b.items)
else:
alternatives.append([b])
max_count = sys.maxint
for a in alternatives:
max_count = min(max_count, len(a))
prefix = alternatives[0]
pos = 0
end_pos = max_count
while (pos < end_pos and prefix[pos].can_be_affix() and
[None for a in alternatives if a[pos] == prefix[pos]]):
pos += 1
if pos == 0:
return [], branches
new_branches = []
for a in alternatives:
new_branches.append(make_sequence(a[pos:]))
return prefix[:pos], new_branches
def _split_common_suffix(self, info, branches):
alternatives = []
for b in branches:
if isinstance(b, Sequence):
alternatives.append(b.items)
else:
alternatives.append([b])
max_count = sys.maxint
for a in alternatives:
max_count = min(max_count, len(a))
suffix = alternatives[0]
pos = -1
end_pos = -1 - max_count
while (pos > end_pos and suffix[pos].can_be_affix() and
[None for a in alternatives if a[pos] == suffix[pos]]):
pos -= 1
count = -1 - pos
if count == 0:
return [], branches
new_branches = []
for a in alternatives:
end = len(a) - count
assert end >= 0
new_branches.append(make_sequence(a[:end]))
start = len(suffix) - count
assert start >= 0
return suffix[start:], new_branches
def _is_simple_character(self, c):
return isinstance(c, Character) and c.positive and not c.case_insensitive
def _flush_char_prefix(self, info, prefixed, order, new_branches):
if not prefixed:
return
items = prefixed.items()
sorter = BranchSorter(items, order)
sorter.sort()
for value, branches in items:
if len(branches) == 1:
new_branches.append(make_sequence(branches[0]))
else:
subbranches = []
optional = False
for b in branches:
if len(b) > 1:
subbranches.append(make_sequence(b[1:]))
elif not optional:
subbranches.append(Sequence([]))
optional = True
sequence = Sequence([Character(value), Branch(subbranches)])
new_branches.append(sequence.optimize(info))
prefixed.clear()
order.clear()
def _merge_common_prefixes(self, info, branches):
prefixed = {}
order = {}
new_branches = []
for b in branches:
if self._is_simple_character(b):
assert isinstance(b, Character)
prefixed.setdefault(b.value, []).append([b])
order.setdefault(b.value, len(order))
elif isinstance(b, Sequence) and b.items and self._is_simple_character(b.items[0]):
item = b.items[0]
assert isinstance(item, Character)
prefixed.setdefault(item.value, []).append(b.items)
order.setdefault(item.value, len(order))
else:
self._flush_char_prefix(info, prefixed, order, new_branches)
new_branches.append(b)
self._flush_char_prefix(info, prefixed, order, new_branches)
return new_branches
def _flush_set_members(self, info, items, case_insensitive, new_branches):
if not items:
return
if len(items) == 1:
[item] = items.keys()
else:
item = SetUnion(info, items.keys()).optimize(info)
new_branches.append(item.with_flags(case_insensitive=case_insensitive))
items.clear()
def _reduce_to_set(self, info, branches):
new_branches = []
items = {}
case_insensitive = False
for b in branches:
if isinstance(b, Character) or isinstance(b, Property) or isinstance(b, SetBase):
if b.case_insensitive != case_insensitive:
self._flush_set_members(info, items, case_insensitive, new_branches)
case_insensitive = b.case_insensitive
items[b.with_flags(case_insensitive=False)] = False
else:
self._flush_set_members(info, items, case_insensitive, new_branches)
new_branches.append(b)
self._flush_set_members(info, items, case_insensitive, new_branches)
return new_branches
def optimize(self, info, in_set=False):
branches = self._flatten_branches(info, self.branches)
prefix, branches = self._split_common_prefix(info, branches)
suffix, branches = self._split_common_suffix(info, branches)
branches = self._merge_common_prefixes(info, branches)
branches = self._reduce_to_set(info, branches)
if len(branches) > 1:
sequence = prefix + [Branch(branches)] + suffix
else:
sequence = prefix + branches + suffix
return make_sequence(sequence)
def compile(self, ctx):
ctx.emit(OPCODE_BRANCH)
tail = []
for b in self.branches:
pos = ctx.tell()
ctx.emit(0)
b.compile(ctx)
ctx.emit(OPCODE_JUMP)
tail.append(ctx.tell())
ctx.emit(0)
ctx.patch(pos, ctx.tell() - pos)
ctx.emit(0)
for t in tail:
ctx.patch(t, ctx.tell() - t)
class BaseRepeat(RegexpBase):
def __init__(self, subpattern, min_count, max_count):
RegexpBase.__init__(self)
self.subpattern = subpattern
self.min_count = min_count
self.max_count = max_count
def fix_groups(self):
self.subpattern.fix_groups()
def is_empty(self):
return self.subpattern.is_empty()
def compile(self, ctx):
ctx.emit(OPCODE_REPEAT)
pos = ctx.tell()
ctx.emit(0)
ctx.emit(self.min_count)
ctx.emit(self.max_count)
self.subpattern.compile(ctx)
ctx.patch(pos, ctx.tell() - pos)
ctx.emit(self.UNTIL_OPCODE)
class GreedyRepeat(BaseRepeat):
UNTIL_OPCODE = OPCODE_MAX_UNTIL
def can_be_affix(self):
return True
def optimize(self, info, in_set=False):
subpattern = self.subpattern.optimize(info)
return GreedyRepeat(subpattern, self.min_count, self.max_count)
class LazyRepeat(BaseRepeat):
UNTIL_OPCODE = OPCODE_MIN_UNTIL
def optimize(self, info, in_set=False):
subpattern = self.subpattern.optimize(info)
return LazyRepeat(subpattern, self.min_count, self.max_count)
class LookAround(RegexpBase):
def __init__(self, subpattern, behind, positive):
RegexpBase.__init__(self, positive=positive)
self.subpattern = subpattern
self.behind = behind
def fix_groups(self):
self.subpattern.fix_groups()
def can_be_affix(self):
return self.subpattern.can_be_affix()
def optimize(self, info, in_set=False):
return LookAround(self.subpattern.optimize(info), self.behind, self.positive)
def compile(self, ctx):
ctx.emit(OPCODE_ASSERT if self.positive else OPCODE_ASSERT_NOT)
pos = ctx.tell()
ctx.emit(0)
if self.behind:
lo, hi = self.subpattern.getwidth()
if lo != hi:
raise RegexpError("look-behind requires fixed-width pattern")
ctx.emit(lo)
else:
ctx.emit(0)
self.subpattern.compile(ctx)
ctx.emit(OPCODE_SUCCESS)
ctx.patch(pos, ctx.tell() - pos)
class Group(RegexpBase):
def __init__(self, info, group, subpattern):
RegexpBase.__init__(self)
self.info = info
self.group = group
self.subpattern = subpattern
def fix_groups(self):
self.info.defined_groups[self.group] = self
self.subpattern.fix_groups()
def can_be_affix(self):
return False
def optimize(self, info, in_set=False):
return Group(self.info, self.group, self.subpattern.optimize(info))
def is_empty(self):
return False
def compile(self, ctx):
ctx.emit(OPCODE_MARK)
ctx.emit((self.group - 1) * 2)
self.subpattern.compile(ctx)
ctx.emit(OPCODE_MARK)
ctx.emit((self.group - 1) * 2 + 1)
class RefGroup(RegexpBase):
def __init__(self, info, group, case_insensitive=False):
RegexpBase.__init__(self, case_insensitive=case_insensitive)
self.info = info
self.group = group
def fix_groups(self):
if not 1 <= self.group <= self.info.group_count:
raise RegexpError("unknown group")
def optimize(self, info, in_set=False):
return self
def compile(self, ctx):
assert not self.case_insensitive
ctx.emit(OPCODE_GROUPREF)
ctx.emit(self.group - 1)
class SetBase(RegexpBase):
def __init__(self, info, items, positive=True, case_insensitive=False, zerowidth=False):
RegexpBase.__init__(self, positive=positive, case_insensitive=case_insensitive, zerowidth=zerowidth)
self.info = info
self.items = items
def is_empty(self):
return False
def can_be_affix(self):
return True
def fix_groups(self):
pass
class SetUnion(SetBase):
def optimize(self, info, in_set=False):
items = []
for item in self.items:
item = item.optimize(info, in_set=True)
if isinstance(item, SetUnion) and item.positive:
items.extend(item.items)
else:
items.append(item)
if len(items) == 1 and not isinstance(items[0], Range):
return items[0].with_flags(
positive=items[0].positive == self.positive,
case_insensitive=self.case_insensitive,
zerowidth=self.zerowidth
).optimize(info, in_set=in_set)
return SetUnion(self.info, items, positive=self.positive, case_insensitive=self.case_insensitive, zerowidth=self.zerowidth)
def rebuild(self, positive, case_insensitive, zerowidth):
return SetUnion(self.info, self.items, positive, case_insensitive, zerowidth).optimize(self.info)
def compile(self, ctx):
ctx.emit(OPCODE_IN)
pos = ctx.tell()
ctx.emit(0)
if not self.positive:
ctx.emit(OPCODE_NEGATE)
for item in self.items:
item.compile(ctx)
ctx.emit(OPCODE_FAILURE)
ctx.patch(pos, ctx.tell() - pos)
class SetIntersection(SetBase):
def rebuild(self, positive, case_insensitive, zerowidth):
return SetIntersection(self.info, self.items, positive=positive, case_insensitive=case_insensitive, zerowidth=zerowidth)
def optimize(self, info, in_set=False):
items = []
for item in self.items:
item = item.optimize(info, in_set=True)
if isinstance(item, SetIntersection) and item.positive:
items.extend(item.items)
else:
items.append(item)
if len(items) == 1:
return items[0].with_flags(
case_insensitive=self.case_insensitive,
zerowidth=self.zerowidth,
).optimize(info, in_set)
return SetIntersection(info, items)
def compile(self, ctx):
Sequence([
LookAround(item, behind=False, positive=True)
for item in self.items[:-1]
] + [self.items[-1]]).compile(ctx)
POSITION_ESCAPES = {
"A": AtPosition(AT_BEGINNING_STRING),
"z": AtPosition(AT_END_STRING),
"b": AtPosition(AT_BOUNDARY),
"B": AtPosition(AT_NON_BOUNDARY),
}
CHARSET_ESCAPES = {
"d": Property(CATEGORY_DIGIT),
"w": Property(CATEGORY_WORD),
}
PROPERTIES = {
"digit": CATEGORY_DIGIT,
"alnum": CATEGORY_WORD,
}
def make_character(info, value, in_set=False):
    """Build a Character node, honouring IGNORE_CASE outside of sets.

    Inside a set the enclosing set node carries the case flag, so the
    member itself is created case-sensitive.
    """
    if not in_set:
        return Character(value, case_insensitive=info.flags & IGNORE_CASE)
    return Character(value)
def make_sequence(items):
    """Collapse a one-element list to its element; otherwise wrap in Sequence."""
    return items[0] if len(items) == 1 else Sequence(items)
def make_atomic(info, subpattern):
    """Emulate an atomic group via a capturing lookahead plus back-reference.

    "(?>p)" is rewritten as "(?=(p))\\1" using a hidden group: the
    lookahead commits to its match and the back-reference re-consumes
    it, so backtracking into *subpattern* is impossible.
    """
    group = info.new_group()
    info.close_group(group, hidden=True)
    lookahead = LookAround(Group(info, group, subpattern),
                           behind=False, positive=True)
    return Sequence([lookahead, RefGroup(info, group)])
def make_ref_group(info, name):
    """Build a back-reference node, case-insensitive when IGNORE_CASE is set."""
    insensitive = info.flags & IGNORE_CASE
    return RefGroup(info, name, case_insensitive=insensitive)
def _parse_pattern(source, info):
    """Parse a full alternation ("a|b|c") at the current position.

    Each branch starts from the same set of already-used groups, and the
    union of the groups used by all branches is recorded afterwards.
    Returns a single node when there is no "|".
    """
    previous_groups = info.used_groups.copy()
    branches = [_parse_sequence(source, info)]
    all_groups = info.used_groups
    while source.match("|"):
        # Every alternative re-starts group usage from the common prefix.
        info.used_groups = previous_groups.copy()
        branches.append(_parse_sequence(source, info))
        all_groups.update(info.used_groups)
    info.used_groups = all_groups
    if len(branches) == 1:
        return branches[0]
    return Branch(branches)
def _parse_sequence(source, info):
    """Parse consecutive items until "|", ")" or end of pattern."""
    items = []
    while True:
        item = _parse_item(source, info)
        if not item:
            break
        items.append(item)
    return make_sequence(items)
def _parse_item(source, info):
element = _parse_element(source, info)
counts = _parse_quantifier(source, info)
if counts is not None:
min_count, max_count = counts.min_count, counts.max_count
if element.is_empty() or min_count == max_count == 1:
return element
if source.match("?"):
return LazyRepeat(element, min_count, max_count)
elif source.match("+"):
if counts.limited_quantifier:
return GreedyRepeat(GreedyRepeat(element, min_count, max_count), 1, MAX_REPEAT)
else:
return make_atomic(info, GreedyRepeat(element, min_count, max_count))
else:
return GreedyRepeat(element, min_count, max_count)
return element
def _parse_element(source, info):
here = source.pos
ch = source.get()
if ch in SPECIAL_CHARS:
if ch in ")|":
source.pos = here
return None
elif ch == "\\":
return _parse_escape(source, info, in_set=False)
elif ch == "(":
element = _parse_paren(source, info)
if element is not None:
return element
elif ch == ".":
if info.flags & DOT_ALL:
return AnyAll()
else:
return Any()
elif ch == "[":
return _parse_set(source, info)
elif ch == "^":
return AtPosition(AT_BEGINNING_STRING)
elif ch == "$":
return AtPosition(AT_END_STRING)
elif ch == "{":
here2 = source.pos
counts = _parse_quantifier(source, info)
if counts is not None:
raise RegexpError("nothing to repeat")
source.pos = here2
return make_character(info, ord(ch[0]))
elif ch in "?*+":
raise RegexpError("nothing to repeat")
else:
return make_character(info, ord(ch[0]))
else:
return make_character(info, ord(ch[0]))
def _parse_quantifier(source, info):
while True:
here = source.pos
if source.match("?"):
return Counts(0, 1)
elif source.match("*"):
return Counts(0)
elif source.match("+"):
return Counts(1)
elif source.match("{"):
try:
return _parse_limited_quantifier(source)
except ParseError:
pass
elif source.match("(?#"):
_parse_comment(source)
continue
break
source.pos = here
return None
def _parse_paren(source, info):
if source.match("?"):
if source.match("<"):
if source.match("="):
return _parse_lookaround(source, info, behind=True, positive=True)
elif source.match("!"):
return _parse_lookaround(source, info, behind=True, positive=False)
name = _parse_name(source)
group = info.new_group(name)
source.expect(">")
saved_flags = info.flags
saved_ignore = source.ignore_space
try:
subpattern = _parse_pattern(source, info)
finally:
source.ignore_space = saved_ignore
info.flags = saved_flags
source.expect(")")
info.close_group(group)
return Group(info, group, subpattern)
elif source.match("="):
return _parse_lookaround(source, info, behind=False, positive=True)
elif source.match("!"):
return _parse_lookaround(source, info, behind=False, positive=False)
elif source.match("#"):
_parse_comment(source)
return
elif source.match(">"):
return _parse_atomic(source, info)
elif source.match(":"):
subpattern = _parse_pattern(source, info)
source.expect(")")
return subpattern
elif source.match("-") or source.match("m") or source.match("i") or source.match("x"):
subpattern = _parse_pattern(source, info)
source.expect(")")
return subpattern
else:
raise RegexpError("undefined group option")
group = info.new_group()
saved_flags = info.flags
saved_ignore = source.ignore_space
try:
subpattern = _parse_pattern(source, info)
finally:
source.ignore_space = saved_ignore
info.flags = saved_flags
source.expect(")")
info.close_group(group)
return Group(info, group, subpattern)
def _parse_atomic(source, info):
saved_flags = info.flags
saved_ignore = source.ignore_space
try:
subpattern = _parse_pattern(source, info)
finally:
source.ignore_space = saved_ignore
info.flags = saved_flags
source.expect(")")
return make_atomic(info, subpattern)
def _parse_set(source, info):
saved_ignore = source.ignore_space
source.ignore_space = False
negate = source.match("^")
try:
item = _parse_set_intersect(source, info)
source.expect("]")
finally:
source.ignore_space = saved_ignore
if negate:
item = item.with_flags(positive=not item.positive)
return item.with_flags(case_insensitive=info.flags & IGNORE_CASE)
def _parse_set_intersect(source, info):
items = [_parse_set_implicit_union(source, info)]
while source.match("&&"):
items.append(_parse_set_implicit_union(source, info))
if len(items) == 1:
return items[0]
return SetIntersection(info, items)
def _parse_set_implicit_union(source, info):
items = [_parse_set_member(source, info)]
while True:
here = source.pos
if source.match("]") or source.match("&&"):
source.pos = here
break
items.append(_parse_set_member(source, info))
if len(items) == 1 and not isinstance(items[0], Range):
return items[0]
return SetUnion(info, items)
def _parse_set_member(source, info):
start = _parse_set_item(source, info)
if (not isinstance(start, Character) or not start.positive or
not source.match("-")):
return start
here = source.pos
if source.match("]"):
source.pos = here
return SetUnion(info, [start, Character(ord("-"))])
end = _parse_set_item(source, info)
if not isinstance(end, Character) or not end.positive:
return SetUnion(info, [start, Character(ord("-")), end])
if start.value > end.value:
raise RegexpError("bad character range")
if start.value == end.value:
return start
return Range(start.value, end.value)
def _parse_set_item(source, info):
if source.match("\\"):
return _parse_escape(source, info, in_set=True)
here = source.pos
if source.match("[:"):
try:
return _parse_posix_class(source, info)
except ParseError:
source.pos = here
if source.match("["):
negate = source.match("^")
item = _parse_set_intersect(source, info)
source.expect("]")
if negate:
item = item.with_flags(positive=not item.positive)
return item
ch = source.get()
if not ch:
raise RegexpError("bad set")
return Character(ord(ch[0]))
def _parse_escape(source, info, in_set):
saved_ignore = source.ignore_space
source.ignore_space = False
ch = source.get()
source.ignore_space = saved_ignore
if not ch:
raise RegexpError("bad escape")
if ch == "g" and not in_set:
here = source.pos
try:
return _parse_group_ref(source, info)
except RegexpError:
source.pos = here
return make_character(info, ord(ch[0]), in_set)
elif ch == "G" and not in_set:
return AtPosition(AT_BEGINNING)
elif ch in "pP":
return _parse_property(source, info, ch == "p", in_set)
elif ch.isalpha():
if not in_set:
if ch in POSITION_ESCAPES:
return POSITION_ESCAPES[ch]
if ch in CHARSET_ESCAPES:
return CHARSET_ESCAPES[ch]
elif ch in CHARACTER_ESCAPES:
return Character(ord(CHARACTER_ESCAPES[ch]))
return make_character(info, ord(ch[0]), in_set)
elif ch.isdigit():
return _parse_numeric_escape(source, info, ch, in_set)
else:
return make_character(info, ord(ch[0]), in_set)
def _parse_lookaround(source, info, behind, positive):
saved_flags = info.flags
saved_ignore = source.ignore_space
try:
subpattern = _parse_pattern(source, info)
finally:
source.ignore_space = saved_ignore
info.flags = saved_flags
source.expect(")")
return LookAround(subpattern, behind=behind, positive=positive)
def _parse_limited_quantifier(source):
    """Parse the body of a "{m}", "{m,}" or "{m,n}" quantifier.

    The opening "{" has already been consumed.  Returns a Counts object,
    or raises ParseError when the text is not a valid counted quantifier
    (the caller then treats the "{" as a literal character).  Raises
    RegexpError for counts that are out of range.
    """
    min_count = _parse_count(source)
    ch = source.get()
    if ch == ",":
        # "{m,}" or "{m,n}": a missing bound defaults to 0 / MAX_REPEAT.
        max_count = _parse_count(source)
        if not source.match("}"):
            raise ParseError
        min_count = int(min_count) if min_count else 0
        max_count = int(max_count) if max_count else MAX_REPEAT
        if min_count > max_count:
            # BUG FIX: corrected the misspelled error message ("gereater").
            raise RegexpError("min repeat greater than max repeat")
        if max_count > MAX_REPEAT:
            raise RegexpError("repeat count too big")
        return Counts(min_count, max_count, limited_quantifier=True)
    if ch != "}":
        raise ParseError
    if not min_count:
        raise ParseError
    # "{m}": exactly m repetitions.
    min_count = int(min_count)
    if min_count > MAX_REPEAT:
        raise RegexpError("repeat count too big")
    return Counts(min_count, min_count, limited_quantifier=True)
def _parse_count(source):
b = StringBuilder(2)
while True:
here = source.pos
ch = source.get()
if ch.isdigit():
b.append(ch)
else:
source.pos = here
break
return b.build()
def _parse_comment(source):
while True:
ch = source.get()
if ch == ")":
break
elif not ch:
break
def _parse_name(source):
b = StringBuilder(5)
while True:
here = source.pos
ch = source.get()
if ch in ")>":
source.pos = here
break
elif not ch:
break
else:
b.append(ch)
return b.build()
def _parse_plain_flags(source):
    """Read inline-flag letters up to the ":" that terminates them.

    Returns the flag characters as a string.

    BUG FIX: the original looped forever when the pattern ended before a
    ":" was found -- an exhausted source keeps returning "" which was
    appended indefinitely.  We now also stop at end of input.
    """
    b = StringBuilder(4)
    while True:
        ch = source.get()
        if ch == ":" or not ch:
            break
        b.append(ch)
    return b.build()
def _parse_group_ref(source, info):
source.expect("<")
name = _parse_name(source)
source.expect(">")
if info.is_open_group(name):
raise RegexpError("can't refer to an open group")
return make_ref_group(info, info.normalize_group(name))
def _parse_property(source, info, positive, in_set):
here = source.pos
if source.match("{"):
negate = source.match("^")
prop_name, name = _parse_property_name(source)
if source.match("}"):
if name in PROPERTIES:
return Property(PROPERTIES[name], positive != negate)
source.pos = here
return make_character(info, ord("p" if positive else "P"), in_set)
def _parse_property_name(source):
b = StringBuilder(5)
while True:
here = source.pos
ch = source.get()
if ch.isalnum():
b.append(ch)
else:
source.pos = here
break
name = b.build()
return name, name
def _parse_numeric_escape(source, info, ch, in_set):
raise NotImplementedError("_parse_numeric_escape")
def _parse_posix_class(source, info):
    """Parse a POSIX character class such as "[:digit:]" or "[:^digit:]".

    The opening "[:" has already been consumed.  Raises ParseError (so
    the caller, _parse_set_item, can rewind and treat the text as
    literal characters) when the class is unterminated or names an
    unknown property.
    """
    negate = source.match("^")
    _, name = _parse_property_name(source)
    if not source.match(":]"):
        raise ParseError
    if name not in PROPERTIES:
        # BUG FIX: an unknown class name used to escape as a raw
        # KeyError; signal ParseError so the caller's rewind path runs.
        raise ParseError
    # BUG FIX: the "^" negation flag was previously passed directly as
    # the *positive* argument, inverting the meaning of "[:^name:]".
    return Property(PROPERTIES[name], not negate)
def _compile_no_cache(pattern, flags):
source = Source(pattern)
if flags & EXTENDED:
source.ignore_space = True
info = Info(flags)
parsed = _parse_pattern(source, info)
if not source.at_end():
raise RegexpError("trailing characters in pattern")
parsed.fix_groups()
parsed = parsed.optimize(info)
ctx = CompilerContext()
parsed.compile(ctx)
ctx.emit(OPCODE_SUCCESS)
code = ctx.build()
index_group = {}
for n, v in info.group_index.iteritems():
index_group[v] = n
return code, info.flags, info.group_count, info.group_index, index_group, info.group_offsets
def compile(cache, pattern, flags=0):
if not cache.contains(pattern, flags):
cache.set(pattern, flags, _compile_no_cache(pattern, flags))
return cache.get(pattern, flags)
| true | true |
f71526c8cd11306936f07cf05d779b8768da4f48 | 2,661 | py | Python | search_wizard/__init__.py | saptarshibasu15/search-wizard | bd2e84f1f5dbc9196b09ba62930970e364413ed7 | [
"MIT"
] | 2 | 2020-11-26T14:43:45.000Z | 2021-02-15T07:34:45.000Z | search_wizard/__init__.py | saptarshibasu15/search_wizard | bd2e84f1f5dbc9196b09ba62930970e364413ed7 | [
"MIT"
] | null | null | null | search_wizard/__init__.py | saptarshibasu15/search_wizard | bd2e84f1f5dbc9196b09ba62930970e364413ed7 | [
"MIT"
] | null | null | null | import requests
from bs4 import BeautifulSoup
import pandas as pd
class SearchWizard:
config = {
"base": "https://www.google.com/search?q=",
"query": None,
"format": "json"
}
search_results = []
def __init__(self, query: str = None):
if not query == None:
self.config["query"] = query
def get_config(self) -> dict:
return self.config
def get_results(self, query: str = None, flag: str = None) -> list:
if not query == None:
self.config["query"] = query
if not self.config["query"] == None:
r = requests.get(self.config["base"]+self.config["query"])
htmlContent = r.content
soup = BeautifulSoup(htmlContent, "html.parser")
titles = soup.find_all("h3", class_="zBAuLc")
descriptions = soup.find_all('div', class_="BNeawe s3v9rd AP7Wnd")
urls = soup.find_all("div", class_="kCrYT")
for title, description, url in zip(titles, descriptions, urls):
description = description.get_text().replace(u"\xa0", "")
try:
url = str(url.find("a")["href"])
except:
url = "NaN"
self.search_results.append(
{
"title": title.get_text(),
"description": description if description.find("...") == -1 else description[:description.find("...")+3],
"url": url[7:url.find("&sa")]
}
)
if not flag == None:
if flag == "head":
return self.search_results[:3]
elif flag == "tail":
return self.search_results[len(self.search_results)-3:]
else:
return self.search_results
else:
raise Exception(
"QueryNotFound: Try mentioning the search query before using.\nHint: SearchWizard(query) or SearchWizard().get_results(query)")
def prettify(self, flag=None):
if not self.config["query"] == None:
if not flag == None:
if flag == "head":
print(pd.DataFrame(self.get_results(flag="head")))
elif flag == "tail":
print(pd.DataFrame(self.get_results(flag="tail")))
else:
print(pd.DataFrame(self.get_results()))
else:
raise Exception(
"QueryNotFound: Try mentioning the search query before using.\nHint: SearchWizard(query) or SearchWizard().get_results(query)")
| 34.115385 | 143 | 0.516723 | import requests
from bs4 import BeautifulSoup
import pandas as pd
class SearchWizard:
config = {
"base": "https://www.google.com/search?q=",
"query": None,
"format": "json"
}
search_results = []
def __init__(self, query: str = None):
if not query == None:
self.config["query"] = query
def get_config(self) -> dict:
return self.config
def get_results(self, query: str = None, flag: str = None) -> list:
if not query == None:
self.config["query"] = query
if not self.config["query"] == None:
r = requests.get(self.config["base"]+self.config["query"])
htmlContent = r.content
soup = BeautifulSoup(htmlContent, "html.parser")
titles = soup.find_all("h3", class_="zBAuLc")
descriptions = soup.find_all('div', class_="BNeawe s3v9rd AP7Wnd")
urls = soup.find_all("div", class_="kCrYT")
for title, description, url in zip(titles, descriptions, urls):
description = description.get_text().replace(u"\xa0", "")
try:
url = str(url.find("a")["href"])
except:
url = "NaN"
self.search_results.append(
{
"title": title.get_text(),
"description": description if description.find("...") == -1 else description[:description.find("...")+3],
"url": url[7:url.find("&sa")]
}
)
if not flag == None:
if flag == "head":
return self.search_results[:3]
elif flag == "tail":
return self.search_results[len(self.search_results)-3:]
else:
return self.search_results
else:
raise Exception(
"QueryNotFound: Try mentioning the search query before using.\nHint: SearchWizard(query) or SearchWizard().get_results(query)")
def prettify(self, flag=None):
if not self.config["query"] == None:
if not flag == None:
if flag == "head":
print(pd.DataFrame(self.get_results(flag="head")))
elif flag == "tail":
print(pd.DataFrame(self.get_results(flag="tail")))
else:
print(pd.DataFrame(self.get_results()))
else:
raise Exception(
"QueryNotFound: Try mentioning the search query before using.\nHint: SearchWizard(query) or SearchWizard().get_results(query)")
| true | true |
f71526d120aeaf83394faef2c80ab3eeb85fdce7 | 1,413 | py | Python | setup.py | pmrowla/gumiyabot | 5dd446342f129c8e8ddc4de044a7072a58ec7851 | [
"MIT"
] | 4 | 2019-03-19T00:25:44.000Z | 2021-10-16T03:45:22.000Z | setup.py | pmrowla/gumiyabot | 5dd446342f129c8e8ddc4de044a7072a58ec7851 | [
"MIT"
] | 5 | 2017-08-25T15:08:39.000Z | 2021-06-11T09:15:00.000Z | setup.py | pmrowla/gumiyabot | 5dd446342f129c8e8ddc4de044a7072a58ec7851 | [
"MIT"
] | 2 | 2017-10-04T19:30:08.000Z | 2021-12-01T13:39:27.000Z | from setuptools import find_packages, setup
from codecs import open
from os import path
version = '0.1.5'
install_requires = ['aiohttp', 'irc3', 'osuapi']
here = path.abspath(path.dirname(__file__))
with open(path.join(here, 'README.rst'), encoding='utf-8') as f:
long_description = f.read()
setup(
name='gumiyabot',
version=version,
description='Standalone Twitch + Bancho IRC bot for handling osu! beatmap requests',
long_description=long_description,
url='https://github.com/pmrowla/gumiyabot',
author='Peter Rowlands',
author_email='peter@pmrowla.com',
license='MIT',
classifiers=[
'Development Status :: 4 - Beta',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
'Programming Language :: Python :: 3.9',
'Framework :: AsyncIO',
'Intended Audience :: Developers',
'Topic :: Games/Entertainment',
'Topic :: Communications :: Chat :: Internet Relay Chat',
],
keywords='osu twitch gumiya',
packages=find_packages(exclude=['docs', 'tests']),
include_package_data=True,
entry_points={
'console_scripts': ['gumiyabot = gumiyabot.__main__:main'],
},
install_requires=install_requires,
)
| 30.717391 | 88 | 0.644728 | from setuptools import find_packages, setup
from codecs import open
from os import path
version = '0.1.5'
install_requires = ['aiohttp', 'irc3', 'osuapi']
here = path.abspath(path.dirname(__file__))
with open(path.join(here, 'README.rst'), encoding='utf-8') as f:
long_description = f.read()
setup(
name='gumiyabot',
version=version,
description='Standalone Twitch + Bancho IRC bot for handling osu! beatmap requests',
long_description=long_description,
url='https://github.com/pmrowla/gumiyabot',
author='Peter Rowlands',
author_email='peter@pmrowla.com',
license='MIT',
classifiers=[
'Development Status :: 4 - Beta',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
'Programming Language :: Python :: 3.9',
'Framework :: AsyncIO',
'Intended Audience :: Developers',
'Topic :: Games/Entertainment',
'Topic :: Communications :: Chat :: Internet Relay Chat',
],
keywords='osu twitch gumiya',
packages=find_packages(exclude=['docs', 'tests']),
include_package_data=True,
entry_points={
'console_scripts': ['gumiyabot = gumiyabot.__main__:main'],
},
install_requires=install_requires,
)
| true | true |
f715280a5f0f47e678d78c95f09a2e73a4da4522 | 5,503 | py | Python | src/sentry/web/frontend/accept_organization_invite.py | sigismund/sentry | 421a512cd3b4a4c9ed660af536dc5bc4c12a287c | [
"BSD-3-Clause"
] | 1 | 2019-05-28T06:18:03.000Z | 2019-05-28T06:18:03.000Z | src/sentry/web/frontend/accept_organization_invite.py | sigismund/sentry | 421a512cd3b4a4c9ed660af536dc5bc4c12a287c | [
"BSD-3-Clause"
] | 6 | 2018-10-19T10:04:23.000Z | 2019-12-09T20:29:12.000Z | src/sentry/web/frontend/accept_organization_invite.py | sigismund/sentry | 421a512cd3b4a4c9ed660af536dc5bc4c12a287c | [
"BSD-3-Clause"
] | null | null | null | from __future__ import absolute_import
from django import forms
from django.contrib import messages
from django.core.urlresolvers import reverse
from django.utils.crypto import constant_time_compare
from django.utils.translation import ugettext_lazy as _
from sentry.models import AuditLogEntryEvent, Authenticator, OrganizationMember, Project
from sentry.signals import member_joined
from sentry.utils import auth
from sentry.web.frontend.base import BaseView
ERR_INVITE_INVALID = _('The invite link you followed is not valid.')
PENDING_INVITE = 'pending-invite'
MAX_AGE = 60 * 60 * 24 * 7 # 7 days
class AcceptInviteForm(forms.Form):
pass
class AcceptOrganizationInviteView(BaseView):
auth_required = False
def get_form(self, request):
if request.method == 'POST':
return AcceptInviteForm(request.POST)
return AcceptInviteForm()
def handle(self, request, member_id, token):
assert request.method in ('POST', 'GET')
try:
om = OrganizationMember.objects.select_related('organization').get(pk=member_id)
except OrganizationMember.DoesNotExist:
messages.add_message(
request,
messages.ERROR,
ERR_INVITE_INVALID,
)
return self.redirect(reverse('sentry'))
if not om.is_pending:
messages.add_message(
request,
messages.ERROR,
ERR_INVITE_INVALID,
)
return self.redirect(reverse('sentry'))
if not constant_time_compare(om.token or om.legacy_token, token):
messages.add_message(
request,
messages.ERROR,
ERR_INVITE_INVALID,
)
return self.redirect(reverse('sentry'))
organization = om.organization
qs = Project.objects.filter(
organization=organization,
)
project_list = list(qs[:25])
project_count = qs.count()
org_requires_2fa = organization.flags.require_2fa.is_set
user_has_2fa = Authenticator.objects.user_has_2fa(request.user.id)
needs_2fa = org_requires_2fa and not user_has_2fa
context = {
'org_name': organization.name,
'project_list': project_list,
'project_count': project_count,
'needs_authentication': not request.user.is_authenticated(),
'needs_2fa': needs_2fa,
'logout_url': '{}?next={}'.format(
reverse('sentry-logout'),
request.path,
),
'login_url': '{}?next={}'.format(
reverse('sentry-login'),
request.path,
),
'register_url': '{}?next={}'.format(
reverse('sentry-register'),
request.path,
),
}
if not request.user.is_authenticated():
# Show login or register form
auth.initiate_login(request, next_url=request.get_full_path())
request.session['can_register'] = True
request.session['invite_email'] = om.email
return self.respond('sentry/accept-organization-invite.html', context)
if needs_2fa:
# redirect to setup 2fa
response = self.respond('sentry/accept-organization-invite.html', context)
response.set_cookie(PENDING_INVITE, request.path, max_age=MAX_AGE)
return response
# if they're already a member of the organization its likely they're
# using a shared account and either previewing this invite or
# are incorrectly expecting this to create a new account for them
context['existing_member'] = OrganizationMember.objects.filter(
user=request.user.id,
organization=om.organization_id,
).exists()
form = self.get_form(request)
if form.is_valid():
if OrganizationMember.objects.filter(
organization=organization, user=request.user
).exists():
messages.add_message(
request, messages.SUCCESS,
_('You are already a member of the %r organization.') %
(organization.name.encode('utf-8'), )
)
om.delete()
else:
om.user = request.user
om.email = None
om.save()
self.create_audit_entry(
request,
organization=organization,
target_object=om.id,
target_user=request.user,
event=AuditLogEntryEvent.MEMBER_ACCEPT,
data=om.get_audit_log_data(),
)
messages.add_message(
request, messages.SUCCESS,
_('You have been added to the %r organization.') %
(organization.name.encode('utf-8'), )
)
member_joined.send(member=om, sender=self)
request.session.pop('can_register', None)
response = self.redirect(reverse('sentry-organization-home', args=[organization.slug]))
if PENDING_INVITE in request.COOKIES:
response.delete_cookie(PENDING_INVITE)
return response
context['form'] = form
return self.respond('sentry/accept-organization-invite.html', context)
| 34.39375 | 99 | 0.58459 | from __future__ import absolute_import
from django import forms
from django.contrib import messages
from django.core.urlresolvers import reverse
from django.utils.crypto import constant_time_compare
from django.utils.translation import ugettext_lazy as _
from sentry.models import AuditLogEntryEvent, Authenticator, OrganizationMember, Project
from sentry.signals import member_joined
from sentry.utils import auth
from sentry.web.frontend.base import BaseView
ERR_INVITE_INVALID = _('The invite link you followed is not valid.')
PENDING_INVITE = 'pending-invite'
MAX_AGE = 60 * 60 * 24 * 7
class AcceptInviteForm(forms.Form):
pass
class AcceptOrganizationInviteView(BaseView):
auth_required = False
def get_form(self, request):
if request.method == 'POST':
return AcceptInviteForm(request.POST)
return AcceptInviteForm()
def handle(self, request, member_id, token):
assert request.method in ('POST', 'GET')
try:
om = OrganizationMember.objects.select_related('organization').get(pk=member_id)
except OrganizationMember.DoesNotExist:
messages.add_message(
request,
messages.ERROR,
ERR_INVITE_INVALID,
)
return self.redirect(reverse('sentry'))
if not om.is_pending:
messages.add_message(
request,
messages.ERROR,
ERR_INVITE_INVALID,
)
return self.redirect(reverse('sentry'))
if not constant_time_compare(om.token or om.legacy_token, token):
messages.add_message(
request,
messages.ERROR,
ERR_INVITE_INVALID,
)
return self.redirect(reverse('sentry'))
organization = om.organization
qs = Project.objects.filter(
organization=organization,
)
project_list = list(qs[:25])
project_count = qs.count()
org_requires_2fa = organization.flags.require_2fa.is_set
user_has_2fa = Authenticator.objects.user_has_2fa(request.user.id)
needs_2fa = org_requires_2fa and not user_has_2fa
context = {
'org_name': organization.name,
'project_list': project_list,
'project_count': project_count,
'needs_authentication': not request.user.is_authenticated(),
'needs_2fa': needs_2fa,
'logout_url': '{}?next={}'.format(
reverse('sentry-logout'),
request.path,
),
'login_url': '{}?next={}'.format(
reverse('sentry-login'),
request.path,
),
'register_url': '{}?next={}'.format(
reverse('sentry-register'),
request.path,
),
}
if not request.user.is_authenticated():
auth.initiate_login(request, next_url=request.get_full_path())
request.session['can_register'] = True
request.session['invite_email'] = om.email
return self.respond('sentry/accept-organization-invite.html', context)
if needs_2fa:
response = self.respond('sentry/accept-organization-invite.html', context)
response.set_cookie(PENDING_INVITE, request.path, max_age=MAX_AGE)
return response
context['existing_member'] = OrganizationMember.objects.filter(
user=request.user.id,
organization=om.organization_id,
).exists()
form = self.get_form(request)
if form.is_valid():
if OrganizationMember.objects.filter(
organization=organization, user=request.user
).exists():
messages.add_message(
request, messages.SUCCESS,
_('You are already a member of the %r organization.') %
(organization.name.encode('utf-8'), )
)
om.delete()
else:
om.user = request.user
om.email = None
om.save()
self.create_audit_entry(
request,
organization=organization,
target_object=om.id,
target_user=request.user,
event=AuditLogEntryEvent.MEMBER_ACCEPT,
data=om.get_audit_log_data(),
)
messages.add_message(
request, messages.SUCCESS,
_('You have been added to the %r organization.') %
(organization.name.encode('utf-8'), )
)
member_joined.send(member=om, sender=self)
request.session.pop('can_register', None)
response = self.redirect(reverse('sentry-organization-home', args=[organization.slug]))
if PENDING_INVITE in request.COOKIES:
response.delete_cookie(PENDING_INVITE)
return response
context['form'] = form
return self.respond('sentry/accept-organization-invite.html', context)
| true | true |
f7152815b2e45bf057d62fc81d08199232df205f | 591 | py | Python | wildlifecompliance/migrations/0539_auto_20210317_1151.py | Djandwich/wildlifecompliance | ca296798526a56ce67ffc2f7e8ebdbae95077e6d | [
"Apache-2.0"
] | null | null | null | wildlifecompliance/migrations/0539_auto_20210317_1151.py | Djandwich/wildlifecompliance | ca296798526a56ce67ffc2f7e8ebdbae95077e6d | [
"Apache-2.0"
] | 3 | 2020-03-12T00:45:31.000Z | 2022-03-02T10:37:23.000Z | wildlifecompliance/migrations/0539_auto_20210317_1151.py | Djandwich/wildlifecompliance | ca296798526a56ce67ffc2f7e8ebdbae95077e6d | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
# Generated by Django 1.11.29 on 2021-03-17 03:51
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('wildlifecompliance', '0538_auto_20210305_1140'),
]
operations = [
migrations.AlterField(
model_name='classification',
name='name',
field=models.CharField(choices=[('complaint', 'Complaint'), ('enquiry', 'Enquiry'), ('incident', 'Incident')], default='complaint', max_length=30, unique=True),
),
]
| 28.142857 | 172 | 0.637902 |
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('wildlifecompliance', '0538_auto_20210305_1140'),
]
operations = [
migrations.AlterField(
model_name='classification',
name='name',
field=models.CharField(choices=[('complaint', 'Complaint'), ('enquiry', 'Enquiry'), ('incident', 'Incident')], default='complaint', max_length=30, unique=True),
),
]
| true | true |
f71528a738fc1065a84bb466471ea884e98dc377 | 3,768 | py | Python | src/decode/decoder.py | Masao-Someki/CycleVAE_VC | be4a27637a3f8b6272d96105f9b3c9327f6c16f7 | [
"MIT"
] | 3 | 2020-06-03T08:29:49.000Z | 2022-03-23T02:29:01.000Z | src/decode/decoder.py | Masao-Someki/CycleVAE_VC | be4a27637a3f8b6272d96105f9b3c9327f6c16f7 | [
"MIT"
] | 1 | 2020-06-07T23:06:10.000Z | 2020-06-07T23:06:10.000Z | src/decode/decoder.py | Masao-Someki/CycleVAE_VC | be4a27637a3f8b6272d96105f9b3c9327f6c16f7 | [
"MIT"
] | 1 | 2020-06-03T09:41:42.000Z | 2020-06-03T09:41:42.000Z | # Copyright 2020 Masao Someki
# MIT License (https://opensource.org/licenses/MIT)
import os
import glob
import h5py
import logging
import librosa
import numpy as np
from scipy.io import wavfile
from speech import Synthesizer
IRLEN = 1024
INTERVALS = 10
SEED = 1
LP_CUTOFF = 20
class Decoder(object):
def __init__(self, args, scaler, logger=None):
# directory to save wav files
self.save_dir = args.exp_dir
self.fs = args.fs
self.shiftms = args.shiftms
self.fftl = args.fftl
# mcep_alpha
if args.fs == 16000:
self.mcep_alpha = 0.41
elif args.fs == 22050:
self.mcep_alpha = 0.455
elif args.fs == 24000:
self.mcep_alpha = 0.466
elif args.fs == 44100:
self.mcep_alpha = 0.544
elif args.fs == 48000:
self.mcep_alpha = 0.554
else:
raise ValueError('sampling rate should be one of \
16000, 22050, 24000, 44100, 48000')
# scaler
self.scaler = scaler
# synthesizer
self.synthesizer = Synthesizer(fs=args.fs, fftl=args.fftl, shiftms=args.shiftms)
# logger
if logger is not None:
self.logger = logger
else:
self.logger = logging.getLogger(__name__)
def _inverse_transform(self, key, x):
m = self.scaler[key].mean_
s = self.scaler[key].scale_
return x * s + m
def decode(self, inputs, output, iter_count, i):
# directory
wav_dir = os.path.join(self.save_dir, str(iter_count))
if not os.path.exists(wav_dir):
os.mkdir(wav_dir)
# process over all data
for b in range(len(output['reconst_half'][0])):
# flen
flen = inputs['flen'][b]
# mcep
mcep = inputs['mcep'][b][:flen].cpu().detach().numpy()
mcep = self._inverse_transform('mcep', mcep).astype(np.float64)
# process src-src wav
cvmcep = output['reconst_half'][0][b][:flen].cpu().detach().numpy()
cvmcep = self._inverse_transform('mcep', cvmcep).astype(np.float64)
# codeap
codeap = inputs['codeap'][b][:flen].cpu().detach().numpy().astype(np.float64)
codeap = self._inverse_transform('codeap', codeap)
# synthesize
wav = self.synthesizer.synthesis(
inputs['f0'][b][:flen].squeeze(1).cpu().detach().numpy().astype(np.float64),
cvmcep,
codeap,
alpha=self.mcep_alpha,
rmcep=mcep
)
wav = np.clip(wav, -32768, 32767)
wav_file = os.path.join(
wav_dir,
'%s_%s_%d.wav' % (inputs['src'][b], inputs['src'][b], i)
)
wavfile.write(wav_file, self.fs, wav.astype(np.int16))
# process src-trg wav
cvmcep = output['trg_reconst'][b][:flen].cpu().detach().numpy()
cvmcep = self._inverse_transform('mcep', cvmcep).astype(np.float64)
# convert f0
cvf0 = inputs['cv_f0'][b][:flen].squeeze(1).cpu().detach().numpy().astype(np.float64)
# synthesize
wav = self.synthesizer.synthesis(
cvf0,
cvmcep,
codeap,
alpha=self.mcep_alpha,
rmcep=mcep
)
wav = np.clip(wav, -32768, 32767)
wav_file = os.path.join(
wav_dir,
'%s_%s_%d.wav' % (inputs['src'][b], inputs['trg'][b], i)
)
wavfile.write(wav_file, self.fs, wav.astype(np.int16))
| 31.4 | 97 | 0.52362 |
import os
import glob
import h5py
import logging
import librosa
import numpy as np
from scipy.io import wavfile
from speech import Synthesizer
IRLEN = 1024
INTERVALS = 10
SEED = 1
LP_CUTOFF = 20
class Decoder(object):
def __init__(self, args, scaler, logger=None):
self.save_dir = args.exp_dir
self.fs = args.fs
self.shiftms = args.shiftms
self.fftl = args.fftl
if args.fs == 16000:
self.mcep_alpha = 0.41
elif args.fs == 22050:
self.mcep_alpha = 0.455
elif args.fs == 24000:
self.mcep_alpha = 0.466
elif args.fs == 44100:
self.mcep_alpha = 0.544
elif args.fs == 48000:
self.mcep_alpha = 0.554
else:
raise ValueError('sampling rate should be one of \
16000, 22050, 24000, 44100, 48000')
self.scaler = scaler
self.synthesizer = Synthesizer(fs=args.fs, fftl=args.fftl, shiftms=args.shiftms)
if logger is not None:
self.logger = logger
else:
self.logger = logging.getLogger(__name__)
def _inverse_transform(self, key, x):
m = self.scaler[key].mean_
s = self.scaler[key].scale_
return x * s + m
def decode(self, inputs, output, iter_count, i):
wav_dir = os.path.join(self.save_dir, str(iter_count))
if not os.path.exists(wav_dir):
os.mkdir(wav_dir)
for b in range(len(output['reconst_half'][0])):
flen = inputs['flen'][b]
mcep = inputs['mcep'][b][:flen].cpu().detach().numpy()
mcep = self._inverse_transform('mcep', mcep).astype(np.float64)
cvmcep = output['reconst_half'][0][b][:flen].cpu().detach().numpy()
cvmcep = self._inverse_transform('mcep', cvmcep).astype(np.float64)
codeap = inputs['codeap'][b][:flen].cpu().detach().numpy().astype(np.float64)
codeap = self._inverse_transform('codeap', codeap)
wav = self.synthesizer.synthesis(
inputs['f0'][b][:flen].squeeze(1).cpu().detach().numpy().astype(np.float64),
cvmcep,
codeap,
alpha=self.mcep_alpha,
rmcep=mcep
)
wav = np.clip(wav, -32768, 32767)
wav_file = os.path.join(
wav_dir,
'%s_%s_%d.wav' % (inputs['src'][b], inputs['src'][b], i)
)
wavfile.write(wav_file, self.fs, wav.astype(np.int16))
cvmcep = output['trg_reconst'][b][:flen].cpu().detach().numpy()
cvmcep = self._inverse_transform('mcep', cvmcep).astype(np.float64)
cvf0 = inputs['cv_f0'][b][:flen].squeeze(1).cpu().detach().numpy().astype(np.float64)
wav = self.synthesizer.synthesis(
cvf0,
cvmcep,
codeap,
alpha=self.mcep_alpha,
rmcep=mcep
)
wav = np.clip(wav, -32768, 32767)
wav_file = os.path.join(
wav_dir,
'%s_%s_%d.wav' % (inputs['src'][b], inputs['trg'][b], i)
)
wavfile.write(wav_file, self.fs, wav.astype(np.int16))
| true | true |
f7152949331934bec0c7d5505f3422644b6d6f4e | 114,228 | gyp | Python | grpc.gyp | stungkit/grpc | 063c36cb46733c13d2ce8116b6af482c9bd832d6 | [
"Apache-2.0"
] | null | null | null | grpc.gyp | stungkit/grpc | 063c36cb46733c13d2ce8116b6af482c9bd832d6 | [
"Apache-2.0"
] | null | null | null | grpc.gyp | stungkit/grpc | 063c36cb46733c13d2ce8116b6af482c9bd832d6 | [
"Apache-2.0"
] | null | null | null | # GRPC GYP build file
# This file has been automatically generated from a template file.
# Please look at the templates directory instead.
# This file can be regenerated from the template by running
# tools/buildgen/generate_projects.sh
# Copyright 2015 gRPC authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
{
'variables': {
# The openssl and zlib dependencies must be passed in as variables
# defined in an included gypi file, usually common.gypi.
'openssl_gyp_target%': 'Please Define openssl_gyp_target variable',
'zlib_gyp_target%': 'Please Define zlib_gyp_target variable',
'grpc_gcov%': 'false',
'grpc_alpine%': 'false',
},
'target_defaults': {
'configurations': {
'Debug': {
'cflags': [
'-O0',
],
'defines': [
'_DEBUG',
'DEBUG',
],
},
'Release': {
'cflags': [
'-O2',
'-Wframe-larger-than=16384',
],
'defines': [
'NDEBUG',
],
},
},
'cflags': [
'-g',
'-Wall',
'-Wextra',
'-DOSATOMIC_USE_INLINED=1',
'-Ithird_party/abseil-cpp',
'-Ithird_party/re2',
'-Ithird_party/upb',
'-Isrc/core/ext/upb-generated',
'-Isrc/core/ext/upbdefs-generated',
'-Ithird_party/xxhash',
],
'ldflags': [
'-g',
],
'cflags_c': [
'-Werror',
'-std=c99',
],
'cflags_cc': [
'-Werror',
'-std=c++11',
],
'include_dirs': [
'.',
'../..',
'include',
],
'defines': [
'GRPC_ARES=0',
],
'dependencies': [
'<(openssl_gyp_target)',
'<(zlib_gyp_target)',
],
'conditions': [
['grpc_gcov=="true"', {
'cflags': [
'-O0',
'-fprofile-arcs',
'-ftest-coverage',
'-Wno-return-type',
],
'defines': [
'_DEBUG',
'DEBUG',
'GPR_GCOV',
],
'ldflags': [
'-fprofile-arcs',
'-ftest-coverage',
'-rdynamic',
'-lstdc++',
],
}],
['grpc_alpine=="true"', {
'defines': [
'GPR_MUSL_LIBC_COMPAT'
]
}],
['OS == "win"', {
'defines': [
'_WIN32_WINNT=0x0600',
'WIN32_LEAN_AND_MEAN',
'_HAS_EXCEPTIONS=0',
'UNICODE',
'_UNICODE',
'NOMINMAX',
],
'msvs_settings': {
'VCCLCompilerTool': {
'RuntimeLibrary': 1, # static debug
}
},
"libraries": [
"ws2_32"
]
}],
['OS == "mac"', {
'xcode_settings': {
'OTHER_CFLAGS': [
'-g',
'-Wall',
'-Wextra',
'-DOSATOMIC_USE_INLINED=1',
'-Ithird_party/abseil-cpp',
'-Ithird_party/re2',
'-Ithird_party/upb',
'-Isrc/core/ext/upb-generated',
'-Isrc/core/ext/upbdefs-generated',
'-Ithird_party/xxhash',
],
'OTHER_CPLUSPLUSFLAGS': [
'-g',
'-Wall',
'-Wextra',
'-DOSATOMIC_USE_INLINED=1',
'-Ithird_party/abseil-cpp',
'-Ithird_party/re2',
'-Ithird_party/upb',
'-Isrc/core/ext/upb-generated',
'-Isrc/core/ext/upbdefs-generated',
'-Ithird_party/xxhash',
'-stdlib=libc++',
'-std=c++11',
'-Wno-error=deprecated-declarations',
],
},
}]
]
},
'targets': [
{
'target_name': 'address_sorting',
'type': 'static_library',
'dependencies': [
],
'sources': [
'third_party/address_sorting/address_sorting.c',
'third_party/address_sorting/address_sorting_posix.c',
'third_party/address_sorting/address_sorting_windows.c',
],
},
{
'target_name': 'end2end_tests',
'type': 'static_library',
'dependencies': [
'grpc_test_util',
],
'sources': [
'src/core/lib/security/authorization/grpc_authorization_policy_provider.cc',
'src/core/lib/security/authorization/rbac_translator.cc',
'test/core/compression/args_utils.cc',
'test/core/end2end/cq_verifier.cc',
'test/core/end2end/data/client_certs.cc',
'test/core/end2end/data/server1_cert.cc',
'test/core/end2end/data/server1_key.cc',
'test/core/end2end/data/test_root_cert.cc',
'test/core/end2end/end2end_test_utils.cc',
'test/core/end2end/end2end_tests.cc',
'test/core/end2end/fixtures/http_proxy_fixture.cc',
'test/core/end2end/fixtures/local_util.cc',
'test/core/end2end/fixtures/proxy.cc',
'test/core/end2end/tests/authority_not_supported.cc',
'test/core/end2end/tests/bad_hostname.cc',
'test/core/end2end/tests/bad_ping.cc',
'test/core/end2end/tests/binary_metadata.cc',
'test/core/end2end/tests/call_creds.cc',
'test/core/end2end/tests/call_host_override.cc',
'test/core/end2end/tests/cancel_after_accept.cc',
'test/core/end2end/tests/cancel_after_client_done.cc',
'test/core/end2end/tests/cancel_after_invoke.cc',
'test/core/end2end/tests/cancel_after_round_trip.cc',
'test/core/end2end/tests/cancel_before_invoke.cc',
'test/core/end2end/tests/cancel_in_a_vacuum.cc',
'test/core/end2end/tests/cancel_with_status.cc',
'test/core/end2end/tests/channelz.cc',
'test/core/end2end/tests/client_streaming.cc',
'test/core/end2end/tests/compressed_payload.cc',
'test/core/end2end/tests/connectivity.cc',
'test/core/end2end/tests/default_host.cc',
'test/core/end2end/tests/disappearing_server.cc',
'test/core/end2end/tests/empty_batch.cc',
'test/core/end2end/tests/filter_causes_close.cc',
'test/core/end2end/tests/filter_context.cc',
'test/core/end2end/tests/filter_init_fails.cc',
'test/core/end2end/tests/filter_latency.cc',
'test/core/end2end/tests/filter_status_code.cc',
'test/core/end2end/tests/filtered_metadata.cc',
'test/core/end2end/tests/graceful_server_shutdown.cc',
'test/core/end2end/tests/grpc_authz.cc',
'test/core/end2end/tests/high_initial_seqno.cc',
'test/core/end2end/tests/hpack_size.cc',
'test/core/end2end/tests/invoke_large_request.cc',
'test/core/end2end/tests/keepalive_timeout.cc',
'test/core/end2end/tests/large_metadata.cc',
'test/core/end2end/tests/max_concurrent_streams.cc',
'test/core/end2end/tests/max_connection_age.cc',
'test/core/end2end/tests/max_connection_idle.cc',
'test/core/end2end/tests/max_message_length.cc',
'test/core/end2end/tests/negative_deadline.cc',
'test/core/end2end/tests/no_error_on_hotpath.cc',
'test/core/end2end/tests/no_logging.cc',
'test/core/end2end/tests/no_op.cc',
'test/core/end2end/tests/payload.cc',
'test/core/end2end/tests/ping.cc',
'test/core/end2end/tests/ping_pong_streaming.cc',
'test/core/end2end/tests/proxy_auth.cc',
'test/core/end2end/tests/registered_call.cc',
'test/core/end2end/tests/request_with_flags.cc',
'test/core/end2end/tests/request_with_payload.cc',
'test/core/end2end/tests/resource_quota_server.cc',
'test/core/end2end/tests/retry.cc',
'test/core/end2end/tests/retry_cancel_after_first_attempt_starts.cc',
'test/core/end2end/tests/retry_cancel_during_delay.cc',
'test/core/end2end/tests/retry_cancel_with_multiple_send_batches.cc',
'test/core/end2end/tests/retry_cancellation.cc',
'test/core/end2end/tests/retry_disabled.cc',
'test/core/end2end/tests/retry_exceeds_buffer_size_in_delay.cc',
'test/core/end2end/tests/retry_exceeds_buffer_size_in_initial_batch.cc',
'test/core/end2end/tests/retry_exceeds_buffer_size_in_subsequent_batch.cc',
'test/core/end2end/tests/retry_lb_drop.cc',
'test/core/end2end/tests/retry_lb_fail.cc',
'test/core/end2end/tests/retry_non_retriable_status.cc',
'test/core/end2end/tests/retry_non_retriable_status_before_recv_trailing_metadata_started.cc',
'test/core/end2end/tests/retry_per_attempt_recv_timeout.cc',
'test/core/end2end/tests/retry_per_attempt_recv_timeout_on_last_attempt.cc',
'test/core/end2end/tests/retry_recv_initial_metadata.cc',
'test/core/end2end/tests/retry_recv_message.cc',
'test/core/end2end/tests/retry_recv_message_replay.cc',
'test/core/end2end/tests/retry_recv_trailing_metadata_error.cc',
'test/core/end2end/tests/retry_send_initial_metadata_refs.cc',
'test/core/end2end/tests/retry_send_op_fails.cc',
'test/core/end2end/tests/retry_send_recv_batch.cc',
'test/core/end2end/tests/retry_server_pushback_delay.cc',
'test/core/end2end/tests/retry_server_pushback_disabled.cc',
'test/core/end2end/tests/retry_streaming.cc',
'test/core/end2end/tests/retry_streaming_after_commit.cc',
'test/core/end2end/tests/retry_streaming_succeeds_before_replay_finished.cc',
'test/core/end2end/tests/retry_throttled.cc',
'test/core/end2end/tests/retry_too_many_attempts.cc',
'test/core/end2end/tests/retry_transparent_goaway.cc',
'test/core/end2end/tests/retry_transparent_max_concurrent_streams.cc',
'test/core/end2end/tests/retry_transparent_not_sent_on_wire.cc',
'test/core/end2end/tests/retry_unref_before_finish.cc',
'test/core/end2end/tests/retry_unref_before_recv.cc',
'test/core/end2end/tests/server_finishes_request.cc',
'test/core/end2end/tests/server_streaming.cc',
'test/core/end2end/tests/shutdown_finishes_calls.cc',
'test/core/end2end/tests/shutdown_finishes_tags.cc',
'test/core/end2end/tests/simple_delayed_request.cc',
'test/core/end2end/tests/simple_metadata.cc',
'test/core/end2end/tests/simple_request.cc',
'test/core/end2end/tests/streaming_error_response.cc',
'test/core/end2end/tests/trailing_metadata.cc',
'test/core/end2end/tests/write_buffering.cc',
'test/core/end2end/tests/write_buffering_at_end.cc',
'test/core/util/test_lb_policies.cc',
],
},
{
'target_name': 'gpr',
'type': 'static_library',
'dependencies': [
'absl/base:base',
'absl/base:core_headers',
'absl/memory:memory',
'absl/random:random',
'absl/status:status',
'absl/strings:cord',
'absl/strings:str_format',
'absl/strings:strings',
'absl/synchronization:synchronization',
'absl/time:time',
'absl/types:optional',
'upb',
],
'sources': [
'src/core/ext/upb-generated/google/protobuf/any.upb.c',
'src/core/ext/upb-generated/google/rpc/status.upb.c',
'src/core/lib/gpr/alloc.cc',
'src/core/lib/gpr/atm.cc',
'src/core/lib/gpr/cpu_iphone.cc',
'src/core/lib/gpr/cpu_linux.cc',
'src/core/lib/gpr/cpu_posix.cc',
'src/core/lib/gpr/cpu_windows.cc',
'src/core/lib/gpr/env_linux.cc',
'src/core/lib/gpr/env_posix.cc',
'src/core/lib/gpr/env_windows.cc',
'src/core/lib/gpr/log.cc',
'src/core/lib/gpr/log_android.cc',
'src/core/lib/gpr/log_linux.cc',
'src/core/lib/gpr/log_posix.cc',
'src/core/lib/gpr/log_windows.cc',
'src/core/lib/gpr/murmur_hash.cc',
'src/core/lib/gpr/string.cc',
'src/core/lib/gpr/string_posix.cc',
'src/core/lib/gpr/string_util_windows.cc',
'src/core/lib/gpr/string_windows.cc',
'src/core/lib/gpr/sync.cc',
'src/core/lib/gpr/sync_abseil.cc',
'src/core/lib/gpr/sync_posix.cc',
'src/core/lib/gpr/sync_windows.cc',
'src/core/lib/gpr/time.cc',
'src/core/lib/gpr/time_posix.cc',
'src/core/lib/gpr/time_precise.cc',
'src/core/lib/gpr/time_windows.cc',
'src/core/lib/gpr/tmpfile_msys.cc',
'src/core/lib/gpr/tmpfile_posix.cc',
'src/core/lib/gpr/tmpfile_windows.cc',
'src/core/lib/gpr/wrap_memcpy.cc',
'src/core/lib/gprpp/examine_stack.cc',
'src/core/lib/gprpp/fork.cc',
'src/core/lib/gprpp/global_config_env.cc',
'src/core/lib/gprpp/host_port.cc',
'src/core/lib/gprpp/mpscq.cc',
'src/core/lib/gprpp/stat_posix.cc',
'src/core/lib/gprpp/stat_windows.cc',
'src/core/lib/gprpp/status_helper.cc',
'src/core/lib/gprpp/thd_posix.cc',
'src/core/lib/gprpp/thd_windows.cc',
'src/core/lib/gprpp/time_util.cc',
'src/core/lib/profiling/basic_timers.cc',
'src/core/lib/profiling/stap_timers.cc',
],
},
{
'target_name': 'grpc',
'type': 'static_library',
'dependencies': [
'absl/container:flat_hash_map',
'absl/container:inlined_vector',
'absl/functional:bind_front',
'absl/hash:hash',
'absl/meta:type_traits',
'absl/status:statusor',
'absl/types:span',
'absl/types:variant',
'absl/utility:utility',
'gpr',
'address_sorting',
],
'sources': [
'src/core/ext/filters/census/grpc_context.cc',
'src/core/ext/filters/channel_idle/channel_idle_filter.cc',
'src/core/ext/filters/channel_idle/idle_filter_state.cc',
'src/core/ext/filters/client_channel/backend_metric.cc',
'src/core/ext/filters/client_channel/backup_poller.cc',
'src/core/ext/filters/client_channel/channel_connectivity.cc',
'src/core/ext/filters/client_channel/client_channel.cc',
'src/core/ext/filters/client_channel/client_channel_channelz.cc',
'src/core/ext/filters/client_channel/client_channel_factory.cc',
'src/core/ext/filters/client_channel/client_channel_plugin.cc',
'src/core/ext/filters/client_channel/config_selector.cc',
'src/core/ext/filters/client_channel/dynamic_filters.cc',
'src/core/ext/filters/client_channel/global_subchannel_pool.cc',
'src/core/ext/filters/client_channel/health/health_check_client.cc',
'src/core/ext/filters/client_channel/http_proxy.cc',
'src/core/ext/filters/client_channel/lb_policy.cc',
'src/core/ext/filters/client_channel/lb_policy/address_filtering.cc',
'src/core/ext/filters/client_channel/lb_policy/child_policy_handler.cc',
'src/core/ext/filters/client_channel/lb_policy/grpclb/client_load_reporting_filter.cc',
'src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb.cc',
'src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb_balancer_addresses.cc',
'src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb_client_stats.cc',
'src/core/ext/filters/client_channel/lb_policy/grpclb/load_balancer_api.cc',
'src/core/ext/filters/client_channel/lb_policy/oob_backend_metric.cc',
'src/core/ext/filters/client_channel/lb_policy/pick_first/pick_first.cc',
'src/core/ext/filters/client_channel/lb_policy/priority/priority.cc',
'src/core/ext/filters/client_channel/lb_policy/ring_hash/ring_hash.cc',
'src/core/ext/filters/client_channel/lb_policy/rls/rls.cc',
'src/core/ext/filters/client_channel/lb_policy/round_robin/round_robin.cc',
'src/core/ext/filters/client_channel/lb_policy/weighted_target/weighted_target.cc',
'src/core/ext/filters/client_channel/lb_policy/xds/cds.cc',
'src/core/ext/filters/client_channel/lb_policy/xds/xds_cluster_impl.cc',
'src/core/ext/filters/client_channel/lb_policy/xds/xds_cluster_manager.cc',
'src/core/ext/filters/client_channel/lb_policy/xds/xds_cluster_resolver.cc',
'src/core/ext/filters/client_channel/lb_policy_registry.cc',
'src/core/ext/filters/client_channel/local_subchannel_pool.cc',
'src/core/ext/filters/client_channel/proxy_mapper_registry.cc',
'src/core/ext/filters/client_channel/resolver/binder/binder_resolver.cc',
'src/core/ext/filters/client_channel/resolver/dns/c_ares/dns_resolver_ares.cc',
'src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_ev_driver_event_engine.cc',
'src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_ev_driver_posix.cc',
'src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_ev_driver_windows.cc',
'src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper.cc',
'src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper_event_engine.cc',
'src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper_posix.cc',
'src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper_windows.cc',
'src/core/ext/filters/client_channel/resolver/dns/dns_resolver_selection.cc',
'src/core/ext/filters/client_channel/resolver/dns/native/dns_resolver.cc',
'src/core/ext/filters/client_channel/resolver/fake/fake_resolver.cc',
'src/core/ext/filters/client_channel/resolver/google_c2p/google_c2p_resolver.cc',
'src/core/ext/filters/client_channel/resolver/polling_resolver.cc',
'src/core/ext/filters/client_channel/resolver/sockaddr/sockaddr_resolver.cc',
'src/core/ext/filters/client_channel/resolver/xds/xds_resolver.cc',
'src/core/ext/filters/client_channel/resolver_result_parsing.cc',
'src/core/ext/filters/client_channel/retry_filter.cc',
'src/core/ext/filters/client_channel/retry_service_config.cc',
'src/core/ext/filters/client_channel/retry_throttle.cc',
'src/core/ext/filters/client_channel/service_config_channel_arg_filter.cc',
'src/core/ext/filters/client_channel/subchannel.cc',
'src/core/ext/filters/client_channel/subchannel_pool_interface.cc',
'src/core/ext/filters/client_channel/subchannel_stream_client.cc',
'src/core/ext/filters/deadline/deadline_filter.cc',
'src/core/ext/filters/fault_injection/fault_injection_filter.cc',
'src/core/ext/filters/fault_injection/service_config_parser.cc',
'src/core/ext/filters/http/client/http_client_filter.cc',
'src/core/ext/filters/http/client_authority_filter.cc',
'src/core/ext/filters/http/http_filters_plugin.cc',
'src/core/ext/filters/http/message_compress/message_compress_filter.cc',
'src/core/ext/filters/http/message_compress/message_decompress_filter.cc',
'src/core/ext/filters/http/server/http_server_filter.cc',
'src/core/ext/filters/message_size/message_size_filter.cc',
'src/core/ext/filters/rbac/rbac_filter.cc',
'src/core/ext/filters/rbac/rbac_service_config_parser.cc',
'src/core/ext/filters/server_config_selector/server_config_selector.cc',
'src/core/ext/filters/server_config_selector/server_config_selector_filter.cc',
'src/core/ext/transport/chttp2/alpn/alpn.cc',
'src/core/ext/transport/chttp2/client/chttp2_connector.cc',
'src/core/ext/transport/chttp2/server/chttp2_server.cc',
'src/core/ext/transport/chttp2/transport/bin_decoder.cc',
'src/core/ext/transport/chttp2/transport/bin_encoder.cc',
'src/core/ext/transport/chttp2/transport/chttp2_transport.cc',
'src/core/ext/transport/chttp2/transport/context_list.cc',
'src/core/ext/transport/chttp2/transport/flow_control.cc',
'src/core/ext/transport/chttp2/transport/frame_data.cc',
'src/core/ext/transport/chttp2/transport/frame_goaway.cc',
'src/core/ext/transport/chttp2/transport/frame_ping.cc',
'src/core/ext/transport/chttp2/transport/frame_rst_stream.cc',
'src/core/ext/transport/chttp2/transport/frame_settings.cc',
'src/core/ext/transport/chttp2/transport/frame_window_update.cc',
'src/core/ext/transport/chttp2/transport/hpack_encoder.cc',
'src/core/ext/transport/chttp2/transport/hpack_encoder_table.cc',
'src/core/ext/transport/chttp2/transport/hpack_parser.cc',
'src/core/ext/transport/chttp2/transport/hpack_parser_table.cc',
'src/core/ext/transport/chttp2/transport/http2_settings.cc',
'src/core/ext/transport/chttp2/transport/huffsyms.cc',
'src/core/ext/transport/chttp2/transport/parsing.cc',
'src/core/ext/transport/chttp2/transport/stream_lists.cc',
'src/core/ext/transport/chttp2/transport/stream_map.cc',
'src/core/ext/transport/chttp2/transport/varint.cc',
'src/core/ext/transport/chttp2/transport/writing.cc',
'src/core/ext/transport/inproc/inproc_plugin.cc',
'src/core/ext/transport/inproc/inproc_transport.cc',
'src/core/ext/upb-generated/envoy/admin/v3/certs.upb.c',
'src/core/ext/upb-generated/envoy/admin/v3/clusters.upb.c',
'src/core/ext/upb-generated/envoy/admin/v3/config_dump.upb.c',
'src/core/ext/upb-generated/envoy/admin/v3/init_dump.upb.c',
'src/core/ext/upb-generated/envoy/admin/v3/listeners.upb.c',
'src/core/ext/upb-generated/envoy/admin/v3/memory.upb.c',
'src/core/ext/upb-generated/envoy/admin/v3/metrics.upb.c',
'src/core/ext/upb-generated/envoy/admin/v3/mutex_stats.upb.c',
'src/core/ext/upb-generated/envoy/admin/v3/server_info.upb.c',
'src/core/ext/upb-generated/envoy/admin/v3/tap.upb.c',
'src/core/ext/upb-generated/envoy/annotations/deprecation.upb.c',
'src/core/ext/upb-generated/envoy/annotations/resource.upb.c',
'src/core/ext/upb-generated/envoy/config/accesslog/v3/accesslog.upb.c',
'src/core/ext/upb-generated/envoy/config/bootstrap/v3/bootstrap.upb.c',
'src/core/ext/upb-generated/envoy/config/cluster/v3/circuit_breaker.upb.c',
'src/core/ext/upb-generated/envoy/config/cluster/v3/cluster.upb.c',
'src/core/ext/upb-generated/envoy/config/cluster/v3/filter.upb.c',
'src/core/ext/upb-generated/envoy/config/cluster/v3/outlier_detection.upb.c',
'src/core/ext/upb-generated/envoy/config/common/matcher/v3/matcher.upb.c',
'src/core/ext/upb-generated/envoy/config/core/v3/address.upb.c',
'src/core/ext/upb-generated/envoy/config/core/v3/backoff.upb.c',
'src/core/ext/upb-generated/envoy/config/core/v3/base.upb.c',
'src/core/ext/upb-generated/envoy/config/core/v3/config_source.upb.c',
'src/core/ext/upb-generated/envoy/config/core/v3/event_service_config.upb.c',
'src/core/ext/upb-generated/envoy/config/core/v3/extension.upb.c',
'src/core/ext/upb-generated/envoy/config/core/v3/grpc_method_list.upb.c',
'src/core/ext/upb-generated/envoy/config/core/v3/grpc_service.upb.c',
'src/core/ext/upb-generated/envoy/config/core/v3/health_check.upb.c',
'src/core/ext/upb-generated/envoy/config/core/v3/http_uri.upb.c',
'src/core/ext/upb-generated/envoy/config/core/v3/protocol.upb.c',
'src/core/ext/upb-generated/envoy/config/core/v3/proxy_protocol.upb.c',
'src/core/ext/upb-generated/envoy/config/core/v3/resolver.upb.c',
'src/core/ext/upb-generated/envoy/config/core/v3/socket_option.upb.c',
'src/core/ext/upb-generated/envoy/config/core/v3/substitution_format_string.upb.c',
'src/core/ext/upb-generated/envoy/config/core/v3/udp_socket_config.upb.c',
'src/core/ext/upb-generated/envoy/config/endpoint/v3/endpoint.upb.c',
'src/core/ext/upb-generated/envoy/config/endpoint/v3/endpoint_components.upb.c',
'src/core/ext/upb-generated/envoy/config/endpoint/v3/load_report.upb.c',
'src/core/ext/upb-generated/envoy/config/listener/v3/api_listener.upb.c',
'src/core/ext/upb-generated/envoy/config/listener/v3/listener.upb.c',
'src/core/ext/upb-generated/envoy/config/listener/v3/listener_components.upb.c',
'src/core/ext/upb-generated/envoy/config/listener/v3/quic_config.upb.c',
'src/core/ext/upb-generated/envoy/config/listener/v3/udp_listener_config.upb.c',
'src/core/ext/upb-generated/envoy/config/metrics/v3/metrics_service.upb.c',
'src/core/ext/upb-generated/envoy/config/metrics/v3/stats.upb.c',
'src/core/ext/upb-generated/envoy/config/overload/v3/overload.upb.c',
'src/core/ext/upb-generated/envoy/config/rbac/v3/rbac.upb.c',
'src/core/ext/upb-generated/envoy/config/route/v3/route.upb.c',
'src/core/ext/upb-generated/envoy/config/route/v3/route_components.upb.c',
'src/core/ext/upb-generated/envoy/config/route/v3/scoped_route.upb.c',
'src/core/ext/upb-generated/envoy/config/tap/v3/common.upb.c',
'src/core/ext/upb-generated/envoy/config/trace/v3/datadog.upb.c',
'src/core/ext/upb-generated/envoy/config/trace/v3/dynamic_ot.upb.c',
'src/core/ext/upb-generated/envoy/config/trace/v3/http_tracer.upb.c',
'src/core/ext/upb-generated/envoy/config/trace/v3/lightstep.upb.c',
'src/core/ext/upb-generated/envoy/config/trace/v3/opencensus.upb.c',
'src/core/ext/upb-generated/envoy/config/trace/v3/service.upb.c',
'src/core/ext/upb-generated/envoy/config/trace/v3/skywalking.upb.c',
'src/core/ext/upb-generated/envoy/config/trace/v3/trace.upb.c',
'src/core/ext/upb-generated/envoy/config/trace/v3/xray.upb.c',
'src/core/ext/upb-generated/envoy/config/trace/v3/zipkin.upb.c',
'src/core/ext/upb-generated/envoy/extensions/clusters/aggregate/v3/cluster.upb.c',
'src/core/ext/upb-generated/envoy/extensions/filters/common/fault/v3/fault.upb.c',
'src/core/ext/upb-generated/envoy/extensions/filters/http/fault/v3/fault.upb.c',
'src/core/ext/upb-generated/envoy/extensions/filters/http/rbac/v3/rbac.upb.c',
'src/core/ext/upb-generated/envoy/extensions/filters/http/router/v3/router.upb.c',
'src/core/ext/upb-generated/envoy/extensions/filters/network/http_connection_manager/v3/http_connection_manager.upb.c',
'src/core/ext/upb-generated/envoy/extensions/transport_sockets/tls/v3/cert.upb.c',
'src/core/ext/upb-generated/envoy/extensions/transport_sockets/tls/v3/common.upb.c',
'src/core/ext/upb-generated/envoy/extensions/transport_sockets/tls/v3/secret.upb.c',
'src/core/ext/upb-generated/envoy/extensions/transport_sockets/tls/v3/tls.upb.c',
'src/core/ext/upb-generated/envoy/extensions/transport_sockets/tls/v3/tls_spiffe_validator_config.upb.c',
'src/core/ext/upb-generated/envoy/service/discovery/v3/ads.upb.c',
'src/core/ext/upb-generated/envoy/service/discovery/v3/discovery.upb.c',
'src/core/ext/upb-generated/envoy/service/load_stats/v3/lrs.upb.c',
'src/core/ext/upb-generated/envoy/service/status/v3/csds.upb.c',
'src/core/ext/upb-generated/envoy/type/http/v3/cookie.upb.c',
'src/core/ext/upb-generated/envoy/type/http/v3/path_transformation.upb.c',
'src/core/ext/upb-generated/envoy/type/matcher/v3/http_inputs.upb.c',
'src/core/ext/upb-generated/envoy/type/matcher/v3/metadata.upb.c',
'src/core/ext/upb-generated/envoy/type/matcher/v3/node.upb.c',
'src/core/ext/upb-generated/envoy/type/matcher/v3/number.upb.c',
'src/core/ext/upb-generated/envoy/type/matcher/v3/path.upb.c',
'src/core/ext/upb-generated/envoy/type/matcher/v3/regex.upb.c',
'src/core/ext/upb-generated/envoy/type/matcher/v3/string.upb.c',
'src/core/ext/upb-generated/envoy/type/matcher/v3/struct.upb.c',
'src/core/ext/upb-generated/envoy/type/matcher/v3/value.upb.c',
'src/core/ext/upb-generated/envoy/type/metadata/v3/metadata.upb.c',
'src/core/ext/upb-generated/envoy/type/tracing/v3/custom_tag.upb.c',
'src/core/ext/upb-generated/envoy/type/v3/hash_policy.upb.c',
'src/core/ext/upb-generated/envoy/type/v3/http.upb.c',
'src/core/ext/upb-generated/envoy/type/v3/http_status.upb.c',
'src/core/ext/upb-generated/envoy/type/v3/percent.upb.c',
'src/core/ext/upb-generated/envoy/type/v3/range.upb.c',
'src/core/ext/upb-generated/envoy/type/v3/ratelimit_unit.upb.c',
'src/core/ext/upb-generated/envoy/type/v3/semantic_version.upb.c',
'src/core/ext/upb-generated/envoy/type/v3/token_bucket.upb.c',
'src/core/ext/upb-generated/google/api/annotations.upb.c',
'src/core/ext/upb-generated/google/api/expr/v1alpha1/checked.upb.c',
'src/core/ext/upb-generated/google/api/expr/v1alpha1/syntax.upb.c',
'src/core/ext/upb-generated/google/api/http.upb.c',
'src/core/ext/upb-generated/google/api/httpbody.upb.c',
'src/core/ext/upb-generated/google/protobuf/any.upb.c',
'src/core/ext/upb-generated/google/protobuf/descriptor.upb.c',
'src/core/ext/upb-generated/google/protobuf/duration.upb.c',
'src/core/ext/upb-generated/google/protobuf/empty.upb.c',
'src/core/ext/upb-generated/google/protobuf/struct.upb.c',
'src/core/ext/upb-generated/google/protobuf/timestamp.upb.c',
'src/core/ext/upb-generated/google/protobuf/wrappers.upb.c',
'src/core/ext/upb-generated/google/rpc/status.upb.c',
'src/core/ext/upb-generated/opencensus/proto/trace/v1/trace_config.upb.c',
'src/core/ext/upb-generated/src/proto/grpc/gcp/altscontext.upb.c',
'src/core/ext/upb-generated/src/proto/grpc/gcp/handshaker.upb.c',
'src/core/ext/upb-generated/src/proto/grpc/gcp/transport_security_common.upb.c',
'src/core/ext/upb-generated/src/proto/grpc/health/v1/health.upb.c',
'src/core/ext/upb-generated/src/proto/grpc/lb/v1/load_balancer.upb.c',
'src/core/ext/upb-generated/src/proto/grpc/lookup/v1/rls.upb.c',
'src/core/ext/upb-generated/src/proto/grpc/lookup/v1/rls_config.upb.c',
'src/core/ext/upb-generated/udpa/annotations/migrate.upb.c',
'src/core/ext/upb-generated/udpa/annotations/security.upb.c',
'src/core/ext/upb-generated/udpa/annotations/sensitive.upb.c',
'src/core/ext/upb-generated/udpa/annotations/status.upb.c',
'src/core/ext/upb-generated/udpa/annotations/versioning.upb.c',
'src/core/ext/upb-generated/validate/validate.upb.c',
'src/core/ext/upb-generated/xds/annotations/v3/migrate.upb.c',
'src/core/ext/upb-generated/xds/annotations/v3/security.upb.c',
'src/core/ext/upb-generated/xds/annotations/v3/sensitive.upb.c',
'src/core/ext/upb-generated/xds/annotations/v3/status.upb.c',
'src/core/ext/upb-generated/xds/annotations/v3/versioning.upb.c',
'src/core/ext/upb-generated/xds/core/v3/authority.upb.c',
'src/core/ext/upb-generated/xds/core/v3/collection_entry.upb.c',
'src/core/ext/upb-generated/xds/core/v3/context_params.upb.c',
'src/core/ext/upb-generated/xds/core/v3/extension.upb.c',
'src/core/ext/upb-generated/xds/core/v3/resource.upb.c',
'src/core/ext/upb-generated/xds/core/v3/resource_locator.upb.c',
'src/core/ext/upb-generated/xds/core/v3/resource_name.upb.c',
'src/core/ext/upb-generated/xds/data/orca/v3/orca_load_report.upb.c',
'src/core/ext/upb-generated/xds/service/orca/v3/orca.upb.c',
'src/core/ext/upb-generated/xds/type/matcher/v3/matcher.upb.c',
'src/core/ext/upb-generated/xds/type/matcher/v3/regex.upb.c',
'src/core/ext/upb-generated/xds/type/matcher/v3/string.upb.c',
'src/core/ext/upb-generated/xds/type/v3/typed_struct.upb.c',
'src/core/ext/upbdefs-generated/envoy/admin/v3/certs.upbdefs.c',
'src/core/ext/upbdefs-generated/envoy/admin/v3/clusters.upbdefs.c',
'src/core/ext/upbdefs-generated/envoy/admin/v3/config_dump.upbdefs.c',
'src/core/ext/upbdefs-generated/envoy/admin/v3/init_dump.upbdefs.c',
'src/core/ext/upbdefs-generated/envoy/admin/v3/listeners.upbdefs.c',
'src/core/ext/upbdefs-generated/envoy/admin/v3/memory.upbdefs.c',
'src/core/ext/upbdefs-generated/envoy/admin/v3/metrics.upbdefs.c',
'src/core/ext/upbdefs-generated/envoy/admin/v3/mutex_stats.upbdefs.c',
'src/core/ext/upbdefs-generated/envoy/admin/v3/server_info.upbdefs.c',
'src/core/ext/upbdefs-generated/envoy/admin/v3/tap.upbdefs.c',
'src/core/ext/upbdefs-generated/envoy/annotations/deprecation.upbdefs.c',
'src/core/ext/upbdefs-generated/envoy/annotations/resource.upbdefs.c',
'src/core/ext/upbdefs-generated/envoy/config/accesslog/v3/accesslog.upbdefs.c',
'src/core/ext/upbdefs-generated/envoy/config/bootstrap/v3/bootstrap.upbdefs.c',
'src/core/ext/upbdefs-generated/envoy/config/cluster/v3/circuit_breaker.upbdefs.c',
'src/core/ext/upbdefs-generated/envoy/config/cluster/v3/cluster.upbdefs.c',
'src/core/ext/upbdefs-generated/envoy/config/cluster/v3/filter.upbdefs.c',
'src/core/ext/upbdefs-generated/envoy/config/cluster/v3/outlier_detection.upbdefs.c',
'src/core/ext/upbdefs-generated/envoy/config/common/matcher/v3/matcher.upbdefs.c',
'src/core/ext/upbdefs-generated/envoy/config/core/v3/address.upbdefs.c',
'src/core/ext/upbdefs-generated/envoy/config/core/v3/backoff.upbdefs.c',
'src/core/ext/upbdefs-generated/envoy/config/core/v3/base.upbdefs.c',
'src/core/ext/upbdefs-generated/envoy/config/core/v3/config_source.upbdefs.c',
'src/core/ext/upbdefs-generated/envoy/config/core/v3/event_service_config.upbdefs.c',
'src/core/ext/upbdefs-generated/envoy/config/core/v3/extension.upbdefs.c',
'src/core/ext/upbdefs-generated/envoy/config/core/v3/grpc_method_list.upbdefs.c',
'src/core/ext/upbdefs-generated/envoy/config/core/v3/grpc_service.upbdefs.c',
'src/core/ext/upbdefs-generated/envoy/config/core/v3/health_check.upbdefs.c',
'src/core/ext/upbdefs-generated/envoy/config/core/v3/http_uri.upbdefs.c',
'src/core/ext/upbdefs-generated/envoy/config/core/v3/protocol.upbdefs.c',
'src/core/ext/upbdefs-generated/envoy/config/core/v3/proxy_protocol.upbdefs.c',
'src/core/ext/upbdefs-generated/envoy/config/core/v3/resolver.upbdefs.c',
'src/core/ext/upbdefs-generated/envoy/config/core/v3/socket_option.upbdefs.c',
'src/core/ext/upbdefs-generated/envoy/config/core/v3/substitution_format_string.upbdefs.c',
'src/core/ext/upbdefs-generated/envoy/config/core/v3/udp_socket_config.upbdefs.c',
'src/core/ext/upbdefs-generated/envoy/config/endpoint/v3/endpoint.upbdefs.c',
'src/core/ext/upbdefs-generated/envoy/config/endpoint/v3/endpoint_components.upbdefs.c',
'src/core/ext/upbdefs-generated/envoy/config/endpoint/v3/load_report.upbdefs.c',
'src/core/ext/upbdefs-generated/envoy/config/listener/v3/api_listener.upbdefs.c',
'src/core/ext/upbdefs-generated/envoy/config/listener/v3/listener.upbdefs.c',
'src/core/ext/upbdefs-generated/envoy/config/listener/v3/listener_components.upbdefs.c',
'src/core/ext/upbdefs-generated/envoy/config/listener/v3/quic_config.upbdefs.c',
'src/core/ext/upbdefs-generated/envoy/config/listener/v3/udp_listener_config.upbdefs.c',
'src/core/ext/upbdefs-generated/envoy/config/metrics/v3/metrics_service.upbdefs.c',
'src/core/ext/upbdefs-generated/envoy/config/metrics/v3/stats.upbdefs.c',
'src/core/ext/upbdefs-generated/envoy/config/overload/v3/overload.upbdefs.c',
'src/core/ext/upbdefs-generated/envoy/config/rbac/v3/rbac.upbdefs.c',
'src/core/ext/upbdefs-generated/envoy/config/route/v3/route.upbdefs.c',
'src/core/ext/upbdefs-generated/envoy/config/route/v3/route_components.upbdefs.c',
'src/core/ext/upbdefs-generated/envoy/config/route/v3/scoped_route.upbdefs.c',
'src/core/ext/upbdefs-generated/envoy/config/tap/v3/common.upbdefs.c',
'src/core/ext/upbdefs-generated/envoy/config/trace/v3/datadog.upbdefs.c',
'src/core/ext/upbdefs-generated/envoy/config/trace/v3/dynamic_ot.upbdefs.c',
'src/core/ext/upbdefs-generated/envoy/config/trace/v3/http_tracer.upbdefs.c',
'src/core/ext/upbdefs-generated/envoy/config/trace/v3/lightstep.upbdefs.c',
'src/core/ext/upbdefs-generated/envoy/config/trace/v3/opencensus.upbdefs.c',
'src/core/ext/upbdefs-generated/envoy/config/trace/v3/service.upbdefs.c',
'src/core/ext/upbdefs-generated/envoy/config/trace/v3/skywalking.upbdefs.c',
'src/core/ext/upbdefs-generated/envoy/config/trace/v3/trace.upbdefs.c',
'src/core/ext/upbdefs-generated/envoy/config/trace/v3/xray.upbdefs.c',
'src/core/ext/upbdefs-generated/envoy/config/trace/v3/zipkin.upbdefs.c',
'src/core/ext/upbdefs-generated/envoy/extensions/clusters/aggregate/v3/cluster.upbdefs.c',
'src/core/ext/upbdefs-generated/envoy/extensions/filters/common/fault/v3/fault.upbdefs.c',
'src/core/ext/upbdefs-generated/envoy/extensions/filters/http/fault/v3/fault.upbdefs.c',
'src/core/ext/upbdefs-generated/envoy/extensions/filters/http/rbac/v3/rbac.upbdefs.c',
'src/core/ext/upbdefs-generated/envoy/extensions/filters/http/router/v3/router.upbdefs.c',
'src/core/ext/upbdefs-generated/envoy/extensions/filters/network/http_connection_manager/v3/http_connection_manager.upbdefs.c',
'src/core/ext/upbdefs-generated/envoy/extensions/transport_sockets/tls/v3/cert.upbdefs.c',
'src/core/ext/upbdefs-generated/envoy/extensions/transport_sockets/tls/v3/common.upbdefs.c',
'src/core/ext/upbdefs-generated/envoy/extensions/transport_sockets/tls/v3/secret.upbdefs.c',
'src/core/ext/upbdefs-generated/envoy/extensions/transport_sockets/tls/v3/tls.upbdefs.c',
'src/core/ext/upbdefs-generated/envoy/extensions/transport_sockets/tls/v3/tls_spiffe_validator_config.upbdefs.c',
'src/core/ext/upbdefs-generated/envoy/service/discovery/v3/ads.upbdefs.c',
'src/core/ext/upbdefs-generated/envoy/service/discovery/v3/discovery.upbdefs.c',
'src/core/ext/upbdefs-generated/envoy/service/load_stats/v3/lrs.upbdefs.c',
'src/core/ext/upbdefs-generated/envoy/service/status/v3/csds.upbdefs.c',
'src/core/ext/upbdefs-generated/envoy/type/http/v3/cookie.upbdefs.c',
'src/core/ext/upbdefs-generated/envoy/type/http/v3/path_transformation.upbdefs.c',
'src/core/ext/upbdefs-generated/envoy/type/matcher/v3/http_inputs.upbdefs.c',
'src/core/ext/upbdefs-generated/envoy/type/matcher/v3/metadata.upbdefs.c',
'src/core/ext/upbdefs-generated/envoy/type/matcher/v3/node.upbdefs.c',
'src/core/ext/upbdefs-generated/envoy/type/matcher/v3/number.upbdefs.c',
'src/core/ext/upbdefs-generated/envoy/type/matcher/v3/path.upbdefs.c',
'src/core/ext/upbdefs-generated/envoy/type/matcher/v3/regex.upbdefs.c',
'src/core/ext/upbdefs-generated/envoy/type/matcher/v3/string.upbdefs.c',
'src/core/ext/upbdefs-generated/envoy/type/matcher/v3/struct.upbdefs.c',
'src/core/ext/upbdefs-generated/envoy/type/matcher/v3/value.upbdefs.c',
'src/core/ext/upbdefs-generated/envoy/type/metadata/v3/metadata.upbdefs.c',
'src/core/ext/upbdefs-generated/envoy/type/tracing/v3/custom_tag.upbdefs.c',
'src/core/ext/upbdefs-generated/envoy/type/v3/hash_policy.upbdefs.c',
'src/core/ext/upbdefs-generated/envoy/type/v3/http.upbdefs.c',
'src/core/ext/upbdefs-generated/envoy/type/v3/http_status.upbdefs.c',
'src/core/ext/upbdefs-generated/envoy/type/v3/percent.upbdefs.c',
'src/core/ext/upbdefs-generated/envoy/type/v3/range.upbdefs.c',
'src/core/ext/upbdefs-generated/envoy/type/v3/ratelimit_unit.upbdefs.c',
'src/core/ext/upbdefs-generated/envoy/type/v3/semantic_version.upbdefs.c',
'src/core/ext/upbdefs-generated/envoy/type/v3/token_bucket.upbdefs.c',
'src/core/ext/upbdefs-generated/google/api/annotations.upbdefs.c',
'src/core/ext/upbdefs-generated/google/api/expr/v1alpha1/checked.upbdefs.c',
'src/core/ext/upbdefs-generated/google/api/expr/v1alpha1/syntax.upbdefs.c',
'src/core/ext/upbdefs-generated/google/api/http.upbdefs.c',
'src/core/ext/upbdefs-generated/google/api/httpbody.upbdefs.c',
'src/core/ext/upbdefs-generated/google/protobuf/any.upbdefs.c',
'src/core/ext/upbdefs-generated/google/protobuf/descriptor.upbdefs.c',
'src/core/ext/upbdefs-generated/google/protobuf/duration.upbdefs.c',
'src/core/ext/upbdefs-generated/google/protobuf/empty.upbdefs.c',
'src/core/ext/upbdefs-generated/google/protobuf/struct.upbdefs.c',
'src/core/ext/upbdefs-generated/google/protobuf/timestamp.upbdefs.c',
'src/core/ext/upbdefs-generated/google/protobuf/wrappers.upbdefs.c',
'src/core/ext/upbdefs-generated/google/rpc/status.upbdefs.c',
'src/core/ext/upbdefs-generated/opencensus/proto/trace/v1/trace_config.upbdefs.c',
'src/core/ext/upbdefs-generated/src/proto/grpc/lookup/v1/rls_config.upbdefs.c',
'src/core/ext/upbdefs-generated/udpa/annotations/migrate.upbdefs.c',
'src/core/ext/upbdefs-generated/udpa/annotations/security.upbdefs.c',
'src/core/ext/upbdefs-generated/udpa/annotations/sensitive.upbdefs.c',
'src/core/ext/upbdefs-generated/udpa/annotations/status.upbdefs.c',
'src/core/ext/upbdefs-generated/udpa/annotations/versioning.upbdefs.c',
'src/core/ext/upbdefs-generated/validate/validate.upbdefs.c',
'src/core/ext/upbdefs-generated/xds/annotations/v3/migrate.upbdefs.c',
'src/core/ext/upbdefs-generated/xds/annotations/v3/security.upbdefs.c',
'src/core/ext/upbdefs-generated/xds/annotations/v3/sensitive.upbdefs.c',
'src/core/ext/upbdefs-generated/xds/annotations/v3/status.upbdefs.c',
'src/core/ext/upbdefs-generated/xds/annotations/v3/versioning.upbdefs.c',
'src/core/ext/upbdefs-generated/xds/core/v3/authority.upbdefs.c',
'src/core/ext/upbdefs-generated/xds/core/v3/collection_entry.upbdefs.c',
'src/core/ext/upbdefs-generated/xds/core/v3/context_params.upbdefs.c',
'src/core/ext/upbdefs-generated/xds/core/v3/extension.upbdefs.c',
'src/core/ext/upbdefs-generated/xds/core/v3/resource.upbdefs.c',
'src/core/ext/upbdefs-generated/xds/core/v3/resource_locator.upbdefs.c',
'src/core/ext/upbdefs-generated/xds/core/v3/resource_name.upbdefs.c',
'src/core/ext/upbdefs-generated/xds/type/matcher/v3/matcher.upbdefs.c',
'src/core/ext/upbdefs-generated/xds/type/matcher/v3/regex.upbdefs.c',
'src/core/ext/upbdefs-generated/xds/type/matcher/v3/string.upbdefs.c',
'src/core/ext/upbdefs-generated/xds/type/v3/typed_struct.upbdefs.c',
'src/core/ext/xds/certificate_provider_registry.cc',
'src/core/ext/xds/certificate_provider_store.cc',
'src/core/ext/xds/file_watcher_certificate_provider_factory.cc',
'src/core/ext/xds/xds_api.cc',
'src/core/ext/xds/xds_bootstrap.cc',
'src/core/ext/xds/xds_certificate_provider.cc',
'src/core/ext/xds/xds_channel_stack_modifier.cc',
'src/core/ext/xds/xds_client.cc',
'src/core/ext/xds/xds_client_stats.cc',
'src/core/ext/xds/xds_cluster.cc',
'src/core/ext/xds/xds_cluster_specifier_plugin.cc',
'src/core/ext/xds/xds_common_types.cc',
'src/core/ext/xds/xds_endpoint.cc',
'src/core/ext/xds/xds_http_fault_filter.cc',
'src/core/ext/xds/xds_http_filters.cc',
'src/core/ext/xds/xds_http_rbac_filter.cc',
'src/core/ext/xds/xds_listener.cc',
'src/core/ext/xds/xds_resource_type.cc',
'src/core/ext/xds/xds_route_config.cc',
'src/core/ext/xds/xds_routing.cc',
'src/core/ext/xds/xds_server_config_fetcher.cc',
'src/core/lib/address_utils/parse_address.cc',
'src/core/lib/address_utils/sockaddr_utils.cc',
'src/core/lib/backoff/backoff.cc',
'src/core/lib/channel/channel_args.cc',
'src/core/lib/channel/channel_args_preconditioning.cc',
'src/core/lib/channel/channel_stack.cc',
'src/core/lib/channel/channel_stack_builder.cc',
'src/core/lib/channel/channel_stack_builder_impl.cc',
'src/core/lib/channel/channel_trace.cc',
'src/core/lib/channel/channelz.cc',
'src/core/lib/channel/channelz_registry.cc',
'src/core/lib/channel/connected_channel.cc',
'src/core/lib/channel/promise_based_filter.cc',
'src/core/lib/channel/status_util.cc',
'src/core/lib/compression/compression.cc',
'src/core/lib/compression/compression_internal.cc',
'src/core/lib/compression/message_compress.cc',
'src/core/lib/config/core_configuration.cc',
'src/core/lib/debug/stats.cc',
'src/core/lib/debug/stats_data.cc',
'src/core/lib/debug/trace.cc',
'src/core/lib/event_engine/channel_args_endpoint_config.cc',
'src/core/lib/event_engine/default_event_engine_factory.cc',
'src/core/lib/event_engine/event_engine.cc',
'src/core/lib/event_engine/memory_allocator.cc',
'src/core/lib/event_engine/resolved_address.cc',
'src/core/lib/event_engine/slice.cc',
'src/core/lib/event_engine/slice_buffer.cc',
'src/core/lib/event_engine/sockaddr.cc',
'src/core/lib/gprpp/time.cc',
'src/core/lib/http/format_request.cc',
'src/core/lib/http/httpcli.cc',
'src/core/lib/http/httpcli_security_connector.cc',
'src/core/lib/http/parser.cc',
'src/core/lib/iomgr/buffer_list.cc',
'src/core/lib/iomgr/call_combiner.cc',
'src/core/lib/iomgr/cfstream_handle.cc',
'src/core/lib/iomgr/combiner.cc',
'src/core/lib/iomgr/dualstack_socket_posix.cc',
'src/core/lib/iomgr/endpoint.cc',
'src/core/lib/iomgr/endpoint_cfstream.cc',
'src/core/lib/iomgr/endpoint_pair_event_engine.cc',
'src/core/lib/iomgr/endpoint_pair_posix.cc',
'src/core/lib/iomgr/endpoint_pair_windows.cc',
'src/core/lib/iomgr/error.cc',
'src/core/lib/iomgr/error_cfstream.cc',
'src/core/lib/iomgr/ev_apple.cc',
'src/core/lib/iomgr/ev_epoll1_linux.cc',
'src/core/lib/iomgr/ev_poll_posix.cc',
'src/core/lib/iomgr/ev_posix.cc',
'src/core/lib/iomgr/ev_windows.cc',
'src/core/lib/iomgr/event_engine/closure.cc',
'src/core/lib/iomgr/event_engine/endpoint.cc',
'src/core/lib/iomgr/event_engine/iomgr.cc',
'src/core/lib/iomgr/event_engine/pollset.cc',
'src/core/lib/iomgr/event_engine/resolved_address_internal.cc',
'src/core/lib/iomgr/event_engine/resolver.cc',
'src/core/lib/iomgr/event_engine/tcp.cc',
'src/core/lib/iomgr/event_engine/timer.cc',
'src/core/lib/iomgr/exec_ctx.cc',
'src/core/lib/iomgr/executor.cc',
'src/core/lib/iomgr/executor/mpmcqueue.cc',
'src/core/lib/iomgr/executor/threadpool.cc',
'src/core/lib/iomgr/fork_posix.cc',
'src/core/lib/iomgr/fork_windows.cc',
'src/core/lib/iomgr/gethostname_fallback.cc',
'src/core/lib/iomgr/gethostname_host_name_max.cc',
'src/core/lib/iomgr/gethostname_sysconf.cc',
'src/core/lib/iomgr/grpc_if_nametoindex_posix.cc',
'src/core/lib/iomgr/grpc_if_nametoindex_unsupported.cc',
'src/core/lib/iomgr/internal_errqueue.cc',
'src/core/lib/iomgr/iocp_windows.cc',
'src/core/lib/iomgr/iomgr.cc',
'src/core/lib/iomgr/iomgr_internal.cc',
'src/core/lib/iomgr/iomgr_posix.cc',
'src/core/lib/iomgr/iomgr_posix_cfstream.cc',
'src/core/lib/iomgr/iomgr_windows.cc',
'src/core/lib/iomgr/load_file.cc',
'src/core/lib/iomgr/lockfree_event.cc',
'src/core/lib/iomgr/polling_entity.cc',
'src/core/lib/iomgr/pollset.cc',
'src/core/lib/iomgr/pollset_set.cc',
'src/core/lib/iomgr/pollset_set_windows.cc',
'src/core/lib/iomgr/pollset_windows.cc',
'src/core/lib/iomgr/resolve_address.cc',
'src/core/lib/iomgr/resolve_address_posix.cc',
'src/core/lib/iomgr/resolve_address_windows.cc',
'src/core/lib/iomgr/sockaddr_utils_posix.cc',
'src/core/lib/iomgr/socket_factory_posix.cc',
'src/core/lib/iomgr/socket_mutator.cc',
'src/core/lib/iomgr/socket_utils_common_posix.cc',
'src/core/lib/iomgr/socket_utils_linux.cc',
'src/core/lib/iomgr/socket_utils_posix.cc',
'src/core/lib/iomgr/socket_utils_windows.cc',
'src/core/lib/iomgr/socket_windows.cc',
'src/core/lib/iomgr/tcp_client.cc',
'src/core/lib/iomgr/tcp_client_cfstream.cc',
'src/core/lib/iomgr/tcp_client_posix.cc',
'src/core/lib/iomgr/tcp_client_windows.cc',
'src/core/lib/iomgr/tcp_posix.cc',
'src/core/lib/iomgr/tcp_server.cc',
'src/core/lib/iomgr/tcp_server_posix.cc',
'src/core/lib/iomgr/tcp_server_utils_posix_common.cc',
'src/core/lib/iomgr/tcp_server_utils_posix_ifaddrs.cc',
'src/core/lib/iomgr/tcp_server_utils_posix_noifaddrs.cc',
'src/core/lib/iomgr/tcp_server_windows.cc',
'src/core/lib/iomgr/tcp_windows.cc',
'src/core/lib/iomgr/time_averaged_stats.cc',
'src/core/lib/iomgr/timer.cc',
'src/core/lib/iomgr/timer_generic.cc',
'src/core/lib/iomgr/timer_heap.cc',
'src/core/lib/iomgr/timer_manager.cc',
'src/core/lib/iomgr/unix_sockets_posix.cc',
'src/core/lib/iomgr/unix_sockets_posix_noop.cc',
'src/core/lib/iomgr/wakeup_fd_eventfd.cc',
'src/core/lib/iomgr/wakeup_fd_nospecial.cc',
'src/core/lib/iomgr/wakeup_fd_pipe.cc',
'src/core/lib/iomgr/wakeup_fd_posix.cc',
'src/core/lib/iomgr/work_serializer.cc',
'src/core/lib/json/json_reader.cc',
'src/core/lib/json/json_util.cc',
'src/core/lib/json/json_writer.cc',
'src/core/lib/matchers/matchers.cc',
'src/core/lib/promise/activity.cc',
'src/core/lib/promise/sleep.cc',
'src/core/lib/resolver/resolver.cc',
'src/core/lib/resolver/resolver_registry.cc',
'src/core/lib/resolver/server_address.cc',
'src/core/lib/resource_quota/api.cc',
'src/core/lib/resource_quota/arena.cc',
'src/core/lib/resource_quota/memory_quota.cc',
'src/core/lib/resource_quota/resource_quota.cc',
'src/core/lib/resource_quota/thread_quota.cc',
'src/core/lib/resource_quota/trace.cc',
'src/core/lib/security/authorization/authorization_policy_provider_vtable.cc',
'src/core/lib/security/authorization/evaluate_args.cc',
'src/core/lib/security/authorization/grpc_authorization_engine.cc',
'src/core/lib/security/authorization/grpc_server_authz_filter.cc',
'src/core/lib/security/authorization/matchers.cc',
'src/core/lib/security/authorization/rbac_policy.cc',
'src/core/lib/security/context/security_context.cc',
'src/core/lib/security/credentials/alts/alts_credentials.cc',
'src/core/lib/security/credentials/alts/check_gcp_environment.cc',
'src/core/lib/security/credentials/alts/check_gcp_environment_linux.cc',
'src/core/lib/security/credentials/alts/check_gcp_environment_no_op.cc',
'src/core/lib/security/credentials/alts/check_gcp_environment_windows.cc',
'src/core/lib/security/credentials/alts/grpc_alts_credentials_client_options.cc',
'src/core/lib/security/credentials/alts/grpc_alts_credentials_options.cc',
'src/core/lib/security/credentials/alts/grpc_alts_credentials_server_options.cc',
'src/core/lib/security/credentials/call_creds_util.cc',
'src/core/lib/security/credentials/channel_creds_registry_init.cc',
'src/core/lib/security/credentials/composite/composite_credentials.cc',
'src/core/lib/security/credentials/credentials.cc',
'src/core/lib/security/credentials/external/aws_external_account_credentials.cc',
'src/core/lib/security/credentials/external/aws_request_signer.cc',
'src/core/lib/security/credentials/external/external_account_credentials.cc',
'src/core/lib/security/credentials/external/file_external_account_credentials.cc',
'src/core/lib/security/credentials/external/url_external_account_credentials.cc',
'src/core/lib/security/credentials/fake/fake_credentials.cc',
'src/core/lib/security/credentials/google_default/credentials_generic.cc',
'src/core/lib/security/credentials/google_default/google_default_credentials.cc',
'src/core/lib/security/credentials/iam/iam_credentials.cc',
'src/core/lib/security/credentials/insecure/insecure_credentials.cc',
'src/core/lib/security/credentials/jwt/json_token.cc',
'src/core/lib/security/credentials/jwt/jwt_credentials.cc',
'src/core/lib/security/credentials/jwt/jwt_verifier.cc',
'src/core/lib/security/credentials/local/local_credentials.cc',
'src/core/lib/security/credentials/oauth2/oauth2_credentials.cc',
'src/core/lib/security/credentials/plugin/plugin_credentials.cc',
'src/core/lib/security/credentials/ssl/ssl_credentials.cc',
'src/core/lib/security/credentials/tls/grpc_tls_certificate_distributor.cc',
'src/core/lib/security/credentials/tls/grpc_tls_certificate_provider.cc',
'src/core/lib/security/credentials/tls/grpc_tls_certificate_verifier.cc',
'src/core/lib/security/credentials/tls/grpc_tls_credentials_options.cc',
'src/core/lib/security/credentials/tls/tls_credentials.cc',
'src/core/lib/security/credentials/tls/tls_utils.cc',
'src/core/lib/security/credentials/xds/xds_credentials.cc',
'src/core/lib/security/security_connector/alts/alts_security_connector.cc',
'src/core/lib/security/security_connector/fake/fake_security_connector.cc',
'src/core/lib/security/security_connector/insecure/insecure_security_connector.cc',
'src/core/lib/security/security_connector/load_system_roots_fallback.cc',
'src/core/lib/security/security_connector/load_system_roots_linux.cc',
'src/core/lib/security/security_connector/local/local_security_connector.cc',
'src/core/lib/security/security_connector/security_connector.cc',
'src/core/lib/security/security_connector/ssl/ssl_security_connector.cc',
'src/core/lib/security/security_connector/ssl_utils.cc',
'src/core/lib/security/security_connector/ssl_utils_config.cc',
'src/core/lib/security/security_connector/tls/tls_security_connector.cc',
'src/core/lib/security/transport/client_auth_filter.cc',
'src/core/lib/security/transport/secure_endpoint.cc',
'src/core/lib/security/transport/security_handshaker.cc',
'src/core/lib/security/transport/server_auth_filter.cc',
'src/core/lib/security/transport/tsi_error.cc',
'src/core/lib/security/util/json_util.cc',
'src/core/lib/service_config/service_config_impl.cc',
'src/core/lib/service_config/service_config_parser.cc',
'src/core/lib/slice/b64.cc',
'src/core/lib/slice/percent_encoding.cc',
'src/core/lib/slice/slice.cc',
'src/core/lib/slice/slice_api.cc',
'src/core/lib/slice/slice_buffer.cc',
'src/core/lib/slice/slice_buffer_api.cc',
'src/core/lib/slice/slice_refcount.cc',
'src/core/lib/slice/slice_split.cc',
'src/core/lib/slice/slice_string_helpers.cc',
'src/core/lib/surface/api_trace.cc',
'src/core/lib/surface/builtins.cc',
'src/core/lib/surface/byte_buffer.cc',
'src/core/lib/surface/byte_buffer_reader.cc',
'src/core/lib/surface/call.cc',
'src/core/lib/surface/call_details.cc',
'src/core/lib/surface/call_log_batch.cc',
'src/core/lib/surface/channel.cc',
'src/core/lib/surface/channel_init.cc',
'src/core/lib/surface/channel_ping.cc',
'src/core/lib/surface/channel_stack_type.cc',
'src/core/lib/surface/completion_queue.cc',
'src/core/lib/surface/completion_queue_factory.cc',
'src/core/lib/surface/event_string.cc',
'src/core/lib/surface/init.cc',
'src/core/lib/surface/lame_client.cc',
'src/core/lib/surface/metadata_array.cc',
'src/core/lib/surface/server.cc',
'src/core/lib/surface/validate_metadata.cc',
'src/core/lib/surface/version.cc',
'src/core/lib/transport/bdp_estimator.cc',
'src/core/lib/transport/byte_stream.cc',
'src/core/lib/transport/connectivity_state.cc',
'src/core/lib/transport/error_utils.cc',
'src/core/lib/transport/handshaker.cc',
'src/core/lib/transport/handshaker_registry.cc',
'src/core/lib/transport/http_connect_handshaker.cc',
'src/core/lib/transport/metadata_batch.cc',
'src/core/lib/transport/parsed_metadata.cc',
'src/core/lib/transport/pid_controller.cc',
'src/core/lib/transport/status_conversion.cc',
'src/core/lib/transport/tcp_connect_handshaker.cc',
'src/core/lib/transport/timeout_encoding.cc',
'src/core/lib/transport/transport.cc',
'src/core/lib/transport/transport_op_string.cc',
'src/core/lib/uri/uri_parser.cc',
'src/core/plugin_registry/grpc_plugin_registry.cc',
'src/core/plugin_registry/grpc_plugin_registry_extra.cc',
'src/core/tsi/alts/crypt/aes_gcm.cc',
'src/core/tsi/alts/crypt/gsec.cc',
'src/core/tsi/alts/frame_protector/alts_counter.cc',
'src/core/tsi/alts/frame_protector/alts_crypter.cc',
'src/core/tsi/alts/frame_protector/alts_frame_protector.cc',
'src/core/tsi/alts/frame_protector/alts_record_protocol_crypter_common.cc',
'src/core/tsi/alts/frame_protector/alts_seal_privacy_integrity_crypter.cc',
'src/core/tsi/alts/frame_protector/alts_unseal_privacy_integrity_crypter.cc',
'src/core/tsi/alts/frame_protector/frame_handler.cc',
'src/core/tsi/alts/handshaker/alts_handshaker_client.cc',
'src/core/tsi/alts/handshaker/alts_shared_resource.cc',
'src/core/tsi/alts/handshaker/alts_tsi_handshaker.cc',
'src/core/tsi/alts/handshaker/alts_tsi_utils.cc',
'src/core/tsi/alts/handshaker/transport_security_common_api.cc',
'src/core/tsi/alts/zero_copy_frame_protector/alts_grpc_integrity_only_record_protocol.cc',
'src/core/tsi/alts/zero_copy_frame_protector/alts_grpc_privacy_integrity_record_protocol.cc',
'src/core/tsi/alts/zero_copy_frame_protector/alts_grpc_record_protocol_common.cc',
'src/core/tsi/alts/zero_copy_frame_protector/alts_iovec_record_protocol.cc',
'src/core/tsi/alts/zero_copy_frame_protector/alts_zero_copy_grpc_protector.cc',
'src/core/tsi/fake_transport_security.cc',
'src/core/tsi/local_transport_security.cc',
'src/core/tsi/ssl/key_logging/ssl_key_logging.cc',
'src/core/tsi/ssl/session_cache/ssl_session_boringssl.cc',
'src/core/tsi/ssl/session_cache/ssl_session_cache.cc',
'src/core/tsi/ssl/session_cache/ssl_session_openssl.cc',
'src/core/tsi/ssl_transport_security.cc',
'src/core/tsi/transport_security.cc',
'src/core/tsi/transport_security_grpc.cc',
],
},
{
  # grpc_test_util: static library of C-core test helpers (port allocation,
  # mock/passthru endpoints, test TCP server, subprocess wrappers, stack
  # tracing, TLS test utilities) linked against the secure 'grpc' library
  # and absl debugging support.
  # NOTE(review): this file looks auto-generated (templated grpc.gyp) — edits
  # here are likely overwritten by the build-file generator; confirm upstream.
  'target_name': 'grpc_test_util',
  'type': 'static_library',
  'dependencies': [
    'absl/debugging:failure_signal_handler',
    'absl/debugging:stacktrace',
    'absl/debugging:symbolize',
    'grpc',
  ],
  'sources': [
    'test/core/event_engine/test_init.cc',
    'test/core/util/build.cc',
    'test/core/util/cmdline.cc',
    'test/core/util/fuzzer_util.cc',
    'test/core/util/grpc_profiler.cc',
    'test/core/util/histogram.cc',
    'test/core/util/mock_endpoint.cc',
    'test/core/util/parse_hexstring.cc',
    'test/core/util/passthru_endpoint.cc',
    'test/core/util/port.cc',
    'test/core/util/port_isolated_runtime_environment.cc',
    'test/core/util/port_server_client.cc',
    'test/core/util/reconnect_server.cc',
    'test/core/util/resolve_localhost_ip46.cc',
    'test/core/util/slice_splitter.cc',
    'test/core/util/stack_tracer.cc',
    'test/core/util/subprocess_posix.cc',
    'test/core/util/subprocess_windows.cc',
    'test/core/util/test_config.cc',
    'test/core/util/test_tcp_server.cc',
    'test/core/util/tls_utils.cc',
    'test/core/util/tracer_util.cc',
  ],
},
{
  # grpc_test_util_unsecure: same test-helper set as grpc_test_util, except it
  # links the TLS-free 'grpc_unsecure' core and (as visible in the source list)
  # omits test/core/util/tls_utils.cc.
  'target_name': 'grpc_test_util_unsecure',
  'type': 'static_library',
  'dependencies': [
    'absl/debugging:failure_signal_handler',
    'absl/debugging:stacktrace',
    'absl/debugging:symbolize',
    'grpc_unsecure',
  ],
  'sources': [
    'test/core/event_engine/test_init.cc',
    'test/core/util/build.cc',
    'test/core/util/cmdline.cc',
    'test/core/util/fuzzer_util.cc',
    'test/core/util/grpc_profiler.cc',
    'test/core/util/histogram.cc',
    'test/core/util/mock_endpoint.cc',
    'test/core/util/parse_hexstring.cc',
    'test/core/util/passthru_endpoint.cc',
    'test/core/util/port.cc',
    'test/core/util/port_isolated_runtime_environment.cc',
    'test/core/util/port_server_client.cc',
    'test/core/util/reconnect_server.cc',
    'test/core/util/resolve_localhost_ip46.cc',
    'test/core/util/slice_splitter.cc',
    'test/core/util/stack_tracer.cc',
    'test/core/util/subprocess_posix.cc',
    'test/core/util/subprocess_windows.cc',
    'test/core/util/test_config.cc',
    'test/core/util/test_tcp_server.cc',
    'test/core/util/tracer_util.cc',
  ],
},
{
  # grpc_unsecure: gRPC C core built without the TLS stack — compared to the
  # 'grpc' target above, the source list here contains no src/core/tsi/ssl*,
  # no src/core/ext/xds/*, and only the fake/insecure/composite/plugin credential
  # implementations; it registers plugins via grpc_plugin_registry_noextra.cc
  # (the secure build uses grpc_plugin_registry_extra.cc).
  'target_name': 'grpc_unsecure',
  'type': 'static_library',
  'dependencies': [
    'absl/container:flat_hash_map',
    'absl/container:inlined_vector',
    'absl/functional:bind_front',
    'absl/hash:hash',
    'absl/meta:type_traits',
    'absl/status:statusor',
    'absl/types:span',
    'absl/types:variant',
    'absl/utility:utility',
    'gpr',
    'address_sorting',
  ],
  'sources': [
    'src/core/ext/filters/census/grpc_context.cc',
    'src/core/ext/filters/channel_idle/channel_idle_filter.cc',
    'src/core/ext/filters/channel_idle/idle_filter_state.cc',
    'src/core/ext/filters/client_channel/backend_metric.cc',
    'src/core/ext/filters/client_channel/backup_poller.cc',
    'src/core/ext/filters/client_channel/channel_connectivity.cc',
    'src/core/ext/filters/client_channel/client_channel.cc',
    'src/core/ext/filters/client_channel/client_channel_channelz.cc',
    'src/core/ext/filters/client_channel/client_channel_factory.cc',
    'src/core/ext/filters/client_channel/client_channel_plugin.cc',
    'src/core/ext/filters/client_channel/config_selector.cc',
    'src/core/ext/filters/client_channel/dynamic_filters.cc',
    'src/core/ext/filters/client_channel/global_subchannel_pool.cc',
    'src/core/ext/filters/client_channel/health/health_check_client.cc',
    'src/core/ext/filters/client_channel/http_proxy.cc',
    'src/core/ext/filters/client_channel/lb_policy.cc',
    'src/core/ext/filters/client_channel/lb_policy/address_filtering.cc',
    'src/core/ext/filters/client_channel/lb_policy/child_policy_handler.cc',
    'src/core/ext/filters/client_channel/lb_policy/grpclb/client_load_reporting_filter.cc',
    'src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb.cc',
    'src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb_balancer_addresses.cc',
    'src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb_client_stats.cc',
    'src/core/ext/filters/client_channel/lb_policy/grpclb/load_balancer_api.cc',
    'src/core/ext/filters/client_channel/lb_policy/oob_backend_metric.cc',
    'src/core/ext/filters/client_channel/lb_policy/pick_first/pick_first.cc',
    'src/core/ext/filters/client_channel/lb_policy/priority/priority.cc',
    'src/core/ext/filters/client_channel/lb_policy/ring_hash/ring_hash.cc',
    'src/core/ext/filters/client_channel/lb_policy/rls/rls.cc',
    'src/core/ext/filters/client_channel/lb_policy/round_robin/round_robin.cc',
    'src/core/ext/filters/client_channel/lb_policy/weighted_target/weighted_target.cc',
    'src/core/ext/filters/client_channel/lb_policy_registry.cc',
    'src/core/ext/filters/client_channel/local_subchannel_pool.cc',
    'src/core/ext/filters/client_channel/proxy_mapper_registry.cc',
    'src/core/ext/filters/client_channel/resolver/binder/binder_resolver.cc',
    'src/core/ext/filters/client_channel/resolver/dns/c_ares/dns_resolver_ares.cc',
    'src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_ev_driver_event_engine.cc',
    'src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_ev_driver_posix.cc',
    'src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_ev_driver_windows.cc',
    'src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper.cc',
    'src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper_event_engine.cc',
    'src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper_posix.cc',
    'src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper_windows.cc',
    'src/core/ext/filters/client_channel/resolver/dns/dns_resolver_selection.cc',
    'src/core/ext/filters/client_channel/resolver/dns/native/dns_resolver.cc',
    'src/core/ext/filters/client_channel/resolver/fake/fake_resolver.cc',
    'src/core/ext/filters/client_channel/resolver/polling_resolver.cc',
    'src/core/ext/filters/client_channel/resolver/sockaddr/sockaddr_resolver.cc',
    'src/core/ext/filters/client_channel/resolver_result_parsing.cc',
    'src/core/ext/filters/client_channel/retry_filter.cc',
    'src/core/ext/filters/client_channel/retry_service_config.cc',
    'src/core/ext/filters/client_channel/retry_throttle.cc',
    'src/core/ext/filters/client_channel/service_config_channel_arg_filter.cc',
    'src/core/ext/filters/client_channel/subchannel.cc',
    'src/core/ext/filters/client_channel/subchannel_pool_interface.cc',
    'src/core/ext/filters/client_channel/subchannel_stream_client.cc',
    'src/core/ext/filters/deadline/deadline_filter.cc',
    'src/core/ext/filters/fault_injection/fault_injection_filter.cc',
    'src/core/ext/filters/fault_injection/service_config_parser.cc',
    'src/core/ext/filters/http/client/http_client_filter.cc',
    'src/core/ext/filters/http/client_authority_filter.cc',
    'src/core/ext/filters/http/http_filters_plugin.cc',
    'src/core/ext/filters/http/message_compress/message_compress_filter.cc',
    'src/core/ext/filters/http/message_compress/message_decompress_filter.cc',
    'src/core/ext/filters/http/server/http_server_filter.cc',
    'src/core/ext/filters/message_size/message_size_filter.cc',
    'src/core/ext/transport/chttp2/alpn/alpn.cc',
    'src/core/ext/transport/chttp2/client/chttp2_connector.cc',
    'src/core/ext/transport/chttp2/server/chttp2_server.cc',
    'src/core/ext/transport/chttp2/transport/bin_decoder.cc',
    'src/core/ext/transport/chttp2/transport/bin_encoder.cc',
    'src/core/ext/transport/chttp2/transport/chttp2_transport.cc',
    'src/core/ext/transport/chttp2/transport/context_list.cc',
    'src/core/ext/transport/chttp2/transport/flow_control.cc',
    'src/core/ext/transport/chttp2/transport/frame_data.cc',
    'src/core/ext/transport/chttp2/transport/frame_goaway.cc',
    'src/core/ext/transport/chttp2/transport/frame_ping.cc',
    'src/core/ext/transport/chttp2/transport/frame_rst_stream.cc',
    'src/core/ext/transport/chttp2/transport/frame_settings.cc',
    'src/core/ext/transport/chttp2/transport/frame_window_update.cc',
    'src/core/ext/transport/chttp2/transport/hpack_encoder.cc',
    'src/core/ext/transport/chttp2/transport/hpack_encoder_table.cc',
    'src/core/ext/transport/chttp2/transport/hpack_parser.cc',
    'src/core/ext/transport/chttp2/transport/hpack_parser_table.cc',
    'src/core/ext/transport/chttp2/transport/http2_settings.cc',
    'src/core/ext/transport/chttp2/transport/huffsyms.cc',
    'src/core/ext/transport/chttp2/transport/parsing.cc',
    'src/core/ext/transport/chttp2/transport/stream_lists.cc',
    'src/core/ext/transport/chttp2/transport/stream_map.cc',
    'src/core/ext/transport/chttp2/transport/varint.cc',
    'src/core/ext/transport/chttp2/transport/writing.cc',
    'src/core/ext/transport/inproc/inproc_plugin.cc',
    'src/core/ext/transport/inproc/inproc_transport.cc',
    'src/core/ext/upb-generated/google/api/annotations.upb.c',
    'src/core/ext/upb-generated/google/api/http.upb.c',
    'src/core/ext/upb-generated/google/protobuf/any.upb.c',
    'src/core/ext/upb-generated/google/protobuf/descriptor.upb.c',
    'src/core/ext/upb-generated/google/protobuf/duration.upb.c',
    'src/core/ext/upb-generated/google/protobuf/empty.upb.c',
    'src/core/ext/upb-generated/google/protobuf/struct.upb.c',
    'src/core/ext/upb-generated/google/protobuf/timestamp.upb.c',
    'src/core/ext/upb-generated/google/protobuf/wrappers.upb.c',
    'src/core/ext/upb-generated/google/rpc/status.upb.c',
    'src/core/ext/upb-generated/src/proto/grpc/health/v1/health.upb.c',
    'src/core/ext/upb-generated/src/proto/grpc/lb/v1/load_balancer.upb.c',
    'src/core/ext/upb-generated/src/proto/grpc/lookup/v1/rls.upb.c',
    'src/core/ext/upb-generated/validate/validate.upb.c',
    'src/core/ext/upb-generated/xds/data/orca/v3/orca_load_report.upb.c',
    'src/core/ext/upb-generated/xds/service/orca/v3/orca.upb.c',
    'src/core/lib/address_utils/parse_address.cc',
    'src/core/lib/address_utils/sockaddr_utils.cc',
    'src/core/lib/backoff/backoff.cc',
    'src/core/lib/channel/channel_args.cc',
    'src/core/lib/channel/channel_args_preconditioning.cc',
    'src/core/lib/channel/channel_stack.cc',
    'src/core/lib/channel/channel_stack_builder.cc',
    'src/core/lib/channel/channel_stack_builder_impl.cc',
    'src/core/lib/channel/channel_trace.cc',
    'src/core/lib/channel/channelz.cc',
    'src/core/lib/channel/channelz_registry.cc',
    'src/core/lib/channel/connected_channel.cc',
    'src/core/lib/channel/promise_based_filter.cc',
    'src/core/lib/channel/status_util.cc',
    'src/core/lib/compression/compression.cc',
    'src/core/lib/compression/compression_internal.cc',
    'src/core/lib/compression/message_compress.cc',
    'src/core/lib/config/core_configuration.cc',
    'src/core/lib/debug/stats.cc',
    'src/core/lib/debug/stats_data.cc',
    'src/core/lib/debug/trace.cc',
    'src/core/lib/event_engine/channel_args_endpoint_config.cc',
    'src/core/lib/event_engine/default_event_engine_factory.cc',
    'src/core/lib/event_engine/event_engine.cc',
    'src/core/lib/event_engine/memory_allocator.cc',
    'src/core/lib/event_engine/resolved_address.cc',
    'src/core/lib/event_engine/slice.cc',
    'src/core/lib/event_engine/slice_buffer.cc',
    'src/core/lib/event_engine/sockaddr.cc',
    'src/core/lib/gprpp/time.cc',
    'src/core/lib/http/format_request.cc',
    'src/core/lib/http/httpcli.cc',
    'src/core/lib/http/parser.cc',
    'src/core/lib/iomgr/buffer_list.cc',
    'src/core/lib/iomgr/call_combiner.cc',
    'src/core/lib/iomgr/cfstream_handle.cc',
    'src/core/lib/iomgr/combiner.cc',
    'src/core/lib/iomgr/dualstack_socket_posix.cc',
    'src/core/lib/iomgr/endpoint.cc',
    'src/core/lib/iomgr/endpoint_cfstream.cc',
    'src/core/lib/iomgr/endpoint_pair_event_engine.cc',
    'src/core/lib/iomgr/endpoint_pair_posix.cc',
    'src/core/lib/iomgr/endpoint_pair_windows.cc',
    'src/core/lib/iomgr/error.cc',
    'src/core/lib/iomgr/error_cfstream.cc',
    'src/core/lib/iomgr/ev_apple.cc',
    'src/core/lib/iomgr/ev_epoll1_linux.cc',
    'src/core/lib/iomgr/ev_poll_posix.cc',
    'src/core/lib/iomgr/ev_posix.cc',
    'src/core/lib/iomgr/ev_windows.cc',
    'src/core/lib/iomgr/event_engine/closure.cc',
    'src/core/lib/iomgr/event_engine/endpoint.cc',
    'src/core/lib/iomgr/event_engine/iomgr.cc',
    'src/core/lib/iomgr/event_engine/pollset.cc',
    'src/core/lib/iomgr/event_engine/resolved_address_internal.cc',
    'src/core/lib/iomgr/event_engine/resolver.cc',
    'src/core/lib/iomgr/event_engine/tcp.cc',
    'src/core/lib/iomgr/event_engine/timer.cc',
    'src/core/lib/iomgr/exec_ctx.cc',
    'src/core/lib/iomgr/executor.cc',
    'src/core/lib/iomgr/executor/mpmcqueue.cc',
    'src/core/lib/iomgr/executor/threadpool.cc',
    'src/core/lib/iomgr/fork_posix.cc',
    'src/core/lib/iomgr/fork_windows.cc',
    'src/core/lib/iomgr/gethostname_fallback.cc',
    'src/core/lib/iomgr/gethostname_host_name_max.cc',
    'src/core/lib/iomgr/gethostname_sysconf.cc',
    'src/core/lib/iomgr/grpc_if_nametoindex_posix.cc',
    'src/core/lib/iomgr/grpc_if_nametoindex_unsupported.cc',
    'src/core/lib/iomgr/internal_errqueue.cc',
    'src/core/lib/iomgr/iocp_windows.cc',
    'src/core/lib/iomgr/iomgr.cc',
    'src/core/lib/iomgr/iomgr_internal.cc',
    'src/core/lib/iomgr/iomgr_posix.cc',
    'src/core/lib/iomgr/iomgr_posix_cfstream.cc',
    'src/core/lib/iomgr/iomgr_windows.cc',
    'src/core/lib/iomgr/load_file.cc',
    'src/core/lib/iomgr/lockfree_event.cc',
    'src/core/lib/iomgr/polling_entity.cc',
    'src/core/lib/iomgr/pollset.cc',
    'src/core/lib/iomgr/pollset_set.cc',
    'src/core/lib/iomgr/pollset_set_windows.cc',
    'src/core/lib/iomgr/pollset_windows.cc',
    'src/core/lib/iomgr/resolve_address.cc',
    'src/core/lib/iomgr/resolve_address_posix.cc',
    'src/core/lib/iomgr/resolve_address_windows.cc',
    'src/core/lib/iomgr/sockaddr_utils_posix.cc',
    'src/core/lib/iomgr/socket_factory_posix.cc',
    'src/core/lib/iomgr/socket_mutator.cc',
    'src/core/lib/iomgr/socket_utils_common_posix.cc',
    'src/core/lib/iomgr/socket_utils_linux.cc',
    'src/core/lib/iomgr/socket_utils_posix.cc',
    'src/core/lib/iomgr/socket_utils_windows.cc',
    'src/core/lib/iomgr/socket_windows.cc',
    'src/core/lib/iomgr/tcp_client.cc',
    'src/core/lib/iomgr/tcp_client_cfstream.cc',
    'src/core/lib/iomgr/tcp_client_posix.cc',
    'src/core/lib/iomgr/tcp_client_windows.cc',
    'src/core/lib/iomgr/tcp_posix.cc',
    'src/core/lib/iomgr/tcp_server.cc',
    'src/core/lib/iomgr/tcp_server_posix.cc',
    'src/core/lib/iomgr/tcp_server_utils_posix_common.cc',
    'src/core/lib/iomgr/tcp_server_utils_posix_ifaddrs.cc',
    'src/core/lib/iomgr/tcp_server_utils_posix_noifaddrs.cc',
    'src/core/lib/iomgr/tcp_server_windows.cc',
    'src/core/lib/iomgr/tcp_windows.cc',
    'src/core/lib/iomgr/time_averaged_stats.cc',
    'src/core/lib/iomgr/timer.cc',
    'src/core/lib/iomgr/timer_generic.cc',
    'src/core/lib/iomgr/timer_heap.cc',
    'src/core/lib/iomgr/timer_manager.cc',
    'src/core/lib/iomgr/unix_sockets_posix.cc',
    'src/core/lib/iomgr/unix_sockets_posix_noop.cc',
    'src/core/lib/iomgr/wakeup_fd_eventfd.cc',
    'src/core/lib/iomgr/wakeup_fd_nospecial.cc',
    'src/core/lib/iomgr/wakeup_fd_pipe.cc',
    'src/core/lib/iomgr/wakeup_fd_posix.cc',
    'src/core/lib/iomgr/work_serializer.cc',
    'src/core/lib/json/json_reader.cc',
    'src/core/lib/json/json_util.cc',
    'src/core/lib/json/json_writer.cc',
    'src/core/lib/promise/activity.cc',
    'src/core/lib/promise/sleep.cc',
    'src/core/lib/resolver/resolver.cc',
    'src/core/lib/resolver/resolver_registry.cc',
    'src/core/lib/resolver/server_address.cc',
    'src/core/lib/resource_quota/api.cc',
    'src/core/lib/resource_quota/arena.cc',
    'src/core/lib/resource_quota/memory_quota.cc',
    'src/core/lib/resource_quota/resource_quota.cc',
    'src/core/lib/resource_quota/thread_quota.cc',
    'src/core/lib/resource_quota/trace.cc',
    'src/core/lib/security/authorization/authorization_policy_provider_vtable.cc',
    'src/core/lib/security/authorization/evaluate_args.cc',
    'src/core/lib/security/authorization/grpc_server_authz_filter.cc',
    'src/core/lib/security/context/security_context.cc',
    'src/core/lib/security/credentials/call_creds_util.cc',
    'src/core/lib/security/credentials/composite/composite_credentials.cc',
    'src/core/lib/security/credentials/credentials.cc',
    'src/core/lib/security/credentials/fake/fake_credentials.cc',
    'src/core/lib/security/credentials/insecure/insecure_credentials.cc',
    'src/core/lib/security/credentials/plugin/plugin_credentials.cc',
    'src/core/lib/security/credentials/tls/tls_utils.cc',
    'src/core/lib/security/security_connector/fake/fake_security_connector.cc',
    'src/core/lib/security/security_connector/insecure/insecure_security_connector.cc',
    'src/core/lib/security/security_connector/load_system_roots_fallback.cc',
    'src/core/lib/security/security_connector/load_system_roots_linux.cc',
    'src/core/lib/security/security_connector/security_connector.cc',
    'src/core/lib/security/transport/client_auth_filter.cc',
    'src/core/lib/security/transport/secure_endpoint.cc',
    'src/core/lib/security/transport/security_handshaker.cc',
    'src/core/lib/security/transport/server_auth_filter.cc',
    'src/core/lib/security/transport/tsi_error.cc',
    'src/core/lib/security/util/json_util.cc',
    'src/core/lib/service_config/service_config_impl.cc',
    'src/core/lib/service_config/service_config_parser.cc',
    'src/core/lib/slice/b64.cc',
    'src/core/lib/slice/percent_encoding.cc',
    'src/core/lib/slice/slice.cc',
    'src/core/lib/slice/slice_api.cc',
    'src/core/lib/slice/slice_buffer.cc',
    'src/core/lib/slice/slice_buffer_api.cc',
    'src/core/lib/slice/slice_refcount.cc',
    'src/core/lib/slice/slice_split.cc',
    'src/core/lib/slice/slice_string_helpers.cc',
    'src/core/lib/surface/api_trace.cc',
    'src/core/lib/surface/builtins.cc',
    'src/core/lib/surface/byte_buffer.cc',
    'src/core/lib/surface/byte_buffer_reader.cc',
    'src/core/lib/surface/call.cc',
    'src/core/lib/surface/call_details.cc',
    'src/core/lib/surface/call_log_batch.cc',
    'src/core/lib/surface/channel.cc',
    'src/core/lib/surface/channel_init.cc',
    'src/core/lib/surface/channel_ping.cc',
    'src/core/lib/surface/channel_stack_type.cc',
    'src/core/lib/surface/completion_queue.cc',
    'src/core/lib/surface/completion_queue_factory.cc',
    'src/core/lib/surface/event_string.cc',
    'src/core/lib/surface/init.cc',
    'src/core/lib/surface/lame_client.cc',
    'src/core/lib/surface/metadata_array.cc',
    'src/core/lib/surface/server.cc',
    'src/core/lib/surface/validate_metadata.cc',
    'src/core/lib/surface/version.cc',
    'src/core/lib/transport/bdp_estimator.cc',
    'src/core/lib/transport/byte_stream.cc',
    'src/core/lib/transport/connectivity_state.cc',
    'src/core/lib/transport/error_utils.cc',
    'src/core/lib/transport/handshaker.cc',
    'src/core/lib/transport/handshaker_registry.cc',
    'src/core/lib/transport/http_connect_handshaker.cc',
    'src/core/lib/transport/metadata_batch.cc',
    'src/core/lib/transport/parsed_metadata.cc',
    'src/core/lib/transport/pid_controller.cc',
    'src/core/lib/transport/status_conversion.cc',
    'src/core/lib/transport/tcp_connect_handshaker.cc',
    'src/core/lib/transport/timeout_encoding.cc',
    'src/core/lib/transport/transport.cc',
    'src/core/lib/transport/transport_op_string.cc',
    'src/core/lib/uri/uri_parser.cc',
    'src/core/plugin_registry/grpc_plugin_registry.cc',
    'src/core/plugin_registry/grpc_plugin_registry_noextra.cc',
    'src/core/tsi/fake_transport_security.cc',
    'src/core/tsi/local_transport_security.cc',
    'src/core/tsi/transport_security.cc',
    'src/core/tsi/transport_security_grpc.cc',
  ],
},
{
  # benchmark_helpers: support library for the C++ microbenchmarks — compiles
  # the echo/simple-messages/ORCA test protos and helpers.cc, linking the
  # benchmark framework and the unsecure C++/test-util stacks.
  'target_name': 'benchmark_helpers',
  'type': 'static_library',
  'dependencies': [
    'benchmark',
    'grpc++_unsecure',
    'grpc_test_util_unsecure',
    'grpc++_test_config',
  ],
  'sources': [
    'src/proto/grpc/testing/echo.proto',
    'src/proto/grpc/testing/echo_messages.proto',
    'src/proto/grpc/testing/simple_messages.proto',
    'src/proto/grpc/testing/xds/v3/orca_load_report.proto',
    'test/cpp/microbenchmarks/helpers.cc',
  ],
},
{
  # grpc++: the C++ API layer on top of the secure C core ('grpc') — all of
  # src/cpp (client/server/channel/credentials/thread-manager wrappers) plus
  # the Android binder transport sources under src/core/ext/transport/binder.
  'target_name': 'grpc++',
  'type': 'static_library',
  'dependencies': [
    'grpc',
  ],
  'sources': [
    'src/core/ext/transport/binder/client/binder_connector.cc',
    'src/core/ext/transport/binder/client/channel_create.cc',
    'src/core/ext/transport/binder/client/channel_create_impl.cc',
    'src/core/ext/transport/binder/client/connection_id_generator.cc',
    'src/core/ext/transport/binder/client/endpoint_binder_pool.cc',
    'src/core/ext/transport/binder/client/jni_utils.cc',
    'src/core/ext/transport/binder/client/security_policy_setting.cc',
    'src/core/ext/transport/binder/security_policy/binder_security_policy.cc',
    'src/core/ext/transport/binder/server/binder_server.cc',
    'src/core/ext/transport/binder/server/binder_server_credentials.cc',
    'src/core/ext/transport/binder/transport/binder_transport.cc',
    'src/core/ext/transport/binder/utils/ndk_binder.cc',
    'src/core/ext/transport/binder/utils/transport_stream_receiver_impl.cc',
    'src/core/ext/transport/binder/wire_format/binder_android.cc',
    'src/core/ext/transport/binder/wire_format/binder_constants.cc',
    'src/core/ext/transport/binder/wire_format/transaction.cc',
    'src/core/ext/transport/binder/wire_format/wire_reader_impl.cc',
    'src/core/ext/transport/binder/wire_format/wire_writer.cc',
    'src/cpp/client/channel_cc.cc',
    'src/cpp/client/client_callback.cc',
    'src/cpp/client/client_context.cc',
    'src/cpp/client/client_interceptor.cc',
    'src/cpp/client/create_channel.cc',
    'src/cpp/client/create_channel_internal.cc',
    'src/cpp/client/create_channel_posix.cc',
    'src/cpp/client/credentials_cc.cc',
    'src/cpp/client/insecure_credentials.cc',
    'src/cpp/client/secure_credentials.cc',
    'src/cpp/client/xds_credentials.cc',
    'src/cpp/codegen/codegen_init.cc',
    'src/cpp/common/alarm.cc',
    'src/cpp/common/auth_property_iterator.cc',
    'src/cpp/common/channel_arguments.cc',
    'src/cpp/common/channel_filter.cc',
    'src/cpp/common/completion_queue_cc.cc',
    'src/cpp/common/core_codegen.cc',
    'src/cpp/common/resource_quota_cc.cc',
    'src/cpp/common/rpc_method.cc',
    'src/cpp/common/secure_auth_context.cc',
    'src/cpp/common/secure_channel_arguments.cc',
    'src/cpp/common/secure_create_auth_context.cc',
    'src/cpp/common/tls_certificate_provider.cc',
    'src/cpp/common/tls_certificate_verifier.cc',
    'src/cpp/common/tls_credentials_options.cc',
    'src/cpp/common/validate_service_config.cc',
    'src/cpp/common/version_cc.cc',
    'src/cpp/server/async_generic_service.cc',
    'src/cpp/server/channel_argument_option.cc',
    'src/cpp/server/create_default_thread_pool.cc',
    'src/cpp/server/dynamic_thread_pool.cc',
    'src/cpp/server/external_connection_acceptor_impl.cc',
    'src/cpp/server/health/default_health_check_service.cc',
    'src/cpp/server/health/health_check_service.cc',
    'src/cpp/server/health/health_check_service_server_builder_option.cc',
    'src/cpp/server/insecure_server_credentials.cc',
    'src/cpp/server/secure_server_credentials.cc',
    'src/cpp/server/server_builder.cc',
    'src/cpp/server/server_callback.cc',
    'src/cpp/server/server_cc.cc',
    'src/cpp/server/server_context.cc',
    'src/cpp/server/server_credentials.cc',
    'src/cpp/server/server_posix.cc',
    'src/cpp/server/xds_server_credentials.cc',
    'src/cpp/thread_manager/thread_manager.cc',
    'src/cpp/util/byte_buffer_cc.cc',
    'src/cpp/util/status.cc',
    'src/cpp/util/string_ref.cc',
    'src/cpp/util/time_cc.cc',
  ],
},
{
'target_name': 'grpc++_alts',
'type': 'static_library',
'dependencies': [
'grpc++',
],
'sources': [
'src/cpp/common/alts_context.cc',
'src/cpp/common/alts_util.cc',
],
},
{
'target_name': 'grpc++_error_details',
'type': 'static_library',
'dependencies': [
'grpc++',
],
'sources': [
'src/cpp/util/error_details.cc',
],
},
{
'target_name': 'grpc++_reflection',
'type': 'static_library',
'dependencies': [
'grpc++',
],
'sources': [
'src/proto/grpc/reflection/v1alpha/reflection.proto',
'src/cpp/ext/proto_server_reflection.cc',
'src/cpp/ext/proto_server_reflection_plugin.cc',
],
},
{
'target_name': 'grpc++_test',
'type': 'static_library',
'dependencies': [
'grpc++',
],
'sources': [
'src/cpp/client/channel_test_peer.cc',
],
},
{
'target_name': 'grpc++_test_config',
'type': 'static_library',
'dependencies': [
'absl/flags:parse',
'gpr',
],
'sources': [
'test/cpp/util/test_config_cc.cc',
],
},
{
'target_name': 'grpc++_test_util',
'type': 'static_library',
'dependencies': [
'absl/flags:flag',
'grpc++',
'grpc_test_util',
],
'sources': [
'test/core/end2end/data/client_certs.cc',
'test/core/end2end/data/server1_cert.cc',
'test/core/end2end/data/server1_key.cc',
'test/core/end2end/data/test_root_cert.cc',
'test/cpp/util/byte_buffer_proto_helper.cc',
'test/cpp/util/create_test_channel.cc',
'test/cpp/util/string_ref_helper.cc',
'test/cpp/util/subprocess.cc',
'test/cpp/util/test_credentials_provider.cc',
],
},
{
'target_name': 'grpc++_unsecure',
'type': 'static_library',
'dependencies': [
'grpc_unsecure',
],
'sources': [
'src/cpp/client/channel_cc.cc',
'src/cpp/client/client_callback.cc',
'src/cpp/client/client_context.cc',
'src/cpp/client/client_interceptor.cc',
'src/cpp/client/create_channel.cc',
'src/cpp/client/create_channel_internal.cc',
'src/cpp/client/create_channel_posix.cc',
'src/cpp/client/credentials_cc.cc',
'src/cpp/client/insecure_credentials.cc',
'src/cpp/codegen/codegen_init.cc',
'src/cpp/common/alarm.cc',
'src/cpp/common/channel_arguments.cc',
'src/cpp/common/channel_filter.cc',
'src/cpp/common/completion_queue_cc.cc',
'src/cpp/common/core_codegen.cc',
'src/cpp/common/insecure_create_auth_context.cc',
'src/cpp/common/resource_quota_cc.cc',
'src/cpp/common/rpc_method.cc',
'src/cpp/common/validate_service_config.cc',
'src/cpp/common/version_cc.cc',
'src/cpp/server/async_generic_service.cc',
'src/cpp/server/channel_argument_option.cc',
'src/cpp/server/create_default_thread_pool.cc',
'src/cpp/server/dynamic_thread_pool.cc',
'src/cpp/server/external_connection_acceptor_impl.cc',
'src/cpp/server/health/default_health_check_service.cc',
'src/cpp/server/health/health_check_service.cc',
'src/cpp/server/health/health_check_service_server_builder_option.cc',
'src/cpp/server/insecure_server_credentials.cc',
'src/cpp/server/server_builder.cc',
'src/cpp/server/server_callback.cc',
'src/cpp/server/server_cc.cc',
'src/cpp/server/server_context.cc',
'src/cpp/server/server_credentials.cc',
'src/cpp/server/server_posix.cc',
'src/cpp/thread_manager/thread_manager.cc',
'src/cpp/util/byte_buffer_cc.cc',
'src/cpp/util/status.cc',
'src/cpp/util/string_ref.cc',
'src/cpp/util/time_cc.cc',
],
},
{
'target_name': 'grpc_plugin_support',
'type': 'static_library',
'dependencies': [
],
'sources': [
'src/compiler/cpp_generator.cc',
'src/compiler/csharp_generator.cc',
'src/compiler/node_generator.cc',
'src/compiler/objective_c_generator.cc',
'src/compiler/php_generator.cc',
'src/compiler/python_generator.cc',
'src/compiler/ruby_generator.cc',
],
},
{
'target_name': 'grpcpp_channelz',
'type': 'static_library',
'dependencies': [
'grpc++',
],
'sources': [
'src/proto/grpc/channelz/channelz.proto',
'src/cpp/server/channelz/channelz_service.cc',
'src/cpp/server/channelz/channelz_service_plugin.cc',
],
},
{
'target_name': 'boringssl',
'type': 'static_library',
'dependencies': [
],
'sources': [
'third_party/boringssl-with-bazel/err_data.c',
'third_party/boringssl-with-bazel/src/crypto/asn1/a_bitstr.c',
'third_party/boringssl-with-bazel/src/crypto/asn1/a_bool.c',
'third_party/boringssl-with-bazel/src/crypto/asn1/a_d2i_fp.c',
'third_party/boringssl-with-bazel/src/crypto/asn1/a_dup.c',
'third_party/boringssl-with-bazel/src/crypto/asn1/a_enum.c',
'third_party/boringssl-with-bazel/src/crypto/asn1/a_gentm.c',
'third_party/boringssl-with-bazel/src/crypto/asn1/a_i2d_fp.c',
'third_party/boringssl-with-bazel/src/crypto/asn1/a_int.c',
'third_party/boringssl-with-bazel/src/crypto/asn1/a_mbstr.c',
'third_party/boringssl-with-bazel/src/crypto/asn1/a_object.c',
'third_party/boringssl-with-bazel/src/crypto/asn1/a_octet.c',
'third_party/boringssl-with-bazel/src/crypto/asn1/a_print.c',
'third_party/boringssl-with-bazel/src/crypto/asn1/a_strex.c',
'third_party/boringssl-with-bazel/src/crypto/asn1/a_strnid.c',
'third_party/boringssl-with-bazel/src/crypto/asn1/a_time.c',
'third_party/boringssl-with-bazel/src/crypto/asn1/a_type.c',
'third_party/boringssl-with-bazel/src/crypto/asn1/a_utctm.c',
'third_party/boringssl-with-bazel/src/crypto/asn1/a_utf8.c',
'third_party/boringssl-with-bazel/src/crypto/asn1/asn1_lib.c',
'third_party/boringssl-with-bazel/src/crypto/asn1/asn1_par.c',
'third_party/boringssl-with-bazel/src/crypto/asn1/asn_pack.c',
'third_party/boringssl-with-bazel/src/crypto/asn1/f_int.c',
'third_party/boringssl-with-bazel/src/crypto/asn1/f_string.c',
'third_party/boringssl-with-bazel/src/crypto/asn1/tasn_dec.c',
'third_party/boringssl-with-bazel/src/crypto/asn1/tasn_enc.c',
'third_party/boringssl-with-bazel/src/crypto/asn1/tasn_fre.c',
'third_party/boringssl-with-bazel/src/crypto/asn1/tasn_new.c',
'third_party/boringssl-with-bazel/src/crypto/asn1/tasn_typ.c',
'third_party/boringssl-with-bazel/src/crypto/asn1/tasn_utl.c',
'third_party/boringssl-with-bazel/src/crypto/asn1/time_support.c',
'third_party/boringssl-with-bazel/src/crypto/base64/base64.c',
'third_party/boringssl-with-bazel/src/crypto/bio/bio.c',
'third_party/boringssl-with-bazel/src/crypto/bio/bio_mem.c',
'third_party/boringssl-with-bazel/src/crypto/bio/connect.c',
'third_party/boringssl-with-bazel/src/crypto/bio/fd.c',
'third_party/boringssl-with-bazel/src/crypto/bio/file.c',
'third_party/boringssl-with-bazel/src/crypto/bio/hexdump.c',
'third_party/boringssl-with-bazel/src/crypto/bio/pair.c',
'third_party/boringssl-with-bazel/src/crypto/bio/printf.c',
'third_party/boringssl-with-bazel/src/crypto/bio/socket.c',
'third_party/boringssl-with-bazel/src/crypto/bio/socket_helper.c',
'third_party/boringssl-with-bazel/src/crypto/blake2/blake2.c',
'third_party/boringssl-with-bazel/src/crypto/bn_extra/bn_asn1.c',
'third_party/boringssl-with-bazel/src/crypto/bn_extra/convert.c',
'third_party/boringssl-with-bazel/src/crypto/buf/buf.c',
'third_party/boringssl-with-bazel/src/crypto/bytestring/asn1_compat.c',
'third_party/boringssl-with-bazel/src/crypto/bytestring/ber.c',
'third_party/boringssl-with-bazel/src/crypto/bytestring/cbb.c',
'third_party/boringssl-with-bazel/src/crypto/bytestring/cbs.c',
'third_party/boringssl-with-bazel/src/crypto/bytestring/unicode.c',
'third_party/boringssl-with-bazel/src/crypto/chacha/chacha.c',
'third_party/boringssl-with-bazel/src/crypto/cipher_extra/cipher_extra.c',
'third_party/boringssl-with-bazel/src/crypto/cipher_extra/derive_key.c',
'third_party/boringssl-with-bazel/src/crypto/cipher_extra/e_aesccm.c',
'third_party/boringssl-with-bazel/src/crypto/cipher_extra/e_aesctrhmac.c',
'third_party/boringssl-with-bazel/src/crypto/cipher_extra/e_aesgcmsiv.c',
'third_party/boringssl-with-bazel/src/crypto/cipher_extra/e_chacha20poly1305.c',
'third_party/boringssl-with-bazel/src/crypto/cipher_extra/e_null.c',
'third_party/boringssl-with-bazel/src/crypto/cipher_extra/e_rc2.c',
'third_party/boringssl-with-bazel/src/crypto/cipher_extra/e_rc4.c',
'third_party/boringssl-with-bazel/src/crypto/cipher_extra/e_tls.c',
'third_party/boringssl-with-bazel/src/crypto/cipher_extra/tls_cbc.c',
'third_party/boringssl-with-bazel/src/crypto/cmac/cmac.c',
'third_party/boringssl-with-bazel/src/crypto/conf/conf.c',
'third_party/boringssl-with-bazel/src/crypto/cpu-aarch64-fuchsia.c',
'third_party/boringssl-with-bazel/src/crypto/cpu-aarch64-linux.c',
'third_party/boringssl-with-bazel/src/crypto/cpu-aarch64-win.c',
'third_party/boringssl-with-bazel/src/crypto/cpu-arm-linux.c',
'third_party/boringssl-with-bazel/src/crypto/cpu-arm.c',
'third_party/boringssl-with-bazel/src/crypto/cpu-intel.c',
'third_party/boringssl-with-bazel/src/crypto/cpu-ppc64le.c',
'third_party/boringssl-with-bazel/src/crypto/crypto.c',
'third_party/boringssl-with-bazel/src/crypto/curve25519/curve25519.c',
'third_party/boringssl-with-bazel/src/crypto/curve25519/spake25519.c',
'third_party/boringssl-with-bazel/src/crypto/dh_extra/dh_asn1.c',
'third_party/boringssl-with-bazel/src/crypto/dh_extra/params.c',
'third_party/boringssl-with-bazel/src/crypto/digest_extra/digest_extra.c',
'third_party/boringssl-with-bazel/src/crypto/dsa/dsa.c',
'third_party/boringssl-with-bazel/src/crypto/dsa/dsa_asn1.c',
'third_party/boringssl-with-bazel/src/crypto/ec_extra/ec_asn1.c',
'third_party/boringssl-with-bazel/src/crypto/ec_extra/ec_derive.c',
'third_party/boringssl-with-bazel/src/crypto/ec_extra/hash_to_curve.c',
'third_party/boringssl-with-bazel/src/crypto/ecdh_extra/ecdh_extra.c',
'third_party/boringssl-with-bazel/src/crypto/ecdsa_extra/ecdsa_asn1.c',
'third_party/boringssl-with-bazel/src/crypto/engine/engine.c',
'third_party/boringssl-with-bazel/src/crypto/err/err.c',
'third_party/boringssl-with-bazel/src/crypto/evp/digestsign.c',
'third_party/boringssl-with-bazel/src/crypto/evp/evp.c',
'third_party/boringssl-with-bazel/src/crypto/evp/evp_asn1.c',
'third_party/boringssl-with-bazel/src/crypto/evp/evp_ctx.c',
'third_party/boringssl-with-bazel/src/crypto/evp/p_dsa_asn1.c',
'third_party/boringssl-with-bazel/src/crypto/evp/p_ec.c',
'third_party/boringssl-with-bazel/src/crypto/evp/p_ec_asn1.c',
'third_party/boringssl-with-bazel/src/crypto/evp/p_ed25519.c',
'third_party/boringssl-with-bazel/src/crypto/evp/p_ed25519_asn1.c',
'third_party/boringssl-with-bazel/src/crypto/evp/p_rsa.c',
'third_party/boringssl-with-bazel/src/crypto/evp/p_rsa_asn1.c',
'third_party/boringssl-with-bazel/src/crypto/evp/p_x25519.c',
'third_party/boringssl-with-bazel/src/crypto/evp/p_x25519_asn1.c',
'third_party/boringssl-with-bazel/src/crypto/evp/pbkdf.c',
'third_party/boringssl-with-bazel/src/crypto/evp/print.c',
'third_party/boringssl-with-bazel/src/crypto/evp/scrypt.c',
'third_party/boringssl-with-bazel/src/crypto/evp/sign.c',
'third_party/boringssl-with-bazel/src/crypto/ex_data.c',
'third_party/boringssl-with-bazel/src/crypto/fipsmodule/bcm.c',
'third_party/boringssl-with-bazel/src/crypto/fipsmodule/fips_shared_support.c',
'third_party/boringssl-with-bazel/src/crypto/hkdf/hkdf.c',
'third_party/boringssl-with-bazel/src/crypto/hpke/hpke.c',
'third_party/boringssl-with-bazel/src/crypto/hrss/hrss.c',
'third_party/boringssl-with-bazel/src/crypto/lhash/lhash.c',
'third_party/boringssl-with-bazel/src/crypto/mem.c',
'third_party/boringssl-with-bazel/src/crypto/obj/obj.c',
'third_party/boringssl-with-bazel/src/crypto/obj/obj_xref.c',
'third_party/boringssl-with-bazel/src/crypto/pem/pem_all.c',
'third_party/boringssl-with-bazel/src/crypto/pem/pem_info.c',
'third_party/boringssl-with-bazel/src/crypto/pem/pem_lib.c',
'third_party/boringssl-with-bazel/src/crypto/pem/pem_oth.c',
'third_party/boringssl-with-bazel/src/crypto/pem/pem_pk8.c',
'third_party/boringssl-with-bazel/src/crypto/pem/pem_pkey.c',
'third_party/boringssl-with-bazel/src/crypto/pem/pem_x509.c',
'third_party/boringssl-with-bazel/src/crypto/pem/pem_xaux.c',
'third_party/boringssl-with-bazel/src/crypto/pkcs7/pkcs7.c',
'third_party/boringssl-with-bazel/src/crypto/pkcs7/pkcs7_x509.c',
'third_party/boringssl-with-bazel/src/crypto/pkcs8/p5_pbev2.c',
'third_party/boringssl-with-bazel/src/crypto/pkcs8/pkcs8.c',
'third_party/boringssl-with-bazel/src/crypto/pkcs8/pkcs8_x509.c',
'third_party/boringssl-with-bazel/src/crypto/poly1305/poly1305.c',
'third_party/boringssl-with-bazel/src/crypto/poly1305/poly1305_arm.c',
'third_party/boringssl-with-bazel/src/crypto/poly1305/poly1305_vec.c',
'third_party/boringssl-with-bazel/src/crypto/pool/pool.c',
'third_party/boringssl-with-bazel/src/crypto/rand_extra/deterministic.c',
'third_party/boringssl-with-bazel/src/crypto/rand_extra/forkunsafe.c',
'third_party/boringssl-with-bazel/src/crypto/rand_extra/fuchsia.c',
'third_party/boringssl-with-bazel/src/crypto/rand_extra/passive.c',
'third_party/boringssl-with-bazel/src/crypto/rand_extra/rand_extra.c',
'third_party/boringssl-with-bazel/src/crypto/rand_extra/windows.c',
'third_party/boringssl-with-bazel/src/crypto/rc4/rc4.c',
'third_party/boringssl-with-bazel/src/crypto/refcount_c11.c',
'third_party/boringssl-with-bazel/src/crypto/refcount_lock.c',
'third_party/boringssl-with-bazel/src/crypto/rsa_extra/rsa_asn1.c',
'third_party/boringssl-with-bazel/src/crypto/rsa_extra/rsa_print.c',
'third_party/boringssl-with-bazel/src/crypto/siphash/siphash.c',
'third_party/boringssl-with-bazel/src/crypto/stack/stack.c',
'third_party/boringssl-with-bazel/src/crypto/thread.c',
'third_party/boringssl-with-bazel/src/crypto/thread_none.c',
'third_party/boringssl-with-bazel/src/crypto/thread_pthread.c',
'third_party/boringssl-with-bazel/src/crypto/thread_win.c',
'third_party/boringssl-with-bazel/src/crypto/trust_token/pmbtoken.c',
'third_party/boringssl-with-bazel/src/crypto/trust_token/trust_token.c',
'third_party/boringssl-with-bazel/src/crypto/trust_token/voprf.c',
'third_party/boringssl-with-bazel/src/crypto/x509/a_digest.c',
'third_party/boringssl-with-bazel/src/crypto/x509/a_sign.c',
'third_party/boringssl-with-bazel/src/crypto/x509/a_verify.c',
'third_party/boringssl-with-bazel/src/crypto/x509/algorithm.c',
'third_party/boringssl-with-bazel/src/crypto/x509/asn1_gen.c',
'third_party/boringssl-with-bazel/src/crypto/x509/by_dir.c',
'third_party/boringssl-with-bazel/src/crypto/x509/by_file.c',
'third_party/boringssl-with-bazel/src/crypto/x509/i2d_pr.c',
'third_party/boringssl-with-bazel/src/crypto/x509/name_print.c',
'third_party/boringssl-with-bazel/src/crypto/x509/rsa_pss.c',
'third_party/boringssl-with-bazel/src/crypto/x509/t_crl.c',
'third_party/boringssl-with-bazel/src/crypto/x509/t_req.c',
'third_party/boringssl-with-bazel/src/crypto/x509/t_x509.c',
'third_party/boringssl-with-bazel/src/crypto/x509/t_x509a.c',
'third_party/boringssl-with-bazel/src/crypto/x509/x509.c',
'third_party/boringssl-with-bazel/src/crypto/x509/x509_att.c',
'third_party/boringssl-with-bazel/src/crypto/x509/x509_cmp.c',
'third_party/boringssl-with-bazel/src/crypto/x509/x509_d2.c',
'third_party/boringssl-with-bazel/src/crypto/x509/x509_def.c',
'third_party/boringssl-with-bazel/src/crypto/x509/x509_ext.c',
'third_party/boringssl-with-bazel/src/crypto/x509/x509_lu.c',
'third_party/boringssl-with-bazel/src/crypto/x509/x509_obj.c',
'third_party/boringssl-with-bazel/src/crypto/x509/x509_req.c',
'third_party/boringssl-with-bazel/src/crypto/x509/x509_set.c',
'third_party/boringssl-with-bazel/src/crypto/x509/x509_trs.c',
'third_party/boringssl-with-bazel/src/crypto/x509/x509_txt.c',
'third_party/boringssl-with-bazel/src/crypto/x509/x509_v3.c',
'third_party/boringssl-with-bazel/src/crypto/x509/x509_vfy.c',
'third_party/boringssl-with-bazel/src/crypto/x509/x509_vpm.c',
'third_party/boringssl-with-bazel/src/crypto/x509/x509cset.c',
'third_party/boringssl-with-bazel/src/crypto/x509/x509name.c',
'third_party/boringssl-with-bazel/src/crypto/x509/x509rset.c',
'third_party/boringssl-with-bazel/src/crypto/x509/x509spki.c',
'third_party/boringssl-with-bazel/src/crypto/x509/x_algor.c',
'third_party/boringssl-with-bazel/src/crypto/x509/x_all.c',
'third_party/boringssl-with-bazel/src/crypto/x509/x_attrib.c',
'third_party/boringssl-with-bazel/src/crypto/x509/x_crl.c',
'third_party/boringssl-with-bazel/src/crypto/x509/x_exten.c',
'third_party/boringssl-with-bazel/src/crypto/x509/x_info.c',
'third_party/boringssl-with-bazel/src/crypto/x509/x_name.c',
'third_party/boringssl-with-bazel/src/crypto/x509/x_pkey.c',
'third_party/boringssl-with-bazel/src/crypto/x509/x_pubkey.c',
'third_party/boringssl-with-bazel/src/crypto/x509/x_req.c',
'third_party/boringssl-with-bazel/src/crypto/x509/x_sig.c',
'third_party/boringssl-with-bazel/src/crypto/x509/x_spki.c',
'third_party/boringssl-with-bazel/src/crypto/x509/x_val.c',
'third_party/boringssl-with-bazel/src/crypto/x509/x_x509.c',
'third_party/boringssl-with-bazel/src/crypto/x509/x_x509a.c',
'third_party/boringssl-with-bazel/src/crypto/x509v3/pcy_cache.c',
'third_party/boringssl-with-bazel/src/crypto/x509v3/pcy_data.c',
'third_party/boringssl-with-bazel/src/crypto/x509v3/pcy_lib.c',
'third_party/boringssl-with-bazel/src/crypto/x509v3/pcy_map.c',
'third_party/boringssl-with-bazel/src/crypto/x509v3/pcy_node.c',
'third_party/boringssl-with-bazel/src/crypto/x509v3/pcy_tree.c',
'third_party/boringssl-with-bazel/src/crypto/x509v3/v3_akey.c',
'third_party/boringssl-with-bazel/src/crypto/x509v3/v3_akeya.c',
'third_party/boringssl-with-bazel/src/crypto/x509v3/v3_alt.c',
'third_party/boringssl-with-bazel/src/crypto/x509v3/v3_bcons.c',
'third_party/boringssl-with-bazel/src/crypto/x509v3/v3_bitst.c',
'third_party/boringssl-with-bazel/src/crypto/x509v3/v3_conf.c',
'third_party/boringssl-with-bazel/src/crypto/x509v3/v3_cpols.c',
'third_party/boringssl-with-bazel/src/crypto/x509v3/v3_crld.c',
'third_party/boringssl-with-bazel/src/crypto/x509v3/v3_enum.c',
'third_party/boringssl-with-bazel/src/crypto/x509v3/v3_extku.c',
'third_party/boringssl-with-bazel/src/crypto/x509v3/v3_genn.c',
'third_party/boringssl-with-bazel/src/crypto/x509v3/v3_ia5.c',
'third_party/boringssl-with-bazel/src/crypto/x509v3/v3_info.c',
'third_party/boringssl-with-bazel/src/crypto/x509v3/v3_int.c',
'third_party/boringssl-with-bazel/src/crypto/x509v3/v3_lib.c',
'third_party/boringssl-with-bazel/src/crypto/x509v3/v3_ncons.c',
'third_party/boringssl-with-bazel/src/crypto/x509v3/v3_ocsp.c',
'third_party/boringssl-with-bazel/src/crypto/x509v3/v3_pci.c',
'third_party/boringssl-with-bazel/src/crypto/x509v3/v3_pcia.c',
'third_party/boringssl-with-bazel/src/crypto/x509v3/v3_pcons.c',
'third_party/boringssl-with-bazel/src/crypto/x509v3/v3_pmaps.c',
'third_party/boringssl-with-bazel/src/crypto/x509v3/v3_prn.c',
'third_party/boringssl-with-bazel/src/crypto/x509v3/v3_purp.c',
'third_party/boringssl-with-bazel/src/crypto/x509v3/v3_skey.c',
'third_party/boringssl-with-bazel/src/crypto/x509v3/v3_utl.c',
'third_party/boringssl-with-bazel/src/ssl/bio_ssl.cc',
'third_party/boringssl-with-bazel/src/ssl/d1_both.cc',
'third_party/boringssl-with-bazel/src/ssl/d1_lib.cc',
'third_party/boringssl-with-bazel/src/ssl/d1_pkt.cc',
'third_party/boringssl-with-bazel/src/ssl/d1_srtp.cc',
'third_party/boringssl-with-bazel/src/ssl/dtls_method.cc',
'third_party/boringssl-with-bazel/src/ssl/dtls_record.cc',
'third_party/boringssl-with-bazel/src/ssl/encrypted_client_hello.cc',
'third_party/boringssl-with-bazel/src/ssl/extensions.cc',
'third_party/boringssl-with-bazel/src/ssl/handoff.cc',
'third_party/boringssl-with-bazel/src/ssl/handshake.cc',
'third_party/boringssl-with-bazel/src/ssl/handshake_client.cc',
'third_party/boringssl-with-bazel/src/ssl/handshake_server.cc',
'third_party/boringssl-with-bazel/src/ssl/s3_both.cc',
'third_party/boringssl-with-bazel/src/ssl/s3_lib.cc',
'third_party/boringssl-with-bazel/src/ssl/s3_pkt.cc',
'third_party/boringssl-with-bazel/src/ssl/ssl_aead_ctx.cc',
'third_party/boringssl-with-bazel/src/ssl/ssl_asn1.cc',
'third_party/boringssl-with-bazel/src/ssl/ssl_buffer.cc',
'third_party/boringssl-with-bazel/src/ssl/ssl_cert.cc',
'third_party/boringssl-with-bazel/src/ssl/ssl_cipher.cc',
'third_party/boringssl-with-bazel/src/ssl/ssl_file.cc',
'third_party/boringssl-with-bazel/src/ssl/ssl_key_share.cc',
'third_party/boringssl-with-bazel/src/ssl/ssl_lib.cc',
'third_party/boringssl-with-bazel/src/ssl/ssl_privkey.cc',
'third_party/boringssl-with-bazel/src/ssl/ssl_session.cc',
'third_party/boringssl-with-bazel/src/ssl/ssl_stat.cc',
'third_party/boringssl-with-bazel/src/ssl/ssl_transcript.cc',
'third_party/boringssl-with-bazel/src/ssl/ssl_versions.cc',
'third_party/boringssl-with-bazel/src/ssl/ssl_x509.cc',
'third_party/boringssl-with-bazel/src/ssl/t1_enc.cc',
'third_party/boringssl-with-bazel/src/ssl/tls13_both.cc',
'third_party/boringssl-with-bazel/src/ssl/tls13_client.cc',
'third_party/boringssl-with-bazel/src/ssl/tls13_enc.cc',
'third_party/boringssl-with-bazel/src/ssl/tls13_server.cc',
'third_party/boringssl-with-bazel/src/ssl/tls_method.cc',
'third_party/boringssl-with-bazel/src/ssl/tls_record.cc',
],
},
{
'target_name': 'boringssl_test_util',
'type': 'static_library',
'dependencies': [
],
'sources': [
'third_party/boringssl-with-bazel/src/crypto/test/file_test.cc',
'third_party/boringssl-with-bazel/src/crypto/test/malloc.cc',
'third_party/boringssl-with-bazel/src/crypto/test/test_util.cc',
'third_party/boringssl-with-bazel/src/crypto/test/wycheproof_util.cc',
],
},
{
'target_name': 'benchmark',
'type': 'static_library',
'dependencies': [
],
'sources': [
'third_party/benchmark/src/benchmark.cc',
'third_party/benchmark/src/benchmark_api_internal.cc',
'third_party/benchmark/src/benchmark_main.cc',
'third_party/benchmark/src/benchmark_name.cc',
'third_party/benchmark/src/benchmark_register.cc',
'third_party/benchmark/src/benchmark_runner.cc',
'third_party/benchmark/src/colorprint.cc',
'third_party/benchmark/src/commandlineflags.cc',
'third_party/benchmark/src/complexity.cc',
'third_party/benchmark/src/console_reporter.cc',
'third_party/benchmark/src/counter.cc',
'third_party/benchmark/src/csv_reporter.cc',
'third_party/benchmark/src/json_reporter.cc',
'third_party/benchmark/src/perf_counters.cc',
'third_party/benchmark/src/reporter.cc',
'third_party/benchmark/src/sleep.cc',
'third_party/benchmark/src/statistics.cc',
'third_party/benchmark/src/string_util.cc',
'third_party/benchmark/src/sysinfo.cc',
'third_party/benchmark/src/timers.cc',
],
},
{
'target_name': 're2',
'type': 'static_library',
'dependencies': [
],
'sources': [
'third_party/re2/re2/bitstate.cc',
'third_party/re2/re2/compile.cc',
'third_party/re2/re2/dfa.cc',
'third_party/re2/re2/filtered_re2.cc',
'third_party/re2/re2/mimics_pcre.cc',
'third_party/re2/re2/nfa.cc',
'third_party/re2/re2/onepass.cc',
'third_party/re2/re2/parse.cc',
'third_party/re2/re2/perl_groups.cc',
'third_party/re2/re2/prefilter.cc',
'third_party/re2/re2/prefilter_tree.cc',
'third_party/re2/re2/prog.cc',
'third_party/re2/re2/re2.cc',
'third_party/re2/re2/regexp.cc',
'third_party/re2/re2/set.cc',
'third_party/re2/re2/simplify.cc',
'third_party/re2/re2/stringpiece.cc',
'third_party/re2/re2/tostring.cc',
'third_party/re2/re2/unicode_casefold.cc',
'third_party/re2/re2/unicode_groups.cc',
'third_party/re2/util/pcre.cc',
'third_party/re2/util/rune.cc',
'third_party/re2/util/strutil.cc',
],
},
{
'target_name': 'upb',
'type': 'static_library',
'dependencies': [
],
'sources': [
'third_party/upb/third_party/utf8_range/naive.c',
'third_party/upb/third_party/utf8_range/range2-neon.c',
'third_party/upb/third_party/utf8_range/range2-sse.c',
'third_party/upb/upb/decode_fast.c',
'third_party/upb/upb/decode.c',
'third_party/upb/upb/def.c',
'third_party/upb/upb/encode.c',
'third_party/upb/upb/json_encode.c',
'third_party/upb/upb/msg.c',
'third_party/upb/upb/reflection.c',
'third_party/upb/upb/table.c',
'third_party/upb/upb/text_encode.c',
'third_party/upb/upb/upb.c',
'src/core/ext/upb-generated/google/protobuf/descriptor.upb.c',
'src/core/ext/upbdefs-generated/google/protobuf/descriptor.upbdefs.c',
],
},
{
'target_name': 'z',
'type': 'static_library',
'dependencies': [
],
'sources': [
'third_party/zlib/adler32.c',
'third_party/zlib/compress.c',
'third_party/zlib/crc32.c',
'third_party/zlib/deflate.c',
'third_party/zlib/gzclose.c',
'third_party/zlib/gzlib.c',
'third_party/zlib/gzread.c',
'third_party/zlib/gzwrite.c',
'third_party/zlib/infback.c',
'third_party/zlib/inffast.c',
'third_party/zlib/inflate.c',
'third_party/zlib/inftrees.c',
'third_party/zlib/trees.c',
'third_party/zlib/uncompr.c',
'third_party/zlib/zutil.c',
],
},
]
}
| 54.73311 | 135 | 0.684683 |
{
'variables': {
'openssl_gyp_target%': 'Please Define openssl_gyp_target variable',
'zlib_gyp_target%': 'Please Define zlib_gyp_target variable',
'grpc_gcov%': 'false',
'grpc_alpine%': 'false',
},
'target_defaults': {
'configurations': {
'Debug': {
'cflags': [
'-O0',
],
'defines': [
'_DEBUG',
'DEBUG',
],
},
'Release': {
'cflags': [
'-O2',
'-Wframe-larger-than=16384',
],
'defines': [
'NDEBUG',
],
},
},
'cflags': [
'-g',
'-Wall',
'-Wextra',
'-DOSATOMIC_USE_INLINED=1',
'-Ithird_party/abseil-cpp',
'-Ithird_party/re2',
'-Ithird_party/upb',
'-Isrc/core/ext/upb-generated',
'-Isrc/core/ext/upbdefs-generated',
'-Ithird_party/xxhash',
],
'ldflags': [
'-g',
],
'cflags_c': [
'-Werror',
'-std=c99',
],
'cflags_cc': [
'-Werror',
'-std=c++11',
],
'include_dirs': [
'.',
'../..',
'include',
],
'defines': [
'GRPC_ARES=0',
],
'dependencies': [
'<(openssl_gyp_target)',
'<(zlib_gyp_target)',
],
'conditions': [
['grpc_gcov=="true"', {
'cflags': [
'-O0',
'-fprofile-arcs',
'-ftest-coverage',
'-Wno-return-type',
],
'defines': [
'_DEBUG',
'DEBUG',
'GPR_GCOV',
],
'ldflags': [
'-fprofile-arcs',
'-ftest-coverage',
'-rdynamic',
'-lstdc++',
],
}],
['grpc_alpine=="true"', {
'defines': [
'GPR_MUSL_LIBC_COMPAT'
]
}],
['OS == "win"', {
'defines': [
'_WIN32_WINNT=0x0600',
'WIN32_LEAN_AND_MEAN',
'_HAS_EXCEPTIONS=0',
'UNICODE',
'_UNICODE',
'NOMINMAX',
],
'msvs_settings': {
'VCCLCompilerTool': {
'RuntimeLibrary': 1,
}
},
"libraries": [
"ws2_32"
]
}],
['OS == "mac"', {
'xcode_settings': {
'OTHER_CFLAGS': [
'-g',
'-Wall',
'-Wextra',
'-DOSATOMIC_USE_INLINED=1',
'-Ithird_party/abseil-cpp',
'-Ithird_party/re2',
'-Ithird_party/upb',
'-Isrc/core/ext/upb-generated',
'-Isrc/core/ext/upbdefs-generated',
'-Ithird_party/xxhash',
],
'OTHER_CPLUSPLUSFLAGS': [
'-g',
'-Wall',
'-Wextra',
'-DOSATOMIC_USE_INLINED=1',
'-Ithird_party/abseil-cpp',
'-Ithird_party/re2',
'-Ithird_party/upb',
'-Isrc/core/ext/upb-generated',
'-Isrc/core/ext/upbdefs-generated',
'-Ithird_party/xxhash',
'-stdlib=libc++',
'-std=c++11',
'-Wno-error=deprecated-declarations',
],
},
}]
]
},
'targets': [
{
'target_name': 'address_sorting',
'type': 'static_library',
'dependencies': [
],
'sources': [
'third_party/address_sorting/address_sorting.c',
'third_party/address_sorting/address_sorting_posix.c',
'third_party/address_sorting/address_sorting_windows.c',
],
},
{
'target_name': 'end2end_tests',
'type': 'static_library',
'dependencies': [
'grpc_test_util',
],
'sources': [
'src/core/lib/security/authorization/grpc_authorization_policy_provider.cc',
'src/core/lib/security/authorization/rbac_translator.cc',
'test/core/compression/args_utils.cc',
'test/core/end2end/cq_verifier.cc',
'test/core/end2end/data/client_certs.cc',
'test/core/end2end/data/server1_cert.cc',
'test/core/end2end/data/server1_key.cc',
'test/core/end2end/data/test_root_cert.cc',
'test/core/end2end/end2end_test_utils.cc',
'test/core/end2end/end2end_tests.cc',
'test/core/end2end/fixtures/http_proxy_fixture.cc',
'test/core/end2end/fixtures/local_util.cc',
'test/core/end2end/fixtures/proxy.cc',
'test/core/end2end/tests/authority_not_supported.cc',
'test/core/end2end/tests/bad_hostname.cc',
'test/core/end2end/tests/bad_ping.cc',
'test/core/end2end/tests/binary_metadata.cc',
'test/core/end2end/tests/call_creds.cc',
'test/core/end2end/tests/call_host_override.cc',
'test/core/end2end/tests/cancel_after_accept.cc',
'test/core/end2end/tests/cancel_after_client_done.cc',
'test/core/end2end/tests/cancel_after_invoke.cc',
'test/core/end2end/tests/cancel_after_round_trip.cc',
'test/core/end2end/tests/cancel_before_invoke.cc',
'test/core/end2end/tests/cancel_in_a_vacuum.cc',
'test/core/end2end/tests/cancel_with_status.cc',
'test/core/end2end/tests/channelz.cc',
'test/core/end2end/tests/client_streaming.cc',
'test/core/end2end/tests/compressed_payload.cc',
'test/core/end2end/tests/connectivity.cc',
'test/core/end2end/tests/default_host.cc',
'test/core/end2end/tests/disappearing_server.cc',
'test/core/end2end/tests/empty_batch.cc',
'test/core/end2end/tests/filter_causes_close.cc',
'test/core/end2end/tests/filter_context.cc',
'test/core/end2end/tests/filter_init_fails.cc',
'test/core/end2end/tests/filter_latency.cc',
'test/core/end2end/tests/filter_status_code.cc',
'test/core/end2end/tests/filtered_metadata.cc',
'test/core/end2end/tests/graceful_server_shutdown.cc',
'test/core/end2end/tests/grpc_authz.cc',
'test/core/end2end/tests/high_initial_seqno.cc',
'test/core/end2end/tests/hpack_size.cc',
'test/core/end2end/tests/invoke_large_request.cc',
'test/core/end2end/tests/keepalive_timeout.cc',
'test/core/end2end/tests/large_metadata.cc',
'test/core/end2end/tests/max_concurrent_streams.cc',
'test/core/end2end/tests/max_connection_age.cc',
'test/core/end2end/tests/max_connection_idle.cc',
'test/core/end2end/tests/max_message_length.cc',
'test/core/end2end/tests/negative_deadline.cc',
'test/core/end2end/tests/no_error_on_hotpath.cc',
'test/core/end2end/tests/no_logging.cc',
'test/core/end2end/tests/no_op.cc',
'test/core/end2end/tests/payload.cc',
'test/core/end2end/tests/ping.cc',
'test/core/end2end/tests/ping_pong_streaming.cc',
'test/core/end2end/tests/proxy_auth.cc',
'test/core/end2end/tests/registered_call.cc',
'test/core/end2end/tests/request_with_flags.cc',
'test/core/end2end/tests/request_with_payload.cc',
'test/core/end2end/tests/resource_quota_server.cc',
'test/core/end2end/tests/retry.cc',
'test/core/end2end/tests/retry_cancel_after_first_attempt_starts.cc',
'test/core/end2end/tests/retry_cancel_during_delay.cc',
'test/core/end2end/tests/retry_cancel_with_multiple_send_batches.cc',
'test/core/end2end/tests/retry_cancellation.cc',
'test/core/end2end/tests/retry_disabled.cc',
'test/core/end2end/tests/retry_exceeds_buffer_size_in_delay.cc',
'test/core/end2end/tests/retry_exceeds_buffer_size_in_initial_batch.cc',
'test/core/end2end/tests/retry_exceeds_buffer_size_in_subsequent_batch.cc',
'test/core/end2end/tests/retry_lb_drop.cc',
'test/core/end2end/tests/retry_lb_fail.cc',
'test/core/end2end/tests/retry_non_retriable_status.cc',
'test/core/end2end/tests/retry_non_retriable_status_before_recv_trailing_metadata_started.cc',
'test/core/end2end/tests/retry_per_attempt_recv_timeout.cc',
'test/core/end2end/tests/retry_per_attempt_recv_timeout_on_last_attempt.cc',
'test/core/end2end/tests/retry_recv_initial_metadata.cc',
'test/core/end2end/tests/retry_recv_message.cc',
'test/core/end2end/tests/retry_recv_message_replay.cc',
'test/core/end2end/tests/retry_recv_trailing_metadata_error.cc',
'test/core/end2end/tests/retry_send_initial_metadata_refs.cc',
'test/core/end2end/tests/retry_send_op_fails.cc',
'test/core/end2end/tests/retry_send_recv_batch.cc',
'test/core/end2end/tests/retry_server_pushback_delay.cc',
'test/core/end2end/tests/retry_server_pushback_disabled.cc',
'test/core/end2end/tests/retry_streaming.cc',
'test/core/end2end/tests/retry_streaming_after_commit.cc',
'test/core/end2end/tests/retry_streaming_succeeds_before_replay_finished.cc',
'test/core/end2end/tests/retry_throttled.cc',
'test/core/end2end/tests/retry_too_many_attempts.cc',
'test/core/end2end/tests/retry_transparent_goaway.cc',
'test/core/end2end/tests/retry_transparent_max_concurrent_streams.cc',
'test/core/end2end/tests/retry_transparent_not_sent_on_wire.cc',
'test/core/end2end/tests/retry_unref_before_finish.cc',
'test/core/end2end/tests/retry_unref_before_recv.cc',
'test/core/end2end/tests/server_finishes_request.cc',
'test/core/end2end/tests/server_streaming.cc',
'test/core/end2end/tests/shutdown_finishes_calls.cc',
'test/core/end2end/tests/shutdown_finishes_tags.cc',
'test/core/end2end/tests/simple_delayed_request.cc',
'test/core/end2end/tests/simple_metadata.cc',
'test/core/end2end/tests/simple_request.cc',
'test/core/end2end/tests/streaming_error_response.cc',
'test/core/end2end/tests/trailing_metadata.cc',
'test/core/end2end/tests/write_buffering.cc',
'test/core/end2end/tests/write_buffering_at_end.cc',
'test/core/util/test_lb_policies.cc',
],
},
{
'target_name': 'gpr',
'type': 'static_library',
'dependencies': [
'absl/base:base',
'absl/base:core_headers',
'absl/memory:memory',
'absl/random:random',
'absl/status:status',
'absl/strings:cord',
'absl/strings:str_format',
'absl/strings:strings',
'absl/synchronization:synchronization',
'absl/time:time',
'absl/types:optional',
'upb',
],
'sources': [
'src/core/ext/upb-generated/google/protobuf/any.upb.c',
'src/core/ext/upb-generated/google/rpc/status.upb.c',
'src/core/lib/gpr/alloc.cc',
'src/core/lib/gpr/atm.cc',
'src/core/lib/gpr/cpu_iphone.cc',
'src/core/lib/gpr/cpu_linux.cc',
'src/core/lib/gpr/cpu_posix.cc',
'src/core/lib/gpr/cpu_windows.cc',
'src/core/lib/gpr/env_linux.cc',
'src/core/lib/gpr/env_posix.cc',
'src/core/lib/gpr/env_windows.cc',
'src/core/lib/gpr/log.cc',
'src/core/lib/gpr/log_android.cc',
'src/core/lib/gpr/log_linux.cc',
'src/core/lib/gpr/log_posix.cc',
'src/core/lib/gpr/log_windows.cc',
'src/core/lib/gpr/murmur_hash.cc',
'src/core/lib/gpr/string.cc',
'src/core/lib/gpr/string_posix.cc',
'src/core/lib/gpr/string_util_windows.cc',
'src/core/lib/gpr/string_windows.cc',
'src/core/lib/gpr/sync.cc',
'src/core/lib/gpr/sync_abseil.cc',
'src/core/lib/gpr/sync_posix.cc',
'src/core/lib/gpr/sync_windows.cc',
'src/core/lib/gpr/time.cc',
'src/core/lib/gpr/time_posix.cc',
'src/core/lib/gpr/time_precise.cc',
'src/core/lib/gpr/time_windows.cc',
'src/core/lib/gpr/tmpfile_msys.cc',
'src/core/lib/gpr/tmpfile_posix.cc',
'src/core/lib/gpr/tmpfile_windows.cc',
'src/core/lib/gpr/wrap_memcpy.cc',
'src/core/lib/gprpp/examine_stack.cc',
'src/core/lib/gprpp/fork.cc',
'src/core/lib/gprpp/global_config_env.cc',
'src/core/lib/gprpp/host_port.cc',
'src/core/lib/gprpp/mpscq.cc',
'src/core/lib/gprpp/stat_posix.cc',
'src/core/lib/gprpp/stat_windows.cc',
'src/core/lib/gprpp/status_helper.cc',
'src/core/lib/gprpp/thd_posix.cc',
'src/core/lib/gprpp/thd_windows.cc',
'src/core/lib/gprpp/time_util.cc',
'src/core/lib/profiling/basic_timers.cc',
'src/core/lib/profiling/stap_timers.cc',
],
},
{
'target_name': 'grpc',
'type': 'static_library',
'dependencies': [
'absl/container:flat_hash_map',
'absl/container:inlined_vector',
'absl/functional:bind_front',
'absl/hash:hash',
'absl/meta:type_traits',
'absl/status:statusor',
'absl/types:span',
'absl/types:variant',
'absl/utility:utility',
'gpr',
'address_sorting',
],
'sources': [
'src/core/ext/filters/census/grpc_context.cc',
'src/core/ext/filters/channel_idle/channel_idle_filter.cc',
'src/core/ext/filters/channel_idle/idle_filter_state.cc',
'src/core/ext/filters/client_channel/backend_metric.cc',
'src/core/ext/filters/client_channel/backup_poller.cc',
'src/core/ext/filters/client_channel/channel_connectivity.cc',
'src/core/ext/filters/client_channel/client_channel.cc',
'src/core/ext/filters/client_channel/client_channel_channelz.cc',
'src/core/ext/filters/client_channel/client_channel_factory.cc',
'src/core/ext/filters/client_channel/client_channel_plugin.cc',
'src/core/ext/filters/client_channel/config_selector.cc',
'src/core/ext/filters/client_channel/dynamic_filters.cc',
'src/core/ext/filters/client_channel/global_subchannel_pool.cc',
'src/core/ext/filters/client_channel/health/health_check_client.cc',
'src/core/ext/filters/client_channel/http_proxy.cc',
'src/core/ext/filters/client_channel/lb_policy.cc',
'src/core/ext/filters/client_channel/lb_policy/address_filtering.cc',
'src/core/ext/filters/client_channel/lb_policy/child_policy_handler.cc',
'src/core/ext/filters/client_channel/lb_policy/grpclb/client_load_reporting_filter.cc',
'src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb.cc',
'src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb_balancer_addresses.cc',
'src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb_client_stats.cc',
'src/core/ext/filters/client_channel/lb_policy/grpclb/load_balancer_api.cc',
'src/core/ext/filters/client_channel/lb_policy/oob_backend_metric.cc',
'src/core/ext/filters/client_channel/lb_policy/pick_first/pick_first.cc',
'src/core/ext/filters/client_channel/lb_policy/priority/priority.cc',
'src/core/ext/filters/client_channel/lb_policy/ring_hash/ring_hash.cc',
'src/core/ext/filters/client_channel/lb_policy/rls/rls.cc',
'src/core/ext/filters/client_channel/lb_policy/round_robin/round_robin.cc',
'src/core/ext/filters/client_channel/lb_policy/weighted_target/weighted_target.cc',
'src/core/ext/filters/client_channel/lb_policy/xds/cds.cc',
'src/core/ext/filters/client_channel/lb_policy/xds/xds_cluster_impl.cc',
'src/core/ext/filters/client_channel/lb_policy/xds/xds_cluster_manager.cc',
'src/core/ext/filters/client_channel/lb_policy/xds/xds_cluster_resolver.cc',
'src/core/ext/filters/client_channel/lb_policy_registry.cc',
'src/core/ext/filters/client_channel/local_subchannel_pool.cc',
'src/core/ext/filters/client_channel/proxy_mapper_registry.cc',
'src/core/ext/filters/client_channel/resolver/binder/binder_resolver.cc',
'src/core/ext/filters/client_channel/resolver/dns/c_ares/dns_resolver_ares.cc',
'src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_ev_driver_event_engine.cc',
'src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_ev_driver_posix.cc',
'src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_ev_driver_windows.cc',
'src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper.cc',
'src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper_event_engine.cc',
'src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper_posix.cc',
'src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper_windows.cc',
'src/core/ext/filters/client_channel/resolver/dns/dns_resolver_selection.cc',
'src/core/ext/filters/client_channel/resolver/dns/native/dns_resolver.cc',
'src/core/ext/filters/client_channel/resolver/fake/fake_resolver.cc',
'src/core/ext/filters/client_channel/resolver/google_c2p/google_c2p_resolver.cc',
'src/core/ext/filters/client_channel/resolver/polling_resolver.cc',
'src/core/ext/filters/client_channel/resolver/sockaddr/sockaddr_resolver.cc',
'src/core/ext/filters/client_channel/resolver/xds/xds_resolver.cc',
'src/core/ext/filters/client_channel/resolver_result_parsing.cc',
'src/core/ext/filters/client_channel/retry_filter.cc',
'src/core/ext/filters/client_channel/retry_service_config.cc',
'src/core/ext/filters/client_channel/retry_throttle.cc',
'src/core/ext/filters/client_channel/service_config_channel_arg_filter.cc',
'src/core/ext/filters/client_channel/subchannel.cc',
'src/core/ext/filters/client_channel/subchannel_pool_interface.cc',
'src/core/ext/filters/client_channel/subchannel_stream_client.cc',
'src/core/ext/filters/deadline/deadline_filter.cc',
'src/core/ext/filters/fault_injection/fault_injection_filter.cc',
'src/core/ext/filters/fault_injection/service_config_parser.cc',
'src/core/ext/filters/http/client/http_client_filter.cc',
'src/core/ext/filters/http/client_authority_filter.cc',
'src/core/ext/filters/http/http_filters_plugin.cc',
'src/core/ext/filters/http/message_compress/message_compress_filter.cc',
'src/core/ext/filters/http/message_compress/message_decompress_filter.cc',
'src/core/ext/filters/http/server/http_server_filter.cc',
'src/core/ext/filters/message_size/message_size_filter.cc',
'src/core/ext/filters/rbac/rbac_filter.cc',
'src/core/ext/filters/rbac/rbac_service_config_parser.cc',
'src/core/ext/filters/server_config_selector/server_config_selector.cc',
'src/core/ext/filters/server_config_selector/server_config_selector_filter.cc',
'src/core/ext/transport/chttp2/alpn/alpn.cc',
'src/core/ext/transport/chttp2/client/chttp2_connector.cc',
'src/core/ext/transport/chttp2/server/chttp2_server.cc',
'src/core/ext/transport/chttp2/transport/bin_decoder.cc',
'src/core/ext/transport/chttp2/transport/bin_encoder.cc',
'src/core/ext/transport/chttp2/transport/chttp2_transport.cc',
'src/core/ext/transport/chttp2/transport/context_list.cc',
'src/core/ext/transport/chttp2/transport/flow_control.cc',
'src/core/ext/transport/chttp2/transport/frame_data.cc',
'src/core/ext/transport/chttp2/transport/frame_goaway.cc',
'src/core/ext/transport/chttp2/transport/frame_ping.cc',
'src/core/ext/transport/chttp2/transport/frame_rst_stream.cc',
'src/core/ext/transport/chttp2/transport/frame_settings.cc',
'src/core/ext/transport/chttp2/transport/frame_window_update.cc',
'src/core/ext/transport/chttp2/transport/hpack_encoder.cc',
'src/core/ext/transport/chttp2/transport/hpack_encoder_table.cc',
'src/core/ext/transport/chttp2/transport/hpack_parser.cc',
'src/core/ext/transport/chttp2/transport/hpack_parser_table.cc',
'src/core/ext/transport/chttp2/transport/http2_settings.cc',
'src/core/ext/transport/chttp2/transport/huffsyms.cc',
'src/core/ext/transport/chttp2/transport/parsing.cc',
'src/core/ext/transport/chttp2/transport/stream_lists.cc',
'src/core/ext/transport/chttp2/transport/stream_map.cc',
'src/core/ext/transport/chttp2/transport/varint.cc',
'src/core/ext/transport/chttp2/transport/writing.cc',
'src/core/ext/transport/inproc/inproc_plugin.cc',
'src/core/ext/transport/inproc/inproc_transport.cc',
'src/core/ext/upb-generated/envoy/admin/v3/certs.upb.c',
'src/core/ext/upb-generated/envoy/admin/v3/clusters.upb.c',
'src/core/ext/upb-generated/envoy/admin/v3/config_dump.upb.c',
'src/core/ext/upb-generated/envoy/admin/v3/init_dump.upb.c',
'src/core/ext/upb-generated/envoy/admin/v3/listeners.upb.c',
'src/core/ext/upb-generated/envoy/admin/v3/memory.upb.c',
'src/core/ext/upb-generated/envoy/admin/v3/metrics.upb.c',
'src/core/ext/upb-generated/envoy/admin/v3/mutex_stats.upb.c',
'src/core/ext/upb-generated/envoy/admin/v3/server_info.upb.c',
'src/core/ext/upb-generated/envoy/admin/v3/tap.upb.c',
'src/core/ext/upb-generated/envoy/annotations/deprecation.upb.c',
'src/core/ext/upb-generated/envoy/annotations/resource.upb.c',
'src/core/ext/upb-generated/envoy/config/accesslog/v3/accesslog.upb.c',
'src/core/ext/upb-generated/envoy/config/bootstrap/v3/bootstrap.upb.c',
'src/core/ext/upb-generated/envoy/config/cluster/v3/circuit_breaker.upb.c',
'src/core/ext/upb-generated/envoy/config/cluster/v3/cluster.upb.c',
'src/core/ext/upb-generated/envoy/config/cluster/v3/filter.upb.c',
'src/core/ext/upb-generated/envoy/config/cluster/v3/outlier_detection.upb.c',
'src/core/ext/upb-generated/envoy/config/common/matcher/v3/matcher.upb.c',
'src/core/ext/upb-generated/envoy/config/core/v3/address.upb.c',
'src/core/ext/upb-generated/envoy/config/core/v3/backoff.upb.c',
'src/core/ext/upb-generated/envoy/config/core/v3/base.upb.c',
'src/core/ext/upb-generated/envoy/config/core/v3/config_source.upb.c',
'src/core/ext/upb-generated/envoy/config/core/v3/event_service_config.upb.c',
'src/core/ext/upb-generated/envoy/config/core/v3/extension.upb.c',
'src/core/ext/upb-generated/envoy/config/core/v3/grpc_method_list.upb.c',
'src/core/ext/upb-generated/envoy/config/core/v3/grpc_service.upb.c',
'src/core/ext/upb-generated/envoy/config/core/v3/health_check.upb.c',
'src/core/ext/upb-generated/envoy/config/core/v3/http_uri.upb.c',
'src/core/ext/upb-generated/envoy/config/core/v3/protocol.upb.c',
'src/core/ext/upb-generated/envoy/config/core/v3/proxy_protocol.upb.c',
'src/core/ext/upb-generated/envoy/config/core/v3/resolver.upb.c',
'src/core/ext/upb-generated/envoy/config/core/v3/socket_option.upb.c',
'src/core/ext/upb-generated/envoy/config/core/v3/substitution_format_string.upb.c',
'src/core/ext/upb-generated/envoy/config/core/v3/udp_socket_config.upb.c',
'src/core/ext/upb-generated/envoy/config/endpoint/v3/endpoint.upb.c',
'src/core/ext/upb-generated/envoy/config/endpoint/v3/endpoint_components.upb.c',
'src/core/ext/upb-generated/envoy/config/endpoint/v3/load_report.upb.c',
'src/core/ext/upb-generated/envoy/config/listener/v3/api_listener.upb.c',
'src/core/ext/upb-generated/envoy/config/listener/v3/listener.upb.c',
'src/core/ext/upb-generated/envoy/config/listener/v3/listener_components.upb.c',
'src/core/ext/upb-generated/envoy/config/listener/v3/quic_config.upb.c',
'src/core/ext/upb-generated/envoy/config/listener/v3/udp_listener_config.upb.c',
'src/core/ext/upb-generated/envoy/config/metrics/v3/metrics_service.upb.c',
'src/core/ext/upb-generated/envoy/config/metrics/v3/stats.upb.c',
'src/core/ext/upb-generated/envoy/config/overload/v3/overload.upb.c',
'src/core/ext/upb-generated/envoy/config/rbac/v3/rbac.upb.c',
'src/core/ext/upb-generated/envoy/config/route/v3/route.upb.c',
'src/core/ext/upb-generated/envoy/config/route/v3/route_components.upb.c',
'src/core/ext/upb-generated/envoy/config/route/v3/scoped_route.upb.c',
'src/core/ext/upb-generated/envoy/config/tap/v3/common.upb.c',
'src/core/ext/upb-generated/envoy/config/trace/v3/datadog.upb.c',
'src/core/ext/upb-generated/envoy/config/trace/v3/dynamic_ot.upb.c',
'src/core/ext/upb-generated/envoy/config/trace/v3/http_tracer.upb.c',
'src/core/ext/upb-generated/envoy/config/trace/v3/lightstep.upb.c',
'src/core/ext/upb-generated/envoy/config/trace/v3/opencensus.upb.c',
'src/core/ext/upb-generated/envoy/config/trace/v3/service.upb.c',
'src/core/ext/upb-generated/envoy/config/trace/v3/skywalking.upb.c',
'src/core/ext/upb-generated/envoy/config/trace/v3/trace.upb.c',
'src/core/ext/upb-generated/envoy/config/trace/v3/xray.upb.c',
'src/core/ext/upb-generated/envoy/config/trace/v3/zipkin.upb.c',
'src/core/ext/upb-generated/envoy/extensions/clusters/aggregate/v3/cluster.upb.c',
'src/core/ext/upb-generated/envoy/extensions/filters/common/fault/v3/fault.upb.c',
'src/core/ext/upb-generated/envoy/extensions/filters/http/fault/v3/fault.upb.c',
'src/core/ext/upb-generated/envoy/extensions/filters/http/rbac/v3/rbac.upb.c',
'src/core/ext/upb-generated/envoy/extensions/filters/http/router/v3/router.upb.c',
'src/core/ext/upb-generated/envoy/extensions/filters/network/http_connection_manager/v3/http_connection_manager.upb.c',
'src/core/ext/upb-generated/envoy/extensions/transport_sockets/tls/v3/cert.upb.c',
'src/core/ext/upb-generated/envoy/extensions/transport_sockets/tls/v3/common.upb.c',
'src/core/ext/upb-generated/envoy/extensions/transport_sockets/tls/v3/secret.upb.c',
'src/core/ext/upb-generated/envoy/extensions/transport_sockets/tls/v3/tls.upb.c',
'src/core/ext/upb-generated/envoy/extensions/transport_sockets/tls/v3/tls_spiffe_validator_config.upb.c',
'src/core/ext/upb-generated/envoy/service/discovery/v3/ads.upb.c',
'src/core/ext/upb-generated/envoy/service/discovery/v3/discovery.upb.c',
'src/core/ext/upb-generated/envoy/service/load_stats/v3/lrs.upb.c',
'src/core/ext/upb-generated/envoy/service/status/v3/csds.upb.c',
'src/core/ext/upb-generated/envoy/type/http/v3/cookie.upb.c',
'src/core/ext/upb-generated/envoy/type/http/v3/path_transformation.upb.c',
'src/core/ext/upb-generated/envoy/type/matcher/v3/http_inputs.upb.c',
'src/core/ext/upb-generated/envoy/type/matcher/v3/metadata.upb.c',
'src/core/ext/upb-generated/envoy/type/matcher/v3/node.upb.c',
'src/core/ext/upb-generated/envoy/type/matcher/v3/number.upb.c',
'src/core/ext/upb-generated/envoy/type/matcher/v3/path.upb.c',
'src/core/ext/upb-generated/envoy/type/matcher/v3/regex.upb.c',
'src/core/ext/upb-generated/envoy/type/matcher/v3/string.upb.c',
'src/core/ext/upb-generated/envoy/type/matcher/v3/struct.upb.c',
'src/core/ext/upb-generated/envoy/type/matcher/v3/value.upb.c',
'src/core/ext/upb-generated/envoy/type/metadata/v3/metadata.upb.c',
'src/core/ext/upb-generated/envoy/type/tracing/v3/custom_tag.upb.c',
'src/core/ext/upb-generated/envoy/type/v3/hash_policy.upb.c',
'src/core/ext/upb-generated/envoy/type/v3/http.upb.c',
'src/core/ext/upb-generated/envoy/type/v3/http_status.upb.c',
'src/core/ext/upb-generated/envoy/type/v3/percent.upb.c',
'src/core/ext/upb-generated/envoy/type/v3/range.upb.c',
'src/core/ext/upb-generated/envoy/type/v3/ratelimit_unit.upb.c',
'src/core/ext/upb-generated/envoy/type/v3/semantic_version.upb.c',
'src/core/ext/upb-generated/envoy/type/v3/token_bucket.upb.c',
'src/core/ext/upb-generated/google/api/annotations.upb.c',
'src/core/ext/upb-generated/google/api/expr/v1alpha1/checked.upb.c',
'src/core/ext/upb-generated/google/api/expr/v1alpha1/syntax.upb.c',
'src/core/ext/upb-generated/google/api/http.upb.c',
'src/core/ext/upb-generated/google/api/httpbody.upb.c',
'src/core/ext/upb-generated/google/protobuf/any.upb.c',
'src/core/ext/upb-generated/google/protobuf/descriptor.upb.c',
'src/core/ext/upb-generated/google/protobuf/duration.upb.c',
'src/core/ext/upb-generated/google/protobuf/empty.upb.c',
'src/core/ext/upb-generated/google/protobuf/struct.upb.c',
'src/core/ext/upb-generated/google/protobuf/timestamp.upb.c',
'src/core/ext/upb-generated/google/protobuf/wrappers.upb.c',
'src/core/ext/upb-generated/google/rpc/status.upb.c',
'src/core/ext/upb-generated/opencensus/proto/trace/v1/trace_config.upb.c',
'src/core/ext/upb-generated/src/proto/grpc/gcp/altscontext.upb.c',
'src/core/ext/upb-generated/src/proto/grpc/gcp/handshaker.upb.c',
'src/core/ext/upb-generated/src/proto/grpc/gcp/transport_security_common.upb.c',
'src/core/ext/upb-generated/src/proto/grpc/health/v1/health.upb.c',
'src/core/ext/upb-generated/src/proto/grpc/lb/v1/load_balancer.upb.c',
'src/core/ext/upb-generated/src/proto/grpc/lookup/v1/rls.upb.c',
'src/core/ext/upb-generated/src/proto/grpc/lookup/v1/rls_config.upb.c',
'src/core/ext/upb-generated/udpa/annotations/migrate.upb.c',
'src/core/ext/upb-generated/udpa/annotations/security.upb.c',
'src/core/ext/upb-generated/udpa/annotations/sensitive.upb.c',
'src/core/ext/upb-generated/udpa/annotations/status.upb.c',
'src/core/ext/upb-generated/udpa/annotations/versioning.upb.c',
'src/core/ext/upb-generated/validate/validate.upb.c',
'src/core/ext/upb-generated/xds/annotations/v3/migrate.upb.c',
'src/core/ext/upb-generated/xds/annotations/v3/security.upb.c',
'src/core/ext/upb-generated/xds/annotations/v3/sensitive.upb.c',
'src/core/ext/upb-generated/xds/annotations/v3/status.upb.c',
'src/core/ext/upb-generated/xds/annotations/v3/versioning.upb.c',
'src/core/ext/upb-generated/xds/core/v3/authority.upb.c',
'src/core/ext/upb-generated/xds/core/v3/collection_entry.upb.c',
'src/core/ext/upb-generated/xds/core/v3/context_params.upb.c',
'src/core/ext/upb-generated/xds/core/v3/extension.upb.c',
'src/core/ext/upb-generated/xds/core/v3/resource.upb.c',
'src/core/ext/upb-generated/xds/core/v3/resource_locator.upb.c',
'src/core/ext/upb-generated/xds/core/v3/resource_name.upb.c',
'src/core/ext/upb-generated/xds/data/orca/v3/orca_load_report.upb.c',
'src/core/ext/upb-generated/xds/service/orca/v3/orca.upb.c',
'src/core/ext/upb-generated/xds/type/matcher/v3/matcher.upb.c',
'src/core/ext/upb-generated/xds/type/matcher/v3/regex.upb.c',
'src/core/ext/upb-generated/xds/type/matcher/v3/string.upb.c',
'src/core/ext/upb-generated/xds/type/v3/typed_struct.upb.c',
'src/core/ext/upbdefs-generated/envoy/admin/v3/certs.upbdefs.c',
'src/core/ext/upbdefs-generated/envoy/admin/v3/clusters.upbdefs.c',
'src/core/ext/upbdefs-generated/envoy/admin/v3/config_dump.upbdefs.c',
'src/core/ext/upbdefs-generated/envoy/admin/v3/init_dump.upbdefs.c',
'src/core/ext/upbdefs-generated/envoy/admin/v3/listeners.upbdefs.c',
'src/core/ext/upbdefs-generated/envoy/admin/v3/memory.upbdefs.c',
'src/core/ext/upbdefs-generated/envoy/admin/v3/metrics.upbdefs.c',
'src/core/ext/upbdefs-generated/envoy/admin/v3/mutex_stats.upbdefs.c',
'src/core/ext/upbdefs-generated/envoy/admin/v3/server_info.upbdefs.c',
'src/core/ext/upbdefs-generated/envoy/admin/v3/tap.upbdefs.c',
'src/core/ext/upbdefs-generated/envoy/annotations/deprecation.upbdefs.c',
'src/core/ext/upbdefs-generated/envoy/annotations/resource.upbdefs.c',
'src/core/ext/upbdefs-generated/envoy/config/accesslog/v3/accesslog.upbdefs.c',
'src/core/ext/upbdefs-generated/envoy/config/bootstrap/v3/bootstrap.upbdefs.c',
'src/core/ext/upbdefs-generated/envoy/config/cluster/v3/circuit_breaker.upbdefs.c',
'src/core/ext/upbdefs-generated/envoy/config/cluster/v3/cluster.upbdefs.c',
'src/core/ext/upbdefs-generated/envoy/config/cluster/v3/filter.upbdefs.c',
'src/core/ext/upbdefs-generated/envoy/config/cluster/v3/outlier_detection.upbdefs.c',
'src/core/ext/upbdefs-generated/envoy/config/common/matcher/v3/matcher.upbdefs.c',
'src/core/ext/upbdefs-generated/envoy/config/core/v3/address.upbdefs.c',
'src/core/ext/upbdefs-generated/envoy/config/core/v3/backoff.upbdefs.c',
'src/core/ext/upbdefs-generated/envoy/config/core/v3/base.upbdefs.c',
'src/core/ext/upbdefs-generated/envoy/config/core/v3/config_source.upbdefs.c',
'src/core/ext/upbdefs-generated/envoy/config/core/v3/event_service_config.upbdefs.c',
'src/core/ext/upbdefs-generated/envoy/config/core/v3/extension.upbdefs.c',
'src/core/ext/upbdefs-generated/envoy/config/core/v3/grpc_method_list.upbdefs.c',
'src/core/ext/upbdefs-generated/envoy/config/core/v3/grpc_service.upbdefs.c',
'src/core/ext/upbdefs-generated/envoy/config/core/v3/health_check.upbdefs.c',
'src/core/ext/upbdefs-generated/envoy/config/core/v3/http_uri.upbdefs.c',
'src/core/ext/upbdefs-generated/envoy/config/core/v3/protocol.upbdefs.c',
'src/core/ext/upbdefs-generated/envoy/config/core/v3/proxy_protocol.upbdefs.c',
'src/core/ext/upbdefs-generated/envoy/config/core/v3/resolver.upbdefs.c',
'src/core/ext/upbdefs-generated/envoy/config/core/v3/socket_option.upbdefs.c',
'src/core/ext/upbdefs-generated/envoy/config/core/v3/substitution_format_string.upbdefs.c',
'src/core/ext/upbdefs-generated/envoy/config/core/v3/udp_socket_config.upbdefs.c',
'src/core/ext/upbdefs-generated/envoy/config/endpoint/v3/endpoint.upbdefs.c',
'src/core/ext/upbdefs-generated/envoy/config/endpoint/v3/endpoint_components.upbdefs.c',
'src/core/ext/upbdefs-generated/envoy/config/endpoint/v3/load_report.upbdefs.c',
'src/core/ext/upbdefs-generated/envoy/config/listener/v3/api_listener.upbdefs.c',
'src/core/ext/upbdefs-generated/envoy/config/listener/v3/listener.upbdefs.c',
'src/core/ext/upbdefs-generated/envoy/config/listener/v3/listener_components.upbdefs.c',
'src/core/ext/upbdefs-generated/envoy/config/listener/v3/quic_config.upbdefs.c',
'src/core/ext/upbdefs-generated/envoy/config/listener/v3/udp_listener_config.upbdefs.c',
'src/core/ext/upbdefs-generated/envoy/config/metrics/v3/metrics_service.upbdefs.c',
'src/core/ext/upbdefs-generated/envoy/config/metrics/v3/stats.upbdefs.c',
'src/core/ext/upbdefs-generated/envoy/config/overload/v3/overload.upbdefs.c',
'src/core/ext/upbdefs-generated/envoy/config/rbac/v3/rbac.upbdefs.c',
'src/core/ext/upbdefs-generated/envoy/config/route/v3/route.upbdefs.c',
'src/core/ext/upbdefs-generated/envoy/config/route/v3/route_components.upbdefs.c',
'src/core/ext/upbdefs-generated/envoy/config/route/v3/scoped_route.upbdefs.c',
'src/core/ext/upbdefs-generated/envoy/config/tap/v3/common.upbdefs.c',
'src/core/ext/upbdefs-generated/envoy/config/trace/v3/datadog.upbdefs.c',
'src/core/ext/upbdefs-generated/envoy/config/trace/v3/dynamic_ot.upbdefs.c',
'src/core/ext/upbdefs-generated/envoy/config/trace/v3/http_tracer.upbdefs.c',
'src/core/ext/upbdefs-generated/envoy/config/trace/v3/lightstep.upbdefs.c',
'src/core/ext/upbdefs-generated/envoy/config/trace/v3/opencensus.upbdefs.c',
'src/core/ext/upbdefs-generated/envoy/config/trace/v3/service.upbdefs.c',
'src/core/ext/upbdefs-generated/envoy/config/trace/v3/skywalking.upbdefs.c',
'src/core/ext/upbdefs-generated/envoy/config/trace/v3/trace.upbdefs.c',
'src/core/ext/upbdefs-generated/envoy/config/trace/v3/xray.upbdefs.c',
'src/core/ext/upbdefs-generated/envoy/config/trace/v3/zipkin.upbdefs.c',
'src/core/ext/upbdefs-generated/envoy/extensions/clusters/aggregate/v3/cluster.upbdefs.c',
'src/core/ext/upbdefs-generated/envoy/extensions/filters/common/fault/v3/fault.upbdefs.c',
'src/core/ext/upbdefs-generated/envoy/extensions/filters/http/fault/v3/fault.upbdefs.c',
'src/core/ext/upbdefs-generated/envoy/extensions/filters/http/rbac/v3/rbac.upbdefs.c',
'src/core/ext/upbdefs-generated/envoy/extensions/filters/http/router/v3/router.upbdefs.c',
'src/core/ext/upbdefs-generated/envoy/extensions/filters/network/http_connection_manager/v3/http_connection_manager.upbdefs.c',
'src/core/ext/upbdefs-generated/envoy/extensions/transport_sockets/tls/v3/cert.upbdefs.c',
'src/core/ext/upbdefs-generated/envoy/extensions/transport_sockets/tls/v3/common.upbdefs.c',
'src/core/ext/upbdefs-generated/envoy/extensions/transport_sockets/tls/v3/secret.upbdefs.c',
'src/core/ext/upbdefs-generated/envoy/extensions/transport_sockets/tls/v3/tls.upbdefs.c',
'src/core/ext/upbdefs-generated/envoy/extensions/transport_sockets/tls/v3/tls_spiffe_validator_config.upbdefs.c',
'src/core/ext/upbdefs-generated/envoy/service/discovery/v3/ads.upbdefs.c',
'src/core/ext/upbdefs-generated/envoy/service/discovery/v3/discovery.upbdefs.c',
'src/core/ext/upbdefs-generated/envoy/service/load_stats/v3/lrs.upbdefs.c',
'src/core/ext/upbdefs-generated/envoy/service/status/v3/csds.upbdefs.c',
'src/core/ext/upbdefs-generated/envoy/type/http/v3/cookie.upbdefs.c',
'src/core/ext/upbdefs-generated/envoy/type/http/v3/path_transformation.upbdefs.c',
'src/core/ext/upbdefs-generated/envoy/type/matcher/v3/http_inputs.upbdefs.c',
'src/core/ext/upbdefs-generated/envoy/type/matcher/v3/metadata.upbdefs.c',
'src/core/ext/upbdefs-generated/envoy/type/matcher/v3/node.upbdefs.c',
'src/core/ext/upbdefs-generated/envoy/type/matcher/v3/number.upbdefs.c',
'src/core/ext/upbdefs-generated/envoy/type/matcher/v3/path.upbdefs.c',
'src/core/ext/upbdefs-generated/envoy/type/matcher/v3/regex.upbdefs.c',
'src/core/ext/upbdefs-generated/envoy/type/matcher/v3/string.upbdefs.c',
'src/core/ext/upbdefs-generated/envoy/type/matcher/v3/struct.upbdefs.c',
'src/core/ext/upbdefs-generated/envoy/type/matcher/v3/value.upbdefs.c',
'src/core/ext/upbdefs-generated/envoy/type/metadata/v3/metadata.upbdefs.c',
'src/core/ext/upbdefs-generated/envoy/type/tracing/v3/custom_tag.upbdefs.c',
'src/core/ext/upbdefs-generated/envoy/type/v3/hash_policy.upbdefs.c',
'src/core/ext/upbdefs-generated/envoy/type/v3/http.upbdefs.c',
'src/core/ext/upbdefs-generated/envoy/type/v3/http_status.upbdefs.c',
'src/core/ext/upbdefs-generated/envoy/type/v3/percent.upbdefs.c',
'src/core/ext/upbdefs-generated/envoy/type/v3/range.upbdefs.c',
'src/core/ext/upbdefs-generated/envoy/type/v3/ratelimit_unit.upbdefs.c',
'src/core/ext/upbdefs-generated/envoy/type/v3/semantic_version.upbdefs.c',
'src/core/ext/upbdefs-generated/envoy/type/v3/token_bucket.upbdefs.c',
'src/core/ext/upbdefs-generated/google/api/annotations.upbdefs.c',
'src/core/ext/upbdefs-generated/google/api/expr/v1alpha1/checked.upbdefs.c',
'src/core/ext/upbdefs-generated/google/api/expr/v1alpha1/syntax.upbdefs.c',
'src/core/ext/upbdefs-generated/google/api/http.upbdefs.c',
'src/core/ext/upbdefs-generated/google/api/httpbody.upbdefs.c',
'src/core/ext/upbdefs-generated/google/protobuf/any.upbdefs.c',
'src/core/ext/upbdefs-generated/google/protobuf/descriptor.upbdefs.c',
'src/core/ext/upbdefs-generated/google/protobuf/duration.upbdefs.c',
'src/core/ext/upbdefs-generated/google/protobuf/empty.upbdefs.c',
'src/core/ext/upbdefs-generated/google/protobuf/struct.upbdefs.c',
'src/core/ext/upbdefs-generated/google/protobuf/timestamp.upbdefs.c',
'src/core/ext/upbdefs-generated/google/protobuf/wrappers.upbdefs.c',
'src/core/ext/upbdefs-generated/google/rpc/status.upbdefs.c',
'src/core/ext/upbdefs-generated/opencensus/proto/trace/v1/trace_config.upbdefs.c',
'src/core/ext/upbdefs-generated/src/proto/grpc/lookup/v1/rls_config.upbdefs.c',
'src/core/ext/upbdefs-generated/udpa/annotations/migrate.upbdefs.c',
'src/core/ext/upbdefs-generated/udpa/annotations/security.upbdefs.c',
'src/core/ext/upbdefs-generated/udpa/annotations/sensitive.upbdefs.c',
'src/core/ext/upbdefs-generated/udpa/annotations/status.upbdefs.c',
'src/core/ext/upbdefs-generated/udpa/annotations/versioning.upbdefs.c',
'src/core/ext/upbdefs-generated/validate/validate.upbdefs.c',
'src/core/ext/upbdefs-generated/xds/annotations/v3/migrate.upbdefs.c',
'src/core/ext/upbdefs-generated/xds/annotations/v3/security.upbdefs.c',
'src/core/ext/upbdefs-generated/xds/annotations/v3/sensitive.upbdefs.c',
'src/core/ext/upbdefs-generated/xds/annotations/v3/status.upbdefs.c',
'src/core/ext/upbdefs-generated/xds/annotations/v3/versioning.upbdefs.c',
'src/core/ext/upbdefs-generated/xds/core/v3/authority.upbdefs.c',
'src/core/ext/upbdefs-generated/xds/core/v3/collection_entry.upbdefs.c',
'src/core/ext/upbdefs-generated/xds/core/v3/context_params.upbdefs.c',
'src/core/ext/upbdefs-generated/xds/core/v3/extension.upbdefs.c',
'src/core/ext/upbdefs-generated/xds/core/v3/resource.upbdefs.c',
'src/core/ext/upbdefs-generated/xds/core/v3/resource_locator.upbdefs.c',
'src/core/ext/upbdefs-generated/xds/core/v3/resource_name.upbdefs.c',
'src/core/ext/upbdefs-generated/xds/type/matcher/v3/matcher.upbdefs.c',
'src/core/ext/upbdefs-generated/xds/type/matcher/v3/regex.upbdefs.c',
'src/core/ext/upbdefs-generated/xds/type/matcher/v3/string.upbdefs.c',
'src/core/ext/upbdefs-generated/xds/type/v3/typed_struct.upbdefs.c',
'src/core/ext/xds/certificate_provider_registry.cc',
'src/core/ext/xds/certificate_provider_store.cc',
'src/core/ext/xds/file_watcher_certificate_provider_factory.cc',
'src/core/ext/xds/xds_api.cc',
'src/core/ext/xds/xds_bootstrap.cc',
'src/core/ext/xds/xds_certificate_provider.cc',
'src/core/ext/xds/xds_channel_stack_modifier.cc',
'src/core/ext/xds/xds_client.cc',
'src/core/ext/xds/xds_client_stats.cc',
'src/core/ext/xds/xds_cluster.cc',
'src/core/ext/xds/xds_cluster_specifier_plugin.cc',
'src/core/ext/xds/xds_common_types.cc',
'src/core/ext/xds/xds_endpoint.cc',
'src/core/ext/xds/xds_http_fault_filter.cc',
'src/core/ext/xds/xds_http_filters.cc',
'src/core/ext/xds/xds_http_rbac_filter.cc',
'src/core/ext/xds/xds_listener.cc',
'src/core/ext/xds/xds_resource_type.cc',
'src/core/ext/xds/xds_route_config.cc',
'src/core/ext/xds/xds_routing.cc',
'src/core/ext/xds/xds_server_config_fetcher.cc',
'src/core/lib/address_utils/parse_address.cc',
'src/core/lib/address_utils/sockaddr_utils.cc',
'src/core/lib/backoff/backoff.cc',
'src/core/lib/channel/channel_args.cc',
'src/core/lib/channel/channel_args_preconditioning.cc',
'src/core/lib/channel/channel_stack.cc',
'src/core/lib/channel/channel_stack_builder.cc',
'src/core/lib/channel/channel_stack_builder_impl.cc',
'src/core/lib/channel/channel_trace.cc',
'src/core/lib/channel/channelz.cc',
'src/core/lib/channel/channelz_registry.cc',
'src/core/lib/channel/connected_channel.cc',
'src/core/lib/channel/promise_based_filter.cc',
'src/core/lib/channel/status_util.cc',
'src/core/lib/compression/compression.cc',
'src/core/lib/compression/compression_internal.cc',
'src/core/lib/compression/message_compress.cc',
'src/core/lib/config/core_configuration.cc',
'src/core/lib/debug/stats.cc',
'src/core/lib/debug/stats_data.cc',
'src/core/lib/debug/trace.cc',
'src/core/lib/event_engine/channel_args_endpoint_config.cc',
'src/core/lib/event_engine/default_event_engine_factory.cc',
'src/core/lib/event_engine/event_engine.cc',
'src/core/lib/event_engine/memory_allocator.cc',
'src/core/lib/event_engine/resolved_address.cc',
'src/core/lib/event_engine/slice.cc',
'src/core/lib/event_engine/slice_buffer.cc',
'src/core/lib/event_engine/sockaddr.cc',
'src/core/lib/gprpp/time.cc',
'src/core/lib/http/format_request.cc',
'src/core/lib/http/httpcli.cc',
'src/core/lib/http/httpcli_security_connector.cc',
'src/core/lib/http/parser.cc',
'src/core/lib/iomgr/buffer_list.cc',
'src/core/lib/iomgr/call_combiner.cc',
'src/core/lib/iomgr/cfstream_handle.cc',
'src/core/lib/iomgr/combiner.cc',
'src/core/lib/iomgr/dualstack_socket_posix.cc',
'src/core/lib/iomgr/endpoint.cc',
'src/core/lib/iomgr/endpoint_cfstream.cc',
'src/core/lib/iomgr/endpoint_pair_event_engine.cc',
'src/core/lib/iomgr/endpoint_pair_posix.cc',
'src/core/lib/iomgr/endpoint_pair_windows.cc',
'src/core/lib/iomgr/error.cc',
'src/core/lib/iomgr/error_cfstream.cc',
'src/core/lib/iomgr/ev_apple.cc',
'src/core/lib/iomgr/ev_epoll1_linux.cc',
'src/core/lib/iomgr/ev_poll_posix.cc',
'src/core/lib/iomgr/ev_posix.cc',
'src/core/lib/iomgr/ev_windows.cc',
'src/core/lib/iomgr/event_engine/closure.cc',
'src/core/lib/iomgr/event_engine/endpoint.cc',
'src/core/lib/iomgr/event_engine/iomgr.cc',
'src/core/lib/iomgr/event_engine/pollset.cc',
'src/core/lib/iomgr/event_engine/resolved_address_internal.cc',
'src/core/lib/iomgr/event_engine/resolver.cc',
'src/core/lib/iomgr/event_engine/tcp.cc',
'src/core/lib/iomgr/event_engine/timer.cc',
'src/core/lib/iomgr/exec_ctx.cc',
'src/core/lib/iomgr/executor.cc',
'src/core/lib/iomgr/executor/mpmcqueue.cc',
'src/core/lib/iomgr/executor/threadpool.cc',
'src/core/lib/iomgr/fork_posix.cc',
'src/core/lib/iomgr/fork_windows.cc',
'src/core/lib/iomgr/gethostname_fallback.cc',
'src/core/lib/iomgr/gethostname_host_name_max.cc',
'src/core/lib/iomgr/gethostname_sysconf.cc',
'src/core/lib/iomgr/grpc_if_nametoindex_posix.cc',
'src/core/lib/iomgr/grpc_if_nametoindex_unsupported.cc',
'src/core/lib/iomgr/internal_errqueue.cc',
'src/core/lib/iomgr/iocp_windows.cc',
'src/core/lib/iomgr/iomgr.cc',
'src/core/lib/iomgr/iomgr_internal.cc',
'src/core/lib/iomgr/iomgr_posix.cc',
'src/core/lib/iomgr/iomgr_posix_cfstream.cc',
'src/core/lib/iomgr/iomgr_windows.cc',
'src/core/lib/iomgr/load_file.cc',
'src/core/lib/iomgr/lockfree_event.cc',
'src/core/lib/iomgr/polling_entity.cc',
'src/core/lib/iomgr/pollset.cc',
'src/core/lib/iomgr/pollset_set.cc',
'src/core/lib/iomgr/pollset_set_windows.cc',
'src/core/lib/iomgr/pollset_windows.cc',
'src/core/lib/iomgr/resolve_address.cc',
'src/core/lib/iomgr/resolve_address_posix.cc',
'src/core/lib/iomgr/resolve_address_windows.cc',
'src/core/lib/iomgr/sockaddr_utils_posix.cc',
'src/core/lib/iomgr/socket_factory_posix.cc',
'src/core/lib/iomgr/socket_mutator.cc',
'src/core/lib/iomgr/socket_utils_common_posix.cc',
'src/core/lib/iomgr/socket_utils_linux.cc',
'src/core/lib/iomgr/socket_utils_posix.cc',
'src/core/lib/iomgr/socket_utils_windows.cc',
'src/core/lib/iomgr/socket_windows.cc',
'src/core/lib/iomgr/tcp_client.cc',
'src/core/lib/iomgr/tcp_client_cfstream.cc',
'src/core/lib/iomgr/tcp_client_posix.cc',
'src/core/lib/iomgr/tcp_client_windows.cc',
'src/core/lib/iomgr/tcp_posix.cc',
'src/core/lib/iomgr/tcp_server.cc',
'src/core/lib/iomgr/tcp_server_posix.cc',
'src/core/lib/iomgr/tcp_server_utils_posix_common.cc',
'src/core/lib/iomgr/tcp_server_utils_posix_ifaddrs.cc',
'src/core/lib/iomgr/tcp_server_utils_posix_noifaddrs.cc',
'src/core/lib/iomgr/tcp_server_windows.cc',
'src/core/lib/iomgr/tcp_windows.cc',
'src/core/lib/iomgr/time_averaged_stats.cc',
'src/core/lib/iomgr/timer.cc',
'src/core/lib/iomgr/timer_generic.cc',
'src/core/lib/iomgr/timer_heap.cc',
'src/core/lib/iomgr/timer_manager.cc',
'src/core/lib/iomgr/unix_sockets_posix.cc',
'src/core/lib/iomgr/unix_sockets_posix_noop.cc',
'src/core/lib/iomgr/wakeup_fd_eventfd.cc',
'src/core/lib/iomgr/wakeup_fd_nospecial.cc',
'src/core/lib/iomgr/wakeup_fd_pipe.cc',
'src/core/lib/iomgr/wakeup_fd_posix.cc',
'src/core/lib/iomgr/work_serializer.cc',
'src/core/lib/json/json_reader.cc',
'src/core/lib/json/json_util.cc',
'src/core/lib/json/json_writer.cc',
'src/core/lib/matchers/matchers.cc',
'src/core/lib/promise/activity.cc',
'src/core/lib/promise/sleep.cc',
'src/core/lib/resolver/resolver.cc',
'src/core/lib/resolver/resolver_registry.cc',
'src/core/lib/resolver/server_address.cc',
'src/core/lib/resource_quota/api.cc',
'src/core/lib/resource_quota/arena.cc',
'src/core/lib/resource_quota/memory_quota.cc',
'src/core/lib/resource_quota/resource_quota.cc',
'src/core/lib/resource_quota/thread_quota.cc',
'src/core/lib/resource_quota/trace.cc',
'src/core/lib/security/authorization/authorization_policy_provider_vtable.cc',
'src/core/lib/security/authorization/evaluate_args.cc',
'src/core/lib/security/authorization/grpc_authorization_engine.cc',
'src/core/lib/security/authorization/grpc_server_authz_filter.cc',
'src/core/lib/security/authorization/matchers.cc',
'src/core/lib/security/authorization/rbac_policy.cc',
'src/core/lib/security/context/security_context.cc',
'src/core/lib/security/credentials/alts/alts_credentials.cc',
'src/core/lib/security/credentials/alts/check_gcp_environment.cc',
'src/core/lib/security/credentials/alts/check_gcp_environment_linux.cc',
'src/core/lib/security/credentials/alts/check_gcp_environment_no_op.cc',
'src/core/lib/security/credentials/alts/check_gcp_environment_windows.cc',
'src/core/lib/security/credentials/alts/grpc_alts_credentials_client_options.cc',
'src/core/lib/security/credentials/alts/grpc_alts_credentials_options.cc',
'src/core/lib/security/credentials/alts/grpc_alts_credentials_server_options.cc',
'src/core/lib/security/credentials/call_creds_util.cc',
'src/core/lib/security/credentials/channel_creds_registry_init.cc',
'src/core/lib/security/credentials/composite/composite_credentials.cc',
'src/core/lib/security/credentials/credentials.cc',
'src/core/lib/security/credentials/external/aws_external_account_credentials.cc',
'src/core/lib/security/credentials/external/aws_request_signer.cc',
'src/core/lib/security/credentials/external/external_account_credentials.cc',
'src/core/lib/security/credentials/external/file_external_account_credentials.cc',
'src/core/lib/security/credentials/external/url_external_account_credentials.cc',
'src/core/lib/security/credentials/fake/fake_credentials.cc',
'src/core/lib/security/credentials/google_default/credentials_generic.cc',
'src/core/lib/security/credentials/google_default/google_default_credentials.cc',
'src/core/lib/security/credentials/iam/iam_credentials.cc',
'src/core/lib/security/credentials/insecure/insecure_credentials.cc',
'src/core/lib/security/credentials/jwt/json_token.cc',
'src/core/lib/security/credentials/jwt/jwt_credentials.cc',
'src/core/lib/security/credentials/jwt/jwt_verifier.cc',
'src/core/lib/security/credentials/local/local_credentials.cc',
'src/core/lib/security/credentials/oauth2/oauth2_credentials.cc',
'src/core/lib/security/credentials/plugin/plugin_credentials.cc',
'src/core/lib/security/credentials/ssl/ssl_credentials.cc',
'src/core/lib/security/credentials/tls/grpc_tls_certificate_distributor.cc',
'src/core/lib/security/credentials/tls/grpc_tls_certificate_provider.cc',
'src/core/lib/security/credentials/tls/grpc_tls_certificate_verifier.cc',
'src/core/lib/security/credentials/tls/grpc_tls_credentials_options.cc',
'src/core/lib/security/credentials/tls/tls_credentials.cc',
'src/core/lib/security/credentials/tls/tls_utils.cc',
'src/core/lib/security/credentials/xds/xds_credentials.cc',
'src/core/lib/security/security_connector/alts/alts_security_connector.cc',
'src/core/lib/security/security_connector/fake/fake_security_connector.cc',
'src/core/lib/security/security_connector/insecure/insecure_security_connector.cc',
'src/core/lib/security/security_connector/load_system_roots_fallback.cc',
'src/core/lib/security/security_connector/load_system_roots_linux.cc',
'src/core/lib/security/security_connector/local/local_security_connector.cc',
'src/core/lib/security/security_connector/security_connector.cc',
'src/core/lib/security/security_connector/ssl/ssl_security_connector.cc',
'src/core/lib/security/security_connector/ssl_utils.cc',
'src/core/lib/security/security_connector/ssl_utils_config.cc',
'src/core/lib/security/security_connector/tls/tls_security_connector.cc',
'src/core/lib/security/transport/client_auth_filter.cc',
'src/core/lib/security/transport/secure_endpoint.cc',
'src/core/lib/security/transport/security_handshaker.cc',
'src/core/lib/security/transport/server_auth_filter.cc',
'src/core/lib/security/transport/tsi_error.cc',
'src/core/lib/security/util/json_util.cc',
'src/core/lib/service_config/service_config_impl.cc',
'src/core/lib/service_config/service_config_parser.cc',
'src/core/lib/slice/b64.cc',
'src/core/lib/slice/percent_encoding.cc',
'src/core/lib/slice/slice.cc',
'src/core/lib/slice/slice_api.cc',
'src/core/lib/slice/slice_buffer.cc',
'src/core/lib/slice/slice_buffer_api.cc',
'src/core/lib/slice/slice_refcount.cc',
'src/core/lib/slice/slice_split.cc',
'src/core/lib/slice/slice_string_helpers.cc',
'src/core/lib/surface/api_trace.cc',
'src/core/lib/surface/builtins.cc',
'src/core/lib/surface/byte_buffer.cc',
'src/core/lib/surface/byte_buffer_reader.cc',
'src/core/lib/surface/call.cc',
'src/core/lib/surface/call_details.cc',
'src/core/lib/surface/call_log_batch.cc',
'src/core/lib/surface/channel.cc',
'src/core/lib/surface/channel_init.cc',
'src/core/lib/surface/channel_ping.cc',
'src/core/lib/surface/channel_stack_type.cc',
'src/core/lib/surface/completion_queue.cc',
'src/core/lib/surface/completion_queue_factory.cc',
'src/core/lib/surface/event_string.cc',
'src/core/lib/surface/init.cc',
'src/core/lib/surface/lame_client.cc',
'src/core/lib/surface/metadata_array.cc',
'src/core/lib/surface/server.cc',
'src/core/lib/surface/validate_metadata.cc',
'src/core/lib/surface/version.cc',
'src/core/lib/transport/bdp_estimator.cc',
'src/core/lib/transport/byte_stream.cc',
'src/core/lib/transport/connectivity_state.cc',
'src/core/lib/transport/error_utils.cc',
'src/core/lib/transport/handshaker.cc',
'src/core/lib/transport/handshaker_registry.cc',
'src/core/lib/transport/http_connect_handshaker.cc',
'src/core/lib/transport/metadata_batch.cc',
'src/core/lib/transport/parsed_metadata.cc',
'src/core/lib/transport/pid_controller.cc',
'src/core/lib/transport/status_conversion.cc',
'src/core/lib/transport/tcp_connect_handshaker.cc',
'src/core/lib/transport/timeout_encoding.cc',
'src/core/lib/transport/transport.cc',
'src/core/lib/transport/transport_op_string.cc',
'src/core/lib/uri/uri_parser.cc',
'src/core/plugin_registry/grpc_plugin_registry.cc',
'src/core/plugin_registry/grpc_plugin_registry_extra.cc',
'src/core/tsi/alts/crypt/aes_gcm.cc',
'src/core/tsi/alts/crypt/gsec.cc',
'src/core/tsi/alts/frame_protector/alts_counter.cc',
'src/core/tsi/alts/frame_protector/alts_crypter.cc',
'src/core/tsi/alts/frame_protector/alts_frame_protector.cc',
'src/core/tsi/alts/frame_protector/alts_record_protocol_crypter_common.cc',
'src/core/tsi/alts/frame_protector/alts_seal_privacy_integrity_crypter.cc',
'src/core/tsi/alts/frame_protector/alts_unseal_privacy_integrity_crypter.cc',
'src/core/tsi/alts/frame_protector/frame_handler.cc',
'src/core/tsi/alts/handshaker/alts_handshaker_client.cc',
'src/core/tsi/alts/handshaker/alts_shared_resource.cc',
'src/core/tsi/alts/handshaker/alts_tsi_handshaker.cc',
'src/core/tsi/alts/handshaker/alts_tsi_utils.cc',
'src/core/tsi/alts/handshaker/transport_security_common_api.cc',
'src/core/tsi/alts/zero_copy_frame_protector/alts_grpc_integrity_only_record_protocol.cc',
'src/core/tsi/alts/zero_copy_frame_protector/alts_grpc_privacy_integrity_record_protocol.cc',
'src/core/tsi/alts/zero_copy_frame_protector/alts_grpc_record_protocol_common.cc',
'src/core/tsi/alts/zero_copy_frame_protector/alts_iovec_record_protocol.cc',
'src/core/tsi/alts/zero_copy_frame_protector/alts_zero_copy_grpc_protector.cc',
'src/core/tsi/fake_transport_security.cc',
'src/core/tsi/local_transport_security.cc',
'src/core/tsi/ssl/key_logging/ssl_key_logging.cc',
'src/core/tsi/ssl/session_cache/ssl_session_boringssl.cc',
'src/core/tsi/ssl/session_cache/ssl_session_cache.cc',
'src/core/tsi/ssl/session_cache/ssl_session_openssl.cc',
'src/core/tsi/ssl_transport_security.cc',
'src/core/tsi/transport_security.cc',
'src/core/tsi/transport_security_grpc.cc',
],
},
    {
      # Shared C-core test helper library (secure variant): links against the
      # full 'grpc' library plus abseil debugging support (failure handler,
      # stack traces, symbolization) and bundles the common test/core/util
      # sources (ports, endpoints, subprocess helpers, test_config, etc.).
      'target_name': 'grpc_test_util',
      'type': 'static_library',
      'dependencies': [
        'absl/debugging:failure_signal_handler',
        'absl/debugging:stacktrace',
        'absl/debugging:symbolize',
        'grpc',
      ],
      'sources': [
        'test/core/event_engine/test_init.cc',
        'test/core/util/build.cc',
        'test/core/util/cmdline.cc',
        'test/core/util/fuzzer_util.cc',
        'test/core/util/grpc_profiler.cc',
        'test/core/util/histogram.cc',
        'test/core/util/mock_endpoint.cc',
        'test/core/util/parse_hexstring.cc',
        'test/core/util/passthru_endpoint.cc',
        'test/core/util/port.cc',
        'test/core/util/port_isolated_runtime_environment.cc',
        'test/core/util/port_server_client.cc',
        'test/core/util/reconnect_server.cc',
        'test/core/util/resolve_localhost_ip46.cc',
        'test/core/util/slice_splitter.cc',
        'test/core/util/stack_tracer.cc',
        'test/core/util/subprocess_posix.cc',
        'test/core/util/subprocess_windows.cc',
        'test/core/util/test_config.cc',
        'test/core/util/test_tcp_server.cc',
        'test/core/util/tls_utils.cc',
        'test/core/util/tracer_util.cc',
      ],
    },
    {
      # Unsecure counterpart of grpc_test_util: identical helper sources except
      # test/core/util/tls_utils.cc, and links against 'grpc_unsecure' instead
      # of 'grpc'.
      'target_name': 'grpc_test_util_unsecure',
      'type': 'static_library',
      'dependencies': [
        'absl/debugging:failure_signal_handler',
        'absl/debugging:stacktrace',
        'absl/debugging:symbolize',
        'grpc_unsecure',
      ],
      'sources': [
        'test/core/event_engine/test_init.cc',
        'test/core/util/build.cc',
        'test/core/util/cmdline.cc',
        'test/core/util/fuzzer_util.cc',
        'test/core/util/grpc_profiler.cc',
        'test/core/util/histogram.cc',
        'test/core/util/mock_endpoint.cc',
        'test/core/util/parse_hexstring.cc',
        'test/core/util/passthru_endpoint.cc',
        'test/core/util/port.cc',
        'test/core/util/port_isolated_runtime_environment.cc',
        'test/core/util/port_server_client.cc',
        'test/core/util/reconnect_server.cc',
        'test/core/util/resolve_localhost_ip46.cc',
        'test/core/util/slice_splitter.cc',
        'test/core/util/stack_tracer.cc',
        'test/core/util/subprocess_posix.cc',
        'test/core/util/subprocess_windows.cc',
        'test/core/util/test_config.cc',
        'test/core/util/test_tcp_server.cc',
        'test/core/util/tracer_util.cc',
      ],
    },
    {
      # gRPC C-core library built without SSL/TLS transport security.
      # NOTE(review): the tsi/ sources here include only fake/local transport
      # security (no src/core/tsi/ssl*), and the credentials sources are
      # limited to fake/insecure/composite/plugin — presumably this is the
      # no-OpenSSL build; confirm against the secure 'grpc' target above.
      'target_name': 'grpc_unsecure',
      'type': 'static_library',
      'dependencies': [
        'absl/container:flat_hash_map',
        'absl/container:inlined_vector',
        'absl/functional:bind_front',
        'absl/hash:hash',
        'absl/meta:type_traits',
        'absl/status:statusor',
        'absl/types:span',
        'absl/types:variant',
        'absl/utility:utility',
        'gpr',
        'address_sorting',
      ],
      'sources': [
        'src/core/ext/filters/census/grpc_context.cc',
        'src/core/ext/filters/channel_idle/channel_idle_filter.cc',
        'src/core/ext/filters/channel_idle/idle_filter_state.cc',
        'src/core/ext/filters/client_channel/backend_metric.cc',
        'src/core/ext/filters/client_channel/backup_poller.cc',
        'src/core/ext/filters/client_channel/channel_connectivity.cc',
        'src/core/ext/filters/client_channel/client_channel.cc',
        'src/core/ext/filters/client_channel/client_channel_channelz.cc',
        'src/core/ext/filters/client_channel/client_channel_factory.cc',
        'src/core/ext/filters/client_channel/client_channel_plugin.cc',
        'src/core/ext/filters/client_channel/config_selector.cc',
        'src/core/ext/filters/client_channel/dynamic_filters.cc',
        'src/core/ext/filters/client_channel/global_subchannel_pool.cc',
        'src/core/ext/filters/client_channel/health/health_check_client.cc',
        'src/core/ext/filters/client_channel/http_proxy.cc',
        'src/core/ext/filters/client_channel/lb_policy.cc',
        'src/core/ext/filters/client_channel/lb_policy/address_filtering.cc',
        'src/core/ext/filters/client_channel/lb_policy/child_policy_handler.cc',
        'src/core/ext/filters/client_channel/lb_policy/grpclb/client_load_reporting_filter.cc',
        'src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb.cc',
        'src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb_balancer_addresses.cc',
        'src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb_client_stats.cc',
        'src/core/ext/filters/client_channel/lb_policy/grpclb/load_balancer_api.cc',
        'src/core/ext/filters/client_channel/lb_policy/oob_backend_metric.cc',
        'src/core/ext/filters/client_channel/lb_policy/pick_first/pick_first.cc',
        'src/core/ext/filters/client_channel/lb_policy/priority/priority.cc',
        'src/core/ext/filters/client_channel/lb_policy/ring_hash/ring_hash.cc',
        'src/core/ext/filters/client_channel/lb_policy/rls/rls.cc',
        'src/core/ext/filters/client_channel/lb_policy/round_robin/round_robin.cc',
        'src/core/ext/filters/client_channel/lb_policy/weighted_target/weighted_target.cc',
        'src/core/ext/filters/client_channel/lb_policy_registry.cc',
        'src/core/ext/filters/client_channel/local_subchannel_pool.cc',
        'src/core/ext/filters/client_channel/proxy_mapper_registry.cc',
        'src/core/ext/filters/client_channel/resolver/binder/binder_resolver.cc',
        'src/core/ext/filters/client_channel/resolver/dns/c_ares/dns_resolver_ares.cc',
        'src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_ev_driver_event_engine.cc',
        'src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_ev_driver_posix.cc',
        'src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_ev_driver_windows.cc',
        'src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper.cc',
        'src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper_event_engine.cc',
        'src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper_posix.cc',
        'src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper_windows.cc',
        'src/core/ext/filters/client_channel/resolver/dns/dns_resolver_selection.cc',
        'src/core/ext/filters/client_channel/resolver/dns/native/dns_resolver.cc',
        'src/core/ext/filters/client_channel/resolver/fake/fake_resolver.cc',
        'src/core/ext/filters/client_channel/resolver/polling_resolver.cc',
        'src/core/ext/filters/client_channel/resolver/sockaddr/sockaddr_resolver.cc',
        'src/core/ext/filters/client_channel/resolver_result_parsing.cc',
        'src/core/ext/filters/client_channel/retry_filter.cc',
        'src/core/ext/filters/client_channel/retry_service_config.cc',
        'src/core/ext/filters/client_channel/retry_throttle.cc',
        'src/core/ext/filters/client_channel/service_config_channel_arg_filter.cc',
        'src/core/ext/filters/client_channel/subchannel.cc',
        'src/core/ext/filters/client_channel/subchannel_pool_interface.cc',
        'src/core/ext/filters/client_channel/subchannel_stream_client.cc',
        'src/core/ext/filters/deadline/deadline_filter.cc',
        'src/core/ext/filters/fault_injection/fault_injection_filter.cc',
        'src/core/ext/filters/fault_injection/service_config_parser.cc',
        'src/core/ext/filters/http/client/http_client_filter.cc',
        'src/core/ext/filters/http/client_authority_filter.cc',
        'src/core/ext/filters/http/http_filters_plugin.cc',
        'src/core/ext/filters/http/message_compress/message_compress_filter.cc',
        'src/core/ext/filters/http/message_compress/message_decompress_filter.cc',
        'src/core/ext/filters/http/server/http_server_filter.cc',
        'src/core/ext/filters/message_size/message_size_filter.cc',
        'src/core/ext/transport/chttp2/alpn/alpn.cc',
        'src/core/ext/transport/chttp2/client/chttp2_connector.cc',
        'src/core/ext/transport/chttp2/server/chttp2_server.cc',
        'src/core/ext/transport/chttp2/transport/bin_decoder.cc',
        'src/core/ext/transport/chttp2/transport/bin_encoder.cc',
        'src/core/ext/transport/chttp2/transport/chttp2_transport.cc',
        'src/core/ext/transport/chttp2/transport/context_list.cc',
        'src/core/ext/transport/chttp2/transport/flow_control.cc',
        'src/core/ext/transport/chttp2/transport/frame_data.cc',
        'src/core/ext/transport/chttp2/transport/frame_goaway.cc',
        'src/core/ext/transport/chttp2/transport/frame_ping.cc',
        'src/core/ext/transport/chttp2/transport/frame_rst_stream.cc',
        'src/core/ext/transport/chttp2/transport/frame_settings.cc',
        'src/core/ext/transport/chttp2/transport/frame_window_update.cc',
        'src/core/ext/transport/chttp2/transport/hpack_encoder.cc',
        'src/core/ext/transport/chttp2/transport/hpack_encoder_table.cc',
        'src/core/ext/transport/chttp2/transport/hpack_parser.cc',
        'src/core/ext/transport/chttp2/transport/hpack_parser_table.cc',
        'src/core/ext/transport/chttp2/transport/http2_settings.cc',
        'src/core/ext/transport/chttp2/transport/huffsyms.cc',
        'src/core/ext/transport/chttp2/transport/parsing.cc',
        'src/core/ext/transport/chttp2/transport/stream_lists.cc',
        'src/core/ext/transport/chttp2/transport/stream_map.cc',
        'src/core/ext/transport/chttp2/transport/varint.cc',
        'src/core/ext/transport/chttp2/transport/writing.cc',
        'src/core/ext/transport/inproc/inproc_plugin.cc',
        'src/core/ext/transport/inproc/inproc_transport.cc',
        'src/core/ext/upb-generated/google/api/annotations.upb.c',
        'src/core/ext/upb-generated/google/api/http.upb.c',
        'src/core/ext/upb-generated/google/protobuf/any.upb.c',
        'src/core/ext/upb-generated/google/protobuf/descriptor.upb.c',
        'src/core/ext/upb-generated/google/protobuf/duration.upb.c',
        'src/core/ext/upb-generated/google/protobuf/empty.upb.c',
        'src/core/ext/upb-generated/google/protobuf/struct.upb.c',
        'src/core/ext/upb-generated/google/protobuf/timestamp.upb.c',
        'src/core/ext/upb-generated/google/protobuf/wrappers.upb.c',
        'src/core/ext/upb-generated/google/rpc/status.upb.c',
        'src/core/ext/upb-generated/src/proto/grpc/health/v1/health.upb.c',
        'src/core/ext/upb-generated/src/proto/grpc/lb/v1/load_balancer.upb.c',
        'src/core/ext/upb-generated/src/proto/grpc/lookup/v1/rls.upb.c',
        'src/core/ext/upb-generated/validate/validate.upb.c',
        'src/core/ext/upb-generated/xds/data/orca/v3/orca_load_report.upb.c',
        'src/core/ext/upb-generated/xds/service/orca/v3/orca.upb.c',
        'src/core/lib/address_utils/parse_address.cc',
        'src/core/lib/address_utils/sockaddr_utils.cc',
        'src/core/lib/backoff/backoff.cc',
        'src/core/lib/channel/channel_args.cc',
        'src/core/lib/channel/channel_args_preconditioning.cc',
        'src/core/lib/channel/channel_stack.cc',
        'src/core/lib/channel/channel_stack_builder.cc',
        'src/core/lib/channel/channel_stack_builder_impl.cc',
        'src/core/lib/channel/channel_trace.cc',
        'src/core/lib/channel/channelz.cc',
        'src/core/lib/channel/channelz_registry.cc',
        'src/core/lib/channel/connected_channel.cc',
        'src/core/lib/channel/promise_based_filter.cc',
        'src/core/lib/channel/status_util.cc',
        'src/core/lib/compression/compression.cc',
        'src/core/lib/compression/compression_internal.cc',
        'src/core/lib/compression/message_compress.cc',
        'src/core/lib/config/core_configuration.cc',
        'src/core/lib/debug/stats.cc',
        'src/core/lib/debug/stats_data.cc',
        'src/core/lib/debug/trace.cc',
        'src/core/lib/event_engine/channel_args_endpoint_config.cc',
        'src/core/lib/event_engine/default_event_engine_factory.cc',
        'src/core/lib/event_engine/event_engine.cc',
        'src/core/lib/event_engine/memory_allocator.cc',
        'src/core/lib/event_engine/resolved_address.cc',
        'src/core/lib/event_engine/slice.cc',
        'src/core/lib/event_engine/slice_buffer.cc',
        'src/core/lib/event_engine/sockaddr.cc',
        'src/core/lib/gprpp/time.cc',
        'src/core/lib/http/format_request.cc',
        'src/core/lib/http/httpcli.cc',
        'src/core/lib/http/parser.cc',
        'src/core/lib/iomgr/buffer_list.cc',
        'src/core/lib/iomgr/call_combiner.cc',
        'src/core/lib/iomgr/cfstream_handle.cc',
        'src/core/lib/iomgr/combiner.cc',
        'src/core/lib/iomgr/dualstack_socket_posix.cc',
        'src/core/lib/iomgr/endpoint.cc',
        'src/core/lib/iomgr/endpoint_cfstream.cc',
        'src/core/lib/iomgr/endpoint_pair_event_engine.cc',
        'src/core/lib/iomgr/endpoint_pair_posix.cc',
        'src/core/lib/iomgr/endpoint_pair_windows.cc',
        'src/core/lib/iomgr/error.cc',
        'src/core/lib/iomgr/error_cfstream.cc',
        'src/core/lib/iomgr/ev_apple.cc',
        'src/core/lib/iomgr/ev_epoll1_linux.cc',
        'src/core/lib/iomgr/ev_poll_posix.cc',
        'src/core/lib/iomgr/ev_posix.cc',
        'src/core/lib/iomgr/ev_windows.cc',
        'src/core/lib/iomgr/event_engine/closure.cc',
        'src/core/lib/iomgr/event_engine/endpoint.cc',
        'src/core/lib/iomgr/event_engine/iomgr.cc',
        'src/core/lib/iomgr/event_engine/pollset.cc',
        'src/core/lib/iomgr/event_engine/resolved_address_internal.cc',
        'src/core/lib/iomgr/event_engine/resolver.cc',
        'src/core/lib/iomgr/event_engine/tcp.cc',
        'src/core/lib/iomgr/event_engine/timer.cc',
        'src/core/lib/iomgr/exec_ctx.cc',
        'src/core/lib/iomgr/executor.cc',
        'src/core/lib/iomgr/executor/mpmcqueue.cc',
        'src/core/lib/iomgr/executor/threadpool.cc',
        'src/core/lib/iomgr/fork_posix.cc',
        'src/core/lib/iomgr/fork_windows.cc',
        'src/core/lib/iomgr/gethostname_fallback.cc',
        'src/core/lib/iomgr/gethostname_host_name_max.cc',
        'src/core/lib/iomgr/gethostname_sysconf.cc',
        'src/core/lib/iomgr/grpc_if_nametoindex_posix.cc',
        'src/core/lib/iomgr/grpc_if_nametoindex_unsupported.cc',
        'src/core/lib/iomgr/internal_errqueue.cc',
        'src/core/lib/iomgr/iocp_windows.cc',
        'src/core/lib/iomgr/iomgr.cc',
        'src/core/lib/iomgr/iomgr_internal.cc',
        'src/core/lib/iomgr/iomgr_posix.cc',
        'src/core/lib/iomgr/iomgr_posix_cfstream.cc',
        'src/core/lib/iomgr/iomgr_windows.cc',
        'src/core/lib/iomgr/load_file.cc',
        'src/core/lib/iomgr/lockfree_event.cc',
        'src/core/lib/iomgr/polling_entity.cc',
        'src/core/lib/iomgr/pollset.cc',
        'src/core/lib/iomgr/pollset_set.cc',
        'src/core/lib/iomgr/pollset_set_windows.cc',
        'src/core/lib/iomgr/pollset_windows.cc',
        'src/core/lib/iomgr/resolve_address.cc',
        'src/core/lib/iomgr/resolve_address_posix.cc',
        'src/core/lib/iomgr/resolve_address_windows.cc',
        'src/core/lib/iomgr/sockaddr_utils_posix.cc',
        'src/core/lib/iomgr/socket_factory_posix.cc',
        'src/core/lib/iomgr/socket_mutator.cc',
        'src/core/lib/iomgr/socket_utils_common_posix.cc',
        'src/core/lib/iomgr/socket_utils_linux.cc',
        'src/core/lib/iomgr/socket_utils_posix.cc',
        'src/core/lib/iomgr/socket_utils_windows.cc',
        'src/core/lib/iomgr/socket_windows.cc',
        'src/core/lib/iomgr/tcp_client.cc',
        'src/core/lib/iomgr/tcp_client_cfstream.cc',
        'src/core/lib/iomgr/tcp_client_posix.cc',
        'src/core/lib/iomgr/tcp_client_windows.cc',
        'src/core/lib/iomgr/tcp_posix.cc',
        'src/core/lib/iomgr/tcp_server.cc',
        'src/core/lib/iomgr/tcp_server_posix.cc',
        'src/core/lib/iomgr/tcp_server_utils_posix_common.cc',
        'src/core/lib/iomgr/tcp_server_utils_posix_ifaddrs.cc',
        'src/core/lib/iomgr/tcp_server_utils_posix_noifaddrs.cc',
        'src/core/lib/iomgr/tcp_server_windows.cc',
        'src/core/lib/iomgr/tcp_windows.cc',
        'src/core/lib/iomgr/time_averaged_stats.cc',
        'src/core/lib/iomgr/timer.cc',
        'src/core/lib/iomgr/timer_generic.cc',
        'src/core/lib/iomgr/timer_heap.cc',
        'src/core/lib/iomgr/timer_manager.cc',
        'src/core/lib/iomgr/unix_sockets_posix.cc',
        'src/core/lib/iomgr/unix_sockets_posix_noop.cc',
        'src/core/lib/iomgr/wakeup_fd_eventfd.cc',
        'src/core/lib/iomgr/wakeup_fd_nospecial.cc',
        'src/core/lib/iomgr/wakeup_fd_pipe.cc',
        'src/core/lib/iomgr/wakeup_fd_posix.cc',
        'src/core/lib/iomgr/work_serializer.cc',
        'src/core/lib/json/json_reader.cc',
        'src/core/lib/json/json_util.cc',
        'src/core/lib/json/json_writer.cc',
        'src/core/lib/promise/activity.cc',
        'src/core/lib/promise/sleep.cc',
        'src/core/lib/resolver/resolver.cc',
        'src/core/lib/resolver/resolver_registry.cc',
        'src/core/lib/resolver/server_address.cc',
        'src/core/lib/resource_quota/api.cc',
        'src/core/lib/resource_quota/arena.cc',
        'src/core/lib/resource_quota/memory_quota.cc',
        'src/core/lib/resource_quota/resource_quota.cc',
        'src/core/lib/resource_quota/thread_quota.cc',
        'src/core/lib/resource_quota/trace.cc',
        'src/core/lib/security/authorization/authorization_policy_provider_vtable.cc',
        'src/core/lib/security/authorization/evaluate_args.cc',
        'src/core/lib/security/authorization/grpc_server_authz_filter.cc',
        'src/core/lib/security/context/security_context.cc',
        'src/core/lib/security/credentials/call_creds_util.cc',
        'src/core/lib/security/credentials/composite/composite_credentials.cc',
        'src/core/lib/security/credentials/credentials.cc',
        'src/core/lib/security/credentials/fake/fake_credentials.cc',
        'src/core/lib/security/credentials/insecure/insecure_credentials.cc',
        'src/core/lib/security/credentials/plugin/plugin_credentials.cc',
        'src/core/lib/security/credentials/tls/tls_utils.cc',
        'src/core/lib/security/security_connector/fake/fake_security_connector.cc',
        'src/core/lib/security/security_connector/insecure/insecure_security_connector.cc',
        'src/core/lib/security/security_connector/load_system_roots_fallback.cc',
        'src/core/lib/security/security_connector/load_system_roots_linux.cc',
        'src/core/lib/security/security_connector/security_connector.cc',
        'src/core/lib/security/transport/client_auth_filter.cc',
        'src/core/lib/security/transport/secure_endpoint.cc',
        'src/core/lib/security/transport/security_handshaker.cc',
        'src/core/lib/security/transport/server_auth_filter.cc',
        'src/core/lib/security/transport/tsi_error.cc',
        'src/core/lib/security/util/json_util.cc',
        'src/core/lib/service_config/service_config_impl.cc',
        'src/core/lib/service_config/service_config_parser.cc',
        'src/core/lib/slice/b64.cc',
        'src/core/lib/slice/percent_encoding.cc',
        'src/core/lib/slice/slice.cc',
        'src/core/lib/slice/slice_api.cc',
        'src/core/lib/slice/slice_buffer.cc',
        'src/core/lib/slice/slice_buffer_api.cc',
        'src/core/lib/slice/slice_refcount.cc',
        'src/core/lib/slice/slice_split.cc',
        'src/core/lib/slice/slice_string_helpers.cc',
        'src/core/lib/surface/api_trace.cc',
        'src/core/lib/surface/builtins.cc',
        'src/core/lib/surface/byte_buffer.cc',
        'src/core/lib/surface/byte_buffer_reader.cc',
        'src/core/lib/surface/call.cc',
        'src/core/lib/surface/call_details.cc',
        'src/core/lib/surface/call_log_batch.cc',
        'src/core/lib/surface/channel.cc',
        'src/core/lib/surface/channel_init.cc',
        'src/core/lib/surface/channel_ping.cc',
        'src/core/lib/surface/channel_stack_type.cc',
        'src/core/lib/surface/completion_queue.cc',
        'src/core/lib/surface/completion_queue_factory.cc',
        'src/core/lib/surface/event_string.cc',
        'src/core/lib/surface/init.cc',
        'src/core/lib/surface/lame_client.cc',
        'src/core/lib/surface/metadata_array.cc',
        'src/core/lib/surface/server.cc',
        'src/core/lib/surface/validate_metadata.cc',
        'src/core/lib/surface/version.cc',
        'src/core/lib/transport/bdp_estimator.cc',
        'src/core/lib/transport/byte_stream.cc',
        'src/core/lib/transport/connectivity_state.cc',
        'src/core/lib/transport/error_utils.cc',
        'src/core/lib/transport/handshaker.cc',
        'src/core/lib/transport/handshaker_registry.cc',
        'src/core/lib/transport/http_connect_handshaker.cc',
        'src/core/lib/transport/metadata_batch.cc',
        'src/core/lib/transport/parsed_metadata.cc',
        'src/core/lib/transport/pid_controller.cc',
        'src/core/lib/transport/status_conversion.cc',
        'src/core/lib/transport/tcp_connect_handshaker.cc',
        'src/core/lib/transport/timeout_encoding.cc',
        'src/core/lib/transport/transport.cc',
        'src/core/lib/transport/transport_op_string.cc',
        'src/core/lib/uri/uri_parser.cc',
        'src/core/plugin_registry/grpc_plugin_registry.cc',
        'src/core/plugin_registry/grpc_plugin_registry_noextra.cc',
        'src/core/tsi/fake_transport_security.cc',
        'src/core/tsi/local_transport_security.cc',
        'src/core/tsi/transport_security.cc',
        'src/core/tsi/transport_security_grpc.cc',
      ],
    },
    {
      # Helper library for the C++ microbenchmarks: compiles the testing proto
      # definitions and test/cpp/microbenchmarks/helpers.cc, linked against the
      # benchmark framework and the unsecure gRPC C++ stack.
      'target_name': 'benchmark_helpers',
      'type': 'static_library',
      'dependencies': [
        'benchmark',
        'grpc++_unsecure',
        'grpc_test_util_unsecure',
        'grpc++_test_config',
      ],
      'sources': [
        'src/proto/grpc/testing/echo.proto',
        'src/proto/grpc/testing/echo_messages.proto',
        'src/proto/grpc/testing/simple_messages.proto',
        'src/proto/grpc/testing/xds/v3/orca_load_report.proto',
        'test/cpp/microbenchmarks/helpers.cc',
      ],
    },
{
'target_name': 'grpc++',
'type': 'static_library',
'dependencies': [
'grpc',
],
'sources': [
'src/core/ext/transport/binder/client/binder_connector.cc',
'src/core/ext/transport/binder/client/channel_create.cc',
'src/core/ext/transport/binder/client/channel_create_impl.cc',
'src/core/ext/transport/binder/client/connection_id_generator.cc',
'src/core/ext/transport/binder/client/endpoint_binder_pool.cc',
'src/core/ext/transport/binder/client/jni_utils.cc',
'src/core/ext/transport/binder/client/security_policy_setting.cc',
'src/core/ext/transport/binder/security_policy/binder_security_policy.cc',
'src/core/ext/transport/binder/server/binder_server.cc',
'src/core/ext/transport/binder/server/binder_server_credentials.cc',
'src/core/ext/transport/binder/transport/binder_transport.cc',
'src/core/ext/transport/binder/utils/ndk_binder.cc',
'src/core/ext/transport/binder/utils/transport_stream_receiver_impl.cc',
'src/core/ext/transport/binder/wire_format/binder_android.cc',
'src/core/ext/transport/binder/wire_format/binder_constants.cc',
'src/core/ext/transport/binder/wire_format/transaction.cc',
'src/core/ext/transport/binder/wire_format/wire_reader_impl.cc',
'src/core/ext/transport/binder/wire_format/wire_writer.cc',
'src/cpp/client/channel_cc.cc',
'src/cpp/client/client_callback.cc',
'src/cpp/client/client_context.cc',
'src/cpp/client/client_interceptor.cc',
'src/cpp/client/create_channel.cc',
'src/cpp/client/create_channel_internal.cc',
'src/cpp/client/create_channel_posix.cc',
'src/cpp/client/credentials_cc.cc',
'src/cpp/client/insecure_credentials.cc',
'src/cpp/client/secure_credentials.cc',
'src/cpp/client/xds_credentials.cc',
'src/cpp/codegen/codegen_init.cc',
'src/cpp/common/alarm.cc',
'src/cpp/common/auth_property_iterator.cc',
'src/cpp/common/channel_arguments.cc',
'src/cpp/common/channel_filter.cc',
'src/cpp/common/completion_queue_cc.cc',
'src/cpp/common/core_codegen.cc',
'src/cpp/common/resource_quota_cc.cc',
'src/cpp/common/rpc_method.cc',
'src/cpp/common/secure_auth_context.cc',
'src/cpp/common/secure_channel_arguments.cc',
'src/cpp/common/secure_create_auth_context.cc',
'src/cpp/common/tls_certificate_provider.cc',
'src/cpp/common/tls_certificate_verifier.cc',
'src/cpp/common/tls_credentials_options.cc',
'src/cpp/common/validate_service_config.cc',
'src/cpp/common/version_cc.cc',
'src/cpp/server/async_generic_service.cc',
'src/cpp/server/channel_argument_option.cc',
'src/cpp/server/create_default_thread_pool.cc',
'src/cpp/server/dynamic_thread_pool.cc',
'src/cpp/server/external_connection_acceptor_impl.cc',
'src/cpp/server/health/default_health_check_service.cc',
'src/cpp/server/health/health_check_service.cc',
'src/cpp/server/health/health_check_service_server_builder_option.cc',
'src/cpp/server/insecure_server_credentials.cc',
'src/cpp/server/secure_server_credentials.cc',
'src/cpp/server/server_builder.cc',
'src/cpp/server/server_callback.cc',
'src/cpp/server/server_cc.cc',
'src/cpp/server/server_context.cc',
'src/cpp/server/server_credentials.cc',
'src/cpp/server/server_posix.cc',
'src/cpp/server/xds_server_credentials.cc',
'src/cpp/thread_manager/thread_manager.cc',
'src/cpp/util/byte_buffer_cc.cc',
'src/cpp/util/status.cc',
'src/cpp/util/string_ref.cc',
'src/cpp/util/time_cc.cc',
],
},
{
'target_name': 'grpc++_alts',
'type': 'static_library',
'dependencies': [
'grpc++',
],
'sources': [
'src/cpp/common/alts_context.cc',
'src/cpp/common/alts_util.cc',
],
},
{
'target_name': 'grpc++_error_details',
'type': 'static_library',
'dependencies': [
'grpc++',
],
'sources': [
'src/cpp/util/error_details.cc',
],
},
{
'target_name': 'grpc++_reflection',
'type': 'static_library',
'dependencies': [
'grpc++',
],
'sources': [
'src/proto/grpc/reflection/v1alpha/reflection.proto',
'src/cpp/ext/proto_server_reflection.cc',
'src/cpp/ext/proto_server_reflection_plugin.cc',
],
},
{
'target_name': 'grpc++_test',
'type': 'static_library',
'dependencies': [
'grpc++',
],
'sources': [
'src/cpp/client/channel_test_peer.cc',
],
},
{
'target_name': 'grpc++_test_config',
'type': 'static_library',
'dependencies': [
'absl/flags:parse',
'gpr',
],
'sources': [
'test/cpp/util/test_config_cc.cc',
],
},
{
'target_name': 'grpc++_test_util',
'type': 'static_library',
'dependencies': [
'absl/flags:flag',
'grpc++',
'grpc_test_util',
],
'sources': [
'test/core/end2end/data/client_certs.cc',
'test/core/end2end/data/server1_cert.cc',
'test/core/end2end/data/server1_key.cc',
'test/core/end2end/data/test_root_cert.cc',
'test/cpp/util/byte_buffer_proto_helper.cc',
'test/cpp/util/create_test_channel.cc',
'test/cpp/util/string_ref_helper.cc',
'test/cpp/util/subprocess.cc',
'test/cpp/util/test_credentials_provider.cc',
],
},
{
'target_name': 'grpc++_unsecure',
'type': 'static_library',
'dependencies': [
'grpc_unsecure',
],
'sources': [
'src/cpp/client/channel_cc.cc',
'src/cpp/client/client_callback.cc',
'src/cpp/client/client_context.cc',
'src/cpp/client/client_interceptor.cc',
'src/cpp/client/create_channel.cc',
'src/cpp/client/create_channel_internal.cc',
'src/cpp/client/create_channel_posix.cc',
'src/cpp/client/credentials_cc.cc',
'src/cpp/client/insecure_credentials.cc',
'src/cpp/codegen/codegen_init.cc',
'src/cpp/common/alarm.cc',
'src/cpp/common/channel_arguments.cc',
'src/cpp/common/channel_filter.cc',
'src/cpp/common/completion_queue_cc.cc',
'src/cpp/common/core_codegen.cc',
'src/cpp/common/insecure_create_auth_context.cc',
'src/cpp/common/resource_quota_cc.cc',
'src/cpp/common/rpc_method.cc',
'src/cpp/common/validate_service_config.cc',
'src/cpp/common/version_cc.cc',
'src/cpp/server/async_generic_service.cc',
'src/cpp/server/channel_argument_option.cc',
'src/cpp/server/create_default_thread_pool.cc',
'src/cpp/server/dynamic_thread_pool.cc',
'src/cpp/server/external_connection_acceptor_impl.cc',
'src/cpp/server/health/default_health_check_service.cc',
'src/cpp/server/health/health_check_service.cc',
'src/cpp/server/health/health_check_service_server_builder_option.cc',
'src/cpp/server/insecure_server_credentials.cc',
'src/cpp/server/server_builder.cc',
'src/cpp/server/server_callback.cc',
'src/cpp/server/server_cc.cc',
'src/cpp/server/server_context.cc',
'src/cpp/server/server_credentials.cc',
'src/cpp/server/server_posix.cc',
'src/cpp/thread_manager/thread_manager.cc',
'src/cpp/util/byte_buffer_cc.cc',
'src/cpp/util/status.cc',
'src/cpp/util/string_ref.cc',
'src/cpp/util/time_cc.cc',
],
},
{
'target_name': 'grpc_plugin_support',
'type': 'static_library',
'dependencies': [
],
'sources': [
'src/compiler/cpp_generator.cc',
'src/compiler/csharp_generator.cc',
'src/compiler/node_generator.cc',
'src/compiler/objective_c_generator.cc',
'src/compiler/php_generator.cc',
'src/compiler/python_generator.cc',
'src/compiler/ruby_generator.cc',
],
},
{
'target_name': 'grpcpp_channelz',
'type': 'static_library',
'dependencies': [
'grpc++',
],
'sources': [
'src/proto/grpc/channelz/channelz.proto',
'src/cpp/server/channelz/channelz_service.cc',
'src/cpp/server/channelz/channelz_service_plugin.cc',
],
},
{
'target_name': 'boringssl',
'type': 'static_library',
'dependencies': [
],
'sources': [
'third_party/boringssl-with-bazel/err_data.c',
'third_party/boringssl-with-bazel/src/crypto/asn1/a_bitstr.c',
'third_party/boringssl-with-bazel/src/crypto/asn1/a_bool.c',
'third_party/boringssl-with-bazel/src/crypto/asn1/a_d2i_fp.c',
'third_party/boringssl-with-bazel/src/crypto/asn1/a_dup.c',
'third_party/boringssl-with-bazel/src/crypto/asn1/a_enum.c',
'third_party/boringssl-with-bazel/src/crypto/asn1/a_gentm.c',
'third_party/boringssl-with-bazel/src/crypto/asn1/a_i2d_fp.c',
'third_party/boringssl-with-bazel/src/crypto/asn1/a_int.c',
'third_party/boringssl-with-bazel/src/crypto/asn1/a_mbstr.c',
'third_party/boringssl-with-bazel/src/crypto/asn1/a_object.c',
'third_party/boringssl-with-bazel/src/crypto/asn1/a_octet.c',
'third_party/boringssl-with-bazel/src/crypto/asn1/a_print.c',
'third_party/boringssl-with-bazel/src/crypto/asn1/a_strex.c',
'third_party/boringssl-with-bazel/src/crypto/asn1/a_strnid.c',
'third_party/boringssl-with-bazel/src/crypto/asn1/a_time.c',
'third_party/boringssl-with-bazel/src/crypto/asn1/a_type.c',
'third_party/boringssl-with-bazel/src/crypto/asn1/a_utctm.c',
'third_party/boringssl-with-bazel/src/crypto/asn1/a_utf8.c',
'third_party/boringssl-with-bazel/src/crypto/asn1/asn1_lib.c',
'third_party/boringssl-with-bazel/src/crypto/asn1/asn1_par.c',
'third_party/boringssl-with-bazel/src/crypto/asn1/asn_pack.c',
'third_party/boringssl-with-bazel/src/crypto/asn1/f_int.c',
'third_party/boringssl-with-bazel/src/crypto/asn1/f_string.c',
'third_party/boringssl-with-bazel/src/crypto/asn1/tasn_dec.c',
'third_party/boringssl-with-bazel/src/crypto/asn1/tasn_enc.c',
'third_party/boringssl-with-bazel/src/crypto/asn1/tasn_fre.c',
'third_party/boringssl-with-bazel/src/crypto/asn1/tasn_new.c',
'third_party/boringssl-with-bazel/src/crypto/asn1/tasn_typ.c',
'third_party/boringssl-with-bazel/src/crypto/asn1/tasn_utl.c',
'third_party/boringssl-with-bazel/src/crypto/asn1/time_support.c',
'third_party/boringssl-with-bazel/src/crypto/base64/base64.c',
'third_party/boringssl-with-bazel/src/crypto/bio/bio.c',
'third_party/boringssl-with-bazel/src/crypto/bio/bio_mem.c',
'third_party/boringssl-with-bazel/src/crypto/bio/connect.c',
'third_party/boringssl-with-bazel/src/crypto/bio/fd.c',
'third_party/boringssl-with-bazel/src/crypto/bio/file.c',
'third_party/boringssl-with-bazel/src/crypto/bio/hexdump.c',
'third_party/boringssl-with-bazel/src/crypto/bio/pair.c',
'third_party/boringssl-with-bazel/src/crypto/bio/printf.c',
'third_party/boringssl-with-bazel/src/crypto/bio/socket.c',
'third_party/boringssl-with-bazel/src/crypto/bio/socket_helper.c',
'third_party/boringssl-with-bazel/src/crypto/blake2/blake2.c',
'third_party/boringssl-with-bazel/src/crypto/bn_extra/bn_asn1.c',
'third_party/boringssl-with-bazel/src/crypto/bn_extra/convert.c',
'third_party/boringssl-with-bazel/src/crypto/buf/buf.c',
'third_party/boringssl-with-bazel/src/crypto/bytestring/asn1_compat.c',
'third_party/boringssl-with-bazel/src/crypto/bytestring/ber.c',
'third_party/boringssl-with-bazel/src/crypto/bytestring/cbb.c',
'third_party/boringssl-with-bazel/src/crypto/bytestring/cbs.c',
'third_party/boringssl-with-bazel/src/crypto/bytestring/unicode.c',
'third_party/boringssl-with-bazel/src/crypto/chacha/chacha.c',
'third_party/boringssl-with-bazel/src/crypto/cipher_extra/cipher_extra.c',
'third_party/boringssl-with-bazel/src/crypto/cipher_extra/derive_key.c',
'third_party/boringssl-with-bazel/src/crypto/cipher_extra/e_aesccm.c',
'third_party/boringssl-with-bazel/src/crypto/cipher_extra/e_aesctrhmac.c',
'third_party/boringssl-with-bazel/src/crypto/cipher_extra/e_aesgcmsiv.c',
'third_party/boringssl-with-bazel/src/crypto/cipher_extra/e_chacha20poly1305.c',
'third_party/boringssl-with-bazel/src/crypto/cipher_extra/e_null.c',
'third_party/boringssl-with-bazel/src/crypto/cipher_extra/e_rc2.c',
'third_party/boringssl-with-bazel/src/crypto/cipher_extra/e_rc4.c',
'third_party/boringssl-with-bazel/src/crypto/cipher_extra/e_tls.c',
'third_party/boringssl-with-bazel/src/crypto/cipher_extra/tls_cbc.c',
'third_party/boringssl-with-bazel/src/crypto/cmac/cmac.c',
'third_party/boringssl-with-bazel/src/crypto/conf/conf.c',
'third_party/boringssl-with-bazel/src/crypto/cpu-aarch64-fuchsia.c',
'third_party/boringssl-with-bazel/src/crypto/cpu-aarch64-linux.c',
'third_party/boringssl-with-bazel/src/crypto/cpu-aarch64-win.c',
'third_party/boringssl-with-bazel/src/crypto/cpu-arm-linux.c',
'third_party/boringssl-with-bazel/src/crypto/cpu-arm.c',
'third_party/boringssl-with-bazel/src/crypto/cpu-intel.c',
'third_party/boringssl-with-bazel/src/crypto/cpu-ppc64le.c',
'third_party/boringssl-with-bazel/src/crypto/crypto.c',
'third_party/boringssl-with-bazel/src/crypto/curve25519/curve25519.c',
'third_party/boringssl-with-bazel/src/crypto/curve25519/spake25519.c',
'third_party/boringssl-with-bazel/src/crypto/dh_extra/dh_asn1.c',
'third_party/boringssl-with-bazel/src/crypto/dh_extra/params.c',
'third_party/boringssl-with-bazel/src/crypto/digest_extra/digest_extra.c',
'third_party/boringssl-with-bazel/src/crypto/dsa/dsa.c',
'third_party/boringssl-with-bazel/src/crypto/dsa/dsa_asn1.c',
'third_party/boringssl-with-bazel/src/crypto/ec_extra/ec_asn1.c',
'third_party/boringssl-with-bazel/src/crypto/ec_extra/ec_derive.c',
'third_party/boringssl-with-bazel/src/crypto/ec_extra/hash_to_curve.c',
'third_party/boringssl-with-bazel/src/crypto/ecdh_extra/ecdh_extra.c',
'third_party/boringssl-with-bazel/src/crypto/ecdsa_extra/ecdsa_asn1.c',
'third_party/boringssl-with-bazel/src/crypto/engine/engine.c',
'third_party/boringssl-with-bazel/src/crypto/err/err.c',
'third_party/boringssl-with-bazel/src/crypto/evp/digestsign.c',
'third_party/boringssl-with-bazel/src/crypto/evp/evp.c',
'third_party/boringssl-with-bazel/src/crypto/evp/evp_asn1.c',
'third_party/boringssl-with-bazel/src/crypto/evp/evp_ctx.c',
'third_party/boringssl-with-bazel/src/crypto/evp/p_dsa_asn1.c',
'third_party/boringssl-with-bazel/src/crypto/evp/p_ec.c',
'third_party/boringssl-with-bazel/src/crypto/evp/p_ec_asn1.c',
'third_party/boringssl-with-bazel/src/crypto/evp/p_ed25519.c',
'third_party/boringssl-with-bazel/src/crypto/evp/p_ed25519_asn1.c',
'third_party/boringssl-with-bazel/src/crypto/evp/p_rsa.c',
'third_party/boringssl-with-bazel/src/crypto/evp/p_rsa_asn1.c',
'third_party/boringssl-with-bazel/src/crypto/evp/p_x25519.c',
'third_party/boringssl-with-bazel/src/crypto/evp/p_x25519_asn1.c',
'third_party/boringssl-with-bazel/src/crypto/evp/pbkdf.c',
'third_party/boringssl-with-bazel/src/crypto/evp/print.c',
'third_party/boringssl-with-bazel/src/crypto/evp/scrypt.c',
'third_party/boringssl-with-bazel/src/crypto/evp/sign.c',
'third_party/boringssl-with-bazel/src/crypto/ex_data.c',
'third_party/boringssl-with-bazel/src/crypto/fipsmodule/bcm.c',
'third_party/boringssl-with-bazel/src/crypto/fipsmodule/fips_shared_support.c',
'third_party/boringssl-with-bazel/src/crypto/hkdf/hkdf.c',
'third_party/boringssl-with-bazel/src/crypto/hpke/hpke.c',
'third_party/boringssl-with-bazel/src/crypto/hrss/hrss.c',
'third_party/boringssl-with-bazel/src/crypto/lhash/lhash.c',
'third_party/boringssl-with-bazel/src/crypto/mem.c',
'third_party/boringssl-with-bazel/src/crypto/obj/obj.c',
'third_party/boringssl-with-bazel/src/crypto/obj/obj_xref.c',
'third_party/boringssl-with-bazel/src/crypto/pem/pem_all.c',
'third_party/boringssl-with-bazel/src/crypto/pem/pem_info.c',
'third_party/boringssl-with-bazel/src/crypto/pem/pem_lib.c',
'third_party/boringssl-with-bazel/src/crypto/pem/pem_oth.c',
'third_party/boringssl-with-bazel/src/crypto/pem/pem_pk8.c',
'third_party/boringssl-with-bazel/src/crypto/pem/pem_pkey.c',
'third_party/boringssl-with-bazel/src/crypto/pem/pem_x509.c',
'third_party/boringssl-with-bazel/src/crypto/pem/pem_xaux.c',
'third_party/boringssl-with-bazel/src/crypto/pkcs7/pkcs7.c',
'third_party/boringssl-with-bazel/src/crypto/pkcs7/pkcs7_x509.c',
'third_party/boringssl-with-bazel/src/crypto/pkcs8/p5_pbev2.c',
'third_party/boringssl-with-bazel/src/crypto/pkcs8/pkcs8.c',
'third_party/boringssl-with-bazel/src/crypto/pkcs8/pkcs8_x509.c',
'third_party/boringssl-with-bazel/src/crypto/poly1305/poly1305.c',
'third_party/boringssl-with-bazel/src/crypto/poly1305/poly1305_arm.c',
'third_party/boringssl-with-bazel/src/crypto/poly1305/poly1305_vec.c',
'third_party/boringssl-with-bazel/src/crypto/pool/pool.c',
'third_party/boringssl-with-bazel/src/crypto/rand_extra/deterministic.c',
'third_party/boringssl-with-bazel/src/crypto/rand_extra/forkunsafe.c',
'third_party/boringssl-with-bazel/src/crypto/rand_extra/fuchsia.c',
'third_party/boringssl-with-bazel/src/crypto/rand_extra/passive.c',
'third_party/boringssl-with-bazel/src/crypto/rand_extra/rand_extra.c',
'third_party/boringssl-with-bazel/src/crypto/rand_extra/windows.c',
'third_party/boringssl-with-bazel/src/crypto/rc4/rc4.c',
'third_party/boringssl-with-bazel/src/crypto/refcount_c11.c',
'third_party/boringssl-with-bazel/src/crypto/refcount_lock.c',
'third_party/boringssl-with-bazel/src/crypto/rsa_extra/rsa_asn1.c',
'third_party/boringssl-with-bazel/src/crypto/rsa_extra/rsa_print.c',
'third_party/boringssl-with-bazel/src/crypto/siphash/siphash.c',
'third_party/boringssl-with-bazel/src/crypto/stack/stack.c',
'third_party/boringssl-with-bazel/src/crypto/thread.c',
'third_party/boringssl-with-bazel/src/crypto/thread_none.c',
'third_party/boringssl-with-bazel/src/crypto/thread_pthread.c',
'third_party/boringssl-with-bazel/src/crypto/thread_win.c',
'third_party/boringssl-with-bazel/src/crypto/trust_token/pmbtoken.c',
'third_party/boringssl-with-bazel/src/crypto/trust_token/trust_token.c',
'third_party/boringssl-with-bazel/src/crypto/trust_token/voprf.c',
'third_party/boringssl-with-bazel/src/crypto/x509/a_digest.c',
'third_party/boringssl-with-bazel/src/crypto/x509/a_sign.c',
'third_party/boringssl-with-bazel/src/crypto/x509/a_verify.c',
'third_party/boringssl-with-bazel/src/crypto/x509/algorithm.c',
'third_party/boringssl-with-bazel/src/crypto/x509/asn1_gen.c',
'third_party/boringssl-with-bazel/src/crypto/x509/by_dir.c',
'third_party/boringssl-with-bazel/src/crypto/x509/by_file.c',
'third_party/boringssl-with-bazel/src/crypto/x509/i2d_pr.c',
'third_party/boringssl-with-bazel/src/crypto/x509/name_print.c',
'third_party/boringssl-with-bazel/src/crypto/x509/rsa_pss.c',
'third_party/boringssl-with-bazel/src/crypto/x509/t_crl.c',
'third_party/boringssl-with-bazel/src/crypto/x509/t_req.c',
'third_party/boringssl-with-bazel/src/crypto/x509/t_x509.c',
'third_party/boringssl-with-bazel/src/crypto/x509/t_x509a.c',
'third_party/boringssl-with-bazel/src/crypto/x509/x509.c',
'third_party/boringssl-with-bazel/src/crypto/x509/x509_att.c',
'third_party/boringssl-with-bazel/src/crypto/x509/x509_cmp.c',
'third_party/boringssl-with-bazel/src/crypto/x509/x509_d2.c',
'third_party/boringssl-with-bazel/src/crypto/x509/x509_def.c',
'third_party/boringssl-with-bazel/src/crypto/x509/x509_ext.c',
'third_party/boringssl-with-bazel/src/crypto/x509/x509_lu.c',
'third_party/boringssl-with-bazel/src/crypto/x509/x509_obj.c',
'third_party/boringssl-with-bazel/src/crypto/x509/x509_req.c',
'third_party/boringssl-with-bazel/src/crypto/x509/x509_set.c',
'third_party/boringssl-with-bazel/src/crypto/x509/x509_trs.c',
'third_party/boringssl-with-bazel/src/crypto/x509/x509_txt.c',
'third_party/boringssl-with-bazel/src/crypto/x509/x509_v3.c',
'third_party/boringssl-with-bazel/src/crypto/x509/x509_vfy.c',
'third_party/boringssl-with-bazel/src/crypto/x509/x509_vpm.c',
'third_party/boringssl-with-bazel/src/crypto/x509/x509cset.c',
'third_party/boringssl-with-bazel/src/crypto/x509/x509name.c',
'third_party/boringssl-with-bazel/src/crypto/x509/x509rset.c',
'third_party/boringssl-with-bazel/src/crypto/x509/x509spki.c',
'third_party/boringssl-with-bazel/src/crypto/x509/x_algor.c',
'third_party/boringssl-with-bazel/src/crypto/x509/x_all.c',
'third_party/boringssl-with-bazel/src/crypto/x509/x_attrib.c',
'third_party/boringssl-with-bazel/src/crypto/x509/x_crl.c',
'third_party/boringssl-with-bazel/src/crypto/x509/x_exten.c',
'third_party/boringssl-with-bazel/src/crypto/x509/x_info.c',
'third_party/boringssl-with-bazel/src/crypto/x509/x_name.c',
'third_party/boringssl-with-bazel/src/crypto/x509/x_pkey.c',
'third_party/boringssl-with-bazel/src/crypto/x509/x_pubkey.c',
'third_party/boringssl-with-bazel/src/crypto/x509/x_req.c',
'third_party/boringssl-with-bazel/src/crypto/x509/x_sig.c',
'third_party/boringssl-with-bazel/src/crypto/x509/x_spki.c',
'third_party/boringssl-with-bazel/src/crypto/x509/x_val.c',
'third_party/boringssl-with-bazel/src/crypto/x509/x_x509.c',
'third_party/boringssl-with-bazel/src/crypto/x509/x_x509a.c',
'third_party/boringssl-with-bazel/src/crypto/x509v3/pcy_cache.c',
'third_party/boringssl-with-bazel/src/crypto/x509v3/pcy_data.c',
'third_party/boringssl-with-bazel/src/crypto/x509v3/pcy_lib.c',
'third_party/boringssl-with-bazel/src/crypto/x509v3/pcy_map.c',
'third_party/boringssl-with-bazel/src/crypto/x509v3/pcy_node.c',
'third_party/boringssl-with-bazel/src/crypto/x509v3/pcy_tree.c',
'third_party/boringssl-with-bazel/src/crypto/x509v3/v3_akey.c',
'third_party/boringssl-with-bazel/src/crypto/x509v3/v3_akeya.c',
'third_party/boringssl-with-bazel/src/crypto/x509v3/v3_alt.c',
'third_party/boringssl-with-bazel/src/crypto/x509v3/v3_bcons.c',
'third_party/boringssl-with-bazel/src/crypto/x509v3/v3_bitst.c',
'third_party/boringssl-with-bazel/src/crypto/x509v3/v3_conf.c',
'third_party/boringssl-with-bazel/src/crypto/x509v3/v3_cpols.c',
'third_party/boringssl-with-bazel/src/crypto/x509v3/v3_crld.c',
'third_party/boringssl-with-bazel/src/crypto/x509v3/v3_enum.c',
'third_party/boringssl-with-bazel/src/crypto/x509v3/v3_extku.c',
'third_party/boringssl-with-bazel/src/crypto/x509v3/v3_genn.c',
'third_party/boringssl-with-bazel/src/crypto/x509v3/v3_ia5.c',
'third_party/boringssl-with-bazel/src/crypto/x509v3/v3_info.c',
'third_party/boringssl-with-bazel/src/crypto/x509v3/v3_int.c',
'third_party/boringssl-with-bazel/src/crypto/x509v3/v3_lib.c',
'third_party/boringssl-with-bazel/src/crypto/x509v3/v3_ncons.c',
'third_party/boringssl-with-bazel/src/crypto/x509v3/v3_ocsp.c',
'third_party/boringssl-with-bazel/src/crypto/x509v3/v3_pci.c',
'third_party/boringssl-with-bazel/src/crypto/x509v3/v3_pcia.c',
'third_party/boringssl-with-bazel/src/crypto/x509v3/v3_pcons.c',
'third_party/boringssl-with-bazel/src/crypto/x509v3/v3_pmaps.c',
'third_party/boringssl-with-bazel/src/crypto/x509v3/v3_prn.c',
'third_party/boringssl-with-bazel/src/crypto/x509v3/v3_purp.c',
'third_party/boringssl-with-bazel/src/crypto/x509v3/v3_skey.c',
'third_party/boringssl-with-bazel/src/crypto/x509v3/v3_utl.c',
'third_party/boringssl-with-bazel/src/ssl/bio_ssl.cc',
'third_party/boringssl-with-bazel/src/ssl/d1_both.cc',
'third_party/boringssl-with-bazel/src/ssl/d1_lib.cc',
'third_party/boringssl-with-bazel/src/ssl/d1_pkt.cc',
'third_party/boringssl-with-bazel/src/ssl/d1_srtp.cc',
'third_party/boringssl-with-bazel/src/ssl/dtls_method.cc',
'third_party/boringssl-with-bazel/src/ssl/dtls_record.cc',
'third_party/boringssl-with-bazel/src/ssl/encrypted_client_hello.cc',
'third_party/boringssl-with-bazel/src/ssl/extensions.cc',
'third_party/boringssl-with-bazel/src/ssl/handoff.cc',
'third_party/boringssl-with-bazel/src/ssl/handshake.cc',
'third_party/boringssl-with-bazel/src/ssl/handshake_client.cc',
'third_party/boringssl-with-bazel/src/ssl/handshake_server.cc',
'third_party/boringssl-with-bazel/src/ssl/s3_both.cc',
'third_party/boringssl-with-bazel/src/ssl/s3_lib.cc',
'third_party/boringssl-with-bazel/src/ssl/s3_pkt.cc',
'third_party/boringssl-with-bazel/src/ssl/ssl_aead_ctx.cc',
'third_party/boringssl-with-bazel/src/ssl/ssl_asn1.cc',
'third_party/boringssl-with-bazel/src/ssl/ssl_buffer.cc',
'third_party/boringssl-with-bazel/src/ssl/ssl_cert.cc',
'third_party/boringssl-with-bazel/src/ssl/ssl_cipher.cc',
'third_party/boringssl-with-bazel/src/ssl/ssl_file.cc',
'third_party/boringssl-with-bazel/src/ssl/ssl_key_share.cc',
'third_party/boringssl-with-bazel/src/ssl/ssl_lib.cc',
'third_party/boringssl-with-bazel/src/ssl/ssl_privkey.cc',
'third_party/boringssl-with-bazel/src/ssl/ssl_session.cc',
'third_party/boringssl-with-bazel/src/ssl/ssl_stat.cc',
'third_party/boringssl-with-bazel/src/ssl/ssl_transcript.cc',
'third_party/boringssl-with-bazel/src/ssl/ssl_versions.cc',
'third_party/boringssl-with-bazel/src/ssl/ssl_x509.cc',
'third_party/boringssl-with-bazel/src/ssl/t1_enc.cc',
'third_party/boringssl-with-bazel/src/ssl/tls13_both.cc',
'third_party/boringssl-with-bazel/src/ssl/tls13_client.cc',
'third_party/boringssl-with-bazel/src/ssl/tls13_enc.cc',
'third_party/boringssl-with-bazel/src/ssl/tls13_server.cc',
'third_party/boringssl-with-bazel/src/ssl/tls_method.cc',
'third_party/boringssl-with-bazel/src/ssl/tls_record.cc',
],
},
{
'target_name': 'boringssl_test_util',
'type': 'static_library',
'dependencies': [
],
'sources': [
'third_party/boringssl-with-bazel/src/crypto/test/file_test.cc',
'third_party/boringssl-with-bazel/src/crypto/test/malloc.cc',
'third_party/boringssl-with-bazel/src/crypto/test/test_util.cc',
'third_party/boringssl-with-bazel/src/crypto/test/wycheproof_util.cc',
],
},
{
'target_name': 'benchmark',
'type': 'static_library',
'dependencies': [
],
'sources': [
'third_party/benchmark/src/benchmark.cc',
'third_party/benchmark/src/benchmark_api_internal.cc',
'third_party/benchmark/src/benchmark_main.cc',
'third_party/benchmark/src/benchmark_name.cc',
'third_party/benchmark/src/benchmark_register.cc',
'third_party/benchmark/src/benchmark_runner.cc',
'third_party/benchmark/src/colorprint.cc',
'third_party/benchmark/src/commandlineflags.cc',
'third_party/benchmark/src/complexity.cc',
'third_party/benchmark/src/console_reporter.cc',
'third_party/benchmark/src/counter.cc',
'third_party/benchmark/src/csv_reporter.cc',
'third_party/benchmark/src/json_reporter.cc',
'third_party/benchmark/src/perf_counters.cc',
'third_party/benchmark/src/reporter.cc',
'third_party/benchmark/src/sleep.cc',
'third_party/benchmark/src/statistics.cc',
'third_party/benchmark/src/string_util.cc',
'third_party/benchmark/src/sysinfo.cc',
'third_party/benchmark/src/timers.cc',
],
},
{
'target_name': 're2',
'type': 'static_library',
'dependencies': [
],
'sources': [
'third_party/re2/re2/bitstate.cc',
'third_party/re2/re2/compile.cc',
'third_party/re2/re2/dfa.cc',
'third_party/re2/re2/filtered_re2.cc',
'third_party/re2/re2/mimics_pcre.cc',
'third_party/re2/re2/nfa.cc',
'third_party/re2/re2/onepass.cc',
'third_party/re2/re2/parse.cc',
'third_party/re2/re2/perl_groups.cc',
'third_party/re2/re2/prefilter.cc',
'third_party/re2/re2/prefilter_tree.cc',
'third_party/re2/re2/prog.cc',
'third_party/re2/re2/re2.cc',
'third_party/re2/re2/regexp.cc',
'third_party/re2/re2/set.cc',
'third_party/re2/re2/simplify.cc',
'third_party/re2/re2/stringpiece.cc',
'third_party/re2/re2/tostring.cc',
'third_party/re2/re2/unicode_casefold.cc',
'third_party/re2/re2/unicode_groups.cc',
'third_party/re2/util/pcre.cc',
'third_party/re2/util/rune.cc',
'third_party/re2/util/strutil.cc',
],
},
{
'target_name': 'upb',
'type': 'static_library',
'dependencies': [
],
'sources': [
'third_party/upb/third_party/utf8_range/naive.c',
'third_party/upb/third_party/utf8_range/range2-neon.c',
'third_party/upb/third_party/utf8_range/range2-sse.c',
'third_party/upb/upb/decode_fast.c',
'third_party/upb/upb/decode.c',
'third_party/upb/upb/def.c',
'third_party/upb/upb/encode.c',
'third_party/upb/upb/json_encode.c',
'third_party/upb/upb/msg.c',
'third_party/upb/upb/reflection.c',
'third_party/upb/upb/table.c',
'third_party/upb/upb/text_encode.c',
'third_party/upb/upb/upb.c',
'src/core/ext/upb-generated/google/protobuf/descriptor.upb.c',
'src/core/ext/upbdefs-generated/google/protobuf/descriptor.upbdefs.c',
],
},
{
'target_name': 'z',
'type': 'static_library',
'dependencies': [
],
'sources': [
'third_party/zlib/adler32.c',
'third_party/zlib/compress.c',
'third_party/zlib/crc32.c',
'third_party/zlib/deflate.c',
'third_party/zlib/gzclose.c',
'third_party/zlib/gzlib.c',
'third_party/zlib/gzread.c',
'third_party/zlib/gzwrite.c',
'third_party/zlib/infback.c',
'third_party/zlib/inffast.c',
'third_party/zlib/inflate.c',
'third_party/zlib/inftrees.c',
'third_party/zlib/trees.c',
'third_party/zlib/uncompr.c',
'third_party/zlib/zutil.c',
],
},
]
}
| true | true |
f715296fb8250e6a9fc65fab4030ce645556e39d | 30,603 | py | Python | tests/httpwrappers/tests.py | ioinfinity/django | b6a0ab523751c13ae3eaec102de70f58f73a0d94 | [
"PSF-2.0",
"BSD-3-Clause"
] | null | null | null | tests/httpwrappers/tests.py | ioinfinity/django | b6a0ab523751c13ae3eaec102de70f58f73a0d94 | [
"PSF-2.0",
"BSD-3-Clause"
] | 1 | 2020-07-02T21:10:44.000Z | 2020-07-02T21:11:21.000Z | tests/httpwrappers/tests.py | ioinfinity/django | b6a0ab523751c13ae3eaec102de70f58f73a0d94 | [
"PSF-2.0",
"BSD-3-Clause"
] | 1 | 2020-08-11T18:46:32.000Z | 2020-08-11T18:46:32.000Z | # -*- encoding: utf-8 -*-
from __future__ import unicode_literals
import copy
import json
import os
import pickle
import unittest
import uuid
from django.core.exceptions import SuspiciousOperation
from django.core.serializers.json import DjangoJSONEncoder
from django.core.signals import request_finished
from django.db import close_old_connections
from django.http import (
BadHeaderError, HttpResponse, HttpResponseNotAllowed,
HttpResponseNotModified, HttpResponsePermanentRedirect,
HttpResponseRedirect, JsonResponse, QueryDict, SimpleCookie,
StreamingHttpResponse, parse_cookie,
)
from django.test import SimpleTestCase
from django.utils import six
from django.utils._os import upath
from django.utils.encoding import force_str
from django.utils.functional import lazystr
class QueryDictTests(unittest.TestCase):
def test_create_with_no_args(self):
self.assertEqual(QueryDict(), QueryDict(str('')))
def test_missing_key(self):
q = QueryDict()
with self.assertRaises(KeyError):
q.__getitem__('foo')
def test_immutability(self):
q = QueryDict()
with self.assertRaises(AttributeError):
q.__setitem__('something', 'bar')
with self.assertRaises(AttributeError):
q.setlist('foo', ['bar'])
with self.assertRaises(AttributeError):
q.appendlist('foo', ['bar'])
with self.assertRaises(AttributeError):
q.update({'foo': 'bar'})
with self.assertRaises(AttributeError):
q.pop('foo')
with self.assertRaises(AttributeError):
q.popitem()
with self.assertRaises(AttributeError):
q.clear()
def test_immutable_get_with_default(self):
q = QueryDict()
self.assertEqual(q.get('foo', 'default'), 'default')
def test_immutable_basic_operations(self):
q = QueryDict()
self.assertEqual(q.getlist('foo'), [])
if six.PY2:
self.assertIs(q.has_key('foo'), False)
self.assertNotIn('foo', q)
self.assertEqual(list(six.iteritems(q)), [])
self.assertEqual(list(six.iterlists(q)), [])
self.assertEqual(list(six.iterkeys(q)), [])
self.assertEqual(list(six.itervalues(q)), [])
self.assertEqual(len(q), 0)
self.assertEqual(q.urlencode(), '')
def test_single_key_value(self):
"""Test QueryDict with one key/value pair"""
q = QueryDict(str('foo=bar'))
self.assertEqual(q['foo'], 'bar')
with self.assertRaises(KeyError):
q.__getitem__('bar')
with self.assertRaises(AttributeError):
q.__setitem__('something', 'bar')
self.assertEqual(q.get('foo', 'default'), 'bar')
self.assertEqual(q.get('bar', 'default'), 'default')
self.assertEqual(q.getlist('foo'), ['bar'])
self.assertEqual(q.getlist('bar'), [])
with self.assertRaises(AttributeError):
q.setlist('foo', ['bar'])
with self.assertRaises(AttributeError):
q.appendlist('foo', ['bar'])
if six.PY2:
self.assertTrue(q.has_key('foo'))
self.assertIn('foo', q)
if six.PY2:
self.assertFalse(q.has_key('bar'))
self.assertNotIn('bar', q)
self.assertEqual(list(six.iteritems(q)), [('foo', 'bar')])
self.assertEqual(list(six.iterlists(q)), [('foo', ['bar'])])
self.assertEqual(list(six.iterkeys(q)), ['foo'])
self.assertEqual(list(six.itervalues(q)), ['bar'])
self.assertEqual(len(q), 1)
with self.assertRaises(AttributeError):
q.update({'foo': 'bar'})
with self.assertRaises(AttributeError):
q.pop('foo')
with self.assertRaises(AttributeError):
q.popitem()
with self.assertRaises(AttributeError):
q.clear()
with self.assertRaises(AttributeError):
q.setdefault('foo', 'bar')
self.assertEqual(q.urlencode(), 'foo=bar')
def test_urlencode(self):
q = QueryDict(mutable=True)
q['next'] = '/a&b/'
self.assertEqual(q.urlencode(), 'next=%2Fa%26b%2F')
self.assertEqual(q.urlencode(safe='/'), 'next=/a%26b/')
q = QueryDict(mutable=True)
q['next'] = '/t\xebst&key/'
self.assertEqual(q.urlencode(), 'next=%2Ft%C3%ABst%26key%2F')
self.assertEqual(q.urlencode(safe='/'), 'next=/t%C3%ABst%26key/')
def test_mutable_copy(self):
"""A copy of a QueryDict is mutable."""
q = QueryDict().copy()
with self.assertRaises(KeyError):
q.__getitem__("foo")
q['name'] = 'john'
self.assertEqual(q['name'], 'john')
def test_mutable_delete(self):
q = QueryDict(mutable=True)
q['name'] = 'john'
del q['name']
self.assertNotIn('name', q)
    def test_basic_mutable_operations(self):
        """All dict mutators work on a QueryDict created as mutable."""
        q = QueryDict(mutable=True)
        q['name'] = 'john'
        self.assertEqual(q.get('foo', 'default'), 'default')
        self.assertEqual(q.get('name', 'default'), 'john')
        self.assertEqual(q.getlist('name'), ['john'])
        self.assertEqual(q.getlist('foo'), [])
        q.setlist('foo', ['bar', 'baz'])
        # Single-value access returns the *last* value of a multi-valued key.
        self.assertEqual(q.get('foo', 'default'), 'baz')
        self.assertEqual(q.getlist('foo'), ['bar', 'baz'])
        q.appendlist('foo', 'another')
        self.assertEqual(q.getlist('foo'), ['bar', 'baz', 'another'])
        self.assertEqual(q['foo'], 'another')
        if six.PY2:
            self.assertTrue(q.has_key('foo'))
        self.assertIn('foo', q)
        self.assertListEqual(sorted(six.iteritems(q)),
                             [('foo', 'another'), ('name', 'john')])
        self.assertListEqual(sorted(six.iterlists(q)),
                             [('foo', ['bar', 'baz', 'another']), ('name', ['john'])])
        self.assertListEqual(sorted(six.iterkeys(q)),
                             ['foo', 'name'])
        self.assertListEqual(sorted(six.itervalues(q)),
                             ['another', 'john'])
        # update() appends to the value list rather than replacing it.
        q.update({'foo': 'hello'})
        self.assertEqual(q['foo'], 'hello')
        self.assertEqual(q.get('foo', 'not available'), 'hello')
        self.assertEqual(q.getlist('foo'), ['bar', 'baz', 'another', 'hello'])
        # pop() removes and returns the whole list of values for the key.
        self.assertEqual(q.pop('foo'), ['bar', 'baz', 'another', 'hello'])
        self.assertEqual(q.pop('foo', 'not there'), 'not there')
        self.assertEqual(q.get('foo', 'not there'), 'not there')
        self.assertEqual(q.setdefault('foo', 'bar'), 'bar')
        self.assertEqual(q['foo'], 'bar')
        self.assertEqual(q.getlist('foo'), ['bar'])
        # Dict ordering isn't guaranteed here, so accept either encoding.
        self.assertIn(q.urlencode(), ['foo=bar&name=john', 'name=john&foo=bar'])
        q.clear()
        self.assertEqual(len(q), 0)
    def test_multiple_keys(self):
        """Test QueryDict with two key/value pairs with same keys."""
        q = QueryDict(str('vote=yes&vote=no'))
        # Single-value access returns the *last* value for a repeated key.
        self.assertEqual(q['vote'], 'no')
        # Immutable: every mutating method raises AttributeError.
        with self.assertRaises(AttributeError):
            q.__setitem__('something', 'bar')
        self.assertEqual(q.get('vote', 'default'), 'no')
        self.assertEqual(q.get('foo', 'default'), 'default')
        self.assertEqual(q.getlist('vote'), ['yes', 'no'])
        self.assertEqual(q.getlist('foo'), [])
        with self.assertRaises(AttributeError):
            q.setlist('foo', ['bar', 'baz'])
        with self.assertRaises(AttributeError):
            q.setlist('foo', ['bar', 'baz'])
        with self.assertRaises(AttributeError):
            q.appendlist('foo', ['bar'])
        if six.PY2:
            self.assertIs(q.has_key('vote'), True)
        self.assertIn('vote', q)
        if six.PY2:
            self.assertIs(q.has_key('foo'), False)
        self.assertNotIn('foo', q)
        # A repeated key still counts as a single entry.
        self.assertEqual(list(six.iteritems(q)), [('vote', 'no')])
        self.assertEqual(list(six.iterlists(q)), [('vote', ['yes', 'no'])])
        self.assertEqual(list(six.iterkeys(q)), ['vote'])
        self.assertEqual(list(six.itervalues(q)), ['no'])
        self.assertEqual(len(q), 1)
        with self.assertRaises(AttributeError):
            q.update({'foo': 'bar'})
        with self.assertRaises(AttributeError):
            q.pop('foo')
        with self.assertRaises(AttributeError):
            q.popitem()
        with self.assertRaises(AttributeError):
            q.clear()
        with self.assertRaises(AttributeError):
            q.setdefault('foo', 'bar')
        with self.assertRaises(AttributeError):
            q.__delitem__('vote')
    if six.PY2:
        def test_invalid_input_encoding(self):
            """
            QueryDicts must be able to handle invalid input encoding (in this
            case, bad UTF-8 encoding), falling back to ISO-8859-1 decoding.
            This test doesn't apply under Python 3 because the URL is a string
            and not a bytestring.
            """
            # b'\xff' is not valid UTF-8, so decoding falls back to
            # ISO-8859-1, where b'\xff' maps to the code point '\xff'.
            q = QueryDict(str(b'foo=bar&foo=\xff'))
            self.assertEqual(q['foo'], '\xff')
            self.assertEqual(q.getlist('foo'), ['bar', '\xff'])
def test_pickle(self):
q = QueryDict()
q1 = pickle.loads(pickle.dumps(q, 2))
self.assertEqual(q, q1)
q = QueryDict(str('a=b&c=d'))
q1 = pickle.loads(pickle.dumps(q, 2))
self.assertEqual(q, q1)
q = QueryDict(str('a=b&c=d&a=1'))
q1 = pickle.loads(pickle.dumps(q, 2))
self.assertEqual(q, q1)
def test_update_from_querydict(self):
"""Regression test for #8278: QueryDict.update(QueryDict)"""
x = QueryDict(str("a=1&a=2"), mutable=True)
y = QueryDict(str("a=3&a=4"))
x.update(y)
self.assertEqual(x.getlist('a'), ['1', '2', '3', '4'])
    def test_non_default_encoding(self):
        """#13572 - QueryDict with a non-default encoding"""
        # %A4 is the euro sign in ISO-8859-15.
        q = QueryDict(str('cur=%A4'), encoding='iso-8859-15')
        self.assertEqual(q.encoding, 'iso-8859-15')
        self.assertEqual(list(six.iteritems(q)), [('cur', '€')])
        self.assertEqual(q.urlencode(), 'cur=%A4')
        # Copies (QueryDict.copy, copy.copy, copy.deepcopy) keep the encoding.
        q = q.copy()
        self.assertEqual(q.encoding, 'iso-8859-15')
        self.assertEqual(list(six.iteritems(q)), [('cur', '€')])
        self.assertEqual(q.urlencode(), 'cur=%A4')
        self.assertEqual(copy.copy(q).encoding, 'iso-8859-15')
        self.assertEqual(copy.deepcopy(q).encoding, 'iso-8859-15')
class HttpResponseTests(unittest.TestCase):
    """Tests for HttpResponse header handling, content coercion, and the
    file/stream-like write interface."""
    def test_headers_type(self):
        """Header names and values are normalized to native strings."""
        r = HttpResponse()
        # The following tests explicitly test types in addition to values
        # because in Python 2 u'foo' == b'foo'.
        # ASCII unicode or bytes values are converted to native strings.
        r['key'] = 'test'
        self.assertEqual(r['key'], str('test'))
        self.assertIsInstance(r['key'], str)
        r['key'] = 'test'.encode('ascii')
        self.assertEqual(r['key'], str('test'))
        self.assertIsInstance(r['key'], str)
        self.assertIn(b'test', r.serialize_headers())
        # Latin-1 unicode or bytes values are also converted to native strings.
        r['key'] = 'café'
        self.assertEqual(r['key'], force_str('café', 'latin-1'))
        self.assertIsInstance(r['key'], str)
        r['key'] = 'café'.encode('latin-1')
        self.assertEqual(r['key'], force_str('café', 'latin-1'))
        self.assertIsInstance(r['key'], str)
        self.assertIn('café'.encode('latin-1'), r.serialize_headers())
        # Other unicode values are MIME-encoded (there's no way to pass them as bytes).
        r['key'] = '†'
        self.assertEqual(r['key'], str('=?utf-8?b?4oCg?='))
        self.assertIsInstance(r['key'], str)
        self.assertIn(b'=?utf-8?b?4oCg?=', r.serialize_headers())
        # The response also converts unicode or bytes keys to strings, but requires
        # them to contain ASCII
        r = HttpResponse()
        del r['Content-Type']
        r['foo'] = 'bar'
        l = list(r.items())
        self.assertEqual(len(l), 1)
        self.assertEqual(l[0], ('foo', 'bar'))
        self.assertIsInstance(l[0][0], str)
        r = HttpResponse()
        del r['Content-Type']
        r[b'foo'] = 'bar'
        l = list(r.items())
        self.assertEqual(len(l), 1)
        self.assertEqual(l[0], ('foo', 'bar'))
        self.assertIsInstance(l[0][0], str)
        # Non-ASCII header *names* are rejected outright.
        r = HttpResponse()
        with self.assertRaises(UnicodeError):
            r.__setitem__('føø', 'bar')
        with self.assertRaises(UnicodeError):
            r.__setitem__('føø'.encode('utf-8'), 'bar')
    def test_long_line(self):
        """Long header values must not gain embedded newlines."""
        # Bug #20889: long lines trigger newlines to be added to headers
        # (which is not allowed due to bug #10188)
        h = HttpResponse()
        f = 'zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz a\xcc\x88'.encode('latin-1')
        f = f.decode('utf-8')
        h['Content-Disposition'] = 'attachment; filename="%s"' % f
        # This one is triggering http://bugs.python.org/issue20747, that is Python
        # will itself insert a newline in the header
        h['Content-Disposition'] = 'attachment; filename="EdelRot_Blu\u0308te (3)-0.JPG"'
    def test_newlines_in_headers(self):
        """Header names containing CR or LF raise BadHeaderError."""
        # Bug #10188: Do not allow newlines in headers (CR or LF)
        r = HttpResponse()
        with self.assertRaises(BadHeaderError):
            r.__setitem__('test\rstr', 'test')
        with self.assertRaises(BadHeaderError):
            r.__setitem__('test\nstr', 'test')
    def test_dict_behavior(self):
        """
        Test for bug #14020: Make HttpResponse.get work like dict.get
        """
        r = HttpResponse()
        self.assertIsNone(r.get('test'))
    def test_non_string_content(self):
        """Non-string content is coerced to its bytestring representation."""
        # Bug 16494: HttpResponse should behave consistently with non-strings
        r = HttpResponse(12345)
        self.assertEqual(r.content, b'12345')
        # test content via property
        r = HttpResponse()
        r.content = 12345
        self.assertEqual(r.content, b'12345')
    def test_iter_content(self):
        """Iterable content is joined into a single bytestring."""
        r = HttpResponse(['abc', 'def', 'ghi'])
        self.assertEqual(r.content, b'abcdefghi')
        # test iter content via property
        r = HttpResponse()
        r.content = ['idan', 'alex', 'jacob']
        self.assertEqual(r.content, b'idanalexjacob')
        r = HttpResponse()
        r.content = [1, 2, 3]
        self.assertEqual(r.content, b'123')
        # test odd inputs
        r = HttpResponse()
        r.content = ['1', '2', 3, '\u079e']
        # '\xde\x9e' == unichr(1950).encode('utf-8')
        self.assertEqual(r.content, b'123\xde\x9e')
        # .content can safely be accessed multiple times.
        r = HttpResponse(iter(['hello', 'world']))
        self.assertEqual(r.content, r.content)
        self.assertEqual(r.content, b'helloworld')
        # __iter__ can safely be called multiple times (#20187).
        self.assertEqual(b''.join(r), b'helloworld')
        self.assertEqual(b''.join(r), b'helloworld')
        # Accessing .content still works.
        self.assertEqual(r.content, b'helloworld')
        # Accessing .content also works if the response was iterated first.
        r = HttpResponse(iter(['hello', 'world']))
        self.assertEqual(b''.join(r), b'helloworld')
        self.assertEqual(r.content, b'helloworld')
        # Additional content can be written to the response.
        r = HttpResponse(iter(['hello', 'world']))
        self.assertEqual(r.content, b'helloworld')
        r.write('!')
        self.assertEqual(r.content, b'helloworld!')
    def test_iterator_isnt_rewound(self):
        """A response iterator is exhausted after one pass."""
        # Regression test for #13222
        r = HttpResponse('abc')
        i = iter(r)
        self.assertEqual(list(i), [b'abc'])
        self.assertEqual(list(i), [])
    def test_lazy_content(self):
        """Lazy strings are evaluated when content is accessed."""
        r = HttpResponse(lazystr('helloworld'))
        self.assertEqual(r.content, b'helloworld')
    def test_file_interface(self):
        """write()/tell() behave like a file; tell() counts bytes."""
        r = HttpResponse()
        r.write(b"hello")
        self.assertEqual(r.tell(), 5)
        # "привет" is 12 bytes in UTF-8, so tell() advances 5 -> 17.
        r.write("привет")
        self.assertEqual(r.tell(), 17)
        r = HttpResponse(['abc'])
        r.write('def')
        self.assertEqual(r.tell(), 6)
        self.assertEqual(r.content, b'abcdef')
        # with Content-Encoding header
        r = HttpResponse()
        r['Content-Encoding'] = 'winning'
        r.write(b'abc')
        r.write(b'def')
        self.assertEqual(r.content, b'abcdef')
    def test_stream_interface(self):
        """getvalue()/writable()/writelines() follow the io stream API."""
        r = HttpResponse('asdf')
        self.assertEqual(r.getvalue(), b'asdf')
        r = HttpResponse()
        self.assertIs(r.writable(), True)
        r.writelines(['foo\n', 'bar\n', 'baz\n'])
        self.assertEqual(r.content, b'foo\nbar\nbaz\n')
    def test_unsafe_redirect(self):
        """Redirects to unsafe schemes raise SuspiciousOperation."""
        bad_urls = [
            'data:text/html,<script>window.alert("xss")</script>',
            'mailto:test@example.com',
            'file:///etc/passwd',
        ]
        for url in bad_urls:
            with self.assertRaises(SuspiciousOperation):
                HttpResponseRedirect(url)
            with self.assertRaises(SuspiciousOperation):
                HttpResponsePermanentRedirect(url)
class HttpResponseSubclassesTests(SimpleTestCase):
    """Tests for the specialized response subclasses: redirects,
    304 Not Modified, and 405 Not Allowed."""
    def test_redirect(self):
        """HttpResponseRedirect returns 302 and exposes a `url` attribute."""
        response = HttpResponseRedirect('/redirected/')
        self.assertEqual(response.status_code, 302)
        # Test that standard HttpResponse init args can be used
        response = HttpResponseRedirect(
            '/redirected/',
            content='The resource has temporarily moved',
            content_type='text/html',
        )
        self.assertContains(response, 'The resource has temporarily moved', status_code=302)
        # Test that url attribute is right
        self.assertEqual(response.url, response['Location'])
    def test_redirect_lazy(self):
        """Make sure HttpResponseRedirect works with lazy strings."""
        r = HttpResponseRedirect(lazystr('/redirected/'))
        self.assertEqual(r.url, '/redirected/')
    def test_redirect_repr(self):
        """repr() includes status code, content type, and target URL."""
        response = HttpResponseRedirect('/redirected/')
        expected = '<HttpResponseRedirect status_code=302, "text/html; charset=utf-8", url="/redirected/">'
        self.assertEqual(repr(response), expected)
    def test_not_modified(self):
        """HttpResponseNotModified is 304 and refuses a content body."""
        response = HttpResponseNotModified()
        self.assertEqual(response.status_code, 304)
        # 304 responses should not have content/content-type
        with self.assertRaises(AttributeError):
            response.content = "Hello dear"
        self.assertNotIn('content-type', response)
    def test_not_allowed(self):
        """HttpResponseNotAllowed returns 405 for the given methods."""
        response = HttpResponseNotAllowed(['GET'])
        self.assertEqual(response.status_code, 405)
        # Test that standard HttpResponse init args can be used
        response = HttpResponseNotAllowed(['GET'], content='Only the GET method is allowed', content_type='text/html')
        self.assertContains(response, 'Only the GET method is allowed', status_code=405)
    def test_not_allowed_repr(self):
        """repr() lists the permitted methods."""
        response = HttpResponseNotAllowed(['GET', 'OPTIONS'], content_type='text/plain')
        expected = '<HttpResponseNotAllowed [GET, OPTIONS] status_code=405, "text/plain">'
        self.assertEqual(repr(response), expected)
class JsonResponseTests(SimpleTestCase):
    """Tests for JsonResponse serialization behavior."""
    def test_json_response_non_ascii(self):
        """Non-ASCII data round-trips through JsonResponse."""
        data = {'key': 'łóżko'}
        response = JsonResponse(data)
        self.assertEqual(json.loads(response.content.decode()), data)
    def test_json_response_raises_type_error_with_default_setting(self):
        """Non-dict payloads are rejected unless safe=False is passed."""
        with self.assertRaisesMessage(
            TypeError,
            'In order to allow non-dict objects to be serialized set the '
            'safe parameter to False'
        ):
            JsonResponse([1, 2, 3])
    def test_json_response_text(self):
        """A bare string can be serialized with safe=False."""
        response = JsonResponse('foobar', safe=False)
        self.assertEqual(json.loads(response.content.decode()), 'foobar')
    def test_json_response_list(self):
        """A list can be serialized with safe=False."""
        response = JsonResponse(['foo', 'bar'], safe=False)
        self.assertEqual(json.loads(response.content.decode()), ['foo', 'bar'])
    def test_json_response_uuid(self):
        """UUIDs are serialized to their string form by DjangoJSONEncoder."""
        u = uuid.uuid4()
        response = JsonResponse(u, safe=False)
        self.assertEqual(json.loads(response.content.decode()), str(u))
    def test_json_response_custom_encoder(self):
        """A custom encoder class fully controls the serialized output."""
        class CustomDjangoJSONEncoder(DjangoJSONEncoder):
            def encode(self, o):
                return json.dumps({'foo': 'bar'})
        response = JsonResponse({}, encoder=CustomDjangoJSONEncoder)
        self.assertEqual(json.loads(response.content.decode()), {'foo': 'bar'})
    def test_json_response_passing_arguments_to_json_dumps(self):
        """json_dumps_params are forwarded to json.dumps()."""
        response = JsonResponse({'foo': 'bar'}, json_dumps_params={'indent': 2})
        self.assertEqual(response.content.decode(), '{\n  "foo": "bar"\n}')
class StreamingHttpResponseTests(SimpleTestCase):
    """Tests for StreamingHttpResponse's iterator-based content handling."""
    def test_streaming_response(self):
        """streaming_content is consumed lazily and yields bytestrings."""
        r = StreamingHttpResponse(iter(['hello', 'world']))
        # iterating over the response itself yields bytestring chunks.
        chunks = list(r)
        self.assertEqual(chunks, [b'hello', b'world'])
        for chunk in chunks:
            self.assertIsInstance(chunk, six.binary_type)
        # and the response can only be iterated once.
        self.assertEqual(list(r), [])
        # even when a sequence that can be iterated many times, like a list,
        # is given as content.
        r = StreamingHttpResponse(['abc', 'def'])
        self.assertEqual(list(r), [b'abc', b'def'])
        self.assertEqual(list(r), [])
        # iterating over Unicode strings still yields bytestring chunks.
        r.streaming_content = iter(['hello', 'café'])
        chunks = list(r)
        # '\xc3\xa9' == unichr(233).encode('utf-8')
        self.assertEqual(chunks, [b'hello', b'caf\xc3\xa9'])
        for chunk in chunks:
            self.assertIsInstance(chunk, six.binary_type)
        # streaming responses don't have a `content` attribute.
        self.assertFalse(hasattr(r, 'content'))
        # and you can't accidentally assign to a `content` attribute.
        with self.assertRaises(AttributeError):
            r.content = 'xyz'
        # but they do have a `streaming_content` attribute.
        self.assertTrue(hasattr(r, 'streaming_content'))
        # that exists so we can check if a response is streaming, and wrap or
        # replace the content iterator.
        r.streaming_content = iter(['abc', 'def'])
        r.streaming_content = (chunk.upper() for chunk in r.streaming_content)
        self.assertEqual(list(r), [b'ABC', b'DEF'])
        # coercing a streaming response to bytes doesn't return a complete HTTP
        # message like a regular response does. it only gives us the headers.
        r = StreamingHttpResponse(iter(['hello', 'world']))
        self.assertEqual(
            six.binary_type(r), b'Content-Type: text/html; charset=utf-8')
        # and this won't consume its content.
        self.assertEqual(list(r), [b'hello', b'world'])
        # additional content cannot be written to the response.
        r = StreamingHttpResponse(iter(['hello', 'world']))
        with self.assertRaises(Exception):
            r.write('!')
        # and we can't tell the current position.
        with self.assertRaises(Exception):
            r.tell()
        # getvalue() joins all chunks into a single bytestring.
        r = StreamingHttpResponse(iter(['hello', 'world']))
        self.assertEqual(r.getvalue(), b'helloworld')
class FileCloseTests(SimpleTestCase):
    """File objects used as response content are closed with the response."""
    def setUp(self):
        # Disable the request_finished signal during this test
        # to avoid interfering with the database connection.
        request_finished.disconnect(close_old_connections)
    def tearDown(self):
        # Reconnect the handler disconnected in setUp().
        request_finished.connect(close_old_connections)
    def test_response(self):
        """HttpResponse closes file content once it has been consumed."""
        filename = os.path.join(os.path.dirname(upath(__file__)), 'abc.txt')
        # HttpResponse consumes the file content eagerly, so the assert below
        # shows the file is already closed right after construction.
        file1 = open(filename)
        r = HttpResponse(file1)
        self.assertTrue(file1.closed)
        r.close()
        # when multiple file are assigned as content, make sure they are all
        # closed with the response.
        file1 = open(filename)
        file2 = open(filename)
        r = HttpResponse(file1)
        r.content = file2
        self.assertTrue(file1.closed)
        self.assertTrue(file2.closed)
    def test_streaming_response(self):
        """StreamingHttpResponse defers closing until response.close()."""
        filename = os.path.join(os.path.dirname(upath(__file__)), 'abc.txt')
        # file isn't closed until we close the response.
        file1 = open(filename)
        r = StreamingHttpResponse(file1)
        self.assertFalse(file1.closed)
        r.close()
        self.assertTrue(file1.closed)
        # when multiple file are assigned as content, make sure they are all
        # closed with the response.
        file1 = open(filename)
        file2 = open(filename)
        r = StreamingHttpResponse(file1)
        r.streaming_content = file2
        self.assertFalse(file1.closed)
        self.assertFalse(file2.closed)
        r.close()
        self.assertTrue(file1.closed)
        self.assertTrue(file2.closed)
class CookieTests(unittest.TestCase):
    """Tests for SimpleCookie encoding/decoding and parse_cookie()."""
    def test_encode(self):
        """
        Test that we don't output tricky characters in encoded value
        """
        c = SimpleCookie()
        c['test'] = "An,awkward;value"
        self.assertNotIn(";", c.output().rstrip(';'))  # IE compat
        self.assertNotIn(",", c.output().rstrip(';'))  # Safari compat
    def test_decode(self):
        """
        Test that we can still preserve semi-colons and commas
        """
        c = SimpleCookie()
        c['test'] = "An,awkward;value"
        c2 = SimpleCookie()
        # output() starts with 'Set-Cookie: '; slice off that prefix.
        c2.load(c.output()[12:])
        self.assertEqual(c['test'].value, c2['test'].value)
        c3 = parse_cookie(c.output()[12:])
        self.assertEqual(c['test'].value, c3['test'])
    def test_decode_2(self):
        """
        Test that we haven't broken normal encoding
        """
        c = SimpleCookie()
        c['test'] = b"\xf0"
        c2 = SimpleCookie()
        # output() starts with 'Set-Cookie: '; slice off that prefix.
        c2.load(c.output()[12:])
        self.assertEqual(c['test'].value, c2['test'].value)
        c3 = parse_cookie(c.output()[12:])
        self.assertEqual(c['test'].value, c3['test'])
    def test_nonstandard_keys(self):
        """
        Test that a single non-standard cookie name doesn't affect all cookies. Ticket #13007.
        """
        self.assertIn('good_cookie', parse_cookie('good_cookie=yes;bad:cookie=yes').keys())
    def test_repeated_nonstandard_keys(self):
        """
        Test that a repeated non-standard name doesn't affect all cookies. Ticket #15852
        """
        self.assertIn('good_cookie', parse_cookie('a:=b; a:=c; good_cookie=yes').keys())
    def test_python_cookies(self):
        """
        Test cases copied from Python's Lib/test/test_http_cookies.py
        """
        self.assertEqual(parse_cookie('chips=ahoy; vienna=finger'), {'chips': 'ahoy', 'vienna': 'finger'})
        # Here parse_cookie() differs from Python's cookie parsing in that it
        # treats all semicolons as delimiters, even within quotes.
        self.assertEqual(
            parse_cookie('keebler="E=mc2; L=\\"Loves\\"; fudge=\\012;"'),
            {'keebler': '"E=mc2', 'L': '\\"Loves\\"', 'fudge': '\\012', '': '"'}
        )
        # Illegal cookies that have an '=' char in an unquoted value.
        self.assertEqual(parse_cookie('keebler=E=mc2'), {'keebler': 'E=mc2'})
        # Cookies with ':' character in their name.
        self.assertEqual(parse_cookie('key:term=value:term'), {'key:term': 'value:term'})
        # Cookies with '[' and ']'.
        self.assertEqual(parse_cookie('a=b; c=[; d=r; f=h'), {'a': 'b', 'c': '[', 'd': 'r', 'f': 'h'})
    def test_cookie_edgecases(self):
        """Edge cases allowed or tolerated by RFC6265."""
        # Cookies that RFC6265 allows.
        self.assertEqual(parse_cookie('a=b; Domain=example.com'), {'a': 'b', 'Domain': 'example.com'})
        # parse_cookie() has historically kept only the last cookie with the
        # same name.
        self.assertEqual(parse_cookie('a=b; h=i; a=c'), {'a': 'c', 'h': 'i'})
    def test_invalid_cookies(self):
        """
        Cookie strings that go against RFC6265 but browsers will send if set
        via document.cookie.
        """
        # Chunks without an equals sign appear as unnamed values per
        # https://bugzilla.mozilla.org/show_bug.cgi?id=169091
        self.assertIn('django_language', parse_cookie('abc=def; unnamed; django_language=en').keys())
        # Even a double quote may be an unnamed value.
        self.assertEqual(parse_cookie('a=b; "; c=d'), {'a': 'b', '': '"', 'c': 'd'})
        # Spaces in names and values, and an equals sign in values.
        self.assertEqual(parse_cookie('a b c=d e = f; gh=i'), {'a b c': 'd e = f', 'gh': 'i'})
        # More characters the spec forbids.
        self.assertEqual(parse_cookie('a   b,c<>@:/[]?{}=d  "  =e,f g'), {'a   b,c<>@:/[]?{}': 'd  "  =e,f g'})
        # Unicode characters. The spec only allows ASCII.
        self.assertEqual(parse_cookie('saint=André Bessette'), {'saint': force_str('André Bessette')})
        # Browsers don't send extra whitespace or semicolons in Cookie headers,
        # but parse_cookie() should parse whitespace the same way
        # document.cookie parses whitespace.
        self.assertEqual(parse_cookie('  =  b  ;  ;  =  ;   c  =  ;  '), {'': 'b', 'c': ''})
    def test_httponly_after_load(self):
        """
        Test that we can use httponly attribute on cookies that we load
        """
        c = SimpleCookie()
        c.load("name=val")
        c['name']['httponly'] = True
        self.assertTrue(c['name']['httponly'])
    def test_load_dict(self):
        """SimpleCookie.load() accepts a plain dict as well as a string."""
        c = SimpleCookie()
        c.load({'name': 'val'})
        self.assertEqual(c['name'].value, 'val')
    @unittest.skipUnless(six.PY2, "PY3 throws an exception on invalid cookie keys.")
    def test_bad_cookie(self):
        """
        Regression test for #18403
        """
        r = HttpResponse()
        r.set_cookie("a:.b/", 1)
        self.assertEqual(len(r.cookies.bad_cookies), 1)
    def test_pickle(self):
        """A loaded SimpleCookie survives every available pickle protocol."""
        rawdata = 'Customer="WILE_E_COYOTE"; Path=/acme; Version=1'
        expected_output = 'Set-Cookie: %s' % rawdata
        C = SimpleCookie()
        C.load(rawdata)
        self.assertEqual(C.output(), expected_output)
        for proto in range(pickle.HIGHEST_PROTOCOL + 1):
            C1 = pickle.loads(pickle.dumps(C, protocol=proto))
            self.assertEqual(C1.output(), expected_output)
| 38.935115 | 118 | 0.60151 |
from __future__ import unicode_literals
import copy
import json
import os
import pickle
import unittest
import uuid
from django.core.exceptions import SuspiciousOperation
from django.core.serializers.json import DjangoJSONEncoder
from django.core.signals import request_finished
from django.db import close_old_connections
from django.http import (
BadHeaderError, HttpResponse, HttpResponseNotAllowed,
HttpResponseNotModified, HttpResponsePermanentRedirect,
HttpResponseRedirect, JsonResponse, QueryDict, SimpleCookie,
StreamingHttpResponse, parse_cookie,
)
from django.test import SimpleTestCase
from django.utils import six
from django.utils._os import upath
from django.utils.encoding import force_str
from django.utils.functional import lazystr
class QueryDictTests(unittest.TestCase):
    """Tests for QueryDict: immutability, multi-valued keys, encoding,
    copying, and pickling."""
    def test_create_with_no_args(self):
        """A no-arg QueryDict equals one built from the empty string."""
        self.assertEqual(QueryDict(), QueryDict(str('')))
    def test_missing_key(self):
        """Accessing a missing key raises KeyError, like a dict."""
        q = QueryDict()
        with self.assertRaises(KeyError):
            q.__getitem__('foo')
    def test_immutability(self):
        """Every mutating method on an immutable QueryDict raises
        AttributeError."""
        q = QueryDict()
        with self.assertRaises(AttributeError):
            q.__setitem__('something', 'bar')
        with self.assertRaises(AttributeError):
            q.setlist('foo', ['bar'])
        with self.assertRaises(AttributeError):
            q.appendlist('foo', ['bar'])
        with self.assertRaises(AttributeError):
            q.update({'foo': 'bar'})
        with self.assertRaises(AttributeError):
            q.pop('foo')
        with self.assertRaises(AttributeError):
            q.popitem()
        with self.assertRaises(AttributeError):
            q.clear()
    def test_immutable_get_with_default(self):
        """get() with a default still works on an immutable QueryDict."""
        q = QueryDict()
        self.assertEqual(q.get('foo', 'default'), 'default')
    def test_immutable_basic_operations(self):
        """Read-only operations all work on an empty immutable QueryDict."""
        q = QueryDict()
        self.assertEqual(q.getlist('foo'), [])
        if six.PY2:
            self.assertIs(q.has_key('foo'), False)
        self.assertNotIn('foo', q)
        self.assertEqual(list(six.iteritems(q)), [])
        self.assertEqual(list(six.iterlists(q)), [])
        self.assertEqual(list(six.iterkeys(q)), [])
        self.assertEqual(list(six.itervalues(q)), [])
        self.assertEqual(len(q), 0)
        self.assertEqual(q.urlencode(), '')
    def test_single_key_value(self):
        """Test QueryDict with one key/value pair"""
        q = QueryDict(str('foo=bar'))
        self.assertEqual(q['foo'], 'bar')
        with self.assertRaises(KeyError):
            q.__getitem__('bar')
        with self.assertRaises(AttributeError):
            q.__setitem__('something', 'bar')
        self.assertEqual(q.get('foo', 'default'), 'bar')
        self.assertEqual(q.get('bar', 'default'), 'default')
        self.assertEqual(q.getlist('foo'), ['bar'])
        self.assertEqual(q.getlist('bar'), [])
        with self.assertRaises(AttributeError):
            q.setlist('foo', ['bar'])
        with self.assertRaises(AttributeError):
            q.appendlist('foo', ['bar'])
        if six.PY2:
            self.assertTrue(q.has_key('foo'))
        self.assertIn('foo', q)
        if six.PY2:
            self.assertFalse(q.has_key('bar'))
        self.assertNotIn('bar', q)
        self.assertEqual(list(six.iteritems(q)), [('foo', 'bar')])
        self.assertEqual(list(six.iterlists(q)), [('foo', ['bar'])])
        self.assertEqual(list(six.iterkeys(q)), ['foo'])
        self.assertEqual(list(six.itervalues(q)), ['bar'])
        self.assertEqual(len(q), 1)
        with self.assertRaises(AttributeError):
            q.update({'foo': 'bar'})
        with self.assertRaises(AttributeError):
            q.pop('foo')
        with self.assertRaises(AttributeError):
            q.popitem()
        with self.assertRaises(AttributeError):
            q.clear()
        with self.assertRaises(AttributeError):
            q.setdefault('foo', 'bar')
        self.assertEqual(q.urlencode(), 'foo=bar')
    def test_urlencode(self):
        """urlencode() percent-encodes values; `safe` exempts characters."""
        q = QueryDict(mutable=True)
        q['next'] = '/a&b/'
        self.assertEqual(q.urlencode(), 'next=%2Fa%26b%2F')
        self.assertEqual(q.urlencode(safe='/'), 'next=/a%26b/')
        q = QueryDict(mutable=True)
        q['next'] = '/t\xebst&key/'
        self.assertEqual(q.urlencode(), 'next=%2Ft%C3%ABst%26key%2F')
        self.assertEqual(q.urlencode(safe='/'), 'next=/t%C3%ABst%26key/')
    def test_mutable_copy(self):
        """A copy of a QueryDict is mutable."""
        q = QueryDict().copy()
        with self.assertRaises(KeyError):
            q.__getitem__("foo")
        q['name'] = 'john'
        self.assertEqual(q['name'], 'john')
    def test_mutable_delete(self):
        """Entries can be removed from a mutable QueryDict with ``del``."""
        q = QueryDict(mutable=True)
        q['name'] = 'john'
        del q['name']
        self.assertNotIn('name', q)
    def test_basic_mutable_operations(self):
        """All dict mutators work on a QueryDict created as mutable."""
        q = QueryDict(mutable=True)
        q['name'] = 'john'
        self.assertEqual(q.get('foo', 'default'), 'default')
        self.assertEqual(q.get('name', 'default'), 'john')
        self.assertEqual(q.getlist('name'), ['john'])
        self.assertEqual(q.getlist('foo'), [])
        q.setlist('foo', ['bar', 'baz'])
        self.assertEqual(q.get('foo', 'default'), 'baz')
        self.assertEqual(q.getlist('foo'), ['bar', 'baz'])
        q.appendlist('foo', 'another')
        self.assertEqual(q.getlist('foo'), ['bar', 'baz', 'another'])
        self.assertEqual(q['foo'], 'another')
        if six.PY2:
            self.assertTrue(q.has_key('foo'))
        self.assertIn('foo', q)
        self.assertListEqual(sorted(six.iteritems(q)),
                             [('foo', 'another'), ('name', 'john')])
        self.assertListEqual(sorted(six.iterlists(q)),
                             [('foo', ['bar', 'baz', 'another']), ('name', ['john'])])
        self.assertListEqual(sorted(six.iterkeys(q)),
                             ['foo', 'name'])
        self.assertListEqual(sorted(six.itervalues(q)),
                             ['another', 'john'])
        q.update({'foo': 'hello'})
        self.assertEqual(q['foo'], 'hello')
        self.assertEqual(q.get('foo', 'not available'), 'hello')
        self.assertEqual(q.getlist('foo'), ['bar', 'baz', 'another', 'hello'])
        self.assertEqual(q.pop('foo'), ['bar', 'baz', 'another', 'hello'])
        self.assertEqual(q.pop('foo', 'not there'), 'not there')
        self.assertEqual(q.get('foo', 'not there'), 'not there')
        self.assertEqual(q.setdefault('foo', 'bar'), 'bar')
        self.assertEqual(q['foo'], 'bar')
        self.assertEqual(q.getlist('foo'), ['bar'])
        self.assertIn(q.urlencode(), ['foo=bar&name=john', 'name=john&foo=bar'])
        q.clear()
        self.assertEqual(len(q), 0)
    def test_multiple_keys(self):
        """Test QueryDict with two key/value pairs with same keys."""
        q = QueryDict(str('vote=yes&vote=no'))
        self.assertEqual(q['vote'], 'no')
        with self.assertRaises(AttributeError):
            q.__setitem__('something', 'bar')
        self.assertEqual(q.get('vote', 'default'), 'no')
        self.assertEqual(q.get('foo', 'default'), 'default')
        self.assertEqual(q.getlist('vote'), ['yes', 'no'])
        self.assertEqual(q.getlist('foo'), [])
        with self.assertRaises(AttributeError):
            q.setlist('foo', ['bar', 'baz'])
        with self.assertRaises(AttributeError):
            q.setlist('foo', ['bar', 'baz'])
        with self.assertRaises(AttributeError):
            q.appendlist('foo', ['bar'])
        if six.PY2:
            self.assertIs(q.has_key('vote'), True)
        self.assertIn('vote', q)
        if six.PY2:
            self.assertIs(q.has_key('foo'), False)
        self.assertNotIn('foo', q)
        self.assertEqual(list(six.iteritems(q)), [('vote', 'no')])
        self.assertEqual(list(six.iterlists(q)), [('vote', ['yes', 'no'])])
        self.assertEqual(list(six.iterkeys(q)), ['vote'])
        self.assertEqual(list(six.itervalues(q)), ['no'])
        self.assertEqual(len(q), 1)
        with self.assertRaises(AttributeError):
            q.update({'foo': 'bar'})
        with self.assertRaises(AttributeError):
            q.pop('foo')
        with self.assertRaises(AttributeError):
            q.popitem()
        with self.assertRaises(AttributeError):
            q.clear()
        with self.assertRaises(AttributeError):
            q.setdefault('foo', 'bar')
        with self.assertRaises(AttributeError):
            q.__delitem__('vote')
    if six.PY2:
        def test_invalid_input_encoding(self):
            """Invalid UTF-8 input falls back to ISO-8859-1 decoding
            (Python 2 only, where the URL is a bytestring)."""
            q = QueryDict(str(b'foo=bar&foo=\xff'))
            self.assertEqual(q['foo'], '\xff')
            self.assertEqual(q.getlist('foo'), ['bar', '\xff'])
    def test_pickle(self):
        """A QueryDict round-trips unchanged through pickle protocol 2."""
        q = QueryDict()
        q1 = pickle.loads(pickle.dumps(q, 2))
        self.assertEqual(q, q1)
        q = QueryDict(str('a=b&c=d'))
        q1 = pickle.loads(pickle.dumps(q, 2))
        self.assertEqual(q, q1)
        q = QueryDict(str('a=b&c=d&a=1'))
        q1 = pickle.loads(pickle.dumps(q, 2))
        self.assertEqual(q, q1)
    def test_update_from_querydict(self):
        """Regression test for #8278: QueryDict.update(QueryDict)"""
        x = QueryDict(str("a=1&a=2"), mutable=True)
        y = QueryDict(str("a=3&a=4"))
        x.update(y)
        self.assertEqual(x.getlist('a'), ['1', '2', '3', '4'])
    def test_non_default_encoding(self):
        """#13572 - QueryDict with a non-default encoding"""
        q = QueryDict(str('cur=%A4'), encoding='iso-8859-15')
        self.assertEqual(q.encoding, 'iso-8859-15')
        self.assertEqual(list(six.iteritems(q)), [('cur', '€')])
        self.assertEqual(q.urlencode(), 'cur=%A4')
        q = q.copy()
        self.assertEqual(q.encoding, 'iso-8859-15')
        self.assertEqual(list(six.iteritems(q)), [('cur', '€')])
        self.assertEqual(q.urlencode(), 'cur=%A4')
        self.assertEqual(copy.copy(q).encoding, 'iso-8859-15')
        self.assertEqual(copy.deepcopy(q).encoding, 'iso-8859-15')
class HttpResponseTests(unittest.TestCase):
def test_headers_type(self):
r = HttpResponse()
r['key'] = 'test'
self.assertEqual(r['key'], str('test'))
self.assertIsInstance(r['key'], str)
r['key'] = 'test'.encode('ascii')
self.assertEqual(r['key'], str('test'))
self.assertIsInstance(r['key'], str)
self.assertIn(b'test', r.serialize_headers())
r['key'] = 'café'
self.assertEqual(r['key'], force_str('café', 'latin-1'))
self.assertIsInstance(r['key'], str)
r['key'] = 'café'.encode('latin-1')
self.assertEqual(r['key'], force_str('café', 'latin-1'))
self.assertIsInstance(r['key'], str)
self.assertIn('café'.encode('latin-1'), r.serialize_headers())
r['key'] = '†'
self.assertEqual(r['key'], str('=?utf-8?b?4oCg?='))
self.assertIsInstance(r['key'], str)
self.assertIn(b'=?utf-8?b?4oCg?=', r.serialize_headers())
# The response also converts unicode or bytes keys to strings, but requires
# them to contain ASCII
r = HttpResponse()
del r['Content-Type']
r['foo'] = 'bar'
l = list(r.items())
self.assertEqual(len(l), 1)
self.assertEqual(l[0], ('foo', 'bar'))
self.assertIsInstance(l[0][0], str)
r = HttpResponse()
del r['Content-Type']
r[b'foo'] = 'bar'
l = list(r.items())
self.assertEqual(len(l), 1)
self.assertEqual(l[0], ('foo', 'bar'))
self.assertIsInstance(l[0][0], str)
r = HttpResponse()
with self.assertRaises(UnicodeError):
r.__setitem__('føø', 'bar')
with self.assertRaises(UnicodeError):
r.__setitem__('føø'.encode('utf-8'), 'bar')
def test_long_line(self):
# Bug #20889: long lines trigger newlines to be added to headers
# (which is not allowed due to bug #10188)
h = HttpResponse()
f = 'zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz a\xcc\x88'.encode('latin-1')
f = f.decode('utf-8')
h['Content-Disposition'] = 'attachment; filename="%s"' % f
# This one is triggering http://bugs.python.org/issue20747, that is Python
# will itself insert a newline in the header
h['Content-Disposition'] = 'attachment; filename="EdelRot_Blu\u0308te (3)-0.JPG"'
def test_newlines_in_headers(self):
# Bug #10188: Do not allow newlines in headers (CR or LF)
r = HttpResponse()
with self.assertRaises(BadHeaderError):
r.__setitem__('test\rstr', 'test')
with self.assertRaises(BadHeaderError):
r.__setitem__('test\nstr', 'test')
def test_dict_behavior(self):
r = HttpResponse()
self.assertIsNone(r.get('test'))
def test_non_string_content(self):
# Bug 16494: HttpResponse should behave consistently with non-strings
r = HttpResponse(12345)
self.assertEqual(r.content, b'12345')
# test content via property
r = HttpResponse()
r.content = 12345
self.assertEqual(r.content, b'12345')
    def test_iter_content(self):
        """Iterable content is joined into bytes; .content and __iter__ are reusable."""
        r = HttpResponse(['abc', 'def', 'ghi'])
        self.assertEqual(r.content, b'abcdefghi')
        # test iter content via property
        r = HttpResponse()
        r.content = ['idan', 'alex', 'jacob']
        self.assertEqual(r.content, b'idanalexjacob')
        r = HttpResponse()
        r.content = [1, 2, 3]
        self.assertEqual(r.content, b'123')
        # test odd inputs
        r = HttpResponse()
        r.content = ['1', '2', 3, '\u079e']
        # '\xde\x9e' == unichr(1950).encode('utf-8')
        self.assertEqual(r.content, b'123\xde\x9e')
        # .content can safely be accessed multiple times.
        r = HttpResponse(iter(['hello', 'world']))
        self.assertEqual(r.content, r.content)
        self.assertEqual(r.content, b'helloworld')
        # __iter__ can safely be called multiple times (#20187).
        self.assertEqual(b''.join(r), b'helloworld')
        self.assertEqual(b''.join(r), b'helloworld')
        # Accessing .content still works.
        self.assertEqual(r.content, b'helloworld')
        # Accessing .content also works if the response was iterated first.
        r = HttpResponse(iter(['hello', 'world']))
        self.assertEqual(b''.join(r), b'helloworld')
        self.assertEqual(r.content, b'helloworld')
        # Additional content can be written to the response.
        r = HttpResponse(iter(['hello', 'world']))
        self.assertEqual(r.content, b'helloworld')
        r.write('!')
        self.assertEqual(r.content, b'helloworld!')
    def test_iterator_isnt_rewound(self):
        """An exhausted response iterator stays exhausted; it is not restarted."""
        # Regression test for #13222
        r = HttpResponse('abc')
        i = iter(r)
        self.assertEqual(list(i), [b'abc'])
        self.assertEqual(list(i), [])
    def test_lazy_content(self):
        """Lazy string content is forced to bytes when .content is read."""
        r = HttpResponse(lazystr('helloworld'))
        self.assertEqual(r.content, b'helloworld')
    def test_file_interface(self):
        """write()/tell() behave like a file; tell() counts encoded byte length."""
        r = HttpResponse()
        r.write(b"hello")
        self.assertEqual(r.tell(), 5)
        # A non-ASCII str is UTF-8 encoded on write: 6 chars -> 12 bytes, total 17.
        r.write("привет")
        self.assertEqual(r.tell(), 17)
        r = HttpResponse(['abc'])
        r.write('def')
        self.assertEqual(r.tell(), 6)
        self.assertEqual(r.content, b'abcdef')
        # with Content-Encoding header
        r = HttpResponse()
        r['Content-Encoding'] = 'winning'
        r.write(b'abc')
        r.write(b'def')
        self.assertEqual(r.content, b'abcdef')
    def test_stream_interface(self):
        """getvalue()/writable()/writelines() follow the io stream protocol."""
        r = HttpResponse('asdf')
        self.assertEqual(r.getvalue(), b'asdf')
        r = HttpResponse()
        self.assertIs(r.writable(), True)
        r.writelines(['foo\n', 'bar\n', 'baz\n'])
        self.assertEqual(r.content, b'foo\nbar\nbaz\n')
    def test_unsafe_redirect(self):
        """Redirects to unsafe schemes (data:, mailto:, file:) raise SuspiciousOperation."""
        bad_urls = [
            'data:text/html,<script>window.alert("xss")</script>',
            'mailto:test@example.com',
            'file:///etc/passwd',
        ]
        for url in bad_urls:
            with self.assertRaises(SuspiciousOperation):
                HttpResponseRedirect(url)
            with self.assertRaises(SuspiciousOperation):
                HttpResponsePermanentRedirect(url)
class HttpResponseSubclassesTests(SimpleTestCase):
    """Behaviour specific to HttpResponse subclasses (redirects, 304, 405)."""

    def test_redirect(self):
        """HttpResponseRedirect issues a 302 and exposes the target as .url."""
        response = HttpResponseRedirect('/redirected/')
        self.assertEqual(response.status_code, 302)
        # Test that standard HttpResponse init args can be used
        response = HttpResponseRedirect(
            '/redirected/',
            content='The resource has temporarily moved',
            content_type='text/html',
        )
        self.assertContains(response, 'The resource has temporarily moved', status_code=302)
        # Test that url attribute is right
        self.assertEqual(response.url, response['Location'])

    def test_redirect_lazy(self):
        """Lazy strings are resolved when used as a redirect target."""
        r = HttpResponseRedirect(lazystr('/redirected/'))
        self.assertEqual(r.url, '/redirected/')

    def test_redirect_repr(self):
        """repr() of a redirect includes status, content type and target URL."""
        response = HttpResponseRedirect('/redirected/')
        expected = '<HttpResponseRedirect status_code=302, "text/html; charset=utf-8", url="/redirected/">'
        self.assertEqual(repr(response), expected)

    def test_not_modified(self):
        """HttpResponseNotModified is a bodiless 304 and rejects content assignment."""
        response = HttpResponseNotModified()
        self.assertEqual(response.status_code, 304)
        # 304 responses should not have content/content-type
        with self.assertRaises(AttributeError):
            response.content = "Hello dear"
        self.assertNotIn('content-type', response)

    def test_not_allowed(self):
        """HttpResponseNotAllowed issues a 405 and accepts standard init args."""
        response = HttpResponseNotAllowed(['GET'])
        self.assertEqual(response.status_code, 405)
        # Test that standard HttpResponse init args can be used
        response = HttpResponseNotAllowed(['GET'], content='Only the GET method is allowed', content_type='text/html')
        self.assertContains(response, 'Only the GET method is allowed', status_code=405)

    def test_not_allowed_repr(self):
        """repr() of a 405 lists the permitted methods."""
        response = HttpResponseNotAllowed(['GET', 'OPTIONS'], content_type='text/plain')
        expected = '<HttpResponseNotAllowed [GET, OPTIONS] status_code=405, "text/plain">'
        self.assertEqual(repr(response), expected)
class JsonResponseTests(SimpleTestCase):
    """JsonResponse serialization: non-ASCII, non-dict payloads, custom encoders."""

    def test_json_response_non_ascii(self):
        """Non-ASCII values round-trip through JSON serialization."""
        data = {'key': 'łóżko'}
        response = JsonResponse(data)
        self.assertEqual(json.loads(response.content.decode()), data)

    def test_json_response_raises_type_error_with_default_setting(self):
        """Non-dict payloads are rejected unless safe=False is passed."""
        with self.assertRaisesMessage(
            TypeError,
            'In order to allow non-dict objects to be serialized set the '
            'safe parameter to False'
        ):
            JsonResponse([1, 2, 3])

    def test_json_response_text(self):
        response = JsonResponse('foobar', safe=False)
        self.assertEqual(json.loads(response.content.decode()), 'foobar')

    def test_json_response_list(self):
        response = JsonResponse(['foo', 'bar'], safe=False)
        self.assertEqual(json.loads(response.content.decode()), ['foo', 'bar'])

    def test_json_response_uuid(self):
        """UUIDs are serialized via DjangoJSONEncoder to their string form."""
        u = uuid.uuid4()
        response = JsonResponse(u, safe=False)
        self.assertEqual(json.loads(response.content.decode()), str(u))

    def test_json_response_custom_encoder(self):
        """A custom encoder class fully controls the serialized output."""
        class CustomDjangoJSONEncoder(DjangoJSONEncoder):
            def encode(self, o):
                return json.dumps({'foo': 'bar'})
        response = JsonResponse({}, encoder=CustomDjangoJSONEncoder)
        self.assertEqual(json.loads(response.content.decode()), {'foo': 'bar'})

    def test_json_response_passing_arguments_to_json_dumps(self):
        """json_dumps_params are forwarded verbatim to json.dumps()."""
        response = JsonResponse({'foo': 'bar'}, json_dumps_params={'indent': 2})
        self.assertEqual(response.content.decode(), '{\n  "foo": "bar"\n}')
class StreamingHttpResponseTests(SimpleTestCase):
    """StreamingHttpResponse: chunked iteration, no .content, no file interface."""

    def test_streaming_response(self):
        """Exercise the full streaming contract in one pass."""
        r = StreamingHttpResponse(iter(['hello', 'world']))
        # iterating over the response itself yields bytestring chunks.
        chunks = list(r)
        self.assertEqual(chunks, [b'hello', b'world'])
        for chunk in chunks:
            self.assertIsInstance(chunk, six.binary_type)
        # and the response can only be iterated once.
        self.assertEqual(list(r), [])
        # even when a sequence that can be iterated many times, like a list,
        # is given as content.
        r = StreamingHttpResponse(['abc', 'def'])
        self.assertEqual(list(r), [b'abc', b'def'])
        self.assertEqual(list(r), [])
        # iterating over Unicode strings still yields bytestring chunks.
        r.streaming_content = iter(['hello', 'café'])
        chunks = list(r)
        # '\xc3\xa9' == unichr(233).encode('utf-8')
        self.assertEqual(chunks, [b'hello', b'caf\xc3\xa9'])
        for chunk in chunks:
            self.assertIsInstance(chunk, six.binary_type)
        # streaming responses don't have a `content` attribute.
        self.assertFalse(hasattr(r, 'content'))
        with self.assertRaises(AttributeError):
            r.content = 'xyz'
        # but they do have a `streaming_content` attribute.
        self.assertTrue(hasattr(r, 'streaming_content'))
        # that exists so we can check if a response is streaming, and wrap or
        # replace the content iterator.
        r.streaming_content = iter(['abc', 'def'])
        r.streaming_content = (chunk.upper() for chunk in r.streaming_content)
        self.assertEqual(list(r), [b'ABC', b'DEF'])
        # coercing a streaming response to bytes doesn't return a complete HTTP
        # message: only the headers are serialized.
        r = StreamingHttpResponse(iter(['hello', 'world']))
        self.assertEqual(
            six.binary_type(r), b'Content-Type: text/html; charset=utf-8')
        self.assertEqual(list(r), [b'hello', b'world'])
        # additional content cannot be written to the response.
        r = StreamingHttpResponse(iter(['hello', 'world']))
        with self.assertRaises(Exception):
            r.write('!')
        # and we can't tell the current position.
        with self.assertRaises(Exception):
            r.tell()
        r = StreamingHttpResponse(iter(['hello', 'world']))
        self.assertEqual(r.getvalue(), b'helloworld')
class FileCloseTests(SimpleTestCase):
    """File-like objects given as response content are closed with the response."""

    def setUp(self):
        # Disconnect close_old_connections so the request_finished signal
        # doesn't interfere with file-closing behaviour under test.
        request_finished.disconnect(close_old_connections)

    def tearDown(self):
        # Restore the default signal wiring for other tests.
        request_finished.connect(close_old_connections)

    def test_response(self):
        """HttpResponse closes file content eagerly (content is read immediately)."""
        filename = os.path.join(os.path.dirname(upath(__file__)), 'abc.txt')
        file1 = open(filename)
        r = HttpResponse(file1)
        self.assertTrue(file1.closed)
        r.close()
        # when multiple file are assigned as content, make sure they are all
        # closed with the response.
        file1 = open(filename)
        file2 = open(filename)
        r = HttpResponse(file1)
        r.content = file2
        self.assertTrue(file1.closed)
        self.assertTrue(file2.closed)

    def test_streaming_response(self):
        """StreamingHttpResponse keeps files open until the response is closed."""
        filename = os.path.join(os.path.dirname(upath(__file__)), 'abc.txt')
        # file isn't closed until we close the response.
        file1 = open(filename)
        r = StreamingHttpResponse(file1)
        self.assertFalse(file1.closed)
        r.close()
        self.assertTrue(file1.closed)
        # All assigned content files are tracked and closed together.
        file1 = open(filename)
        file2 = open(filename)
        r = StreamingHttpResponse(file1)
        r.streaming_content = file2
        self.assertFalse(file1.closed)
        self.assertFalse(file2.closed)
        r.close()
        self.assertTrue(file1.closed)
        self.assertTrue(file2.closed)
class CookieTests(unittest.TestCase):
    """SimpleCookie encoding/decoding and parse_cookie() tolerance for bad input."""

    def test_encode(self):
        """Reserved characters (';' and ',') are escaped in the encoded output."""
        c = SimpleCookie()
        c['test'] = "An,awkward;value"
        self.assertNotIn(";", c.output().rstrip(';'))
        self.assertNotIn(",", c.output().rstrip(';'))

    def test_decode(self):
        """An encoded cookie round-trips through SimpleCookie.load and parse_cookie."""
        c = SimpleCookie()
        c['test'] = "An,awkward;value"
        c2 = SimpleCookie()
        # output() is prefixed with 'Set-Cookie: ' (12 chars); strip it first.
        c2.load(c.output()[12:])
        self.assertEqual(c['test'].value, c2['test'].value)
        c3 = parse_cookie(c.output()[12:])
        self.assertEqual(c['test'].value, c3['test'])

    def test_decode_2(self):
        """A non-UTF-8 byte value also round-trips through encode/decode."""
        c = SimpleCookie()
        c['test'] = b"\xf0"
        c2 = SimpleCookie()
        c2.load(c.output()[12:])
        self.assertEqual(c['test'].value, c2['test'].value)
        c3 = parse_cookie(c.output()[12:])
        self.assertEqual(c['test'].value, c3['test'])

    def test_nonstandard_keys(self):
        """A single non-standard cookie name doesn't affect all other cookies."""
        self.assertIn('good_cookie', parse_cookie('good_cookie=yes;bad:cookie=yes').keys())

    def test_repeated_nonstandard_keys(self):
        """Repeated non-standard names don't affect all other cookies."""
        self.assertIn('good_cookie', parse_cookie('a:=b; a:=c; good_cookie=yes').keys())

    def test_python_cookies(self):
        """parse_cookie() matches Python's cookie parsing on standard and odd input."""
        self.assertEqual(parse_cookie('chips=ahoy; vienna=finger'), {'chips': 'ahoy', 'vienna': 'finger'})
        # treats all semicolons as delimiters, even within quotes.
        self.assertEqual(
            parse_cookie('keebler="E=mc2; L=\\"Loves\\"; fudge=\\012;"'),
            {'keebler': '"E=mc2', 'L': '\\"Loves\\"', 'fudge': '\\012', '': '"'}
        )
        # Illegal cookies that have an '=' char in an unquoted value.
        self.assertEqual(parse_cookie('keebler=E=mc2'), {'keebler': 'E=mc2'})
        # Cookies with ':' character in their name.
        self.assertEqual(parse_cookie('key:term=value:term'), {'key:term': 'value:term'})
        # Cookies with '[' and ']'.
        self.assertEqual(parse_cookie('a=b; c=[; d=r; f=h'), {'a': 'b', 'c': '[', 'd': 'r', 'f': 'h'})

    def test_cookie_edgecases(self):
        # Cookies that RFC6265 allows.
        self.assertEqual(parse_cookie('a=b; Domain=example.com'), {'a': 'b', 'Domain': 'example.com'})
        # parse_cookie() has historically kept only the last cookie with the
        # same name.
        self.assertEqual(parse_cookie('a=b; h=i; a=c'), {'a': 'c', 'h': 'i'})

    def test_invalid_cookies(self):
        """Malformed cookie strings degrade gracefully instead of raising."""
        # Chunks without an equals sign appear as unnamed values per
        # https://bugzilla.mozilla.org/show_bug.cgi?id=169091
        self.assertIn('django_language', parse_cookie('abc=def; unnamed; django_language=en').keys())
        # Even a double quote may be an unnamed value.
        self.assertEqual(parse_cookie('a=b; "; c=d'), {'a': 'b', '': '"', 'c': 'd'})
        # Spaces in names and values, and an equals sign in values.
        self.assertEqual(parse_cookie('a b c=d e = f; gh=i'), {'a b c': 'd e = f', 'gh': 'i'})
        # More characters the spec forbids.
        self.assertEqual(parse_cookie('a b,c<>@:/[]?{}=d " =e,f g'), {'a b,c<>@:/[]?{}': 'd " =e,f g'})
        # Unicode characters. The spec only allows ASCII.
        self.assertEqual(parse_cookie('saint=André Bessette'), {'saint': force_str('André Bessette')})
        # Browsers don't send extra whitespace or semicolons in Cookie headers,
        # but parse_cookie() should tolerate them anyway.
        self.assertEqual(parse_cookie(' =  b  ;  ;  =  ;   c  =  ;  '), {'': 'b', 'c': ''})

    def test_httponly_after_load(self):
        """The httponly flag can be set on a morsel after loading."""
        c = SimpleCookie()
        c.load("name=val")
        c['name']['httponly'] = True
        self.assertTrue(c['name']['httponly'])

    def test_load_dict(self):
        """SimpleCookie.load() also accepts a plain dict."""
        c = SimpleCookie()
        c.load({'name': 'val'})
        self.assertEqual(c['name'].value, 'val')

    @unittest.skipUnless(six.PY2, "PY3 throws an exception on invalid cookie keys.")
    def test_bad_cookie(self):
        """Invalid cookie names are collected in .bad_cookies (Python 2 only)."""
        r = HttpResponse()
        r.set_cookie("a:.b/", 1)
        self.assertEqual(len(r.cookies.bad_cookies), 1)

    def test_pickle(self):
        """A loaded SimpleCookie survives pickling at every protocol level."""
        rawdata = 'Customer="WILE_E_COYOTE"; Path=/acme; Version=1'
        expected_output = 'Set-Cookie: %s' % rawdata
        C = SimpleCookie()
        C.load(rawdata)
        self.assertEqual(C.output(), expected_output)
        for proto in range(pickle.HIGHEST_PROTOCOL + 1):
            C1 = pickle.loads(pickle.dumps(C, protocol=proto))
            self.assertEqual(C1.output(), expected_output)
| true | true |
f7152c01a06b1a8eea4ae4c08a05e1af35676efc | 82 | py | Python | pyrobolearn/states/generators/__init__.py | Pandinosaurus/pyrobolearn | 9cd7c060723fda7d2779fa255ac998c2c82b8436 | [
"Apache-2.0"
] | 2 | 2021-01-21T21:08:30.000Z | 2022-03-29T16:45:49.000Z | pyrobolearn/states/generators/__init__.py | Pandinosaurus/pyrobolearn | 9cd7c060723fda7d2779fa255ac998c2c82b8436 | [
"Apache-2.0"
] | null | null | null | pyrobolearn/states/generators/__init__.py | Pandinosaurus/pyrobolearn | 9cd7c060723fda7d2779fa255ac998c2c82b8436 | [
"Apache-2.0"
] | 1 | 2020-09-29T21:25:39.000Z | 2020-09-29T21:25:39.000Z | # -*- coding: utf-8 -*-
# import state generators
from .state_generator import *
| 16.4 | 30 | 0.682927 |
from .state_generator import *
| true | true |
f7152cd6c81c021fabfecc053762e195b6af37eb | 493 | py | Python | setup.py | knorth55/chainer-dense-fusion | 8ff53173d7071fc2cfcd05b1e0b2c544aeed090b | [
"MIT"
] | 22 | 2019-01-31T23:50:30.000Z | 2021-09-13T09:41:00.000Z | setup.py | knorth55/chainer-dense-fusion | 8ff53173d7071fc2cfcd05b1e0b2c544aeed090b | [
"MIT"
] | 4 | 2019-07-31T14:40:06.000Z | 2022-03-16T13:32:45.000Z | setup.py | knorth55/chainer-dense-fusion | 8ff53173d7071fc2cfcd05b1e0b2c544aeed090b | [
"MIT"
] | 3 | 2019-08-30T09:18:45.000Z | 2020-03-03T16:07:51.000Z | from setuptools import find_packages
from setuptools import setup
version = '0.0.0'


def _read(path):
    """Return the full text of *path*, closing the file handle afterwards."""
    with open(path) as f:
        return f.read()


setup(
    name='chainer_dense_fusion',
    version=version,
    packages=find_packages(),
    # splitlines() strips the trailing newlines that readlines() left on
    # every requirement string; _read() also closes the file handle, which
    # the original bare open() calls never did.
    install_requires=_read('requirements.txt').splitlines(),
    description='',
    long_description=_read('README.md'),
    author='Shingo Kitagawa',
    author_email='shingogo.5511@gmail.com',
    url='https://github.com/knorth55/chainer-dense-fusion',
    license='MIT',
    keywords='machine-learning',
)
| 23.47619 | 59 | 0.703854 | from setuptools import find_packages
from setuptools import setup
version = '0.0.0'
setup(
name='chainer_dense_fusion',
version=version,
packages=find_packages(),
install_requires=open('requirements.txt').readlines(),
description='',
long_description=open('README.md').read(),
author='Shingo Kitagawa',
author_email='shingogo.5511@gmail.com',
url='https://github.com/knorth55/chainer-dense-fusion',
license='MIT',
keywords='machine-learning',
)
| true | true |
f7152da2720cfd4a357ce9c5b71bde73ceb5bb7b | 2,199 | py | Python | year_2018/day_09.py | gchazot/aoc | 1926114b1060a927be3f87732ba0a399afd98ae4 | [
"MIT"
] | 1 | 2020-04-12T16:14:29.000Z | 2020-04-12T16:14:29.000Z | year_2018/day_09.py | gchazot/aoc | 1926114b1060a927be3f87732ba0a399afd98ae4 | [
"MIT"
] | null | null | null | year_2018/day_09.py | gchazot/aoc | 1926114b1060a927be3f87732ba0a399afd98ae4 | [
"MIT"
] | null | null | null | from __future__ import print_function
import unittest
class TestMarbleGame(unittest.TestCase):
    """Unit tests for MarbleGame using the AoC 2018 day 9 worked examples."""

    def test_starts_empty(self):
        """A fresh game has no scores and a circle holding only marble 0."""
        game = MarbleGame(0, 0)
        self.assertListEqual([], game.scores)
        self.assertListEqual([0], game._circle)

    def test_play_examples(self):
        """High scores for the examples from the puzzle statement."""
        def high_score(players, last_marble):
            game = MarbleGame(players, last_marble)
            return game.play()
        self.assertEqual(32, high_score(9, 25))
        self.assertEqual(8317, high_score(10, 1618))
        self.assertEqual(146373, high_score(13, 7999))
        self.assertEqual(2764, high_score(17, 1104))
        self.assertEqual(54718, high_score(21, 6111))
        self.assertEqual(37305, high_score(30, 5807))

    def test_play_mine(self):
        """Part 1 answer for the author's personal puzzle input."""
        self.assertEqual(410375, MarbleGame(439, 71307).play())

    @unittest.skip("Too slow, > 2h")
    def test_play_huge_mine(self):
        """Part 2 (100x marbles) — skipped: list.insert/pop make this O(n^2)."""
        self.assertEqual(3314195047, MarbleGame(439, 71307 * 100).play())
class MarbleGame:
    """The elves' marble game from Advent of Code 2018, day 9.

    Marbles are placed one at a time into a circular list.  Every marble
    divisible by 23 is kept by the current player, who also removes the
    marble seven positions counter-clockwise and adds both to their score.
    """

    def __init__(self, num_players, last_marble):
        # Per-player running totals; index = player number.
        self.scores = [0] * num_players
        self.last_marble = last_marble
        # The circle always starts with marble 0 already placed.
        self._circle = [0]
        self._next_marble = 1
        self._current_index = 0

    def play(self):
        """Play every marble up to last_marble and return the winning score."""
        while self._next_marble <= self.last_marble:
            self.play_round()
        return max(self.scores)

    def play_round(self):
        """Place (or score) a single marble and advance the game state."""
        marble = self._next_marble
        size = len(self._circle)
        if marble % 23:
            # Ordinary marble: insert two positions clockwise of current.
            target = (self._current_index + 2) % size
            self._circle.insert(target, marble)
        else:
            # Scoring marble: the placing player keeps it plus the marble
            # seven positions counter-clockwise, which leaves the circle.
            player = marble % len(self.scores)
            target = (self._current_index - 7) % size
            self.scores[player] += marble + self._circle.pop(target)
        self._current_index = target
        self._next_marble = marble + 1

    def print(self):
        """Dump scores and circle state to stdout (debugging aid)."""
        scores_repr = " ".join(map(str, self.scores))
        circle_repr = " ".join(map(str, self._circle))
        print(scores_repr, " ",
              self._next_marble, "-",
              self._current_index, "-",
              circle_repr)
        print()
| 34.359375 | 77 | 0.626648 | from __future__ import print_function
import unittest
class TestMarbleGame(unittest.TestCase):
def test_starts_empty(self):
game = MarbleGame(0, 0)
self.assertListEqual([], game.scores)
self.assertListEqual([0], game._circle)
def test_play_examples(self):
def high_score(players, last_marble):
game = MarbleGame(players, last_marble)
return game.play()
self.assertEqual(32, high_score(9, 25))
self.assertEqual(8317, high_score(10, 1618))
self.assertEqual(146373, high_score(13, 7999))
self.assertEqual(2764, high_score(17, 1104))
self.assertEqual(54718, high_score(21, 6111))
self.assertEqual(37305, high_score(30, 5807))
def test_play_mine(self):
self.assertEqual(410375, MarbleGame(439, 71307).play())
@unittest.skip("Too slow, > 2h")
def test_play_huge_mine(self):
self.assertEqual(3314195047, MarbleGame(439, 71307 * 100).play())
class MarbleGame:
def __init__(self, num_players, last_marble):
self.scores = [0 for _ in range(num_players)]
self.last_marble = last_marble
self._circle = [0]
self._next_marble = 1
self._current_index = 0
def play(self):
while self._next_marble <= self.last_marble:
self.play_round()
return max(self.scores)
def play_round(self):
if self._next_marble % 23 != 0:
next_index = (self._current_index + 2) % len(self._circle)
self._circle.insert(next_index, self._next_marble)
else:
num_players = len(self.scores)
current_player_index = self._next_marble % num_players
self.scores[current_player_index] += self._next_marble
next_index = (self._current_index - 7) % len(self._circle)
self.scores[current_player_index] += self._circle.pop(next_index)
self._current_index = next_index
self._next_marble += 1
def print(self):
print(" ".join(map(str, self.scores)), " ",
self._next_marble, "-",
self._current_index, "-",
" ".join(map(str, self._circle)))
print()
| true | true |
f7152e2178499fecdb9dce8c3f5d9bfbf5c3dfd6 | 297 | py | Python | tests/test_modes.py | s-s-boika/obdlib | 5b0b35276575a522d20858b6993a9bebf0acc968 | [
"MIT"
] | 9 | 2015-07-14T07:15:58.000Z | 2021-06-03T01:42:19.000Z | tests/test_modes.py | s-s-boika/obdlib | 5b0b35276575a522d20858b6993a9bebf0acc968 | [
"MIT"
] | null | null | null | tests/test_modes.py | s-s-boika/obdlib | 5b0b35276575a522d20858b6993a9bebf0acc968 | [
"MIT"
] | 4 | 2015-07-15T09:05:46.000Z | 2022-02-06T04:28:53.000Z | import unittest
import obdlib.obd.modes as modes
class TestModes(unittest.TestCase):
    """Smoke test for obdlib.obd.modes.Modes construction."""

    def test_init(self):
        """Modes(1) exposes its mode table as a dict."""
        m = modes.Modes(1)
        self.assertIsInstance(m.modes, dict)


# Run the suite directly on import/execution of this module.
suite = unittest.TestLoader().loadTestsFromTestCase(TestModes)
unittest.TextTestRunner(verbosity=2).run(suite)
| 22.846154 | 62 | 0.747475 | import unittest
import obdlib.obd.modes as modes
class TestModes(unittest.TestCase):
def test_init(self):
m = modes.Modes(1)
self.assertIsInstance(m.modes, dict)
suite = unittest.TestLoader().loadTestsFromTestCase(TestModes)
unittest.TextTestRunner(verbosity=2).run(suite)
| true | true |
f7152e96fcfdafb7945bd56df781ae2d29581903 | 4,319 | py | Python | gbrarscrapy.py | wothard/scrapyfloat | ff0c6698a4732015358ed5e9a07e24dd212eaf7f | [
"MIT"
] | null | null | null | gbrarscrapy.py | wothard/scrapyfloat | ff0c6698a4732015358ed5e9a07e24dd212eaf7f | [
"MIT"
] | null | null | null | gbrarscrapy.py | wothard/scrapyfloat | ff0c6698a4732015358ed5e9a07e24dd212eaf7f | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# encoding: utf-8
from lxml import html
import requests
import os
import random
import time
from fake_agent import fakeagent
class Gbrarscrapy(object):
    """Scraper for a RARBG-style torrent listing page.

    Fetches one listing page through a random HTTP proxy and user agent,
    extracts titles / torrent ids / seed counts / ratings via XPath, keeps
    entries rated 5.0 or higher in ``tor_dict`` and appends them to a file.
    """

    def __init__(self, url_li, proxy_single):
        # XPath expressions for the fields scraped off the listing page.
        self.title_xpa = '//a[@onmouseover]/text()'
        self.score_list_xpa = '//span[@style="color:DarkSlateGray"]/text()'
        self.id_xpa = '//a[contains(@href,"/torrent/")]/@href'
        self.ch_xpa = '//tr[@class="lista2"][{}]/td[2]/span/text()'
        # self.date_list_xpa = '//td[contains(@align,"center")
        # and contains(@width,"150px")]/text()'
        self.seli_xpa = '//td[@align="center" and @width="50px"]/font/text()'
        # Address dictionary: title -> [seed count, download URL, score].
        self.tor_dict = dict()
        # NOTE(review): the header key is 'Cookies' — the real HTTP header
        # name is 'Cookie'; the server likely ignores this. Confirm intent.
        self.headers = {
            'Accept': 'text/html,application/xhtml+xml,application/xml;q=' +
                      '0.9,image/webp,image/apng,*/*;q=0.8',
            'Cookies': 'skt=v97mrzygux; gaDts48g=q8h5pp9t; skt=v97mrzygux; gaDts48g=q8h5pp9t; expla=1; tcc; aby=2; ppu_main_9ef78edf998c4df1e1636c9a474d9f47=1; ppu_sub_9ef78edf998c4df1e1636c9a474d9f47=1; ppu_delay_9ef78edf998c4df1e1636c9a474d9f47=1'
        }
        self.url = url_li
        self.pro = proxy_single
        self.user_agent = fakeagent.load_ua()

    def run(self):
        """Fetch and parse self.url, retrying forever until one page succeeds."""
        while 1:
            try:
                # Pick a random user agent and proxy for this attempt.
                temp_agent = random.choice(self.user_agent)
                agent = temp_agent.split("\n")[0]
                self.headers["User-Agent"] = agent
                pro = {"http": "http://" + random.choice(self.pro)}
                s = requests.get(self.url, headers=self.headers,
                                 proxies=pro, timeout=10)
                response = html.fromstring(s.text)
                print(s.text)
                title_l = response.xpath(self.title_xpa)  # titles
                id = (response.xpath(self.id_xpa))  # torrent ids
                seed = response.xpath(self.seli_xpa)  # seed counts
                torrent_f = self.torent_front(id)
                # The listing page shows 25 rows per page.
                for i in range(25):
                    # address is the complete torrent download URL.
                    address = torrent_f[i] + title_l[i] + "-[rarbg.to].torrent"
                    check_sc = response.xpath(self.ch_xpa.format(i + 1))
                    # Extract the movie title (everything before ".1080p.").
                    title = title_l[i].split(".1080p.")[0]
                    # Rating defaults to 0 when no "x/y" score is present.
                    # NOTE(review): if check_sc is empty the next line's
                    # check_sc[0] raises IndexError, which the broad except
                    # below turns into an endless retry — confirm intent.
                    if not check_sc or ('/' not in check_sc[0]):
                        score = 0
                    if '/' in check_sc[0]:
                        score = float((check_sc[0].split(" ")[-1]).split('/')[0])
                    if score >= 5:
                        self.torrent_dict(title_l[i], seed[i],
                                          title, address, score)
                time.sleep(2)
                print(len(self.tor_dict), self.tor_dict)
                print(self.url)
                self.torrent_save()
                print("保存成功一页")
                break
            except Exception as e:
                print("REason: ", e)
                print(self.url)
                self.error_save_page(self.url)

    def torent_front(self, id):
        """Build the download-URL prefixes from the scraped torrent hrefs."""
        torrent_f = []  # URL prefixes
        # The first 8 hrefs on the page are navigation links, not torrents.
        for i in range(len(id) - 8):
            te = id[i + 8].split("torrent/")[-1]
            if "comment" not in te:
                temp = "https://rarbgprx.org/download.php?id={}&f=".format(te)
                torrent_f.append(temp)
        return torrent_f

    def torrent_dict(self, title_l, seed, title, address, score):
        """Record one entry in tor_dict, preferring the better-seeded duplicate."""
        # Check whether this title was already seen.
        if title_l in self.tor_dict.keys():
            # Keep the healthier (better seeded) of entries rated 5.0+.
            # NOTE(review): membership is tested on title_l but storage is
            # keyed by title; `seed` is compared as a string against the
            # stored value (lexicographic, not numeric); and both branches
            # below store the same thing — this whole method looks buggy.
            if seed > self.tor_dict[title][0]:
                self.tor_dict[title] = [str(seed), address, str(score)]
            else:
                self.tor_dict[title] = [str(seed), address, str(score)]
        else:
            self.tor_dict[title] = [str(seed), address, str(score)]

    def torrent_save(self):
        """Append every collected entry as a CSV-ish line to data/dianying.txt."""
        with open(os.getcwd()+'/data/dianying.txt', 'a') as f:
            for (i, j) in self.tor_dict.items():
                f.write(i)
                f.write(", ")
                for k in j:
                    f.write(k)
                    f.write(", ")
                f.write("\n")

    def error_save_page(self, url):
        """Append a failing page URL to data/error_page_1.txt for later retry."""
        with open(os.getcwd()+'/data/error_page_1.txt', 'a') as f:
            f.write(url)
            f.write("\n")
| 41.133333 | 249 | 0.508219 |
from lxml import html
import requests
import os
import random
import time
from fake_agent import fakeagent
class Gbrarscrapy(object):
def __init__(self, url_li, proxy_single):
self.title_xpa = '//a[@onmouseover]/text()'
self.score_list_xpa = '//span[@style="color:DarkSlateGray"]/text()'
self.id_xpa = '//a[contains(@href,"/torrent/")]/@href'
self.ch_xpa = '//tr[@class="lista2"][{}]/td[2]/span/text()'
# and contains(@width,"150px")]/text()'
self.seli_xpa = '//td[@align="center" and @width="50px"]/font/text()'
self.tor_dict = dict()
self.headers = {
'Accept': 'text/html,application/xhtml+xml,application/xml;q=' +
'0.9,image/webp,image/apng,*/*;q=0.8',
'Cookies': 'skt=v97mrzygux; gaDts48g=q8h5pp9t; skt=v97mrzygux; gaDts48g=q8h5pp9t; expla=1; tcc; aby=2; ppu_main_9ef78edf998c4df1e1636c9a474d9f47=1; ppu_sub_9ef78edf998c4df1e1636c9a474d9f47=1; ppu_delay_9ef78edf998c4df1e1636c9a474d9f47=1'
}
self.url = url_li
self.pro = proxy_single
self.user_agent = fakeagent.load_ua()
def run(self):
while 1:
try:
temp_agent = random.choice(self.user_agent)
agent = temp_agent.split("\n")[0]
self.headers["User-Agent"] = agent
pro = {"http": "http://" + random.choice(self.pro)}
s = requests.get(self.url, headers=self.headers,
proxies=pro, timeout=10)
response = html.fromstring(s.text)
print(s.text)
title_l = response.xpath(self.title_xpa)
id = (response.xpath(self.id_xpa))
seed = response.xpath(self.seli_xpa)
torrent_f = self.torent_front(id)
for i in range(25):
address = torrent_f[i] + title_l[i] + "-[rarbg.to].torrent"
check_sc = response.xpath(self.ch_xpa.format(i + 1))
title = title_l[i].split(".1080p.")[0]
if not check_sc or ('/' not in check_sc[0]):
score = 0
if '/' in check_sc[0]:
score = float((check_sc[0].split(" ")[-1]).split('/')[0])
if score >= 5:
self.torrent_dict(title_l[i], seed[i],
title, address, score)
time.sleep(2)
print(len(self.tor_dict), self.tor_dict)
print(self.url)
self.torrent_save()
print("保存成功一页")
break
except Exception as e:
print("REason: ", e)
print(self.url)
self.error_save_page(self.url)
def torent_front(self, id):
torrent_f = []
for i in range(len(id) - 8):
te = id[i + 8].split("torrent/")[-1]
if "comment" not in te:
temp = "https://rarbgprx.org/download.php?id={}&f=".format(te)
torrent_f.append(temp)
return torrent_f
def torrent_dict(self, title_l, seed, title, address, score):
if title_l in self.tor_dict.keys():
if seed > self.tor_dict[title][0]:
self.tor_dict[title] = [str(seed), address, str(score)]
else:
self.tor_dict[title] = [str(seed), address, str(score)]
else:
self.tor_dict[title] = [str(seed), address, str(score)]
def torrent_save(self):
with open(os.getcwd()+'/data/dianying.txt', 'a') as f:
for (i, j) in self.tor_dict.items():
f.write(i)
f.write(", ")
for k in j:
f.write(k)
f.write(", ")
f.write("\n")
def error_save_page(self, url):
with open(os.getcwd()+'/data/error_page_1.txt', 'a') as f:
f.write(url)
f.write("\n")
| true | true |
f7152ea08588300b8fbe747412eb41de76a983a8 | 456 | py | Python | experiments/3_parallel_training.py | dddaga/word-tree | ed6c59c16feee04d5c6003b3f5f4df68e6808e04 | [
"MIT"
] | null | null | null | experiments/3_parallel_training.py | dddaga/word-tree | ed6c59c16feee04d5c6003b3f5f4df68e6808e04 | [
"MIT"
] | null | null | null | experiments/3_parallel_training.py | dddaga/word-tree | ed6c59c16feee04d5c6003b3f5f4df68e6808e04 | [
"MIT"
] | 1 | 2020-12-02T09:07:06.000Z | 2020-12-02T09:07:06.000Z | import pymongo
EXPERIMENT_NAME = 'EXP_3'
CORPUS_PATH = 'data/pride_and_prejudice_cleaned.txt'
TRAINING_WINDOW = 3
CONTEXT_DIMENSION = 64
CONTEXT_DECAY = 0.5
CONTRASTIVE_WEIGHT = 0.001
LEANING_RATE = 1
DROPOUT = 0.1
myclient = pymongo.MongoClient('mongodb://localhost:27017')
mydb = myclient["mydatabase"]
collection = mydb.parallel_trainging
'''
Experiment details:
Parallel training
3 parallel instances
'''
| 14.709677 | 61 | 0.710526 | import pymongo
EXPERIMENT_NAME = 'EXP_3'
CORPUS_PATH = 'data/pride_and_prejudice_cleaned.txt'
TRAINING_WINDOW = 3
CONTEXT_DIMENSION = 64
CONTEXT_DECAY = 0.5
CONTRASTIVE_WEIGHT = 0.001
LEANING_RATE = 1
DROPOUT = 0.1
myclient = pymongo.MongoClient('mongodb://localhost:27017')
mydb = myclient["mydatabase"]
collection = mydb.parallel_trainging
| true | true |
f7152f1421c03597f5ecc2d6a08acdad363400e1 | 3,893 | py | Python | deprecated/Imputation/GRUI/Run_GAN_imputed.py | srinivasans/DeepSepsis | 8647a2ec93ad5a937638acfc279a756bbfa04f7f | [
"Apache-2.0"
] | 2 | 2019-04-22T07:41:23.000Z | 2019-04-23T02:45:06.000Z | deprecated/Imputation/GRUI/Run_GAN_imputed.py | srinivasans/DeepSepsis | 8647a2ec93ad5a937638acfc279a756bbfa04f7f | [
"Apache-2.0"
] | null | null | null | deprecated/Imputation/GRUI/Run_GAN_imputed.py | srinivasans/DeepSepsis | 8647a2ec93ad5a937638acfc279a756bbfa04f7f | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
Created on Mon Mar 26 10:47:41 2018
@author: yonghong
"""
from __future__ import print_function
import sys
sys.path.append("..")
import argparse
import os
import tensorflow as tf
from Physionet2019ImputedSepsisData import readImputed
import gru_delta_forGAN
if __name__ == '__main__':
    # Train the GRUI classifier on GAN-imputed PhysioNet sepsis data,
    # sweeping the learning rate and recording the best test AUC.
    parser = argparse.ArgumentParser(description='manual to this script')
    parser.add_argument('--gpus', type=str, default=None)
    parser.add_argument('--batch-size', type=int, default=128)
    parser.add_argument('--run-type', type=str, default='test')
    # Full path of the imputed training data set, e.g.
    # Gan_Imputation/imputation_train_results/WGAN_no_mask/30_8_128_64_0.001_400_True_True_True_0.15_0.5
    parser.add_argument('--data-path', type=str,
                        default="../Gan_Imputation/imputation_train_results/WGAN_no_mask/")
    parser.add_argument('--model-path', type=str, default=None)
    parser.add_argument('--result-path', type=str, default=None)
    parser.add_argument('--lr', type=float, default=0.01)
    parser.add_argument('--n-inputs', type=int, default=41)
    parser.add_argument('--n-hidden-units', type=int, default=64)
    parser.add_argument('--n-classes', type=int, default=2)
    parser.add_argument('--checkpoint-dir', type=str, default='checkpoint_physionet_imputed',
                        help='Directory name to save the checkpoints')
    parser.add_argument('--log-dir', type=str, default='logs_physionet_imputed',
                        help='Directory name to save training logs')
    parser.add_argument('--isNormal', type=int, default=1)
    parser.add_argument('--isSlicing', type=int, default=1)
    # 0 means false, 1 means true
    parser.add_argument('--isBatch-normal', type=int, default=1)
    args = parser.parse_args()

    # The boolean switches arrive as 0/1 integers; normalise them once
    # instead of the original chains of `== 0` / `== 1` comparisons.
    args.isBatch_normal = bool(args.isBatch_normal)
    args.isNormal = bool(args.isNormal)
    args.isSlicing = bool(args.isSlicing)

    checkdir = args.checkpoint_dir
    logdir = args.log_dir
    base = args.data_path
    data_paths = ["30_8_128_64_0.001_400_True_True_False_0.15_0.5"]
    max_auc = 0.0
    for d in data_paths:
        args.data_path = os.path.join(base, d)
        # Dataset name is the last non-empty path component
        # (tolerates a trailing slash).
        path_splits = args.data_path.split("/")
        datasetName = path_splits[-1] or path_splits[-2]
        args.checkpoint_dir = checkdir + "/" + datasetName
        args.log_dir = logdir + "/" + datasetName

        dt_train = readImputed.ReadImputedPhysionetData(args.data_path)
        dt_train.load()
        dt_test = readImputed.ReadImputedPhysionetData(
            args.data_path.replace("imputation_train_results",
                                   "imputation_test_results"))
        dt_test.load()

        # Learning-rate sweep; every run shares the same loaded data sets.
        lrs = [0.004, 0.003, 0.005, 0.006, 0.007, 0.008, 0.009, 0.01, 0.012, 0.015]
        for lr in lrs:
            args.lr = lr
            args.epoch = 30
            print("epoch: %2d" % args.epoch)
            # Start from a fresh graph so weights/optimizer state don't
            # leak between learning-rate runs.
            tf.reset_default_graph()
            config = tf.ConfigProto()
            config.gpu_options.allow_growth = True
            with tf.Session(config=config) as sess:
                model = gru_delta_forGAN.grui(sess,
                                              args=args,
                                              dataset=dt_train,
                                              test_set=dt_test)
                # build graph
                model.build()
                auc = model.train()
                max_auc = max(max_auc, auc)
            print("")
    print("max auc is: " + str(max_auc))
    # Context manager guarantees the result file is flushed and closed
    # (the original used bare open()/close() with no try/finally).
    with open("max_auc", "w") as result_file:
        result_file.write(str(max_auc))
| 36.046296 | 130 | 0.625995 |
from __future__ import print_function
import sys
sys.path.append("..")
import argparse
import os
import tensorflow as tf
from Physionet2019ImputedSepsisData import readImputed
import gru_delta_forGAN
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='manual to this script')
parser.add_argument('--gpus', type=str, default = None)
parser.add_argument('--batch-size', type=int, default=128)
parser.add_argument('--run-type', type=str, default='test')
parser.add_argument('--data-path', type=str, default="../Gan_Imputation/imputation_train_results/WGAN_no_mask/")
parser.add_argument('--model-path', type=str, default=None)
parser.add_argument('--result-path', type=str, default=None)
parser.add_argument('--lr', type=float, default=0.01)
parser.add_argument('--n-inputs', type=int, default=41)
parser.add_argument('--n-hidden-units', type=int, default=64)
parser.add_argument('--n-classes', type=int, default=2)
parser.add_argument('--checkpoint-dir', type=str, default='checkpoint_physionet_imputed',
help='Directory name to save the checkpoints')
parser.add_argument('--log-dir', type=str, default='logs_physionet_imputed',
help='Directory name to save training logs')
parser.add_argument('--isNormal',type=int,default=1)
parser.add_argument('--isSlicing',type=int,default=1)
parser.add_argument('--isBatch-normal',type=int,default=1)
args = parser.parse_args()
if args.isBatch_normal==0:
args.isBatch_normal=False
if args.isBatch_normal==1:
args.isBatch_normal=True
if args.isNormal==0:
args.isNormal=False
if args.isNormal==1:
args.isNormal=True
if args.isSlicing==0:
args.isSlicing=False
if args.isSlicing==1:
args.isSlicing=True
checkdir=args.checkpoint_dir
logdir=args.log_dir
base=args.data_path
data_paths=["30_8_128_64_0.001_400_True_True_False_0.15_0.5"]
max_auc = 0.0
for d in data_paths:
args.data_path=os.path.join(base,d)
path_splits=args.data_path.split("/")
if len(path_splits[-1])==0:
datasetName=path_splits[-2]
else:
datasetName=path_splits[-1]
args.checkpoint_dir=checkdir+"/"+datasetName
args.log_dir=logdir+"/"+datasetName
dt_train=readImputed.ReadImputedPhysionetData(args.data_path)
dt_train.load()
dt_test=readImputed.ReadImputedPhysionetData(args.data_path.replace("imputation_train_results","imputation_test_results"))
dt_test.load()
lrs=[0.004,0.003,0.005,0.006,0.007,0.008,0.009,0.01,0.012,0.015]
for lr in lrs:
args.lr=lr
epoch=30
args.epoch=epoch
print("epoch: %2d"%(epoch))
tf.reset_default_graph()
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
with tf.Session(config=config) as sess:
model = gru_delta_forGAN.grui(sess,
args=args,
dataset=dt_train,
test_set = dt_test
)
model.build()
auc = model.train()
if auc > max_auc:
max_auc = auc
print("")
print("max auc is: " + str(max_auc))
f2 = open("max_auc","w")
f2.write(str(max_auc))
f2.close()
| true | true |
f7152f1b586fbf4b47b2c1084a5b2a3f185a8418 | 340 | py | Python | apps/teams/adminx.py | slyslyme/CTF_AWD_Platform | 6e9eec0a23a316aaf1927d4ec5be923ac26ff21e | [
"MIT"
] | 85 | 2019-04-21T01:38:18.000Z | 2022-03-22T08:06:21.000Z | apps/teams/adminx.py | xuchaoa/CTF_AWD_Platform | b2201f18677939442002e16e64280acd44f72bfe | [
"MIT"
] | 12 | 2019-05-10T14:09:12.000Z | 2022-03-11T23:45:35.000Z | apps/teams/adminx.py | slyslyme/CTF_AWD_Platform | 6e9eec0a23a316aaf1927d4ec5be923ac26ff21e | [
"MIT"
] | 21 | 2019-04-14T16:12:15.000Z | 2022-03-22T08:06:22.000Z | #! /usr/bin/env python
# -*- coding: utf-8 -*-
# Author: Archerx
# @time: 2019/4/16 上午 11:35
from .models import TeamProfile
import xadmin
class TeamDispaly(object):
    # Columns shown on the xadmin change-list page for team profiles.
    # NOTE(review): class name looks like a typo for "TeamDisplay" — kept
    # as-is because it is referenced in the register call below.
    list_display = ('id','team_name','team_captain','team_member1','team_member2','team_member3','competition','team_token')

# Register the TeamProfile model with xadmin using the display config above.
xadmin.site.register(TeamProfile, TeamDispaly)
from .models import TeamProfile
import xadmin
class TeamDispaly(object):
list_display = ('id','team_name','team_captain','team_member1','team_member2','team_member3','competition','team_token')
xadmin.site.register(TeamProfile, TeamDispaly) | true | true |
f7152fae1381c42726a0ec3c4057ea6d2f710ce3 | 3,769 | py | Python | kmip/core/messages/payloads/create.py | vbnmmnbv/PyKMIP | 4617ae528006178c466fe3945a477f568b596940 | [
"Apache-2.0"
] | 12 | 2016-09-14T21:59:10.000Z | 2020-03-11T07:37:25.000Z | kmip/core/messages/payloads/create.py | vbnmmnbv/PyKMIP | 4617ae528006178c466fe3945a477f568b596940 | [
"Apache-2.0"
] | 1 | 2021-06-25T15:43:48.000Z | 2021-06-25T15:43:48.000Z | kmip/core/messages/payloads/create.py | vbnmmnbv/PyKMIP | 4617ae528006178c466fe3945a477f568b596940 | [
"Apache-2.0"
] | null | null | null | # Copyright (c) 2014 The Johns Hopkins University/Applied Physics Laboratory
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from kmip.core import attributes
from kmip.core import enums
from kmip.core.enums import Tags
from kmip.core.objects import TemplateAttribute
from kmip.core.primitives import Struct
from kmip.core.utils import BytearrayStream
class CreateRequestPayload(Struct):
    """Request payload for the KMIP Create operation.

    Carries the type of managed object to create and a template attribute
    describing the desired properties of the new object.
    """

    def __init__(self,
                 object_type=None,
                 template_attribute=None):
        """Construct a Create request payload.

        Args:
            object_type: an ObjectType attribute naming the kind of managed
                object to create.
            template_attribute: a TemplateAttribute holding the settings for
                the new object.
        """
        super(CreateRequestPayload, self).__init__(
            tag=enums.Tags.REQUEST_PAYLOAD)
        self.object_type = object_type
        self.template_attribute = template_attribute
        self.validate()

    def read(self, istream):
        """Decode the payload fields from the given input stream."""
        super(CreateRequestPayload, self).read(istream)
        buffer = BytearrayStream(istream.read(self.length))

        self.object_type = attributes.ObjectType()
        self.template_attribute = TemplateAttribute()

        self.object_type.read(buffer)
        self.template_attribute.read(buffer)

        self.is_oversized(buffer)
        self.validate()

    def write(self, ostream):
        """Encode the payload fields onto the given output stream."""
        # Serialize into a scratch buffer first so the total payload length
        # is known before the enclosing header is written.
        buffer = BytearrayStream()
        self.object_type.write(buffer)
        self.template_attribute.write(buffer)

        self.length = buffer.length()
        super(CreateRequestPayload, self).write(ostream)
        ostream.write(buffer.buffer)

    def validate(self):
        """Validate payload contents (not yet implemented upstream)."""
        # TODO (peter-hamilton) Finish implementation.
        pass
class CreateResponsePayload(Struct):
    """Response payload for the KMIP Create operation.

    Returns the type and unique identifier of the newly created object,
    plus an optional template attribute with server-assigned properties.
    """

    def __init__(self,
                 object_type=None,
                 unique_identifier=None,
                 template_attribute=None):
        """Construct a Create response payload.

        Args:
            object_type: an ObjectType attribute for the created object.
            unique_identifier: a UniqueIdentifier attribute for the created
                object.
            template_attribute: optional TemplateAttribute echoed by the
                server.
        """
        super(CreateResponsePayload, self).__init__(
            tag=enums.Tags.RESPONSE_PAYLOAD)
        self.object_type = object_type
        self.unique_identifier = unique_identifier
        self.template_attribute = template_attribute
        self.validate()

    def read(self, istream):
        """Decode the payload fields from the given input stream."""
        super(CreateResponsePayload, self).read(istream)
        buffer = BytearrayStream(istream.read(self.length))

        self.object_type = attributes.ObjectType()
        self.unique_identifier = attributes.UniqueIdentifier()

        self.object_type.read(buffer)
        self.unique_identifier.read(buffer)

        # The template attribute is optional in the response encoding.
        if self.is_tag_next(Tags.TEMPLATE_ATTRIBUTE, buffer):
            self.template_attribute = TemplateAttribute()
            self.template_attribute.read(buffer)

        self.is_oversized(buffer)
        self.validate()

    def write(self, ostream):
        """Encode the payload fields onto the given output stream."""
        # Serialize into a scratch buffer first so the total payload length
        # is known before the enclosing header is written.
        buffer = BytearrayStream()
        self.object_type.write(buffer)
        self.unique_identifier.write(buffer)
        if self.template_attribute is not None:
            self.template_attribute.write(buffer)

        self.length = buffer.length()
        super(CreateResponsePayload, self).write(ostream)
        ostream.write(buffer.buffer)

    def validate(self):
        """Validate payload contents (not yet implemented upstream)."""
        # TODO (peter-hamilton) Finish implementation.
        pass
| 32.491379 | 77 | 0.689573 |
from kmip.core import attributes
from kmip.core import enums
from kmip.core.enums import Tags
from kmip.core.objects import TemplateAttribute
from kmip.core.primitives import Struct
from kmip.core.utils import BytearrayStream
class CreateRequestPayload(Struct):
def __init__(self,
object_type=None,
template_attribute=None):
super(CreateRequestPayload, self).__init__(
tag=enums.Tags.REQUEST_PAYLOAD)
self.object_type = object_type
self.template_attribute = template_attribute
self.validate()
def read(self, istream):
super(CreateRequestPayload, self).read(istream)
tstream = BytearrayStream(istream.read(self.length))
self.object_type = attributes.ObjectType()
self.template_attribute = TemplateAttribute()
self.object_type.read(tstream)
self.template_attribute.read(tstream)
self.is_oversized(tstream)
self.validate()
def write(self, ostream):
tstream = BytearrayStream()
self.object_type.write(tstream)
self.template_attribute.write(tstream)
self.length = tstream.length()
super(CreateRequestPayload, self).write(ostream)
ostream.write(tstream.buffer)
def validate(self):
pass
class CreateResponsePayload(Struct):
def __init__(self,
object_type=None,
unique_identifier=None,
template_attribute=None):
super(CreateResponsePayload, self).__init__(
tag=enums.Tags.RESPONSE_PAYLOAD)
self.object_type = object_type
self.unique_identifier = unique_identifier
self.template_attribute = template_attribute
self.validate()
def read(self, istream):
super(CreateResponsePayload, self).read(istream)
tstream = BytearrayStream(istream.read(self.length))
self.object_type = attributes.ObjectType()
self.unique_identifier = attributes.UniqueIdentifier()
self.object_type.read(tstream)
self.unique_identifier.read(tstream)
if self.is_tag_next(Tags.TEMPLATE_ATTRIBUTE, tstream):
self.template_attribute = TemplateAttribute()
self.template_attribute.read(tstream)
self.is_oversized(tstream)
self.validate()
def write(self, ostream):
tstream = BytearrayStream()
self.object_type.write(tstream)
self.unique_identifier.write(tstream)
if self.template_attribute is not None:
self.template_attribute.write(tstream)
self.length = tstream.length()
super(CreateResponsePayload, self).write(ostream)
ostream.write(tstream.buffer)
def validate(self):
pass
| true | true |
f7152fd92189d41c2d60b2ab2fa4b993a10814c8 | 9,994 | py | Python | torchtext/experimental/datasets/language_modeling.py | NicolasHug/text | 651c1f70ee6e75705aa1c5e4d4cf86ff69b6cbed | [
"BSD-3-Clause"
] | null | null | null | torchtext/experimental/datasets/language_modeling.py | NicolasHug/text | 651c1f70ee6e75705aa1c5e4d4cf86ff69b6cbed | [
"BSD-3-Clause"
] | null | null | null | torchtext/experimental/datasets/language_modeling.py | NicolasHug/text | 651c1f70ee6e75705aa1c5e4d4cf86ff69b6cbed | [
"BSD-3-Clause"
] | null | null | null | import torch
import logging
from torchtext.data.utils import get_tokenizer
from torchtext.vocab import build_vocab_from_iterator
from torchtext.experimental.datasets.raw import language_modeling as raw
from torchtext.experimental.datasets.raw.common import check_default_set
from torchtext.experimental.datasets.raw.common import wrap_datasets
logger_ = logging.getLogger(__name__)
def build_vocab(data, transforms):
    """Build a Vocab over *data* by tokenizing every line with *transforms*."""

    def tokenized(lines):
        # Lazily tokenize each raw line so the whole corpus is never
        # materialized twice.
        for raw_line in lines:
            yield transforms(raw_line)

    return build_vocab_from_iterator(tokenized(data), len(data))
class LanguageModelingDataset(torch.utils.data.Dataset):
    """Defines a dataset for language modeling.

    Currently, we only support the following datasets:

        - WikiText2
        - WikiText103
        - PennTreebank
        - WMTNewsCrawl
    """

    def __init__(self, data, vocab, transform):
        """Initiate language modeling dataset.

        Args:
            data: a tensor of token ids (the result of numericalizing the
                string tokens), e.g.
                torch.tensor([token_id_1, token_id_2, token_id_3]).long()
            vocab: Vocabulary object used for dataset.
            transform: Text string transform.
        """
        super(LanguageModelingDataset, self).__init__()
        self.vocab = vocab
        self.transform = transform
        self.data = data

    def __getitem__(self, index):
        return self.data[index]

    def __len__(self):
        return len(self.data)

    def __iter__(self):
        return iter(self.data)

    def get_vocab(self):
        """Return the Vocab object associated with this dataset."""
        return self.vocab
def _setup_datasets(dataset_name, tokenizer, root, vocab, split_, year, language):
    """Shared builder behind all language modeling dataset constructors.

    Builds a vocabulary from the train split when one is not supplied,
    numericalizes every requested split, and wraps each split in a
    LanguageModelingDataset.
    """
    if tokenizer is None:
        tokenizer = get_tokenizer('basic_english')

    split = check_default_set(split_, ('train', 'test', 'valid'), dataset_name)

    if vocab is None:
        # A vocab can only be built from the train data.
        if 'train' not in split:
            raise TypeError("Must pass a vocab if train is not selected.")
        if dataset_name == 'WMTNewsCrawl':
            raw_train, = raw.DATASETS[dataset_name](
                root=root, split=('train',), year=year, language=language)
        else:
            raw_train, = raw.DATASETS[dataset_name](root=root, split=('train',))
        logger_.info('Building Vocab based on train data')
        vocab = build_vocab(raw_train, tokenizer)
    logger_.info('Vocab has %d entries', len(vocab))

    def text_transform(line):
        # Tokenize and numericalize one raw text line.
        return torch.tensor([vocab[token] for token in tokenizer(line)],
                            dtype=torch.long)

    if dataset_name == 'WMTNewsCrawl':
        raw_datasets = raw.DATASETS[dataset_name](
            root=root, split=split, year=year, language=language)
    else:
        raw_datasets = raw.DATASETS[dataset_name](root=root, split=split)
    raw_data = {name: [text_transform(txt) for txt in raw_dataset]
                for name, raw_dataset in zip(split, raw_datasets)}
    logger_.info('Building datasets for {}'.format(split))
    return wrap_datasets(
        tuple(LanguageModelingDataset(raw_data[name], vocab, text_transform)
              for name in split),
        split_)
def WikiText2(tokenizer=None, root='.data', vocab=None, split=('train', 'valid', 'test')):
    """ Defines WikiText2 datasets.

    Create language modeling dataset: WikiText2
    Separately returns the train/test/valid set

    Args:
        tokenizer: callable turning a raw text line into a token list.
            Defaults to the fastText basic_english tokenizer; a spacy
            tokenizer (see example below) or any custom callable works too.
        root: Directory where the datasets are saved. Default: ".data"
        vocab: Vocabulary used for dataset. If None, a new vocabulary is
            built from the train split.
        split: a string or tuple naming the splits to return.
            Default: ('train', 'valid', 'test'). Any subset may be
            requested, e.g. ('train', 'test') or just 'train'. When 'train'
            is not requested, a prebuilt ``vocab`` must be supplied to
            process the valid and/or test data.

    Examples:
        >>> from torchtext.experimental.datasets import WikiText2
        >>> from torchtext.data.utils import get_tokenizer
        >>> tokenizer = get_tokenizer("spacy")
        >>> train_dataset, valid_dataset, test_dataset = WikiText2(tokenizer=tokenizer)
        >>> vocab = train_dataset.get_vocab()
        >>> valid_dataset, = WikiText2(tokenizer=tokenizer, vocab=vocab,
                                       split='valid')
    """
    return _setup_datasets("WikiText2", tokenizer=tokenizer, root=root,
                           vocab=vocab, split_=split, year=None, language=None)
def WikiText103(tokenizer=None, root='.data', vocab=None, split=('train', 'valid', 'test')):
    """ Defines WikiText103 datasets.

    Create language modeling dataset: WikiText103
    Separately returns the train/test/valid set

    Args:
        tokenizer: callable turning a raw text line into a token list.
            Defaults to the fastText basic_english tokenizer; a spacy
            tokenizer (see example below) or any custom callable works too.
        root: Directory where the datasets are saved. Default: ".data"
        vocab: Vocabulary used for dataset. If None, a new vocabulary is
            built from the train split.
        split: a string or tuple naming the splits to return.
            Default: ('train', 'valid', 'test'). Any subset may be
            requested, e.g. ('train', 'test') or just 'train'. When 'train'
            is not requested, a prebuilt ``vocab`` must be supplied to
            process the valid and/or test data.

    Examples:
        >>> from torchtext.experimental.datasets import WikiText103
        >>> from torchtext.data.utils import get_tokenizer
        >>> tokenizer = get_tokenizer("spacy")
        >>> train_dataset, valid_dataset, test_dataset = WikiText103(tokenizer=tokenizer)
        >>> vocab = train_dataset.get_vocab()
        >>> valid_dataset, = WikiText103(tokenizer=tokenizer, vocab=vocab,
                                         split='valid')
    """
    return _setup_datasets("WikiText103", tokenizer=tokenizer, root=root,
                           vocab=vocab, split_=split, year=None, language=None)
def PennTreebank(tokenizer=None, root='.data', vocab=None, split=('train', 'valid', 'test')):
    """ Defines PennTreebank datasets.

    Create language modeling dataset: PennTreebank
    Separately returns the train/test/valid set

    Args:
        tokenizer: callable turning a raw text line into a token list.
            Defaults to the fastText basic_english tokenizer; a spacy
            tokenizer (see example below) or any custom callable works too.
        root: Directory where the datasets are saved. Default: ".data"
        vocab: Vocabulary used for dataset. If None, a new vocabulary is
            built from the train split.
        split: a string or tuple naming the splits to return.
            Default: ('train', 'valid', 'test'). Any subset may be
            requested, e.g. ('train', 'test') or just 'train'. When 'train'
            is not requested, a prebuilt ``vocab`` must be supplied to
            process the valid and/or test data.

    Examples:
        >>> from torchtext.experimental.datasets import PennTreebank
        >>> from torchtext.data.utils import get_tokenizer
        >>> tokenizer = get_tokenizer("spacy")
        >>> train_dataset, valid_dataset, test_dataset = PennTreebank(tokenizer=tokenizer)
        >>> vocab = train_dataset.get_vocab()
        >>> valid_dataset, = PennTreebank(tokenizer=tokenizer, vocab=vocab,
                                          split='valid')
    """
    return _setup_datasets("PennTreebank", tokenizer=tokenizer, root=root,
                           vocab=vocab, split_=split, year=None, language=None)
def WMTNewsCrawl(tokenizer=None, root='.data', vocab=None, split=('train',),
                 year=2010, language='en'):
    """ Defines WMTNewsCrawl datasets.

    Create language modeling dataset: WMTNewsCrawl
    returns the train set

    Args:
        tokenizer: the tokenizer used to preprocess raw text data.
            The default one is basic_english tokenizer in fastText. spacy tokenizer
            is supported as well (see example below). A custom tokenizer is callable
            function with input of a string and output of a token list.
        root: Directory where the datasets are saved. Default: ".data"
        vocab: Vocabulary used for dataset. If None, it will generate a new
            vocabulary based on the train data set.
        split: a string or tuple for the returned datasets
            (Default: ('train',))
        year: the year of the dataset (Default: 2010)
        language: the language of the dataset (Default: 'en')

    Examples:
        >>> from torchtext.experimental.datasets import WMTNewsCrawl
        >>> from torchtext.data.utils import get_tokenizer
        >>> tokenizer = get_tokenizer("spacy")
        >>> train_dataset, = WMTNewsCrawl(tokenizer=tokenizer, split='train')

    Note: WMTNewsCrawl provides datasets based on the year and language instead of train/valid/test.
    """
    # Fix: the previous default ``('train')`` was just the string 'train'
    # (a parenthesized expression, not a tuple). It only worked because
    # check_default_set also accepts bare strings. ``('train',)`` matches
    # the documented default and the sibling dataset constructors.
    return _setup_datasets("WMTNewsCrawl", tokenizer, root, vocab, split,
                           year, language)
# Public registry mapping dataset names to their constructor functions,
# mirroring torchtext.experimental.datasets.raw.DATASETS.
DATASETS = {
    'WikiText2': WikiText2,
    'WikiText103': WikiText103,
    'PennTreebank': PennTreebank,
    'WMTNewsCrawl': WMTNewsCrawl
}
| 42.892704 | 111 | 0.662097 | import torch
import logging
from torchtext.data.utils import get_tokenizer
from torchtext.vocab import build_vocab_from_iterator
from torchtext.experimental.datasets.raw import language_modeling as raw
from torchtext.experimental.datasets.raw.common import check_default_set
from torchtext.experimental.datasets.raw.common import wrap_datasets
logger_ = logging.getLogger(__name__)
def build_vocab(data, transforms):
def apply_transforms(data):
for line in data:
tokens = transforms(line)
yield tokens
return build_vocab_from_iterator(apply_transforms(data), len(data))
class LanguageModelingDataset(torch.utils.data.Dataset):
def __init__(self, data, vocab, transform):
super(LanguageModelingDataset, self).__init__()
self.vocab = vocab
self.transform = transform
self.data = data
def __getitem__(self, i):
return self.data[i]
def __len__(self):
return len(self.data)
def __iter__(self):
for x in self.data:
yield x
def get_vocab(self):
return self.vocab
def _setup_datasets(dataset_name, tokenizer, root, vocab, split_, year, language):
if tokenizer is None:
tokenizer = get_tokenizer('basic_english')
split = check_default_set(split_, ('train', 'test', 'valid'), dataset_name)
if vocab is None:
if 'train' not in split:
raise TypeError("Must pass a vocab if train is not selected.")
if dataset_name == 'WMTNewsCrawl':
raw_train, = raw.DATASETS[dataset_name](root=root, split=('train',), year=year, language=language)
else:
raw_train, = raw.DATASETS[dataset_name](root=root, split=('train',))
logger_.info('Building Vocab based on train data')
vocab = build_vocab(raw_train, tokenizer)
logger_.info('Vocab has %d entries', len(vocab))
def text_transform(line):
return torch.tensor([vocab[token] for token in tokenizer(line)], dtype=torch.long)
if dataset_name == 'WMTNewsCrawl':
raw_datasets = raw.DATASETS[dataset_name](root=root, split=split, year=year, language=language)
else:
raw_datasets = raw.DATASETS[dataset_name](root=root, split=split)
raw_data = {name: list(map(text_transform, raw_dataset)) for name, raw_dataset in zip(split, raw_datasets)}
logger_.info('Building datasets for {}'.format(split))
return wrap_datasets(tuple(LanguageModelingDataset(raw_data[item], vocab, text_transform)
for item in split), split_)
def WikiText2(tokenizer=None, root='.data', vocab=None, split=('train', 'valid', 'test')):
return _setup_datasets("WikiText2", tokenizer, root, vocab, split, None, None)
def WikiText103(tokenizer=None, root='.data', vocab=None, split=('train', 'valid', 'test')):
return _setup_datasets("WikiText103", tokenizer, root, vocab, split, None, None)
def PennTreebank(tokenizer=None, root='.data', vocab=None, split=('train', 'valid', 'test')):
return _setup_datasets("PennTreebank", tokenizer, root, vocab, split, None, None)
def WMTNewsCrawl(tokenizer=None, root='.data', vocab=None, split=('train'), year=2010, language='en'):
return _setup_datasets("WMTNewsCrawl", tokenizer, root, vocab, split, year, language)
DATASETS = {
'WikiText2': WikiText2,
'WikiText103': WikiText103,
'PennTreebank': PennTreebank,
'WMTNewsCrawl': WMTNewsCrawl
}
| true | true |
f715303f7cff1a03797169fbe6f8d2773e09ef68 | 925 | py | Python | challenge/urls.py | superdev0505/mtp-web | 8288765a89daaa7b02dfd7e78cc51c4f12d7fcce | [
"MIT"
] | null | null | null | challenge/urls.py | superdev0505/mtp-web | 8288765a89daaa7b02dfd7e78cc51c4f12d7fcce | [
"MIT"
] | null | null | null | challenge/urls.py | superdev0505/mtp-web | 8288765a89daaa7b02dfd7e78cc51c4f12d7fcce | [
"MIT"
] | null | null | null | from django.urls import path, re_path
from . import views
urlpatterns = [
path('', views.index, name='challenge.index'),
path('create', views.challenge_create, name='challenge.challenge_create'),
path('<str:unique_id>/edit/', views.challenge_edit, name='challenge.my_challenge_edit'),
re_path(r'^my-list/(?P<page>\d*)?$', views.my_challenge_list, name='challenge.my_challenge_list'),
re_path(r'^list/(?P<page>\d*)?$', views.challenge_list, name='challenge.challenge_list'),
path('<str:unique_id>/delete/', views.my_challenge_delete, name='challenge.my_challenge_delete'),
path('<str:unique_id>/', views.challenge_detail, name='challenge.challenge_detail'),
path('<str:unique_id>/leaderboard/', views.challenge_leaderboard, name='challenge.challenge_leaderboard'),
path('ajax/get_challenge_detail/<str:unique_id>/', views.ajax_challenge_detail, name='challenge.ajax_challenge_detail'),
]
| 57.8125 | 124 | 0.740541 | from django.urls import path, re_path
from . import views
urlpatterns = [
path('', views.index, name='challenge.index'),
path('create', views.challenge_create, name='challenge.challenge_create'),
path('<str:unique_id>/edit/', views.challenge_edit, name='challenge.my_challenge_edit'),
re_path(r'^my-list/(?P<page>\d*)?$', views.my_challenge_list, name='challenge.my_challenge_list'),
re_path(r'^list/(?P<page>\d*)?$', views.challenge_list, name='challenge.challenge_list'),
path('<str:unique_id>/delete/', views.my_challenge_delete, name='challenge.my_challenge_delete'),
path('<str:unique_id>/', views.challenge_detail, name='challenge.challenge_detail'),
path('<str:unique_id>/leaderboard/', views.challenge_leaderboard, name='challenge.challenge_leaderboard'),
path('ajax/get_challenge_detail/<str:unique_id>/', views.ajax_challenge_detail, name='challenge.ajax_challenge_detail'),
]
| true | true |
f7153249e54fec334ca1d518b4485c45f6ac4c7a | 693 | py | Python | osiris/vault/__init__.py | skadyan/aws-glue-python-kickstart | 5e3228a0793188d248f801a2b5a522210048ccde | [
"Apache-2.0"
] | 4 | 2020-04-23T18:43:27.000Z | 2022-02-22T03:57:06.000Z | osiris/vault/__init__.py | skadyan/aws-glue-python-kickstart | 5e3228a0793188d248f801a2b5a522210048ccde | [
"Apache-2.0"
] | 1 | 2021-06-02T00:47:12.000Z | 2021-06-02T00:47:12.000Z | osiris/vault/__init__.py | skadyan/aws-glue-python-kickstart | 5e3228a0793188d248f801a2b5a522210048ccde | [
"Apache-2.0"
] | null | null | null | import abc
from abc import abstractmethod
from typing import Union
from osiris.base.generalutils import instantiate
class SecretVault(abc.ABC):
    """Abstract interface for secret back-ends."""

    @abstractmethod
    def get_secret(self, key: str, attr: str = None, **kwargs) -> Union[dict, str]:
        """Return the secret stored under *key*, or a single *attr* of it."""
        pass


class NoopSecretVault(SecretVault):
    """Vault implementation that holds no secrets at all."""

    def get_secret(self, key: str, attr: str = None, **kwargs) -> Union[dict, str]:
        # There is no backing store, so every lookup misses.
        return None


def new_secret_vault(env) -> SecretVault:
    """Instantiate the configured SecretVault.

    Returns None when vaulting is disabled via ``sys.vault.enabled``.
    """
    if not env.flag("sys.vault.enabled"):
        return None
    impl = env.get_property("sys.vault.impl")
    impl_kwargs = env.get_section("sys.vault.impl_kwargs")
    return instantiate(impl, impl_kwargs)
| 24.75 | 83 | 0.688312 | import abc
from abc import abstractmethod
from typing import Union
from osiris.base.generalutils import instantiate
class SecretVault(abc.ABC):
@abstractmethod
def get_secret(self, key: str, attr: str = None, **kwargs) -> Union[dict, str]:
pass
class NoopSecretVault(SecretVault):
def get_secret(self, key: str, attr: str = None, **kwargs) -> Union[dict, str]:
return None
def new_secret_vault(env) -> SecretVault:
instance = None
if env.flag("sys.vault.enabled"):
impl = env.get_property("sys.vault.impl")
impl_kwargs = env.get_section("sys.vault.impl_kwargs")
instance = instantiate(impl, impl_kwargs)
return instance
| true | true |
f71533a0ade4d2a2240d14b32b74a3bbac06db98 | 7,949 | py | Python | i2plib/tunnel.py | undecidedzogvisrainbowvitalispotent-360/i2plib | 6edf51cd5d21cc745aa7e23cb98c582144884fa8 | [
"MIT"
] | 25 | 2018-09-05T16:44:05.000Z | 2022-02-16T18:32:32.000Z | i2plib/tunnel.py | undecidedzogvisvitalispotent8stars360/i2plib | 6edf51cd5d21cc745aa7e23cb98c582144884fa8 | [
"MIT"
] | 2 | 2018-10-24T19:57:16.000Z | 2019-01-26T14:30:40.000Z | i2plib/tunnel.py | undecidedzogvisvitalispotent8stars360/i2plib | 6edf51cd5d21cc745aa7e23cb98c582144884fa8 | [
"MIT"
] | 5 | 2018-10-24T18:01:46.000Z | 2020-12-15T18:16:14.000Z | import logging
import asyncio
import argparse
import i2plib.sam
import i2plib.aiosam
import i2plib.utils
from i2plib.log import logger
BUFFER_SIZE = 65536
async def proxy_data(reader, writer):
    """Pump bytes from *reader* into *writer* until EOF or an error."""
    try:
        while True:
            chunk = await reader.read(BUFFER_SIZE)
            if not chunk:
                # EOF on the source side: stop pumping.
                break
            writer.write(chunk)
    except Exception as e:
        logger.debug('proxy_data_task exception {}'.format(e))
    finally:
        # Close the sink; a RuntimeError may occur if the loop is already
        # shutting down, which is safe to ignore here.
        try:
            writer.close()
        except RuntimeError:
            pass
        logger.debug('close connection')
class I2PTunnel(object):
    """Base I2P Tunnel object, not to be used directly

    :param local_address: A local address to use for a tunnel.
                        E.g. ("127.0.0.1", 6668)
    :param destination: (optional) Destination to use for this tunnel. Can be
                        a base64 encoded string, :class:`i2plib.Destination`
                        instance or None. A new destination is created when it
                        is None.
    :param session_name: (optional) Session nick name. A new session nickname is
                        generated if not specified.
    :param options: (optional) A dict object with i2cp options
    :param loop: (optional) Event loop instance
    :param sam_address: (optional) SAM API address
    """
    def __init__(self, local_address, destination=None, session_name=None,
                 options=None, loop=None, sam_address=i2plib.sam.DEFAULT_ADDRESS):
        self.local_address = local_address
        self.destination = destination
        self.session_name = session_name or i2plib.utils.generate_session_id()
        # Fix: ``options={}`` was a mutable default argument shared between
        # all tunnels; use a None sentinel instead. Passing {} explicitly
        # behaves exactly as before.
        self.options = {} if options is None else options
        self.loop = loop
        self.sam_address = sam_address

    async def _pre_run(self):
        """Create a destination (when none was given) and open the SAM
        control session. Must be awaited before the tunnel starts."""
        if not self.destination:
            self.destination = await i2plib.new_destination(
                sam_address=self.sam_address, loop=self.loop)

        _, self.session_writer = await i2plib.aiosam.create_session(
            self.session_name, style=self.style, options=self.options,
            sam_address=self.sam_address,
            loop=self.loop, destination=self.destination)

    def stop(self):
        """Stop the tunnel"""
        # Closing the SAM control socket tears the session down router-side.
        self.session_writer.close()
class ClientTunnel(I2PTunnel):
    """Client tunnel, a subclass of i2plib.tunnel.I2PTunnel

    If you run a client tunnel with a local address ("127.0.0.1", 6668) and
    a remote destination "irc.echelon.i2p", all connections to 127.0.0.1:6668
    will be proxied to irc.echelon.i2p.

    :param remote_destination: Remote I2P destination, can be either .i2p
                        domain, .b32.i2p address, base64 destination or
                        :class:`i2plib.Destination` instance
    """
    def __init__(self, remote_destination, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # SAM STREAM style provides TCP-like reliable streams over I2P.
        self.style = "STREAM"
        self.remote_destination = remote_destination

    async def run(self):
        """A coroutine used to run the tunnel"""
        await self._pre_run()

        async def handle_client(client_reader, client_writer):
            """Handle local client connection"""
            # Open an I2P stream to the remote destination for this client.
            remote_reader, remote_writer = await i2plib.aiosam.stream_connect(
                    self.session_name, self.remote_destination,
                    sam_address=self.sam_address, loop=self.loop)
            # Pump bytes in both directions until either side closes.
            asyncio.ensure_future(proxy_data(remote_reader, client_writer),
                                  loop=self.loop)
            asyncio.ensure_future(proxy_data(client_reader, remote_writer),
                                  loop=self.loop)

        # Accept local TCP connections and proxy each one through I2P.
        self.server = await asyncio.start_server(handle_client, *self.local_address,
                loop=self.loop)

    def stop(self):
        super().stop()
        # Also stop accepting new local connections.
        self.server.close()
class ServerTunnel(I2PTunnel):
    """Server tunnel, a subclass of i2plib.tunnel.I2PTunnel

    If you want to expose a local service 127.0.0.1:80 to the I2P network, run
    a server tunnel with a local address ("127.0.0.1", 80). If you don't
    provide a private key or a session name, it will use a TRANSIENT
    destination.
    """
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # SAM STREAM style provides TCP-like reliable streams over I2P.
        self.style = "STREAM"

    async def run(self):
        """A coroutine used to run the tunnel"""
        await self._pre_run()

        async def handle_client(incoming, client_reader, client_writer):
            # data and dest may come in one chunk: the peer's destination is
            # terminated by "\n"; anything after it is already payload.
            dest, data = incoming.split(b"\n", 1)
            remote_destination = i2plib.sam.Destination(dest.decode())
            logger.debug("{} client connected: {}.b32.i2p".format(
                self.session_name, remote_destination.base32))

            try:
                # Connect to the local backend with a short timeout so a dead
                # backend does not hold I2P streams open indefinitely.
                remote_reader, remote_writer = await asyncio.wait_for(
                    asyncio.open_connection(
                        host=self.local_address[0],
                        port=self.local_address[1], loop=self.loop),
                    timeout=5, loop=self.loop)
                # Forward any payload that arrived alongside the destination.
                if data: remote_writer.write(data)
                # Pump bytes in both directions until either side closes.
                asyncio.ensure_future(proxy_data(remote_reader, client_writer),
                                      loop=self.loop)
                asyncio.ensure_future(proxy_data(client_reader, remote_writer),
                                      loop=self.loop)
            except ConnectionRefusedError:
                # Backend is down: drop the incoming I2P client.
                client_writer.close()

        async def server_loop():
            try:
                while True:
                    # Wait until the SAM bridge hands us the next I2P client.
                    client_reader, client_writer = await i2plib.aiosam.stream_accept(
                            self.session_name, sam_address=self.sam_address,
                            loop=self.loop)
                    incoming = await client_reader.read(BUFFER_SIZE)
                    asyncio.ensure_future(handle_client(
                        incoming, client_reader, client_writer), loop=self.loop)
            except asyncio.CancelledError:
                # Raised by stop(); exit the accept loop quietly.
                pass

        self.server_loop = asyncio.ensure_future(server_loop(), loop=self.loop)

    def stop(self):
        super().stop()
        # Cancel the accept loop; CancelledError is swallowed inside it.
        self.server_loop.cancel()
if __name__ == '__main__':
    # Standalone CLI: run a client or server tunnel until interrupted.
    parser = argparse.ArgumentParser()
    parser.add_argument('type', metavar="TYPE", choices=('server', 'client'),
                        help="Tunnel type (server or client)")
    parser.add_argument('address', metavar="ADDRESS",
                        help="Local address (e.g. 127.0.0.1:8000)")
    parser.add_argument('--debug', '-d', action='store_true',
                        help='Debugging')
    parser.add_argument('--key', '-k', default='', metavar='PRIVATE_KEY',
                        help='Path to private key file')
    parser.add_argument('--destination', '-D', default='',
                        metavar='DESTINATION', help='Remote destination')
    args = parser.parse_args()

    SAM_ADDRESS = i2plib.utils.get_sam_address()

    logging.basicConfig(level=logging.DEBUG if args.debug else logging.INFO)
    loop = asyncio.get_event_loop()
    loop.set_debug(args.debug)

    # Load a persistent destination from the key file; otherwise the tunnel
    # generates a transient one itself.
    if args.key:
        destination = i2plib.sam.Destination(path=args.key, has_private_key=True)
    else:
        destination = None

    local_address = i2plib.utils.address_from_string(args.address)

    if args.type == "client":
        tunnel = ClientTunnel(args.destination, local_address, loop=loop,
                              destination=destination, sam_address=SAM_ADDRESS)
    elif args.type == "server":
        tunnel = ServerTunnel(local_address, loop=loop, destination=destination,
                              sam_address=SAM_ADDRESS)

    asyncio.ensure_future(tunnel.run(), loop=loop)

    try:
        loop.run_forever()
    except KeyboardInterrupt:
        # Graceful shutdown on Ctrl-C: cancel tunnel tasks before stopping.
        tunnel.stop()
    finally:
        loop.stop()
        loop.close()
| 38.965686 | 85 | 0.603472 | import logging
import asyncio
import argparse
import i2plib.sam
import i2plib.aiosam
import i2plib.utils
from i2plib.log import logger
BUFFER_SIZE = 65536
async def proxy_data(reader, writer):
try:
while True:
data = await reader.read(BUFFER_SIZE)
if not data:
break
writer.write(data)
except Exception as e:
logger.debug('proxy_data_task exception {}'.format(e))
finally:
try:
writer.close()
except RuntimeError:
pass
logger.debug('close connection')
class I2PTunnel(object):
def __init__(self, local_address, destination=None, session_name=None,
options={}, loop=None, sam_address=i2plib.sam.DEFAULT_ADDRESS):
self.local_address = local_address
self.destination = destination
self.session_name = session_name or i2plib.utils.generate_session_id()
self.options = options
self.loop = loop
self.sam_address = sam_address
async def _pre_run(self):
if not self.destination:
self.destination = await i2plib.new_destination(
sam_address=self.sam_address, loop=self.loop)
_, self.session_writer = await i2plib.aiosam.create_session(
self.session_name, style=self.style, options=self.options,
sam_address=self.sam_address,
loop=self.loop, destination=self.destination)
def stop(self):
self.session_writer.close()
class ClientTunnel(I2PTunnel):
def __init__(self, remote_destination, *args, **kwargs):
super().__init__(*args, **kwargs)
self.style = "STREAM"
self.remote_destination = remote_destination
async def run(self):
await self._pre_run()
async def handle_client(client_reader, client_writer):
remote_reader, remote_writer = await i2plib.aiosam.stream_connect(
self.session_name, self.remote_destination,
sam_address=self.sam_address, loop=self.loop)
asyncio.ensure_future(proxy_data(remote_reader, client_writer),
loop=self.loop)
asyncio.ensure_future(proxy_data(client_reader, remote_writer),
loop=self.loop)
self.server = await asyncio.start_server(handle_client, *self.local_address,
loop=self.loop)
def stop(self):
super().stop()
self.server.close()
class ServerTunnel(I2PTunnel):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.style = "STREAM"
async def run(self):
await self._pre_run()
async def handle_client(incoming, client_reader, client_writer):
dest, data = incoming.split(b"\n", 1)
remote_destination = i2plib.sam.Destination(dest.decode())
logger.debug("{} client connected: {}.b32.i2p".format(
self.session_name, remote_destination.base32))
try:
remote_reader, remote_writer = await asyncio.wait_for(
asyncio.open_connection(
host=self.local_address[0],
port=self.local_address[1], loop=self.loop),
timeout=5, loop=self.loop)
if data: remote_writer.write(data)
asyncio.ensure_future(proxy_data(remote_reader, client_writer),
loop=self.loop)
asyncio.ensure_future(proxy_data(client_reader, remote_writer),
loop=self.loop)
except ConnectionRefusedError:
client_writer.close()
async def server_loop():
try:
while True:
client_reader, client_writer = await i2plib.aiosam.stream_accept(
self.session_name, sam_address=self.sam_address,
loop=self.loop)
incoming = await client_reader.read(BUFFER_SIZE)
asyncio.ensure_future(handle_client(
incoming, client_reader, client_writer), loop=self.loop)
except asyncio.CancelledError:
pass
self.server_loop = asyncio.ensure_future(server_loop(), loop=self.loop)
def stop(self):
super().stop()
self.server_loop.cancel()
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('type', metavar="TYPE", choices=('server', 'client'),
help="Tunnel type (server or client)")
parser.add_argument('address', metavar="ADDRESS",
help="Local address (e.g. 127.0.0.1:8000)")
parser.add_argument('--debug', '-d', action='store_true',
help='Debugging')
parser.add_argument('--key', '-k', default='', metavar='PRIVATE_KEY',
help='Path to private key file')
parser.add_argument('--destination', '-D', default='',
metavar='DESTINATION', help='Remote destination')
args = parser.parse_args()
SAM_ADDRESS = i2plib.utils.get_sam_address()
logging.basicConfig(level=logging.DEBUG if args.debug else logging.INFO)
loop = asyncio.get_event_loop()
loop.set_debug(args.debug)
if args.key:
destination = i2plib.sam.Destination(path=args.key, has_private_key=True)
else:
destination = None
local_address = i2plib.utils.address_from_string(args.address)
if args.type == "client":
tunnel = ClientTunnel(args.destination, local_address, loop=loop,
destination=destination, sam_address=SAM_ADDRESS)
elif args.type == "server":
tunnel = ServerTunnel(local_address, loop=loop, destination=destination,
sam_address=SAM_ADDRESS)
asyncio.ensure_future(tunnel.run(), loop=loop)
try:
loop.run_forever()
except KeyboardInterrupt:
tunnel.stop()
finally:
loop.stop()
loop.close()
| true | true |
f71535405665888b171719de9948f63f35341da0 | 948 | py | Python | dune/gdt/test/linearelliptic/mpi_linearelliptic__block_swipdg_discretization.py | TiKeil/dune-gdt | 25c8b987cc07a4b8b966c1a07ea21b78dba7852f | [
"BSD-2-Clause"
] | null | null | null | dune/gdt/test/linearelliptic/mpi_linearelliptic__block_swipdg_discretization.py | TiKeil/dune-gdt | 25c8b987cc07a4b8b966c1a07ea21b78dba7852f | [
"BSD-2-Clause"
] | null | null | null | dune/gdt/test/linearelliptic/mpi_linearelliptic__block_swipdg_discretization.py | TiKeil/dune-gdt | 25c8b987cc07a4b8b966c1a07ea21b78dba7852f | [
"BSD-2-Clause"
] | null | null | null | # ~~~
# This file is part of the dune-gdt project:
# https://github.com/dune-community/dune-gdt
# Copyright 2010-2018 dune-gdt developers and contributors. All rights reserved.
# License: Dual licensed as BSD 2-Clause License (http://opensource.org/licenses/BSD-2-Clause)
# or GPL-2.0+ (http://opensource.org/licenses/gpl-license)
# with "runtime exception" (http://www.dune-project.org/license.html)
# Authors:
# Felix Schindler (2017 - 2018)
# Rene Milk (2017 - 2018)
#
# ~~~
import itertools
from dune.xt.codegen import typeid_to_typedef_name, la_backends
# Grid and test-case names expanded by the dune-xt code generator.
grids = ['Yasp2Grid']
casenames = ['ESV2007DdSubdomainsTestCase',]
# Fully qualified C++ test-case types, one per (case, grid) pair.
testcases = ['Dune::GDT::LinearElliptic::{}<{}>'.format(c, g) for c, g in itertools.product(casenames, grids)]
# Tuples of (testcase, space backend, linear-algebra backend, mangled typedef name).
permutations = itertools.product(testcases, ('gdt',), ('istl_sparse', ))
permutations = [(t, s, l, typeid_to_typedef_name('{}_{}_{}'.format(t, s, l))) for t, s, l in permutations]
| 39.5 | 110 | 0.696203 |
import itertools
from dune.xt.codegen import typeid_to_typedef_name, la_backends
grids = ['Yasp2Grid']
casenames = ['ESV2007DdSubdomainsTestCase',]
testcases = ['Dune::GDT::LinearElliptic::{}<{}>'.format(c, g) for c, g in itertools.product(casenames, grids)]
permutations = itertools.product(testcases, ('gdt',), ('istl_sparse', ))
permutations = [(t, s, l, typeid_to_typedef_name('{}_{}_{}'.format(t, s, l))) for t, s, l in permutations]
| true | true |
f715368c12fac7bcd0f0179357f6b421ee70790a | 795 | py | Python | config_music.py | vincenzodentamaro/transformer-xl | 61b76d783be49e409863667bba8576826bbf54df | [
"MIT"
] | 16 | 2020-09-30T02:31:53.000Z | 2022-03-09T10:27:25.000Z | config_music.py | vincenzodentamaro/transformer-xl | 61b76d783be49e409863667bba8576826bbf54df | [
"MIT"
] | 4 | 2020-11-09T03:58:04.000Z | 2021-09-21T09:00:22.000Z | config_music.py | vincenzodentamaro/transformer-xl | 61b76d783be49e409863667bba8576826bbf54df | [
"MIT"
] | 5 | 2020-09-30T02:31:56.000Z | 2021-10-06T15:50:18.000Z | import joblib
# --- MIDI timing defaults ---------------------------------------------------
tempo = 500000  # microseconds per quarter note (500000 us = 120 BPM, MIDI default)
ppq = 480  # pulses (ticks) per quarter note
numerator = 4  # time-signature numerator
denominator = 4  # time-signature denominator
clocks_per_click = 24  # MIDI clocks per metronome click
notated_32nd_notes_per_beat = 8  # standard MIDI time-signature field

# --- Control-change (pedal) handling ----------------------------------------
cc_kept = [64, 67]  # CC numbers retained (64 = sustain, 67 = soft pedal)
cc_threshold = 64  # presumably: CC values >= threshold count as "on" — confirm
cc_lower = 0  # value emitted for "off" CC events
cc_upper = 127  # value emitted for "on" CC events
vel_value = 64  # fixed note velocity when velocities are normalized

# --- Token-vocabulary sizes --------------------------------------------------
n_notes = 128  # full MIDI note range
n_cc = 2 * len(cc_kept)  # one on and one off token per kept CC
n_sounds = 2 * n_notes + n_cc + 1  # note-on + note-off + CC tokens + 1 pad
n_deltas = 66 + 1  # quantized time-shift tokens + 1 pad
pad_idx = 0  # padding token index

n_jobs = joblib.cpu_count()  # parallel workers for preprocessing

# --- Transformer-XL dimensions (sound / delta / combined streams) ------------
d_sound = 384
d_delta = 256
d_combined = d_sound + d_delta
n_heads_sound = 6
n_heads_delta = 4
n_heads_combined = n_heads_sound + n_heads_delta
n_layers_sound = 3
n_layers_delta = 3
n_layers_combined = 6

# --- Training hyper-parameters -----------------------------------------------
seq_len = 256  # tokens per training segment
mem_len = 384  # Transformer-XL memory length
batch_size = 8
dropout_rate = 0.1
n_epochs = 200
max_segs_per_batch = 20
lr = 0.00002
use_attn_reg = True  # attention regularization toggle

dataset_url = 'https://storage.googleapis.com/magentadata/datasets/maestro/v2.0.0/maestro-v2.0.0-midi.zip'
| 14.722222 | 106 | 0.735849 | import joblib
tempo = 500000
ppq = 480
numerator = 4
denominator = 4
clocks_per_click = 24
notated_32nd_notes_per_beat = 8
cc_kept = [64, 67]
cc_threshold = 64
cc_lower = 0
cc_upper = 127
vel_value = 64
n_notes = 128
n_cc = 2 * len(cc_kept)
n_sounds = 2 * n_notes + n_cc + 1
n_deltas = 66 + 1
pad_idx = 0
n_jobs = joblib.cpu_count()
d_sound = 384
d_delta = 256
d_combined = d_sound + d_delta
n_heads_sound = 6
n_heads_delta = 4
n_heads_combined = n_heads_sound + n_heads_delta
n_layers_sound = 3
n_layers_delta = 3
n_layers_combined = 6
seq_len = 256
mem_len = 384
batch_size = 8
dropout_rate = 0.1
n_epochs = 200
max_segs_per_batch = 20
lr = 0.00002
use_attn_reg = True
dataset_url = 'https://storage.googleapis.com/magentadata/datasets/maestro/v2.0.0/maestro-v2.0.0-midi.zip'
| true | true |
f715368cd5d00722102b52900d74a1a59b5b3689 | 1,003 | py | Python | loaf/projects/admin.py | Charles4th/Loaf | 1a42fd7c1dc74a90231acfee0d65e235eb586ea3 | [
"MIT"
] | 1 | 2018-12-24T03:30:08.000Z | 2018-12-24T03:30:08.000Z | loaf/projects/admin.py | Charles4th/Loaf | 1a42fd7c1dc74a90231acfee0d65e235eb586ea3 | [
"MIT"
] | 2 | 2020-06-05T18:34:54.000Z | 2022-02-10T11:23:33.000Z | loaf/projects/admin.py | Charles4th/Loaf | 1a42fd7c1dc74a90231acfee0d65e235eb586ea3 | [
"MIT"
] | 1 | 2018-08-07T08:49:28.000Z | 2018-08-07T08:49:28.000Z | from django.contrib import admin
from . import models
# Register your models here.
@admin.register(models.Project)
class ProjectAdmin(admin.ModelAdmin):
    """Admin changelist for Project: title links to the edit page,
    searchable/filterable by title and creator."""

    list_display_links = (
        'title',
    )

    search_fields = (
        'title',
    )

    list_filter = (
        'title',
        'creator',
    )

    list_display = (
        'file',
        'title',
        'creator',
        'created_at',
        'updated_at',
    )
@admin.register(models.Like)
class LikeAdmin(admin.ModelAdmin):
    """Read-focused admin listing for Like rows (who liked which project)."""

    list_display = (
        'creator',
        'project',
        'created_at',
        'updated_at',
    )
@admin.register(models.Comment)
class CommentAdmin(admin.ModelAdmin):
    """Admin listing for Comment rows: message text plus author and project."""

    list_display = (
        'message',
        'creator',
        'project',
        'created_at',
        'updated_at',
    )
@admin.register(models.Join)
class JoinAdmin(admin.ModelAdmin):
    """Admin listing for Join rows (which user joined which project)."""

    list_display = (
        'joiner',
        'project',
        'created_at',
        'updated_at',
    )
from . import models
@admin.register(models.Project)
class ProjectAdmin(admin.ModelAdmin):
list_display_links = (
'title',
)
search_fields = (
'title',
)
list_filter = (
'title',
'creator',
)
list_display = (
'file',
'title',
'creator',
'created_at',
'updated_at',
)
@admin.register(models.Like)
class LikeAdmin(admin.ModelAdmin):
list_display = (
'creator',
'project',
'created_at',
'updated_at',
)
@admin.register(models.Comment)
class CommentAdmin(admin.ModelAdmin):
list_display = (
'message',
'creator',
'project',
'created_at',
'updated_at',
)
@admin.register(models.Join)
class JoinAdmin(admin.ModelAdmin):
list_display = (
'joiner',
'project',
'created_at',
'updated_at',
) | true | true |
f71537787e6f655fdc91b195e6460f7fc600f783 | 3,800 | py | Python | src/solution/sdc_workspace/catkin_ws/src/sdc_package/scripts/mission_planner.py | coherentsolutionsinc/issoft-insights-2019-sdc-carla-ros | f6d3e162888bd79d59b771c82ff028df0f70ae11 | [
"MIT"
] | 8 | 2019-06-04T16:21:07.000Z | 2021-09-05T07:24:20.000Z | src/solution/sdc_workspace/catkin_ws/src/sdc_package/scripts/mission_planner.py | coherentsolutionsinc/issoft-insights-2019-sdc-carla-ros | f6d3e162888bd79d59b771c82ff028df0f70ae11 | [
"MIT"
] | null | null | null | src/solution/sdc_workspace/catkin_ws/src/sdc_package/scripts/mission_planner.py | coherentsolutionsinc/issoft-insights-2019-sdc-carla-ros | f6d3e162888bd79d59b771c82ff028df0f70ae11 | [
"MIT"
] | 1 | 2019-06-21T14:37:18.000Z | 2019-06-21T14:37:18.000Z | #!/usr/bin/env python
import os
import csv
import rospy
# TODO: 1. Import waypoint messages
from sdc_package.msg import BaseWaypoint, Path
class MissionPlanner(object):
    """ROS node that loads a fixed list of waypoints from a CSV file and
    publishes them once (latched) on /planner/mission_waypoints."""

    def __init__(self):
        self.start_time = None
        rospy.init_node('mission_planner')
        # Block until the ROS master provides a valid clock.
        self.wait_master_initialization()
        # Latched publisher: late subscribers still receive the last Path.
        self.waypoints_publisher = rospy.Publisher('/planner/mission_waypoints',
            Path, queue_size=1, latch=True)
        # Private parameter: path to the waypoints CSV file.
        waypoints_file_path = rospy.get_param('~waypoints_path')
        waypoints = self.load_waypoints(waypoints_file_path)
        self.publish_waypoints(waypoints)
        rospy.loginfo('Waypoints published')
        # Keep the node alive so the latched topic stays available.
        rospy.spin()

    def wait_master_initialization(self):
        """Spin until rospy reports a nonzero time, i.e. the master is up."""
        while not self.start_time and not rospy.is_shutdown():
            self.start_time = rospy.Time.now().to_nsec()
        if not rospy.is_shutdown():
            rospy.loginfo('Mission Planner: ROS master initialized.')

    def load_waypoints(self, path):
        """Read waypoints from a two-column CSV (x, y) at *path*.

        Returns a list of BaseWaypoint messages; empty if the file is missing.
        NOTE(review): y is negated — presumably a frame-convention flip
        between the CSV source and the ROS frame; confirm against the data.
        """
        waypoints = []
        if os.path.isfile(path):
            waypointsFile = open(path, 'r')
            with waypointsFile:
                reader = csv.reader(waypointsFile)
                for row in reader:
                    waypoint = BaseWaypoint()
                    waypoint.x = float(row[0])
                    waypoint.y = -float(row[1])
                    waypoints.append(waypoint)
            rospy.loginfo('Waypoints Loaded: found %d waypoints', len(waypoints))
        else:
            rospy.logerr('%s is not a file', path)
        return waypoints

    def publish_waypoints(self, waypoints):
        """Wrap *waypoints* in a Path message and publish it."""
        path = Path()
        path.waypoints = waypoints
        self.waypoints_publisher.publish(path)
if __name__ == '__main__':
    try:
        MissionPlanner()
    except rospy.ROSInterruptException:
        # Node was shut down before initialization completed.
        rospy.logerr('Could not start Mission Planner node.')
        pass
import os
import csv
import rospy
from sdc_package.msg import BaseWaypoint, Path
class MissionPlanner(object):
def __init__(self):
self.start_time = None
rospy.init_node('mission_planner')
self.wait_master_initialization()
self.waypoints_publisher = rospy.Publisher('/planner/mission_waypoints',
Path, queue_size=1, latch=True)
waypoints_file_path = rospy.get_param('~waypoints_path')
waypoints = self.load_waypoints(waypoints_file_path)
self.publish_waypoints(waypoints)
rospy.loginfo('Waypoints published')
rospy.spin()
def wait_master_initialization(self):
while not self.start_time and not rospy.is_shutdown():
self.start_time = rospy.Time.now().to_nsec()
if not rospy.is_shutdown():
rospy.loginfo('Mission Planner: ROS master initialized.')
def load_waypoints(self, path):
waypoints = []
if os.path.isfile(path):
waypointsFile = open(path, 'r')
with waypointsFile:
reader = csv.reader(waypointsFile)
for row in reader:
waypoint = BaseWaypoint()
waypoint.x = float(row[0])
waypoint.y = -float(row[1])
waypoints.append(waypoint)
rospy.loginfo('Waypoints Loaded: found %d waypoints', len(waypoints))
else:
rospy.logerr('%s is not a file', path)
return waypoints
def publish_waypoints(self, waypoints):
path = Path()
path.waypoints = waypoints
self.waypoints_publisher.publish(path)
if __name__ == '__main__':
try:
MissionPlanner()
except rospy.ROSInterruptException:
rospy.logerr('Could not start Mission Planner node.')
pass | true | true |
f71537954c3bd01d3b1211f2a051aa20670e6f9c | 3,870 | py | Python | run_local_mertric.py | middleprince/fashionAi | c512936b4983c2fb093008f06e04753180af0a90 | [
"Apache-2.0"
] | 316 | 2018-06-01T16:21:21.000Z | 2022-03-22T03:25:20.000Z | run_local_mertric.py | middleprince/fashionAi | c512936b4983c2fb093008f06e04753180af0a90 | [
"Apache-2.0"
] | 8 | 2018-06-02T07:07:49.000Z | 2019-07-11T06:55:43.000Z | run_local_mertric.py | middleprince/fashionAi | c512936b4983c2fb093008f06e04753180af0a90 | [
"Apache-2.0"
] | 91 | 2018-06-01T17:12:21.000Z | 2022-03-19T06:54:34.000Z | import os
import sys
import time
import numpy as np
import pandas as pd
import argparse
import math
import config as cfg
def str2bool(v):
    """Interpret *v* as a boolean: true for "yes"/"true"/"t"/"1" (case-insensitive)."""
    normalized = v.lower()
    return normalized in {"yes", "true", "t", "1"}
# Command-line interface. Typos in the user-facing help text fixed
# ("Normarlized Error Mertric" -> "Normalized Error Metric", "catgory").
# NOTE(review): `train_set` mutually-exclusive group is created but never
# populated; kept for interface compatibility.
parser = argparse.ArgumentParser(
    description='The Normalized Error Metric Calculation For FashionAI Keypoint Detection Script.')
train_set = parser.add_mutually_exclusive_group()
parser.add_argument('--prediction', default='',
                    help='The path of file containing the prediction of keypoints.')
# Accept common truthy spellings for the flag value (case-insensitive).
parser.add_argument('--cat', type=lambda s: s.lower() in ('true', 't', 'yes', '1'),
                    help='whether print Normalized Error for each category')
parser.add_argument('--gt', default='./stage1_testb_gt.csv',
                    help='The path of file containing the ground truth of keypoints.')
args = parser.parse_args()
def run():
    """Compute and print the overall Normalized Error (NE).

    NE averages, over every visible ground-truth keypoint, the Euclidean
    distance between prediction and ground truth divided by a per-image
    normalization distance (the category-specific reference point pair).
    Exits via parser.error() if either file path is missing or no visible
    keypoint exists (avoids the original ZeroDivisionError).
    """
    if args.prediction.strip() == '' or args.gt.strip() == '':
        parser.error('Must specify the file path of the prediction and ground truth.')

    pred_df = pd.read_csv(args.prediction, encoding='utf-8')
    gt_df = pd.read_csv(args.gt, encoding='utf-8').set_index('image_id')

    num_v = 0.
    sum_dist = 0.
    for _, row in pred_df.iterrows():
        gt = gt_df.loc[row['image_id']]
        img_cat = gt['image_category']
        # Keypoints are stored as 'x_y_visibility' strings.
        pred_points = {kp: [int(v) for v in row[kp].strip().split('_')]
                       for kp in cfg.all_keys}
        gt_points = {kp: [int(v) for v in gt[kp].strip().split('_')]
                     for kp in cfg.all_keys}
        # Normalization distance between the two category reference points.
        lnorm_name, rnorm_name = cfg.normalize_point_name[img_cat]
        lnorm = gt_points[lnorm_name][:-1]
        rnorm = gt_points[rnorm_name][:-1]
        norm_value = math.hypot(lnorm[0] - rnorm[0], lnorm[1] - rnorm[1])
        if norm_value < 1e-3:
            # Degenerate normalization pair: skip the whole image.
            continue
        for kp in cfg.all_keys:
            if gt_points[kp][-1] == -1:
                # Keypoint not labelled for this image.
                continue
            num_v += 1.
            dist = math.hypot(pred_points[kp][0] - gt_points[kp][0],
                              pred_points[kp][1] - gt_points[kp][1])
            sum_dist += dist / norm_value

    if num_v == 0:
        parser.error('No visible keypoints found; cannot compute the metric.')
    print(sum_dist / num_v)
def run_by_cat():
    """Compute and print the Normalized Error separately per clothing category.

    Same metric as run(), but rows are filtered per category in
    cfg.CATEGORIES and one score is printed per category. Categories with no
    visible keypoints are reported instead of crashing with a
    ZeroDivisionError (the original behavior).
    """
    if args.prediction.strip() == '' or args.gt.strip() == '':
        parser.error('Must specify the file path of the prediction and ground truth.')

    pred_df = pd.read_csv(args.prediction, encoding='utf-8')
    gt_df = pd.read_csv(args.gt, encoding='utf-8').set_index('image_id')

    for cat_ in cfg.CATEGORIES:
        num_v = 0.
        sum_dist = 0.
        for _, row in pred_df.iterrows():
            gt = gt_df.loc[row['image_id']]
            img_cat = gt['image_category']
            # NOTE(review): substring match — assumes category names do not
            # overlap; confirm against cfg.CATEGORIES.
            if cat_ not in img_cat:
                continue
            pred_points = {kp: [int(v) for v in row[kp].strip().split('_')]
                           for kp in cfg.all_keys}
            gt_points = {kp: [int(v) for v in gt[kp].strip().split('_')]
                         for kp in cfg.all_keys}
            lnorm_name, rnorm_name = cfg.normalize_point_name[img_cat]
            lnorm = gt_points[lnorm_name][:-1]
            rnorm = gt_points[rnorm_name][:-1]
            norm_value = math.hypot(lnorm[0] - rnorm[0], lnorm[1] - rnorm[1])
            if norm_value < 1e-3:
                continue
            for kp in cfg.all_keys:
                if gt_points[kp][-1] == -1:
                    continue
                num_v += 1.
                dist = math.hypot(pred_points[kp][0] - gt_points[kp][0],
                                  pred_points[kp][1] - gt_points[kp][1])
                sum_dist += dist / norm_value

        if num_v == 0:
            # Guard: no visible keypoints for this category.
            print('{}:'.format(cat_), 'no visible keypoints')
            continue
        print('{}:'.format(cat_), sum_dist / num_v)
if __name__ == '__main__':
    # --cat selects the per-category breakdown; default is the overall score.
    if not args.cat:
        run()
    else:
        run_by_cat()
| 35.833333 | 148 | 0.582946 | import os
import sys
import time
import numpy as np
import pandas as pd
import argparse
import math
import config as cfg
def str2bool(v):
return v.lower() in ("yes", "true", "t", "1")
parser = argparse.ArgumentParser(
description='The Normarlized Error Mertric Calculation For FashionAI Keypoint Detection Script.')
train_set = parser.add_mutually_exclusive_group()
parser.add_argument('--prediction', default='',
help='The path of file containing the prediction of keypoints.')
parser.add_argument('--cat', type=lambda s: s.lower() in ['True', 'true', 't', 'yes', '1'], help="whether print Normarlized Error for each catgory")
parser.add_argument('--gt', default='./stage1_testb_gt.csv',
help='The path of file containing the ground truth of keypoints.')
args = parser.parse_args()
def run():
if args.prediction.strip() == '' or args.gt.strip() == '':
parser.error('Must specify the file path of the prediction and ground truth.')
pred_df = pd.read_csv(args.prediction, encoding='utf-8')
gt_df = pd.read_csv(args.gt, encoding='utf-8').set_index('image_id')
num_v = 0.
sum_dist = 0.
for index, row in pred_df.iterrows():
gt = gt_df.loc[row['image_id']]
img_cat = gt['image_category']
gt_points = {}
pred_points = {}
for kp in cfg.all_keys:
pred_kp = row[kp].strip().split('_')
gt_kp = gt[kp].strip().split('_')
pred_points[kp] = [int(_) for _ in pred_kp]
gt_points[kp] = [int(_) for _ in gt_kp]
lnorm_name, rnorm_name = cfg.normalize_point_name[img_cat]
lnorm, rnorm = gt_points[lnorm_name][:-1], gt_points[rnorm_name][:-1]
norm_value = math.pow(math.pow(lnorm[0] - rnorm[0], 2.) + math.pow(lnorm[1] - rnorm[1], 2.), 0.5)
for kp in cfg.all_keys:
if gt_points[kp][-1] == -1 or norm_value < 1e-3:
continue
num_v += 1.
dist = math.pow(math.pow(pred_points[kp][0] - gt_points[kp][0], 2.) + math.pow(pred_points[kp][1] - gt_points[kp][1], 2.), 0.5)
sum_dist += dist/norm_value
sum_dist = sum_dist/num_v
print(sum_dist)
def run_by_cat():
if args.prediction.strip() == '' or args.gt.strip() == '':
parser.error('Must specify the file path of the prediction and ground truth.')
pred_df = pd.read_csv(args.prediction, encoding='utf-8')
gt_df = pd.read_csv(args.gt, encoding='utf-8').set_index('image_id')
for cat_ in cfg.CATEGORIES:
num_v = 0.
sum_dist = 0.
for index, row in pred_df.iterrows():
gt = gt_df.loc[row['image_id']]
img_cat = gt['image_category']
if cat_ not in img_cat:
continue
gt_points = {}
pred_points = {}
for kp in cfg.all_keys:
pred_kp = row[kp].strip().split('_')
gt_kp = gt[kp].strip().split('_')
pred_points[kp] = [int(_) for _ in pred_kp]
gt_points[kp] = [int(_) for _ in gt_kp]
lnorm_name, rnorm_name = cfg.normalize_point_name[img_cat]
lnorm, rnorm = gt_points[lnorm_name][:-1], gt_points[rnorm_name][:-1]
norm_value = math.pow(math.pow(lnorm[0] - rnorm[0], 2.) + math.pow(lnorm[1] - rnorm[1], 2.), 0.5)
for kp in cfg.all_keys:
if gt_points[kp][-1] == -1 or norm_value < 1e-3:
continue
num_v += 1.
dist = math.pow(math.pow(pred_points[kp][0] - gt_points[kp][0], 2.) + math.pow(pred_points[kp][1] - gt_points[kp][1], 2.), 0.5)
sum_dist += dist/norm_value
sum_dist = sum_dist/num_v
print('{}:'.format(cat_), sum_dist)
if __name__ == '__main__':
if not args.cat:
run()
else:
run_by_cat()
| true | true |
f71538dd1163b1cc57d1780f5f59d458c6767583 | 5,306 | py | Python | examples/plot_SimpleLineTest.py | aasensio/Lightweaver | 9a261e72235f05df548148da140012f40dbd1e4b | [
"MIT"
] | 13 | 2020-01-13T14:01:23.000Z | 2022-03-11T08:36:45.000Z | examples/plot_SimpleLineTest.py | aasensio/Lightweaver | 9a261e72235f05df548148da140012f40dbd1e4b | [
"MIT"
] | 30 | 2020-01-17T13:00:37.000Z | 2022-03-07T12:08:37.000Z | examples/plot_SimpleLineTest.py | aasensio/Lightweaver | 9a261e72235f05df548148da140012f40dbd1e4b | [
"MIT"
] | 4 | 2021-07-07T11:21:07.000Z | 2021-11-23T06:52:02.000Z | """
===============================================================
Computing a simple NLTE 8542 line profile in a FAL C atmosphere
===============================================================
"""
#%%
# First, we import everything we need. Lightweaver is typically imported as
# `lw`, but things like the library of model atoms and Fal atmospheres need to
# be imported separately.
from lightweaver.fal import Falc82
from lightweaver.rh_atoms import H_6_atom, C_atom, O_atom, Si_atom, Al_atom, \
CaII_atom, Fe_atom, He_9_atom, He_atom, MgII_atom, N_atom, Na_atom, S_atom
import lightweaver as lw
import matplotlib.pyplot as plt
import time
import numpy as np
#%%
# Now, we define the functions that will be used in our spectral synthesis.
# First `synth_8542`, which synthesises the line for a given atmosphere and
# returns it.
def synth_8542(atmos, conserve, useNe, wave):
    '''
    Synthesise a spectral line for given atmosphere with different
    conditions.

    Parameters
    ----------
    atmos : lw.Atmosphere
        The atmospheric model in which to synthesise the line.
    conserve : bool
        Whether to start from LTE electron density and conserve charge, or
        simply use from the electron density present in the atomic model.
    useNe : bool
        Whether to use the electron density present in the model as the
        starting solution, or compute the LTE electron density.
    wave : np.ndarray
        Array of wavelengths over which to resynthesise the final line
        profile for muz=1.

    Returns
    -------
    ctx : lw.Context
        The Context object that was used to compute the equilibrium
        populations.
    Iwave : np.ndarray
        The intensity at muz=1 for each wavelength in `wave`.
    '''
    # Configure the atmospheric angular quadrature (5-point).
    atmos.quadrature(5)
    # Configure the set of atomic models to use.
    aSet = lw.RadiativeSet([H_6_atom(), C_atom(), O_atom(), Si_atom(),
                            Al_atom(), CaII_atom(), Fe_atom(), He_9_atom(),
                            MgII_atom(), N_atom(), Na_atom(), S_atom()
                            ])
    # Set H and Ca to "active" i.e. NLTE, everything else participates as an
    # LTE background.
    aSet.set_active('H', 'Ca')
    # Compute the necessary wavelength dependent information (SpectrumConfiguration).
    spect = aSet.compute_wavelength_grid()

    # Either compute the equilibrium populations at the fixed electron density
    # provided in the model, or iterate an LTE electron density and compute the
    # corresponding equilibrium populations (SpeciesStateTable).
    if useNe:
        eqPops = aSet.compute_eq_pops(atmos)
    else:
        eqPops = aSet.iterate_lte_ne_eq_pops(atmos)

    # Configure the Context which holds the state of the simulation for the
    # backend, and provides the python interface to the backend.
    # Feel free to increase Nthreads to increase the number of threads the
    # program will use.
    ctx = lw.Context(atmos, spect, eqPops, conserveCharge=conserve, Nthreads=1)
    start = time.time()
    # Iterate the Context to convergence and report the wall-clock time taken.
    iterate_ctx(ctx)
    end = time.time()
    print('%.2f s' % (end - start))
    # Update the background populations based on the converged solution and
    # compute the final intensity for mu=1 on the provided wavelength grid.
    eqPops.update_lte_atoms_Hmin_pops(atmos)
    Iwave = ctx.compute_rays(wave, [atmos.muz[-1]], stokes=False)
    return ctx, Iwave
def iterate_ctx(ctx, Nscatter=3, NmaxIter=500):
    '''
    Iterate a Context to convergence.

    Parameters
    ----------
    ctx : lw.Context
        The Context to iterate.
    Nscatter : int, optional
        Number of initial iterations that only update the angle-averaged
        radiation field J before statistical equilibrium is applied.
    NmaxIter : int, optional
        Maximum number of iterations before giving up.
    '''
    for i in range(NmaxIter):
        # Compute the formal solution.
        dJ = ctx.formal_sol_gamma_matrices()
        # Just update J for Nscatter iterations.
        if i < Nscatter:
            continue
        # Update the active populations under statistical equilibrium,
        # conserving charge if this option was set on the Context.
        delta = ctx.stat_equil()

        # Converged in both the relative change of J and the populations.
        if dJ < 3e-3 and delta < 1e-3:
            print('%d iterations' % i)
            print('-'*80)
            return
    # The original fell through silently on non-convergence; warn so a
    # non-converged solution is not mistaken for a converged one.
    print('Warning: not converged within %d iterations' % NmaxIter)
    print('-'*80)
#%%
# The wavelength grid to output the final synthesised line on.
wave = np.linspace(853.9444, 854.9444, 1001)

#%%
# Load an lw.Atmosphere object containing the FAL C atmosphere with 82 points
# in depth, before synthesising the Ca II 8542 \AA line profile using:
#
# - The given electron density.
# - The electron density charge conserved from a starting LTE solution.
# - The LTE electron density.
#
# These results are then plotted.
atmosRef = Falc82()
ctxRef, IwaveRef = synth_8542(atmosRef, conserve=False, useNe=True, wave=wave)
atmosCons = Falc82()
ctxCons, IwaveCons = synth_8542(atmosCons, conserve=True, useNe=False, wave=wave)
atmosLte = Falc82()
ctx, IwaveLte = synth_8542(atmosLte, conserve=False, useNe=False, wave=wave)

plt.plot(wave, IwaveRef, label='Reference FAL')
plt.plot(wave, IwaveCons, label='Reference Cons')
plt.plot(wave, IwaveLte, label='Reference LTE n_e')
# Fix: labels were passed to plt.plot but plt.legend() was never called,
# so the legend never appeared.
plt.legend()
plt.show()
| 38.729927 | 85 | 0.671692 |
from lightweaver.fal import Falc82
from lightweaver.rh_atoms import H_6_atom, C_atom, O_atom, Si_atom, Al_atom, \
CaII_atom, Fe_atom, He_9_atom, He_atom, MgII_atom, N_atom, Na_atom, S_atom
import lightweaver as lw
import matplotlib.pyplot as plt
import time
import numpy as np
def synth_8542(atmos, conserve, useNe, wave):
atmos.quadrature(5)
aSet = lw.RadiativeSet([H_6_atom(), C_atom(), O_atom(), Si_atom(),
Al_atom(), CaII_atom(), Fe_atom(), He_9_atom(),
MgII_atom(), N_atom(), Na_atom(), S_atom()
])
aSet.set_active('H', 'Ca')
spect = aSet.compute_wavelength_grid()
if useNe:
eqPops = aSet.compute_eq_pops(atmos)
else:
eqPops = aSet.iterate_lte_ne_eq_pops(atmos)
ctx = lw.Context(atmos, spect, eqPops, conserveCharge=conserve, Nthreads=1)
start = time.time()
iterate_ctx(ctx)
end = time.time()
print('%.2f s' % (end - start))
eqPops.update_lte_atoms_Hmin_pops(atmos)
Iwave = ctx.compute_rays(wave, [atmos.muz[-1]], stokes=False)
return ctx, Iwave
def iterate_ctx(ctx, Nscatter=3, NmaxIter=500):
for i in range(NmaxIter):
dJ = ctx.formal_sol_gamma_matrices()
if i < Nscatter:
continue
delta = ctx.stat_equil()
if dJ < 3e-3 and delta < 1e-3:
print('%d iterations' % i)
print('-'*80)
return
wave = np.linspace(853.9444, 854.9444, 1001)
atmosRef = Falc82()
ctxRef, IwaveRef = synth_8542(atmosRef, conserve=False, useNe=True, wave=wave)
atmosCons = Falc82()
ctxCons, IwaveCons = synth_8542(atmosCons, conserve=True, useNe=False, wave=wave)
atmosLte = Falc82()
ctx, IwaveLte = synth_8542(atmosLte, conserve=False, useNe=False, wave=wave)
plt.plot(wave, IwaveRef, label='Reference FAL')
plt.plot(wave, IwaveCons, label='Reference Cons')
plt.plot(wave, IwaveLte, label='Reference LTE n_e')
plt.show()
| true | true |
f7153948cabbb10fc8cd4bd9ce5fe812b7a32534 | 1,921 | py | Python | rme/datasets/mnist.py | satishjasthi/convnet-study | ccd20c90e449fc8db694abf706db178e9413e57b | [
"MIT"
] | 40 | 2016-09-17T00:57:42.000Z | 2021-09-25T05:24:27.000Z | rme/datasets/mnist.py | satishjasthi/convnet-study | ccd20c90e449fc8db694abf706db178e9413e57b | [
"MIT"
] | 1 | 2017-09-08T08:29:31.000Z | 2017-09-13T23:21:09.000Z | rme/datasets/mnist.py | satishjasthi/convnet-study | ccd20c90e449fc8db694abf706db178e9413e57b | [
"MIT"
] | 22 | 2016-11-06T03:57:22.000Z | 2021-09-25T05:24:32.000Z | from __future__ import absolute_import
import os
import numpy as np
import gzip
import struct
from .preprocessing import one_hotify
def load(data_dir, valid_ratio=0.0, one_hot=True, shuffle=False, dtype='float32'):
    """Load the MNIST dataset from gzipped IDX files in *data_dir*.

    Expects the four standard files (``train``/``t10k`` images and labels,
    ``*-ubyte.gz``) in *data_dir*.

    Args:
        data_dir: directory containing the gzipped IDX files.
        valid_ratio: fraction of the training set held out for validation.
        one_hot: if True, convert labels to one-hot vectors via one_hotify.
        shuffle: if True, shuffle the training set before the split.
        dtype: numpy dtype for the image arrays.

    Returns:
        Tuple ``(train_set, valid_set, test_set)`` of dicts with keys
        ``'data'`` (shape (num, rows, cols, 1)) and ``'labels'``.
    """
    train_set, valid_set, test_set = {}, {}, {}
    # Get data from the gzipped IDX binary files
    for img_set, file_name in zip((train_set, test_set), ('train', 't10k')):
        # Load images: 16-byte big-endian header, then uint8 pixel data
        img_path = os.path.join(data_dir, file_name + '-images-idx3-ubyte.gz')
        with gzip.open(img_path, 'rb') as f:
            magic_num, num_imgs, num_rows, num_cols = struct.unpack('>iiii',
                                                                    f.read(16))
            shape = (num_imgs, num_rows, num_cols, 1)
            # np.frombuffer replaces the deprecated (now removed) np.fromstring;
            # astype() copies, so the read-only buffer view is never mutated.
            img_set['data'] = np.frombuffer(
                f.read(), dtype='uint8').astype(dtype).reshape(shape)
        # Load labels: 8-byte big-endian header, then one uint8 per sample
        label_path = os.path.join(data_dir, file_name + '-labels-idx1-ubyte.gz')
        with gzip.open(label_path, 'rb') as f:
            magic_num, num_labels = struct.unpack('>ii', f.read(8))
            img_set['labels'] = np.frombuffer(
                f.read(), dtype='uint8').astype('int')
            if one_hot:
                img_set['labels'] = one_hotify(img_set['labels'])

    N = train_set['data'].shape[0]
    if shuffle:
        # Shuffle before carving off the validation split
        new_order = np.random.permutation(np.arange(N))
        train_set['data'] = train_set['data'][new_order]
        train_set['labels'] = train_set['labels'][new_order]

    # Number of samples kept for training; the remainder is validation
    M = int((1 - valid_ratio)*N)
    valid_set['data'] = train_set['data'][M:]
    valid_set['labels'] = train_set['labels'][M:]
    train_set['data'] = train_set['data'][:M]
    train_set['labels'] = train_set['labels'][:M]

    return train_set, valid_set, test_set
def preprocess(dataset):
    """Standardise MNIST pixel values in place with hard-coded statistics.

    Subtracts the fixed mean and divides by the fixed standard deviation,
    mutating *dataset* and returning it.
    """
    pixel_mean, pixel_std = 33.3, 78.6
    dataset -= pixel_mean  # centre
    dataset /= pixel_std   # scale
    return dataset
| 33.12069 | 82 | 0.630401 | from __future__ import absolute_import
import os
import numpy as np
import gzip
import struct
from .preprocessing import one_hotify
def load(data_dir, valid_ratio=0.0, one_hot=True, shuffle=False, dtype='float32'):
train_set, valid_set, test_set = {}, {}, {}
for img_set, file_name in zip((train_set, test_set), ('train', 't10k')):
img_path = os.path.join(data_dir, file_name + '-images-idx3-ubyte.gz')
with gzip.open(img_path, 'rb') as f:
magic_num, num_imgs, num_rows, num_cols = struct.unpack('>iiii',
f.read(16))
shape = (num_imgs, num_rows, num_cols, 1)
img_set['data'] = np.fromstring(f.read(),
dtype='uint8').astype(dtype).reshape(shape)
label_path = os.path.join(data_dir, file_name + '-labels-idx1-ubyte.gz')
with gzip.open(label_path, 'rb') as f:
magic_num, num_labels = struct.unpack('>ii', f.read(8))
img_set['labels'] = np.fromstring(f.read(),
dtype='uint8').astype('int')
if one_hot:
img_set['labels'] = one_hotify(img_set['labels'])
N = train_set['data'].shape[0]
if shuffle:
new_order = np.random.permutation(np.arange(N))
train_set['data'] = train_set['data'][new_order]
train_set['labels'] = train_set['labels'][new_order]
M = int((1 - valid_ratio)*N)
valid_set['data'] = train_set['data'][M:]
valid_set['labels'] = train_set['labels'][M:]
train_set['data'] = train_set['data'][:M]
train_set['labels'] = train_set['labels'][:M]
return train_set, valid_set, test_set
def preprocess(dataset):
mean = 33.3
std = 78.6
dataset -= mean
dataset /= std
return dataset
| true | true |
f7153a02e898f5f116d487d957f85db359c928ad | 5,631 | py | Python | run_preprocessing_oggm.py | Wang518hongyu/PyGEM | 1c9fa133133b3d463b1383d4792c535fa61c5b8d | [
"MIT"
] | null | null | null | run_preprocessing_oggm.py | Wang518hongyu/PyGEM | 1c9fa133133b3d463b1383d4792c535fa61c5b8d | [
"MIT"
] | null | null | null | run_preprocessing_oggm.py | Wang518hongyu/PyGEM | 1c9fa133133b3d463b1383d4792c535fa61c5b8d | [
"MIT"
] | null | null | null | """ PRE-PROCESSING FOR MODEL RUNS USING OGGM """
# Built-in libraries
import argparse
import collections
import inspect
import multiprocessing
import os
import time
# External libraries
import pandas as pd
import pickle
import matplotlib.pyplot as plt
import numpy as np
import xarray as xr
# Local libraries
import class_climate
#import class_mbdata
import pygem.pygem_input as pygem_prms
import pygemfxns_gcmbiasadj as gcmbiasadj
import pygemfxns_modelsetup as modelsetup
import spc_split_glaciers as split_glaciers
from oggm import cfg
from oggm import graphics
from oggm import tasks, utils, workflow
from oggm.core import climate
from oggm.core.flowline import FluxBasedModel
from oggm.shop import rgitopo
from pygem.massbalance import PyGEMMassBalance
from pygem.glacierdynamics import MassRedistributionCurveModel
from pygem.oggm_compat import single_flowline_glacier_directory
from pygem.shop import calving, debris, mbdata, icethickness
#%%
# ===== OGGM CONFIG FILE =====
# Initialize OGGM and set up the default run parameters
# Initialise OGGM; multiprocessing is disabled here (serial per-glacier work).
cfg.initialize(logging_level='WORKFLOW')
cfg.PARAMS['use_multiprocessing'] = False
#cfg.PARAMS['mp_processes'] = 1
# Grid points kept around the glacier outline in the local maps.
cfg.PARAMS['border'] = 10
# Usually we recommend to set dl_verify to True - here it is quite slow
# because of the huge files so we just turn it off.
# Switch it on for real cases!
cfg.PARAMS['dl_verify'] = True
cfg.PARAMS['use_multiple_flowlines'] = False
# temporary directory for testing (deleted on computer restart)
#cfg.PATHS['working_dir'] = utils.get_temp_dir('PyGEM_ex')
cfg.PATHS['working_dir'] = pygem_prms.oggm_gdir_fp
# ===== LOAD GLACIERS =====
# Use the explicit glacier list from the PyGEM input module when provided;
# otherwise select glaciers from the RGI tables by region/sub-region/number.
if pygem_prms.glac_no is not None:
    glac_no = pygem_prms.glac_no
else:
    main_glac_rgi_all = modelsetup.selectglaciersrgitable(
        rgi_regionsO1=pygem_prms.rgi_regionsO1, rgi_regionsO2=pygem_prms.rgi_regionsO2,
        rgi_glac_number=pygem_prms.rgi_glac_number)
    glac_no = list(main_glac_rgi_all['rgino_str'].values)
# Convert 'region.number' strings into full RGI v6 ids, e.g. '1.00570' -> 'RGI60-01.00570'.
rgi_ids = ['RGI60-' + x.split('.')[0].zfill(2) + '.' + x.split('.')[1] for x in glac_no]
#%% ===== SELECT BEST DEM =====
# Get the pre-processed topography data
# - creates directories from scratch
gdirs = rgitopo.init_glacier_directories_from_rgitopo(rgi_ids)
# ===== FLOWLINES (w debris) =====
# - checks if directories are created (only use if you're on an already prepared directory)
#gdirs = workflow.init_glacier_directories(rgi_ids)
print('\nTO-DO LIST:')
print(' - reinstall from git\n\n')
# Compute all the stuff
# Entity tasks run for every glacier directory, in order.
list_tasks = [
    # Tasks for OGGM: masks -> centerlines -> flowlines -> catchments
    tasks.glacier_masks,
    tasks.compute_centerlines,
    tasks.initialize_flowlines,
    tasks.compute_downstream_line,
    tasks.catchment_area,
    tasks.catchment_width_geom,
    tasks.catchment_width_correction,
    # tasks.compute_downstream_line, # check??
    # tasks.compute_downstream_bedshape,
    # OGGM needs this to advance the glacier - it will be the exact same simply with additional bins below
    # - init_present_time_glacier does this!
    # # New workflow following Huss and Farinotti (2012) - squeezed flowline
    # # - squeezed flowline averages slow of all branches over a bin
    # # - OGGM does it based on the main flowline where most of the mass is; also we have more control with frontal ablation width
    # Debris tasks (PyGEM shop extensions)
    debris.debris_to_gdir,
    debris.debris_binned,
    # Consensus ice thickness (gridded, then binned onto the flowlines)
    icethickness.consensus_gridded,
    icethickness.consensus_binned,
    # Mass balance data
    mbdata.mb_df_to_gdir,
]
# Execute every task over all glacier directories.
for task in list_tasks:
    workflow.execute_entity_task(task, gdirs)
## ===== Mass balance data =====
##mbdata.mb_bins_to_reg_glacierwide(mb_binned_fp=pygem_prms.mb_binned_fp, O1Regions=['01'])
##workflow.execute_entity_task(mbdata.mb_bins_to_glacierwide, gdirs)
#workflow.execute_entity_task(mbdata.mb_df_to_gdir, gdirs)
# ===== CALVING CALIBRATION =====
# Individual glaciers
#for gdir in gdirs:
# if gdir.is_tidewater:
# calving.calibrate_calving_k_single_wconsensus(gdir)
## Perform inversion based on PyGEM MB
### Add thickness, width_m, and dx_meter to inversion flowlines so they are compatible with PyGEM's
### mass balance model (necessary because OGGM's inversion flowlines use pixel distances; however,
### this will likely be rectified in the future)
#fls_inv = gdirs[0].read_pickle('inversion_flowlines')
#%%
# ----- Alternative to use squeezed flowlines from Huss and Farinotti (2012) -----
#tasks.simple_glacier_masks, # much more robust mask than the one used for flowlines
#tasks.elevation_band_flowline, # same as Huss and Farinotti; produces the binned elevation (30m), length, and width
#tasks.fixed_dx_elevation_band_flowline, # converts the binned elevation, length, width to the fixed dx grid in OGGM
# # output is the same flowline object
# ----- Alternative way of running tasks -----
#for rgi_id in rgi_ids:
# gdirs = rgitopo.init_glacier_directories_from_rgitopo([rgi_id])
# gdir = gdirs[0]
# tasks.glacier_masks(gdir)
# tasks.compute_centerlines(gdir)
# tasks.glacier_masks(gdir)
# tasks.compute_centerlines(gdir)
# tasks.initialize_flowlines(gdir)
# tasks.compute_downstream_line(gdir)
# tasks.catchment_area(gdir)
# tasks.catchment_width_geom(gdir)
# tasks.catchment_width_correction(gdir)
# # Debris tasks
# debris.debris_to_gdir(gdir)
# debris.debris_binned(gdir)
# # Consensus ice thickness
# icethickness.consensus_gridded(gdir)
# icethickness.consensus_binned(gdir)
# # Tidewater
# if gdir.is_tidewater:
# calving.calibrate_calving_k_single_wconsensus(gdir) | 35.866242 | 129 | 0.74818 |
import argparse
import collections
import inspect
import multiprocessing
import os
import time
import pandas as pd
import pickle
import matplotlib.pyplot as plt
import numpy as np
import xarray as xr
import class_climate
import pygem.pygem_input as pygem_prms
import pygemfxns_gcmbiasadj as gcmbiasadj
import pygemfxns_modelsetup as modelsetup
import spc_split_glaciers as split_glaciers
from oggm import cfg
from oggm import graphics
from oggm import tasks, utils, workflow
from oggm.core import climate
from oggm.core.flowline import FluxBasedModel
from oggm.shop import rgitopo
from pygem.massbalance import PyGEMMassBalance
from pygem.glacierdynamics import MassRedistributionCurveModel
from pygem.oggm_compat import single_flowline_glacier_directory
from pygem.shop import calving, debris, mbdata, icethickness
cfg.initialize(logging_level='WORKFLOW')
cfg.PARAMS['use_multiprocessing'] = False
cfg.PARAMS['border'] = 10
cfg.PARAMS['dl_verify'] = True
cfg.PARAMS['use_multiple_flowlines'] = False
cfg.PATHS['working_dir'] = pygem_prms.oggm_gdir_fp
if pygem_prms.glac_no is not None:
glac_no = pygem_prms.glac_no
else:
main_glac_rgi_all = modelsetup.selectglaciersrgitable(
rgi_regionsO1=pygem_prms.rgi_regionsO1, rgi_regionsO2=pygem_prms.rgi_regionsO2,
rgi_glac_number=pygem_prms.rgi_glac_number)
glac_no = list(main_glac_rgi_all['rgino_str'].values)
rgi_ids = ['RGI60-' + x.split('.')[0].zfill(2) + '.' + x.split('.')[1] for x in glac_no]
gdirs = rgitopo.init_glacier_directories_from_rgitopo(rgi_ids)
#gdirs = workflow.init_glacier_directories(rgi_ids)
print('\nTO-DO LIST:')
print(' - reinstall from git\n\n')
# Compute all the stuff
list_tasks = [
# Tasks for OGGM
tasks.glacier_masks,
tasks.compute_centerlines,
tasks.initialize_flowlines,
tasks.compute_downstream_line,
tasks.catchment_area,
tasks.catchment_width_geom,
tasks.catchment_width_correction,
# tasks.compute_downstream_line, # check??
# tasks.compute_downstream_bedshape,
# OGGM needs this to advance the glacier - it will be the exact same simply with additional bins below
# - init_present_time_glacier does this!
# # New workflow following Huss and Farinotti (2012) - squeezed flowline
# # - squeezed flowline averages slow of all branches over a bin
# # - OGGM does it based on the main flowline where most of the mass is; also we have more control with frontal ablation width
# Debris tasks
debris.debris_to_gdir,
debris.debris_binned,
# Consensus ice thickness
icethickness.consensus_gridded,
icethickness.consensus_binned,
# Mass balance data
mbdata.mb_df_to_gdir,
]
for task in list_tasks:
workflow.execute_entity_task(task, gdirs)
## ===== Mass balance data =====
##mbdata.mb_bins_to_reg_glacierwide(mb_binned_fp=pygem_prms.mb_binned_fp, O1Regions=['01'])
##workflow.execute_entity_task(mbdata.mb_bins_to_glacierwide, gdirs)
#workflow.execute_entity_task(mbdata.mb_df_to_gdir, gdirs)
# ===== CALVING CALIBRATION =====
# Individual glaciers
#for gdir in gdirs:
# if gdir.is_tidewater:
# calving.calibrate_calving_k_single_wconsensus(gdir)
## Perform inversion based on PyGEM MB
### Add thickness, width_m, and dx_meter to inversion flowlines so they are compatible with PyGEM's
sks.simple_glacier_masks, # much more robust mask than the one used for flowlines
#tasks.elevation_band_flowline, # same as Huss and Farinotti; produces the binned elevation (30m), length, and width
#tasks.fixed_dx_elevation_band_flowline, # converts the binned elevation, length, width to the fixed dx grid in OGGM
# # output is the same flowline object
# ----- Alternative way of running tasks -----
#for rgi_id in rgi_ids:
# gdirs = rgitopo.init_glacier_directories_from_rgitopo([rgi_id])
# gdir = gdirs[0]
# tasks.glacier_masks(gdir)
# tasks.compute_centerlines(gdir)
# tasks.glacier_masks(gdir)
# tasks.compute_centerlines(gdir)
# tasks.initialize_flowlines(gdir)
# tasks.compute_downstream_line(gdir)
# tasks.catchment_area(gdir)
# tasks.catchment_width_geom(gdir)
# tasks.catchment_width_correction(gdir)
# # Debris tasks
# debris.debris_to_gdir(gdir)
# debris.debris_binned(gdir)
# # Consensus ice thickness
# icethickness.consensus_gridded(gdir)
# icethickness.consensus_binned(gdir)
# # Tidewater
# if gdir.is_tidewater:
# calving.calibrate_calving_k_single_wconsensus(gdir) | true | true |
f7153a70ee09cafbc4a4a4209f921a512961caf3 | 308 | py | Python | aispace/datasets/tokenizer/__init__.py | SmileGoat/AiSpace | 35fc120667e4263c99b300815e0bf018f5064a40 | [
"Apache-2.0"
] | 32 | 2020-01-16T07:59:03.000Z | 2022-03-31T09:24:00.000Z | aispace/datasets/tokenizer/__init__.py | SmileGoat/AiSpace | 35fc120667e4263c99b300815e0bf018f5064a40 | [
"Apache-2.0"
] | 9 | 2020-06-05T03:27:06.000Z | 2022-03-12T01:00:17.000Z | aispace/datasets/tokenizer/__init__.py | SmileGoat/AiSpace | 35fc120667e4263c99b300815e0bf018f5064a40 | [
"Apache-2.0"
] | 3 | 2020-06-09T02:22:50.000Z | 2021-07-19T06:07:07.000Z | # -*- coding: utf-8 -*-
# @Time : 2019-11-10 16:50
# @Author : yingyuankai
# @Email : yingyuankai@aliyun.com
# @File : __init__.py
from .bert_tokenizer import BertTokenizer
from .tokenizer_base import BaseTokenizer
from .xlnet_tokenizer import XlnetTokenizer
from .gpt_tokenizer import CPMTokenizer | 30.8 | 43 | 0.746753 |
from .bert_tokenizer import BertTokenizer
from .tokenizer_base import BaseTokenizer
from .xlnet_tokenizer import XlnetTokenizer
from .gpt_tokenizer import CPMTokenizer | true | true |
f7153af43ab719b288088a86b292514bb5b4ec0a | 2,233 | py | Python | gcalcli/authorization.py | kdrabek/gcalcli | c05d84ea14a0e85f3689efc6ddd258de33c76e95 | [
"MIT"
] | null | null | null | gcalcli/authorization.py | kdrabek/gcalcli | c05d84ea14a0e85f3689efc6ddd258de33c76e95 | [
"MIT"
] | null | null | null | gcalcli/authorization.py | kdrabek/gcalcli | c05d84ea14a0e85f3689efc6ddd258de33c76e95 | [
"MIT"
] | null | null | null | import json
from pathlib import Path
from google.oauth2.credentials import Credentials
from google_auth_oauthlib.flow import Flow
# set of permissions for particular API
SCOPES = 'https://www.googleapis.com/auth/calendar'
CONFIG_PATH = Path.home() / '.gcalcli'
CREDENTIALS_PATH = CONFIG_PATH / 'credentials.json'
TOKEN_PATH = CONFIG_PATH / 'token.json'
def open_file(path, formatter=None):
    """Read *path* as text; if *formatter* is given, apply it to the content."""
    with open(path, 'r') as src:
        raw = src.read()
    return formatter(raw) if formatter else raw
def save_file(path, content):
    """Write *content* to *path* (text mode); return the count written."""
    with open(path, 'w') as out:
        written = out.write(content)
    return written
def create_credentials(token, flow, scopes=SCOPES):
    """Build a google Credentials object from a token dict and its OAuth flow.

    The token endpoint and client id/secret come from the flow's client
    configuration; the access and refresh tokens come from *token*.
    """
    client_config = flow.client_config
    return Credentials(
        token['access_token'],
        refresh_token=token['refresh_token'],
        token_uri=client_config['token_uri'],
        client_id=client_config['client_id'],
        client_secret=client_config['client_secret'],
        scopes=scopes,
    )
def setup_authentication():
    """Interactively obtain and persist Google OAuth credentials.

    Prompts for the client-secrets JSON (from the Google API console),
    stores it, runs the out-of-band OAuth flow, stores the resulting
    token, and returns a Credentials object built from both.
    """
    Path.mkdir(CONFIG_PATH, exist_ok=True)
    # Step 1: get and persist the client secrets pasted by the user.
    print('Please go to Google API console,')
    print('then generate & download credentials .json file')
    client_secrets = input("Paste contents of the file here: ")
    save_file(CREDENTIALS_PATH, client_secrets)
    # Step 2: run the OOB OAuth flow and exchange the code for a token.
    oauth_flow = Flow.from_client_secrets_file(
        CREDENTIALS_PATH, SCOPES, redirect_uri='urn:ietf:wg:oauth:2.0:oob'
    )
    auth_url, _ = oauth_flow.authorization_url()
    print('Please go to this URL: {}'.format(auth_url))
    auth_code = input('Enter the authorization code: ')
    token_data = oauth_flow.fetch_token(code=auth_code)
    # Step 3: persist the token and hand back ready-to-use credentials.
    save_file(TOKEN_PATH, json.dumps(token_data))
    return create_credentials(token_data, oauth_flow)
def is_authentication_setup():
    """Return True if both the client secrets and an OAuth token exist on disk.

    The client-secrets file must contain valid JSON; the token file only
    needs to be readable. Any failure (missing/unreadable file, malformed
    JSON) is reported and treated as "not set up".
    """
    Path.mkdir(CONFIG_PATH, exist_ok=True)
    try:
        # NOTE: these two were previously bound to swapped names
        # (credentials content in `token` and vice versa); the check is
        # unchanged, only the naming is fixed.
        credentials = open_file(CREDENTIALS_PATH, json.loads)
        token = open_file(TOKEN_PATH)
    except (OSError, ValueError) as e:
        # OSError: file missing/unreadable; ValueError: malformed JSON
        # (json.JSONDecodeError subclasses ValueError).
        print(e)
        return False
    return credentials is not None and token is not None
def load_credentials():
    """Reconstruct Credentials from the stored client secrets and token."""
    Path.mkdir(CONFIG_PATH, exist_ok=True)
    # The stored token is JSON; parse it on read.
    token_data = open_file(TOKEN_PATH, formatter=json.loads)
    # Rebuild the flow from the stored client secrets (OOB redirect).
    oauth_flow = Flow.from_client_secrets_file(
        CREDENTIALS_PATH, SCOPES,
        redirect_uri='urn:ietf:wg:oauth:2.0:oob')
    return create_credentials(token_data, oauth_flow)
| 27.9125 | 74 | 0.695477 | import json
from pathlib import Path
from google.oauth2.credentials import Credentials
from google_auth_oauthlib.flow import Flow
SCOPES = 'https://www.googleapis.com/auth/calendar'
CONFIG_PATH = Path.home() / '.gcalcli'
CREDENTIALS_PATH = CONFIG_PATH / 'credentials.json'
TOKEN_PATH = CONFIG_PATH / 'token.json'
def open_file(path, formatter=None):
with open(path, 'r') as f:
if formatter:
return formatter(f.read())
return f.read()
def save_file(path, content):
with open(path, 'w') as f:
return f.write(content)
def create_credentials(token, flow, scopes=SCOPES):
return Credentials(
token['access_token'],
refresh_token=token['refresh_token'],
token_uri=flow.client_config['token_uri'],
client_id=flow.client_config['client_id'],
client_secret=flow.client_config['client_secret'],
scopes=scopes
)
def setup_authentication():
Path.mkdir(CONFIG_PATH, exist_ok=True)
print('Please go to Google API console,')
print('then generate & download credentials .json file')
creds = input("Paste contents of the file here: ")
save_file(CREDENTIALS_PATH, creds)
flow = Flow.from_client_secrets_file(
CREDENTIALS_PATH, SCOPES, redirect_uri='urn:ietf:wg:oauth:2.0:oob'
)
auth_url, _ = flow.authorization_url()
print('Please go to this URL: {}'.format(auth_url))
code = input('Enter the authorization code: ')
token = flow.fetch_token(code=code)
save_file(TOKEN_PATH, json.dumps(token))
return create_credentials(token, flow)
def is_authentication_setup():
Path.mkdir(CONFIG_PATH, exist_ok=True)
try:
token = open_file(CREDENTIALS_PATH, json.loads)
credentials = open_file(TOKEN_PATH)
except Exception as e:
print(e)
return False
return token is not None and credentials is not None
def load_credentials():
Path.mkdir(CONFIG_PATH, exist_ok=True)
flow = Flow.from_client_secrets_file(
CREDENTIALS_PATH, SCOPES,
redirect_uri='urn:ietf:wg:oauth:2.0:oob')
token = open_file(TOKEN_PATH, formatter=json.loads)
return create_credentials(token, flow)
| true | true |
f7153b1e77bb06edb0103c75b470f2e4165017f6 | 4,895 | py | Python | snpdb/views/views_autocomplete.py | SACGF/variantgrid | 515195e2f03a0da3a3e5f2919d8e0431babfd9c9 | [
"RSA-MD"
] | 5 | 2021-01-14T03:34:42.000Z | 2022-03-07T15:34:18.000Z | snpdb/views/views_autocomplete.py | SACGF/variantgrid | 515195e2f03a0da3a3e5f2919d8e0431babfd9c9 | [
"RSA-MD"
] | 551 | 2020-10-19T00:02:38.000Z | 2022-03-30T02:18:22.000Z | snpdb/views/views_autocomplete.py | SACGF/variantgrid | 515195e2f03a0da3a3e5f2919d8e0431babfd9c9 | [
"RSA-MD"
] | null | null | null | from abc import ABC
from django.contrib.auth.models import User
from django.db.models.functions import Length
from django.db.models.query_utils import Q
from django.utils.decorators import method_decorator
from django.views.decorators.cache import cache_page
from django.views.decorators.vary import vary_on_cookie
from library.constants import MINUTE_SECS
from library.django_utils.autocomplete_utils import AutocompleteView
from snpdb.models import VCF, Sample, Cohort, CustomColumnsCollection, CustomColumn, Tag, Trio, \
Lab, GenomicIntervalsCollection, GenomeBuild, ImportStatus, Project
class GenomeBuildAutocompleteView(AutocompleteView, ABC):
    """Abstract autocomplete base that can narrow a queryset to a genome build."""

    def filter_to_genome_build(self, qs, path_to_genome_build):
        """Restrict *qs* to the genome build forwarded by the widget, if any.

        *path_to_genome_build* is the ORM lookup path from the queryset's
        model to its GenomeBuild field.
        """
        genome_build_id = self.forwarded.get('genome_build_id')
        if not genome_build_id:
            # Nothing forwarded: leave the queryset unfiltered.
            return qs
        genome_build = GenomeBuild.objects.get(pk=genome_build_id)
        return qs.filter(**{path_to_genome_build: genome_build})
@method_decorator(cache_page(MINUTE_SECS), name='dispatch')
class UserAutocompleteView(AutocompleteView):
    """Autocomplete over all users; matches last name, first name and username."""
    fields = ['last_name', 'first_name', 'username']
    def get_user_queryset(self, user):
        # Every user is searchable regardless of the requesting user.
        return User.objects.all()
@method_decorator(cache_page(MINUTE_SECS), name='dispatch')
class UsernameAutocompleteView(AutocompleteView):
    """
    Autocomplete over all users, matched and labelled by username.

    Kept separate from UserAutocompleteView because the field order here
    (username first) determines the result sort order.
    """
    fields = ['username', 'first_name', 'last_name']
    def get_user_queryset(self, user):
        return User.objects.all()
    def get_result_label(self, obj):
        # Show just the username rather than the default str(user).
        return obj.username
@method_decorator(cache_page(MINUTE_SECS), name='dispatch')
class LabAutocompleteView(AutocompleteView):
    """Autocomplete over labs belonging to active organizations."""
    fields = ['organization__name', 'name']
    def get_user_queryset(self, user):
        return Lab.objects.filter(organization__active=True)
    def get_result_label(self, obj):
        # Qualify the lab with its organization to disambiguate same-named labs.
        return f'{obj.organization.name} - {obj.name}'
@method_decorator([cache_page(MINUTE_SECS)], name='get')  # Doesn't need to vary_on_cookie as no permissions on Proj
class ProjectAutocompleteView(AutocompleteView):
    """Autocomplete over all projects (no per-user filtering)."""
    fields = ['name']
    def get_user_queryset(self, user):
        return Project.objects.all()
@method_decorator([cache_page(MINUTE_SECS), vary_on_cookie], name='dispatch')
class VCFAutocompleteView(GenomeBuildAutocompleteView):
    """Autocomplete over successfully-imported VCFs visible to the user."""
    fields = ['name']
    def get_user_queryset(self, user):
        qs = VCF.filter_for_user(user, True).filter(import_status=ImportStatus.SUCCESS)
        # Optionally narrow to the genome build forwarded by the form.
        return self.filter_to_genome_build(qs, "genome_build")
@method_decorator([cache_page(MINUTE_SECS), vary_on_cookie], name='dispatch')
class SampleAutocompleteView(GenomeBuildAutocompleteView):
    """Autocomplete over successfully-imported samples visible to the user."""
    fields = ['name']
    def get_user_queryset(self, user):
        sample_qs = Sample.filter_for_user(user, True).filter(import_status=ImportStatus.SUCCESS)
        # The genome build lives on the sample's VCF.
        return self.filter_to_genome_build(sample_qs, "vcf__genome_build")
@method_decorator([cache_page(MINUTE_SECS), vary_on_cookie], name='dispatch')
class CohortAutocompleteView(GenomeBuildAutocompleteView):
    """Autocomplete over the user's cohorts, optionally per genome build."""
    fields = ['name']
    def get_user_queryset(self, user):
        # A cohort may have no VCF; if it has one, it must have imported OK.
        vcf_success_if_exists = Q(vcf__isnull=True) | Q(vcf__import_status=ImportStatus.SUCCESS)
        qs = Cohort.filter_for_user(user, success_status_only=True).filter(vcf_success_if_exists)
        return self.filter_to_genome_build(qs, "genome_build")
class CustomColumnAutocompleteView(AutocompleteView):
    """Autocomplete for custom columns within a forwarded columns collection."""
    fields = ['column__grid_column_name']

    def get_user_queryset(self, user):
        # The forwarding field is named differently in Analysis vs UserSettings.
        collection_pk = self.forwarded.get('columns') or self.forwarded.get('custom_columns_collection')
        if collection_pk:
            collections_qs = CustomColumnsCollection.filter_for_user(user).filter(pk=collection_pk)
        else:
            # No collection forwarded: return no columns at all.
            collections_qs = CustomColumnsCollection.objects.none()
        return CustomColumn.objects.filter(custom_columns_collection__in=collections_qs)
class GenomicIntervalsCollectionAutocompleteView(GenomeBuildAutocompleteView):
    """Autocomplete over the user's successfully-imported genomic intervals."""
    fields = ['name']
    def get_user_queryset(self, user):
        qs = GenomicIntervalsCollection.filter_for_user(user).filter(import_status=ImportStatus.SUCCESS)
        return self.filter_to_genome_build(qs, "genome_build")
@method_decorator(cache_page(5), name='dispatch')  # short cache: tags change often
class TagAutocompleteView(AutocompleteView):
    """Autocomplete over all tags, shortest tag ids first."""
    fields = ['id']
    def get_user_queryset(self, user):
        # Shorter tags rank first so exact/near matches surface early.
        return Tag.objects.all().order_by(Length("id").asc())
@method_decorator([cache_page(MINUTE_SECS), vary_on_cookie], name='dispatch')
class TrioAutocompleteView(GenomeBuildAutocompleteView):
    """Autocomplete over the user's successfully-imported trios."""
    fields = ['name']
    def get_user_queryset(self, user):
        qs = Trio.filter_for_user(user, success_status_only=True)
        # The genome build lives on the trio's cohort.
        return self.filter_to_genome_build(qs, "cohort__genome_build")
| 37.083333 | 116 | 0.756486 | from abc import ABC
from django.contrib.auth.models import User
from django.db.models.functions import Length
from django.db.models.query_utils import Q
from django.utils.decorators import method_decorator
from django.views.decorators.cache import cache_page
from django.views.decorators.vary import vary_on_cookie
from library.constants import MINUTE_SECS
from library.django_utils.autocomplete_utils import AutocompleteView
from snpdb.models import VCF, Sample, Cohort, CustomColumnsCollection, CustomColumn, Tag, Trio, \
Lab, GenomicIntervalsCollection, GenomeBuild, ImportStatus, Project
class GenomeBuildAutocompleteView(AutocompleteView, ABC):
def filter_to_genome_build(self, qs, path_to_genome_build):
genome_build_id = self.forwarded.get('genome_build_id')
if genome_build_id:
genome_build = GenomeBuild.objects.get(pk=genome_build_id)
qs = qs.filter(**{path_to_genome_build: genome_build})
return qs
@method_decorator(cache_page(MINUTE_SECS), name='dispatch')
class UserAutocompleteView(AutocompleteView):
fields = ['last_name', 'first_name', 'username']
def get_user_queryset(self, user):
return User.objects.all()
@method_decorator(cache_page(MINUTE_SECS), name='dispatch')
class UsernameAutocompleteView(AutocompleteView):
fields = ['username', 'first_name', 'last_name']
def get_user_queryset(self, user):
return User.objects.all()
def get_result_label(self, obj):
return obj.username
@method_decorator(cache_page(MINUTE_SECS), name='dispatch')
class LabAutocompleteView(AutocompleteView):
fields = ['organization__name', 'name']
def get_user_queryset(self, user):
return Lab.objects.filter(organization__active=True)
def get_result_label(self, obj):
return f'{obj.organization.name} - {obj.name}'
@method_decorator([cache_page(MINUTE_SECS)], name='get')
class ProjectAutocompleteView(AutocompleteView):
fields = ['name']
def get_user_queryset(self, user):
return Project.objects.all()
@method_decorator([cache_page(MINUTE_SECS), vary_on_cookie], name='dispatch')
class VCFAutocompleteView(GenomeBuildAutocompleteView):
fields = ['name']
def get_user_queryset(self, user):
qs = VCF.filter_for_user(user, True).filter(import_status=ImportStatus.SUCCESS)
return self.filter_to_genome_build(qs, "genome_build")
@method_decorator([cache_page(MINUTE_SECS), vary_on_cookie], name='dispatch')
class SampleAutocompleteView(GenomeBuildAutocompleteView):
fields = ['name']
def get_user_queryset(self, user):
sample_qs = Sample.filter_for_user(user, True).filter(import_status=ImportStatus.SUCCESS)
return self.filter_to_genome_build(sample_qs, "vcf__genome_build")
@method_decorator([cache_page(MINUTE_SECS), vary_on_cookie], name='dispatch')
class CohortAutocompleteView(GenomeBuildAutocompleteView):
fields = ['name']
def get_user_queryset(self, user):
vcf_success_if_exists = Q(vcf__isnull=True) | Q(vcf__import_status=ImportStatus.SUCCESS)
qs = Cohort.filter_for_user(user, success_status_only=True).filter(vcf_success_if_exists)
return self.filter_to_genome_build(qs, "genome_build")
class CustomColumnAutocompleteView(AutocompleteView):
fields = ['column__grid_column_name']
def get_user_queryset(self, user):
# Called different things in Analysis/UserSettings
columns = self.forwarded.get('columns') or self.forwarded.get('custom_columns_collection')
if columns:
custom_columns_collections_qs = CustomColumnsCollection.filter_for_user(user).filter(pk=columns)
else:
custom_columns_collections_qs = CustomColumnsCollection.objects.none()
return CustomColumn.objects.filter(custom_columns_collection__in=custom_columns_collections_qs)
class GenomicIntervalsCollectionAutocompleteView(GenomeBuildAutocompleteView):
fields = ['name']
def get_user_queryset(self, user):
qs = GenomicIntervalsCollection.filter_for_user(user).filter(import_status=ImportStatus.SUCCESS)
return self.filter_to_genome_build(qs, "genome_build")
@method_decorator(cache_page(5), name='dispatch')
class TagAutocompleteView(AutocompleteView):
fields = ['id']
def get_user_queryset(self, user):
return Tag.objects.all().order_by(Length("id").asc())
@method_decorator([cache_page(MINUTE_SECS), vary_on_cookie], name='dispatch')
class TrioAutocompleteView(GenomeBuildAutocompleteView):
fields = ['name']
def get_user_queryset(self, user):
qs = Trio.filter_for_user(user, success_status_only=True)
return self.filter_to_genome_build(qs, "cohort__genome_build")
| true | true |
f7153bf91286bae42e9a55fce4714d6889e21164 | 233 | py | Python | conexao.py | gabrielmonzato20/ProjetoCp | a0d6a3204487d653669284f651c911c09386d626 | [
"Apache-2.0"
] | null | null | null | conexao.py | gabrielmonzato20/ProjetoCp | a0d6a3204487d653669284f651c911c09386d626 | [
"Apache-2.0"
] | null | null | null | conexao.py | gabrielmonzato20/ProjetoCp | a0d6a3204487d653669284f651c911c09386d626 | [
"Apache-2.0"
] | 1 | 2018-09-19T12:28:08.000Z | 2018-09-19T12:28:08.000Z | def mensagem():
print('Criando no python')
def tabuada():
n = int(input('Digite um número que deseja ver a tabuada: '))
for x in range (1,11):
print('{} X {:2} = {:2}'.format(n, x, n*x))
# Script entry: show the banner, then run the interactive times table.
mensagem()
tabuada()
| 21.181818 | 65 | 0.562232 | def mensagem():
print('Criando no python')
def tabuada():
n = int(input('Digite um número que deseja ver a tabuada: '))
for x in range (1,11):
print('{} X {:2} = {:2}'.format(n, x, n*x))
mensagem()
tabuada()
| true | true |
f7153c21736cca53d92914b2228e578ffc94a1f1 | 13,114 | py | Python | projects/g3h1-cp-fml-interpreter/src/lexer/dfa.py | keybrl/xdu-coursework | 9d0e905bef28c18d87d3b97643de0d32f9f08ee0 | [
"MIT"
] | null | null | null | projects/g3h1-cp-fml-interpreter/src/lexer/dfa.py | keybrl/xdu-coursework | 9d0e905bef28c18d87d3b97643de0d32f9f08ee0 | [
"MIT"
] | null | null | null | projects/g3h1-cp-fml-interpreter/src/lexer/dfa.py | keybrl/xdu-coursework | 9d0e905bef28c18d87d3b97643de0d32f9f08ee0 | [
"MIT"
] | null | null | null | from enum import unique, Enum
class DFA:
    """Deterministic finite automaton recognising one lexical token type.

    ``source_data`` must be a dict with:
      * ``'type'``   -- the ``Token`` kind this automaton recognises;
      * ``'as_set'`` -- set of accepting (final) state numbers;
      * ``'stm'``    -- transition matrix: maps an input character or a
        ``SpecificCharSet`` category name to a tuple indexed by the current
        state, where an entry of ``None`` means "no transition".
    State 0 is the start state.
    """
    def __init__(self, source_data):
        # Validate the construction dict eagerly so a malformed table fails
        # loudly at build time rather than misbehaving during lexing.
        # (Error message strings are kept verbatim.)
        if type(source_data) != dict:
            raise TypeError('第 1 个参数期望 {arg_type_expect} 类型,却接收到类型 {arg_type} '.format(
                arg_type_expect='dict', arg_type=str(type(source_data))
            ))
        if type(source_data.get('type')) != Token:
            raise TypeError('第 1 个参数的 "type" 字段期望 {arg_type_expect} 类型,却接收到类型 {arg_type} '.format(
                arg_type_expect='Token', arg_type=str(type(source_data.get('type')))
            ))
        self.token_type = source_data.get('type')
        if type(source_data.get('as_set')) != set:
            raise TypeError('第 1 个参数的 "as_set" 字段期望 {arg_type_expect} 类型,却接收到类型 {arg_type} '.format(
                arg_type_expect='set', arg_type=str(type(source_data.get('as_set')))
            ))
        self.as_set = source_data.get('as_set')
        if type(source_data.get('stm')) != dict:
            raise TypeError('第 1 个参数的 "stm" 字段期望 {arg_type_expect} 类型,却接收到类型 {arg_type} '.format(
                arg_type_expect='dict', arg_type=str(type(source_data.get('stm')))
            ))
        self.stm = source_data.get('stm')
        # Current state; 0 is the initial state.
        self.state = 0
    # Clear the automaton state (return to the initial state).
    def clear(self):
        self.state = 0
    # Transition function for a single input character.
    # Returns bool: True if a transition was taken, otherwise False.
    def move(self, ch):
        # 1) Exact-character transition has the highest priority.
        if self.stm.get(ch) is not None:
            if self.stm.get(ch)[self.state] is not None:
                self.state = self.stm.get(ch)[self.state]
            else:
                return False
        # 2) Named character-set transitions, tried in a fixed order:
        #    BLANK, then NONZERO_DIGIT, then DIGIT. Note: once a category
        #    matches, a None entry fails the move (no fall-through).
        elif self.stm.get(SpecificCharSet.BLANK) is not None \
                and ch in SpecificCharSet.CHARSET_MAP.get(SpecificCharSet.BLANK):
            if self.stm.get(SpecificCharSet.BLANK)[self.state] is not None:
                self.state = self.stm.get(SpecificCharSet.BLANK)[self.state]
            else:
                return False
        elif self.stm.get(SpecificCharSet.NONZERO_DIGIT) is not None \
                and ch in SpecificCharSet.CHARSET_MAP.get(SpecificCharSet.NONZERO_DIGIT):
            if self.stm.get(SpecificCharSet.NONZERO_DIGIT)[self.state] is not None:
                self.state = self.stm.get(SpecificCharSet.NONZERO_DIGIT)[self.state]
            else:
                return False
        elif self.stm.get(SpecificCharSet.DIGIT) is not None \
                and ch in SpecificCharSet.CHARSET_MAP.get(SpecificCharSet.DIGIT):
            if self.stm.get(SpecificCharSet.DIGIT)[self.state] is not None:
                self.state = self.stm.get(SpecificCharSet.DIGIT)[self.state]
            else:
                return False
        # 3) Wildcard (ANY) transition.
        elif self.stm.get(SpecificCharSet.ANY) is not None:
            if self.stm.get(SpecificCharSet.ANY)[self.state] is not None:
                self.state = self.stm.get(SpecificCharSet.ANY)[self.state]
            else:
                return False
        # 4) Character not accepted by this automaton at all.
        else:
            return False
        return True
    # Whether the automaton currently sits in an accepting state.
    def is_access(self):
        return self.state in self.as_set
@unique
class Token(Enum):
    """Token kinds produced by the FML lexer."""
    # Reserved words
    ORIGIN = 1
    SCALE = 2
    ROT = 3
    IS = 4
    TO = 5
    STEP = 6
    DRAW = 7
    FOR = 8
    FROM = 9
    COLOR = 10
    BACKGROUND = 11
    # Delimiters
    SEMICOLON = 21  # semicolon
    L_BRACKET = 22  # left bracket
    R_BRACKET = 23  # right bracket
    COMMA = 24  # comma
    # Operators
    PLUS = 35  # plus sign
    MINUS = 36  # minus sign
    MUL = 37  # multiplication sign
    DIV = 38  # division sign
    POWER = 39  # exponentiation sign
    # Others
    FUNC = 51  # function name
    NUM = 52  # numeric literal
    CONST_ID = 53  # named constant
    T = 54  # parameter
    COMMENT = 61  # comment
    NON_TOKEN = 62  # empty token (end of source program)
    ERR_TOKEN = 63  # error token
class SpecificCharSet(object):
    """Names of the character-class keys used in DFA transition matrices."""
    NONZERO_DIGIT = 'NONZERO_DIGIT'
    DIGIT = 'DIGIT'
    BLANK = 'BLANK'
    ANY = 'ANY'  # wildcard class: matches any character (no CHARSET_MAP entry)
    # concrete members of each named character class
    CHARSET_MAP = {
        'NONZERO_DIGIT': {'1', '2', '3', '4', '5', '6', '7', '8', '9'},
        'DIGIT': {'0', '1', '2', '3', '4', '5', '6', '7', '8', '9'},
        'BLANK': {'\n', ' '}
    }
# 识别各种记号的 DFA
# type 是 DFA 识别的记号的类型
# as_set 是 DFA 的接受状态集,access state set
# stm 是 DFA 状态转移矩阵,state transition matrix,状态 0 为起始状态
DFA_DATA = (
# 保留字
{
'type': Token.ORIGIN,
'as_set': {7, },
'stm': {
'o': (1, None, None, None, None, None, None, None),
'r': (None, 2, None, None, None, None, None, None),
'i': (None, None, 3, None, 5, None, None, None),
'g': (None, None, None, 4, None, None, None, None),
'n': (None, None, None, None, None, 6, None, None),
SpecificCharSet.BLANK: (None, None, None, None, None, None, 7, None)
}
}, {
'type': Token.SCALE,
'as_set': {6, },
'stm': {
's': (1, None, None, None, None, None, None),
'c': (None, 2, None, None, None, None, None),
'a': (None, None, 3, None, None, None, None),
'l': (None, None, None, 4, None, None, None),
'e': (None, None, None, None, 5, None, None),
SpecificCharSet.BLANK: (None, None, None, None, None, 6, None)
}
}, {
'type': Token.ROT,
'as_set': {4, },
'stm': {
'r': (1, None, None, None, None),
'o': (None, 2, None, None, None),
't': (None, None, 3, None, None),
SpecificCharSet.BLANK: (None, None, None, 4, None)
}
}, {
'type': Token.IS,
'as_set': {3, },
'stm': {
'i': (1, None, None, None),
's': (None, 2, None, None),
SpecificCharSet.BLANK: (None, None, 3, None)
}
}, {
'type': Token.TO,
'as_set': {3, },
'stm': {
't': (1, None, None, None),
'o': (None, 2, None, None),
SpecificCharSet.BLANK: (None, None, 3, None)
}
}, {
'type': Token.STEP,
'as_set': {5, },
'stm': {
's': (1, None, None, None, None, None),
't': (None, 2, None, None, None, None),
'e': (None, None, 3, None, None, None),
'p': (None, None, None, 4, None, None),
SpecificCharSet.BLANK: (None, None, None, None, 5, None),
}
}, {
'type': Token.DRAW,
'as_set': {5, },
'stm': {
'd': (1, None, None, None, None, None),
'r': (None, 2, None, None, None, None),
'a': (None, None, 3, None, None, None),
'w': (None, None, None, 4, None, None),
SpecificCharSet.BLANK: (None, None, None, None, 5, None),
}
}, {
'type': Token.FOR,
'as_set': {4, },
'stm': {
'f': (1, None, None, None, None),
'o': (None, 2, None, None, None),
'r': (None, None, 3, None, None),
SpecificCharSet.BLANK: (None, None, None, 4, None)
}
}, {
'type': Token.FROM,
'as_set': {5, },
'stm': {
'f': (1, None, None, None, None, None),
'r': (None, 2, None, None, None, None),
'o': (None, None, 3, None, None, None),
'm': (None, None, None, 4, None, None),
SpecificCharSet.BLANK: (None, None, None, None, 5, None)
}
}, {
'type': Token.COLOR,
'as_set': {6, },
'stm': {
'c': (1, None, None, None, None, None, None),
'o': (None, 2, None, 4, None, None, None),
'l': (None, None, 3, None, None, None, None),
'r': (None, None, None, None, 5, None, None),
SpecificCharSet.BLANK: (None, None, None, None, None, 6, None)
}
}, {
'type': Token.BACKGROUND,
'as_set': {11, },
'stm': {
'b': (1, None, None, None, None, None, None, None, None, None, None, None),
'a': (None, 2, None, None, None, None, None, None, None, None, None, None),
'c': (None, None, 3, None, None, None, None, None, None, None, None, None),
'k': (None, None, None, 4, None, None, None, None, None, None, None, None),
'g': (None, None, None, None, 5, None, None, None, None, None, None, None),
'r': (None, None, None, None, None, 6, None, None, None, None, None, None),
'o': (None, None, None, None, None, None, 7, None, None, None, None, None),
'u': (None, None, None, None, None, None, None, 8, None, None, None, None),
'n': (None, None, None, None, None, None, None, None, 9, None, None, None),
'd': (None, None, None, None, None, None, None, None, None, 10, None, None),
SpecificCharSet.BLANK: (None, None, None, None, None, None, None, None, None, None, 11, None)
}
},
# 分隔符
{
'type': Token.SEMICOLON,
'as_set': {1, },
'stm': {
';': (1, None)
}
}, {
'type': Token.L_BRACKET,
'as_set': {1, },
'stm': {
'(': (1, None)
}
}, {
'type': Token.R_BRACKET,
'as_set': {1, },
'stm': {
')': (1, None)
}
}, {
'type': Token.COMMA,
'as_set': {1, },
'stm': {
',': (1, None)
}
},
# 运算符
{
'type': Token.PLUS,
'as_set': {1, },
'stm': {
'+': (1, None)
}
}, {
'type': Token.MINUS,
'as_set': {1, },
'stm': {
'-': (1, None)
}
}, {
'type': Token.MUL,
'as_set': {1, },
'stm': {
'*': (1, None)
}
}, {
'type': Token.DIV,
'as_set': {1, },
'stm': {
'/': (1, None)
}
}, {
'type': Token.POWER,
'as_set': {1, },
'stm': {
'^': (1, None)
}
},
# 其他
{
'type': Token.FUNC,
'as_set': {10, },
'stm': {
'a': (None, 6, None, None, None, None, None, None, None, None, None),
'c': (3, None, None, None, None, None, None, None, None, None, None),
'e': (4, None, None, None, None, None, None, None, None, None, None),
'i': (None, None, 6, None, None, None, None, None, None, None, None),
'l': (6, None, None, None, None, None, None, None, None, None, None),
'n': (None, None, None, None, None, None, 10, None, None, None, None),
'o': (None, None, None, 8, None, None, None, None, None, None, None),
'p': (None, None, None, None, None, None, None, None, None, 10, None),
'q': (None, None, 5, None, None, None, None, None, None, None, None),
'r': (None, None, None, None, None, 7, None, None, None, None, None),
's': (2, None, None, None, None, None, None, None, 10, None, None),
't': (1, None, None, None, None, None, None, 10, None, None, None),
'x': (None, None, None, None, 9, None, None, None, None, None, None)
}
}, {
'type': Token.NUM,
'as_set': {2, 3, 4},
'stm': {
SpecificCharSet.NONZERO_DIGIT: (3, 4, None, 3, 4),
'0': (2, 4, None, 3, 4),
'.': (1, None, 4, 4, None)
}
}, {
'type': Token.CONST_ID,
'as_set': {2, },
'stm': {
'e': (2, None, None),
'p': (1, None, None),
'i': (None, 2, None),
}
}, {
'type': Token.T,
'as_set': {1, },
'stm': {
't': (1, None)
}
}, {
'type': Token.COMMENT,
'as_set': {3, },
'stm': {
SpecificCharSet.ANY: (None, None, 2, None),
'/': (1, 2, 2, None),
'\n': (None, None, 3, None),
}
}, {
'type': Token.ERR_TOKEN,
'as_set': {0, 1},
'stm': {
SpecificCharSet.ANY: (1, 1),
SpecificCharSet.BLANK: (None, None)
}
}
)
| 34.970667 | 108 | 0.416959 | from enum import unique, Enum
class DFA:
    """Deterministic finite automaton recognising a single token type.

    ``source_data`` must be a dict with three fields:

    * ``'type'``   -- the :class:`Token` value this automaton recognises
    * ``'as_set'`` -- set of accepting (final) state numbers
    * ``'stm'``    -- state transition matrix: maps a character, or a
      ``SpecificCharSet`` class name, to a tuple indexed by the current
      state whose entry is the next state (``None`` means "no move")

    State 0 is always the start state.
    """
    def __init__(self, source_data):
        # isinstance() replaces the ``type(x) != T`` anti-pattern; it also
        # accepts subclasses, which is backward compatible here.
        if not isinstance(source_data, dict):
            raise TypeError('第 1 个参数期望 {arg_type_expect} 类型,却接收到类型 {arg_type} '.format(
                arg_type_expect='dict', arg_type=str(type(source_data))
            ))
        token_type = source_data.get('type')
        if not isinstance(token_type, Token):
            raise TypeError('第 1 个参数的 "type" 字段期望 {arg_type_expect} 类型,却接收到类型 {arg_type} '.format(
                arg_type_expect='Token', arg_type=str(type(token_type))
            ))
        self.token_type = token_type
        as_set = source_data.get('as_set')
        if not isinstance(as_set, set):
            raise TypeError('第 1 个参数的 "as_set" 字段期望 {arg_type_expect} 类型,却接收到类型 {arg_type} '.format(
                arg_type_expect='set', arg_type=str(type(as_set))
            ))
        self.as_set = as_set
        stm = source_data.get('stm')
        if not isinstance(stm, dict):
            raise TypeError('第 1 个参数的 "stm" 字段期望 {arg_type_expect} 类型,却接收到类型 {arg_type} '.format(
                arg_type_expect='dict', arg_type=str(type(stm))
            ))
        self.stm = stm
        # current state; 0 is the start state
        self.state = 0
    def clear(self):
        """Reset the automaton back to its start state (state 0)."""
        self.state = 0
    def move(self, ch):
        """Attempt one state transition on character ``ch``.

        Returns True and advances ``self.state`` when a transition exists;
        returns False (state unchanged) otherwise.
        """
        key = self._transition_key(ch)
        if key is None:
            return False
        next_state = self.stm[key][self.state]
        if next_state is None:
            return False
        self.state = next_state
        return True
    def _transition_key(self, ch):
        """Return the ``stm`` key applicable to ``ch``, or ``None``.

        Lookup priority (same as the original elif cascade): the exact
        character, then the BLANK / NONZERO_DIGIT / DIGIT character
        classes, finally the ANY wildcard.
        """
        if self.stm.get(ch) is not None:
            return ch
        for charset in (SpecificCharSet.BLANK,
                        SpecificCharSet.NONZERO_DIGIT,
                        SpecificCharSet.DIGIT):
            if self.stm.get(charset) is not None \
                    and ch in SpecificCharSet.CHARSET_MAP.get(charset):
                return charset
        if self.stm.get(SpecificCharSet.ANY) is not None:
            return SpecificCharSet.ANY
        return None
    def is_access(self):
        """Return True when the current state is an accepting (final) state."""
        return self.state in self.as_set
@unique
class Token(Enum):
ORIGIN = 1
SCALE = 2
ROT = 3
IS = 4
TO = 5
STEP = 6
DRAW = 7
FOR = 8
FROM = 9
COLOR = 10
BACKGROUND = 11
SEMICOLON = 21
L_BRACKET = 22
R_BRACKET = 23
COMMA = 24
PLUS = 35
MINUS = 36
MUL = 37
DIV = 38
POWER = 39
FUNC = 51
NUM = 52
CONST_ID = 53
T = 54
COMMENT = 61
NON_TOKEN = 62
ERR_TOKEN = 63
class SpecificCharSet(object):
NONZERO_DIGIT = 'NONZERO_DIGIT'
DIGIT = 'DIGIT'
BLANK = 'BLANK'
ANY = 'ANY'
CHARSET_MAP = {
'NONZERO_DIGIT': {'1', '2', '3', '4', '5', '6', '7', '8', '9'},
'DIGIT': {'0', '1', '2', '3', '4', '5', '6', '7', '8', '9'},
'BLANK': {'\n', ' '}
}
DFA_DATA = (
{
'type': Token.ORIGIN,
'as_set': {7, },
'stm': {
'o': (1, None, None, None, None, None, None, None),
'r': (None, 2, None, None, None, None, None, None),
'i': (None, None, 3, None, 5, None, None, None),
'g': (None, None, None, 4, None, None, None, None),
'n': (None, None, None, None, None, 6, None, None),
SpecificCharSet.BLANK: (None, None, None, None, None, None, 7, None)
}
}, {
'type': Token.SCALE,
'as_set': {6, },
'stm': {
's': (1, None, None, None, None, None, None),
'c': (None, 2, None, None, None, None, None),
'a': (None, None, 3, None, None, None, None),
'l': (None, None, None, 4, None, None, None),
'e': (None, None, None, None, 5, None, None),
SpecificCharSet.BLANK: (None, None, None, None, None, 6, None)
}
}, {
'type': Token.ROT,
'as_set': {4, },
'stm': {
'r': (1, None, None, None, None),
'o': (None, 2, None, None, None),
't': (None, None, 3, None, None),
SpecificCharSet.BLANK: (None, None, None, 4, None)
}
}, {
'type': Token.IS,
'as_set': {3, },
'stm': {
'i': (1, None, None, None),
's': (None, 2, None, None),
SpecificCharSet.BLANK: (None, None, 3, None)
}
}, {
'type': Token.TO,
'as_set': {3, },
'stm': {
't': (1, None, None, None),
'o': (None, 2, None, None),
SpecificCharSet.BLANK: (None, None, 3, None)
}
}, {
'type': Token.STEP,
'as_set': {5, },
'stm': {
's': (1, None, None, None, None, None),
't': (None, 2, None, None, None, None),
'e': (None, None, 3, None, None, None),
'p': (None, None, None, 4, None, None),
SpecificCharSet.BLANK: (None, None, None, None, 5, None),
}
}, {
'type': Token.DRAW,
'as_set': {5, },
'stm': {
'd': (1, None, None, None, None, None),
'r': (None, 2, None, None, None, None),
'a': (None, None, 3, None, None, None),
'w': (None, None, None, 4, None, None),
SpecificCharSet.BLANK: (None, None, None, None, 5, None),
}
}, {
'type': Token.FOR,
'as_set': {4, },
'stm': {
'f': (1, None, None, None, None),
'o': (None, 2, None, None, None),
'r': (None, None, 3, None, None),
SpecificCharSet.BLANK: (None, None, None, 4, None)
}
}, {
'type': Token.FROM,
'as_set': {5, },
'stm': {
'f': (1, None, None, None, None, None),
'r': (None, 2, None, None, None, None),
'o': (None, None, 3, None, None, None),
'm': (None, None, None, 4, None, None),
SpecificCharSet.BLANK: (None, None, None, None, 5, None)
}
}, {
'type': Token.COLOR,
'as_set': {6, },
'stm': {
'c': (1, None, None, None, None, None, None),
'o': (None, 2, None, 4, None, None, None),
'l': (None, None, 3, None, None, None, None),
'r': (None, None, None, None, 5, None, None),
SpecificCharSet.BLANK: (None, None, None, None, None, 6, None)
}
}, {
'type': Token.BACKGROUND,
'as_set': {11, },
'stm': {
'b': (1, None, None, None, None, None, None, None, None, None, None, None),
'a': (None, 2, None, None, None, None, None, None, None, None, None, None),
'c': (None, None, 3, None, None, None, None, None, None, None, None, None),
'k': (None, None, None, 4, None, None, None, None, None, None, None, None),
'g': (None, None, None, None, 5, None, None, None, None, None, None, None),
'r': (None, None, None, None, None, 6, None, None, None, None, None, None),
'o': (None, None, None, None, None, None, 7, None, None, None, None, None),
'u': (None, None, None, None, None, None, None, 8, None, None, None, None),
'n': (None, None, None, None, None, None, None, None, 9, None, None, None),
'd': (None, None, None, None, None, None, None, None, None, 10, None, None),
SpecificCharSet.BLANK: (None, None, None, None, None, None, None, None, None, None, 11, None)
}
},
{
'type': Token.SEMICOLON,
'as_set': {1, },
'stm': {
';': (1, None)
}
}, {
'type': Token.L_BRACKET,
'as_set': {1, },
'stm': {
'(': (1, None)
}
}, {
'type': Token.R_BRACKET,
'as_set': {1, },
'stm': {
')': (1, None)
}
}, {
'type': Token.COMMA,
'as_set': {1, },
'stm': {
',': (1, None)
}
},
{
'type': Token.PLUS,
'as_set': {1, },
'stm': {
'+': (1, None)
}
}, {
'type': Token.MINUS,
'as_set': {1, },
'stm': {
'-': (1, None)
}
}, {
'type': Token.MUL,
'as_set': {1, },
'stm': {
'*': (1, None)
}
}, {
'type': Token.DIV,
'as_set': {1, },
'stm': {
'/': (1, None)
}
}, {
'type': Token.POWER,
'as_set': {1, },
'stm': {
'^': (1, None)
}
},
{
'type': Token.FUNC,
'as_set': {10, },
'stm': {
'a': (None, 6, None, None, None, None, None, None, None, None, None),
'c': (3, None, None, None, None, None, None, None, None, None, None),
'e': (4, None, None, None, None, None, None, None, None, None, None),
'i': (None, None, 6, None, None, None, None, None, None, None, None),
'l': (6, None, None, None, None, None, None, None, None, None, None),
'n': (None, None, None, None, None, None, 10, None, None, None, None),
'o': (None, None, None, 8, None, None, None, None, None, None, None),
'p': (None, None, None, None, None, None, None, None, None, 10, None),
'q': (None, None, 5, None, None, None, None, None, None, None, None),
'r': (None, None, None, None, None, 7, None, None, None, None, None),
's': (2, None, None, None, None, None, None, None, 10, None, None),
't': (1, None, None, None, None, None, None, 10, None, None, None),
'x': (None, None, None, None, 9, None, None, None, None, None, None)
}
}, {
'type': Token.NUM,
'as_set': {2, 3, 4},
'stm': {
SpecificCharSet.NONZERO_DIGIT: (3, 4, None, 3, 4),
'0': (2, 4, None, 3, 4),
'.': (1, None, 4, 4, None)
}
}, {
'type': Token.CONST_ID,
'as_set': {2, },
'stm': {
'e': (2, None, None),
'p': (1, None, None),
'i': (None, 2, None),
}
}, {
'type': Token.T,
'as_set': {1, },
'stm': {
't': (1, None)
}
}, {
'type': Token.COMMENT,
'as_set': {3, },
'stm': {
SpecificCharSet.ANY: (None, None, 2, None),
'/': (1, 2, 2, None),
'\n': (None, None, 3, None),
}
}, {
'type': Token.ERR_TOKEN,
'as_set': {0, 1},
'stm': {
SpecificCharSet.ANY: (1, 1),
SpecificCharSet.BLANK: (None, None)
}
}
)
| true | true |
f7153c4f5db58c6522a6d97004d7dcdde2bcc24c | 262 | py | Python | src/core/migrations/0050_merge_20190212_0720.py | metabolism-of-cities/ARCHIVED-metabolism-of-cities-platform-v3 | c754d3b1b401906a21640b8eacb6b724a448b31c | [
"MIT"
] | null | null | null | src/core/migrations/0050_merge_20190212_0720.py | metabolism-of-cities/ARCHIVED-metabolism-of-cities-platform-v3 | c754d3b1b401906a21640b8eacb6b724a448b31c | [
"MIT"
] | null | null | null | src/core/migrations/0050_merge_20190212_0720.py | metabolism-of-cities/ARCHIVED-metabolism-of-cities-platform-v3 | c754d3b1b401906a21640b8eacb6b724a448b31c | [
"MIT"
] | null | null | null | # Generated by Django 2.1.2 on 2019-02-12 07:20
from django.db import migrations
class Migration(migrations.Migration):
    """Auto-generated Django merge migration joining two 0049 branches of 'core'."""
    # the two leaf migrations of the branches being merged
    dependencies = [
        ('core', '0049_merge_20190212_0544'),
        ('core', '0049_article_head'),
    ]
    # intentionally empty: a merge migration applies no schema operations
    operations = [
    ]
| 17.466667 | 47 | 0.633588 |
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('core', '0049_merge_20190212_0544'),
('core', '0049_article_head'),
]
operations = [
]
| true | true |
f7153d3e5f303fac4afc1dc66b303035bd382d50 | 969 | py | Python | doc/api/epydoc/build.py | swamper123/pymodbus | 7dfac6f19c60d3aa50a168ff82db88204dfb3a30 | [
"BSD-3-Clause"
] | null | null | null | doc/api/epydoc/build.py | swamper123/pymodbus | 7dfac6f19c60d3aa50a168ff82db88204dfb3a30 | [
"BSD-3-Clause"
] | 1 | 2020-10-29T12:01:38.000Z | 2022-03-21T02:39:59.000Z | doc/api/epydoc/build.py | swamper123/pymodbus | 7dfac6f19c60d3aa50a168ff82db88204dfb3a30 | [
"BSD-3-Clause"
] | null | null | null | #!/usr/bin/env python3
'''
Epydoc API Runner
------------------
Using pkg_resources, we attempt to see if epydoc is installed,
if so, we use its cli program to compile the documents
'''
# Everything runs inside one try-block: if epydoc (or anything else here)
# is unavailable, the build is skipped instead of failing the caller.
try:
    import sys, os, shutil
    import pkg_resources
    # verify epydoc is installed before importing its CLI entry point
    pkg_resources.require("epydoc")
    from epydoc.cli import cli
    # fake the command line that epydoc's cli() parses
    sys.argv = '''epydoc.py pymodbus
        --html --simple-term --quiet
        --include-log
        --graph=all
        --docformat=plaintext
        --debug
        --exclude=._
        --exclude=tests
        --output=html/
    '''.split()
    # bugs in trunk for --docformat=restructuredtext
    if not os.path.exists("./html"):
        os.mkdir("./html")
    print( "Building Epydoc API Documentation")
    cli()
    # when run from a source checkout, relocate output under build/
    if os.path.exists('../../../build'):
        shutil.move("html", "../../../build/epydoc")
except Exception as ex:
    import traceback,sys
    traceback.print_exc(file=sys.stdout)
    print( "Epydoc not avaliable...not building")
| 24.846154 | 62 | 0.603715 |
try:
import sys, os, shutil
import pkg_resources
pkg_resources.require("epydoc")
from epydoc.cli import cli
sys.argv = '''epydoc.py pymodbus
--html --simple-term --quiet
--include-log
--graph=all
--docformat=plaintext
--debug
--exclude=._
--exclude=tests
--output=html/
'''.split()
if not os.path.exists("./html"):
os.mkdir("./html")
print( "Building Epydoc API Documentation")
cli()
if os.path.exists('../../../build'):
shutil.move("html", "../../../build/epydoc")
except Exception as ex:
import traceback,sys
traceback.print_exc(file=sys.stdout)
print( "Epydoc not avaliable...not building")
| true | true |
f7153eb3b8319ab7b8fbdefb6c2a256421b0226c | 256 | py | Python | submissions/joi2012yo/a.py | m-star18/atcoder | 08e475810516602fa088f87daf1eba590b4e07cc | [
"Unlicense"
] | 1 | 2021-05-10T01:16:28.000Z | 2021-05-10T01:16:28.000Z | submissions/joi2012yo/a.py | m-star18/atcoder | 08e475810516602fa088f87daf1eba590b4e07cc | [
"Unlicense"
] | 3 | 2021-05-11T06:14:15.000Z | 2021-06-19T08:18:36.000Z | submissions/joi2012yo/a.py | m-star18/atcoder | 08e475810516602fa088f87daf1eba590b4e07cc | [
"Unlicense"
] | null | null | null | import sys
read = sys.stdin.buffer.read
readline = sys.stdin.buffer.readline
readlines = sys.stdin.buffer.readlines
sys.setrecursionlimit(10 ** 7)
# cheapest of the first three prices plus cheapest of the next two,
# minus the fixed 50-unit set discount
cheapest_first = min(int(readline()) for _ in range(3))
cheapest_second = min(int(readline()) for _ in range(2))
print(cheapest_first + cheapest_second - 50)
| 25.6 | 44 | 0.699219 | import sys
read = sys.stdin.buffer.read
readline = sys.stdin.buffer.readline
readlines = sys.stdin.buffer.readlines
sys.setrecursionlimit(10 ** 7)
p = min([int(readline()) for _ in range(3)])
g = min([int(readline()) for _ in range(2)])
print(p + g - 50)
| true | true |
f7153eeb0752afecb51dc681dd7cab991cb43202 | 1,722 | py | Python | Model.py | Giorgiobientinesi/Workshop2 | f454499d4befdb705b4672be25d8698ef2b37116 | [
"MIT"
] | null | null | null | Model.py | Giorgiobientinesi/Workshop2 | f454499d4befdb705b4672be25d8698ef2b37116 | [
"MIT"
] | null | null | null | Model.py | Giorgiobientinesi/Workshop2 | f454499d4befdb705b4672be25d8698ef2b37116 | [
"MIT"
] | null | null | null | import pandas as pd
from sklearn.preprocessing import OneHotEncoder
from sklearn.model_selection import train_test_split
from sklearn.ensemble import RandomForestRegressor
from sklearn.metrics import mean_absolute_error
# load the cleaned listings; drop the leftover CSV index column
df = pd.read_csv("Airbnb-cleaned.csv")
df.columns  # NOTE(review): result unused -- leftover from interactive exploration
del df["Unnamed: 0"]
# categorical columns that will be one-hot encoded
df1 = df[['neighbourhood', 'property_type', 'room_type']]
# IMPORT ENCODER
from sklearn.preprocessing import OneHotEncoder
# FIT ENCODER ON THE ORIGINAL DATASET TO MAKE IT REMEMBER CATEGORIES
enc = OneHotEncoder(sparse=False)
enc.fit(df1)
df["neighbourhood"].unique()  # NOTE(review): result unused
# write the encoder's output into explicitly named one-hot columns;
# NOTE(review): this hard-coded column order must match the categories
# learned by ``enc`` -- verify against the encoder's categories_
df[['Bijlmer-Oost', 'Noord-Oost', 'Noord-West', 'Oud-Noord',
    'IJburg - Zeeburgereiland', 'Centrum-West',
    'Oostelijk Havengebied - Indische Buurt', 'Centrum-Oost',
    'Oud-Oost', 'Watergraafsmeer', 'Gaasperdam - Driemond',
    'Westerpark', 'Bijlmer-Centrum', 'De Pijp - Rivierenbuurt', 'Zuid',
    'Buitenveldert - Zuidas', 'De Baarsjes - Oud-West',
    'Bos en Lommer', 'Geuzenveld - Slotermeer', 'Slotervaart',
    'Osdorp', 'De Aker - Nieuw Sloten',
    'Apartment', 'Bed & Breakfast', 'House',
    'Entire home/apt', 'Private room', 'Shared room']] = enc.transform(
    df1[["neighbourhood", "property_type", "room_type"]])
# drop the raw categorical columns now that they are encoded
df = df.drop(["neighbourhood", "property_type", "room_type"], axis =1)
# convert distance from metres to kilometres (column name keeps "(m)")
df["Distance_from_center(m)"] = df["Distance_from_center(m)"]/1000
# target / feature split, then an 80/20 train-test split
y = df['price']
data = df.drop(['price'], axis=1)
X_train, X_test, y_train, y_test = train_test_split(data, y, test_size=0.2, random_state=7)
model = RandomForestRegressor()
model.fit(X_train,y_train)
pred = model.predict(X_test)
mean_absolute_error(y_test, pred)  # NOTE(review): score computed but never stored or printed
# persist the fitted model for later loading
from joblib import dump, load
dump(model, 'Airbnb.joblib')
| 31.309091 | 91 | 0.702091 | import pandas as pd
from sklearn.preprocessing import OneHotEncoder
from sklearn.model_selection import train_test_split
from sklearn.ensemble import RandomForestRegressor
from sklearn.metrics import mean_absolute_error
df = pd.read_csv("Airbnb-cleaned.csv")
df.columns
del df["Unnamed: 0"]
df1 = df[['neighbourhood', 'property_type', 'room_type']]
from sklearn.preprocessing import OneHotEncoder
enc = OneHotEncoder(sparse=False)
enc.fit(df1)
df["neighbourhood"].unique()
df[['Bijlmer-Oost', 'Noord-Oost', 'Noord-West', 'Oud-Noord',
'IJburg - Zeeburgereiland', 'Centrum-West',
'Oostelijk Havengebied - Indische Buurt', 'Centrum-Oost',
'Oud-Oost', 'Watergraafsmeer', 'Gaasperdam - Driemond',
'Westerpark', 'Bijlmer-Centrum', 'De Pijp - Rivierenbuurt', 'Zuid',
'Buitenveldert - Zuidas', 'De Baarsjes - Oud-West',
'Bos en Lommer', 'Geuzenveld - Slotermeer', 'Slotervaart',
'Osdorp', 'De Aker - Nieuw Sloten',
'Apartment', 'Bed & Breakfast', 'House',
'Entire home/apt', 'Private room', 'Shared room']] = enc.transform(
df1[["neighbourhood", "property_type", "room_type"]])
df = df.drop(["neighbourhood", "property_type", "room_type"], axis =1)
df["Distance_from_center(m)"] = df["Distance_from_center(m)"]/1000
y = df['price']
data = df.drop(['price'], axis=1)
X_train, X_test, y_train, y_test = train_test_split(data, y, test_size=0.2, random_state=7)
model = RandomForestRegressor()
model.fit(X_train,y_train)
pred = model.predict(X_test)
mean_absolute_error(y_test, pred)
from joblib import dump, load
dump(model, 'Airbnb.joblib')
| true | true |
f7153f357c7a65ac30f9f3d65e5017cda2f97c38 | 1,185 | py | Python | scrython/rulings/arena.py | vtbassmatt/Scrython | 49fd9bd112e0f552a4310ac81fdb3f2b9e2a3976 | [
"MIT"
] | null | null | null | scrython/rulings/arena.py | vtbassmatt/Scrython | 49fd9bd112e0f552a4310ac81fdb3f2b9e2a3976 | [
"MIT"
] | null | null | null | scrython/rulings/arena.py | vtbassmatt/Scrython | 49fd9bd112e0f552a4310ac81fdb3f2b9e2a3976 | [
"MIT"
] | null | null | null | from .rulings_object import RulingsObject
class Arena(RulingsObject):
    """Fetch the rulings of a card identified by its Arena id.

    Endpoint: ``cards/arena/:id/rulings``.

    Args:
        id (string): The Arena id of the card whose rulings are requested.
        format (string, optional): Response format; defaults to JSON.
        face (string, optional): For the `image` format, selects the front
            or back face.
        version (string, optional): For the `image` format, selects the
            image size (small, normal, large, etc).
        pretty (string, optional): Pretty-printed JSON; note this may break
            functionality with Scrython.

    Returns:
        N/A

    Raises:
        TypeError: If no ``id`` keyword argument is supplied.

    Examples:
        >>> rule = scrython.rulings.Arena(id="66975")
        >>> rule.data_length()
    """
    def __init__(self, **kwargs):
        card_id = kwargs.get('id')
        if card_id is None:
            raise TypeError('No id provided to search by')
        self.url = 'cards/arena/{}/rulings?'.format(str(card_id))
        super(Arena, self).__init__(self.url)
| 33.857143 | 129 | 0.62616 | from .rulings_object import RulingsObject
class Arena(RulingsObject):
def __init__(self, **kwargs):
if kwargs.get('id') is None:
raise TypeError('No id provided to search by')
self.url = 'cards/arena/{}/rulings?'.format(str(kwargs.get('id')))
super(Arena, self).__init__(self.url)
| true | true |
f715404495e00bc2fc41e3195c1aac56c832e314 | 2,319 | py | Python | survos2/improc/regions/ccl.py | DiamondLightSource/SuRVoS2 | 42bacfb6a5cc267f38ca1337e51a443eae1a9d2b | [
"MIT"
] | 4 | 2017-10-10T14:47:16.000Z | 2022-01-14T05:57:50.000Z | survos2/improc/regions/ccl.py | DiamondLightSource/SuRVoS2 | 42bacfb6a5cc267f38ca1337e51a443eae1a9d2b | [
"MIT"
] | 1 | 2022-01-11T21:11:12.000Z | 2022-01-12T08:22:34.000Z | survos2/improc/regions/ccl.py | DiamondLightSource/SuRVoS2 | 42bacfb6a5cc267f38ca1337e51a443eae1a9d2b | [
"MIT"
] | 2 | 2018-03-06T06:31:29.000Z | 2019-03-04T03:33:18.000Z | import logging
import os.path as op
import numpy as np
import pycuda.driver as cuda
import pycuda.gpuarray as gpuarray
import pycuda.autoinit
from pycuda.compiler import SourceModule
from ..improc_types import int3
from ..utils import gpuregion, cpuregion
from ..cuda import asgpuarray, grid_kernel_config
from ._ccl import _remap, _relabel2d, _relabel3d, _merge_small3d
__dirname__ = op.dirname(__file__)
@gpuregion
def ccl3d(labels, remap=True):
    """Connected-component labelling of a 3-D uint32 volume on the GPU.

    Compiles the ``ccl3d.cu`` kernels and runs the ``uf_local``,
    ``uf_global`` and ``uf_final`` passes over ``labels``.  When ``remap``
    is True the result is copied to the host and compacted with
    :func:`remap_labels`; otherwise the GPU array is returned as-is.
    """
    assert labels.ndim == 3
    assert labels.dtype == np.uint32
    # compile the CUDA kernels shipped next to this module
    with open(op.join(__dirname__, "kernels", "ccl3d.cu"), "r") as f:
        _mod_conv = SourceModule(f.read())
    gpu_ccl_local = _mod_conv.get_function("uf_local")
    gpu_ccl_global = _mod_conv.get_function("uf_global")
    gpu_ccl_final = _mod_conv.get_function("uf_final")
    labels_gpu = asgpuarray(labels, dtype=np.uint32)
    result_gpu = gpuarray.zeros_like(labels_gpu)
    # shape is passed reversed (fastest-varying axis first) -- presumably
    # (x, y, z); confirm against the kernel signatures
    shape = np.asarray(tuple(labels.shape[::-1]), dtype=int3)
    block, grid = grid_kernel_config(gpu_ccl_local, labels.shape)
    # 8 bytes of shared memory per thread in the block -- TODO confirm
    # against uf_local's expectations
    shared = int(np.prod(block) * 8)
    gpu_ccl_local(labels_gpu, result_gpu, shape, block=block, grid=grid, shared=shared)
    gpu_ccl_global(labels_gpu, result_gpu, shape, block=block, grid=grid)
    gpu_ccl_final(result_gpu, shape, block=block, grid=grid)
    if remap:
        return remap_labels(result_gpu.get())
    return result_gpu
def remap_labels(labels):
    """Apply ``_remap`` to the flattened labels and restore the original shape."""
    assert labels.dtype == np.uint32
    original_shape = labels.shape
    remapped = _remap(labels.ravel())
    remapped.shape = original_shape
    return remapped
def relabel(labels):
    """Relabel a 2-D or 3-D uint32 label array via the compiled helpers."""
    assert labels.dtype == np.uint32
    # guard clause: only 2-D and 3-D inputs are supported
    if labels.ndim not in (2, 3):
        raise ValueError(
            'Input array has to be 2 or 3 dimensional: {}'.format(labels.ndim)
        )
    if labels.ndim == 2:
        relabelled = _relabel2d(labels.ravel(), labels.shape[1])
    else:
        relabelled = _relabel3d(labels.ravel(), labels.shape[1], labels.shape[2])
    relabelled.shape = labels.shape
    return relabelled
@cpuregion
def merge_small(data, labels, min_size=1, **kwargs):
    """Merge labelled regions smaller than ``min_size`` via ``_merge_small3d``."""
    # ensure ``data`` carries one trailing (channel) axis relative to ``labels``
    if data.ndim != labels.ndim + 1:
        data = data[..., None]
    assert data.ndim == labels.ndim + 1
    n_labels = labels.max() + 1
    return _merge_small3d(data, labels, n_labels, min_size)
| 29.35443 | 88 | 0.675722 | import logging
import os.path as op
import numpy as np
import pycuda.driver as cuda
import pycuda.gpuarray as gpuarray
import pycuda.autoinit
from pycuda.compiler import SourceModule
from ..improc_types import int3
from ..utils import gpuregion, cpuregion
from ..cuda import asgpuarray, grid_kernel_config
from ._ccl import _remap, _relabel2d, _relabel3d, _merge_small3d
__dirname__ = op.dirname(__file__)
@gpuregion
def ccl3d(labels, remap=True):
assert labels.ndim == 3
assert labels.dtype == np.uint32
with open(op.join(__dirname__, "kernels", "ccl3d.cu"), "r") as f:
_mod_conv = SourceModule(f.read())
gpu_ccl_local = _mod_conv.get_function("uf_local")
gpu_ccl_global = _mod_conv.get_function("uf_global")
gpu_ccl_final = _mod_conv.get_function("uf_final")
labels_gpu = asgpuarray(labels, dtype=np.uint32)
result_gpu = gpuarray.zeros_like(labels_gpu)
shape = np.asarray(tuple(labels.shape[::-1]), dtype=int3)
block, grid = grid_kernel_config(gpu_ccl_local, labels.shape)
shared = int(np.prod(block) * 8)
gpu_ccl_local(labels_gpu, result_gpu, shape, block=block, grid=grid, shared=shared)
gpu_ccl_global(labels_gpu, result_gpu, shape, block=block, grid=grid)
gpu_ccl_final(result_gpu, shape, block=block, grid=grid)
if remap:
return remap_labels(result_gpu.get())
return result_gpu
def remap_labels(labels):
assert labels.dtype == np.uint32
new_labels = _remap(labels.ravel())
new_labels.shape = labels.shape
return new_labels
def relabel(labels):
assert labels.dtype == np.uint32
if labels.ndim == 2:
new_labels = _relabel2d(labels.ravel(), labels.shape[1])
elif labels.ndim == 3:
new_labels = _relabel3d(labels.ravel(), labels.shape[1], labels.shape[2])
else:
raise ValueError(
"Input array has to be 2 or 3 dimensional: {}".format(labels.ndim)
)
new_labels.shape = labels.shape
return new_labels
@cpuregion
def merge_small(data, labels, min_size=1, **kwargs):
if data.ndim != labels.ndim + 1:
data = data[..., None]
assert data.ndim == labels.ndim + 1
return _merge_small3d(data, labels, labels.max() + 1, min_size)
| true | true |
f71540c4bd66f93fc57f13dd1acee11e0731db26 | 1,753 | py | Python | fyle/platform/platform.py | fylein/fyle-platform-sdk-py | dcf0f1de25e95e41ec213dc97c09196203090d01 | [
"MIT"
] | 1 | 2022-03-08T09:43:30.000Z | 2022-03-08T09:43:30.000Z | fyle/platform/platform.py | fylein/fyle-platform-sdk-py | dcf0f1de25e95e41ec213dc97c09196203090d01 | [
"MIT"
] | 2 | 2021-11-22T09:12:12.000Z | 2022-03-17T10:13:40.000Z | fyle/platform/platform.py | fylein/fyle-platform-sdk-py | dcf0f1de25e95e41ec213dc97c09196203090d01 | [
"MIT"
] | null | null | null | """
Fyle Platform SDK Class
"""
from .apis import v1beta
from .globals.config import config
from .internals.auth import Auth
class Platform(Auth):
    """Entry point of the Fyle Platform SDK.

    Connects to the Fyle APIs with OAuth2 authentication (refresh token
    grant type): the supplied credentials are pushed into the shared
    ``config`` object, then the first access token is fetched.

    Parameters:
        server_url (str): Server URL for Fyle API.
        token_url (str): Token URL for Fyle API.
        client_id (str): Client ID for Fyle API.
        client_secret (str): Client secret for Fyle API.
        refresh_token (str): Refresh token for Fyle API.
    """
    def __init__(self, server_url, token_url, client_id, client_secret, refresh_token):
        super().__init__()
        # keep the raw credentials on the instance (name-mangled attributes)
        self.__server_url = server_url
        self.__token_url = token_url
        self.__client_id = client_id
        self.__client_secret = client_secret
        self.__refresh_token = refresh_token
        self.v1beta = v1beta
        # push every credential into the shared config, then get a token
        for apply_setting in (
            self.set_server_url,
            self.set_token_url,
            self.set_client_id,
            self.set_client_secret,
            self.set_refresh_token,
        ):
            apply_setting()
        self.update_access_token()
    def set_server_url(self):
        """Store the server URL in the shared config."""
        config.set('FYLE', 'SERVER_URL', self.__server_url)
    def set_token_url(self):
        """Store the token URL in the shared config."""
        config.set('FYLE', 'TOKEN_URL', self.__token_url)
    def set_client_id(self):
        """Store the client ID in the shared config."""
        config.set('AUTH', 'CLIENT_ID', self.__client_id)
    def set_client_secret(self):
        """Store the client secret in the shared config."""
        config.set('AUTH', 'CLIENT_SECRET', self.__client_secret)
    def set_refresh_token(self):
        """Store the refresh token in the shared config."""
        config.set('AUTH', 'REFRESH_TOKEN', self.__refresh_token)
| 27.390625 | 87 | 0.64575 |
from .apis import v1beta
from .globals.config import config
from .internals.auth import Auth
class Platform(Auth):
def __init__(self, server_url, token_url, client_id, client_secret, refresh_token):
super().__init__()
self.__server_url = server_url
self.__token_url = token_url
self.__client_id = client_id
self.__client_secret = client_secret
self.__refresh_token = refresh_token
self.v1beta = v1beta
self.set_server_url()
self.set_token_url()
self.set_client_id()
self.set_client_secret()
self.set_refresh_token()
self.update_access_token()
def set_server_url(self):
config.set('FYLE', 'SERVER_URL', self.__server_url)
def set_token_url(self):
config.set('FYLE', 'TOKEN_URL', self.__token_url)
def set_client_id(self):
config.set('AUTH', 'CLIENT_ID', self.__client_id)
def set_client_secret(self):
config.set('AUTH', 'CLIENT_SECRET', self.__client_secret)
def set_refresh_token(self):
config.set('AUTH', 'REFRESH_TOKEN', self.__refresh_token)
| true | true |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.