code stringlengths 114 1.05M | path stringlengths 3 312 | quality_prob float64 0.5 0.99 | learning_prob float64 0.2 1 | filename stringlengths 3 168 | kind stringclasses 1
value |
|---|---|---|---|---|---|
# pytype: skip-file
from __future__ import absolute_import
from __future__ import print_function
def regex_matches(test=None):
# [START regex_matches]
import apache_beam as beam
# Matches a named group 'icon', and then two comma-separated groups.
regex = r'(?P<icon>[^\s,]+), *(\w+), *(\w+)'
with beam.Pipeline() as pipeline:
plants_matches = (
pipeline
| 'Garden plants' >> beam.Create([
'π, Strawberry, perennial',
'π₯, Carrot, biennial ignoring trailing words',
'π, Eggplant, perennial',
'π
, Tomato, annual',
'π₯, Potato, perennial',
'# π, invalid, format',
'invalid, π, format',
])
| 'Parse plants' >> beam.Regex.matches(regex)
| beam.Map(print))
# [END regex_matches]
if test:
test(plants_matches)
def regex_all_matches(test=None):
# [START regex_all_matches]
import apache_beam as beam
# Matches a named group 'icon', and then two comma-separated groups.
regex = r'(?P<icon>[^\s,]+), *(\w+), *(\w+)'
with beam.Pipeline() as pipeline:
plants_all_matches = (
pipeline
| 'Garden plants' >> beam.Create([
'π, Strawberry, perennial',
'π₯, Carrot, biennial ignoring trailing words',
'π, Eggplant, perennial',
'π
, Tomato, annual',
'π₯, Potato, perennial',
'# π, invalid, format',
'invalid, π, format',
])
| 'Parse plants' >> beam.Regex.all_matches(regex)
| beam.Map(print))
# [END regex_all_matches]
if test:
test(plants_all_matches)
def regex_matches_kv(test=None):
# [START regex_matches_kv]
import apache_beam as beam
# Matches a named group 'icon', and then two comma-separated groups.
regex = r'(?P<icon>[^\s,]+), *(\w+), *(\w+)'
with beam.Pipeline() as pipeline:
plants_matches_kv = (
pipeline
| 'Garden plants' >> beam.Create([
'π, Strawberry, perennial',
'π₯, Carrot, biennial ignoring trailing words',
'π, Eggplant, perennial',
'π
, Tomato, annual',
'π₯, Potato, perennial',
'# π, invalid, format',
'invalid, π, format',
])
| 'Parse plants' >> beam.Regex.matches_kv(regex, keyGroup='icon')
| beam.Map(print))
# [END regex_matches_kv]
if test:
test(plants_matches_kv)
def regex_find(test=None):
# [START regex_find]
import apache_beam as beam
# Matches a named group 'icon', and then two comma-separated groups.
regex = r'(?P<icon>[^\s,]+), *(\w+), *(\w+)'
with beam.Pipeline() as pipeline:
plants_matches = (
pipeline
| 'Garden plants' >> beam.Create([
'# π, Strawberry, perennial',
'# π₯, Carrot, biennial ignoring trailing words',
'# π, Eggplant, perennial - π, Banana, perennial',
'# π
, Tomato, annual - π, Watermelon, annual',
'# π₯, Potato, perennial',
])
| 'Parse plants' >> beam.Regex.find(regex)
| beam.Map(print))
# [END regex_find]
if test:
test(plants_matches)
def regex_find_all(test=None):
# [START regex_find_all]
import apache_beam as beam
# Matches a named group 'icon', and then two comma-separated groups.
regex = r'(?P<icon>[^\s,]+), *(\w+), *(\w+)'
with beam.Pipeline() as pipeline:
plants_find_all = (
pipeline
| 'Garden plants' >> beam.Create([
'# π, Strawberry, perennial',
'# π₯, Carrot, biennial ignoring trailing words',
'# π, Eggplant, perennial - π, Banana, perennial',
'# π
, Tomato, annual - π, Watermelon, annual',
'# π₯, Potato, perennial',
])
| 'Parse plants' >> beam.Regex.find_all(regex)
| beam.Map(print))
# [END regex_find_all]
if test:
test(plants_find_all)
def regex_find_kv(test=None):
# [START regex_find_kv]
import apache_beam as beam
# Matches a named group 'icon', and then two comma-separated groups.
regex = r'(?P<icon>[^\s,]+), *(\w+), *(\w+)'
with beam.Pipeline() as pipeline:
plants_matches_kv = (
pipeline
| 'Garden plants' >> beam.Create([
'# π, Strawberry, perennial',
'# π₯, Carrot, biennial ignoring trailing words',
'# π, Eggplant, perennial - π, Banana, perennial',
'# π
, Tomato, annual - π, Watermelon, annual',
'# π₯, Potato, perennial',
])
| 'Parse plants' >> beam.Regex.find_kv(regex, keyGroup='icon')
| beam.Map(print))
# [END regex_find_kv]
if test:
test(plants_matches_kv)
def regex_replace_all(test=None):
# [START regex_replace_all]
import apache_beam as beam
with beam.Pipeline() as pipeline:
plants_replace_all = (
pipeline
| 'Garden plants' >> beam.Create([
'π : Strawberry : perennial',
'π₯ : Carrot : biennial',
'π\t:\tEggplant\t:\tperennial',
'π
: Tomato : annual',
'π₯ : Potato : perennial',
])
| 'To CSV' >> beam.Regex.replace_all(r'\s*:\s*', ',')
| beam.Map(print))
# [END regex_replace_all]
if test:
test(plants_replace_all)
def regex_replace_first(test=None):
# [START regex_replace_first]
import apache_beam as beam
with beam.Pipeline() as pipeline:
plants_replace_first = (
pipeline
| 'Garden plants' >> beam.Create([
'π, Strawberry, perennial',
'π₯, Carrot, biennial',
'π,\tEggplant, perennial',
'π
, Tomato, annual',
'π₯, Potato, perennial',
])
| 'As dictionary' >> beam.Regex.replace_first(r'\s*,\s*', ': ')
| beam.Map(print))
# [END regex_replace_first]
if test:
test(plants_replace_first)
def regex_split(test=None):
# [START regex_split]
import apache_beam as beam
with beam.Pipeline() as pipeline:
plants_split = (
pipeline
| 'Garden plants' >> beam.Create([
'π : Strawberry : perennial',
'π₯ : Carrot : biennial',
'π\t:\tEggplant : perennial',
'π
: Tomato : annual',
'π₯ : Potato : perennial',
])
| 'Parse plants' >> beam.Regex.split(r'\s*:\s*')
| beam.Map(print))
# [END regex_split]
if test:
test(plants_split) | /rflow-apache-beam-2.28.0.tar.gz/rflow-apache-beam-2.28.0/apache_beam/examples/snippets/transforms/elementwise/regex.py | 0.630344 | 0.343273 | regex.py | pypi |
# pytype: skip-file
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
def combineglobally_function(test=None):
# [START combineglobally_function]
import apache_beam as beam
def get_common_items(sets):
# set.intersection() takes multiple sets as separete arguments.
# We unpack the `sets` list into multiple arguments with the * operator.
# The combine transform might give us an empty list of `sets`,
# so we use a list with an empty set as a default value.
return set.intersection(*(sets or [set()]))
with beam.Pipeline() as pipeline:
common_items = (
pipeline
| 'Create produce' >> beam.Create([
{'π', 'π₯', 'π', 'π
', 'πΆοΈ'},
{'π', 'π₯', 'π₯', 'π
', 'π₯'},
{'π', 'π₯', 'π', 'π
', 'π'},
{'π₯', 'π₯', 'π½', 'π
', 'π₯₯'},
])
| 'Get common items' >> beam.CombineGlobally(get_common_items)
| beam.Map(print))
# [END combineglobally_function]
if test:
test(common_items)
def combineglobally_lambda(test=None):
# [START combineglobally_lambda]
import apache_beam as beam
with beam.Pipeline() as pipeline:
common_items = (
pipeline
| 'Create produce' >> beam.Create([
{'π', 'π₯', 'π', 'π
', 'πΆοΈ'},
{'π', 'π₯', 'π₯', 'π
', 'π₯'},
{'π', 'π₯', 'π', 'π
', 'π'},
{'π₯', 'π₯', 'π½', 'π
', 'π₯₯'},
])
| 'Get common items' >>
beam.CombineGlobally(lambda sets: set.intersection(*(sets or [set()])))
| beam.Map(print))
# [END combineglobally_lambda]
if test:
test(common_items)
def combineglobally_multiple_arguments(test=None):
# [START combineglobally_multiple_arguments]
import apache_beam as beam
with beam.Pipeline() as pipeline:
common_items_with_exceptions = (
pipeline
| 'Create produce' >> beam.Create([
{'π', 'π₯', 'π', 'π
', 'πΆοΈ'},
{'π', 'π₯', 'π₯', 'π
', 'π₯'},
{'π', 'π₯', 'π', 'π
', 'π'},
{'π₯', 'π₯', 'π½', 'π
', 'π₯₯'},
])
| 'Get common items with exceptions' >> beam.CombineGlobally(
lambda sets, exclude: \
set.intersection(*(sets or [set()])) - exclude,
exclude={'π₯'})
| beam.Map(print)
)
# [END combineglobally_multiple_arguments]
if test:
test(common_items_with_exceptions)
def combineglobally_side_inputs_singleton(test=None):
# [START combineglobally_side_inputs_singleton]
import apache_beam as beam
with beam.Pipeline() as pipeline:
single_exclude = pipeline | 'Create single_exclude' >> beam.Create(['π₯'])
common_items_with_exceptions = (
pipeline
| 'Create produce' >> beam.Create([
{'π', 'π₯', 'π', 'π
', 'πΆοΈ'},
{'π', 'π₯', 'π₯', 'π
', 'π₯'},
{'π', 'π₯', 'π', 'π
', 'π'},
{'π₯', 'π₯', 'π½', 'π
', 'π₯₯'},
])
| 'Get common items with exceptions' >> beam.CombineGlobally(
lambda sets, single_exclude: \
set.intersection(*(sets or [set()])) - {single_exclude},
single_exclude=beam.pvalue.AsSingleton(single_exclude))
| beam.Map(print)
)
# [END combineglobally_side_inputs_singleton]
if test:
test(common_items_with_exceptions)
def combineglobally_side_inputs_iter(test=None):
# [START combineglobally_side_inputs_iter]
import apache_beam as beam
with beam.Pipeline() as pipeline:
exclude = pipeline | 'Create exclude' >> beam.Create(['π₯'])
common_items_with_exceptions = (
pipeline
| 'Create produce' >> beam.Create([
{'π', 'π₯', 'π', 'π
', 'πΆοΈ'},
{'π', 'π₯', 'π₯', 'π
', 'π₯'},
{'π', 'π₯', 'π', 'π
', 'π'},
{'π₯', 'π₯', 'π½', 'π
', 'π₯₯'},
])
| 'Get common items with exceptions' >> beam.CombineGlobally(
lambda sets, exclude: \
set.intersection(*(sets or [set()])) - set(exclude),
exclude=beam.pvalue.AsIter(exclude))
| beam.Map(print)
)
# [END combineglobally_side_inputs_iter]
if test:
test(common_items_with_exceptions)
def combineglobally_side_inputs_dict(test=None):
# [START combineglobally_side_inputs_dict]
import apache_beam as beam
def get_custom_common_items(sets, options):
sets = sets or [set()]
common_items = set.intersection(*sets)
common_items |= options['include'] # union
common_items &= options['exclude'] # intersection
return common_items
with beam.Pipeline() as pipeline:
options = pipeline | 'Create options' >> beam.Create([
('exclude', {'π₯'}),
('include', {'π', 'π½'}),
])
custom_common_items = (
pipeline
| 'Create produce' >> beam.Create([
{'π', 'π₯', 'π', 'π
', 'πΆοΈ'},
{'π', 'π₯', 'π₯', 'π
', 'π₯'},
{'π', 'π₯', 'π', 'π
', 'π'},
{'π₯', 'π₯', 'π½', 'π
', 'π₯₯'},
])
| 'Get common items' >> beam.CombineGlobally(
get_custom_common_items, options=beam.pvalue.AsDict(options))
| beam.Map(print))
# [END combineglobally_side_inputs_dict]
if test:
test(custom_common_items)
def combineglobally_combinefn(test=None):
# [START combineglobally_combinefn]
import apache_beam as beam
class PercentagesFn(beam.CombineFn):
def create_accumulator(self):
return {}
def add_input(self, accumulator, input):
# accumulator == {}
# input == 'π₯'
if input not in accumulator:
accumulator[input] = 0 # {'π₯': 0}
accumulator[input] += 1 # {'π₯': 1}
return accumulator
def merge_accumulators(self, accumulators):
# accumulators == [
# {'π₯': 1, 'π
': 2},
# {'π₯': 1, 'π
': 1, 'π': 1},
# {'π₯': 1, 'π
': 3},
# ]
merged = {}
for accum in accumulators:
for item, count in accum.items():
if item not in merged:
merged[item] = 0
merged[item] += count
# merged == {'π₯': 3, 'π
': 6, 'π': 1}
return merged
def extract_output(self, accumulator):
# accumulator == {'π₯': 3, 'π
': 6, 'π': 1}
total = sum(accumulator.values()) # 10
percentages = {item: count / total for item, count in accumulator.items()}
# percentages == {'π₯': 0.3, 'π
': 0.6, 'π': 0.1}
return percentages
with beam.Pipeline() as pipeline:
percentages = (
pipeline
| 'Create produce' >> beam.Create(
['π₯', 'π
', 'π
', 'π₯', 'π', 'π
', 'π
', 'π
', 'π₯', 'π
'])
| 'Get percentages' >> beam.CombineGlobally(PercentagesFn())
| beam.Map(print))
# [END combineglobally_combinefn]
if test:
test(percentages) | /rflow-apache-beam-2.28.0.tar.gz/rflow-apache-beam-2.28.0/apache_beam/examples/snippets/transforms/aggregation/combineglobally.py | 0.737631 | 0.446676 | combineglobally.py | pypi |
from __future__ import absolute_import
from __future__ import print_function
def top_largest(test=None):
# [START top_largest]
import apache_beam as beam
with beam.Pipeline() as pipeline:
largest_elements = (
pipeline
| 'Create numbers' >> beam.Create([3, 4, 1, 2])
| 'Largest N values' >> beam.combiners.Top.Largest(2)
| beam.Map(print))
# [END top_largest]
if test:
test(largest_elements)
def top_largest_per_key(test=None):
# [START top_largest_per_key]
import apache_beam as beam
with beam.Pipeline() as pipeline:
largest_elements_per_key = (
pipeline
| 'Create produce' >> beam.Create([
('π₯', 3),
('π₯', 2),
('π', 1),
('π
', 4),
('π
', 5),
('π
', 3),
])
| 'Largest N values per key' >> beam.combiners.Top.LargestPerKey(2)
| beam.Map(print))
# [END top_largest_per_key]
if test:
test(largest_elements_per_key)
def top_smallest(test=None):
# [START top_smallest]
import apache_beam as beam
with beam.Pipeline() as pipeline:
smallest_elements = (
pipeline
| 'Create numbers' >> beam.Create([3, 4, 1, 2])
| 'Smallest N values' >> beam.combiners.Top.Smallest(2)
| beam.Map(print))
# [END top_smallest]
if test:
test(smallest_elements)
def top_smallest_per_key(test=None):
# [START top_smallest_per_key]
import apache_beam as beam
with beam.Pipeline() as pipeline:
smallest_elements_per_key = (
pipeline
| 'Create produce' >> beam.Create([
('π₯', 3),
('π₯', 2),
('π', 1),
('π
', 4),
('π
', 5),
('π
', 3),
])
| 'Smallest N values per key' >> beam.combiners.Top.SmallestPerKey(2)
| beam.Map(print))
# [END top_smallest_per_key]
if test:
test(smallest_elements_per_key)
def top_of(test=None):
# [START top_of]
import apache_beam as beam
with beam.Pipeline() as pipeline:
shortest_elements = (
pipeline
| 'Create produce names' >> beam.Create([
'π Strawberry',
'π₯ Carrot',
'π Green apple',
'π Eggplant',
'π½ Corn',
])
| 'Shortest names' >> beam.combiners.Top.Of(
2, # number of elements
key=len, # optional, defaults to the element itself
reverse=True, # optional, defaults to False (largest/descending)
)
| beam.Map(print)
)
# [END top_of]
if test:
test(shortest_elements)
def top_per_key(test=None):
# [START top_per_key]
import apache_beam as beam
with beam.Pipeline() as pipeline:
shortest_elements_per_key = (
pipeline
| 'Create produce names' >> beam.Create([
('spring', 'π₯ Carrot'),
('spring', 'π Strawberry'),
('summer', 'π₯ Carrot'),
('summer', 'π½ Corn'),
('summer', 'π Green apple'),
('fall', 'π₯ Carrot'),
('fall', 'π Green apple'),
('winter', 'π Eggplant'),
])
| 'Shortest names per key' >> beam.combiners.Top.PerKey(
2, # number of elements
key=len, # optional, defaults to the value itself
reverse=True, # optional, defaults to False (largest/descending)
)
| beam.Map(print)
)
# [END top_per_key]
if test:
test(shortest_elements_per_key) | /rflow-apache-beam-2.28.0.tar.gz/rflow-apache-beam-2.28.0/apache_beam/examples/snippets/transforms/aggregation/top.py | 0.604983 | 0.39636 | top.py | pypi |
# pytype: skip-file
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
def combineperkey_simple(test=None):
# [START combineperkey_simple]
import apache_beam as beam
with beam.Pipeline() as pipeline:
total = (
pipeline
| 'Create plant counts' >> beam.Create([
('π₯', 3),
('π₯', 2),
('π', 1),
('π
', 4),
('π
', 5),
('π
', 3),
])
| 'Sum' >> beam.CombinePerKey(sum)
| beam.Map(print))
# [END combineperkey_simple]
if test:
test(total)
def combineperkey_function(test=None):
# [START combineperkey_function]
import apache_beam as beam
def saturated_sum(values):
max_value = 8
return min(sum(values), max_value)
with beam.Pipeline() as pipeline:
saturated_total = (
pipeline
| 'Create plant counts' >> beam.Create([
('π₯', 3),
('π₯', 2),
('π', 1),
('π
', 4),
('π
', 5),
('π
', 3),
])
| 'Saturated sum' >> beam.CombinePerKey(saturated_sum)
| beam.Map(print))
# [END combineperkey_function]
if test:
test(saturated_total)
def combineperkey_lambda(test=None):
# [START combineperkey_lambda]
import apache_beam as beam
with beam.Pipeline() as pipeline:
saturated_total = (
pipeline
| 'Create plant counts' >> beam.Create([
('π₯', 3),
('π₯', 2),
('π', 1),
('π
', 4),
('π
', 5),
('π
', 3),
])
| 'Saturated sum' >>
beam.CombinePerKey(lambda values: min(sum(values), 8))
| beam.Map(print))
# [END combineperkey_lambda]
if test:
test(saturated_total)
def combineperkey_multiple_arguments(test=None):
# [START combineperkey_multiple_arguments]
import apache_beam as beam
with beam.Pipeline() as pipeline:
saturated_total = (
pipeline
| 'Create plant counts' >> beam.Create([
('π₯', 3),
('π₯', 2),
('π', 1),
('π
', 4),
('π
', 5),
('π
', 3),
])
| 'Saturated sum' >> beam.CombinePerKey(
lambda values, max_value: min(sum(values), max_value), max_value=8)
| beam.Map(print))
# [END combineperkey_multiple_arguments]
if test:
test(saturated_total)
def combineperkey_side_inputs_singleton(test=None):
# [START combineperkey_side_inputs_singleton]
import apache_beam as beam
with beam.Pipeline() as pipeline:
max_value = pipeline | 'Create max_value' >> beam.Create([8])
saturated_total = (
pipeline
| 'Create plant counts' >> beam.Create([
('π₯', 3),
('π₯', 2),
('π', 1),
('π
', 4),
('π
', 5),
('π
', 3),
])
| 'Saturated sum' >> beam.CombinePerKey(
lambda values,
max_value: min(sum(values), max_value),
max_value=beam.pvalue.AsSingleton(max_value))
| beam.Map(print))
# [END combineperkey_side_inputs_singleton]
if test:
test(saturated_total)
def combineperkey_side_inputs_iter(test=None):
# [START combineperkey_side_inputs_iter]
import apache_beam as beam
def bounded_sum(values, data_range):
min_value = min(data_range)
result = sum(values)
if result < min_value:
return min_value
max_value = max(data_range)
if result > max_value:
return max_value
return result
with beam.Pipeline() as pipeline:
data_range = pipeline | 'Create data_range' >> beam.Create([2, 4, 8])
bounded_total = (
pipeline
| 'Create plant counts' >> beam.Create([
('π₯', 3),
('π₯', 2),
('π', 1),
('π
', 4),
('π
', 5),
('π
', 3),
])
| 'Bounded sum' >> beam.CombinePerKey(
bounded_sum, data_range=beam.pvalue.AsIter(data_range))
| beam.Map(print))
# [END combineperkey_side_inputs_iter]
if test:
test(bounded_total)
def combineperkey_side_inputs_dict(test=None):
# [START combineperkey_side_inputs_dict]
import apache_beam as beam
def bounded_sum(values, data_range):
min_value = data_range['min']
result = sum(values)
if result < min_value:
return min_value
max_value = data_range['max']
if result > max_value:
return max_value
return result
with beam.Pipeline() as pipeline:
data_range = pipeline | 'Create data_range' >> beam.Create([
('min', 2),
('max', 8),
])
bounded_total = (
pipeline
| 'Create plant counts' >> beam.Create([
('π₯', 3),
('π₯', 2),
('π', 1),
('π
', 4),
('π
', 5),
('π
', 3),
])
| 'Bounded sum' >> beam.CombinePerKey(
bounded_sum, data_range=beam.pvalue.AsDict(data_range))
| beam.Map(print))
# [END combineperkey_side_inputs_dict]
if test:
test(bounded_total)
def combineperkey_combinefn(test=None):
# [START combineperkey_combinefn]
import apache_beam as beam
class AverageFn(beam.CombineFn):
def create_accumulator(self):
sum = 0.0
count = 0
accumulator = sum, count
return accumulator
def add_input(self, accumulator, input):
sum, count = accumulator
return sum + input, count + 1
def merge_accumulators(self, accumulators):
# accumulators = [(sum1, count1), (sum2, count2), (sum3, count3), ...]
sums, counts = zip(*accumulators)
# sums = [sum1, sum2, sum3, ...]
# counts = [count1, count2, count3, ...]
return sum(sums), sum(counts)
def extract_output(self, accumulator):
sum, count = accumulator
if count == 0:
return float('NaN')
return sum / count
with beam.Pipeline() as pipeline:
average = (
pipeline
| 'Create plant counts' >> beam.Create([
('π₯', 3),
('π₯', 2),
('π', 1),
('π
', 4),
('π
', 5),
('π
', 3),
])
| 'Average' >> beam.CombinePerKey(AverageFn())
| beam.Map(print))
# [END combineperkey_combinefn]
if test:
test(average) | /rflow-apache-beam-2.28.0.tar.gz/rflow-apache-beam-2.28.0/apache_beam/examples/snippets/transforms/aggregation/combineperkey.py | 0.706292 | 0.487185 | combineperkey.py | pypi |
# pytype: skip-file
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
def combinevalues_simple(test=None):
# [START combinevalues_simple]
import apache_beam as beam
with beam.Pipeline() as pipeline:
total = (
pipeline
| 'Create produce counts' >> beam.Create([
('π₯', [3, 2]),
('π', [1]),
('π
', [4, 5, 3]),
])
| 'Sum' >> beam.CombineValues(sum)
| beam.Map(print))
# [END combinevalues_simple]
if test:
test(total)
def combinevalues_function(test=None):
# [START combinevalues_function]
import apache_beam as beam
def saturated_sum(values):
max_value = 8
return min(sum(values), max_value)
with beam.Pipeline() as pipeline:
saturated_total = (
pipeline
| 'Create plant counts' >> beam.Create([
('π₯', [3, 2]),
('π', [1]),
('π
', [4, 5, 3]),
])
| 'Saturated sum' >> beam.CombineValues(saturated_sum)
| beam.Map(print))
# [END combinevalues_function]
if test:
test(saturated_total)
def combinevalues_lambda(test=None):
# [START combinevalues_lambda]
import apache_beam as beam
with beam.Pipeline() as pipeline:
saturated_total = (
pipeline
| 'Create plant counts' >> beam.Create([
('π₯', [3, 2]),
('π', [1]),
('π
', [4, 5, 3]),
])
| 'Saturated sum' >>
beam.CombineValues(lambda values: min(sum(values), 8))
| beam.Map(print))
# [END combinevalues_lambda]
if test:
test(saturated_total)
def combinevalues_multiple_arguments(test=None):
# [START combinevalues_multiple_arguments]
import apache_beam as beam
with beam.Pipeline() as pipeline:
saturated_total = (
pipeline
| 'Create plant counts' >> beam.Create([
('π₯', [3, 2]),
('π', [1]),
('π
', [4, 5, 3]),
])
| 'Saturated sum' >> beam.CombineValues(
lambda values, max_value: min(sum(values), max_value), max_value=8)
| beam.Map(print))
# [END combinevalues_multiple_arguments]
if test:
test(saturated_total)
def combinevalues_side_inputs_singleton(test=None):
# [START combinevalues_side_inputs_singleton]
import apache_beam as beam
with beam.Pipeline() as pipeline:
max_value = pipeline | 'Create max_value' >> beam.Create([8])
saturated_total = (
pipeline
| 'Create plant counts' >> beam.Create([
('π₯', [3, 2]),
('π', [1]),
('π
', [4, 5, 3]),
])
| 'Saturated sum' >> beam.CombineValues(
lambda values,
max_value: min(sum(values), max_value),
max_value=beam.pvalue.AsSingleton(max_value))
| beam.Map(print))
# [END combinevalues_side_inputs_singleton]
if test:
test(saturated_total)
def combinevalues_side_inputs_iter(test=None):
# [START combinevalues_side_inputs_iter]
import apache_beam as beam
def bounded_sum(values, data_range):
min_value = min(data_range)
result = sum(values)
if result < min_value:
return min_value
max_value = max(data_range)
if result > max_value:
return max_value
return result
with beam.Pipeline() as pipeline:
data_range = pipeline | 'Create data_range' >> beam.Create([2, 4, 8])
bounded_total = (
pipeline
| 'Create plant counts' >> beam.Create([
('π₯', [3, 2]),
('π', [1]),
('π
', [4, 5, 3]),
])
| 'Bounded sum' >> beam.CombineValues(
bounded_sum, data_range=beam.pvalue.AsIter(data_range))
| beam.Map(print))
# [END combinevalues_side_inputs_iter]
if test:
test(bounded_total)
def combinevalues_side_inputs_dict(test=None):
# [START combinevalues_side_inputs_dict]
import apache_beam as beam
def bounded_sum(values, data_range):
min_value = data_range['min']
result = sum(values)
if result < min_value:
return min_value
max_value = data_range['max']
if result > max_value:
return max_value
return result
with beam.Pipeline() as pipeline:
data_range = pipeline | 'Create data_range' >> beam.Create([
('min', 2),
('max', 8),
])
bounded_total = (
pipeline
| 'Create plant counts' >> beam.Create([
('π₯', [3, 2]),
('π', [1]),
('π
', [4, 5, 3]),
])
| 'Bounded sum' >> beam.CombineValues(
bounded_sum, data_range=beam.pvalue.AsDict(data_range))
| beam.Map(print))
# [END combinevalues_side_inputs_dict]
if test:
test(bounded_total)
def combinevalues_combinefn(test=None):
# [START combinevalues_combinefn]
import apache_beam as beam
class AverageFn(beam.CombineFn):
def create_accumulator(self):
return {}
def add_input(self, accumulator, input):
# accumulator == {}
# input == 'π₯'
if input not in accumulator:
accumulator[input] = 0 # {'π₯': 0}
accumulator[input] += 1 # {'π₯': 1}
return accumulator
def merge_accumulators(self, accumulators):
# accumulators == [
# {'π₯': 1, 'π
': 1},
# {'π₯': 1, 'π
': 1, 'π': 1},
# ]
merged = {}
for accum in accumulators:
for item, count in accum.items():
if item not in merged:
merged[item] = 0
merged[item] += count
# merged == {'π₯': 2, 'π
': 2, 'π': 1}
return merged
def extract_output(self, accumulator):
# accumulator == {'π₯': 2, 'π
': 2, 'π': 1}
total = sum(accumulator.values()) # 5
percentages = {item: count / total for item, count in accumulator.items()}
# percentages == {'π₯': 0.4, 'π
': 0.4, 'π': 0.2}
return percentages
with beam.Pipeline() as pipeline:
percentages_per_season = (
pipeline
| 'Create produce' >> beam.Create([
('spring', ['π₯', 'π
', 'π₯', 'π
', 'π']),
('summer', ['π₯', 'π
', 'π½', 'π
', 'π
']),
('fall', ['π₯', 'π₯', 'π
', 'π
']),
('winter', ['π', 'π']),
])
| 'Average' >> beam.CombineValues(AverageFn())
| beam.Map(print))
# [END combinevalues_combinefn]
if test:
test(percentages_per_season) | /rflow-apache-beam-2.28.0.tar.gz/rflow-apache-beam-2.28.0/apache_beam/examples/snippets/transforms/aggregation/combinevalues.py | 0.665193 | 0.624694 | combinevalues.py | pypi |
# pytype: skip-file
from __future__ import absolute_import
import argparse
import logging
import re
from builtins import range
import apache_beam as beam
from apache_beam.io import ReadFromText
from apache_beam.io import WriteToText
from apache_beam.options.pipeline_options import PipelineOptions
from apache_beam.options.pipeline_options import SetupOptions
def run(argv=None):
parser = argparse.ArgumentParser()
parser.add_argument('--input', required=True, help='Input file to process.')
parser.add_argument(
'--output', required=True, help='Output file to write results to.')
known_args, pipeline_args = parser.parse_known_args(argv)
# We use the save_main_session option because one or more DoFn's in this
# workflow rely on global context (e.g., a module imported at module level).
pipeline_options = PipelineOptions(pipeline_args)
pipeline_options.view_as(SetupOptions).save_main_session = True
with beam.Pipeline(options=pipeline_options) as p:
def format_result(prefix_candidates):
(prefix, candidates) = prefix_candidates
return '%s: %s' % (prefix, candidates)
( # pylint: disable=expression-not-assigned
p
| 'read' >> ReadFromText(known_args.input)
| 'split' >> beam.FlatMap(lambda x: re.findall(r'[A-Za-z\']+', x))
| 'TopPerPrefix' >> TopPerPrefix(5)
| 'format' >> beam.Map(format_result)
| 'write' >> WriteToText(known_args.output))
class TopPerPrefix(beam.PTransform):
def __init__(self, count):
# TODO(BEAM-6158): Revert the workaround once we can pickle super() on py3.
# super(TopPerPrefix, self).__init__()
beam.PTransform.__init__(self)
self._count = count
def expand(self, words):
"""Compute the most common words for each possible prefixes.
Args:
words: a PCollection of strings
Returns:
A PCollection of most common words with each prefix, in the form
(prefix, [(count, word), (count, word), ...])
"""
return (
words
| beam.combiners.Count.PerElement()
| beam.FlatMap(extract_prefixes)
| beam.combiners.Top.LargestPerKey(self._count))
def extract_prefixes(element):
word, count = element
for k in range(1, len(word) + 1):
prefix = word[:k]
yield prefix, (count, word)
if __name__ == '__main__':
logging.getLogger().setLevel(logging.INFO)
run() | /rflow-apache-beam-2.28.0.tar.gz/rflow-apache-beam-2.28.0/apache_beam/examples/complete/autocomplete.py | 0.55254 | 0.211987 | autocomplete.py | pypi |
# pytype: skip-file
from __future__ import absolute_import
import argparse
import json
import logging
import apache_beam as beam
from apache_beam import combiners
from apache_beam.io import ReadFromText
from apache_beam.io import WriteToText
from apache_beam.options.pipeline_options import PipelineOptions
from apache_beam.options.pipeline_options import SetupOptions
from apache_beam.transforms.window import FixedWindows
from apache_beam.transforms.window import Sessions
from apache_beam.transforms.window import TimestampedValue
ONE_HOUR_IN_SECONDS = 3600
THIRTY_DAYS_IN_SECONDS = 30 * 24 * ONE_HOUR_IN_SECONDS
MAX_TIMESTAMP = 0x7fffffffffffffff
class ExtractUserAndTimestampDoFn(beam.DoFn):
"""Extracts user and timestamp representing a Wikipedia edit."""
def process(self, element):
table_row = json.loads(element)
if 'contributor_username' in table_row:
user_name = table_row['contributor_username']
timestamp = table_row['timestamp']
yield TimestampedValue(user_name, timestamp)
class ComputeSessions(beam.PTransform):
"""Computes the number of edits in each user session.
A session is defined as a string of edits where each is separated from the
next by less than an hour.
"""
def expand(self, pcoll):
return (
pcoll
| 'ComputeSessionsWindow' >> beam.WindowInto(
Sessions(gap_size=ONE_HOUR_IN_SECONDS))
| combiners.Count.PerElement())
class TopPerMonth(beam.PTransform):
"""Computes the longest session ending in each month."""
def expand(self, pcoll):
return (
pcoll
| 'TopPerMonthWindow' >> beam.WindowInto(
FixedWindows(size=THIRTY_DAYS_IN_SECONDS))
| 'Top' >> combiners.core.CombineGlobally(
combiners.TopCombineFn(
10,
lambda first, second: first[1] < second[1])).without_defaults())
class SessionsToStringsDoFn(beam.DoFn):
"""Adds the session information to be part of the key."""
def process(self, element, window=beam.DoFn.WindowParam):
yield (element[0] + ' : ' + str(window), element[1])
class FormatOutputDoFn(beam.DoFn):
"""Formats a string containing the user, count, and session."""
def process(self, element, window=beam.DoFn.WindowParam):
for kv in element:
session = kv[0]
count = kv[1]
yield session + ' : ' + str(count) + ' : ' + str(window)
class ComputeTopSessions(beam.PTransform):
"""Computes the top user sessions for each month."""
def __init__(self, sampling_threshold):
# TODO(BEAM-6158): Revert the workaround once we can pickle super() on py3.
# super(ComputeTopSessions, self).__init__()
beam.PTransform.__init__(self)
self.sampling_threshold = sampling_threshold
def expand(self, pcoll):
return (
pcoll
|
'ExtractUserAndTimestamp' >> beam.ParDo(ExtractUserAndTimestampDoFn())
| beam.Filter(
lambda x: (abs(hash(x)) <= MAX_TIMESTAMP * self.sampling_threshold))
| ComputeSessions()
| 'SessionsToStrings' >> beam.ParDo(SessionsToStringsDoFn())
| TopPerMonth()
| 'FormatOutput' >> beam.ParDo(FormatOutputDoFn()))
def run(argv=None):
"""Runs the Wikipedia top edits pipeline.
Args:
argv: Pipeline options as a list of arguments.
"""
parser = argparse.ArgumentParser()
parser.add_argument(
'--input',
dest='input',
default='gs://dataflow-samples/wikipedia_edits/*.json',
help='Input specified as a GCS path containing a BigQuery table exported '
'as json.')
parser.add_argument(
'--output', required=True, help='Output file to write results to.')
parser.add_argument(
'--sampling_threshold',
type=float,
default=0.1,
help='Fraction of entries used for session tracking')
known_args, pipeline_args = parser.parse_known_args(argv)
# We use the save_main_session option because one or more DoFn's in this
# workflow rely on global context (e.g., a module imported at module level).
pipeline_options = PipelineOptions(pipeline_args)
pipeline_options.view_as(SetupOptions).save_main_session = True
with beam.Pipeline(options=pipeline_options) as p:
( # pylint: disable=expression-not-assigned
p
| ReadFromText(known_args.input)
| ComputeTopSessions(known_args.sampling_threshold)
| WriteToText(known_args.output))
if __name__ == '__main__':
logging.getLogger().setLevel(logging.INFO)
run() | /rflow-apache-beam-2.28.0.tar.gz/rflow-apache-beam-2.28.0/apache_beam/examples/complete/top_wikipedia_sessions.py | 0.678433 | 0.322846 | top_wikipedia_sessions.py | pypi |
# pytype: skip-file
from __future__ import absolute_import
from __future__ import division
import argparse
import json
import logging
import random
from builtins import object
from builtins import range
from typing import Any
from typing import Iterable
from typing import Tuple
import apache_beam as beam
from apache_beam.io import WriteToText
from apache_beam.options.pipeline_options import PipelineOptions
from apache_beam.options.pipeline_options import SetupOptions
@beam.typehints.with_output_types(Tuple[int, int, int])
@beam.typehints.with_input_types(int)
def run_trials(runs):
  """Run a batch of Monte Carlo trials and report the results.

  Each trial draws a uniformly random point in the unit square and counts
  whether it lands inside the quarter unit circle.

  Args:
    runs: Number of trial runs to be executed.

  Returns:
    A 3-tuple (total trials, inside trials, 0).

  The final zero is needed solely to make sure that the combine_results
  function has same type for inputs and outputs (a requirement for combiner
  functions).
  """
  hits = 0
  for _ in range(runs):
    x = random.uniform(0, 1)
    y = random.uniform(0, 1)
    if x * x + y * y <= 1.0:
      hits += 1
  return runs, hits, 0
@beam.typehints.with_output_types(Tuple[int, int, float])
@beam.typehints.with_input_types(Iterable[Tuple[int, int, Any]])
def combine_results(results):
  """Combiner function to sum up trials and compute the estimate.

  Args:
    results: An iterable of 3-tuples (total trials, inside trials, ignored).

  Returns:
    A 3-tuple containing the sum of total trials, sum of inside trials, and
    the probability computed from the two numbers.
  """
  # TODO(silviuc): Do we guarantee that argument can be iterated repeatedly?
  # Should document one way or the other.
  trials = sum(r[0] for r in results)
  hits = sum(r[1] for r in results)
  return trials, hits, 4 * float(hits) / trials
class JsonCoder(object):
  """Coder that serializes each output record as a JSON text string."""

  def encode(self, record):
    """Return *record* encoded as JSON."""
    return json.dumps(record)
class EstimatePiTransform(beam.PTransform):
  """Runs 10M trials, and combine the results to estimate pi."""
  def __init__(self, tries_per_work_item=100000):
    # Number of Monte Carlo trials each work item performs; expand() below
    # fans out 100 such work items (10M trials total by default).
    self.tries_per_work_item = tries_per_work_item
  def expand(self, pcoll):
    # A hundred work items of a hundred thousand tries each.
    return (
        pcoll
        | 'Initialize' >> beam.Create(
            [self.tries_per_work_item] * 100).with_output_types(int)
        | 'Run trials' >> beam.Map(run_trials)
        # without_defaults(): emit nothing (rather than an identity value)
        # if the input collection happens to be empty.
        | 'Sum' >> beam.CombineGlobally(combine_results).without_defaults())
def run(argv=None):
  """Parses arguments, then builds and runs the pi-estimation pipeline."""
  arg_parser = argparse.ArgumentParser()
  arg_parser.add_argument(
      '--output', required=True, help='Output file to write results to.')
  opts, beam_args = arg_parser.parse_known_args(argv)

  # We use the save_main_session option because one or more DoFn's in this
  # workflow rely on global context (e.g., a module imported at module level).
  options = PipelineOptions(beam_args)
  options.view_as(SetupOptions).save_main_session = True

  with beam.Pipeline(options=options) as pipeline:
    ( # pylint: disable=expression-not-assigned
        pipeline
        | EstimatePiTransform()
        | WriteToText(opts.output, coder=JsonCoder()))
if __name__ == '__main__':
logging.getLogger().setLevel(logging.INFO)
  run()
# pytype: skip-file
from __future__ import absolute_import
from __future__ import division
import argparse
import csv
import logging
import apache_beam as beam
from apache_beam.metrics.metric import Metrics
from apache_beam.options.pipeline_options import PipelineOptions
from apache_beam.options.pipeline_options import SetupOptions
class ParseGameEventFn(beam.DoFn):
  """Parses the raw game event info into a Python dictionary.

  Each event line has the following format:
    username,teamname,score,timestamp_in_ms,readable_time
  e.g.:
    user2_AsparagusPig,AsparagusPig,10,1445230923951,2015-11-02 09:09:28.224
  The human-readable time string is not used here.
  """
  def __init__(self):
    # TODO(BEAM-6158): Revert the workaround once we can pickle super() on py3.
    # super(ParseGameEventFn, self).__init__()
    beam.DoFn.__init__(self)
    # Counts lines that failed to parse; surfaced through Beam's metrics API.
    self.num_parse_errors = Metrics.counter(self.__class__, 'num_parse_errors')
  def process(self, elem):
    try:
      # csv.reader handles quoting/escaping; we feed a single line, so take
      # the first (only) row of the result.
      row = list(csv.reader([elem]))[0]
      yield {
          'user': row[0],
          'team': row[1],
          'score': int(row[2]),
          # Event timestamps arrive in milliseconds; convert to seconds.
          'timestamp': int(row[3]) / 1000.0,
      }
    except: # pylint: disable=bare-except
      # Log and count parse errors, dropping the bad element.
      self.num_parse_errors.inc()
      logging.error('Parse error on "%s"', elem)
# [START extract_and_sum_score]
class ExtractAndSumScore(beam.PTransform):
  """A transform to extract key/score information and sum the scores.

  The constructor argument `field` determines whether 'team' or 'user' info is
  extracted.
  """
  def __init__(self, field):
    # TODO(BEAM-6158): Revert the workaround once we can pickle super() on py3.
    # super(ExtractAndSumScore, self).__init__()
    beam.PTransform.__init__(self)
    # Dict key to group on: 'team' or 'user'.
    self.field = field
  def expand(self, pcoll):
    # Map each parsed event to a (key, score) pair, then sum scores per key.
    return (
        pcoll
        | beam.Map(lambda elem: (elem[self.field], elem['score']))
        | beam.CombinePerKey(sum))
# [END extract_and_sum_score]
class UserScore(beam.PTransform):
  """Composite transform computing the total score per user.

  Parses raw event lines, then extracts and sums username/score pairs.
  """
  def expand(self, pcoll):
    return (
        pcoll
        | 'ParseGameEventFn' >> beam.ParDo(ParseGameEventFn())
        # Extract and sum username/score pairs from the event data.
        | 'ExtractAndSumScore' >> ExtractAndSumScore('user'))
# [START main]
def run(argv=None, save_main_session=True):
  """Main entry point; defines and runs the user_score pipeline.

  Args:
    argv: Command-line arguments; unrecognized ones are passed to Beam.
    save_main_session: Whether to pickle the main session for remote workers.
  """
  parser = argparse.ArgumentParser()
  # The default maps to two large Google Cloud Storage files (each ~12GB)
  # holding two subsequent day's worth (roughly) of data.
  parser.add_argument(
      '--input',
      type=str,
      default='gs://apache-beam-samples/game/gaming_data*.csv',
      help='Path to the data file(s) containing game data.')
  parser.add_argument(
      '--output', type=str, required=True, help='Path to the output file(s).')
  args, pipeline_args = parser.parse_known_args(argv)
  options = PipelineOptions(pipeline_args)
  # We use the save_main_session option because one or more DoFn's in this
  # workflow rely on global context (e.g., a module imported at module level).
  options.view_as(SetupOptions).save_main_session = save_main_session
  with beam.Pipeline(options=options) as p:
    def format_user_score_sums(user_score):
      # Render each (user, total) pair as a human-readable text line.
      (user, score) = user_score
      return 'user: %s, total_score: %s' % (user, score)

    ( # pylint: disable=expression-not-assigned
        p
        | 'ReadInputText' >> beam.io.ReadFromText(args.input)
        | 'UserScore' >> UserScore()
        | 'FormatUserScoreSums' >> beam.Map(format_user_score_sums)
        | 'WriteUserScoreSums' >> beam.io.WriteToText(args.output))
# [END main]
if __name__ == '__main__':
logging.getLogger().setLevel(logging.INFO)
  run()
# pytype: skip-file
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import csv
import logging
import sys
import time
from datetime import datetime
import apache_beam as beam
from apache_beam.metrics.metric import Metrics
from apache_beam.options.pipeline_options import GoogleCloudOptions
from apache_beam.options.pipeline_options import PipelineOptions
from apache_beam.options.pipeline_options import SetupOptions
def str2timestamp(s, fmt='%Y-%m-%d-%H-%M'):
  """Return the seconds since the Unix epoch for time string *s*.

  Args:
    s: Time string to parse (interpreted as UTC).
    fmt: strptime-style format describing *s*.
  """
  parsed = datetime.strptime(s, fmt)
  return (parsed - datetime.utcfromtimestamp(0)).total_seconds()
def timestamp2str(t, fmt='%Y-%m-%d %H:%M:%S.000'):
"""Converts a unix timestamp into a formatted string."""
return datetime.fromtimestamp(t).strftime(fmt)
class ParseGameEventFn(beam.DoFn):
  """Parses the raw game event info into a Python dictionary.

  Each event line has the following format:
    username,teamname,score,timestamp_in_ms,readable_time
  e.g.:
    user2_AsparagusPig,AsparagusPig,10,1445230923951,2015-11-02 09:09:28.224
  The human-readable time string is not used here.
  """
  def __init__(self):
    # TODO(BEAM-6158): Revert the workaround once we can pickle super() on py3.
    # super(ParseGameEventFn, self).__init__()
    beam.DoFn.__init__(self)
    # Counts lines that failed to parse; surfaced through Beam's metrics API.
    self.num_parse_errors = Metrics.counter(self.__class__, 'num_parse_errors')
  def process(self, elem):
    try:
      # csv.reader handles quoting/escaping; we feed a single line, so take
      # the first (only) row of the result.
      row = list(csv.reader([elem]))[0]
      yield {
          'user': row[0],
          'team': row[1],
          'score': int(row[2]),
          # Event timestamps arrive in milliseconds; convert to seconds.
          'timestamp': int(row[3]) / 1000.0,
      }
    except: # pylint: disable=bare-except
      # Log and count parse errors, dropping the bad element.
      self.num_parse_errors.inc()
      logging.error('Parse error on "%s"', elem)
class ExtractAndSumScore(beam.PTransform):
  """A transform to extract key/score information and sum the scores.

  The constructor argument `field` determines whether 'team' or 'user' info is
  extracted.
  """
  def __init__(self, field):
    # TODO(BEAM-6158): Revert the workaround once we can pickle super() on py3.
    # super(ExtractAndSumScore, self).__init__()
    beam.PTransform.__init__(self)
    # Dict key to group on: 'team' or 'user'.
    self.field = field
  def expand(self, pcoll):
    # Map each parsed event to a (key, score) pair, then sum scores per key.
    return (
        pcoll
        | beam.Map(lambda elem: (elem[self.field], elem['score']))
        | beam.CombinePerKey(sum))
class TeamScoresDict(beam.DoFn):
  """Formats the data into a dictionary of BigQuery columns with their values

  Receives a (team, score) pair, extracts the window start timestamp, and
  formats everything together into a dictionary. The dictionary is in the
  format {'bigquery_column': value}
  """
  def process(self, team_score, window=beam.DoFn.WindowParam):
    team, score = team_score
    # window.start is the event-time window boundary; render as string column.
    start = timestamp2str(int(window.start))
    yield {
        'team': team,
        'total_score': score,
        'window_start': start,
        # Wall-clock time this row was produced, for debugging/latency checks.
        'processing_time': timestamp2str(int(time.time()))
    }
class WriteToBigQuery(beam.PTransform):
  """Generate, format, and write BigQuery table row information."""
  def __init__(self, table_name, dataset, schema, project):
    """Initializes the transform.

    Args:
      table_name: Name of the BigQuery table to use.
      dataset: Name of the dataset to use.
      schema: Dictionary in the format {'column_name': 'bigquery_type'}
      project: Name of the Cloud project containing BigQuery table.
    """
    # TODO(BEAM-6158): Revert the workaround once we can pickle super() on py3.
    # super(WriteToBigQuery, self).__init__()
    beam.PTransform.__init__(self)
    self.table_name = table_name
    self.dataset = dataset
    self.schema = schema
    self.project = project
  def get_schema(self):
    """Build the output table schema as a 'name:type, ...' string."""
    return ', '.join('%s:%s' % (col, self.schema[col]) for col in self.schema)
  def expand(self, pcoll):
    return (
        pcoll
        # Keep only the columns declared in the schema, in dict-row form.
        | 'ConvertToRow' >>
        beam.Map(lambda elem: {col: elem[col]
                               for col in self.schema})
        | beam.io.WriteToBigQuery(
            self.table_name, self.dataset, self.project, self.get_schema()))
# [START main]
class HourlyTeamScore(beam.PTransform):
  """Computes per-team score sums over fixed event-time windows."""
  def __init__(self, start_min, stop_min, window_duration):
    """Initializes the transform.

    Args:
      start_min: 'yyyy-MM-dd-HH-mm' string; events before this are dropped.
      stop_min: 'yyyy-MM-dd-HH-mm' string; events after this are dropped.
      window_duration: Fixed window size, in minutes.
    """
    # TODO(BEAM-6158): Revert the workaround once we can pickle super() on py3.
    # super(HourlyTeamScore, self).__init__()
    beam.PTransform.__init__(self)
    self.start_timestamp = str2timestamp(start_min)
    self.stop_timestamp = str2timestamp(stop_min)
    self.window_duration_in_seconds = window_duration * 60
  def expand(self, pcoll):
    return (
        pcoll
        | 'ParseGameEventFn' >> beam.ParDo(ParseGameEventFn())
        # Filter out data before and after the given times so that it is not
        # included in the calculations. As we collect data in batches (say, by
        # day), the batch for the day that we want to analyze could potentially
        # include some late-arriving data from the previous day. If so, we want
        # to weed it out. Similarly, if we include data from the following day
        # (to scoop up late-arriving events from the day we're analyzing), we
        # need to weed out events that fall after the time period we want to
        # analyze.
        # [START filter_by_time_range]
        | 'FilterStartTime' >>
        beam.Filter(lambda elem: elem['timestamp'] > self.start_timestamp)
        | 'FilterEndTime' >>
        beam.Filter(lambda elem: elem['timestamp'] < self.stop_timestamp)
        # [END filter_by_time_range]
        # [START add_timestamp_and_window]
        # Add an element timestamp based on the event log, and apply fixed
        # windowing.
        | 'AddEventTimestamps' >> beam.Map(
            lambda elem: beam.window.TimestampedValue(elem, elem['timestamp']))
        | 'FixedWindowsTeam' >> beam.WindowInto(
            beam.window.FixedWindows(self.window_duration_in_seconds))
        # [END add_timestamp_and_window]
        # Extract and sum teamname/score pairs from the event data.
        | 'ExtractAndSumScore' >> ExtractAndSumScore('team'))
def run(argv=None, save_main_session=True):
  """Main entry point; defines and runs the hourly_team_score pipeline.

  Args:
    argv: Command-line arguments; unrecognized ones are passed to Beam.
    save_main_session: Whether to pickle the main session for remote workers.
  """
  parser = argparse.ArgumentParser()
  # The default maps to two large Google Cloud Storage files (each ~12GB)
  # holding two subsequent day's worth (roughly) of data.
  parser.add_argument(
      '--input',
      type=str,
      default='gs://apache-beam-samples/game/gaming_data*.csv',
      help='Path to the data file(s) containing game data.')
  parser.add_argument(
      '--dataset',
      type=str,
      required=True,
      help='BigQuery Dataset to write tables to. '
      'Must already exist.')
  parser.add_argument(
      '--table_name',
      default='leader_board',
      help='The BigQuery table name. Should not already exist.')
  parser.add_argument(
      '--window_duration',
      type=int,
      default=60,
      help='Numeric value of fixed window duration, in minutes')
  parser.add_argument(
      '--start_min',
      type=str,
      default='1970-01-01-00-00',
      help='String representation of the first minute after '
      'which to generate results in the format: '
      'yyyy-MM-dd-HH-mm. Any input data timestamped '
      'prior to that minute won\'t be included in the '
      'sums.')
  parser.add_argument(
      '--stop_min',
      type=str,
      default='2100-01-01-00-00',
      help='String representation of the first minute for '
      'which to generate results in the format: '
      'yyyy-MM-dd-HH-mm. Any input data timestamped '
      'after to that minute won\'t be included in the '
      'sums.')
  args, pipeline_args = parser.parse_known_args(argv)
  options = PipelineOptions(pipeline_args)
  # We also require the --project option to access --dataset
  if options.view_as(GoogleCloudOptions).project is None:
    parser.print_usage()
    print(sys.argv[0] + ': error: argument --project is required')
    sys.exit(1)
  # We use the save_main_session option because one or more DoFn's in this
  # workflow rely on global context (e.g., a module imported at module level).
  options.view_as(SetupOptions).save_main_session = save_main_session
  with beam.Pipeline(options=options) as p:
    ( # pylint: disable=expression-not-assigned
        p
        | 'ReadInputText' >> beam.io.ReadFromText(args.input)
        | 'HourlyTeamScore' >> HourlyTeamScore(
            args.start_min, args.stop_min, args.window_duration)
        | 'TeamScoresDict' >> beam.ParDo(TeamScoresDict())
        # Schema for the output table; processing_time emitted by
        # TeamScoresDict is intentionally not written.
        | 'WriteTeamScoreSums' >> WriteToBigQuery(
            args.table_name,
            args.dataset,
            {
                'team': 'STRING',
                'total_score': 'INTEGER',
                'window_start': 'STRING',
            },
            options.view_as(GoogleCloudOptions).project))
# [END main]
if __name__ == '__main__':
logging.getLogger().setLevel(logging.INFO)
  run()
# pytype: skip-file
from __future__ import absolute_import
from past.builtins import long
from past.builtins import unicode
from apache_beam.options.value_provider import ValueProvider
# Protect against environments where apitools library is not available.
# pylint: disable=wrong-import-order, wrong-import-position
try:
from apitools.base.py import extra_types
except ImportError:
extra_types = None
# pylint: enable=wrong-import-order, wrong-import-position
_MAXINT64 = (1 << 63) - 1
_MININT64 = -(1 << 63)
def get_typed_value_descriptor(obj):
  """For internal use only; no backwards-compatibility guarantees.

  Converts a basic type into a @type/value dictionary.

  Args:
    obj: A bytes, unicode, bool, int, or float to be converted.

  Returns:
    A dictionary containing the keys ``@type`` and ``value`` with the value for
    the ``@type`` of appropriate type.

  Raises:
    TypeError: if the Python object has a type that is not supported.
  """
  # Check order matters: text first, then bool before int (bool is an int
  # subclass).
  type_checks = (
      ((bytes, unicode), 'Text'),
      (bool, 'Boolean'),
      (int, 'Integer'),
      (float, 'Float'),
  )
  for types, schema_name in type_checks:
    if isinstance(obj, types):
      return {'@type': 'http://schema.org/%s' % schema_name, 'value': obj}
  raise TypeError('Cannot get a type descriptor for %s.' % repr(obj))
def to_json_value(obj, with_type=False):
  """For internal use only; no backwards-compatibility guarantees.

  Converts Python objects into extra_types.JsonValue objects.

  Args:
    obj: Python object to be converted. Can be :data:`None`.
    with_type: If true then the basic types (``bytes``, ``unicode``, ``int``,
      ``float``, ``bool``) will be wrapped in ``@type:value`` dictionaries.
      Otherwise the straight value is encoded into a ``JsonValue``.

  Returns:
    A ``JsonValue`` object using ``JsonValue``, ``JsonArray`` and ``JsonObject``
    types for the corresponding values, lists, or dictionaries.

  Raises:
    TypeError: if the Python object contains a type that is not supported.

  The types supported are ``str``, ``bool``, ``list``, ``tuple``, ``dict``, and
  ``None``. The Dataflow API requires JsonValue(s) in many places, and it is
  quite convenient to be able to specify these hierarchical objects using
  Python syntax.
  """
  if obj is None:
    return extra_types.JsonValue(is_null=True)
  elif isinstance(obj, (list, tuple)):
    # Lists and tuples both map to JSON arrays, recursing into elements.
    return extra_types.JsonValue(
        array_value=extra_types.JsonArray(
            entries=[to_json_value(o, with_type=with_type) for o in obj]))
  elif isinstance(obj, dict):
    json_object = extra_types.JsonObject()
    for k, v in obj.items():
      json_object.properties.append(
          extra_types.JsonObject.Property(
              key=k, value=to_json_value(v, with_type=with_type)))
    return extra_types.JsonValue(object_value=json_object)
  elif with_type:
    # Wrap the basic value in a {'@type': ..., 'value': ...} descriptor and
    # encode that instead; containers above are never wrapped, only leaves.
    return to_json_value(get_typed_value_descriptor(obj), with_type=False)
  elif isinstance(obj, (str, unicode)):
    return extra_types.JsonValue(string_value=obj)
  elif isinstance(obj, bytes):
    # Assumes bytes hold UTF-8 encoded text.
    return extra_types.JsonValue(string_value=obj.decode('utf8'))
  elif isinstance(obj, bool):
    # NOTE: bool must be checked before int since bool subclasses int.
    return extra_types.JsonValue(boolean_value=obj)
  elif isinstance(obj, (int, long)):
    # JsonValue integers are 64-bit; reject anything outside that range.
    if _MININT64 <= obj <= _MAXINT64:
      return extra_types.JsonValue(integer_value=obj)
    else:
      raise TypeError('Can not encode {} as a 64-bit integer'.format(obj))
  elif isinstance(obj, float):
    return extra_types.JsonValue(double_value=obj)
  elif isinstance(obj, ValueProvider):
    # Runtime-valued options: encode the concrete value if available,
    # otherwise null.
    if obj.is_accessible():
      return to_json_value(obj.get())
    return extra_types.JsonValue(is_null=True)
  else:
    raise TypeError('Cannot convert %s to a JSON value.' % repr(obj))
def from_json_value(v):
  """For internal use only; no backwards-compatibility guarantees.

  Converts ``extra_types.JsonValue`` objects into Python objects.

  Args:
    v: ``JsonValue`` object to be converted.

  Returns:
    A Python object structured as values, lists, and dictionaries corresponding
    to ``JsonValue``, ``JsonArray`` and ``JsonObject`` types.

  Raises:
    TypeError: if the ``JsonValue`` object contains a type that is
      not supported.

  The types supported are ``str``, ``bool``, ``list``, ``dict``, and ``None``.
  The Dataflow API returns JsonValue(s) in many places and it is quite
  convenient to be able to convert these hierarchical objects to much simpler
  Python objects.
  """
  if isinstance(v, extra_types.JsonValue):
    # Exactly one of the JsonValue fields is populated; check each in turn.
    if v.string_value is not None:
      return v.string_value
    elif v.boolean_value is not None:
      return v.boolean_value
    elif v.integer_value is not None:
      return v.integer_value
    elif v.double_value is not None:
      return v.double_value
    elif v.array_value is not None:
      return from_json_value(v.array_value)
    elif v.object_value is not None:
      return from_json_value(v.object_value)
    elif v.is_null:
      return None
  elif isinstance(v, extra_types.JsonArray):
    return [from_json_value(e) for e in v.entries]
  elif isinstance(v, extra_types.JsonObject):
    return {p.key: from_json_value(p.value) for p in v.properties}
  # Falls through here for unsupported types and for a JsonValue with no
  # populated field.
  raise TypeError('Cannot convert %s from a JSON value.' % repr(v))
# pytype: skip-file
from __future__ import absolute_import
import logging
import socket
import threading
from oauth2client.client import GoogleCredentials
from apache_beam.utils import retry
# Protect against environments where apitools library is not available.
try:
from apitools.base.py.credentials_lib import GceAssertionCredentials
except ImportError:
GceAssertionCredentials = None
# When we are running in GCE, we can authenticate with VM credentials.
is_running_in_gce = False
# When we are running in GCE, this value is set based on worker startup
# information.
executing_project = None
_LOGGER = logging.getLogger(__name__)
# Only define the subclass when apitools is importable (see the guarded
# import above).
if GceAssertionCredentials is not None:

  class _GceAssertionCredentials(GceAssertionCredentials):
    """GceAssertionCredentials with retry wrapper.

    For internal use only; no backwards-compatibility guarantees.
    """
    @retry.with_exponential_backoff(
        retry_filter=retry.retry_on_server_errors_and_timeout_filter)
    def _do_refresh_request(self, http_request):
      # Retry transient server errors/timeouts while refreshing the token.
      return super(_GceAssertionCredentials,
                   self)._do_refresh_request(http_request)
def set_running_in_gce(worker_executing_project):
  """For internal use only; no backwards-compatibility guarantees.

  Informs the authentication library that we are running in GCE.

  When we are running in GCE, we have the option of using the VM metadata
  credentials for authentication to Google services.

  Args:
    worker_executing_project: The project running the workflow. This
      information comes from worker startup information.
  """
  global is_running_in_gce, executing_project
  is_running_in_gce = True
  executing_project = worker_executing_project
def get_service_credentials():
  """For internal use only; no backwards-compatibility guarantees.

  Get credentials to access Google services.

  Returns:
    A ``oauth2client.client.OAuth2Credentials`` object or None if credentials
    not found. Returned object is thread-safe.
  """
  # Delegates to the process-wide cached singleton in _Credentials.
  return _Credentials.get_service_credentials()
class _Credentials(object):
  """Process-wide, lazily-initialized cache for service credentials."""
  # Guards the one-time initialization below.
  _credentials_lock = threading.Lock()
  # True once _credentials has been computed (it may legitimately be None).
  _credentials_init = False
  _credentials = None
  @classmethod
  def get_service_credentials(cls):
    # Double-checked locking: fast path without the lock once initialized.
    if cls._credentials_init:
      return cls._credentials
    with cls._credentials_lock:
      if cls._credentials_init:
        return cls._credentials
      # apitools use urllib with the global timeout. Set it to 60 seconds
      # to prevent network related stuckness issues.
      if not socket.getdefaulttimeout():
        _LOGGER.info("Setting socket default timeout to 60 seconds.")
        socket.setdefaulttimeout(60)
      _LOGGER.info(
          "socket default timeout is %s seconds.", socket.getdefaulttimeout())
      cls._credentials = cls._get_service_credentials()
      cls._credentials_init = True
    return cls._credentials
  @staticmethod
  def _get_service_credentials():
    if is_running_in_gce:
      # We are currently running as a GCE taskrunner worker.
      return _GceAssertionCredentials(user_agent='beam-python-sdk/1.0')
    else:
      # Scopes covering every Google service the SDK may talk to.
      client_scopes = [
          'https://www.googleapis.com/auth/bigquery',
          'https://www.googleapis.com/auth/cloud-platform',
          'https://www.googleapis.com/auth/devstorage.full_control',
          'https://www.googleapis.com/auth/userinfo.email',
          'https://www.googleapis.com/auth/datastore',
          'https://www.googleapis.com/auth/spanner.admin',
          'https://www.googleapis.com/auth/spanner.data'
      ]
      try:
        credentials = GoogleCredentials.get_application_default()
        credentials = credentials.create_scoped(client_scopes)
        logging.debug(
            'Connecting using Google Application Default '
            'Credentials.')
        return credentials
      except Exception as e:
        # Missing credentials are not fatal: proceed unauthenticated.
        _LOGGER.warning(
            'Unable to find default credentials to use: %s\n'
            'Connecting anonymously.',
            e)
        return None
# pytype: skip-file
from __future__ import absolute_import
from __future__ import division
from builtins import object
from typing import TYPE_CHECKING
from typing import Optional
from apache_beam.metrics.cells import MetricAggregator
from apache_beam.metrics.cells import MetricCell
from apache_beam.metrics.cells import MetricCellFactory
from apache_beam.utils.histogram import Histogram
if TYPE_CHECKING:
from apache_beam.utils.histogram import BucketType
class HistogramCell(MetricCell):
  """For internal use only; no backwards-compatibility guarantees.

  Tracks the current value and delta for a histogram metric.

  Each cell tracks the state of a metric independently per context per bundle.
  Therefore, each metric has a different cell in each bundle, that is later
  aggregated.

  This class is thread safe since underlying histogram object is thread safe.
  """
  def __init__(self, bucket_type):
    # Kept so reset() can rebuild an empty histogram with the same buckets.
    self._bucket_type = bucket_type
    self.data = HistogramAggregator(bucket_type).identity_element()
  def reset(self):
    # Replace the data with a fresh, empty histogram.
    self.data = HistogramAggregator(self._bucket_type).identity_element()
  def combine(self, other):
    # type: (HistogramCell) -> HistogramCell
    # Returns a new cell; neither operand is mutated.
    result = HistogramCell(self._bucket_type)
    result.data = self.data.combine(other.data)
    return result
  def update(self, value):
    self.data.histogram.record(value)
  def get_cumulative(self):
    # type: () -> HistogramData
    return self.data.get_cumulative()
  def to_runner_api_monitoring_info(self, name, transform_id):
    # Histogram metric is currently worker-local and internal
    # use only. This method should be implemented when runners
    # support Histogram metric reporting.
    return None
class HistogramCellFactory(MetricCellFactory):
  """Factory producing HistogramCells for a fixed histogram bucket type.

  Factories with equal bucket types compare (and hash) equal so that cells
  keyed by factory are shared correctly.
  """
  def __init__(self, bucket_type):
    self._bucket_type = bucket_type

  def __call__(self):
    # Each call produces a fresh cell tracking an independent histogram.
    return HistogramCell(self._bucket_type)

  def __eq__(self, other):
    if not isinstance(other, HistogramCellFactory):
      return False
    return self._bucket_type == other._bucket_type

  def __ne__(self, other):
    # TODO(BEAM-5949): Needed for Python 2 compatibility; without it py2
    # falls back to identity comparison, unlike the sibling classes below.
    return not self == other

  def __hash__(self):
    return hash(self._bucket_type)
class HistogramResult(object):
  """Immutable view over final histogram data, exposing percentile shortcuts."""
  def __init__(self, data):
    # type: (HistogramData) -> None
    self.data = data
  def __eq__(self, other):
    if isinstance(other, HistogramResult):
      return self.data == other.data
    else:
      return False
  def __hash__(self):
    return hash(self.data)
  def __ne__(self, other):
    # TODO(BEAM-5949): Needed for Python 2 compatibility.
    return not self == other
  def __repr__(self):
    return '<HistogramResult({})>'.format(
        self.data.histogram.get_percentile_info())
  @property
  def p99(self):
    # 99th-percentile estimate from the underlying histogram.
    return self.data.histogram.p99()
  @property
  def p95(self):
    # 95th-percentile estimate from the underlying histogram.
    return self.data.histogram.p95()
  @property
  def p90(self):
    # 90th-percentile estimate from the underlying histogram.
    return self.data.histogram.p90()
class HistogramData(object):
  """For internal use only; no backwards-compatibility guarantees.

  The data structure that holds data about a histogram metric.

  This object is not thread safe, so it's not supposed to be modified
  outside the HistogramCell.
  """
  def __init__(self, histogram):
    self.histogram = histogram
  def __eq__(self, other):
    return self.histogram == other.histogram
  def __hash__(self):
    return hash(self.histogram)
  def __ne__(self, other):
    # TODO(BEAM-5949): Needed for Python 2 compatibility.
    return not self == other
  def __repr__(self):
    return 'HistogramData({})'.format(self.histogram.get_percentile_info())
  def get_cumulative(self):
    # type: () -> HistogramData
    # NOTE: shares the underlying histogram rather than copying it.
    return HistogramData(self.histogram)
  def combine(self, other):
    # type: (Optional[HistogramData]) -> HistogramData
    # Combining with None is the identity, matching aggregator conventions.
    if other is None:
      return self
    return HistogramData(self.histogram.combine(other.histogram))
class HistogramAggregator(MetricAggregator):
  """For internal use only; no backwards-compatibility guarantees.

  Aggregator for Histogram metric data during pipeline execution.

  Values aggregated should be ``HistogramData`` objects.
  """
  def __init__(self, bucket_type):
    # type: (BucketType) -> None
    self._bucket_type = bucket_type
  def identity_element(self):
    # type: () -> HistogramData
    # An empty histogram: combining with it leaves the other operand unchanged.
    return HistogramData(Histogram(self._bucket_type))
  def combine(self, x, y):
    # type: (HistogramData, HistogramData) -> HistogramData
    return x.combine(y)
  def result(self, x):
    # type: (HistogramData) -> HistogramResult
    return HistogramResult(x.get_cumulative())
# pytype: skip-file
# mypy: disallow-untyped-defs
from __future__ import absolute_import
import datetime
import logging
import threading
import time
from builtins import object
from typing import TYPE_CHECKING
from typing import Dict
from typing import Optional
from typing import Type
from typing import Union
from apache_beam.internal.metrics.cells import HistogramCellFactory
from apache_beam.metrics import monitoring_infos
from apache_beam.metrics.execution import MetricUpdater
from apache_beam.metrics.metric import Metrics as UserMetrics
from apache_beam.metrics.metricbase import Histogram
from apache_beam.metrics.metricbase import MetricName
if TYPE_CHECKING:
from apache_beam.metrics.cells import MetricCell
from apache_beam.metrics.cells import MetricCellFactory
from apache_beam.utils.histogram import BucketType
# Protect against environments where bigquery library is not available.
# pylint: disable=wrong-import-order, wrong-import-position
try:
from apitools.base.py.exceptions import HttpError
except ImportError:
pass
__all__ = ['Metrics']
_LOGGER = logging.getLogger(__name__)
class Metrics(object):
  """Internal-facing factory for URN-based counters and histogram metrics."""
  @staticmethod
  def counter(urn, labels=None, process_wide=False):
    # type: (str, Optional[Dict[str, str]], bool) -> UserMetrics.DelegatingCounter
    """Obtains or creates a Counter metric.

    Args:
      urn: URN to populate on a MonitoringInfo, when sending to RunnerHarness.
      labels: Labels to populate on a MonitoringInfo
      process_wide: Whether or not the metric is specific to the current bundle
          or should be calculated for the entire process.

    Returns:
      A Counter object.
    """
    return UserMetrics.DelegatingCounter(
        MetricName(namespace=None, name=None, urn=urn, labels=labels),
        process_wide=process_wide)

  @staticmethod
  def histogram(namespace, name, bucket_type, logger=None):
    # type: (Union[Type, str], str, BucketType, Optional[MetricLogger]) -> Metrics.DelegatingHistogram
    """Obtains or creates a Histogram metric.

    Args:
      namespace: A class or string that gives the namespace to a metric
      name: A string that gives a unique name to a metric
      bucket_type: A type of bucket used in a histogram. A subclass of
        apache_beam.utils.histogram.BucketType
      logger: MetricLogger for logging locally aggregated metric

    Returns:
      A Histogram object.
    """
    namespace = UserMetrics.get_namespace(namespace)
    return Metrics.DelegatingHistogram(
        MetricName(namespace, name), bucket_type, logger)

  class DelegatingHistogram(Histogram):
    """Metrics Histogram that Delegates functionality to MetricsEnvironment."""
    def __init__(self, metric_name, bucket_type, logger):
      # type: (MetricName, BucketType, Optional[MetricLogger]) -> None
      super(Metrics.DelegatingHistogram, self).__init__(metric_name)
      self.metric_name = metric_name
      self.cell_type = HistogramCellFactory(bucket_type)
      # Optional MetricLogger that also accumulates updates locally.
      self.logger = logger
      self.updater = MetricUpdater(self.cell_type, self.metric_name)
    def update(self, value):
      # type: (object) -> None
      # Forward the value to the metrics environment, and mirror it into the
      # local logger when one was supplied.
      self.updater(value)
      if self.logger:
        self.logger.update(self.cell_type, self.metric_name, value)
class MetricLogger(object):
  """Simple object to locally aggregate and log metrics.

  This class is experimental. No backwards-compatibility guarantees.
  """
  def __init__(self):
    # type: () -> None
    self._metric = dict()  # type: Dict[MetricName, MetricCell]
    # Guards _metric creation and the periodic logging below.
    self._lock = threading.Lock()
    self._last_logging_millis = int(time.time() * 1000)
    # Minimum interval between log outputs (3 minutes by default).
    self.minimum_logging_frequency_msec = 180000
  def update(self, cell_type, metric_name, value):
    # type: (Union[Type[MetricCell], MetricCellFactory], MetricName, object) -> None
    cell = self._get_metric_cell(cell_type, metric_name)
    cell.update(value)
  def _get_metric_cell(self, cell_type, metric_name):
    # type: (Union[Type[MetricCell], MetricCellFactory], MetricName) -> MetricCell
    # Lazily create one cell per metric name.
    with self._lock:
      if metric_name not in self._metric:
        self._metric[metric_name] = cell_type()
    return self._metric[metric_name]
  def log_metrics(self, reset_after_logging=False):
    # type: (bool) -> None
    # Non-blocking acquire: if another thread is already logging, skip this
    # round rather than wait.
    if self._lock.acquire(False):
      try:
        current_millis = int(time.time() * 1000)
        # Rate-limit the output to minimum_logging_frequency_msec.
        if ((current_millis - self._last_logging_millis) >
            self.minimum_logging_frequency_msec):
          logging_metric_info = [
              '[Locally aggregated metrics since %s]' %
              datetime.datetime.fromtimestamp(
                  self._last_logging_millis / 1000.0)
          ]
          for name, cell in self._metric.items():
            logging_metric_info.append('%s: %s' % (name, cell.get_cumulative()))
          _LOGGER.info('\n'.join(logging_metric_info))
          if reset_after_logging:
            self._metric = dict()
          self._last_logging_millis = current_millis
      finally:
        self._lock.release()
class ServiceCallMetric(object):
  """Metric class which records Service API call metrics.

  This class will capture a request count metric for the specified
  request_count_urn and base_labels.

  When call() is invoked the status must be provided, which will
  be converted to a canonical GCP status code, if possible.

  TODO(ajamato): Add Request latency metric.
  """
  def __init__(self, request_count_urn, base_labels=None):
    # type: (str, Optional[Dict[str, str]]) -> None
    """Args:
      request_count_urn: URN used for the request-count counter.
      base_labels: Labels attached to every recorded counter; a status label
        is added per call.
    """
    self.base_labels = base_labels if base_labels else {}
    self.request_count_urn = request_count_urn

  def call(self, status):
    # type: (Union[int, str, HttpError]) -> None
    """Record the status of the call into appropriate metrics."""
    canonical_status = self.convert_to_canonical_status_string(status)
    additional_labels = {monitoring_infos.STATUS_LABEL: canonical_status}
    # base_labels first so the per-call status label wins on key collision.
    labels = dict(
        list(self.base_labels.items()) + list(additional_labels.items()))
    request_counter = Metrics.counter(
        urn=self.request_count_urn, labels=labels, process_wide=True)
    request_counter.inc()

  def convert_to_canonical_status_string(self, status):
    # type: (Union[int, str, HttpError]) -> str
    """Converts a status to a canonical GCP status code string.

    Strings are passed through lowercased; ints and HttpErrors are mapped
    via the HTTP -> canonical-status table below. Unmapped codes are
    returned as their decimal string.
    """
    http_status_code = None
    if isinstance(status, int):
      http_status_code = status
    elif isinstance(status, str):
      return status.lower()
    elif isinstance(status, HttpError):
      http_status_code = int(status.status_code)
    http_to_canonical_gcp_status = {
        200: 'ok',
        400: 'out_of_range',
        401: 'unauthenticated',
        403: 'permission_denied',
        404: 'not_found',
        409: 'already_exists',
        429: 'resource_exhausted',
        499: 'cancelled',
        500: 'internal',
        501: 'not_implemented',
        503: 'unavailable',
        504: 'deadline_exceeded'
    }
    if (http_status_code is not None and
        http_status_code in http_to_canonical_gcp_status):
      return http_to_canonical_gcp_status[http_status_code]
    # Note: falls through to 'None' for unrecognized status types.
    return str(http_status_code)
# pytype: skip-file
from __future__ import absolute_import
import inspect
import itertools
import logging
import sys
import traceback
import types
from builtins import next
from builtins import object
from builtins import zip
from typing import Any
from typing import Callable
from typing import Dict
from typing import Iterable
from typing import List
from typing import NamedTuple
from typing import Optional
from typing import Tuple
from typing import TypeVar
from typing import Union
from apache_beam.typehints import native_type_compatibility
from apache_beam.typehints import typehints
from apache_beam.typehints.native_type_compatibility import convert_to_beam_type
from apache_beam.typehints.typehints import CompositeTypeHintError
from apache_beam.typehints.typehints import SimpleTypeHintError
from apache_beam.typehints.typehints import check_constraint
from apache_beam.typehints.typehints import validate_composite_type_param
try:
  import funcsigs  # Python 2 only.
except ImportError:
  funcsigs = None

__all__ = [
    'disable_type_annotations',
    'no_annotations',
    'with_input_types',
    'with_output_types',
    'WithTypeHints',
    'TypeCheckError',
]

T = TypeVar('T')
WithTypeHintsT = TypeVar('WithTypeHintsT', bound='WithTypeHints')  # pylint: disable=invalid-name

# This is missing in the builtin types module. str.upper is arbitrary, any
# method on a C-implemented type will do.
# pylint: disable=invalid-name
_MethodDescriptorType = type(str.upper)
# pylint: enable=invalid-name

# Catch-all hints assigned to un-annotated *args / **kwargs parameters.
_ANY_VAR_POSITIONAL = typehints.Tuple[typehints.Any, ...]
_ANY_VAR_KEYWORD = typehints.Dict[typehints.Any, typehints.Any]

# Global switch flipped by disable_type_annotations(); when True,
# IOTypeHints.from_callable() ignores annotations entirely.
_disable_from_callable = False

try:
  _original_getfullargspec = inspect.getfullargspec
  _use_full_argspec = True
except AttributeError:  # Python 2
  _original_getfullargspec = inspect.getargspec  # type: ignore
  _use_full_argspec = False
def getfullargspec(func):
  """Python 2 argspec helper with fallbacks for classes and callables.

  Python 3: Use get_signature instead.
  """
  assert sys.version_info < (3, ), 'This method should not be used in Python 3'
  try:
    return _original_getfullargspec(func)
  except TypeError:
    if isinstance(func, type):
      # For classes, inspect __init__ and drop the leading 'self'.
      argspec = getfullargspec(func.__init__)
      del argspec.args[0]
      return argspec
    elif callable(func):
      try:
        return _original_getfullargspec(func.__call__)
      except TypeError:
        # Return an ArgSpec with at least one positional argument,
        # and any number of other (positional or keyword) arguments
        # whose name won't match any real argument.
        # Arguments with the %unknown% prefix will be ignored in the type
        # checking code.
        if _use_full_argspec:
          return inspect.FullArgSpec(['_'],
                                     '__unknown__varargs',
                                     '__unknown__keywords', (), [], {}, {})
        else:  # Python 2
          return inspect.ArgSpec(['_'],
                                 '__unknown__varargs',
                                 '__unknown__keywords', ())
    else:
      raise
def get_signature(func):
  """Like inspect.signature(), but supports Py2 as well.

  This module uses inspect.signature instead of getfullargspec since in the
  latter: 'the "self" parameter is always reported, even for bound methods'
  https://github.com/python/cpython/blob/44f91c388a6f4da9ed3300df32ca290b8aa104ea/Lib/inspect.py#L1103

  Falls back to a catch-all signature (one positional plus var-args /
  var-kwargs placeholders) when the object has no introspectable signature.
  """
  # Fall back on funcsigs if inspect module doesn't have 'signature'; prefer
  # inspect.signature over funcsigs.signature if both are available.
  if hasattr(inspect, 'signature'):
    inspect_ = inspect
  else:
    inspect_ = funcsigs
  try:
    signature = inspect_.signature(func)
  except ValueError:
    # Fall back on a catch-all signature.
    params = [
        inspect_.Parameter('_', inspect_.Parameter.POSITIONAL_OR_KEYWORD),
        inspect_.Parameter(
            '__unknown__varargs', inspect_.Parameter.VAR_POSITIONAL),
        inspect_.Parameter(
            '__unknown__keywords', inspect_.Parameter.VAR_KEYWORD)
    ]
    signature = inspect_.Signature(params)
  # This is a specialization to hint the first argument of certain builtins,
  # such as str.strip.
  if isinstance(func, _MethodDescriptorType):
    params = list(signature.parameters.values())
    if params[0].annotation == params[0].empty:
      params[0] = params[0].replace(annotation=func.__objclass__)
      signature = signature.replace(parameters=params)
  # This is a specialization to hint the return value of type callables.
  if (signature.return_annotation == signature.empty and
      isinstance(func, type)):
    signature = signature.replace(return_annotation=typehints.normalize(func))
  return signature
def no_annotations(fn):
  """Decorator that prevents Beam from using type hint annotations on a
  callable.

  Marks the callable in place and returns it unchanged, so it can be used
  either as a decorator or called directly.
  """
  fn._beam_no_annotations = True
  return fn
def disable_type_annotations():
  """Prevent Beam from using type hint annotations to determine input and output
  types of transforms.

  This setting applies globally. Once set it is never re-enabled; callers
  that need per-callable opt-out should use @no_annotations instead.
  """
  global _disable_from_callable
  _disable_from_callable = True
class IOTypeHints(NamedTuple(
    'IOTypeHints',
    [('input_types', Optional[Tuple[Tuple[Any, ...], Dict[str, Any]]]),
     ('output_types', Optional[Tuple[Tuple[Any, ...], Dict[str, Any]]]),
     ('origin', List[str])])):
  """Encapsulates all type hint information about a Dataflow construct.

  This should primarily be used via the WithTypeHints mixin class, though
  may also be attached to other objects (such as Python functions).

  Attributes:
    input_types: (tuple, dict) List of typing types, and an optional dictionary.
      May be None. The list and dict correspond to args and kwargs.
    output_types: (tuple, dict) List of typing types, and an optional dictionary
      (unused). Only the first element of the list is used. May be None.
    origin: (List[str]) Stack of tracebacks of method calls used to create this
      instance.
  """
  # Number of stack frames recorded per origin entry.
  traceback_limit = 5

  @classmethod
  def _make_origin(cls, bases, tb=True, msg=()):
    # type: (List[IOTypeHints], bool, Iterable[str]) -> List[str]
    """Builds an origin trace from msg, the current stack, and bases' origins."""
    if msg:
      res = list(msg)
    else:
      res = []
    if tb:
      # Omit this method and the IOTypeHints method that called it.
      num_frames_skip = 2
      tbs = traceback.format_stack(limit=cls.traceback_limit +
                                   num_frames_skip)[:-num_frames_skip]
      # tb is a list of strings in the form of 'File ...\n[code]\n'. Split into
      # single lines and flatten.
      res += list(
          itertools.chain.from_iterable(s.strip().split('\n') for s in tbs))
    bases = [base for base in bases if base.origin]
    if bases:
      res += ['', 'based on:']
      for i, base in enumerate(bases):
        if i > 0:
          res += ['', 'and:']
        res += ['  ' + str(base)]
        res += ['    ' + s for s in base.origin]
    return res

  @classmethod
  def empty(cls):
    # type: () -> IOTypeHints
    """Construct a base IOTypeHints object with no hints."""
    return IOTypeHints(None, None, [])

  @classmethod
  def from_callable(cls, fn):
    # type: (Callable) -> Optional[IOTypeHints]
    """Construct an IOTypeHints object from a callable's signature.

    Supports Python 3 annotations. For partial annotations, sets unknown types
    to Any, _ANY_VAR_POSITIONAL, or _ANY_VAR_KEYWORD.

    Returns:
      A new IOTypeHints or None if no annotations found.
    """
    if _disable_from_callable or getattr(fn, '_beam_no_annotations', False):
      return None
    signature = get_signature(fn)
    if (all(param.annotation == param.empty
            for param in signature.parameters.values()) and
        signature.return_annotation == signature.empty):
      return None
    input_args = []
    input_kwargs = {}
    for param in signature.parameters.values():
      if param.annotation == param.empty:
        # Un-annotated parameters get catch-all hints by parameter kind.
        if param.kind == param.VAR_POSITIONAL:
          input_args.append(_ANY_VAR_POSITIONAL)
        elif param.kind == param.VAR_KEYWORD:
          input_kwargs[param.name] = _ANY_VAR_KEYWORD
        elif param.kind == param.KEYWORD_ONLY:
          input_kwargs[param.name] = typehints.Any
        else:
          input_args.append(typehints.Any)
      else:
        if param.kind in [param.KEYWORD_ONLY, param.VAR_KEYWORD]:
          input_kwargs[param.name] = convert_to_beam_type(param.annotation)
        else:
          assert param.kind in [param.POSITIONAL_ONLY,
                                param.POSITIONAL_OR_KEYWORD,
                                param.VAR_POSITIONAL], \
              'Unsupported Parameter kind: %s' % param.kind
          input_args.append(convert_to_beam_type(param.annotation))
    output_args = []
    if signature.return_annotation != signature.empty:
      output_args.append(convert_to_beam_type(signature.return_annotation))
    else:
      output_args.append(typehints.Any)
    name = getattr(fn, '__name__', '<unknown>')
    msg = ['from_callable(%s)' % name, '  signature: %s' % signature]
    if hasattr(fn, '__code__'):
      msg.append(
          '  File "%s", line %d' %
          (fn.__code__.co_filename, fn.__code__.co_firstlineno))
    return IOTypeHints(
        input_types=(tuple(input_args), input_kwargs),
        output_types=(tuple(output_args), {}),
        origin=cls._make_origin([], tb=False, msg=msg))

  def with_input_types(self, *args, **kwargs):
    # type: (...) -> IOTypeHints
    """Returns a copy with input hints replaced by args/kwargs."""
    return self._replace(
        input_types=(args, kwargs), origin=self._make_origin([self]))

  def with_output_types(self, *args, **kwargs):
    # type: (...) -> IOTypeHints
    """Returns a copy with output hints replaced by args/kwargs."""
    return self._replace(
        output_types=(args, kwargs), origin=self._make_origin([self]))

  def simple_output_type(self, context):
    """Returns the single positional output hint, or None if no output types.

    Raises:
      TypeError: If there is more than one positional hint, or any keyword
        output hints.
    """
    if self._has_output_types():
      args, kwargs = self.output_types
      if len(args) != 1 or kwargs:
        raise TypeError(
            'Expected single output type hint for %s but got: %s' %
            (context, self.output_types))
      return args[0]

  def has_simple_output_type(self):
    """Whether there's a single positional output type."""
    return (
        self.output_types and len(self.output_types[0]) == 1 and
        not self.output_types[1])

  def strip_pcoll(self):
    """Removes PCollection/PBegin/PDone wrappers from input and output hints."""
    from apache_beam.pipeline import Pipeline
    from apache_beam.pvalue import PBegin
    from apache_beam.pvalue import PDone

    return self.strip_pcoll_helper(self.input_types,
                                   self._has_input_types,
                                   'input_types',
                                   [Pipeline, PBegin],
                                   'This input type hint will be ignored '
                                   'and not used for type-checking purposes. '
                                   'Typically, input type hints for a '
                                   'PTransform are single (or nested) types '
                                   'wrapped by a PCollection, or PBegin.',
                                   'strip_pcoll_input()').\
        strip_pcoll_helper(self.output_types,
                           self.has_simple_output_type,
                           'output_types',
                           [PDone, None],
                           'This output type hint will be ignored '
                           'and not used for type-checking purposes. '
                           'Typically, output type hints for a '
                           'PTransform are single (or nested) types '
                           'wrapped by a PCollection, PDone, or None.',
                           'strip_pcoll_output()')

  def strip_pcoll_helper(
      self,
      my_type,  # type: Any
      has_my_type,  # type: Callable[[], bool]
      my_key,  # type: str
      special_containers,  # type: List[Union[PBegin, PDone, PCollection]]
      error_str,  # type: str
      source_str  # type: str
  ):
    # type: (...) -> IOTypeHints
    """Strips one side's container hint; warns and clears it when malformed."""
    from apache_beam.pvalue import PCollection

    if not has_my_type() or not my_type or len(my_type[0]) != 1:
      return self

    my_type = my_type[0][0]

    if isinstance(my_type, typehints.AnyTypeConstraint):
      return self

    special_containers += [PCollection]
    kwarg_dict = {}

    if (my_type not in special_containers and
        getattr(my_type, '__origin__', None) != PCollection):
      logging.warning(error_str + ' Got: %s instead.' % my_type)
      kwarg_dict[my_key] = None
      return self._replace(
          origin=self._make_origin([self], tb=False, msg=[source_str]),
          **kwarg_dict)

    if (getattr(my_type, '__args__', -1) in [-1, None] or
        len(my_type.__args__) == 0):
      # e.g. PCollection (or PBegin/PDone)
      kwarg_dict[my_key] = ((typehints.Any, ), {})
    else:
      # e.g. PCollection[type]
      kwarg_dict[my_key] = ((convert_to_beam_type(my_type.__args__[0]), ), {})

    return self._replace(
        origin=self._make_origin([self], tb=False, msg=[source_str]),
        **kwarg_dict)

  def strip_iterable(self):
    # type: () -> IOTypeHints
    """Removes outer Iterable (or equivalent) from output type.

    Only affects instances with simple output types, otherwise is a no-op.
    Does not modify self.

    Designed to be used with type hints from callables of ParDo, FlatMap, DoFn.
    Output type may be Optional[T], in which case the result of stripping T is
    used as the output type.
    Output type may be None/NoneType, in which case nothing is done.

    Example: Generator[Tuple(int, int)] becomes Tuple(int, int)

    Returns:
      A copy of this instance with a possibly different output type.

    Raises:
      ValueError if output type is simple and not iterable.
    """
    if self.output_types is None or not self.has_simple_output_type():
      return self
    output_type = self.output_types[0][0]
    if output_type is None or isinstance(output_type, type(None)):
      return self
    # If output_type == Optional[T]: output_type = T.
    if isinstance(output_type, typehints.UnionConstraint):
      types = list(output_type.union_types)
      if len(types) == 2:
        try:
          types.remove(type(None))
          output_type = types[0]
        except ValueError:
          pass
    yielded_type = typehints.get_yielded_type(output_type)
    return self._replace(
        output_types=((yielded_type, ), {}),
        origin=self._make_origin([self], tb=False, msg=['strip_iterable()']))

  def with_defaults(self, hints):
    # type: (Optional[IOTypeHints]) -> IOTypeHints
    """Returns self with any missing input/output hints filled from hints."""
    if not hints:
      return self
    if not self:
      return hints
    if self._has_input_types():
      input_types = self.input_types
    else:
      input_types = hints.input_types
    if self._has_output_types():
      output_types = self.output_types
    else:
      output_types = hints.output_types
    res = IOTypeHints(
        input_types,
        output_types,
        self._make_origin([self, hints], tb=False, msg=['with_defaults()']))
    if res == self:
      return self  # Don't needlessly increase origin traceback length.
    else:
      return res

  def _has_input_types(self):
    return self.input_types is not None and any(self.input_types)

  def _has_output_types(self):
    return self.output_types is not None and any(self.output_types)

  def __bool__(self):
    return self._has_input_types() or self._has_output_types()

  def __repr__(self):
    return 'IOTypeHints[inputs=%s, outputs=%s]' % (
        self.input_types, self.output_types)

  def debug_str(self):
    return '\n'.join([self.__repr__()] + self.origin)

  def __eq__(self, other):
    # Treats "no hints" uniformly: None and empty ((), {}) compare equal.
    def same(a, b):
      if a is None or not any(a):
        return b is None or not any(b)
      else:
        return a == b

    return (
        same(self.input_types, other.input_types) and
        same(self.output_types, other.output_types))

  def __ne__(self, other):
    return not self == other

  def __hash__(self):
    # NOTE(review): hashes the repr, so instances that __eq__ considers equal
    # (e.g. input_types=None vs ((), {})) can hash differently -- confirm
    # whether any dict/set usage depends on this.
    return hash(str(self))

  def __reduce__(self):
    # Don't include "origin" debug information in pickled form.
    return (IOTypeHints, (self.input_types, self.output_types, []))
class WithTypeHints(object):
  """A mixin class that provides the ability to set and retrieve type hints.
  """
  def __init__(self, *unused_args, **unused_kwargs):
    self._type_hints = IOTypeHints.empty()

  def _get_or_create_type_hints(self):
    # type: () -> IOTypeHints
    # __init__ may have not been called
    try:
      # Only return an instance bound to self (see BEAM-8629).
      return self.__dict__['_type_hints']
    except KeyError:
      self._type_hints = IOTypeHints.empty()
      return self._type_hints

  def get_type_hints(self):
    """Gets and/or initializes type hints for this object.

    If type hints have not been set, attempts to initialize type hints in this
    order:
    - Using self.default_type_hints().
    - Using self.__class__ type hints.
    """
    return (
        self._get_or_create_type_hints().with_defaults(
            self.default_type_hints()).with_defaults(
                get_type_hints(self.__class__)))

  def _set_type_hints(self, type_hints):
    # type: (IOTypeHints) -> None
    self._type_hints = type_hints

  def default_type_hints(self):
    # Subclasses may override to supply hints; None means "no defaults".
    return None

  def with_input_types(self, *arg_hints, **kwarg_hints):
    # type: (WithTypeHintsT, *Any, **Any) -> WithTypeHintsT
    """Sets input hints (converted to Beam types) and returns self."""
    arg_hints = native_type_compatibility.convert_to_beam_types(arg_hints)
    kwarg_hints = native_type_compatibility.convert_to_beam_types(kwarg_hints)
    self._type_hints = self._get_or_create_type_hints().with_input_types(
        *arg_hints, **kwarg_hints)
    return self

  def with_output_types(self, *arg_hints, **kwarg_hints):
    # type: (WithTypeHintsT, *Any, **Any) -> WithTypeHintsT
    """Sets output hints (converted to Beam types) and returns self."""
    arg_hints = native_type_compatibility.convert_to_beam_types(arg_hints)
    kwarg_hints = native_type_compatibility.convert_to_beam_types(kwarg_hints)
    self._type_hints = self._get_or_create_type_hints().with_output_types(
        *arg_hints, **kwarg_hints)
    return self
class TypeCheckError(Exception):
  """Raised when a type hint is violated or is itself malformed."""
  pass
def _positional_arg_hints(arg, hints):
  """Returns the type of a (possibly tuple-packed) positional argument.

  E.g. for lambda ((a, b), c): None the single positional argument is (as
  returned by inspect) [[a, b], c] which should have type
  Tuple[Tuple[Int, Any], float] when applied to the type hints
  {a: int, b: Any, c: float}.
  """
  if not isinstance(arg, list):
    # A plain argument name: look up its hint, defaulting to Any.
    return hints.get(arg, typehints.Any)
  # A tuple-packed argument: recurse into each member and wrap in Tuple.
  member_hints = [_positional_arg_hints(member, hints) for member in arg]
  return typehints.Tuple[member_hints]
def _unpack_positional_arg_hints(arg, hint):
  """Unpacks the given hint according to the nested structure of arg.

  For example, if arg is [[a, b], c] and hint is Tuple[Any, int], then
  this function would return ((Any, Any), int) so it can be used in conjunction
  with inspect.getcallargs.

  Raises:
    TypeCheckError: If hint is not consistent with the tuple structure of arg.
  """
  if isinstance(arg, list):
    tuple_constraint = typehints.Tuple[[typehints.Any] * len(arg)]
    if not typehints.is_consistent_with(hint, tuple_constraint):
      raise TypeCheckError(
          'Bad tuple arguments for %s: expected %s, got %s' %
          (arg, tuple_constraint, hint))
    if isinstance(hint, typehints.TupleConstraint):
      return tuple(
          _unpack_positional_arg_hints(a, t) for a,
          t in zip(arg, hint.tuple_types))
    # Consistent but not an explicit Tuple (e.g. Any): expand element-wise.
    return (typehints.Any, ) * len(arg)
  return hint
def getcallargs_forhints(func, *typeargs, **typekwargs):
  """Like inspect.getcallargs, with support for declaring default args as Any.

  In Python 2, understands that Tuple[] and an Any unpack.

  Returns:
    (Dict[str, Any]) A dictionary from arguments names to values.
  """
  # Dispatch to the version-specific implementation.
  impl = (
      getcallargs_forhints_impl_py2
      if sys.version_info < (3, ) else getcallargs_forhints_impl_py3)
  return impl(func, typeargs, typekwargs)
def getcallargs_forhints_impl_py2(func, typeargs, typekwargs):
  """Python 2 implementation: binds hints via inspect.getcallargs.

  Relies on temporarily monkeypatching inspect.getargspec so that non-function
  callables (classes, objects with __call__) can be bound too.
  """
  argspec = getfullargspec(func)
  # Turn Tuple[x, y] into (x, y) so getcallargs can do the proper unpacking.
  packed_typeargs = [
      _unpack_positional_arg_hints(arg, hint)
      for (arg, hint) in zip(argspec.args, typeargs)
  ]
  packed_typeargs += list(typeargs[len(packed_typeargs):])

  # Monkeypatch inspect.getfullargspec to allow passing non-function objects.
  # getfullargspec (getargspec on Python 2) are used by inspect.getcallargs.
  # TODO(BEAM-5490): Reimplement getcallargs and stop relying on monkeypatch.
  inspect.getargspec = getfullargspec
  try:
    callargs = inspect.getcallargs(func, *packed_typeargs, **typekwargs)  # pylint: disable=deprecated-method
  except TypeError as e:
    raise TypeCheckError(e)
  finally:
    # Revert monkey-patch.
    inspect.getargspec = _original_getfullargspec

  if argspec.defaults:
    # Declare any default arguments to be Any.
    for k, var in enumerate(reversed(argspec.args)):
      if k >= len(argspec.defaults):
        break
      if callargs.get(var, None) is argspec.defaults[-k - 1]:
        callargs[var] = typehints.Any
  # Patch up varargs and keywords
  if argspec.varargs:
    # TODO(BEAM-8122): This will always assign _ANY_VAR_POSITIONAL. Should be
    # "callargs.get(...) or _ANY_VAR_POSITIONAL".
    callargs[argspec.varargs] = typekwargs.get(
        argspec.varargs, _ANY_VAR_POSITIONAL)
  varkw = argspec.keywords
  if varkw:
    # TODO(robertwb): Consider taking the union of key and value types.
    callargs[varkw] = typekwargs.get(varkw, _ANY_VAR_KEYWORD)
  # TODO(BEAM-5878) Support kwonlyargs.
  return callargs
def _normalize_var_positional_hint(hint):
  """Converts a var_positional hint into Tuple[Union[<types>], ...] form.

  Args:
    hint: (tuple) Should be either a tuple of one or more types, or a single
      Tuple[<type>, ...].

  Raises:
    TypeCheckError if hint does not have the right form.
  """
  if type(hint) is not tuple or not hint:
    raise TypeCheckError('Unexpected VAR_POSITIONAL value: %s' % hint)

  already_normalized = (
      len(hint) == 1 and
      isinstance(hint[0], typehints.TupleSequenceConstraint))
  if already_normalized:
    # Example: tuple(Tuple[Any, ...]) -> Tuple[Any, ...]
    return hint[0]
  # Example: tuple(int, str) -> Tuple[Union[int, str], ...]
  return typehints.Tuple[typehints.Union[hint], ...]
def _normalize_var_keyword_hint(hint, arg_name):
  """Converts a var_keyword hint into Dict[<key type>, <value type>] form.

  Args:
    hint: (dict) Should either contain a pair (arg_name,
      Dict[<key type>, <value type>]), or one or more possible types for the
      value.
    arg_name: (str) The keyword receiving this hint.

  Raises:
    TypeCheckError if hint does not have the right form.
  """
  if type(hint) is not dict or not hint:
    raise TypeCheckError('Unexpected VAR_KEYWORD value: %s' % hint)

  entries = list(hint.items())
  if len(entries) == 1:
    only_key, only_value = entries[0]
    if (only_key == arg_name and
        isinstance(only_value, typehints.DictConstraint)):
      # Example: dict(kwargs=Dict[str, Any]) -> Dict[str, Any]
      return only_value
  # Example: dict(k1=str, k2=int) -> Dict[str, Union[str,int]]
  return typehints.Dict[str, typehints.Union[list(hint.values())]]
def getcallargs_forhints_impl_py3(func, type_args, type_kwargs):
  """Bind type_args and type_kwargs to func.

  Works like inspect.getcallargs, with some modifications to support type hint
  checks.
  For unbound args, will use annotations and fall back to Any (or variants of
  Any).

  Returns:
    A mapping from parameter name to argument.

  Raises:
    TypeCheckError: If the hints cannot be bound to func's signature.
  """
  try:
    signature = get_signature(func)
  except ValueError as e:
    logging.warning('Could not get signature for function: %s: %s', func, e)
    return {}
  try:
    bindings = signature.bind(*type_args, **type_kwargs)
  except TypeError as e:
    # Might be raised due to too few or too many arguments.
    raise TypeCheckError(e)
  bound_args = bindings.arguments
  for param in signature.parameters.values():
    if param.name in bound_args:
      # Bound: unpack/convert variadic arguments.
      if param.kind == param.VAR_POSITIONAL:
        bound_args[param.name] = _normalize_var_positional_hint(
            bound_args[param.name])
      elif param.kind == param.VAR_KEYWORD:
        bound_args[param.name] = _normalize_var_keyword_hint(
            bound_args[param.name], param.name)
    else:
      # Unbound: must have a default or be variadic.
      if param.annotation != param.empty:
        bound_args[param.name] = param.annotation
      elif param.kind == param.VAR_POSITIONAL:
        bound_args[param.name] = _ANY_VAR_POSITIONAL
      elif param.kind == param.VAR_KEYWORD:
        bound_args[param.name] = _ANY_VAR_KEYWORD
      elif param.default is not param.empty:
        # Declare unbound parameters with defaults to be Any.
        bound_args[param.name] = typehints.Any
      else:
        # This case should be caught by signature.bind() above.
        raise ValueError('Unexpected unbound parameter: %s' % param.name)
  return dict(bound_args)
def get_type_hints(fn):
  # type: (Any) -> IOTypeHints
  """Gets the type hint associated with an arbitrary object fn.

  Always returns a valid IOTypeHints object, creating one if necessary.
  Caches the created hints on fn when the object allows attribute assignment.
  """
  # pylint: disable=protected-access
  if not hasattr(fn, '_type_hints'):
    try:
      fn._type_hints = IOTypeHints.empty()
    except (AttributeError, TypeError):
      # Can't add arbitrary attributes to this object,
      # but might have some restrictions anyways...
      hints = IOTypeHints.empty()
      # Python 3.7 introduces annotations for _MethodDescriptorTypes.
      if isinstance(fn, _MethodDescriptorType) and sys.version_info < (3, 7):
        hints = hints.with_input_types(fn.__objclass__)  # type: ignore
      return hints
  return fn._type_hints
  # pylint: enable=protected-access
def with_input_types(*positional_hints, **keyword_hints):
  # type: (*Any, **Any) -> Callable[[T], T]
  """A decorator that type-checks defined type-hints with passed func arguments.

  All type-hinted arguments can be specified using positional arguments,
  keyword arguments, or a mix of both. Additionally, all function arguments
  must be type-hinted in totality if even one parameter is type-hinted.

  Once fully decorated, if the arguments passed to the resulting function
  violate the type-hint constraints defined, a :class:`TypeCheckError`
  detailing the error will be raised.

  To be used as:

  .. testcode::

    from apache_beam.typehints import with_input_types

    @with_input_types(str)
    def upper(s):
      return s.upper()

  Or:

  .. testcode::

    from apache_beam.typehints import with_input_types
    from apache_beam.typehints import List
    from apache_beam.typehints import Tuple

    @with_input_types(ls=List[Tuple[int, int]])
    def increment(ls):
      [(i + 1, j + 1) for (i,j) in ls]

  Args:
    *positional_hints: Positional type-hints having identical order as the
      function's formal arguments. Values for this argument must either be a
      built-in Python type or an instance of a
      :class:`~apache_beam.typehints.typehints.TypeConstraint` created by
      'indexing' a
      :class:`~apache_beam.typehints.typehints.CompositeTypeHint` instance
      with a type parameter.
    **keyword_hints: Keyword arguments mirroring the names of the parameters to
      the decorated functions. The value of each keyword argument must either
      be one of the allowed built-in Python types, a custom class, or an
      instance of a :class:`~apache_beam.typehints.typehints.TypeConstraint`
      created by 'indexing' a
      :class:`~apache_beam.typehints.typehints.CompositeTypeHint` instance
      with a type parameter.

  Raises:
    :class:`ValueError`: If not all function arguments have
      corresponding type-hints specified. Or if the inner wrapper function isn't
      passed a function object.
    :class:`TypeCheckError`: If any of the passed type-hint
      constraints are not a type or
      :class:`~apache_beam.typehints.typehints.TypeConstraint` instance.

  Returns:
    The original function decorated such that it enforces type-hint constraints
    for all received function arguments.
  """
  converted_positional_hints = (
      native_type_compatibility.convert_to_beam_types(positional_hints))
  converted_keyword_hints = (
      native_type_compatibility.convert_to_beam_types(keyword_hints))
  del positional_hints
  del keyword_hints

  def annotate_input_types(f):
    # Validation is only performed for plain functions.
    if isinstance(f, types.FunctionType):
      for t in (list(converted_positional_hints) +
                list(converted_keyword_hints.values())):
        validate_composite_type_param(
            t, error_msg_prefix='All type hint arguments')

    th = getattr(f, '_type_hints', IOTypeHints.empty()).with_input_types(
        *converted_positional_hints, **converted_keyword_hints)
    f._type_hints = th  # pylint: disable=protected-access
    return f

  return annotate_input_types
def with_output_types(*return_type_hint, **kwargs):
  # type: (*Any, **Any) -> Callable[[T], T]
  """A decorator that type-checks defined type-hints for return value(s).

  This decorator will type-check the return value(s) of the decorated function.

  Only a single type-hint is accepted to specify the return type of the return
  value. If the function to be decorated has multiple return values, then one
  should use: ``Tuple[type_1, type_2]`` to annotate the types of the return
  values.

  If the ultimate return value for the function violates the specified type-hint
  a :class:`TypeCheckError` will be raised detailing the type-constraint
  violation.

  This decorator is intended to be used like:

  .. testcode::

    from apache_beam.typehints import with_output_types
    from apache_beam.typehints import Set

    class Coordinate(object):
      def __init__(self, x, y):
        self.x = x
        self.y = y

    @with_output_types(Set[Coordinate])
    def parse_ints(ints):
      return {Coordinate(i, i) for i in ints}

  Or with a simple type-hint:

  .. testcode::

    from apache_beam.typehints import with_output_types

    @with_output_types(bool)
    def negate(p):
      return not p if p else p

  Args:
    *return_type_hint: A type-hint specifying the proper return type of the
      function. This argument should either be a built-in Python type or an
      instance of a :class:`~apache_beam.typehints.typehints.TypeConstraint`
      created by 'indexing' a
      :class:`~apache_beam.typehints.typehints.CompositeTypeHint`.
    **kwargs: Not used.

  Raises:
    :class:`ValueError`: If any kwarg parameters are passed in,
      or the length of **return_type_hint** is greater than ``1``. Or if the
      inner wrapper function isn't passed a function object.
    :class:`TypeCheckError`: If the **return_type_hint** object is
      an invalid type-hint.

  Returns:
    The original function decorated such that it enforces type-hint constraints
    for all return values.
  """
  if kwargs:
    raise ValueError(
        "All arguments for the 'returns' decorator must be "
        "positional arguments.")

  if len(return_type_hint) != 1:
    raise ValueError(
        "'returns' accepts only a single positional argument. In "
        "order to specify multiple return types, use the 'Tuple' "
        "type-hint.")

  return_type_hint = native_type_compatibility.convert_to_beam_type(
      return_type_hint[0])

  validate_composite_type_param(
      return_type_hint, error_msg_prefix='All type hint arguments')

  def annotate_output_types(f):
    th = getattr(f, '_type_hints', IOTypeHints.empty())
    f._type_hints = th.with_output_types(return_type_hint)  # pylint: disable=protected-access
    return f

  return annotate_output_types
def _check_instance_type(
    type_constraint, instance, var_name=None, verbose=False):
  """A helper function to report type-hint constraint violations.

  Args:
    type_constraint: An instance of a 'TypeConstraint' or a built-in Python
      type.
    instance: The candidate object which will be checked to satisfy
      'type_constraint'.
    var_name: If 'instance' is an argument, then the actual name for the
      parameter in the original function definition.
    verbose: If True, the offending instance's value is included in the
      error message.

  Raises:
    TypeCheckError: If 'instance' fails to meet the type-constraint of
      'type_constraint'.
  """
  hint_type = (
      "argument: '%s'" % var_name if var_name is not None else 'return type')

  try:
    check_constraint(type_constraint, instance)
  except SimpleTypeHintError:
    if verbose:
      verbose_instance = '%s, ' % instance
    else:
      verbose_instance = ''
    raise TypeCheckError(
        'Type-hint for %s violated. Expected an '
        'instance of %s, instead found %san instance of %s.' %
        (hint_type, type_constraint, verbose_instance, type(instance)))
  except CompositeTypeHintError as e:
    raise TypeCheckError('Type-hint for %s violated: %s' % (hint_type, e))
def _interleave_type_check(type_constraint, var_name=None):
  """Lazily type-check the type-hint for a lazily generated sequence type.

  This function can be applied as a decorator or called manually in a curried
  manner:
    * @_interleave_type_check(List[int])
      def gen():
        yield 5

    or

    * gen = _interleave_type_check(Tuple[int, int], 'coord_gen')(gen)

  As a result, all type-checking for the passed generator will occur at 'yield'
  time. This way, we avoid having to deplete the generator in order to
  type-check it.

  Args:
    type_constraint: An instance of a TypeConstraint. The output yielded of
      'gen' will be type-checked according to this type constraint.
    var_name: The variable name bound to 'gen' if type-checking a function
      argument. Used solely for templating in error message generation.

  Returns:
    A function which takes a generator as an argument and returns a wrapped
    version of the generator that interleaves type-checking at 'yield'
    iteration. If the generator received is already wrapped, then it is simply
    returned to avoid nested wrapping.
  """
  def wrapper(gen):
    if isinstance(gen, GeneratorWrapper):
      return gen
    return GeneratorWrapper(
        gen, lambda x: _check_instance_type(type_constraint, x, var_name))

  return wrapper
class GeneratorWrapper(object):
  """A wrapper around a generator, allows execution of a callback per yield.

  Additionally, wrapping a generator with this class allows one to assign
  arbitrary attributes to a generator object just as with a function object.

  Attributes:
    internal_gen: An instance of a generator object. As part of 'step' of the
      generator, the yielded object will be passed to 'interleave_func'.
    interleave_func: A callback accepting a single argument. This function will
      be called (for its side effect only) with the result of each yielded
      'step' in the internal generator.
  """
  def __init__(self, gen, interleave_func):
    self.internal_gen = gen
    self.interleave_func = interleave_func
  def __getattr__(self, attr):
    # TODO(laolu): May also want to intercept 'send' in the future if we move to
    # a GeneratorHint with 3 type-params:
    #   * Generator[send_type, return_type, yield_type]
    # NOTE(review): __getattr__ only fires when normal lookup fails, and
    # __next__/__iter__ are defined on this class, so the two branches below
    # look unreachable in practice (they also *call* the methods rather than
    # returning them) -- confirm before relying on this path.
    if attr == '__next__':
      return self.__next__()
    elif attr == '__iter__':
      return self.__iter__()
    # Everything else (e.g. 'close', 'throw') is delegated to the wrapped gen.
    return getattr(self.internal_gen, attr)
  def __next__(self):
    # Advance the underlying generator, run the callback on the value
    # (e.g. a type check), then forward the value unchanged.
    next_val = next(self.internal_gen)
    self.interleave_func(next_val)
    return next_val
  next = __next__  # Python 2 iterator-protocol alias.
  def __iter__(self):
    for x in self.internal_gen:
      self.interleave_func(x)
yield x | /rflow-apache-beam-2.28.0.tar.gz/rflow-apache-beam-2.28.0/apache_beam/typehints/decorators.py | 0.639061 | 0.199308 | decorators.py | pypi |
# pytype: skip-file
from __future__ import absolute_import
from six import with_metaclass
from apache_beam import coders
from apache_beam.typehints import typehints
from apache_beam.utils.sharded_key import ShardedKey
class ShardedKeyTypeConstraint(with_metaclass(typehints.GetitemConstructor,
                                              typehints.TypeConstraint)):
  """Type constraint produced by ShardedKeyType[key_type].

  An object satisfies this constraint when it is a ShardedKey whose .key
  attribute satisfies 'key_type'.
  """
  def __init__(self, key_type):
    # Reject invalid parameters (e.g. raw list/dict literals) up front.
    typehints.validate_composite_type_param(
        key_type, error_msg_prefix='Parameter to ShardedKeyType hint')
    self.key_type = typehints.normalize(key_type)

  def _inner_types(self):
    # The key type is the single nested type parameter.
    yield self.key_type

  def _consistent_with_check_(self, sub):
    if not isinstance(sub, self.__class__):
      return False
    return typehints.is_consistent_with(sub.key_type, self.key_type)

  def type_check(self, instance):
    if not isinstance(instance, ShardedKey):
      raise typehints.CompositeTypeHintError(
          "ShardedKey type-constraint violated. Valid object instance "
          "must be of type 'ShardedKey'. Instead, an instance of '%s' "
          "was received." % (instance.__class__.__name__))
    try:
      typehints.check_constraint(self.key_type, instance.key)
    except (typehints.CompositeTypeHintError, typehints.SimpleTypeHintError):
      # Re-raise with a message that pinpoints the key as the culprit.
      raise typehints.CompositeTypeHintError(
          "%s type-constraint violated. The type of key in 'ShardedKey' "
          "is incorrect. Expected an instance of type '%s', "
          "instead received an instance of type '%s'." % (
              repr(self),
              typehints._unified_repr(self.key_type),
              instance.key.__class__.__name__))

  def match_type_variables(self, concrete_type):
    if not isinstance(concrete_type, ShardedKeyTypeConstraint):
      return {}
    return typehints.match_type_variables(
        self.key_type, concrete_type.key_type)

  def __eq__(self, other):
    if not isinstance(other, ShardedKeyTypeConstraint):
      return False
    return self.key_type == other.key_type

  def __hash__(self):
    return hash(self.key_type)

  def __repr__(self):
    return 'ShardedKey[%s]' % typehints._unified_repr(self.key_type)
ShardedKeyType = ShardedKeyTypeConstraint
coders.typecoders.registry.register_coder(
ShardedKeyType, coders.ShardedKeyCoder) | /rflow-apache-beam-2.28.0.tar.gz/rflow-apache-beam-2.28.0/apache_beam/typehints/sharded_key_type.py | 0.62681 | 0.203609 | sharded_key_type.py | pypi |
# pytype: skip-file
from __future__ import absolute_import
import collections
import inspect
import types
from future.utils import raise_with_traceback
from past.builtins import unicode
from apache_beam import pipeline
from apache_beam.pvalue import TaggedOutput
from apache_beam.transforms import core
from apache_beam.transforms.core import DoFn
from apache_beam.transforms.window import WindowedValue
from apache_beam.typehints.decorators import GeneratorWrapper
from apache_beam.typehints.decorators import TypeCheckError
from apache_beam.typehints.decorators import _check_instance_type
from apache_beam.typehints.decorators import getcallargs_forhints
from apache_beam.typehints.typehints import CompositeTypeHintError
from apache_beam.typehints.typehints import SimpleTypeHintError
from apache_beam.typehints.typehints import check_constraint
class AbstractDoFnWrapper(DoFn):
  """An abstract base for DoFn wrappers that intercept method invocation.

  Subclasses override 'wrapper' to run checks around each delegated call;
  every lifecycle method forwards to the wrapped DoFn.
  """
  def __init__(self, dofn):
    super(AbstractDoFnWrapper, self).__init__()
    # The wrapped user DoFn; all lifecycle methods delegate to it.
    self.dofn = dofn
  def _inspect_start_bundle(self):
    return self.dofn.get_function_arguments('start_bundle')
  def _inspect_process(self):
    return self.dofn.get_function_arguments('process')
  def _inspect_finish_bundle(self):
    return self.dofn.get_function_arguments('finish_bundle')
  def wrapper(self, method, args, kwargs):
    # Default pass-through; subclasses interpose type/output checking here.
    return method(*args, **kwargs)
  def setup(self):
    return self.dofn.setup()
  def start_bundle(self, *args, **kwargs):
    return self.wrapper(self.dofn.start_bundle, args, kwargs)
  def process(self, *args, **kwargs):
    return self.wrapper(self.dofn.process, args, kwargs)
  def finish_bundle(self, *args, **kwargs):
    return self.wrapper(self.dofn.finish_bundle, args, kwargs)
  def teardown(self):
    return self.dofn.teardown()
class OutputCheckWrapperDoFn(AbstractDoFnWrapper):
  """A DoFn that verifies against common errors in the output type."""
  def __init__(self, dofn, full_label):
    super(OutputCheckWrapperDoFn, self).__init__(dofn)
    self.full_label = full_label

  def wrapper(self, method, args, kwargs):
    # Run the delegated method; annotate any type-check failure with the
    # transform's full label so the author can locate it in the pipeline.
    try:
      res = method(*args, **kwargs)
    except TypeCheckError as e:
      # TODO(BEAM-10710): Remove the 'ParDo' prefix for the label name
      msg = (
          'Runtime type violation detected within ParDo(%s): '
          '%s' % (self.full_label, e))
      raise_with_traceback(TypeCheckError(msg))
    else:
      # Only validate outputs of a successful call; the 'else' clause keeps
      # _check_type's own errors from being re-wrapped above.
      return self._check_type(res)

  @staticmethod
  def _check_type(output):
    """Reject output shapes that are almost always authoring mistakes."""
    if output is None:
      return output
    # A single dict/string would be iterated element-wise downstream, which
    # is rarely what the author intended.
    if isinstance(output, (dict, bytes, str, unicode)):
      object_type = type(output).__name__
      raise TypeCheckError(
          'Returning a %s from a ParDo or FlatMap is '
          'discouraged. Please use list("%s") if you really '
          'want this behavior.' % (object_type, output))
    if not isinstance(output, collections.Iterable):
      raise TypeCheckError(
          'FlatMap and ParDo must return an '
          'iterable. %s was returned instead.' % type(output))
    return output
class TypeCheckWrapperDoFn(AbstractDoFnWrapper):
  """A wrapper around a DoFn which performs type-checking of input and output.
  """
  def __init__(self, dofn, type_hints, label=None):
    super(TypeCheckWrapperDoFn, self).__init__(dofn)
    # The callable whose signature the input hints are resolved against.
    self._process_fn = self.dofn._process_argspec_fn()
    if type_hints.input_types:
      # Map declared positional/keyword hints onto the process fn's
      # parameter names, so each argument can be checked individually.
      input_args, input_kwargs = type_hints.input_types
      self._input_hints = getcallargs_forhints(
          self._process_fn, *input_args, **input_kwargs)
    else:
      self._input_hints = None
    # TODO(robertwb): Multi-output.
    self._output_type_hint = type_hints.simple_output_type(label)
  def wrapper(self, method, args, kwargs):
    # start_bundle/finish_bundle outputs are checked, but not their inputs.
    result = method(*args, **kwargs)
    return self._type_check_result(result)
  def process(self, *args, **kwargs):
    if self._input_hints:
      actual_inputs = inspect.getcallargs(self._process_fn, *args, **kwargs)  # pylint: disable=deprecated-method
      for var, hint in self._input_hints.items():
        # A hint that *is* the actual value means no real hint was bound for
        # this parameter (e.g. 'self'); skip it.
        if hint is actual_inputs[var]:
          # self parameter
          continue
        _check_instance_type(hint, actual_inputs[var], var, True)
    return self._type_check_result(self.dofn.process(*args, **kwargs))
  def _type_check_result(self, transform_results):
    """Checks every element of 'transform_results' against the output hint."""
    if self._output_type_hint is None or transform_results is None:
      return transform_results
    def type_check_output(o):
      # TODO(robertwb): Multi-output.
      # Unwrap tagged/windowed values before checking the payload.
      x = o.value if isinstance(o, (TaggedOutput, WindowedValue)) else o
      self.type_check(self._output_type_hint, x, is_input=False)
    # If the return type is a generator, then we will need to interleave our
    # type-checking with its normal iteration so we don't deplete the
    # generator initially just by type-checking its yielded contents.
    if isinstance(transform_results, types.GeneratorType):
      return GeneratorWrapper(transform_results, type_check_output)
    for o in transform_results:
      type_check_output(o)
    return transform_results
  @staticmethod
  def type_check(type_constraint, datum, is_input):
    """Typecheck a PTransform related datum according to a type constraint.

    This function is used to optionally type-check either an input or an output
    to a PTransform.

    Args:
      type_constraint: An instance of a typehints.TypeContraint, one of the
        white-listed builtin Python types, or a custom user class.
      datum: An instance of a Python object.
      is_input: True if 'datum' is an input to a PTransform's DoFn. False
        otherwise.

    Raises:
      TypeError: If 'datum' fails to type-check according to 'type_constraint'.
    """
    datum_type = 'input' if is_input else 'output'
    try:
      check_constraint(type_constraint, datum)
    except CompositeTypeHintError as e:
      # Composite failures already carry a detailed message; keep it.
      raise_with_traceback(TypeCheckError(e.args[0]))
    except SimpleTypeHintError:
      error_msg = (
          "According to type-hint expected %s should be of type %s. "
          "Instead, received '%s', an instance of type %s." %
          (datum_type, type_constraint, datum, type(datum)))
      raise_with_traceback(TypeCheckError(error_msg))
class TypeCheckCombineFn(core.CombineFn):
  """A wrapper around a CombineFn performing type-checking of input and output.
  """
  def __init__(self, combinefn, type_hints, label=None):
    self._combinefn = combinefn
    self._input_type_hint = type_hints.input_types
    self._output_type_hint = type_hints.simple_output_type(label)
    # Used only to prefix error messages with the transform's location.
    self._label = label
  def setup(self, *args, **kwargs):
    self._combinefn.setup(*args, **kwargs)
  def create_accumulator(self, *args, **kwargs):
    return self._combinefn.create_accumulator(*args, **kwargs)
  def add_input(self, accumulator, element, *args, **kwargs):
    if self._input_type_hint:
      try:
        # NOTE(review): assumes the first positional input hint is a KV tuple
        # (as installed by the CombinePerKey wrapping in TypeCheckVisitor) and
        # checks 'element' against the value side -- confirm if reused
        # elsewhere.
        _check_instance_type(
            self._input_type_hint[0][0].tuple_types[1],
            element,
            'element',
            True)
      except TypeCheckError as e:
        error_msg = (
            'Runtime type violation detected within %s: '
            '%s' % (self._label, e))
        raise_with_traceback(TypeCheckError(error_msg))
    return self._combinefn.add_input(accumulator, element, *args, **kwargs)
  def merge_accumulators(self, accumulators, *args, **kwargs):
    return self._combinefn.merge_accumulators(accumulators, *args, **kwargs)
  def compact(self, accumulator, *args, **kwargs):
    return self._combinefn.compact(accumulator, *args, **kwargs)
  def extract_output(self, accumulator, *args, **kwargs):
    result = self._combinefn.extract_output(accumulator, *args, **kwargs)
    if self._output_type_hint:
      try:
        # Same KV assumption as add_input: check the value side of the
        # declared output tuple type.
        _check_instance_type(
            self._output_type_hint.tuple_types[1], result, None, True)
      except TypeCheckError as e:
        error_msg = (
            'Runtime type violation detected within %s: '
            '%s' % (self._label, e))
        raise_with_traceback(TypeCheckError(error_msg))
    return result
  def teardown(self, *args, **kwargs):
    self._combinefn.teardown(*args, **kwargs)
class TypeCheckVisitor(pipeline.PipelineVisitor):
  """Pipeline visitor that installs runtime type-checking wrappers.

  CombinePerKey composites get a TypeCheckCombineFn; all other ParDos get
  an OutputCheckWrapperDoFn around a TypeCheckWrapperDoFn.
  """
  # True while visiting transforms nested inside a CombinePerKey composite.
  _in_combine = False
  def enter_composite_transform(self, applied_transform):
    if isinstance(applied_transform.transform, core.CombinePerKey):
      self._in_combine = True
      # Replace the composite's fn and remember it so the inner
      # CombineValuesDoFn can be pointed at the same wrapped instance.
      self._wrapped_fn = applied_transform.transform.fn = TypeCheckCombineFn(
          applied_transform.transform.fn,
          applied_transform.transform.get_type_hints(),
          applied_transform.full_label)
  def leave_composite_transform(self, applied_transform):
    if isinstance(applied_transform.transform, core.CombinePerKey):
      self._in_combine = False
  def visit_transform(self, applied_transform):
    transform = applied_transform.transform
    if isinstance(transform, core.ParDo):
      if self._in_combine:
        # Inside a CombinePerKey only the combinefn is swapped; other nested
        # ParDos are left untouched.
        if isinstance(transform.fn, core.CombineValuesDoFn):
          transform.fn.combinefn = self._wrapped_fn
      else:
        transform.fn = transform.dofn = OutputCheckWrapperDoFn(
            TypeCheckWrapperDoFn(
                transform.fn,
                transform.get_type_hints(),
                applied_transform.full_label),
            applied_transform.full_label)
class PerformanceTypeCheckVisitor(pipeline.PipelineVisitor):
  """Pipeline visitor that registers declared type hints on transforms.

  Output hints are attached to the transform itself; input hints are
  attached to the producing (upstream) transform, both via
  _add_type_constraint_from_consumer.
  """
  def visit_transform(self, applied_transform):
    transform = applied_transform.transform
    full_label = applied_transform.full_label
    # Store output type hints in current transform
    output_type_hints = self.get_output_type_hints(transform)
    if output_type_hints:
      transform._add_type_constraint_from_consumer(
          full_label, output_type_hints)
    # Store input type hints in producer transform
    input_type_hints = self.get_input_type_hints(transform)
    if input_type_hints and len(applied_transform.inputs):
      producer = applied_transform.inputs[0].producer
      if producer:
        producer.transform._add_type_constraint_from_consumer(
            full_label, input_type_hints)
  def get_input_type_hints(self, transform):
    """Returns (parameter_name, first_input_hint) for 'transform'."""
    type_hints = transform.get_type_hints()
    input_types = None
    if type_hints.input_types:
      # Positional hints take precedence over keyword hints when both exist.
      normal_hints, kwarg_hints = type_hints.input_types
      if kwarg_hints:
        input_types = kwarg_hints
      if normal_hints:
        input_types = normal_hints
    parameter_name = 'Unknown Parameter'
    if hasattr(transform, 'fn'):
      try:
        argspec = inspect.getfullargspec(transform.fn._process_argspec_fn())
      except TypeError:
        # An unsupported callable was passed to getfullargspec
        pass
      else:
        if len(argspec.args):
          # Skip a leading 'self' so we report the first real parameter.
          arg_index = 0
          if argspec.args[0] == 'self' and len(argspec.args) > 1:
            arg_index = 1
          parameter_name = argspec.args[arg_index]
          if isinstance(input_types, dict):
            input_types = (input_types[argspec.args[arg_index]], )
    if input_types and len(input_types):
      input_types = input_types[0]
    return parameter_name, input_types
  def get_output_type_hints(self, transform):
    """Returns (None, first_output_hint); no parameter name for outputs."""
    type_hints = transform.get_type_hints()
    output_types = None
    if type_hints.output_types:
      # Positional hints take precedence over keyword hints when both exist.
      normal_hints, kwarg_hints = type_hints.output_types
      if kwarg_hints:
        output_types = kwarg_hints
      if normal_hints:
        output_types = normal_hints
    if output_types and len(output_types):
      output_types = output_types[0]
return None, output_types | /rflow-apache-beam-2.28.0.tar.gz/rflow-apache-beam-2.28.0/apache_beam/typehints/typecheck.py | 0.581303 | 0.191706 | typecheck.py | pypi |
# pytype: skip-file
from __future__ import absolute_import
import collections
import copy
import logging
import sys
import types
import typing
from builtins import next
from builtins import zip
from future.utils import with_metaclass
# Public names exported by this module; these mirror (a subset of) the
# typing module's generic aliases.
__all__ = [
    'Any',
    'Union',
    'Optional',
    'Tuple',
    'List',
    'KV',
    'Dict',
    'Set',
    'FrozenSet',
    'Iterable',
    'Iterator',
    'Generator',
    'WindowedValue',
    'TypeVariable',
]
# A set of the built-in Python types we don't support, guiding the users
# to templated (upper-case) versions instead.
DISALLOWED_PRIMITIVE_TYPES = (list, set, frozenset, tuple, dict)
# Module-scoped logger, named after the module for standard log filtering.
_LOGGER = logging.getLogger(__name__)
class SimpleTypeHintError(TypeError):
  """Raised when an instance fails a plain (non-composite) type check."""
  pass
class CompositeTypeHintError(TypeError):
  """Raised when an instance violates a composite TypeConstraint."""
  pass
class GetitemConstructor(type):
  """A metaclass that makes Cls[arg] an alias for Cls(arg)."""
  def __getitem__(cls, arg):
    # Subscripting the class constructs an instance parameterized by 'arg'.
    return cls(arg)
class TypeConstraint(object):
  """The base-class for all created type-constraints defined below.

  A :class:`TypeConstraint` is the result of parameterizing a
  :class:`CompositeTypeHint` with one of the allowed Python types or
  another :class:`CompositeTypeHint`. It binds and enforces a specific
  version of a generalized TypeHint.
  """
  def _consistent_with_check_(self, sub):
    """Returns whether sub is consistent with self.

    Has the same relationship to is_consistent_with() as
    __subclasscheck__ does for issubclass().

    Not meant to be called directly; call is_consistent_with(sub, self)
    instead.

    Implementation may assume that maybe_sub_type is not Any
    and has been normalized.
    """
    raise NotImplementedError
  def type_check(self, instance):
    """Determines if the type of 'instance' satisfies this type constraint.

    Args:
      instance: An instance of a Python object.

    Raises:
      :class:`TypeError`: The passed **instance** doesn't satisfy
        this :class:`TypeConstraint`. Subclasses of
        :class:`TypeConstraint` are free to raise any of the subclasses of
        :class:`TypeError` defined above, depending on
        the manner of the type hint error.

    All :class:`TypeConstraint` sub-classes must define this method in order
    for the class object to be created.
    """
    raise NotImplementedError
  def match_type_variables(self, unused_concrete_type):
    # Base constraints carry no type variables, so there is nothing to bind.
    return {}
  def bind_type_variables(self, unused_bindings):
    # No type variables to substitute; return self unchanged.
    return self
  def _inner_types(self):
    """Iterates over the inner types of the composite type."""
    return []
  def visit(self, visitor, visitor_arg):
    """Visitor method to visit all inner types of a composite type.

    Args:
      visitor: A callable invoked for all nodes in the type tree comprising
        a composite type. The visitor will be called with the node visited
        and the visitor argument specified here.
      visitor_arg: Visitor callback second argument.
    """
    visitor(self, visitor_arg)
    for t in self._inner_types():
      if isinstance(t, TypeConstraint):
        # Composite inner types recurse so their whole subtree is visited.
        t.visit(visitor, visitor_arg)
      else:
        visitor(t, visitor_arg)
  def __ne__(self, other):
    # TODO(BEAM-5949): Needed for Python 2 compatibility.
    return not self == other
def match_type_variables(type_constraint, concrete_type):
  """Dispatch type-variable matching to the constraint, if it is one.

  Plain Python types carry no type variables, so they contribute no
  bindings.
  """
  if not isinstance(type_constraint, TypeConstraint):
    return {}
  return type_constraint.match_type_variables(concrete_type)
def bind_type_variables(type_constraint, bindings):
  """Substitute 'bindings' into the constraint; non-constraints pass through."""
  if not isinstance(type_constraint, TypeConstraint):
    return type_constraint
  return type_constraint.bind_type_variables(bindings)
class IndexableTypeConstraint(TypeConstraint):
  """An internal common base-class for all type constraints with indexing.

  E.G. SequenceTypeConstraint + Tuple's of fixed size.
  """
  def _constraint_for_index(self, idx):
    """Returns the type at the given index. This is used to allow type inference
    to determine the correct type for a specific index. On lists this will also
    be the same, however for tuples the value will depend on the position. This
    was added as part of the futurize changes since more of the expressions now
    index into tuples."""
    raise NotImplementedError
class SequenceTypeConstraint(IndexableTypeConstraint):
  """A common base-class for all sequence related type-constraint classes.

  A sequence is defined as an arbitrary length homogeneous container type. Type
  hints which fall under this category include: List[T], Set[T], Iterable[T],
  and Tuple[T, ...].

  Sub-classes may need to override '_consistent_with_check_' if a particular
  sequence requires special handling with respect to type compatibility.

  Attributes:
    inner_type: The type which every element in the sequence should be an
      instance of.
  """
  def __init__(self, inner_type, sequence_type):
    self.inner_type = normalize(inner_type)
    # The concrete container class (list, set, tuple, ...) instances must be.
    self._sequence_type = sequence_type
  def __eq__(self, other):
    # Both the constraint class and the element type must agree, so e.g.
    # List[int] != Set[int] even though both are SequenceTypeConstraints.
    return (
        isinstance(other, SequenceTypeConstraint) and
        type(self) == type(other) and self.inner_type == other.inner_type)
  def __hash__(self):
    # Mix in the concrete constraint class so different sequence hints over
    # the same element type hash differently.
    return hash(self.inner_type) ^ 13 * hash(type(self))
  def _inner_types(self):
    yield self.inner_type
  def _constraint_for_index(self, idx):
    """Returns the type at the given index."""
    return self.inner_type
  def _consistent_with_check_(self, sub):
    # Same constraint class, and the candidate's element type must be
    # consistent with ours.
    return (
        isinstance(sub, self.__class__) and
        is_consistent_with(sub.inner_type, self.inner_type))
  def type_check(self, sequence_instance):
    # First check the container class itself, then each element.
    if not isinstance(sequence_instance, self._sequence_type):
      raise CompositeTypeHintError(
          "%s type-constraint violated. Valid object instance "
          "must be of type '%s'. Instead, an instance of '%s' "
          "was received." % (
              self._sequence_type.__name__.title(),
              self._sequence_type.__name__.lower(),
              sequence_instance.__class__.__name__))
    for index, elem in enumerate(sequence_instance):
      try:
        check_constraint(self.inner_type, elem)
      except SimpleTypeHintError as e:
        raise CompositeTypeHintError(
            '%s hint type-constraint violated. The type of element #%s in '
            'the passed %s is incorrect. Expected an instance of type %s, '
            'instead received an instance of type %s.' % (
                repr(self),
                index,
                _unified_repr(self._sequence_type),
                _unified_repr(self.inner_type),
                elem.__class__.__name__))
      except CompositeTypeHintError as e:
        # Nested composite failures keep their detailed message.
        raise CompositeTypeHintError(
            '%s hint type-constraint violated. The type of element #%s in '
            'the passed %s is incorrect: %s' %
            (repr(self), index, self._sequence_type.__name__, e))
  def match_type_variables(self, concrete_type):
    if isinstance(concrete_type, SequenceTypeConstraint):
      return match_type_variables(self.inner_type, concrete_type.inner_type)
    return {}
  def bind_type_variables(self, bindings):
    bound_inner_type = bind_type_variables(self.inner_type, bindings)
    if bound_inner_type == self.inner_type:
      return self
    # Shallow-copy so the concrete subclass (and _sequence_type) is kept.
    bound_self = copy.copy(self)
    bound_self.inner_type = bound_inner_type
    return bound_self
class CompositeTypeHint(object):
  """The base-class for all created type-hint classes defined below.

  CompositeTypeHint's serve primarily as TypeConstraint factories. They are
  only required to define a single method: '__getitem__' which should return a
  parameterized TypeConstraint, that can be used to enforce static or run-time
  type-checking.

  '__getitem__' is used as a factory function in order to provide a familiar
  API for defining type-hints. The ultimate result is that one will be able to
  use: CompositeTypeHint[type_parameter] to create a type-hint object that
  behaves like any other Python object. This allows one to create
  'type-aliases' by assigning the returned type-hints to a variable.

  * Example: 'Coordinates = List[Tuple[int, int]]'
  """
  def __getitem__(self, py_type):
    """Given a type creates a TypeConstraint instance parameterized by the type.

    This function serves as a factory function which creates TypeConstraint
    instances. Additionally, implementations by sub-classes should perform any
    sanity checking of the passed types in this method in order to rule-out
    disallowed behavior. Such as, attempting to create a TypeConstraint whose
    parameterized type is actually an object instance.

    Args:
      py_type: An instance of a Python type or TypeConstraint.

    Returns: An instance of a custom TypeConstraint for this CompositeTypeHint.

    Raises:
      TypeError: If the passed type violates any constraints for this
        particular TypeHint.
    """
    raise NotImplementedError

  # Backwards-compatible alias: this abstract hook was historically misspelled
  # with a trailing extra underscore, which kept it out of the '[]' protocol
  # and made subscripting the base class raise a confusing TypeError instead
  # of NotImplementedError.
  __getitem___ = __getitem__
def validate_composite_type_param(type_param, error_msg_prefix):
  """Determines if an object is a valid type parameter to a
  :class:`CompositeTypeHint`.

  Implements sanity checking to disallow things like::

    List[1, 2, 3] or Dict[5].

  Args:
    type_param: An object instance.
    error_msg_prefix (:class:`str`): A string prefix used to format an error
      message in the case of an exception.

  Raises:
    TypeError: If the passed **type_param** is not a valid type
      parameter for a :class:`CompositeTypeHint`.
  """
  # Acceptable parameters: a TypeConstraint, a class, None, or anything from
  # the typing module; on Python 2 old-style classes are accepted as well.
  acceptable_classes = [type, TypeConstraint]
  if sys.version_info[0] == 2:
    # Access from __dict__ to avoid py27-lint3 compatibility checker complaint.
    acceptable_classes.append(types.__dict__["ClassType"])
  # Raw container classes (list, dict, ...) must use the upper-case hints.
  is_forbidden_type = (
      isinstance(type_param, type) and type_param in DISALLOWED_PRIMITIVE_TYPES)
  is_acceptable = (
      isinstance(type_param, tuple(acceptable_classes)) or
      type_param is None or
      getattr(type_param, '__module__', None) == 'typing')
  if is_forbidden_type or not is_acceptable:
    raise TypeError(
        '%s must be a non-sequence, a type, or a TypeConstraint. %s'
        ' is an instance of %s.' %
        (error_msg_prefix, type_param, type_param.__class__.__name__))
def _unified_repr(o):
  """Given an object return a qualified name for the object.

  This function closely mirrors '__qualname__' which was introduced in
  Python 3.3. It is used primarily to format types or object instances for
  error messages.

  Args:
    o: An instance of a TypeConstraint or a type.

  Returns:
    A qualified name for the passed Python object fit for string formatting.
  """
  if isinstance(o, (TypeConstraint, type(None))):
    return repr(o)
  return o.__name__
def check_constraint(type_constraint, object_instance):
  """Determine if the passed type instance satisfies the TypeConstraint.

  When examining a candidate type for constraint satisfaction in
  'type_check', all CompositeTypeHint's eventually call this function. This
  function may end up being called recursively if the hinted type of a
  CompositeTypeHint is another CompositeTypeHint.

  Args:
    type_constraint: An instance of a TypeConstraint or a built-in Python type.
    object_instance: An object instance.

  Raises:
    SimpleTypeHintError: If 'type_constraint' is one of the allowed primitive
      Python types and 'object_instance' isn't an instance of this type.
    CompositeTypeHintError: If 'type_constraint' is a TypeConstraint object and
      'object_instance' does not satisfy its constraint.
  """
  if type_constraint is None:
    # TODO(robertwb): Fix uses of None for Any.
    # A None constraint accepts every instance (including None itself).
    return
  if isinstance(type_constraint, TypeConstraint):
    # Composite constraints validate themselves (possibly recursively).
    type_constraint.type_check(object_instance)
    return
  if not isinstance(type_constraint, type):
    raise RuntimeError("bad type: %s" % (type_constraint, ))
  if not isinstance(object_instance, type_constraint):
    raise SimpleTypeHintError
class AnyTypeConstraint(TypeConstraint):
  """An Any type-hint.

  Any is intended to be used as a "don't care" when hinting the types of
  function arguments or return types. All other TypeConstraint's are equivalent
  to 'Any', and its 'type_check' method is a no-op.
  """
  def __eq__(self, other):
    # Any two instances of the exact same constraint class compare equal.
    return type(self) == type(other)
  def __repr__(self):
    return 'Any'
  def __hash__(self):
    # TODO(BEAM-3730): Fix typehints.TypeVariable issues with __hash__.
    # Identity-based hash; note this is inconsistent with __eq__ above.
    return hash(id(self))
  def type_check(self, instance):
    # Any accepts every instance unconditionally.
    pass
class TypeVariable(AnyTypeConstraint):
  """A named placeholder type used during type-variable matching and binding.

  Two TypeVariables compare equal by name only when both opt in via
  'use_name_in_eq'; otherwise any two TypeVariable instances compare equal.
  """
  def __init__(self, name, use_name_in_eq=True):
    self.name = name
    self.use_name_in_eq = use_name_in_eq
  def __eq__(self, other):
    # The "other" may be an Ellipsis object
    # so we have to check if it has use_name_in_eq first
    if self.use_name_in_eq and (hasattr(other, 'use_name_in_eq') and
                                other.use_name_in_eq):
      return type(self) == type(other) and self.name == other.name
    return type(self) == type(other)
  def __hash__(self):
    # TODO(BEAM-3730): Fix typehints.TypeVariable issues with __hash__.
    return hash(id(self))
  def __repr__(self):
    return 'TypeVariable[%s]' % self.name
  def match_type_variables(self, concrete_type):
    # A bare type variable binds directly to the concrete type.
    return {self: concrete_type}
  def bind_type_variables(self, bindings):
    # Unbound variables pass through unchanged.
    return bindings.get(self, self)
class UnionHint(CompositeTypeHint):
  """A Union type-hint. Union[X, Y] accepts instances of type X OR type Y.

  Duplicate type parameters are ignored. Additionally, Nested Union hints will
  be flattened out. For example:

  * Union[Union[str, int], bool] -> Union[str, int, bool]

  A candidate type instance satisfies a UnionConstraint if it is an
  instance of any of the parameterized 'union_types' for a Union.

  Union[X] is disallowed, and all type parameters will be sanity checked to
  ensure compatibility with nested type-hints.

  When comparing two Union hints, ordering is enforced before comparison.

  * Union[int, str] == Union[str, int]
  """
  class UnionConstraint(TypeConstraint):
    def __init__(self, union_types):
      # A set: order-insensitive equality and automatic de-duplication.
      self.union_types = set(normalize(t) for t in union_types)
    def __eq__(self, other):
      return (
          isinstance(other, UnionHint.UnionConstraint) and
          self.union_types == other.union_types)
    def __hash__(self):
      # Sum of member hashes so the result is order-independent.
      return 1 + sum(hash(t) for t in self.union_types)
    def __repr__(self):
      # Sorting the type name strings simplifies unit tests.
      return 'Union[%s]' % (
          ', '.join(sorted(_unified_repr(t) for t in self.union_types)))
    def _inner_types(self):
      for t in self.union_types:
        yield t
    def _consistent_with_check_(self, sub):
      if isinstance(sub, UnionConstraint):
        # A union type is compatible if every possible type is compatible.
        # E.g. Union[A, B, C] > Union[A, B].
        return all(is_consistent_with(elem, self) for elem in sub.union_types)
      # Other must be compatible with at least one of this union's subtypes.
      # E.g. Union[A, B, C] > T if T > A or T > B or T > C.
      return any(is_consistent_with(sub, elem) for elem in self.union_types)
    def type_check(self, instance):
      error_msg = ''
      for t in self.union_types:
        try:
          check_constraint(t, instance)
          # Satisfying any one alternative is enough.
          return
        except TypeError as e:
          # Only the most recent failure message is kept for reporting.
          error_msg = str(e)
          continue
      raise CompositeTypeHintError(
          '%s type-constraint violated. Expected an instance of one of: %s, '
          'received %s instead.%s' % (
              repr(self),
              tuple(sorted(_unified_repr(t) for t in self.union_types)),
              instance.__class__.__name__,
              error_msg))
  def __getitem__(self, type_params):
    if not isinstance(type_params, (collections.Sequence, set)):
      raise TypeError('Cannot create Union without a sequence of types.')
    # Flatten nested Union's and duplicated repeated type hints.
    params = set()
    dict_union = None
    for t in type_params:
      validate_composite_type_param(
          t, error_msg_prefix='All parameters to a Union hint')
      if isinstance(t, self.UnionConstraint):
        # Nested unions are flattened into this one.
        params |= t.union_types
      elif isinstance(t, DictConstraint):
        # Multiple Dict hints are merged pairwise into a single Dict whose
        # key/value types are themselves unions, rather than kept as
        # separate alternatives.
        if dict_union is None:
          dict_union = t
        else:
          dict_union.key_type = Union[dict_union.key_type, t.key_type]
          dict_union.value_type = Union[dict_union.value_type, t.value_type]
      else:
        params.add(t)
    if dict_union is not None:
      params.add(dict_union)
    # Any absorbs everything; a singleton union collapses to its only member.
    if Any in params:
      return Any
    elif len(params) == 1:
      return next(iter(params))
    return self.UnionConstraint(params)
# Module-level alias so code elsewhere can reference the nested constraint
# class without going through UnionHint.
UnionConstraint = UnionHint.UnionConstraint
class OptionalHint(UnionHint):
  """An Optional type-hint: Optional[X] accepts instances of X or None.

  The Optional[X] factory is simply sugar for Union[X, type(None)].
  """
  def __getitem__(self, py_type):
    # Exactly one type parameter is allowed; a sequence means the caller
    # wrote something like Optional[X, Y], which is invalid.
    if not isinstance(py_type, collections.Sequence):
      return Union[py_type, type(None)]
    raise TypeError(
        'An Option type-hint only accepts a single type '
        'parameter.')
class TupleHint(CompositeTypeHint):
  """A Tuple type-hint.

  Tuple can accept 1 or more type-hint parameters.

  Tuple[X, Y] represents a tuple of *exactly* two elements, with the first
  being of type 'X' and the second an instance of type 'Y'.

  * (1, 2) satisfies Tuple[int, int]

  Additionally, one is able to type-hint an arbitrary length, homogeneous tuple
  by passing the Ellipsis (...) object as the second parameter.

  As an example, Tuple[str, ...] indicates a tuple of any length with each
  element being an instance of 'str'.
  """
  class TupleSequenceConstraint(SequenceTypeConstraint):
    # Constraint for the homogeneous, arbitrary-length form Tuple[T, ...].
    def __init__(self, type_param):
      super(TupleHint.TupleSequenceConstraint, self).__init__(type_param, tuple)
    def __repr__(self):
      return 'Tuple[%s, ...]' % _unified_repr(self.inner_type)
    def _consistent_with_check_(self, sub):
      if isinstance(sub, TupleConstraint):
        # E.g. Tuple[A, B] < Tuple[C, ...] iff A < C and B < C.
        return all(
            is_consistent_with(elem, self.inner_type)
            for elem in sub.tuple_types)
      return super(TupleSequenceConstraint, self)._consistent_with_check_(sub)
  class TupleConstraint(IndexableTypeConstraint):
    # Constraint for the fixed-arity form Tuple[X, Y, ...types...].
    def __init__(self, type_params):
      self.tuple_types = tuple(normalize(t) for t in type_params)
    def __eq__(self, other):
      return (
          isinstance(other, TupleHint.TupleConstraint) and
          self.tuple_types == other.tuple_types)
    def __hash__(self):
      return hash(self.tuple_types)
    def __repr__(self):
      return 'Tuple[%s]' % (
          ', '.join(_unified_repr(t) for t in self.tuple_types))
    def _inner_types(self):
      for t in self.tuple_types:
        yield t
    def _constraint_for_index(self, idx):
      """Returns the type at the given index."""
      return self.tuple_types[idx]
    def _consistent_with_check_(self, sub):
      # Same arity, and each position must be pairwise consistent.
      return (
          isinstance(sub, self.__class__) and
          len(sub.tuple_types) == len(self.tuple_types) and all(
              is_consistent_with(sub_elem, elem) for sub_elem,
              elem in zip(sub.tuple_types, self.tuple_types)))
    def type_check(self, tuple_instance):
      # Validate container class, then arity, then each element in order.
      if not isinstance(tuple_instance, tuple):
        raise CompositeTypeHintError(
            "Tuple type constraint violated. Valid object instance must be of "
            "type 'tuple'. Instead, an instance of '%s' was received." %
            tuple_instance.__class__.__name__)
      if len(tuple_instance) != len(self.tuple_types):
        raise CompositeTypeHintError(
            'Passed object instance is of the proper type, but differs in '
            'length from the hinted type. Expected a tuple of length %s, '
            'received a tuple of length %s.' %
            (len(self.tuple_types), len(tuple_instance)))
      for type_pos, (expected, actual) in enumerate(zip(self.tuple_types,
                                                        tuple_instance)):
        try:
          check_constraint(expected, actual)
          continue
        except SimpleTypeHintError:
          raise CompositeTypeHintError(
              '%s hint type-constraint violated. The type of element #%s in '
              'the passed tuple is incorrect. Expected an instance of '
              'type %s, instead received an instance of type %s.' % (
                  repr(self),
                  type_pos,
                  _unified_repr(expected),
                  actual.__class__.__name__))
        except CompositeTypeHintError as e:
          # Nested composite failures keep their own detailed message.
          raise CompositeTypeHintError(
              '%s hint type-constraint violated. The type of element #%s in '
              'the passed tuple is incorrect. %s' % (repr(self), type_pos, e))
    def match_type_variables(self, concrete_type):
      bindings = {}
      if isinstance(concrete_type, TupleConstraint):
        for a, b in zip(self.tuple_types, concrete_type.tuple_types):
          bindings.update(match_type_variables(a, b))
      return bindings
    def bind_type_variables(self, bindings):
      bound_tuple_types = tuple(
          bind_type_variables(t, bindings) for t in self.tuple_types)
      if bound_tuple_types == self.tuple_types:
        return self
      return Tuple[bound_tuple_types]
  def __getitem__(self, type_params):
    ellipsis = False
    if not isinstance(type_params, collections.Iterable):
      # Special case for hinting tuples with arity-1.
      type_params = (type_params, )
    if type_params and type_params[-1] == Ellipsis:
      # The only legal Ellipsis form is exactly Tuple[T, ...].
      if len(type_params) != 2:
        raise TypeError(
            'Ellipsis can only be used to type-hint an arbitrary '
            'length tuple of containing a single type: '
            'Tuple[A, ...].')
      # Tuple[A, ...] indicates an arbitrary length homogeneous tuple.
      type_params = type_params[:1]
      ellipsis = True
    for t in type_params:
      validate_composite_type_param(
          t, error_msg_prefix='All parameters to a Tuple hint')
    if ellipsis:
      return self.TupleSequenceConstraint(type_params[0])
    return self.TupleConstraint(type_params)
TupleConstraint = TupleHint.TupleConstraint
TupleSequenceConstraint = TupleHint.TupleSequenceConstraint
class ListHint(CompositeTypeHint):
"""A List type-hint.
List[X] represents an instance of a list populated by a single homogeneous
type. The parameterized type 'X' can either be a built-in Python type or an
instance of another TypeConstraint.
* ['1', '2', '3'] satisfies List[str]
"""
class ListConstraint(SequenceTypeConstraint):
# Delegates the generic sequence checks to SequenceTypeConstraint,
# parameterized with `list` as the concrete container type.
def __init__(self, list_type):
super(ListHint.ListConstraint, self).__init__(list_type, list)
def __repr__(self):
return 'List[%s]' % _unified_repr(self.inner_type)
def __getitem__(self, t):
validate_composite_type_param(t, error_msg_prefix='Parameter to List hint')
return self.ListConstraint(t)
# Module-level alias for the nested constraint class.
ListConstraint = ListHint.ListConstraint
class KVHint(CompositeTypeHint):
"""A KV type-hint, represents a Key-Value pair of a particular type.
Internally, KV[X, Y] proxies to Tuple[X, Y]. A KV type-hint accepts only
accepts exactly two type-parameters. The first represents the required
key-type and the second the required value-type.
"""
def __getitem__(self, type_params):
# Validates that exactly a (key, value) pair was given, then defers to
# the Tuple hint; KV has no constraint class of its own.
if not isinstance(type_params, tuple):
raise TypeError(
'Parameter to KV type-hint must be a tuple of types: '
'KV[.., ..].')
if len(type_params) != 2:
raise TypeError(
'Length of parameters to a KV type-hint must be exactly 2. Passed '
'parameters: %s, have a length of %s.' %
(type_params, len(type_params)))
return Tuple[type_params]
def key_value_types(kv):
  """Returns the key and value type of a KV type-hint.

  Args:
    kv: An instance of a TypeConstraint sub-class.

  Returns:
    A tuple: (key_type, value_type) if the passed type-hint is an instance of
    a KV type-hint, and (Any, Any) otherwise.
  """
  # A KV hint is represented internally as a two-element Tuple constraint.
  is_kv_hint = isinstance(kv, TupleHint.TupleConstraint)
  return kv.tuple_types if is_kv_hint else (Any, Any)
class DictHint(CompositeTypeHint):
  """A Dict type-hint.

  Dict[K, V] Represents a dictionary where all keys are of a particular type
  and all values are of another (possible the same) type.
  """
  class DictConstraint(TypeConstraint):
    """Constraint enforcing a dict whose keys/values match the given hints."""
    def __init__(self, key_type, value_type):
      # normalize() canonicalizes raw primitives (e.g. `list` -> List[Any]).
      self.key_type = normalize(key_type)
      self.value_type = normalize(value_type)

    def __repr__(self):
      return 'Dict[%s, %s]' % (
          _unified_repr(self.key_type), _unified_repr(self.value_type))

    def __eq__(self, other):
      return (
          type(self) == type(other) and self.key_type == other.key_type and
          self.value_type == other.value_type)

    def __hash__(self):
      return hash((type(self), self.key_type, self.value_type))

    def _inner_types(self):
      yield self.key_type
      yield self.value_type

    def _consistent_with_check_(self, sub):
      # Dict[K1, V1] is consistent with Dict[K2, V2] iff K1 < K2 and V1 < V2.
      return (
          isinstance(sub, self.__class__) and
          is_consistent_with(sub.key_type, self.key_type) and
          is_consistent_with(sub.value_type, self.value_type))

    def _raise_hint_exception_or_inner_exception(
        self, is_key, incorrect_instance, inner_error_message=''):
      """Raises a CompositeTypeHintError for a failed key or value check.

      Args:
        is_key: True if the offending element is a key, False for a value.
        incorrect_instance: The key or value that failed its constraint.
        inner_error_message: Message of a nested composite failure, if any.
      """
      incorrect_type = 'values' if not is_key else 'keys'
      hinted_type = self.value_type if not is_key else self.key_type
      if inner_error_message:
        raise CompositeTypeHintError(
            '%s hint %s-type constraint violated. All %s should be of type '
            '%s. Instead: %s' % (
                repr(self),
                incorrect_type[:-1],
                incorrect_type,
                _unified_repr(hinted_type),
                inner_error_message))
      else:
        raise CompositeTypeHintError(
            '%s hint %s-type constraint violated. All %s should be of '
            'type %s. Instead, %s is of type %s.' % (
                repr(self),
                incorrect_type[:-1],
                incorrect_type,
                _unified_repr(hinted_type),
                incorrect_instance,
                incorrect_instance.__class__.__name__))

    def type_check(self, dict_instance):
      """Checks dict_instance is a dict with conforming keys and values."""
      if not isinstance(dict_instance, dict):
        raise CompositeTypeHintError(
            'Dict type-constraint violated. All passed instances must be of '
            'type dict. %s is of type %s.' %
            (dict_instance, dict_instance.__class__.__name__))
      for key, value in dict_instance.items():
        try:
          check_constraint(self.key_type, key)
        except CompositeTypeHintError as e:
          self._raise_hint_exception_or_inner_exception(True, key, str(e))
        except SimpleTypeHintError:
          self._raise_hint_exception_or_inner_exception(True, key)
        try:
          check_constraint(self.value_type, value)
        except CompositeTypeHintError as e:
          self._raise_hint_exception_or_inner_exception(False, value, str(e))
        except SimpleTypeHintError:
          self._raise_hint_exception_or_inner_exception(False, value)

    def match_type_variables(self, concrete_type):
      # Gathers TypeVariable bindings from both the key and value slots.
      if isinstance(concrete_type, DictConstraint):
        bindings = {}
        bindings.update(
            match_type_variables(self.key_type, concrete_type.key_type))
        bindings.update(
            match_type_variables(self.value_type, concrete_type.value_type))
        return bindings
      return {}

    def bind_type_variables(self, bindings):
      bound_key_type = bind_type_variables(self.key_type, bindings)
      bound_value_type = bind_type_variables(self.value_type, bindings)
      # BUG FIX: compare the bound (key, value) pair against the original
      # pair. The previous grouping compared bound-key with bound-value and
      # key with value, so e.g. Dict[T, T] bound with {T: int} incorrectly
      # returned the unbound constraint.
      if (bound_key_type, bound_value_type) == (self.key_type,
                                                self.value_type):
        return self
      return Dict[bound_key_type, bound_value_type]

  def __getitem__(self, type_params):
    # Type param must be a (k, v) pair.
    if not isinstance(type_params, tuple):
      raise TypeError(
          'Parameter to Dict type-hint must be a tuple of types: '
          'Dict[.., ..].')
    if len(type_params) != 2:
      raise TypeError(
          'Length of parameters to a Dict type-hint must be exactly 2. Passed '
          'parameters: %s, have a length of %s.' %
          (type_params, len(type_params)))
    key_type, value_type = type_params
    validate_composite_type_param(
        key_type, error_msg_prefix='Key-type parameter to a Dict hint')
    validate_composite_type_param(
        value_type, error_msg_prefix='Value-type parameter to a Dict hint')
    return self.DictConstraint(key_type, value_type)
DictConstraint = DictHint.DictConstraint
class SetHint(CompositeTypeHint):
"""A Set type-hint.
Set[X] defines a type-hint for a set of homogeneous types. 'X' may be either a
built-in Python type or a another nested TypeConstraint.
"""
class SetTypeConstraint(SequenceTypeConstraint):
# Inherits the generic sequence checks, parameterized with `set`.
def __init__(self, type_param):
super(SetHint.SetTypeConstraint, self).__init__(type_param, set)
def __repr__(self):
return 'Set[%s]' % _unified_repr(self.inner_type)
def __getitem__(self, type_param):
validate_composite_type_param(
type_param, error_msg_prefix='Parameter to a Set hint')
return self.SetTypeConstraint(type_param)
# Module-level alias for the nested constraint class.
SetTypeConstraint = SetHint.SetTypeConstraint
class FrozenSetHint(CompositeTypeHint):
"""A FrozenSet type-hint.
FrozenSet[X] defines a type-hint for a set of homogeneous types. 'X' may be
either a built-in Python type or a another nested TypeConstraint.
This is a mirror copy of SetHint - consider refactoring common functionality.
"""
class FrozenSetTypeConstraint(SequenceTypeConstraint):
# Inherits the generic sequence checks, parameterized with `frozenset`.
def __init__(self, type_param):
super(FrozenSetHint.FrozenSetTypeConstraint,
self).__init__(type_param, frozenset)
def __repr__(self):
return 'FrozenSet[%s]' % _unified_repr(self.inner_type)
def __getitem__(self, type_param):
validate_composite_type_param(
type_param, error_msg_prefix='Parameter to a FrozenSet hint')
return self.FrozenSetTypeConstraint(type_param)
# Module-level alias for the nested constraint class.
FrozenSetTypeConstraint = FrozenSetHint.FrozenSetTypeConstraint
class IterableHint(CompositeTypeHint):
"""An Iterable type-hint.
Iterable[X] defines a type-hint for an object implementing an '__iter__'
method which yields objects which are all of the same type.
"""
class IterableTypeConstraint(SequenceTypeConstraint):
def __init__(self, iter_type):
# NOTE(review): collections.Iterable was removed in Python 3.10 in favor
# of collections.abc.Iterable -- confirm the supported Python versions.
super(IterableHint.IterableTypeConstraint,
self).__init__(iter_type, collections.Iterable)
def __repr__(self):
return 'Iterable[%s]' % _unified_repr(self.inner_type)
def _consistent_with_check_(self, sub):
# Any sequence constraint with a consistent element type is acceptable,
# as are tuple constraints whose members are all consistent.
if isinstance(sub, SequenceTypeConstraint):
return is_consistent_with(sub.inner_type, self.inner_type)
elif isinstance(sub, TupleConstraint):
if not sub.tuple_types:
# The empty tuple is consistent with Iterator[T] for any T.
return True
# Each element in the hetrogenious tuple must be consistent with
# the iterator type.
# E.g. Tuple[A, B] < Iterable[C] if A < C and B < C.
return all(
is_consistent_with(elem, self.inner_type)
for elem in sub.tuple_types)
return False
def __getitem__(self, type_param):
validate_composite_type_param(
type_param, error_msg_prefix='Parameter to an Iterable hint')
return self.IterableTypeConstraint(type_param)
# Module-level alias for the nested constraint class.
IterableTypeConstraint = IterableHint.IterableTypeConstraint
class IteratorHint(CompositeTypeHint):
"""An Iterator type-hint.
Iterator[X] defines a type-hint for an object implementing both '__iter__'
and a 'next' method which yields objects which are all of the same type. Type
checking a type-hint of this type is deferred in order to avoid depleting the
underlying lazily generated sequence. See decorators.interleave_type_check for
further information.
"""
class IteratorTypeConstraint(TypeConstraint):
def __init__(self, t):
# t is the type yielded by each call to next()/__next__().
self.yielded_type = normalize(t)
def __repr__(self):
return 'Iterator[%s]' % _unified_repr(self.yielded_type)
def __eq__(self, other):
return (
type(self) == type(other) and self.yielded_type == other.yielded_type)
def __hash__(self):
return hash(self.yielded_type)
def _inner_types(self):
yield self.yielded_type
def _consistent_with_check_(self, sub):
return (
isinstance(sub, self.__class__) and
is_consistent_with(sub.yielded_type, self.yielded_type))
def type_check(self, instance):
# Special case for lazy types, we only need to enforce the underlying
# type. This avoid having to compute the entirety of the generator/iter.
try:
check_constraint(self.yielded_type, instance)
return
except CompositeTypeHintError as e:
raise CompositeTypeHintError(
'%s hint type-constraint violated: %s' % (repr(self), str(e)))
except SimpleTypeHintError:
raise CompositeTypeHintError(
'%s hint type-constraint violated. Expected a iterator of type %s. '
'Instead received a iterator of type %s.' % (
repr(self),
_unified_repr(self.yielded_type),
instance.__class__.__name__))
def __getitem__(self, type_param):
validate_composite_type_param(
type_param, error_msg_prefix='Parameter to an Iterator hint')
return self.IteratorTypeConstraint(type_param)
# Module-level alias for the nested constraint class.
IteratorTypeConstraint = IteratorHint.IteratorTypeConstraint
class WindowedTypeConstraint(with_metaclass(GetitemConstructor, TypeConstraint)
): # type: ignore[misc]
"""A type constraint for WindowedValue objects.
Mostly for internal use.
Attributes:
inner_type: The type which the element should be an instance of.
"""
def __init__(self, inner_type):
self.inner_type = normalize(inner_type)
def __eq__(self, other):
return (
isinstance(other, WindowedTypeConstraint) and
self.inner_type == other.inner_type)
def __hash__(self):
# Mix in the constraint type so the hash differs from the bare inner type.
return hash(self.inner_type) ^ 13 * hash(type(self))
def _inner_types(self):
yield self.inner_type
def _consistent_with_check_(self, sub):
return (
isinstance(sub, self.__class__) and
is_consistent_with(sub.inner_type, self.inner_type))
def type_check(self, instance):
# Imported here to avoid a circular module dependency.
from apache_beam.transforms import window
if not isinstance(instance, window.WindowedValue):
raise CompositeTypeHintError(
"Window type-constraint violated. Valid object instance "
"must be of type 'WindowedValue'. Instead, an instance of '%s' "
"was received." % (instance.__class__.__name__))
try:
check_constraint(self.inner_type, instance.value)
except (CompositeTypeHintError, SimpleTypeHintError):
raise CompositeTypeHintError(
'%s hint type-constraint violated. The type of element in '
'is incorrect. Expected an instance of type %s, '
'instead received an instance of type %s.' % (
repr(self),
_unified_repr(self.inner_type),
instance.value.__class__.__name__))
class GeneratorHint(IteratorHint):
  """A Generator type hint.

  Subscriptor is in the form [yield_type, send_type, return_type], however
  only yield_type is supported. The 2 others are expected to be None.
  """
  def __getitem__(self, type_params):
    if isinstance(type_params, tuple) and len(type_params) == 3:
      yield_type, send_type, return_type = type_params
      if send_type is not None:
        # Lazy %-args: only interpolated if the warning is actually emitted.
        _LOGGER.warning('Ignoring send_type hint: %s', send_type)
      if return_type is not None:
        _LOGGER.warning('Ignoring return_type hint: %s', return_type)
    else:
      yield_type = type_params
    # Generators are type-checked as iterators of their yield type.
    return self.IteratorTypeConstraint(yield_type)
# Create the actual instances for all defined type-hints above.
# These singletons are what user code subscripts, e.g. typehints.List[int].
Any = AnyTypeConstraint()
Union = UnionHint()
Optional = OptionalHint()
Tuple = TupleHint()
List = ListHint()
KV = KVHint()
Dict = DictHint()
Set = SetHint()
FrozenSet = FrozenSetHint()
Iterable = IterableHint()
Iterator = IteratorHint()
Generator = GeneratorHint()
WindowedValue = WindowedTypeConstraint
# There is a circular dependency between defining this mapping
# and using it in normalize(). Initialize it here and populate
# it below.
_KNOWN_PRIMITIVE_TYPES = {} # type: typing.Dict[type, typing.Any]
def normalize(x, none_as_type=False):
  """Converts a type hint into its canonical Beam representation.

  Args:
    x: A type, typing construct, or TypeConstraint.
    none_as_type: If True, a None value is converted to type(None).

  Returns:
    The canonical Beam type hint, or x unchanged if no conversion applies.
  """
  # None is inconsistently used to mean Any, unknown, or NoneType.
  if none_as_type and x is None:
    return type(None)
  if x in _KNOWN_PRIMITIVE_TYPES:
    return _KNOWN_PRIMITIVE_TYPES[x]
  if getattr(x, '__module__', None) == 'typing':
    # Avoid circular imports
    from apache_beam.typehints import native_type_compatibility
    converted = native_type_compatibility.convert_to_beam_type(x)
    # An unchanged result means the conversion failed; fall back to Any since
    # it may be a compatible typing construct we do not understand.
    return converted if converted != x else Any
  return x
# Maps bare container primitives to their fully-parameterized Any hints;
# consumed by normalize() above.
_KNOWN_PRIMITIVE_TYPES.update({
dict: Dict[Any, Any],
list: List[Any],
tuple: Tuple[Any, ...],
set: Set[Any],
frozenset: FrozenSet[Any],
})
def is_consistent_with(sub, base):
"""Checks whether sub a is consistent with base.
This is according to the terminology of PEP 483/484. This relationship is
neither symmetric nor transitive, but a good mnemonic to keep in mind is that
is_consistent_with(a, b) is roughly equivalent to the issubclass(a, b)
relation, but also handles the special Any type as well as type
parameterization.
"""
if sub == base:
# Common special case.
return True
if isinstance(sub, AnyTypeConstraint) or isinstance(base, AnyTypeConstraint):
# Any is consistent with everything, in both directions.
return True
sub = normalize(sub, none_as_type=True)
base = normalize(base, none_as_type=True)
if isinstance(base, TypeConstraint):
if isinstance(sub, UnionConstraint):
# A union is consistent with base iff every member is.
return all(is_consistent_with(c, base) for c in sub.union_types)
return base._consistent_with_check_(sub)
elif isinstance(sub, TypeConstraint):
# Nothing but object lives above any type constraints.
return base == object
# Both are plain classes; fall back to nominal subclassing.
return issubclass(sub, base)
def get_yielded_type(type_hint):
"""Obtains the type of elements yielded by an iterable.
Note that "iterable" here means: can be iterated over in a for loop, excluding
strings and dicts.
Args:
type_hint: (TypeConstraint) The iterable in question. Must be normalize()-d.
Returns:
Yielded type of the iterable.
Raises:
ValueError if not iterable.
"""
if isinstance(type_hint, AnyTypeConstraint):
return type_hint
if is_consistent_with(type_hint, Iterator[Any]):
return type_hint.yielded_type
if is_consistent_with(type_hint, Tuple[Any, ...]):
# Fixed-arity tuples yield the union of their component types.
if isinstance(type_hint, TupleConstraint):
return Union[type_hint.tuple_types]
else: # TupleSequenceConstraint
return type_hint.inner_type
if is_consistent_with(type_hint, Iterable[Any]):
return type_hint.inner_type
raise ValueError('%s is not iterable' % type_hint)
def coerce_to_kv_type(element_type, label=None, side_input_producer=None):
  """Attempts to coerce element_type to a compatible kv type.

  Raises an error on failure.
  """
  # Build a human-readable description of the consumer for error messages.
  if side_input_producer:
    consumer = 'side-input of %r (producer: %r)' % (label, side_input_producer)
  else:
    consumer = '%r' % label
  # If element_type is not specified, then treat it as `Any`.
  if not element_type:
    return KV[Any, Any]
  elif isinstance(element_type, TupleHint.TupleConstraint):
    if len(element_type.tuple_types) == 2:
      return element_type
    else:
      raise ValueError(
          "Tuple input to %s must have two components. "
          "Found %s." % (consumer, element_type))
  elif isinstance(element_type, AnyTypeConstraint):
    # `Any` type needs to be replaced with a KV[Any, Any] to
    # satisfy the KV form.
    return KV[Any, Any]
  elif isinstance(element_type, UnionConstraint):
    # Coerce each member, then union the keys and values component-wise.
    union_types = [coerce_to_kv_type(t) for t in element_type.union_types]
    return KV[Union[tuple(t.tuple_types[0] for t in union_types)],
              Union[tuple(t.tuple_types[1] for t in union_types)]]
  else:
    # TODO: Possibly handle other valid types.
    raise ValueError(
        "Input to %s must be compatible with KV[Any, Any]. "
        "Found %s." % (consumer, element_type))
# pytype: skip-file
from __future__ import absolute_import
from __future__ import print_function
import collections
import dis
import inspect
import pprint
import sys
import traceback
import types
from builtins import object
from builtins import zip
from functools import reduce
from apache_beam import pvalue
from apache_beam.typehints import Any
from apache_beam.typehints import row_type
from apache_beam.typehints import typehints
# pylint: disable=wrong-import-order, wrong-import-position, ungrouped-imports
try: # Python 2
import __builtin__ as builtins
except ImportError: # Python 3
import builtins # type: ignore
# pylint: enable=wrong-import-order, wrong-import-position, ungrouped-imports
class TypeInferenceError(ValueError):
  """Raised when the bytecode-based type inference cannot proceed."""
def instance_to_type(o):
"""Given a Python object o, return the corresponding type hint.
"""
t = type(o)
if o is None:
return type(None)
elif t == pvalue.Row:
# Rows map each field name to the inferred type of its value.
return row_type.RowTypeConstraint([
(name, instance_to_type(value)) for name, value in o.as_dict().items()
])
elif t not in typehints.DISALLOWED_PRIMITIVE_TYPES:
# pylint: disable=deprecated-types-field
if sys.version_info[0] == 2 and t == types.InstanceType:
# Python 2 old-style class instances report their actual class.
return o.__class__
if t == BoundMethod:
return types.MethodType
return t
elif t == tuple:
# Tuples are heterogeneous: keep one hint per element.
return typehints.Tuple[[instance_to_type(item) for item in o]]
elif t == list:
if len(o) > 0:
return typehints.List[typehints.Union[[
instance_to_type(item) for item in o
]]]
else:
# Empty containers carry no element information; use Any.
return typehints.List[typehints.Any]
elif t == set:
if len(o) > 0:
return typehints.Set[typehints.Union[[
instance_to_type(item) for item in o
]]]
else:
return typehints.Set[typehints.Any]
elif t == frozenset:
if len(o) > 0:
return typehints.FrozenSet[typehints.Union[[
instance_to_type(item) for item in o
]]]
else:
return typehints.FrozenSet[typehints.Any]
elif t == dict:
if len(o) > 0:
return typehints.Dict[
typehints.Union[[instance_to_type(k) for k, v in o.items()]],
typehints.Union[[instance_to_type(v) for k, v in o.items()]],
]
else:
return typehints.Dict[typehints.Any, typehints.Any]
else:
raise TypeInferenceError('Unknown forbidden type: %s' % t)
def union_list(xs, ys):
  """Element-wise union of two equal-length lists of type constraints."""
  assert len(xs) == len(ys)
  return list(map(union, xs, ys))
class Const(object):
  """Wraps a concrete Python value observed during abstract interpretation.

  Stores both the value itself and the type hint inferred for it, so later
  opcodes may use either the literal value or fall back to its type.
  """
  def __init__(self, value):
    self.value = value
    self.type = instance_to_type(value)

  def __eq__(self, other):
    if not isinstance(other, Const):
      return False
    return self.value == other.value

  def __ne__(self, other):
    # TODO(BEAM-5949): Needed for Python 2 compatibility.
    return not self == other

  def __hash__(self):
    return hash(self.value)

  def __repr__(self):
    # Truncate large values so debug output stays readable.
    return 'Const[%s]' % str(self.value)[:100]

  @staticmethod
  def unwrap(x):
    """Returns the inferred type for a Const, or x unchanged otherwise."""
    return x.type if isinstance(x, Const) else x

  @staticmethod
  def unwrap_all(xs):
    """Applies unwrap to every element of xs."""
    return [Const.unwrap(x) for x in xs]
class FrameState(object):
"""Stores the state of the frame at a particular point of execution.
"""
def __init__(self, f, local_vars=None, stack=()):
self.f = f
self.co = f.__code__
# Copies so this state can be mutated independently of its source.
self.vars = list(local_vars)
self.stack = list(stack)
def __eq__(self, other):
return isinstance(other, FrameState) and self.__dict__ == other.__dict__
def __ne__(self, other):
# TODO(BEAM-5949): Needed for Python 2 compatibility.
return not self == other
def __hash__(self):
return hash(tuple(sorted(self.__dict__.items())))
def copy(self):
return FrameState(self.f, self.vars, self.stack)
def const_type(self, i):
# Type of the i-th co_consts entry, wrapped to retain its value.
return Const(self.co.co_consts[i])
def get_closure(self, i):
# Cell variables precede free variables in the combined closure index.
num_cellvars = len(self.co.co_cellvars)
if i < num_cellvars:
return self.vars[i]
else:
return self.f.__closure__[i - num_cellvars].cell_contents
def closure_type(self, i):
"""Returns a TypeConstraint or Const."""
val = self.get_closure(i)
if isinstance(val, typehints.TypeConstraint):
return val
else:
return Const(val)
def get_global(self, i):
# Resolves a global name, falling back to builtins, else unknown (Any).
name = self.get_name(i)
if name in self.f.__globals__:
return Const(self.f.__globals__[name])
if name in builtins.__dict__:
return Const(builtins.__dict__[name])
return Any
def get_name(self, i):
return self.co.co_names[i]
def __repr__(self):
return 'Stack: %s Vars: %s' % (self.stack, self.vars)
def __or__(self, other):
# Point-wise union of two frame states (used at control-flow joins).
# NOTE(review): the `self is None` branch looks unreachable here; the
# None-on-the-left case is handled via __ror__ below -- confirm.
if self is None:
return other.copy()
elif other is None:
return self.copy()
return FrameState(
self.f,
union_list(self.vars, other.vars),
union_list(self.stack, other.stack))
def __ror__(self, left):
# Supports `None | state` for not-yet-visited jump targets.
return self | left
def union(a, b):
"""Returns the union of two types or Const values.
"""
if a == b:
return a
elif not a:
return b
elif not b:
return a
# Drop Const wrappers; the union is computed over plain type constraints.
a = Const.unwrap(a)
b = Const.unwrap(b)
# TODO(robertwb): Work this into the Union code in a more generic way.
# An empty Union (e.g. an uninitialized local) acts as the identity element.
if type(a) == type(b) and element_type(a) == typehints.Union[()]:
return b
elif type(a) == type(b) and element_type(b) == typehints.Union[()]:
return a
return typehints.Union[a, b]
def finalize_hints(type_hint):
"""Sets type hint for empty data structures to Any."""
def visitor(tc, unused_arg):
# Mutates Dict constraints in place: an empty Union in either slot means
# no element was ever observed, so widen it to Any.
if isinstance(tc, typehints.DictConstraint):
empty_union = typehints.Union[()]
if tc.key_type == empty_union:
tc.key_type = Any
if tc.value_type == empty_union:
tc.value_type = Any
if isinstance(type_hint, typehints.TypeConstraint):
type_hint.visit(visitor, None)
def element_type(hint):
  """Returns the element type of a composite type.

  Args:
    hint: A type constraint (or Const wrapper) describing an iterable.

  Returns:
    The inner element type, or Any when it cannot be determined.
  """
  unwrapped = Const.unwrap(hint)
  if isinstance(unwrapped, typehints.SequenceTypeConstraint):
    return unwrapped.inner_type
  if isinstance(unwrapped, typehints.TupleHint.TupleConstraint):
    # Heterogeneous tuples iterate over the union of their member types.
    return typehints.Union[unwrapped.tuple_types]
  return Any
def key_value_types(kv_type):
  """Returns the key and value type of a KV type.

  Falls back to (Any, Any) when kv_type is not a two-element tuple hint.
  """
  # TODO(robertwb): Unions of tuples, etc.
  # TODO(robertwb): Assert?
  is_pair = (
      isinstance(kv_type, typehints.TupleHint.TupleConstraint) and
      len(kv_type.tuple_types) == 2)
  if is_pair:
    return kv_type.tuple_types
  return Any, Any
# Hard-coded return types for common builtins whose bytecode cannot be
# analyzed by infer_return_type_func.
known_return_types = {
len: int,
hash: int,
}
class BoundMethod(object):
"""Used to create a bound method when we only know the type of the instance.
"""
def __init__(self, func, type):
"""Instantiates a bound method object.
Args:
func (types.FunctionType): The method's underlying function
type (type): The class of the method.
"""
# NOTE: the parameter intentionally shadows the `type` builtin to match
# the documented signature.
self.func = func
self.type = type
def hashable(c):
  """Returns True iff c can be hashed (e.g. is usable as a dict key)."""
  try:
    hash(c)
  except TypeError:
    return False
  return True
def infer_return_type(c, input_types, debug=False, depth=5):
"""Analyses a callable to deduce its return type.
Args:
c: A Python callable to infer the return type of.
input_types: A sequence of inputs corresponding to the input types.
debug: Whether to print verbose debugging information.
depth: Maximum inspection depth during type inference.
Returns:
A TypeConstraint that that the return value of this function will (likely)
satisfy given the specified inputs.
"""
try:
if hashable(c) and c in known_return_types:
return known_return_types[c]
elif isinstance(c, types.FunctionType):
return infer_return_type_func(c, input_types, debug, depth)
elif isinstance(c, types.MethodType):
# Bound methods get their receiver prepended as the first argument.
if c.__self__ is not None:
input_types = [Const(c.__self__)] + input_types
return infer_return_type_func(c.__func__, input_types, debug, depth)
elif isinstance(c, BoundMethod):
input_types = [c.type] + input_types
return infer_return_type_func(c.func, input_types, debug, depth)
elif inspect.isclass(c):
# Calling a class yields an instance; container classes map to their
# parameterized Any hints.
if c in typehints.DISALLOWED_PRIMITIVE_TYPES:
return {
list: typehints.List[Any],
set: typehints.Set[Any],
frozenset: typehints.FrozenSet[Any],
tuple: typehints.Tuple[Any, ...],
dict: typehints.Dict[Any, Any]
}[c]
return c
elif (c == getattr and len(input_types) == 2 and
isinstance(input_types[1], Const)):
# getattr with a constant attribute name can be resolved statically.
from apache_beam.typehints import opcodes
return opcodes._getattr(input_types[0], input_types[1].value)
else:
return Any
except TypeInferenceError:
if debug:
traceback.print_exc()
return Any
except Exception:
# Unexpected errors are swallowed unless debugging is enabled.
if debug:
sys.stdout.flush()
raise
else:
return Any
def infer_return_type_func(f, input_types, debug=False, depth=0):
"""Analyses a function to deduce its return type.
Args:
f: A Python function object to infer the return type of.
input_types: A sequence of inputs corresponding to the input types.
debug: Whether to print verbose debugging information.
depth: Maximum inspection depth during type inference.
Returns:
A TypeConstraint that that the return value of this function will (likely)
satisfy given the specified inputs.
Raises:
TypeInferenceError: if no type can be inferred.
"""
if debug:
print()
print(f, id(f), input_types)
dis.dis(f)
from . import opcodes
# Table mapping opcode names (upper-cased) to their abstract implementations.
simple_ops = dict((k.upper(), v) for k, v in opcodes.__dict__.items())
co = f.__code__
code = co.co_code
end = len(code)
pc = 0
extended_arg = 0 # Python 2 only.
free = None
# Types observed at YIELD_VALUE / RETURN_VALUE instructions.
yields = set()
returns = set()
# TODO(robertwb): Default args via inspect module.
local_vars = list(input_types) + [typehints.Union[()]] * (
len(co.co_varnames) - len(input_types))
state = FrameState(f, local_vars)
# Abstract frame state known at each jump target (None = not yet reached).
states = collections.defaultdict(lambda: None)
# Count of backward-jump executions per instruction, bounding loop analysis.
jumps = collections.defaultdict(int)
# In Python 3, use dis library functions to disassemble bytecode and handle
# EXTENDED_ARGs.
is_py3 = sys.version_info[0] == 3
if is_py3:
ofs_table = {} # offset -> instruction
for instruction in dis.get_instructions(f):
ofs_table[instruction.offset] = instruction
# Python 2 - 3.5: 1 byte opcode + optional 2 byte arg (1 or 3 bytes).
# Python 3.6+: 1 byte opcode + 1 byte arg (2 bytes, arg may be ignored).
if sys.version_info >= (3, 6):
inst_size = 2
opt_arg_size = 0
else:
inst_size = 1
opt_arg_size = 2
last_pc = -1
# Main abstract-interpretation loop over the bytecode.
while pc < end: # pylint: disable=too-many-nested-blocks
start = pc
if is_py3:
instruction = ofs_table[pc]
op = instruction.opcode
else:
op = ord(code[pc])
if debug:
print('-->' if pc == last_pc else ' ', end=' ')
print(repr(pc).rjust(4), end=' ')
print(dis.opname[op].ljust(20), end=' ')
pc += inst_size
if op >= dis.HAVE_ARGUMENT:
if is_py3:
arg = instruction.arg
else:
arg = ord(code[pc]) + ord(code[pc + 1]) * 256 + extended_arg
extended_arg = 0
pc += opt_arg_size
if op == dis.EXTENDED_ARG:
extended_arg = arg * 65536
if debug:
print(str(arg).rjust(5), end=' ')
if op in dis.hasconst:
print('(' + repr(co.co_consts[arg]) + ')', end=' ')
elif op in dis.hasname:
print('(' + co.co_names[arg] + ')', end=' ')
elif op in dis.hasjrel:
print('(to ' + repr(pc + arg) + ')', end=' ')
elif op in dis.haslocal:
print('(' + co.co_varnames[arg] + ')', end=' ')
elif op in dis.hascompare:
print('(' + dis.cmp_op[arg] + ')', end=' ')
elif op in dis.hasfree:
if free is None:
free = co.co_cellvars + co.co_freevars
print('(' + free[arg] + ')', end=' ')
# Actually emulate the op.
if state is None and states[start] is None:
# No control reaches here (yet).
if debug:
print()
continue
# Merge any state propagated to this offset via jumps.
state |= states[start]
opname = dis.opname[op]
jmp = jmp_state = None
if opname.startswith('CALL_FUNCTION'):
if sys.version_info < (3, 6):
# Each keyword takes up two arguments on the stack (name and value).
standard_args = (arg & 0xFF) + 2 * (arg >> 8)
var_args = 'VAR' in opname
kw_args = 'KW' in opname
pop_count = standard_args + var_args + kw_args + 1
if depth <= 0:
return_type = Any
elif arg >> 8:
if not var_args and not kw_args and not arg & 0xFF:
# Keywords only, maybe it's a call to Row.
if isinstance(state.stack[-pop_count], Const):
from apache_beam.pvalue import Row
if state.stack[-pop_count].value == Row:
fields = state.stack[-pop_count + 1::2]
types = state.stack[-pop_count + 2::2]
return_type = row_type.RowTypeConstraint(
zip([fld.value for fld in fields], Const.unwrap_all(types)))
else:
return_type = Any
else:
# TODO(robertwb): Handle this case.
return_type = Any
elif isinstance(state.stack[-pop_count], Const):
# TODO(robertwb): Handle this better.
if var_args or kw_args:
state.stack[-1] = Any
state.stack[-var_args - kw_args] = Any
return_type = infer_return_type(
state.stack[-pop_count].value,
state.stack[1 - pop_count:],
debug=debug,
depth=depth - 1)
else:
return_type = Any
state.stack[-pop_count:] = [return_type]
else: # Python 3.6+
if opname == 'CALL_FUNCTION':
pop_count = arg + 1
if depth <= 0:
return_type = Any
elif isinstance(state.stack[-pop_count], Const):
# Recursively infer the callee's return type.
return_type = infer_return_type(
state.stack[-pop_count].value,
state.stack[1 - pop_count:],
debug=debug,
depth=depth - 1)
else:
return_type = Any
elif opname == 'CALL_FUNCTION_KW':
# TODO(udim): Handle keyword arguments. Requires passing them by name
# to infer_return_type.
pop_count = arg + 2
if isinstance(state.stack[-pop_count], Const):
from apache_beam.pvalue import Row
if state.stack[-pop_count].value == Row:
fields = state.stack[-1].value
return_type = row_type.RowTypeConstraint(
zip(fields, Const.unwrap_all(state.stack[-pop_count + 1:-1])))
else:
return_type = Any
else:
return_type = Any
elif opname == 'CALL_FUNCTION_EX':
# stack[-has_kwargs]: Map of keyword args.
# stack[-1 - has_kwargs]: Iterable of positional args.
# stack[-2 - has_kwargs]: Function to call.
has_kwargs = arg & 1 # type: int
pop_count = has_kwargs + 2
if has_kwargs:
# TODO(udim): Unimplemented. Requires same functionality as a
# CALL_FUNCTION_KW implementation.
return_type = Any
else:
args = state.stack[-1]
_callable = state.stack[-2]
if isinstance(args, typehints.ListConstraint):
# Case where there's a single var_arg argument.
args = [args]
elif isinstance(args, typehints.TupleConstraint):
args = list(args._inner_types())
return_type = infer_return_type(
_callable.value, args, debug=debug, depth=depth - 1)
else:
raise TypeInferenceError('unable to handle %s' % opname)
state.stack[-pop_count:] = [return_type]
elif opname == 'CALL_METHOD':
pop_count = 1 + arg
# LOAD_METHOD will return a non-Const (Any) if loading from an Any.
if isinstance(state.stack[-pop_count], Const) and depth > 0:
return_type = infer_return_type(
state.stack[-pop_count].value,
state.stack[1 - pop_count:],
debug=debug,
depth=depth - 1)
else:
return_type = typehints.Any
state.stack[-pop_count:] = [return_type]
elif opname in simple_ops:
if debug:
print("Executing simple op " + opname)
simple_ops[opname](state, arg)
elif opname == 'RETURN_VALUE':
returns.add(state.stack[-1])
state = None
elif opname == 'YIELD_VALUE':
yields.add(state.stack[-1])
elif opname == 'JUMP_FORWARD':
jmp = pc + arg
jmp_state = state
state = None
elif opname == 'JUMP_ABSOLUTE':
jmp = arg
jmp_state = state
state = None
elif opname in ('POP_JUMP_IF_TRUE', 'POP_JUMP_IF_FALSE'):
state.stack.pop()
jmp = arg
jmp_state = state.copy()
elif opname in ('JUMP_IF_TRUE_OR_POP', 'JUMP_IF_FALSE_OR_POP'):
jmp = arg
jmp_state = state.copy()
state.stack.pop()
elif opname == 'FOR_ITER':
jmp = pc + arg
jmp_state = state.copy()
jmp_state.stack.pop()
state.stack.append(element_type(state.stack[-1]))
else:
raise TypeInferenceError('unable to handle %s' % opname)
if jmp is not None:
# TODO(robertwb): Is this guaranteed to converge?
# Merge the jumped state into the target; re-run backward jumps at most
# 5 times per instruction to approximate loop fixed points.
new_state = states[jmp] | jmp_state
if jmp < pc and new_state != states[jmp] and jumps[pc] < 5:
jumps[pc] += 1
pc = jmp
states[jmp] = new_state
if debug:
print()
print(state)
pprint.pprint(dict(item for item in states.items() if item[1]))
# A generator is typed by its yields; otherwise union all return types.
if yields:
result = typehints.Iterable[reduce(union, Const.unwrap_all(yields))]
else:
result = reduce(union, Const.unwrap_all(returns))
finalize_hints(result)
if debug:
print(f, id(f), input_types, '->', result)
return result
# pytype: skip-file
from __future__ import absolute_import
from abc import ABCMeta
from abc import abstractmethod
from builtins import object
from future.utils import with_metaclass
from apache_beam.portability.api import beam_runner_api_pb2
__all__ = [
'TimeDomain',
]
class TimeDomain(object):
  """Time domain for streaming timers.

  Provides string constants naming each domain plus helpers to validate
  them and map them onto the runner API proto enum.
  """
  WATERMARK = 'WATERMARK'
  REAL_TIME = 'REAL_TIME'
  DEPENDENT_REAL_TIME = 'DEPENDENT_REAL_TIME'

  # Mapping from the string constants above to runner API enum values.
  _RUNNER_API_MAPPING = {
      WATERMARK: beam_runner_api_pb2.TimeDomain.EVENT_TIME,
      REAL_TIME: beam_runner_api_pb2.TimeDomain.PROCESSING_TIME,
      DEPENDENT_REAL_TIME: beam_runner_api_pb2.TimeDomain.
      SYNCHRONIZED_PROCESSING_TIME,
  }

  @staticmethod
  def from_string(domain):
    """Validate *domain*, returning it unchanged, or raise ValueError."""
    if domain not in (TimeDomain.WATERMARK,
                      TimeDomain.REAL_TIME,
                      TimeDomain.DEPENDENT_REAL_TIME):
      raise ValueError('Unknown time domain: %s' % domain)
    return domain

  @staticmethod
  def to_runner_api(domain):
    """Translate a time-domain string into its runner API enum value."""
    return TimeDomain._RUNNER_API_MAPPING[domain]

  @staticmethod
  def is_event_time(domain):
    """Return True iff *domain* names the event-time (watermark) domain."""
    return TimeDomain.from_string(domain) == TimeDomain.WATERMARK
class TimestampCombinerImpl(with_metaclass(ABCMeta, object)):  # type: ignore[misc]
  """Implementation of TimestampCombiner.

  Subclasses define how per-element input timestamps are turned into a
  single output timestamp for a window.
  """
  @abstractmethod
  def assign_output_time(self, window, input_timestamp):
    raise NotImplementedError

  @abstractmethod
  def combine(self, output_timestamp, other_output_timestamp):
    raise NotImplementedError

  def combine_all(self, merging_timestamps):
    """Reduce an iterable of (possibly None) timestamps via combine()."""
    result = None
    for ts in merging_timestamps:
      if ts is None:
        continue
      # First non-None timestamp seeds the fold; later ones are combined in.
      result = ts if result is None else self.combine(result, ts)
    return result

  def merge(self, unused_result_window, merging_timestamps):
    """Default merge: simply combine all the given timestamps."""
    return self.combine_all(merging_timestamps)
class DependsOnlyOnWindow(with_metaclass(ABCMeta, TimestampCombinerImpl)):  # type: ignore[misc]
  """TimestampCombinerImpl that only depends on the window."""
  def merge(self, result_window, unused_merging_timestamps):
    # The incoming timestamps are irrelevant here: the result window alone
    # determines the output time, so recompute it from the window.
    return self.assign_output_time(result_window, None)
class OutputAtEarliestInputTimestampImpl(TimestampCombinerImpl):
  """TimestampCombinerImpl outputting at earliest input timestamp."""
  def assign_output_time(self, window, input_timestamp):
    # The input timestamp passes through unchanged.
    return input_timestamp

  def combine(self, output_timestamp, other_output_timestamp):
    """Keep whichever of the two timestamps is earlier."""
    if output_timestamp <= other_output_timestamp:
      return output_timestamp
    return other_output_timestamp
class OutputAtEarliestTransformedInputTimestampImpl(TimestampCombinerImpl):
  """TimestampCombinerImpl outputting at earliest transformed input time."""
  def __init__(self, window_fn):
    # The window_fn supplies the per-window timestamp transformation.
    self.window_fn = window_fn

  def assign_output_time(self, window, input_timestamp):
    # Delegate to the window_fn so it can shift the timestamp.
    return self.window_fn.get_transformed_output_time(window, input_timestamp)

  def combine(self, output_timestamp, other_output_timestamp):
    # Earliest timestamp wins.
    return min((output_timestamp, other_output_timestamp))
class OutputAtLatestInputTimestampImpl(TimestampCombinerImpl):
  """TimestampCombinerImpl outputting at latest input timestamp."""
  def assign_output_time(self, window, input_timestamp):
    # The input timestamp passes through unchanged.
    return input_timestamp

  def combine(self, output_timestamp, other_output_timestamp):
    # Latest timestamp wins.
    if output_timestamp >= other_output_timestamp:
      return output_timestamp
    return other_output_timestamp
class OutputAtEndOfWindowImpl(DependsOnlyOnWindow):
  """TimestampCombinerImpl outputting at end of window."""
  def assign_output_time(self, window, unused_input_timestamp):
    # The window's maximum timestamp marks its end.
    return window.max_timestamp()

  def combine(self, output_timestamp, other_output_timestamp):
    # Timestamps within one window share the same end; take the later one.
    return max([output_timestamp, other_output_timestamp])
from __future__ import absolute_import
from __future__ import division
import math
import time
import apache_beam as beam
import apache_beam.runners.sdf_utils as sdf_utils
from apache_beam.io.restriction_trackers import OffsetRange
from apache_beam.io.restriction_trackers import OffsetRestrictionTracker
from apache_beam.transforms import core
from apache_beam.transforms import window
from apache_beam.transforms.ptransform import PTransform
from apache_beam.transforms.window import TimestampedValue
from apache_beam.utils import timestamp
from apache_beam.utils.timestamp import MAX_TIMESTAMP
from apache_beam.utils.timestamp import Timestamp
class ImpulseSeqGenRestrictionProvider(core.RestrictionProvider):
  """RestrictionProvider over the index range of outputs to generate."""
  def initial_restriction(self, element):
    """Compute the offset range ``[0, number_of_outputs)`` for *element*.

    *element* is a ``(start, end, interval)`` tuple; the number of outputs
    is the interval count that fits between start and end, rounded up.
    """
    start, end, interval = element
    assert start <= end
    assert interval > 0
    return OffsetRange(0, math.ceil((end - start) / interval))

  def create_tracker(self, restriction):
    return OffsetRestrictionTracker(restriction)

  def restriction_size(self, unused_element, restriction):
    return restriction.size()
class ImpulseSeqGenDoFn(beam.DoFn):
  '''
  ImpulseSeqGenDoFn fn receives tuple elements with three parts:

  * first_timestamp = first timestamp to output element for.
  * last_timestamp = last timestamp/time to output element for.
  * fire_interval = how often to fire an element.

  For each input element received, ImpulseSeqGenDoFn fn will start
  generating output elements in following pattern:

  * if element timestamp is less than current runtime then output element.
  * if element timestamp is greater than current runtime, wait until next
    element timestamp.

  ImpulseSeqGenDoFn can't guarantee that each element is output at exact time.
  ImpulseSeqGenDoFn guarantees that elements would not be output prior to
  given runtime timestamp.
  '''
  def process(
      self,
      element,
      restriction_tracker=beam.DoFn.RestrictionParam(
          ImpulseSeqGenRestrictionProvider())):
    '''
    :param element: (start_timestamp, end_timestamp, interval)
    :param restriction_tracker: tracks which output indices have been claimed
    :return: yields elements at processing real-time intervals with value of
      target output timestamp for the element.
    '''
    start, _, interval = element

    assert isinstance(restriction_tracker, sdf_utils.RestrictionTrackerView)

    # The restriction's current offset says which output in the sequence is
    # due next; its target timestamp is start + index * interval.
    current_output_index = restriction_tracker.current_restriction().start
    current_output_timestamp = start + interval * current_output_index
    current_time = time.time()

    # Emit every output whose target timestamp has already passed.
    while current_output_timestamp <= current_time:
      if restriction_tracker.try_claim(current_output_index):
        yield current_output_timestamp
        current_output_index += 1
        current_output_timestamp = start + interval * current_output_index
        current_time = time.time()
      else:
        # Claim failed: the restriction was split or exhausted; stop here.
        return

    # The next target timestamp lies in the future. Checkpoint and ask the
    # runner to resume this restriction at that time.
    restriction_tracker.defer_remainder(
        timestamp.Timestamp(current_output_timestamp))
class PeriodicSequence(PTransform):
  '''
  PeriodicSequence transform receives tuple elements with three parts:

  * first_timestamp = first timestamp to output element for.
  * last_timestamp = last timestamp/time to output element for.
  * fire_interval = how often to fire an element.

  For each input element received, PeriodicSequence transform will start
  generating output elements in following pattern:

  * if element timestamp is less than current runtime then output element.
  * if element timestamp is greater than current runtime, wait until next
    element timestamp.

  PeriodicSequence can't guarantee that each element is output at exact time.
  PeriodicSequence guarantees that elements would not be output prior to given
  runtime timestamp.
  '''
  def __init__(self):
    # Fix: this was previously misspelled `__init_` and therefore never
    # invoked (the inherited PTransform.__init__ ran instead). The corrected
    # name delegates explicitly, preserving prior construction behavior.
    super(PeriodicSequence, self).__init__()

  def expand(self, pcoll):
    """Expand each (start, stop, interval) element into a timestamped stream."""
    return (
        pcoll
        | 'GenSequence' >> beam.ParDo(ImpulseSeqGenDoFn())
        | 'MapToTimestamped' >> beam.Map(lambda tt: TimestampedValue(tt, tt)))
class PeriodicImpulse(PTransform):
  '''
  PeriodicImpulse transform generates an infinite sequence of elements with
  given runtime interval.

  PeriodicImpulse transform behaves same as {@link PeriodicSequence} transform,
  but can be used as first transform in pipeline.
  '''
  def __init__(
      self,
      start_timestamp=None,
      stop_timestamp=MAX_TIMESTAMP,
      fire_interval=360.0,
      apply_windowing=False):
    '''
    :param start_timestamp: Timestamp for first element. Defaults to the
      moment this transform is constructed.
    :param stop_timestamp: Timestamp after which no elements will be output.
    :param fire_interval: Interval at which to output elements.
    :param apply_windowing: Whether each element should be assigned to
      individual window. If false, all elements will reside in global window.
    '''
    # Fix: the previous default of `start_timestamp=Timestamp.now()` was
    # evaluated once at module import time, so every pipeline constructed
    # later in the process silently started from a stale timestamp. Using a
    # None sentinel defers evaluation to construction time.
    self.start_ts = (
        Timestamp.now() if start_timestamp is None else start_timestamp)
    self.stop_ts = stop_timestamp
    self.interval = fire_interval
    self.apply_windowing = apply_windowing

  def expand(self, pbegin):
    """Build the impulse element and expand it into a periodic stream."""
    result = (
        pbegin
        | 'ImpulseElement' >> beam.Create(
            [(self.start_ts, self.stop_ts, self.interval)])
        | 'GenSequence' >> beam.ParDo(ImpulseSeqGenDoFn())
        | 'MapToTimestamped' >> beam.Map(lambda tt: TimestampedValue(tt, tt)))
    if self.apply_windowing:
      # One fixed window per firing interval, so each element lands in its
      # own window rather than the global window.
      result = result | 'ApplyWindowing' >> beam.WindowInto(
          window.FixedWindows(self.interval))
    return result
# pytype: skip-file
from __future__ import absolute_import
from __future__ import division
from builtins import map
from builtins import next
from builtins import range
from apache_beam.io import iobase
from apache_beam.transforms.core import Create
class _CreateSource(iobase.BoundedSource):
  """Internal source that is used by Create()"""
  def __init__(self, serialized_values, coder):
    """
    :param serialized_values: list of already-encoded (bytes) elements.
    :param coder: coder used to decode the serialized values on read.
    """
    # Fix: previously `_serialized_values` and `_total_size` were each
    # assigned twice (initialized to empty/zero, then immediately
    # overwritten); the dead first assignments are removed.
    self._coder = coder
    self._serialized_values = serialized_values
    # Total encoded size in bytes; drives estimate_size() and split sizing.
    self._total_size = sum(map(len, serialized_values))

  def read(self, range_tracker):
    """Decode and yield the values in the claimed offset range."""
    start_position = range_tracker.start_position()
    current_position = start_position

    def split_points_unclaimed(stop_position):
      # Every remaining element beyond the current one is a split point.
      if current_position >= stop_position:
        return 0
      return stop_position - current_position - 1

    range_tracker.set_split_points_unclaimed_callback(split_points_unclaimed)

    element_iter = iter(self._serialized_values[start_position:])
    for i in range(start_position, range_tracker.stop_position()):
      if not range_tracker.try_claim(i):
        return
      current_position = i
      yield self._coder.decode(next(element_iter))

  def split(self, desired_bundle_size, start_position=None, stop_position=None):
    """Split into bundles of roughly ``desired_bundle_size`` encoded bytes."""
    if len(self._serialized_values) < 2:
      # Too few elements to split; emit the whole source as a single bundle.
      yield iobase.SourceBundle(
          weight=0,
          source=self,
          start_position=0,
          stop_position=len(self._serialized_values))
    else:
      if start_position is None:
        start_position = 0
      if stop_position is None:
        stop_position = len(self._serialized_values)

      # Fix: guard against all-empty encodings, which previously caused a
      # ZeroDivisionError below; treat each value as at least one byte.
      avg_size_per_value = max(
          self._total_size // len(self._serialized_values), 1)
      num_values_per_split = max(
          int(desired_bundle_size // avg_size_per_value), 1)

      start = start_position
      while start < stop_position:
        end = min(start + num_values_per_split, stop_position)
        remaining = stop_position - end
        # Avoid having a too small bundle at the end.
        if remaining < (num_values_per_split // 4):
          end = stop_position
        sub_source = Create._create_source(
            self._serialized_values[start:end], self._coder)

        yield iobase.SourceBundle(
            weight=(end - start),
            source=sub_source,
            start_position=0,
            stop_position=(end - start))

        start = end

  def get_range_tracker(self, start_position, stop_position):
    """Return an OffsetRangeTracker over the (defaulted) element range."""
    if start_position is None:
      start_position = 0
    if stop_position is None:
      stop_position = len(self._serialized_values)

    from apache_beam import io
    return io.OffsetRangeTracker(start_position, stop_position)

  def estimate_size(self):
    # Exact, since the encoded bytes are already in memory.
    return self._total_size
# pytype: skip-file
from __future__ import absolute_import
from __future__ import print_function
import contextlib
import copy
import functools
import sys
import threading
from typing import ByteString
from typing import Dict
import grpc
from apache_beam import pvalue
from apache_beam.coders import RowCoder
from apache_beam.portability import common_urns
from apache_beam.portability.api import beam_artifact_api_pb2_grpc
from apache_beam.portability.api import beam_expansion_api_pb2
from apache_beam.portability.api import beam_expansion_api_pb2_grpc
from apache_beam.portability.api import beam_runner_api_pb2
from apache_beam.portability.api.external_transforms_pb2 import ExternalConfigurationPayload
from apache_beam.runners import pipeline_context
from apache_beam.runners.portability import artifact_service
from apache_beam.transforms import ptransform
from apache_beam.typehints.native_type_compatibility import convert_to_typing_type
from apache_beam.typehints.schemas import named_fields_to_schema
from apache_beam.typehints.schemas import named_tuple_from_schema
from apache_beam.typehints.schemas import named_tuple_to_schema
from apache_beam.typehints.trivial_inference import instance_to_type
from apache_beam.typehints.typehints import Union
from apache_beam.typehints.typehints import UnionConstraint
from apache_beam.utils import subprocess_server
DEFAULT_EXPANSION_SERVICE = 'localhost:8097'
def _is_optional_or_none(typehint):
  """True iff *typehint* is NoneType or a Union containing NoneType."""
  if isinstance(typehint, UnionConstraint):
    return type(None) in typehint.union_types
  return typehint is type(None)
def _strip_optional(typehint):
  """Remove NoneType from a Union typehint; non-optional hints pass through."""
  if not _is_optional_or_none(typehint):
    return typehint
  remaining = typehint.union_types.difference({type(None)})
  if len(remaining) == 1:
    # A single surviving member collapses the Union to that type.
    (only_type, ) = remaining
    return only_type
  return Union[remaining]
def iter_urns(coder, context=None):
  """Yield the urn of *coder*, then (recursively) its component coders'."""
  yield coder.to_runner_api_parameter(context)[0]
  for component in coder._get_component_coders():
    for sub_urn in iter_urns(component, context):
      yield sub_urn
class PayloadBuilder(object):
  """
  Abstract base class for building payloads to pass to ExternalTransform.
  """
  def build(self):
    """
    :return: ExternalConfigurationPayload
    """
    raise NotImplementedError

  def payload(self):
    """
    The serialized ExternalConfigurationPayload

    :return: bytes
    """
    built = self.build()
    return built.SerializeToString()
class SchemaBasedPayloadBuilder(PayloadBuilder):
  """
  Base class for building payloads based on a schema that provides
  type information for each configuration value to encode.
  """
  def _get_named_tuple_instance(self):
    raise NotImplementedError()

  def build(self):
    """Encode the config named tuple as a schema'd row payload."""
    named_tuple = self._get_named_tuple_instance()
    schema = named_tuple_to_schema(type(named_tuple))
    encoded = RowCoder(schema).encode(named_tuple)
    return ExternalConfigurationPayload(schema=schema, payload=encoded)
class ImplicitSchemaPayloadBuilder(SchemaBasedPayloadBuilder):
  """
  Build a payload that generates a schema from the provided values.
  """
  def __init__(self, values):
    # Mapping of config key -> value; field types are inferred at build time.
    self._values = values

  def _get_named_tuple_instance(self):
    # omit fields with value=None since we can't infer their type
    values = {
        key: value
        for key, value in self._values.items() if value is not None
    }

    # In python 2 named_fields_to_schema will not accept str because its
    # ambiguous. This converts str hints to ByteString recursively so its clear
    # we intend to use BYTES.
    # TODO(BEAM-7372): Remove coercion to ByteString
    def coerce_str_to_bytes(typ):
      if typ == str:
        return ByteString
      elif hasattr(typ, '__args__') and hasattr(typ, '__origin__'):
        # Create a new type rather than modifying the existing one
        typ = typ.__origin__[tuple(map(coerce_str_to_bytes, typ.__args__))]
      return typ

    # On Python 3 `str` is unambiguous, so the coercion above is disabled.
    if sys.version_info[0] >= 3:
      coerce_str_to_bytes = lambda x: x

    schema = named_fields_to_schema([(
        key,
        coerce_str_to_bytes(convert_to_typing_type(instance_to_type(value))))
                                     for key,
                                     value in values.items()])
    return named_tuple_from_schema(schema)(**values)
class NamedTupleBasedPayloadBuilder(SchemaBasedPayloadBuilder):
  """
  Build a payload based on a NamedTuple schema.
  """
  def __init__(self, tuple_instance):
    """
    :param tuple_instance: an instance of a typing.NamedTuple
    """
    super(NamedTupleBasedPayloadBuilder, self).__init__()
    self._tuple_instance = tuple_instance

  def _get_named_tuple_instance(self):
    # The caller supplied a fully-typed named tuple; use it as-is.
    return self._tuple_instance
class AnnotationBasedPayloadBuilder(SchemaBasedPayloadBuilder):
  """
  Build a payload based on an external transform's type annotations.

  Supported in python 3 only.
  """
  def __init__(self, transform, **values):
    """
    :param transform: a PTransform instance or class. type annotations will
        be gathered from its __init__ method
    :param values: values to encode
    """
    self._transform = transform
    self._values = values

  def _get_named_tuple_instance(self):
    # Only annotations for parameters that were actually supplied are used.
    annotations = self._transform.__init__.__annotations__
    fields = [(name, convert_to_typing_type(hint)) for name,
              hint in annotations.items() if name in self._values]
    schema = named_fields_to_schema(fields)
    return named_tuple_from_schema(schema)(**self._values)
class DataclassBasedPayloadBuilder(SchemaBasedPayloadBuilder):
  """
  Build a payload based on an external transform that uses dataclasses.

  Supported in python 3 only.
  """
  def __init__(self, transform):
    """
    :param transform: a dataclass-decorated PTransform instance from which to
        gather type annotations and values
    """
    self._transform = transform

  def _get_named_tuple_instance(self):
    import dataclasses
    # The dataclass declaration supplies both field names and types.
    fields = [(f.name, convert_to_typing_type(f.type))
              for f in dataclasses.fields(self._transform)]
    schema = named_fields_to_schema(fields)
    return named_tuple_from_schema(schema)(
        **dataclasses.asdict(self._transform))
class ExternalTransform(ptransform.PTransform):
  """
  External provides a cross-language transform via expansion services in
  foreign SDKs.

  Experimental; no backwards compatibility guarantees.
  """
  _namespace_counter = 0

  # Variable name _namespace conflicts with DisplayData._namespace so we use
  # name _external_namespace here.
  _external_namespace = threading.local()

  # Prefix for the synthetic impulse transforms injected as input producers.
  _IMPULSE_PREFIX = 'impulse'

  def __init__(self, urn, payload, expansion_service=None):
    """Wrapper for an external transform with the given urn and payload.

    :param urn: the unique beam identifier for this transform
    :param payload: the payload, either as a byte string or a PayloadBuilder
    :param expansion_service: an expansion service implementing the beam
      ExpansionService protocol, either as an object with an Expand method
      or an address (as a str) to a grpc server that provides this method.
    """
    expansion_service = expansion_service or DEFAULT_EXPANSION_SERVICE
    self._urn = urn
    self._payload = (
        payload.payload() if isinstance(payload, PayloadBuilder) else payload)
    self._expansion_service = expansion_service
    self._external_namespace = self._fresh_namespace()
    self._inputs = {}  # type: Dict[str, pvalue.PCollection]
    # NOTE(review): expand() assigns self._outputs (plural); this singular
    # attribute appears otherwise unused in this class -- confirm.
    self._output = {}  # type: Dict[str, pvalue.PCollection]

  def __post_init__(self, expansion_service):
    """
    This will only be invoked if ExternalTransform is used as a base class
    for a class decorated with dataclasses.dataclass
    """
    ExternalTransform.__init__(
        self, self.URN, DataclassBasedPayloadBuilder(self), expansion_service)

  def default_label(self):
    # Label by class and urn, e.g. "ExternalTransform(beam:...)".
    return '%s(%s)' % (self.__class__.__name__, self._urn)

  @classmethod
  def get_local_namespace(cls):
    # Thread-local namespace prefix used when scoping component ids.
    return getattr(cls._external_namespace, 'value', 'external')

  @classmethod
  @contextlib.contextmanager
  def outer_namespace(cls, namespace):
    # Temporarily override the thread-local namespace prefix.
    prev = cls.get_local_namespace()
    cls._external_namespace.value = namespace
    yield
    cls._external_namespace.value = prev

  @classmethod
  def _fresh_namespace(cls):
    # type: () -> str
    # Each instance gets a unique namespace so component ids from
    # different expansions cannot collide.
    ExternalTransform._namespace_counter += 1
    return '%s_%d' % (cls.get_local_namespace(), cls._namespace_counter)

  def expand(self, pvalueish):
    # type: (pvalue.PCollection) -> pvalue.PCollection
    # Normalize the input(s) into a tag -> PCollection mapping.
    if isinstance(pvalueish, pvalue.PBegin):
      self._inputs = {}
    elif isinstance(pvalueish, (list, tuple)):
      self._inputs = {str(ix): pvalue for ix, pvalue in enumerate(pvalueish)}
    elif isinstance(pvalueish, dict):
      self._inputs = pvalueish
    else:
      self._inputs = {'input': pvalueish}
    pipeline = (
        next(iter(self._inputs.values())).pipeline
        if self._inputs else pvalueish.pipeline)
    context = pipeline_context.PipelineContext(
        component_id_map=pipeline.component_id_map)
    transform_proto = beam_runner_api_pb2.PTransform(
        unique_name=pipeline._current_transform().full_label,
        spec=beam_runner_api_pb2.FunctionSpec(
            urn=self._urn, payload=self._payload))
    for tag, pcoll in self._inputs.items():
      transform_proto.inputs[tag] = context.pcollections.get_id(pcoll)
      # Conversion to/from proto assumes producers.
      # TODO: Possibly loosen this.
      context.transforms.put_proto(
          '%s_%s' % (self._IMPULSE_PREFIX, tag),
          beam_runner_api_pb2.PTransform(
              unique_name='%s_%s' % (self._IMPULSE_PREFIX, tag),
              spec=beam_runner_api_pb2.FunctionSpec(
                  urn=common_urns.primitives.IMPULSE.urn),
              outputs={'out': transform_proto.inputs[tag]}))
    components = context.to_runner_api()
    request = beam_expansion_api_pb2.ExpansionRequest(
        components=components,
        namespace=self._external_namespace,  # type: ignore  # mypy thinks self._namespace is threading.local
        transform=transform_proto)

    with self._service() as service:
      response = service.Expand(request)
      if response.error:
        raise RuntimeError(response.error)
      self._expanded_components = response.components
      # If any environment declares dependencies, fetch those artifacts
      # locally while the service connection is still open.
      if any(env.dependencies
             for env in self._expanded_components.environments.values()):
        self._expanded_components = self._resolve_artifacts(
            self._expanded_components,
            service.artifact_service(),
            pipeline.local_tempdir)

    self._expanded_transform = response.transform
    self._expanded_requirements = response.requirements
    result_context = pipeline_context.PipelineContext(response.components)

    def fix_output(pcoll, tag):
      # Re-home the expanded output PCollections onto this pipeline.
      pcoll.pipeline = pipeline
      pcoll.tag = tag
      return pcoll

    self._outputs = {
        tag: fix_output(result_context.pcollections.get_by_id(pcoll_id), tag)
        for tag,
        pcoll_id in self._expanded_transform.outputs.items()
    }

    return self._output_to_pvalueish(self._outputs)

  @contextlib.contextmanager
  def _service(self):
    # Yields an object with an Expand method, however the expansion
    # service was specified (address string, stub-like object, or
    # context manager such as JavaJarExpansionService).
    if isinstance(self._expansion_service, str):
      channel_options = [("grpc.max_receive_message_length", -1),
                         ("grpc.max_send_message_length", -1)]
      if hasattr(grpc, 'local_channel_credentials'):
        # Some environments may not support insecure channels. Hence use a
        # secure channel with local credentials here.
        # TODO: update this to support secure non-local channels.
        channel_factory_fn = functools.partial(
            grpc.secure_channel,
            self._expansion_service,
            grpc.local_channel_credentials(),
            options=channel_options)
      else:
        # local_channel_credentials is an experimental API which is unsupported
        # by older versions of grpc which may be pulled in due to other project
        # dependencies.
        channel_factory_fn = functools.partial(
            grpc.insecure_channel,
            self._expansion_service,
            options=channel_options)
      with channel_factory_fn() as channel:
        yield ExpansionAndArtifactRetrievalStub(channel)
    elif hasattr(self._expansion_service, 'Expand'):
      yield self._expansion_service
    else:
      with self._expansion_service as stub:
        yield stub

  def _resolve_artifacts(self, components, service, dest):
    # Download each environment's dependencies into dest and rewrite the
    # dependency lists to point at the resolved local artifacts.
    for env in components.environments.values():
      if env.dependencies:
        resolved = list(
            artifact_service.resolve_artifacts(env.dependencies, service, dest))
        del env.dependencies[:]
        env.dependencies.extend(resolved)
    return components

  def _output_to_pvalueish(self, output_dict):
    # A single output is unwrapped; multiple outputs are returned as a dict.
    if len(output_dict) == 1:
      return next(iter(output_dict.values()))
    else:
      return output_dict

  def to_runner_api_transform(self, context, full_label):
    # Splice the expanded proto components into this pipeline's context,
    # renaming the expansion's input/output PCollection ids onto ours.
    pcoll_renames = {}
    renamed_tag_seen = False
    for tag, pcoll in self._inputs.items():
      if tag not in self._expanded_transform.inputs:
        if renamed_tag_seen:
          raise RuntimeError(
              'Ambiguity due to non-preserved tags: %s vs %s' % (
                  sorted(self._expanded_transform.inputs.keys()),
                  sorted(self._inputs.keys())))
        else:
          renamed_tag_seen = True
          tag, = self._expanded_transform.inputs.keys()
      pcoll_renames[self._expanded_transform.inputs[tag]] = (
          context.pcollections.get_id(pcoll))
    for tag, pcoll in self._outputs.items():
      pcoll_renames[self._expanded_transform.outputs[tag]] = (
          context.pcollections.get_id(pcoll))

    def _equivalent(coder1, coder2):
      return coder1 == coder2 or _normalize(coder1) == _normalize(coder2)

    def _normalize(coder_proto):
      normalized = copy.copy(coder_proto)
      normalized.spec.environment_id = ''
      # TODO(robertwb): Normalize components as well.
      return normalized

    for id, proto in self._expanded_components.coders.items():
      if id.startswith(self._external_namespace):
        context.coders.put_proto(id, proto)
      elif id in context.coders:
        if not _equivalent(context.coders._id_to_proto[id], proto):
          raise RuntimeError(
              'Re-used coder id: %s\n%s\n%s' %
              (id, context.coders._id_to_proto[id], proto))
      else:
        context.coders.put_proto(id, proto)
    for id, proto in self._expanded_components.windowing_strategies.items():
      if id.startswith(self._external_namespace):
        context.windowing_strategies.put_proto(id, proto)
    for id, proto in self._expanded_components.environments.items():
      if id.startswith(self._external_namespace):
        context.environments.put_proto(id, proto)
    for id, proto in self._expanded_components.pcollections.items():
      id = pcoll_renames.get(id, id)
      if id not in context.pcollections._id_to_obj.keys():
        context.pcollections.put_proto(id, proto)

    for id, proto in self._expanded_components.transforms.items():
      if id.startswith(self._IMPULSE_PREFIX):
        # Our fake inputs.
        continue
      assert id.startswith(
          self._external_namespace), (id, self._external_namespace)
      new_proto = beam_runner_api_pb2.PTransform(
          unique_name=proto.unique_name,
          # If URN is not set this is an empty spec.
          spec=proto.spec if proto.spec.urn else None,
          subtransforms=proto.subtransforms,
          inputs={
              tag: pcoll_renames.get(pcoll, pcoll)
              for tag,
              pcoll in proto.inputs.items()
          },
          outputs={
              tag: pcoll_renames.get(pcoll, pcoll)
              for tag,
              pcoll in proto.outputs.items()
          },
          environment_id=proto.environment_id)
      context.transforms.put_proto(id, new_proto)
    for requirement in self._expanded_requirements:
      context.add_requirement(requirement)

    return beam_runner_api_pb2.PTransform(
        unique_name=full_label,
        spec=self._expanded_transform.spec,
        subtransforms=self._expanded_transform.subtransforms,
        inputs={
            tag: pcoll_renames.get(pcoll, pcoll)
            for tag,
            pcoll in self._expanded_transform.inputs.items()
        },
        outputs={
            tag: pcoll_renames.get(pcoll, pcoll)
            for tag,
            pcoll in self._expanded_transform.outputs.items()
        },
        environment_id=self._expanded_transform.environment_id)
class ExpansionAndArtifactRetrievalStub(
    beam_expansion_api_pb2_grpc.ExpansionServiceStub):
  """Expansion service stub that can also serve artifacts on its channel."""
  def __init__(self, channel, **kwargs):
    # Remember the channel/options so an artifact stub can share them later.
    self._channel = channel
    self._kwargs = kwargs
    super(ExpansionAndArtifactRetrievalStub, self).__init__(channel, **kwargs)

  def artifact_service(self):
    """Return an artifact-retrieval stub that reuses this stub's channel."""
    return beam_artifact_api_pb2_grpc.ArtifactRetrievalServiceStub(
        self._channel, **self._kwargs)
class JavaJarExpansionService(object):
  """An expansion service based on an Java Jar file.

  This can be passed into an ExternalTransform as the expansion_service
  argument which will spawn a subprocess using this jar to expand the
  transform.
  """
  def __init__(self, path_to_jar, extra_args=None):
    self._path_to_jar = path_to_jar
    self._extra_args = ['{{PORT}}'] if extra_args is None else extra_args
    # Reference count of nested __enter__ calls sharing one server process.
    self._service_count = 0

  def __enter__(self):
    if self._service_count == 0:
      # First entry: materialize the jar locally and launch the server.
      self._path_to_jar = subprocess_server.JavaJarServer.local_jar(
          self._path_to_jar)
      # Consider memoizing these servers (with some timeout).
      self._service_provider = subprocess_server.JavaJarServer(
          ExpansionAndArtifactRetrievalStub,
          self._path_to_jar,
          self._extra_args)
      self._service = self._service_provider.__enter__()
    self._service_count += 1
    return self._service

  def __exit__(self, *args):
    self._service_count -= 1
    if self._service_count == 0:
      # Last exit: tear the server process down.
      self._service_provider.__exit__(*args)
class BeamJarExpansionService(JavaJarExpansionService):
  """An expansion service based on an Beam Java Jar file.

  Attempts to use a locally-built copy of the jar based on the gradle target,
  if it exists, otherwise attempts to download and cache the released artifact
  corresponding to this version of Beam from the apache maven repository.
  """
  def __init__(self, gradle_target, extra_args=None, gradle_appendix=None):
    # Resolve the gradle target to a concrete jar path, then defer to the
    # generic jar-based service.
    jar_path = subprocess_server.JavaJarServer.path_to_beam_jar(
        gradle_target, gradle_appendix)
    super(BeamJarExpansionService, self).__init__(jar_path, extra_args)
def memoize(func):
  """Decorator caching *func*'s results by its positional arguments.

  The cache is unbounded and keyed on the (hashable) args tuple.
  Improvement: `functools.wraps` now preserves the wrapped function's
  name, docstring and module for debuggability (previously lost).
  """
  cache = {}

  @functools.wraps(func)
  def wrapper(*args):
    if args not in cache:
      cache[args] = func(*args)
    return cache[args]

  return wrapper
from __future__ import absolute_import
import argparse
import logging
import subprocess
import sys
import grpc
from mock import patch
from past.builtins import unicode
import apache_beam as beam
from apache_beam.options.pipeline_options import PipelineOptions
from apache_beam.testing.test_pipeline import TestPipeline
from apache_beam.testing.util import assert_that
from apache_beam.testing.util import equal_to
from apache_beam.transforms.external import ImplicitSchemaPayloadBuilder
# Protect against environments where apitools library is not available.
# pylint: disable=wrong-import-order, wrong-import-position
try:
from apache_beam.runners.dataflow.internal import apiclient as _apiclient
except ImportError:
apiclient = None
else:
apiclient = _apiclient
# pylint: enable=wrong-import-order, wrong-import-position
class JavaExternalTransformTest(object):
  """Drives a test pipeline against a Java expansion service."""

  # This will be overwritten if set via a flag.
  expansion_service_jar = None  # type: str
  expansion_service_port = None  # type: int

  class _RunWithExpansion(object):
    """Context manager that (optionally) spawns a local Java expansion
    service from `expansion_service_jar` and waits for it to be reachable."""
    def __init__(self):
      self._server = None

    def __enter__(self):
      if not (JavaExternalTransformTest.expansion_service_jar or
              JavaExternalTransformTest.expansion_service_port):
        raise RuntimeError('No expansion service jar or port provided.')

      # Default port when only a jar was given.
      JavaExternalTransformTest.expansion_service_port = (
          JavaExternalTransformTest.expansion_service_port or 8091)

      jar = JavaExternalTransformTest.expansion_service_jar
      port = JavaExternalTransformTest.expansion_service_port

      # Start the java server and wait for it to be ready.
      if jar:
        self._server = subprocess.Popen(['java', '-jar', jar, str(port)])

      address = 'localhost:%s' % str(port)
      # Block until the channel reports ready (service accepting requests).
      with grpc.insecure_channel(address) as channel:
        grpc.channel_ready_future(channel).result()

    def __exit__(self, type, value, traceback):
      if self._server:
        self._server.kill()
        self._server = None

  @staticmethod
  def test_java_expansion_dataflow():
    if apiclient is None:
      return
    # This test does not actually running the pipeline in Dataflow. It just
    # tests the translation to a Dataflow job request.
    with patch.object(apiclient.DataflowApplicationClient,
                      'create_job') as mock_create_job:
      with JavaExternalTransformTest._RunWithExpansion():
        pipeline_options = PipelineOptions([
            '--runner=DataflowRunner',
            '--project=dummyproject',
            '--region=some-region1',
            '--experiments=beam_fn_api',
            '--temp_location=gs://dummybucket/'
        ])

        # Run a simple count-filtered-letters pipeline.
        JavaExternalTransformTest.run_pipeline(
            pipeline_options,
            JavaExternalTransformTest.expansion_service_port,
            False)

      # Inspect the captured job request rather than any execution result.
      mock_args = mock_create_job.call_args_list
      assert mock_args
      args, kwargs = mock_args[0]
      job = args[0]
      job_str = '%s' % job
      assert 'beam:transforms:xlang:filter_less_than_eq' in job_str

  @staticmethod
  def run_pipeline_with_expansion_service(pipeline_options):
    """Run the test pipeline inside a locally-spawned expansion service."""
    with JavaExternalTransformTest._RunWithExpansion():
      # Run a simple count-filtered-letters pipeline.
      JavaExternalTransformTest.run_pipeline(
          pipeline_options,
          JavaExternalTransformTest.expansion_service_port,
          True)

  @staticmethod
  def run_pipeline(pipeline_options, expansion_service, wait_until_finish=True):
    """Run the count-filtered-letters pipeline against *expansion_service*.

    :param expansion_service: an address string, a port (int) on localhost,
      or an expansion-service object accepted by beam.ExternalTransform.
    :param wait_until_finish: whether to block on pipeline completion.
    """
    # The actual definitions of these transforms is in
    # org.apache.beam.runners.core.construction.TestExpansionService.
    TEST_COUNT_URN = "beam:transforms:xlang:count"
    TEST_FILTER_URN = "beam:transforms:xlang:filter_less_than_eq"

    # Run a simple count-filtered-letters pipeline.
    p = TestPipeline(options=pipeline_options)

    if isinstance(expansion_service, int):
      # Only the port was specified.
      expansion_service = 'localhost:%s' % str(expansion_service)

    res = (
        p
        | beam.Create(list('aaabccxyyzzz'))
        | beam.Map(unicode)
        | beam.ExternalTransform(
            TEST_FILTER_URN,
            ImplicitSchemaPayloadBuilder({'data': u'middle'}),
            expansion_service)
        | beam.ExternalTransform(TEST_COUNT_URN, None, expansion_service)
        | beam.Map(lambda kv: '%s: %s' % kv))
    assert_that(res, equal_to(['a: 3', 'b: 1', 'c: 2']))

    result = p.run()
    if wait_until_finish:
      result.wait_until_finish()
if __name__ == '__main__':
  logging.getLogger().setLevel(logging.INFO)

  parser = argparse.ArgumentParser()
  parser.add_argument('--expansion_service_jar')
  parser.add_argument('--expansion_service_port')
  parser.add_argument('--expansion_service_target')
  parser.add_argument('--expansion_service_target_appendix')
  known_args, pipeline_args = parser.parse_known_args(sys.argv)

  if known_args.expansion_service_jar:
    # Spawn a local expansion service from the given jar/port and run
    # the test pipeline against it.
    JavaExternalTransformTest.expansion_service_jar = (
        known_args.expansion_service_jar)
    JavaExternalTransformTest.expansion_service_port = int(
        known_args.expansion_service_port)
    pipeline_options = PipelineOptions(pipeline_args)
    JavaExternalTransformTest.run_pipeline_with_expansion_service(
        pipeline_options)
  elif known_args.expansion_service_target:
    # Resolve an expansion-service jar from the given gradle target
    # (locally built or downloaded) and run against it.
    pipeline_options = PipelineOptions(pipeline_args)
    JavaExternalTransformTest.run_pipeline(
        pipeline_options,
        beam.transforms.external.BeamJarExpansionService(
            known_args.expansion_service_target,
            gradle_appendix=known_args.expansion_service_target_appendix))
  else:
    raise RuntimeError(
        "--expansion_service_jar or --expansion_service_target "
        "should be provided.")
# cython: language_level=3
"""A library of basic cythonized CombineFn subclasses.
For internal use only; no backwards-compatibility guarantees.
"""
# pytype: skip-file
from __future__ import absolute_import
from __future__ import division
import operator
from builtins import object
from apache_beam.transforms import core
try:
from apache_beam.transforms.cy_dataflow_distribution_counter import DataflowDistributionCounter
except ImportError:
from apache_beam.transforms.py_dataflow_distribution_counter import DataflowDistributionCounter
class AccumulatorCombineFn(core.CombineFn):
  """CombineFn adapter over a mutable accumulator class.

  Subclasses set ``_accumulator_type`` to a class exposing ``add_input``,
  ``merge`` and ``extract_output``; this adapter maps the CombineFn protocol
  onto those methods.
  """

  # singleton?
  def create_accumulator(self):
    return self._accumulator_type()

  @staticmethod
  def add_input(accumulator, element):
    # Mutates and returns the same accumulator, as the CombineFn API allows.
    accumulator.add_input(element)
    return accumulator

  def merge_accumulators(self, accumulators):
    # Fold all partial accumulators into a fresh one.
    accumulator = self._accumulator_type()
    accumulator.merge(accumulators)
    return accumulator

  @staticmethod
  def extract_output(accumulator):
    return accumulator.extract_output()

  def __eq__(self, other):
    # Two instances are equal iff they wrap the same accumulator class.
    return (
        isinstance(other, AccumulatorCombineFn) and
        self._accumulator_type is other._accumulator_type)

  def __ne__(self, other):
    # TODO(BEAM-5949): Needed for Python 2 compatibility.
    return not self == other

  def __hash__(self):
    return hash(self._accumulator_type)
_63 = 63  # Avoid large literals in C source code.
# Assigned through globals() so Cython treats INT64_MAX / INT64_MIN as
# module-level Python objects instead of folding 64-bit literals into the
# generated C source.
globals()['INT64_MAX'] = 2**_63 - 1
globals()['INT64_MIN'] = -2**_63
class CountAccumulator(object):
  """Accumulates a running count of the elements it has seen."""
  def __init__(self):
    self.value = 0

  def add_input(self, unused_element):
    # Only the presence of an element matters, not its value.
    self.value += 1

  def merge(self, accumulators):
    # Fold every partial count into this accumulator.
    self.value += sum(other.value for other in accumulators)

  def extract_output(self):
    return self.value
class SumInt64Accumulator(object):
  """Sums int64 inputs; the reported sum wraps like C int64 arithmetic."""
  def __init__(self):
    self.value = 0

  def add_input(self, element):
    global INT64_MAX, INT64_MIN  # pylint: disable=global-variable-not-assigned
    element = int(element)
    # Reject inputs that do not fit in a signed 64-bit integer.
    if element > INT64_MAX or element < INT64_MIN:
      raise OverflowError(element)
    self.value += element

  def merge(self, accumulators):
    self.value += sum(other.value for other in accumulators)

  def extract_output(self):
    # Emulate C int64 wraparound on the (arbitrary-precision) running total.
    if self.value > INT64_MAX or self.value < INT64_MIN:
      self.value %= 2**64
      if self.value >= INT64_MAX:
        self.value -= 2**64
    return self.value
class MinInt64Accumulator(object):
  """Tracks the minimum of int64 inputs; starts at INT64_MAX."""
  def __init__(self):
    self.value = INT64_MAX

  def add_input(self, element):
    element = int(element)
    if element > INT64_MAX or element < INT64_MIN:
      raise OverflowError(element)
    self.value = min(self.value, element)

  def merge(self, accumulators):
    for other in accumulators:
      self.value = min(self.value, other.value)

  def extract_output(self):
    return self.value
class MaxInt64Accumulator(object):
  """Tracks the maximum of int64 inputs; starts at INT64_MIN."""
  def __init__(self):
    self.value = INT64_MIN

  def add_input(self, element):
    element = int(element)
    if element > INT64_MAX or element < INT64_MIN:
      raise OverflowError(element)
    self.value = max(self.value, element)

  def merge(self, accumulators):
    for other in accumulators:
      self.value = max(self.value, other.value)

  def extract_output(self):
    return self.value
class MeanInt64Accumulator(object):
  """Accumulates a running sum and count of int64 inputs for an integer mean."""
  def __init__(self):
    self.sum = 0
    self.count = 0

  def add_input(self, element):
    element = int(element)
    if element > INT64_MAX or element < INT64_MIN:
      raise OverflowError(element)
    self.sum += element
    self.count += 1

  def merge(self, accumulators):
    # Combine partial sums and counts from the other accumulators.
    for other in accumulators:
      self.sum += other.sum
      self.count += other.count

  def extract_output(self):
    # Emulate C int64 wraparound on the sum before dividing.
    if self.sum > INT64_MAX or self.sum < INT64_MIN:
      self.sum %= 2**64
      if self.sum >= INT64_MAX:
        self.sum -= 2**64
    if self.count:
      return self.sum // self.count
    return _NAN
class DistributionInt64Accumulator(object):
  """Tracks sum, count, min and max of int64 inputs.

  The sum is wrapped into signed 64-bit range on extraction, mirroring the
  C overflow behaviour of the cythonized counterpart of this file.
  """
  def __init__(self):
    self.sum = 0
    self.count = 0
    self.min = INT64_MAX  # Sentinel: any accepted input is <= INT64_MAX.
    self.max = INT64_MIN  # Sentinel: any accepted input is >= INT64_MIN.

  def add_input(self, element):
    element = int(element)
    if not INT64_MIN <= element <= INT64_MAX:
      raise OverflowError(element)
    self.sum += element
    self.count += 1
    self.min = min(self.min, element)
    self.max = max(self.max, element)

  def merge(self, accumulators):
    # Fold the partial statistics of the other accumulators into this one.
    for accumulator in accumulators:
      self.sum += accumulator.sum
      self.count += accumulator.count
      self.min = min(self.min, accumulator.min)
      self.max = max(self.max, accumulator.max)

  def extract_output(self):
    # Wrap the sum into [INT64_MIN, INT64_MAX] as C int64 addition would.
    if not INT64_MIN <= self.sum <= INT64_MAX:
      self.sum %= 2**64
      if self.sum >= INT64_MAX:
        self.sum -= 2**64
    mean = self.sum // self.count if self.count else _NAN
    return mean, self.sum, self.count, self.min, self.max
# CombineFns binding each integer accumulator defined above;
# AccumulatorCombineFn supplies create/add/merge/extract via _accumulator_type.
class CountCombineFn(AccumulatorCombineFn):
  _accumulator_type = CountAccumulator


class SumInt64Fn(AccumulatorCombineFn):
  _accumulator_type = SumInt64Accumulator


class MinInt64Fn(AccumulatorCombineFn):
  _accumulator_type = MinInt64Accumulator


class MaxInt64Fn(AccumulatorCombineFn):
  _accumulator_type = MaxInt64Accumulator


class MeanInt64Fn(AccumulatorCombineFn):
  _accumulator_type = MeanInt64Accumulator


class DistributionInt64Fn(AccumulatorCombineFn):
  _accumulator_type = DistributionInt64Accumulator
# Float sentinels used by the double and mean accumulators below.
_POS_INF = float('inf')
_NEG_INF = float('-inf')
_NAN = float('nan')
class SumDoubleAccumulator(object):
  """Accumulates the floating-point sum of its inputs."""
  def __init__(self):
    self.value = 0

  def add_input(self, element):
    self.value += float(element)

  def merge(self, accumulators):
    # Sequential addition preserves the original's float rounding order.
    for other in accumulators:
      self.value += other.value

  def extract_output(self):
    return self.value
class MinDoubleAccumulator(object):
  """Tracks the smallest float observed; starts at +infinity."""
  def __init__(self):
    self.value = _POS_INF

  def add_input(self, element):
    # min() keeps the current value on ties/incomparables, matching the
    # original strict '<' guard.
    self.value = min(self.value, float(element))

  def merge(self, accumulators):
    for other in accumulators:
      self.value = min(self.value, other.value)

  def extract_output(self):
    return self.value
class MaxDoubleAccumulator(object):
  """Tracks the largest float observed; starts at -infinity."""
  def __init__(self):
    self.value = _NEG_INF

  def add_input(self, element):
    # max() keeps the current value on ties/incomparables, matching the
    # original strict '>' guard.
    self.value = max(self.value, float(element))

  def merge(self, accumulators):
    for other in accumulators:
      self.value = max(self.value, other.value)

  def extract_output(self):
    return self.value
class MeanDoubleAccumulator(object):
  """Accumulates a running sum and count of float inputs for their mean.

  Returns NaN when no inputs have been seen.
  """
  def __init__(self):
    self.sum = 0
    self.count = 0

  def add_input(self, element):
    element = float(element)
    self.sum += element
    self.count += 1

  def merge(self, accumulators):
    # Combine partial sums and counts from the other accumulators.
    for accumulator in accumulators:
      self.sum += accumulator.sum
      self.count += accumulator.count

  def extract_output(self):
    # BUG FIX: use true division. Floor division (//) truncated fractional
    # means for this floating-point accumulator; only the int64 variant
    # (MeanInt64Accumulator) intentionally floors.
    return self.sum / self.count if self.count else _NAN
# CombineFns binding each floating-point accumulator defined above.
class SumFloatFn(AccumulatorCombineFn):
  _accumulator_type = SumDoubleAccumulator


class MinFloatFn(AccumulatorCombineFn):
  _accumulator_type = MinDoubleAccumulator


class MaxFloatFn(AccumulatorCombineFn):
  _accumulator_type = MaxDoubleAccumulator


class MeanFloatFn(AccumulatorCombineFn):
  _accumulator_type = MeanDoubleAccumulator
class AllAccumulator(object):
  """Logical AND over the truthiness of every input; starts True."""
  def __init__(self):
    self.value = True

  def add_input(self, element):
    self.value = self.value and bool(element)

  def merge(self, accumulators):
    for other in accumulators:
      self.value = self.value and other.value

  def extract_output(self):
    return self.value
class AnyAccumulator(object):
  """Logical OR over the truthiness of every input; starts False."""
  def __init__(self):
    self.value = False

  def add_input(self, element):
    self.value = self.value or bool(element)

  def merge(self, accumulators):
    for other in accumulators:
      self.value = self.value or other.value

  def extract_output(self):
    return self.value
# CombineFns implementing any()/all() semantics over element truthiness.
class AnyCombineFn(AccumulatorCombineFn):
  _accumulator_type = AnyAccumulator


class AllCombineFn(AccumulatorCombineFn):
  _accumulator_type = AllAccumulator
class DataflowDistributionCounterFn(AccumulatorCombineFn):
  """A subclass of cy_combiners.AccumulatorCombineFn.

  Makes DataflowDistributionCounter able to report to the Dataflow service via
  CounterFactory.

  When the cythonized DataflowDistributionCounter is available, this CombineFn
  combines with the cythonized module; otherwise it falls back to the pure
  Python version (see the try/except import at the top of this file).
  """
  _accumulator_type = DataflowDistributionCounter
class ComparableValue(object):
  """Wraps a value so it can be ordered via a custom compare or key function."""

  __slots__ = (
      'value', '_less_than_fn', '_comparable_value', 'requires_hydration')

  def __init__(self, value, less_than_fn, key_fn, _requires_hydration=False):
    self.value = value
    self.hydrate(less_than_fn, key_fn)
    # Overrides the False set by hydrate(); True means the compare/key
    # functions still need to be re-attached (e.g. right after unpickling).
    self.requires_hydration = _requires_hydration

  def hydrate(self, less_than_fn, key_fn):
    """Attach the (unpicklable) compare and key functions."""
    if less_than_fn:
      self._less_than_fn = less_than_fn
    else:
      self._less_than_fn = operator.lt
    if key_fn:
      self._comparable_value = key_fn(self.value)
    else:
      self._comparable_value = self.value
    self.requires_hydration = False

  def __lt__(self, other):
    assert not self.requires_hydration
    # Both sides must have been hydrated with the same comparison function.
    assert self._less_than_fn is other._less_than_fn
    return self._less_than_fn(self._comparable_value, other._comparable_value)

  def __repr__(self):
    return 'ComparableValue[%s]' % str(self.value)

  def __reduce__(self):
    # Since we can't pickle the Compare and Key Fn we pass None and we signify
    # that this object _requires_hydration.
    return ComparableValue, (self.value, None, None, True)
# pytype: skip-file
"""a collection of ptransforms for deduplicating elements."""
from __future__ import absolute_import
from __future__ import division
import typing
from apache_beam import typehints
from apache_beam.coders.coders import BooleanCoder
from apache_beam.transforms import core
from apache_beam.transforms import ptransform
from apache_beam.transforms import userstate
from apache_beam.transforms.timeutil import TimeDomain
from apache_beam.utils import timestamp
# Public API of this module.
__all__ = [
    'Deduplicate',
    'DeduplicatePerKey',
]

# Key/value type variables used by the PTransform type hints below.
K = typing.TypeVar('K')
V = typing.TypeVar('V')
@typehints.with_input_types(typing.Tuple[K, V])
@typehints.with_output_types(typing.Tuple[K, V])
class DeduplicatePerKey(ptransform.PTransform):
  """A PTransform which deduplicates <key, value> pairs over a time domain and
  threshold. Values in different windows will NOT be considered duplicates of
  each other. Deduplication is guaranteed with respect to the time domain and
  duration.

  Time durations are required so as to avoid unbounded memory and/or storage
  requirements within a runner and care might need to be used to ensure that
  the deduplication time limit is long enough to remove duplicates but short
  enough to not cause performance problems within a runner. Each runner may
  provide an optimized implementation of their choice using the deduplication
  time domain and threshold specified.

  Does not preserve any order the input PCollection might have had.
  """
  def __init__(self, processing_time_duration=None, event_time_duration=None):
    if processing_time_duration is None and event_time_duration is None:
      # BUG FIX: the original message was two concatenated literals with no
      # separating space ("...eitherprocessing_time_duration...") and a typo
      # ("at lease").
      raise ValueError(
          'DeduplicatePerKey requires at least one of '
          'processing_time_duration or event_time_duration.')
    self.processing_time_duration = processing_time_duration
    self.event_time_duration = event_time_duration

  def _create_deduplicate_fn(self):
    """Build the stateful DoFn that drops already-seen keys per window."""
    processing_timer_spec = userstate.TimerSpec(
        'processing_timer', TimeDomain.REAL_TIME)
    event_timer_spec = userstate.TimerSpec('event_timer', TimeDomain.WATERMARK)
    # A single True in this bag marks the key as already seen.
    state_spec = userstate.BagStateSpec('seen', BooleanCoder())
    # Bind durations to locals so the nested DoFn does not capture self.
    processing_time_duration = self.processing_time_duration
    event_time_duration = self.event_time_duration

    class DeduplicationFn(core.DoFn):
      def process(
          self,
          kv,
          ts=core.DoFn.TimestampParam,
          seen_state=core.DoFn.StateParam(state_spec),
          processing_timer=core.DoFn.TimerParam(processing_timer_spec),
          event_timer=core.DoFn.TimerParam(event_timer_spec)):
        # Drop the element if this key was already emitted in this window.
        if True in seen_state.read():
          return
        # Schedule state cleanup so memory stays bounded.
        if processing_time_duration is not None:
          processing_timer.set(
              timestamp.Timestamp.now() + processing_time_duration)
        if event_time_duration is not None:
          event_timer.set(ts + event_time_duration)
        seen_state.add(True)
        yield kv

      @userstate.on_timer(processing_timer_spec)
      def process_processing_timer(
          self, seen_state=core.DoFn.StateParam(state_spec)):
        # Forget the key once the processing-time threshold has elapsed.
        seen_state.clear()

      @userstate.on_timer(event_timer_spec)
      def process_event_timer(
          self, seen_state=core.DoFn.StateParam(state_spec)):
        # Forget the key once the event-time threshold has elapsed.
        seen_state.clear()

    return DeduplicationFn()

  def expand(self, pcoll):
    return (
        pcoll
        | 'DeduplicateFn' >> core.ParDo(self._create_deduplicate_fn()))
class Deduplicate(ptransform.PTransform):
  """Similar to DeduplicatePerKey, the Deduplicate transform takes any
  arbitrary value as input and uses the value as a key to deduplicate within
  the given time duration.
  """
  def __init__(self, processing_time_duration=None, event_time_duration=None):
    if processing_time_duration is None and event_time_duration is None:
      # BUG FIX: the original message was two concatenated literals with no
      # separating space and contained the typo "at lease".
      raise ValueError(
          'Deduplicate requires at least one of '
          'processing_time_duration or event_time_duration.')
    self.processing_time_duration = processing_time_duration
    self.event_time_duration = event_time_duration

  def expand(self, pcoll):
    # Key by the element itself, deduplicate per key, then drop the dummy
    # None value.
    return (
        pcoll
        | 'Use Value as Key' >> core.Map(lambda x: (x, None))
        | 'DeduplicatePerKey' >> DeduplicatePerKey(
            processing_time_duration=self.processing_time_duration,
            event_time_duration=self.event_time_duration)
        | 'Output Value' >> core.Map(lambda kv: kv[0]))
# pytype: skip-file
from __future__ import absolute_import
from builtins import object
from builtins import range
# Assigned through globals() to mirror the cythonized counterpart, which must
# avoid embedding 64-bit literals in generated C source.
globals()['INT64_MAX'] = 2**63 - 1
globals()['INT64_MIN'] = -2**63
# POWER_TEN[k] == 10**k for k in 0..19 (10e-1 == 1.0, 10e0 == 10.0, ...).
# Used as bucket boundaries for the floor(log10) table scan below.
POWER_TEN = [
    10e-1,
    10e0,
    10e1,
    10e2,
    10e3,
    10e4,
    10e5,
    10e6,
    10e7,
    10e8,
    10e9,
    10e10,
    10e11,
    10e12,
    10e13,
    10e14,
    10e15,
    10e16,
    10e17,
    10e18
]
def get_log10_round_to_floor(element):
  """Return floor(log10(element)) for element >= 1, via a table scan.

  Raises IndexError for element >= 10**19; callers cap inputs at the
  int64 maximum (~9.2e18), which falls in the last table bucket.
  """
  power = 0
  while element >= POWER_TEN[power]:
    power += 1
  return power - 1
class DataflowDistributionCounter(object):
  """Pure python DataflowDistributionCounter in case Cython not available.

  Please avoid using python mode if possible, since it's super slow.
  The cythonized DataflowDistributionCounter is defined in
  apache_beam.transforms.cy_dataflow_distribution_counter.

  Currently using special bucketing strategy suitable for Dataflow.

  Attributes:
    min: minimum value of all inputs.
    max: maximum value of all inputs.
    count: total count of all inputs.
    sum: sum of all inputs.
    buckets: histogram buckets of value counts for a
      distribution (1, 2, 5 bucketing). Max bucket_index is 58
      (sys.maxint as input).
    is_cythonized: mark whether DataflowDistributionCounter cythonized.
  """
  # Assume the max input is sys.maxint, then the possible max bucket size is 59
  MAX_BUCKET_SIZE = 59
  # 3 buckets for every power of ten -> 1, 2, 5
  BUCKET_PER_TEN = 3

  def __init__(self):
    global INT64_MAX  # pylint: disable=global-variable-not-assigned
    self.min = INT64_MAX  # Sentinel: any non-negative input is smaller.
    self.max = 0
    self.count = 0
    self.sum = 0
    self.buckets = [0] * self.MAX_BUCKET_SIZE
    self.is_cythonized = False

  def add_input(self, element):
    # Record one non-negative value into the running statistics and histogram.
    if element < 0:
      raise ValueError('Distribution counters support only non-negative value')
    self.min = min(self.min, element)
    self.max = max(self.max, element)
    self.count += 1
    self.sum += element
    bucket_index = self.calculate_bucket_index(element)
    self.buckets[bucket_index] += 1

  def calculate_bucket_index(self, element):
    """Calculate the bucket index for the given element.

    Bucket 0 holds zeros; above that there are three buckets per decade,
    split at 1x, 2x and 5x each power of ten.
    """
    if element == 0:
      return 0
    log10_floor = get_log10_round_to_floor(element)
    power_of_ten = POWER_TEN[log10_floor]
    if element < power_of_ten * 2:
      bucket_offset = 0
    elif element < power_of_ten * 5:
      bucket_offset = 1
    else:
      bucket_offset = 2
    return 1 + log10_floor * self.BUCKET_PER_TEN + bucket_offset

  def translate_to_histogram(self, histogram):
    """Translate buckets into Histogram.

    Args:
      histogram: an
        apache_beam.runners.dataflow.internal.clients.dataflow.Histogram
        instance. Ideally, only call this function when reporting the counter
        to the Dataflow service.
    """
    # Find the first and last non-empty buckets so only the occupied range of
    # the histogram is reported.
    first_bucket_offset = 0
    last_bucket_offset = 0
    for index in range(0, self.MAX_BUCKET_SIZE):
      if self.buckets[index] != 0:
        first_bucket_offset = index
        break
    for index in range(self.MAX_BUCKET_SIZE - 1, -1, -1):
      if self.buckets[index] != 0:
        last_bucket_offset = index
        break
    histogram.firstBucketOffset = first_bucket_offset
    histogram.bucketCounts = (
        self.buckets[first_bucket_offset:last_bucket_offset + 1])

  def merge(self, accumulators):
    # Merging is only implemented by the cythonized counterpart.
    raise NotImplementedError()
# pytype: skip-file
from __future__ import absolute_import
import json
import logging
import sys
import tempfile
from typing import TYPE_CHECKING
from typing import Any
from typing import Callable
from typing import Dict
from typing import Iterable
from typing import Iterator
from typing import List
from typing import Mapping
from typing import Optional
from typing import Tuple
from typing import Type
from typing import TypeVar
from typing import Union
from typing import overload
from google.protobuf import message
from apache_beam import coders
from apache_beam.options.pipeline_options import SetupOptions
from apache_beam.portability import common_urns
from apache_beam.portability import python_urns
from apache_beam.portability.api import beam_runner_api_pb2
from apache_beam.portability.api import endpoints_pb2
from apache_beam.runners.portability import stager
from apache_beam.runners.portability.sdk_container_builder import SdkContainerImageBuilder
from apache_beam.utils import proto_utils
if TYPE_CHECKING:
from apache_beam.options.pipeline_options import PortableOptions
from apache_beam.runners.pipeline_context import PipelineContext
# Names exported from this module.
# NOTE(review): 'RunnerAPIEnvironmentHolder' is listed here but defined
# further down in this file -- confirm it still exists there.
__all__ = [
    'Environment',
    'DockerEnvironment',
    'ProcessEnvironment',
    'ExternalEnvironment',
    'EmbeddedPythonEnvironment',
    'EmbeddedPythonGrpcEnvironment',
    'SubprocessSDKEnvironment',
    'RunnerAPIEnvironmentHolder'
]
T = TypeVar('T')
# TypeVar so classmethod constructors are typed as returning the subclass.
EnvironmentT = TypeVar('EnvironmentT', bound='Environment')
# Signature of a registered from_runner_api_parameter constructor:
# (parsed payload, capabilities, dependencies, pipeline context) -> Environment.
ConstructorFn = Callable[[
    Optional[Any],
    Iterable[str],
    Iterable[beam_runner_api_pb2.ArtifactInformation],
    'PipelineContext'
],
                         Any]
def looks_like_json(s):
  """Return a truthy re.Match iff s looks like a brace-delimited JSON object.

  Note '.' does not cross newlines, so only single-line configs match.
  """
  import re
  json_object = re.compile(r'\s*\{.*\}\s*$')
  return json_object.match(s)
class Environment(object):
  """Abstract base class for environments.

  Represents a type and configuration of environment.
  Each type of Environment should have a unique urn.

  For internal use only. No backwards compatibility guarantees.
  """
  # Registry mapping urn -> (payload proto type, constructor fn).
  _known_urns = {}  # type: Dict[str, Tuple[Optional[type], ConstructorFn]]
  # Registry mapping urn -> concrete Environment subclass.
  _urn_to_env_cls = {}  # type: Dict[str, type]

  def __init__(self,
               capabilities,  # type: Iterable[str]
               artifacts,  # type: Iterable[beam_runner_api_pb2.ArtifactInformation]
              ):
    # type: (...) -> None
    self._capabilities = capabilities
    self._artifacts = artifacts

  def artifacts(self):
    # type: () -> Iterable[beam_runner_api_pb2.ArtifactInformation]
    """Return the artifact dependencies shipped with this environment."""
    return self._artifacts

  def to_runner_api_parameter(self, context):
    # type: (PipelineContext) -> Tuple[str, Optional[Union[message.Message, bytes, str]]]
    """Return (urn, payload) for this environment; subclasses must override."""
    raise NotImplementedError

  def capabilities(self):
    # type: () -> Iterable[str]
    """Return the SDK capability urns advertised by this environment."""
    return self._capabilities

  @classmethod
  @overload
  def register_urn(
      cls,
      urn,  # type: str
      parameter_type,  # type: Type[T]
  ):
    # type: (...) -> Callable[[Union[type, Callable[[T, Iterable[str], PipelineContext], Any]]], Callable[[T, Iterable[str], PipelineContext], Any]]
    pass

  @classmethod
  @overload
  def register_urn(
      cls,
      urn,  # type: str
      parameter_type,  # type: None
  ):
    # type: (...) -> Callable[[Union[type, Callable[[bytes, Iterable[str], Iterable[beam_runner_api_pb2.ArtifactInformation], PipelineContext], Any]]], Callable[[bytes, Iterable[str], PipelineContext], Any]]
    pass

  @classmethod
  @overload
  def register_urn(cls,
                   urn,  # type: str
                   parameter_type,  # type: Type[T]
                   constructor  # type: Callable[[T, Iterable[str], Iterable[beam_runner_api_pb2.ArtifactInformation], PipelineContext], Any]
                  ):
    # type: (...) -> None
    pass

  @classmethod
  @overload
  def register_urn(cls,
                   urn,  # type: str
                   parameter_type,  # type: None
                   constructor  # type: Callable[[bytes, Iterable[str], Iterable[beam_runner_api_pb2.ArtifactInformation], PipelineContext], Any]
                  ):
    # type: (...) -> None
    pass

  @classmethod
  def register_urn(cls, urn, parameter_type, constructor=None):
    """Register a payload type and constructor (or subclass) for an urn.

    Usable either as a class decorator (no constructor argument) or as a
    plain statement (constructor supplied).
    """
    def register(constructor):
      if isinstance(constructor, type):
        # Decorating a class: register its from_runner_api_parameter and
        # remember the class itself for urn-based lookup.
        constructor.from_runner_api_parameter = register(
            constructor.from_runner_api_parameter)
        # register environment urn to environment class
        cls._urn_to_env_cls[urn] = constructor
        return constructor
      else:
        cls._known_urns[urn] = parameter_type, constructor
        return staticmethod(constructor)

    if constructor:
      # Used as a statement.
      register(constructor)
    else:
      # Used as a decorator.
      return register

  @classmethod
  def get_env_cls_from_urn(cls, urn):
    # type: (str) -> Type[Environment]
    """Return the Environment subclass registered for the given urn."""
    return cls._urn_to_env_cls[urn]

  def to_runner_api(self, context):
    # type: (PipelineContext) -> beam_runner_api_pb2.Environment
    """Serialize this environment to its runner-API proto."""
    urn, typed_param = self.to_runner_api_parameter(context)
    return beam_runner_api_pb2.Environment(
        urn=urn,
        # Payloads may be protos, raw bytes, None, or strings (encoded here).
        payload=typed_param.SerializeToString() if isinstance(
            typed_param, message.Message) else typed_param if
        (isinstance(typed_param, bytes) or
         typed_param is None) else typed_param.encode('utf-8'),
        capabilities=self.capabilities(),
        dependencies=self.artifacts())

  @classmethod
  def from_runner_api(cls,
                      proto,  # type: Optional[beam_runner_api_pb2.Environment]
                      context  # type: PipelineContext
                     ):
    # type: (...) -> Optional[Environment]
    """Deserialize an Environment from its runner-API proto, or None."""
    if proto is None or not proto.urn:
      return None
    parameter_type, constructor = cls._known_urns[proto.urn]

    return constructor(
        proto_utils.parse_Bytes(proto.payload, parameter_type),
        proto.capabilities,
        proto.dependencies,
        context)

  @classmethod
  def from_options(cls, options):
    # type: (Type[EnvironmentT], PortableOptions) -> EnvironmentT
    """Creates an Environment object from PortableOptions.

    Args:
      options: The PortableOptions object.
    """
    raise NotImplementedError
@Environment.register_urn(
    common_urns.environments.DOCKER.urn, beam_runner_api_pb2.DockerPayload)
class DockerEnvironment(Environment):
  """Environment that runs the SDK harness in a Docker container."""
  def __init__(
      self,
      container_image=None,  # type: Optional[str]
      capabilities=(),  # type: Iterable[str]
      artifacts=(),  # type: Iterable[beam_runner_api_pb2.ArtifactInformation]
  ):
    super(DockerEnvironment, self).__init__(capabilities, artifacts)
    # IMPROVEMENT: use lazy logging arguments instead of eager %-formatting,
    # so the strings are only interpolated when the log level is enabled.
    if container_image:
      logging.info(
          'Using provided Python SDK container image: %s', container_image)
      self.container_image = container_image
    else:
      logging.info('No image given, using default Python SDK image')
      self.container_image = self.default_docker_image()
    logging.info(
        'Python SDK container image set to "%s" for Docker environment',
        self.container_image)

  def __eq__(self, other):
    return self.__class__ == other.__class__ \
        and self.container_image == other.container_image

  def __ne__(self, other):
    # TODO(BEAM-5949): Needed for Python 2 compatibility.
    return not self == other

  def __hash__(self):
    return hash((self.__class__, self.container_image))

  def __repr__(self):
    return 'DockerEnvironment(container_image=%s)' % self.container_image

  def to_runner_api_parameter(self, context):
    # type: (PipelineContext) -> Tuple[str, beam_runner_api_pb2.DockerPayload]
    return (
        common_urns.environments.DOCKER.urn,
        beam_runner_api_pb2.DockerPayload(container_image=self.container_image))

  @staticmethod
  def from_runner_api_parameter(payload,  # type: beam_runner_api_pb2.DockerPayload
                                capabilities,  # type: Iterable[str]
                                artifacts,  # type: Iterable[beam_runner_api_pb2.ArtifactInformation]
                                context  # type: PipelineContext
                               ):
    # type: (...) -> DockerEnvironment
    return DockerEnvironment(
        container_image=payload.container_image,
        capabilities=capabilities,
        artifacts=artifacts)

  @classmethod
  def from_options(cls, options):
    # type: (PortableOptions) -> DockerEnvironment
    """Build a DockerEnvironment from pipeline options, optionally
    prebuilding the SDK container image first."""
    if options.view_as(SetupOptions).prebuild_sdk_container_engine:
      prebuilt_container_image = SdkContainerImageBuilder.build_container_image(
          options)
      return cls.from_container_image(
          container_image=prebuilt_container_image,
          artifacts=python_sdk_dependencies(options))
    return cls.from_container_image(
        container_image=options.lookup_environment_option(
            'docker_container_image') or options.environment_config,
        artifacts=python_sdk_dependencies(options))

  @classmethod
  def from_container_image(cls, container_image, artifacts=()):
    # type: (str, Iterable[beam_runner_api_pb2.ArtifactInformation]) -> DockerEnvironment
    """Build a DockerEnvironment for the given image, with SDK capabilities."""
    return cls(
        container_image=container_image,
        capabilities=python_sdk_capabilities(),
        artifacts=artifacts)

  @staticmethod
  def default_docker_image():
    # type: () -> str
    """Return the released apache/beam SDK image matching this SDK version
    and the current Python major.minor version."""
    from apache_beam import version as beam_version
    sdk_version = beam_version.__version__
    version_suffix = '.'.join([str(i) for i in sys.version_info[0:2]])
    logging.warning(
        'Make sure that locally built Python SDK docker image '
        'has Python %d.%d interpreter.',
        sys.version_info[0],
        sys.version_info[1])

    image = (
        'apache/beam_python{version_suffix}_sdk:{tag}'.format(
            version_suffix=version_suffix, tag=sdk_version))
    logging.info('Default Python SDK image for environment is %s', image)
    return image
@Environment.register_urn(
    common_urns.environments.PROCESS.urn, beam_runner_api_pb2.ProcessPayload)
class ProcessEnvironment(Environment):
  """Environment that launches the SDK harness as a local subprocess."""
  def __init__(
      self,
      command,  # type: str
      os='',  # type: str
      arch='',  # type: str
      env=None,  # type: Optional[Mapping[str, str]]
      capabilities=(),  # type: Iterable[str]
      artifacts=(),  # type: Iterable[beam_runner_api_pb2.ArtifactInformation]
  ):
    # type: (...) -> None
    super(ProcessEnvironment, self).__init__(capabilities, artifacts)
    self.command = command
    self.os = os
    self.arch = arch
    # Normalize falsy env values (None, '') to an empty mapping.
    self.env = env or {}

  def __eq__(self, other):
    return self.__class__ == other.__class__ \
        and self.command == other.command and self.os == other.os \
        and self.arch == other.arch and self.env == other.env

  def __ne__(self, other):
    # TODO(BEAM-5949): Needed for Python 2 compatibility.
    return not self == other

  def __hash__(self):
    # type: () -> int
    # frozenset makes the (unordered) env mapping hashable.
    return hash((
        self.__class__,
        self.command,
        self.os,
        self.arch,
        frozenset(self.env.items())))

  def __repr__(self):
    # type: () -> str
    repr_parts = ['command=%s' % self.command]
    if self.os:
      repr_parts.append('os=%s' % self.os)
    if self.arch:
      repr_parts.append('arch=%s' % self.arch)
    repr_parts.append('env=%s' % self.env)
    return 'ProcessEnvironment(%s)' % ','.join(repr_parts)

  def to_runner_api_parameter(self, context):
    # type: (PipelineContext) -> Tuple[str, beam_runner_api_pb2.ProcessPayload]
    return (
        common_urns.environments.PROCESS.urn,
        beam_runner_api_pb2.ProcessPayload(
            os=self.os, arch=self.arch, command=self.command, env=self.env))

  @staticmethod
  def from_runner_api_parameter(payload,
                                capabilities,  # type: Iterable[str]
                                artifacts,  # type: Iterable[beam_runner_api_pb2.ArtifactInformation]
                                context  # type: PipelineContext
                               ):
    # type: (...) -> ProcessEnvironment
    return ProcessEnvironment(
        command=payload.command,
        os=payload.os,
        arch=payload.arch,
        env=payload.env,
        capabilities=capabilities,
        artifacts=artifacts)

  @staticmethod
  def parse_environment_variables(variables):
    """Parse 'NAME=value' strings into a {NAME: value} dict.

    Raises:
      ValueError: if an entry contains no '=' assignment.
    """
    env = {}
    for var in variables:
      try:
        name, value = var.split('=', 1)
        env[name] = value
      except ValueError:
        raise ValueError(
            'Invalid process_variables "%s" (expected assignment in the '
            'form "FOO=bar").' % var)
    return env

  @classmethod
  def from_options(cls, options):
    # type: (PortableOptions) -> ProcessEnvironment
    """Build a ProcessEnvironment from a JSON environment_config or from
    --environment_option process_command / process_variables."""
    if options.environment_config:
      config = json.loads(options.environment_config)
      return cls(
          config.get('command'),
          os=config.get('os', ''),
          arch=config.get('arch', ''),
          # NOTE(review): the default here is '' (not {}); the constructor's
          # `env or {}` masks it, but {} would be the cleaner default.
          env=config.get('env', ''),
          capabilities=python_sdk_capabilities(),
          artifacts=python_sdk_dependencies(options))

    env = cls.parse_environment_variables(
        options.lookup_environment_option('process_variables').split(',')
        if options.lookup_environment_option('process_variables') else [])
    return cls(
        options.lookup_environment_option('process_command'),
        env=env,
        capabilities=python_sdk_capabilities(),
        artifacts=python_sdk_dependencies(options))
@Environment.register_urn(
    common_urns.environments.EXTERNAL.urn, beam_runner_api_pb2.ExternalPayload)
class ExternalEnvironment(Environment):
  """Environment served by an externally managed worker pool at a URL."""
  def __init__(
      self,
      url,  # type: str
      params=None,  # type: Optional[Mapping[str, str]]
      capabilities=(),  # type: Iterable[str]
      artifacts=(),  # type: Iterable[beam_runner_api_pb2.ArtifactInformation]
  ):
    super(ExternalEnvironment, self).__init__(capabilities, artifacts)
    self.url = url
    self.params = params

  def __eq__(self, other):
    return self.__class__ == other.__class__ and self.url == other.url \
        and self.params == other.params

  def __ne__(self, other):
    # TODO(BEAM-5949): Needed for Python 2 compatibility.
    return not self == other

  def __hash__(self):
    # type: () -> int
    return hash((
        self.__class__,
        self.url,
        frozenset(self.params.items()) if self.params is not None else None))

  def __repr__(self):
    # type: () -> str
    return 'ExternalEnvironment(url=%s,params=%s)' % (self.url, self.params)

  def to_runner_api_parameter(self, context):
    # type: (PipelineContext) -> Tuple[str, beam_runner_api_pb2.ExternalPayload]
    return (
        common_urns.environments.EXTERNAL.urn,
        beam_runner_api_pb2.ExternalPayload(
            endpoint=endpoints_pb2.ApiServiceDescriptor(url=self.url),
            params=self.params))

  @staticmethod
  def from_runner_api_parameter(payload,  # type: beam_runner_api_pb2.ExternalPayload
                                capabilities,  # type: Iterable[str]
                                artifacts,  # type: Iterable[beam_runner_api_pb2.ArtifactInformation]
                                context  # type: PipelineContext
                               ):
    # type: (...) -> ExternalEnvironment
    return ExternalEnvironment(
        payload.endpoint.url,
        params=payload.params or None,
        capabilities=capabilities,
        artifacts=artifacts)

  @classmethod
  def from_options(cls, options):
    # type: (PortableOptions) -> ExternalEnvironment
    """Build an ExternalEnvironment from environment_config (JSON or plain
    url) or from --environment_option external_service_address."""
    # BUG FIX: check truthiness before calling looks_like_json --
    # environment_config may be None when external_service_address is used,
    # and re.match raises TypeError on None.
    if options.environment_config:
      if looks_like_json(options.environment_config):
        config = json.loads(options.environment_config)
        url = config.get('url')
        if not url:
          raise ValueError('External environment endpoint must be set.')
        params = config.get('params')
      else:
        url = options.environment_config
        params = None
    else:
      url = options.lookup_environment_option('external_service_address')
      params = None

    return cls(
        url,
        params=params,
        capabilities=python_sdk_capabilities(),
        artifacts=python_sdk_dependencies(options))
@Environment.register_urn(python_urns.EMBEDDED_PYTHON, None)
class EmbeddedPythonEnvironment(Environment):
  """Environment that runs user code in-process; has no payload."""
  def __init__(self, capabilities=(), artifacts=()):
    # IMPROVEMENT: default capabilities to () (was None) for consistency with
    # the other Environment subclasses; a None value would be passed into the
    # runner-API proto's repeated capabilities field by to_runner_api.
    super(EmbeddedPythonEnvironment, self).__init__(capabilities, artifacts)

  def __eq__(self, other):
    return self.__class__ == other.__class__

  def __ne__(self, other):
    # TODO(BEAM-5949): Needed for Python 2 compatibility.
    return not self == other

  def __hash__(self):
    # type: () -> int
    return hash(self.__class__)

  def to_runner_api_parameter(self, context):
    # type: (PipelineContext) -> Tuple[str, None]
    return python_urns.EMBEDDED_PYTHON, None

  @staticmethod
  def from_runner_api_parameter(unused_payload,  # type: None
                                capabilities,  # type: Iterable[str]
                                artifacts,  # type: Iterable[beam_runner_api_pb2.ArtifactInformation]
                                context  # type: PipelineContext
                               ):
    # type: (...) -> EmbeddedPythonEnvironment
    return EmbeddedPythonEnvironment(capabilities, artifacts)

  @classmethod
  def from_options(cls, options):
    # type: (PortableOptions) -> EmbeddedPythonEnvironment
    return cls(
        capabilities=python_sdk_capabilities(),
        artifacts=python_sdk_dependencies(options))
@Environment.register_urn(python_urns.EMBEDDED_PYTHON_GRPC, bytes)
class EmbeddedPythonGrpcEnvironment(Environment):
  """In-process Python environment that talks to the runner over gRPC.

  Optional tuning knobs (state cache size, data buffer flush time) travel in
  the payload as a JSON object.
  """
  def __init__(
      self,
      state_cache_size=None,
      data_buffer_time_limit_ms=None,
      capabilities=(),
      artifacts=()):
    super(EmbeddedPythonGrpcEnvironment, self).__init__(capabilities, artifacts)
    self.state_cache_size = state_cache_size
    self.data_buffer_time_limit_ms = data_buffer_time_limit_ms

  def __eq__(self, other):
    return self.__class__ == other.__class__ \
           and self.state_cache_size == other.state_cache_size \
           and self.data_buffer_time_limit_ms == other.data_buffer_time_limit_ms

  def __ne__(self, other):
    # TODO(BEAM-5949): Needed for Python 2 compatibility.
    return not self == other

  def __hash__(self):
    # type: () -> int
    return hash(
        (self.__class__, self.state_cache_size, self.data_buffer_time_limit_ms))

  def __repr__(self):
    # type: () -> str
    repr_parts = []
    # IMPROVEMENT: idiomatic 'is not None' instead of 'not ... is None'.
    if self.state_cache_size is not None:
      repr_parts.append('state_cache_size=%d' % self.state_cache_size)
    if self.data_buffer_time_limit_ms is not None:
      repr_parts.append(
          'data_buffer_time_limit_ms=%d' % self.data_buffer_time_limit_ms)
    return 'EmbeddedPythonGrpcEnvironment(%s)' % ','.join(repr_parts)

  def to_runner_api_parameter(self, context):
    # type: (PipelineContext) -> Tuple[str, bytes]
    # Serialize the set knobs as a UTF-8 encoded JSON payload.
    params = {}
    if self.state_cache_size is not None:
      params['state_cache_size'] = self.state_cache_size
    if self.data_buffer_time_limit_ms is not None:
      params['data_buffer_time_limit_ms'] = self.data_buffer_time_limit_ms
    payload = json.dumps(params).encode('utf-8')
    return python_urns.EMBEDDED_PYTHON_GRPC, payload

  @staticmethod
  def from_runner_api_parameter(payload,  # type: bytes
                                capabilities,  # type: Iterable[str]
                                artifacts,  # type: Iterable[beam_runner_api_pb2.ArtifactInformation]
                                context  # type: PipelineContext
                               ):
    # type: (...) -> EmbeddedPythonGrpcEnvironment
    if payload:
      config = EmbeddedPythonGrpcEnvironment.parse_config(
          payload.decode('utf-8'))
      return EmbeddedPythonGrpcEnvironment(
          state_cache_size=config.get('state_cache_size'),
          data_buffer_time_limit_ms=config.get('data_buffer_time_limit_ms'),
          capabilities=capabilities,
          artifacts=artifacts)
    else:
      return EmbeddedPythonGrpcEnvironment()

  @classmethod
  def from_options(cls, options):
    # type: (PortableOptions) -> EmbeddedPythonGrpcEnvironment
    if options.environment_config:
      config = EmbeddedPythonGrpcEnvironment.parse_config(
          options.environment_config)
      return cls(
          state_cache_size=config.get('state_cache_size'),
          data_buffer_time_limit_ms=config.get('data_buffer_time_limit_ms'))
    else:
      return cls(
          capabilities=python_sdk_capabilities(),
          artifacts=python_sdk_dependencies(options))

  @staticmethod
  def parse_config(s):
    # type: (str) -> Dict[str, Any]
    """Parse a JSON config object, or a bare integer as state_cache_size."""
    if looks_like_json(s):
      config_dict = json.loads(s)
      if 'state_cache_size' in config_dict:
        config_dict['state_cache_size'] = int(config_dict['state_cache_size'])

      if 'data_buffer_time_limit_ms' in config_dict:
        config_dict['data_buffer_time_limit_ms'] = \
          int(config_dict['data_buffer_time_limit_ms'])
      return config_dict
    else:
      return {'state_cache_size': int(s)}
@Environment.register_urn(python_urns.SUBPROCESS_SDK, bytes)
class SubprocessSDKEnvironment(Environment):
  """Environment that starts the SDK harness via a shell command string."""
  def __init__(
      self,
      command_string,  # type: str
      capabilities=(),  # type: Iterable[str]
      artifacts=(),  # type: Iterable[beam_runner_api_pb2.ArtifactInformation]
  ):
    super(SubprocessSDKEnvironment, self).__init__(capabilities, artifacts)
    self.command_string = command_string

  def __eq__(self, other):
    return (
        self.__class__ == other.__class__ and
        self.command_string == other.command_string)

  def __ne__(self, other):
    # TODO(BEAM-5949): Needed for Python 2 compatibility.
    return not self == other

  def __hash__(self):
    # type: () -> int
    return hash((self.__class__, self.command_string))

  def __repr__(self):
    # type: () -> str
    return 'SubprocessSDKEnvironment(command_string=%s)' % self.command_string

  def to_runner_api_parameter(self, context):
    # type: (PipelineContext) -> Tuple[str, bytes]
    # The payload is simply the UTF-8 encoded command string.
    return python_urns.SUBPROCESS_SDK, self.command_string.encode('utf-8')

  @staticmethod
  def from_runner_api_parameter(payload,  # type: bytes
                                capabilities,  # type: Iterable[str]
                                artifacts,  # type: Iterable[beam_runner_api_pb2.ArtifactInformation]
                                context  # type: PipelineContext
                               ):
    # type: (...) -> SubprocessSDKEnvironment
    command = payload.decode('utf-8')
    return SubprocessSDKEnvironment(command, capabilities, artifacts)

  @classmethod
  def from_options(cls, options):
    # type: (PortableOptions) -> SubprocessSDKEnvironment
    """Builds the environment from --environment_config (the command)."""
    return cls(
        options.environment_config,
        capabilities=python_sdk_capabilities(),
        artifacts=python_sdk_dependencies(options))
class RunnerAPIEnvironmentHolder(Environment):
  """Wraps an already-constructed runner-API Environment proto verbatim."""
  def __init__(self, proto):
    # type: (beam_runner_api_pb2.Environment) -> None
    self.proto = proto

  def to_runner_api(self, context):
    # type: (PipelineContext) -> beam_runner_api_pb2.Environment
    # No translation is needed; the wrapped proto is returned as-is.
    return self.proto

  def capabilities(self):
    # type: () -> Iterable[str]
    # Capabilities are read straight off the wrapped proto.
    return self.proto.capabilities

  def __eq__(self, other):
    return (
        self.__class__ == other.__class__ and self.proto == other.proto)

  def __ne__(self, other):
    # TODO(BEAM-5949): Needed for Python 2 compatibility.
    return not self == other

  def __hash__(self):
    # type: () -> int
    return hash((self.__class__, self.proto))
def python_sdk_capabilities():
  # type: () -> List[str]
  """Returns the capability URNs advertised by this Python SDK, as a list."""
  return [capability for capability in _python_sdk_capabilities_iter()]
def _python_sdk_capabilities_iter():
  # type: () -> Iterator[str]
  """Yields the capability URNs supported by this SDK.

  Emits every standard coder URN this SDK has registered, a fixed set of
  protocol/feature URNs, and a synthetic 'sdk_base' version URN derived from
  the default Docker container image.
  """
  for urn_spec in common_urns.coders.__dict__.values():
    # Only advertise standard coders that are actually registered here.
    if getattr(urn_spec, 'urn', None) in coders.Coder._known_urns:
      yield urn_spec.urn
  yield common_urns.protocols.LEGACY_PROGRESS_REPORTING.urn
  yield common_urns.protocols.HARNESS_MONITORING_INFOS.urn
  yield common_urns.protocols.WORKER_STATUS.urn
  yield python_urns.PACKED_COMBINE_FN
  yield 'beam:version:sdk_base:' + DockerEnvironment.default_docker_image()
  yield common_urns.sdf_components.TRUNCATE_SIZED_RESTRICTION.urn
def python_sdk_dependencies(options, tmp_dir=None):
  """Enumerates the artifacts needed to run the Python SDK harness.

  Args:
    options: PipelineOptions used to determine which job resources to stage.
    tmp_dir: optional scratch directory; a fresh one is created if omitted.

  Returns:
    A tuple of beam_runner_api_pb2.ArtifactInformation, one per staged file.
  """
  if tmp_dir is None:
    tmp_dir = tempfile.mkdtemp()
  # When a prebuilt SDK container is used, dependencies are already baked
  # into the image and need not be staged again.
  skip_prestaged_dependencies = options.view_as(
      SetupOptions).prebuild_sdk_container_engine is not None
  return tuple(
      beam_runner_api_pb2.ArtifactInformation(
          type_urn=common_urns.artifact_types.FILE.urn,
          type_payload=beam_runner_api_pb2.ArtifactFilePayload(
              path=local_path).SerializeToString(),
          role_urn=common_urns.artifact_roles.STAGING_TO.urn,
          role_payload=beam_runner_api_pb2.ArtifactStagingToRolePayload(
              staged_name=staged_name).SerializeToString()) for local_path,
      staged_name in stager.Stager.create_job_resources(
          options,
          tmp_dir,
          skip_prestaged_dependencies=skip_prestaged_dependencies))
# pytype: skip-file
from __future__ import absolute_import
import typing
from past.builtins import unicode
from apache_beam.transforms.external import BeamJarExpansionService
from apache_beam.transforms.external import ExternalTransform
from apache_beam.transforms.external import NamedTupleBasedPayloadBuilder
__all__ = ['SqlTransform']
# Payload schema for SqlTransform's cross-language expansion request:
# 'query' is the SQL text; 'dialect' optionally selects e.g. 'zetasql'.
SqlTransformSchema = typing.NamedTuple(
    'SqlTransformSchema', [('query', unicode),
                           ('dialect', typing.Optional[unicode])])
class SqlTransform(ExternalTransform):
  """Runs a SQL query over one or more schema'd PCollections.

  Every input PCollection must carry a schema. A schema can currently be
  supplied in one of two ways:

  1) Register a `typing.NamedTuple` type with RowCoder and declare it as the
  output type, e.g.::

      Purchase = typing.NamedTuple('Purchase',
                                   [('item_name', unicode), ('price', float)])
      coders.registry.register_coder(Purchase, coders.RowCoder)

      with Pipeline() as p:
        purchases = (p | beam.io...
                       | beam.Map(..).with_output_types(Purchase))

  2) Emit `beam.Row` instances. Note this variant will fail if Beam cannot
  infer a data type for every field, e.g.::

      with Pipeline() as p:
        purchases = (p | beam.io...
                       | beam.Map(lambda x: beam.Row(item_name=unicode(..),
                                                     price=float(..))))

  The output of SqlTransform is likewise a schema'd PCollection; the columns
  produced by the query are readable as attributes, e.g.::

      purchases | SqlTransform(\"\"\"
                    SELECT item_name, COUNT(*) AS `count`
                    FROM PCOLLECTION GROUP BY item_name\"\"\")
                | beam.Map(lambda row: "We've sold %d %ss!" % (row.count,
                                                               row.item_name))

  Further examples live in `apache_beam.examples.wordcount_xlang_sql`,
  `apache_beam.examples.sql_taxi`, and `apache_beam.transforms.sql_test`.

  For Beam SQL in general, see the `Java transform
  <https://beam.apache.org/releases/javadoc/current/org/apache/beam/sdk/extensions/sql/SqlTransform.html>`_,
  and the `documentation
  <https://beam.apache.org/documentation/dsls/sql/overview/>`_.
  """
  URN = 'beam:external:java:sql:v1'

  def __init__(self, query, dialect=None, expansion_service=None):
    """Creates a SqlTransform, expanded to Java's SqlTransform (see class
    docs).

    :param query: The SQL query.
    :param dialect: (optional) The dialect, e.g. use 'zetasql' for ZetaSQL.
    :param expansion_service: (optional) The URL of the expansion service to
      use
    """
    # 'or' (not an 'is None' test) intentionally treats any falsy value as
    # "use the default bundled expansion service".
    expansion_service = expansion_service or BeamJarExpansionService(
        ':sdks:java:extensions:sql:expansion-service:shadowJar')
    payload_builder = NamedTupleBasedPayloadBuilder(
        SqlTransformSchema(query=query, dialect=dialect))
    super(SqlTransform, self).__init__(
        self.URN, payload_builder, expansion_service=expansion_service)
# pytype: skip-file
from __future__ import absolute_import
import calendar
import inspect
import json
from builtins import object
from datetime import datetime
from datetime import timedelta
from typing import TYPE_CHECKING
from typing import List
from past.builtins import unicode
if TYPE_CHECKING:
from apache_beam.options.pipeline_options import PipelineOptions
__all__ = ['HasDisplayData', 'DisplayDataItem', 'DisplayData']
class HasDisplayData(object):
  """Basic mixin for pipeline components that expose static display data.

  Provides a default (empty) display_data() implementation and a helper
  identifying the component's namespace.
  """
  def display_data(self):
    # type: () -> dict
    """Returns the static display data for this component.

    Subclasses that wish to surface display data override this method and
    return a dictionary of ``key:value`` pairs. Each value may be a plain
    integer, float or string; a :class:`DisplayDataItem` carrying richer
    metadata (short value, label, url); or a nested :class:`HasDisplayData`
    component whose own display data should be picked up. For example::

        {
          'key1': 'string_value',
          'key2': 1234,
          'key3': 3.14159265,
          'key4': DisplayDataItem('apache.org', url='http://apache.org'),
          'key5': subComponent
        }
    """
    return {}

  def _namespace(self):
    # type: () -> str
    # Fully-qualified name of the concrete class, e.g. 'pkg.mod.MyTransform'.
    return '%s.%s' % (self.__module__, self.__class__.__name__)
class DisplayData(object):
  """ Static display data associated with a pipeline component.
  """
  def __init__(
      self,
      namespace,  # type: str
      display_data_dict  # type: dict
  ):
    # type: (...) -> None
    self.namespace = namespace
    self.items = []  # type: List[DisplayDataItem]
    self._populate_items(display_data_dict)

  def _populate_items(self, display_data_dict):
    """ Populates the list of display data items.
    """
    for key, element in display_data_dict.items():
      # Nested components contribute their own items, flattened in place.
      if isinstance(element, HasDisplayData):
        subcomponent_display_data = DisplayData(
            element._namespace(), element.display_data())
        self.items += subcomponent_display_data.items
        continue
      if isinstance(element, DisplayDataItem):
        if element.should_drop():
          continue
        # Note: the item is mutated in place to attach this key/namespace.
        element.key = key
        element.namespace = self.namespace
        self.items.append(element)
        continue
      # If it's not a HasDisplayData element,
      # nor a dictionary, then it's a simple value
      self.items.append(
          DisplayDataItem(element, namespace=self.namespace, key=key))

  @classmethod
  def create_from_options(cls, pipeline_options):
    """ Creates :class:`~apache_beam.transforms.display.DisplayData` from a
    :class:`~apache_beam.options.pipeline_options.PipelineOptions` instance.

    When creating :class:`~apache_beam.transforms.display.DisplayData`, this
    method will convert the value of any item of a non-supported type to its
    string representation.
    The normal :meth:`.create_from()` method rejects those items.

    Returns:
      ~apache_beam.transforms.display.DisplayData:
        A :class:`~apache_beam.transforms.display.DisplayData` instance with
        populated items.

    Raises:
      ValueError: If the **pipeline_options** argument is
        not an instance of
        :class:`~apache_beam.options.pipeline_options.PipelineOptions`.
    """
    from apache_beam.options.pipeline_options import PipelineOptions
    if not isinstance(pipeline_options, PipelineOptions):
      raise ValueError(
          'Element of class {}.{} does not subclass PipelineOptions'.format(
              pipeline_options.__module__, pipeline_options.__class__.__name__))

    # Values of unsupported types are stringified rather than rejected.
    items = {
        k: (v if DisplayDataItem._get_value_type(v) is not None else str(v))
        for k,
        v in pipeline_options.display_data().items()
    }
    return cls(pipeline_options._namespace(), items)

  @classmethod
  def create_from(cls, has_display_data):
    """ Creates :class:`~apache_beam.transforms.display.DisplayData` from a
    :class:`HasDisplayData` instance.

    Returns:
      ~apache_beam.transforms.display.DisplayData:
        A :class:`~apache_beam.transforms.display.DisplayData` instance with
        populated items.

    Raises:
      ValueError: If the **has_display_data** argument is
        not an instance of :class:`HasDisplayData`.
    """
    if not isinstance(has_display_data, HasDisplayData):
      raise ValueError(
          'Element of class {}.{} does not subclass HasDisplayData'.format(
              has_display_data.__module__, has_display_data.__class__.__name__))
    return cls(has_display_data._namespace(), has_display_data.display_data())
class DisplayDataItem(object):
  """ A DisplayDataItem represents a unit of static display data.

  Each item is identified by a key and the namespace of the component the
  display item belongs to.
  """
  # Maps Python value types to the display-data API type names. 'unicode'
  # comes from past.builtins and aliases str on Python 3 (Python 2 legacy).
  typeDict = {
      str: 'STRING',
      unicode: 'STRING',
      int: 'INTEGER',
      float: 'FLOAT',
      bool: 'BOOLEAN',
      timedelta: 'DURATION',
      datetime: 'TIMESTAMP'
  }

  def __init__(
      self,
      value,
      url=None,
      label=None,
      namespace=None,
      key=None,
      shortValue=None):
    # namespace/key may be filled in later by DisplayData._populate_items.
    self.namespace = namespace
    self.key = key
    self.type = self._get_value_type(value)
    # A short value is derived (currently only for classes) unless supplied.
    self.shortValue = (
        shortValue if shortValue is not None else self._get_short_value(
            value, self.type))
    self.value = value
    self.url = url
    self.label = label
    # Drop-policy flags; see drop_if_none() / drop_if_default().
    self._drop_if_none = False
    self._drop_if_default = False

  def drop_if_none(self):
    # type: () -> DisplayDataItem
    """ The item should be dropped if its value is None.

    Returns:
      Returns self.
    """
    self._drop_if_none = True
    return self

  def drop_if_default(self, default):
    # type: (...) -> DisplayDataItem
    """ The item should be dropped if its value is equal to its default.

    Returns:
      Returns self.
    """
    # _default only exists once this method has been called; should_drop()
    # reads it only when _drop_if_default is set.
    self._default = default
    self._drop_if_default = True
    return self

  def should_drop(self):
    # type: () -> bool
    """ Return True if the item should be dropped, or False if it should not
    be dropped. This depends on the drop_if_none, and drop_if_default calls.

    Returns:
      True or False; depending on whether the item should be dropped or kept.
    """
    if self._drop_if_none and self.value is None:
      return True
    if self._drop_if_default and self.value == self._default:
      return True
    return False

  def is_valid(self):
    # type: () -> None
    """ Checks that all the necessary fields of the :class:`DisplayDataItem`
    are filled in. It checks that neither key, namespace, value or type are
    :data:`None`.

    Raises:
      ValueError: If the item does not have a key, namespace,
        value or type.
    """
    if self.key is None:
      raise ValueError(
          'Invalid DisplayDataItem %s. Key must not be None.' % self)
    if self.namespace is None:
      raise ValueError(
          'Invalid DisplayDataItem %s. Namespace must not be None' % self)
    if self.value is None:
      raise ValueError(
          'Invalid DisplayDataItem %s. Value must not be None' % self)
    if self.type is None:
      raise ValueError(
          'Invalid DisplayDataItem. Value {} is of an unsupported type.'.format(
              self.value))

  def _get_dict(self):
    # Builds the wire dictionary without validating; see get_dict() for the
    # validating public entry point.
    res = {
        'key': self.key,
        'namespace': self.namespace,
        'type': self.type if self.type != 'CLASS' else 'STRING'
    }
    # TODO: Python Class types should not be special-cased once
    # the Fn API is in.
    if self.url is not None:
      res['url'] = self.url
    if self.shortValue is not None:
      res['shortValue'] = self.shortValue
    if self.label is not None:
      res['label'] = self.label
    res['value'] = self._format_value(self.value, self.type)
    return res

  def get_dict(self):
    # type: () -> dict
    """ Returns the internal-API dictionary representing the
    :class:`DisplayDataItem`.

    Returns:
      Dict[str, Any]: A dictionary. The internal-API dictionary representing
      the :class:`DisplayDataItem`.

    Raises:
      ValueError: if the item is not valid.
    """
    self.is_valid()
    return self._get_dict()

  def __repr__(self):
    return 'DisplayDataItem({})'.format(json.dumps(self._get_dict()))

  def __eq__(self, other):
    if isinstance(other, self.__class__):
      return self._get_dict() == other._get_dict()
    return False

  def __ne__(self, other):
    # TODO(BEAM-5949): Needed for Python 2 compatibility.
    return not self == other

  def __hash__(self):
    return hash(tuple(sorted(self._get_dict().items())))

  @classmethod
  def _format_value(cls, value, type_):
    """ Returns the API representation of a value given its type.

    Args:
      value: The value of the item that needs to be shortened.
      type_(string): The type of the value.

    Returns:
      A formatted value in the form of a float, int, or string.
    """
    res = value
    if type_ == 'CLASS':
      res = '{}.{}'.format(value.__module__, value.__name__)
    elif type_ == 'DURATION':
      # Durations and timestamps are transmitted in milliseconds.
      res = value.total_seconds() * 1000
    elif type_ == 'TIMESTAMP':
      res = calendar.timegm(
          value.timetuple()) * 1000 + value.microsecond // 1000
    return res

  @classmethod
  def _get_short_value(cls, value, type_):
    """ Calculates the short value for an item.

    Args:
      value: The value of the item that needs to be shortened.
      type_(string): The type of the value.

    Returns:
      The unqualified name of a class if type_ is 'CLASS'. None otherwise.
    """
    if type_ == 'CLASS':
      return value.__name__
    return None

  @classmethod
  def _get_value_type(cls, value):
    """ Infers the type of a given value.

    Args:
      value: The value whose type needs to be inferred. For 'DURATION' and
        'TIMESTAMP', the corresponding Python type is datetime.timedelta and
        datetime.datetime respectively. For Python classes, the API type is
        just 'STRING' at the moment.

    Returns:
      One of 'STRING', 'INTEGER', 'FLOAT', 'CLASS', 'DURATION', or
      'TIMESTAMP', depending on the type of the value.
    """
    #TODO: Fix Args: documentation once the Python classes handling has changed
    type_ = cls.typeDict.get(type(value))
    if type_ is None:
      type_ = 'CLASS' if inspect.isclass(value) else None
    # A literal None value is treated as a (droppable) STRING item.
    if type_ is None and value is None:
      type_ = 'STRING'
    return type_
# pytype: skip-file
from __future__ import absolute_import
import abc
from builtins import object
from builtins import range
from functools import total_ordering
from typing import Any
from typing import Iterable
from typing import List
from typing import Optional
from future.utils import with_metaclass
from google.protobuf import duration_pb2
from google.protobuf import timestamp_pb2
from apache_beam.coders import coders
from apache_beam.portability import common_urns
from apache_beam.portability import python_urns
from apache_beam.portability.api import beam_runner_api_pb2
from apache_beam.portability.api import standard_window_fns_pb2
from apache_beam.transforms import timeutil
from apache_beam.utils import proto_utils
from apache_beam.utils import urns
from apache_beam.utils import windowed_value
from apache_beam.utils.timestamp import MIN_TIMESTAMP
from apache_beam.utils.timestamp import Duration
from apache_beam.utils.timestamp import DurationTypes # pylint: disable=unused-import
from apache_beam.utils.timestamp import Timestamp
from apache_beam.utils.timestamp import TimestampTypes # pylint: disable=unused-import
from apache_beam.utils.windowed_value import WindowedValue
__all__ = [
'TimestampCombiner',
'WindowFn',
'BoundedWindow',
'IntervalWindow',
'TimestampedValue',
'GlobalWindow',
'NonMergingWindowFn',
'GlobalWindows',
'FixedWindows',
'SlidingWindows',
'Sessions',
]
# TODO(ccy): revisit naming and semantics once Java Apache Beam finalizes their
# behavior.
class TimestampCombiner(object):
  """Determines how output timestamps of grouping operations are assigned."""
  OUTPUT_AT_EOW = beam_runner_api_pb2.OutputTime.END_OF_WINDOW
  OUTPUT_AT_EARLIEST = beam_runner_api_pb2.OutputTime.EARLIEST_IN_PANE
  OUTPUT_AT_LATEST = beam_runner_api_pb2.OutputTime.LATEST_IN_PANE
  # TODO(robertwb): Add this to the runner API or remove it.
  OUTPUT_AT_EARLIEST_TRANSFORMED = 'OUTPUT_AT_EARLIEST_TRANSFORMED'

  @staticmethod
  def get_impl(timestamp_combiner, window_fn):
    # type: (beam_runner_api_pb2.OutputTime.Enum, WindowFn) -> timeutil.TimestampCombinerImpl
    """Returns the TimestampCombinerImpl for the given combiner constant."""
    if timestamp_combiner == TimestampCombiner.OUTPUT_AT_EOW:
      return timeutil.OutputAtEndOfWindowImpl()
    if timestamp_combiner == TimestampCombiner.OUTPUT_AT_EARLIEST:
      return timeutil.OutputAtEarliestInputTimestampImpl()
    if timestamp_combiner == TimestampCombiner.OUTPUT_AT_LATEST:
      return timeutil.OutputAtLatestInputTimestampImpl()
    if timestamp_combiner == TimestampCombiner.OUTPUT_AT_EARLIEST_TRANSFORMED:
      # Only this variant needs the window_fn (to transform input times).
      return timeutil.OutputAtEarliestTransformedInputTimestampImpl(window_fn)
    raise ValueError('Invalid TimestampCombiner: %s.' % timestamp_combiner)
class WindowFn(with_metaclass(abc.ABCMeta, urns.RunnerApiFn)):  # type: ignore[misc]
  """An abstract windowing function defining a basic assign and merge."""
  class AssignContext(object):
    """Context passed to WindowFn.assign()."""
    def __init__(
        self,
        timestamp,  # type: TimestampTypes
        element=None,  # type: Optional[Any]
        window=None  # type: Optional[BoundedWindow]
    ):
      # type: (...) -> None
      self.timestamp = Timestamp.of(timestamp)
      self.element = element
      self.window = window

  @abc.abstractmethod
  def assign(self, assign_context):
    # type: (AssignContext) -> Iterable[BoundedWindow]
    """Associates windows to an element.

    Arguments:
      assign_context: Instance of AssignContext.

    Returns:
      An iterable of BoundedWindow.
    """
    raise NotImplementedError

  class MergeContext(object):
    """Context passed to WindowFn.merge() to perform merging, if any."""
    def __init__(self, windows):
      # type: (Iterable[BoundedWindow]) -> None
      self.windows = list(windows)

    def merge(self, to_be_merged, merge_result):
      # type: (Iterable[BoundedWindow], BoundedWindow) -> None
      # Callback invoked by WindowFn.merge(); overridden by the runner.
      raise NotImplementedError

  @abc.abstractmethod
  def merge(self, merge_context):
    # type: (WindowFn.MergeContext) -> None
    """Returns a window that is the result of merging a set of windows."""
    raise NotImplementedError

  def is_merging(self):
    # type: () -> bool
    """Returns whether this WindowFn merges windows."""
    return True

  @abc.abstractmethod
  def get_window_coder(self):
    # type: () -> coders.Coder
    # Coder used to serialize the windows this WindowFn produces.
    raise NotImplementedError

  def get_transformed_output_time(self, window, input_timestamp):  # pylint: disable=unused-argument
    # type: (BoundedWindow, Timestamp) -> Timestamp
    """Given input time and output window, returns output time for window.

    If TimestampCombiner.OUTPUT_AT_EARLIEST_TRANSFORMED is used in the
    Windowing, the output timestamp for the given window will be the earliest
    of the timestamps returned by get_transformed_output_time() for elements
    of the window.

    Arguments:
      window: Output window of element.
      input_timestamp: Input timestamp of element as a timeutil.Timestamp
        object.

    Returns:
      Transformed timestamp.
    """
    # By default, just return the input timestamp.
    return input_timestamp

  # Registers pickling as the fallback runner-API serialization for
  # WindowFn subclasses without a dedicated URN.
  urns.RunnerApiFn.register_pickle_urn(python_urns.PICKLED_WINDOWFN)
class BoundedWindow(object):
  """A window for timestamps in range (-infinity, end).

  Attributes:
    end: End of window.
  """
  def __init__(self, end):
    # type: (TimestampTypes) -> None
    self._end = Timestamp.of(end)

  @property
  def start(self):
    # type: () -> Timestamp
    # Abstract; concrete window types define their start.
    raise NotImplementedError

  @property
  def end(self):
    # type: () -> Timestamp
    return self._end

  def max_timestamp(self):
    # type: () -> Timestamp
    # Largest timestamp contained in this window (end is exclusive).
    return self.end.predecessor()

  def __eq__(self, other):
    raise NotImplementedError

  def __ne__(self, other):
    # Windows differ if their endpoints differ, or (arbitrarily) if their
    # hashes differ. hash() is only evaluated when the endpoints match.
    return not (self.end == other.end and hash(self) == hash(other))

  def _order_by_end_then_hash(self, other, compare):
    # Orders first by endpoint; ties are broken arbitrarily but
    # deterministically by hash. hash() is only computed on a tie.
    if self.end != other.end:
      return compare(self.end, other.end)
    return compare(hash(self), hash(other))

  def __lt__(self, other):
    return self._order_by_end_then_hash(other, lambda a, b: a < b)

  def __le__(self, other):
    return self._order_by_end_then_hash(other, lambda a, b: a <= b)

  def __gt__(self, other):
    return self._order_by_end_then_hash(other, lambda a, b: a > b)

  def __ge__(self, other):
    return self._order_by_end_then_hash(other, lambda a, b: a >= b)

  def __hash__(self):
    raise NotImplementedError

  def __repr__(self):
    return '[?, %s)' % float(self.end)
@total_ordering
class IntervalWindow(windowed_value._IntervalWindowBase, BoundedWindow):
  """A window for timestamps in range [start, end).

  Attributes:
    start: Start of window as seconds since Unix epoch.
    end: End of window as seconds since Unix epoch.
  """
  def __lt__(self, other):
    # Order primarily by end; ties broken arbitrarily by hash
    # (total_ordering derives the remaining comparisons).
    if self.end != other.end:
      return self.end < other.end
    return hash(self) < hash(other)

  def intersects(self, other):
    # type: (IntervalWindow) -> bool
    # NOTE(review): this uses 'or', which is True for essentially any pair
    # of non-empty intervals (it is only False when each window starts
    # at/after the other's end). A strict overlap test would be
    # 'other.start < self.end and self.start < other.end' -- confirm the
    # intended semantics before relying on this predicate.
    return other.start < self.end or self.start < other.end

  def union(self, other):
    # type: (IntervalWindow) -> IntervalWindow
    # Smallest interval covering both windows (they need not overlap).
    return IntervalWindow(
        min(self.start, other.start), max(self.end, other.end))
@total_ordering
class TimestampedValue(object):
  """A value paired with the timestamp at which it occurred.

  Attributes:
    value: The underlying value.
    timestamp: Timestamp associated with the value as seconds since Unix
      epoch.
  """
  def __init__(self, value, timestamp):
    # type: (Any, TimestampTypes) -> None
    self.value = value
    self.timestamp = Timestamp.of(timestamp)

  def __eq__(self, other):
    if type(self) != type(other):
      return False
    return self.value == other.value and self.timestamp == other.timestamp

  def __hash__(self):
    return hash((self.value, self.timestamp))

  def __ne__(self, other):
    return not self == other

  def __lt__(self, other):
    # Instances of different types order by type name; otherwise by value,
    # then by timestamp (total_ordering derives the rest).
    if type(self) != type(other):
      return type(self).__name__ < type(other).__name__
    if self.value != other.value:
      return self.value < other.value
    return self.timestamp < other.timestamp
class GlobalWindow(BoundedWindow):
  """The default window into which all data is placed (via GlobalWindows)."""
  _instance = None  # type: GlobalWindow

  def __new__(cls):
    # Singleton: every construction yields the same instance.
    instance = cls._instance
    if instance is None:
      instance = super(GlobalWindow, cls).__new__(cls)
      cls._instance = instance
    return instance

  def __init__(self):
    # type: () -> None
    super(GlobalWindow, self).__init__(GlobalWindow._getTimestampFromProto())

  def __repr__(self):
    return 'GlobalWindow'

  def __hash__(self):
    return hash(type(self))

  def __eq__(self, other):
    # Global windows are always and only equal to each other.
    return self is other or type(self) is type(other)

  def __ne__(self, other):
    return not self == other

  @property
  def start(self):
    # type: () -> Timestamp
    return MIN_TIMESTAMP

  @staticmethod
  def _getTimestampFromProto():
    # type: () -> Timestamp
    # The maximum global-window timestamp is defined in milliseconds by the
    # portability constants; convert to microseconds.
    max_ts_millis = int(
        common_urns.constants.GLOBAL_WINDOW_MAX_TIMESTAMP_MILLIS.constant)
    return Timestamp(micros=max_ts_millis * 1000)
class NonMergingWindowFn(WindowFn):
  """Base class for windowing functions that never merge windows."""
  def is_merging(self):
    # type: () -> bool
    return False

  def merge(self, merge_context):
    # type: (WindowFn.MergeContext) -> None
    pass  # No merging.
class GlobalWindows(NonMergingWindowFn):
  """A windowing function that assigns everything to one global window."""
  @classmethod
  def windowed_value(
      cls,
      value,  # type: Any
      timestamp=MIN_TIMESTAMP,  # type: Timestamp
      pane_info=windowed_value.PANE_INFO_UNKNOWN  # type: windowed_value.PaneInfo
  ):
    # type: (...) -> WindowedValue
    """Wraps value in a WindowedValue living in the single global window."""
    return WindowedValue(value, timestamp, (GlobalWindow(), ), pane_info)

  def assign(self, assign_context):
    # type: (WindowFn.AssignContext) -> List[GlobalWindow]
    # Every element lands in the same (singleton) global window.
    return [GlobalWindow()]

  def get_window_coder(self):
    # type: () -> coders.GlobalWindowCoder
    return coders.GlobalWindowCoder()

  def __hash__(self):
    return hash(type(self))

  def __eq__(self, other):
    # Global windowfn is always and only equal to each other.
    return self is other or type(self) is type(other)

  def __ne__(self, other):
    return not self == other

  def to_runner_api_parameter(self, context):
    # The global-windows URN carries no payload.
    return common_urns.global_windows.urn, None

  @staticmethod
  @urns.RunnerApiFn.register_urn(common_urns.global_windows.urn, None)
  def from_runner_api_parameter(unused_fn_parameter, unused_context):
    # type: (...) -> GlobalWindows
    return GlobalWindows()
class FixedWindows(NonMergingWindowFn):
  """A windowing function that assigns each element to one time interval.

  The attributes size and offset determine in what time interval a timestamp
  will be slotted. The time intervals have the following formula:
  [N * size + offset, (N + 1) * size + offset)

  Attributes:
    size: Size of the window as seconds.
    offset: Offset of this window as seconds. Windows start at
      t=N * size + offset where t=0 is the UNIX epoch. The offset must be a
      value in range [0, size). If it is not it will be normalized to this
      range.
  """
  def __init__(
      self,
      size,  # type: DurationTypes
      offset=0  # type: TimestampTypes
  ):
    """Initialize a ``FixedWindows`` function for a given size and offset.

    Args:
      size (int): Size of the window in seconds.
      offset(int): Offset of this window as seconds. Windows start at
        t=N * size + offset where t=0 is the UNIX epoch. The offset must be a
        value in range [0, size). If it is not it will be normalized to this
        range.
    """
    if size <= 0:
      raise ValueError('The size parameter must be strictly positive.')
    self.size = Duration.of(size)
    # Normalize the offset into [0, size).
    self.offset = Timestamp.of(offset) % self.size

  def assign(self, context):
    # type: (WindowFn.AssignContext) -> List[IntervalWindow]
    timestamp = context.timestamp
    # Snap the timestamp down to the start of its enclosing window.
    start = timestamp - (timestamp - self.offset) % self.size
    return [IntervalWindow(start, start + self.size)]

  def get_window_coder(self):
    # type: () -> coders.IntervalWindowCoder
    return coders.IntervalWindowCoder()

  def __eq__(self, other):
    if type(self) == type(other) == FixedWindows:
      return self.size == other.size and self.offset == other.offset
    # Previously fell off the end (returning None) for other types; return
    # NotImplemented so Python can try the reflected comparison.
    return NotImplemented

  def __hash__(self):
    return hash((self.size, self.offset))

  def __ne__(self, other):
    return not self == other

  def to_runner_api_parameter(self, context):
    return (
        common_urns.fixed_windows.urn,
        standard_window_fns_pb2.FixedWindowsPayload(
            size=proto_utils.from_micros(
                duration_pb2.Duration, self.size.micros),
            offset=proto_utils.from_micros(
                timestamp_pb2.Timestamp, self.offset.micros)))

  @staticmethod
  @urns.RunnerApiFn.register_urn(
      common_urns.fixed_windows.urn,
      standard_window_fns_pb2.FixedWindowsPayload)
  def from_runner_api_parameter(fn_parameter, unused_context):
    # type: (...) -> FixedWindows
    return FixedWindows(
        size=Duration(micros=fn_parameter.size.ToMicroseconds()),
        offset=Timestamp(micros=fn_parameter.offset.ToMicroseconds()))
class SlidingWindows(NonMergingWindowFn):
  """A windowing function that assigns each element to a set of sliding windows.

  The attributes size and offset determine in what time interval a timestamp
  will be slotted. The time intervals have the following formula:
  [N * period + offset, N * period + offset + size)

  Attributes:
    size: Size of the window as seconds.
    period: Period of the windows as seconds.
    offset: Offset of this window as seconds since Unix epoch. Windows start at
      t=N * period + offset where t=0 is the epoch. The offset must be a value
      in range [0, period). If it is not it will be normalized to this range.
  """
  def __init__(self,
               size,  # type: DurationTypes
               period,  # type: DurationTypes
               offset=0,  # type: TimestampTypes
              ):
    if size <= 0:
      raise ValueError('The size parameter must be strictly positive.')
    self.size = Duration.of(size)
    self.period = Duration.of(period)
    # Normalize the offset into [0, period).
    self.offset = Timestamp.of(offset) % period

  def assign(self, context):
    # type: (WindowFn.AssignContext) -> List[IntervalWindow]
    timestamp = context.timestamp
    # Start of the latest window containing the timestamp; walk backwards
    # one period at a time to collect every window that still contains it.
    start = timestamp - ((timestamp - self.offset) % self.period)
    return [
        IntervalWindow(Timestamp(micros=s), Timestamp(micros=s) + self.size)
        for s in range(
            start.micros,
            timestamp.micros - self.size.micros,
            -self.period.micros)
    ]

  def get_window_coder(self):
    # type: () -> coders.IntervalWindowCoder
    return coders.IntervalWindowCoder()

  def __eq__(self, other):
    if type(self) == type(other) == SlidingWindows:
      return (
          self.size == other.size and self.offset == other.offset and
          self.period == other.period)
    # Previously fell off the end (returning None) for other types; return
    # NotImplemented so Python can try the reflected comparison.
    return NotImplemented

  def __ne__(self, other):
    return not self == other

  def __hash__(self):
    # Include all fields compared by __eq__ (size was previously omitted;
    # equal objects still hashed equally, but this is consistent with
    # FixedWindows and distributes better).
    return hash((self.size, self.offset, self.period))

  def to_runner_api_parameter(self, context):
    return (
        common_urns.sliding_windows.urn,
        standard_window_fns_pb2.SlidingWindowsPayload(
            size=proto_utils.from_micros(
                duration_pb2.Duration, self.size.micros),
            offset=proto_utils.from_micros(
                timestamp_pb2.Timestamp, self.offset.micros),
            period=proto_utils.from_micros(
                duration_pb2.Duration, self.period.micros)))

  @staticmethod
  @urns.RunnerApiFn.register_urn(
      common_urns.sliding_windows.urn,
      standard_window_fns_pb2.SlidingWindowsPayload)
  def from_runner_api_parameter(fn_parameter, unused_context):
    # type: (...) -> SlidingWindows
    return SlidingWindows(
        size=Duration(micros=fn_parameter.size.ToMicroseconds()),
        offset=Timestamp(micros=fn_parameter.offset.ToMicroseconds()),
        period=Duration(micros=fn_parameter.period.ToMicroseconds()))
class Sessions(WindowFn):
  """A windowing function that groups elements into sessions.

  A session is defined as a series of consecutive events
  separated by a specified gap size.

  Attributes:
    gap_size: Size of the gap between windows as floating-point seconds.
  """
  def __init__(self, gap_size):
    # type: (DurationTypes) -> None
    if gap_size <= 0:
      raise ValueError('The size parameter must be strictly positive.')
    self.gap_size = Duration.of(gap_size)

  def assign(self, context):
    # type: (WindowFn.AssignContext) -> List[IntervalWindow]
    # Each element initially gets its own proto-session window; overlapping
    # windows are combined later by merge().
    timestamp = context.timestamp
    return [IntervalWindow(timestamp, timestamp + self.gap_size)]

  def get_window_coder(self):
    # type: () -> coders.IntervalWindowCoder
    return coders.IntervalWindowCoder()

  def merge(self, merge_context):
    # type: (WindowFn.MergeContext) -> None
    """Merges each run of overlapping windows into a single session."""
    to_merge = []  # type: List[BoundedWindow]
    end = MIN_TIMESTAMP
    # Sweep the windows in start order, accumulating the current run of
    # overlapping windows; flush a merge whenever a gap breaks the run.
    for w in sorted(merge_context.windows, key=lambda w: w.start):
      if to_merge:
        if end > w.start:
          # w overlaps the current run; extend it.
          to_merge.append(w)
          if w.end > end:
            end = w.end
        else:
          # Gap found: flush the run (only if it actually merged anything).
          if len(to_merge) > 1:
            merge_context.merge(
                to_merge, IntervalWindow(to_merge[0].start, end))
          to_merge = [w]
          end = w.end
      else:
        to_merge = [w]
        end = w.end
    if len(to_merge) > 1:
      merge_context.merge(to_merge, IntervalWindow(to_merge[0].start, end))

  def __eq__(self, other):
    if type(self) == type(other) == Sessions:
      return self.gap_size == other.gap_size
    # Previously fell off the end (returning None) for other types; return
    # NotImplemented so Python can try the reflected comparison.
    return NotImplemented

  def __ne__(self, other):
    return not self == other

  def __hash__(self):
    return hash(self.gap_size)

  def to_runner_api_parameter(self, context):
    return (
        common_urns.session_windows.urn,
        standard_window_fns_pb2.SessionWindowsPayload(
            gap_size=proto_utils.from_micros(
                duration_pb2.Duration, self.gap_size.micros)))

  @staticmethod
  @urns.RunnerApiFn.register_urn(
      common_urns.session_windows.urn,
      standard_window_fns_pb2.SessionWindowsPayload)
  def from_runner_api_parameter(fn_parameter, unused_context):
    # type: (...) -> Sessions
    return Sessions(
        gap_size=Duration(micros=fn_parameter.gap_size.ToMicroseconds()))
# pytype: skip-file
from __future__ import absolute_import
import copy
import itertools
import logging
import operator
import os
import sys
import threading
from builtins import hex
from builtins import object
from builtins import zip
from functools import reduce
from functools import wraps
from typing import TYPE_CHECKING
from typing import Any
from typing import Callable
from typing import Dict
from typing import List
from typing import Optional
from typing import Sequence
from typing import Tuple
from typing import Type
from typing import TypeVar
from typing import Union
from typing import overload
from google.protobuf import message
from apache_beam import error
from apache_beam import pvalue
from apache_beam.internal import pickler
from apache_beam.internal import util
from apache_beam.portability import python_urns
from apache_beam.pvalue import DoOutputsTuple
from apache_beam.transforms.display import DisplayDataItem
from apache_beam.transforms.display import HasDisplayData
from apache_beam.transforms.sideinputs import SIDE_INPUT_PREFIX
from apache_beam.typehints import native_type_compatibility
from apache_beam.typehints import typehints
from apache_beam.typehints.decorators import IOTypeHints
from apache_beam.typehints.decorators import TypeCheckError
from apache_beam.typehints.decorators import WithTypeHints
from apache_beam.typehints.decorators import get_signature
from apache_beam.typehints.decorators import get_type_hints
from apache_beam.typehints.decorators import getcallargs_forhints
from apache_beam.typehints.trivial_inference import instance_to_type
from apache_beam.typehints.typehints import validate_composite_type_param
from apache_beam.utils import proto_utils
if TYPE_CHECKING:
from apache_beam import coders
from apache_beam.pipeline import Pipeline
from apache_beam.runners.pipeline_context import PipelineContext
from apache_beam.transforms.core import Windowing
from apache_beam.portability.api import beam_runner_api_pb2
__all__ = [
    'PTransform',
    'ptransform_fn',
    'label_from_callable',
]
_LOGGER = logging.getLogger(__name__)
# Generic type variables used throughout this module.
T = TypeVar('T')
PTransformT = TypeVar('PTransformT', bound='PTransform')
# Signature of the per-urn constructor registered via PTransform.register_urn:
# (transform proto, deserialized payload, pipeline context) -> transform.
ConstructorFn = Callable[
    ['beam_runner_api_pb2.PTransform', Optional[Any], 'PipelineContext'], Any]
# When True, @ptransform_fn factories apply decorator type hints (enabled via
# the --type_check_additional=ptransform_fn flag; see ptransform_fn below).
ptransform_fn_typehints_enabled = False
class _PValueishTransform(object):
"""Visitor for PValueish objects.
A PValueish is a PValue, or list, tuple, dict of PValuesish objects.
This visits a PValueish, contstructing a (possibly mutated) copy.
"""
def visit_nested(self, node, *args):
if isinstance(node, (tuple, list)):
args = [self.visit(x, *args) for x in node]
if isinstance(node, tuple) and hasattr(node.__class__, '_make'):
# namedtuples require unpacked arguments in their constructor
return node.__class__(*args)
else:
return node.__class__(args)
elif isinstance(node, dict):
return node.__class__(
{key: self.visit(value, *args)
for (key, value) in node.items()})
else:
return node
class _SetInputPValues(_PValueishTransform):
  """Substitutes values within a pvalueish, matching by object identity.

  ``replacements`` maps ``id(original)`` to the replacement value; any node
  not present in the map is recursed into (containers) or kept (leaves).
  """
  def visit(self, node, replacements):
    key = id(node)
    if key in replacements:
      return replacements[key]
    return self.visit_nested(node, replacements)
# Caches to allow for materialization of values when executing a pipeline
# in-process, in eager mode. This cache allows the same _MaterializedResult
# object to be accessed and used despite Runner API round-trip serialization.
# Keyed by (process id, pipeline id) so entries are scoped to one Pipeline
# instance within one process; all access is guarded by the lock below.
_pipeline_materialization_cache = {
} # type: Dict[Tuple[int, int], Dict[int, _MaterializedResult]]
_pipeline_materialization_lock = threading.Lock()
def _allocate_materialized_pipeline(pipeline):
  # type: (Pipeline) -> None
  """Registers *pipeline* in the materialization cache with an empty result map.

  Must be called before _allocate_materialized_result for the same pipeline.
  """
  cache_key = (os.getpid(), id(pipeline))
  with _pipeline_materialization_lock:
    _pipeline_materialization_cache[cache_key] = {}
def _allocate_materialized_result(pipeline):
  # type: (Pipeline) -> _MaterializedResult
  """Allocates the next _MaterializedResult slot for *pipeline*.

  Raises:
    ValueError: if _allocate_materialized_pipeline was not called first.
  """
  pid = os.getpid()
  pipeline_id = id(pipeline)
  with _pipeline_materialization_lock:
    key = (pid, pipeline_id)
    if key not in _pipeline_materialization_cache:
      raise ValueError(
          'Materialized pipeline is not allocated for result '
          'cache.')
    results_by_id = _pipeline_materialization_cache[key]
    # Result ids are dense, assigned in allocation order.
    result = _MaterializedResult(pipeline_id, len(results_by_id))
    results_by_id[result._result_id] = result
    return result
def _get_materialized_result(pipeline_id, result_id):
  # type: (int, int) -> _MaterializedResult
  """Looks up a previously allocated result in this process's cache."""
  key = (os.getpid(), pipeline_id)
  with _pipeline_materialization_lock:
    if key not in _pipeline_materialization_cache:
      # Unpickling in a different process cannot reach the original cache.
      raise Exception(
          'Materialization in out-of-process and remote runners is not yet '
          'supported.')
    return _pipeline_materialization_cache[key][result_id]
def _release_materialized_pipeline(pipeline):
  # type: (Pipeline) -> None
  """Drops all cached results for *pipeline* from this process's cache."""
  cache_key = (os.getpid(), id(pipeline))
  with _pipeline_materialization_lock:
    del _pipeline_materialization_cache[cache_key]
class _MaterializedResult(object):
def __init__(self, pipeline_id, result_id):
# type: (int, int) -> None
self._pipeline_id = pipeline_id
self._result_id = result_id
self.elements = [] # type: List[Any]
def __reduce__(self):
# When unpickled (during Runner API roundtrip serailization), get the
# _MaterializedResult object from the cache so that values are written
# to the original _MaterializedResult when run in eager mode.
return (_get_materialized_result, (self._pipeline_id, self._result_id))
class _MaterializedDoOutputsTuple(pvalue.DoOutputsTuple):
  """Materialized counterpart of a DoOutputsTuple.

  Indexing by tag returns the concrete list of elements collected for that
  tagged output, instead of a deferred PCollection.
  """
  def __init__(self, deferred, results_by_tag):
    super(_MaterializedDoOutputsTuple,
          self).__init__(None, None, deferred._tags, deferred._main_tag)
    self._deferred = deferred
    self._results_by_tag = results_by_tag

  def __getitem__(self, tag):
    if tag not in self._results_by_tag:
      # Fix: original message read "is not a a defined" (duplicated article).
      raise KeyError(
          'Tag %r is not a defined output tag of %s.' % (tag, self._deferred))
    return self._results_by_tag[tag].elements
class _AddMaterializationTransforms(_PValueishTransform):
  """Attaches, to each PValue leaf, a ParDo that records its elements into a
  _MaterializedResult so eager execution can hand back concrete values."""
  def _materialize_transform(self, pipeline):
    # Allocate a result slot in the per-process cache for this pipeline.
    result = _allocate_materialized_result(pipeline)
    # Need to define _MaterializeValuesDoFn here to avoid circular
    # dependencies.
    from apache_beam import DoFn
    from apache_beam import ParDo
    class _MaterializeValuesDoFn(DoFn):
      def process(self, element):
        # Captures `result` from the enclosing scope; elements accumulate
        # there as the pipeline runs.
        result.elements.append(element)
    materialization_label = '_MaterializeValues%d' % result._result_id
    return (materialization_label >> ParDo(_MaterializeValuesDoFn()), result)
  def visit(self, node):
    if isinstance(node, pvalue.PValue):
      transform, result = self._materialize_transform(node.pipeline)
      # Applied purely for its side effect of recording elements.
      node | transform
      return result
    elif isinstance(node, pvalue.DoOutputsTuple):
      # Materialize every tagged output, including the main one.
      results_by_tag = {}
      for tag in itertools.chain([node._main_tag], node._tags):
        results_by_tag[tag] = self.visit(node[tag])
      return _MaterializedDoOutputsTuple(node, results_by_tag)
    else:
      return self.visit_nested(node)
class _FinalizeMaterialization(_PValueishTransform):
  """Swaps materialization placeholders for their collected element lists."""
  def visit(self, node):
    # _MaterializedDoOutputsTuple already exposes elements via __getitem__,
    # so it is returned as-is.
    if isinstance(node, _MaterializedDoOutputsTuple):
      return node
    if isinstance(node, _MaterializedResult):
      return node.elements
    return self.visit_nested(node)
def get_named_nested_pvalues(pvalueish):
  """Yields (name, pvalue) pairs for every PValue nested in *pvalueish*.

  Names are dotted paths built from namedtuple field names, dict keys, or
  positional indices; a bare PValue leaf yields (None, pvalue).
  """
  if isinstance(pvalueish, tuple):
    # Check to see if it's a named tuple.
    fields = getattr(pvalueish, '_fields', None)
    if fields and len(fields) == len(pvalueish):
      tagged_values = zip(fields, pvalueish)
    else:
      tagged_values = enumerate(pvalueish)
  elif isinstance(pvalueish, list):
    tagged_values = enumerate(pvalueish)
  elif isinstance(pvalueish, dict):
    tagged_values = pvalueish.items()
  else:
    if isinstance(pvalueish, (pvalue.PValue, pvalue.DoOutputsTuple)):
      yield None, pvalueish
      return
    # NOTE(review): a leaf that is not a PValue/DoOutputsTuple falls through
    # to the loop below with `tagged_values` unbound, raising
    # UnboundLocalError; presumably callers only ever pass pvalueish leaves
    # here — confirm before tightening this into an explicit error.
  for tag, subvalue in tagged_values:
    for subtag, subsubvalue in get_named_nested_pvalues(subvalue):
      if subtag is None:
        yield tag, subsubvalue
      else:
        yield '%s.%s' % (tag, subtag), subsubvalue
class _ZipPValues(object):
  """Pairs each PValue in a pvalueish with a value in a parallel out sibling.

  Sibling should have the same nested structure as pvalueish. Leaves in
  sibling are expanded across nested pvalueish lists, tuples, and dicts.
  For example

    ZipPValues().visit({'a': pc1, 'b': (pc2, pc3)},
                       {'a': 'A', 'b': 'B'})

  will return

    [('a', pc1, 'A'), ('b', pc2, 'B'), ('b', pc3, 'B')]
  """
  def visit(self, pvalueish, sibling, pairs=None, context=None):
    if pairs is None:
      # Top-level call: allocate the result list, recurse, and return it.
      pairs = []
      self.visit(pvalueish, sibling, pairs, context)
      return pairs
    elif isinstance(pvalueish, (pvalue.PValue, pvalue.DoOutputsTuple)):
      pairs.append((context, pvalueish, sibling))
    elif isinstance(pvalueish, (list, tuple)):
      self.visit_sequence(pvalueish, sibling, pairs, context)
    elif isinstance(pvalueish, dict):
      self.visit_dict(pvalueish, sibling, pairs, context)
  def visit_sequence(self, pvalueish, sibling, pairs, context):
    if isinstance(sibling, (list, tuple)):
      # Pad the sibling with Nones so a shorter sibling still zips fully.
      for ix, (p, s) in enumerate(zip(pvalueish,
                                      list(sibling) + [None] * len(pvalueish))):
        self.visit(p, s, pairs, 'position %s' % ix)
    else:
      # Scalar sibling: broadcast it across every element.
      for p in pvalueish:
        self.visit(p, sibling, pairs, context)
  def visit_dict(self, pvalueish, sibling, pairs, context):
    if isinstance(sibling, dict):
      for key, p in pvalueish.items():
        self.visit(p, sibling.get(key), pairs, key)
    else:
      # Scalar sibling: broadcast it across every value.
      for p in pvalueish.values():
        self.visit(p, sibling, pairs, context)
class PTransform(WithTypeHints, HasDisplayData):
  """A transform object used to modify one or more PCollections.

  Subclasses must define an expand() method that will be used when the transform
  is applied to some arguments. Typical usage pattern will be:

    input | CustomTransform(...)

  The expand() method of the CustomTransform object passed in will be called
  with input as an argument.
  """
  # By default, transforms don't have any side inputs.
  side_inputs = ()  # type: Sequence[pvalue.AsSideInput]
  # Used for nullary transforms.
  pipeline = None  # type: Optional[Pipeline]
  # Default is unset.
  _user_label = None  # type: Optional[str]
  def __init__(self, label=None):
    # type: (Optional[str]) -> None
    super(PTransform, self).__init__()
    self.label = label  # type: ignore # https://github.com/python/mypy/issues/3004
  @property
  def label(self):
    # type: () -> str
    # Explicit user-provided label, falling back to the per-class default.
    return self._user_label or self.default_label()
  @label.setter
  def label(self, value):
    # type: (Optional[str]) -> None
    self._user_label = value
  def default_label(self):
    # type: () -> str
    """Returns the label used when the user did not provide one."""
    return self.__class__.__name__
  def default_type_hints(self):
    # Derive hints from the expand() signature, stripping the PCollection
    # wrappers so only element types remain.
    fn_type_hints = IOTypeHints.from_callable(self.expand)
    if fn_type_hints is not None:
      fn_type_hints = fn_type_hints.strip_pcoll()
    # Prefer class decorator type hints for backwards compatibility.
    return get_type_hints(self.__class__).with_defaults(fn_type_hints)
  def with_input_types(self, input_type_hint):
    """Annotates the input type of a :class:`PTransform` with a type-hint.

    Args:
      input_type_hint (type): An instance of an allowed built-in type, a custom
        class, or an instance of a
        :class:`~apache_beam.typehints.typehints.TypeConstraint`.

    Raises:
      TypeError: If **input_type_hint** is not a valid type-hint.
        See
        :obj:`apache_beam.typehints.typehints.validate_composite_type_param()`
        for further details.

    Returns:
      PTransform: A reference to the instance of this particular
        :class:`PTransform` object. This allows chaining type-hinting related
        methods.
    """
    input_type_hint = native_type_compatibility.convert_to_beam_type(
        input_type_hint)
    validate_composite_type_param(
        input_type_hint, 'Type hints for a PTransform')
    return super(PTransform, self).with_input_types(input_type_hint)
  def with_output_types(self, type_hint):
    """Annotates the output type of a :class:`PTransform` with a type-hint.

    Args:
      type_hint (type): An instance of an allowed built-in type, a custom class,
        or a :class:`~apache_beam.typehints.typehints.TypeConstraint`.

    Raises:
      TypeError: If **type_hint** is not a valid type-hint. See
        :obj:`~apache_beam.typehints.typehints.validate_composite_type_param()`
        for further details.

    Returns:
      PTransform: A reference to the instance of this particular
        :class:`PTransform` object. This allows chaining type-hinting related
        methods.
    """
    type_hint = native_type_compatibility.convert_to_beam_type(type_hint)
    validate_composite_type_param(type_hint, 'Type hints for a PTransform')
    return super(PTransform, self).with_output_types(type_hint)
  def type_check_inputs(self, pvalueish):
    self.type_check_inputs_or_outputs(pvalueish, 'input')
  def infer_output_type(self, unused_input_type):
    # Fall back to Any when no simple output type hint is declared.
    return self.get_type_hints().simple_output_type(self.label) or typehints.Any
  def type_check_outputs(self, pvalueish):
    self.type_check_inputs_or_outputs(pvalueish, 'output')
  def type_check_inputs_or_outputs(self, pvalueish, input_or_output):
    """Checks pvalueish against the declared hints; raises TypeCheckError.

    Args:
      pvalueish: The input or output pvalueish being checked.
      input_or_output: Either 'input' or 'output', selecting which hints of
        this transform to check against (also used in error messages).
    """
    type_hints = self.get_type_hints()
    hints = getattr(type_hints, input_or_output + '_types')
    if hints is None or not any(hints):
      return
    arg_hints, kwarg_hints = hints
    if arg_hints and kwarg_hints:
      raise TypeCheckError(
          'PTransform cannot have both positional and keyword type hints '
          'without overriding %s._type_check_%s()' %
          (self.__class__, input_or_output))
    root_hint = (
        arg_hints[0] if len(arg_hints) == 1 else arg_hints or kwarg_hints)
    # Pair each nested PValue with the corresponding hint and check each pair.
    for context, pvalue_, hint in _ZipPValues().visit(pvalueish, root_hint):
      if isinstance(pvalue_, DoOutputsTuple):
        continue
      if pvalue_.element_type is None:
        # TODO(robertwb): It's a bug that we ever get here. (typecheck)
        continue
      if hint and not typehints.is_consistent_with(pvalue_.element_type, hint):
        at_context = ' %s %s' % (input_or_output, context) if context else ''
        raise TypeCheckError(
            '{type} type hint violation at {label}{context}: expected {hint}, '
            'got {actual_type}\nFull type hint:\n{debug_str}'.format(
                type=input_or_output.title(),
                label=self.label,
                context=at_context,
                hint=hint,
                actual_type=pvalue_.element_type,
                debug_str=type_hints.debug_str()))
  def _infer_output_coder(self, input_type=None, input_coder=None):
    # type: (...) -> Optional[coders.Coder]
    """Returns the output coder to use for output of this transform.

    Note: this API is experimental and is subject to change; please do not rely
    on behavior induced by this method.

    The Coder returned here should not be wrapped in a WindowedValueCoder
    wrapper.

    Args:
      input_type: An instance of an allowed built-in type, a custom class, or a
        typehints.TypeConstraint for the input type, or None if not available.
      input_coder: Coder object for encoding input to this PTransform, or None
        if not available.

    Returns:
      Coder object for encoding output of this PTransform or None if unknown.
    """
    # TODO(ccy): further refine this API.
    return None
  def _clone(self, new_label):
    """Clones the current transform instance under a new label."""
    transform = copy.copy(self)
    transform.label = new_label
    return transform
  def expand(self, input_or_inputs):
    # Subclasses must override this to define the transform's computation.
    raise NotImplementedError
  def __str__(self):
    return '<%s>' % self._str_internal()
  def __repr__(self):
    return '<%s at %s>' % (self._str_internal(), hex(id(self)))
  def _str_internal(self):
    # Compact one-line description: class name plus any label/inputs/side
    # inputs that are set.
    return '%s(PTransform)%s%s%s' % (
        self.__class__.__name__,
        ' label=[%s]' % self.label if
        (hasattr(self, 'label') and self.label) else '',
        ' inputs=%s' % str(self.inputs) if
        (hasattr(self, 'inputs') and self.inputs) else '',
        ' side_inputs=%s' % str(self.side_inputs) if self.side_inputs else '')
  def _check_pcollection(self, pcoll):
    # type: (pvalue.PCollection) -> None
    """Raises TransformError unless pcoll is a PCollection in a pipeline."""
    if not isinstance(pcoll, pvalue.PCollection):
      raise error.TransformError('Expecting a PCollection argument.')
    if not pcoll.pipeline:
      raise error.TransformError('PCollection not part of a pipeline.')
  def get_windowing(self, inputs):
    # type: (Any) -> Windowing
    """Returns the window function to be associated with transform's output.

    By default most transforms just return the windowing function associated
    with the input PCollection (or the first input if several).
    """
    # TODO(robertwb): Assert all input WindowFns compatible.
    return inputs[0].windowing
  def __rrshift__(self, label):
    # Supports the `'label' >> transform` naming syntax.
    return _NamedPTransform(self, label)
  def __or__(self, right):
    """Used to compose PTransforms, e.g., ptransform1 | ptransform2."""
    if isinstance(right, PTransform):
      return _ChainedPTransform(self, right)
    return NotImplemented
  def __ror__(self, left, label=None):
    """Used to apply this PTransform to non-PValues, e.g., a tuple."""
    pvalueish, pvalues = self._extract_input_pvalues(left)
    pipelines = [v.pipeline for v in pvalues if isinstance(v, pvalue.PValue)]
    if pvalues and not pipelines:
      # No deferred inputs: build a throwaway DirectRunner pipeline and run
      # eagerly.
      deferred = False
      # pylint: disable=wrong-import-order, wrong-import-position
      from apache_beam import pipeline
      from apache_beam.options.pipeline_options import PipelineOptions
      # pylint: enable=wrong-import-order, wrong-import-position
      p = pipeline.Pipeline('DirectRunner', PipelineOptions(sys.argv))
    else:
      if not pipelines:
        if self.pipeline is not None:
          p = self.pipeline
        else:
          raise ValueError(
              '"%s" requires a pipeline to be specified '
              'as there are no deferred inputs.' % self.label)
      else:
        p = self.pipeline or pipelines[0]
        for pp in pipelines:
          if p != pp:
            raise ValueError(
                'Mixing value from different pipelines not allowed.')
      # Eager runners materialize values instead of returning deferred ones.
      deferred = not getattr(p.runner, 'is_eager', False)
    # pylint: disable=wrong-import-order, wrong-import-position
    from apache_beam.transforms.core import Create
    # pylint: enable=wrong-import-order, wrong-import-position
    # Wrap raw (non-PValue) inputs in Create transforms on the chosen pipeline.
    replacements = {
        id(v): p | 'CreatePInput%s' % ix >> Create(v, reshuffle=False)
        for ix,
        v in enumerate(pvalues)
        if not isinstance(v, pvalue.PValue) and v is not None
    }
    pvalueish = _SetInputPValues().visit(pvalueish, replacements)
    self.pipeline = p
    result = p.apply(self, pvalueish, label)
    if deferred:
      return result
    # Eager mode: run the pipeline now and return concrete values.
    _allocate_materialized_pipeline(p)
    materialized_result = _AddMaterializationTransforms().visit(result)
    p.run().wait_until_finish()
    _release_materialized_pipeline(p)
    return _FinalizeMaterialization().visit(materialized_result)
  def _extract_input_pvalues(self, pvalueish):
    """Extract all the pvalues contained in the input pvalueish.

    Returns pvalueish as well as the flat inputs list as the input may have to
    be copied as inspection may be destructive.

    By default, recursively extracts tuple components and dict values.

    Generally only needs to be overriden for multi-input PTransforms.
    """
    # pylint: disable=wrong-import-order
    from apache_beam import pipeline
    # pylint: enable=wrong-import-order
    if isinstance(pvalueish, pipeline.Pipeline):
      pvalueish = pvalue.PBegin(pvalueish)
    def _dict_tuple_leaves(pvalueish):
      # Yields the leaves of arbitrarily nested tuples/dicts.
      if isinstance(pvalueish, tuple):
        for a in pvalueish:
          for p in _dict_tuple_leaves(a):
            yield p
      elif isinstance(pvalueish, dict):
        for a in pvalueish.values():
          for p in _dict_tuple_leaves(a):
            yield p
      else:
        yield pvalueish
    return pvalueish, tuple(_dict_tuple_leaves(pvalueish))
  def _pvaluish_from_dict(self, input_dict):
    # A single-entry dict unwraps to its sole value.
    if len(input_dict) == 1:
      return next(iter(input_dict.values()))
    else:
      return input_dict
  def _named_inputs(self, inputs, side_inputs):
    # type: (Sequence[pvalue.PValue], Sequence[Any]) -> Dict[str, pvalue.PValue]
    """Returns the dictionary of named inputs (including side inputs) as they
    should be named in the beam proto.
    """
    # TODO(BEAM-1833): Push names up into the sdk construction.
    main_inputs = {
        str(ix): input
        for (ix, input) in enumerate(inputs)
        if isinstance(input, pvalue.PCollection)
    }
    named_side_inputs = {(SIDE_INPUT_PREFIX + '%s') % ix: si.pvalue
                         for (ix, si) in enumerate(side_inputs)}
    return dict(main_inputs, **named_side_inputs)
  def _named_outputs(self, outputs):
    # type: (Dict[object, pvalue.PCollection]) -> Dict[str, pvalue.PCollection]
    """Returns the dictionary of named outputs as they should be named in the
    beam proto.
    """
    # TODO(BEAM-1833): Push names up into the sdk construction.
    return {
        str(tag): output
        for (tag, output) in outputs.items()
        if isinstance(output, pvalue.PCollection)
    }
  # Registry mapping urn -> (payload type, constructor), filled in by
  # register_urn and consulted by from_runner_api.
  _known_urns = {}  # type: Dict[str, Tuple[Optional[type], ConstructorFn]]
  @classmethod
  @overload
  def register_urn(
      cls,
      urn,  # type: str
      parameter_type,  # type: Type[T]
  ):
    # type: (...) -> Callable[[Union[type, Callable[[beam_runner_api_pb2.PTransform, T, PipelineContext], Any]]], Callable[[T, PipelineContext], Any]]
    pass
  @classmethod
  @overload
  def register_urn(
      cls,
      urn,  # type: str
      parameter_type,  # type: None
  ):
    # type: (...) -> Callable[[Union[type, Callable[[beam_runner_api_pb2.PTransform, bytes, PipelineContext], Any]]], Callable[[bytes, PipelineContext], Any]]
    pass
  @classmethod
  @overload
  def register_urn(cls,
                   urn,  # type: str
                   parameter_type,  # type: Type[T]
                   constructor  # type: Callable[[beam_runner_api_pb2.PTransform, T, PipelineContext], Any]
                  ):
    # type: (...) -> None
    pass
  @classmethod
  @overload
  def register_urn(cls,
                   urn,  # type: str
                   parameter_type,  # type: None
                   constructor  # type: Callable[[beam_runner_api_pb2.PTransform, bytes, PipelineContext], Any]
                  ):
    # type: (...) -> None
    pass
  @classmethod
  def register_urn(cls, urn, parameter_type, constructor=None):
    """Registers a deserialization constructor for transforms with this urn.

    Usable either as a statement (constructor passed directly) or as a
    decorator over a callable, or over a class whose
    from_runner_api_parameter staticmethod is then registered.
    """
    def register(constructor):
      if isinstance(constructor, type):
        # Class decorator form: register the class's own parameter hook.
        constructor.from_runner_api_parameter = register(
            constructor.from_runner_api_parameter)
      else:
        cls._known_urns[urn] = parameter_type, constructor
      return constructor
    if constructor:
      # Used as a statement.
      register(constructor)
    else:
      # Used as a decorator.
      return register
  def to_runner_api(self, context, has_parts=False, **extra_kwargs):
    # type: (PipelineContext, bool, Any) -> beam_runner_api_pb2.FunctionSpec
    """Serializes this transform into a runner API FunctionSpec proto."""
    from apache_beam.portability.api import beam_runner_api_pb2
    # typing: only ParDo supports extra_kwargs
    urn, typed_param = self.to_runner_api_parameter(context, **extra_kwargs)  # type: ignore[call-arg]
    if urn == python_urns.GENERIC_COMPOSITE_TRANSFORM and not has_parts:
      # TODO(BEAM-3812): Remove this fallback.
      urn, typed_param = self.to_runner_api_pickled(context)
    return beam_runner_api_pb2.FunctionSpec(
        urn=urn,
        payload=typed_param.SerializeToString() if isinstance(
            typed_param, message.Message) else typed_param.encode('utf-8')
        if isinstance(typed_param, str) else typed_param)
  @classmethod
  def from_runner_api(cls,
                      proto,  # type: Optional[beam_runner_api_pb2.PTransform]
                      context  # type: PipelineContext
                     ):
    # type: (...) -> Optional[PTransform]
    """Reconstructs a transform from its proto via the registered constructor."""
    if proto is None or proto.spec is None or not proto.spec.urn:
      return None
    parameter_type, constructor = cls._known_urns[proto.spec.urn]
    return constructor(
        proto,
        proto_utils.parse_Bytes(proto.spec.payload, parameter_type),
        context)
  def to_runner_api_parameter(
      self,
      unused_context  # type: PipelineContext
  ):
    # type: (...) -> Tuple[str, Optional[Union[message.Message, bytes, str]]]
    # The payload here is just to ease debugging.
    return (
        python_urns.GENERIC_COMPOSITE_TRANSFORM,
        getattr(self, '_fn_api_payload', str(self)))
  def to_runner_api_pickled(self, unused_context):
    # type: (PipelineContext) -> Tuple[str, bytes]
    """Fallback serialization: pickle the whole transform object."""
    return (python_urns.PICKLED_TRANSFORM, pickler.dumps(self))
  def runner_api_requires_keyed_input(self):
    # Base transforms do not require keyed (KV) input; subclasses may
    # override.
    return False
  def _add_type_constraint_from_consumer(self, full_label, input_type_hints):
    # type: (str, Tuple[str, Any]) -> None
    """Adds a consumer transform's input type hints to our output type
    constraints, which is used during performance runtime type-checking.
    """
    pass
@PTransform.register_urn(python_urns.GENERIC_COMPOSITE_TRANSFORM, None)
def _create_transform(unused_ptransform, payload, unused_context):
  """Deserializes a generic composite as a bare PTransform carrying its payload."""
  transform = PTransform()
  transform._fn_api_payload = payload
  return transform
@PTransform.register_urn(python_urns.PICKLED_TRANSFORM, None)
def _unpickle_transform(unused_ptransform, pickled_bytes, unused_context):
  """Deserializes a transform that was serialized via pickling."""
  return pickler.loads(pickled_bytes)
class _ChainedPTransform(PTransform):
  """A composition ptransform1 | ptransform2 | ..., applied in sequence."""
  def __init__(self, *parts):
    # type: (*PTransform) -> None
    super(_ChainedPTransform, self).__init__(label=self._chain_label(parts))
    self._parts = parts

  def _chain_label(self, parts):
    # e.g. 'Map(f)|Filter(g)'.
    return '|'.join([part.label for part in parts])

  def __or__(self, right):
    if not isinstance(right, PTransform):
      return NotImplemented
    # Create a flat list rather than a nested tree of composite
    # transforms for better monitoring, etc.
    return _ChainedPTransform(*(self._parts + (right, )))

  def expand(self, pval):
    # Apply each part to the running result, left to right.
    result = pval
    for part in self._parts:
      result = result | part
    return result
class PTransformWithSideInputs(PTransform):
  """A superclass for any :class:`PTransform` (e.g.
  :func:`~apache_beam.transforms.core.FlatMap` or
  :class:`~apache_beam.transforms.core.CombineFn`)
  invoking user code.

  :class:`PTransform` s like :func:`~apache_beam.transforms.core.FlatMap`
  invoke user-supplied code in some kind of package (e.g. a
  :class:`~apache_beam.transforms.core.DoFn`) and optionally provide arguments
  and side inputs to that code. This internal-use-only class contains common
  functionality for :class:`PTransform` s that fit this model.
  """
  def __init__(self, fn, *args, **kwargs):
    # type: (WithTypeHints, *Any, **Any) -> None
    if isinstance(fn, type) and issubclass(fn, WithTypeHints):
      # Don't treat Fn class objects as callables.
      raise ValueError('Use %s() not %s.' % (fn.__name__, fn.__name__))
    self.fn = self.make_fn(fn, bool(args or kwargs))
    # Now that we figure out the label, initialize the super-class.
    super(PTransformWithSideInputs, self).__init__()
    if (any([isinstance(v, pvalue.PCollection) for v in args]) or
        any([isinstance(v, pvalue.PCollection) for v in kwargs.values()])):
      raise error.SideInputError(
          'PCollection used directly as side input argument. Specify '
          'AsIter(pcollection) or AsSingleton(pcollection) to indicate how the '
          'PCollection is to be used.')
    # Split AsSideInput markers out of the raw args/kwargs.
    self.args, self.kwargs, self.side_inputs = util.remove_objects_from_args(
        args, kwargs, pvalue.AsSideInput)
    self.raw_side_inputs = args, kwargs
    # Prevent name collisions with fns of the form '<function <lambda> at ...>'
    self._cached_fn = self.fn
    # Ensure fn and side inputs are picklable for remote execution.
    try:
      self.fn = pickler.loads(pickler.dumps(self.fn))
    except RuntimeError as e:
      raise RuntimeError('Unable to pickle fn %s: %s' % (self.fn, e))
    self.args = pickler.loads(pickler.dumps(self.args))
    self.kwargs = pickler.loads(pickler.dumps(self.kwargs))
    # For type hints, because loads(dumps(class)) != class.
    self.fn = self._cached_fn
  def with_input_types(
      self, input_type_hint, *side_inputs_arg_hints, **side_input_kwarg_hints):
    """Annotates the types of main inputs and side inputs for the PTransform.

    Args:
      input_type_hint: An instance of an allowed built-in type, a custom class,
        or an instance of a typehints.TypeConstraint.
      *side_inputs_arg_hints: A variable length argument composed of
        of an allowed built-in type, a custom class, or a
        typehints.TypeConstraint.
      **side_input_kwarg_hints: A dictionary argument composed of
        of an allowed built-in type, a custom class, or a
        typehints.TypeConstraint.

    Example of annotating the types of side-inputs::

      FlatMap().with_input_types(int, int, bool)

    Raises:
      :class:`TypeError`: If **type_hint** is not a valid type-hint.
        See
        :func:`~apache_beam.typehints.typehints.validate_composite_type_param`
        for further details.

    Returns:
      :class:`PTransform`: A reference to the instance of this particular
        :class:`PTransform` object. This allows chaining type-hinting related
        methods.
    """
    super(PTransformWithSideInputs, self).with_input_types(input_type_hint)
    side_inputs_arg_hints = native_type_compatibility.convert_to_beam_types(
        side_inputs_arg_hints)
    side_input_kwarg_hints = native_type_compatibility.convert_to_beam_types(
        side_input_kwarg_hints)
    for si in side_inputs_arg_hints:
      validate_composite_type_param(si, 'Type hints for a PTransform')
    for si in side_input_kwarg_hints.values():
      validate_composite_type_param(si, 'Type hints for a PTransform')
    self.side_inputs_types = side_inputs_arg_hints
    return WithTypeHints.with_input_types(
        self, input_type_hint, *side_inputs_arg_hints, **side_input_kwarg_hints)
  def type_check_inputs(self, pvalueish):
    """Checks actual main/side input types against declared input hints."""
    type_hints = self.get_type_hints()
    input_types = type_hints.input_types
    if input_types:
      args, kwargs = self.raw_side_inputs
      def element_type(side_input):
        if isinstance(side_input, pvalue.AsSideInput):
          return side_input.element_type
        return instance_to_type(side_input)
      # Actual types: main input first, then side-input args/kwargs.
      arg_types = [pvalueish.element_type] + [element_type(v) for v in args]
      kwargs_types = {k: element_type(v) for (k, v) in kwargs.items()}
      argspec_fn = self._process_argspec_fn()
      # Bind both actual types and declared hints to the fn's parameters so
      # they can be compared parameter-by-parameter.
      bindings = getcallargs_forhints(argspec_fn, *arg_types, **kwargs_types)
      hints = getcallargs_forhints(
          argspec_fn, *input_types[0], **input_types[1])
      for arg, hint in hints.items():
        if arg.startswith('__unknown__'):
          continue
        if hint is None:
          continue
        if not typehints.is_consistent_with(bindings.get(arg, typehints.Any),
                                            hint):
          raise TypeCheckError(
              'Type hint violation for \'{label}\': requires {hint} but got '
              '{actual_type} for {arg}\nFull type hint:\n{debug_str}'.format(
                  label=self.label,
                  hint=hint,
                  actual_type=bindings[arg],
                  arg=arg,
                  debug_str=type_hints.debug_str()))
  def _process_argspec_fn(self):
    """Returns an argspec of the function actually consuming the data.
    """
    raise NotImplementedError
  def make_fn(self, fn, has_side_inputs):
    # TODO(silviuc): Add comment describing that this is meant to be overriden
    # by methods detecting callables and wrapping them in DoFns.
    return fn
  def default_label(self):
    return '%s(%s)' % (self.__class__.__name__, self.fn.default_label())
class _PTransformFnPTransform(PTransform):
  """A class wrapper for a function-based transform."""
  def __init__(self, fn, *args, **kwargs):
    super(_PTransformFnPTransform, self).__init__()
    self._fn = fn
    self._args = args
    self._kwargs = kwargs

  def display_data(self):
    fn_name = (
        self._fn.__name__
        if hasattr(self._fn, '__name__') else self._fn.__class__)
    return {
        'fn': fn_name,
        'args': DisplayDataItem(str(self._args)).drop_if_default('()'),
        'kwargs': DisplayDataItem(str(self._kwargs)).drop_if_default('{}'),
    }

  def expand(self, pcoll):
    # Since the PTransform will be implemented entirely as a function
    # (once called), we need to pass through any type-hinting information that
    # may have been annotated via the .with_input_types() and
    # .with_output_types() methods.
    kwargs = dict(self._kwargs)
    args = tuple(self._args)
    # TODO(BEAM-5878) Support keyword-only arguments.
    try:
      if 'type_hints' in get_signature(self._fn).parameters:
        args = (self.get_type_hints(), ) + args
    except TypeError:
      # Might not be a function.
      pass
    return self._fn(pcoll, *args, **kwargs)

  def default_label(self):
    if not self._args:
      return label_from_callable(self._fn)
    # Include the first argument (typically the user fn) in the label.
    return '%s(%s)' % (
        label_from_callable(self._fn), label_from_callable(self._args[0]))
def ptransform_fn(fn):
  """A decorator for a function-based PTransform.

  Args:
    fn: A function implementing a custom PTransform.

  Returns:
    A CallablePTransform instance wrapping the function-based PTransform.

  This wrapper provides an alternative, simpler way to define a PTransform.
  The standard method is to subclass from PTransform and override the expand()
  method. An equivalent effect can be obtained by defining a function that
  accepts an input PCollection and additional optional arguments and returns a
  resulting PCollection. For example::

    @ptransform_fn
    @beam.typehints.with_input_types(..)
    @beam.typehints.with_output_types(..)
    def CustomMapper(pcoll, mapfn):
      return pcoll | ParDo(mapfn)

  The equivalent approach using PTransform subclassing::

    @beam.typehints.with_input_types(..)
    @beam.typehints.with_output_types(..)
    class CustomMapper(PTransform):
      def __init__(self, mapfn):
        super(CustomMapper, self).__init__()
        self.mapfn = mapfn

      def expand(self, pcoll):
        return pcoll | ParDo(self.mapfn)

  With either method the custom PTransform can be used in pipelines as if
  it were one of the "native" PTransforms::

    result_pcoll = input_pcoll | 'Label' >> CustomMapper(somefn)

  Note that for both solutions the underlying implementation of the pipe
  operator (i.e., `|`) will inject the pcoll argument in its proper place
  (first argument if no label was specified and second argument otherwise).

  Type hint support needs to be enabled via the
  --type_check_additional=ptransform_fn flag in Beam 2.

  If CustomMapper is a Cython function, you can still specify input and output
  types provided the decorators appear before @ptransform_fn.
  """
  # TODO(robertwb): Consider removing staticmethod to allow for self parameter.
  @wraps(fn)
  def callable_ptransform_factory(*args, **kwargs):
    # Each call builds a fresh transform wrapping fn with the given args.
    res = _PTransformFnPTransform(fn, *args, **kwargs)
    if ptransform_fn_typehints_enabled:
      # Apply type hints applied before or after the ptransform_fn decorator,
      # falling back on PTransform defaults.
      # If the @with_{input,output}_types decorator comes before ptransform_fn,
      # the type hints get applied to this function. If it comes after they will
      # get applied to fn, and @wraps will copy the _type_hints attribute to
      # this function.
      type_hints = get_type_hints(callable_ptransform_factory)
      res._set_type_hints(type_hints.with_defaults(res.get_type_hints()))
      _LOGGER.debug(
          'type hints for %s: %s', res.default_label(), res.get_type_hints())
    return res
  return callable_ptransform_factory
def label_from_callable(fn):
  """Return a best-effort human-readable label for a callable or value.

  Objects exposing default_label() (e.g. PTransforms) name themselves;
  lambdas are labeled by their definition site; other callables by
  __name__; anything else falls back to str().
  """
  if hasattr(fn, 'default_label'):
    return fn.default_label()
  if not hasattr(fn, '__name__'):
    return str(fn)
  if fn.__name__ != '<lambda>':
    return fn.__name__
  code = fn.__code__
  return '<lambda at %s:%s>' % (
      os.path.basename(code.co_filename), code.co_firstlineno)
class _NamedPTransform(PTransform):
  """Pairs a wrapped transform with an explicit label.

  Created by the `'Label' >> transform` idiom; it never expands itself,
  it only forwards application to the wrapped transform under its label.
  """
  def __init__(self, transform, label):
    super(_NamedPTransform, self).__init__(label)
    self.transform = transform

  def __ror__(self, pvalueish, _unused=None):
    # Apply the inner transform, substituting our label for the default.
    return self.transform.__ror__(pvalueish, self.label)

  def expand(self, pvalue):
    raise RuntimeError("Should never be expanded directly.")
# pytype: skip-file
from typing import Set
from typing import Tuple
import apache_beam as beam
from apache_beam.options.pipeline_options import TypeOptions
from apache_beam.testing.util import assert_that
from apache_beam.testing.util import equal_to
from apache_beam.transforms import combiners
from apache_beam.transforms import trigger
from apache_beam.transforms import userstate
from apache_beam.transforms import window
from apache_beam.typehints import with_input_types
from apache_beam.typehints import with_output_types
@with_input_types(int)
@with_output_types(int)
class CallSequenceEnforcingCombineFn(beam.CombineFn):
  """Summing CombineFn that asserts the setup/teardown lifecycle contract.

  Every combining method asserts that setup() already ran and teardown()
  has not, so a pipeline run fails loudly on lifecycle violations.
  """

  # All instances on which setup() ran, so tests can verify after pipeline
  # execution that teardown was eventually called on each.
  instances = set()  # type: Set[CallSequenceEnforcingCombineFn]

  def __init__(self):
    super(CallSequenceEnforcingCombineFn, self).__init__()
    self._setup_called = False
    self._teardown_called = False

  def setup(self, *args, **kwargs):
    assert not self._setup_called, 'setup should not be called twice'
    assert not self._teardown_called, 'setup should be called before teardown'
    # Keep track of instances so that we can check if teardown is called
    # properly after pipeline execution.
    self.instances.add(self)
    self._setup_called = True

  def create_accumulator(self, *args, **kwargs):
    assert self._setup_called, 'setup should have been called'
    assert not self._teardown_called, 'teardown should not have been called'
    return 0

  def add_input(self, mutable_accumulator, element, *args, **kwargs):
    assert self._setup_called, 'setup should have been called'
    assert not self._teardown_called, 'teardown should not have been called'
    mutable_accumulator += element
    return mutable_accumulator

  def add_inputs(self, mutable_accumulator, elements, *args, **kwargs):
    # Delegates to add_input so the lifecycle assertions run there too.
    return self.add_input(mutable_accumulator, sum(elements))

  def merge_accumulators(self, accumulators, *args, **kwargs):
    assert self._setup_called, 'setup should have been called'
    assert not self._teardown_called, 'teardown should not have been called'
    return sum(accumulators)

  def extract_output(self, accumulator, *args, **kwargs):
    assert self._setup_called, 'setup should have been called'
    assert not self._teardown_called, 'teardown should not have been called'
    return accumulator

  def teardown(self, *args, **kwargs):
    assert self._setup_called, 'setup should have been called'
    assert not self._teardown_called, 'teardown should not be called twice'
    self._teardown_called = True
@with_input_types(Tuple[None, str])
@with_output_types(Tuple[int, str])
class IndexAssigningDoFn(beam.DoFn):
  """Stateful DoFn that assigns a per-key running index to each element."""

  # Combining value state; its CombineFn doubles as a lifecycle checker.
  state_param = beam.DoFn.StateParam(
      userstate.CombiningValueStateSpec(
          'index', beam.coders.VarIntCoder(), CallSequenceEnforcingCombineFn()))

  def process(self, element, state=state_param):
    _, value = element
    current_index = state.read()
    yield current_index, value
    # Increment after emitting so the first element of a key gets index 0.
    state.add(1)
def run_combine(pipeline, input_elements=5, lift_combiners=True):
  """Run a pipeline that exercises the full CombineFn lifecycle.

  Args:
    pipeline: Pipeline to run (consumed by the `with` block).
    input_elements: how many integers 0..input_elements-1 to combine.
    lift_combiners: if False, install a trigger that is incompatible with
      combiner lifting so the runner cannot lift the combiner.
  """
  # Calculate the expected result, which is the sum of an arithmetic sequence.
  # By default, this is equal to: 0 + 1 + 2 + 3 + 4 = 10
  expected_result = input_elements * (input_elements - 1) / 2

  # Enable runtime type checking in order to cover TypeCheckCombineFn by
  # the test.
  pipeline.get_pipeline_options().view_as(TypeOptions).runtime_type_check = True

  with pipeline as p:
    pcoll = p | 'Start' >> beam.Create(range(input_elements))

    # Certain triggers, such as AfterCount, are incompatible with combiner
    # lifting. We can use that fact to prevent combiners from being lifted.
    if not lift_combiners:
      pcoll |= beam.WindowInto(
          window.GlobalWindows(),
          trigger=trigger.AfterCount(input_elements),
          accumulation_mode=trigger.AccumulationMode.DISCARDING)

    # Pass an additional 'None' in order to cover _CurriedFn by the test.
    pcoll |= 'Do' >> beam.CombineGlobally(
        combiners.SingleInputTupleCombineFn(
            CallSequenceEnforcingCombineFn(), CallSequenceEnforcingCombineFn()),
        None).with_fanout(fanout=1)

    assert_that(pcoll, equal_to([(expected_result, expected_result)]))
def run_pardo(pipeline, input_elements=10):
  """Run a pipeline driving IndexAssigningDoFn over a single shared key.

  Args:
    pipeline: Pipeline to run (consumed by the `with` block).
    input_elements: number of 'Hello' elements to process.
  """
  with pipeline as p:
    source = p | 'Start' >> beam.Create(
        ('Hello' for _ in range(input_elements)))
    # All elements share the key None so the stateful DoFn sees one key.
    keyed = source | 'KeyWithNone' >> beam.Map(lambda elem: (None, elem))
    _ = keyed | 'Do' >> beam.ParDo(IndexAssigningDoFn())
# pytype: skip-file
from __future__ import absolute_import
from __future__ import division
import copy
import heapq
import operator
import random
import sys
import warnings
from builtins import object
from builtins import zip
from typing import Any
from typing import Dict
from typing import Iterable
from typing import List
from typing import Set
from typing import Tuple
from typing import TypeVar
from typing import Union
from past.builtins import long
from apache_beam import typehints
from apache_beam.transforms import core
from apache_beam.transforms import cy_combiners
from apache_beam.transforms import ptransform
from apache_beam.transforms import window
from apache_beam.transforms.display import DisplayDataItem
from apache_beam.typehints import with_input_types
from apache_beam.typehints import with_output_types
from apache_beam.utils.timestamp import Duration
from apache_beam.utils.timestamp import Timestamp
# Public combiner transforms re-exported from this module.
__all__ = [
    'Count', 'Mean', 'Sample', 'Top', 'ToDict', 'ToList', 'ToSet', 'Latest'
]

# Type variables
T = TypeVar('T')
K = TypeVar('K')
V = TypeVar('V')

# Anything accepted where an event-time timestamp or duration is expected.
TimestampType = Union[int, float, Timestamp, Duration]
class CombinerWithoutDefaults(ptransform.PTransform):
  """Super class to inherit without_defaults to built-in Combiners."""
  def __init__(self, has_defaults=True):
    super(CombinerWithoutDefaults, self).__init__()
    # Whether the global combine emits a default value for empty input.
    self.has_defaults = has_defaults

  def with_defaults(self, has_defaults=True):
    # Return a shallow clone so the original transform stays untouched.
    clone = copy.copy(self)
    clone.has_defaults = has_defaults
    return clone

  def without_defaults(self):
    return self.with_defaults(has_defaults=False)
class Mean(object):
  """Combiners for computing arithmetic means of elements."""

  class Globally(CombinerWithoutDefaults):
    """combiners.Mean.Globally computes the arithmetic mean of the elements."""
    def expand(self, pcoll):
      combined = core.CombineGlobally(MeanCombineFn())
      if not self.has_defaults:
        combined = combined.without_defaults()
      return pcoll | combined

  class PerKey(ptransform.PTransform):
    """combiners.Mean.PerKey finds the means of the values for each key."""
    def expand(self, pcoll):
      return pcoll | core.CombinePerKey(MeanCombineFn())
# TODO(laolu): This type signature is overly restrictive. This should be
# more general.
@with_input_types(Union[float, int, long])
@with_output_types(float)
class MeanCombineFn(core.CombineFn):
  """CombineFn for computing an arithmetic mean."""
  def create_accumulator(self):
    # Accumulator is a (running_sum, running_count) pair.
    return (0, 0)

  def add_input(self, sum_count, element):
    running_sum, running_count = sum_count
    return running_sum + element, running_count + 1

  def merge_accumulators(self, accumulators):
    sums, counts = zip(*accumulators)
    return sum(sums), sum(counts)

  def extract_output(self, sum_count):
    running_sum, running_count = sum_count
    # An empty PCollection has no mean; report NaN rather than dividing by 0.
    if not running_count:
      return float('NaN')
    return running_sum / float(running_count)

  def for_input_type(self, input_type):
    # Swap in the Cython-accelerated implementation when types allow.
    if input_type is int:
      return cy_combiners.MeanInt64Fn()
    if input_type is float:
      return cy_combiners.MeanFloatFn()
    return self
class Count(object):
  """Combiners for counting elements."""

  class Globally(CombinerWithoutDefaults):
    """combiners.Count.Globally counts the total number of elements."""
    def expand(self, pcoll):
      if self.has_defaults:
        return pcoll | core.CombineGlobally(CountCombineFn())
      else:
        return pcoll | core.CombineGlobally(CountCombineFn()).without_defaults()

  class PerKey(ptransform.PTransform):
    """combiners.Count.PerKey counts how many elements each unique key has."""
    def expand(self, pcoll):
      return pcoll | core.CombinePerKey(CountCombineFn())

  class PerElement(ptransform.PTransform):
    """combiners.Count.PerElement counts how many times each element occurs."""
    def expand(self, pcoll):
      # Turn each element into a key (paired with None), then count per key
      # to obtain per-element occurrence counts.
      paired_with_void_type = typehints.Tuple[pcoll.element_type, Any]
      output_type = typehints.KV[pcoll.element_type, int]
      return (
          pcoll
          | (
              '%s:PairWithVoid' % self.label >> core.Map(
                  lambda x: (x, None)).with_output_types(paired_with_void_type))
          | core.CombinePerKey(CountCombineFn()).with_output_types(output_type))
@with_input_types(Any)
@with_output_types(int)
class CountCombineFn(core.CombineFn):
  """CombineFn for computing PCollection size."""
  def create_accumulator(self):
    # The running count starts at zero.
    return 0

  def add_input(self, accumulator, element):
    # Only the number of elements matters, not their values.
    return accumulator + 1

  def add_inputs(self, accumulator, elements):
    return accumulator + sum(1 for _ in elements)

  def merge_accumulators(self, accumulators):
    total = 0
    for count in accumulators:
      total += count
    return total

  def extract_output(self, accumulator):
    return accumulator
class Top(object):
  """Combiners for obtaining extremal elements."""

  # pylint: disable=no-self-argument

  class Of(CombinerWithoutDefaults):
    """Obtain a list of the compare-most N elements in a PCollection.

    This transform will retrieve the n greatest elements in the PCollection
    to which it is applied, where "greatest" is determined by the comparator
    function supplied as the compare argument.
    """
    def _py2__init__(self, n, compare=None, *args, **kwargs):
      """Initializer.

      compare should be an implementation of "a < b" taking at least two
      arguments (a and b). Additional arguments and side inputs specified in
      the apply call become additional arguments to the comparator. Defaults to
      the natural ordering of the elements.

      The arguments 'key' and 'reverse' may instead be passed as keyword
      arguments, and have the same meaning as for Python's sort functions.

      Args:
        pcoll: PCollection to process.
        n: number of elements to extract from pcoll.
        compare: as described above.
        *args: as described above.
        **kwargs: as described above.
      """
      super(Top.Of, self).__init__()
      if compare:
        warnings.warn(
            'Compare not available in Python 3, use key instead.',
            DeprecationWarning)
      self._n = n
      self._compare = compare
      self._key = kwargs.pop('key', None)
      self._reverse = kwargs.pop('reverse', False)
      self._args = args
      self._kwargs = kwargs

    def _py3__init__(self, n, **kwargs):
      """Creates a global Top operation.

      The arguments 'key' and 'reverse' may be passed as keyword arguments,
      and have the same meaning as for Python's sort functions.

      Args:
        pcoll: PCollection to process.
        n: number of elements to extract from pcoll.
        **kwargs: may contain 'key' and/or 'reverse'
      """
      unknown_kwargs = set(kwargs.keys()) - set(['key', 'reverse'])
      if unknown_kwargs:
        raise ValueError(
            'Unknown keyword arguments: ' + ', '.join(unknown_kwargs))
      self._py2__init__(n, None, **kwargs)

    # Python 3 sort does not accept a comparison operator, and nor do we.
    # FIXME: mypy would handle this better if we placed the _py*__init__ funcs
    # inside the if/else block below:
    if sys.version_info[0] < 3:
      __init__ = _py2__init__
    else:
      __init__ = _py3__init__  # type: ignore

    def default_label(self):
      return 'Top(%d)' % self._n

    def expand(self, pcoll):
      compare = self._compare
      # The bundle-based path is only valid when no extra comparator
      # arguments/side inputs exist and windowing is the default.
      if (not self._args and not self._kwargs and pcoll.windowing.is_default()):
        if self._reverse:
          # Fold 'reverse' into the comparator so the bundle DoFns need not
          # know about it.
          if compare is None or compare is operator.lt:
            compare = operator.gt
          else:
            original_compare = compare
            compare = lambda a, b: original_compare(b, a)
        # This is a more efficient global algorithm.
        top_per_bundle = pcoll | core.ParDo(
            _TopPerBundle(self._n, compare, self._key))
        # If pcoll is empty, we can't guarantee that top_per_bundle
        # won't be empty, so inject at least one empty accumulator
        # so that downstream is guaranteed to produce non-empty output.
        empty_bundle = pcoll.pipeline | core.Create([(None, [])])
        return ((top_per_bundle, empty_bundle) | core.Flatten()
                | core.GroupByKey()
                | core.ParDo(_MergeTopPerBundle(self._n, compare, self._key)))
      else:
        if self.has_defaults:
          return pcoll | core.CombineGlobally(
              TopCombineFn(self._n, compare, self._key, self._reverse),
              *self._args,
              **self._kwargs)
        else:
          return pcoll | core.CombineGlobally(
              TopCombineFn(self._n, compare, self._key, self._reverse),
              *self._args,
              **self._kwargs).without_defaults()

  class PerKey(ptransform.PTransform):
    """Identifies the compare-most N elements associated with each key.

    This transform will produce a PCollection mapping unique keys in the input
    PCollection to the n greatest elements with which they are associated, where
    "greatest" is determined by the comparator function supplied as the compare
    argument in the initializer.
    """
    def _py2__init__(self, n, compare=None, *args, **kwargs):
      """Initializer.

      compare should be an implementation of "a < b" taking at least two
      arguments (a and b). Additional arguments and side inputs specified in
      the apply call become additional arguments to the comparator. Defaults to
      the natural ordering of the elements.

      The arguments 'key' and 'reverse' may instead be passed as keyword
      arguments, and have the same meaning as for Python's sort functions.

      Args:
        n: number of elements to extract from input.
        compare: as described above.
        *args: as described above.
        **kwargs: as described above.
      """
      if compare:
        warnings.warn(
            'Compare not available in Python 3, use key instead.',
            DeprecationWarning)
      self._n = n
      self._compare = compare
      self._key = kwargs.pop('key', None)
      self._reverse = kwargs.pop('reverse', False)
      self._args = args
      self._kwargs = kwargs

    def _py3__init__(self, n, **kwargs):
      """Creates a per-key Top operation.

      The arguments 'key' and 'reverse' may be passed as keyword arguments,
      and have the same meaning as for Python's sort functions.

      Args:
        pcoll: PCollection to process.
        n: number of elements to extract from pcoll.
        **kwargs: may contain 'key' and/or 'reverse'
      """
      unknown_kwargs = set(kwargs.keys()) - set(['key', 'reverse'])
      if unknown_kwargs:
        raise ValueError(
            'Unknown keyword arguments: ' + ', '.join(unknown_kwargs))
      self._py2__init__(n, None, **kwargs)

    # Python 3 sort does not accept a comparison operator, and nor do we.
    if sys.version_info[0] < 3:
      __init__ = _py2__init__
    else:
      __init__ = _py3__init__  # type: ignore

    def default_label(self):
      return 'TopPerKey(%d)' % self._n

    def expand(self, pcoll):
      """Expands the transform.

      Raises TypeCheckError: If the output type of the input PCollection is not
      compatible with Tuple[A, B].

      Args:
        pcoll: PCollection to process

      Returns:
        the PCollection containing the result.
      """
      return pcoll | core.CombinePerKey(
          TopCombineFn(self._n, self._compare, self._key, self._reverse),
          *self._args,
          **self._kwargs)

  @staticmethod
  @ptransform.ptransform_fn
  def Largest(pcoll, n, has_defaults=True):
    """Obtain a list of the greatest N elements in a PCollection."""
    if has_defaults:
      return pcoll | Top.Of(n)
    else:
      return pcoll | Top.Of(n).without_defaults()

  @staticmethod
  @ptransform.ptransform_fn
  def Smallest(pcoll, n, has_defaults=True):
    """Obtain a list of the least N elements in a PCollection."""
    if has_defaults:
      return pcoll | Top.Of(n, reverse=True)
    else:
      return pcoll | Top.Of(n, reverse=True).without_defaults()

  @staticmethod
  @ptransform.ptransform_fn
  def LargestPerKey(pcoll, n):
    """Identifies the N greatest elements associated with each key."""
    return pcoll | Top.PerKey(n)

  @staticmethod
  @ptransform.ptransform_fn
  def SmallestPerKey(pcoll, n, reverse=True):
    """Identifies the N least elements associated with each key."""
    # NOTE(review): the 'reverse' parameter is accepted but ignored here;
    # reverse=True is always passed below. Confirm whether this is intended.
    return pcoll | Top.PerKey(n, reverse=True)
@with_input_types(T)
@with_output_types(Tuple[None, List[T]])
class _TopPerBundle(core.DoFn):
  """Per-bundle stage of the efficient global Top: emits each bundle's
  top-n elements as a sorted list keyed by None."""
  def __init__(self, n, less_than, key):
    self._n = n
    # NOTE(review): the natural-ordering sentinel here is operator.le, while
    # _MergeTopPerBundle checks operator.lt — confirm whether intentional.
    self._less_than = None if less_than is operator.le else less_than
    self._key = key

  def start_bundle(self):
    self._heap = []

  def process(self, element):
    # Wrap elements only when a custom comparator or key is in play;
    # otherwise heapq can compare the raw values directly.
    if self._less_than or self._key:
      element = cy_combiners.ComparableValue(
          element, self._less_than, self._key)
    if len(self._heap) < self._n:
      heapq.heappush(self._heap, element)
    else:
      heapq.heappushpop(self._heap, element)

  def finish_bundle(self):
    # Though sorting here results in more total work, this allows us to
    # skip most elements in the reducer.
    # Essentially, given s map bundles, we are trading about O(sn) compares in
    # the (single) reducer for O(sn log n) compares across all mappers.
    self._heap.sort()

    # Unwrap to avoid serialization via pickle.
    if self._less_than or self._key:
      yield window.GlobalWindows.windowed_value(
          (None, [wrapper.value for wrapper in self._heap]))
    else:
      yield window.GlobalWindows.windowed_value((None, self._heap))
@with_input_types(Tuple[None, Iterable[List[T]]])
@with_output_types(List[T])
class _MergeTopPerBundle(core.DoFn):
  """Reducer stage of the efficient global Top: merges the per-bundle
  sorted lists into a single top-n list (largest first)."""
  def __init__(self, n, less_than, key):
    self._n = n
    self._less_than = None if less_than is operator.lt else less_than
    self._key = key

  def process(self, key_and_bundles):
    _, bundles = key_and_bundles

    def push(hp, e):
      # Returns True when the rest of this (sorted) bundle can be skipped.
      if len(hp) < self._n:
        heapq.heappush(hp, e)
        return False
      elif e < hp[0]:
        # Because _TopPerBundle returns sorted lists, all other elements
        # will also be smaller.
        return True
      else:
        heapq.heappushpop(hp, e)
        return False

    if self._less_than or self._key:
      # Custom ordering: wrap every element for heap comparison.
      heapc = []  # type: List[cy_combiners.ComparableValue]
      for bundle in bundles:
        if not heapc:
          # Seed the heap with the first bundle wholesale.
          heapc = [
              cy_combiners.ComparableValue(element, self._less_than, self._key)
              for element in bundle
          ]
          continue
        # Bundles are sorted ascending; iterate from the largest down so
        # push() can bail out early once elements stop qualifying.
        for element in reversed(bundle):
          if push(heapc,
                  cy_combiners.ComparableValue(element,
                                               self._less_than,
                                               self._key)):
            break
      heapc.sort()
      yield [wrapper.value for wrapper in reversed(heapc)]

    else:
      heap = []
      for bundle in bundles:
        if not heap:
          heap = bundle
          continue
        for element in reversed(bundle):
          if push(heap, element):
            break
      heap.sort()
      yield heap[::-1]
@with_input_types(T)
@with_output_types(List[T])
class TopCombineFn(core.CombineFn):
  """CombineFn doing the combining for all of the Top transforms.

  This CombineFn uses a key or comparison operator to rank the elements.

  Args:
    compare: (optional) an implementation of "a < b" taking at least two
        arguments (a and b). Additional arguments and side inputs specified
        in the apply call become additional arguments to the comparator.
    key: (optional) a mapping of elements to a comparable key, similar to
        the key argument of Python's sorting methods.
    reverse: (optional) whether to order things smallest to largest, rather
        than largest to smallest
  """

  # TODO(robertwb): For Python 3, remove compare and only keep key.
  def __init__(self, n, compare=None, key=None, reverse=False):
    self._n = n

    # Normalize the natural orderings so the fast (unwrapped) path below
    # can be detected with an identity check against operator.lt.
    if compare is operator.lt:
      compare = None
    elif compare is operator.gt:
      compare = None
      reverse = not reverse

    if compare:
      # 'reverse' is folded into the comparator by negating it.
      self._compare = ((
          lambda a, b, *args, **kwargs: not compare(a, b, *args, **kwargs))
                       if reverse else compare)
    else:
      self._compare = operator.gt if reverse else operator.lt

    # Bound lazily in add_input/merge_accumulators once side-input
    # args/kwargs are known.
    self._less_than = None
    self._key = key

  def _hydrated_heap(self, heap):
    # Rebuild ComparableValue wrappers for heap entries that lost their
    # (unpicklable) comparison functions during serialization.
    if heap:
      first = heap[0]
      if isinstance(first, cy_combiners.ComparableValue):
        if first.requires_hydration:
          assert self._less_than is not None
          for comparable in heap:
            assert comparable.requires_hydration
            comparable.hydrate(self._less_than, self._key)
            assert not comparable.requires_hydration
          return heap
        else:
          return heap
      else:
        assert self._less_than is not None
        return [
            cy_combiners.ComparableValue(element, self._less_than, self._key)
            for element in heap
        ]
    else:
      return heap

  def display_data(self):
    return {
        'n': self._n,
        'compare': DisplayDataItem(
            self._compare.__name__ if hasattr(self._compare, '__name__') else
            self._compare.__class__.__name__).drop_if_none()
    }

  # The accumulator type is a tuple
  # (bool, Union[List[T], List[ComparableValue[T]])
  # where the boolean indicates whether the second slot contains a List of T
  # (False) or List of ComparableValue[T] (True). In either case, the List
  # maintains heap invariance. When the contents of the List are
  # ComparableValue[T] they either all 'requires_hydration' or none do.
  # This accumulator representation allows us to minimize the data encoding
  # overheads. Creation of ComparableValues is elided for performance reasons
  # when there is no need for complicated comparison functions.
  def create_accumulator(self, *args, **kwargs):
    return (False, [])

  def add_input(self, accumulator, element, *args, **kwargs):
    # Caching to avoid paying the price of variadic expansion of args / kwargs
    # when it's not needed (for the 'if' case below).
    if self._less_than is None:
      if args or kwargs:
        self._less_than = lambda a, b: self._compare(a, b, *args, **kwargs)
      else:
        self._less_than = self._compare

    holds_comparables, heap = accumulator
    if self._less_than is not operator.lt or self._key:
      heap = self._hydrated_heap(heap)
      holds_comparables = True
    else:
      assert not holds_comparables

    comparable = (
        cy_combiners.ComparableValue(element, self._less_than, self._key)
        if holds_comparables else element)

    if len(heap) < self._n:
      heapq.heappush(heap, comparable)
    else:
      heapq.heappushpop(heap, comparable)
    return (holds_comparables, heap)

  def merge_accumulators(self, accumulators, *args, **kwargs):
    if args or kwargs:
      self._less_than = lambda a, b: self._compare(a, b, *args, **kwargs)
      add_input = lambda accumulator, element: self.add_input(
          accumulator, element, *args, **kwargs)
    else:
      self._less_than = self._compare
      add_input = self.add_input

    result_heap = None
    holds_comparables = None
    for accumulator in accumulators:
      holds_comparables, heap = accumulator
      if self._less_than is not operator.lt or self._key:
        heap = self._hydrated_heap(heap)
        holds_comparables = True
      else:
        assert not holds_comparables

      if result_heap is None:
        result_heap = heap
      else:
        # Fold each subsequent accumulator's elements into the first heap.
        for comparable in heap:
          _, result_heap = add_input(
              (holds_comparables, result_heap),
              comparable.value if holds_comparables else comparable)

    assert result_heap is not None and holds_comparables is not None
    return (holds_comparables, result_heap)

  def compact(self, accumulator, *args, **kwargs):
    holds_comparables, heap = accumulator
    # Unwrap to avoid serialization via pickle.
    if holds_comparables:
      return (False, [comparable.value for comparable in heap])
    else:
      return accumulator

  def extract_output(self, accumulator, *args, **kwargs):
    if args or kwargs:
      self._less_than = lambda a, b: self._compare(a, b, *args, **kwargs)
    else:
      self._less_than = self._compare

    holds_comparables, heap = accumulator
    if self._less_than is not operator.lt or self._key:
      if not holds_comparables:
        heap = self._hydrated_heap(heap)
        holds_comparables = True
    else:
      assert not holds_comparables

    assert len(heap) <= self._n
    # Output is the top n in descending order of "greatness".
    heap.sort(reverse=True)
    return [
        comparable.value if holds_comparables else comparable
        for comparable in heap
    ]
class Largest(TopCombineFn):
  """TopCombineFn relabeled for selecting the largest elements."""

  def default_label(self):
    return 'Largest({})'.format(self._n)
class Smallest(TopCombineFn):
  """TopCombineFn configured to select the smallest elements instead."""

  def __init__(self, n):
    super(Smallest, self).__init__(n, reverse=True)

  def default_label(self):
    return 'Smallest({})'.format(self._n)
class Sample(object):
  """Combiners for sampling n elements without replacement."""

  # pylint: disable=no-self-argument

  class FixedSizeGlobally(CombinerWithoutDefaults):
    """Sample n elements from the input PCollection without replacement."""
    def __init__(self, n):
      super(Sample.FixedSizeGlobally, self).__init__()
      self._n = n

    def expand(self, pcoll):
      if self.has_defaults:
        return pcoll | core.CombineGlobally(SampleCombineFn(self._n))
      else:
        return pcoll | core.CombineGlobally(SampleCombineFn(
            self._n)).without_defaults()

    def display_data(self):
      return {'n': self._n}

    def default_label(self):
      return 'FixedSizeGlobally(%d)' % self._n

  class FixedSizePerKey(ptransform.PTransform):
    """Sample n elements associated with each key without replacement."""
    def __init__(self, n):
      self._n = n

    def expand(self, pcoll):
      return pcoll | core.CombinePerKey(SampleCombineFn(self._n))

    def display_data(self):
      return {'n': self._n}

    def default_label(self):
      return 'FixedSizePerKey(%d)' % self._n
@with_input_types(T)
@with_output_types(List[T])
class SampleCombineFn(core.CombineFn):
  """CombineFn for all Sample transforms."""
  def __init__(self, n):
    super(SampleCombineFn, self).__init__()
    # Most of this combiner's work is done by a TopCombineFn. We could just
    # subclass TopCombineFn to make this class, but since sampling is not
    # really a kind of Top operation, we use a TopCombineFn instance as a
    # helper instead.
    self._top_combiner = TopCombineFn(n)

  def setup(self):
    self._top_combiner.setup()

  def create_accumulator(self):
    return self._top_combiner.create_accumulator()

  def add_input(self, heap, element):
    # Before passing elements to the Top combiner, we pair them with random
    # numbers. The elements with the n largest random number "keys" will be
    # selected for the output.
    return self._top_combiner.add_input(heap, (random.random(), element))

  def merge_accumulators(self, heaps):
    return self._top_combiner.merge_accumulators(heaps)

  def compact(self, heap):
    return self._top_combiner.compact(heap)

  def extract_output(self, heap):
    # Here we strip off the random number keys we added in add_input.
    return [e for _, e in self._top_combiner.extract_output(heap)]

  def teardown(self):
    self._top_combiner.teardown()
class _TupleCombineFnBase(core.CombineFn):
  """Base for combiners that run a tuple of sub-CombineFns in parallel.

  Accumulators are lists with one slot per sub-combiner; every lifecycle
  and phase method fans out slot-wise to the corresponding sub-combiner.
  """
  def __init__(self, *combiners):
    # Normalize plain callables into CombineFns, keeping the originals
    # around for display_data.
    self._combiners = [core.CombineFn.maybe_from_callable(c) for c in combiners]
    self._named_combiners = combiners

  def display_data(self):
    combiners = [
        c.__name__ if hasattr(c, '__name__') else c.__class__.__name__
        for c in self._named_combiners
    ]
    return {'combiners': str(combiners)}

  def setup(self, *args, **kwargs):
    for c in self._combiners:
      c.setup(*args, **kwargs)

  def create_accumulator(self, *args, **kwargs):
    return [c.create_accumulator(*args, **kwargs) for c in self._combiners]

  def merge_accumulators(self, accumulators, *args, **kwargs):
    # zip(*accumulators) regroups the per-element accumulator lists into
    # one tuple of accumulators per sub-combiner.
    return [
        c.merge_accumulators(a, *args, **kwargs) for c,
        a in zip(self._combiners, zip(*accumulators))
    ]

  def compact(self, accumulator, *args, **kwargs):
    return [
        c.compact(a, *args, **kwargs) for c,
        a in zip(self._combiners, accumulator)
    ]

  def extract_output(self, accumulator, *args, **kwargs):
    return tuple([
        c.extract_output(a, *args, **kwargs) for c,
        a in zip(self._combiners, accumulator)
    ])

  def teardown(self, *args, **kwargs):
    # Tear down in reverse order of setup.
    for c in reversed(self._combiners):
      c.teardown(*args, **kwargs)
class TupleCombineFn(_TupleCombineFnBase):
  """A combiner for combining tuples via a tuple of combiners.

  Takes as input a tuple of N CombineFns and combines N-tuples by
  combining the k-th element of each tuple with the k-th CombineFn,
  outputting a new N-tuple of combined values.
  """
  def add_input(self, accumulator, element, *args, **kwargs):
    # Feed the k-th tuple element into the k-th sub-combiner's accumulator.
    updated = []
    for combiner, acc, value in zip(self._combiners, accumulator, element):
      updated.append(combiner.add_input(acc, value, *args, **kwargs))
    return updated

  def with_common_input(self):
    # Switch to feeding each whole element to every sub-combiner.
    return SingleInputTupleCombineFn(*self._combiners)
class SingleInputTupleCombineFn(_TupleCombineFnBase):
  """A combiner for combining a single value via a tuple of combiners.

  Takes as input a tuple of N CombineFns and combines elements by
  applying each CombineFn to each input, producing an N-tuple of
  the outputs corresponding to each of the N CombineFn's outputs.
  """
  def add_input(self, accumulator, element, *args, **kwargs):
    # Every sub-combiner sees the same element.
    updated = []
    for combiner, acc in zip(self._combiners, accumulator):
      updated.append(combiner.add_input(acc, element, *args, **kwargs))
    return updated
class ToList(CombinerWithoutDefaults):
  """A global CombineFn that condenses a PCollection into a single list."""
  def expand(self, pcoll):
    combined = core.CombineGlobally(ToListCombineFn())
    if not self.has_defaults:
      combined = combined.without_defaults()
    return pcoll | self.label >> combined
@with_input_types(T)
@with_output_types(List[T])
class ToListCombineFn(core.CombineFn):
  """CombineFn for to_list.

  Accumulators are plain lists; merging concatenates them in order.
  """
  def create_accumulator(self):
    return []

  def add_input(self, accumulator, element):
    accumulator.append(element)
    return accumulator

  def merge_accumulators(self, accumulators):
    # A flat comprehension is linear in the total number of elements,
    # whereas sum(lists, []) re-copies the growing result for every
    # accumulator (quadratic overall).
    return [element for accumulator in accumulators for element in accumulator]

  def extract_output(self, accumulator):
    return accumulator
class ToDict(CombinerWithoutDefaults):
  """A global CombineFn that condenses a PCollection into a single dict.

  PCollections should consist of 2-tuples, notionally (key, value) pairs.
  If multiple values are associated with the same key, only one of the values
  will be present in the resulting dict.
  """
  def expand(self, pcoll):
    combined = core.CombineGlobally(ToDictCombineFn())
    if not self.has_defaults:
      combined = combined.without_defaults()
    return pcoll | self.label >> combined
@with_input_types(Tuple[K, V])
@with_output_types(Dict[K, V])
class ToDictCombineFn(core.CombineFn):
  """CombineFn for to_dict."""
  def create_accumulator(self):
    return {}

  def add_input(self, accumulator, element):
    k, v = element
    # Later values for the same key overwrite earlier ones.
    accumulator[k] = v
    return accumulator

  def merge_accumulators(self, accumulators):
    merged = {}
    for accumulator in accumulators:
      merged.update(accumulator)
    return merged

  def extract_output(self, accumulator):
    return accumulator
class ToSet(CombinerWithoutDefaults):
  """A global CombineFn that condenses a PCollection into a set."""
  def expand(self, pcoll):
    combined = core.CombineGlobally(ToSetCombineFn())
    if not self.has_defaults:
      combined = combined.without_defaults()
    return pcoll | self.label >> combined
@with_input_types(T)
@with_output_types(Set[T])
class ToSetCombineFn(core.CombineFn):
  """CombineFn for ToSet."""
  def create_accumulator(self):
    return set()

  def add_input(self, accumulator, element):
    accumulator.add(element)
    return accumulator

  def merge_accumulators(self, accumulators):
    # NOTE(review): assumes at least one accumulator; set.union(*[]) would
    # raise TypeError — confirm the runner never passes an empty iterable.
    return set.union(*accumulators)

  def extract_output(self, accumulator):
    return accumulator
class _CurriedFn(core.CombineFn):
  """Wrapped CombineFn with extra arguments.

  Forwards the captured args/kwargs to every lifecycle and phase call of
  the wrapped CombineFn.
  """
  def __init__(self, fn, args, kwargs):
    self.fn = fn
    self.args = args
    self.kwargs = kwargs

  def setup(self):
    self.fn.setup(*self.args, **self.kwargs)

  def create_accumulator(self):
    return self.fn.create_accumulator(*self.args, **self.kwargs)

  def add_input(self, accumulator, element):
    return self.fn.add_input(accumulator, element, *self.args, **self.kwargs)

  def merge_accumulators(self, accumulators):
    return self.fn.merge_accumulators(accumulators, *self.args, **self.kwargs)

  def compact(self, accumulator):
    return self.fn.compact(accumulator, *self.args, **self.kwargs)

  def extract_output(self, accumulator):
    return self.fn.extract_output(accumulator, *self.args, **self.kwargs)

  def teardown(self):
    self.fn.teardown(*self.args, **self.kwargs)

  def apply(self, elements):
    return self.fn.apply(elements, *self.args, **self.kwargs)
def curry_combine_fn(fn, args, kwargs):
  """Returns fn wrapped with the given extra args/kwargs, or fn itself if
  there are none to bind."""
  if args or kwargs:
    return _CurriedFn(fn, args, kwargs)
  return fn
class PhasedCombineFnExecutor(object):
  """Executor for phases of combine operations.

  The requested phase selects which portion of the CombineFn lifecycle
  `apply` runs over its input.
  """
  def __init__(self, phase, fn, args, kwargs):
    self.combine_fn = curry_combine_fn(fn, args, kwargs)
    dispatch = {
        'all': self.full_combine,
        'add': self.add_only,
        'merge': self.merge_only,
        'extract': self.extract_only,
        'convert': self.convert_to_accumulator,
    }
    if phase not in dispatch:
      raise ValueError('Unexpected phase: %s' % phase)
    self.apply = dispatch[phase]

  def full_combine(self, elements):
    # Runs the complete add/merge/extract cycle.
    return self.combine_fn.apply(elements)

  def add_only(self, elements):
    return self.combine_fn.add_inputs(
        self.combine_fn.create_accumulator(), elements)

  def merge_only(self, accumulators):
    return self.combine_fn.merge_accumulators(accumulators)

  def extract_only(self, accumulator):
    return self.combine_fn.extract_output(accumulator)

  def convert_to_accumulator(self, element):
    return self.combine_fn.add_input(
        self.combine_fn.create_accumulator(), element)
class Latest(object):
  """Combiners for computing the latest element."""
  @with_input_types(T)
  @with_output_types(T)
  class Globally(CombinerWithoutDefaults):
    """Compute the element with the latest timestamp from a PCollection."""
    @staticmethod
    def add_timestamp(element, timestamp=core.DoFn.TimestampParam):
      # Pair each element with its event timestamp.
      return [(element, timestamp)]

    def expand(self, pcoll):
      timestamped = (
          pcoll
          | core.ParDo(self.add_timestamp).with_output_types(
              Tuple[T, TimestampType]))
      combine = core.CombineGlobally(LatestCombineFn())
      if not self.has_defaults:
        combine = combine.without_defaults()
      return timestamped | combine

  @with_input_types(Tuple[K, V])
  @with_output_types(Tuple[K, V])
  class PerKey(ptransform.PTransform):
    """Compute elements with the latest timestamp for each key from a keyed
    PCollection."""
    @staticmethod
    def add_timestamp(element, timestamp=core.DoFn.TimestampParam):
      # Pair each value with its event timestamp, keeping the key.
      key, value = element
      return [(key, (value, timestamp))]

    def expand(self, pcoll):
      return (
          pcoll
          | core.ParDo(self.add_timestamp).with_output_types(
              Tuple[K, Tuple[T, TimestampType]])
          | core.CombinePerKey(LatestCombineFn()))
@with_input_types(Tuple[T, TimestampType])
@with_output_types(T)
class LatestCombineFn(core.CombineFn):
  """CombineFn that keeps the (element, timestamp) pair with the greatest
  timestamp and emits the element."""
  def create_accumulator(self):
    # Sentinel pair that loses to any real timestamp.
    return (None, window.MIN_TIMESTAMP)

  def add_input(self, accumulator, element):
    # Ties favor the incoming element (matching the original ordering).
    return accumulator if accumulator[1] > element[1] else element

  def merge_accumulators(self, accumulators):
    merged = self.create_accumulator()
    for candidate in accumulators:
      merged = self.add_input(merged, candidate)
    return merged

  def extract_output(self, accumulator):
    return accumulator[0]
from __future__ import absolute_import
import random
from typing import Any
from typing import Iterable
from typing import Tuple
from typing import TypeVar
import pandas as pd
Frame = TypeVar('Frame', bound=pd.core.generic.NDFrame)
class Partitioning(object):
  """A class representing a (consistent) partitioning of dataframe objects.
  """
  def __repr__(self):
    return self.__class__.__name__

  def is_subpartitioning_of(self, other):
    # type: (Partitioning) -> bool
    """Returns whether self is a sub-partition of other.

    Specifically, returns whether something partitioned by self is necessarily
    also partitioned by other.
    """
    raise NotImplementedError

  def partition_fn(self, df, num_partitions):
    # type: (Frame, int) -> Iterable[Tuple[Any, Frame]]
    """A callable that actually performs the partitioning of a Frame df.

    This will be invoked via a FlatMap in conjunction with a GroupKey to
    achieve the desired partitioning.
    """
    raise NotImplementedError

  def test_partition_fn(self, df):
    # Default test partitioning: exercise partition_fn with a fixed fan-out.
    return self.partition_fn(df, 5)
class Index(Partitioning):
  """A partitioning by index (either fully or partially).

  If the set of "levels" of the index to consider is not specified, the entire
  index is used.

  These form a partial order, given by

      Nothing() < Index([i]) < Index([i, j]) < ... < Index() < Singleton()

  The ordering is implemented via the is_subpartitioning_of method, where the
  examples on the right are subpartitionings of the examples on the left above.
  """
  def __init__(self, levels=None):
    # levels: optional list of index levels participating in the partitioning;
    # None means the whole index.
    self._levels = levels

  def __repr__(self):
    if self._levels:
      return 'Index%s' % self._levels
    else:
      return 'Index'

  def __eq__(self, other):
    return type(self) == type(other) and self._levels == other._levels

  def __ne__(self, other):
    return not self == other

  def __hash__(self):
    if self._levels:
      return hash(tuple(sorted(self._levels)))
    else:
      return hash(type(self))

  def is_subpartitioning_of(self, other):
    """Returns whether data partitioned by self is also partitioned by other."""
    if isinstance(other, Nothing):
      return True
    elif isinstance(other, Index):
      if self._levels is None:
        return True
      elif other._levels is None:
        return False
      else:
        # Partitioning by a subset of levels implies partitioning by a
        # superset of levels.
        return all(level in other._levels for level in self._levels)
    else:
      return False

  def partition_fn(self, df, num_partitions):
    """Yields (key, sub-frame) pairs by hashing the relevant index levels."""
    if self._levels is None:
      levels = list(range(df.index.nlevels))
    else:
      levels = self._levels
    hashes = sum(
        pd.util.hash_array(df.index.get_level_values(level))
        for level in levels)
    for key in range(num_partitions):
      yield key, df[hashes % num_partitions == key]

  def check(self, dfs):
    """Returns whether dfs have pairwise-disjoint index values on the
    partitioned levels."""
    # TODO(BEAM-11324): This check should be stronger, it should verify that
    # running partition_fn on the concatenation of dfs yields the same
    # partitions.
    if self._levels is None:

      def get_index_set(df):
        return set(df.index)
    else:

      def get_index_set(df):
        # One tuple of the selected level values per row. (The previous
        # implementation used the non-existent `df.index.level` attribute and
        # zipped a single generator, producing one tuple of whole arrays
        # rather than per-row tuples.)
        return set(
            zip(*(df.index.get_level_values(level) for level in self._levels)))

    index_sets = [get_index_set(df) for df in dfs]
    for i, index_set in enumerate(index_sets[:-1]):
      if not index_set.isdisjoint(set.union(*index_sets[i + 1:])):
        return False
    return True
class Singleton(Partitioning):
  """A partitioning of all the data into a single partition.
  """
  def __eq__(self, other):
    return type(self) == type(other)

  def __ne__(self, other):
    return not self == other

  def __hash__(self):
    return hash(type(self))

  def is_subpartitioning_of(self, other):
    # A single partition trivially satisfies any other partitioning.
    return True

  def partition_fn(self, df, num_partitions):
    # Everything lands in one partition under a single shared key.
    yield None, df

  def check(self, dfs):
    # At most one concrete partition may exist.
    return len(dfs) <= 1
class Nothing(Partitioning):
  """A partitioning imposing no constraints on the actual partitioning."""
  def __eq__(self, other):
    return type(self) == type(other)

  def __ne__(self, other):
    return not self == other

  def __hash__(self):
    return hash(type(self))

  def is_subpartitioning_of(self, other):
    return isinstance(other, Nothing)

  def test_partition_fn(self, df):
    """Randomly scatters rows across up to 10 partitions, for testing."""
    num_partitions = max(min(df.size, 10), 1)
    assignment = list(range(len(df)))
    random.shuffle(assignment)
    # pylint: disable=range-builtin-not-iterating
    labels = pd.Series(assignment, index=df.index) % num_partitions
    for key in range(num_partitions):
      yield key, df[labels == key]

  def check(self, dfs):
    # Any partitioning at all is acceptable.
    return True
from __future__ import absolute_import
import contextlib
import random
import threading
from typing import Any
from typing import Callable
from typing import Iterable
from typing import Optional
from typing import TypeVar
from apache_beam.dataframe import partitionings
class Session(object):
  """A session represents a mapping of expressions to concrete values.

  The bindings typically include required placeholders, but may be any
  intermediate expression as well.
  """
  def __init__(self, bindings=None):
    self._bindings = dict(bindings) if bindings else {}

  def evaluate(self, expr):  # type: (Expression) -> Any
    """Returns (computing and caching if needed) the value of expr."""
    try:
      return self._bindings[expr]
    except KeyError:
      value = expr.evaluate_at(self)
      self._bindings[expr] = value
      return value

  def lookup(self, expr):  # type: (Expression) -> Any
    """Returns the already-bound value of expr; raises KeyError if unbound."""
    return self._bindings[expr]
class PartitioningSession(Session):
  """An extension of Session that enforces actual partitioning of inputs.

  Each expression is evaluated multiple times for various supported
  partitionings determined by its `requires_partition_by` specification. For
  each tested partitioning, the input is partitioned and the expression is
  evaluated on each partition separately, as if this were actually executed in
  a parallel manner.

  For each input partitioning, the results are verified to be partitioned
  appropriately according to the expression's `preserves_partition_by`
  specification.

  For testing only.
  """
  def evaluate(self, expr):
    import pandas as pd
    import collections

    def is_scalar(expr):
      # Anything that is not a Series/DataFrame is treated as a scalar.
      return not isinstance(expr.proxy(), pd.core.generic.NDFrame)

    if expr not in self._bindings:
      if is_scalar(expr) or not expr.args():
        # Scalars and leaf expressions need no partitioning; evaluate directly.
        result = super(PartitioningSession, self).evaluate(expr)
      else:
        scaler_args = [arg for arg in expr.args() if is_scalar(arg)]  # (sic)

        def evaluate_with(input_partitioning):
          # One sub-Session per partition key; scalar args are shared by all.
          parts = collections.defaultdict(
              lambda: Session({arg: self.evaluate(arg)
                               for arg in scaler_args}))
          for arg in expr.args():
            if not is_scalar(arg):
              input = self.evaluate(arg)
              for key, part in input_partitioning.test_partition_fn(input):
                parts[key]._bindings[arg] = part
          if not parts:
            parts[None]  # Create at least one entry.

          results = []
          for session in parts.values():
            # Skip partitions in which every frame argument is empty.
            if any(len(session.lookup(arg)) for arg in expr.args()
                   if not is_scalar(arg)):
              results.append(session.evaluate(expr))

          # If the input partitioning is fine enough to be preserved, expect
          # the declared preservation; otherwise expect the input partitioning.
          expected_output_partitioning = expr.preserves_partition_by(
          ) if input_partitioning.is_subpartitioning_of(
              expr.preserves_partition_by()) else input_partitioning

          if not expected_output_partitioning.check(results):
            raise AssertionError(
                f"""Expression does not preserve partitioning!
                Expression: {expr}
                Requires: {expr.requires_partition_by()}
                Preserves: {expr.preserves_partition_by()}
                Input partitioning: {input_partitioning}
                Expected output partitioning: {expected_output_partitioning}
                """)

          if results:
            return pd.concat(results)
          else:
            # Choose any single session.
            return next(iter(parts.values())).evaluate(expr)

        # Store random state so it can be re-used for each execution, in case
        # the expression is part of a test that relies on the random seed.
        random_state = random.getstate()

        for input_partitioning in set([expr.requires_partition_by(),
                                       partitionings.Nothing(),
                                       partitionings.Index(),
                                       partitionings.Singleton()]):
          # Only test partitionings that satisfy the expression's requirement.
          if not input_partitioning.is_subpartitioning_of(
              expr.requires_partition_by()):
            continue
          random.setstate(random_state)
          # TODO(BEAM-11324): Consider verifying result is always the same
          result = evaluate_with(input_partitioning)

        self._bindings[expr] = result
    return self._bindings[expr]
# The return type of an Expression
T = TypeVar('T')
class Expression(object):
  """An expression is an operation bound to a set of arguments.

  An expression represents a deferred tree of operations, which can be
  evaluated at a specific bindings of root expressions to values.
  """
  def __init__(
      self,
      name,  # type: str
      proxy,  # type: T
      _id=None  # type: Optional[str]
  ):
    self._name = name
    self._proxy = proxy
    # Precomputed so identity survives pickling round-trips.
    self._id = _id or '%s_%s_%s' % (name, type(proxy).__name__, id(self))

  def proxy(self):
    # type: () -> T
    return self._proxy

  def __hash__(self):
    return hash(self._id)

  def __eq__(self, other):
    # Identity is entirely determined by _id.
    return self._id == other._id

  def __ne__(self, other):
    return not self == other

  def __repr__(self):
    return '%s[%s]' % (self.__class__.__name__, self._id)

  def placeholders(self):
    """Returns all the placeholders that self depends on."""
    raise NotImplementedError(type(self))

  def evaluate_at(self, session):
    # type: (Session) -> T
    """Returns the result of self with the bindings given in session."""
    raise NotImplementedError(type(self))

  def requires_partition_by(self):
    # type: () -> partitionings.Partitioning
    """Returns the partitioning, if any, require to evaluate this expression.

    Returns partitioning.Nothing() to require no partitioning is required.
    """
    raise NotImplementedError(type(self))

  def preserves_partition_by(self):
    # type: () -> partitionings.Partitioning
    """Returns the partitioning, if any, preserved by this expression.

    This gives an upper bound on the partitioning of its ouput. The actual
    partitioning of the output may be less strict (e.g. if the input was
    less partitioned).
    """
    raise NotImplementedError(type(self))
class PlaceholderExpression(Expression):
  """An expression whose value must be explicitly bound in the session."""
  def __init__(
      self,  # type: PlaceholderExpression
      proxy,  # type: T
      reference=None,  # type: Any
  ):
    """Initialize a placeholder expression.

    Args:
      proxy: A proxy object with the type expected to be bound to this
        expression. Used for type checking at pipeline construction time.
      reference: (Optional) opaque handle describing what this placeholder
        stands for; stored but not otherwise interpreted here.
    """
    super(PlaceholderExpression, self).__init__('placeholder', proxy)
    self._reference = reference

  def placeholders(self):
    # A placeholder depends only on itself.
    return frozenset([self])

  def args(self):
    return ()

  def evaluate_at(self, session):
    # Raises KeyError if the placeholder was never bound.
    return session.lookup(self)

  def requires_partition_by(self):
    return partitionings.Nothing()

  def preserves_partition_by(self):
    return partitionings.Nothing()
class ConstantExpression(Expression):
  """An expression whose value is known at pipeline construction time."""
  def __init__(
      self,  # type: ConstantExpression
      value,  # type: T
      proxy=None  # type: Optional[T]
  ):
    """Initialize a constant expression.

    Args:
      value: The constant value to be produced by this expression.
      proxy: (Optional) a proxy object with same type as `value` to use for
        rapid type checking at pipeline construction time. If not provided,
        `value` will be used directly.
    """
    if proxy is None:
      proxy = value
    super(ConstantExpression, self).__init__('constant', proxy)
    self._value = value

  def placeholders(self):
    # Constants depend on nothing.
    return frozenset()

  def args(self):
    return ()

  def evaluate_at(self, session):
    return self._value

  def requires_partition_by(self):
    return partitionings.Nothing()

  def preserves_partition_by(self):
    return partitionings.Nothing()
class ComputedExpression(Expression):
  """An expression whose value must be computed at pipeline execution time."""
  def __init__(
      self,  # type: ComputedExpression
      name,  # type: str
      func,  # type: Callable[...,T]
      args,  # type: Iterable[Expression]
      proxy=None,  # type: Optional[T]
      _id=None,  # type: Optional[str]
      requires_partition_by=partitionings.Index(),  # type: partitionings.Partitioning
      preserves_partition_by=partitionings.Nothing(),  # type: partitionings.Partitioning
  ):
    """Initialize a computed expression.

    Args:
      name: The name of this expression.
      func: The function that will be used to compute the value of this
        expression. Should accept arguments of the types returned when
        evaluating the `args` expressions.
      args: The list of expressions that will be used to produce inputs to
        `func`.
      proxy: (Optional) a proxy object with same type as the objects that this
        ComputedExpression will produce at execution time. If not provided, a
        proxy will be generated using `func` and the proxies of `args`.
      _id: (Optional) a string to uniquely identify this expression.
      requires_partition_by: The required (common) partitioning of the args.
      preserves_partition_by: The level of partitioning preserved.
    """
    # Non-parallelizable expressions may only be constructed inside an
    # allow_non_parallel_operations block.
    if (not _get_allow_non_parallel() and
        requires_partition_by == partitionings.Singleton()):
      raise NonParallelOperation(
          "Using non-parallel form of %s "
          "outside of allow_non_parallel_operations block." % name)
    args = tuple(args)
    if proxy is None:
      # Infer the output type by applying func to the argument proxies.
      proxy = func(*(arg.proxy() for arg in args))
    super(ComputedExpression, self).__init__(name, proxy, _id)
    self._func = func
    self._args = args
    self._requires_partition_by = requires_partition_by
    self._preserves_partition_by = preserves_partition_by

  def placeholders(self):
    # Union of the placeholders of all arguments.
    return frozenset.union(
        frozenset(), *[arg.placeholders() for arg in self.args()])

  def args(self):
    return self._args

  def evaluate_at(self, session):
    return self._func(*(session.evaluate(arg) for arg in self._args))

  def requires_partition_by(self):
    return self._requires_partition_by

  def preserves_partition_by(self):
    return self._preserves_partition_by
def elementwise_expression(name, func, args):
  """Convenience constructor for a row-wise ComputedExpression.

  Elementwise operations require no particular input partitioning and
  preserve any partitioning (expressed as Singleton, the top of the
  preservation order).
  """
  return ComputedExpression(
      name,
      func,
      args,
      requires_partition_by=partitionings.Nothing(),
      preserves_partition_by=partitionings.Singleton())
# Thread-local flag guarding construction of non-parallelizable expressions;
# toggled by allow_non_parallel_operations below.
_ALLOW_NON_PARALLEL = threading.local()
_ALLOW_NON_PARALLEL.value = False
def _get_allow_non_parallel():
  # Reads the thread-local flag set by allow_non_parallel_operations().
  return _ALLOW_NON_PARALLEL.value
@contextlib.contextmanager
def allow_non_parallel_operations(allow=True):
  """Context manager setting the thread-local non-parallel-operations flag.

  Args:
    allow: the value to set for the duration of the block; None leaves the
      current value untouched.
  """
  if allow is None:
    yield
  else:
    old_value = _ALLOW_NON_PARALLEL.value
    _ALLOW_NON_PARALLEL.value = allow
    # Restore in a finally block so an exception raised inside the `with`
    # body cannot leave the flag permanently set (the previous version
    # restored it only on normal exit).
    try:
      yield
    finally:
      _ALLOW_NON_PARALLEL.value = old_value
class NonParallelOperation(Exception):
  """Raised when a non-parallelizable expression is constructed outside an
  allow_non_parallel_operations block."""
  pass
from __future__ import absolute_import
import itertools
import re
from io import BytesIO
from io import StringIO
from io import TextIOWrapper
import pandas as pd
import apache_beam as beam
from apache_beam import io
from apache_beam.dataframe import frame_base
from apache_beam.io import fileio
_DEFAULT_LINES_CHUNKSIZE = 10_000
_DEFAULT_BYTES_CHUNKSIZE = 1 << 20
def read_csv(path, *args, splittable=False, **kwargs):
  """Emulates `pd.read_csv` from Pandas, but as a Beam PTransform.

  Use this as

      df = p | beam.dataframe.io.read_csv(...)

  to get a deferred Beam dataframe representing the contents of the file.

  If your files are large and records do not contain quoted newlines, you may
  pass the extra argument splittable=True to enable dynamic splitting for this
  read on newlines. Using this option for records that do contain quoted
  newlines may result in partial records and data corruption.
  """
  if 'nrows' in kwargs:
    raise ValueError('nrows not yet supported')
  splitter = _CsvSplitter(args, kwargs) if splittable else None
  return _ReadFromPandas(
      pd.read_csv, path, args, kwargs, incremental=True, splitter=splitter)
def _as_pc(df):
  """Converts a deferred dataframe to a PCollection of pandas objects."""
  from apache_beam.dataframe import convert  # avoid circular import
  # TODO(roberwb): Amortize the computation for multiple writes?
  return convert.to_pcollection(df, yield_elements='pandas')
def to_csv(df, path, *args, **kwargs):
  """Emulates `pd.DataFrame.to_csv`, writing a deferred dataframe to path.

  CSV output chunks append cleanly, hence incremental=True.
  """
  return _as_pc(df) | _WriteToPandas(
      'to_csv', path, args, kwargs, incremental=True, binary=False)
def read_fwf(path, *args, **kwargs):
  """Emulates `pd.read_fwf` (fixed-width fields) as a Beam PTransform."""
  return _ReadFromPandas(pd.read_fwf, path, args, kwargs, incremental=True)
def read_json(path, *args, **kwargs):
  """Emulates `pd.read_json` as a Beam PTransform.

  Line-delimited JSON (lines=True) is read incrementally and can be split
  dynamically at newlines.
  """
  if 'nrows' in kwargs:
    raise NotImplementedError('nrows not yet supported')
  lines = kwargs.get('lines', False)
  if lines:
    # Work around https://github.com/pandas-dev/pandas/issues/34548.
    kwargs = dict(kwargs, nrows=1 << 63)
  splitter = _DelimSplitter(b'\n', _DEFAULT_BYTES_CHUNKSIZE) if lines else None
  return _ReadFromPandas(
      pd.read_json,
      path,
      args,
      kwargs,
      incremental=lines,
      splitter=splitter,
      binary=False)
def to_json(df, path, orient=None, *args, **kwargs):
  """Emulates `pd.DataFrame.to_json`, writing a deferred dataframe to path.

  If orient is not given it is inferred from the deferred object's proxy:
  'columns' for DataFrames, 'index' for Series.
  """
  if orient is None:
    if isinstance(df._expr.proxy(), pd.DataFrame):
      orient = 'columns'
    elif isinstance(df._expr.proxy(), pd.Series):
      orient = 'index'
    else:
      raise frame_base.WontImplementError('not dataframes or series')
  kwargs['orient'] = orient
  # Only these orients concatenate cleanly across row chunks.
  return _as_pc(df) | _WriteToPandas(
      'to_json',
      path,
      args,
      kwargs,
      incremental=orient in ('index', 'records', 'values'),
      binary=False)
def read_html(path, *args, **kwargs):
  """Emulates `pd.read_html` as a Beam PTransform.

  Note that only the first table ([0]) of each document is read.
  """
  return _ReadFromPandas(
      lambda *args, **kwargs: pd.read_html(*args, **kwargs)[0],
      path,
      args,
      kwargs)
def to_html(df, path, *args, **kwargs):
  """Emulates `pd.DataFrame.to_html`, writing a deferred dataframe to path."""
  return _as_pc(df) | _WriteToPandas(
      'to_html',
      path,
      args,
      kwargs,
      # Row chunks can only be appended when the index is flat or sparsify
      # is disabled.
      incremental=(
          df._expr.proxy().index.nlevels == 1 or
          not kwargs.get('sparsify', True)),
      binary=False)
def _binary_reader(format):
  """Returns a read_<format> function wrapping the corresponding pandas reader."""
  func = getattr(pd, 'read_%s' % format)
  return lambda path, *args, **kwargs: _ReadFromPandas(func, path, args, kwargs)
def _binary_writer(format):
  """Returns a to_<format> function writing a deferred dataframe via pandas."""
  return (
      lambda df, path, *args, **kwargs: _as_pc(df) | _WriteToPandas(
          f'to_{format}', path, args, kwargs))
# Dynamically generate read_/to_ wrappers for pandas binary formats.
for format in ('excel', 'feather', 'parquet', 'stata'):
  globals()['read_%s' % format] = _binary_reader(format)
  globals()['to_%s' % format] = _binary_writer(format)

# Read-only formats, present only in newer pandas versions.
for format in ('sas', 'spss'):
  if hasattr(pd, 'read_%s' % format):  # Depends on pandas version.
    globals()['read_%s' % format] = _binary_reader(format)

# Operations that are explicitly unsupported.
read_clipboard = to_clipboard = frame_base.wont_implement_method('clipboard')
read_msgpack = to_msgpack = frame_base.wont_implement_method('deprecated')
read_hdf = to_hdf = frame_base.wont_implement_method('random access files')

# Any remaining pd.read_* entry point gets a not-implemented stub.
for name in dir(pd):
  if name.startswith('read_') and name not in globals():
    globals()[name] = frame_base.not_implemented_method(name)
def _prefix_range_index_with(prefix, df):
if isinstance(df.index, pd.RangeIndex):
return df.set_index(prefix + df.index.map(str).astype(str))
else:
return df
class _ReadFromPandas(beam.PTransform):
  """Reads files matching `path` into a deferred dataframe via a pandas reader.

  Args:
    reader: the pandas read_* callable invoked per file (or file shard).
    path: a (possibly glob) path string; non-string paths are rejected.
    args, kwargs: forwarded to the pandas reader.
    binary: whether the reader consumes bytes (vs. text).
    incremental: whether the reader supports chunked (streaming) reads.
    splitter: optional _Splitter enabling dynamic splitting within files.
  """
  def __init__(
      self,
      reader,
      path,
      args,
      kwargs,
      binary=True,
      incremental=False,
      splitter=None):
    # (The splitter default was previously the misleading value False; None is
    # equivalent everywhere, as splitter is only ever tested for truthiness.)
    if 'compression' in kwargs:
      raise NotImplementedError('compression')
    if not isinstance(path, str):
      raise frame_base.WontImplementError('non-deferred')
    self.reader = reader
    self.path = path
    self.args = args
    self.kwargs = kwargs
    self.binary = binary
    self.incremental = incremental
    self.splitter = splitter

  def expand(self, root):
    # TODO(robertwb): Handle streaming (with explicit schema).
    paths_pcoll = root | beam.Create([self.path])
    # Sample the first matching file at construction time to infer the
    # schema (proxy) of the resulting deferred dataframe.
    first = io.filesystems.FileSystems.match(
        [self.path], limits=[1])[0].metadata_list[0].path
    with io.filesystems.FileSystems.open(first) as handle:
      if not self.binary:
        handle = TextIOWrapper(handle)
      if self.incremental:
        sample = next(
            self.reader(handle, *self.args, **dict(self.kwargs, chunksize=100)))
      else:
        sample = self.reader(handle, *self.args, **self.kwargs)

    pcoll = (
        paths_pcoll
        | fileio.MatchFiles(self.path)
        | beam.Reshuffle()
        | fileio.ReadMatches()
        | beam.ParDo(
            _ReadFromPandasDoFn(
                self.reader,
                self.args,
                self.kwargs,
                self.binary,
                self.incremental,
                self.splitter)))
    from apache_beam.dataframe import convert
    return convert.to_dataframe(
        pcoll, proxy=_prefix_range_index_with(':', sample[:0]))
class _Splitter:
  """Interface for locating record boundaries when dynamically splitting reads."""
  def empty_buffer(self):
    """Returns an empty buffer of the right type (string or bytes).
    """
    raise NotImplementedError(self)

  def read_header(self, handle):
    """Reads the header from handle, which points to the start of the file.

    Returns the pair (header, buffer) where buffer contains any part of the
    file that was "overread" from handle while seeking the end of header.
    """
    raise NotImplementedError(self)

  def read_to_record_boundary(self, buffered, handle):
    """Reads the given handle up to the end of the current record.

    The buffer argument represents bytes that were read previously; logically
    it's as if these were pushed back into handle for reading. If the
    record end is within buffered, it's possible that no more bytes will be read
    from handle at all.

    Returns the pair (remaining_record_bytes, buffer) where buffer contains
    any part of the file that was "overread" from handle while seeking the end
    of the record.
    """
    raise NotImplementedError(self)
class _DelimSplitter(_Splitter):
  """A _Splitter that splits on delimiters between records.

  The delimiter is assumed to never occur within a record.
  """
  def __init__(self, delim, read_chunk_size=_DEFAULT_BYTES_CHUNKSIZE):
    # Multi-char delimiters would require more care across chunk boundaries.
    assert len(delim) == 1
    self._delim = delim
    self._empty = delim[:0]
    self._read_chunk_size = read_chunk_size

  def empty_buffer(self):
    return self._empty

  def read_header(self, handle):
    # Plain delimited formats carry no header.
    return self._empty, self._empty

  def read_to_record_boundary(self, buffered, handle):
    # First look for the delimiter in the bytes we already hold.
    record, found, overread = buffered.partition(self._delim)
    if found:
      return record + found, overread
    while True:
      chunk = handle.read(self._read_chunk_size)
      if not chunk:
        # EOF: the remainder is the final (unterminated) record.
        return buffered, self._empty
      record, found, overread = chunk.partition(self._delim)
      if found:
        return buffered + record + found, overread
      buffered += chunk
def _maybe_encode(str_or_bytes):
if isinstance(str_or_bytes, str):
return str_or_bytes.encode('utf-8')
else:
return str_or_bytes
class _CsvSplitter(_DelimSplitter):
  """Splitter for dynamically sharding CSV files at newline record boundaries.

  Currently does not handle quoted newlines, so is off by default, but such
  support could be added in the future.
  """
  def __init__(self, args, kwargs, read_chunk_size=_DEFAULT_BYTES_CHUNKSIZE):
    if args:
      # TODO(robertwb): Automatically populate kwargs as we do for df methods.
      raise ValueError(
          'Non-path arguments must be passed by keyword '
          'for splittable csv reads.')
    if kwargs.get('skipfooter', 0):
      raise ValueError('Splittablility incompatible with skipping footers.')
    # Fix: honor the read_chunk_size parameter (it was previously ignored in
    # favor of the module-level default).
    super(_CsvSplitter, self).__init__(
        _maybe_encode(kwargs.get('lineterminator', b'\n')), read_chunk_size)
    self._kwargs = kwargs

  def read_header(self, handle):
    """Consumes and returns the header line(s) from handle.

    Mirrors pandas' header/skiprows/comment/blank-line handling so the header
    can be replayed in front of every shard.
    """
    if self._kwargs.get('header', 'infer') == 'infer':
      # Pandas infers a header row unless explicit column names were given.
      if 'names' in self._kwargs:
        header = None
      else:
        header = 0
    else:
      header = self._kwargs['header']
    if header is None:
      return self._empty, self._empty

    if isinstance(header, int):
      max_header = header
    else:
      max_header = max(header)

    skiprows = self._kwargs.get('skiprows', 0)
    if isinstance(skiprows, int):
      is_skiprow = lambda ix: ix < skiprows
    elif callable(skiprows):
      is_skiprow = skiprows
    elif skiprows is None:
      is_skiprow = lambda ix: False
    else:
      is_skiprow = lambda ix: ix in skiprows

    comment = _maybe_encode(self._kwargs.get('comment', None))
    if comment:
      is_comment = lambda line: line.startswith(comment)
    else:
      is_comment = lambda line: False

    skip_blank_lines = self._kwargs.get('skip_blank_lines', True)
    if skip_blank_lines:
      is_blank = lambda line: re.match(rb'^\s*$', line)
    else:
      is_blank = lambda line: False

    # Read lines until the last header row (max_header, counted among
    # non-skipped lines) has been consumed.
    text_header = b''
    rest = b''
    skipped = 0
    for ix in itertools.count():
      line, rest = self.read_to_record_boundary(rest, handle)
      text_header += line
      if is_skiprow(ix) or is_blank(line) or is_comment(line):
        skipped += 1
        continue
      if ix - skipped == max_header:
        return text_header, rest
class _TruncatingFileHandle(object):
  """A wrapper of a file-like object representing the restriction of the
  underlying handle according to the given SDF restriction tracker, breaking
  the file only after the given delimiter.

  For example, if the underlying restriction is [103, 607) and each line were
  exactly 10 characters long (i.e. every 10th character was a newline), then
  this would give a view of a 500-byte file consisting of bytes 110 to 609
  (inclusive) of the underlying file.

  As with all SDF trackers, the endpoint may change dynamically during reading.
  """
  def __init__(self, underlying, tracker, splitter):
    self._underlying = underlying
    self._tracker = tracker
    self._splitter = splitter
    self._empty = self._splitter.empty_buffer()
    self._done = False
    # Consume the header; it is replayed at the front of every restriction.
    self._header, self._buffer = self._splitter.read_header(self._underlying)
    self._buffer_start_pos = len(self._header)
    start = self._tracker.current_restriction().start
    # Seek to first delimiter after the start position.
    if start > len(self._header):
      if start > len(self._header) + len(self._buffer):
        # Start lies beyond what we have buffered; seek the raw handle.
        self._buffer_start_pos = start
        self._buffer = self._empty
        self._underlying.seek(start)
      else:
        # Start lies within the buffered bytes; drop the prefix.
        self._buffer_start_pos = start
        self._buffer = self._buffer[start - len(self._header):]
      # Skip the partial record at the start; it belongs to the previous
      # restriction.
      skip, self._buffer = self._splitter.read_to_record_boundary(
          self._buffer, self._underlying)
      self._buffer_start_pos += len(skip)

  def readable(self):
    return True

  def writable(self):
    return False

  def seekable(self):
    return False

  @property
  def closed(self):
    return False

  def __iter__(self):
    # For pandas is_file_like.
    raise NotImplementedError()

  def read(self, size=-1):
    if self._header:
      # Replay the header exactly once, before any data bytes.
      res = self._header
      self._header = None
      return res
    elif self._done:
      return self._empty
    elif size == -1:
      self._buffer += self._underlying.read()
    elif not self._buffer:
      self._buffer = self._underlying.read(size)
    if not self._buffer:
      self._done = True
      return self._empty
    if self._tracker.try_claim(self._buffer_start_pos + len(self._buffer)):
      res = self._buffer
      self._buffer = self._empty
      self._buffer_start_pos += len(res)
    else:
      # The claim failed: the restriction ends within the buffer. Emit bytes
      # up to the end of the record spanning the boundary, then stop.
      offset = self._tracker.current_restriction().stop - self._buffer_start_pos
      if offset <= 0:
        res = self._empty
      else:
        rest, _ = self._splitter.read_to_record_boundary(
            self._buffer[offset:], self._underlying)
        res = self._buffer[:offset] + rest
      self._done = True
    return res
class _ReadFromPandasDoFn(beam.DoFn, beam.RestrictionProvider):
  """Splittable DoFn that reads (a restriction of) each matched file."""
  def __init__(self, reader, args, kwargs, binary, incremental, splitter):
    # avoid pickling issues
    if reader.__module__.startswith('pandas.'):
      reader = reader.__name__
    self.reader = reader
    self.args = args
    self.kwargs = kwargs
    self.binary = binary
    self.incremental = incremental
    self.splitter = splitter

  def initial_restriction(self, readable_file):
    # The restriction is a byte range over the whole file.
    return beam.io.restriction_trackers.OffsetRange(
        0, readable_file.metadata.size_in_bytes)

  def restriction_size(self, readable_file, restriction):
    return restriction.size()

  def create_tracker(self, restriction):
    tracker = beam.io.restriction_trackers.OffsetRestrictionTracker(restriction)
    if self.splitter:
      return tracker
    else:
      # Without a splitter, a file cannot be safely split mid-read.
      return beam.io.restriction_trackers.UnsplittableRestrictionTracker(
          tracker)

  def process(self, readable_file, tracker=beam.DoFn.RestrictionParam()):
    reader = self.reader
    if isinstance(reader, str):
      # Restore the pandas reader that was serialized by name in __init__.
      reader = getattr(pd, self.reader)
    with readable_file.open() as handle:
      if self.incremental:
        # TODO(robertwb): We could consider trying to get progress for
        # non-incremental sources that are read linearly, as long as they
        # don't try to seek. This could be deceptive as progress would
        # advance to 100% the instant the (large) read was done, discounting
        # any downstream processing.
        handle = _TruncatingFileHandle(
            handle,
            tracker,
            splitter=self.splitter or
            _DelimSplitter(b'\n', _DEFAULT_BYTES_CHUNKSIZE))
      if not self.binary:
        handle = TextIOWrapper(handle)
      if self.incremental:
        if 'chunksize' not in self.kwargs:
          self.kwargs['chunksize'] = _DEFAULT_LINES_CHUNKSIZE
        frames = reader(handle, *self.args, **self.kwargs)
      else:
        frames = [reader(handle, *self.args, **self.kwargs)]
      for df in frames:
        yield _prefix_range_index_with(readable_file.metadata.path + ':', df)
      if not self.incremental:
        # Satisfy the SDF contract by claiming the whole range.
        # Do this after emitting the frames to avoid advancing progress to 100%
        # prior to that.
        tracker.try_claim(tracker.current_restriction().stop)
class _WriteToPandas(beam.PTransform):
  """Writes a PCollection of pandas objects to path via a pandas to_* method."""
  def __init__(
      self, writer, path, args, kwargs, incremental=False, binary=True):
    self.writer = writer
    self.path = path
    self.args = args
    self.kwargs = kwargs
    self.incremental = incremental
    self.binary = binary

  def expand(self, pcoll):
    # Split the path into a destination directory and a file-name stem.
    dir, name = io.filesystems.FileSystems.split(self.path)
    return pcoll | fileio.WriteToFiles(
        path=dir,
        file_naming=fileio.default_file_naming(name),
        sink=_WriteToPandasFileSink(
            self.writer, self.args, self.kwargs, self.incremental, self.binary))
class _WriteToPandasFileSink(fileio.FileSink):
  """FileSink that serializes chunks via a pandas to_* method.

  In incremental mode rows are streamed into one file: the header, footer,
  and inter-record delimiter are inferred by serializing zero-, one-, and
  two-row frames and diffing the results. Otherwise, all chunks are buffered
  and written in a single call on flush.
  """
  def __init__(self, writer, args, kwargs, incremental, binary):
    if 'compression' in kwargs:
      raise NotImplementedError('compression')
    self.writer = writer
    self.args = args
    self.kwargs = kwargs
    self.incremental = incremental
    self.binary = binary
    self.StringOrBytesIO = BytesIO if binary else StringIO
    # Bind the write/flush strategy once, based on the mode.
    if incremental:
      self.write = self.write_record_incremental
      self.flush = self.close_incremental
    else:
      self.write = self.buffer_record
      self.flush = self.flush_buffer

  def open(self, file_handle):
    self.buffer = []
    # empty/header/footer are inferred lazily in write_record_incremental.
    self.empty = self.header = self.footer = None
    if not self.binary:
      file_handle = TextIOWrapper(file_handle)
    self.file_handle = file_handle

  def write_to(self, df, file_handle=None):
    # Serializes df with the configured writer; returns the text/bytes when
    # no handle is given, otherwise writes directly into the handle.
    non_none_handle = file_handle or self.StringOrBytesIO()
    getattr(df, self.writer)(non_none_handle, *self.args, **self.kwargs)
    if file_handle is None:
      return non_none_handle.getvalue()

  def write_record_incremental(self, value):
    if self.empty is None:
      # Serialization of a zero-row frame: header + footer only.
      self.empty = self.write_to(value[:0])
    if self.header is None and len(value):

      def new_value(ix):
        # Produces an index value distinct from ix (recursing into tuples
        # for MultiIndex entries).
        if isinstance(ix, tuple):
          return (new_value(ix[0]), ) + ix[1:]
        else:
          return str('x') + '_again'

      def change_index(df):
        df.index = df.index.map(new_value)
        return df

      one_row = self.write_to(value[:1])
      another_row = self.write_to(change_index(value[:1]))
      two_rows = self.write_to(pd.concat([value[:1], change_index(value[:1])]))
      # The longest common prefix of the empty and one-row serializations is
      # the header; the remainder of the empty serialization is the footer.
      for ix, c in enumerate(self.empty):
        if one_row[ix] != c:
          break
      else:
        ix = len(self.empty)
      self.header = self.empty[:ix]
      self.footer = self.empty[ix:]
      # The delimiter is what two_rows contains between the end of the first
      # record and the start of the second.
      self.delimiter = two_rows[len(one_row) - len(self.footer):-(
          len(another_row) - len(self.header)) or None]
      self.file_handle.write(self.header)
      self.first = True

    if len(value):
      if self.first:
        self.first = False
      else:
        self.file_handle.write(self.delimiter)
      # IDEA(robertwb): Construct a "truncating" stream wrapper to avoid the
      # in-memory copy.
      rows = self.write_to(value)
      # Strip the header and footer; only the record bytes are appended.
      self.file_handle.write(rows[len(self.header):-len(self.footer) or None])

  def close_incremental(self):
    if self.footer is not None:
      self.file_handle.write(self.footer)
    elif self.empty is not None:
      # Only empty chunks were seen; emit the empty serialization whole.
      self.file_handle.write(self.empty)
    self.file_handle.flush()

  def buffer_record(self, value):
    self.buffer.append(value)

  def flush_buffer(self):
    if self.buffer:
      self.write_to(pd.concat(self.buffer), self.file_handle)
    self.file_handle.flush()
from __future__ import absolute_import
from typing import List
from typing import Optional
from typing import Tuple
from typing import Union
from future.utils import binary_type
from future.utils import text_type
from apache_beam import typehints
from apache_beam.metrics import Metrics
from apache_beam.transforms import DoFn
from apache_beam.transforms import FlatMap
from apache_beam.transforms import ParDo
from apache_beam.transforms import PTransform
from apache_beam.transforms import util
from cachetools.func import ttl_cache
try:
from google.cloud import vision
except ImportError:
raise ImportError(
'Google Cloud Vision not supported for this execution environment '
'(could not import google.cloud.vision).')
__all__ = [
'AnnotateImage',
'AnnotateImageWithContext',
]
@ttl_cache(maxsize=128, ttl=3600)
def get_vision_client(client_options=None):
  """Returns a Cloud Vision API client (cached for up to an hour)."""
  return vision.ImageAnnotatorClient(client_options=client_options)
class AnnotateImage(PTransform):
  """A ``PTransform`` for annotating images using the GCP Vision API.

  ref: https://cloud.google.com/vision/docs/

  Batches elements together using ``util.BatchElements`` PTransform and sends
  each batch of elements to the GCP Vision API.
  Element is a Union[text_type, binary_type] of either an URI (e.g. a GCS URI)
  or binary_type base64-encoded image data.
  Accepts an `AsDict` side input that maps each image to an image context.
  """
  # The Vision API accepts at most 5 images per batch request.
  MAX_BATCH_SIZE = 5
  MIN_BATCH_SIZE = 1

  def __init__(
      self,
      features,
      retry=None,
      timeout=120,
      max_batch_size=None,
      min_batch_size=None,
      client_options=None,
      context_side_input=None,
      metadata=None):
    """
    Args:
      features: (List[``vision.types.Feature.enums.Feature``]) Required.
        The Vision API features to detect
      retry: (google.api_core.retry.Retry) Optional.
        A retry object used to retry requests.
        If None is specified (default), requests will not be retried.
      timeout: (float) Optional.
        The time in seconds to wait for the response from the Vision API.
        Default is 120.
      max_batch_size: (int) Optional.
        Maximum number of images to batch in the same request to the Vision API.
        Default is 5 (which is also the Vision API max).
        This parameter is primarily intended for testing.
      min_batch_size: (int) Optional.
        Minimum number of images to batch in the same request to the Vision API.
        Default is None. This parameter is primarily intended for testing.
      client_options:
        (Union[dict, google.api_core.client_options.ClientOptions]) Optional.
        Client options used to set user options on the client.
        API Endpoint should be set through client_options.
      context_side_input: (beam.pvalue.AsDict) Optional.
        An ``AsDict`` of a PCollection to be passed to the
        _ImageAnnotateFn as the image context mapping containing additional
        image context and/or feature-specific parameters.
        Example usage::

          image_contexts =
            [('gs://cloud-samples-data/vision/ocr/sign.jpg', Union[dict,
              ``vision.types.ImageContext()``]),
             ('gs://cloud-samples-data/vision/ocr/sign.jpg', Union[dict,
              ``vision.types.ImageContext()``]),]
          context_side_input =
            (
              p
              | "Image contexts" >> beam.Create(image_contexts)
            )
          visionml.AnnotateImage(features,
            context_side_input=beam.pvalue.AsDict(context_side_input)))
      metadata: (Optional[Sequence[Tuple[str, str]]]): Optional.
        Additional metadata that is provided to the method.
    """
    super(AnnotateImage, self).__init__()
    self.features = features
    self.retry = retry
    self.timeout = timeout
    self.max_batch_size = max_batch_size or AnnotateImage.MAX_BATCH_SIZE
    # Reject batch sizes the Vision API cannot serve.
    if self.max_batch_size > AnnotateImage.MAX_BATCH_SIZE:
      raise ValueError(
          'Max batch_size exceeded. '
          'Batch size needs to be smaller than {}'.format(
              AnnotateImage.MAX_BATCH_SIZE))
    self.min_batch_size = min_batch_size or AnnotateImage.MIN_BATCH_SIZE
    self.client_options = client_options
    self.context_side_input = context_side_input
    self.metadata = metadata

  def expand(self, pvalue):
    # Pair each image with its (optional) context, batch, then annotate.
    return (
        pvalue
        | FlatMap(self._create_image_annotation_pairs, self.context_side_input)
        | util.BatchElements(
            min_batch_size=self.min_batch_size,
            max_batch_size=self.max_batch_size)
        | ParDo(
            _ImageAnnotateFn(
                features=self.features,
                retry=self.retry,
                timeout=self.timeout,
                client_options=self.client_options,
                metadata=self.metadata)))

  @typehints.with_input_types(
      Union[text_type, binary_type], Optional[vision.types.ImageContext])
  @typehints.with_output_types(List[vision.types.AnnotateImageRequest])
  def _create_image_annotation_pairs(self, element, context_side_input):
    if context_side_input:  # If we have a side input image context, use that
      image_context = context_side_input.get(element)
    else:
      image_context = None
    if isinstance(element, text_type):
      # Text elements are treated as URIs (e.g. GCS paths).
      image = vision.types.Image(
          source=vision.types.ImageSource(image_uri=element))
    else:  # Typehint checks only allows text_type or binary_type
      image = vision.types.Image(content=element)
    request = vision.types.AnnotateImageRequest(
        image=image, features=self.features, image_context=image_context)
    yield request
class AnnotateImageWithContext(AnnotateImage):
  """A ``PTransform`` for annotating images using the GCP Vision API.

  ref: https://cloud.google.com/vision/docs/

  Batches elements together using ``util.BatchElements`` PTransform and sends
  each batch of elements to the GCP Vision API.

  Element is a tuple of::

    (Union[text_type, binary_type],
    Optional[``vision.types.ImageContext``])

  where the former is either an URI (e.g. a GCS URI) or binary_type
  base64-encoded image data.
  """
  def __init__(
      self,
      features,
      retry=None,
      timeout=120,
      max_batch_size=None,
      min_batch_size=None,
      client_options=None,
      metadata=None):
    """
    Args:
      features: (List[``vision.types.Feature.enums.Feature``]) Required.
        The Vision API features to detect
      retry: (google.api_core.retry.Retry) Optional.
        A retry object used to retry requests.
        If None is specified (default), requests will not be retried.
      timeout: (float) Optional.
        The time in seconds to wait for the response from the Vision API.
        Default is 120.
      max_batch_size: (int) Optional.
        Maximum number of images to batch in the same request to the Vision API.
        Default is 5 (which is also the Vision API max).
        This parameter is primarily intended for testing.
      min_batch_size: (int) Optional.
        Minimum number of images to batch in the same request to the Vision API.
        Default is None. This parameter is primarily intended for testing.
      client_options:
        (Union[dict, google.api_core.client_options.ClientOptions]) Optional.
        Client options used to set user options on the client.
        API Endpoint should be set through client_options.
      metadata: (Optional[Sequence[Tuple[str, str]]]): Optional.
        Additional metadata that is provided to the method.
    """
    super(AnnotateImageWithContext, self).__init__(
        features=features,
        retry=retry,
        timeout=timeout,
        max_batch_size=max_batch_size,
        min_batch_size=min_batch_size,
        client_options=client_options,
        metadata=metadata)

  def expand(self, pvalue):
    # Contexts are carried inside the elements here, so no side input is
    # passed to the pairing function (unlike the parent class).
    return (
        pvalue
        | FlatMap(self._create_image_annotation_pairs)
        | util.BatchElements(
            min_batch_size=self.min_batch_size,
            max_batch_size=self.max_batch_size)
        | ParDo(
            _ImageAnnotateFn(
                features=self.features,
                retry=self.retry,
                timeout=self.timeout,
                client_options=self.client_options,
                metadata=self.metadata)))

  @typehints.with_input_types(
      Tuple[Union[text_type, binary_type], Optional[vision.types.ImageContext]])
  @typehints.with_output_types(List[vision.types.AnnotateImageRequest])
  def _create_image_annotation_pairs(self, element, **kwargs):
    element, image_context = element  # Unpack (image, image_context) tuple
    if isinstance(element, text_type):
      # Text elements are treated as URIs (e.g. GCS paths).
      image = vision.types.Image(
          source=vision.types.ImageSource(image_uri=element))
    else:  # Typehint checks only allows text_type or binary_type
      image = vision.types.Image(content=element)
    request = vision.types.AnnotateImageRequest(
        image=image, features=self.features, image_context=image_context)
    yield request
@typehints.with_input_types(List[vision.types.AnnotateImageRequest])
class _ImageAnnotateFn(DoFn):
  """A DoFn that sends each input batch of requests to the GCP Vision API.

  Emits ``google.cloud.vision.types.BatchAnnotateImagesResponse`` objects.
  """
  def __init__(self, features, retry, timeout, client_options, metadata):
    super(_ImageAnnotateFn, self).__init__()
    # The client is created lazily in setup() so the DoFn stays picklable.
    self._client = None
    self.features = features
    self.retry = retry
    self.timeout = timeout
    self.client_options = client_options
    self.metadata = metadata
    self.counter = Metrics.counter(self.__class__, "API Calls")

  def setup(self):
    self._client = get_vision_client(self.client_options)

  def process(self, element, *args, **kwargs):
    call_kwargs = {
        'requests': element,
        'retry': self.retry,
        'timeout': self.timeout,
        'metadata': self.metadata,
    }
    response = self._client.batch_annotate_images(**call_kwargs)
    self.counter.inc()
    yield response
from __future__ import absolute_import
from typing import Mapping
from typing import Optional
from typing import Sequence
from typing import Tuple
from typing import Union
import apache_beam as beam
from apache_beam.metrics import Metrics
try:
from google.cloud import language
from google.cloud.language import enums # pylint: disable=unused-import
from google.cloud.language import types
except ImportError:
raise ImportError(
'Google Cloud Natural Language API not supported for this execution '
'environment (could not import Natural Language API client).')
__all__ = ['Document', 'AnnotateText']
class Document(object):
  """Represents the input to :class:`AnnotateText` transform.

  Args:
    content (str): The content of the input or the Google Cloud Storage URI
      where the file is stored.
    type (`Union[str, google.cloud.language.enums.Document.Type]`): Text type.
      Possible values are `HTML`, `PLAIN_TEXT`. The default value is
      `PLAIN_TEXT`.
    language_hint (`Optional[str]`): The language of the text. If not
      specified, language will be automatically detected. Values should
      conform to ISO-639-1 standard.
    encoding (`Optional[str]`): Text encoding. Possible values are: `NONE`,
      `UTF8`, `UTF16`, `UTF32`. The default value is `UTF8`.
    from_gcs (bool): Whether the content should be interpret as a Google Cloud
      Storage URI. The default value is :data:`False`.
  """
  def __init__(
      self,
      content,  # type: str
      type='PLAIN_TEXT',  # type: Union[str, enums.Document.Type]
      language_hint=None,  # type: Optional[str]
      encoding='UTF8',  # type: Optional[str]
      from_gcs=False  # type: bool
  ):
    self.content = content
    self.type = type
    self.encoding = encoding
    self.language_hint = language_hint
    self.from_gcs = from_gcs

  @staticmethod
  def to_dict(document):
    # type: (Document) -> Mapping[str, Optional[str]]
    # GCS-backed documents reference their content by URI; inline documents
    # carry the text directly.
    content_key = 'gcs_content_uri' if document.from_gcs else 'content'
    return {
        content_key: document.content,
        'type': document.type,
        'language': document.language_hint,
    }
@beam.ptransform_fn
def AnnotateText(
    pcoll,  # type: beam.pvalue.PCollection
    features,  # type: Union[Mapping[str, bool], types.AnnotateTextRequest.Features]
    timeout=None,  # type: Optional[float]
    metadata=None  # type: Optional[Sequence[Tuple[str, str]]]
):
  """A :class:`~apache_beam.transforms.ptransform.PTransform`
  for annotating text using the Google Cloud Natural Language API:
  https://cloud.google.com/natural-language/docs.

  Args:
    pcoll (:class:`~apache_beam.pvalue.PCollection`): An input PCollection of
      :class:`Document` objects.
    features (`Union[Mapping[str, bool], types.AnnotateTextRequest.Features]`):
      A dictionary of natural language operations to be performed on given
      text in the following format::

        {'extract_syntax': True, 'extract_entities': True}

    timeout (`Optional[float]`): The amount of time, in seconds, to wait
      for the request to complete. The timeout applies to each individual
      retry attempt.
    metadata (`Optional[Sequence[Tuple[str, str]]]`): Additional metadata
      that is provided to the method.
  """
  return pcoll | beam.ParDo(_AnnotateTextFn(features, timeout, metadata))
@beam.typehints.with_input_types(Document)
@beam.typehints.with_output_types(types.AnnotateTextResponse)
class _AnnotateTextFn(beam.DoFn):
  """A DoFn that calls the Natural Language API once per input Document."""
  def __init__(
      self,
      features,  # type: Union[Mapping[str, bool], types.AnnotateTextRequest.Features]
      timeout,  # type: Optional[float]
      metadata=None  # type: Optional[Sequence[Tuple[str, str]]]
  ):
    self.features = features
    self.timeout = timeout
    self.metadata = metadata
    self.api_calls = Metrics.counter(self.__class__.__name__, 'api_calls')
    # Created lazily in setup() so the DoFn stays picklable.
    self.client = None

  def setup(self):
    self.client = self._get_api_client()

  @staticmethod
  def _get_api_client():
    # type: () -> language.LanguageServiceClient
    return language.LanguageServiceClient()

  def process(self, element):
    annotate_kwargs = {
        'document': Document.to_dict(element),
        'features': self.features,
        'encoding_type': element.encoding,
        'timeout': self.timeout,
        'metadata': self.metadata,
    }
    response = self.client.annotate_text(**annotate_kwargs)
    self.api_calls.inc()
    yield response
from __future__ import absolute_import
from typing import Optional
from typing import Tuple
from typing import Union
from future.utils import binary_type
from future.utils import text_type
from apache_beam import typehints
from apache_beam.metrics import Metrics
from apache_beam.transforms import DoFn
from apache_beam.transforms import ParDo
from apache_beam.transforms import PTransform
from cachetools.func import ttl_cache
try:
from google.cloud import videointelligence
except ImportError:
raise ImportError(
'Google Cloud Video Intelligence not supported for this execution '
'environment (could not import google.cloud.videointelligence).')
__all__ = ['AnnotateVideo', 'AnnotateVideoWithContext']
@ttl_cache(maxsize=128, ttl=3600)
def get_videointelligence_client():
  """Returns a Cloud Video Intelligence client (cached for up to an hour)."""
  return videointelligence.VideoIntelligenceServiceClient()
class AnnotateVideo(PTransform):
  """A ``PTransform`` for annotating video using the GCP Video Intelligence API
  ref: https://cloud.google.com/video-intelligence/docs

  Sends each element to the GCP Video Intelligence API. Element is a
  Union[text_type, binary_type] of either an URI (e.g. a GCS URI) or
  binary_type base64-encoded video data.
  Accepts an `AsDict` side input that maps each video to a video context.
  """
  def __init__(
      self,
      features,
      location_id=None,
      metadata=None,
      timeout=120,
      context_side_input=None):
    """
    Args:
      features: (List[``videointelligence_v1.enums.Feature``]) Required.
        The Video Intelligence API features to detect
      location_id: (str) Optional.
        Cloud region where annotation should take place.
        If no region is specified, a region will be determined
        based on video file location.
      metadata: (Sequence[Tuple[str, str]]) Optional.
        Additional metadata that is provided to the method.
      timeout: (int) Optional.
        The time in seconds to wait for the response from the
        Video Intelligence API
      context_side_input: (beam.pvalue.AsDict) Optional.
        An ``AsDict`` of a PCollection to be passed to the
        _VideoAnnotateFn as the video context mapping containing additional
        video context and/or feature-specific parameters.
        Example usage::

          video_contexts =
            [('gs://cloud-samples-data/video/cat.mp4', Union[dict,
              ``videointelligence_v1.types.VideoContext``]),
             ('gs://some-other-video/sample.mp4', Union[dict,
              ``videointelligence_v1.types.VideoContext``]),]
          context_side_input =
            (
              p
              | "Video contexts" >> beam.Create(video_contexts)
            )
          videointelligenceml.AnnotateVideo(features,
            context_side_input=beam.pvalue.AsDict(context_side_input)))
    """
    super(AnnotateVideo, self).__init__()
    self.features = features
    self.location_id = location_id
    self.metadata = metadata
    self.timeout = timeout
    self.context_side_input = context_side_input

  def expand(self, pvalue):
    # The (optional) context mapping is handed to the DoFn as a side input.
    return pvalue | ParDo(
        _VideoAnnotateFn(
            features=self.features,
            location_id=self.location_id,
            metadata=self.metadata,
            timeout=self.timeout),
        context_side_input=self.context_side_input)
@typehints.with_input_types(
    Union[text_type, binary_type],
    Optional[videointelligence.types.VideoContext])
class _VideoAnnotateFn(DoFn):
  """A DoFn that sends each input element to the GCP Video Intelligence API
  service and yields the return result of the API
  (``google.cloud.videointelligence_v1.types.AnnotateVideoResponse``).
  """
  def __init__(self, features, location_id, metadata, timeout):
    super(_VideoAnnotateFn, self).__init__()
    # The client is created lazily per bundle; keeps the DoFn picklable.
    self._client = None
    self.features = features
    self.location_id = location_id
    self.metadata = metadata
    self.timeout = timeout
    self.counter = Metrics.counter(self.__class__, "API Calls")

  def start_bundle(self):
    self._client = get_videointelligence_client()

  def _annotate_video(self, element, video_context):
    common_kwargs = {
        'features': self.features,
        'video_context': video_context,
        'location_id': self.location_id,
        'metadata': self.metadata,
    }
    if isinstance(element, text_type):
      # A text element is interpreted as an URI (e.g. a GCS path).
      return self._client.annotate_video(input_uri=element, **common_kwargs)
    # Otherwise the element carries the raw video bytes.
    return self._client.annotate_video(input_content=element, **common_kwargs)

  def process(self, element, context_side_input=None, *args, **kwargs):
    # If a side input context mapping was provided, look this video up in it.
    video_context = (
        context_side_input.get(element) if context_side_input else None)
    response = self._annotate_video(element, video_context)
    self.counter.inc()
    yield response.result(timeout=self.timeout)
class AnnotateVideoWithContext(AnnotateVideo):
  """A ``PTransform`` for annotating video using the GCP Video Intelligence API
  ref: https://cloud.google.com/video-intelligence/docs

  Sends each element to the GCP Video Intelligence API.

  Element is a tuple of

    (Union[text_type, binary_type],
    Optional[videointelligence.types.VideoContext])

  where the former is either an URI (e.g. a GCS URI) or
  binary_type base64-encoded video data
  """
  def __init__(self, features, location_id=None, metadata=None, timeout=120):
    """
    Args:
      features: (List[``videointelligence_v1.enums.Feature``]) Required.
        the Video Intelligence API features to detect
      location_id: (str) Optional.
        Cloud region where annotation should take place.
        If no region is specified, a region will be determined
        based on video file location.
      metadata: (Sequence[Tuple[str, str]]) Optional.
        Additional metadata that is provided to the method.
      timeout: (int) Optional.
        The time in seconds to wait for the response from the
        Video Intelligence API
    """
    super(AnnotateVideoWithContext, self).__init__(
        features=features,
        location_id=location_id,
        metadata=metadata,
        timeout=timeout)

  def expand(self, pvalue):
    # Contexts arrive packed inside the elements, so no side input is used.
    return pvalue | ParDo(
        _VideoAnnotateFnWithContext(
            features=self.features,
            location_id=self.location_id,
            metadata=self.metadata,
            timeout=self.timeout))
@typehints.with_input_types(
    Tuple[Union[text_type, binary_type],
          Optional[videointelligence.types.VideoContext]])
class _VideoAnnotateFnWithContext(_VideoAnnotateFn):
  """A DoFn that unpacks each input tuple to element, video_context variables
  and sends these to the GCP Video Intelligence API service and outputs
  an element with the return result of the API
  (``google.cloud.videointelligence_v1.types.AnnotateVideoResponse``).
  """

  # NOTE: the constructor is inherited unchanged from _VideoAnnotateFn; the
  # previous override forwarded its arguments verbatim and added nothing.

  def process(self, element, *args, **kwargs):
    element, video_context = element  # Unpack (video, video_context) tuple
    response = self._annotate_video(element, video_context)
    self.counter.inc()
    yield response.result(timeout=self.timeout)
from __future__ import absolute_import
import logging
from google.cloud import dlp_v2
from apache_beam.options.pipeline_options import GoogleCloudOptions
from apache_beam.transforms import DoFn
from apache_beam.transforms import ParDo
from apache_beam.transforms import PTransform
from apache_beam.utils.annotations import experimental
__all__ = ['MaskDetectedDetails', 'InspectForDetails']
_LOGGER = logging.getLogger(__name__)
@experimental()
class MaskDetectedDetails(PTransform):
  """Scrubs sensitive information detected in text.

  The ``PTransform`` returns a ``PCollection`` of ``str``

  Example usage::

    pipeline | MaskDetectedDetails(project='example-gcp-project',
      deidentification_config={
          'info_type_transformations': {
              'transformations': [{
                  'primitive_transformation': {
                      'character_mask_config': {
                          'masking_character': '#'
                      }
                  }
              }]
          }
      }, inspection_config={'info_types': [{'name': 'EMAIL_ADDRESS'}]})
  """
  def __init__(
      self,
      project=None,
      deidentification_template_name=None,
      deidentification_config=None,
      inspection_template_name=None,
      inspection_config=None,
      timeout=None):
    """Initializes a :class:`MaskDetectedDetails` transform.

    Args:
      project: Optional. GCP project name in which inspection will be performed
      deidentification_template_name (str): Either this or
        `deidentification_config` required. Name of
        deidentification template to be used on detected sensitive information
        instances in text.
      deidentification_config
        (``Union[dict, google.cloud.dlp_v2.types.DeidentifyConfig]``):
        Configuration for the de-identification of the content item.
        If both template name and config are supplied,
        config is more important.
      inspection_template_name (str): This or `inspection_config` required.
        Name of inspection template to be used
        to detect sensitive data in text.
      inspection_config
        (``Union[dict, google.cloud.dlp_v2.types.InspectConfig]``):
        Configuration for the inspector used to detect sensitive data in text.
        If both template name and config are supplied,
        config takes precedence.
      timeout (float): Optional. The amount of time, in seconds, to wait for
        the request to complete.
    """
    self.config = {}
    self.project = project
    self.timeout = timeout
    # Exactly one of the de-identification template/config must be given.
    if deidentification_template_name is not None \
        and deidentification_config is not None:
      raise ValueError(
          'Both deidentification_template_name and '
          'deidentification_config were specified.'
          ' Please specify only one of these.')
    elif deidentification_template_name is None \
        and deidentification_config is None:
      raise ValueError(
          'deidentification_template_name or '
          'deidentification_config must be specified.')
    elif deidentification_template_name is not None:
      self.config['deidentify_template_name'] = deidentification_template_name
    else:
      self.config['deidentify_config'] = deidentification_config
    # At least one of inspection template/config is required; both may be
    # supplied (the service gives the config precedence).
    if inspection_config is None and inspection_template_name is None:
      raise ValueError(
          'inspection_template_name or inspection_config must be specified')
    if inspection_template_name is not None:
      self.config['inspect_template_name'] = inspection_template_name
    if inspection_config is not None:
      self.config['inspect_config'] = inspection_config

  def expand(self, pcoll):
    # Fall back to the pipeline-level GCP project if none was given.
    if self.project is None:
      self.project = pcoll.pipeline.options.view_as(GoogleCloudOptions).project
    if self.project is None:
      raise ValueError(
          'GCP project name needs to be specified in "project" pipeline option')
    return (
        pcoll
        | ParDo(_DeidentifyFn(self.config, self.timeout, self.project)))
@experimental()
class InspectForDetails(PTransform):
  """Inspects input text for sensitive information.

  the ``PTransform`` returns a ``PCollection`` of
  ``List[google.cloud.dlp_v2.proto.dlp_pb2.Finding]``

  Example usage::

    pipeline | InspectForDetails(project='example-gcp-project',
      inspection_config={'info_types': [{'name': 'EMAIL_ADDRESS'}]})
  """
  def __init__(
      self,
      project=None,
      inspection_template_name=None,
      inspection_config=None,
      timeout=None):
    """Initializes a :class:`InspectForDetails` transform.

    Args:
      project: Optional. GCP project name in which inspection will be performed
      inspection_template_name (str): This or `inspection_config` required.
        Name of inspection template to be used
        to detect sensitive data in text.
      inspection_config
        (``Union[dict, google.cloud.dlp_v2.types.InspectConfig]``):
        Configuration for the inspector used to detect sensitive data in text.
        If both template name and config are supplied,
        config takes precedence.
      timeout (float): Optional. The amount of time, in seconds, to wait for
        the request to complete.
    """
    self.project = project
    self.timeout = timeout
    self.config = {}
    # At least one of template name / config must be provided; both may be
    # supplied (the service gives the config precedence).
    if inspection_template_name is None and inspection_config is None:
      raise ValueError(
          'inspection_template_name or inspection_config must be specified')
    if inspection_template_name is not None:
      self.config['inspect_template_name'] = inspection_template_name
    if inspection_config is not None:
      self.config['inspect_config'] = inspection_config

  def expand(self, pcoll):
    # Resolve the GCP project, falling back to the pipeline option.
    project = self.project
    if project is None:
      project = pcoll.pipeline.options.view_as(GoogleCloudOptions).project
    if project is None:
      raise ValueError(
          'GCP project name needs to be specified in "project" pipeline option')
    self.project = project
    return pcoll | ParDo(_InspectFn(self.config, self.timeout, self.project))
class _DeidentifyFn(DoFn):
  """A DoFn that de-identifies each text element via the Cloud DLP service."""
  def __init__(self, config=None, timeout=None, project=None, client=None):
    self.config = config
    self.timeout = timeout
    self.client = client
    self.project = project
    self.params = {}

  def setup(self):
    # A client may be injected (e.g. by tests); otherwise create one here.
    if self.client is None:
      self.client = dlp_v2.DlpServiceClient()
    request_params = {
        'timeout': self.timeout,
        'parent': self.client.project_path(self.project),
    }
    request_params.update(self.config)
    self.params = request_params

  def process(self, element, **kwargs):
    operation = self.client.deidentify_content(
        item={"value": element}, **self.params)
    yield operation.item.value
class _InspectFn(DoFn):
  """A DoFn that inspects each text element for sensitive information.

  Yields one ``List[google.cloud.dlp_v2.proto.dlp_pb2.Finding]`` per
  input element.
  """
  def __init__(self, config=None, timeout=None, project=None):
    # config: inspect template name and/or inspect config forwarded to DLP.
    # timeout: per-request timeout in seconds.
    # project: GCP project used to build the request parent path.
    self.config = config
    self.timeout = timeout
    self.client = None
    self.project = project
    self.params = {}

  def setup(self):
    if self.client is None:
      self.client = dlp_v2.DlpServiceClient()
    self.params = {
        'timeout': self.timeout,
        "parent": self.client.project_path(self.project)
    }
    self.params.update(self.config)

  def process(self, element, **kwargs):
    operation = self.client.inspect_content(
        item={"value": element}, **self.params)
    # list(...) replaces the previous pass-through comprehension; it also
    # materializes the protobuf repeated field into a plain Python list.
    yield list(operation.result.findings)
# pytype: skip-file
from __future__ import absolute_import
import logging
import os
import re
import time
import uuid
from builtins import range
from builtins import zip
from future.utils import iteritems
from past.builtins import unicode
from apache_beam.internal import util
from apache_beam.io import iobase
from apache_beam.io.filesystem import BeamIOError
from apache_beam.io.filesystem import CompressionTypes
from apache_beam.io.filesystems import FileSystems
from apache_beam.options.value_provider import StaticValueProvider
from apache_beam.options.value_provider import ValueProvider
from apache_beam.options.value_provider import check_accessible
from apache_beam.transforms.display import DisplayDataItem
DEFAULT_SHARD_NAME_TEMPLATE = '-SSSSS-of-NNNNN'
__all__ = ['FileBasedSink']
_LOGGER = logging.getLogger(__name__)
class FileBasedSink(iobase.Sink):
"""A sink to a GCS or local files.
To implement a file-based sink, extend this class and override
either :meth:`.write_record()` or :meth:`.write_encoded_record()`.
If needed, also overwrite :meth:`.open()` and/or :meth:`.close()` to customize
the file handling or write headers and footers.
The output of this write is a :class:`~apache_beam.pvalue.PCollection` of
all written shards.
"""
# Max number of threads to be used for renaming.
_MAX_RENAME_THREADS = 64
__hash__ = None # type: ignore[assignment]
  def __init__(
      self,
      file_path_prefix,
      coder,
      file_name_suffix='',
      num_shards=0,
      shard_name_template=None,
      mime_type='application/octet-stream',
      compression_type=CompressionTypes.AUTO):
    """
    Raises:
      TypeError: if file path parameters are not a :class:`str` or
        :class:`~apache_beam.options.value_provider.ValueProvider`, or if
        **compression_type** is not member of
        :class:`~apache_beam.io.filesystem.CompressionTypes`.
      ValueError: if **shard_name_template** is not of expected
        format.
    """
    if not isinstance(file_path_prefix, ((str, unicode), ValueProvider)):
      raise TypeError(
          'file_path_prefix must be a string or ValueProvider;'
          'got %r instead' % file_path_prefix)
    if not isinstance(file_name_suffix, ((str, unicode), ValueProvider)):
      raise TypeError(
          'file_name_suffix must be a string or ValueProvider;'
          'got %r instead' % file_name_suffix)
    if not CompressionTypes.is_valid_compression_type(compression_type):
      raise TypeError(
          'compression_type must be CompressionType object but '
          'was %s' % type(compression_type))
    if shard_name_template is None:
      shard_name_template = DEFAULT_SHARD_NAME_TEMPLATE
    elif shard_name_template == '':
      # An empty template requests a single, unsharded output file.
      num_shards = 1
    # Normalize plain strings to ValueProviders so both forms are handled
    # uniformly downstream.
    if isinstance(file_path_prefix, (str, unicode)):
      file_path_prefix = StaticValueProvider(str, file_path_prefix)
    if isinstance(file_name_suffix, (str, unicode)):
      file_name_suffix = StaticValueProvider(str, file_name_suffix)
    self.file_path_prefix = file_path_prefix
    self.file_name_suffix = file_name_suffix
    self.num_shards = num_shards
    self.coder = coder
    # Precompute both the %-format (for final names) and the glob form
    # (for matching existing shards) of the shard template.
    self.shard_name_format = self._template_to_format(shard_name_template)
    self.shard_name_glob_format = self._template_to_glob_format(
        shard_name_template)
    self.compression_type = compression_type
    self.mime_type = mime_type
def display_data(self):
return {
'shards': DisplayDataItem(self.num_shards,
label='Number of Shards').drop_if_default(0),
'compression': DisplayDataItem(str(self.compression_type)),
'file_pattern': DisplayDataItem(
'{}{}{}'.format(
self.file_path_prefix,
self.shard_name_format,
self.file_name_suffix),
label='File Pattern')
}
  @check_accessible(['file_path_prefix'])
  def open(self, temp_path):
    """Opens ``temp_path``, returning an opaque file handle object.

    The returned file handle is passed to ``write_[encoded_]record`` and
    ``close``.
    """
    return FileSystems.create(temp_path, self.mime_type, self.compression_type)
  def write_record(self, file_handle, value):
    """Writes a single record to the file handle returned by ``open()``.

    By default, calls ``write_encoded_record`` after encoding the record with
    this sink's Coder.
    """
    self.write_encoded_record(file_handle, self.coder.encode(value))
  def write_encoded_record(self, file_handle, encoded_value):
    """Writes a single encoded record to the file handle returned by ``open()``.

    Subclasses must override this (or ``write_record``) to define the on-disk
    record format.
    """
    raise NotImplementedError
def close(self, file_handle):
"""Finalize and close the file handle returned from ``open()``.
Called after all records are written.
By default, calls ``file_handle.close()`` iff it is not None.
"""
if file_handle is not None:
file_handle.close()
  @check_accessible(['file_path_prefix', 'file_name_suffix'])
  def initialize_write(self):
    """Creates the temp directory that per-bundle writers will write into.

    Returns the temp directory path; it is passed back to ``open_writer`` and
    ``finalize_write`` as ``init_result``.
    """
    file_path_prefix = self.file_path_prefix.get()
    tmp_dir = self._create_temp_dir(file_path_prefix)
    FileSystems.mkdirs(tmp_dir)
    return tmp_dir
  def _create_temp_dir(self, file_path_prefix):
    """Returns a unique sibling temp-directory path for ``file_path_prefix``.

    Raises:
      ValueError: if the prefix is a filesystem root (a root path cannot host
        a sibling temp directory).
    """
    base_path, last_component = FileSystems.split(file_path_prefix)
    if not last_component:
      # Trying to re-split the base_path to check if it's a root.
      new_base_path, _ = FileSystems.split(base_path)
      if base_path == new_base_path:
        raise ValueError(
            'Cannot create a temporary directory for root path '
            'prefix %s. Please specify a file path prefix with '
            'at least two components.' % file_path_prefix)
    # uuid1 makes the directory name unique across concurrent pipelines.
    path_components = [
        base_path, 'beam-temp-' + last_component + '-' + uuid.uuid1().hex
    ]
    return FileSystems.join(*path_components)
  @check_accessible(['file_path_prefix', 'file_name_suffix'])
  def open_writer(self, init_result, uid):
    """Returns a ``FileBasedSinkWriter`` writing a unique temp file for ``uid``.

    ``init_result`` is the temp directory created by ``initialize_write``.
    """
    # A proper suffix is needed for AUTO compression detection.
    # We also ensure there will be no collisions with uid and a
    # (possibly unsharded) file_path_prefix and a (possibly empty)
    # file_name_suffix.
    file_path_prefix = self.file_path_prefix.get()
    file_name_suffix = self.file_name_suffix.get()
    suffix = ('.' + os.path.basename(file_path_prefix) + file_name_suffix)
    writer_path = FileSystems.join(init_result, uid) + suffix
    return FileBasedSinkWriter(self, writer_path)
  @check_accessible(['file_path_prefix', 'file_name_suffix'])
  def _get_final_name(self, shard_num, num_shards):
    """Returns the final destination path for shard ``shard_num``."""
    return ''.join([
        self.file_path_prefix.get(),
        self.shard_name_format %
        dict(shard_num=shard_num, num_shards=num_shards),
        self.file_name_suffix.get()
    ])
  @check_accessible(['file_path_prefix', 'file_name_suffix'])
  def _get_final_name_glob(self, num_shards):
    """Returns a glob pattern matching all final shard names."""
    return ''.join([
        self.file_path_prefix.get(),
        self.shard_name_glob_format % dict(num_shards=num_shards),
        self.file_name_suffix.get()
    ])
  def pre_finalize(self, init_result, writer_results):
    """Deletes destination files left over from prior (failed) attempts.

    Any file already matching the final-shard glob would collide with the
    renames performed in ``finalize_write``, so such files are removed first.
    """
    num_shards = len(list(writer_results))
    dst_glob = self._get_final_name_glob(num_shards)
    dst_glob_files = [
        file_metadata.path for mr in FileSystems.match([dst_glob])
        for file_metadata in mr.metadata_list
    ]
    if dst_glob_files:
      _LOGGER.warning(
          'Deleting %d existing files in target path matching: %s',
          len(dst_glob_files),
          self.shard_name_glob_format)
      FileSystems.delete(dst_glob_files)
  def _check_state_for_finalize_write(self, writer_results, num_shards):
    """Checks writer output files' states.

    Returns:
      src_files, dst_files: Lists of files to rename. For each i, finalize_write
        should rename(src_files[i], dst_files[i]).
      delete_files: Src files to delete. These could be leftovers from an
        incomplete (non-atomic) rename operation.
      num_skipped: Tally of writer results files already renamed, such as from
        a previous run of finalize_write().
    """
    if not writer_results:
      return [], [], [], 0
    # All temp shards share one temp directory, so one glob covers them all.
    src_glob = FileSystems.join(FileSystems.split(writer_results[0])[0], '*')
    dst_glob = self._get_final_name_glob(num_shards)
    src_glob_files = set(
        file_metadata.path for mr in FileSystems.match([src_glob])
        for file_metadata in mr.metadata_list)
    dst_glob_files = set(
        file_metadata.path for mr in FileSystems.match([dst_glob])
        for file_metadata in mr.metadata_list)
    src_files = []
    dst_files = []
    delete_files = []
    num_skipped = 0
    for shard_num, src in enumerate(writer_results):
      final_name = self._get_final_name(shard_num, num_shards)
      dst = final_name
      src_exists = src in src_glob_files
      dst_exists = dst in dst_glob_files
      if not src_exists and not dst_exists:
        # Neither temp nor final file present: the shard's data is lost.
        raise BeamIOError(
            'src and dst files do not exist. src: %s, dst: %s' % (src, dst))
      if not src_exists and dst_exists:
        # Already renamed by a previous finalize attempt.
        _LOGGER.debug('src: %s -> dst: %s already renamed, skipping', src, dst)
        num_skipped += 1
        continue
      if (src_exists and dst_exists and
          FileSystems.checksum(src) == FileSystems.checksum(dst)):
        # Rename completed but the temp copy survived; just remove it.
        _LOGGER.debug('src: %s == dst: %s, deleting src', src, dst)
        delete_files.append(src)
        continue
      src_files.append(src)
      dst_files.append(dst)
    return src_files, dst_files, delete_files, num_skipped
  @check_accessible(['file_path_prefix'])
  def finalize_write(
      self, init_result, writer_results, unused_pre_finalize_results):
    """Renames temp shard files to their final names and cleans up.

    Generator: yields the final name of each shard renamed in this call.
    Renames run in batches on a thread pool; the temp directory
    (``init_result``) is deleted at the end on a best-effort basis.
    """
    # Sorting makes shard numbering deterministic across retries.
    writer_results = sorted(writer_results)
    num_shards = len(writer_results)
    src_files, dst_files, delete_files, num_skipped = (
        self._check_state_for_finalize_write(writer_results, num_shards))
    num_skipped += len(delete_files)
    FileSystems.delete(delete_files)
    num_shards_to_finalize = len(src_files)
    min_threads = min(num_shards_to_finalize, FileBasedSink._MAX_RENAME_THREADS)
    num_threads = max(1, min_threads)
    # Batch the renames so each threadpool task issues one bulk rename call.
    chunk_size = FileSystems.get_chunk_size(self.file_path_prefix.get())
    source_file_batch = [
        src_files[i:i + chunk_size]
        for i in range(0, len(src_files), chunk_size)
    ]
    destination_file_batch = [
        dst_files[i:i + chunk_size]
        for i in range(0, len(dst_files), chunk_size)
    ]
    if num_shards_to_finalize:
      _LOGGER.info(
          'Starting finalize_write threads with num_shards: %d (skipped: %d), '
          'batches: %d, num_threads: %d',
          num_shards_to_finalize,
          num_skipped,
          len(source_file_batch),
          num_threads)
      start_time = time.time()
      # Use a thread pool for renaming operations.
      def _rename_batch(batch):
        """_rename_batch executes batch rename operations."""
        source_files, destination_files = batch
        exceptions = []
        try:
          FileSystems.rename(source_files, destination_files)
          return exceptions
        except BeamIOError as exp:
          if exp.exception_details is None:
            raise
          # Collect per-file failures; successful files are only logged.
          for (src, dst), exception in iteritems(exp.exception_details):
            if exception:
              _LOGGER.error(
                  ('Exception in _rename_batch. src: %s, '
                   'dst: %s, err: %s'),
                  src,
                  dst,
                  exception)
              exceptions.append(exception)
            else:
              _LOGGER.debug('Rename successful: %s -> %s', src, dst)
          return exceptions
      exception_batches = util.run_using_threadpool(
          _rename_batch,
          list(zip(source_file_batch, destination_file_batch)),
          num_threads)
      all_exceptions = [
          e for exception_batch in exception_batches for e in exception_batch
      ]
      if all_exceptions:
        raise Exception(
            'Encountered exceptions in finalize_write: %s' % all_exceptions)
      for final_name in dst_files:
        yield final_name
      _LOGGER.info(
          'Renamed %d shards in %.2f seconds.',
          num_shards_to_finalize,
          time.time() - start_time)
    else:
      _LOGGER.warning(
          'No shards found to finalize. num_shards: %d, skipped: %d',
          num_shards,
          num_skipped)
    try:
      FileSystems.delete([init_result])
    except IOError:
      # May have already been removed.
      pass
@staticmethod
def _template_replace_num_shards(shard_name_template):
match = re.search('N+', shard_name_template)
if match:
shard_name_template = shard_name_template.replace(
match.group(0), '%%(num_shards)0%dd' % len(match.group(0)))
return shard_name_template
@staticmethod
def _template_to_format(shard_name_template):
if not shard_name_template:
return ''
match = re.search('S+', shard_name_template)
if match is None:
raise ValueError(
"Shard number pattern S+ not found in shard_name_template: %s" %
shard_name_template)
shard_name_format = shard_name_template.replace(
match.group(0), '%%(shard_num)0%dd' % len(match.group(0)))
return FileBasedSink._template_replace_num_shards(shard_name_format)
@staticmethod
def _template_to_glob_format(shard_name_template):
if not shard_name_template:
return ''
match = re.search('S+', shard_name_template)
if match is None:
raise ValueError(
"Shard number pattern S+ not found in shard_name_template: %s" %
shard_name_template)
shard_name_format = shard_name_template.replace(match.group(0), '*')
return FileBasedSink._template_replace_num_shards(shard_name_format)
def __eq__(self, other):
# TODO: Clean up workitem_test which uses this.
# pylint: disable=unidiomatic-typecheck
return type(self) == type(other) and self.__dict__ == other.__dict__
  def __ne__(self, other):
    # TODO(BEAM-5949): Needed for Python 2 compatibility.
    # Delegates to __eq__ so the two stay consistent.
    return not self == other
class FileBasedSinkWriter(iobase.Writer):
  """The writer for FileBasedSink.

  Writes one temp shard file; ``close()`` returns the temp path so the sink
  can later rename it to its final name in ``finalize_write``.
  """
  def __init__(self, sink, temp_shard_path):
    # Parent sink supplies open/write_record/close behavior and the coder.
    self.sink = sink
    self.temp_shard_path = temp_shard_path
    self.temp_handle = self.sink.open(temp_shard_path)
  def write(self, value):
    self.sink.write_record(self.temp_handle, value)
  def close(self):
    self.sink.close(self.temp_handle)
    return self.temp_shard_path
"""`iobase.RestrictionTracker` implementations provided with Apache Beam."""
# pytype: skip-file
from __future__ import absolute_import
from __future__ import division
from builtins import object
from typing import Tuple
from apache_beam.io.iobase import RestrictionProgress
from apache_beam.io.iobase import RestrictionTracker
from apache_beam.io.range_trackers import OffsetRangeTracker
class OffsetRange(object):
  """A half-open range of integer offsets ``[start, stop)``.

  Used as the restriction type for offset-based splittable DoFns.
  """
  def __init__(self, start, stop):
    """Creates an OffsetRange for ``[start, stop)``.

    Raises:
      ValueError: if ``start`` is larger than ``stop``.
    """
    if start > stop:
      # Fixed grammar of the original message ('must be not be larger').
      raise ValueError(
          'Start offset must not be larger than the stop offset. '
          'Received %d and %d respectively.' % (start, stop))
    self.start = start
    self.stop = stop
  def __eq__(self, other):
    if not isinstance(other, OffsetRange):
      return False
    return self.start == other.start and self.stop == other.stop
  def __ne__(self, other):
    # TODO(BEAM-5949): Needed for Python 2 compatibility.
    return not self == other
  def __hash__(self):
    return hash((type(self), self.start, self.stop))
  def __repr__(self):
    return 'OffsetRange(start=%s, stop=%s)' % (self.start, self.stop)
  def split(self, desired_num_offsets_per_split, min_num_offsets_per_split=1):
    """Yields consecutive sub-ranges of at most max(desired, min) offsets.

    A trailing fragment smaller than a quarter of the desired size (or
    smaller than the minimum) is merged into the preceding split so that no
    tiny split is produced at the end.
    """
    current_split_start = self.start
    max_split_size = max(
        desired_num_offsets_per_split, min_num_offsets_per_split)
    while current_split_start < self.stop:
      current_split_stop = min(current_split_start + max_split_size, self.stop)
      remaining = self.stop - current_split_stop
      # Avoiding a small split at the end.
      if (remaining < desired_num_offsets_per_split // 4 or
          remaining < min_num_offsets_per_split):
        current_split_stop = self.stop
      yield OffsetRange(current_split_start, current_split_stop)
      current_split_start = current_split_stop
  def split_at(self, split_pos):
    # type: (...) -> Tuple[OffsetRange, OffsetRange]
    """Returns ([start, split_pos), [split_pos, stop))."""
    return OffsetRange(self.start, split_pos), OffsetRange(split_pos, self.stop)
  def new_tracker(self):
    """Returns a legacy-source-API OffsetRangeTracker for this range."""
    return OffsetRangeTracker(self.start, self.stop)
  def size(self):
    """Returns the number of offsets in the range."""
    return self.stop - self.start
class OffsetRestrictionTracker(RestrictionTracker):
  """An `iobase.RestrictionTracker` implementations for an offset range.

  Offset range is represented as OffsetRange.
  """
  def __init__(self, offset_range):
    # type: (OffsetRange) -> None
    assert isinstance(offset_range, OffsetRange), offset_range
    self._range = offset_range
    # Last successfully-claimed position (None until the first claim).
    self._current_position = None
    # Last position passed to try_claim, successful or not.
    self._last_claim_attempt = None
    # Set by try_split(0); blocks further primary-range splits.
    self._checkpointed = False
  def check_done(self):
    """Raises ValueError unless all offsets up to stop-1 were claimed."""
    if (self._range.start != self._range.stop and
        (self._last_claim_attempt is None or
         self._last_claim_attempt < self._range.stop - 1)):
      raise ValueError(
          'OffsetRestrictionTracker is not done since work in range [%s, %s) '
          'has not been claimed.' % (
              self._last_claim_attempt
              if self._last_claim_attempt is not None else self._range.start,
              self._range.stop))
  def current_restriction(self):
    return self._range
  def current_progress(self):
    # type: () -> RestrictionProgress
    """Returns the fraction of the range processed so far."""
    if self._current_position is None:
      fraction = 0.0
    elif self._range.stop == self._range.start:
      # If self._current_position is not None, we must be done.
      fraction = 1.0
    else:
      fraction = (
          float(self._current_position - self._range.start) /
          (self._range.stop - self._range.start))
    return RestrictionProgress(fraction=fraction)
  def start_position(self):
    return self._range.start
  def stop_position(self):
    return self._range.stop
  def try_claim(self, position):
    """Attempts to claim ``position``; returns True iff it is in range.

    Raises:
      ValueError: if positions are not claimed in strictly increasing order,
        or if ``position`` precedes the range start.
    """
    if (self._last_claim_attempt is not None and
        position <= self._last_claim_attempt):
      raise ValueError(
          'Positions claimed should strictly increase. Trying to claim '
          'position %d while last claim attempt was %d.' %
          (position, self._last_claim_attempt))
    self._last_claim_attempt = position
    if position < self._range.start:
      raise ValueError(
          'Position to be claimed cannot be smaller than the start position '
          'of the range. Tried to claim position %r for the range [%r, %r)' %
          (position, self._range.start, self._range.stop))
    if self._range.start <= position < self._range.stop:
      self._current_position = position
      return True
    return False
  def try_split(self, fraction_of_remainder):
    """Splits off the tail of the unclaimed range; returns (primary, residual).

    Returns None (implicitly) once checkpointed or when the computed split
    point would fall at/after the current stop.
    """
    if not self._checkpointed:
      if self._last_claim_attempt is None:
        cur = self._range.start - 1
      else:
        cur = self._last_claim_attempt
      # At least one offset is always kept in the primary range.
      split_point = (
          cur + int(max(1, (self._range.stop - cur) * fraction_of_remainder)))
      if split_point < self._range.stop:
        if fraction_of_remainder == 0:
          self._checkpointed = True
        self._range, residual_range = self._range.split_at(split_point)
        return self._range, residual_range
  def is_bounded(self):
    return True
class UnsplittableRestrictionTracker(RestrictionTracker):
  """An `iobase.RestrictionTracker` that wraps another but does not split."""
  def __init__(self, underling_tracker):
    # NOTE(review): '_underling_tracker' looks like a typo of
    # '_underlying_tracker'; left unchanged since it is an internal name.
    self._underling_tracker = underling_tracker
  def try_split(self, fraction_of_remainder):
    # Refuse every split request, keeping the restriction whole.
    return False
  # __getattribute__ is used rather than __getattr__ to override the
  # stubs in the baseclass.
  def __getattribute__(self, name):
    # Private attributes and try_split resolve on this wrapper; every other
    # attribute access is forwarded to the wrapped tracker.
    if name.startswith('_') or name in ('try_split', ):
      return super(UnsplittableRestrictionTracker, self).__getattribute__(name)
    else:
      return getattr(self._underling_tracker, name)
# pytype: skip-file
from __future__ import absolute_import
import typing
from past.builtins import unicode
from apache_beam.transforms.external import BeamJarExpansionService
from apache_beam.transforms.external import ExternalTransform
from apache_beam.transforms.external import NamedTupleBasedPayloadBuilder
# Payload schema for ReadFromKafka. Presumably the field names/types must
# match the Java Kafka external transform's expected schema — confirm against
# the expansion service before changing.
ReadFromKafkaSchema = typing.NamedTuple(
    'ReadFromKafkaSchema',
    [('consumer_config', typing.Mapping[unicode, unicode]),
     ('topics', typing.List[unicode]), ('key_deserializer', unicode),
     ('value_deserializer', unicode), ('start_read_time', typing.Optional[int]),
     ('max_num_records', typing.Optional[int]),
     ('max_read_time', typing.Optional[int]),
     ('commit_offset_in_finalize', bool), ('timestamp_policy', str)])
def default_io_expansion_service():
  """Returns an expansion service backed by the bundled Java IO shadow jar."""
  return BeamJarExpansionService('sdks:java:io:expansion-service:shadowJar')
class ReadFromKafka(ExternalTransform):
  """
  An external PTransform which reads from Kafka and returns a KV pair for
  each item in the specified Kafka topics. If no Kafka Deserializer for
  key/value is provided, then the data will be returned as a raw byte array.
  Experimental; no backwards compatibility guarantees.
  """
  # Returns the key/value data as raw byte arrays
  byte_array_deserializer = (
      'org.apache.kafka.common.serialization.ByteArrayDeserializer')
  # Built-in timestamp policy names accepted by __init__.
  processing_time_policy = 'ProcessingTime'
  create_time_policy = 'CreateTime'
  log_append_time = 'LogAppendTime'
  URN = 'beam:external:java:kafka:read:v1'
  def __init__(
      self,
      consumer_config,
      topics,
      key_deserializer=byte_array_deserializer,
      value_deserializer=byte_array_deserializer,
      start_read_time=None,
      max_num_records=None,
      max_read_time=None,
      commit_offset_in_finalize=False,
      timestamp_policy=processing_time_policy,
      expansion_service=None,
  ):
    """
    Initializes a read operation from Kafka.
    :param consumer_config: A dictionary containing the consumer configuration.
    :param topics: A list of topic strings.
    :param key_deserializer: A fully-qualified Java class name of a Kafka
        Deserializer for the topic's key, e.g.
        'org.apache.kafka.common.serialization.LongDeserializer'.
        Default: 'org.apache.kafka.common.serialization.ByteArrayDeserializer'.
    :param value_deserializer: A fully-qualified Java class name of a Kafka
        Deserializer for the topic's value, e.g.
        'org.apache.kafka.common.serialization.LongDeserializer'.
        Default: 'org.apache.kafka.common.serialization.ByteArrayDeserializer'.
    :param start_read_time: Use timestamp to set up start offset in milliseconds
        epoch.
    :param max_num_records: Maximum amount of records to be read. Mainly used
        for tests and demo applications.
    :param max_read_time: Maximum amount of time in seconds the transform
        executes. Mainly used for tests and demo applications.
    :param commit_offset_in_finalize: Whether to commit offsets when finalizing.
    :param timestamp_policy: The built-in timestamp policy which is used for
        extracting timestamp from KafkaRecord.
    :param expansion_service: The address (host:port) of the ExpansionService.

    :raises ValueError: if ``timestamp_policy`` is not one of the three
        built-in policy names.
    """
    if timestamp_policy not in [ReadFromKafka.processing_time_policy,
                                ReadFromKafka.create_time_policy,
                                ReadFromKafka.log_append_time]:
      raise ValueError(
          'timestamp_policy should be one of '
          '[ProcessingTime, CreateTime, LogAppendTime]')
    # Fields are passed by keyword, so their order here need not match the
    # schema declaration order.
    super(ReadFromKafka, self).__init__(
        self.URN,
        NamedTupleBasedPayloadBuilder(
            ReadFromKafkaSchema(
                consumer_config=consumer_config,
                topics=topics,
                key_deserializer=key_deserializer,
                value_deserializer=value_deserializer,
                max_num_records=max_num_records,
                max_read_time=max_read_time,
                start_read_time=start_read_time,
                commit_offset_in_finalize=commit_offset_in_finalize,
                timestamp_policy=timestamp_policy)),
        expansion_service or default_io_expansion_service())
# Payload schema for WriteToKafka; see the NOTE on ReadFromKafkaSchema about
# keeping it aligned with the Java expansion service.
WriteToKafkaSchema = typing.NamedTuple(
    'WriteToKafkaSchema',
    [
        ('producer_config', typing.Mapping[unicode, unicode]),
        ('topic', unicode),
        ('key_serializer', unicode),
        ('value_serializer', unicode),
    ])
class WriteToKafka(ExternalTransform):
  """
  An external PTransform which writes KV data to a specified Kafka topic.
  If no Kafka Serializer for key/value is provided, then key/value are
  assumed to be byte arrays.
  Experimental; no backwards compatibility guarantees.
  """
  # Default serializer which passes raw bytes to Kafka
  byte_array_serializer = (
      'org.apache.kafka.common.serialization.ByteArraySerializer')
  URN = 'beam:external:java:kafka:write:v1'
  def __init__(
      self,
      producer_config,
      topic,
      key_serializer=byte_array_serializer,
      value_serializer=byte_array_serializer,
      expansion_service=None):
    """
    Initializes a write operation to Kafka.
    :param producer_config: A dictionary containing the producer configuration.
    :param topic: A Kafka topic name.
    :param key_serializer: A fully-qualified Java class name of a Kafka
        Serializer for the topic's key, e.g.
        'org.apache.kafka.common.serialization.LongSerializer'.
        Default: 'org.apache.kafka.common.serialization.ByteArraySerializer'.
    :param value_serializer: A fully-qualified Java class name of a Kafka
        Serializer for the topic's value, e.g.
        'org.apache.kafka.common.serialization.LongSerializer'.
        Default: 'org.apache.kafka.common.serialization.ByteArraySerializer'.
    :param expansion_service: The address (host:port) of the ExpansionService.
    """
    super(WriteToKafka, self).__init__(
        self.URN,
        NamedTupleBasedPayloadBuilder(
            WriteToKafkaSchema(
                producer_config=producer_config,
                topic=topic,
                key_serializer=key_serializer,
                value_serializer=value_serializer,
            )),
        expansion_service or default_io_expansion_service())
# pytype: skip-file
from __future__ import absolute_import
from __future__ import division
import bisect
import threading
from builtins import range
from apache_beam.io import iobase
class ConcatSource(iobase.BoundedSource):
  """For internal use only; no backwards-compatibility guarantees.

  A ``BoundedSource`` that can group a set of ``BoundedSources``.
  Primarily for internal use, use the ``apache_beam.Flatten`` transform
  to create the union of several reads.

  Positions in this source are (source_index, source_position) tuples.
  """
  def __init__(self, sources):
    # Plain sources are wrapped into SourceBundles with unknown weight/range.
    self._source_bundles = [
        source if isinstance(source, iobase.SourceBundle) else
        iobase.SourceBundle(None, source, None, None) for source in sources
    ]
  @property
  def sources(self):
    return [s.source for s in self._source_bundles]
  def estimate_size(self):
    return sum(s.source.estimate_size() for s in self._source_bundles)
  def split(
      self, desired_bundle_size=None, start_position=None, stop_position=None):
    """Delegates splitting to each sub-source in order."""
    if start_position or stop_position:
      raise ValueError(
          'Multi-level initial splitting is not supported. Expected start and '
          'stop positions to be None. Received %r and %r respectively.' %
          (start_position, stop_position))
    for source in self._source_bundles:
      # We assume all sub-sources to produce bundles that specify weight using
      # the same unit. For example, all sub-sources may specify the size in
      # bytes as their weight.
      for bundle in source.source.split(desired_bundle_size,
                                        source.start_position,
                                        source.stop_position):
        yield bundle
  def get_range_tracker(self, start_position=None, stop_position=None):
    # Defaults cover the full run of sub-sources: [(0, None), (len, None)).
    if start_position is None:
      start_position = (0, None)
    if stop_position is None:
      stop_position = (len(self._source_bundles), None)
    return ConcatRangeTracker(
        start_position, stop_position, self._source_bundles)
  def read(self, range_tracker):
    """Reads each claimable sub-source in turn via its sub-range tracker."""
    start_source, _ = range_tracker.start_position()
    stop_source, stop_pos = range_tracker.stop_position()
    # A non-None stop position means the stop source itself is partially read.
    if stop_pos is not None:
      stop_source += 1
    for source_ix in range(start_source, stop_source):
      if not range_tracker.try_claim((source_ix, None)):
        break
      for record in self._source_bundles[source_ix].source.read(
          range_tracker.sub_range_tracker(source_ix)):
        yield record
  def default_output_coder(self):
    if self._source_bundles:
      # Getting coder from the first sub-sources. This assumes all sub-sources
      # to produce the same coder.
      return self._source_bundles[0].source.default_output_coder()
    else:
      return super(ConcatSource, self).default_output_coder()
class ConcatRangeTracker(iobase.RangeTracker):
  """For internal use only; no backwards-compatibility guarantees.

  Range tracker for ConcatSource"""
  def __init__(self, start, end, source_bundles):
    """Initializes ``ConcatRangeTracker``

    Args:
      start: start position, a tuple of (source_index, source_position)
      end: end position, a tuple of (source_index, source_position)
      source_bundles: the list of source bundles in the ConcatSource
    """
    super(ConcatRangeTracker, self).__init__()
    self._start = start
    self._end = end
    self._source_bundles = source_bundles
    self._lock = threading.RLock()
    # Lazily-initialized list of RangeTrackers corresponding to each source.
    self._range_trackers = [None] * len(source_bundles)
    # The currently-being-iterated-over (and latest claimed) source.
    self._claimed_source_ix = self._start[0]
    # Now compute cumulative progress through the sources for converting
    # between global fractions and fractions within specific sources.
    # TODO(robertwb): Implement fraction-at-position to properly scale
    # partial start and end sources.
    # Note, however, that in practice splits are typically on source
    # boundaries anyways.
    # NOTE(review): this list has len(source_bundles) + 1 entries only when
    # start[0] == 0; trackers are created with start (0, None) by
    # ConcatSource.get_range_tracker — confirm before using nonzero starts.
    last = end[0] if end[1] is None else end[0] + 1
    self._cumulative_weights = (
        [0] * start[0] +
        self._compute_cumulative_weights(source_bundles[start[0]:last]) + [1] *
        (len(source_bundles) - last - start[0]))
  @staticmethod
  def _compute_cumulative_weights(source_bundles):
    # Two adjacent sources must differ so that they can be uniquely
    # identified by a single global fraction. Let min_diff be the
    # smallest allowable difference between sources.
    min_diff = 1e-5
    # For the computation below, we need weights for all sources.
    # Substitute average weights for those whose weights are
    # unspecified (or 1.0 for everything if none are known).
    known = [s.weight for s in source_bundles if s.weight is not None]
    avg = sum(known) / len(known) if known else 1.0
    weights = [s.weight or avg for s in source_bundles]
    # Now compute running totals of the percent done upon reaching
    # each source, with respect to the start and end positions.
    # E.g. if the weights were [100, 20, 3] we would produce
    # [0.0, 100/123, 120/123, 1.0]
    total = float(sum(weights))
    running_total = [0]
    for w in weights:
      running_total.append(max(min_diff, min(1, running_total[-1] + w / total)))
    running_total[-1] = 1  # In case of rounding error.
    # There are issues if, due to rounding error or greatly differing sizes,
    # two adjacent running total weights are equal. Normalize the weights so
    # that this never happens.
    for k in range(1, len(running_total)):
      if running_total[k] == running_total[k - 1]:
        for j in range(k):
          running_total[j] *= (1 - min_diff)
    return running_total
  def start_position(self):
    return self._start
  def stop_position(self):
    return self._end
  def try_claim(self, pos):
    """Claims a (source_index, source_position) pair; None claims a source."""
    source_ix, source_pos = pos
    with self._lock:
      if source_ix > self._end[0]:
        return False
      elif source_ix == self._end[0] and self._end[1] is None:
        return False
      else:
        assert source_ix >= self._claimed_source_ix
        self._claimed_source_ix = source_ix
        if source_pos is None:
          return True
        else:
          return self.sub_range_tracker(source_ix).try_claim(source_pos)
  def try_split(self, pos):
    """Attempts to split at pos; returns ((source_ix, split_pos), fraction)."""
    source_ix, source_pos = pos
    with self._lock:
      if source_ix < self._claimed_source_ix:
        # Already claimed.
        return None
      elif source_ix > self._end[0]:
        # After end.
        return None
      elif source_ix == self._end[0] and self._end[1] is None:
        # At/after end.
        return None
      else:
        if source_ix > self._claimed_source_ix:
          # Prefer to split on even boundary.
          split_pos = None
          ratio = self._cumulative_weights[source_ix]
        else:
          # Split the current subsource.
          split = self.sub_range_tracker(source_ix).try_split(source_pos)
          if not split:
            return None
          split_pos, frac = split
          ratio = self.local_to_global(source_ix, frac)
        self._end = source_ix, split_pos
        # Rescale the weights so the retained primary range spans [0, 1].
        self._cumulative_weights = [
            min(w / ratio, 1) for w in self._cumulative_weights
        ]
        return (source_ix, split_pos), ratio
  def set_current_position(self, pos):
    raise NotImplementedError('Should only be called on sub-trackers')
  def position_at_fraction(self, fraction):
    source_ix, source_frac = self.global_to_local(fraction)
    last = self._end[0] if self._end[1] is None else self._end[0] + 1
    if source_ix == last:
      return (source_ix, None)
    else:
      return (
          source_ix,
          self.sub_range_tracker(source_ix).position_at_fraction(source_frac))
  def fraction_consumed(self):
    with self._lock:
      if self._claimed_source_ix == len(self._source_bundles):
        return 1.0
      else:
        return self.local_to_global(
            self._claimed_source_ix,
            self.sub_range_tracker(self._claimed_source_ix).fraction_consumed())
  def local_to_global(self, source_ix, source_frac):
    """Maps a fraction within one source to a fraction of the whole range."""
    cw = self._cumulative_weights
    # The global fraction is the fraction to source_ix plus some portion of
    # the way towards the next source.
    return cw[source_ix] + source_frac * (cw[source_ix + 1] - cw[source_ix])
  def global_to_local(self, frac):
    """Maps a global fraction to (source_index, fraction_within_source)."""
    if frac == 1:
      last = self._end[0] if self._end[1] is None else self._end[0] + 1
      return (last, None)
    else:
      cw = self._cumulative_weights
      # Find the last source that starts at or before frac.
      source_ix = bisect.bisect(cw, frac) - 1
      # Return this source, converting what's left of frac after starting
      # this source into a value in [0.0, 1.0) representing how far we are
      # towards the next source.
      return (
          source_ix,
          (frac - cw[source_ix]) / (cw[source_ix + 1] - cw[source_ix]))
  def sub_range_tracker(self, source_ix):
    """Lazily creates and caches the tracker for one sub-source.

    Uses a check / lock / re-check (double-checked locking) pattern so the
    common already-created path takes no lock.
    """
    assert self._start[0] <= source_ix <= self._end[0]
    if self._range_trackers[source_ix] is None:
      with self._lock:
        if self._range_trackers[source_ix] is None:
          source = self._source_bundles[source_ix]
          # The boundary sources may be partially covered by [start, end).
          if source_ix == self._start[0] and self._start[1] is not None:
            start = self._start[1]
          else:
            start = source.start_position
          if source_ix == self._end[0] and self._end[1] is not None:
            stop = self._end[1]
          else:
            stop = source.stop_position
          self._range_trackers[source_ix] = source.source.get_range_tracker(
              start, stop)
    return self._range_trackers[source_ix]
# pytype: skip-file
from __future__ import absolute_import
import typing
from past.builtins import unicode
from apache_beam.coders import RowCoder
from apache_beam.transforms.external import BeamJarExpansionService
from apache_beam.transforms.external import ExternalTransform
from apache_beam.transforms.external import NamedTupleBasedPayloadBuilder
from apache_beam.typehints.schemas import typing_to_runner_api
__all__ = [
'WriteToJdbc',
'ReadFromJdbc',
]
def default_io_expansion_service():
  """Returns an expansion service running the bundled schema-io shadow jar."""
  return BeamJarExpansionService(
      ':sdks:java:extensions:schemaio-expansion-service:shadowJar')
# Outer payload: 'location' carries the table name and 'config' carries a
# row-encoded Config (declared below).
JdbcConfigSchema = typing.NamedTuple(
    'JdbcConfigSchema',
    [('location', unicode), ('config', bytes)],
)
# Inner connection/statement configuration, serialized with RowCoder into
# JdbcConfigSchema.config by WriteToJdbc/ReadFromJdbc.
Config = typing.NamedTuple(
    'Config',
    [
        ('driver_class_name', unicode),
        ('jdbc_url', unicode),
        ('username', unicode),
        ('password', unicode),
        ('connection_properties', typing.Optional[unicode]),
        ('connection_init_sqls', typing.Optional[typing.List[unicode]]),
        ('write_statement', typing.Optional[unicode]),
        ('read_query', typing.Optional[unicode]),
        ('fetch_size', typing.Optional[int]),
        ('output_parallelization', typing.Optional[bool]),
    ],
)
class WriteToJdbc(ExternalTransform):
  """A PTransform which writes Rows to the specified database via JDBC.

  This transform receives Rows defined as NamedTuple type and registered in
  the coders registry, e.g.::

    ExampleRow = typing.NamedTuple('ExampleRow',
                                   [('id', int), ('name', unicode)])
    coders.registry.register_coder(ExampleRow, coders.RowCoder)

    with TestPipeline() as p:
      _ = (
          p
          | beam.Create([ExampleRow(1, 'abc')])
              .with_output_types(ExampleRow)
          | 'Write to jdbc' >> WriteToJdbc(
              table_name='jdbc_external_test_write',
              driver_class_name='org.postgresql.Driver',
              jdbc_url='jdbc:postgresql://localhost:5432/example',
              username='postgres',
              password='postgres',
          ))

  table_name is a required parameter, and by default, the write_statement is
  generated from it.
  The generated write_statement can be overridden by passing in a
  write_statement.
  Experimental; no backwards compatibility guarantees.
  """
  URN = 'beam:external:java:schemaio:jdbc:write:v1'
  def __init__(
      self,
      table_name,
      driver_class_name,
      jdbc_url,
      username,
      password,
      statement=None,
      connection_properties=None,
      connection_init_sqls=None,
      expansion_service=None,
  ):
    """
    Initializes a write operation to Jdbc.
    :param table_name: the name of the table to write to
    :param driver_class_name: name of the jdbc driver class
    :param jdbc_url: full jdbc url to the database.
    :param username: database username
    :param password: database password
    :param statement: sql statement to be executed
    :param connection_properties: properties of the jdbc connection
                                  passed as string with format
                                  [propertyName=property;]*
    :param connection_init_sqls: required only for MySql and MariaDB.
                                 passed as list of strings
    :param expansion_service: The address (host:port) of the ExpansionService.
    """
    super(WriteToJdbc, self).__init__(
        self.URN,
        NamedTupleBasedPayloadBuilder(
            JdbcConfigSchema(
                location=table_name,
                config=RowCoder(
                    typing_to_runner_api(Config).row_type.schema).encode(
                        Config(
                            driver_class_name=driver_class_name,
                            jdbc_url=jdbc_url,
                            username=username,
                            password=password,
                            connection_properties=connection_properties,
                            connection_init_sqls=connection_init_sqls,
                            write_statement=statement,
                            read_query=None,
                            fetch_size=None,
                            output_parallelization=None,
                        ))),
        ),
        expansion_service or default_io_expansion_service(),
    )
class ReadFromJdbc(ExternalTransform):
  """A PTransform which reads Rows from the specified database via JDBC.

  This transform delivers Rows defined as NamedTuple registered in
  the coders registry, e.g.::

    ExampleRow = typing.NamedTuple('ExampleRow',
                                   [('id', int), ('name', unicode)])
    coders.registry.register_coder(ExampleRow, coders.RowCoder)

    with TestPipeline() as p:
      result = (
          p
          | 'Read from jdbc' >> ReadFromJdbc(
              table_name='jdbc_external_test_read',
              driver_class_name='org.postgresql.Driver',
              jdbc_url='jdbc:postgresql://localhost:5432/example',
              username='postgres',
              password='postgres',
          ))

  table_name is a required parameter, and by default, the read_query is
  generated from it.
  The generated read_query can be overridden by passing in a read_query.
  Experimental; no backwards compatibility guarantees.
  """
  URN = 'beam:external:java:schemaio:jdbc:read:v1'
  def __init__(
      self,
      table_name,
      driver_class_name,
      jdbc_url,
      username,
      password,
      query=None,
      output_parallelization=None,
      fetch_size=None,
      connection_properties=None,
      connection_init_sqls=None,
      expansion_service=None,
  ):
    """
    Initializes a read operation from Jdbc.
    :param table_name: the name of the table to read from
    :param driver_class_name: name of the jdbc driver class
    :param jdbc_url: full jdbc url to the database.
    :param username: database username
    :param password: database password
    :param query: sql query to be executed
    :param output_parallelization: is output parallelization on
    :param fetch_size: how many rows to fetch
    :param connection_properties: properties of the jdbc connection
                                  passed as string with format
                                  [propertyName=property;]*
    :param connection_init_sqls: required only for MySql and MariaDB.
                                 passed as list of strings
    :param expansion_service: The address (host:port) of the ExpansionService.
    """
    super(ReadFromJdbc, self).__init__(
        self.URN,
        NamedTupleBasedPayloadBuilder(
            JdbcConfigSchema(
                location=table_name,
                config=RowCoder(
                    typing_to_runner_api(Config).row_type.schema).encode(
                        Config(
                            driver_class_name=driver_class_name,
                            jdbc_url=jdbc_url,
                            username=username,
                            password=password,
                            connection_properties=connection_properties,
                            connection_init_sqls=connection_init_sqls,
                            write_statement=None,
                            read_query=query,
                            fetch_size=fetch_size,
                            output_parallelization=output_parallelization,
                        ))),
        ),
        expansion_service or default_io_expansion_service(),
    )
# pytype: skip-file
from __future__ import absolute_import
from apache_beam.io.iobase import WatermarkEstimator
from apache_beam.transforms.core import WatermarkEstimatorProvider
from apache_beam.utils.timestamp import Timestamp
class MonotonicWatermarkEstimator(WatermarkEstimator):
  """A WatermarkEstimator which assumes that timestamps of all output records
  are increasing monotonically.
  """
  def __init__(self, timestamp):
    """For a new <element, restriction> pair, the initial value is None. When
    resuming processing, the initial timestamp will be the last reported
    watermark.
    """
    self._watermark = timestamp

  def observe_timestamp(self, timestamp):
    # TODO(BEAM-9312): Consider making it configurable to deal with late
    # timestamp.
    if self._watermark is not None and timestamp < self._watermark:
      raise ValueError(
          'A MonotonicWatermarkEstimator expects output '
          'timestamp to be increasing monotonically.')
    self._watermark = timestamp

  def current_watermark(self):
    return self._watermark

  def get_estimator_state(self):
    # The last observed timestamp is the full state of this estimator.
    return self._watermark

  @staticmethod
  def default_provider():
    """Provide a default WatermarkEstimatorProvider for
    MonotonicWatermarkEstimator.
    """
    class DefaultMonotonicWatermarkEstimator(WatermarkEstimatorProvider):
      def initial_estimator_state(self, element, restriction):
        # A fresh <element, restriction> pair starts with no watermark.
        return None

      def create_watermark_estimator(self, estimator_state):
        return MonotonicWatermarkEstimator(estimator_state)

    return DefaultMonotonicWatermarkEstimator()
class WalltimeWatermarkEstimator(WatermarkEstimator):
  """A WatermarkEstimator which uses processing time as the estimated watermark.
  """
  def __init__(self, timestamp=None):
    # Resume from a restored state when provided, otherwise start from now.
    self._timestamp = timestamp or Timestamp.now()

  def observe_timestamp(self, timestamp):
    # Wall-time watermarks ignore the timestamps of individual records.
    pass

  def current_watermark(self):
    # The watermark never regresses: advance it whenever wall time has
    # moved past the last reported value.
    now = Timestamp.now()
    if now > self._timestamp:
      self._timestamp = now
    return self._timestamp

  def get_estimator_state(self):
    return self._timestamp

  @staticmethod
  def default_provider():
    """Provide a default WatermarkEstimatorProvider for
    WalltimeWatermarkEstimator.
    """
    class DefaultWalltimeWatermarkEstimator(WatermarkEstimatorProvider):
      def initial_estimator_state(self, element, restriction):
        return None

      def create_watermark_estimator(self, estimator_state):
        return WalltimeWatermarkEstimator(estimator_state)

    return DefaultWalltimeWatermarkEstimator()
class ManualWatermarkEstimator(WatermarkEstimator):
  """A WatermarkEstimator which is controlled manually from within a DoFn.

  The DoFn must invoke set_watermark to advance the watermark.
  """
  def __init__(self, watermark):
    self._watermark = watermark

  def observe_timestamp(self, timestamp):
    # Observed element timestamps do not move a manually-driven watermark.
    pass

  def current_watermark(self):
    return self._watermark

  def get_estimator_state(self):
    return self._watermark

  def set_watermark(self, timestamp):
    # pylint: disable=line-too-long
    """Sets a timestamp before or at the timestamps of all future elements
    produced by the associated DoFn.

    This can be approximate. If records are output that violate this guarantee,
    they will be considered late, which will affect how they will be processed.
    See https://beam.apache.org/documentation/programming-guide/#watermarks-and-late-data
    for more information on late data and how to handle it.

    However, this value should be as late as possible. Downstream windows may
    not be able to close until this watermark passes their end.

    :param timestamp: (Timestamp) The new watermark; must not be earlier
      than the current watermark.
    :raises ValueError: if timestamp is not a Timestamp, or if it would move
      the watermark backwards.
    """
    if not isinstance(timestamp, Timestamp):
      raise ValueError('set_watermark expects a Timestamp as input')
    # Compare against None explicitly so a falsy-but-set watermark
    # (e.g. Timestamp(0)) is still checked for monotonicity.
    if self._watermark is not None and self._watermark > timestamp:
      # Interpolate the message eagerly. Previously the %s placeholders and
      # the arguments were passed to ValueError as separate args
      # (logging-style), so the message was never formatted.
      raise ValueError(
          'Watermark must be monotonically increasing. '
          'Provided watermark %s is less than '
          'current watermark %s' % (timestamp, self._watermark))
    self._watermark = timestamp

  @staticmethod
  def default_provider():
    """Provide a default WatermarkEstimatorProvider for
    ManualWatermarkEstimator.
    """
    class DefaultManualWatermarkEstimatorProvider(WatermarkEstimatorProvider):
      def initial_estimator_state(self, element, restriction):
        return None

      def create_watermark_estimator(self, estimator_state):
        return ManualWatermarkEstimator(estimator_state)

    return DefaultManualWatermarkEstimatorProvider()
# pytype: skip-file
from __future__ import absolute_import
from typing import List
from typing import NamedTuple
from typing import Optional
from past.builtins import unicode
import apache_beam as beam
from apache_beam.transforms.external import BeamJarExpansionService
from apache_beam.transforms.external import ExternalTransform
from apache_beam.transforms.external import NamedTupleBasedPayloadBuilder
# Public API of this module: the two cross-language Snowflake transforms and
# the enum-like classes for their disposition parameters.
__all__ = [
    'ReadFromSnowflake',
    'WriteToSnowflake',
    'WriteDisposition',
    'CreateDisposition',
]
def default_io_expansion_service():
  """Returns the default expansion service for Snowflake IO: the bundled
  Java Snowflake expansion-service shadow jar, resolved by Gradle target.
  """
  return BeamJarExpansionService(
      'sdks:java:io:snowflake:expansion-service:shadowJar')
# Payload schema for the external Snowflake read transform. Field names and
# order must match the configuration expected by the Java expansion service.
ReadFromSnowflakeSchema = NamedTuple(
    'ReadFromSnowflakeSchema',
    [
        ('server_name', unicode),
        ('schema', unicode),
        ('database', unicode),
        ('staging_bucket_name', unicode),
        ('storage_integration_name', unicode),
        ('username', Optional[unicode]),
        ('password', Optional[unicode]),
        ('private_key_path', Optional[unicode]),
        ('raw_private_key', Optional[unicode]),
        ('private_key_passphrase', Optional[unicode]),
        ('o_auth_token', Optional[unicode]),
        ('table', Optional[unicode]),
        ('query', Optional[unicode]),
        ('role', Optional[unicode]),
        ('warehouse', Optional[unicode]),
    ])
class ReadFromSnowflake(beam.PTransform):
  """
  An external PTransform which reads from Snowflake.
  """
  URN = 'beam:external:java:snowflake:read:v1'

  def __init__(
      self,
      server_name,
      schema,
      database,
      staging_bucket_name,
      storage_integration_name,
      csv_mapper,
      username=None,
      password=None,
      private_key_path=None,
      raw_private_key=None,
      private_key_passphrase=None,
      o_auth_token=None,
      table=None,
      query=None,
      role=None,
      warehouse=None,
      expansion_service=None):
    """
    Initializes a read operation from Snowflake.

    Required parameters:

    :param server_name: full Snowflake server name with the following format
        https://account.region.gcp.snowflakecomputing.com.
    :param schema: name of the Snowflake schema in the database to use.
    :param database: name of the Snowflake database to use.
    :param staging_bucket_name: name of the Google Cloud Storage bucket.
        Bucket will be used as a temporary location for storing CSV files.
        Those temporary directories will be named
        'sf_copy_csv_DATE_TIME_RANDOMSUFFIX'
        and they will be removed automatically once Read operation finishes.
    :param storage_integration_name: is the name of storage integration
        object created according to Snowflake documentation.
    :param csv_mapper: specifies a function which must translate
        user-defined object to array of strings.
        SnowflakeIO uses a COPY INTO <location> statement to move data from
        a Snowflake table to Google Cloud Storage as CSV files. These files
        are then downloaded via FileIO and processed line by line.
        Each line is split into an array of Strings using the OpenCSV
        library. The csv_mapper function's job is to give the user the
        possibility to convert the array of Strings to a user-defined type,
        ie. GenericRecord for Avro or Parquet files, or custom objects.
        Example:
        def csv_mapper(strings_array)
        return User(strings_array[0], int(strings_array[1])))
    :param table: specifies a Snowflake table name.
    :param query: specifies a Snowflake custom SQL query.
    :param role: specifies a Snowflake role.
    :param warehouse: specifies a Snowflake warehouse name.
    :param expansion_service: specifies URL of expansion service.

    Authentication parameters:

    :param username: specifies username for
        username/password authentication method.
    :param password: specifies password for
        username/password authentication method.
    :param private_key_path: specifies a private key file for
        key/ pair authentication method.
    :param raw_private_key: specifies a private key for
        key/ pair authentication method.
    :param private_key_passphrase: specifies password for
        key/ pair authentication method.
    :param o_auth_token: specifies access token for
        OAuth authentication method.
    """
    # Fail fast at pipeline-construction time if no complete authentication
    # method was supplied.
    verify_credentials(
        username=username,
        password=password,
        private_key_path=private_key_path,
        raw_private_key=raw_private_key,
        private_key_passphrase=private_key_passphrase,
        o_auth_token=o_auth_token,
    )

    self.params = ReadFromSnowflakeSchema(
        server_name=server_name,
        schema=schema,
        database=database,
        staging_bucket_name=staging_bucket_name,
        storage_integration_name=storage_integration_name,
        username=username,
        password=password,
        private_key_path=private_key_path,
        raw_private_key=raw_private_key,
        private_key_passphrase=private_key_passphrase,
        o_auth_token=o_auth_token,
        table=table,
        query=query,
        role=role,
        warehouse=warehouse,
    )
    self.csv_mapper = csv_mapper
    self.expansion_service = expansion_service or default_io_expansion_service()

  def expand(self, pbegin):
    return (
        pbegin
        | ExternalTransform(
            self.URN,
            NamedTupleBasedPayloadBuilder(self.params),
            self.expansion_service,
        )
        # NOTE(review): a plain byte-split does not handle quoted or escaped
        # commas inside CSV fields -- verify upstream data cannot contain them.
        | 'CSV to array mapper' >> beam.Map(lambda csv: csv.split(b','))
        | 'CSV mapper' >> beam.Map(self.csv_mapper))
# Payload schema for the external Snowflake write transform. Field names and
# order must match the configuration expected by the Java expansion service.
WriteToSnowflakeSchema = NamedTuple(
    'WriteToSnowflakeSchema',
    [
        ('server_name', unicode),
        ('schema', unicode),
        ('database', unicode),
        ('staging_bucket_name', unicode),
        ('storage_integration_name', unicode),
        ('create_disposition', unicode),
        ('write_disposition', unicode),
        ('table_schema', unicode),
        ('username', Optional[unicode]),
        ('password', Optional[unicode]),
        ('private_key_path', Optional[unicode]),
        ('raw_private_key', Optional[unicode]),
        ('private_key_passphrase', Optional[unicode]),
        ('o_auth_token', Optional[unicode]),
        ('table', Optional[unicode]),
        ('query', Optional[unicode]),
        ('role', Optional[unicode]),
        ('warehouse', Optional[unicode]),
    ],
)
class WriteToSnowflake(beam.PTransform):
  """
  An external PTransform which writes to Snowflake.
  """
  URN = 'beam:external:java:snowflake:write:v1'

  def __init__(
      self,
      server_name,
      schema,
      database,
      staging_bucket_name,
      storage_integration_name,
      create_disposition,
      write_disposition,
      table_schema,
      user_data_mapper,
      username=None,
      password=None,
      private_key_path=None,
      raw_private_key=None,
      private_key_passphrase=None,
      o_auth_token=None,
      table=None,
      query=None,
      role=None,
      warehouse=None,
      expansion_service=None,
  ):
    # pylint: disable=line-too-long
    """
    Initializes a write operation to Snowflake.

    Required parameters:

    :param server_name: full Snowflake server name with the following format
        https://account.region.gcp.snowflakecomputing.com.
    :param schema: name of the Snowflake schema in the database to use.
    :param database: name of the Snowflake database to use.
    :param staging_bucket_name: name of the Google Cloud Storage bucket.
        Bucket will be used as a temporary location for storing CSV files.
    :param storage_integration_name: is the name of a Snowflake storage
        integration object created according to Snowflake documentation for the
        GCS bucket.
    :param user_data_mapper: specifies a function which maps data from
        a PCollection to an array of String values before the write operation
        saves the data to temporary .csv files.
        Example:
        def user_data_mapper(user):
        return [user.name, str(user.age)]
    :param table: specifies a Snowflake table name
    :param query: specifies a custom SQL query
    :param role: specifies a Snowflake role.
    :param warehouse: specifies a Snowflake warehouse name.
    :param expansion_service: specifies URL of expansion service.

    Authentication parameters:

    :param username: specifies username for
        username/password authentication method.
    :param password: specifies password for
        username/password authentication method.
    :param private_key_path: specifies a private key file for
        key/ pair authentication method.
    :param raw_private_key: specifies a private key for
        key/ pair authentication method.
    :param private_key_passphrase: specifies password for
        key/ pair authentication method.
    :param o_auth_token: specifies access token for
        OAuth authentication method.

    Additional parameters:

    :param create_disposition: Defines the behaviour of the write operation if
        the target table does not exist. The following values are supported:
        CREATE_IF_NEEDED - default behaviour. The write operation checks whether
        the specified target table exists; if it does not, the write operation
        attempts to create the table. Specify the schema for the target table
        using the table_schema parameter.
        CREATE_NEVER - The write operation fails if the target table does not
        exist.
    :param write_disposition: Defines the write behaviour based on the table
        where data will be written to. The following values are supported:
        APPEND - Default behaviour. Written data is added to the existing rows
        in the table,
        EMPTY - The target table must be empty; otherwise, the write operation
        fails,
        TRUNCATE - The write operation deletes all rows from the target table
        before writing to it.
    :param table_schema: When the create_disposition parameter is set to
        CREATE_IF_NEEDED, the table_schema parameter enables specifying the
        schema for the created target table. A table schema is as JSON with the
        following structure:
        {"schema":[
        {
        "dataType":{"type":"<COLUMN DATA TYPE>"},
        "name":"<COLUMN NAME> ",
        "nullable": <NULLABLE>
        },
        ]}
        All supported data types:
        {"schema":[
        {"dataType":{"type":"date"},"name":"","nullable":false},
        {"dataType":{"type":"datetime"},"name":"","nullable":false},
        {"dataType":{"type":"time"},"name":"","nullable":false},
        {"dataType":{"type":"timestamp"},"name":"","nullable":false},
        {"dataType":{"type":"timestamp_ltz"},"name":"","nullable":false},
        {"dataType":{"type":"timestamp_ntz"},"name":"","nullable":false},
        {"dataType":{"type":"timestamp_tz"},"name":"","nullable":false},
        {"dataType":{"type":"boolean"},"name":"","nullable":false},
        {"dataType":{"type":"decimal","precision":38,"scale":1},"name":"","nullable":true},
        {"dataType":{"type":"double"},"name":"","nullable":false},
        {"dataType":{"type":"float"},"name":"","nullable":false},
        {"dataType":{"type":"integer","precision":38,"scale":0},"name":"","nullable":false},
        {"dataType":{"type":"number","precision":38,"scale":1},"name":"","nullable":false},
        {"dataType":{"type":"numeric","precision":38,"scale":2},"name":"","nullable":false},
        {"dataType":{"type":"real"},"name":"","nullable":false},
        {"dataType":{"type":"array"},"name":"","nullable":false},
        {"dataType":{"type":"object"},"name":"","nullable":false},
        {"dataType":{"type":"variant"},"name":"","nullable":true},
        {"dataType":{"type":"binary","size":null},"name":"","nullable":false},
        {"dataType":{"type":"char","length":1},"name":"","nullable":false},
        {"dataType":{"type":"string","length":null},"name":"","nullable":false},
        {"dataType":{"type":"text","length":null},"name":"","nullable":false},
        {"dataType":{"type":"varbinary","size":null},"name":"","nullable":false},
        {"dataType":{"type":"varchar","length":100},"name":"","nullable":false}]
        }
    """
    # Validate credentials and the two disposition parameters at
    # pipeline-construction time, before anything is shipped to the
    # expansion service.
    verify_credentials(
        username=username,
        password=password,
        private_key_path=private_key_path,
        raw_private_key=raw_private_key,
        private_key_passphrase=private_key_passphrase,
        o_auth_token=o_auth_token,
    )
    WriteDisposition.VerifyParam(write_disposition)
    CreateDisposition.VerifyParam(create_disposition)

    self.params = WriteToSnowflakeSchema(
        server_name=server_name,
        schema=schema,
        database=database,
        staging_bucket_name=staging_bucket_name,
        storage_integration_name=storage_integration_name,
        create_disposition=create_disposition,
        write_disposition=write_disposition,
        table_schema=table_schema,
        username=username,
        password=password,
        private_key_path=private_key_path,
        raw_private_key=raw_private_key,
        private_key_passphrase=private_key_passphrase,
        o_auth_token=o_auth_token,
        table=table,
        query=query,
        role=role,
        warehouse=warehouse,
    )
    self.user_data_mapper = user_data_mapper
    self.expansion_service = expansion_service or default_io_expansion_service()

  def expand(self, pbegin):
    return (
        pbegin
        # Each element is converted to a list of byte strings before being
        # handed to the external (Java) transform.
        | 'User data mapper' >> beam.Map(
            self.user_data_mapper).with_output_types(List[bytes])
        | ExternalTransform(
            self.URN,
            NamedTupleBasedPayloadBuilder(self.params),
            self.expansion_service))
class CreateDisposition:
  """
  Enum class for possible values of create dispositions:

  CREATE_IF_NEEDED: default behaviour. The write operation checks whether
  the specified target table exists; if it does not, the write operation
  attempts to create the table. Specify the schema for the target table
  using the table_schema parameter.

  CREATE_NEVER: The write operation fails if the target table does not exist.
  """
  CREATE_IF_NEEDED = 'CREATE_IF_NEEDED'
  CREATE_NEVER = 'CREATE_NEVER'

  @staticmethod
  def VerifyParam(field):
    """Raises RuntimeError when field is a non-empty value that is not one
    of the declared create dispositions.

    An explicit whitelist is used instead of hasattr(): the previous
    hasattr-based check accepted any class attribute name (e.g.
    'VerifyParam' or '__doc__') as a valid disposition.
    """
    valid = (
        CreateDisposition.CREATE_IF_NEEDED, CreateDisposition.CREATE_NEVER)
    if field and field not in valid:
      raise RuntimeError(
          'Create disposition has to be one of the following values: '
          'CREATE_IF_NEEDED, CREATE_NEVER. Got: {}'.format(field))
class WriteDisposition:
  """
  Enum class for possible values of write dispositions:

  APPEND: Default behaviour. Written data is added to the existing rows
  in the table,

  EMPTY: The target table must be empty; otherwise, the write operation fails,

  TRUNCATE: The write operation deletes all rows from the target table
  before writing to it.
  """
  APPEND = 'APPEND'
  EMPTY = 'EMPTY'
  TRUNCATE = 'TRUNCATE'

  @staticmethod
  def VerifyParam(field):
    """Raises RuntimeError when field is a non-empty value that is not one
    of the declared write dispositions.

    An explicit whitelist is used instead of hasattr(): the previous
    hasattr-based check accepted any class attribute name (e.g.
    'VerifyParam' or '__doc__') as a valid disposition.
    """
    valid = (
        WriteDisposition.APPEND,
        WriteDisposition.EMPTY,
        WriteDisposition.TRUNCATE)
    if field and field not in valid:
      raise RuntimeError(
          'Write disposition has to be one of the following values: '
          'APPEND, EMPTY, TRUNCATE. Got: {}'.format(field))
def verify_credentials(
    username,
    password,
    private_key_path,
    raw_private_key,
    private_key_passphrase,
    o_auth_token):
  """Checks that at least one complete authentication method was supplied.

  Accepted methods: an OAuth token, a username/password pair, or a
  username plus an (encrypted) private key and its passphrase.

  Raises:
    RuntimeError: when none of the methods is fully specified.
  """
  has_oauth = bool(o_auth_token)
  has_user_password = bool(username and password)
  has_key_pair = bool(
      username and (private_key_path or raw_private_key) and
      private_key_passphrase)
  if not (has_oauth or has_user_password or has_key_pair):
    raise RuntimeError('Snowflake credentials are not set correctly.')
# pytype: skip-file
from __future__ import absolute_import
import logging
import time
from typing import Mapping
from typing import NamedTuple
from typing import Optional
from past.builtins import unicode
from apache_beam import BeamJarExpansionService
from apache_beam import ExternalTransform
from apache_beam import NamedTupleBasedPayloadBuilder
# Public API of this module: the two cross-language Kinesis transforms and
# the enum-like classes for their string-valued parameters.
__all__ = [
    'WriteToKinesis',
    'ReadDataFromKinesis',
    'InitialPositionInStream',
    'WatermarkPolicy',
]
def default_io_expansion_service():
  """Returns the default expansion service for Kinesis IO: the bundled
  Java Kinesis expansion-service shadow jar, resolved by Gradle target.
  """
  return BeamJarExpansionService(
      'sdks:java:io:kinesis:expansion-service:shadowJar')
# Payload schema for the external Kinesis write transform. Field names and
# order must match the configuration expected by the Java expansion service.
WriteToKinesisSchema = NamedTuple(
    'WriteToKinesisSchema',
    [
        ('stream_name', unicode),
        ('aws_access_key', unicode),
        ('aws_secret_key', unicode),
        ('region', unicode),
        ('partition_key', unicode),
        ('service_endpoint', Optional[unicode]),
        ('verify_certificate', Optional[bool]),
        ('producer_properties', Optional[Mapping[unicode, unicode]]),
    ],
)
class WriteToKinesis(ExternalTransform):
  """
  An external PTransform which writes byte array stream to Amazon Kinesis.

  Experimental; no backwards compatibility guarantees.
  """
  URN = 'beam:external:java:kinesis:write:v1'

  def __init__(
      self,
      stream_name,
      aws_access_key,
      aws_secret_key,
      region,
      partition_key,
      service_endpoint=None,
      verify_certificate=None,
      producer_properties=None,
      expansion_service=None,
  ):
    """
    Initializes a write operation to Kinesis.

    :param stream_name: Kinesis stream name.
    :param aws_access_key: Kinesis access key.
    :param aws_secret_key: Kinesis access key secret.
    :param region: AWS region. Example: 'us-east-1'.
    :param service_endpoint: Kinesis service endpoint
    :param verify_certificate: Enable or disable certificate verification.
        Never set to False on production. True by default.
    :param partition_key: Specify default partition key.
    :param producer_properties: Specify the configuration properties for Kinesis
        Producer Library (KPL) as dictionary.
        Example: {'CollectionMaxCount': '1000', 'ConnectTimeout': '10000'}
    :param expansion_service: The address (host:port) of the ExpansionService.
    """
    super(WriteToKinesis, self).__init__(
        self.URN,
        NamedTupleBasedPayloadBuilder(
            WriteToKinesisSchema(
                stream_name=stream_name,
                aws_access_key=aws_access_key,
                aws_secret_key=aws_secret_key,
                region=region,
                partition_key=partition_key,
                service_endpoint=service_endpoint,
                verify_certificate=verify_certificate,
                producer_properties=producer_properties,
            )),
        expansion_service or default_io_expansion_service(),
    )
# Payload schema for the external Kinesis read transform. Field names and
# order must match the configuration expected by the Java expansion service.
ReadFromKinesisSchema = NamedTuple(
    'ReadFromKinesisSchema',
    [
        ('stream_name', unicode),
        ('aws_access_key', unicode),
        ('aws_secret_key', unicode),
        ('region', unicode),
        ('service_endpoint', Optional[unicode]),
        ('verify_certificate', Optional[bool]),
        ('max_num_records', Optional[int]),
        ('max_read_time', Optional[int]),
        ('initial_position_in_stream', Optional[unicode]),
        ('initial_timestamp_in_stream', Optional[int]),
        ('request_records_limit', Optional[int]),
        ('up_to_date_threshold', Optional[int]),
        ('max_capacity_per_shard', Optional[int]),
        ('watermark_policy', Optional[unicode]),
        ('watermark_idle_duration_threshold', Optional[int]),
        ('rate_limit', Optional[int]),
    ],
)
class ReadDataFromKinesis(ExternalTransform):
  """
  An external PTransform which reads byte array stream from Amazon Kinesis.

  Experimental; no backwards compatibility guarantees.
  """
  URN = 'beam:external:java:kinesis:read_data:v1'

  def __init__(
      self,
      stream_name,
      aws_access_key,
      aws_secret_key,
      region,
      service_endpoint=None,
      verify_certificate=None,
      max_num_records=None,
      max_read_time=None,
      initial_position_in_stream=None,
      initial_timestamp_in_stream=None,
      request_records_limit=None,
      up_to_date_threshold=None,
      max_capacity_per_shard=None,
      watermark_policy=None,
      watermark_idle_duration_threshold=None,
      rate_limit=None,
      expansion_service=None,
  ):
    """
    Initializes a read operation from Kinesis.

    :param stream_name: Kinesis stream name.
    :param aws_access_key: Kinesis access key.
    :param aws_secret_key: Kinesis access key secret.
    :param region: AWS region. Example: 'us-east-1'.
    :param service_endpoint: Kinesis service endpoint
    :param verify_certificate: Enable or disable certificate verification.
        Never set to False on production. True by default.
    :param max_num_records: Specifies to read at most a given number of records.
        Must be greater than 0.
    :param max_read_time: Specifies to read records during x milliseconds.
    :param initial_timestamp_in_stream: Specify reading beginning at the given
        timestamp in milliseconds. Must be in the past.
    :param initial_position_in_stream: Specify reading from some initial
        position in stream. Possible values:
        LATEST - Start after the most recent data record (fetch new data).
        TRIM_HORIZON - Start from the oldest available data record.
        AT_TIMESTAMP - Start from the record at or after the specified
        server-side timestamp.
    :param request_records_limit: Specifies the maximum number of records in
        GetRecordsResult returned by GetRecords call which is limited by 10K
        records. If should be adjusted according to average size of data record
        to prevent shard overloading. More at:
        docs.aws.amazon.com/kinesis/latest/APIReference/API_GetRecords.html
    :param up_to_date_threshold: Specifies how late in milliseconds records
        consumed by this source can be to still be considered on time. Defaults
        to zero.
    :param max_capacity_per_shard: Specifies the maximum number of messages per
        one shard. Defaults to 10'000.
    :param watermark_policy: Specifies the watermark policy. Possible values:
        PROCESSING_TYPE, ARRIVAL_TIME. Defaults to ARRIVAL_TIME.
    :param watermark_idle_duration_threshold: Use only when watermark policy is
        ARRIVAL_TIME. Denotes the duration for which the watermark can be idle.
        Passed in milliseconds.
    :param rate_limit: Sets fixed rate policy for given milliseconds value. By
        default there is no rate limit.
    :param expansion_service: The address (host:port) of the ExpansionService.
    """
    WatermarkPolicy.validate_param(watermark_policy)
    InitialPositionInStream.validate_param(initial_position_in_stream)

    # NOTE(review): these asserts are stripped under `python -O`; kept as-is
    # to preserve the exception type (AssertionError) callers may rely on.
    if watermark_idle_duration_threshold:
      assert WatermarkPolicy.ARRIVAL_TIME == watermark_policy
    if request_records_limit:
      assert 0 < request_records_limit <= 10000

    initial_timestamp_in_stream = int(
        initial_timestamp_in_stream) if initial_timestamp_in_stream else None
    # initial_timestamp_in_stream is expressed in milliseconds while
    # time.time() returns seconds, so scale before comparing. Warn when the
    # timestamp lies in the future (the API requires it to be in the past).
    # The previous check compared milliseconds against seconds with an
    # inverted condition, so it warned on valid past timestamps and stayed
    # silent on future ones.
    if (initial_timestamp_in_stream is not None and
        initial_timestamp_in_stream > time.time() * 1000):
      logging.warning('Provided timestamp is not in the past.')

    super(ReadDataFromKinesis, self).__init__(
        self.URN,
        NamedTupleBasedPayloadBuilder(
            ReadFromKinesisSchema(
                stream_name=stream_name,
                aws_access_key=aws_access_key,
                aws_secret_key=aws_secret_key,
                region=region,
                service_endpoint=service_endpoint,
                verify_certificate=verify_certificate,
                max_num_records=max_num_records,
                max_read_time=max_read_time,
                initial_position_in_stream=initial_position_in_stream,
                initial_timestamp_in_stream=initial_timestamp_in_stream,
                request_records_limit=request_records_limit,
                up_to_date_threshold=up_to_date_threshold,
                max_capacity_per_shard=max_capacity_per_shard,
                watermark_policy=watermark_policy,
                watermark_idle_duration_threshold=
                watermark_idle_duration_threshold,
                rate_limit=rate_limit,
            )),
        expansion_service or default_io_expansion_service(),
    )
class InitialPositionInStream:
  """Valid initial read positions for a Kinesis stream."""
  LATEST = 'LATEST'
  TRIM_HORIZON = 'TRIM_HORIZON'
  AT_TIMESTAMP = 'AT_TIMESTAMP'

  @staticmethod
  def validate_param(param):
    """Raises RuntimeError when param is a non-empty value that is not a
    declared initial position.

    An explicit whitelist is used instead of hasattr(): the previous
    hasattr-based check accepted any class attribute name (e.g.
    'validate_param') as a valid position.
    """
    valid = (
        InitialPositionInStream.LATEST,
        InitialPositionInStream.TRIM_HORIZON,
        InitialPositionInStream.AT_TIMESTAMP)
    if param and param not in valid:
      raise RuntimeError('Invalid initial position in stream: {}'.format(param))
class WatermarkPolicy:
  """Valid watermark policies for the Kinesis read transform."""
  PROCESSING_TYPE = 'PROCESSING_TYPE'
  ARRIVAL_TIME = 'ARRIVAL_TIME'

  @staticmethod
  def validate_param(param):
    """Raises RuntimeError when param is a non-empty value that is not a
    declared watermark policy.

    An explicit whitelist is used instead of hasattr(): the previous
    hasattr-based check accepted any class attribute name (e.g.
    'validate_param') as a valid policy.
    """
    valid = (WatermarkPolicy.PROCESSING_TYPE, WatermarkPolicy.ARRIVAL_TIME)
    if param and param not in valid:
      raise RuntimeError('Invalid watermark policy: {}'.format(param))
# pytype: skip-file
from __future__ import absolute_import
import abc
import io
import os
from builtins import object
from future.utils import with_metaclass
# Public stream adapters exposed by this module.
__all__ = [
    'Downloader',
    'Uploader',
    'DownloaderStream',
    'UploaderStream',
    'PipeStream'
]
# with_metaclass is used for Python 2/3 compatible abstract base classes.
class Downloader(with_metaclass(abc.ABCMeta, object)):  # type: ignore[misc]
  """Download interface for a single file.

  Implementations should support random access reads.
  """
  @property
  @abc.abstractmethod
  def size(self):
    """Size of file to download."""

  @abc.abstractmethod
  def get_range(self, start, end):
    """Retrieve a given byte range [start, end) from this download.

    Range must be in this form:
      0 <= start < end: Fetch the bytes from start to end.

    Args:
      start: (int) Initial byte offset.
      end: (int) Final byte offset, exclusive.

    Returns:
      (string) A buffer containing the requested data.
    """
# with_metaclass is used for Python 2/3 compatible abstract base classes.
class Uploader(with_metaclass(abc.ABCMeta, object)):  # type: ignore[misc]
  """Upload interface for a single file."""
  @abc.abstractmethod
  def put(self, data):
    """Write data to file sequentially.

    Args:
      data: (memoryview) Data to write.
    """

  @abc.abstractmethod
  def finish(self):
    """Signal to upload any remaining data and close the file.

    File should be fully written upon return from this method.

    Raises:
      Any error encountered during the upload.
    """
class DownloaderStream(io.RawIOBase):
  """Exposes a Downloader as a readable, seekable raw byte stream."""

  def __init__(
      self, downloader, read_buffer_size=io.DEFAULT_BUFFER_SIZE, mode='rb'):
    """Initializes the stream.

    Args:
      downloader: (Downloader) Filesystem dependent implementation.
      read_buffer_size: (int) Buffer size to use during read operations.
      mode: (string) Python mode attribute for this stream.
    """
    self._downloader = downloader
    self.mode = mode
    self._position = 0
    self._reader_buffer_size = read_buffer_size

  def readinto(self, b):
    """Fill b with up to len(b) bytes read at the current offset.

    Args:
      b: (bytearray/memoryview) Buffer to read into.

    Returns:
      Number of bytes read (0 at EOF).
    """
    self._checkClosed()
    total_size = self._downloader.size
    if self._position >= total_size:
      return 0
    chunk = self._downloader.get_range(
        self._position, min(self._position + len(b), total_size))
    read_count = len(chunk)
    b[:read_count] = chunk
    self._position += read_count
    return read_count

  def seek(self, offset, whence=os.SEEK_SET):
    """Set the stream's current offset.

    Note if the new offset is out of bound, it is adjusted to either 0 or EOF.

    Args:
      offset: seek offset as number.
      whence: seek mode. Supported modes are os.SEEK_SET (absolute seek),
        os.SEEK_CUR (seek relative to the current position), and os.SEEK_END
        (seek relative to the end, offset should be negative).

    Raises:
      ``ValueError``: When this stream is closed or if whence is invalid.
    """
    self._checkClosed()
    if whence == os.SEEK_SET:
      target = offset
    elif whence == os.SEEK_CUR:
      target = self._position + offset
    elif whence == os.SEEK_END:
      target = self._downloader.size + offset
    else:
      raise ValueError('Whence mode %r is invalid.' % whence)
    # Clamp the new offset into [0, size].
    self._position = max(0, min(target, self._downloader.size))
    return self._position

  def tell(self):
    """Tell the stream's current offset.

    Returns:
      current offset in reading this stream.

    Raises:
      ``ValueError``: When this stream is closed.
    """
    self._checkClosed()
    return self._position

  def seekable(self):
    return True

  def readable(self):
    return True

  def readall(self):
    """Read until EOF, using multiple read() calls."""
    pieces = []
    chunk = self.read(self._reader_buffer_size)
    while chunk:
      pieces.append(chunk)
      chunk = self.read(self._reader_buffer_size)
    return b''.join(pieces)
class UploaderStream(io.RawIOBase):
  """Presents an Uploader as a writable raw byte stream."""

  def __init__(self, uploader, mode='wb'):
    """Initializes the stream.

    Args:
      uploader: (Uploader) Filesystem dependent implementation.
      mode: (string) Python mode attribute for this stream.
    """
    self._uploader = uploader
    self.mode = mode
    self._position = 0

  def tell(self):
    """Return the number of bytes written so far."""
    return self._position

  def write(self, b):
    """Forward the buffer to the uploader sequentially.

    Args:
      b: (memoryview) Buffer with data to write.

    Returns:
      Number of bytes written (always len(b)).
    """
    self._checkClosed()
    self._uploader.put(b)
    written = len(b)
    self._position += written
    return written

  def close(self):
    """Complete the upload and close this stream (idempotent).

    Raises:
      Any error encountered by the uploader.
    """
    if not self.closed:
      self._uploader.finish()
    super(UploaderStream, self).close()

  def writable(self):
    return True
class PipeStream(object):
  """A class that presents a pipe connection as a readable stream.

  Not thread-safe.

  Remembers the last ``size`` bytes read and allows rewinding the stream by
  that amount exactly. See BEAM-6380 for more.
  """
  def __init__(self, recv_pipe):
    # recv_pipe: the receiving end of a multiprocessing Connection pair.
    self.conn = recv_pipe
    self.closed = False
    # Absolute offset of the next byte to be returned by read().
    self.position = 0
    # Bytes already received from the pipe but not yet consumed by read().
    self.remaining = b''
    # Data and position of last block streamed. Allows limited seeking
    # backwards of stream.
    self.last_block_position = None
    self.last_block = b''

  def read(self, size):
    """Read data from the wrapped pipe connection.

    Args:
      size: Number of bytes to read. Actual number of bytes read is always
        equal to size unless EOF is reached.

    Returns:
      data read as str.
    """
    data_list = []
    bytes_read = 0
    last_block_position = self.position
    while bytes_read < size:
      # Serve as much as possible from the locally buffered bytes before
      # blocking on the pipe again.
      bytes_from_remaining = min(size - bytes_read, len(self.remaining))
      data_list.append(self.remaining[0:bytes_from_remaining])
      self.remaining = self.remaining[bytes_from_remaining:]
      self.position += bytes_from_remaining
      bytes_read += bytes_from_remaining
      if not self.remaining:
        try:
          self.remaining = self.conn.recv_bytes()
        except EOFError:
          # Sender closed the pipe: return whatever was gathered (possibly
          # fewer than `size` bytes).
          break
    last_block = b''.join(data_list)
    if last_block:
      # Remember this block so a later seek() can rewind by exactly one
      # block (see the seek() rewind branch below).
      self.last_block_position = last_block_position
      self.last_block = last_block
    return last_block

  def tell(self):
    """Tell the file's current offset.

    Returns:
      current offset in reading this file.

    Raises:
      ``ValueError``: When this stream is closed.
    """
    self._check_open()
    return self.position

  def seek(self, offset, whence=os.SEEK_SET):
    # The apitools library used by the gcsio.Uploader class insists on seeking
    # to the end of a stream to do a check before completing an upload, so we
    # must have this no-op method here in that case.
    if whence == os.SEEK_END and offset == 0:
      return
    elif whence == os.SEEK_SET:
      if offset == self.position:
        return
      elif offset == self.last_block_position and self.last_block:
        # Rewind exactly one block: push the remembered block back onto the
        # front of the unconsumed buffer.
        self.position = offset
        self.remaining = b''.join([self.last_block, self.remaining])
        self.last_block = b''
        return
    # Any other seek target is not supported by this one-block rewind scheme.
    raise NotImplementedError(
        'offset: %s, whence: %s, position: %s, last: %s' %
        (offset, whence, self.position, self.last_block_position))

  def _check_open(self):
    # Internal guard used by tell(); mirrors io's _checkClosed behaviour.
    if self.closed:
      raise IOError('Stream is closed.')
# pytype: skip-file
from __future__ import absolute_import
import io
import os
import shutil
from builtins import zip
from typing import BinaryIO # pylint: disable=unused-import
from apache_beam.io.filesystem import BeamIOError
from apache_beam.io.filesystem import CompressedFile
from apache_beam.io.filesystem import CompressionTypes
from apache_beam.io.filesystem import FileMetadata
from apache_beam.io.filesystem import FileSystem
__all__ = ['LocalFileSystem']
class LocalFileSystem(FileSystem):
  """A Local ``FileSystem`` implementation for accessing files on disk.
  """
  @classmethod
  def scheme(cls):
    """URI scheme for the FileSystem

    Local paths carry no scheme, so ``None`` is returned.
    """
    return None

  def join(self, basepath, *paths):
    """Join two or more pathname components for the filesystem

    Args:
      basepath: string path of the first component of the path
      paths: path components to be added

    Returns: full path after combining all the passed components
    """
    return os.path.join(basepath, *paths)

  def split(self, path):
    """Splits the given path into two parts.

    Splits the path into a pair (head, tail) such that tail contains the last
    component of the path and head contains everything up to that.

    Args:
      path: path as a string

    Returns:
      a pair of path components as strings.
    """
    # Normalize to an absolute path first so that head is never empty.
    return os.path.split(os.path.abspath(path))

  def mkdirs(self, path):
    """Recursively create directories for the provided path.

    Args:
      path: string path of the directory structure that should be created

    Raises:
      IOError: if leaf directory already exists.
    """
    try:
      os.makedirs(path)
    except OSError as err:
      raise IOError(err)

  def has_dirs(self):
    """Whether this FileSystem supports directories."""
    return True

  def _url_dirname(self, url_or_path):
    """Pass through to os.path.dirname.

    This version uses os.path instead of posixpath to be compatible with the
    host OS.

    Args:
      url_or_path: A string in the form of /some/path.
    """
    return os.path.dirname(url_or_path)

  def _list(self, dir_or_prefix):
    """List files in a location.

    Note: despite the base-class contract, this local implementation walks
    the directory tree recursively (via ``os.walk``) and yields every file
    found below ``dir_or_prefix``.

    Args:
      dir_or_prefix: (string) A directory or location prefix (for filesystems
        that don't have directories).

    Returns:
      Generator of ``FileMetadata`` objects.

    Raises:
      ``BeamIOError``: if listing fails, but not if no files were found.
    """
    if not self.exists(dir_or_prefix):
      return

    def list_files(root):
      for dirpath, _, files in os.walk(root):
        for filename in files:
          yield self.join(dirpath, filename)

    try:
      for f in list_files(dir_or_prefix):
        try:
          yield FileMetadata(f, os.path.getsize(f))
        except OSError:
          # Files may disappear, such as when listing /tmp.
          pass
    except Exception as e:  # pylint: disable=broad-except
      raise BeamIOError("List operation failed", {dir_or_prefix: e})

  def _path_open(
      self,
      path,
      mode,
      mime_type='application/octet-stream',
      compression_type=CompressionTypes.AUTO):
    """Helper functions to open a file in the provided mode.

    Wraps the raw file in a ``CompressedFile`` unless the (possibly
    auto-detected) compression type is UNCOMPRESSED.
    """
    compression_type = FileSystem._get_compression_type(path, compression_type)
    raw_file = io.open(path, mode)
    if compression_type == CompressionTypes.UNCOMPRESSED:
      return raw_file
    else:
      return CompressedFile(raw_file, compression_type=compression_type)

  def create(
      self,
      path,
      mime_type='application/octet-stream',
      compression_type=CompressionTypes.AUTO):
    # type: (...) -> BinaryIO

    """Returns a write channel for the given file path.

    Args:
      path: string path of the file object to be written to the system
      mime_type: MIME type to specify the type of content in the file object
      compression_type: Type of compression to be used for this object

    Returns: file handle with a close function for the user to use
    """
    dirname = os.path.dirname(path)
    # Only try to create parents when the path actually has a directory
    # component; a bare filename (dirname == '') needs no setup.
    if dirname:
      # Tolerate a concurrent creator instead of the racy
      # exists()-then-makedirs() check-then-act pattern.
      try:
        os.makedirs(dirname)
      except FileExistsError:
        pass
    return self._path_open(path, 'wb', mime_type, compression_type)

  def open(
      self,
      path,
      mime_type='application/octet-stream',
      compression_type=CompressionTypes.AUTO):
    # type: (...) -> BinaryIO

    """Returns a read channel for the given file path.

    Args:
      path: string path of the file object to be written to the system
      mime_type: MIME type to specify the type of content in the file object
      compression_type: Type of compression to be used for this object

    Returns: file handle with a close function for the user to use
    """
    return self._path_open(path, 'rb', mime_type, compression_type)

  def copy(self, source_file_names, destination_file_names):
    """Recursively copy the file tree from the source to the destination

    Args:
      source_file_names: list of source file objects that needs to be copied
      destination_file_names: list of destination of the new object

    Raises:
      ``BeamIOError``: if any of the copy operations fail
    """
    err_msg = (
        "source_file_names and destination_file_names should "
        "be equal in length")
    assert len(source_file_names) == len(destination_file_names), err_msg

    def _copy_path(source, destination):
      """Recursively copy the file tree from the source to the destination
      """
      try:
        # Remove any pre-existing destination so the copy is a clean
        # overwrite rather than a merge.
        if os.path.exists(destination):
          if os.path.isdir(destination):
            shutil.rmtree(destination)
          else:
            os.remove(destination)
        if os.path.isdir(source):
          shutil.copytree(source, destination)
        else:
          shutil.copy2(source, destination)
      except OSError as err:
        raise IOError(err)

    exceptions = {}
    for source, destination in zip(source_file_names, destination_file_names):
      try:
        _copy_path(source, destination)
      except Exception as e:  # pylint: disable=broad-except
        exceptions[(source, destination)] = e

    if exceptions:
      raise BeamIOError("Copy operation failed", exceptions)

  def rename(self, source_file_names, destination_file_names):
    """Rename the files at the source list to the destination list.

    Source and destination lists should be of the same size.

    Args:
      source_file_names: List of file paths that need to be moved
      destination_file_names: List of destination_file_names for the files

    Raises:
      ``BeamIOError``: if any of the rename operations fail
    """
    err_msg = (
        "source_file_names and destination_file_names should "
        "be equal in length")
    assert len(source_file_names) == len(destination_file_names), err_msg

    def _rename_file(source, destination):
      """Rename a single file object"""
      try:
        os.rename(source, destination)
      except OSError as err:
        raise IOError(err)

    exceptions = {}
    for source, destination in zip(source_file_names, destination_file_names):
      try:
        _rename_file(source, destination)
      except Exception as e:  # pylint: disable=broad-except
        exceptions[(source, destination)] = e

    if exceptions:
      raise BeamIOError("Rename operation failed", exceptions)

  def exists(self, path):
    """Check if the provided path exists on the FileSystem.

    Args:
      path: string path that needs to be checked.

    Returns: boolean flag indicating if path exists
    """
    return os.path.exists(path)

  def size(self, path):
    """Get size of path on the FileSystem.

    Args:
      path: string path in question.

    Returns: int size of path according to the FileSystem.

    Raises:
      ``BeamIOError``: if path doesn't exist.
    """
    try:
      return os.path.getsize(path)
    except Exception as e:  # pylint: disable=broad-except
      raise BeamIOError("Size operation failed", {path: e})

  def last_updated(self, path):
    """Get UNIX Epoch time in seconds on the FileSystem.

    Args:
      path: string path of file.

    Returns: float UNIX Epoch time

    Raises:
      ``BeamIOError``: if path doesn't exist.
    """
    if not self.exists(path):
      raise BeamIOError('Path does not exist: %s' % path)
    return os.path.getmtime(path)

  def checksum(self, path):
    """Fetch checksum metadata of a file on the
    :class:`~apache_beam.io.filesystem.FileSystem`.

    The local filesystem has no cheap content hash, so the file size is
    used as the checksum surrogate.

    Args:
      path: string path of a file.

    Returns: string containing file size.

    Raises:
      ``BeamIOError``: if path isn't a file or doesn't exist.
    """
    if not self.exists(path):
      raise BeamIOError('Path does not exist: %s' % path)
    return str(os.path.getsize(path))

  def delete(self, paths):
    """Deletes files or directories at the provided paths.
    Directories will be deleted recursively.

    Args:
      paths: list of paths that give the file objects to be deleted

    Raises:
      ``BeamIOError``: if any of the delete operations fail
    """
    def _delete_path(path):
      """Recursively delete the file or directory at the provided path.
      """
      try:
        if os.path.isdir(path):
          shutil.rmtree(path)
        else:
          os.remove(path)
      except OSError as err:
        raise IOError(err)

    exceptions = {}

    def try_delete(path):
      try:
        _delete_path(path)
      except Exception as e:  # pylint: disable=broad-except
        exceptions[path] = e

    for match_result in self.match(paths):
      metadata_list = match_result.metadata_list

      if not metadata_list:
        # Surface "nothing matched" as a per-pattern error instead of
        # silently succeeding.
        exceptions[match_result.pattern] = \
          IOError('No files found to delete under: %s' % match_result.pattern)

      for metadata in match_result.metadata_list:
        try_delete(metadata.path)

    if exceptions:
      raise BeamIOError("Delete operation failed", exceptions)
# pytype: skip-file
from __future__ import absolute_import
from __future__ import division
import codecs
import logging
import math
import threading
from builtins import zip
from past.builtins import long
from apache_beam.io import iobase
__all__ = [
'OffsetRangeTracker',
'LexicographicKeyRangeTracker',
'OrderedPositionRangeTracker',
'UnsplittableRangeTracker'
]
_LOGGER = logging.getLogger(__name__)
class OffsetRangeTracker(iobase.RangeTracker):
  """A 'RangeTracker' for non-negative positions of type 'long'."""

  # Offset corresponding to infinity. This can only be used as the upper-bound
  # of a range, and indicates reading all of the records until the end without
  # specifying exactly what the end is.
  # Infinite ranges cannot be split because it is impossible to estimate
  # progress within them.
  OFFSET_INFINITY = float('inf')

  def __init__(self, start, end):
    super(OffsetRangeTracker, self).__init__()

    if start is None:
      raise ValueError('Start offset must not be \'None\'')
    if end is None:
      raise ValueError('End offset must not be \'None\'')
    assert isinstance(start, (int, long))
    if end != self.OFFSET_INFINITY:
      assert isinstance(end, (int, long))

    assert start <= end

    self._start_offset = start
    self._stop_offset = end

    # -1 is the sentinel for "no record seen / attempted / split yet".
    self._last_record_start = -1
    self._last_attempted_record_start = -1
    self._offset_of_last_split_point = -1
    # Guards all mutable tracker state; readers may run concurrently with
    # a splitter calling try_split().
    self._lock = threading.Lock()

    self._split_points_seen = 0
    self._split_points_unclaimed_callback = None

  def start_position(self):
    """Returns the starting offset of the tracked range."""
    return self._start_offset

  def stop_position(self):
    """Returns the (possibly dynamically updated) end offset of the range."""
    return self._stop_offset

  @property
  def last_record_start(self):
    # Offset of the last successfully returned record; -1 if none yet.
    return self._last_record_start

  @property
  def last_attempted_record_start(self):
    """Return current value of last_attempted_record_start.

    last_attempted_record_start records a valid position that tried to be
    claimed by calling try_claim(). This value is only updated by `try_claim()`
    no matter `try_claim()` returns `True` or `False`.
    """
    return self._last_attempted_record_start

  def _validate_record_start(self, record_start, split_point):
    # This function must only be called under the lock self.lock.
    if not self._lock.locked():
      raise ValueError(
          'This function must only be called under the lock self.lock.')

    # Records must be returned in non-decreasing offset order.
    if record_start < self._last_record_start:
      raise ValueError(
          'Trying to return a record [starting at %d] which is before the '
          'last-returned record [starting at %d]' %
          (record_start, self._last_record_start))

    if (split_point and self._offset_of_last_split_point != -1 and
        record_start == self._offset_of_last_split_point):
      raise ValueError(
          'Record at a split point has same offset as the previous split '
          'point: %d' % record_start)

    if not split_point and self._last_record_start == -1:
      raise ValueError(
          'The first record [starting at %d] must be at a split point' %
          record_start)

  def try_claim(self, record_start):
    """Atomically tries to claim the record starting at ``record_start``.

    Returns False once the claimed offset reaches the stop position.
    """
    with self._lock:
      # Attempted claim should be monotonous.
      if record_start <= self._last_attempted_record_start:
        raise ValueError(
            'Trying to return a record [starting at %d] which is not greater'
            'than the last-attempted record [starting at %d]' %
            (record_start, self._last_attempted_record_start))
      self._validate_record_start(record_start, True)

      # Updated regardless of success, per the last_attempted_record_start
      # contract above.
      self._last_attempted_record_start = record_start
      if record_start >= self.stop_position():
        return False
      self._offset_of_last_split_point = record_start
      self._last_record_start = record_start
      self._split_points_seen += 1
      return True

  def set_current_position(self, record_start):
    """Records a non-split-point record position without claiming it."""
    with self._lock:
      self._validate_record_start(record_start, False)
      self._last_record_start = record_start

  def try_split(self, split_offset):
    """Tries to move the stop offset down to ``split_offset``.

    Returns the (new_stop, fraction_of_original_range) pair on success, or
    None (implicitly) when the split is refused.
    """
    assert isinstance(split_offset, (int, long))
    with self._lock:
      if self._stop_offset == OffsetRangeTracker.OFFSET_INFINITY:
        _LOGGER.debug(
            'refusing to split %r at %d: stop position unspecified',
            self,
            split_offset)
        return
      if self._last_record_start == -1:
        _LOGGER.debug(
            'Refusing to split %r at %d: unstarted', self, split_offset)
        return

      if split_offset <= self._last_record_start:
        _LOGGER.debug(
            'Refusing to split %r at %d: already past proposed stop offset',
            self,
            split_offset)
        return
      if (split_offset < self.start_position() or
          split_offset >= self.stop_position()):
        _LOGGER.debug(
            'Refusing to split %r at %d: proposed split position out of range',
            self,
            split_offset)
        return

      _LOGGER.debug('Agreeing to split %r at %d', self, split_offset)

      # Fraction of the *current* range that the retained part represents.
      split_fraction = (
          float(split_offset - self._start_offset) /
          (self._stop_offset - self._start_offset))
      self._stop_offset = split_offset

      return self._stop_offset, split_fraction

  def fraction_consumed(self):
    """Returns progress through the range as a fraction in [0, 1]."""
    with self._lock:
      # self.last_record_start may become larger than self.end_offset when
      # reading the records since any record that starts before the first 'split
      # point' at or after the defined 'stop offset' is considered to be within
      # the range of the OffsetRangeTracker. Hence fraction could be > 1.
      # self.last_record_start is initialized to -1, hence fraction may be < 0.
      # Bounding to the range [0, 1].
      return self.position_to_fraction(
          self._last_record_start, self.start_position(), self.stop_position())

  def position_to_fraction(self, pos, start, stop):
    """Maps an offset to its clamped fractional position within [start, stop)."""
    fraction = 1.0 * (pos - start) / (stop - start) if start != stop else 0.0
    return max(0.0, min(1.0, fraction))

  def position_at_fraction(self, fraction):
    """Maps a fraction in [0, 1] back to an integer offset (rounded up)."""
    if self.stop_position() == OffsetRangeTracker.OFFSET_INFINITY:
      raise Exception(
          'get_position_for_fraction_consumed is not applicable for an '
          'unbounded range')
    return int(
        math.ceil(
            self.start_position() + fraction *
            (self.stop_position() - self.start_position())))

  def split_points(self):
    """Returns (split points consumed, split points remaining or UNKNOWN)."""
    with self._lock:
      # The currently claimed split point is not yet "consumed".
      split_points_consumed = (
          0 if self._split_points_seen == 0 else self._split_points_seen - 1)
      split_points_unclaimed = (
          self._split_points_unclaimed_callback(self.stop_position())
          if self._split_points_unclaimed_callback else
          iobase.RangeTracker.SPLIT_POINTS_UNKNOWN)
      split_points_remaining = (
          iobase.RangeTracker.SPLIT_POINTS_UNKNOWN
          if split_points_unclaimed == iobase.RangeTracker.SPLIT_POINTS_UNKNOWN
          else (split_points_unclaimed + 1))

      return (split_points_consumed, split_points_remaining)

  def set_split_points_unclaimed_callback(self, callback):
    """Registers a callback used by split_points() to estimate the remainder."""
    self._split_points_unclaimed_callback = callback
class OrderedPositionRangeTracker(iobase.RangeTracker):
  """
  An abstract base class for range trackers whose positions are comparable.

  Subclasses only need to implement the mapping from position ranges
  to and from the closed interval [0, 1].
  """

  # Unique sentinel distinguishing "nothing claimed yet" from any real
  # position (including None).
  UNSTARTED = object()

  def __init__(self, start_position=None, stop_position=None):
    self._start_position = start_position
    self._stop_position = stop_position
    # Guards _last_claim and _stop_position against concurrent try_split().
    self._lock = threading.Lock()
    self._last_claim = self.UNSTARTED

  def start_position(self):
    """Returns the (possibly None / unbounded) start position."""
    return self._start_position

  def stop_position(self):
    """Returns the current stop position; may shrink after a try_split()."""
    with self._lock:
      return self._stop_position

  def try_claim(self, position):
    """Atomically claims ``position``; False once at or past the stop."""
    with self._lock:
      # Claims must be monotonically non-decreasing and within the range.
      if self._last_claim is not self.UNSTARTED and position < self._last_claim:
        raise ValueError(
            "Positions must be claimed in order: "
            "claim '%s' attempted after claim '%s'" %
            (position, self._last_claim))
      elif self._start_position is not None and position < self._start_position:
        raise ValueError(
            "Claim '%s' is before start '%s'" %
            (position, self._start_position))
      if self._stop_position is None or position < self._stop_position:
        self._last_claim = position
        return True
      else:
        return False

  def position_at_fraction(self, fraction):
    """Maps a fraction in [0, 1] to a position via the subclass hook."""
    return self.fraction_to_position(
        fraction, self._start_position, self._stop_position)

  def try_split(self, position):
    """Tries to shrink the range's stop to ``position``.

    Returns (position, fraction) on success, or None when the reader has
    already claimed at or past the proposed position.
    """
    with self._lock:
      if ((self._stop_position is not None and position >= self._stop_position)
          or (self._start_position is not None and
              position <= self._start_position)):
        raise ValueError(
            "Split at '%s' not in range %s" %
            (position, [self._start_position, self._stop_position]))
      if self._last_claim is self.UNSTARTED or self._last_claim < position:
        # Compute the fraction before mutating the stop position.
        fraction = self.position_to_fraction(
            position, start=self._start_position, end=self._stop_position)
        self._stop_position = position
        return position, fraction
      else:
        return None

  def fraction_consumed(self):
    """Returns progress as a fraction; 0 before the first claim."""
    if self._last_claim is self.UNSTARTED:
      return 0
    else:
      return self.position_to_fraction(
          self._last_claim, self._start_position, self._stop_position)

  def fraction_to_position(self, fraction, start, end):
    """
    Converts a fraction between 0 and 1 to a position between start and end.
    """
    raise NotImplementedError
class UnsplittableRangeTracker(iobase.RangeTracker):
  """A RangeTracker that always ignores split requests.

  Wraps another :class:`~apache_beam.io.iobase.RangeTracker` and forwards
  every call to it, except :meth:`.try_split()` which always declines. This
  makes the wrapped tracker effectively unsplittable.
  """
  def __init__(self, range_tracker):
    """Initializes UnsplittableRangeTracker.

    Args:
      range_tracker (~apache_beam.io.iobase.RangeTracker): the tracker that
        receives every delegated call (everything except ``try_split()``).
    """
    assert isinstance(range_tracker, iobase.RangeTracker)
    self._range_tracker = range_tracker

  def try_split(self, position):
    # Splitting is always declined, by design.
    return None

  def split_points(self):
    # An unsplittable range only contains a single split point.
    return (0, 1)

  def start_position(self):
    return self._range_tracker.start_position()

  def stop_position(self):
    return self._range_tracker.stop_position()

  def position_at_fraction(self, fraction):
    return self._range_tracker.position_at_fraction(fraction)

  def try_claim(self, position):
    return self._range_tracker.try_claim(position)

  def set_current_position(self, position):
    self._range_tracker.set_current_position(position)

  def fraction_consumed(self):
    return self._range_tracker.fraction_consumed()

  def set_split_points_unclaimed_callback(self, callback):
    self._range_tracker.set_split_points_unclaimed_callback(callback)
class LexicographicKeyRangeTracker(OrderedPositionRangeTracker):
  """
  A range tracker that tracks progress through a lexicographically
  ordered keyspace of strings.
  """
  @classmethod
  def fraction_to_position(cls, fraction, start=None, end=None):
    """
    Linearly interpolates a key that is lexicographically
    fraction of the way between start and end.
    """
    assert 0 <= fraction <= 1, fraction
    if start is None:
      start = b''
    if fraction == 1:
      return end
    elif fraction == 0:
      return start
    else:
      if not end:
        # With an unbounded end, a run of leading 0xFF bytes in start acts
        # as a fixed prefix that any larger key must share.
        common_prefix_len = len(start) - len(start.lstrip(b'\xFF'))
      else:
        # Length of the shared prefix of start and end.
        for ix, (s, e) in enumerate(zip(start, end)):
          if s != e:
            common_prefix_len = ix
            break
        else:
          common_prefix_len = min(len(start), len(end))
      # Convert the relative precision of fraction (~53 bits) to an absolute
      # precision needed to represent values between start and end distinctly.
      prec = common_prefix_len + int(-math.log(fraction, 256)) + 7
      istart = cls._bytestring_to_int(start, prec)
      iend = cls._bytestring_to_int(end, prec) if end else 1 << (prec * 8)
      ikey = istart + int((iend - istart) * fraction)
      # Could be equal due to rounding.
      # Adjust to ensure we never return the actual start and end
      # unless fraction is exactly 0 or 1.
      if ikey == istart:
        ikey += 1
      elif ikey == iend:
        ikey -= 1
      # Trailing zero bytes are insignificant in lexicographic order.
      return cls._bytestring_from_int(ikey, prec).rstrip(b'\0')

  @classmethod
  def position_to_fraction(cls, key, start=None, end=None):
    """
    Returns the fraction of keys in the range [start, end) that
    are less than the given key.
    """
    if not key:
      return 0
    if start is None:
      start = b''
    prec = len(start) + 7
    if key.startswith(start):
      # Higher absolute precision needed for very small values of fixed
      # relative position.
      prec = max(prec, len(key) - len(key[len(start):].strip(b'\0')) + 7)
    istart = cls._bytestring_to_int(start, prec)
    ikey = cls._bytestring_to_int(key, prec)
    # An unbounded end behaves as the smallest key longer than prec bytes.
    iend = cls._bytestring_to_int(end, prec) if end else 1 << (prec * 8)
    return float(ikey - istart) / (iend - istart)

  @staticmethod
  def _bytestring_to_int(s, prec):
    """
    Returns int(256**prec * f) where f is the fraction
    represented by interpreting '.' + s as a base-256
    floating point number.
    """
    if not s:
      return 0
    elif len(s) < prec:
      # Right-pad with zero bytes to exactly prec digits.
      s += b'\0' * (prec - len(s))
    else:
      s = s[:prec]
    return int(codecs.encode(s, 'hex'), 16)

  @staticmethod
  def _bytestring_from_int(i, prec):
    """
    Inverse of _bytestring_to_int.
    """
    h = '%x' % i
    # Left-pad the hex form so the decoded string is exactly prec bytes.
    return codecs.decode('0' * (2 * prec - len(h)) + h, 'hex')
# pytype: skip-file
from __future__ import absolute_import
import errno
import io
import logging
import os
import re
import tempfile
import time
from builtins import object
from apache_beam.io.filesystemio import Downloader
from apache_beam.io.filesystemio import DownloaderStream
from apache_beam.io.filesystemio import Uploader
from apache_beam.io.filesystemio import UploaderStream
from apache_beam.utils import retry
_LOGGER = logging.getLogger(__name__)
try:
# pylint: disable=wrong-import-order, wrong-import-position
# pylint: disable=ungrouped-imports
from azure.core.exceptions import ResourceNotFoundError
from azure.storage.blob import (
BlobServiceClient,
ContentSettings,
)
AZURE_DEPS_INSTALLED = True
except ImportError:
AZURE_DEPS_INSTALLED = False
DEFAULT_READ_BUFFER_SIZE = 16 * 1024 * 1024
MAX_BATCH_OPERATION_SIZE = 100
def parse_azfs_path(azfs_path, blob_optional=False, get_account=False):
  """Return the storage account, the container and
  blob names of the given azfs:// path.

  Args:
    azfs_path: path of the form azfs://<storage-account>/<container>/<blob>.
    blob_optional: when True, an empty blob component is accepted.
    get_account: when True, the storage account is included in the result.

  Returns:
    (account, container, blob) when get_account is True, otherwise
    (container, blob).

  Raises:
    ValueError: when the path does not match the expected format.
  """
  match = re.match(
      '^azfs://([a-z0-9]{3,24})/([a-z0-9](?![a-z0-9-]*--[a-z0-9-]*)'
      '[a-z0-9-]{1,61}[a-z0-9])/(.*)$',
      azfs_path)
  if match is None or (match.group(3) == '' and not blob_optional):
    raise ValueError(
        'Azure Blob Storage path must be in the form '
        'azfs://<storage-account>/<container>/<path>.')
  account, container, blob = match.group(1), match.group(2), match.group(3)
  if get_account:
    return account, container, blob
  return container, blob
def get_azfs_url(storage_account, container, blob=''):
  """Returns the url in the form of
  https://account.blob.core.windows.net/container/blob-name
  """
  parts = ['https://', storage_account, '.blob.core.windows.net/',
           container, '/', blob]
  return ''.join(parts)
class Blob():
  """A Blob in Azure Blob Storage.

  A plain value container for the metadata of a single blob.
  """
  def __init__(self, etag, name, last_updated, size, mime_type):
    """Stores the metadata fields describing one blob."""
    self.etag = etag
    self.name = name
    self.last_updated = last_updated
    self.size = size
    self.mime_type = mime_type
class BlobStorageIOError(IOError, retry.PermanentException):
  """Blob Storage IO error that should not be retried."""
  pass
class BlobStorageError(Exception):
  """Blob Storage client error.

  Attributes:
    message: (str or None) Human-readable error description.
    code: (int or None) Status code associated with the error, if any.
  """
  def __init__(self, message=None, code=None):
    # Forward the message to the Exception base class so that str(e) and
    # rendered tracebacks show the description instead of an empty string.
    if message is None:
      super(BlobStorageError, self).__init__()
    else:
      super(BlobStorageError, self).__init__(message)
    self.message = message
    self.code = code
class BlobStorageIO(object):
"""Azure Blob Storage I/O client."""
def __init__(self, client=None):
  """Initializes the Blob Storage I/O client.

  Args:
    client: An optional preconfigured ``BlobServiceClient``. When omitted,
      one is built from the AZURE_STORAGE_CONNECTION_STRING environment
      variable.

  Raises:
    RuntimeError: If the optional azure dependencies are not installed.
  """
  # Verify the optional azure dependencies before touching any azure
  # symbol; otherwise a missing dependency surfaces as a confusing
  # NameError on BlobServiceClient instead of this RuntimeError.
  if not AZURE_DEPS_INSTALLED:
    raise RuntimeError('Azure dependencies are not installed. Unable to run.')
  if client is None:
    connect_str = os.getenv('AZURE_STORAGE_CONNECTION_STRING')
    self.client = BlobServiceClient.from_connection_string(connect_str)
  else:
    self.client = client
def open(
    self,
    filename,
    mode='r',
    read_buffer_size=DEFAULT_READ_BUFFER_SIZE,
    mime_type='application/octet-stream'):
  """Open an Azure Blob Storage file path for reading or writing.

  Args:
    filename (str): Azure Blob Storage file path in the form
      ``azfs://<storage-account>/<container>/<path>``.
    mode (str): ``'r'`` for reading or ``'w'`` for writing.
    read_buffer_size (int): Buffer size to use during read operations.
    mime_type (str): Mime type to set for write operations.

  Returns:
    Azure Blob Storage file object.

  Raises:
    ValueError: Invalid open file mode.
  """
  if mode in ('r', 'rb'):
    downloader = BlobStorageDownloader(
        self.client, filename, buffer_size=read_buffer_size)
    raw_stream = DownloaderStream(
        downloader, read_buffer_size=read_buffer_size, mode=mode)
    return io.BufferedReader(raw_stream, buffer_size=read_buffer_size)
  if mode in ('w', 'wb'):
    uploader = BlobStorageUploader(self.client, filename, mime_type)
    return io.BufferedWriter(
        UploaderStream(uploader, mode=mode), buffer_size=128 * 1024)
  raise ValueError('Invalid file open mode: %s.' % mode)
@retry.with_exponential_backoff(
    retry_filter=retry.retry_on_beam_io_error_filter)
def copy(self, src, dest):
  """Copies a single Azure Blob Storage blob from src to dest.

  Args:
    src: Blob Storage file path pattern in the form
      azfs://<storage-account>/<container>/[name].
    dest: Blob Storage file path pattern in the form
      azfs://<storage-account>/<container>/[name].

  Raises:
    TimeoutError: on timeout.
  """
  src_storage_account, src_container, src_blob = parse_azfs_path(
      src, get_account=True)
  dest_container, dest_blob = parse_azfs_path(dest)

  source_blob = get_azfs_url(src_storage_account, src_container, src_blob)
  copied_blob = self.client.get_blob_client(dest_container, dest_blob)

  try:
    # Server-side copy; only initiating the copy is awaited here.
    copied_blob.start_copy_from_url(source_blob)
  except ResourceNotFoundError as e:
    # Translate the azure-core error into the filesystem-neutral
    # BlobStorageError so retry filters and callers see one type.
    message = e.reason
    code = e.status_code
    raise BlobStorageError(message, code)
# We intentionally do not decorate this method with a retry, since the
# underlying copy operation is already an idempotent operation protected
# by retry decorators.
def copy_tree(self, src, dest):
  """Renames the given Azure Blob storage directory and its contents
  recursively from src to dest.

  Args:
    src: Blob Storage file path pattern in the form
      azfs://<storage-account>/<container>/[name].
    dest: Blob Storage file path pattern in the form
      azfs://<storage-account>/<container>/[name].

  Returns:
    List of tuples of (src, dest, exception) where exception is None if the
    operation succeeded or the relevant exception if the operation failed.
  """
  assert src.endswith('/')
  assert dest.endswith('/')

  results = []
  for entry in self.list_prefix(src):
    # Rebase each blob path from the src prefix onto the dest prefix.
    target = dest + entry[len(src):]
    try:
      self.copy(entry, target)
      error = None
    except BlobStorageError as e:
      error = e
    results.append((entry, target, error))
  return results
# We intentionally do not decorate this method with a retry, since the
# underlying copy operation is already an idempotent operation protected
# by retry decorators.
def copy_paths(self, src_dest_pairs):
  """Copies the given Azure Blob Storage blobs from src to dest. This can
  handle directory or file paths.

  Args:
    src_dest_pairs: List of (src, dest) tuples of
      azfs://<storage-account>/<container>/[name] file paths
      to copy from src to dest.

  Returns:
    List of tuples of (src, dest, exception) in the same order as the
    src_dest_pairs argument, where exception is None if the operation
    succeeded or the relevant exception if the operation failed.
  """
  if not src_dest_pairs:
    return []

  results = []

  for src_path, dest_path in src_dest_pairs:
    # Case 1. They are directories.
    if src_path.endswith('/') and dest_path.endswith('/'):
      try:
        # copy_tree contributes one result tuple per blob under src_path.
        results += self.copy_tree(src_path, dest_path)
      except BlobStorageError as e:
        results.append((src_path, dest_path, e))

    # Case 2. They are individual blobs.
    elif not src_path.endswith('/') and not dest_path.endswith('/'):
      try:
        self.copy(src_path, dest_path)
        results.append((src_path, dest_path, None))
      except BlobStorageError as e:
        results.append((src_path, dest_path, e))

    # Mismatched paths (one directory, one non-directory) get an error.
    else:
      e = BlobStorageError(
          "Unable to copy mismatched paths" +
          "(directory, non-directory): %s, %s" % (src_path, dest_path),
          400)
      results.append((src_path, dest_path, e))

  return results
# We intentionally do not decorate this method with a retry, since the
# underlying copy and delete operations are already idempotent operations
# protected by retry decorators.
def rename(self, src, dest):
  """Renames a single Azure Blob Storage blob.

  Implemented as a copy to the destination followed by a delete of the
  source.

  Args:
    src: Blob Storage file path pattern in the form
      azfs://<storage-account>/<container>/[name].
    dest: Blob Storage file path pattern in the form
      azfs://<storage-account>/<container>/[name].
  """
  self.copy(src, dest)
  self.delete(src)
# We intentionally do not decorate this method with a retry, since the
# underlying copy and delete operations are already idempotent operations
# protected by retry decorators.
def rename_files(self, src_dest_pairs):
  """Renames the given Azure Blob Storage blobs from src to dest.

  Args:
    src_dest_pairs: List of (src, dest) tuples of
      azfs://<storage-account>/<container>/[name]
      file paths to rename from src to dest.

  Returns: List of tuples of (src, dest, exception) in the same order as the
    src_dest_pairs argument, where exception is None if the operation
    succeeded or the relevant exception if the operation failed.
  """
  if not src_dest_pairs:
    return []

  for src, dest in src_dest_pairs:
    if src.endswith('/') or dest.endswith('/'):
      raise ValueError('Unable to rename a directory.')

  # Results from copy operation.
  copy_results = self.copy_paths(src_dest_pairs)
  # Only sources whose copy succeeded may be deleted.
  paths_to_delete = \
      [src for (src, _, error) in copy_results if error is None]
  # Results from delete operation.
  delete_results = self.delete_files(paths_to_delete)

  # Get rename file results (list of tuples).
  results = []

  # Using a dictionary will make the operation faster.
  delete_results_dict = {src: error for (src, error) in delete_results}

  # Merge: a rename fails if either its copy or its delete failed.
  for src, dest, error in copy_results:
    # If there was an error in the copy operation.
    if error is not None:
      results.append((src, dest, error))
    # If there was an error in the delete operation.
    elif delete_results_dict[src] is not None:
      results.append((src, dest, delete_results_dict[src]))
    # If there was no error in the operations.
    else:
      results.append((src, dest, None))

  return results
@retry.with_exponential_backoff(
    retry_filter=retry.retry_on_beam_io_error_filter)
def exists(self, path):
  """Returns whether the given Azure Blob Storage blob exists.

  Args:
    path: Azure Blob Storage file path pattern in the form
      azfs://<storage-account>/<container>/[name].
  """
  container, blob = parse_azfs_path(path)
  blob_client = self.client.get_blob_client(container, blob)
  try:
    blob_client.get_blob_properties()
  except ResourceNotFoundError as e:
    # HTTP 404 indicates that the file did not exist.
    if e.status_code == 404:
      return False
    # We re-raise all other exceptions.
    raise
  return True
@retry.with_exponential_backoff(
retry_filter=retry.retry_on_beam_io_error_filter)
def size(self, path):
"""Returns the size of a single Blob Storage blob.
This method does not perform glob expansion. Hence the
given path must be for a single Blob Storage blob.
Returns: size of the Blob Storage blob in bytes.
"""
container, blob = parse_azfs_path(path)
blob_to_check = self.client.get_blob_client(container, blob)
try:
properties = blob_to_check.get_blob_properties()
except ResourceNotFoundError as e:
message = e.reason
code = e.status_code
raise BlobStorageError(message, code)
return properties.size
@retry.with_exponential_backoff(
retry_filter=retry.retry_on_beam_io_error_filter)
def last_updated(self, path):
"""Returns the last updated epoch time of a single
Azure Blob Storage blob.
This method does not perform glob expansion. Hence the
given path must be for a single Azure Blob Storage blob.
Returns: last updated time of the Azure Blob Storage blob
in seconds.
"""
container, blob = parse_azfs_path(path)
blob_to_check = self.client.get_blob_client(container, blob)
try:
properties = blob_to_check.get_blob_properties()
except ResourceNotFoundError as e:
message = e.reason
code = e.status_code
raise BlobStorageError(message, code)
datatime = properties.last_modified
return (
time.mktime(datatime.timetuple()) - time.timezone +
datatime.microsecond / 1000000.0)
@retry.with_exponential_backoff(
retry_filter=retry.retry_on_beam_io_error_filter)
def checksum(self, path):
"""Looks up the checksum of an Azure Blob Storage blob.
Args:
path: Azure Blob Storage file path pattern in the form
azfs://<storage-account>/<container>/[name].
"""
container, blob = parse_azfs_path(path)
blob_to_check = self.client.get_blob_client(container, blob)
try:
properties = blob_to_check.get_blob_properties()
except ResourceNotFoundError as e:
message = e.reason
code = e.status_code
raise BlobStorageError(message, code)
return properties.etag
@retry.with_exponential_backoff(
retry_filter=retry.retry_on_beam_io_error_filter)
def delete(self, path):
"""Deletes a single blob at the given Azure Blob Storage path.
Args:
path: Azure Blob Storage file path pattern in the form
azfs://<storage-account>/<container>/[name].
"""
container, blob = parse_azfs_path(path)
blob_to_delete = self.client.get_blob_client(container, blob)
try:
blob_to_delete.delete_blob()
except ResourceNotFoundError as e:
if e.status_code == 404:
# Return success when the file doesn't exist anymore for idempotency.
return
else:
logging.error('HTTP error while deleting file %s', path)
raise e
# We intentionally do not decorate this method with a retry, since the
# underlying copy and delete operations are already idempotent operations
# protected by retry decorators.
def delete_paths(self, paths):
"""Deletes the given Azure Blob Storage blobs from src to dest.
This can handle directory or file paths.
Args:
paths: list of Azure Blob Storage paths in the form
azfs://<storage-account>/<container>/[name] that give the
file blobs to be deleted.
Returns:
List of tuples of (src, dest, exception) in the same order as the
src_dest_pairs argument, where exception is None if the operation
succeeded or the relevant exception if the operation failed.
"""
directories, blobs = [], []
# Retrieve directories and not directories.
for path in paths:
if path.endswith('/'):
directories.append(path)
else:
blobs.append(path)
results = {}
for directory in directories:
directory_result = dict(self.delete_tree(directory))
results.update(directory_result)
blobs_results = dict(self.delete_files(blobs))
results.update(blobs_results)
return results
# We intentionally do not decorate this method with a retry, since the
# underlying copy and delete operations are already idempotent operations
# protected by retry decorators.
def delete_tree(self, root):
"""Deletes all blobs under the given Azure BlobStorage virtual
directory.
Args:
path: Azure Blob Storage file path pattern in the form
azfs://<storage-account>/<container>/[name]
(ending with a "/").
Returns:
List of tuples of (path, exception), where each path is a blob
under the given root. exception is None if the operation succeeded
or the relevant exception if the operation failed.
"""
assert root.endswith('/')
# Get the blob under the root directory.
paths_to_delete = self.list_prefix(root)
return self.delete_files(paths_to_delete)
# We intentionally do not decorate this method with a retry, since the
# underlying copy and delete operations are already idempotent operations
# protected by retry decorators.
def delete_files(self, paths):
"""Deletes the given Azure Blob Storage blobs from src to dest.
Args:
paths: list of Azure Blob Storage paths in the form
azfs://<storage-account>/<container>/[name] that give the
file blobs to be deleted.
Returns:
List of tuples of (src, dest, exception) in the same order as the
src_dest_pairs argument, where exception is None if the operation
succeeded or the relevant exception if the operation failed.
"""
if not paths:
return []
# Group blobs into containers.
containers, blobs = zip(*[parse_azfs_path(path, get_account=False) \
for path in paths])
grouped_blobs = {container: [] for container in containers}
# Fill dictionary.
for container, blob in zip(containers, blobs):
grouped_blobs[container].append(blob)
results = {}
# Delete minibatches of blobs for each container.
for container, blobs in grouped_blobs.items():
for i in range(0, len(blobs), MAX_BATCH_OPERATION_SIZE):
blobs_to_delete = blobs[i:i + MAX_BATCH_OPERATION_SIZE]
results.update(self._delete_batch(container, blobs_to_delete))
final_results = \
[(path, results[parse_azfs_path(path, get_account=False)]) \
for path in paths]
return final_results
@retry.with_exponential_backoff(
retry_filter=retry.retry_on_beam_io_error_filter)
def _delete_batch(self, container, blobs):
"""A helper method. Azure Blob Storage Python Client allows batch
deletions for blobs within the same container.
Args:
container: container name.
blobs: list of blobs to be deleted.
Returns:
Dictionary of the form {(container, blob): error}, where error is
None if the operation succeeded.
"""
container_client = self.client.get_container_client(container)
results = {}
for blob in blobs:
try:
response = container_client.delete_blob(blob)
results[(container, blob)] = response
except ResourceNotFoundError as e:
results[(container, blob)] = e.status_code
return results
@retry.with_exponential_backoff(
retry_filter=retry.retry_on_beam_io_error_filter)
def list_prefix(self, path):
"""Lists files matching the prefix.
Args:
path: Azure Blob Storage file path pattern in the form
azfs://<storage-account>/<container>/[name].
Returns:
Dictionary of file name -> size.
"""
storage_account, container, blob = parse_azfs_path(
path, blob_optional=True, get_account=True)
file_sizes = {}
counter = 0
start_time = time.time()
logging.info("Starting the size estimation of the input")
container_client = self.client.get_container_client(container)
while True:
response = container_client.list_blobs(name_starts_with=blob)
for item in response:
file_name = "azfs://%s/%s/%s" % (storage_account, container, item.name)
file_sizes[file_name] = item.size
counter += 1
if counter % 10000 == 0:
logging.info("Finished computing size of: %s files", len(file_sizes))
break
logging.info(
"Finished listing %s files in %s seconds.",
counter,
time.time() - start_time)
return file_sizes
class BlobStorageDownloader(Downloader):
  """``Downloader`` implementation that reads ranges of a single Azure Blob
  Storage blob.
  """
  def __init__(self, client, path, buffer_size):
    self._client = client
    self._path = path
    self._container, self._blob = parse_azfs_path(path)
    self._buffer_size = buffer_size
    self._blob_to_download = self._client.get_blob_client(
        self._container, self._blob)
    try:
      properties = self._get_object_properties()
    except ResourceNotFoundError as http_error:
      if http_error.status_code == 404:
        # Report a missing blob with the standard errno so callers can
        # handle it like a local file-not-found.
        raise IOError(errno.ENOENT, 'Not found: %s' % self._path)
      _LOGGER.error(
          'HTTP error while requesting file %s: %s', self._path, http_error)
      raise
    self._size = properties.size

  @retry.with_exponential_backoff(
      retry_filter=retry.retry_on_beam_io_error_filter)
  def _get_object_properties(self):
    # Read-only metadata fetch; safe to retry.
    return self._blob_to_download.get_blob_properties()

  @property
  def size(self):
    return self._size

  def get_range(self, start, end):
    # download_blob takes an offset and a length; ``end`` is exclusive, so
    # the length is end - start.
    blob_data = self._blob_to_download.download_blob(start, end - start)
    # Return the downloaded content as bytes.
    return blob_data.readall()
class BlobStorageUploader(Uploader):
  """``Uploader`` implementation that stages written data in a local
  temporary file and uploads it to Azure Blob Storage on ``finish``.
  """
  def __init__(self, client, path, mime_type='application/octet-stream'):
    self._client = client
    self._path = path
    self._container, self._blob = parse_azfs_path(path)
    self._content_settings = ContentSettings(mime_type)
    self._blob_to_upload = self._client.get_blob_client(
        self._container, self._blob)
    # Local staging area for bytes written through put(). The file is
    # removed automatically once it is closed.
    self._temporary_file = tempfile.NamedTemporaryFile()

  def put(self, data):
    # ``data`` exposes tobytes() (memoryview-style); persist it locally.
    self._temporary_file.write(data.tobytes())

  def finish(self):
    self._temporary_file.seek(0)
    # The temporary file is deleted immediately after the operation.
    # NOTE(review): f.read() loads the entire payload into memory; consider
    # streaming for very large uploads.
    with open(self._temporary_file.name, "rb") as f:
      self._blob_to_upload.upload_blob(
          f.read(), overwrite=True, content_settings=self._content_settings)
from __future__ import absolute_import
from future.utils import iteritems
from apache_beam.io.azure import blobstorageio
from apache_beam.io.filesystem import BeamIOError
from apache_beam.io.filesystem import CompressedFile
from apache_beam.io.filesystem import CompressionTypes
from apache_beam.io.filesystem import FileMetadata
from apache_beam.io.filesystem import FileSystem
__all__ = ['BlobStorageFileSystem']
class BlobStorageFileSystem(FileSystem):
  """An Azure Blob Storage ``FileSystem`` implementation for accessing files
  on Azure Blob Storage.
  """

  CHUNK_SIZE = blobstorageio.MAX_BATCH_OPERATION_SIZE
  AZURE_FILE_SYSTEM_PREFIX = 'azfs://'

  @classmethod
  def scheme(cls):
    """URI scheme for the FileSystem
    """
    return 'azfs'

  def join(self, basepath, *paths):
    """Join two or more pathname components for the filesystem

    Args:
      basepath: string path of the first component of the path
      paths: path components to be added

    Returns: full path after combining all the passed components
    """
    if not basepath.startswith(BlobStorageFileSystem.AZURE_FILE_SYSTEM_PREFIX):
      raise ValueError(
          'Basepath %r must be an Azure Blob Storage path.' % basepath)
    path = basepath
    for p in paths:
      path = path.rstrip('/') + '/' + p.lstrip('/')
    return path

  def split(self, path):
    """Splits the given path into two parts.

    Splits the path into a pair (head, tail) such that tail contains the last
    component of the path and head contains everything up to that.

    For file-systems other than the local file-system, head should include the
    prefix.

    Args:
      path: path as a string

    Returns:
      a pair of path components as strings.
    """
    path = path.strip()
    if not path.startswith(BlobStorageFileSystem.AZURE_FILE_SYSTEM_PREFIX):
      raise ValueError('Path %r must be Azure Blob Storage path.' % path)
    prefix_len = len(BlobStorageFileSystem.AZURE_FILE_SYSTEM_PREFIX)
    # Look for the last '/' after the scheme prefix only.
    last_sep = path[prefix_len:].rfind('/')
    if last_sep >= 0:
      last_sep += prefix_len
    if last_sep > 0:
      return (path[:last_sep], path[last_sep + 1:])
    elif last_sep < 0:
      # No separator beyond the prefix: the whole path is the head.
      return (path, '')
    else:
      raise ValueError('Invalid path: %s' % path)

  def mkdirs(self, path):
    """Recursively create directories for the provided path.

    Args:
      path: string path of the directory structure that should be created

    Raises:
      IOError: if leaf directory already exists.
    """
    # Blob Storage has no physical directories (see has_dirs), so there is
    # nothing to create.
    pass

  def has_dirs(self):
    """Whether this FileSystem supports directories."""
    return False

  def _list(self, dir_or_prefix):
    """List files in a location.

    Listing is non-recursive (for filesystems that support directories).

    Args:
      dir_or_prefix: (string) A directory or location prefix (for filesystems
        that don't have directories).

    Returns:
      Generator of ``FileMetadata`` objects.

    Raises:
      ``BeamIOError``: if listing fails, but not if no files were found.
    """
    try:
      for path, size in \
          iteritems(blobstorageio.BlobStorageIO().list_prefix(dir_or_prefix)):
        yield FileMetadata(path, size)
    except Exception as e:  # pylint: disable=broad-except
      raise BeamIOError("List operation failed", {dir_or_prefix: e})

  def _path_open(
      self,
      path,
      mode,
      mime_type='application/octet-stream',
      compression_type=CompressionTypes.AUTO):
    """Helper functions to open a file in the provided mode.
    """
    compression_type = FileSystem._get_compression_type(path, compression_type)
    mime_type = CompressionTypes.mime_type(compression_type, mime_type)
    raw_file = blobstorageio.BlobStorageIO().open(
        path, mode, mime_type=mime_type)
    if compression_type == CompressionTypes.UNCOMPRESSED:
      return raw_file
    return CompressedFile(raw_file, compression_type=compression_type)

  def create(
      self,
      path,
      mime_type='application/octet-stream',
      compression_type=CompressionTypes.AUTO):
    # type: (...) -> BinaryIO
    """Returns a write channel for the given file path.

    Args:
      path: string path of the file object to be written to the system
      mime_type: MIME type to specify the type of content in the file object
      compression_type: Type of compression to be used for this object

    Returns: file handle with a close function for the user to use
    """
    return self._path_open(path, 'wb', mime_type, compression_type)

  def open(
      self,
      path,
      mime_type='application/octet-stream',
      compression_type=CompressionTypes.AUTO):
    # type: (...) -> BinaryIO
    """Returns a read channel for the given file path.

    Args:
      path: string path of the file object to be read
      mime_type: MIME type to specify the type of content in the file object
      compression_type: Type of compression to be used for this object

    Returns: file handle with a close function for the user to use
    """
    return self._path_open(path, 'rb', mime_type, compression_type)

  def copy(self, source_file_names, destination_file_names):
    """Recursively copy the file tree from the source to the destination

    Args:
      source_file_names: list of source file objects that needs to be copied
      destination_file_names: list of destination of the new object

    Raises:
      ``BeamIOError``: if any of the copy operations fail
    """
    if not len(source_file_names) == len(destination_file_names):
      message = 'Unable to copy unequal number of sources and destinations.'
      raise BeamIOError(message)
    src_dest_pairs = list(zip(source_file_names, destination_file_names))
    # NOTE(review): unlike rename/delete, per-file copy errors are returned
    # rather than raised as BeamIOError -- confirm callers expect this.
    return blobstorageio.BlobStorageIO().copy_paths(src_dest_pairs)

  def rename(self, source_file_names, destination_file_names):
    """Rename the files at the source list to the destination list.

    Source and destination lists should be of the same size.

    Args:
      source_file_names: List of file paths that need to be moved
      destination_file_names: List of destination_file_names for the files

    Raises:
      ``BeamIOError``: if any of the rename operations fail
    """
    if not len(source_file_names) == len(destination_file_names):
      message = 'Unable to rename unequal number of sources and destinations.'
      raise BeamIOError(message)
    src_dest_pairs = list(zip(source_file_names, destination_file_names))
    results = blobstorageio.BlobStorageIO().rename_files(src_dest_pairs)
    # Retrieve exceptions.
    exceptions = {(src, dest): error
                  for (src, dest, error) in results if error is not None}
    if exceptions:
      raise BeamIOError("Rename operation failed.", exceptions)

  def exists(self, path):
    """Check if the provided path exists on the FileSystem.

    Args:
      path: string path that needs to be checked.

    Returns: boolean flag indicating if path exists
    """
    try:
      return blobstorageio.BlobStorageIO().exists(path)
    except Exception as e:  # pylint: disable=broad-except
      raise BeamIOError("Exists operation failed", {path: e})

  def size(self, path):
    """Get size in bytes of a file on the FileSystem.

    Args:
      path: string filepath of file.

    Returns: int size of file according to the FileSystem.

    Raises:
      ``BeamIOError``: if path doesn't exist.
    """
    try:
      return blobstorageio.BlobStorageIO().size(path)
    except Exception as e:  # pylint: disable=broad-except
      raise BeamIOError("Size operation failed", {path: e})

  def last_updated(self, path):
    """Get UNIX Epoch time in seconds on the FileSystem.

    Args:
      path: string path of file.

    Returns: float UNIX Epoch time

    Raises:
      ``BeamIOError``: if path doesn't exist.
    """
    try:
      return blobstorageio.BlobStorageIO().last_updated(path)
    except Exception as e:  # pylint: disable=broad-except
      raise BeamIOError("Last updated operation failed", {path: e})

  def checksum(self, path):
    """Fetch checksum metadata of a file on the
    :class:`~apache_beam.io.filesystem.FileSystem`.

    Args:
      path: string path of a file.

    Returns: string containing checksum

    Raises:
      ``BeamIOError``: if path isn't a file or doesn't exist.
    """
    try:
      return blobstorageio.BlobStorageIO().checksum(path)
    except Exception as e:  # pylint: disable=broad-except
      # Fixed: this previously built a set literal {path, e} instead of the
      # {path: exception} dict that BeamIOError expects (cf. size/exists).
      raise BeamIOError("Checksum operation failed", {path: e})

  def delete(self, paths):
    """Deletes files or directories at the provided paths.

    Directories will be deleted recursively.

    Args:
      paths: list of paths that give the file objects to be deleted

    Raises:
      ``BeamIOError``: if any of the delete operations fail
    """
    results = blobstorageio.BlobStorageIO().delete_paths(paths)
    # Retrieve exceptions.
    exceptions = {
        path: error
        for (path, error) in results.items() if error is not None
    }
    if exceptions:
      raise BeamIOError("Delete operation failed", exceptions)
# pytype: skip-file
from __future__ import absolute_import
import re
from builtins import object
from typing import Any
from typing import List
from typing import NamedTuple
from typing import Optional
from future.utils import iteritems
from past.builtins import unicode
from apache_beam import coders
from apache_beam.io.iobase import Read
from apache_beam.io.iobase import Write
from apache_beam.runners.dataflow.native_io import iobase as dataflow_io
from apache_beam.transforms import Flatten
from apache_beam.transforms import Map
from apache_beam.transforms import PTransform
from apache_beam.transforms.display import DisplayDataItem
from apache_beam.utils.annotations import deprecated
try:
from google.cloud import pubsub
except ImportError:
pubsub = None
__all__ = [
'MultipleReadFromPubSub',
'PubsubMessage',
'PubSubSourceDescriptor',
'ReadFromPubSub',
'ReadStringsFromPubSub',
'WriteStringsToPubSub',
'WriteToPubSub'
]
class PubsubMessage(object):
  """Represents a Cloud Pub/Sub message.

  Message payload includes the data and attributes fields. For the payload to
  be valid, at least one of its fields must be non-empty.

  Attributes:
    data: (bytes) Message data. May be None.
    attributes: (dict) Key-value map of str to str, containing both
      user-defined and service generated attributes (such as id_label and
      timestamp_attribute). May be None.
  """
  def __init__(self, data, attributes):
    if data is None and not attributes:
      # Interpolate eagerly: ValueError does not apply %-formatting to extra
      # arguments the way logging calls do, so the previous form never
      # rendered the values into the message.
      raise ValueError(
          'Either data (%r) or attributes (%r) must be set.' %
          (data, attributes))
    self.data = data
    self.attributes = attributes

  def __hash__(self):
    # attributes may legitimately be None (see class docstring); hash it
    # like an empty attribute map instead of raising AttributeError.
    attributes = (
        frozenset(self.attributes.items()) if self.attributes else frozenset())
    return hash((self.data, attributes))

  def __eq__(self, other):
    return isinstance(other, PubsubMessage) and (
        self.data == other.data and self.attributes == other.attributes)

  def __ne__(self, other):
    # TODO(BEAM-5949): Needed for Python 2 compatibility.
    return not self == other

  def __repr__(self):
    return 'PubsubMessage(%s, %s)' % (self.data, self.attributes)

  @staticmethod
  def _from_proto_str(proto_msg):
    # type: (bytes) -> PubsubMessage

    """Construct from serialized form of ``PubsubMessage``.

    Args:
      proto_msg: String containing a serialized protobuf of type
        https://cloud.google.com/pubsub/docs/reference/rpc/google.pubsub.v1#google.pubsub.v1.PubsubMessage

    Returns:
      A new PubsubMessage object.
    """
    msg = pubsub.types.pubsub_pb2.PubsubMessage()
    msg.ParseFromString(proto_msg)
    # Convert ScalarMapContainer to dict.
    attributes = dict((key, msg.attributes[key]) for key in msg.attributes)
    return PubsubMessage(msg.data, attributes)

  def _to_proto_str(self):
    """Get serialized form of ``PubsubMessage``.

    Returns:
      A str containing a serialized protobuf of type
      https://cloud.google.com/pubsub/docs/reference/rpc/google.pubsub.v1#google.pubsub.v1.PubsubMessage
      containing the payload of this object.
    """
    msg = pubsub.types.pubsub_pb2.PubsubMessage()
    msg.data = self.data
    for key, value in iteritems(self.attributes):
      msg.attributes[key] = value
    return msg.SerializeToString()

  @staticmethod
  def _from_message(msg):
    # type: (Any) -> PubsubMessage

    """Construct from ``google.cloud.pubsub_v1.subscriber.message.Message``.

    https://googleapis.github.io/google-cloud-python/latest/pubsub/subscriber/api/message.html
    """
    # Convert ScalarMapContainer to dict.
    attributes = dict((key, msg.attributes[key]) for key in msg.attributes)
    return PubsubMessage(msg.data, attributes)
class ReadFromPubSub(PTransform):
  """A ``PTransform`` for reading from Cloud Pub/Sub."""

  # Implementation note: This ``PTransform`` is overridden by Directrunner.

  def __init__(
      self,
      topic=None,  # type: Optional[str]
      subscription=None,  # type: Optional[str]
      id_label=None,  # type: Optional[str]
      with_attributes=False,  # type: bool
      timestamp_attribute=None  # type: Optional[str]
  ):
    # type: (...) -> None

    """Initializes ``ReadFromPubSub``.

    Args:
      topic: Cloud Pub/Sub topic in the form
        "projects/<project>/topics/<topic>". If provided, subscription must
        be None.
      subscription: Existing Cloud Pub/Sub subscription to use in the
        form "projects/<project>/subscriptions/<subscription>". If not
        specified, a temporary subscription will be created from the
        specified topic. If provided, topic must be None.
      id_label: The attribute on incoming Pub/Sub messages to use as a unique
        record identifier. When specified, the value of this attribute (which
        can be any string that uniquely identifies the record) will be used
        for deduplication of messages. If not provided, we cannot guarantee
        that no duplicate data will be delivered on the Pub/Sub stream. In
        this case, deduplication of the stream will be strictly best effort.
      with_attributes:
        True - output elements will be :class:`~PubsubMessage` objects.
        False - output elements will be of type ``bytes`` (message
        data only).
      timestamp_attribute: Message value to use as element timestamp. If
        None, uses message publishing time as the timestamp.

        Timestamp values should be in one of two formats:

        - A numerical value representing the number of milliseconds since
          the Unix epoch.
        - A string in RFC 3339 format, UTC timezone. Example:
          ``2015-10-29T23:41:41.123Z``. The sub-second component of the
          timestamp is optional, and digits beyond the first three (i.e.,
          time units smaller than milliseconds) may be ignored.
    """
    super(ReadFromPubSub, self).__init__()
    self.with_attributes = with_attributes
    # Validation of topic/subscription happens inside _PubSubSource.
    self._source = _PubSubSource(
        topic=topic,
        subscription=subscription,
        id_label=id_label,
        with_attributes=self.with_attributes,
        timestamp_attribute=timestamp_attribute)

  def expand(self, pvalue):
    raw = pvalue.pipeline | Read(self._source)
    raw.element_type = bytes
    if not self.with_attributes:
      return raw
    # Deserialize the raw protobuf bytes into PubsubMessage objects.
    messages = raw | Map(PubsubMessage._from_proto_str)
    messages.element_type = PubsubMessage
    return messages

  def to_runner_api_parameter(self, context):
    # Required as this is identified by type in PTransformOverrides.
    # TODO(BEAM-3812): Use an actual URN here.
    return self.to_runner_api_pickled(context)
@deprecated(since='2.7.0', extra_message='Use ReadFromPubSub instead.')
def ReadStringsFromPubSub(topic=None, subscription=None, id_label=None):
  """Deprecated factory: reads utf-8-decoded strings from Cloud Pub/Sub."""
  return _ReadStringsFromPubSub(
      topic=topic, subscription=subscription, id_label=id_label)
class _ReadStringsFromPubSub(PTransform):
  """This class is deprecated. Use ``ReadFromPubSub`` instead."""
  def __init__(self, topic=None, subscription=None, id_label=None):
    super(_ReadStringsFromPubSub, self).__init__()
    self.topic = topic
    self.subscription = subscription
    self.id_label = id_label

  def expand(self, pvalue):
    # Read raw message bytes, then decode them into unicode strings.
    decoded = (
        pvalue.pipeline
        | ReadFromPubSub(
            self.topic, self.subscription, self.id_label,
            with_attributes=False)
        | 'DecodeString' >> Map(lambda b: b.decode('utf-8')))
    decoded.element_type = unicode
    return decoded
@deprecated(since='2.7.0', extra_message='Use WriteToPubSub instead.')
def WriteStringsToPubSub(topic):
  """Deprecated factory: writes utf-8-encoded strings to Cloud Pub/Sub."""
  return _WriteStringsToPubSub(topic)
class _WriteStringsToPubSub(PTransform):
  """This class is deprecated. Use ``WriteToPubSub`` instead."""
  def __init__(self, topic):
    """Initializes ``_WriteStringsToPubSub``.

    Attributes:
      topic: Cloud Pub/Sub topic in the form "/topics/<project>/<topic>".
    """
    super(_WriteStringsToPubSub, self).__init__()
    self.topic = topic

  def expand(self, pcoll):
    # Encode unicode strings to bytes before handing off to WriteToPubSub.
    encoded = pcoll | 'EncodeString' >> Map(lambda s: s.encode('utf-8'))
    encoded.element_type = bytes
    return encoded | WriteToPubSub(self.topic)
class WriteToPubSub(PTransform):
  """A ``PTransform`` for writing messages to Cloud Pub/Sub."""

  # Implementation note: This ``PTransform`` is overridden by Directrunner.

  def __init__(
      self,
      topic,  # type: str
      with_attributes=False,  # type: bool
      id_label=None,  # type: Optional[str]
      timestamp_attribute=None  # type: Optional[str]
  ):
    # type: (...) -> None

    """Initializes ``WriteToPubSub``.

    Args:
      topic: Cloud Pub/Sub topic in the form "/topics/<project>/<topic>".
      with_attributes:
        True - input elements will be :class:`~PubsubMessage` objects.
        False - input elements will be of type ``bytes`` (message
        data only).
      id_label: If set, will set an attribute for each Cloud Pub/Sub message
        with the given name and a unique value. This attribute can then be
        used in a ReadFromPubSub PTransform to deduplicate messages.
      timestamp_attribute: If set, will set an attribute for each Cloud
        Pub/Sub message with the given name and the message's publish time
        as the value.
    """
    super(WriteToPubSub, self).__init__()
    self.with_attributes = with_attributes
    self.id_label = id_label
    self.timestamp_attribute = timestamp_attribute
    # parse_topic also validates the topic/project format.
    self.project, self.topic_name = parse_topic(topic)
    self.full_topic = topic
    self._sink = _PubSubSink(topic, id_label, timestamp_attribute)

  @staticmethod
  def message_to_proto_str(element):
    # type: (PubsubMessage) -> bytes
    if not isinstance(element, PubsubMessage):
      raise TypeError(
          'Unexpected element. Type: %s (expected: PubsubMessage), '
          'value: %r' % (type(element), element))
    return element._to_proto_str()

  @staticmethod
  def bytes_to_proto_str(element):
    # type: (bytes) -> bytes
    msg = pubsub.types.pubsub_pb2.PubsubMessage()
    msg.data = element
    return msg.SerializeToString()

  def expand(self, pcoll):
    if self.with_attributes:
      pcoll = pcoll | 'ToProtobuf' >> Map(self.message_to_proto_str)
    else:
      pcoll = pcoll | 'ToProtobuf' >> Map(self.bytes_to_proto_str)
    pcoll.element_type = bytes
    return pcoll | Write(self._sink)

  def to_runner_api_parameter(self, context):
    # Required as this is identified by type in PTransformOverrides.
    # TODO(BEAM-3812): Use an actual URN here.
    return self.to_runner_api_pickled(context)

  def display_data(self):
    return {
        'topic': DisplayDataItem(self.full_topic, label='Pubsub Topic'),
        'id_label': DisplayDataItem(self.id_label, label='ID Label Attribute'),
        # Fixed: previously displayed a hard-coded True regardless of the
        # configured value (cf. the matching entry in
        # _PubSubSource.display_data).
        'with_attributes': DisplayDataItem(
            self.with_attributes, label='With Attributes').drop_if_none(),
        'timestamp_attribute': DisplayDataItem(
            self.timestamp_attribute, label='Timestamp Attribute'),
    }
# A Cloud project id: lowercase letter first, then 4-61 allowed characters,
# ending with a letter or digit (6-63 characters total).
PROJECT_ID_REGEXP = '[a-z][-a-z0-9:.]{4,61}[a-z0-9]'
SUBSCRIPTION_REGEXP = 'projects/([^/]+)/subscriptions/(.+)'
TOPIC_REGEXP = 'projects/([^/]+)/topics/(.+)'


def parse_topic(full_topic):
  """Splits "projects/<project>/topics/<topic>" into (project, topic)."""
  topic_match = re.match(TOPIC_REGEXP, full_topic)
  if topic_match is None:
    raise ValueError(
        'PubSub topic must be in the form "projects/<project>/topics'
        '/<topic>" (got %r).' % full_topic)
  project, topic_name = topic_match.groups()
  if re.match(PROJECT_ID_REGEXP, project) is None:
    raise ValueError('Invalid PubSub project name: %r.' % project)
  return project, topic_name


def parse_subscription(full_subscription):
  """Splits "projects/<project>/subscriptions/<sub>" into (project, sub)."""
  sub_match = re.match(SUBSCRIPTION_REGEXP, full_subscription)
  if sub_match is None:
    raise ValueError(
        'PubSub subscription must be in the form "projects/<project>'
        '/subscriptions/<subscription>" (got %r).' % full_subscription)
  project, subscription_name = sub_match.groups()
  if re.match(PROJECT_ID_REGEXP, project) is None:
    raise ValueError('Invalid PubSub project name: %r.' % project)
  return project, subscription_name
class _PubSubSource(dataflow_io.NativeSource):
  """Source for a Cloud Pub/Sub topic or subscription.

  This ``NativeSource`` is overridden by a native Pubsub implementation.

  Attributes:
    with_attributes: If False, will fetch just message data. Otherwise,
      fetches ``PubsubMessage`` protobufs.
  """
  def __init__(
      self,
      topic=None,  # type: Optional[str]
      subscription=None,  # type: Optional[str]
      id_label=None,  # type: Optional[str]
      with_attributes=False,  # type: bool
      timestamp_attribute=None  # type: Optional[str]
  ):
    self.coder = coders.BytesCoder()
    self.full_topic = topic
    self.full_subscription = subscription
    self.topic_name = None
    self.subscription_name = None
    self.id_label = id_label
    self.with_attributes = with_attributes
    self.timestamp_attribute = timestamp_attribute
    # Exactly one of topic / subscription must have been supplied.
    if not (topic or subscription):
      raise ValueError('Either a topic or subscription must be provided.')
    if topic and subscription:
      raise ValueError('Only one of topic or subscription should be provided.')
    # parse_* also validate the path and project-id formats.
    if topic:
      self.project, self.topic_name = parse_topic(topic)
    else:
      self.project, self.subscription_name = parse_subscription(subscription)

  @property
  def format(self):
    """Source format name required for remote execution."""
    return 'pubsub'

  def display_data(self):
    return {
        'id_label': DisplayDataItem(
            self.id_label, label='ID Label Attribute').drop_if_none(),
        'topic': DisplayDataItem(
            self.full_topic, label='Pubsub Topic').drop_if_none(),
        'subscription': DisplayDataItem(
            self.full_subscription, label='Pubsub Subscription').drop_if_none(),
        'with_attributes': DisplayDataItem(
            self.with_attributes, label='With Attributes').drop_if_none(),
        'timestamp_attribute': DisplayDataItem(
            self.timestamp_attribute,
            label='Timestamp Attribute').drop_if_none(),
    }

  def reader(self):
    # Reading is handled by the runner's native override, never here.
    raise NotImplementedError

  def is_bounded(self):
    # Pub/Sub is a streaming (unbounded) source.
    return False
class _PubSubSink(dataflow_io.NativeSink):
  """Sink for a Cloud Pub/Sub topic.

  This ``NativeSink`` is overridden by a native Pubsub implementation.
  """
  def __init__(
      self,
      topic,  # type: str
      id_label,  # type: Optional[str]
      timestamp_attribute  # type: Optional[str]
  ):
    self.coder = coders.BytesCoder()
    self.full_topic = topic
    self.id_label = id_label
    self.timestamp_attribute = timestamp_attribute
    # parse_topic also validates the topic and project-id formats.
    self.project, self.topic_name = parse_topic(topic)

  @property
  def format(self):
    """Sink format name required for remote execution."""
    return 'pubsub'

  def writer(self):
    # Writing is handled by the runner's native override, never here.
    raise NotImplementedError
class PubSubSourceDescriptor(NamedTuple):
  """A PubSub source descriptor for ``MultipleReadFromPubSub``

  Attributes:
    source: Existing Cloud Pub/Sub topic or subscription to use in the
      form "projects/<project>/topics/<topic>" or
      "projects/<project>/subscriptions/<subscription>"
    id_label: The attribute on incoming Pub/Sub messages to use as a unique
      record identifier. When specified, the value of this attribute (which
      can be any string that uniquely identifies the record) will be used for
      deduplication of messages. If not provided, we cannot guarantee
      that no duplicate data will be delivered on the Pub/Sub stream. In this
      case, deduplication of the stream will be strictly best effort.
    timestamp_attribute: Message value to use as element timestamp. If None,
      uses message publishing time as the timestamp.

      Timestamp values should be in one of two formats:

      - A numerical value representing the number of milliseconds since the
        Unix epoch.
      - A string in RFC 3339 format, UTC timezone. Example:
        ``2015-10-29T23:41:41.123Z``. The sub-second component of the
        timestamp is optional, and digits beyond the first three (i.e., time
        units smaller than milliseconds) may be ignored.
  """
  source: str
  id_label: Optional[str] = None
  timestamp_attribute: Optional[str] = None
# Pattern for a fully qualified Pub/Sub resource name, capturing the project,
# the resource kind ('topics' or 'subscriptions') and the resource name.
PUBSUB_DESCRIPTOR_REGEXP = 'projects/([^/]+)/(topics|subscriptions)/(.+)'
class MultipleReadFromPubSub(PTransform):
  """A ``PTransform`` that expands ``ReadFromPubSub`` to read from multiple
  ``PubSubSourceDescriptor``.

  The `MultipleReadFromPubSub` transform allows you to read multiple topics
  and/or subscriptions using just one transform. It is the recommended transform
  to read multiple Pub/Sub sources when the output `PCollection` are going to be
  flattened. The transform takes a list of `PubSubSourceDescriptor` and organize
  them by type (topic / subscription) and project:::

    topic_1 = PubSubSourceDescriptor('projects/myproject/topics/a_topic')
    topic_2 = PubSubSourceDescriptor(
          'projects/myproject2/topics/b_topic',
          'my_label',
          'my_timestamp_attribute')
    subscription_1 = PubSubSourceDescriptor(
          'projects/myproject/subscriptions/a_subscription')

    results = pipeline | MultipleReadFromPubSub(
          [topic_1, topic_2, subscription_1])
  """
  def __init__(
      self,
      pubsub_source_descriptors,  # type: List[PubSubSourceDescriptor]
      with_attributes=False,  # type: bool
  ):
    """Initializes ``PubSubMultipleReader``.

    Args:
      pubsub_source_descriptors: List of Cloud Pub/Sub topics or subscriptions
        of type `~PubSubSourceDescriptor`.
      with_attributes:
        True - input elements will be :class:`~PubsubMessage` objects.
        False - input elements will be of type ``bytes`` (message data only).

    Raises:
      ValueError: if any descriptor ``source`` is not a valid Pub/Sub topic
        or subscription resource name.
    """
    self.pubsub_source_descriptors = pubsub_source_descriptors
    self.with_attributes = with_attributes

    # Validate all descriptors up front so a malformed source fails at
    # pipeline-construction time rather than at expansion time.
    for descriptor in self.pubsub_source_descriptors:
      self._match_source(descriptor)

  @staticmethod
  def _match_source(descriptor):
    """Matches ``descriptor.source`` against ``PUBSUB_DESCRIPTOR_REGEXP``.

    Returns the match object, or raises ValueError for a malformed source.
    """
    match_descriptor = re.match(PUBSUB_DESCRIPTOR_REGEXP, descriptor.source)
    if not match_descriptor:
      # NOTE: message previously said ".../subscription/..." (singular),
      # which contradicted the regexp; it requires "subscriptions".
      raise ValueError(
          'PubSub source descriptor must be in the form "projects/<project>'
          '/topics/<topic>" or "projects/<project>/subscriptions'
          '/<subscription>" (got %r).' % descriptor.source)
    return match_descriptor

  def expand(self, pcol):
    sources_pcol = []
    for descriptor in self.pubsub_source_descriptors:
      source_match = self._match_source(descriptor)
      source_project = source_match.group(1)
      # 'topics' or 'subscriptions', as captured by the regexp.
      source_type = source_match.group(2)
      source_name = source_match.group(3)

      # Unique, human-readable step label for each source being read.
      read_step_name = 'PubSub %s/project:%s/Read %s' % (
          source_type, source_project, source_name)

      if source_type == 'topics':
        current_source = pcol | read_step_name >> ReadFromPubSub(
            topic=descriptor.source,
            id_label=descriptor.id_label,
            with_attributes=self.with_attributes,
            timestamp_attribute=descriptor.timestamp_attribute)
      else:
        current_source = pcol | read_step_name >> ReadFromPubSub(
            subscription=descriptor.source,
            id_label=descriptor.id_label,
            with_attributes=self.with_attributes,
            timestamp_attribute=descriptor.timestamp_attribute)
      sources_pcol.append(current_source)

    # Merge all per-source PCollections into a single output.
    return tuple(sources_pcol) | Flatten()
# pytype: skip-file
from __future__ import absolute_import
from builtins import zip
from typing import BinaryIO # pylint: disable=unused-import
from future.utils import iteritems
from apache_beam.io.filesystem import BeamIOError
from apache_beam.io.filesystem import CompressedFile
from apache_beam.io.filesystem import CompressionTypes
from apache_beam.io.filesystem import FileMetadata
from apache_beam.io.filesystem import FileSystem
from apache_beam.io.gcp import gcsio
__all__ = ['GCSFileSystem']
class GCSFileSystem(FileSystem):
  """A GCS ``FileSystem`` implementation for accessing files on GCS.
  """

  # Number of copy/delete operations grouped per batch request (see rename).
  CHUNK_SIZE = gcsio.MAX_BATCH_OPERATION_SIZE  # Chunk size in batch operations
  GCS_PREFIX = 'gs://'

  @classmethod
  def scheme(cls):
    """URI scheme for the FileSystem
    """
    return 'gs'

  def join(self, basepath, *paths):
    """Join two or more pathname components for the filesystem

    Args:
      basepath: string path of the first component of the path
      paths: path components to be added

    Returns: full path after combining all the passed components
    """
    if not basepath.startswith(GCSFileSystem.GCS_PREFIX):
      raise ValueError('Basepath %r must be GCS path.' % basepath)
    path = basepath
    for p in paths:
      # Normalize so exactly one '/' separates each pair of components.
      path = path.rstrip('/') + '/' + p.lstrip('/')
    return path

  def split(self, path):
    """Splits the given path into two parts.

    Splits the path into a pair (head, tail) such that tail contains the last
    component of the path and head contains everything up to that.

    Head will include the GCS prefix ('gs://').

    Args:
      path: path as a string

    Returns:
      a pair of path components as strings.
    """
    path = path.strip()
    if not path.startswith(GCSFileSystem.GCS_PREFIX):
      raise ValueError('Path %r must be GCS path.' % path)

    prefix_len = len(GCSFileSystem.GCS_PREFIX)
    # Search only past the scheme so the '//' in 'gs://' is never mistaken
    # for a path separator.
    last_sep = path[prefix_len:].rfind('/')
    if last_sep >= 0:
      last_sep += prefix_len

    if last_sep > 0:
      return (path[:last_sep], path[last_sep + 1:])
    elif last_sep < 0:
      # No separator after the scheme: the whole path is the head.
      return (path, '')
    else:
      raise ValueError('Invalid path: %s' % path)

  def mkdirs(self, path):
    """Recursively create directories for the provided path.

    Args:
      path: string path of the directory structure that should be created

    Raises:
      IOError: if leaf directory already exists.
    """
    # GCS has no real directories (see has_dirs), so there is nothing to do.
    pass

  def has_dirs(self):
    """Whether this FileSystem supports directories."""
    return False

  def _list(self, dir_or_prefix):
    """List files in a location.

    Listing is non-recursive, for filesystems that support directories.

    Args:
      dir_or_prefix: (string) A directory or location prefix (for filesystems
        that don't have directories).

    Returns:
      Generator of ``FileMetadata`` objects.

    Raises:
      ``BeamIOError``: if listing fails, but not if no files were found.
    """
    try:
      for path, size in iteritems(gcsio.GcsIO().list_prefix(dir_or_prefix)):
        yield FileMetadata(path, size)
    except Exception as e:  # pylint: disable=broad-except
      raise BeamIOError("List operation failed", {dir_or_prefix: e})

  def _path_open(
      self,
      path,
      mode,
      mime_type='application/octet-stream',
      compression_type=CompressionTypes.AUTO):
    """Helper functions to open a file in the provided mode.
    """
    compression_type = FileSystem._get_compression_type(path, compression_type)
    mime_type = CompressionTypes.mime_type(compression_type, mime_type)
    raw_file = gcsio.GcsIO().open(path, mode, mime_type=mime_type)
    if compression_type == CompressionTypes.UNCOMPRESSED:
      return raw_file
    # Wrap the raw stream so reads/writes are (de)compressed transparently.
    return CompressedFile(raw_file, compression_type=compression_type)

  def create(
      self,
      path,
      mime_type='application/octet-stream',
      compression_type=CompressionTypes.AUTO):
    # type: (...) -> BinaryIO
    """Returns a write channel for the given file path.

    Args:
      path: string path of the file object to be written to the system
      mime_type: MIME type to specify the type of content in the file object
      compression_type: Type of compression to be used for this object

    Returns: file handle with a close function for the user to use
    """
    return self._path_open(path, 'wb', mime_type, compression_type)

  def open(
      self,
      path,
      mime_type='application/octet-stream',
      compression_type=CompressionTypes.AUTO):
    # type: (...) -> BinaryIO
    """Returns a read channel for the given file path.

    Args:
      path: string path of the file object to be written to the system
      mime_type: MIME type to specify the type of content in the file object
      compression_type: Type of compression to be used for this object

    Returns: file handle with a close function for the user to use
    """
    return self._path_open(path, 'rb', mime_type, compression_type)

  def copy(self, source_file_names, destination_file_names):
    """Recursively copy the file tree from the source to the destination

    Args:
      source_file_names: list of source file objects that needs to be copied
      destination_file_names: list of destination of the new object

    Raises:
      ``BeamIOError``: if any of the copy operations fail
    """
    err_msg = (
        "source_file_names and destination_file_names should "
        "be equal in length")
    assert len(source_file_names) == len(destination_file_names), err_msg

    def _copy_path(source, destination):
      """Recursively copy the file tree from the source to the destination
      """
      if not destination.startswith(GCSFileSystem.GCS_PREFIX):
        raise ValueError('Destination %r must be GCS path.' % destination)
      # Use copytree if the path ends with / as it is a directory
      if source.endswith('/'):
        gcsio.GcsIO().copytree(source, destination)
      else:
        gcsio.GcsIO().copy(source, destination)

    # Attempt every pair and report all failures together at the end.
    exceptions = {}
    for source, destination in zip(source_file_names, destination_file_names):
      try:
        _copy_path(source, destination)
      except Exception as e:  # pylint: disable=broad-except
        exceptions[(source, destination)] = e

    if exceptions:
      raise BeamIOError("Copy operation failed", exceptions)

  def rename(self, source_file_names, destination_file_names):
    """Rename the files at the source list to the destination list.

    Source and destination lists should be of the same size.

    Args:
      source_file_names: List of file paths that need to be moved
      destination_file_names: List of destination_file_names for the files

    Raises:
      ``BeamIOError``: if any of the rename operations fail
    """
    err_msg = (
        "source_file_names and destination_file_names should "
        "be equal in length")
    assert len(source_file_names) == len(destination_file_names), err_msg

    # Group the (src, dest) pairs into batches of at most CHUNK_SIZE
    # (gcsio.MAX_BATCH_OPERATION_SIZE) for the batched GCS calls below.
    gcs_batches = []
    gcs_current_batch = []
    for src, dest in zip(source_file_names, destination_file_names):
      gcs_current_batch.append((src, dest))
      if len(gcs_current_batch) == self.CHUNK_SIZE:
        gcs_batches.append(gcs_current_batch)
        gcs_current_batch = []
    if gcs_current_batch:
      gcs_batches.append(gcs_current_batch)

    # Execute GCS renames if any and return exceptions.
    exceptions = {}
    for batch in gcs_batches:
      # A rename is implemented as copy-then-delete; only successfully
      # copied sources are deleted.
      copy_statuses = gcsio.GcsIO().copy_batch(batch)
      copy_succeeded = []
      for src, dest, exception in copy_statuses:
        if exception:
          exceptions[(src, dest)] = exception
        else:
          copy_succeeded.append((src, dest))
      # delete_statuses is index-aligned with copy_succeeded.
      delete_batch = [src for src, dest in copy_succeeded]
      delete_statuses = gcsio.GcsIO().delete_batch(delete_batch)
      for i, (src, exception) in enumerate(delete_statuses):
        dest = copy_succeeded[i][1]
        if exception:
          exceptions[(src, dest)] = exception

    if exceptions:
      raise BeamIOError("Rename operation failed", exceptions)

  def exists(self, path):
    """Check if the provided path exists on the FileSystem.

    Args:
      path: string path that needs to be checked.

    Returns: boolean flag indicating if path exists
    """
    return gcsio.GcsIO().exists(path)

  def size(self, path):
    """Get size of path on the FileSystem.

    Args:
      path: string path in question.

    Returns: int size of path according to the FileSystem.

    Raises:
      ``BeamIOError``: if path doesn't exist.
    """
    return gcsio.GcsIO().size(path)

  def last_updated(self, path):
    """Get UNIX Epoch time in seconds on the FileSystem.

    Args:
      path: string path of file.

    Returns: float UNIX Epoch time

    Raises:
      ``BeamIOError``: if path doesn't exist.
    """
    return gcsio.GcsIO().last_updated(path)

  def checksum(self, path):
    """Fetch checksum metadata of a file on the
    :class:`~apache_beam.io.filesystem.FileSystem`.

    Args:
      path: string path of a file.

    Returns: string containing checksum

    Raises:
      ``BeamIOError``: if path isn't a file or doesn't exist.
    """
    try:
      return gcsio.GcsIO().checksum(path)
    except Exception as e:  # pylint: disable=broad-except
      raise BeamIOError("Checksum operation failed", {path: e})

  def delete(self, paths):
    """Deletes files or directories at the provided paths.

    Directories will be deleted recursively.

    Args:
      paths: list of paths that give the file objects to be deleted
    """
    def _delete_path(path):
      """Recursively delete the file or directory at the provided path.
      """
      # A trailing '/' marks a "directory": expand it to a wildcard so all
      # objects under the prefix are matched and deleted.
      if path.endswith('/'):
        path_to_use = path + '*'
      else:
        path_to_use = path
      match_result = self.match([path_to_use])[0]
      statuses = gcsio.GcsIO().delete_batch(
          [m.path for m in match_result.metadata_list])
      # Raise the first failure; it is recorded against `path` below.
      failures = [e for (_, e) in statuses if e is not None]
      if failures:
        raise failures[0]

    exceptions = {}
    for path in paths:
      try:
        _delete_path(path)
      except Exception as e:  # pylint: disable=broad-except
        exceptions[path] = e

    if exceptions:
      raise BeamIOError("Delete operation failed", exceptions)
# pytype: skip-file
from __future__ import absolute_import
import argparse
import apache_beam as beam
from apache_beam.options.pipeline_options import PipelineOptions
from apache_beam.options.pipeline_options import StandardOptions
def run_pipeline(argv, with_attributes, id_label, timestamp_attribute):
  """Build and run the pipeline.

  Args:
    argv: command-line arguments; must contain --output_topic and
      --input_subscription, remaining arguments become pipeline options.
    with_attributes: if True, elements are ``PubsubMessage`` objects (payload
      plus attributes); if False, elements are raw ``bytes`` payloads.
    id_label: attribute used for message deduplication. Omitted when running
      on the TestDirectRunner (see the runner checks below).
    timestamp_attribute: message attribute to use as the element timestamp.
  """
  parser = argparse.ArgumentParser()
  # NOTE: help text fixed from "topic/<TOPIC>" to "topics/<TOPIC>" to match
  # the actual Pub/Sub resource name format.
  parser.add_argument(
      '--output_topic',
      required=True,
      help=(
          'Output PubSub topic of the form '
          '"projects/<PROJECT>/topics/<TOPIC>".'))
  parser.add_argument(
      '--input_subscription',
      required=True,
      help=(
          'Input PubSub subscription of the form '
          '"projects/<PROJECT>/subscriptions/<SUBSCRIPTION>".'))
  known_args, pipeline_args = parser.parse_known_args(argv)
  pipeline_options = PipelineOptions(pipeline_args)
  # Pub/Sub reads are only available in streaming pipelines.
  pipeline_options.view_as(StandardOptions).streaming = True
  p = beam.Pipeline(options=pipeline_options)
  runner_name = type(p.runner).__name__

  # Read from PubSub into a PCollection. id_label is only passed on runners
  # other than the TestDirectRunner.
  if runner_name == 'TestDirectRunner':
    messages = p | beam.io.ReadFromPubSub(
        subscription=known_args.input_subscription,
        with_attributes=with_attributes,
        timestamp_attribute=timestamp_attribute)
  else:
    messages = p | beam.io.ReadFromPubSub(
        subscription=known_args.input_subscription,
        id_label=id_label,
        with_attributes=with_attributes,
        timestamp_attribute=timestamp_attribute)

  def add_attribute(msg, timestamp=beam.DoFn.TimestampParam):
    """Marks a PubsubMessage as processed and echoes its element timestamp."""
    msg.data += b'-seen'
    msg.attributes['processed'] = 'IT'
    if timestamp_attribute in msg.attributes:
      msg.attributes[timestamp_attribute + '_out'] = timestamp.to_rfc3339()
    return msg

  def modify_data(data):
    """Marks a raw payload as processed."""
    return data + b'-seen'

  if with_attributes:
    output = messages | 'add_attribute' >> beam.Map(add_attribute)
  else:
    output = messages | 'modify_data' >> beam.Map(modify_data)

  # Write to PubSub.
  if runner_name == 'TestDirectRunner':
    _ = output | beam.io.WriteToPubSub(
        known_args.output_topic, with_attributes=with_attributes)
  else:
    _ = output | beam.io.WriteToPubSub(
        known_args.output_topic,
        id_label=id_label,
        with_attributes=with_attributes,
        timestamp_attribute=timestamp_attribute)

  result = p.run()
  result.wait_until_finish()
import collections
import decimal
import json
import logging
import random
import time
import uuid
from typing import TYPE_CHECKING
from typing import Any
from typing import Dict
from typing import Iterable
from typing import List
from typing import Optional
from typing import Union
import apache_beam as beam
from apache_beam.coders import coders
from apache_beam.io.avroio import _create_avro_source
from apache_beam.io.filesystem import CompressionTypes
from apache_beam.io.filesystems import FileSystems
from apache_beam.io.gcp import bigquery_tools
from apache_beam.io.gcp.bigquery_io_metadata import create_bigquery_io_metadata
from apache_beam.io.iobase import BoundedSource
from apache_beam.io.textio import _TextSource
from apache_beam.options.pipeline_options import GoogleCloudOptions
from apache_beam.options.pipeline_options import PipelineOptions
from apache_beam.options.value_provider import ValueProvider
from apache_beam.transforms import PTransform
if TYPE_CHECKING:
from apache_beam.io.gcp.bigquery import ReadFromBigQueryRequest
try:
from apache_beam.io.gcp.internal.clients.bigquery import DatasetReference
from apache_beam.io.gcp.internal.clients.bigquery import TableReference
except ImportError:
DatasetReference = None
TableReference = None
_LOGGER = logging.getLogger(__name__)
def bigquery_export_destination_uri(
    gcs_location_vp: Optional[ValueProvider],
    temp_location: Optional[str],
    unique_id: str,
    directory_only: bool = False,
) -> str:
  """Returns the fully qualified Google Cloud Storage URI where the
  extracted table should be written.

  Prefers the explicitly supplied (possibly deferred) GCS location and falls
  back to the pipeline's temp_location; raises if neither is available.
  """
  file_pattern = 'bigquery-table-dump-*.json'

  # Resolve the deferred value provider, if one was supplied.
  gcs_location = None if gcs_location_vp is None else gcs_location_vp.get()

  if gcs_location is not None:
    gcs_base = gcs_location
  elif temp_location is not None:
    gcs_base = temp_location
    _LOGGER.debug("gcs_location is empty, using temp_location instead")
  else:
    raise ValueError(
        'ReadFromBigQuery requires a GCS location to be provided. Neither '
        'gcs_location in the constructor nor the fallback option '
        '--temp_location is set.')

  # Generate a fresh id when the caller did not provide one.
  unique_id = unique_id or uuid.uuid4().hex

  if directory_only:
    return FileSystems.join(gcs_base, unique_id)
  return FileSystems.join(gcs_base, unique_id, file_pattern)
class _PassThroughThenCleanup(PTransform):
  """A PTransform that invokes a DoFn after the input PCollection has been
  processed.

  DoFn should have arguments (element, side_input, cleanup_signal).

  Utilizes readiness of PCollection to trigger DoFn.
  """
  def __init__(self, side_input=None):
    # side_input: optional view handed through to the cleanup DoFn (here:
    # the GCS locations of the extracted files to remove).
    self.side_input = side_input

  def expand(self, input):
    class PassThrough(beam.DoFn):
      # Forwards elements unchanged; exists only to attach a second,
      # always-empty output used as the cleanup trigger.
      def process(self, element):
        yield element

    class RemoveExtractedFiles(beam.DoFn):
      # Deletes the extracted export files once the main output is done.
      def process(self, unused_element, unused_signal, gcs_locations):
        FileSystems.delete(list(gcs_locations))

    main_output, cleanup_signal = input | beam.ParDo(
        PassThrough()).with_outputs(
        'cleanup_signal', main='main')

    cleanup_input = input.pipeline | beam.Create([None])

    # 'cleanup_signal' never receives elements; consuming it as a side input
    # makes this ParDo wait until the PassThrough stage has fully processed
    # the input PCollection (see class docstring).
    _ = cleanup_input | beam.ParDo(
        RemoveExtractedFiles(),
        beam.pvalue.AsSingleton(cleanup_signal),
        self.side_input,
    )

    return main_output
class _BigQueryReadSplit(beam.transforms.DoFn):
  """Starts the process of reading from BigQuery.

  This transform will start a BigQuery export job, and output a number of
  file sources that are consumed downstream.
  """
  def __init__(
      self,
      options: PipelineOptions,
      gcs_location: Union[str, ValueProvider] = None,
      use_json_exports: bool = False,
      bigquery_job_labels: Dict[str, str] = None,
      step_name: str = None,
      job_name: str = None,
      unique_id: str = None,
      kms_key: str = None,
      project: str = None,
      temp_dataset: Union[str, DatasetReference] = None):
    self.options = options
    self.use_json_exports = use_json_exports
    self.gcs_location = gcs_location
    self.bigquery_job_labels = bigquery_job_labels or {}
    self._step_name = step_name
    self._job_name = job_name or 'BQ_READ_SPLIT'
    self._source_uuid = unique_id
    self.kms_key = kms_key
    self.project = project
    # Default to a randomly named dataset so concurrent reads don't collide.
    self.temp_dataset = temp_dataset or 'bq_read_all_%s' % uuid.uuid4().hex
    # Created lazily on first use (see _get_bq_metadata).
    self.bq_io_metadata = None

  def display_data(self):
    """Returns display metadata for this DoFn's configuration."""
    return {
        'use_json_exports': str(self.use_json_exports),
        'gcs_location': str(self.gcs_location),
        'bigquery_job_labels': json.dumps(self.bigquery_job_labels),
        'kms_key': str(self.kms_key),
        'project': str(self.project),
        'temp_dataset': str(self.temp_dataset)
    }

  def _get_temp_dataset(self):
    # self.temp_dataset may be a plain dataset id or a full DatasetReference;
    # normalize to a DatasetReference.
    if isinstance(self.temp_dataset, str):
      return DatasetReference(
          datasetId=self.temp_dataset, projectId=self._get_project())
    else:
      return self.temp_dataset

  def process(self,
              element: 'ReadFromBigQueryRequest') -> Iterable[BoundedSource]:
    bq = bigquery_tools.BigQueryWrapper(
        temp_dataset_id=self._get_temp_dataset().datasetId)

    # TODO(BEAM-11359): Clean up temp dataset at pipeline completion.
    if element.query is not None:
      # Materialize the query result into a temporary table, then export it.
      self._setup_temporary_dataset(bq, element)
      table_reference = self._execute_query(bq, element)
    else:
      assert element.table
      table_reference = bigquery_tools.parse_table_reference(
          element.table, project=self._get_project())

    if not table_reference.projectId:
      table_reference.projectId = self._get_project()

    schema, metadata_list = self._export_files(bq, element, table_reference)

    # Emit one downstream source per exported file.
    for metadata in metadata_list:
      yield self._create_source(metadata.path, schema)

    if element.query is not None:
      # The temp table only existed to hold the query result; drop it now.
      bq._delete_table(
          table_reference.projectId,
          table_reference.datasetId,
          table_reference.tableId)

  def _get_bq_metadata(self):
    # Lazily create the IO metadata used to label BigQuery jobs.
    if not self.bq_io_metadata:
      self.bq_io_metadata = create_bigquery_io_metadata(self._step_name)
    return self.bq_io_metadata

  def _create_source(self, path, schema):
    """Returns a file source for one exported file (Avro or JSON lines)."""
    if not self.use_json_exports:
      return _create_avro_source(path, use_fastavro=True)
    else:
      return _TextSource(
          path,
          min_bundle_size=0,
          compression_type=CompressionTypes.UNCOMPRESSED,
          strip_trailing_newlines=True,
          coder=_JsonToDictCoder(schema))

  def _setup_temporary_dataset(
      self,
      bq: bigquery_tools.BigQueryWrapper,
      element: 'ReadFromBigQueryRequest'):
    """Creates the temp dataset in the same location as the queried data."""
    location = bq.get_query_location(
        self._get_project(), element.query, not element.use_standard_sql)
    bq.create_temporary_dataset(self._get_project(), location)

  def _execute_query(
      self,
      bq: bigquery_tools.BigQueryWrapper,
      element: 'ReadFromBigQueryRequest'):
    """Runs the query job and returns the temp table holding its results."""
    query_job_name = bigquery_tools.generate_bq_job_name(
        self._job_name,
        self._source_uuid,
        bigquery_tools.BigQueryJobTypes.QUERY,
        '%s_%s' % (int(time.time()), random.randint(0, 1000)))
    job = bq._start_query_job(
        self._get_project(),
        element.query,
        not element.use_standard_sql,
        element.flatten_results,
        job_id=query_job_name,
        kms_key=self.kms_key,
        job_labels=self._get_bq_metadata().add_additional_bq_job_labels(
            self.bigquery_job_labels))
    job_ref = job.jobReference
    bq.wait_for_bq_job(job_ref, max_retries=0)
    return bq._get_temp_table(self._get_project())

  def _export_files(
      self,
      bq: bigquery_tools.BigQueryWrapper,
      element: 'ReadFromBigQueryRequest',
      table_reference: TableReference):
    """Runs a BigQuery export job.

    Returns:
      bigquery.TableSchema instance, a list of FileMetadata instances
    """
    job_labels = self._get_bq_metadata().add_additional_bq_job_labels(
        self.bigquery_job_labels)
    export_job_name = bigquery_tools.generate_bq_job_name(
        self._job_name,
        self._source_uuid,
        bigquery_tools.BigQueryJobTypes.EXPORT,
        element.obj_id)
    temp_location = self.options.view_as(GoogleCloudOptions).temp_location
    gcs_location = bigquery_export_destination_uri(
        self.gcs_location,
        temp_location,
        '%s%s' % (self._source_uuid, element.obj_id))
    if self.use_json_exports:
      job_ref = bq.perform_extract_job([gcs_location],
                                       export_job_name,
                                       table_reference,
                                       bigquery_tools.FileFormat.JSON,
                                       project=self._get_project(),
                                       job_labels=job_labels,
                                       include_header=False)
    else:
      job_ref = bq.perform_extract_job([gcs_location],
                                       export_job_name,
                                       table_reference,
                                       bigquery_tools.FileFormat.AVRO,
                                       project=self._get_project(),
                                       include_header=False,
                                       job_labels=job_labels,
                                       use_avro_logical_types=True)
    bq.wait_for_bq_job(job_ref)
    metadata_list = FileSystems.match([gcs_location])[0].metadata_list

    if isinstance(table_reference, ValueProvider):
      table_ref = bigquery_tools.parse_table_reference(
          element.table, project=self._get_project())
    else:
      table_ref = table_reference

    table = bq.get_table(
        table_ref.projectId, table_ref.datasetId, table_ref.tableId)

    return table.schema, metadata_list

  def _get_project(self):
    """Returns the project that queries and exports will be billed to."""
    project = self.options.view_as(GoogleCloudOptions).project
    if isinstance(project, ValueProvider):
      project = project.get()
    if not project:
      project = self.project
    return project
# Lightweight, picklable stand-in for a BigQuery TableFieldSchema:
# fields = nested list of FieldSchema; mode/name/type = schema field strings.
FieldSchema = collections.namedtuple('FieldSchema', 'fields mode name type')
class _JsonToDictCoder(coders.Coder):
  """A coder that decodes one UTF-8 JSON line into a Python dict.

  Values are converted to Python types according to the supplied BigQuery
  table schema.
  """
  def __init__(self, table_schema):
    self.fields = self._convert_to_tuple(table_schema.fields)
    # Maps a BigQuery type name to the converter applied to its raw value.
    self._converters = {
        'INTEGER': int,
        'INT64': int,
        'FLOAT': float,
        'FLOAT64': float,
        'NUMERIC': self._to_decimal,
        'BYTES': self._to_bytes,
    }

  @staticmethod
  def _to_decimal(value):
    return decimal.Decimal(value)

  @staticmethod
  def _to_bytes(value):
    """Converts value from str to bytes on Python 3.x. Does nothing on
    Python 2.7."""
    return value.encode('utf-8')

  @classmethod
  def _convert_to_tuple(cls, table_field_schemas):
    """Recursively converts the list of TableFieldSchema instances to the
    list of tuples to prevent errors when pickling and unpickling
    TableFieldSchema instances.
    """
    if not table_field_schemas:
      return []

    converted = []
    for field in table_field_schemas:
      converted.append(
          FieldSchema(
              cls._convert_to_tuple(field.fields),
              field.mode,
              field.name,
              field.type))
    return converted

  def decode(self, value):
    parsed_row = json.loads(value.decode('utf-8'))
    return self._decode_row(parsed_row, self.fields)

  def _decode_row(self, row: Dict[str, Any], schema_fields: List[FieldSchema]):
    for field in schema_fields:
      if field.name not in row:
        # The field exists in the schema, but not in this row: the extract
        # to JSON job does not preserve null fields, so treat it as None.
        row[field.name] = None
      elif field.mode == 'REPEATED':
        values = row[field.name]
        for index, item in enumerate(values):
          values[index] = self._decode_data(item, field)
      else:
        row[field.name] = self._decode_data(row[field.name], field)
    return row

  def _decode_data(self, obj: Any, field: FieldSchema):
    # A field with sub-fields is a nested record: recurse into it.
    if field.fields:
      return self._decode_row(obj, field.fields)
    converter = self._converters.get(field.type)
    # Types without a registered converter pass through unchanged.
    return obj if converter is None else converter(obj)

  def is_deterministic(self):
    return True

  def to_type_hint(self):
    return dict
# pytype: skip-file
from __future__ import absolute_import
import hashlib
import logging
import random
import uuid
from future.utils import iteritems
import apache_beam as beam
from apache_beam import pvalue
from apache_beam.io import filesystems as fs
from apache_beam.io.gcp import bigquery_tools
from apache_beam.io.gcp.bigquery_io_metadata import create_bigquery_io_metadata
from apache_beam.options import value_provider as vp
from apache_beam.options.pipeline_options import GoogleCloudOptions
from apache_beam.transforms import trigger
from apache_beam.transforms.display import DisplayDataItem
from apache_beam.transforms.window import GlobalWindows
_LOGGER = logging.getLogger(__name__)

ONE_TERABYTE = (1 << 40)

# The maximum file size for imports is 5TB. We keep our files under that.
_DEFAULT_MAX_FILE_SIZE = 4 * ONE_TERABYTE

# Cap on concurrently open file writers per bundle (see WriteRecordsToFile).
_DEFAULT_MAX_WRITERS_PER_BUNDLE = 20

# The maximum size for a single load job is 15 terabytes.
_MAXIMUM_LOAD_SIZE = 15 * ONE_TERABYTE

# Big query only supports up to 10 thousand URIs for a single load job.
_MAXIMUM_SOURCE_URIS = 10 * 1000

# If triggering_frequency is supplied, we will trigger the file write after
# this many records are written.
_FILE_TRIGGERING_RECORD_COUNT = 500000
def _generate_job_name(job_name, job_type, step_name):
  """Returns a randomized BigQuery job name for the given step and job type."""
  random_suffix = random.randint(0, 1000)
  return bigquery_tools.generate_bq_job_name(
      job_name=job_name,
      step_id=step_name,
      job_type=job_type,
      random=random_suffix)
def file_prefix_generator(
    with_validation=True, pipeline_gcs_location=None, temp_location=None):
  """Returns a callable that computes the GCS prefix for BigQuery load files.

  The returned function prefers the explicitly provided pipeline GCS location
  and falls back to temp_location, optionally validating that the result is a
  gs:// path.
  """
  def _generate_file_prefix(unused_elm):
    # If a gcs location is provided to the pipeline, then we shall use that.
    # Otherwise, we shall use the temp_location from pipeline options.
    gcs_base = pipeline_gcs_location.get() or temp_location

    # This will fail at pipeline execution time, but will fail early, as this
    # step doesn't have any dependencies (and thus will be one of the first
    # stages to be run).
    if with_validation and not (gcs_base and gcs_base.startswith('gs://')):
      raise ValueError(
          'Invalid GCS location: %r.\n'
          'Writing to BigQuery with FILE_LOADS method requires a'
          ' GCS location to be provided to write files to be loaded'
          ' into BigQuery. Please provide a GCS bucket through'
          ' custom_gcs_temp_location in the constructor of WriteToBigQuery'
          ' or the fallback option --temp_location, or pass'
          ' method="STREAMING_INSERTS" to WriteToBigQuery.' % gcs_base)

    return fs.FileSystems.join(gcs_base, 'bq_load', _bq_uuid())

  return _generate_file_prefix
def _make_new_file_writer(
    file_prefix,
    destination,
    file_format,
    schema=None,
    schema_side_inputs=tuple()):
  """Opens a fresh, uniquely named file for one destination table.

  Returns a (file_path, row_writer) pair, where the writer emits rows in the
  requested intermediate format (AVRO or JSON).
  """
  destination = bigquery_tools.get_hashable_destination(destination)

  # Windows does not allow : on filenames. Replacing with underscore.
  # Other disallowed characters are:
  # https://docs.microsoft.com/en-us/windows/desktop/fileio/naming-a-file
  destination = destination.replace(':', '.')

  directory = fs.FileSystems.join(file_prefix, destination)
  if not fs.FileSystems.exists(directory):
    fs.FileSystems.mkdirs(directory)

  file_path = fs.FileSystems.join(file_prefix, destination, str(uuid.uuid4()))

  if file_format == bigquery_tools.FileFormat.AVRO:
    # Avro needs a concrete schema up front: resolve callables and deferred
    # value providers now.
    if callable(schema):
      schema = schema(destination, *schema_side_inputs)
    elif isinstance(schema, vp.ValueProvider):
      schema = schema.get()
    row_writer = bigquery_tools.AvroRowWriter(
        fs.FileSystems.create(file_path, "application/avro"), schema)
  elif file_format == bigquery_tools.FileFormat.JSON:
    row_writer = bigquery_tools.JsonRowWriter(
        fs.FileSystems.create(file_path, "application/text"))
  else:
    raise ValueError((
        'Only AVRO and JSON are supported as intermediate formats for '
        'BigQuery WriteRecordsToFile, got: {}.').format(file_format))

  return file_path, row_writer
def _bq_uuid(seed=None):
if not seed:
return str(uuid.uuid4()).replace("-", "")
else:
return str(hashlib.md5(seed.encode('utf8')).hexdigest())
class _ShardDestinations(beam.DoFn):
  """Adds a shard number to the key of the KV element.

  Experimental; no backwards compatibility guarantees."""
  DEFAULT_SHARDING_FACTOR = 10

  def __init__(self, sharding_factor=DEFAULT_SHARDING_FACTOR):
    self.sharding_factor = sharding_factor

  def start_bundle(self):
    # Start each bundle at a random shard so load spreads across shards.
    self._shard_count = random.randrange(self.sharding_factor)

  def process(self, element):
    destination, row = element
    shard = self._shard_count % self.sharding_factor
    self._shard_count += 1
    yield ((destination, shard), row)
class WriteRecordsToFile(beam.DoFn):
  """Write input records to files before triggering a load job.

  This transform keeps up to ``max_files_per_bundle`` files open to write to. It
  receives (destination, record) tuples, and it writes the records to different
  files for each destination.

  If there are more than ``max_files_per_bundle`` destinations that we need to
  write to, then those records are grouped by their destination, and later
  written to files by ``WriteGroupedRecordsToFile``.

  It outputs two PCollections.
  """

  UNWRITTEN_RECORD_TAG = 'UnwrittenRecords'
  WRITTEN_FILE_TAG = 'WrittenFiles'

  def __init__(
      self,
      schema,
      max_files_per_bundle=_DEFAULT_MAX_WRITERS_PER_BUNDLE,
      max_file_size=_DEFAULT_MAX_FILE_SIZE,
      file_format=None):
    """Initialize a :class:`WriteRecordsToFile`.

    Args:
      schema: table schema (or a callable / value provider resolving to one)
        used when writing AVRO files.
      max_files_per_bundle (int): The maximum number of files that can be kept
        open during execution of this step in a worker. This is to avoid over-
        whelming the worker memory.
      max_file_size (int): The maximum size in bytes for a file to be used in
        an export job.
      file_format (str): intermediate file format; defaults to JSON.
    """
    self.schema = schema
    self.max_files_per_bundle = max_files_per_bundle
    self.max_file_size = max_file_size
    self.file_format = file_format or bigquery_tools.FileFormat.JSON

  def display_data(self):
    return {
        'max_files_per_bundle': self.max_files_per_bundle,
        'max_file_size': str(self.max_file_size),
        'file_format': self.file_format,
    }

  def start_bundle(self):
    # Maps destination -> (file_path, writer) for files open in this bundle.
    self._destination_to_file_writer = {}

  def process(self, element, file_prefix, *schema_side_inputs):
    """Take a tuple with (destination, row) and write to file or spill out.

    Destination may be a ``TableReference`` or a string, and row is a
    Python dictionary for a row to be inserted to BigQuery."""
    destination = bigquery_tools.get_hashable_destination(element[0])
    row = element[1]

    if destination not in self._destination_to_file_writer:
      if len(self._destination_to_file_writer) < self.max_files_per_bundle:
        self._destination_to_file_writer[destination] = _make_new_file_writer(
            file_prefix,
            destination,
            self.file_format,
            self.schema,
            schema_side_inputs)
      else:
        # Writer cap reached: spill the record to the secondary output so it
        # can be grouped and written by WriteGroupedRecordsToFile instead.
        yield pvalue.TaggedOutput(
            WriteRecordsToFile.UNWRITTEN_RECORD_TAG, element)
        return

    (file_path, writer) = self._destination_to_file_writer[destination]

    # TODO(pabloem): Is it possible for this to throw exception?
    writer.write(row)

    file_size = writer.tell()
    if file_size > self.max_file_size:
      # Roll the file over once it exceeds max_file_size; a new file will be
      # opened for this destination on the next record.
      writer.close()
      self._destination_to_file_writer.pop(destination)
      yield pvalue.TaggedOutput(
          WriteRecordsToFile.WRITTEN_FILE_TAG,
          (element[0], (file_path, file_size)))

  def finish_bundle(self):
    # Close all still-open writers and emit their (path, size) entries.
    for destination, file_path_writer in \
        iteritems(self._destination_to_file_writer):
      (file_path, writer) = file_path_writer
      file_size = writer.tell()
      writer.close()
      # finish_bundle outputs carry no element window, so wrap explicitly
      # in the global window.
      yield pvalue.TaggedOutput(
          WriteRecordsToFile.WRITTEN_FILE_TAG,
          GlobalWindows.windowed_value((destination, (file_path, file_size))))
    self._destination_to_file_writer = {}
class WriteGroupedRecordsToFile(beam.DoFn):
  """Writes (destination, record-iterable) pairs out to temporary files.

  Unlike ``WriteRecordsToFile``, the input here arrives already grouped by
  destination, so at most one file descriptor needs to be open at a time:
  a file is opened lazily for each group and rolled over to a new one
  whenever it grows past ``max_file_size``.

  Experimental; no backwards compatibility guarantees.
  """
  def __init__(
      self, schema, max_file_size=_DEFAULT_MAX_FILE_SIZE, file_format=None):
    self.schema = schema
    self.max_file_size = max_file_size
    self.file_format = file_format or bigquery_tools.FileFormat.JSON

  def process(self, element, file_prefix, *schema_side_inputs):
    destination, records = element
    current_path = None
    current_writer = None
    current_size = 0
    for record in records:
      if current_writer is None:
        # Lazily open a file for this destination.
        (current_path, current_writer) = _make_new_file_writer(
            file_prefix,
            destination,
            self.file_format,
            self.schema,
            schema_side_inputs)
      current_writer.write(record)
      current_size = current_writer.tell()
      if current_size > self.max_file_size:
        # The file is full: emit it and start a fresh one for the next
        # record of this destination.
        current_writer.close()
        yield (destination, (current_path, current_size))
        current_path, current_writer = None, None
    if current_writer is not None:
      current_writer.close()
      yield (destination, (current_path, current_size))
class TriggerCopyJobs(beam.DoFn):
  """Launches jobs to copy from temporary tables into the main target table.
  When a job needs to write to multiple destination tables, or when a single
  destination table needs to have multiple load jobs to write to it, files are
  loaded into temporary tables, and those tables are later copied to the
  destination tables.
  This transform emits (destination, job_reference) pairs.
  TODO(BEAM-7822): In file loads method of writing to BigQuery,
  copying from temp_tables to destination_table is not atomic.
  See: https://issues.apache.org/jira/browse/BEAM-7822
  """
  def __init__(
      self,
      create_disposition=None,
      write_disposition=None,
      test_client=None,
      step_name=None):
    self.create_disposition = create_disposition
    self.write_disposition = write_disposition
    self.test_client = test_client
    # Destination table ids already copied into at least once this bundle.
    self._observed_tables = set()
    self.bq_io_metadata = None
    self._step_name = step_name
  def display_data(self):
    return {
        'launchesBigQueryJobs': DisplayDataItem(
            True, label="This Dataflow job launches bigquery jobs.")
    }
  def start_bundle(self):
    self._observed_tables = set()
    self.bq_wrapper = bigquery_tools.BigQueryWrapper(client=self.test_client)
    if not self.bq_io_metadata:
      self.bq_io_metadata = create_bigquery_io_metadata(self._step_name)
  def process(self, element, job_name_prefix=None):
    """Trigger one copy job for a (destination, load_job_reference) pair.

    The temporary table to copy from shares the load job's jobId as its
    table id (see how TriggerLoadJobs names temporary tables).
    """
    destination = element[0]
    job_reference = element[1]
    copy_to_reference = bigquery_tools.parse_table_reference(destination)
    if copy_to_reference.projectId is None:
      # Fall back to the pipeline's 'project' option when none was given.
      copy_to_reference.projectId = vp.RuntimeValueProvider.get_value(
          'project', str, '')
    copy_from_reference = bigquery_tools.parse_table_reference(destination)
    # Point the source at the temporary table named after the load job id.
    copy_from_reference.tableId = job_reference.jobId
    if copy_from_reference.projectId is None:
      copy_from_reference.projectId = vp.RuntimeValueProvider.get_value(
          'project', str, '')
    # Deterministic job name derived from the source table coordinates.
    copy_job_name = '%s_%s' % (
        job_name_prefix,
        _bq_uuid(
            '%s:%s.%s' % (
                copy_from_reference.projectId,
                copy_from_reference.datasetId,
                copy_from_reference.tableId)))
    _LOGGER.info(
        "Triggering copy job from %s to %s",
        copy_from_reference,
        copy_to_reference)
    if copy_to_reference.tableId not in self._observed_tables:
      # When the write_disposition for a job is WRITE_TRUNCATE,
      # multiple copy jobs to the same destination can stump on
      # each other, truncate data, and write to the BQ table over and
      # over.
      # Thus, the first copy job runs with the user's write_disposition,
      # but afterwards, all jobs must always WRITE_APPEND to the table.
      # If they do not, subsequent copy jobs will clear out data appended
      # by previous jobs.
      write_disposition = self.write_disposition
      wait_for_job = True
      self._observed_tables.add(copy_to_reference.tableId)
    else:
      wait_for_job = False
      write_disposition = 'WRITE_APPEND'
    if not self.bq_io_metadata:
      self.bq_io_metadata = create_bigquery_io_metadata(self._step_name)
    job_reference = self.bq_wrapper._insert_copy_job(
        copy_to_reference.projectId,
        copy_job_name,
        copy_from_reference,
        copy_to_reference,
        create_disposition=self.create_disposition,
        write_disposition=write_disposition,
        job_labels=self.bq_io_metadata.add_additional_bq_job_labels())
    if wait_for_job:
      # Block on the first (possibly truncating) copy job so that the later
      # WRITE_APPEND jobs cannot race with it.
      self.bq_wrapper.wait_for_bq_job(job_reference, sleep_duration_sec=10)
    yield (destination, job_reference)
class TriggerLoadJobs(beam.DoFn):
  """Triggers the import jobs to BQ.
  Experimental; no backwards compatibility guarantees.
  """
  # Tagged output carrying the TableReference of each temporary table created.
  TEMP_TABLES = 'TemporaryTables'
  def __init__(
      self,
      schema=None,
      create_disposition=None,
      write_disposition=None,
      test_client=None,
      temporary_tables=False,
      additional_bq_parameters=None,
      source_format=None,
      step_name=None):
    self.schema = schema
    self.test_client = test_client
    self.temporary_tables = temporary_tables
    self.additional_bq_parameters = additional_bq_parameters or {}
    self.source_format = source_format
    self.bq_io_metadata = None
    self._step_name = step_name
    if self.temporary_tables:
      # If we are loading into temporary tables, we rely on the default create
      # and write dispositions, which mean that a new table will be created.
      self.create_disposition = None
      self.write_disposition = None
    else:
      self.create_disposition = create_disposition
      self.write_disposition = write_disposition
  def display_data(self):
    result = {
        'create_disposition': str(self.create_disposition),
        'write_disposition': str(self.write_disposition),
        'additional_bq_params': str(self.additional_bq_parameters),
        'schema': str(self.schema),
        'launchesBigQueryJobs': DisplayDataItem(
            True, label="This Dataflow job launches bigquery jobs.")
    }
    return result
  def start_bundle(self):
    self.bq_wrapper = bigquery_tools.BigQueryWrapper(client=self.test_client)
    if not self.bq_io_metadata:
      self.bq_io_metadata = create_bigquery_io_metadata(self._step_name)
  def process(self, element, load_job_name_prefix, *schema_side_inputs):
    """Trigger one load job for a (destination, files) partition."""
    # Each load job is assumed to have files respecting these constraints:
    # 1. Total size of all files < 15 TB (Max size for load jobs)
    # 2. Total no. of files in a single load job < 10,000
    # This assumption means that there will always be a single load job
    # triggered for each partition of files.
    destination = element[0]
    files = element[1]
    # Resolve the schema: it may be a callable of the destination, a
    # ValueProvider, or a concrete value.
    if callable(self.schema):
      schema = self.schema(destination, *schema_side_inputs)
    elif isinstance(self.schema, vp.ValueProvider):
      schema = self.schema.get()
    else:
      schema = self.schema
    # Resolve the additional load-job parameters the same way.
    if callable(self.additional_bq_parameters):
      additional_parameters = self.additional_bq_parameters(destination)
    elif isinstance(self.additional_bq_parameters, vp.ValueProvider):
      additional_parameters = self.additional_bq_parameters.get()
    else:
      additional_parameters = self.additional_bq_parameters
    table_reference = bigquery_tools.parse_table_reference(destination)
    if table_reference.projectId is None:
      table_reference.projectId = vp.RuntimeValueProvider.get_value(
          'project', str, '')
    # Load jobs for a single destination are always triggered from the same
    # worker. This means that we can generate a deterministic numbered job id,
    # and not need to worry.
    destination_hash = _bq_uuid(
        '%s:%s.%s' % (
            table_reference.projectId,
            table_reference.datasetId,
            table_reference.tableId))
    uid = _bq_uuid()
    job_name = '%s_%s_%s' % (load_job_name_prefix, destination_hash, uid)
    _LOGGER.debug(
        'Load job has %s files. Job name is %s.', len(files), job_name)
    create_disposition = self.create_disposition
    if self.temporary_tables:
      # If we are using temporary tables, then we must always create the
      # temporary tables, so we replace the create_disposition.
      create_disposition = 'CREATE_IF_NEEDED'
      # For temporary tables, we create a new table with the name with JobId.
      table_reference.tableId = job_name
      yield pvalue.TaggedOutput(TriggerLoadJobs.TEMP_TABLES, table_reference)
    # NOTE: the message previously read '...table %s.' 'Schema: ...', which
    # rendered as "table X.Schema:" — a space was missing between the two
    # concatenated literals.
    _LOGGER.info(
        'Triggering job %s to load data to BigQuery table %s. '
        'Schema: %s. Additional parameters: %s',
        job_name,
        table_reference,
        schema,
        additional_parameters)
    if not self.bq_io_metadata:
      self.bq_io_metadata = create_bigquery_io_metadata(self._step_name)
    job_reference = self.bq_wrapper.perform_load_job(
        table_reference,
        files,
        job_name,
        schema=schema,
        write_disposition=self.write_disposition,
        create_disposition=create_disposition,
        additional_load_parameters=additional_parameters,
        source_format=self.source_format,
        job_labels=self.bq_io_metadata.add_additional_bq_job_labels())
    yield (destination, job_reference)
class PartitionFiles(beam.DoFn):
  """Packs each destination's files into partitions sized for one load job.

  Input elements are (destination, [(file_path, file_size), ...]) pairs.
  Files are greedily packed into partitions bounded by
  ``max_partition_size`` bytes and ``max_files_per_partition`` files.
  Output is tagged: destinations needing more than one partition (and thus
  more than one load job) go to ``MULTIPLE_PARTITIONS_TAG``; all others go
  to ``SINGLE_PARTITION_TAG``.
  """
  MULTIPLE_PARTITIONS_TAG = 'MULTIPLE_PARTITIONS'
  SINGLE_PARTITION_TAG = 'SINGLE_PARTITION'

  class Partition(object):
    """One partition being filled with file paths, tracking its byte size."""
    def __init__(self, max_size, max_files, files=None, size=0):
      self.max_size = max_size
      self.max_files = max_files
      self.files = files if files is not None else []
      self.size = size

    def can_accept(self, file_size, no_of_files=1):
      """Return True iff adding ``no_of_files`` files totalling ``file_size``
      bytes keeps this partition within both of its limits."""
      # Simplified from an if/else that returned True/False literals.
      return ((self.size + file_size) <= self.max_size and
              (len(self.files) + no_of_files) <= self.max_files)

    def add(self, file_path, file_size):
      """Record ``file_path`` in this partition and grow its size."""
      self.files.append(file_path)
      self.size += file_size

  def __init__(self, max_partition_size, max_files_per_partition):
    self.max_partition_size = max_partition_size
    self.max_files_per_partition = max_files_per_partition

  def process(self, element):
    destination = element[0]
    files = element[1]
    partitions = []
    latest_partition = PartitionFiles.Partition(
        self.max_partition_size, self.max_files_per_partition)
    for file_path, file_size in files:
      if latest_partition.can_accept(file_size):
        latest_partition.add(file_path, file_size)
      else:
        # Current partition is full: seal it and start a new one with this
        # file as its first member.
        partitions.append(latest_partition.files)
        latest_partition = PartitionFiles.Partition(
            self.max_partition_size, self.max_files_per_partition)
        latest_partition.add(file_path, file_size)
    partitions.append(latest_partition.files)
    if len(partitions) > 1:
      output_tag = PartitionFiles.MULTIPLE_PARTITIONS_TAG
    else:
      output_tag = PartitionFiles.SINGLE_PARTITION_TAG
    for partition in partitions:
      yield pvalue.TaggedOutput(output_tag, (destination, partition))
class WaitForBQJobs(beam.DoFn):
  """Blocks until every BigQuery job in the side input has completed.

  Receives a list of (destination, job_reference) pairs as a side input and
  polls each referenced job until it finishes; a failed job fails this DoFn.

  Experimental; no backwards compatibility guarantees.
  """
  def __init__(self, test_client=None):
    self.test_client = test_client

  def start_bundle(self):
    self.bq_wrapper = bigquery_tools.BigQueryWrapper(client=self.test_client)

  def process(self, element, dest_ids_list):
    for _, job_ref in dest_ids_list:
      # max_retries=0 asks the wrapper to keep polling until the job either
      # finishes or fails.
      self.bq_wrapper.wait_for_bq_job(
          job_ref, sleep_duration_sec=10, max_retries=0)
    # Forward the destination-job pairs downstream once all jobs are done.
    return dest_ids_list
class DeleteTablesFn(beam.DoFn):
  """Deletes every BigQuery table reference it receives."""
  def __init__(self, test_client=None):
    self.test_client = test_client

  def start_bundle(self):
    self.bq_wrapper = bigquery_tools.BigQueryWrapper(client=self.test_client)

  def process(self, table_reference):
    # Log the raw input before normalizing it into a TableReference.
    _LOGGER.info("Deleting table %s", table_reference)
    parsed = bigquery_tools.parse_table_reference(table_reference)
    self.bq_wrapper._delete_table(
        parsed.projectId, parsed.datasetId, parsed.tableId)
class BigQueryBatchFileLoads(beam.PTransform):
  """Takes in a set of elements, and inserts them to BigQuery via batch loads.

  Expanding this transform returns a dict keyed by
  ``DESTINATION_JOBID_PAIRS``, ``DESTINATION_FILE_PAIRS`` and
  ``DESTINATION_COPY_JOBID_PAIRS``, each mapping to a PCollection of
  (destination, value) pairs.
  """
  DESTINATION_JOBID_PAIRS = 'destination_load_jobid_pairs'
  DESTINATION_FILE_PAIRS = 'destination_file_pairs'
  DESTINATION_COPY_JOBID_PAIRS = 'destination_copy_jobid_pairs'
  # Counter used to build a unique step name when the transform is unlabeled.
  COUNT = 0
  def __init__(
      self,
      destination,
      schema=None,
      custom_gcs_temp_location=None,
      create_disposition=None,
      write_disposition=None,
      triggering_frequency=None,
      temp_file_format=None,
      max_file_size=None,
      max_files_per_bundle=None,
      max_partition_size=None,
      max_files_per_partition=None,
      additional_bq_parameters=None,
      table_side_inputs=None,
      schema_side_inputs=None,
      test_client=None,
      validate=True,
      is_streaming_pipeline=False):
    self.destination = destination
    self.create_disposition = create_disposition
    self.write_disposition = write_disposition
    self.triggering_frequency = triggering_frequency
    # Unset limits fall back to the module-level defaults.
    self.max_file_size = max_file_size or _DEFAULT_MAX_FILE_SIZE
    self.max_files_per_bundle = (
        max_files_per_bundle or _DEFAULT_MAX_WRITERS_PER_BUNDLE)
    self.max_partition_size = max_partition_size or _MAXIMUM_LOAD_SIZE
    self.max_files_per_partition = (
        max_files_per_partition or _MAXIMUM_SOURCE_URIS)
    # Normalize the custom temp location into a ValueProvider.
    if (isinstance(custom_gcs_temp_location, str) or
        custom_gcs_temp_location is None):
      self._custom_gcs_temp_location = vp.StaticValueProvider(
          str, custom_gcs_temp_location or '')
    elif isinstance(custom_gcs_temp_location, vp.ValueProvider):
      self._custom_gcs_temp_location = custom_gcs_temp_location
    else:
      raise ValueError('custom_gcs_temp_location must be str or ValueProvider')
    self.test_client = test_client
    self.schema = schema
    self._temp_file_format = temp_file_format or bigquery_tools.FileFormat.JSON
    # If we have multiple destinations, then we will have multiple load jobs,
    # thus we will need temporary tables for atomicity.
    self.dynamic_destinations = bool(callable(destination))
    self.additional_bq_parameters = additional_bq_parameters or {}
    self.table_side_inputs = table_side_inputs or ()
    self.schema_side_inputs = schema_side_inputs or ()
    self.is_streaming_pipeline = is_streaming_pipeline
    self._validate = validate
    if self._validate:
      self.verify()
  def verify(self):
    """Validates configuration at construction time.

    Raises:
      ValueError: if a statically-known custom GCS temp location is not a
        ``gs://`` path, or if ``triggering_frequency`` is inconsistent with
        the pipeline's streaming mode.
    """
    # NOTE(review): this used to check
    #   isinstance(self._custom_gcs_temp_location.get(), vp.StaticValueProvider)
    # but ``.get()`` returns the wrapped *string*, so that isinstance was
    # always False and the gs:// validation below could never trigger.
    # Check the provider itself, and inspect the value only when it is
    # statically available.
    if isinstance(self._custom_gcs_temp_location, vp.StaticValueProvider):
      gcs_temp_location = self._custom_gcs_temp_location.get()
      if gcs_temp_location and not gcs_temp_location.startswith('gs://'):
        # Only fail if the custom location is provided, and it is not a GCS
        # location.
        raise ValueError(
            'Invalid GCS location: %r.\n'
            'Writing to BigQuery with FILE_LOADS method requires a '
            'GCS location to be provided to write files to be '
            'loaded into BigQuery. Please provide a GCS bucket, or '
            'pass method="STREAMING_INSERTS" to WriteToBigQuery.' %
            gcs_temp_location)
    # (Message fix: the two literals below used to concatenate into
    # "fileloads" because a space was missing.)
    if self.is_streaming_pipeline and not self.triggering_frequency:
      raise ValueError(
          'triggering_frequency must be specified to use file '
          'loads in streaming')
    elif not self.is_streaming_pipeline and self.triggering_frequency:
      raise ValueError(
          'triggering_frequency can only be used with file '
          'loads in streaming')
  def _window_fn(self):
    """Set the correct WindowInto PTransform"""
    # The user-supplied triggering_frequency is often chosen to control how
    # many BigQuery load jobs are triggered, to prevent going over BigQuery's
    # daily quota for load jobs. If this is set to a large value, currently we
    # have to buffer all the data until the trigger fires. Instead we ensure
    # that the files are written if a threshold number of records are ready.
    # We use only the user-supplied trigger on the actual BigQuery load.
    # This allows us to offload the data to the filesystem.
    if self.is_streaming_pipeline:
      return beam.WindowInto(beam.window.GlobalWindows(),
                             trigger=trigger.Repeatedly(
                                 trigger.AfterAny(
                                     trigger.AfterProcessingTime(
                                         self.triggering_frequency),
                                     trigger.AfterCount(
                                         _FILE_TRIGGERING_RECORD_COUNT))),
                             accumulation_mode=trigger.AccumulationMode\
                                 .DISCARDING)
    else:
      return beam.WindowInto(beam.window.GlobalWindows())
  def _write_files(self, destination_data_kv_pc, file_prefix_pcv):
    """Writes input records to temp files, spilling overflow destinations.

    Returns a PCollection of (destination, (file_path, file_size)) pairs.
    """
    outputs = (
        destination_data_kv_pc
        | beam.ParDo(
            WriteRecordsToFile(
                schema=self.schema,
                max_files_per_bundle=self.max_files_per_bundle,
                max_file_size=self.max_file_size,
                file_format=self._temp_file_format),
            file_prefix_pcv,
            *self.schema_side_inputs).with_outputs(
                WriteRecordsToFile.UNWRITTEN_RECORD_TAG,
                WriteRecordsToFile.WRITTEN_FILE_TAG))
    # A PCollection of (destination, file) tuples. It lists files with records,
    # and the destination each file is meant to be imported into.
    destination_files_kv_pc = outputs[WriteRecordsToFile.WRITTEN_FILE_TAG]
    # A PCollection of (destination, record) tuples. These are later sharded,
    # grouped, and all records for each destination-shard is written to files.
    # This PCollection is necessary because not all records can be written into
    # files in ``WriteRecordsToFile``.
    unwritten_records_pc = outputs[WriteRecordsToFile.UNWRITTEN_RECORD_TAG]
    more_destination_files_kv_pc = (
        unwritten_records_pc
        | beam.ParDo(_ShardDestinations())
        | "GroupShardedRows" >> beam.GroupByKey()
        | "DropShardNumber" >> beam.Map(lambda x: (x[0][0], x[1]))
        | "WriteGroupedRecordsToFile" >> beam.ParDo(
            WriteGroupedRecordsToFile(
                schema=self.schema, file_format=self._temp_file_format),
            file_prefix_pcv,
            *self.schema_side_inputs))
    # TODO(BEAM-9494): Remove the identity transform. We flatten both
    # PCollection paths and use an identity function to work around a
    # flatten optimization issue where the wrong coder is being used.
    all_destination_file_pairs_pc = (
        (destination_files_kv_pc, more_destination_files_kv_pc)
        | "DestinationFilesUnion" >> beam.Flatten()
        | "IdentityWorkaround" >> beam.Map(lambda x: x))
    if self.is_streaming_pipeline:
      # Apply the user's trigger back before we start triggering load jobs
      all_destination_file_pairs_pc = (
          all_destination_file_pairs_pc
          | "ApplyUserTrigger" >> beam.WindowInto(
              beam.window.GlobalWindows(),
              trigger=trigger.Repeatedly(
                  trigger.AfterAll(
                      trigger.AfterProcessingTime(self.triggering_frequency),
                      trigger.AfterCount(1))),
              accumulation_mode=trigger.AccumulationMode.DISCARDING))
    return all_destination_file_pairs_pc
  def _load_data(
      self,
      partitions_using_temp_tables,
      partitions_direct_to_destination,
      load_job_name_pcv,
      copy_job_name_pcv,
      p,
      step_name):
    """Load data to BigQuery
    Data is loaded into BigQuery in the following two ways:
      1. Single partition:
         When there is a single partition of files destined to a single
         destination, a single load job is triggered.
      2. Multiple partitions and/or Dynamic Destinations:
         When there are multiple partitions of files destined for a single
         destination or when Dynamic Destinations are used, multiple load jobs
         need to be triggered for each partition/destination. Load Jobs are
         triggered to temporary tables, and those are later copied to the actual
         appropriate destination table. This ensures atomicity when only some
         of the load jobs would fail but not other. If any of them fails, then
         copy jobs are not triggered.
    """
    # Load data using temp tables
    trigger_loads_outputs = (
        partitions_using_temp_tables
        | "TriggerLoadJobsWithTempTables" >> beam.ParDo(
            TriggerLoadJobs(
                schema=self.schema,
                write_disposition=self.write_disposition,
                create_disposition=self.create_disposition,
                test_client=self.test_client,
                temporary_tables=True,
                additional_bq_parameters=self.additional_bq_parameters,
                source_format=self._temp_file_format,
                step_name=step_name),
            load_job_name_pcv,
            *self.schema_side_inputs).with_outputs(
                TriggerLoadJobs.TEMP_TABLES, main='main'))
    temp_tables_load_job_ids_pc = trigger_loads_outputs['main']
    temp_tables_pc = trigger_loads_outputs[TriggerLoadJobs.TEMP_TABLES]
    # Wait for the temp-table load jobs, then copy their tables into the
    # real destinations.
    destination_copy_job_ids_pc = (
        p
        | "ImpulseMonitorLoadJobs" >> beam.Create([None])
        | "WaitForTempTableLoadJobs" >> beam.ParDo(
            WaitForBQJobs(self.test_client),
            beam.pvalue.AsList(temp_tables_load_job_ids_pc))
        | beam.ParDo(
            TriggerCopyJobs(
                create_disposition=self.create_disposition,
                write_disposition=self.write_disposition,
                test_client=self.test_client,
                step_name=step_name),
            copy_job_name_pcv))
    finished_copy_jobs_pc = (
        p
        | "ImpulseMonitorCopyJobs" >> beam.Create([None])
        | "WaitForCopyJobs" >> beam.ParDo(
            WaitForBQJobs(self.test_client),
            beam.pvalue.AsList(destination_copy_job_ids_pc)))
    # Once all copy jobs are done, delete the (deduplicated) temp tables.
    _ = (
        finished_copy_jobs_pc
        | "RemoveTempTables/PassTables" >> beam.FlatMap(
            lambda x,
            deleting_tables: deleting_tables,
            pvalue.AsIter(temp_tables_pc))
        | "RemoveTempTables/AddUselessValue" >> beam.Map(lambda x: (x, None))
        | "RemoveTempTables/DeduplicateTables" >> beam.GroupByKey()
        | "RemoveTempTables/GetTableNames" >> beam.Map(lambda elm: elm[0])
        | "RemoveTempTables/Delete" >> beam.ParDo(
            DeleteTablesFn(self.test_client)))
    # Load data directly to destination table
    destination_load_job_ids_pc = (
        partitions_direct_to_destination
        | "TriggerLoadJobsWithoutTempTables" >> beam.ParDo(
            TriggerLoadJobs(
                schema=self.schema,
                write_disposition=self.write_disposition,
                create_disposition=self.create_disposition,
                test_client=self.test_client,
                temporary_tables=False,
                additional_bq_parameters=self.additional_bq_parameters,
                source_format=self._temp_file_format,
                step_name=step_name),
            load_job_name_pcv,
            *self.schema_side_inputs))
    _ = (
        p
        | "ImpulseMonitorDestLoadJobs" >> beam.Create([None])
        | "WaitForDestinationLoadJobs" >> beam.ParDo(
            WaitForBQJobs(self.test_client),
            beam.pvalue.AsList(destination_load_job_ids_pc)))
    destination_load_job_ids_pc = (
        (temp_tables_load_job_ids_pc, destination_load_job_ids_pc)
        | beam.Flatten())
    return destination_load_job_ids_pc, destination_copy_job_ids_pc
  def expand(self, pcoll):
    p = pcoll.pipeline
    # Use the transform's label as the step name when available; otherwise
    # synthesize a unique one from the class-level counter.
    try:
      step_name = self.label
    except AttributeError:
      step_name = 'BigQueryBatchFileLoads_%d' % BigQueryBatchFileLoads.COUNT
      BigQueryBatchFileLoads.COUNT += 1
    temp_location = p.options.view_as(GoogleCloudOptions).temp_location
    job_name = (
        p.options.view_as(GoogleCloudOptions).job_name or 'AUTOMATIC_JOB_NAME')
    empty_pc = p | "ImpulseEmptyPC" >> beam.Create([])
    singleton_pc = p | "ImpulseSingleElementPC" >> beam.Create([None])
    # Job-name prefixes and the temp-file prefix are computed once at
    # pipeline execution time and passed around as singleton side inputs.
    load_job_name_pcv = pvalue.AsSingleton(
        singleton_pc
        | "LoadJobNamePrefix" >> beam.Map(
            lambda _: _generate_job_name(
                job_name, bigquery_tools.BigQueryJobTypes.LOAD, 'LOAD_STEP')))
    copy_job_name_pcv = pvalue.AsSingleton(
        singleton_pc
        | "CopyJobNamePrefix" >> beam.Map(
            lambda _: _generate_job_name(
                job_name, bigquery_tools.BigQueryJobTypes.COPY, 'COPY_STEP')))
    file_prefix_pcv = pvalue.AsSingleton(
        singleton_pc
        | "GenerateFilePrefix" >> beam.Map(
            file_prefix_generator(
                self._validate, self._custom_gcs_temp_location, temp_location)))
    destination_data_kv_pc = (
        pcoll
        | "RewindowIntoGlobal" >> self._window_fn()
        | "AppendDestination" >> beam.ParDo(
            bigquery_tools.AppendDestinationsFn(self.destination),
            *self.table_side_inputs))
    all_destination_file_pairs_pc = self._write_files(
        destination_data_kv_pc, file_prefix_pcv)
    grouped_files_pc = (
        all_destination_file_pairs_pc
        | "GroupFilesByTableDestinations" >> beam.GroupByKey())
    partitions = (
        grouped_files_pc
        | beam.ParDo(
            PartitionFiles(
                self.max_partition_size,
                self.max_files_per_partition)).with_outputs(
                    PartitionFiles.MULTIPLE_PARTITIONS_TAG,
                    PartitionFiles.SINGLE_PARTITION_TAG))
    multiple_partitions_per_destination_pc = partitions[
        PartitionFiles.MULTIPLE_PARTITIONS_TAG]
    single_partition_per_destination_pc = partitions[
        PartitionFiles.SINGLE_PARTITION_TAG]
    # When using dynamic destinations, elements with both single as well as
    # multiple partitions are loaded into BigQuery using temporary tables to
    # ensure atomicity.
    if self.dynamic_destinations:
      all_partitions = ((
          multiple_partitions_per_destination_pc,
          single_partition_per_destination_pc)
                        | "FlattenPartitions" >> beam.Flatten())
      destination_load_job_ids_pc, destination_copy_job_ids_pc = (
          self._load_data(all_partitions,
                          empty_pc,
                          load_job_name_pcv,
                          copy_job_name_pcv,
                          p,
                          step_name))
    else:
      destination_load_job_ids_pc, destination_copy_job_ids_pc = (
          self._load_data(multiple_partitions_per_destination_pc,
                          single_partition_per_destination_pc,
                          load_job_name_pcv,
                          copy_job_name_pcv,
                          p,
                          step_name))
    return {
        self.DESTINATION_JOBID_PAIRS: destination_load_job_ids_pc,
        self.DESTINATION_FILE_PAIRS: all_destination_file_pairs_pc,
        self.DESTINATION_COPY_JOBID_PAIRS: destination_copy_job_ids_pc,
    }
# pytype: skip-file
from __future__ import absolute_import
import argparse
import logging
import random
import time
import apache_beam as beam
from apache_beam.io.gcp.bigquery import ReadFromBigQuery
from apache_beam.options.pipeline_options import GoogleCloudOptions
from apache_beam.options.pipeline_options import PipelineOptions
from apache_beam.testing.test_pipeline import TestPipeline
from apache_beam.testing.util import assert_that
from apache_beam.testing.util import equal_to
class RowToStringWithSlowDown(beam.DoFn):
  """Maps every input element to a constant marker, optionally slowing down.

  With ``num_slow`` > 0, roughly that percentage of elements sleep for 10ms
  and are emitted as ['slow_row']; everything else becomes ['row'].
  """
  def process(self, element, num_slow=0, *args, **kwargs):
    if num_slow and random.random() * 100 < num_slow:
      time.sleep(0.01)
      yield ['slow_row']
    else:
      yield ['row']
def run(argv=None):
  """Runs a pipeline that reads a BigQuery table and checks its record count.

  Args:
    argv: Command-line arguments to parse (defaults to sys.argv).
  """
  def _str_to_bool(value):
    # argparse's ``type=bool`` is a trap: bool('False') is True because any
    # non-empty string is truthy, so '--beam_bq_source False' used to enable
    # the flag. Parse the common spellings explicitly instead.
    if isinstance(value, bool):
      return value
    if value.lower() in ('true', 't', 'yes', 'y', '1'):
      return True
    if value.lower() in ('false', 'f', 'no', 'n', '0'):
      return False
    raise argparse.ArgumentTypeError('Expected a boolean, got %r' % value)

  parser = argparse.ArgumentParser()
  parser.add_argument(
      '--input_table', required=True, help='Input table to process.')
  parser.add_argument(
      '--num_records',
      required=True,
      help='The expected number of records',
      type=int)
  parser.add_argument(
      '--num_slow',
      default=0,
      # Without an explicit type, a command-line value would stay a string
      # and the `rand < num_slow` comparison in the DoFn would raise.
      type=float,
      help=(
          'Percentage of rows that will be slow. '
          'Must be in the range [0, 100)'))
  parser.add_argument(
      '--beam_bq_source',
      default=False,
      type=_str_to_bool,
      help=(
          'Whether to use the new ReadFromBigQuery'
          ' transform, or the BigQuerySource.'))
  known_args, pipeline_args = parser.parse_known_args(argv)
  options = PipelineOptions(pipeline_args)

  with TestPipeline(options=options) as p:
    if known_args.beam_bq_source:
      reader = ReadFromBigQuery(
          table='%s:%s' %
          (options.view_as(GoogleCloudOptions).project, known_args.input_table))
    else:
      reader = beam.io.Read(beam.io.BigQuerySource(known_args.input_table))

    # pylint: disable=expression-not-assigned
    count = (
        p | 'read' >> reader
        | 'row to string' >> beam.ParDo(
            RowToStringWithSlowDown(), num_slow=known_args.num_slow)
        | 'count' >> beam.combiners.Count.Globally())

    # Fails the pipeline if the observed count differs from the expectation.
    assert_that(count, equal_to([known_args.num_records]))
if __name__ == '__main__':
  # Emit INFO-level logs so pipeline progress is visible when run directly.
  logging.getLogger().setLevel(logging.INFO)
  run()
# pytype: skip-file
from __future__ import absolute_import
from concurrent.futures import ThreadPoolExecutor
from concurrent.futures import as_completed
import apache_beam as beam
from apache_beam.io.gcp.dicomclient import DicomApiHttpClient
from apache_beam.transforms import PTransform
class DicomSearch(PTransform):
  """A PTransform used for retrieving DICOM instance metadata from Google
  Cloud DICOM store. It takes a PCollection of dicts as input and return
  a PCollection of dict as results:
  INPUT:
  The input dict represents DICOM web path parameters, which has the following
  string keys and values:
  {
  'project_id': str,
  'region': str,
  'dataset_id': str,
  'dicom_store_id': str,
  'search_type': str,
  'params': dict(str,str) (Optional),
  }
  Key-value pairs:
  project_id: Id of the project in which the DICOM store is
  located. (Required)
  region: Region where the DICOM store resides. (Required)
  dataset_id: Id of the dataset where DICOM store belongs to. (Required)
  dicom_store_id: Id of the dicom store. (Required)
  search_type: Which type of search it is, could only be one of the three
  values: 'instances', 'series', or 'studies'. (Required)
  params: A dict of str:str pairs used to refine QIDO search. (Optional)
  Supported tags in three categories:
  1.Studies:
  * StudyInstanceUID,
  * PatientName,
  * PatientID,
  * AccessionNumber,
  * ReferringPhysicianName,
  * StudyDate,
  2.Series: all study level search terms and
  * SeriesInstanceUID,
  * Modality,
  3.Instances: all study/series level search terms and
  * SOPInstanceUID,
  e.g. {"StudyInstanceUID":"1","SeriesInstanceUID":"2"}
  OUTPUT:
  The output dict wraps results as well as error messages:
  {
  'result': a list of dicts in JSON style.
  'success': boolean value telling whether the operation is successful.
  'input': detail ids and dicomweb path for this retrieval.
  'status': status code from the server, used as error message.
  }
  """
  def __init__(
      self, buffer_size=8, max_workers=5, client=None, credential=None):
    """Initializes DicomSearch.

    Args:
      buffer_size (int): Number of requests buffered before a parallel
        flush is triggered.
      max_workers (int): Maximum number of threads a worker can create. If
        it is set to one, all the requests will be processed sequentially
        in a worker.
      client (object): If specified, all the API calls will be made by this
        client instead of the default one (DicomApiHttpClient).
      credential: Google credential object; if specified, the HTTP client
        will use it to create sessions instead of the default.
    """
    self.buffer_size = buffer_size
    self.max_workers = max_workers
    self.client = client or DicomApiHttpClient()
    self.credential = credential
  def expand(self, pcoll):
    # Each input dict is validated and issued as a QIDO-RS search request;
    # requests are buffered and executed from a thread pool.
    return pcoll | beam.ParDo(
        _QidoReadFn(
            self.buffer_size, self.max_workers, self.client, self.credential))
class _QidoReadFn(beam.DoFn):
  """A DoFn for executing every qido query request.

  Valid input dicts are buffered and flushed in parallel batches of
  ``buffer_size`` using up to ``max_workers`` threads; invalid elements are
  emitted immediately with ``success=False``.
  """
  def __init__(self, buffer_size, max_workers, client, credential=None):
    self.buffer_size = buffer_size
    self.max_workers = max_workers
    self.client = client
    self.credential = credential

  def start_bundle(self):
    # Buffered (element, window, timestamp) triples awaiting a flush.
    self.buffer = []

  def finish_bundle(self):
    # Flush whatever remains in the buffer at the end of the bundle.
    for item in self._flush():
      yield item

  def validate_element(self, element):
    """Return (is_valid, error_message) for one input dict."""
    # Check if all required keys present.
    required_keys = [
        'project_id', 'region', 'dataset_id', 'dicom_store_id', 'search_type'
    ]
    for key in required_keys:
      if key not in element:
        error_message = 'Must have %s in the dict.' % (key)
        return False, error_message
    # Check if the search type is one of the supported values.
    if element['search_type'] in ['instances', 'studies', 'series']:
      return True, None
    else:
      error_message = (
          'Search type can only be "studies", '
          '"instances" or "series"')
      return False, error_message

  def process(
      self,
      element,
      window=beam.DoFn.WindowParam,
      timestamp=beam.DoFn.TimestampParam):
    # Check if the element is valid.
    valid, error_message = self.validate_element(element)
    if valid:
      self.buffer.append((element, window, timestamp))
      if len(self.buffer) >= self.buffer_size:
        for item in self._flush():
          yield item
    else:
      # Emitted when the input dict does not meet the requirements.
      out = {}
      out['result'] = []
      out['status'] = error_message
      out['input'] = element
      out['success'] = False
      yield out

  def make_request(self, element):
    """Issue one QIDO search request and wrap the response in a dict."""
    project_id = element['project_id']
    region = element['region']
    dataset_id = element['dataset_id']
    dicom_store_id = element['dicom_store_id']
    search_type = element['search_type']
    params = element.get('params')
    # Call qido search http client.
    result, status_code = self.client.qido_search(
        project_id, region, dataset_id, dicom_store_id,
        search_type, params, self.credential
    )
    out = {}
    out['result'] = result
    out['status'] = status_code
    out['input'] = element
    out['success'] = (status_code == 200)
    return out

  def process_buffer_element(self, buffer_element):
    # Thread job runner - each thread makes a Qido search request.
    value = self.make_request(buffer_element[0])
    windows = [buffer_element[1]]
    timestamp = buffer_element[2]
    return beam.utils.windowed_value.WindowedValue(
        value=value, timestamp=timestamp, windows=windows)

  def _flush(self):
    # Process the buffered elements in parallel. Using the executor as a
    # context manager guarantees its worker threads are shut down once all
    # requests complete; the previous version created a new executor per
    # flush without ever calling shutdown(), leaking idle threads.
    buffered, self.buffer = self.buffer, []
    with ThreadPoolExecutor(max_workers=self.max_workers) as executor:
      futures = [
          executor.submit(self.process_buffer_element, ele) for ele in buffered
      ]
      for f in as_completed(futures):
        yield f.result()
class FormatToQido(PTransform):
  """Converts DICOM pubsub message strings into Qido search request dicts.

  Takes a PCollection of strings and returns a PCollection of dicts. Some
  pubsub messages may not come from the DICOM API; those are recorded as
  failed conversions.

  INPUT:
    The input are normally strings from a Pubsub topic:
      "projects/PROJECT_ID/locations/LOCATION/datasets/DATASET_ID/
      dicomStores/DICOM_STORE_ID/dicomWeb/studies/STUDY_UID/
      series/SERIES_UID/instances/INSTANCE_UID"

  OUTPUT:
    The output dict encodes results as well as error messages:
      {
        'result': a dict representing an instance level qido search request.
        'success': boolean value telling whether the conversion is successful.
        'input': input pubsub message string.
      }
  """
  def __init__(self, credential=None):
    """Initializes FormatToQido.

    Args:
      credential: # type: Google credential object, if it is specified, the
        Http client will use it instead of the default one.
    """
    self.credential = credential

  def expand(self, pcoll):
    return pcoll | beam.ParDo(_ConvertStringToQido())
class _ConvertStringToQido(beam.DoFn):
  """A DoFn for converting pubsub string to qido search parameters."""
  def process(self, element):
    # Layout of a valid DICOM pubsub message, split on '/':
    #   projects/<id>/locations/<id>/datasets/<id>/dicomStores/<id>/
    #   dicomWeb/studies/<uid>/series/<uid>/instances/<uid>
    NUM_PUBSUB_STR_ENTRIES = 15
    NUM_DICOM_WEBPATH_PARAMETERS = 5
    NUM_TOTAL_PARAMETERS = 8
    INDEX_PROJECT_ID = 1
    INDEX_REGION = 3
    INDEX_DATASET_ID = 5
    INDEX_DICOMSTORE_ID = 7
    INDEX_STUDY_ID = 10
    INDEX_SERIE_ID = 12
    INDEX_INSTANCE_ID = 14

    entries = element.split('/')

    # Emitted whenever the message does not look like a DICOM API path.
    error_dict = {'result': {}, 'input': element, 'success': False}

    if len(entries) != NUM_PUBSUB_STR_ENTRIES:
      return [error_dict]

    required_keys = [
        'projects',
        'locations',
        'datasets',
        'dicomStores',
        'dicomWeb',
        'studies',
        'series',
        'instances'
    ]
    # The first five keys sit at even positions; after 'dicomWeb' the
    # remaining keys shift to odd positions. Verify both presence and place.
    for i in range(NUM_TOTAL_PARAMETERS):
      position = i * 2 if i < NUM_DICOM_WEBPATH_PARAMETERS else i * 2 - 1
      if entries[position] != required_keys[i]:
        return [error_dict]

    # Compose the dicom webpath parameters for an instance-level qido search.
    qido_dict = {
        'project_id': entries[INDEX_PROJECT_ID],
        'region': entries[INDEX_REGION],
        'dataset_id': entries[INDEX_DATASET_ID],
        'dicom_store_id': entries[INDEX_DICOMSTORE_ID],
        'search_type': 'instances',
        'params': {
            'StudyInstanceUID': entries[INDEX_STUDY_ID],
            'SeriesInstanceUID': entries[INDEX_SERIE_ID],
            'SOPInstanceUID': entries[INDEX_INSTANCE_ID],
        },
    }
    return [{'result': qido_dict, 'input': element, 'success': True}]
class UploadToDicomStore(PTransform):
  """A PTransform for storing instances to a DICOM store.

  Takes a PCollection of byte[] (or fileio objects) as input and returns a
  PCollection of dict as results.

  INPUT:
    This PTransform supports two types of input:
      1. Byte[]: representing dicom file.
      2. Fileio object: stream file object.

  OUTPUT:
    The output dict encodes status as well as error messages:
      {
        'success': boolean value telling whether the store is successful.
        'input': undeliverable data. Exactly the same as the input,
          only set if the operation is failed.
        'status': status code from the server, used as error messages.
      }
  """
  def __init__(
      self,
      destination_dict,
      input_type,
      buffer_size=8,
      max_workers=5,
      client=None,
      credential=None):
    """Initializes UploadToDicomStore.

    Args:
      destination_dict: # type: python dict, encodes DICOM endpoint
        information:
        {
          'project_id': str,
          'region': str,
          'dataset_id': str,
          'dicom_store_id': str,
        }
        Key-value pairs:
        * project_id: Id of the project in which DICOM store locates.
          (Required)
        * region: Region where the DICOM store resides. (Required)
        * dataset_id: Id of the dataset where DICOM store belongs to.
          (Required)
        * dicom_store_id: Id of the dicom store. (Required)
      input_type: # type: string, could only be 'bytes' or 'fileio'
      buffer_size: # type: Int. Size of the request buffer.
      max_workers: # type: Int. Maximum number of threads a worker can
        create. If it is set to one, all the request will be processed
        sequentially in a worker.
      client: # type: object. If it is specified, all the Api calls will
        made by this client instead of the default one (DicomApiHttpClient).
      credential: # type: Google credential object, if it is specified, the
        Http client will use it instead of the default one.
    """
    # Reject unsupported input types before the pipeline is constructed.
    if input_type not in ('bytes', 'fileio'):
      raise ValueError("input_type could only be 'bytes' or 'fileio'")
    self.destination_dict = destination_dict
    self.input_type = input_type
    self.buffer_size = buffer_size
    self.max_workers = max_workers
    self.client = client
    self.credential = credential

  def expand(self, pcoll):
    return pcoll | beam.ParDo(
        _StoreInstance(
            self.destination_dict,
            self.input_type,
            self.buffer_size,
            self.max_workers,
            self.client,
            self.credential))
class _StoreInstance(beam.DoFn):
  """A DoFn that reads or fetches dicom files then pushes them to a store."""
  def __init__(
      self,
      destination_dict,
      input_type,
      buffer_size,
      max_workers,
      client,
      credential=None):
    # Fail fast if the destination is missing any required field.
    required_keys = ['project_id', 'region', 'dataset_id', 'dicom_store_id']
    for key in required_keys:
      if key not in destination_dict:
        raise ValueError('Must have %s in the dict.' % (key))
    self.destination_dict = destination_dict
    # input_type is 'bytes' (raw DICOM file) or 'fileio' (readable object).
    self.input_type = input_type
    self.buffer_size = buffer_size
    self.max_workers = max_workers
    self.client = client
    self.credential = credential

  def start_bundle(self):
    # Per-bundle buffer of (element, window, timestamp) tuples.
    self.buffer = []

  def finish_bundle(self):
    # Drain whatever is still buffered at the end of the bundle.
    for item in self._flush():
      yield item

  def process(
      self,
      element,
      window=beam.DoFn.WindowParam,
      timestamp=beam.DoFn.TimestampParam):
    self.buffer.append((element, window, timestamp))
    # Store instances in batches of buffer_size.
    if len(self.buffer) >= self.buffer_size:
      for item in self._flush():
        yield item

  def make_request(self, dicom_file):
    """Sends one file to the DICOM store and records the result."""
    project_id = self.destination_dict['project_id']
    region = self.destination_dict['region']
    dataset_id = self.destination_dict['dataset_id']
    dicom_store_id = self.destination_dict['dicom_store_id']
    # Use the injected client when provided (e.g. in tests), otherwise the
    # default HTTP client.
    store_client = self.client if self.client else DicomApiHttpClient()
    _, status_code = store_client.dicomweb_store_instance(
        project_id, region, dataset_id, dicom_store_id, dicom_file,
        self.credential
    )
    out = {}
    out['status'] = status_code
    out['success'] = (status_code == 200)
    return out

  def read_dicom_file(self, buffer_element):
    """Reads the DICOM bytes from an input element.

    Returns:
      (True, data) on success, or (False, error_dict) when reading fails;
      error_dict records the exception as the output status.
    """
    try:
      if self.input_type == 'fileio':
        f = buffer_element.open()
        # try/finally so the file handle is closed even if read() raises
        # (it previously leaked on a failed read).
        try:
          return True, f.read()
        finally:
          f.close()
      # 'bytes' input is already the raw file content.
      return True, buffer_element
    except Exception as error_message:
      error_out = {}
      error_out['status'] = error_message
      error_out['success'] = False
      return False, error_out

  def process_buffer_element(self, buffer_element):
    # Thread job runner - each thread stores a DICOM file.
    success, read_result = self.read_dicom_file(buffer_element[0])
    windows = [buffer_element[1]]
    timestamp = buffer_element[2]
    value = self.make_request(read_result) if success else read_result
    # Attach the undeliverable input so callers can retry or inspect it.
    if not value['success']:
      value['input'] = buffer_element[0]
    return beam.utils.windowed_value.WindowedValue(
        value=value, timestamp=timestamp, windows=windows)

  def _flush(self):
    """Stores all buffered files on a thread pool and yields the results."""
    buffered, self.buffer = self.buffer, []
    # Context manager guarantees the worker threads are shut down once all
    # futures complete; previously the executor was never shut down.
    with ThreadPoolExecutor(max_workers=self.max_workers) as executor:
      futures = [
          executor.submit(self.process_buffer_element, ele) for ele in buffered
      ]
      for f in as_completed(futures):
        yield f.result()
# pytype: skip-file
from __future__ import absolute_import
from enum import Enum
from enum import auto
from typing import NamedTuple
from typing import Optional
from past.builtins import unicode
from apache_beam.transforms.external import BeamJarExpansionService
from apache_beam.transforms.external import ExternalTransform
from apache_beam.transforms.external import NamedTupleBasedPayloadBuilder
from apache_beam.typehints.schemas import named_tuple_to_schema
# Public API of this module.
__all__ = [
    'ReadFromSpanner',
    'SpannerDelete',
    'SpannerInsert',
    'SpannerInsertOrUpdate',
    'SpannerReplace',
    'SpannerUpdate',
    'TimestampBoundMode',
    'TimeUnit',
]
def default_io_expansion_service():
  """Returns the default cross-language expansion service for Spanner IO."""
  gradle_target = (
      'sdks:java:io:google-cloud-platform:expansion-service:shadowJar')
  return BeamJarExpansionService(gradle_target)
class TimeUnit(Enum):
  """Unit of the staleness value passed to the Spanner read transform."""
  NANOSECONDS = auto()
  MICROSECONDS = auto()
  MILLISECONDS = auto()
  SECONDS = auto()
  HOURS = auto()
  DAYS = auto()
class TimestampBoundMode(Enum):
  """How Cloud Spanner chooses the timestamp for a read or query.

  See ReadFromSpanner's ``timestamp_bound_mode`` parameter for the meaning
  of each member.
  """
  MAX_STALENESS = auto()
  EXACT_STALENESS = auto()
  READ_TIMESTAMP = auto()
  MIN_READ_TIMESTAMP = auto()
  STRONG = auto()
class ReadFromSpannerSchema(NamedTuple):
  """Payload row sent to the Java expansion service for a Spanner read.

  Field names mirror the configuration expected by the external transform;
  ``schema`` holds the serialized Beam Schema proto of the output row type.
  """
  instance_id: unicode
  database_id: unicode
  schema: bytes
  sql: Optional[unicode]
  table: Optional[unicode]
  project_id: Optional[unicode]
  host: Optional[unicode]
  emulator_host: Optional[unicode]
  batching: Optional[bool]
  timestamp_bound_mode: Optional[unicode]
  read_timestamp: Optional[unicode]
  staleness: Optional[int]
  time_unit: Optional[unicode]
class ReadFromSpanner(ExternalTransform):
  """
  A PTransform which reads from the specified Spanner instance's database.

  This transform required type of the row it has to return to provide the
  schema. Example::

    from typing import NamedTuple
    from apache_beam import coders

    class ExampleRow(NamedTuple):
      id: int
      name: unicode

    coders.registry.register_coder(ExampleRow, coders.RowCoder)

    with Pipeline() as p:
      result = (
          p
          | ReadFromSpanner(
              instance_id='your_instance_id',
              database_id='your_database_id',
              project_id='your_project_id',
              row_type=ExampleRow,
              query='SELECT * FROM some_table',
              timestamp_bound_mode=TimestampBoundMode.MAX_STALENESS,
              staleness=3,
              time_unit=TimeUnit.HOURS,
          ).with_output_types(ExampleRow))

  Experimental; no backwards compatibility guarantees.
  """
  URN = 'beam:external:java:spanner:read:v1'

  def __init__(
      self,
      project_id,
      instance_id,
      database_id,
      row_type=None,
      sql=None,
      table=None,
      host=None,
      emulator_host=None,
      batching=None,
      timestamp_bound_mode=None,
      read_timestamp=None,
      staleness=None,
      time_unit=None,
      expansion_service=None,
  ):
    """
    Initializes a read operation from Spanner.

    :param project_id: Specifies the Cloud Spanner project.
    :param instance_id: Specifies the Cloud Spanner instance.
    :param database_id: Specifies the Cloud Spanner database.
    :param row_type: Row type that fits the given query or table. Passed as
      NamedTuple, e.g. NamedTuple('name', [('row_name', unicode)])
    :param sql: An sql query to execute. It's results must fit the
      provided row_type. Don't use when table is set.
    :param table: A spanner table. When provided all columns from row_type
      will be selected to query. Don't use when query is set.
    :param batching: By default Batch API is used to read data from Cloud
      Spanner. It is useful to disable batching when the underlying query
      is not root-partitionable.
    :param host: Specifies the Cloud Spanner host.
    :param emulator_host: Specifies Spanner emulator host.
    :param timestamp_bound_mode: Defines how Cloud Spanner will choose a
      timestamp for a read-only transaction or a single read/query.
      Passed as TimestampBoundMode enum. Possible values:
      STRONG: A timestamp bound that will perform reads and queries at a
      timestamp where all previously committed transactions are visible.
      READ_TIMESTAMP: Returns a timestamp bound that will perform reads
      and queries at the given timestamp.
      MIN_READ_TIMESTAMP: Returns a timestamp bound that will perform reads
      and queries at a timestamp chosen to be at least given timestamp value.
      EXACT_STALENESS: Returns a timestamp bound that will perform reads and
      queries at an exact staleness. The timestamp is chosen soon after the
      read is started.
      MAX_STALENESS: Returns a timestamp bound that will perform reads and
      queries at a timestamp chosen to be at most time_unit stale.
    :param read_timestamp: Timestamp in string. Use only when
      timestamp_bound_mode is set to READ_TIMESTAMP or MIN_READ_TIMESTAMP.
    :param staleness: Staleness value as int. Use only when
      timestamp_bound_mode is set to EXACT_STALENESS or MAX_STALENESS.
      time_unit has to be set along with this param.
    :param time_unit: Time unit for staleness_value passed as TimeUnit enum.
      Possible values: NANOSECONDS, MICROSECONDS, MILLISECONDS, SECONDS,
      HOURS, DAYS.
    :param expansion_service: The address (host:port) of the ExpansionService.
    """
    assert row_type
    # Exactly one of sql/table may be given. The previous check,
    # `sql or table and not (sql and table)`, parsed as
    # `sql or (table and ...)` and therefore accepted both being set.
    assert bool(sql) != bool(table), 'Specify exactly one of sql or table'
    staleness_value = int(staleness) if staleness else None
    if staleness_value or time_unit:
      # staleness and time_unit must both be present and are only valid for
      # the two staleness-based bound modes. The mode alternatives are
      # parenthesized so an EXACT_STALENESS mode alone can no longer
      # short-circuit the whole check.
      assert staleness_value and time_unit and (
          timestamp_bound_mode is TimestampBoundMode.MAX_STALENESS or
          timestamp_bound_mode is TimestampBoundMode.EXACT_STALENESS)
    if read_timestamp:
      assert (
          timestamp_bound_mode is TimestampBoundMode.MIN_READ_TIMESTAMP or
          timestamp_bound_mode is TimestampBoundMode.READ_TIMESTAMP)
    super(ReadFromSpanner, self).__init__(
        self.URN,
        NamedTupleBasedPayloadBuilder(
            ReadFromSpannerSchema(
                instance_id=instance_id,
                database_id=database_id,
                sql=sql,
                table=table,
                schema=named_tuple_to_schema(row_type).SerializeToString(),
                project_id=project_id,
                host=host,
                emulator_host=emulator_host,
                batching=batching,
                timestamp_bound_mode=_get_enum_name(timestamp_bound_mode),
                read_timestamp=read_timestamp,
                staleness=staleness,
                time_unit=_get_enum_name(time_unit),
            ),
        ),
        expansion_service or default_io_expansion_service(),
    )
class WriteToSpannerSchema(NamedTuple):
  """Payload row sent to the Java expansion service for Spanner writes.

  Shared by all the Spanner mutation transforms (insert, update, replace,
  insert-or-update, delete); field names mirror the external transform's
  expected configuration.
  """
  project_id: unicode
  instance_id: unicode
  database_id: unicode
  table: unicode
  max_batch_size_bytes: Optional[int]
  max_number_mutations: Optional[int]
  max_number_rows: Optional[int]
  grouping_factor: Optional[int]
  host: Optional[unicode]
  emulator_host: Optional[unicode]
  commit_deadline: Optional[int]
  max_cumulative_backoff: Optional[int]
# Docstring template shared by the Spanner write transforms; filled in by
# the _add_doc decorator. Note: the beam.Map(...) call in the example was
# previously missing its closing parenthesis.
_CLASS_DOC = \
  """
  A PTransform which writes {operation} mutations to the specified Spanner
  table.

  This transform receives rows defined as NamedTuple. Example::

    from typing import NamedTuple
    from apache_beam import coders

    class {row_type}(NamedTuple):
      id: int
      name: unicode

    coders.registry.register_coder({row_type}, coders.RowCoder)

    with Pipeline() as p:
      _ = (
          p
          | 'Impulse' >> beam.Impulse()
          | 'Generate' >> beam.FlatMap(lambda x: range(num_rows))
          | 'To row' >> beam.Map(lambda n: {row_type}(n, str(n)))
              .with_output_types({row_type})
          | 'Write to Spanner' >> Spanner{operation_suffix}(
              instance_id='your_instance',
              database_id='existing_database',
              project_id='your_project_id',
              table='your_table'))

  Experimental; no backwards compatibility guarantees.
  """
# __init__ docstring template shared by the Spanner write transforms;
# filled in by the _add_doc decorator.
_INIT_DOC = \
  """
  Initializes {operation} operation to a Spanner table.

  :param project_id: Specifies the Cloud Spanner project.
  :param instance_id: Specifies the Cloud Spanner instance.
  :param database_id: Specifies the Cloud Spanner database.
  :param table: Specifies the Cloud Spanner table.
  :param max_batch_size_bytes: Specifies the batch size limit (max number of
    bytes mutated per batch). Default value is 1048576 bytes = 1MB.
  :param max_number_mutations: Specifies the cell mutation limit (maximum
    number of mutated cells per batch). Default value is 5000.
  :param max_number_rows: Specifies the row mutation limit (maximum number of
    mutated rows per batch). Default value is 500.
  :param grouping_factor: Specifies the multiple of max mutation (in terms
    of both bytes per batch and cells per batch) that is used to select a
    set of mutations to sort by key for batching. This sort uses local
    memory on the workers, so using large values can cause out of memory
    errors. Default value is 1000.
  :param host: Specifies the Cloud Spanner host.
  :param emulator_host: Specifies Spanner emulator host.
  :param commit_deadline: Specifies the deadline for the Commit API call.
    Default is 15 secs. DEADLINE_EXCEEDED errors will prompt a backoff/retry
    until the value of commit_deadline is reached. DEADLINE_EXCEEDED errors
    are reported with logging and counters. Pass seconds as value.
  :param max_cumulative_backoff: Specifies the maximum cumulative backoff
    time when retrying after DEADLINE_EXCEEDED errors. Default is 900s
    (15min). If the mutations still have not been written after this time,
    they are treated as a failure, and handled according to the setting of
    failure_mode. Pass seconds as value.
  :param expansion_service: The address (host:port) of the ExpansionService.
  """
def _add_doc(
    value,
    operation=None,
    row_type=None,
    operation_suffix=None,
):
  """Returns a decorator that formats *value* into the target's __doc__."""
  substitutions = dict(
      operation=operation,
      row_type=row_type,
      operation_suffix=operation_suffix,
  )

  def _apply(obj):
    # Works on both classes and functions: simply rewrite __doc__.
    obj.__doc__ = value.format(**substitutions)
    return obj

  return _apply
@_add_doc(
    _CLASS_DOC,
    operation='delete',
    row_type='ExampleKey',
    operation_suffix='Delete',
)
class SpannerDelete(ExternalTransform):
  URN = 'beam:external:java:spanner:delete:v1'

  @_add_doc(_INIT_DOC, operation='a delete')
  def __init__(
      self,
      project_id,
      instance_id,
      database_id,
      table,
      max_batch_size_bytes=None,
      max_number_mutations=None,
      max_number_rows=None,
      grouping_factor=None,
      host=None,
      emulator_host=None,
      commit_deadline=None,
      max_cumulative_backoff=None,
      expansion_service=None,
  ):
    # Deadlines/backoffs arrive in seconds; coerce to int for the payload
    # while mapping unset (falsy) values to None.
    if commit_deadline:
      commit_deadline = int(commit_deadline)
    else:
      commit_deadline = None
    if max_cumulative_backoff:
      max_cumulative_backoff = int(max_cumulative_backoff)
    else:
      max_cumulative_backoff = None
    payload = WriteToSpannerSchema(
        project_id=project_id,
        instance_id=instance_id,
        database_id=database_id,
        table=table,
        max_batch_size_bytes=max_batch_size_bytes,
        max_number_mutations=max_number_mutations,
        max_number_rows=max_number_rows,
        grouping_factor=grouping_factor,
        host=host,
        emulator_host=emulator_host,
        commit_deadline=commit_deadline,
        max_cumulative_backoff=max_cumulative_backoff,
    )
    super().__init__(
        self.URN,
        NamedTupleBasedPayloadBuilder(payload),
        expansion_service=expansion_service or default_io_expansion_service(),
    )
@_add_doc(
    _CLASS_DOC,
    operation='insert',
    row_type='ExampleRow',
    operation_suffix='Insert',
)
class SpannerInsert(ExternalTransform):
  URN = 'beam:external:java:spanner:insert:v1'

  @_add_doc(_INIT_DOC, operation='an insert')
  def __init__(
      self,
      project_id,
      instance_id,
      database_id,
      table,
      max_batch_size_bytes=None,
      max_number_mutations=None,
      max_number_rows=None,
      grouping_factor=None,
      host=None,
      emulator_host=None,
      commit_deadline=None,
      max_cumulative_backoff=None,
      expansion_service=None,
  ):
    # Deadlines/backoffs arrive in seconds; coerce to int for the payload
    # while mapping unset (falsy) values to None.
    if commit_deadline:
      commit_deadline = int(commit_deadline)
    else:
      commit_deadline = None
    if max_cumulative_backoff:
      max_cumulative_backoff = int(max_cumulative_backoff)
    else:
      max_cumulative_backoff = None
    payload = WriteToSpannerSchema(
        project_id=project_id,
        instance_id=instance_id,
        database_id=database_id,
        table=table,
        max_batch_size_bytes=max_batch_size_bytes,
        max_number_mutations=max_number_mutations,
        max_number_rows=max_number_rows,
        grouping_factor=grouping_factor,
        host=host,
        emulator_host=emulator_host,
        commit_deadline=commit_deadline,
        max_cumulative_backoff=max_cumulative_backoff,
    )
    super().__init__(
        self.URN,
        NamedTupleBasedPayloadBuilder(payload),
        expansion_service=expansion_service or default_io_expansion_service(),
    )
@_add_doc(
    _CLASS_DOC,
    operation='replace',
    row_type='ExampleRow',
    operation_suffix='Replace',
)
class SpannerReplace(ExternalTransform):
  URN = 'beam:external:java:spanner:replace:v1'

  @_add_doc(_INIT_DOC, operation='a replace')
  def __init__(
      self,
      project_id,
      instance_id,
      database_id,
      table,
      max_batch_size_bytes=None,
      max_number_mutations=None,
      max_number_rows=None,
      grouping_factor=None,
      host=None,
      emulator_host=None,
      commit_deadline=None,
      max_cumulative_backoff=None,
      expansion_service=None,
  ):
    # Deadlines/backoffs arrive in seconds; coerce to int for the payload
    # while mapping unset (falsy) values to None.
    if commit_deadline:
      commit_deadline = int(commit_deadline)
    else:
      commit_deadline = None
    if max_cumulative_backoff:
      max_cumulative_backoff = int(max_cumulative_backoff)
    else:
      max_cumulative_backoff = None
    payload = WriteToSpannerSchema(
        project_id=project_id,
        instance_id=instance_id,
        database_id=database_id,
        table=table,
        max_batch_size_bytes=max_batch_size_bytes,
        max_number_mutations=max_number_mutations,
        max_number_rows=max_number_rows,
        grouping_factor=grouping_factor,
        host=host,
        emulator_host=emulator_host,
        commit_deadline=commit_deadline,
        max_cumulative_backoff=max_cumulative_backoff,
    )
    super().__init__(
        self.URN,
        NamedTupleBasedPayloadBuilder(payload),
        expansion_service=expansion_service or default_io_expansion_service(),
    )
@_add_doc(
    _CLASS_DOC,
    operation='insert-or-update',
    row_type='ExampleRow',
    operation_suffix='InsertOrUpdate',
)
class SpannerInsertOrUpdate(ExternalTransform):
  URN = 'beam:external:java:spanner:insert_or_update:v1'

  @_add_doc(_INIT_DOC, operation='an insert-or-update')
  def __init__(
      self,
      project_id,
      instance_id,
      database_id,
      table,
      max_batch_size_bytes=None,
      max_number_mutations=None,
      max_number_rows=None,
      grouping_factor=None,
      host=None,
      emulator_host=None,
      commit_deadline=None,
      max_cumulative_backoff=None,
      expansion_service=None,
  ):
    # Deadlines/backoffs arrive in seconds; coerce to int for the payload
    # while mapping unset (falsy) values to None.
    if commit_deadline:
      commit_deadline = int(commit_deadline)
    else:
      commit_deadline = None
    if max_cumulative_backoff:
      max_cumulative_backoff = int(max_cumulative_backoff)
    else:
      max_cumulative_backoff = None
    payload = WriteToSpannerSchema(
        project_id=project_id,
        instance_id=instance_id,
        database_id=database_id,
        table=table,
        max_batch_size_bytes=max_batch_size_bytes,
        max_number_mutations=max_number_mutations,
        max_number_rows=max_number_rows,
        grouping_factor=grouping_factor,
        host=host,
        emulator_host=emulator_host,
        commit_deadline=commit_deadline,
        max_cumulative_backoff=max_cumulative_backoff,
    )
    super().__init__(
        self.URN,
        NamedTupleBasedPayloadBuilder(payload),
        expansion_service=expansion_service or default_io_expansion_service(),
    )
@_add_doc(
    _CLASS_DOC,
    operation='update',
    row_type='ExampleRow',
    operation_suffix='Update',
)
class SpannerUpdate(ExternalTransform):
  URN = 'beam:external:java:spanner:update:v1'

  @_add_doc(_INIT_DOC, operation='an update')
  def __init__(
      self,
      project_id,
      instance_id,
      database_id,
      table,
      max_batch_size_bytes=None,
      max_number_mutations=None,
      max_number_rows=None,
      grouping_factor=None,
      host=None,
      emulator_host=None,
      commit_deadline=None,
      max_cumulative_backoff=None,
      expansion_service=None,
  ):
    # Deadlines/backoffs arrive in seconds; coerce to int for the payload
    # while mapping unset (falsy) values to None.
    if commit_deadline:
      commit_deadline = int(commit_deadline)
    else:
      commit_deadline = None
    if max_cumulative_backoff:
      max_cumulative_backoff = int(max_cumulative_backoff)
    else:
      max_cumulative_backoff = None
    payload = WriteToSpannerSchema(
        project_id=project_id,
        instance_id=instance_id,
        database_id=database_id,
        table=table,
        max_batch_size_bytes=max_batch_size_bytes,
        max_number_mutations=max_number_mutations,
        max_number_rows=max_number_rows,
        grouping_factor=grouping_factor,
        host=host,
        emulator_host=emulator_host,
        commit_deadline=commit_deadline,
        max_cumulative_backoff=max_cumulative_backoff,
    )
    super().__init__(
        self.URN,
        NamedTupleBasedPayloadBuilder(payload),
        expansion_service=expansion_service or default_io_expansion_service(),
    )
def _get_enum_name(enum):
  """Returns the enum member's name, or None when no enum was given."""
  if enum is None:
    return None
  return enum.name
# pytype: skip-file
from __future__ import absolute_import
import re
from apache_beam.io.gcp import gce_metadata_util
# Strict cloud-label pattern: 1-63 chars of lowercase letters, digits,
# underscore or dash. Deliberately narrower than what GCP allows.
_VALID_CLOUD_LABEL_PATTERN = re.compile(r'^[a-z0-9\_\-]{1,63}$')
def _sanitize_value(value):
  """Sanitizes a value into a valid BigQuery label value."""
  # Lowercase, turn path separators into dashes, drop every remaining
  # disallowed character, then clamp to the 63-character label limit.
  lowered = value.lower().replace('/', '-')
  return re.sub(r'[^\w-]+', '', lowered)[:63]
def _is_valid_cloud_label_value(label_value):
  """Returns true if label_value is a valid cloud label string.

  This function can return false in cases where the label value is valid.
  However, it will not return true in a case where the label value is
  invalid. This is because a stricter set of allowed characters is used in
  this validator, because foreign language characters are not accepted.
  Thus, this should not be used as a generic validator for all cloud labels.

  See Also:
    https://cloud.google.com/compute/docs/labeling-resources

  Args:
    label_value: The label value to validate.

  Returns:
    True if the label value matches the strict label pattern.
  """
  # bool() so callers receive a proper boolean rather than a Match object.
  return bool(_VALID_CLOUD_LABEL_PATTERN.match(label_value))
def create_bigquery_io_metadata(step_name=None):
  """Creates a BigQueryIOMetadata.

  This will request metadata properly based on which runner is being used.
  """
  kwargs = {}
  # A job id present in GCE metadata means this program is running on a
  # Dataflow worker VM.
  dataflow_job_id = gce_metadata_util.fetch_dataflow_job_id()
  if dataflow_job_id:
    # Only attach labels that validate, so a bad label can never fail the
    # BigQuery job.
    if _is_valid_cloud_label_value(dataflow_job_id):
      kwargs['beam_job_id'] = dataflow_job_id
    if step_name:
      sanitized_step = _sanitize_value(step_name)
      if _is_valid_cloud_label_value(sanitized_step):
        kwargs['step_name'] = sanitized_step
  return BigQueryIOMetadata(**kwargs)
class BigQueryIOMetadata(object):
  """Metadata class for BigQueryIO. i.e. to use as BQ job labels.

  Do not construct directly, use the create_bigquery_io_metadata factory.
  Which will request metadata properly based on which runner is being used.
  """
  def __init__(self, beam_job_id=None, step_name=None):
    # Both values are optional; only set ones are propagated as job labels.
    self.beam_job_id = beam_job_id
    self.step_name = step_name

  def add_additional_bq_job_labels(self, job_labels=None):
    """Merges the known metadata into job_labels without overwriting."""
    job_labels = job_labels or {}
    # setdefault keeps any label the caller already supplied.
    if self.beam_job_id:
      job_labels.setdefault('beam_job_id', self.beam_job_id)
    if self.step_name:
      job_labels.setdefault('step_name', self.step_name)
    return job_labels
# pytype: skip-file
from __future__ import absolute_import
import logging
import apache_beam as beam
from apache_beam.metrics import Metrics
from apache_beam.transforms.display import DisplayDataItem
_LOGGER = logging.getLogger(__name__)
try:
from google.cloud.bigtable import Client
except ImportError:
_LOGGER.warning(
'ImportError: from google.cloud.bigtable import Client', exc_info=True)
# Public API of this module.
__all__ = ['WriteToBigTable']
class _BigTableWriteFn(beam.DoFn):
  """ Creates the connector can call and add_row to the batcher using each
  row in beam pipe line

  Args:
    project_id(str): GCP Project ID
    instance_id(str): GCP Instance ID
    table_id(str): GCP Table ID
  """
  def __init__(self, project_id, instance_id, table_id):
    """ Constructor of the Write connector of Bigtable

    Args:
      project_id(str): GCP Project of to write the Rows
      instance_id(str): GCP Instance to write the Rows
      table_id(str): GCP Table to write the `DirectRows`
    """
    super(_BigTableWriteFn, self).__init__()
    self.beam_options = {
        'project_id': project_id,
        'instance_id': instance_id,
        'table_id': table_id
    }
    self.table = None
    self.batcher = None
    self.written = Metrics.counter(self.__class__, 'Written Row')

  def __getstate__(self):
    # Only the connection options are picklable; the client objects and the
    # counter are rebuilt in __setstate__ / start_bundle.
    return self.beam_options

  def __setstate__(self, options):
    self.beam_options = options
    self.table = None
    self.batcher = None
    self.written = Metrics.counter(self.__class__, 'Written Row')

  def start_bundle(self):
    # Create the table connection lazily, but always create a *fresh*
    # batcher: finish_bundle sets self.batcher to None, so guarding the
    # batcher creation behind `table is None` left it as None for the
    # second and subsequent bundles, crashing in process().
    if self.table is None:
      client = Client(project=self.beam_options['project_id'])
      instance = client.instance(self.beam_options['instance_id'])
      self.table = instance.table(self.beam_options['table_id'])
    self.batcher = self.table.mutations_batcher()

  def process(self, row):
    self.written.inc()
    # You need to set the timestamp in the cells in this row object,
    # when we do a retry we will mutating the same object, but, with this
    # we are going to set our cell with new values.
    # Example:
    #   direct_row.set_cell('cf1',
    #                       'field1',
    #                       'value1',
    #                       timestamp=datetime.datetime.now())
    self.batcher.mutate(row)

  def finish_bundle(self):
    # Flush any pending mutations and drop the batcher; a new one is
    # created in the next start_bundle.
    self.batcher.flush()
    self.batcher = None

  def display_data(self):
    return {
        'projectId': DisplayDataItem(
            self.beam_options['project_id'], label='Bigtable Project Id'),
        'instanceId': DisplayDataItem(
            self.beam_options['instance_id'], label='Bigtable Instance Id'),
        'tableId': DisplayDataItem(
            self.beam_options['table_id'], label='Bigtable Table Id')
    }
class WriteToBigTable(beam.PTransform):
  """A transform to write to the Bigtable Table.

  A PTransform that write a list of `DirectRow` into the Bigtable Table
  """
  def __init__(self, project_id=None, instance_id=None, table_id=None):
    """The PTransform to access the Bigtable Write connector.

    Args:
      project_id(str): GCP Project of to write the Rows
      instance_id(str): GCP Instance to write the Rows
      table_id(str): GCP Table to write the `DirectRows`
    """
    super(WriteToBigTable, self).__init__()
    self.beam_options = {
        'project_id': project_id,
        'instance_id': instance_id,
        'table_id': table_id
    }

  def expand(self, pvalue):
    opts = self.beam_options
    return pvalue | beam.ParDo(
        _BigTableWriteFn(
            opts['project_id'], opts['instance_id'], opts['table_id']))
# pytype: skip-file
from __future__ import absolute_import
import collections
import itertools
import json
import logging
import random
import time
import uuid
from builtins import object
from builtins import zip
from typing import Dict
from typing import Union
from future.utils import itervalues
from past.builtins import unicode
import apache_beam as beam
from apache_beam import coders
from apache_beam import pvalue
from apache_beam.internal.gcp.json_value import from_json_value
from apache_beam.internal.gcp.json_value import to_json_value
from apache_beam.io.avroio import _create_avro_source as create_avro_source
from apache_beam.io.filesystems import CompressionTypes
from apache_beam.io.filesystems import FileSystems
from apache_beam.io.gcp import bigquery_tools
from apache_beam.io.gcp.bigquery_io_metadata import create_bigquery_io_metadata
from apache_beam.io.gcp.bigquery_read_internal import _BigQueryReadSplit
from apache_beam.io.gcp.bigquery_read_internal import _JsonToDictCoder
from apache_beam.io.gcp.bigquery_read_internal import _PassThroughThenCleanup
from apache_beam.io.gcp.bigquery_read_internal import bigquery_export_destination_uri
from apache_beam.io.gcp.bigquery_tools import RetryStrategy
from apache_beam.io.gcp.internal.clients import bigquery
from apache_beam.io.iobase import BoundedSource
from apache_beam.io.iobase import RangeTracker
from apache_beam.io.iobase import SDFBoundedSourceReader
from apache_beam.io.iobase import SourceBundle
from apache_beam.io.textio import _TextSource as TextSource
from apache_beam.metrics import Metrics
from apache_beam.options import value_provider as vp
from apache_beam.options.pipeline_options import DebugOptions
from apache_beam.options.pipeline_options import GoogleCloudOptions
from apache_beam.options.pipeline_options import StandardOptions
from apache_beam.options.value_provider import StaticValueProvider
from apache_beam.options.value_provider import ValueProvider
from apache_beam.options.value_provider import check_accessible
from apache_beam.runners.dataflow.native_io import iobase as dataflow_io
from apache_beam.transforms import DoFn
from apache_beam.transforms import ParDo
from apache_beam.transforms import PTransform
from apache_beam.transforms.display import DisplayDataItem
from apache_beam.transforms.sideinputs import SIDE_INPUT_PREFIX
from apache_beam.transforms.sideinputs import get_sideinput_index
from apache_beam.transforms.util import ReshufflePerKey
from apache_beam.transforms.window import GlobalWindows
from apache_beam.utils import retry
from apache_beam.utils.annotations import deprecated
from apache_beam.utils.annotations import experimental
try:
from apache_beam.io.gcp.internal.clients.bigquery import DatasetReference
from apache_beam.io.gcp.internal.clients.bigquery import TableReference
except ImportError:
DatasetReference = None
TableReference = None
# Names re-exported as this module's public API; everything else (prefixed
# with '_' or not listed here) is internal.
__all__ = [
    'TableRowJsonCoder',
    'BigQueryDisposition',
    'BigQuerySource',
    'BigQuerySink',
    'WriteToBigQuery',
    'ReadFromBigQuery',
    'ReadFromBigQueryRequest',
    'ReadAllFromBigQuery',
    'SCHEMA_AUTODETECT',
]
# Module-level logger for BigQuery IO.
_LOGGER = logging.getLogger(__name__)

"""
Template for BigQuery jobs created by BigQueryIO. This template is:
`"beam_bq_job_{job_type}_{job_id}_{step_id}{random}"`, where:
- `job_type` represents the BigQuery job type (e.g. extract / copy / load /
   query).
- `job_id` is the Beam job name.
- `step_id` is a UUID representing the Dataflow step that created the
   BQ job.
- `random` is a random string.
NOTE: This job name template does not have backwards compatibility guarantees.
"""
# Fixed: the documentation above previously described the template as
# "{step_id}_{random}" (with an underscore) and contained a doubled "the";
# it now matches the actual constant below.
BQ_JOB_NAME_TEMPLATE = "beam_bq_job_{job_type}_{job_id}_{step_id}{random}"

"""The number of shards per destination when writing via streaming inserts."""
DEFAULT_SHARDS_PER_DESTINATION = 500
@deprecated(since='2.11.0', current="bigquery_tools.parse_table_reference")
def _parse_table_reference(table, dataset=None, project=None):
  """Deprecated. Delegates to ``bigquery_tools.parse_table_reference``."""
  return bigquery_tools.parse_table_reference(table, dataset, project)
@deprecated(
    since='2.11.0', current="bigquery_tools.parse_table_schema_from_json")
def parse_table_schema_from_json(schema_string):
  """Deprecated. Delegates to ``bigquery_tools.parse_table_schema_from_json``."""
  return bigquery_tools.parse_table_schema_from_json(schema_string)
@deprecated(since='2.11.0', current="bigquery_tools.default_encoder")
def default_encoder(obj):
  """Deprecated. Delegates to ``bigquery_tools.default_encoder``."""
  return bigquery_tools.default_encoder(obj)
@deprecated(since='2.11.0', current="bigquery_tools.RowAsDictJsonCoder")
def RowAsDictJsonCoder(*args, **kwargs):
  """Deprecated. Forwards all arguments to ``bigquery_tools.RowAsDictJsonCoder``."""
  return bigquery_tools.RowAsDictJsonCoder(*args, **kwargs)
@deprecated(since='2.11.0', current="bigquery_tools.BigQueryReader")
def BigQueryReader(*args, **kwargs):
  """Deprecated. Forwards all arguments to ``bigquery_tools.BigQueryReader``."""
  return bigquery_tools.BigQueryReader(*args, **kwargs)
@deprecated(since='2.11.0', current="bigquery_tools.BigQueryWriter")
def BigQueryWriter(*args, **kwargs):
  """Deprecated. Forwards all arguments to ``bigquery_tools.BigQueryWriter``."""
  return bigquery_tools.BigQueryWriter(*args, **kwargs)
@deprecated(since='2.11.0', current="bigquery_tools.BigQueryWrapper")
def BigQueryWrapper(*args, **kwargs):
  """Deprecated. Forwards all arguments to ``bigquery_tools.BigQueryWrapper``."""
  return bigquery_tools.BigQueryWrapper(*args, **kwargs)
class TableRowJsonCoder(coders.Coder):
  """Encodes/decodes a ``bigquery.TableRow`` to/from a JSON string.

  A table schema is required for encoding (used when writing to sinks),
  since it supplies the ordered list of field names used as JSON object
  keys. Decoding (reading from sources) does not need a schema.
  """
  def __init__(self, table_schema=None):
    self.table_schema = table_schema
    # Precompute field names/types once; encoding uses them on every call.
    if self.table_schema:
      fields = self.table_schema.fields
      self.field_names = tuple(field.name for field in fields)
      self.field_types = tuple(field.type for field in fields)
  def encode(self, table_row):
    """Serializes *table_row* as a JSON object keyed in schema field order."""
    if self.table_schema is None:
      raise AttributeError(
          'The TableRowJsonCoder requires a table schema for '
          'encoding operations. Please specify a table_schema argument.')
    try:
      cell_values = [from_json_value(cell.v) for cell in table_row.f]
      return json.dumps(
          collections.OrderedDict(zip(self.field_names, cell_values)),
          allow_nan=False,
          default=bigquery_tools.default_encoder)
    except ValueError as e:
      raise ValueError('%s. %s' % (e, bigquery_tools.JSON_COMPLIANCE_ERROR))
  def decode(self, encoded_table_row):
    """Parses a JSON object string back into a ``bigquery.TableRow``."""
    ordered = json.loads(
        encoded_table_row, object_pairs_hook=collections.OrderedDict)
    return bigquery.TableRow(
        f=[
            bigquery.TableCell(v=to_json_value(value))
            for value in itervalues(ordered)
        ])
class BigQueryDisposition(object):
  """Standard string constants for table create and write dispositions."""
  CREATE_NEVER = 'CREATE_NEVER'
  CREATE_IF_NEEDED = 'CREATE_IF_NEEDED'
  WRITE_TRUNCATE = 'WRITE_TRUNCATE'
  WRITE_APPEND = 'WRITE_APPEND'
  WRITE_EMPTY = 'WRITE_EMPTY'
  @staticmethod
  def validate_create(disposition):
    """Returns *disposition* unchanged if it is a valid create disposition."""
    values = (
        BigQueryDisposition.CREATE_NEVER, BigQueryDisposition.CREATE_IF_NEEDED)
    if disposition in values:
      return disposition
    raise ValueError(
        'Invalid create disposition %s. Expecting %s' % (disposition, values))
  @staticmethod
  def validate_write(disposition):
    """Returns *disposition* unchanged if it is a valid write disposition."""
    values = (
        BigQueryDisposition.WRITE_TRUNCATE,
        BigQueryDisposition.WRITE_APPEND,
        BigQueryDisposition.WRITE_EMPTY)
    if disposition in values:
      return disposition
    raise ValueError(
        'Invalid write disposition %s. Expecting %s' % (disposition, values))
# -----------------------------------------------------------------------------
# BigQuerySource, BigQuerySink.
@deprecated(since='2.25.0', current="ReadFromBigQuery")
def BigQuerySource(
    table=None,
    dataset=None,
    project=None,
    query=None,
    validate=False,
    coder=None,
    use_standard_sql=False,
    flatten_results=True,
    kms_key=None,
    use_dataflow_native_source=False):
  """Deprecated entry point for reading from BigQuery.

  Returns the legacy Dataflow-native source when
  ``use_dataflow_native_source`` is set, and a ``ReadFromBigQuery``
  transform (configured to use JSON exports) otherwise.
  """
  if use_dataflow_native_source:
    return _BigQuerySource(
        table,
        dataset,
        project,
        query,
        validate,
        coder,
        use_standard_sql,
        flatten_results,
        kms_key)
  return ReadFromBigQuery(
      table=table,
      dataset=dataset,
      project=project,
      query=query,
      validate=validate,
      coder=coder,
      use_standard_sql=use_standard_sql,
      flatten_results=flatten_results,
      use_json_exports=True,
      kms_key=kms_key)
@deprecated(since='2.25.0', current="ReadFromBigQuery")
class _BigQuerySource(dataflow_io.NativeSource):
  """A source based on a BigQuery table."""
  def __init__(
      self,
      table=None,
      dataset=None,
      project=None,
      query=None,
      validate=False,
      coder=None,
      use_standard_sql=False,
      flatten_results=True,
      kms_key=None,
      temp_dataset=None):
    """Initialize a :class:`BigQuerySource`.
    Args:
      table (str): The ID of a BigQuery table. If specified all data of the
        table will be used as input of the current source. The ID must contain
        only letters ``a-z``, ``A-Z``, numbers ``0-9``, or underscores
        ``_``. If dataset and query arguments are :data:`None` then the table
        argument must contain the entire table reference specified as:
        ``'DATASET.TABLE'`` or ``'PROJECT:DATASET.TABLE'``.
      dataset (str): The ID of the dataset containing this table or
        :data:`None` if the table reference is specified entirely by the table
        argument or a query is specified.
      project (str): The ID of the project containing this table or
        :data:`None` if the table reference is specified entirely by the table
        argument or a query is specified.
      query (str): A query to be used instead of arguments table, dataset, and
        project.
      validate (bool): If :data:`True`, various checks will be done when source
        gets initialized (e.g., is table present?). This should be
        :data:`True` for most scenarios in order to catch errors as early as
        possible (pipeline construction instead of pipeline execution). It
        should be :data:`False` if the table is created during pipeline
        execution by a previous step.
      coder (~apache_beam.coders.coders.Coder): The coder for the table
        rows if serialized to disk. If :data:`None`, then the default coder is
        :class:`~apache_beam.io.gcp.bigquery_tools.RowAsDictJsonCoder`,
        which will interpret every line in a file as a JSON serialized
        dictionary. This argument needs a value only in special cases when
        returning table rows as dictionaries is not desirable.
      use_standard_sql (bool): Specifies whether to use BigQuery's standard SQL
        dialect for this query. The default value is :data:`False`.
        If set to :data:`True`, the query will use BigQuery's updated SQL
        dialect with improved standards compliance.
        This parameter is ignored for table inputs.
      flatten_results (bool): Flattens all nested and repeated fields in the
        query results. The default value is :data:`True`.
      kms_key (str): Optional Cloud KMS key name for use when creating new
        tables.
      temp_dataset (``google.cloud.bigquery.dataset.DatasetReference``):
        The dataset in which to create temporary tables when performing file
        loads. By default, a new dataset is created in the execution project for
        temporary tables.
    Raises:
      ValueError: if any of the following is true:
      1) the table reference as a string does not match the expected format
      2) neither a table nor a query is specified
      3) both a table and a query is specified.
    """
    # Import here to avoid adding the dependency for local running scenarios.
    try:
      # pylint: disable=wrong-import-order, wrong-import-position
      from apitools.base import py  # pylint: disable=unused-import
    except ImportError:
      raise ImportError(
          'Google Cloud IO not available, '
          'please install apache_beam[gcp]')
    # Exactly one of `table` or `query` must be supplied.
    if table is not None and query is not None:
      raise ValueError(
          'Both a BigQuery table and a query were specified.'
          ' Please specify only one of these.')
    elif table is None and query is None:
      raise ValueError('A BigQuery table or a query must be specified')
    elif table is not None:
      self.table_reference = bigquery_tools.parse_table_reference(
          table, dataset, project)
      self.query = None
      # SQL dialect is irrelevant for direct table reads (see docstring);
      # keep the legacy default.
      self.use_legacy_sql = True
    else:
      self.query = query
      # TODO(BEAM-1082): Change the internal flag to be standard_sql
      self.use_legacy_sql = not use_standard_sql
      self.table_reference = None
    self.validate = validate
    self.flatten_results = flatten_results
    self.coder = coder or bigquery_tools.RowAsDictJsonCoder()
    self.kms_key = kms_key
    self.temp_dataset = temp_dataset
  def display_data(self):
    """Returns display metadata: the query or table spec, plus validation."""
    if self.query is not None:
      res = {'query': DisplayDataItem(self.query, label='Query')}
    else:
      # Table spec renders as PROJECT:DATASET.TABLE when a project is known.
      if self.table_reference.projectId is not None:
        tableSpec = '{}:{}.{}'.format(
            self.table_reference.projectId,
            self.table_reference.datasetId,
            self.table_reference.tableId)
      else:
        tableSpec = '{}.{}'.format(
            self.table_reference.datasetId, self.table_reference.tableId)
      res = {'table': DisplayDataItem(tableSpec, label='Table')}
    res['validation'] = DisplayDataItem(
        self.validate, label='Validation Enabled')
    return res
  @property
  def format(self):
    """Source format name required for remote execution."""
    return 'bigquery'
  def reader(self, test_bigquery_client=None):
    """Returns a :class:`bigquery_tools.BigQueryReader` for this source."""
    return bigquery_tools.BigQueryReader(
        source=self,
        test_bigquery_client=test_bigquery_client,
        use_legacy_sql=self.use_legacy_sql,
        flatten_results=self.flatten_results,
        kms_key=self.kms_key)
class _CustomBigQuerySource(BoundedSource):
  """A ``BoundedSource`` that reads from BigQuery via BigQuery export jobs.

  The input table (or, for queries, a temporary table holding the query
  results) is exported to files in GCS -- Avro by default, or
  newline-delimited JSON when ``use_json_exports`` is set -- and one
  sub-source is produced per exported file.
  """
  def __init__(
      self,
      gcs_location=None,
      table=None,
      dataset=None,
      project=None,
      query=None,
      validate=False,
      pipeline_options=None,
      coder=None,
      use_standard_sql=False,
      flatten_results=True,
      kms_key=None,
      bigquery_job_labels=None,
      use_json_exports=False,
      job_name=None,
      step_name=None,
      unique_id=None,
      temp_dataset=None):
    # Exactly one of `table` or `query` must be supplied.
    if table is not None and query is not None:
      raise ValueError(
          'Both a BigQuery table and a query were specified.'
          ' Please specify only one of these.')
    elif table is None and query is None:
      raise ValueError('A BigQuery table or a query must be specified')
    elif table is not None:
      self.table_reference = bigquery_tools.parse_table_reference(
          table, dataset, project)
      self.query = None
      self.use_legacy_sql = True
    else:
      # Normalize plain strings so `self.query` is always a ValueProvider.
      if isinstance(query, (str, unicode)):
        query = StaticValueProvider(str, query)
      self.query = query
      # TODO(BEAM-1082): Change the internal flag to be standard_sql
      self.use_legacy_sql = not use_standard_sql
      self.table_reference = None
    self.gcs_location = gcs_location
    self.project = project
    self.validate = validate
    self.flatten_results = flatten_results
    self.coder = coder or _JsonToDictCoder
    self.kms_key = kms_key
    # Cached list of per-file sub-sources; computed on the first split().
    self.split_result = None
    self.options = pipeline_options
    self.bq_io_metadata = None  # Populate in setup, as it may make an RPC
    self.bigquery_job_labels = bigquery_job_labels or {}
    self.use_json_exports = use_json_exports
    self.temp_dataset = temp_dataset
    self._job_name = job_name or 'BQ_EXPORT_JOB'
    self._step_name = step_name
    self._source_uuid = unique_id
  def _get_bq_metadata(self):
    """Lazily creates and caches the BigQuery IO metadata (for job labels)."""
    if not self.bq_io_metadata:
      self.bq_io_metadata = create_bigquery_io_metadata(self._step_name)
    return self.bq_io_metadata
  def display_data(self):
    """Returns display metadata describing this source's configuration."""
    export_format = 'JSON' if self.use_json_exports else 'AVRO'
    return {
        'table': str(self.table_reference),
        'query': str(self.query),
        'project': str(self.project),
        'use_legacy_sql': self.use_legacy_sql,
        'bigquery_job_labels': json.dumps(self.bigquery_job_labels),
        'export_file_format': export_format,
        'launchesBigQueryJobs': DisplayDataItem(
            True, label="This Dataflow job launches bigquery jobs."),
    }
  def estimate_size(self):
    """Returns the number of bytes to read, or None when not determinable.

    For table reads this is the table's reported ``numBytes``; for
    accessible queries, a dry-run of the query reports the bytes processed.
    """
    bq = bigquery_tools.BigQueryWrapper()
    if self.table_reference is not None:
      table_ref = self.table_reference
      if (isinstance(self.table_reference, vp.ValueProvider) and
          self.table_reference.is_accessible()):
        table_ref = bigquery_tools.parse_table_reference(
            table_ref, project=self._get_project())
      elif isinstance(self.table_reference, vp.ValueProvider):
        # Size estimation is best effort. We return None as we have
        # no access to the table that we're querying.
        return None
      if not table_ref.projectId:
        table_ref.projectId = self._get_project()
      table = bq.get_table(
          table_ref.projectId, table_ref.datasetId, table_ref.tableId)
      return int(table.numBytes)
    elif self.query is not None and self.query.is_accessible():
      project = self._get_project()
      query_job_name = bigquery_tools.generate_bq_job_name(
          self._job_name,
          self._source_uuid,
          bigquery_tools.BigQueryJobTypes.QUERY,
          '%s_%s' % (int(time.time()), random.randint(0, 1000)))
      # A dry-run query reports totalBytesProcessed without running the job.
      job = bq._start_query_job(
          project,
          self.query.get(),
          self.use_legacy_sql,
          self.flatten_results,
          job_id=query_job_name,
          dry_run=True,
          kms_key=self.kms_key,
          job_labels=self._get_bq_metadata().add_additional_bq_job_labels(
              self.bigquery_job_labels))
      size = int(job.statistics.totalBytesProcessed)
      return size
    else:
      # Size estimation is best effort. We return None as we have
      # no access to the query that we're running.
      return None
  def _get_project(self):
    """Returns the project that queries and exports will be billed to."""
    project = self.options.view_as(GoogleCloudOptions).project
    if isinstance(project, vp.ValueProvider):
      project = project.get()
    # An explicit temp dataset takes precedence over the pipeline project.
    if self.temp_dataset:
      return self.temp_dataset.projectId
    if not project:
      project = self.project
    return project
  def _create_source(self, path, schema):
    """Creates the sub-source for one exported file (Avro, or JSON text)."""
    if not self.use_json_exports:
      return create_avro_source(path, use_fastavro=True)
    else:
      return TextSource(
          path,
          min_bundle_size=0,
          compression_type=CompressionTypes.UNCOMPRESSED,
          strip_trailing_newlines=True,
          coder=self.coder(schema))
  def split(self, desired_bundle_size, start_position=None, stop_position=None):
    """Runs the export (at most once) and yields one bundle per file."""
    if self.split_result is None:
      bq = bigquery_tools.BigQueryWrapper(
          temp_dataset_id=(
              self.temp_dataset.datasetId if self.temp_dataset else None))
      # For queries, first materialize the results into a temporary table.
      if self.query is not None:
        self._setup_temporary_dataset(bq)
        self.table_reference = self._execute_query(bq)
      if not self.table_reference.projectId:
        self.table_reference.projectId = self._get_project()
      schema, metadata_list = self._export_files(bq)
      self.split_result = [
          self._create_source(metadata.path, schema)
          for metadata in metadata_list
      ]
      if self.query is not None:
        bq.clean_up_temporary_dataset(self._get_project())
    for source in self.split_result:
      yield SourceBundle(1.0, source, None, None)
  def get_range_tracker(self, start_position, stop_position):
    """Returns a no-op tracker; actual reading happens in the sub-sources."""
    class CustomBigQuerySourceRangeTracker(RangeTracker):
      """A RangeTracker that always returns positions as None."""
      def start_position(self):
        return None
      def stop_position(self):
        return None
    return CustomBigQuerySourceRangeTracker()
  def read(self, range_tracker):
    # Reading is delegated to the per-file sub-sources produced by split().
    raise NotImplementedError('BigQuery source must be split before being read')
  @check_accessible(['query'])
  def _setup_temporary_dataset(self, bq):
    """Creates a temporary dataset, in the query's location, for results."""
    location = bq.get_query_location(
        self._get_project(), self.query.get(), self.use_legacy_sql)
    bq.create_temporary_dataset(self._get_project(), location)
  @check_accessible(['query'])
  def _execute_query(self, bq):
    """Runs the query into a temporary table and returns its reference."""
    query_job_name = bigquery_tools.generate_bq_job_name(
        self._job_name,
        self._source_uuid,
        bigquery_tools.BigQueryJobTypes.QUERY,
        '%s_%s' % (int(time.time()), random.randint(0, 1000)))
    job = bq._start_query_job(
        self._get_project(),
        self.query.get(),
        self.use_legacy_sql,
        self.flatten_results,
        job_id=query_job_name,
        kms_key=self.kms_key,
        job_labels=self._get_bq_metadata().add_additional_bq_job_labels(
            self.bigquery_job_labels))
    job_ref = job.jobReference
    bq.wait_for_bq_job(job_ref, max_retries=0)
    return bq._get_temp_table(self._get_project())
  def _export_files(self, bq):
    """Runs a BigQuery export job.
    Returns:
      bigquery.TableSchema instance, a list of FileMetadata instances
    """
    job_labels = self._get_bq_metadata().add_additional_bq_job_labels(
        self.bigquery_job_labels)
    export_job_name = bigquery_tools.generate_bq_job_name(
        self._job_name,
        self._source_uuid,
        bigquery_tools.BigQueryJobTypes.EXPORT,
        '%s_%s' % (int(time.time()), random.randint(0, 1000)))
    temp_location = self.options.view_as(GoogleCloudOptions).temp_location
    gcs_location = bigquery_export_destination_uri(
        self.gcs_location, temp_location, self._source_uuid)
    if self.use_json_exports:
      job_ref = bq.perform_extract_job([gcs_location],
                                       export_job_name,
                                       self.table_reference,
                                       bigquery_tools.FileFormat.JSON,
                                       project=self._get_project(),
                                       job_labels=job_labels,
                                       include_header=False)
    else:
      job_ref = bq.perform_extract_job([gcs_location],
                                       export_job_name,
                                       self.table_reference,
                                       bigquery_tools.FileFormat.AVRO,
                                       project=self._get_project(),
                                       include_header=False,
                                       job_labels=job_labels,
                                       use_avro_logical_types=True)
    bq.wait_for_bq_job(job_ref)
    metadata_list = FileSystems.match([gcs_location])[0].metadata_list
    if isinstance(self.table_reference, vp.ValueProvider):
      table_ref = bigquery_tools.parse_table_reference(
          self.table_reference.get(), project=self.project)
    else:
      table_ref = self.table_reference
    table = bq.get_table(
        table_ref.projectId, table_ref.datasetId, table_ref.tableId)
    return table.schema, metadata_list
@deprecated(since='2.11.0', current="WriteToBigQuery")
class BigQuerySink(dataflow_io.NativeSink):
  """A sink based on a BigQuery table.
  This BigQuery sink triggers a Dataflow native sink for BigQuery
  that only supports batch pipelines.
  Instead of using this sink directly, please use WriteToBigQuery
  transform that works for both batch and streaming pipelines.
  """
  def __init__(
      self,
      table,
      dataset=None,
      project=None,
      schema=None,
      create_disposition=BigQueryDisposition.CREATE_IF_NEEDED,
      write_disposition=BigQueryDisposition.WRITE_EMPTY,
      validate=False,
      coder=None,
      kms_key=None):
    """Initialize a BigQuerySink.
    Args:
      table (str): The ID of the table. The ID must contain only letters
        ``a-z``, ``A-Z``, numbers ``0-9``, or underscores ``_``. If
        **dataset** argument is :data:`None` then the table argument must
        contain the entire table reference specified as: ``'DATASET.TABLE'`` or
        ``'PROJECT:DATASET.TABLE'``.
      dataset (str): The ID of the dataset containing this table or
        :data:`None` if the table reference is specified entirely by the table
        argument.
      project (str): The ID of the project containing this table or
        :data:`None` if the table reference is specified entirely by the table
        argument.
      schema (str): The schema to be used if the BigQuery table to write has
        to be created. This can be either specified as a
        :class:`~apache_beam.io.gcp.internal.clients.bigquery.\
bigquery_v2_messages.TableSchema` object or a single string of the form
        ``'field1:type1,field2:type2,field3:type3'`` that defines a comma
        separated list of fields. Here ``'type'`` should specify the BigQuery
        type of the field. Single string based schemas do not support nested
        fields, repeated fields, or specifying a BigQuery mode for fields (mode
        will always be set to ``'NULLABLE'``).
      create_disposition (BigQueryDisposition): A string describing what
        happens if the table does not exist. Possible values are:
        * :attr:`BigQueryDisposition.CREATE_IF_NEEDED`: create if does not
        exist.
        * :attr:`BigQueryDisposition.CREATE_NEVER`: fail the write if does not
        exist.
      write_disposition (BigQueryDisposition): A string describing what
        happens if the table has already some data. Possible values are:
        * :attr:`BigQueryDisposition.WRITE_TRUNCATE`: delete existing rows.
        * :attr:`BigQueryDisposition.WRITE_APPEND`: add to existing rows.
        * :attr:`BigQueryDisposition.WRITE_EMPTY`: fail the write if table not
        empty.
      validate (bool): If :data:`True`, various checks will be done when sink
        gets initialized (e.g., is table present given the disposition
        arguments?). This should be :data:`True` for most scenarios in order to
        catch errors as early as possible (pipeline construction instead of
        pipeline execution). It should be :data:`False` if the table is created
        during pipeline execution by a previous step.
      coder (~apache_beam.coders.coders.Coder): The coder for the
        table rows if serialized to disk. If :data:`None`, then the default
        coder is :class:`~apache_beam.io.gcp.bigquery_tools.RowAsDictJsonCoder`,
        which will interpret every element written to the sink as a dictionary
        that will be JSON serialized as a line in a file. This argument needs a
        value only in special cases when writing table rows as dictionaries is
        not desirable.
      kms_key (str): Optional Cloud KMS key name for use when creating new
        tables.
    Raises:
      TypeError: if the schema argument is not a :class:`str` or a
        :class:`~apache_beam.io.gcp.internal.clients.bigquery.\
bigquery_v2_messages.TableSchema` object.
      ValueError: if the table reference as a string does not
        match the expected format.
    """
    # Import here to avoid adding the dependency for local running scenarios.
    try:
      # pylint: disable=wrong-import-order, wrong-import-position
      from apitools.base import py  # pylint: disable=unused-import
    except ImportError:
      raise ImportError(
          'Google Cloud IO not available, '
          'please install apache_beam[gcp]')
    self.table_reference = bigquery_tools.parse_table_reference(
        table, dataset, project)
    # Transform the table schema into a bigquery.TableSchema instance.
    if isinstance(schema, (str, unicode)):
      # TODO(silviuc): Should add a regex-based validation of the format.
      table_schema = bigquery.TableSchema()
      schema_list = [s.strip(' ') for s in schema.split(',')]
      for field_and_type in schema_list:
        field_name, field_type = field_and_type.split(':')
        field_schema = bigquery.TableFieldSchema()
        field_schema.name = field_name
        field_schema.type = field_type
        # Single-string schemas cannot express modes (see docstring above);
        # every field defaults to NULLABLE.
        field_schema.mode = 'NULLABLE'
        table_schema.fields.append(field_schema)
      self.table_schema = table_schema
    elif schema is None:
      # TODO(silviuc): Should check that table exists if no schema specified.
      self.table_schema = schema
    elif isinstance(schema, bigquery.TableSchema):
      self.table_schema = schema
    else:
      raise TypeError('Unexpected schema argument: %s.' % schema)
    self.create_disposition = BigQueryDisposition.validate_create(
        create_disposition)
    self.write_disposition = BigQueryDisposition.validate_write(
        write_disposition)
    self.validate = validate
    self.coder = coder or bigquery_tools.RowAsDictJsonCoder()
    self.kms_key = kms_key
  def display_data(self):
    """Returns display metadata: the table spec and validation flag."""
    res = {}
    if self.table_reference is not None:
      tableSpec = '{}.{}'.format(
          self.table_reference.datasetId, self.table_reference.tableId)
      if self.table_reference.projectId is not None:
        tableSpec = '{}:{}'.format(self.table_reference.projectId, tableSpec)
      res['table'] = DisplayDataItem(tableSpec, label='Table')
    res['validation'] = DisplayDataItem(
        self.validate, label="Validation Enabled")
    return res
  def schema_as_json(self):
    """Returns the TableSchema associated with the sink as a JSON string."""
    def schema_list_as_object(schema_list):
      """Returns a list of TableFieldSchema objects as a list of dicts."""
      fields = []
      for f in schema_list:
        fs = {'name': f.name, 'type': f.type}
        if f.description is not None:
          fs['description'] = f.description
        if f.mode is not None:
          fs['mode'] = f.mode
        # RECORD fields carry their nested schema recursively.
        if f.type.lower() == 'record':
          fs['fields'] = schema_list_as_object(f.fields)
        fields.append(fs)
      return fields
    return json.dumps(
        {'fields': schema_list_as_object(self.table_schema.fields)})
  @property
  def format(self):
    """Sink format name required for remote execution."""
    return 'bigquery'
  def writer(self, test_bigquery_client=None, buffer_size=None):
    """Returns a :class:`bigquery_tools.BigQueryWriter` for this sink."""
    return bigquery_tools.BigQueryWriter(
        sink=self,
        test_bigquery_client=test_bigquery_client,
        buffer_size=buffer_size)
# Process-wide cache of table specs ("project:dataset.table") that
# BigQueryWriteFn has already created or confirmed to exist, so the
# get-or-create RPC happens at most once per table per process.
_KNOWN_TABLES = set()
class BigQueryWriteFn(DoFn):
"""A ``DoFn`` that streams writes to BigQuery once the table is created."""
DEFAULT_MAX_BUFFERED_ROWS = 2000
DEFAULT_MAX_BATCH_SIZE = 500
FAILED_ROWS = 'FailedRows'
STREAMING_API_LOGGING_FREQUENCY_SEC = 300
def __init__(
self,
batch_size,
schema=None,
create_disposition=None,
write_disposition=None,
kms_key=None,
test_client=None,
max_buffered_rows=None,
retry_strategy=None,
additional_bq_parameters=None,
ignore_insert_ids=False):
"""Initialize a WriteToBigQuery transform.
Args:
batch_size: Number of rows to be written to BQ per streaming API insert.
schema: The schema to be used if the BigQuery table to write has to be
created. This can be either specified as a 'bigquery.TableSchema' object
or a single string of the form 'field1:type1,field2:type2,field3:type3'
that defines a comma separated list of fields. Here 'type' should
specify the BigQuery type of the field. Single string based schemas do
not support nested fields, repeated fields, or specifying a BigQuery
mode for fields (mode will always be set to 'NULLABLE').
create_disposition: A string describing what happens if the table does not
exist. Possible values are:
- BigQueryDisposition.CREATE_IF_NEEDED: create if does not exist.
- BigQueryDisposition.CREATE_NEVER: fail the write if does not exist.
write_disposition: A string describing what happens if the table has
already some data. Possible values are:
- BigQueryDisposition.WRITE_TRUNCATE: delete existing rows.
- BigQueryDisposition.WRITE_APPEND: add to existing rows.
- BigQueryDisposition.WRITE_EMPTY: fail the write if table not empty.
For streaming pipelines WriteTruncate can not be used.
kms_key: Optional Cloud KMS key name for use when creating new tables.
test_client: Override the default bigquery client used for testing.
max_buffered_rows: The maximum number of rows that are allowed to stay
buffered when running dynamic destinations. When destinations are
dynamic, it is important to keep caches small even when a single
batch has not been completely filled up.
retry_strategy: The strategy to use when retrying streaming inserts
into BigQuery. Options are shown in bigquery_tools.RetryStrategy attrs.
additional_bq_parameters (dict, callable): A set of additional parameters
to be passed when creating a BigQuery table. These are passed when
triggering a load job for FILE_LOADS, and when creating a new table for
STREAMING_INSERTS.
ignore_insert_ids: When using the STREAMING_INSERTS method to write data
to BigQuery, `insert_ids` are a feature of BigQuery that support
deduplication of events. If your use case is not sensitive to
duplication of data inserted to BigQuery, set `ignore_insert_ids`
to True to increase the throughput for BQ writing. See:
https://cloud.google.com/bigquery/streaming-data-into-bigquery#disabling_best_effort_de-duplication
"""
self.schema = schema
self.test_client = test_client
self.create_disposition = create_disposition
self.write_disposition = write_disposition
if write_disposition in (BigQueryDisposition.WRITE_EMPTY,
BigQueryDisposition.WRITE_TRUNCATE):
raise ValueError(
'Write disposition %s is not supported for'
' streaming inserts to BigQuery' % write_disposition)
self._rows_buffer = []
self._reset_rows_buffer()
self._total_buffered_rows = 0
self.kms_key = kms_key
self._max_batch_size = batch_size or BigQueryWriteFn.DEFAULT_MAX_BATCH_SIZE
self._max_buffered_rows = (
max_buffered_rows or BigQueryWriteFn.DEFAULT_MAX_BUFFERED_ROWS)
self._retry_strategy = retry_strategy or RetryStrategy.RETRY_ALWAYS
self.ignore_insert_ids = ignore_insert_ids
self.additional_bq_parameters = additional_bq_parameters or {}
# accumulate the total time spent in exponential backoff
self._throttled_secs = Metrics.counter(
BigQueryWriteFn, "cumulativeThrottlingSeconds")
self.batch_size_metric = Metrics.distribution(self.__class__, "batch_size")
self.batch_latency_metric = Metrics.distribution(
self.__class__, "batch_latency_ms")
self.failed_rows_metric = Metrics.distribution(
self.__class__, "rows_failed_per_batch")
self.bigquery_wrapper = None
self.streaming_api_logging_frequency_sec = (
BigQueryWriteFn.STREAMING_API_LOGGING_FREQUENCY_SEC)
def display_data(self):
return {
'max_batch_size': self._max_batch_size,
'max_buffered_rows': self._max_buffered_rows,
'retry_strategy': self._retry_strategy,
'create_disposition': str(self.create_disposition),
'write_disposition': str(self.write_disposition),
'additional_bq_parameters': str(self.additional_bq_parameters),
'ignore_insert_ids': str(self.ignore_insert_ids)
}
def _reset_rows_buffer(self):
self._rows_buffer = collections.defaultdict(lambda: [])
@staticmethod
def get_table_schema(schema):
"""Transform the table schema into a bigquery.TableSchema instance.
Args:
schema: The schema to be used if the BigQuery table to write has to be
created. This is a dictionary object created in the WriteToBigQuery
transform.
Returns:
table_schema: The schema to be used if the BigQuery table to write has
to be created but in the bigquery.TableSchema format.
"""
if schema is None:
return schema
elif isinstance(schema, (str, unicode)):
return bigquery_tools.parse_table_schema_from_json(schema)
elif isinstance(schema, dict):
return bigquery_tools.parse_table_schema_from_json(json.dumps(schema))
else:
raise TypeError('Unexpected schema argument: %s.' % schema)
def start_bundle(self):
self._reset_rows_buffer()
if not self.bigquery_wrapper:
self.bigquery_wrapper = bigquery_tools.BigQueryWrapper(
client=self.test_client)
(
bigquery_tools.BigQueryWrapper.HISTOGRAM_METRIC_LOGGER.
minimum_logging_frequency_msec
) = self.streaming_api_logging_frequency_sec * 1000
self._backoff_calculator = iter(
retry.FuzzedExponentialIntervals(
initial_delay_secs=0.2, num_retries=10000, max_delay_secs=1500))
def _create_table_if_needed(self, table_reference, schema=None):
str_table_reference = '%s:%s.%s' % (
table_reference.projectId,
table_reference.datasetId,
table_reference.tableId)
if str_table_reference in _KNOWN_TABLES:
return
if self.create_disposition == BigQueryDisposition.CREATE_NEVER:
# If we never want to create the table, we assume it already exists,
# and avoid the get-or-create step.
return
_LOGGER.debug(
'Creating or getting table %s with schema %s.', table_reference, schema)
table_schema = self.get_table_schema(schema)
if table_reference.projectId is None:
table_reference.projectId = vp.RuntimeValueProvider.get_value(
'project', str, '')
self.bigquery_wrapper.get_or_create_table(
table_reference.projectId,
table_reference.datasetId,
table_reference.tableId,
table_schema,
self.create_disposition,
self.write_disposition,
additional_create_parameters=self.additional_bq_parameters)
_KNOWN_TABLES.add(str_table_reference)
def process(self, element, *schema_side_inputs):
destination = element[0]
if callable(self.schema):
schema = self.schema(destination, *schema_side_inputs)
elif isinstance(self.schema, vp.ValueProvider):
schema = self.schema.get()
else:
schema = self.schema
self._create_table_if_needed(
bigquery_tools.parse_table_reference(destination), schema)
destination = bigquery_tools.get_hashable_destination(destination)
row_and_insert_id = element[1]
self._rows_buffer[destination].append(row_and_insert_id)
self._total_buffered_rows += 1
if len(self._rows_buffer[destination]) >= self._max_batch_size:
return self._flush_batch(destination)
elif self._total_buffered_rows >= self._max_buffered_rows:
return self._flush_all_batches()
def finish_bundle(self):
bigquery_tools.BigQueryWrapper.HISTOGRAM_METRIC_LOGGER.log_metrics(
reset_after_logging=True)
return self._flush_all_batches()
def _flush_all_batches(self):
_LOGGER.debug(
'Attempting to flush to all destinations. Total buffered: %s',
self._total_buffered_rows)
return itertools.chain(
*[
self._flush_batch(destination)
for destination in list(self._rows_buffer.keys())
if self._rows_buffer[destination]
])
  def _flush_batch(self, destination):
    """Stream the buffered rows for one destination into BigQuery.

    Retries failed rows according to self._retry_strategy, sleeping with
    exponential backoff between attempts. Returns a list of TaggedOutput
    values (one per permanently-failed row) under the FAILED_ROWS tag.
    """
    # Flush the current batch of rows to BigQuery.
    rows_and_insert_ids = self._rows_buffer[destination]
    table_reference = bigquery_tools.parse_table_reference(destination)
    if table_reference.projectId is None:
      # Fall back to the pipeline's 'project' option when the destination
      # did not specify one.
      table_reference.projectId = vp.RuntimeValueProvider.get_value(
          'project', str, '')
    _LOGGER.debug(
        'Flushing data to %s. Total %s rows.',
        destination,
        len(rows_and_insert_ids))
    self.batch_size_metric.update(len(rows_and_insert_ids))
    rows = [r[0] for r in rows_and_insert_ids]
    if self.ignore_insert_ids:
      # No insert ids: BigQuery skips best-effort de-duplication, which
      # increases streaming throughput.
      insert_ids = None
    else:
      insert_ids = [r[1] for r in rows_and_insert_ids]
    while True:
      start = time.time()
      # skip_invalid_rows=True makes BigQuery insert the valid rows and
      # report only the offending ones in `errors`.
      passed, errors = self.bigquery_wrapper.insert_rows(
          project_id=table_reference.projectId,
          dataset_id=table_reference.datasetId,
          table_id=table_reference.tableId,
          rows=rows,
          insert_ids=insert_ids,
          skip_invalid_rows=True)
      # Latency metric is recorded in milliseconds.
      self.batch_latency_metric.update((time.time() - start) * 1000)
      failed_rows = [rows[entry.index] for entry in errors]
      # Retry if ANY failed row has a retriable error reason under the
      # configured strategy.
      should_retry = any(
          RetryStrategy.should_retry(
              self._retry_strategy, entry.errors[0].reason) for entry in errors)
      if not passed:
        self.failed_rows_metric.update(len(failed_rows))
        message = (
            'There were errors inserting to BigQuery. Will{} retry. '
            'Errors were {}'.format(("" if should_retry else " not"), errors))
        if should_retry:
          _LOGGER.warning(message)
        else:
          _LOGGER.error(message)
      # Narrow the working set to the rows that failed; on retry only these
      # are re-sent. NOTE: insert_ids is intentionally(?) not narrowed along
      # with rows here — on retry the original id list is reused as-is.
      rows = failed_rows
      if not should_retry:
        break
      else:
        retry_backoff = next(self._backoff_calculator)
        _LOGGER.info(
            'Sleeping %s seconds before retrying insertion.', retry_backoff)
        time.sleep(retry_backoff)
        # Report throttling time so autoscaling can account for it.
        self._throttled_secs.inc(retry_backoff)
    self._total_buffered_rows -= len(self._rows_buffer[destination])
    del self._rows_buffer[destination]
    # Permanently-failed rows are emitted to the dead-letter output, tagged
    # FAILED_ROWS, in the global window.
    return [
        pvalue.TaggedOutput(
            BigQueryWriteFn.FAILED_ROWS,
            GlobalWindows.windowed_value((destination, row)))
        for row in failed_rows
    ]
class _StreamToBigQuery(PTransform):
  """Writes elements to BigQuery with the streaming-insert API.

  Pipeline shape: append destination -> attach (shard, insert_id) ->
  (optionally) reshuffle to commit insert ids -> drop shard ->
  BigQueryWriteFn. Emits failed rows under BigQueryWriteFn.FAILED_ROWS.
  """
  def __init__(
      self,
      table_reference,
      table_side_inputs,
      schema_side_inputs,
      schema,
      batch_size,
      create_disposition,
      write_disposition,
      kms_key,
      retry_strategy,
      additional_bq_parameters,
      ignore_insert_ids,
      test_client=None):
    self.table_reference = table_reference
    self.table_side_inputs = table_side_inputs
    self.schema_side_inputs = schema_side_inputs
    self.schema = schema
    self.batch_size = batch_size
    self.create_disposition = create_disposition
    self.write_disposition = write_disposition
    self.kms_key = kms_key
    self.retry_strategy = retry_strategy
    self.test_client = test_client
    self.additional_bq_parameters = additional_bq_parameters
    self.ignore_insert_ids = ignore_insert_ids
  class InsertIdPrefixFn(DoFn):
    """Attaches a random shard to each key and a unique insert id per row.

    Insert ids are '<bundle-uuid>-<row-count>', unique within a bundle.
    """
    def __init__(self, shards=DEFAULT_SHARDS_PER_DESTINATION):
      self.shards = shards
    def start_bundle(self):
      # Fresh prefix per bundle keeps insert ids unique across bundles.
      self.prefix = str(uuid.uuid4())
      self._row_count = 0
    def process(self, element):
      key = element[0]
      value = element[1]
      # NOTE(review): randint is inclusive on both ends, so this produces
      # shards+1 distinct shard values — presumably harmless, but confirm
      # it is intentional.
      key = (key, random.randint(0, self.shards))
      insert_id = '%s-%s' % (self.prefix, self._row_count)
      self._row_count += 1
      yield (key, (value, insert_id))
  def expand(self, input):
    bigquery_write_fn = BigQueryWriteFn(
        schema=self.schema,
        batch_size=self.batch_size,
        create_disposition=self.create_disposition,
        write_disposition=self.write_disposition,
        kms_key=self.kms_key,
        retry_strategy=self.retry_strategy,
        test_client=self.test_client,
        additional_bq_parameters=self.additional_bq_parameters,
        ignore_insert_ids=self.ignore_insert_ids)
    def drop_shard(elms):
      # ((key, shard), value) -> (key, value)
      key_and_shard = elms[0]
      key = key_and_shard[0]
      value = elms[1]
      return (key, value)
    sharded_data = (
        input
        | 'AppendDestination' >> beam.ParDo(
            bigquery_tools.AppendDestinationsFn(self.table_reference),
            *self.table_side_inputs)
        | 'AddInsertIdsWithRandomKeys' >> beam.ParDo(
            _StreamToBigQuery.InsertIdPrefixFn()))
    if not self.ignore_insert_ids:
      # Reshuffle checkpoints the (row, insert_id) pairs so retries after
      # this point reuse the same insert ids (needed for de-duplication).
      sharded_data = (sharded_data | 'CommitInsertIds' >> ReshufflePerKey())
    return (
        sharded_data
        | 'DropShard' >> beam.Map(drop_shard)
        | 'StreamInsertRows' >> ParDo(
            bigquery_write_fn, *self.schema_side_inputs).with_outputs(
                BigQueryWriteFn.FAILED_ROWS, main='main'))
# Flag to be passed as the `schema` argument of WriteToBigQuery to force
# schema autodetection (JSON-based file loads only; rejected for streaming
# inserts and Avro file loads).
SCHEMA_AUTODETECT = 'SCHEMA_AUTODETECT'
class WriteToBigQuery(PTransform):
  """Write data to BigQuery.

  This transform receives a PCollection of elements to be inserted into BigQuery
  tables. The elements would come in as Python dictionaries, or as `TableRow`
  instances.
  """
  class Method(object):
    """Sentinel values selecting the underlying write implementation."""
    DEFAULT = 'DEFAULT'
    STREAMING_INSERTS = 'STREAMING_INSERTS'
    FILE_LOADS = 'FILE_LOADS'

  def __init__(
      self,
      table,
      dataset=None,
      project=None,
      schema=None,
      create_disposition=BigQueryDisposition.CREATE_IF_NEEDED,
      write_disposition=BigQueryDisposition.WRITE_APPEND,
      kms_key=None,
      batch_size=None,
      max_file_size=None,
      max_files_per_bundle=None,
      test_client=None,
      custom_gcs_temp_location=None,
      method=None,
      insert_retry_strategy=None,
      additional_bq_parameters=None,
      table_side_inputs=None,
      schema_side_inputs=None,
      triggering_frequency=None,
      validate=True,
      temp_file_format=None,
      ignore_insert_ids=False):
    """Initialize a WriteToBigQuery transform.

    Args:
      table (str, callable, ValueProvider): The ID of the table, or a callable
        that returns it. The ID must contain only letters ``a-z``, ``A-Z``,
        numbers ``0-9``, or underscores ``_``. If dataset argument is
        :data:`None` then the table argument must contain the entire table
        reference specified as: ``'DATASET.TABLE'``
        or ``'PROJECT:DATASET.TABLE'``. If it's a callable, it must receive one
        argument representing an element to be written to BigQuery, and return
        a TableReference, or a string table name as specified above.
        Multiple destinations are only supported on Batch pipelines at the
        moment.
      dataset (str): The ID of the dataset containing this table or
        :data:`None` if the table reference is specified entirely by the table
        argument.
      project (str): The ID of the project containing this table or
        :data:`None` if the table reference is specified entirely by the table
        argument.
      schema (str,dict,ValueProvider,callable): The schema to be used if the
        BigQuery table to write has to be created. This can be specified as a
        :class:`~apache_beam.io.gcp.internal.clients.bigquery.\
bigquery_v2_messages.TableSchema` object, a dictionary (or a `ValueProvider`
        holding a JSON-serialized version of one of those), or a single
        string of the form
        ``'field1:type1,field2:type2,field3:type3'`` that defines a comma
        separated list of fields. Here ``'type'`` should specify the BigQuery
        type of the field. Single string based schemas do not support nested
        fields, repeated fields, or specifying a BigQuery mode for fields
        (mode will always be set to ``'NULLABLE'``).
        If a callable, then it should receive a destination (in the form of
        a TableReference or a string), and return a str, dict or TableSchema.
        One may also pass ``SCHEMA_AUTODETECT`` here when using JSON-based
        file loads, and BigQuery will try to infer the schema for the files
        that are being loaded.
      create_disposition (BigQueryDisposition): A string describing what
        happens if the table does not exist. Possible values are:

        * :attr:`BigQueryDisposition.CREATE_IF_NEEDED`: create if does not
          exist.
        * :attr:`BigQueryDisposition.CREATE_NEVER`: fail the write if does not
          exist.
      write_disposition (BigQueryDisposition): A string describing what happens
        if the table has already some data. Possible values are:

        * :attr:`BigQueryDisposition.WRITE_TRUNCATE`: delete existing rows.
        * :attr:`BigQueryDisposition.WRITE_APPEND`: add to existing rows.
        * :attr:`BigQueryDisposition.WRITE_EMPTY`: fail the write if table not
          empty.

        For streaming pipelines WriteTruncate can not be used.
      kms_key (str): Optional Cloud KMS key name for use when creating new
        tables.
      batch_size (int): Number of rows to be written to BQ per streaming API
        insert. The default is 500.
      test_client: Override the default bigquery client used for testing.
      max_file_size (int): The maximum size for a file to be written and then
        loaded into BigQuery. The default value is 4TB, which is 80% of the
        limit of 5TB for BigQuery to load any file.
      max_files_per_bundle(int): The maximum number of files to be concurrently
        written by a worker. The default here is 20. Larger values will allow
        writing to multiple destinations without having to reshard - but they
        increase the memory burden on the workers.
      custom_gcs_temp_location (str): A GCS location to store files to be used
        for file loads into BigQuery. By default, this will use the pipeline's
        temp_location, but for pipelines whose temp_location is not appropriate
        for BQ File Loads, users should pass a specific one.
      method: The method to use to write to BigQuery. It may be
        STREAMING_INSERTS, FILE_LOADS, or DEFAULT. An introduction on loading
        data to BigQuery: https://cloud.google.com/bigquery/docs/loading-data.
        DEFAULT will use STREAMING_INSERTS on Streaming pipelines and
        FILE_LOADS on Batch pipelines.
      insert_retry_strategy: The strategy to use when retrying streaming inserts
        into BigQuery. Options are shown in bigquery_tools.RetryStrategy attrs.
        Default is to retry always. This means that whenever there are rows
        that fail to be inserted to BigQuery, they will be retried indefinitely.
        Other retry strategy settings will produce a deadletter PCollection
        as output. Appropriate values are:

        * `RetryStrategy.RETRY_ALWAYS`: retry all rows if
          there are any kind of errors. Note that this will hold your pipeline
          back if there are errors until you cancel or update it.
        * `RetryStrategy.RETRY_NEVER`: rows with errors
          will not be retried. Instead they will be output to a dead letter
          queue under the `'FailedRows'` tag.
        * `RetryStrategy.RETRY_ON_TRANSIENT_ERROR`: retry
          rows with transient errors (e.g. timeouts). Rows with permanent errors
          will be output to dead letter queue under `'FailedRows'` tag.
      additional_bq_parameters (callable): A function that returns a dictionary
        with additional parameters to pass to BQ when creating / loading data
        into a table. These can be 'timePartitioning', 'clustering', etc. They
        are passed directly to the job load configuration. See
        https://cloud.google.com/bigquery/docs/reference/rest/v2/jobs#configuration.load
      table_side_inputs (tuple): A tuple with ``AsSideInput`` PCollections to be
        passed to the table callable (if one is provided).
      schema_side_inputs: A tuple with ``AsSideInput`` PCollections to be
        passed to the schema callable (if one is provided).
      triggering_frequency (int): Every triggering_frequency duration, a
        BigQuery load job will be triggered for all the data written since
        the last load job. BigQuery has limits on how many load jobs can be
        triggered per day, so be careful not to set this duration too low, or
        you may exceed daily quota. Often this is set to 5 or 10 minutes to
        ensure that the project stays well under the BigQuery quota.
        See https://cloud.google.com/bigquery/quota-policy for more information
        about BigQuery quotas.
      validate: Indicates whether to perform validation checks on
        inputs. This parameter is primarily used for testing.
      temp_file_format: The format to use for file loads into BigQuery. The
        options are NEWLINE_DELIMITED_JSON or AVRO, with NEWLINE_DELIMITED_JSON
        being used by default. For advantages and limitations of the two
        formats, see
        https://cloud.google.com/bigquery/docs/loading-data-cloud-storage-avro
        and
        https://cloud.google.com/bigquery/docs/loading-data-cloud-storage-json.
      ignore_insert_ids: When using the STREAMING_INSERTS method to write data
        to BigQuery, `insert_ids` are a feature of BigQuery that support
        deduplication of events. If your use case is not sensitive to
        duplication of data inserted to BigQuery, set `ignore_insert_ids`
        to True to increase the throughput for BQ writing. See:
        https://cloud.google.com/bigquery/streaming-data-into-bigquery#disabling_best_effort_de-duplication
    """
    self._table = table
    self._dataset = dataset
    self._project = project
    self.table_reference = bigquery_tools.parse_table_reference(
        table, dataset, project)
    self.create_disposition = BigQueryDisposition.validate_create(
        create_disposition)
    self.write_disposition = BigQueryDisposition.validate_write(
        write_disposition)
    if schema == SCHEMA_AUTODETECT:
      # Keep the sentinel as-is; expand() validates it against the method.
      self.schema = schema
    else:
      self.schema = bigquery_tools.get_dict_table_schema(schema)
    self.batch_size = batch_size
    self.kms_key = kms_key
    self.test_client = test_client

    # TODO(pabloem): Consider handling ValueProvider for this location.
    self.custom_gcs_temp_location = custom_gcs_temp_location
    self.max_file_size = max_file_size
    self.max_files_per_bundle = max_files_per_bundle
    self.method = method or WriteToBigQuery.Method.DEFAULT
    self.triggering_frequency = triggering_frequency
    self.insert_retry_strategy = insert_retry_strategy
    self._validate = validate
    self._temp_file_format = temp_file_format or bigquery_tools.FileFormat.JSON

    self.additional_bq_parameters = additional_bq_parameters or {}
    self.table_side_inputs = table_side_inputs or ()
    self.schema_side_inputs = schema_side_inputs or ()
    self._ignore_insert_ids = ignore_insert_ids

  # Dict/schema methods were moved to bigquery_tools, but keep references
  # here for backward compatibility.
  get_table_schema_from_string = \
      staticmethod(bigquery_tools.get_table_schema_from_string)
  table_schema_to_dict = staticmethod(bigquery_tools.table_schema_to_dict)
  get_dict_table_schema = staticmethod(bigquery_tools.get_dict_table_schema)

  def _compute_method(self, experiments, is_streaming_pipeline):
    """Resolve Method.DEFAULT to a concrete write method.

    Note: `experiments` is currently unused here but kept for interface
    stability with callers.
    """
    # If the new BQ sink is not activated for experiment flags, then we use
    # streaming inserts by default (it gets overridden in dataflow_runner.py).
    if self.method == self.Method.DEFAULT and is_streaming_pipeline:
      return self.Method.STREAMING_INSERTS
    elif self.method == self.Method.DEFAULT and not is_streaming_pipeline:
      return self.Method.FILE_LOADS
    else:
      return self.method

  def expand(self, pcoll):
    p = pcoll.pipeline

    if (isinstance(self.table_reference, TableReference) and
        self.table_reference.projectId is None):
      # Default the destination project to the pipeline's project.
      self.table_reference.projectId = pcoll.pipeline.options.view_as(
          GoogleCloudOptions).project

    experiments = p.options.view_as(DebugOptions).experiments or []

    # TODO(pabloem): Use a different method to determine if streaming or batch.
    is_streaming_pipeline = p.options.view_as(StandardOptions).streaming

    method_to_use = self._compute_method(experiments, is_streaming_pipeline)

    if method_to_use == WriteToBigQuery.Method.STREAMING_INSERTS:
      if self.schema == SCHEMA_AUTODETECT:
        raise ValueError(
            'Schema auto-detection is not supported for streaming '
            'inserts into BigQuery. Only for File Loads.')
      if self.triggering_frequency:
        raise ValueError(
            'triggering_frequency can only be used with '
            'FILE_LOADS method of writing to BigQuery.')
      outputs = pcoll | _StreamToBigQuery(
          self.table_reference,
          self.table_side_inputs,
          self.schema_side_inputs,
          self.schema,
          self.batch_size,
          self.create_disposition,
          self.write_disposition,
          self.kms_key,
          self.insert_retry_strategy,
          self.additional_bq_parameters,
          self._ignore_insert_ids,
          test_client=self.test_client)
      return {BigQueryWriteFn.FAILED_ROWS: outputs[BigQueryWriteFn.FAILED_ROWS]}
    else:
      if self._temp_file_format == bigquery_tools.FileFormat.AVRO:
        if self.schema == SCHEMA_AUTODETECT:
          raise ValueError(
              'Schema auto-detection is not supported when using Avro based '
              'file loads into BigQuery. Please specify a schema or set '
              'temp_file_format="NEWLINE_DELIMITED_JSON"')
        if self.schema is None:
          raise ValueError(
              'A schema must be provided when writing to BigQuery using '
              'Avro based file loads')

      from apache_beam.io.gcp import bigquery_file_loads
      return pcoll | bigquery_file_loads.BigQueryBatchFileLoads(
          destination=self.table_reference,
          schema=self.schema,
          create_disposition=self.create_disposition,
          write_disposition=self.write_disposition,
          triggering_frequency=self.triggering_frequency,
          temp_file_format=self._temp_file_format,
          max_file_size=self.max_file_size,
          max_files_per_bundle=self.max_files_per_bundle,
          custom_gcs_temp_location=self.custom_gcs_temp_location,
          test_client=self.test_client,
          table_side_inputs=self.table_side_inputs,
          schema_side_inputs=self.schema_side_inputs,
          additional_bq_parameters=self.additional_bq_parameters,
          validate=self._validate,
          is_streaming_pipeline=is_streaming_pipeline)

  def display_data(self):
    res = {}
    if self.table_reference is not None:
      tableSpec = '{}.{}'.format(
          self.table_reference.datasetId, self.table_reference.tableId)
      if self.table_reference.projectId is not None:
        tableSpec = '{}:{}'.format(self.table_reference.projectId, tableSpec)
      res['table'] = DisplayDataItem(tableSpec, label='Table')
    return res

  def to_runner_api_parameter(self, context):
    from apache_beam.internal import pickler

    # It'd be nice to name these according to their actual
    # names/positions in the orignal argument list, but such a
    # transformation is currently irreversible given how
    # remove_objects_from_args and insert_values_in_args
    # are currently implemented.
    def serialize(side_inputs):
      return {(SIDE_INPUT_PREFIX + '%s') % ix:
              si.to_runner_api(context).SerializeToString()
              for ix,
              si in enumerate(side_inputs)}

    table_side_inputs = serialize(self.table_side_inputs)
    schema_side_inputs = serialize(self.schema_side_inputs)

    config = {
        'table': self._table,
        'dataset': self._dataset,
        'project': self._project,
        'schema': self.schema,
        'create_disposition': self.create_disposition,
        'write_disposition': self.write_disposition,
        'kms_key': self.kms_key,
        'batch_size': self.batch_size,
        'max_file_size': self.max_file_size,
        'max_files_per_bundle': self.max_files_per_bundle,
        'custom_gcs_temp_location': self.custom_gcs_temp_location,
        'method': self.method,
        'insert_retry_strategy': self.insert_retry_strategy,
        'additional_bq_parameters': self.additional_bq_parameters,
        'table_side_inputs': table_side_inputs,
        'schema_side_inputs': schema_side_inputs,
        'triggering_frequency': self.triggering_frequency,
        'validate': self._validate,
        'temp_file_format': self._temp_file_format,
        # Fix: this key was previously omitted, so a runner-API round trip
        # silently reset ignore_insert_ids to False. Old payloads without the
        # key still deserialize (constructor default applies).
        'ignore_insert_ids': self._ignore_insert_ids,
    }
    return 'beam:transform:write_to_big_query:v0', pickler.dumps(config)

  @PTransform.register_urn('beam:transform:write_to_big_query:v0', bytes)
  def from_runner_api(unused_ptransform, payload, context):
    from apache_beam.internal import pickler
    from apache_beam.portability.api.beam_runner_api_pb2 import SideInput

    config = pickler.loads(payload)

    def deserialize(side_inputs):
      deserialized_side_inputs = {}
      for k, v in side_inputs.items():
        side_input = SideInput()
        side_input.ParseFromString(v)
        deserialized_side_inputs[k] = side_input

      # This is an ordered list stored as a dict (see the comments in
      # to_runner_api_parameter above).
      indexed_side_inputs = [(
          get_sideinput_index(tag),
          pvalue.AsSideInput.from_runner_api(si, context)) for tag,
          si in deserialized_side_inputs.items()]
      return [si for _, si in sorted(indexed_side_inputs)]

    config['table_side_inputs'] = deserialize(config['table_side_inputs'])
    config['schema_side_inputs'] = deserialize(config['schema_side_inputs'])
    return WriteToBigQuery(**config)
class ReadFromBigQuery(PTransform):
  """Read data from BigQuery.

  This PTransform uses a BigQuery export job to take a snapshot of the table
  on GCS, and then reads from each produced file. File format is Avro by
  default.

  Args:
    table (str, callable, ValueProvider): The ID of the table, or a callable
      that returns it. The ID must contain only letters ``a-z``, ``A-Z``,
      numbers ``0-9``, or underscores ``_``. If dataset argument is
      :data:`None` then the table argument must contain the entire table
      reference specified as: ``'DATASET.TABLE'``
      or ``'PROJECT:DATASET.TABLE'``. If it's a callable, it must receive one
      argument representing an element to be written to BigQuery, and return
      a TableReference, or a string table name as specified above.
    dataset (str): The ID of the dataset containing this table or
      :data:`None` if the table reference is specified entirely by the table
      argument.
    project (str): The ID of the project containing this table.
    query (str, ValueProvider): A query to be used instead of arguments
      table, dataset, and project.
    validate (bool): If :data:`True`, various checks will be done when source
      gets initialized (e.g., is table present?). This should be
      :data:`True` for most scenarios in order to catch errors as early as
      possible (pipeline construction instead of pipeline execution). It
      should be :data:`False` if the table is created during pipeline
      execution by a previous step.
    coder (~apache_beam.coders.coders.Coder): The coder for the table
      rows. If :data:`None`, then the default coder is
      _JsonToDictCoder, which will interpret every row as a JSON
      serialized dictionary.
    use_standard_sql (bool): Specifies whether to use BigQuery's standard SQL
      dialect for this query. The default value is :data:`False`.
      If set to :data:`True`, the query will use BigQuery's updated SQL
      dialect with improved standards compliance.
      This parameter is ignored for table inputs.
    flatten_results (bool): Flattens all nested and repeated fields in the
      query results. The default value is :data:`True`.
    kms_key (str): Optional Cloud KMS key name for use when creating new
      temporary tables.
    gcs_location (str, ValueProvider): The name of the Google Cloud Storage
      bucket where the extracted table should be written as a string or
      a :class:`~apache_beam.options.value_provider.ValueProvider`. If
      :data:`None`, then the temp_location parameter is used.
    bigquery_job_labels (dict): A dictionary with string labels to be passed
      to BigQuery export and query jobs created by this transform. See:
      https://cloud.google.com/bigquery/docs/reference/rest/v2/\
Job#JobConfiguration
    use_json_exports (bool): By default, this transform works by exporting
      BigQuery data into Avro files, and reading those files. With this
      parameter, the transform will instead export to JSON files. JSON files
      are slower to read due to their larger size.
      When using JSON exports, the BigQuery types for DATE, DATETIME, TIME, and
      TIMESTAMP will be exported as strings. This behavior is consistent with
      BigQuerySource.
      When using Avro exports, these fields will be exported as native Python
      types (datetime.date, datetime.datetime, datetime.datetime,
      and datetime.datetime respectively). Avro exports are recommended.
      To learn more about BigQuery types, and Time-related type
      representations, see: https://cloud.google.com/bigquery/docs/reference/\
standard-sql/data-types
      To learn more about type conversions between BigQuery and Avro, see:
      https://cloud.google.com/bigquery/docs/loading-data-cloud-storage-avro\
#avro_conversions
  """
  # Monotonic counter used to derive a stable step name when the transform
  # has no label.
  COUNTER = 0

  def __init__(self, gcs_location=None, *args, **kwargs):
    if gcs_location:
      # Fix: the original check also referenced the Python 2 ``unicode``
      # name (supplied by a py2 compat shim where ``unicode is str``); on
      # Python 3 the ``str`` check covers it, so the relic is dropped.
      if not isinstance(gcs_location, (str, ValueProvider)):
        raise TypeError(
            '%s: gcs_location must be of type string'
            ' or ValueProvider; got %r instead' %
            (self.__class__.__name__, type(gcs_location)))

      if isinstance(gcs_location, str):
        # Normalize plain strings to a ValueProvider for uniform handling.
        gcs_location = StaticValueProvider(str, gcs_location)

    self.gcs_location = gcs_location
    self._args = args
    self._kwargs = kwargs

  def expand(self, pcoll):
    # TODO(BEAM-11115): Make ReadFromBQ rely on ReadAllFromBQ implementation.
    temp_location = pcoll.pipeline.options.view_as(
        GoogleCloudOptions).temp_location
    job_name = pcoll.pipeline.options.view_as(GoogleCloudOptions).job_name
    gcs_location_vp = self.gcs_location
    unique_id = str(uuid.uuid4())[0:10]

    def file_path_to_remove(unused_elm):
      # Resolve the export directory lazily, at pipeline execution time.
      gcs_location = bigquery_export_destination_uri(
          gcs_location_vp, temp_location, unique_id, True)
      return gcs_location + '/'

    files_to_remove_pcoll = beam.pvalue.AsList(
        pcoll.pipeline
        | 'FilesToRemoveImpulse' >> beam.Create([None])
        | 'MapFilesToRemove' >> beam.Map(file_path_to_remove))

    try:
      step_name = self.label
    except AttributeError:
      step_name = 'ReadFromBigQuery_%d' % ReadFromBigQuery.COUNTER
      ReadFromBigQuery.COUNTER += 1
    return (
        pcoll
        | beam.io.Read(
            _CustomBigQuerySource(
                gcs_location=self.gcs_location,
                pipeline_options=pcoll.pipeline.options,
                job_name=job_name,
                step_name=step_name,
                unique_id=unique_id,
                *self._args,
                **self._kwargs))
        | _PassThroughThenCleanup(files_to_remove_pcoll))
class ReadFromBigQueryRequest:
  """
  Class that defines data to read from BQ.
  """
  def __init__(
      self,
      query: str = None,
      use_standard_sql: bool = True,
      table: Union[str, TableReference] = None,
      flatten_results: bool = False):
    """
    Only one of query or table should be specified.

    :param query: SQL query to fetch data.
    :param use_standard_sql:
      Specifies whether to use BigQuery's standard SQL dialect for this query.
      The default value is :data:`True`. If set to :data:`False`,
      the query will use BigQuery's legacy SQL dialect.
      This parameter is ignored for table inputs.
    :param table:
      The ID of the table to read. The ID must contain only letters
      ``a-z``, ``A-Z``, numbers ``0-9``, or underscores ``_``. Table should
      define project and dataset (ex.: ``'PROJECT:DATASET.TABLE'``).
    :param flatten_results:
      Flattens all nested and repeated fields in the query results.
      The default value is :data:`False`.
    """
    self.flatten_results = flatten_results
    self.query = query
    self.use_standard_sql = use_standard_sql
    self.table = table
    self.validate()

    # We use this internal object ID to generate BigQuery export directories.
    self.obj_id = random.randint(0, 100000)

  def validate(self):
    """Check that exactly one of query/table is set and table looks valid.

    Raises ValueError when both or neither of query/table are given, and
    AssertionError when a string table reference has no '.' separator.
    """
    if self.table is not None and self.query is not None:
      raise ValueError(
          'Both a BigQuery table and a query were specified.'
          ' Please specify only one of these.')
    elif self.table is None and self.query is None:
      raise ValueError('A BigQuery table or a query must be specified')
    if self.table is not None:
      if isinstance(self.table, str):
        # Fix: the original used `assert self.table.find('.')`, but str.find
        # returns -1 (truthy) when '.' is absent, so the check never fired.
        # A membership test actually validates the reference format.
        assert '.' in self.table, (
            'Expected a table reference '
            '(PROJECT:DATASET.TABLE or DATASET.TABLE) instead of %s'
            % self.table)
@experimental()
class ReadAllFromBigQuery(PTransform):
  """Read data from BigQuery.

  PTransform:ReadFromBigQueryRequest->Rows

  This PTransform uses a BigQuery export job to take a snapshot of the table
  on GCS, and then reads from each produced file. Data is exported into
  a new subdirectory for each export using UUIDs generated in
  `ReadFromBigQueryRequest` objects.

  It is recommended not to use this PTransform for streaming jobs on
  GlobalWindow, since it will not be able to cleanup snapshots.

  Args:
    gcs_location (str): The name of the Google Cloud Storage
      bucket where the extracted table should be written as a string. If
      :data:`None`, then the temp_location parameter is used.
    validate (bool): If :data:`True`, various checks will be done when source
      gets initialized (e.g., is table present?).
    kms_key (str): Experimental. Optional Cloud KMS key name for use when
      creating new temporary tables.
  """
  # Monotonic counter used to derive a stable step name when the transform
  # has no label.
  COUNTER = 0

  def __init__(
      self,
      gcs_location: Union[str, ValueProvider] = None,
      validate: bool = False,
      kms_key: str = None,
      temp_dataset: Union[str, DatasetReference] = None,
      bigquery_job_labels: Dict[str, str] = None):
    if gcs_location:
      if not isinstance(gcs_location, (str, ValueProvider)):
        raise TypeError(
            '%s: gcs_location must be of type string'
            ' or ValueProvider; got %r instead' %
            (self.__class__.__name__, type(gcs_location)))

    self.gcs_location = gcs_location
    self.validate = validate
    self.kms_key = kms_key
    self.bigquery_job_labels = bigquery_job_labels
    self.temp_dataset = temp_dataset

  def expand(self, pcoll):
    job_name = pcoll.pipeline.options.view_as(GoogleCloudOptions).job_name
    project = pcoll.pipeline.options.view_as(GoogleCloudOptions).project
    # Short unique suffix used when building per-expansion export paths.
    unique_id = str(uuid.uuid4())[0:10]

    try:
      step_name = self.label
    except AttributeError:
      step_name = 'ReadAllFromBigQuery_%d' % ReadAllFromBigQuery.COUNTER
      ReadAllFromBigQuery.COUNTER += 1

    # _BigQueryReadSplit emits the file sources to read on the main output
    # and the export directories to delete on a secondary output.
    sources_to_read, cleanup_locations = (
        pcoll
        | beam.ParDo(
            _BigQueryReadSplit(
                options=pcoll.pipeline.options,
                gcs_location=self.gcs_location,
                bigquery_job_labels=self.bigquery_job_labels,
                job_name=job_name,
                step_name=step_name,
                unique_id=unique_id,
                kms_key=self.kms_key,
                project=project,
                temp_dataset=self.temp_dataset)).with_outputs(
                    "location_to_cleanup", main="files_to_read")
    )

    return (
        sources_to_read
        | SDFBoundedSourceReader()
        | _PassThroughThenCleanup(beam.pvalue.AsIter(cleanup_locations)))
# pytype: skip-file
from __future__ import absolute_import
import copy
from typing import Iterable
from typing import List
from typing import Optional
from typing import Text
from typing import Union
from google.cloud.datastore import entity
from google.cloud.datastore import key
from google.cloud.datastore import query
from apache_beam.options.value_provider import ValueProvider
__all__ = ['Query', 'Key', 'Entity']
class Query(object):
  def __init__(
      self,
      kind=None,
      project=None,
      namespace=None,
      ancestor=None,
      filters=(),
      projection=(),
      order=(),
      distinct_on=(),
      limit=None):
    """Represents a Datastore query.

    Args:
      kind: (str) The kind to query.
      project: (str) Required. Project associated with query.
      namespace: (str, ValueProvider(str)) (Optional) Namespace to restrict
        results to.
      ancestor: (:class:`~apache_beam.io.gcp.datastore.v1new.types.Key`)
        (Optional) key of the ancestor to which this query's results are
        restricted.
      filters: (sequence of tuple[str, str, str],
        sequence of
        tuple[ValueProvider(str), ValueProvider(str), ValueProvider(str)])
        Property filters applied by this query.
        The sequence is ``(property_name, operator, value)``.
      projection: (sequence of string) fields returned as part of query results.
      order: (sequence of string) field names used to order query results.
        Prepend ``-`` to a field name to sort it in descending order.
      distinct_on: (sequence of string) field names used to group query
        results.
      limit: (int) Maximum amount of results to return.
    """
    self.kind = kind
    self.project = project
    self.namespace = namespace
    self.ancestor = ancestor
    # Normalize a falsy `filters` argument (e.g. None) to an empty tuple.
    self.filters = filters or ()
    self.projection = projection
    self.order = order
    self.distinct_on = distinct_on
    self.limit = limit

  def _to_client_query(self, client):
    """
    Returns a ``google.cloud.datastore.query.Query`` instance that represents
    this query.

    Args:
      client: (``google.cloud.datastore.client.Client``) Datastore client
        instance to use.
    """
    client_ancestor = (
        self.ancestor.to_client_key() if self.ancestor is not None else None)
    # Materialize any deferred ValueProvider arguments before handing them to
    # the client library.
    self.filters = self._set_runtime_filters()
    if isinstance(self.namespace, ValueProvider):
      self.namespace = self.namespace.get()
    return query.Query(
        client,
        kind=self.kind,
        project=self.project,
        namespace=self.namespace,
        ancestor=client_ancestor,
        filters=self.filters,
        projection=self.projection,
        order=self.order,
        distinct_on=self.distinct_on)

  def _set_runtime_filters(self):
    """Return ``self.filters`` with any ValueProvider entries resolved.

    :raises TypeError: if any filter is not a 3-tuple.
    :return: sequence of tuple[str, str, str] (empty tuple when no filters)
    """
    if any(len(filter_tuple) != 3 for filter_tuple in self.filters):
      raise TypeError(
          '%s: filters must be a sequence of tuple with length=3'
          ' got %r instead' % (self.__class__.__name__, self.filters))

    unwrap = lambda v: v.get() if isinstance(v, ValueProvider) else v
    resolved = [
        (unwrap(prop), unwrap(op), unwrap(value))
        for prop, op, value in self.filters
    ]
    return resolved or ()

  def clone(self):
    """Return a shallow copy of this query."""
    return copy.copy(self)

  def __repr__(self):
    fields = (
        self.kind,
        self.project,
        self.namespace,
        self.ancestor,
        self.filters,
        self.projection,
        self.order,
        self.distinct_on,
        self.limit)
    return (
        '<Query(kind=%s, project=%s, namespace=%s, ancestor=%s, filters=%s,'
        'projection=%s, order=%s, distinct_on=%s, limit=%s)>' % fields)
class Key(object):
  def __init__(self,
               path_elements, # type: List[Union[Text, int]]
               parent=None, # type: Optional[Key]
               project=None, # type: Optional[Text]
               namespace=None # type: Optional[Text]
              ):
    """
    Represents a Datastore key.

    The partition ID is represented by its components: namespace and project.
    If key has a parent, project and namespace should either be unset or match
    the parent's.

    Args:
      path_elements: (list of str and int) Key path: an alternating sequence of
        kind and identifier. The kind must be of type ``str`` and identifier may
        be a ``str`` or an ``int``.
        If the last identifier is omitted this is an incomplete key, which is
        unsupported in ``WriteToDatastore`` and ``DeleteFromDatastore``.
        See :class:`google.cloud.datastore.key.Key` for more details.
      parent: (:class:`~apache_beam.io.gcp.datastore.v1new.types.Key`)
        (optional) Parent for this key.
      project: (str) Project ID. Required unless set by parent.
      namespace: (str) (optional) Namespace ID
    """
    # Verification of arguments is delegated to to_client_key().
    self.path_elements = tuple(path_elements)
    self.parent = parent
    self.project = project
    self.namespace = namespace

  @staticmethod
  def from_client_key(client_key):
    """Builds a ``Key`` from a ``google.cloud.datastore.key.Key``."""
    return Key(
        client_key.flat_path,
        project=client_key.project,
        namespace=client_key.namespace)

  def to_client_key(self):
    """
    Returns a :class:`google.cloud.datastore.key.Key` instance that represents
    this key.
    """
    client_parent = (
        self.parent.to_client_key() if self.parent is not None else None)
    return key.Key(
        *self.path_elements,
        parent=client_parent,
        namespace=self.namespace,
        project=self.project)

  def __eq__(self, other):
    if not isinstance(other, Key):
      return False
    # NOTE(review): namespace is not part of the equality check even though
    # it belongs to the partition ID described above -- confirm intentional.
    if (self.path_elements, self.project) != (other.path_elements,
                                              other.project):
      return False
    if self.parent is None or other.parent is None:
      # Equal only when both keys are parentless.
      return self.parent is None and other.parent is None
    return self.parent == other.parent

  # Keys are mutable, so explicitly unhashable.
  __hash__ = None # type: ignore[assignment]

  def __repr__(self):
    details = (
        self.__class__.__name__,
        str(self.path_elements),
        str(self.parent),
        self.project,
        self.namespace)
    return '<%s(%s, parent=%s, project=%s, namespace=%s)>' % details
class Entity(object):
  def __init__(
      self,
      key, # type: Key
      exclude_from_indexes=() # type: Iterable[str]
  ):
    """
    Represents a Datastore entity.
    Does not support the property value "meaning" field.
    Args:
      key: (Key) A complete Key representing this Entity.
      exclude_from_indexes: (iterable of str) List of property keys whose values
        should not be indexed for this entity.
    """
    self.key = key
    self.exclude_from_indexes = set(exclude_from_indexes)
    # Property name -> value mapping; populated via set_properties().
    self.properties = {}

  def set_properties(self, property_dict):
    """Sets a dictionary of properties on this entity.
    Args:
      property_dict: A map from property name to value. See
      :class:`google.cloud.datastore.entity.Entity` documentation for allowed
      values.
    """
    # Merges into (does not replace) any previously set properties.
    self.properties.update(property_dict)

  @staticmethod
  def from_client_entity(client_entity):
    """Builds an ``Entity`` from a ``google.cloud.datastore.entity.Entity``,
    recursively converting nested client Key/Entity property values to this
    module's wrapper types."""
    res = Entity(
        Key.from_client_key(client_entity.key),
        exclude_from_indexes=set(client_entity.exclude_from_indexes))
    for name, value in client_entity.items():
      if isinstance(value, key.Key):
        value = Key.from_client_key(value)
      if isinstance(value, entity.Entity):
        value = Entity.from_client_entity(value)
      res.properties[name] = value
    return res

  def to_client_entity(self):
    """
    Returns a :class:`google.cloud.datastore.entity.Entity` instance that
    represents this entity.

    Note: property values that are ``Key`` or ``Entity`` wrappers and lack a
    project are mutated in place below -- their project is filled in from
    this entity's key as a side effect of conversion.
    """
    res = entity.Entity(
        key=self.key.to_client_key(),
        exclude_from_indexes=tuple(self.exclude_from_indexes))
    for name, value in self.properties.items():
      if isinstance(value, Key):
        if not value.project:
          # In-place mutation of the stored property value (see docstring).
          value.project = self.key.project
        value = value.to_client_key()
      if isinstance(value, Entity):
        if not value.key.project:
          # In-place mutation of the stored property value (see docstring).
          value.key.project = self.key.project
        value = value.to_client_entity()
      res[name] = value
    return res

  def __eq__(self, other):
    # Entities are equal when key, index exclusions and all properties match.
    if not isinstance(other, Entity):
      return False
    return (
        self.key == other.key and
        self.exclude_from_indexes == other.exclude_from_indexes and
        self.properties == other.properties)

  # Entities are mutable, so explicitly unhashable.
  __hash__ = None # type: ignore[assignment]

  def __repr__(self):
    return "<%s(key=%s, exclude_from_indexes=%s) properties=%s>" % (
        self.__class__.__name__,
        str(self.key),
        str(self.exclude_from_indexes),
        str(self.properties))
# pytype: skip-file
from __future__ import absolute_import
import argparse
import hashlib
import logging
import uuid
import apache_beam as beam
from apache_beam.io.gcp.datastore.v1new.datastoreio import DeleteFromDatastore
from apache_beam.io.gcp.datastore.v1new.datastoreio import ReadFromDatastore
from apache_beam.io.gcp.datastore.v1new.datastoreio import WriteToDatastore
from apache_beam.io.gcp.datastore.v1new.types import Entity
from apache_beam.io.gcp.datastore.v1new.types import Key
from apache_beam.io.gcp.datastore.v1new.types import Query
from apache_beam.options.pipeline_options import GoogleCloudOptions
from apache_beam.options.pipeline_options import PipelineOptions
from apache_beam.testing.test_pipeline import TestPipeline
from apache_beam.testing.util import assert_that
from apache_beam.testing.util import equal_to
_LOGGER = logging.getLogger(__name__)
def new_pipeline_with_job_name(pipeline_options, job_name, suffix):
  """Creates a TestPipeline, appending ``suffix`` to any configured job name."""
  cloud_options = pipeline_options.view_as(GoogleCloudOptions)
  # The DirectRunner runs without a job name; only decorate it when present.
  if job_name:
    cloud_options.job_name = job_name + suffix
  return TestPipeline(options=pipeline_options)
class EntityWrapper(object):
  """
  Builds Cloud Datastore entities of a fixed kind from strings.

  All produced entities share the supplied ancestor key, so namespace and
  project are taken from that parent key.
  """
  def __init__(self, kind, parent_key):
    self._kind = kind
    self._parent_key = parent_key

  def make_entity(self, content):
    """Returns an Entity keyed by the SHA-1 hex digest of ``content``."""
    digest = hashlib.sha1(content.encode('utf-8')).hexdigest()
    entity_key = Key([self._kind, digest], parent=self._parent_key)
    result = Entity(entity_key)
    result.set_properties({'content': str(content)})
    return result
def run(argv=None):
  """Main entry point.

  Runs a Datastore round-trip as a sequence of independent pipelines:
  write entities, (optionally) verify a limited read, verify a full read,
  delete the entities, then verify nothing remains.
  """
  parser = argparse.ArgumentParser()
  parser.add_argument(
      '--kind', dest='kind', default='writereadtest', help='Datastore Kind')
  parser.add_argument(
      '--num_entities',
      dest='num_entities',
      type=int,
      required=True,
      help='Number of entities to write')
  parser.add_argument(
      '--limit',
      dest='limit',
      type=int,
      help='Limit of number of entities to write')
  known_args, pipeline_args = parser.parse_known_args(argv)
  pipeline_options = PipelineOptions(pipeline_args)
  gcloud_options = pipeline_options.view_as(GoogleCloudOptions)
  job_name = gcloud_options.job_name
  kind = known_args.kind
  num_entities = known_args.num_entities
  project = gcloud_options.project
  # Pipeline 1: Create and write the specified number of Entities to the
  # Cloud Datastore.
  # A random UUID in the ancestor key isolates this run's entities from any
  # previous runs of the same kind.
  ancestor_key = Key([kind, str(uuid.uuid4())], project=project)
  _LOGGER.info('Writing %s entities to %s', num_entities, project)
  p = new_pipeline_with_job_name(pipeline_options, job_name, '-write')
  _ = (
      p
      | 'Input' >> beam.Create(list(range(num_entities)))
      | 'To String' >> beam.Map(str)
      | 'To Entity' >> beam.Map(EntityWrapper(kind, ancestor_key).make_entity)
      | 'Write to Datastore' >> WriteToDatastore(project))
  p.run()
  # The same ancestor query is reused (and mutated, via `limit`) by all
  # subsequent verification pipelines.
  query = Query(kind=kind, project=project, ancestor=ancestor_key)
  # Optional Pipeline 2: If a read limit was provided, read it and confirm
  # that the expected entities were read.
  if known_args.limit is not None:
    _LOGGER.info(
        'Querying a limited set of %s entities and verifying count.',
        known_args.limit)
    p = new_pipeline_with_job_name(pipeline_options, job_name, '-verify-limit')
    query.limit = known_args.limit
    entities = p | 'read from datastore' >> ReadFromDatastore(query)
    assert_that(
        entities | beam.combiners.Count.Globally(),
        equal_to([known_args.limit]))
    p.run()
    # Reset the limit so Pipeline 3 reads everything.
    query.limit = None
  # Pipeline 3: Query the written Entities and verify result.
  _LOGGER.info('Querying entities, asserting they match.')
  p = new_pipeline_with_job_name(pipeline_options, job_name, '-verify')
  entities = p | 'read from datastore' >> ReadFromDatastore(query)
  assert_that(
      entities | beam.combiners.Count.Globally(), equal_to([num_entities]))
  p.run()
  # Pipeline 4: Delete Entities.
  _LOGGER.info('Deleting entities.')
  p = new_pipeline_with_job_name(pipeline_options, job_name, '-delete')
  entities = p | 'read from datastore' >> ReadFromDatastore(query)
  _ = (
      entities
      | 'To Keys' >> beam.Map(lambda entity: entity.key)
      | 'delete entities' >> DeleteFromDatastore(project))
  p.run()
  # Pipeline 5: Query the written Entities, verify no results.
  _LOGGER.info('Querying for the entities to make sure there are none present.')
  p = new_pipeline_with_job_name(pipeline_options, job_name, '-verify-deleted')
  entities = p | 'read from datastore' >> ReadFromDatastore(query)
  assert_that(entities | beam.combiners.Count.Globally(), equal_to([0]))
  p.run()
if __name__ == '__main__':
  # Emit INFO-level logs so pipeline progress is visible when run directly.
  logging.getLogger().setLevel(logging.INFO)
  run()
# pytype: skip-file
from __future__ import absolute_import
import typing
from past.builtins import unicode
import apache_beam as beam
from apache_beam.io.gcp import pubsub
from apache_beam.transforms import Map
from apache_beam.transforms.external import ExternalTransform
from apache_beam.transforms.external import NamedTupleBasedPayloadBuilder
# Configuration payload for ReadFromPubSub. Field names and order are
# serialized via NamedTupleBasedPayloadBuilder below -- presumably they must
# match what the Java expansion service expects; verify before changing.
ReadFromPubsubSchema = typing.NamedTuple(
    'ReadFromPubsubSchema',
    [
        ('topic', typing.Optional[unicode]),
        ('subscription', typing.Optional[unicode]),
        ('id_label', typing.Optional[unicode]),
        ('with_attributes', bool),
        ('timestamp_attribute', typing.Optional[unicode]),
    ])
class ReadFromPubSub(beam.PTransform):
  """An external ``PTransform`` for reading from Cloud Pub/Sub.

  Experimental; no backwards compatibility guarantees. It requires special
  preparation of the Java SDK. See BEAM-7870.
  """
  URN = 'beam:external:java:pubsub:read:v1'

  def __init__(
      self,
      topic=None,
      subscription=None,
      id_label=None,
      with_attributes=False,
      timestamp_attribute=None,
      expansion_service=None):
    """Initializes ``ReadFromPubSub``.

    Args:
      topic: Cloud Pub/Sub topic in the form
        "projects/<project>/topics/<topic>". Mutually exclusive with
        ``subscription``.
      subscription: Existing Cloud Pub/Sub subscription to use in the
        form "projects/<project>/subscriptions/<subscription>". If not
        specified, a temporary subscription will be created from the
        specified topic. Mutually exclusive with ``topic``.
      id_label: The attribute on incoming Pub/Sub messages whose value (any
        string uniquely identifying the record) is used to deduplicate
        messages. If not provided, duplicate data may be delivered on the
        Pub/Sub stream and deduplication is strictly best effort.
      with_attributes:
        True - output elements will be
        :class:`~apache_beam.io.gcp.pubsub.PubsubMessage` objects.
        False - output elements will be of type ``bytes`` (message
        data only).
      timestamp_attribute: Message attribute to use as the element timestamp;
        if None, the message publishing time is used. Values must be either
        a number of milliseconds since the Unix epoch, or an RFC 3339 UTC
        timestamp such as ``2015-10-29T23:41:41.123Z`` (the sub-second part
        is optional and digits beyond milliseconds may be ignored).
      expansion_service: address of the expansion service to use.
    """
    self.params = ReadFromPubsubSchema(
        topic=topic,
        subscription=subscription,
        id_label=id_label,
        with_attributes=with_attributes,
        timestamp_attribute=timestamp_attribute)
    self.expansion_service = expansion_service

  def expand(self, pbegin):
    """Applies the external read and tags the output element type."""
    external_read = ExternalTransform(
        self.URN,
        NamedTupleBasedPayloadBuilder(self.params),
        self.expansion_service)
    pcoll = pbegin.apply(external_read)
    if self.params.with_attributes:
      # The external transform emits serialized protos; decode them into
      # PubsubMessage objects.
      pcoll = pcoll | 'FromProto' >> Map(pubsub.PubsubMessage._from_proto_str)
      pcoll.element_type = pubsub.PubsubMessage
    else:
      pcoll.element_type = bytes
    return pcoll
# Configuration payload for WriteToPubSub. Field names and order are
# serialized via NamedTupleBasedPayloadBuilder below -- presumably they must
# match what the Java expansion service expects; verify before changing.
WriteToPubsubSchema = typing.NamedTuple(
    'WriteToPubsubSchema',
    [
        ('topic', unicode),
        ('id_label', typing.Optional[unicode]),
        # this is not implemented yet on the Java side:
        # ('with_attributes', bool),
        ('timestamp_attribute', typing.Optional[unicode]),
    ])
class WriteToPubSub(beam.PTransform):
  """An external ``PTransform`` for writing messages to Cloud Pub/Sub.

  Experimental; no backwards compatibility guarantees. It requires special
  preparation of the Java SDK. See BEAM-7870.
  """
  URN = 'beam:external:java:pubsub:write:v1'

  def __init__(
      self,
      topic,
      with_attributes=False,
      id_label=None,
      timestamp_attribute=None,
      expansion_service=None):
    """Initializes ``WriteToPubSub``.

    Args:
      topic: Cloud Pub/Sub topic in the form "/topics/<project>/<topic>".
      with_attributes:
        True - input elements will be
        :class:`~apache_beam.io.gcp.pubsub.PubsubMessage` objects.
        False - input elements will be of type ``bytes`` (message
        data only).
      id_label: If set, an attribute with this name and a unique value is
        added to each published message; a ReadFromPubSub PTransform can
        then use it to deduplicate messages.
      timestamp_attribute: If set, an attribute with this name and the
        message's publish time as value is added to each published message.
      expansion_service: address of the expansion service to use.
    """
    # with_attributes is kept out of the payload because the Java side does
    # not implement it yet (see WriteToPubsubSchema above).
    self.params = WriteToPubsubSchema(
        topic=topic,
        id_label=id_label,
        # with_attributes=with_attributes,
        timestamp_attribute=timestamp_attribute)
    self.expansion_service = expansion_service
    self.with_attributes = with_attributes

  def expand(self, pvalue):
    """Serializes input elements to protos and applies the external write."""
    if self.with_attributes:
      pcoll = pvalue | 'ToProto' >> Map(pubsub.WriteToPubSub.to_proto_str)
    else:
      # Wrap raw bytes into an attribute-less PubsubMessage before encoding.
      pcoll = pvalue | 'ToProto' >> Map(
          lambda x: pubsub.PubsubMessage(x, {})._to_proto_str())
    pcoll.element_type = bytes
    external_write = ExternalTransform(
        self.URN,
        NamedTupleBasedPayloadBuilder(self.params),
        self.expansion_service)
    return pcoll.apply(external_write)
# pytype: skip-file
from __future__ import absolute_import
from future.utils import iteritems
from apache_beam.io.aws import s3io
from apache_beam.io.filesystem import BeamIOError
from apache_beam.io.filesystem import CompressedFile
from apache_beam.io.filesystem import CompressionTypes
from apache_beam.io.filesystem import FileMetadata
from apache_beam.io.filesystem import FileSystem
__all__ = ['S3FileSystem']
class S3FileSystem(FileSystem):
  """An S3 `FileSystem` implementation for accessing files on AWS S3."""

  CHUNK_SIZE = s3io.MAX_BATCH_OPERATION_SIZE
  S3_PREFIX = 's3://'

  def __init__(self, pipeline_options):
    """Initializes a connection to S3.

    Connection configuration is done by passing pipeline options.
    See :class:`~apache_beam.options.pipeline_options.S3Options`.
    """
    super(S3FileSystem, self).__init__(pipeline_options)
    self._options = pipeline_options

  @classmethod
  def scheme(cls):
    """URI scheme for the FileSystem"""
    return 's3'

  def join(self, basepath, *paths):
    """Join two or more pathname components for the filesystem

    Args:
      basepath: string path of the first component of the path
      paths: path components to be added

    Returns: full path after combining all of the passed components

    Raises:
      ValueError: if ``basepath`` is not an S3 path.
    """
    if not basepath.startswith(S3FileSystem.S3_PREFIX):
      raise ValueError('Basepath %r must be S3 path.' % basepath)
    path = basepath
    for p in paths:
      # Normalize separators so joining never doubles a slash.
      path = path.rstrip('/') + '/' + p.lstrip('/')
    return path

  def split(self, path):
    """Splits the given path into two parts.

    Splits the path into a pair (head, tail) such that tail contains the last
    component of the path and head contains everything up to that.
    Head will include the S3 prefix ('s3://').

    Args:
      path: path as a string

    Returns:
      a pair of path components as strings.

    Raises:
      ValueError: if ``path`` is not an S3 path.
    """
    path = path.strip()
    if not path.startswith(S3FileSystem.S3_PREFIX):
      raise ValueError('Path %r must be S3 path.' % path)
    prefix_len = len(S3FileSystem.S3_PREFIX)
    # Only split on separators after the scheme prefix.
    last_sep = path[prefix_len:].rfind('/')
    if last_sep >= 0:
      last_sep += prefix_len
    if last_sep > 0:
      return (path[:last_sep], path[last_sep + 1:])
    elif last_sep < 0:
      # Bucket-only path: everything is the head.
      return (path, '')
    else:
      raise ValueError('Invalid path: %s' % path)

  def mkdirs(self, path):
    """Recursively create directories for the provided path.

    S3 has no real directories, so this is a no-op.

    Args:
      path: string path of the directory structure that should be created

    Raises:
      IOError: if leaf directory already exists.
    """
    pass

  def has_dirs(self):
    """Whether this FileSystem supports directories."""
    return False

  def _list(self, dir_or_prefix):
    """List files in a location.

    Listing is non-recursive, for filesystems that support directories.

    Args:
      dir_or_prefix: (string) A directory or location prefix (for filesystems
        that don't have directories).

    Returns:
      Generator of ``FileMetadata`` objects.

    Raises:
      ``BeamIOError``: if listing fails, but not if no files were found.
    """
    try:
      for path, size in iteritems(
          s3io.S3IO(options=self._options).list_prefix(dir_or_prefix)):
        yield FileMetadata(path, size)
    except Exception as e: # pylint: disable=broad-except
      raise BeamIOError("List operation failed", {dir_or_prefix: e})

  def _path_open(
      self,
      path,
      mode,
      mime_type='application/octet-stream',
      compression_type=CompressionTypes.AUTO):
    """Helper function to open a file in the provided mode, wrapping it in a
    decompressing reader/writer when a compression type applies."""
    compression_type = FileSystem._get_compression_type(path, compression_type)
    mime_type = CompressionTypes.mime_type(compression_type, mime_type)
    raw_file = s3io.S3IO(options=self._options).open(
        path, mode, mime_type=mime_type)
    if compression_type == CompressionTypes.UNCOMPRESSED:
      return raw_file
    return CompressedFile(raw_file, compression_type=compression_type)

  def create(
      self,
      path,
      mime_type='application/octet-stream',
      compression_type=CompressionTypes.AUTO):
    """Returns a write channel for the given file path.

    Args:
      path: string path of the file object to be written to the system
      mime_type: MIME type to specify the type of content in the file object
      compression_type: Type of compression to be used for this object

    Returns: file handle with a close function for the user to use
    """
    return self._path_open(path, 'wb', mime_type, compression_type)

  def open(
      self,
      path,
      mime_type='application/octet-stream',
      compression_type=CompressionTypes.AUTO):
    """Returns a read channel for the given file path.

    Args:
      path: string path of the file object to be read from the system
      mime_type: MIME type to specify the type of content in the file object
      compression_type: Type of compression to be used for this object

    Returns: file handle with a close function for the user to use
    """
    return self._path_open(path, 'rb', mime_type, compression_type)

  def copy(self, source_file_names, destination_file_names):
    """Recursively copy the file tree from the source to the destination

    Args:
      source_file_names: list of source file objects that needs to be copied
      destination_file_names: list of destination of the new object

    Raises:
      ``BeamIOError``: if any of the copy operations fail
    """
    if not len(source_file_names) == len(destination_file_names):
      message = 'Unable to copy unequal number of sources and destinations'
      raise BeamIOError(message)
    src_dest_pairs = list(zip(source_file_names, destination_file_names))
    results = s3io.S3IO(options=self._options).copy_paths(src_dest_pairs)
    # copy_paths reports per-pair outcomes as (src, dest, error) tuples with
    # error=None on success (same shape as rename_files below). Surface any
    # failures as a BeamIOError as the docstring promises, rather than
    # silently returning the raw results.
    exceptions = {(src, dest): error
                  for (src, dest, error) in results if error is not None}
    if exceptions:
      raise BeamIOError("Copy operation failed", exceptions)

  def rename(self, source_file_names, destination_file_names):
    """Rename the files at the source list to the destination list.

    Source and destination lists should be of the same size.

    Args:
      source_file_names: List of file paths that need to be moved
      destination_file_names: List of destination_file_names for the files

    Raises:
      ``BeamIOError``: if any of the rename operations fail
    """
    if not len(source_file_names) == len(destination_file_names):
      message = 'Unable to rename unequal number of sources and destinations'
      raise BeamIOError(message)
    src_dest_pairs = list(zip(source_file_names, destination_file_names))
    results = s3io.S3IO(options=self._options).rename_files(src_dest_pairs)
    exceptions = {(src, dest): error
                  for (src, dest, error) in results if error is not None}
    if exceptions:
      raise BeamIOError("Rename operation failed", exceptions)

  def exists(self, path):
    """Check if the provided path exists on the FileSystem.

    Args:
      path: string path that needs to be checked.

    Returns: boolean flag indicating if path exists
    """
    try:
      return s3io.S3IO(options=self._options).exists(path)
    except Exception as e: # pylint: disable=broad-except
      raise BeamIOError("exists() operation failed", {path: e})

  def size(self, path):
    """Get size of path on the FileSystem.

    Args:
      path: string path in question.

    Returns: int size of path according to the FileSystem.

    Raises:
      ``BeamIOError``: if path doesn't exist.
    """
    try:
      return s3io.S3IO(options=self._options).size(path)
    except Exception as e: # pylint: disable=broad-except
      raise BeamIOError("size() operation failed", {path: e})

  def last_updated(self, path):
    """Get UNIX Epoch time in seconds on the FileSystem.

    Args:
      path: string path of file.

    Returns: float UNIX Epoch time

    Raises:
      ``BeamIOError``: if path doesn't exist.
    """
    try:
      return s3io.S3IO(options=self._options).last_updated(path)
    except Exception as e: # pylint: disable=broad-except
      raise BeamIOError("last_updated operation failed", {path: e})

  def checksum(self, path):
    """Fetch checksum metadata of a file on the
    :class:`~apache_beam.io.filesystem.FileSystem`.

    Args:
      path: string path of a file.

    Returns: string containing checksum

    Raises:
      ``BeamIOError``: if path isn't a file or doesn't exist.
    """
    try:
      return s3io.S3IO(options=self._options).checksum(path)
    except Exception as e: # pylint: disable=broad-except
      raise BeamIOError("Checksum operation failed", {path: e})

  def delete(self, paths):
    """Deletes files or directories at the provided paths.

    Directories will be deleted recursively.

    Args:
      paths: list of paths that give the file objects to be deleted

    Raises:
      ``BeamIOError``: if any of the delete operations fail
    """
    results = s3io.S3IO(options=self._options).delete_paths(paths)
    exceptions = {
        path: error
        for (path, error) in results.items() if error is not None
    }
    if exceptions:
      raise BeamIOError("Delete operation failed", exceptions)
# pytype: skip-file
from __future__ import absolute_import
from builtins import object
from functools import wraps
from typing import Set
from apache_beam import error
__all__ = [
'ValueProvider',
'StaticValueProvider',
'RuntimeValueProvider',
'NestedValueProvider',
'check_accessible',
]
class ValueProvider(object):
  """Abstract interface for values that may only be available at runtime.

  Subclasses must implement both :meth:`is_accessible` and :meth:`get`.
  """
  def is_accessible(self):
    """Whether the contents of this ValueProvider is available to routines
    that run at graph construction time.

    Raises:
      NotImplementedError: always; subclasses must override.
    """
    raise NotImplementedError(
        'ValueProvider.is_accessible implemented in derived classes')

  def get(self):
    """Return the value wrapped by this ValueProvider.

    Raises:
      NotImplementedError: always; subclasses must override.
    """
    raise NotImplementedError(
        'ValueProvider.get implemented in derived classes')
class StaticValueProvider(ValueProvider):
  """A :class:`ValueProvider` wrapping a value that is known at pipeline
  construction time.
  """
  def __init__(self, value_type, value):
    """
    Args:
      value_type: Type of the static value
      value: Static value
    """
    self.value_type = value_type
    # Coerce eagerly so get() is a plain attribute read.
    self.value = value_type(value)

  def is_accessible(self):
    # A static value is always available.
    return True

  def get(self):
    return self.value

  def __str__(self):
    return str(self.value)

  def __eq__(self, other):
    # Equal to the raw wrapped value itself, or to another
    # StaticValueProvider with the same type and value.
    if self.value == other:
      return True
    return (
        isinstance(other, StaticValueProvider) and
        self.value_type == other.value_type and
        self.value == other.value)

  def __ne__(self, other):
    # TODO(BEAM-5949): Needed for Python 2 compatibility.
    return not self == other

  def __hash__(self):
    return hash((type(self), self.value_type, self.value))
class RuntimeValueProvider(ValueProvider):
  """A :class:`ValueProvider` whose value is resolved at execution time
  rather than at graph construction time.
  """
  # Populated class-wide by set_runtime_options() once execution starts.
  runtime_options = None
  experiments = set() # type: Set[str]

  def __init__(self, option_name, value_type, default_value):
    self.option_name = option_name
    self.default_value = default_value
    self.value_type = value_type

  def is_accessible(self):
    return RuntimeValueProvider.runtime_options is not None

  @classmethod
  def get_value(cls, option_name, value_type, default_value):
    """Looks up ``option_name`` in the runtime options, coercing it with
    ``value_type``; falls back to ``default_value`` when absent or falsy."""
    options = RuntimeValueProvider.runtime_options
    if not options:
      return default_value
    candidate = options.get(option_name)
    # Falsy candidates (missing, None, '', 0, ...) fall back to the default.
    return value_type(candidate) if candidate else default_value

  def get(self):
    if RuntimeValueProvider.runtime_options is None:
      raise error.RuntimeValueProviderError(
          '%s.get() not called from a runtime context' % self)
    return RuntimeValueProvider.get_value(
        self.option_name, self.value_type, self.default_value)

  @classmethod
  def set_runtime_options(cls, pipeline_options):
    # Assign on RuntimeValueProvider explicitly (not cls) so subclasses
    # share the same class-wide state.
    RuntimeValueProvider.runtime_options = pipeline_options
    RuntimeValueProvider.experiments = RuntimeValueProvider.get_value(
        'experiments', set, set())

  def __str__(self):
    details = (
        self.__class__.__name__,
        self.option_name,
        self.value_type.__name__,
        repr(self.default_value))
    return '%s(option: %s, type: %s, default_value: %s)' % details
class NestedValueProvider(ValueProvider):
  """A :class:`ValueProvider` that lazily applies a translator function to
  the value of another ``ValueProvider``.
  """
  def __init__(self, value, translator):
    """Creates a NestedValueProvider that wraps the provided ValueProvider.

    Args:
      value: ValueProvider object to wrap
      translator: function that is applied to the ValueProvider

    Raises:
      ``RuntimeValueProviderError``: if any of the provided objects are not
        accessible.
    """
    self.value = value
    self.translator = translator

  def is_accessible(self):
    # Accessible exactly when the wrapped provider is.
    return self.value.is_accessible()

  def get(self):
    # Compute and memoize the translated value on first access.
    if not hasattr(self, 'cached_value'):
      self.cached_value = self.translator(self.value.get())
    return self.cached_value

  def __str__(self):
    return "%s(value: %s, translator: %s)" % (
        self.__class__.__name__,
        self.value,
        self.translator.__name__,
    )
def check_accessible(value_provider_list):
  """A decorator that checks accessibility of a list of ValueProvider objects.

  Args:
    value_provider_list: list of attribute names (str); each named attribute
      of the decorated method's ``self`` must be a ValueProvider.

  Raises:
    ``RuntimeValueProviderError``: at call time, if any of the named
      providers is not accessible.
  """
  assert isinstance(value_provider_list, list)

  def _check_accessible(fnc):
    @wraps(fnc)
    def _f(self, *args, **kwargs):
      # Gather all providers up front, then verify each before delegating.
      providers = [getattr(self, attr) for attr in value_provider_list]
      for provider in providers:
        if not provider.is_accessible():
          raise error.RuntimeValueProviderError('%s not accessible' % provider)
      return fnc(self, *args, **kwargs)

    return _f

  return _check_accessible
# pytype: skip-file
from __future__ import absolute_import
import logging
import re
from builtins import object
from past.builtins import unicode
from apache_beam.internal import pickler
from apache_beam.options.pipeline_options import DebugOptions
from apache_beam.options.pipeline_options import GoogleCloudOptions
from apache_beam.options.pipeline_options import PortableOptions
from apache_beam.options.pipeline_options import SetupOptions
from apache_beam.options.pipeline_options import StandardOptions
from apache_beam.options.pipeline_options import TestOptions
from apache_beam.options.pipeline_options import TypeOptions
from apache_beam.options.pipeline_options import WorkerOptions
_LOGGER = logging.getLogger(__name__)
class PipelineOptionsValidator(object):
  """Validates PipelineOptions.
  Goes through a list of known PipelineOptions subclasses and calls::
    validate(validator)
  if one is implemented. Aggregates a list of validation errors from all and
  returns an aggregated list.
  """
  # Validator will call validate on these subclasses of PipelineOptions
  OPTIONS = [
      DebugOptions,
      GoogleCloudOptions,
      PortableOptions,
      SetupOptions,
      StandardOptions,
      TestOptions,
      TypeOptions,
      WorkerOptions
  ]
  # Mutually exclusive options for different types of portable environments.
  # NOTE(review): consumed by environment validation logic not visible in
  # this chunk -- presumably PortableOptions.validate; confirm.
  REQUIRED_ENVIRONMENT_OPTIONS = {
      'DOCKER': [],
      'PROCESS': ['process_command'],
      'EXTERNAL': ['external_service_address'],
      'LOOPBACK': []
  }
  OPTIONAL_ENVIRONMENT_OPTIONS = {
      'DOCKER': ['docker_container_image'],
      'PROCESS': ['process_variables'],
      'EXTERNAL': [],
      'LOOPBACK': []
  }
  # Possible validation errors. All are %-format templates interpolated by
  # _validate_error().
  ERR_MISSING_OPTION = 'Missing required option: %s.'
  ERR_MISSING_GCS_PATH = 'Missing GCS path option: %s.'
  ERR_INVALID_GCS_PATH = 'Invalid GCS path (%s), given for the option: %s.'
  ERR_INVALID_GCS_BUCKET = (
      'Invalid GCS bucket (%s), given for the option: %s. See '
      'https://developers.google.com/storage/docs/bucketnaming '
      'for more details.')
  ERR_INVALID_GCS_OBJECT = 'Invalid GCS object (%s), given for the option: %s.'
  ERR_INVALID_JOB_NAME = (
      'Invalid job_name (%s); the name must consist of only the characters '
      '[-a-z0-9], starting with a letter and ending with a letter or number')
  ERR_INVALID_PROJECT_NUMBER = (
      'Invalid Project ID (%s). Please make sure you specified the Project ID, '
      'not project number.')
  ERR_INVALID_PROJECT_ID = (
      'Invalid Project ID (%s). Please make sure you specified the Project ID, '
      'not project description.')
  ERR_INVALID_NOT_POSITIVE = (
      'Invalid value (%s) for option: %s. Value needs '
      'to be positive.')
  ERR_INVALID_TEST_MATCHER_TYPE = (
      'Invalid value (%s) for option: %s. Please extend your matcher object '
      'from hamcrest.core.base_matcher.BaseMatcher.')
  ERR_INVALID_TEST_MATCHER_UNPICKLABLE = (
      'Invalid value (%s) for option: %s. Please make sure the test matcher '
      'is unpicklable.')
  ERR_INVALID_TRANSFORM_NAME_MAPPING = (
      'Invalid transform name mapping format. Please make sure the mapping is '
      'string key-value pairs. Invalid pair: (%s:%s)')
  ERR_INVALID_ENVIRONMENT = (
      'Option %s is not compatible with environment type %s.')
  ERR_ENVIRONMENT_CONFIG = (
      'Option environment_config is incompatible with option(s) %s.')
  ERR_MISSING_REQUIRED_ENVIRONMENT_OPTION = (
      'Option %s is required for environment type %s.')
  # GCS path specific patterns (see validate_gcs_path).
  GCS_URI = '(?P<SCHEME>[^:]+)://(?P<BUCKET>[^/]+)(/(?P<OBJECT>.*))?'
  GCS_BUCKET = '^[a-z0-9][-_a-z0-9.]+[a-z0-9]$'
  GCS_SCHEME = 'gs'
  # GoogleCloudOptions specific patterns.
  JOB_PATTERN = '[a-z]([-a-z0-9]*[a-z0-9])?'
  PROJECT_ID_PATTERN = '[a-z][-a-z0-9:.]+[a-z0-9]'
  PROJECT_NUMBER_PATTERN = '[0-9]*'
  ENDPOINT_PATTERN = r'https://[\S]*googleapis\.com[/]?'
  def __init__(self, options, runner):
    """
    Args:
      options: (PipelineOptions) Pipeline options to validate.
      runner: Pipeline runner instance; used to decide whether
        service-specific validation applies (see is_service_runner()).
    """
    self.options = options
    self.runner = runner
def validate(self):
"""Calls validate on subclassess and returns a list of errors.
validate will call validate method on subclasses, accumulate the returned
list of errors, and returns the aggregate list.
Returns:
Aggregate list of errors after all calling all possible validate methods.
"""
errors = []
for cls in self.OPTIONS:
if 'validate' in cls.__dict__ and callable(cls.__dict__['validate']):
errors.extend(self.options.view_as(cls).validate(self))
return errors
def is_service_runner(self):
"""True if pipeline will execute on the Google Cloud Dataflow service."""
is_service_runner = (
self.runner is not None and
type(self.runner).__name__ in ['DataflowRunner', 'TestDataflowRunner'])
dataflow_endpoint = (
self.options.view_as(GoogleCloudOptions).dataflow_endpoint)
is_service_endpoint = (
dataflow_endpoint is not None and
self.is_full_string_match(self.ENDPOINT_PATTERN, dataflow_endpoint))
return is_service_runner and is_service_endpoint
def is_full_string_match(self, pattern, string):
"""Returns True if the pattern matches the whole string."""
pattern = '^%s$' % pattern
return re.search(pattern, string) is not None
def _validate_error(self, err, *args):
return [err % args]
def validate_gcs_path(self, view, arg_name):
  """Validates a GCS path against gs://bucket/object URI format."""
  arg = getattr(view, arg_name, None)
  if arg is None:
    return self._validate_error(self.ERR_MISSING_GCS_PATH, arg_name)
  # re.DOTALL lets the OBJECT group span newlines; newline-containing
  # object names are then rejected explicitly below.
  match = re.match(self.GCS_URI, arg, re.DOTALL)
  if match is None:
    return self._validate_error(self.ERR_INVALID_GCS_PATH, arg, arg_name)
  scheme = match.group('SCHEME')
  bucket = match.group('BUCKET')
  gcs_object = match.group('OBJECT')
  # Scheme must be exactly 'gs' (case-insensitive) and a bucket present.
  if ((scheme is None) or (scheme.lower() != self.GCS_SCHEME) or
      (bucket is None)):
    return self._validate_error(self.ERR_INVALID_GCS_PATH, arg, arg_name)
  if not self.is_full_string_match(self.GCS_BUCKET, bucket):
    return self._validate_error(self.ERR_INVALID_GCS_BUCKET, arg, arg_name)
  # An object component is required and may not contain CR/LF characters.
  if gcs_object is None or '\n' in gcs_object or '\r' in gcs_object:
    return self._validate_error(self.ERR_INVALID_GCS_OBJECT, arg, arg_name)
  return []
def validate_cloud_options(self, view):
  """Validates job_name, project, update and transform name mapping args.

  Args:
    view: A GoogleCloudOptions view of the pipeline options.

  Returns:
    A list of error message strings; empty when everything validates.
  """
  errors = []
  if (view.job_name and
      not self.is_full_string_match(self.JOB_PATTERN, view.job_name)):
    errors.extend(
        self._validate_error(self.ERR_INVALID_JOB_NAME, view.job_name))
  project = view.project
  if project is None:
    errors.extend(self._validate_error(self.ERR_MISSING_OPTION, 'project'))
  else:
    # A bare project number is rejected; a project id string is required.
    if self.is_full_string_match(self.PROJECT_NUMBER_PATTERN, project):
      errors.extend(
          self._validate_error(self.ERR_INVALID_PROJECT_NUMBER, project))
    elif not self.is_full_string_match(self.PROJECT_ID_PATTERN, project):
      errors.extend(
          self._validate_error(self.ERR_INVALID_PROJECT_ID, project))
  if view.update:
    if not view.job_name:
      errors.extend(
          self._validate_error(
              'Existing job name must be provided when updating a pipeline.'))
  if view.transform_name_mapping:
    if not view.update or not self.options.view_as(StandardOptions).streaming:
      errors.append(
          'Transform name mapping option is only useful when '
          '--update and --streaming is specified')
    # BUG FIX: the original checked isinstance(..., (str, unicode));
    # `unicode` does not exist on Python 3, so any non-empty mapping
    # raised NameError here instead of being validated. Also dropped a
    # pointless enumerate() over the items.
    for key, value in view.transform_name_mapping.items():
      if not isinstance(key, str) or not isinstance(value, str):
        errors.extend(
            self._validate_error(
                self.ERR_INVALID_TRANSFORM_NAME_MAPPING, key, value))
        # Only the first malformed pair is reported.
        break
  if view.region is None and self.is_service_runner():
    default_region = self.runner.get_default_gcp_region()
    if default_region is None:
      errors.extend(self._validate_error(self.ERR_MISSING_OPTION, 'region'))
    else:
      # Backfill the option in place so later stages see a concrete region.
      view.region = default_region
  return errors
def validate_worker_region_zone(self, view):
  """Validates Dataflow worker region and zone arguments are consistent."""
  errors = []
  # --zone is deprecated and mutually exclusive with the new flags.
  if view.zone and (view.worker_region or view.worker_zone):
    errors.extend(
        self._validate_error(
            'Cannot use deprecated flag --zone along with worker_region or '
            'worker_zone.'))
  # The old worker_region *experiment* is likewise incompatible.
  if self.options.view_as(DebugOptions).lookup_experiment('worker_region')\
      and (view.worker_region or view.worker_zone):
    errors.extend(
        self._validate_error(
            'Cannot use deprecated experiment worker_region along with '
            'worker_region or worker_zone.'))
  if view.worker_region and view.worker_zone:
    errors.extend(
        self._validate_error(
            'worker_region and worker_zone are mutually exclusive.'))
  if view.zone:
    # Migrate the deprecated --zone value onto worker_zone in place, so
    # downstream code only needs to consult worker_zone.
    _LOGGER.warning(
        'Option --zone is deprecated. Please use --worker_zone instead.')
    view.worker_zone = view.zone
    view.zone = None
  return errors
def validate_optional_argument_positive(self, view, arg_name):
  """Validates that an optional argument (if set) has a positive value."""
  value = getattr(view, arg_name, None)
  # Unset arguments and strictly positive values are both acceptable.
  if value is None or int(value) > 0:
    return []
  return self._validate_error(self.ERR_INVALID_NOT_POSITIVE, value, arg_name)
def validate_test_matcher(self, view, arg_name):
  """Validates the on_success_matcher argument, if set.

  The pickled matcher must unpickle successfully and be an instance of
  `hamcrest.core.base_matcher.BaseMatcher`.
  """
  # This is a test only method and requires hamcrest
  from hamcrest.core.base_matcher import BaseMatcher
  pickled_matcher = view.on_success_matcher
  errors = []
  try:
    matcher = pickler.loads(pickled_matcher)
    if not isinstance(matcher, BaseMatcher):
      errors.extend(
          self._validate_error(
              self.ERR_INVALID_TEST_MATCHER_TYPE, matcher, arg_name))
  except:  # pylint: disable=bare-except
    # Any failure to unpickle is reported as a validation error rather
    # than propagated; this is a validation path, not an execution path.
    errors.extend(
        self._validate_error(
            self.ERR_INVALID_TEST_MATCHER_UNPICKLABLE,
            pickled_matcher,
            arg_name))
  return errors
def validate_environment_options(self, view):
  """Validates portable environment options."""
  errors = []
  actual_environment_type = (
      view.environment_type.upper() if view.environment_type else None)
  for environment_type, required in self.REQUIRED_ENVIRONMENT_OPTIONS.items():
    # Partition the options that were actually supplied for this
    # environment type into required vs. optional ones.
    found_required_options = [
        opt for opt in required
        if view.lookup_environment_option(opt) is not None
    ]
    found_optional_options = [
        opt for opt in self.OPTIONAL_ENVIRONMENT_OPTIONS[environment_type]
        if view.lookup_environment_option(opt) is not None
    ]
    found_options = found_required_options + found_optional_options
    if environment_type == actual_environment_type:
      if view.environment_config:
        # environment_config and the fine-grained environment options are
        # mutually exclusive ways of configuring the same environment.
        if found_options:
          errors.extend(
              self._validate_error(
                  self.ERR_ENVIRONMENT_CONFIG, ', '.join(found_options)))
      else:
        # Without environment_config, every required option must be set.
        missing_options = set(required).difference(
            set(found_required_options))
        for opt in missing_options:
          errors.extend(
              self._validate_error(
                  self.ERR_MISSING_REQUIRED_ENVIRONMENT_OPTION,
                  opt,
                  environment_type))
    else:
      # Environment options classes are mutually exclusive.
      for opt in found_options:
        errors.extend(
            self._validate_error(
                self.ERR_INVALID_ENVIRONMENT, opt, actual_environment_type))
  if actual_environment_type == 'LOOPBACK' and view.environment_config:
    errors.extend(
        self._validate_error(
            self.ERR_INVALID_ENVIRONMENT, 'environment_config', 'LOOPBACK'))
  return errors
from __future__ import absolute_import
from __future__ import division
import logging
import math
import threading
from collections import Counter
_LOGGER = logging.getLogger(__name__)
class Histogram(object):
  """A histogram that supports estimated percentile with linear interpolation.

  This class is considered experimental and may break or receive backwards-
  incompatible changes in future versions of the Apache Beam SDK.
  """
  def __init__(self, bucket_type):
    # All mutable state below is guarded by _lock so the histogram can be
    # recorded to and read from multiple threads.
    self._lock = threading.Lock()
    self._bucket_type = bucket_type
    # Bucket index -> count of recorded values falling in that bucket.
    self._buckets = Counter()
    # Count of values that fell inside [range_from, range_to).
    self._num_records = 0
    # Counts of values above / below the representable range.
    self._num_top_records = 0
    self._num_bot_records = 0

  def clear(self):
    """Resets all recorded values."""
    with self._lock:
      self._buckets = Counter()
      self._num_records = 0
      self._num_top_records = 0
      self._num_bot_records = 0

  def copy(self):
    """Returns a consistent snapshot of this histogram."""
    with self._lock:
      histogram = Histogram(self._bucket_type)
      histogram._num_records = self._num_records
      histogram._num_top_records = self._num_top_records
      histogram._num_bot_records = self._num_bot_records
      histogram._buckets = self._buckets.copy()
      return histogram

  def combine(self, other):
    """Returns a new Histogram merging this one with ``other``.

    Raises:
      RuntimeError: if other is not a Histogram with the same bucket type.
    """
    if not isinstance(other,
                      Histogram) or self._bucket_type != other._bucket_type:
      raise RuntimeError('failed to combine histogram.')
    # Snapshot `other` first so we never hold both locks at the same time.
    other_histogram = other.copy()
    with self._lock:
      histogram = Histogram(self._bucket_type)
      histogram._num_records = self._num_records + other_histogram._num_records
      histogram._num_top_records = (
          self._num_top_records + other_histogram._num_top_records)
      histogram._num_bot_records = (
          self._num_bot_records + other_histogram._num_bot_records)
      histogram._buckets = self._buckets + other_histogram._buckets
      return histogram

  def record(self, *args):
    """Records each of the given values into the histogram."""
    for arg in args:
      self._record(arg)

  def _record(self, value):
    # Out-of-range values are logged and counted separately; they still
    # contribute to total_count() and percentile estimation.
    range_from = self._bucket_type.range_from()
    range_to = self._bucket_type.range_to()
    with self._lock:
      if value >= range_to:
        _LOGGER.warning('record is out of upper bound %s: %s', range_to, value)
        self._num_top_records += 1
      elif value < range_from:
        _LOGGER.warning(
            'record is out of lower bound %s: %s', range_from, value)
        self._num_bot_records += 1
      else:
        index = self._bucket_type.bucket_index(value)
        self._buckets[index] = self._buckets.get(index, 0) + 1
        self._num_records += 1

  def total_count(self):
    """Total number of recorded values, including out-of-range ones."""
    return self._num_records + self._num_top_records + self._num_bot_records

  def p99(self):
    """Estimated 99th percentile value."""
    return self.get_linear_interpolation(0.99)

  def p90(self):
    """Estimated 90th percentile value."""
    return self.get_linear_interpolation(0.90)

  def p50(self):
    """Estimated median value."""
    return self.get_linear_interpolation(0.50)

  def get_percentile_info(self):
    """Returns a human-readable summary of total count, P99, P90 and P50."""
    def _format(f):
      # -inf / +inf mean the percentile falls outside the bucketed range.
      if f == float('-inf'):
        return '<%s' % self._bucket_type.range_from()
      elif f == float('inf'):
        return '>=%s' % self._bucket_type.range_to()
      else:
        return str(int(round(f)))  # pylint: disable=round-builtin
    with self._lock:
      return (
          'Total count: %s, '
          'P99: %s, P90: %s, P50: %s' % (
              self.total_count(),
              _format(self._get_linear_interpolation(0.99)),
              _format(self._get_linear_interpolation(0.90)),
              _format(self._get_linear_interpolation(0.50))))

  def get_linear_interpolation(self, percentile):
    """Calculate percentile estimation based on linear interpolation.

    It first finds the bucket which includes the target percentile and
    projects the estimated point in the bucket by assuming all the elements
    in the bucket are uniformly distributed.

    Args:
      percentile: The target percentile of the value returning from this
        method. Should be a floating point number greater than 0 and less
        than 1.
    """
    with self._lock:
      return self._get_linear_interpolation(percentile)

  def _get_linear_interpolation(self, percentile):
    # Caller must hold self._lock.
    total_num_records = self.total_count()
    if total_num_records == 0:
      raise RuntimeError('histogram has no record.')
    index = 0
    record_sum = self._num_bot_records
    # If the percentile falls among the below-range records, the estimate
    # is unbounded below.
    if record_sum / total_num_records >= percentile:
      return float('-inf')
    while index < self._bucket_type.num_buckets():
      record_sum += self._buckets.get(index, 0)
      if record_sum / total_num_records >= percentile:
        break
      index += 1
    # Percentile landed among the above-range records.
    if index == self._bucket_type.num_buckets():
      return float('inf')
    # Linearly interpolate within the crossing bucket, assuming the records
    # inside it are uniformly distributed.
    frac_percentile = percentile - (
        record_sum - self._buckets[index]) / total_num_records
    bucket_percentile = self._buckets[index] / total_num_records
    frac_bucket_size = frac_percentile * self._bucket_type.bucket_size(
        index) / bucket_percentile
    return (
        self._bucket_type.range_from() +
        self._bucket_type.accumulated_bucket_size(index) + frac_bucket_size)

  def __eq__(self, other):
    if not isinstance(other, Histogram):
      return False
    return (
        self._bucket_type == other._bucket_type and
        self._num_records == other._num_records and
        self._num_top_records == other._num_top_records and
        self._num_bot_records == other._num_bot_records and
        self._buckets == other._buckets)

  def __hash__(self):
    # frozenset of items makes the (unordered) bucket counts hashable.
    return hash((
        self._bucket_type,
        self._num_records,
        self._num_top_records,
        self._num_bot_records,
        frozenset(self._buckets.items())))
class BucketType(object):
  """Interface describing how recorded values map onto histogram buckets."""

  def range_from(self):
    """Lower bound of a starting bucket."""
    raise NotImplementedError

  def range_to(self):
    """Upper bound of an ending bucket."""
    raise NotImplementedError

  def num_buckets(self):
    """The number of buckets."""
    raise NotImplementedError

  def bucket_index(self, value):
    """Get the bucket array index for the given value."""
    raise NotImplementedError

  def bucket_size(self, index):
    """Get the bucket size for the given bucket array index."""
    raise NotImplementedError

  def accumulated_bucket_size(self, end_index):
    """Get the accumulated bucket size from bucket index 0 until end_index.

    Generally this equals `sigma(0 <= i < end_index) bucket_size(i)`, but a
    child class may provide a better optimized calculation.
    """
    raise NotImplementedError


class LinearBucket(BucketType):
  """Equal-width buckets covering [start, start + width * num_buckets)."""

  def __init__(self, start, width, num_buckets):
    """Create a histogram with linear buckets.

    Args:
      start: Lower bound of a starting bucket.
      width: Bucket width. Smaller width implies a better resolution for
        percentile estimation.
      num_buckets: The number of buckets. Upper bound of an ending bucket is
        defined by start + width * num_buckets.
    """
    self._start = start
    self._width = width
    self._num_buckets = num_buckets

  def range_from(self):
    return self._start

  def range_to(self):
    return self._start + self._width * self._num_buckets

  def num_buckets(self):
    return self._num_buckets

  def bucket_index(self, value):
    # floor() keeps values below _start mapping to negative indices.
    offset = value - self._start
    return math.floor(offset / self._width)

  def bucket_size(self, index):
    # Every bucket shares the same width.
    return self._width

  def accumulated_bucket_size(self, end_index):
    # Constant width makes the prefix sum a single multiplication.
    return end_index * self._width

  def __eq__(self, other):
    if isinstance(other, LinearBucket):
      return (self._start, self._width, self._num_buckets) == (
          other._start, other._width, other._num_buckets)
    return False

  def __hash__(self):
    return hash((self._start, self._width, self._num_buckets))
# pytype: skip-file
from __future__ import absolute_import
from __future__ import division
from typing import Type
from typing import TypeVar
from typing import Union
from typing import overload
from google.protobuf import any_pb2
from google.protobuf import duration_pb2
from google.protobuf import message
from google.protobuf import struct_pb2
from google.protobuf import timestamp_pb2
MessageT = TypeVar('MessageT', bound=message.Message)
TimeMessageT = TypeVar(
'TimeMessageT', duration_pb2.Duration, timestamp_pb2.Timestamp)
@overload
def pack_Any(msg):
  # type: (message.Message) -> any_pb2.Any
  pass


@overload
def pack_Any(msg):
  # type: (None) -> None
  pass


def pack_Any(msg):
  """Wraps ``msg`` inside a protobuf ``Any``.

  Returns None if msg is None.
  """
  if msg is None:
    return None
  wrapped = any_pb2.Any()
  wrapped.Pack(msg)
  return wrapped
@overload
def unpack_Any(any_msg, msg_class):
  # type: (any_pb2.Any, Type[MessageT]) -> MessageT
  pass


@overload
def unpack_Any(any_msg, msg_class):
  # type: (any_pb2.Any, None) -> None
  pass


def unpack_Any(any_msg, msg_class):
  """Unpacks any_msg into a fresh instance of msg_class.

  Returns None if msg_class is None.
  """
  if msg_class is None:
    return None
  unpacked = msg_class()
  any_msg.Unpack(unpacked)
  return unpacked
@overload
def parse_Bytes(serialized_bytes, msg_class):
  # type: (bytes, Type[MessageT]) -> MessageT
  pass


@overload
def parse_Bytes(serialized_bytes, msg_class):
  # type: (bytes, Union[Type[bytes], None]) -> bytes
  pass


def parse_Bytes(serialized_bytes, msg_class):
  """Parses the byte string into an instance of msg_class.

  Returns the input bytes unchanged if msg_class is None or bytes.
  """
  if msg_class is None or msg_class is bytes:
    return serialized_bytes
  parsed = msg_class()
  parsed.ParseFromString(serialized_bytes)
  return parsed
def pack_Struct(**kwargs):
  # type: (...) -> struct_pb2.Struct
  """Returns a protobuf Struct populated from the given keyword arguments."""
  struct = struct_pb2.Struct()
  for field_name, field_value in kwargs.items():
    # Struct supports item assignment for scalar values.
    struct[field_name] = field_value  # pylint: disable=unsubscriptable-object, unsupported-assignment-operation
  return struct
def from_micros(cls, micros):
  # type: (Type[TimeMessageT], int) -> TimeMessageT
  """Builds a Duration or Timestamp proto from a microsecond count."""
  instance = cls()
  instance.FromMicroseconds(micros)
  return instance
def to_Timestamp(time):
  # type: (Union[int, float]) -> timestamp_pb2.Timestamp
  """Convert a float returned by time.time() to a Timestamp."""
  whole_seconds = int(time)
  # The fractional remainder becomes the nanosecond component.
  fractional_nanos = int((time - whole_seconds) * 10**9)
  return timestamp_pb2.Timestamp(
      seconds=whole_seconds, nanos=fractional_nanos)
def from_Timestamp(timestamp):
  # type: (timestamp_pb2.Timestamp) -> float
  """Convert a Timestamp to a float expressed as seconds since the epoch."""
  fractional_seconds = float(timestamp.nanos) / 10**9
  return timestamp.seconds + fractional_seconds
# pytype: skip-file
# mypy: disallow-untyped-defs
from __future__ import absolute_import
from __future__ import division
import datetime
import time
from builtins import object
from typing import Any
from typing import Union
from typing import overload
import dateutil.parser
import pytz
from google.protobuf import duration_pb2
from google.protobuf import timestamp_pb2
from past.builtins import long
from apache_beam.portability import common_urns
# types compatible with Timestamp.of()
TimestampTypes = Union[int, float, 'Timestamp']
# types compatible with Duration.of()
DurationTypes = Union[int, float, 'Duration']
TimestampDurationTypes = Union[int, float, 'Duration', 'Timestamp']
class Timestamp(object):
  """Represents a Unix second timestamp with microsecond granularity.

  Can be treated in common timestamp arithmetic operations as a numeric type.

  Internally stores a time interval as an int of microseconds. This strategy
  is necessary since floating point values lose precision when storing values,
  especially after arithmetic operations (for example, 10000000 % 0.1 evaluates
  to 0.0999999994448885).
  """
  def __init__(self, seconds=0, micros=0):
    # type: (Union[int, float], Union[int, float]) -> None
    if not isinstance(seconds, (int, long, float)):
      raise TypeError(
          'Cannot interpret %s %s as seconds.' % (seconds, type(seconds)))
    if not isinstance(micros, (int, long, float)):
      raise TypeError(
          'Cannot interpret %s %s as micros.' % (micros, type(micros)))
    # Canonical representation: a single integer microsecond count.
    self.micros = int(seconds * 1000000) + int(micros)

  @staticmethod
  def of(seconds):
    # type: (TimestampTypes) -> Timestamp
    """Return the Timestamp for the given number of seconds.

    If the input is already a Timestamp, the input itself will be returned.

    Args:
      seconds: Number of seconds as int, float, long, or Timestamp.

    Returns:
      Corresponding Timestamp object.
    """
    if not isinstance(seconds, (int, long, float, Timestamp)):
      raise TypeError(
          'Cannot interpret %s %s as Timestamp.' % (seconds, type(seconds)))
    if isinstance(seconds, Timestamp):
      return seconds
    return Timestamp(seconds)

  @staticmethod
  def now():
    # type: () -> Timestamp
    """Returns a Timestamp for the current wall-clock time."""
    return Timestamp(seconds=time.time())

  @staticmethod
  def _epoch_datetime_utc():
    # type: () -> datetime.datetime
    # The Unix epoch as an offset-aware UTC datetime.
    return datetime.datetime.fromtimestamp(0, pytz.utc)

  @classmethod
  def from_utc_datetime(cls, dt):
    # type: (datetime.datetime) -> Timestamp
    """Create a ``Timestamp`` instance from a ``datetime.datetime`` object.

    Args:
      dt: A ``datetime.datetime`` object in UTC (offset-aware).
    """
    if dt.tzinfo != pytz.utc:
      raise ValueError('dt not in UTC: %s' % dt)
    duration = dt - cls._epoch_datetime_utc()
    return Timestamp(duration.total_seconds())

  @classmethod
  def from_rfc3339(cls, rfc3339):
    # type: (str) -> Timestamp
    """Create a ``Timestamp`` instance from an RFC 3339 compliant string.

    .. note::
      All timezones are implicitly converted to UTC.

    Args:
      rfc3339: String in RFC 3339 form.
    """
    try:
      dt = dateutil.parser.isoparse(rfc3339).astimezone(pytz.UTC)
    except ValueError as e:
      raise ValueError(
          "Could not parse RFC 3339 string '{}' due to error: '{}'.".format(
              rfc3339, e))
    return cls.from_utc_datetime(dt)

  def predecessor(self):
    # type: () -> Timestamp
    """Returns the largest timestamp smaller than self."""
    return Timestamp(micros=self.micros - 1)

  def __repr__(self):
    # type: () -> str
    # Render as Timestamp(<seconds>[.<6-digit fraction>]) with explicit sign.
    micros = self.micros
    sign = ''
    if micros < 0:
      sign = '-'
      micros = -micros
    int_part = micros // 1000000
    frac_part = micros % 1000000
    if frac_part:
      return 'Timestamp(%s%d.%06d)' % (sign, int_part, frac_part)
    return 'Timestamp(%s%d)' % (sign, int_part)

  def to_utc_datetime(self):
    # type: () -> datetime.datetime
    # We can't easily construct a datetime object from microseconds, so we
    # create one at the epoch and add an appropriate timedelta interval.
    return self._epoch_datetime_utc().replace(tzinfo=None) + datetime.timedelta(
        microseconds=self.micros)

  def to_rfc3339(self):
    # type: () -> str
    # Append 'Z' for UTC timezone.
    return self.to_utc_datetime().isoformat() + 'Z'

  def to_proto(self):
    # type: () -> timestamp_pb2.Timestamp
    """Returns the `google.protobuf.timestamp_pb2` representation."""
    secs = self.micros // 1000000
    nanos = (self.micros % 1000000) * 1000
    return timestamp_pb2.Timestamp(seconds=secs, nanos=nanos)

  @staticmethod
  def from_proto(timestamp_proto):
    # type: (timestamp_pb2.Timestamp) -> Timestamp
    """Creates a Timestamp from a `google.protobuf.timestamp_pb2`.

    Note that the proto has a sub-second resolution of nanoseconds whereas
    this class has a resolution of microseconds, so inputs with sub-microsecond
    precision are rejected rather than silently truncated.
    """
    if timestamp_proto.nanos % 1000 != 0:
      # TODO(BEAM-8738): Better define timestamps.
      raise ValueError(
          "Cannot convert from nanoseconds to microseconds " +
          "because this loses precision. Please make sure that " +
          "this is the correct behavior you want and manually " +
          "truncate the precision to the nearest microseconds. " +
          "See [BEAM-8738] for more information.")
    return Timestamp(
        seconds=timestamp_proto.seconds, micros=timestamp_proto.nanos // 1000)

  def __float__(self):
    # type: () -> float
    # Note that the returned value may have lost precision.
    return self.micros / 1000000

  def __int__(self):
    # type: () -> int
    # Note that the returned value may have lost precision.
    return self.micros // 1000000

  def __eq__(self, other):
    # type: (object) -> bool
    # Allow comparisons between Duration and Timestamp values.
    if isinstance(other, (Duration, Timestamp)):
      return self.micros == other.micros
    elif isinstance(other, (int, long, float)):
      return self.micros == Timestamp.of(other).micros
    else:
      # Support equality with other types
      return NotImplemented

  def __ne__(self, other):
    # type: (Any) -> bool
    # TODO(BEAM-5949): Needed for Python 2 compatibility.
    return not self == other

  def __lt__(self, other):
    # type: (TimestampDurationTypes) -> bool
    # Allow comparisons between Duration and Timestamp values.
    if not isinstance(other, Duration):
      other = Timestamp.of(other)
    return self.micros < other.micros

  def __gt__(self, other):
    # type: (TimestampDurationTypes) -> bool
    return not (self < other or self == other)

  def __le__(self, other):
    # type: (TimestampDurationTypes) -> bool
    return self < other or self == other

  def __ge__(self, other):
    # type: (TimestampDurationTypes) -> bool
    return not self < other

  def __hash__(self):
    # type: () -> int
    return hash(self.micros)

  def __add__(self, other):
    # type: (DurationTypes) -> Timestamp
    other = Duration.of(other)
    return Timestamp(micros=self.micros + other.micros)

  def __radd__(self, other):
    # type: (DurationTypes) -> Timestamp
    return self + other

  @overload
  def __sub__(self, other):
    # type: (DurationTypes) -> Timestamp
    pass

  @overload
  def __sub__(self, other):
    # type: (Timestamp) -> Duration
    pass

  def __sub__(self, other):
    # type: (Union[DurationTypes, Timestamp]) -> Union[Timestamp, Duration]
    # Timestamp - Timestamp yields a Duration; Timestamp - Duration yields
    # a Timestamp.
    if isinstance(other, Timestamp):
      return Duration(micros=self.micros - other.micros)
    other = Duration.of(other)
    return Timestamp(micros=self.micros - other.micros)

  def __mod__(self, other):
    # type: (DurationTypes) -> Duration
    other = Duration.of(other)
    return Duration(micros=self.micros % other.micros)
# Extreme representable timestamps, derived from the portable runner API's
# millisecond constants (converted here to microseconds).
MIN_TIMESTAMP = Timestamp(
    micros=int(common_urns.constants.MIN_TIMESTAMP_MILLIS.constant) * 1000)
MAX_TIMESTAMP = Timestamp(
    micros=int(common_urns.constants.MAX_TIMESTAMP_MILLIS.constant) * 1000)
class Duration(object):
  """Represents a second duration with microsecond granularity.

  Can be treated in common arithmetic operations as a numeric type.

  Internally stores a time interval as an int of microseconds. This strategy
  is necessary since floating point values lose precision when storing values,
  especially after arithmetic operations (for example, 10000000 % 0.1 evaluates
  to 0.0999999994448885).
  """
  def __init__(self, seconds=0, micros=0):
    # type: (Union[int, float], Union[int, float]) -> None
    # Canonical representation: a single integer microsecond count.
    self.micros = int(seconds * 1000000) + int(micros)

  @staticmethod
  def of(seconds):
    # type: (DurationTypes) -> Duration
    """Return the Duration for the given number of seconds since Unix epoch.

    If the input is already a Duration, the input itself will be returned.

    Args:
      seconds: Number of seconds as int, float or Duration.

    Returns:
      Corresponding Duration object.
    """
    # Timestamps are absolute points in time, not intervals.
    if isinstance(seconds, Timestamp):
      raise TypeError('Cannot interpret %s as Duration.' % seconds)
    if isinstance(seconds, Duration):
      return seconds
    return Duration(seconds)

  def to_proto(self):
    # type: () -> duration_pb2.Duration
    """Returns the `google.protobuf.duration_pb2` representation."""
    secs = self.micros // 1000000
    nanos = (self.micros % 1000000) * 1000
    return duration_pb2.Duration(seconds=secs, nanos=nanos)

  @staticmethod
  def from_proto(duration_proto):
    # type: (duration_pb2.Duration) -> Duration
    """Creates a Duration from a `google.protobuf.duration_pb2`.

    Note that the proto has a sub-second resolution of nanoseconds whereas
    this class has a resolution of microseconds, so inputs with
    sub-microsecond precision are rejected rather than silently truncated.
    """
    if duration_proto.nanos % 1000 != 0:
      # TODO(BEAM-8738): Better define durations.
      raise ValueError(
          "Cannot convert from nanoseconds to microseconds " +
          "because this loses precision. Please make sure that " +
          "this is the correct behavior you want and manually " +
          "truncate the precision to the nearest microseconds. " +
          "See [BEAM-8738] for more information.")
    return Duration(
        seconds=duration_proto.seconds, micros=duration_proto.nanos // 1000)

  def __repr__(self):
    # type: () -> str
    # Render as Duration(<seconds>[.<6-digit fraction>]) with explicit sign.
    micros = self.micros
    sign = ''
    if micros < 0:
      sign = '-'
      micros = -micros
    int_part = micros // 1000000
    frac_part = micros % 1000000
    if frac_part:
      return 'Duration(%s%d.%06d)' % (sign, int_part, frac_part)
    return 'Duration(%s%d)' % (sign, int_part)

  def __float__(self):
    # type: () -> float
    # Note that the returned value may have lost precision.
    return self.micros / 1000000

  def __eq__(self, other):
    # type: (object) -> bool
    # Allow comparisons between Duration and Timestamp values.
    if isinstance(other, (Duration, Timestamp)):
      return self.micros == other.micros
    elif isinstance(other, (int, long, float)):
      return self.micros == Duration.of(other).micros
    else:
      # Support equality with other types
      return NotImplemented

  def __ne__(self, other):
    # type: (Any) -> bool
    # TODO(BEAM-5949): Needed for Python 2 compatibility.
    return not self == other

  def __lt__(self, other):
    # type: (TimestampDurationTypes) -> bool
    # Allow comparisons between Duration and Timestamp values.
    if not isinstance(other, Timestamp):
      other = Duration.of(other)
    return self.micros < other.micros

  def __gt__(self, other):
    # type: (TimestampDurationTypes) -> bool
    return not (self < other or self == other)

  def __le__(self, other):
    # type: (TimestampDurationTypes) -> bool
    return self < other or self == other

  def __ge__(self, other):
    # type: (TimestampDurationTypes) -> bool
    return not self < other

  def __hash__(self):
    # type: () -> int
    return hash(self.micros)

  def __neg__(self):
    # type: () -> Duration
    return Duration(micros=-self.micros)

  def __add__(self, other):
    # type: (DurationTypes) -> Duration
    if isinstance(other, Timestamp):
      # defer to Timestamp.__add__
      return NotImplemented
    other = Duration.of(other)
    return Duration(micros=self.micros + other.micros)

  def __radd__(self, other):
    # type: (DurationTypes) -> Duration
    return self + other

  def __sub__(self, other):
    # type: (DurationTypes) -> Duration
    other = Duration.of(other)
    return Duration(micros=self.micros - other.micros)

  def __rsub__(self, other):
    # type: (DurationTypes) -> Duration
    return -(self - other)

  def __mul__(self, other):
    # type: (DurationTypes) -> Duration
    # Multiplication treats the operand as a (dimensionless) scale factor
    # expressed in seconds, hence the division by one million.
    other = Duration.of(other)
    return Duration(micros=self.micros * other.micros // 1000000)

  def __rmul__(self, other):
    # type: (DurationTypes) -> Duration
    return self * other

  def __mod__(self, other):
    # type: (DurationTypes) -> Duration
    other = Duration.of(other)
    return Duration(micros=self.micros % other.micros)
# The minimum granularity / interval expressible in a Timestamp / Duration
# object (one microsecond).
TIME_GRANULARITY = Duration(micros=1)
# pytype: skip-file
from __future__ import absolute_import
import logging
_LOGGER = logging.getLogger(__name__)
def is_in_ipython():
  """Determines if current code is executed within an ipython session."""
  try:
    from IPython import get_ipython  # pylint: disable=import-error
    # get_ipython() returns a shell object only inside an IPython session.
    return bool(get_ipython())
  except ImportError:
    # If dependencies are not available, then not interactive for sure.
    return False
  except (KeyboardInterrupt, SystemExit):
    # Never swallow interpreter-exit signals.
    raise
  except:  # pylint: disable=bare-except
    _LOGGER.info(
        'Unexpected error occurred, treated as not in IPython.', exc_info=True)
    return False
def is_in_notebook():
  """Determines if current code is executed from an ipython notebook.

  If is_in_notebook() is True, then is_in_ipython() must also be True.
  """
  if not is_in_ipython():
    return False
  # The import and usage must be valid under the execution path.
  from IPython import get_ipython
  # Only notebook kernels expose IPKernelApp in their config.
  return 'IPKernelApp' in get_ipython().config
def alter_label_if_ipython(transform, pvalueish):
  """Alters the label to an interactive label with ipython prompt metadata
  prefixed for the given transform if the given pvalueish belongs to a
  user-defined pipeline and current code execution is within an ipython kernel.
  Otherwise, noop.

  A label is either a user-defined or auto-generated str name of a PTransform
  that is unique within a pipeline. If current environment is_in_ipython(), Beam
  can implicitly create interactive labels to replace labels of top-level
  PTransforms to be applied. The label is formatted as:
  `[{prompt}]: {original_label}`.
  """
  if is_in_ipython():
    from apache_beam.runners.interactive import interactive_environment as ie
    # Tracks user defined pipeline instances in watched scopes so that we only
    # alter labels for any transform to pvalueish belonging to those pipeline
    # instances, excluding any transform to be applied in other pipeline
    # instances the Beam SDK creates implicitly.
    ie.current_env().track_user_pipelines()
    from IPython import get_ipython
    # The execution count identifies the notebook cell being run.
    prompt = get_ipython().execution_count
    pipeline = _extract_pipeline_of_pvalueish(pvalueish)
    if (pipeline
        # We only alter for transforms to be applied to user-defined pipelines
        # at pipeline construction time.
        and pipeline in ie.current_env().tracked_user_pipelines):
      transform.label = '[{}]: {}'.format(prompt, transform.label)
def _extract_pipeline_of_pvalueish(pvalueish):
"""Extracts the pipeline that the given pvalueish belongs to."""
if isinstance(pvalueish, tuple) and len(pvalueish) > 0:
pvalue = pvalueish[0]
elif isinstance(pvalueish, dict) and len(pvalueish) > 0:
pvalue = next(iter(pvalueish.values()))
else:
pvalue = pvalueish
if hasattr(pvalue, 'pipeline'):
return pvalue.pipeline
return None | /rflow-apache-beam-2.28.0.tar.gz/rflow-apache-beam-2.28.0/apache_beam/utils/interactive_utils.py | 0.739046 | 0.232005 | interactive_utils.py | pypi |
# pytype: skip-file
from __future__ import absolute_import
import functools
import logging
import random
import sys
import time
import traceback
from builtins import next
from builtins import object
from builtins import range
from future.utils import raise_with_traceback
from apache_beam.io.filesystem import BeamIOError
# Protect against environments where apitools library is not available.
# pylint: disable=wrong-import-order, wrong-import-position
# TODO(sourabhbajaj): Remove the GCP specific error code to a submodule
try:
from apitools.base.py.exceptions import HttpError
except ImportError as e:
HttpError = None
# Protect against environments where aws tools are not available.
# pylint: disable=wrong-import-order, wrong-import-position, ungrouped-imports
try:
from apache_beam.io.aws.clients.s3 import messages as _s3messages
except ImportError:
S3ClientError = None
else:
S3ClientError = _s3messages.S3ClientError
# pylint: enable=wrong-import-order, wrong-import-position
_LOGGER = logging.getLogger(__name__)
class PermanentException(Exception):
  """Base class for exceptions that should not be retried.

  retry_on_server_errors_filter treats any instance of this class as
  non-retryable.
  """
  pass
class FuzzedExponentialIntervals(object):
  """Iterable for intervals that are exponentially spaced, with fuzzing.

  On iteration, yields retry interval lengths, in seconds. Every iteration over
  this iterable will yield differently fuzzed interval lengths, as long as fuzz
  is nonzero.

  Args:
    initial_delay_secs: The delay before the first retry, in seconds.
    num_retries: The total number of times to retry.
    factor: The exponential factor to use on subsequent retries.
      Default is 2 (doubling).
    fuzz: A value between 0 and 1, indicating the fraction of fuzz. For a
      given delay d, the fuzzed delay is randomly chosen between
      [(1 - fuzz) * d, d].
    max_delay_secs: Maximum delay (in seconds). After this limit is reached,
      further tries use max_delay_sec instead of exponentially increasing
      the time. Defaults to 1 hour.
    stop_after_secs: Places a limit on the sum of intervals returned (in
      seconds), such that the sum is <= stop_after_secs. Defaults to disabled
      (None). You may need to increase num_retries to effectively use this
      feature.
  """
  def __init__(
      self,
      initial_delay_secs,
      num_retries,
      factor=2,
      fuzz=0.5,
      max_delay_secs=60 * 60 * 1,
      stop_after_secs=None):
    if num_retries > 10000:
      raise ValueError('num_retries parameter cannot exceed 10000.')
    if not 0 <= fuzz <= 1:
      raise ValueError('fuzz parameter expected to be in [0, 1] range.')
    self._initial_delay_secs = initial_delay_secs
    self._num_retries = num_retries
    self._factor = factor
    self._fuzz = fuzz
    self._max_delay_secs = max_delay_secs
    self._stop_after_secs = stop_after_secs

  def __iter__(self):
    # Start at the initial delay, already capped by the maximum.
    base_delay = min(self._max_delay_secs, self._initial_delay_secs)
    total_so_far = 0
    for _ in range(self._num_retries):
      # Fuzzed delay lies uniformly in [(1 - fuzz) * d, d].
      multiplier = 1 - self._fuzz + random.random() * self._fuzz
      fuzzed_delay = base_delay * multiplier
      total_so_far += fuzzed_delay
      if (self._stop_after_secs is not None and
          total_so_far > self._stop_after_secs):
        break
      yield fuzzed_delay
      # Grow exponentially, capped at the maximum delay.
      base_delay = min(self._max_delay_secs, base_delay * self._factor)
def retry_on_server_errors_filter(exception):
  """Allow retries for server-side (5xx) errors and generic exceptions.

  HTTP and S3 errors are retried only when their status code is in the 5xx
  range; any other exception is retried unless it is a PermanentException.
  """
  is_http_error = HttpError is not None and isinstance(exception, HttpError)
  if is_http_error:
    return exception.status_code >= 500
  is_s3_error = (
      S3ClientError is not None and isinstance(exception, S3ClientError))
  if is_s3_error:
    return exception.code >= 500
  return not isinstance(exception, PermanentException)
# TODO(BEAM-6202): Dataflow returns 404 for job ids that actually exist.
# Retry on those errors.
def retry_on_server_errors_and_notfound_filter(exception):
  """Retry on server errors, and additionally on HTTP 404 Not Found."""
  if (HttpError is not None and isinstance(exception, HttpError) and
      exception.status_code == 404):  # 404 Not Found
    return True
  return retry_on_server_errors_filter(exception)
def retry_on_server_errors_and_timeout_filter(exception):
  """Retry on server errors, plus HTTP/S3 408 Request Timeout."""
  if (HttpError is not None and isinstance(exception, HttpError) and
      exception.status_code == 408):  # 408 Request Timeout
    return True
  if (S3ClientError is not None and isinstance(exception, S3ClientError) and
      exception.code == 408):  # 408 Request Timeout
    return True
  return retry_on_server_errors_filter(exception)
def retry_on_server_errors_timeout_or_quota_issues_filter(exception):
  """Retry on server, timeout and 403 errors.

  403 errors can be accessDenied, billingNotEnabled, and also quotaExceeded,
  rateLimitExceeded.
  """
  if (HttpError is not None and isinstance(exception, HttpError) and
      exception.status_code == 403):
    return True
  if (S3ClientError is not None and isinstance(exception, S3ClientError) and
      exception.code == 403):
    return True
  return retry_on_server_errors_and_timeout_filter(exception)
def retry_on_beam_io_error_filter(exception):
  """Filter allowing retries on Beam IO errors.

  Returns True only for BeamIOError; every other exception type is not
  retried by this filter.
  """
  return isinstance(exception, BeamIOError)
def retry_if_valid_input_but_server_error_and_timeout_filter(exception):
  """Retry server/timeout errors, but never ValueError (invalid input)."""
  return (
      not isinstance(exception, ValueError) and
      retry_on_server_errors_and_timeout_filter(exception))
SERVER_ERROR_OR_TIMEOUT_CODES = [408, 500, 502, 503, 504, 598, 599]
class Clock(object):
  """A simple clock implementing sleep()."""

  def sleep(self, value):
    """Blocks for ``value`` seconds using time.sleep."""
    time.sleep(value)
def no_retries(fun):
  """A retry decorator for places where we do not want retries.

  Wraps ``fun`` with with_exponential_backoff configured with a filter that
  rejects every exception, so the first failure propagates immediately.
  """
  return with_exponential_backoff(retry_filter=lambda _: False, clock=None)(fun)
def with_exponential_backoff(
    num_retries=7,
    initial_delay_secs=5.0,
    logger=_LOGGER.warning,
    retry_filter=retry_on_server_errors_filter,
    clock=Clock(),
    fuzz=True,
    factor=2,
    max_delay_secs=60 * 60,
    stop_after_secs=None):
  """Decorator with arguments that control the retry logic.

  Args:
    num_retries: The total number of times to retry.
    initial_delay_secs: The delay before the first retry, in seconds.
    logger: A callable used to report an exception. Must have the same
      signature as functions in the standard logging module. The default is
      _LOGGER.warning.
    retry_filter: A callable getting the exception raised and returning True
      if the retry should happen. For instance we do not want to retry on
      404 Http errors most of the time. The default value will return true
      for server errors (HTTP status code >= 500) and non Http errors.
    clock: A clock object implementing a sleep method. The default clock
      will use time.sleep().
    fuzz: True if the delay should be fuzzed (default). During testing False
      can be used so that the delays are not randomized.
    factor: The exponential factor to use on subsequent retries.
      Default is 2 (doubling).
    max_delay_secs: Maximum delay (in seconds). After this limit is reached,
      further tries use max_delay_secs instead of exponentially increasing
      the time. Defaults to 1 hour.
    stop_after_secs: Places a limit on the sum of delays between retries,
      such that the sum is <= stop_after_secs. Retries will stop after the
      limit is reached. Defaults to disabled (None). You may need to
      increase num_retries to effectively use this feature.

  Returns:
    As per Python decorators with arguments pattern returns a decorator
    for the function which in turn will return the wrapped (decorated)
    function.

  The decorator is intended to be used on callables that make HTTP or RPC
  requests that can temporarily timeout or have transient errors. For
  instance the make_http_request() call below will be retried up to
  num_retries times (7 by default) with exponential backoff and fuzzing of
  the delay interval (default settings).

    from apache_beam.utils import retry
    # ...
    @retry.with_exponential_backoff()
    make_http_request(args)
  """
  def real_decorator(fun):
    """The real decorator whose purpose is to return the wrapped function."""
    @functools.wraps(fun)
    def wrapper(*args, **kwargs):
      retry_intervals = iter(
          FuzzedExponentialIntervals(
              initial_delay_secs,
              num_retries,
              factor,
              fuzz=0.5 if fuzz else 0,
              max_delay_secs=max_delay_secs,
              stop_after_secs=stop_after_secs))
      while True:
        try:
          return fun(*args, **kwargs)
        except Exception as exn:  # pylint: disable=broad-except
          if not retry_filter(exn):
            raise
          # Get the traceback object for the current exception. The
          # sys.exc_info() function returns a tuple with three elements:
          # exception type, exception value, and exception traceback.
          exn_traceback = sys.exc_info()[2]
          try:
            try:
              sleep_interval = next(retry_intervals)
            except StopIteration:
              # Re-raise the original exception since we finished the
              # retries.
              raise_with_traceback(exn, exn_traceback)
            logger(
                'Retry with exponential backoff: waiting for %s seconds before '
                'retrying %s because we caught exception: %s '
                'Traceback for above exception (most recent call last):\n%s',
                sleep_interval,
                getattr(fun, '__name__', str(fun)),
                ''.join(traceback.format_exception_only(exn.__class__, exn)),
                ''.join(traceback.format_tb(exn_traceback)))
            clock.sleep(sleep_interval)
          finally:
            # Traceback objects in locals can cause reference cycles that
            # will prevent garbage collection. Clear it now since we do not
            # need it anymore.
            exn_traceback = None

    return wrapper

  return real_decorator
# pytype: skip-file
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import logging
import random
from builtins import range
import apache_beam as beam
import apache_beam.typehints.typehints as typehints
from apache_beam.coders import VarIntCoder
from apache_beam.runners.portability.fn_api_runner import FnApiRunner
from apache_beam.tools import utils
from apache_beam.transforms.timeutil import TimeDomain
from apache_beam.transforms.userstate import SetStateSpec
from apache_beam.transforms.userstate import TimerSpec
from apache_beam.transforms.userstate import on_timer
NUM_PARALLEL_STAGES = 7
NUM_SERIAL_STAGES = 5
class BagInStateOutputAfterTimer(beam.DoFn):
  """Buffers incoming values in set state and re-keys them on a timer."""

  SET_STATE = SetStateSpec('buffer', VarIntCoder())
  EMIT_TIMER = TimerSpec('emit_timer', TimeDomain.WATERMARK)

  def process(
      self,
      element,
      set_state=beam.DoFn.StateParam(SET_STATE),
      emit_timer=beam.DoFn.TimerParam(EMIT_TIMER)):
    _, values = element
    for value in values:
      set_state.add(value)
    # Fire at watermark timestamp 1 to flush the buffered values.
    emit_timer.set(1)

  @on_timer(EMIT_TIMER)
  def emit_values(self, set_state=beam.DoFn.StateParam(SET_STATE)):
    # Re-key every buffered value with a fresh random key.
    return [(random.randint(0, 1000), value) for value in set_state.read()]
def _build_serial_stages(
    pipeline, num_serial_stages, num_elements, stage_count):
  """Builds one chain of ``num_serial_stages`` stateful stages.

  Seeds the chain with randomly keyed integers, groups them, then applies
  num_serial_stages ParDo(BagInStateOutputAfterTimer) + GroupByKey pairs.
  """
  seed_data = [(random.randint(0, 1000), i) for i in range(num_elements)]
  pc = (
      pipeline | ('start_stage%s' % stage_count) >> beam.Create(seed_data)
      | ('gbk_start_stage%s' % stage_count) >> beam.GroupByKey())
  for stage_idx in range(num_serial_stages):
    pc = (
        pc
        | ('stage%s_map%s' % (stage_count, stage_idx)) >> beam.ParDo(
            BagInStateOutputAfterTimer()).with_output_types(
                typehints.KV[int, int])
        | ('stage%s_gbk%s' % (stage_count, stage_idx)) >> beam.GroupByKey())
  return pc
def run_single_pipeline(size):
  """Returns a zero-arg callable that runs the benchmark pipeline once."""

  def _pipeline_runner():
    with beam.Pipeline(runner=FnApiRunner()) as p:
      for stage_idx in range(NUM_PARALLEL_STAGES):
        _build_serial_stages(p, NUM_SERIAL_STAGES, size, stage_idx)

  return _pipeline_runner
def run_benchmark(starting_point, num_runs, num_elements_step, verbose):
  """Runs a linear-regression benchmark suite over the pipeline builder."""
  config = utils.LinearRegressionBenchmarkConfig(
      run_single_pipeline, starting_point, num_elements_step, num_runs)
  utils.run_benchmarks([config], verbose=verbose)
if __name__ == '__main__':
  logging.basicConfig()
  utils.check_compiled('apache_beam.runners.common')
  parser = argparse.ArgumentParser()
  parser.add_argument('--num_runs', default=10, type=int)
  parser.add_argument('--starting_point', default=1, type=int)
  parser.add_argument('--increment', default=100, type=int)
  # Note: argparse's type=bool treats ANY non-empty string (including
  # "False") as True, so parse the flag value explicitly instead.
  parser.add_argument(
      '--verbose',
      default=True,
      type=lambda s: str(s).lower() in ('true', '1', 'yes'))
  options = parser.parse_args()
  run_benchmark(
      options.starting_point,
      options.num_runs,
      options.increment,
      options.verbose)
# pytype: skip-file
from __future__ import absolute_import
from __future__ import print_function
import argparse
import logging
import random
import re
import string
import sys
from past.builtins import unicode
from apache_beam.coders import proto2_coder_test_messages_pb2 as test_message
from apache_beam.coders import coders
from apache_beam.tools import utils
from apache_beam.transforms import window
from apache_beam.utils import windowed_value
def coder_benchmark_factory(coder, generate_fn):
  """Creates a benchmark that encodes and decodes a list of elements.

  Args:
    coder: coder to use to encode an element.
    generate_fn: a callable that generates an element.

  Returns:
    A benchmark class whose instances round-trip a list of generated
    elements through the coder in a single call.
  """
  class CoderBenchmark(object):
    def __init__(self, num_elements_per_benchmark):
      # Wrap in an IterableCoder so the whole list round-trips in one call.
      self._coder = coders.IterableCoder(coder)
      self._list = [generate_fn() for _ in range(num_elements_per_benchmark)]
    def __call__(self):
      # Calling coder operations on a single element at a time may incur
      # irrelevant overhead. To compensate, we use a list of elements.
      _ = self._coder.decode(self._coder.encode(self._list))
  CoderBenchmark.__name__ = "%s, %s" % (generate_fn.__name__, str(coder))
  return CoderBenchmark
def small_int():
  """Returns a random int in [0, 127]."""
  return random.randint(0, 127)
def large_int():
  """Returns a random int in [sys.maxsize >> 2, sys.maxsize]."""
  return random.randint(sys.maxsize >> 2, sys.maxsize)
def random_string(length):
  """Returns a random alphanumeric text string of ``length`` characters.

  Wrapped in past.builtins.unicode so the result is a text (not byte)
  string on both Python 2 and 3.
  """
  return unicode(
      ''.join(
          random.choice(string.ascii_letters + string.digits)
          for _ in range(length)))
def small_string():
  """Returns a random 4-character alphanumeric string."""
  return random_string(4)
def large_string():
  """Returns a random 100-character alphanumeric string."""
  return random_string(100)
def list_int(size):
  """Returns a list of ``size`` random small ints."""
  return [small_int() for _ in range(size)]
def dict_int_int(size):
  """Returns an int->int identity dict built from ``size`` random ints.

  Duplicate keys collapse, so the result may hold fewer than ``size``
  entries.
  """
  return {i: i for i in list_int(size)}
def small_list():
  """Returns a list of 10 random small ints."""
  return list_int(10)
def large_list():
  """Returns a list of 1000 random booleans."""
  # Bool is the last item in FastPrimitiveCoders before pickle.
  return [bool(k) for k in list_int(1000)]
def small_tuple():
  """Returns a random 2-tuple of small ints."""
  # Benchmark a common case of 2-element tuples.
  return tuple(list_int(2))
def large_tuple():
  """Returns a 1000-element tuple of random booleans."""
  return tuple(large_list())
def small_dict():
  """Returns a small int->int identity dict (up to 10 entries)."""
  return {i: i for i in small_list()}
def large_dict():
  """Returns a bool->bool identity dict (at most 2 entries: True/False)."""
  return {i: i for i in large_list()}
def large_iterable():
  """Yields one buffer-sized string followed by 1000 ints.

  The leading string is sized to the coder's default buffer — presumably to
  force the sequence coder to grow/flush its buffer; confirm against
  SequenceCoderImpl.
  """
  yield 'a' * coders.coder_impl.SequenceCoderImpl._DEFAULT_BUFFER_SIZE
  for k in range(1000):
    yield k
def random_message_with_map(size):
  """Returns a MessageWithMap proto with up to ``size`` random map entries.

  Duplicate random keys overwrite each other, so the map may end up smaller
  than ``size``.
  """
  message = test_message.MessageWithMap()
  keys = list_int(size)
  random.shuffle(keys)
  for key in keys:
    message.field1[str(key)].field1 = small_string()
  return message
def small_message_with_map():
  """Returns a MessageWithMap proto with up to 5 map entries."""
  return random_message_with_map(5)
def large_message_with_map():
  """Returns a MessageWithMap proto with up to 20 map entries."""
  return random_message_with_map(20)
def globally_windowed_value():
  """Returns a random small int wrapped in the single global window."""
  return windowed_value.WindowedValue(
      value=small_int(), timestamp=12345678, windows=(window.GlobalWindow(), ))
def random_windowed_value(num_windows):
  """Returns a random small int in ``num_windows`` interval windows.

  Windows start at multiples of 10 and have random lengths in [0, 127].
  """
  return windowed_value.WindowedValue(
      value=small_int(),
      timestamp=12345678,
      windows=tuple(
          window.IntervalWindow(i * 10, i * 10 + small_int())
          for i in range(num_windows)))
def wv_with_one_window():
  """Returns a windowed value assigned to a single interval window."""
  return random_windowed_value(num_windows=1)
def wv_with_multiple_windows():
  """Returns a windowed value assigned to 32 interval windows."""
  return random_windowed_value(num_windows=32)
def run_coder_benchmarks(
    num_runs, input_size, seed, verbose, filter_regex='.*'):
  """Builds the coder benchmark suite and runs entries matching the filter.

  Args:
    num_runs: int, times to run each benchmark.
    input_size: int, number of elements encoded/decoded per run.
    seed: seed for the random generators, for reproducibility.
    verbose: bool, whether to print per-run results.
    filter_regex: case-insensitive regex matched against benchmark names.
  """
  random.seed(seed)
  # TODO(BEAM-4441): Pick coders using type hints, for example:
  # tuple_coder = typecoders.registry.get_coder(typing.Tuple[int, ...])
  benchmarks = [
      coder_benchmark_factory(coders.FastPrimitivesCoder(), small_int),
      coder_benchmark_factory(coders.FastPrimitivesCoder(), large_int),
      coder_benchmark_factory(coders.FastPrimitivesCoder(), small_string),
      coder_benchmark_factory(coders.FastPrimitivesCoder(), large_string),
      coder_benchmark_factory(coders.FastPrimitivesCoder(), small_list),
      coder_benchmark_factory(
          coders.IterableCoder(coders.FastPrimitivesCoder()), small_list),
      coder_benchmark_factory(coders.FastPrimitivesCoder(), large_list),
      coder_benchmark_factory(
          coders.IterableCoder(coders.FastPrimitivesCoder()), large_list),
      coder_benchmark_factory(
          coders.IterableCoder(coders.FastPrimitivesCoder()), large_iterable),
      coder_benchmark_factory(coders.FastPrimitivesCoder(), small_tuple),
      coder_benchmark_factory(coders.FastPrimitivesCoder(), large_tuple),
      coder_benchmark_factory(coders.FastPrimitivesCoder(), small_dict),
      coder_benchmark_factory(coders.FastPrimitivesCoder(), large_dict),
      coder_benchmark_factory(
          coders.ProtoCoder(test_message.MessageWithMap),
          small_message_with_map),
      coder_benchmark_factory(
          coders.ProtoCoder(test_message.MessageWithMap),
          large_message_with_map),
      coder_benchmark_factory(
          coders.DeterministicProtoCoder(test_message.MessageWithMap),
          small_message_with_map),
      coder_benchmark_factory(
          coders.DeterministicProtoCoder(test_message.MessageWithMap),
          large_message_with_map),
      coder_benchmark_factory(
          coders.WindowedValueCoder(coders.FastPrimitivesCoder()),
          wv_with_one_window),
      coder_benchmark_factory(
          coders.WindowedValueCoder(
              coders.FastPrimitivesCoder(), coders.IntervalWindowCoder()),
          wv_with_multiple_windows),
      coder_benchmark_factory(
          coders.WindowedValueCoder(
              coders.FastPrimitivesCoder(), coders.GlobalWindowCoder()),
          globally_windowed_value),
      coder_benchmark_factory(
          coders.LengthPrefixCoder(coders.FastPrimitivesCoder()), small_int)
  ]
  # Only run benchmarks whose (generated) class name matches the filter.
  suite = [
      utils.BenchmarkConfig(b, input_size, num_runs) for b in benchmarks
      if re.search(filter_regex, b.__name__, flags=re.I)
  ]
  utils.run_benchmarks(suite, verbose=verbose)
if __name__ == "__main__":
  logging.basicConfig()
  parser = argparse.ArgumentParser()
  parser.add_argument('--filter', default='.*')
  parser.add_argument('--num_runs', default=20, type=int)
  parser.add_argument('--num_elements_per_benchmark', default=1000, type=int)
  # The default seed is fixed for better run-to-run consistency.
  parser.add_argument('--seed', default=42, type=int)
  options = parser.parse_args()
  utils.check_compiled("apache_beam.coders.coder_impl")
  # The parsed options are the single source of truth here; the previous
  # shadowing locals (num_runs/num_elements_per_benchmark/seed) were dead
  # code and have been removed.
  run_coder_benchmarks(
      options.num_runs,
      options.num_elements_per_benchmark,
      options.seed,
      verbose=True,
      filter_regex=options.filter)
# pytype: skip-file
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import gc
import importlib
import os
import time
import numpy
def check_compiled(module):
  """Raises if ``module`` was imported from plain Python source.

  Args:
    module: string, fully qualified module name.

  Raises:
    RuntimeError: if the module's file extension is .py or .pyc, i.e. the
      module was not built as a compiled extension.
  """
  mod = importlib.import_module(module)
  _, extension = os.path.splitext(mod.__file__)
  if extension in ('.py', '.pyc'):
    raise RuntimeError(
        "Profiling uncompiled code.\n"
        "To compile beam, run "
        "'pip install Cython; python setup.py build_ext --inplace'")
class BenchmarkConfig(collections.namedtuple("BenchmarkConfig",
                                             ["benchmark", "size", "num_runs"])
                      ):
  """Configuration for a fixed-size benchmark.

  Attributes:
    benchmark: a callable that takes an int argument - benchmark size,
      and returns a callable. A returned callable must run the code being
      benchmarked on an input of specified size. For example:

        class MyBenchmark(object):
          def __init__(self, size):
            [do necessary initialization]
          def __call__(self):
            [run the code in question]

    size: int, a size of the input. Aggregated per-element metrics
      are counted based on the size of the input.
    num_runs: int, number of times to run each benchmark.
  """

  def __str__(self):
    label = getattr(self.benchmark, '__name__', str(self.benchmark))
    return "%s, %s element(s)" % (label, str(self.size))
class LinearRegressionBenchmarkConfig(collections.namedtuple(
    "LinearRegressionBenchmarkConfig",
    ["benchmark", "starting_point", "increment", "num_runs"])):
  """Configuration for a benchmark whose input grows linearly per run.

  Attributes:
    benchmark: a callable that takes an int argument - benchmark size,
      and returns a callable. A returned callable must run the code being
      benchmarked on an input of specified size. For example:

        class MyBenchmark(object):
          def __init__(self, size):
            [do necessary initialization]
          def __call__(self):
            [run the code in question]

    starting_point: int, an initial size of the input. Regression results
      are calculated based on the input.
    increment: int, the rate of growth of the input for each run of the
      benchmark.
    num_runs: int, number of times to run each benchmark.
  """

  def __str__(self):
    label = getattr(self.benchmark, '__name__', str(self.benchmark))
    return "%s, %s element(s) at start, %s growth per run" % (
        label, str(self.starting_point), str(self.increment))
def run_benchmarks(benchmark_suite, verbose=True):
  """Runs benchmarks, and collects execution times.

  A simple instrumentation to run a callable several times, collect and
  print its execution times.

  Args:
    benchmark_suite: A list of BenchmarkConfig.
    verbose: bool, whether to print benchmark results to stdout.

  Returns:
    A tuple (size_series, cost_series) of two dictionaries, each keyed by
    benchmark name: size_series maps names to the list of input sizes used
    per run, and cost_series maps names to the corresponding execution
    times in seconds.
  """
  def run(benchmark_fn, size):
    # Contain each run of a benchmark inside a function so that any
    # temporary objects can be garbage-collected after the run.
    benchmark_instance_callable = benchmark_fn(size)
    start = time.time()
    _ = benchmark_instance_callable()
    return time.time() - start

  cost_series = collections.defaultdict(list)
  size_series = collections.defaultdict(list)
  for benchmark_config in benchmark_suite:
    name = str(benchmark_config)
    num_runs = benchmark_config.num_runs
    if isinstance(benchmark_config, LinearRegressionBenchmarkConfig):
      size = benchmark_config.starting_point
      step = benchmark_config.increment
    else:
      assert isinstance(benchmark_config, BenchmarkConfig)
      size = benchmark_config.size
      step = 0
    for run_id in range(num_runs):
      # Do a proactive GC before each run to minimize side-effects of
      # different runs.
      gc.collect()
      time_cost = run(benchmark_config.benchmark, size)
      # Appending size and time cost to perform linear regression
      cost_series[name].append(time_cost)
      size_series[name].append(size)
      if verbose:
        per_element_cost = time_cost / size
        print(
            "%s: run %d of %d, per element time cost: %g sec" %
            (name, run_id + 1, num_runs, per_element_cost))
      # Incrementing the size of the benchmark run by the step size
      size += step
    if verbose:
      print("")
  if verbose:
    pad_length = max([len(str(bc)) for bc in benchmark_suite])
    for benchmark_config in benchmark_suite:
      name = str(benchmark_config)
      if isinstance(benchmark_config, LinearRegressionBenchmarkConfig):
        # scipy is imported lazily so fixed-size suites don't require it.
        from scipy import stats
        print()
        # pylint: disable=unused-variable
        gradient, intercept, r_value, p_value, std_err = stats.linregress(
            size_series[name], cost_series[name])
        print("Fixed cost ", intercept)
        print("Per-element ", gradient)
        print("R^2 ", r_value**2)
      else:
        assert isinstance(benchmark_config, BenchmarkConfig)
        per_element_median_cost = (
            numpy.median(cost_series[name]) / benchmark_config.size)
        std = numpy.std(cost_series[name]) / benchmark_config.size
        print(
            "%s: p. element median time cost: %g sec, relative std: %.2f%%" % (
                name.ljust(pad_length, " "),
                per_element_median_cost,
                std * 100 / per_element_median_cost))
  return size_series, cost_series
# pytype: skip-file
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import logging
from builtins import range
from collections import defaultdict
from time import time
from typing import Iterable
from typing import Tuple
from typing import Union
import apache_beam as beam
from apache_beam.options.pipeline_options import PipelineOptions
from apache_beam.tools import utils
@beam.typehints.with_input_types(Tuple[int, ...])
class SimpleInput(beam.DoFn):
  """Pass-through DoFn declaring a flat variadic int-tuple *input* hint."""
  def process(self, element, *args, **kwargs):
    yield element
@beam.typehints.with_output_types(Tuple[int, ...])
class SimpleOutput(beam.DoFn):
  """Pass-through DoFn declaring a flat variadic int-tuple *output* hint."""
  def process(self, element, *args, **kwargs):
    yield element
@beam.typehints.with_input_types(
    Tuple[int, str, Tuple[float, ...], Iterable[int], Union[str, int]])
class NestedInput(beam.DoFn):
  """Pass-through DoFn declaring a deeply nested *input* typehint."""
  def process(self, element, *args, **kwargs):
    yield element
@beam.typehints.with_output_types(
    Tuple[int, str, Tuple[float, ...], Iterable[int], Union[str, int]])
class NestedOutput(beam.DoFn):
  """Pass-through DoFn declaring a deeply nested *output* typehint."""
  def process(self, element, *args, **kwargs):
    yield element
def run_benchmark(
    num_dofns=100, num_runs=10, num_elements_step=2000, num_for_averaging=4):
  """Times type-check overhead across pipeline-option modes.

  For each run, builds chains of num_dofns simple- and nested-typehinted
  DoFns over a growing element count and prints the averaged wall time per
  (element count, typehint shape, type-check mode) combination.
  """
  options_map = {
      'No Type Check': PipelineOptions(),
      'Runtime Type Check': PipelineOptions(runtime_type_check=True),
      'Performance Runtime Type Check': PipelineOptions(
          performance_runtime_type_check=True)
  }
  for run in range(num_runs):
    num_elements = num_elements_step * run + 1
    simple_elements = [
        tuple(i for i in range(200)) for _ in range(num_elements)
    ]
    nested_elements = [(
        1,
        '2',
        tuple(float(i) for i in range(100)), [i for i in range(100)],
        '5') for _ in range(num_elements)]
    # Per-run timing table: num_elements -> typehint shape -> mode -> secs.
    timings = defaultdict(lambda: defaultdict(lambda: defaultdict(float)))
    # Do each run num_for_averaging times to get an average with reduced noise
    for _ in range(num_for_averaging):
      for option_name, options in options_map.items():
        # Run a Pipeline using DoFn's with simple typehints
        start = time()
        with beam.Pipeline(options=options) as p:
          pc = p | beam.Create(simple_elements)
          for ix in range(num_dofns):
            pc = (
                pc | 'SimpleOutput %i' % ix >> beam.ParDo(SimpleOutput())
                | 'SimpleInput %i' % ix >> beam.ParDo(SimpleInput()))
        timings[num_elements]['Simple Types'][option_name] += time() - start
        # Run a pipeline using DoFn's with nested typehints
        start = time()
        with beam.Pipeline(options=options) as p:
          pc = p | beam.Create(nested_elements)
          for ix in range(num_dofns):
            pc = (
                pc | 'NestedOutput %i' % ix >> beam.ParDo(NestedOutput())
                | 'NestedInput %i' % ix >> beam.ParDo(NestedInput()))
        timings[num_elements]['Nested Types'][option_name] += time() - start
    for num_elements, element_type_map in timings.items():
      print("%d Element%s" % (num_elements, " " if num_elements == 1 else "s"))
      for element_type, option_name_map in element_type_map.items():
        print("-- %s" % element_type)
        for option_name, time_elapsed in option_name_map.items():
          print(
              "---- %.2f sec (%s)" %
              (time_elapsed / num_for_averaging, option_name))
      print('\n')
if __name__ == '__main__':
  logging.basicConfig()
  utils.check_compiled('apache_beam.runners.common')
  run_benchmark()
# pytype: skip-file
from __future__ import absolute_import
from __future__ import division
import threading
import time
from builtins import object
from datetime import datetime
from typing import Any
from typing import Optional
from typing import SupportsInt
# When Cython is not installed, provide a stand-in so the rest of the
# module can still test ``cython.compiled`` and take the interpreted
# (lock-protected) code paths.
try:
  import cython
except ImportError:
  class fake_cython:
    compiled = False
  globals()['cython'] = fake_cython
__all__ = [
'MetricAggregator',
'MetricCell',
'MetricCellFactory',
'DistributionResult',
'GaugeResult'
]
class MetricCell(object):
  """For internal use only; no backwards-compatibility guarantees.

  Accumulates in-memory changes to a metric.

  A MetricCell represents a specific metric in a single context and bundle.
  All subclasses must be thread safe, as these are used in the pipeline
  runners, and may be subject to parallel/concurrent updates. Cells should
  only be used directly within a runner.
  """
  def __init__(self):
    self._lock = threading.Lock()
    self._start_time = None

  def update(self, value):
    """Applies ``value`` to the cell; subclasses must implement this."""
    raise NotImplementedError

  def get_cumulative(self):
    """Returns a snapshot of the accumulated data; subclass hook."""
    raise NotImplementedError

  def to_runner_api_monitoring_info(self, name, transform_id):
    """Returns a MonitoringInfo proto stamped with this cell's start time."""
    if self._start_time is None:
      # Lazily record the first time this cell was exported.
      self._start_time = datetime.utcnow()
    mi = self.to_runner_api_monitoring_info_impl(name, transform_id)
    mi.start_time.FromDatetime(self._start_time)
    return mi

  def to_runner_api_monitoring_info_impl(self, name, transform_id):
    """Builds the raw MonitoringInfo proto; subclass hook."""
    raise NotImplementedError

  def reset(self):
    # type: () -> None
    raise NotImplementedError

  def __reduce__(self):
    # Raising here blocks pickling of cells.
    raise NotImplementedError
class MetricCellFactory(object):
  """Callable that constructs a MetricCell; subclasses override __call__."""

  def __call__(self):
    # type: () -> MetricCell
    raise NotImplementedError
class CounterCell(MetricCell):
  """For internal use only; no backwards-compatibility guarantees.

  Tracks the current value and delta of a counter metric.

  Each cell tracks the state of a metric independently per context per
  bundle. Therefore, each metric has a different cell in each bundle; cells
  are aggregated by the runner.

  This class is thread safe.
  """
  def __init__(self, *args):
    super(CounterCell, self).__init__(*args)
    self.value = CounterAggregator.identity_element()

  def reset(self):
    # type: () -> None
    self.value = CounterAggregator.identity_element()

  def combine(self, other):
    # type: (CounterCell) -> CounterCell
    # Returns a fresh cell holding the sum of both counters.
    result = CounterCell()
    result.inc(self.value + other.value)
    return result

  def inc(self, n=1):
    self.update(n)

  def dec(self, n=1):
    self.update(-n)

  def update(self, value):
    if cython.compiled:
      # In the compiled build 'ivalue' is presumably a typed (cdef) local
      # declared in the .pxd — TODO confirm before renaming.
      ivalue = value
      # Since we hold the GIL, no need for another lock.
      # And because the C threads won't preempt and interleave
      # each other.
      # Assuming there is no code trying to access the counters
      # directly by circumventing the GIL.
      self.value += ivalue
    else:
      with self._lock:
        self.value += value

  def get_cumulative(self):
    # type: () -> int
    with self._lock:
      return self.value

  def to_runner_api_monitoring_info_impl(self, name, transform_id):
    from apache_beam.metrics import monitoring_infos
    if not name.urn:
      # User counter case.
      return monitoring_infos.int64_user_counter(
          name.namespace,
          name.name,
          self.get_cumulative(),
          ptransform=transform_id)
    else:
      # Arbitrary URN case.
      return monitoring_infos.int64_counter(
          name.urn, self.get_cumulative(), labels=name.labels)
class DistributionCell(MetricCell):
  """For internal use only; no backwards-compatibility guarantees.

  Tracks the current value and delta for a distribution metric.

  Each cell tracks the state of a metric independently per context per
  bundle. Therefore, each metric has a different cell in each bundle, that
  is later aggregated.

  This class is thread safe.
  """
  def __init__(self, *args):
    super(DistributionCell, self).__init__(*args)
    self.data = DistributionAggregator.identity_element()

  def reset(self):
    # type: () -> None
    self.data = DistributionAggregator.identity_element()

  def combine(self, other):
    # type: (DistributionCell) -> DistributionCell
    result = DistributionCell()
    result.data = self.data.combine(other.data)
    return result

  def update(self, value):
    if cython.compiled:
      # We will hold the GIL throughout the entire _update.
      self._update(value)
    else:
      with self._lock:
        self._update(value)

  def _update(self, value):
    # Folds one observation into count/sum/min/max.
    if cython.compiled:
      # Presumably 'ivalue' is a typed (cdef) local in the compiled build,
      # making the explicit int() cast unnecessary — TODO confirm.
      ivalue = value
    else:
      ivalue = int(value)
    self.data.count = self.data.count + 1
    self.data.sum = self.data.sum + ivalue
    if ivalue < self.data.min:
      self.data.min = ivalue
    if ivalue > self.data.max:
      self.data.max = ivalue

  def get_cumulative(self):
    # type: () -> DistributionData
    with self._lock:
      return self.data.get_cumulative()

  def to_runner_api_monitoring_info_impl(self, name, transform_id):
    from apache_beam.metrics import monitoring_infos
    return monitoring_infos.int64_user_distribution(
        name.namespace,
        name.name,
        self.get_cumulative(),
        ptransform=transform_id)
class GaugeCell(MetricCell):
  """For internal use only; no backwards-compatibility guarantees.

  Tracks the current value and delta for a gauge metric.

  Each cell tracks the state of a metric independently per context per
  bundle. Therefore, each metric has a different cell in each bundle, that
  is later aggregated.

  This class is thread safe.
  """
  def __init__(self, *args):
    super(GaugeCell, self).__init__(*args)
    self.data = GaugeAggregator.identity_element()

  def reset(self):
    self.data = GaugeAggregator.identity_element()

  def combine(self, other):
    # type: (GaugeCell) -> GaugeCell
    merged = GaugeCell()
    merged.data = self.data.combine(other.data)
    return merged

  def set(self, value):
    """Sets the gauge; alias for update()."""
    self.update(value)

  def update(self, value):
    # type: (SupportsInt) -> None
    new_value = int(value)
    with self._lock:
      # The incoming value is by definition the latest one, so it is
      # written directly without comparing timestamps.
      self.data.value = new_value
      self.data.timestamp = time.time()

  def get_cumulative(self):
    # type: () -> GaugeData
    with self._lock:
      return self.data.get_cumulative()

  def to_runner_api_monitoring_info_impl(self, name, transform_id):
    from apache_beam.metrics import monitoring_infos
    return monitoring_infos.int64_user_gauge(
        name.namespace,
        name.name,
        self.get_cumulative(),
        ptransform=transform_id)
class DistributionResult(object):
  """The result of a Distribution metric."""
  def __init__(self, data):
    # type: (DistributionData) -> None
    self.data = data

  def __eq__(self, other):
    # type: (object) -> bool
    return (
        isinstance(other, DistributionResult) and self.data == other.data)

  def __hash__(self):
    # type: () -> int
    return hash(self.data)

  def __ne__(self, other):
    # type: (object) -> bool
    # TODO(BEAM-5949): Needed for Python 2 compatibility.
    return not self == other

  def __repr__(self):
    # type: () -> str
    return 'DistributionResult(sum={}, count={}, min={}, max={})'.format(
        self.sum, self.count, self.min, self.max)

  @property
  def max(self):
    # type: () -> Optional[int]
    # Extrema are meaningless for an empty distribution.
    return self.data.max if self.data.count else None

  @property
  def min(self):
    # type: () -> Optional[int]
    return self.data.min if self.data.count else None

  @property
  def count(self):
    # type: () -> Optional[int]
    return self.data.count

  @property
  def sum(self):
    # type: () -> Optional[int]
    return self.data.sum

  @property
  def mean(self):
    # type: () -> Optional[float]
    """Returns the float mean of the distribution, or None if empty."""
    return (
        None if self.data.count == 0 else self.data.sum / self.data.count)
class GaugeResult(object):
  """The result of a Gauge metric: latest value plus its timestamp."""
  def __init__(self, data):
    # type: (GaugeData) -> None
    self.data = data

  def __eq__(self, other):
    # type: (object) -> bool
    return isinstance(other, GaugeResult) and self.data == other.data

  def __hash__(self):
    # type: () -> int
    return hash(self.data)

  def __ne__(self, other):
    # type: (object) -> bool
    # TODO(BEAM-5949): Needed for Python 2 compatibility.
    return not self == other

  def __repr__(self):
    return '<GaugeResult(value={}, timestamp={})>'.format(
        self.value, self.timestamp)

  @property
  def value(self):
    # type: () -> Optional[int]
    return self.data.value

  @property
  def timestamp(self):
    # type: () -> Optional[int]
    return self.data.timestamp
class GaugeData(object):
  """For internal use only; no backwards-compatibility guarantees.

  Mutable holder for a gauge metric's latest integer value and the
  timestamp at which it was set.  Not thread safe: only the GaugeCell
  that contains it may modify it.
  """
  def __init__(self, value, timestamp=None):
    # type: (Optional[int], Optional[int]) -> None
    self.value = value
    # A missing timestamp normalizes to 0 so combine() comparisons work.
    self.timestamp = 0 if timestamp is None else timestamp

  def __eq__(self, other):
    # type: (object) -> bool
    return (
        isinstance(other, GaugeData) and self.value == other.value and
        self.timestamp == other.timestamp)

  def __ne__(self, other):
    # type: (object) -> bool
    # TODO(BEAM-5949): Needed for Python 2 compatibility.
    return not self == other

  def __hash__(self):
    # type: () -> int
    return hash((self.value, self.timestamp))

  def __repr__(self):
    # type: () -> str
    return '<GaugeData(value={}, timestamp={})>'.format(
        self.value, self.timestamp)

  def get_cumulative(self):
    # type: () -> GaugeData
    # Return a copy so the caller's snapshot is isolated from later updates.
    return GaugeData(self.value, timestamp=self.timestamp)

  def combine(self, other):
    # type: (Optional[GaugeData]) -> GaugeData
    """Return whichever of self/other carries the newer timestamp.

    Ties (and a None ``other``) resolve in favor of self.
    """
    if other is None or other.timestamp <= self.timestamp:
      return self
    return other

  @staticmethod
  def singleton(value, timestamp=None):
    # type: (Optional[int], Optional[int]) -> GaugeData
    return GaugeData(value, timestamp=timestamp)
class DistributionData(object):
  """For internal use only; no backwards-compatibility guarantees.

  Mutable holder for an integer distribution's aggregate statistics
  (sum, count, min, max).  Not thread safe: only the DistributionCell
  that contains it may modify it.
  """
  def __init__(self, sum, count, min, max):
    # type: (int, int, int, int) -> None
    # NOTE: parameter names shadow builtins; kept for interface compatibility.
    if not count:
      # Empty distribution: identity values so combine() works unchanged.
      self.sum = self.count = 0
      self.min = 2**63 - 1
      # Avoid Wimplicitly-unsigned-literal caused by -2**63.
      self.max = -self.min - 1
    else:
      self.sum = sum
      self.count = count
      self.min = min
      self.max = max

  def __eq__(self, other):
    # type: (object) -> bool
    return (
        isinstance(other, DistributionData) and self.sum == other.sum and
        self.count == other.count and self.min == other.min and
        self.max == other.max)

  def __ne__(self, other):
    # type: (object) -> bool
    # TODO(BEAM-5949): Needed for Python 2 compatibility.
    return not self == other

  def __hash__(self):
    # type: () -> int
    return hash((self.sum, self.count, self.min, self.max))

  def __repr__(self):
    # type: () -> str
    return 'DistributionData(sum={}, count={}, min={}, max={})'.format(
        self.sum, self.count, self.min, self.max)

  def get_cumulative(self):
    # type: () -> DistributionData
    # Copy so the snapshot is isolated from later mutation.
    return DistributionData(self.sum, self.count, self.min, self.max)

  def combine(self, other):
    # type: (Optional[DistributionData]) -> DistributionData
    """Return a new DistributionData merging self with other."""
    if other is None:
      return self
    return DistributionData(
        self.sum + other.sum,
        self.count + other.count,
        min(self.min, other.min),
        max(self.max, other.max))

  @staticmethod
  def singleton(value):
    # type: (int) -> DistributionData
    """A distribution holding exactly one observation of ``value``."""
    return DistributionData(value, 1, value, value)
class MetricAggregator(object):
  """For internal use only; no backwards-compatibility guarantees.

  Base interface for aggregating metric data during pipeline execution.
  """
  def identity_element(self):
    # type: () -> Any
    """Return the identity element of this aggregation.

    For the identity element it must hold that
    combine(any_element, identity_element) == any_element.
    """
    raise NotImplementedError

  def combine(self, x, y):
    # type: (Any, Any) -> Any
    """Merge two aggregated values into one."""
    raise NotImplementedError

  def result(self, x):
    # type: (Any) -> Any
    """Convert an aggregated value into a user-facing result."""
    raise NotImplementedError
class CounterAggregator(MetricAggregator):
  """For internal use only; no backwards-compatibility guarantees.

  Aggregator for Counter metric data during pipeline execution.
  Values aggregated should be ``int`` objects.
  """
  @staticmethod
  def identity_element():
    # type: () -> int
    # Zero is the additive identity.
    return 0

  def combine(self, x, y):
    # type: (SupportsInt, SupportsInt) -> int
    # Coerce both sides so SupportsInt inputs always yield a plain int.
    return int(x) + int(y)

  def result(self, x):
    # type: (SupportsInt) -> int
    return int(x)
class DistributionAggregator(MetricAggregator):
  """For internal use only; no backwards-compatibility guarantees.

  Aggregator for Distribution metric data during pipeline execution.
  Values aggregated should be ``DistributionData`` objects.
  """
  @staticmethod
  def identity_element():
    # type: () -> DistributionData
    # Matches DistributionData's empty sentinel: sum == count == 0 and
    # min/max at the extreme int64 bounds.
    return DistributionData(0, 0, 2**63 - 1, -2**63)

  def combine(self, x, y):
    # type: (DistributionData, DistributionData) -> DistributionData
    return x.combine(y)

  def result(self, x):
    # type: (DistributionData) -> DistributionResult
    # Wrap a snapshot so the result is isolated from further updates.
    return DistributionResult(x.get_cumulative())
class GaugeAggregator(MetricAggregator):
  """For internal use only; no backwards-compatibility guarantees.

  Aggregator for Gauge metric data during pipeline execution.
  Values aggregated should be ``GaugeData`` objects.
  """
  @staticmethod
  def identity_element():
    # type: () -> GaugeData
    # Timestamp 0 loses to any real update in GaugeData.combine.
    return GaugeData(0, timestamp=0)

  def combine(self, x, y):
    # type: (GaugeData, GaugeData) -> GaugeData
    return x.combine(y)

  def result(self, x):
    # type: (GaugeData) -> GaugeResult
    # Wrap a snapshot so the result is isolated from further updates.
    return GaugeResult(x.get_cumulative())
# pytype: skip-file
from __future__ import absolute_import
import threading
from builtins import object
from typing import TYPE_CHECKING
from typing import Any
from typing import Dict
from typing import FrozenSet
from typing import Optional
from typing import Type
from typing import Union
from typing import cast
from apache_beam.metrics import monitoring_infos
from apache_beam.metrics.cells import CounterCell
from apache_beam.metrics.cells import DistributionCell
from apache_beam.metrics.cells import GaugeCell
from apache_beam.runners.worker import statesampler
from apache_beam.runners.worker.statesampler import get_current_tracker
if TYPE_CHECKING:
from apache_beam.metrics.cells import GaugeData
from apache_beam.metrics.cells import DistributionData
from apache_beam.metrics.cells import MetricCell
from apache_beam.metrics.cells import MetricCellFactory
from apache_beam.metrics.metricbase import MetricName
from apache_beam.portability.api import metrics_pb2
class MetricKey(object):
  """Key used to identify an instance of a metric cell.

  Metrics are internally keyed by the name of the step they're associated
  with, the metric's name and namespace (for user defined metrics), and any
  extra label metadata added by the runner-specific metric collection service.
  """
  def __init__(self, step, metric, labels=None):
    """Initializes ``MetricKey``.

    Args:
      step: A string with the step this metric cell is part of.
      metric: A ``MetricName`` namespace+name that identifies a metric.
      labels: An arbitrary set of labels that also identifies the metric.
    """
    self.step = step
    self.metric = metric
    self.labels = labels or {}

  def __eq__(self, other):
    # NOTE(review): assumes `other` is a MetricKey; comparing against an
    # unrelated type raises AttributeError rather than returning False.
    return (
        self.step == other.step and self.metric == other.metric and
        self.labels == other.labels)

  def __ne__(self, other):
    # TODO(BEAM-5949): Needed for Python 2 compatibility.
    return not self == other

  def __hash__(self):
    # labels is a dict, so hash its key set; values do not contribute.
    return hash((self.step, self.metric, frozenset(self.labels)))

  def __repr__(self):
    return 'MetricKey(step={}, metric={}, labels={})'.format(
        self.step, self.metric, self.labels)
class MetricResult(object):
  """Keeps track of the status of a metric within a single bundle.

  Holds both kinds of updates to the metric: physical ("attempted") updates,
  made during pipeline execution but not necessarily committed, and logical
  ("committed") updates, which have been committed.

  Attributes:
    key: A ``MetricKey`` that identifies the metric and bundle of this result.
    committed: The committed (logical) updates of the metric. Its type is the
      metric type's result (e.g. int, DistributionResult, GaugeResult).
    attempted: The attempted (physical) updates of the metric, same type as
      ``committed``.
  """
  def __init__(self, key, committed, attempted):
    """Initializes ``MetricResult``.

    Args:
      key: A ``MetricKey`` object.
      committed: Metric data that has been committed (e.g. logical updates)
      attempted: Metric data that has been attempted (e.g. physical updates)
    """
    self.key = key
    self.committed = committed
    self.attempted = attempted

  def __eq__(self, other):
    # NOTE(review): assumes `other` is a MetricResult (AttributeError
    # otherwise), matching the rest of this module's equality style.
    return (
        self.key == other.key and self.committed == other.committed and
        self.attempted == other.attempted)

  def __ne__(self, other):
    # TODO(BEAM-5949): Needed for Python 2 compatibility.
    return not self == other

  def __hash__(self):
    return hash((self.key, self.committed, self.attempted))

  def __repr__(self):
    return 'MetricResult(key={}, committed={}, attempted={})'.format(
        self.key, str(self.committed), str(self.attempted))

  def __str__(self):
    return repr(self)

  @property
  def result(self):
    """The committed value when populated, otherwise the attempted value
    (committed may be missing on runners that do not support it)."""
    return self.committed or self.attempted
class _MetricsEnvironment(object):
  """Holds the MetricsContainer for every thread and other metric information.

  Not meant to be instantiated by users; the module-level
  ``MetricsEnvironment`` singleton below keeps track of global state.
  """
  def current_container(self):
    """Returns the current MetricsContainer, or None when no state
    tracker is active on this thread."""
    tracker = statesampler.get_current_tracker()
    if tracker is None:
      return None
    return tracker.current_state().metrics_container

  def process_wide_container(self):
    """Returns the MetricsContainer for process wide metrics, e.g. memory."""
    return PROCESS_WIDE_METRICS_CONTAINER


# Singleton through which all global metric state is accessed.
MetricsEnvironment = _MetricsEnvironment()
class _TypedMetricName(object):
"""Like MetricName, but also stores the cell type of the metric."""
def __init__(
self,
cell_type, # type: Union[Type[MetricCell], MetricCellFactory]
metric_name # type: Union[str, MetricName]
):
# type: (...) -> None
self.cell_type = cell_type
self.metric_name = metric_name
if isinstance(metric_name, str):
self.fast_name = metric_name
else:
self.fast_name = metric_name.fast_name()
# Cached for speed, as this is used as a key for every counter update.
self._hash = hash((cell_type, self.fast_name))
def __eq__(self, other):
return self is other or (
self.cell_type == other.cell_type and self.fast_name == other.fast_name)
def __ne__(self, other):
return not self == other
def __hash__(self):
return self._hash
def __str__(self):
return '%s %s' % (self.cell_type, self.metric_name)
def __reduce__(self):
return _TypedMetricName, (self.cell_type, self.metric_name)
# Sentinel for "no value passed"; also the implicit "no default configured".
_DEFAULT = None  # type: Any


class MetricUpdater(object):
  """A callable that updates the metric as quickly as possible."""
  def __init__(
      self,
      cell_type,  # type: Union[Type[MetricCell], MetricCellFactory]
      metric_name,  # type: Union[str, MetricName]
      default_value=None,
      process_wide=False):
    self.process_wide = process_wide
    self.typed_metric_name = _TypedMetricName(cell_type, metric_name)
    self.default_value = default_value

  def __call__(self, value=_DEFAULT):
    # type: (Any) -> None
    if value is _DEFAULT:
      # No explicit value: fall back to the configured default, if any.
      if self.default_value is _DEFAULT:
        raise ValueError(
            'Missing value for update of %s' % self.typed_metric_name.fast_name)
      value = self.default_value
    if not self.process_wide:
      # Per-bundle metric: route through the current thread's state tracker
      # (silently dropped when no tracker is active).
      tracker = get_current_tracker()
      if tracker is not None:
        tracker.update_metric(self.typed_metric_name, value)
      return
    # Process-wide metric (e.g. memory): bypass the tracker entirely.
    MetricsEnvironment.process_wide_container().get_metric_cell(
        self.typed_metric_name).update(value)

  def __reduce__(self):
    # NOTE(review): process_wide is not round-tripped through pickling;
    # presumably intentional, but confirm against upstream.
    return MetricUpdater, (
        self.typed_metric_name.cell_type,
        self.typed_metric_name.metric_name,
        self.default_value)
class MetricsContainer(object):
  """Holds the metrics of a single step and a single bundle.

  Or the metrics associated with the process/SDK harness. I.e. memory usage.
  """
  def __init__(self, step_name):
    self.step_name = step_name
    self.lock = threading.Lock()
    # Maps typed metric names to live cells; writes guarded by self.lock,
    # reads are lock-free on the hot path.
    self.metrics = dict()  # type: Dict[_TypedMetricName, MetricCell]

  def get_counter(self, metric_name):
    # type: (MetricName) -> CounterCell
    """Return (creating if needed) the CounterCell for metric_name."""
    return cast(
        CounterCell,
        self.get_metric_cell(_TypedMetricName(CounterCell, metric_name)))

  def get_distribution(self, metric_name):
    # type: (MetricName) -> DistributionCell
    """Return (creating if needed) the DistributionCell for metric_name."""
    return cast(
        DistributionCell,
        self.get_metric_cell(_TypedMetricName(DistributionCell, metric_name)))

  def get_gauge(self, metric_name):
    # type: (MetricName) -> GaugeCell
    """Return (creating if needed) the GaugeCell for metric_name."""
    return cast(
        GaugeCell,
        self.get_metric_cell(_TypedMetricName(GaugeCell, metric_name)))

  def get_metric_cell(self, typed_metric_name):
    # type: (_TypedMetricName) -> MetricCell
    """Return the cell for typed_metric_name, creating it on first use."""
    cell = self.metrics.get(typed_metric_name, None)
    if cell is None:
      with self.lock:
        # Re-check under the lock: another thread may have created the cell
        # between our lock-free read and acquiring the lock.  Without this
        # re-check the second writer would overwrite the first thread's
        # cell, discarding any updates already recorded on it.
        cell = self.metrics.get(typed_metric_name, None)
        if cell is None:
          cell = self.metrics[typed_metric_name] = typed_metric_name.cell_type()
    return cell

  def get_cumulative(self):
    # type: () -> MetricUpdates
    """Return MetricUpdates with cumulative values of all metrics in container.

    This returns all the cumulative values for all metrics.
    """
    # NOTE(review): iterates self.metrics without the lock; assumes callers
    # only invoke this once concurrent cell creation has quiesced -- confirm.
    counters = {
        MetricKey(self.step_name, k.metric_name): v.get_cumulative()
        for k, v in self.metrics.items() if k.cell_type == CounterCell
    }
    distributions = {
        MetricKey(self.step_name, k.metric_name): v.get_cumulative()
        for k, v in self.metrics.items() if k.cell_type == DistributionCell
    }
    gauges = {
        MetricKey(self.step_name, k.metric_name): v.get_cumulative()
        for k, v in self.metrics.items() if k.cell_type == GaugeCell
    }
    return MetricUpdates(counters, distributions, gauges)

  def to_runner_api(self):
    """Return the user-metric protos for every cell in this container."""
    return [
        cell.to_runner_api_user_metric(key.metric_name)
        for key, cell in self.metrics.items()
    ]

  def to_runner_api_monitoring_infos(self, transform_id):
    # type: (str) -> Dict[FrozenSet, metrics_pb2.MonitoringInfo]
    """Returns a list of MonitoringInfos for the metrics in this container."""
    # Snapshot under the lock so concurrent cell creation cannot mutate the
    # dict while we iterate.
    with self.lock:
      items = list(self.metrics.items())
    all_metrics = [
        cell.to_runner_api_monitoring_info(key.metric_name, transform_id)
        for key, cell in items
    ]
    return {
        monitoring_infos.to_key(mi): mi
        for mi in all_metrics if mi is not None
    }

  def reset(self):
    # type: () -> None
    """Reset every cell to its zero state (e.g. between bundles)."""
    for metric in self.metrics.values():
      metric.reset()

  def __reduce__(self):
    # Containers are tied to live execution state and must not be pickled.
    raise NotImplementedError


# Container for metrics scoped to the whole process (e.g. memory usage),
# shared by all threads; step_name is None because it belongs to no step.
PROCESS_WIDE_METRICS_CONTAINER = MetricsContainer(None)
class MetricUpdates(object):
  """Contains updates for several metrics.

  A metric update is an object containing information to update a metric.
  For Distribution metrics, it is DistributionData, and for Counter metrics,
  it's an int.
  """
  def __init__(
      self,
      counters=None,  # type: Optional[Dict[MetricKey, int]]
      distributions=None,  # type: Optional[Dict[MetricKey, DistributionData]]
      gauges=None  # type: Optional[Dict[MetricKey, GaugeData]]
  ):
    # type: (...) -> None
    """Create a MetricUpdates object.

    Args:
      counters: Dictionary of MetricKey:MetricUpdate updates.
      distributions: Dictionary of MetricKey:MetricUpdate objects.
      gauges: Dictionary of MetricKey:MetricUpdate objects.
    """
    # Missing/empty arguments normalize to fresh empty dicts.
    self.counters = counters or {}
    self.distributions = distributions or {}
    self.gauges = gauges or {}
# pytype: skip-file
from __future__ import absolute_import
from builtins import object
from typing import Dict
from typing import Optional
__all__ = [
'Metric', 'Counter', 'Distribution', 'Gauge', 'Histogram', 'MetricName'
]
class MetricName(object):
  """The name of a metric.

  The name of a metric consists of a namespace and a name. The namespace
  allows grouping related metrics together and also prevents collisions
  between multiple metrics of the same name.
  """
  def __init__(self, namespace, name, urn=None, labels=None):
    # type: (Optional[str], Optional[str], Optional[str], Optional[Dict[str, str]]) -> None
    """Initializes ``MetricName``.

    Note: namespace and name should be set for user metrics,
    urn and labels should be set for an arbitrary metric to package into a
    MonitoringInfo.

    Args:
      namespace: A string with the namespace of a metric.
      name: A string with the name of a metric.
      urn: URN to populate on a MonitoringInfo, when sending to RunnerHarness.
      labels: Labels to populate on a MonitoringInfo
    """
    # User metrics (no urn) must carry both a namespace and a name.
    if not urn:
      if not namespace:
        raise ValueError('Metric namespace must be non-empty')
      if not name:
        raise ValueError('Metric name must be non-empty')
    self.namespace = namespace
    self.name = name
    self.urn = urn
    self.labels = labels or {}

  def __eq__(self, other):
    # NOTE(review): assumes `other` is a MetricName (AttributeError
    # otherwise), matching this module's equality style.
    return (
        self.namespace == other.namespace and self.name == other.name and
        self.urn == other.urn and self.labels == other.labels)

  def __ne__(self, other):
    # TODO(BEAM-5949): Needed for Python 2 compatibility.
    return not self == other

  def __str__(self):
    if self.urn:
      return 'MetricName(namespace={}, name={}, urn={}, labels={})'.format(
          self.namespace, self.name, self.urn, self.labels)
    # User counter case.
    return 'MetricName(namespace={}, name={})'.format(self.namespace, self.name)

  def __hash__(self):
    # labels is a dict (unhashable), so fold in its items as a tuple.
    return hash((self.namespace, self.name, self.urn) +
                tuple(self.labels.items()))

  def fast_name(self):
    """Return a compact single-string form used as a fast lookup key."""
    name = self.name or ''
    namespace = self.namespace or ''
    urn = self.urn or ''
    labels = ''
    if self.labels:
      labels = '_'.join('%s=%s' % kv for kv in self.labels.items())
    return '%d_%s%s%s%s' % (len(name), name, namespace, urn, labels)
class Metric(object):
  """Base interface of a metric object."""
  def __init__(self, metric_name):
    # type: (MetricName) -> None
    # Namespaced name that uniquely identifies this metric.
    self.metric_name = metric_name
class Counter(Metric):
  """Counter metric interface. Allows a count to be incremented/decremented
  during pipeline execution."""
  def inc(self, n=1):
    # type: (int) -> None
    """Increase the counter by n; concrete subclasses must implement."""
    raise NotImplementedError

  def dec(self, n=1):
    # type: (int) -> None
    # Defined in terms of inc so subclasses only need to override inc.
    self.inc(-n)
class Distribution(Metric):
  """Distribution Metric interface.

  Allows statistics about the distribution of a variable to be collected during
  pipeline execution."""
  def update(self, value):
    # type: (int) -> None
    """Record one observed value; concrete subclasses must implement."""
    raise NotImplementedError
class Gauge(Metric):
  """Gauge Metric interface.

  Allows tracking of the latest value of a variable during pipeline
  execution."""
  def set(self, value):
    # type: (int) -> None
    """Record the latest value; concrete subclasses must implement."""
    raise NotImplementedError
class Histogram(Metric):
  """Histogram Metric interface.

  Allows statistics about the percentile of a variable to be collected
  during pipeline execution.
  """
  def update(self, value):
    # type: (int) -> None
    """Record one observed value; concrete subclasses must implement."""
    raise NotImplementedError
# pytype: skip-file
# mypy: disallow-untyped-defs
from __future__ import absolute_import
import logging
from builtins import object
from typing import TYPE_CHECKING
from typing import Dict
from typing import FrozenSet
from typing import Iterable
from typing import List
from typing import Optional
from typing import Set
from typing import Type
from typing import Union
from apache_beam.metrics import cells
from apache_beam.metrics.execution import MetricUpdater
from apache_beam.metrics.metricbase import Counter
from apache_beam.metrics.metricbase import Distribution
from apache_beam.metrics.metricbase import Gauge
from apache_beam.metrics.metricbase import MetricName
if TYPE_CHECKING:
from apache_beam.metrics.execution import MetricKey
from apache_beam.metrics.metricbase import Metric
__all__ = ['Metrics', 'MetricsFilter']
_LOGGER = logging.getLogger(__name__)
class Metrics(object):
  """Lets users create/access metric objects during pipeline execution."""
  @staticmethod
  def get_namespace(namespace):
    # type: (Union[Type, str]) -> str
    """Return the namespace string for a class (``module.Name``) or string."""
    if isinstance(namespace, type):
      return '{}.{}'.format(namespace.__module__, namespace.__name__)
    if isinstance(namespace, str):
      return namespace
    raise ValueError('Unknown namespace type')

  @staticmethod
  def counter(namespace, name):
    # type: (Union[Type, str], str) -> Metrics.DelegatingCounter
    """Obtains or creates a Counter metric.

    Args:
      namespace: A class or string that gives the namespace to a metric
      name: A string that gives a unique name to a metric

    Returns:
      A Counter object.
    """
    return Metrics.DelegatingCounter(
        MetricName(Metrics.get_namespace(namespace), name))

  @staticmethod
  def distribution(namespace, name):
    # type: (Union[Type, str], str) -> Metrics.DelegatingDistribution
    """Obtains or creates a Distribution metric.

    Distribution metrics are restricted to integer-only distributions.

    Args:
      namespace: A class or string that gives the namespace to a metric
      name: A string that gives a unique name to a metric

    Returns:
      A Distribution object.
    """
    return Metrics.DelegatingDistribution(
        MetricName(Metrics.get_namespace(namespace), name))

  @staticmethod
  def gauge(namespace, name):
    # type: (Union[Type, str], str) -> Metrics.DelegatingGauge
    """Obtains or creates a Gauge metric.

    Gauge metrics are restricted to integer-only values.

    Args:
      namespace: A class or string that gives the namespace to a metric
      name: A string that gives a unique name to a metric

    Returns:
      A Gauge object.
    """
    return Metrics.DelegatingGauge(
        MetricName(Metrics.get_namespace(namespace), name))

  class DelegatingCounter(Counter):
    """Metrics Counter that Delegates functionality to MetricsEnvironment."""
    def __init__(self, metric_name, process_wide=False):
      # type: (MetricName, bool) -> None
      super(Metrics.DelegatingCounter, self).__init__(metric_name)
      # `inc` is replaced wholesale by a fast updater callable; a bare
      # call increments by the default of 1.
      self.inc = MetricUpdater(  # type: ignore[assignment]
          cells.CounterCell,
          metric_name,
          default_value=1,
          process_wide=process_wide)

  class DelegatingDistribution(Distribution):
    """Metrics Distribution Delegates functionality to MetricsEnvironment."""
    def __init__(self, metric_name):
      # type: (MetricName) -> None
      super(Metrics.DelegatingDistribution, self).__init__(metric_name)
      # `update` is replaced wholesale by a fast updater callable.
      self.update = MetricUpdater(  # type: ignore[assignment]
          cells.DistributionCell, metric_name)

  class DelegatingGauge(Gauge):
    """Metrics Gauge that Delegates functionality to MetricsEnvironment."""
    def __init__(self, metric_name):
      # type: (MetricName) -> None
      super(Metrics.DelegatingGauge, self).__init__(metric_name)
      # `set` is replaced wholesale by a fast updater callable.
      self.set = MetricUpdater(  # type: ignore[assignment]
          cells.GaugeCell, metric_name)
class MetricResults(object):
  """Matching helpers and the query interface for runner metric results."""
  COUNTERS = "counters"
  DISTRIBUTIONS = "distributions"
  GAUGES = "gauges"

  @staticmethod
  def _matches_name(filter, metric_key):
    # type: (MetricsFilter, MetricKey) -> bool
    """True iff the key's namespace and name pass the filter's sets
    (an empty set matches everything)."""
    if filter.namespaces and metric_key.metric.namespace not in filter.namespaces:
      return False
    if filter.names and metric_key.metric.name not in filter.names:
      return False
    return True

  @staticmethod
  def _is_sub_list(needle, haystack):
    # type: (List[str], List[str]) -> bool
    """True iff `needle` is a sub-list of `haystack` (i.e. a contiguous slice
    of `haystack` exactly matches `needle`)."""
    needle_len = len(needle)
    return any(
        haystack[i:i + needle_len] == needle
        for i in range(len(haystack) - needle_len + 1))

  @staticmethod
  def _matches_sub_path(actual_scope, filter_scope):
    # type: (str, str) -> bool
    """True iff the '/'-delimited pieces of filter_scope exist as a sub-list
    of the '/'-delimited pieces of actual_scope."""
    return MetricResults._is_sub_list(
        filter_scope.split('/'), actual_scope.split('/'))

  @staticmethod
  def _matches_scope(filter, metric_key):
    # type: (MetricsFilter, MetricKey) -> bool
    # An empty step filter matches every step.
    if not filter.steps:
      return True
    return any(
        MetricResults._matches_sub_path(metric_key.step, step)
        for step in filter.steps)

  @staticmethod
  def matches(filter, metric_key):
    # type: (Optional[MetricsFilter], MetricKey) -> bool
    """True iff metric_key passes filter (a None filter matches everything)."""
    return filter is None or (
        MetricResults._matches_name(filter, metric_key) and
        MetricResults._matches_scope(filter, metric_key))

  def query(self, filter=None):
    # type: (Optional[MetricsFilter]) -> Dict[str, List[MetricResults]]
    """Queries the runner for existing user metrics that match the filter.

    It should return a dictionary, with lists of each kind of metric, and
    each list contains the corresponding kind of MetricResult. Like so:

        {
          "counters": [MetricResult(counter_key, committed, attempted), ...],
          "distributions": [MetricResult(dist_key, committed, attempted), ...],
          "gauges": []  // Empty list if nothing matched the filter.
        }

    The committed / attempted values are DistributionResult / GaugeResult / int
    objects.
    """
    raise NotImplementedError
class MetricsFilter(object):
  """Simple object to filter metrics results.

  This class is experimental. No backwards-compatibility guarantees.

  It filters by matching a result's step-namespace-name against three
  internal sets. No execution/matching logic is added to this object, so
  that it may be used to construct arguments as an RPC request. It is left
  for runners to implement matching logic by themselves.

  Note: This class only supports user defined metrics.
  """
  def __init__(self):
    # type: () -> None
    self._names = set()  # type: Set[str]
    self._namespaces = set()  # type: Set[str]
    self._steps = set()  # type: Set[str]

  @property
  def steps(self):
    # type: () -> FrozenSet[str]
    # Immutable snapshot; mutate via the with_* builders.
    return frozenset(self._steps)

  @property
  def names(self):
    # type: () -> FrozenSet[str]
    return frozenset(self._names)

  @property
  def namespaces(self):
    # type: () -> FrozenSet[str]
    return frozenset(self._namespaces)

  def with_metric(self, metric):
    # type: (Metric) -> MetricsFilter
    """Add the given metric's name and namespace to this filter."""
    metric_name = metric.metric_name
    return self.with_name(metric_name.name or '').with_namespace(
        metric_name.namespace or '')

  def with_name(self, name):
    # type: (str) -> MetricsFilter
    return self.with_names([name])

  def with_names(self, names):
    # type: (Iterable[str]) -> MetricsFilter
    # Guard against a common mistake: a string is iterable but would add
    # each character as a separate name.
    if isinstance(names, str):
      raise ValueError('Names must be a collection, not a string')
    self._names.update(names)
    return self

  def with_namespace(self, namespace):
    # type: (Union[Type, str]) -> MetricsFilter
    return self.with_namespaces([namespace])

  def with_namespaces(self, namespaces):
    # type: (Iterable[Union[Type, str]]) -> MetricsFilter
    if isinstance(namespaces, str):
      raise ValueError('Namespaces must be an iterable, not a string')
    self._namespaces.update(Metrics.get_namespace(ns) for ns in namespaces)
    return self

  def with_step(self, step):
    # type: (str) -> MetricsFilter
    return self.with_steps([step])

  def with_steps(self, steps):
    # type: (Iterable[str]) -> MetricsFilter
    if isinstance(steps, str):
      raise ValueError('Steps must be an iterable, not a string')
    self._steps.update(steps)
    return self
# pytype: skip-file
from __future__ import absolute_import
from apache_beam.portability.api.beam_runner_api_pb2_urns import BeamConstants
from apache_beam.portability.api.beam_runner_api_pb2_urns import StandardArtifacts
from apache_beam.portability.api.beam_runner_api_pb2_urns import StandardCoders
from apache_beam.portability.api.beam_runner_api_pb2_urns import StandardEnvironments
from apache_beam.portability.api.beam_runner_api_pb2_urns import StandardProtocols
from apache_beam.portability.api.beam_runner_api_pb2_urns import StandardPTransforms
from apache_beam.portability.api.beam_runner_api_pb2_urns import StandardRequirements
from apache_beam.portability.api.beam_runner_api_pb2_urns import StandardSideInputTypes
from apache_beam.portability.api.metrics_pb2_urns import MonitoringInfo
from apache_beam.portability.api.metrics_pb2_urns import MonitoringInfoSpecs
from apache_beam.portability.api.metrics_pb2_urns import MonitoringInfoTypeUrns
from apache_beam.portability.api.standard_window_fns_pb2_urns import FixedWindowsPayload
from apache_beam.portability.api.standard_window_fns_pb2_urns import GlobalWindowsPayload
from apache_beam.portability.api.standard_window_fns_pb2_urns import SessionWindowsPayload
from apache_beam.portability.api.standard_window_fns_pb2_urns import SlidingWindowsPayload
# Short aliases for the URN enums defined in the generated proto modules;
# these are the names the rest of the codebase imports from common_urns.

# PTransform URNs.
primitives = StandardPTransforms.Primitives
deprecated_primitives = StandardPTransforms.DeprecatedPrimitives
composites = StandardPTransforms.Composites
combine_components = StandardPTransforms.CombineComponents
sdf_components = StandardPTransforms.SplittableParDoComponents
group_into_batches_components = StandardPTransforms.GroupIntoBatchesComponents

# Side input, coder, constant and environment URNs.
side_inputs = StandardSideInputTypes.Enum
coders = StandardCoders.Enum
constants = BeamConstants.Constants
environments = StandardEnvironments.Environments

# Artifact URNs.
artifact_types = StandardArtifacts.Types
artifact_roles = StandardArtifacts.Roles

# Window-fn payload properties.
global_windows = GlobalWindowsPayload.Enum.PROPERTIES
fixed_windows = FixedWindowsPayload.Enum.PROPERTIES
sliding_windows = SlidingWindowsPayload.Enum.PROPERTIES
session_windows = SessionWindowsPayload.Enum.PROPERTIES

# Monitoring-info URNs.
monitoring_info_specs = MonitoringInfoSpecs.Enum
monitoring_info_types = MonitoringInfoTypeUrns.Enum
monitoring_info_labels = MonitoringInfo.MonitoringInfoLabels

# Protocol and requirement URNs.
protocols = StandardProtocols.Enum
requirements = StandardRequirements.Enum
"""Client and server classes corresponding to protobuf-defined services."""
from __future__ import absolute_import
from builtins import object
import grpc
from . import beam_provision_api_pb2 as beam__provision__api__pb2
# NOTE: generated by the gRPC Python protoc plugin; do not edit by hand.
class ProvisionServiceStub(object):
    """A service to provide runtime provisioning information to the SDK harness
    worker instances -- such as pipeline options, resource constraints and
    other job metadata -- needed by an SDK harness instance to initialize.
    """

    def __init__(self, channel):
        """Constructor.

        Args:
            channel: A grpc.Channel.
        """
        # The RPC callable is bound once at construction time from the channel.
        self.GetProvisionInfo = channel.unary_unary(
                '/org.apache.beam.model.fn_execution.v1.ProvisionService/GetProvisionInfo',
                request_serializer=beam__provision__api__pb2.GetProvisionInfoRequest.SerializeToString,
                response_deserializer=beam__provision__api__pb2.GetProvisionInfoResponse.FromString,
                )
# NOTE: generated by the gRPC Python protoc plugin; do not edit by hand.
class ProvisionServiceServicer(object):
    """A service to provide runtime provisioning information to the SDK harness
    worker instances -- such as pipeline options, resource constraints and
    other job metadata -- needed by an SDK harness instance to initialize.
    """

    def GetProvisionInfo(self, request, context):
        """Get provision information for the SDK harness worker instance.
        """
        # Generated default: report UNIMPLEMENTED until a subclass overrides.
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details('Method not implemented!')
        raise NotImplementedError('Method not implemented!')
# Registers `servicer`'s handlers on `server` under the fully qualified
# gRPC service name (generated code).
def add_ProvisionServiceServicer_to_server(servicer, server):
    rpc_method_handlers = {
            'GetProvisionInfo': grpc.unary_unary_rpc_method_handler(
                    servicer.GetProvisionInfo,
                    request_deserializer=beam__provision__api__pb2.GetProvisionInfoRequest.FromString,
                    response_serializer=beam__provision__api__pb2.GetProvisionInfoResponse.SerializeToString,
            ),
    }
    generic_handler = grpc.method_handlers_generic_handler(
            'org.apache.beam.model.fn_execution.v1.ProvisionService', rpc_method_handlers)
    server.add_generic_rpc_handlers((generic_handler,))
# This class is part of an EXPERIMENTAL API.
class ProvisionService(object):
    """A service to provide runtime provisioning information to the SDK harness
    worker instances -- such as pipeline options, resource constraints and
    other job metadata -- needed by an SDK harness instance to initialize.
    """

    @staticmethod
    def GetProvisionInfo(request,
            target,
            options=(),
            channel_credentials=None,
            call_credentials=None,
            compression=None,
            wait_for_ready=None,
            timeout=None,
            metadata=None):
        # One-shot convenience call (generated): opens a channel to `target`
        # and invokes the unary-unary RPC without constructing a stub.
        return grpc.experimental.unary_unary(request, target, '/org.apache.beam.model.fn_execution.v1.ProvisionService/GetProvisionInfo',
            beam__provision__api__pb2.GetProvisionInfoRequest.SerializeToString,
            beam__provision__api__pb2.GetProvisionInfoResponse.FromString,
            options, channel_credentials,
            call_credentials, compression, wait_for_ready, timeout, metadata)
"""Client and server classes corresponding to protobuf-defined services."""
from __future__ import absolute_import
from builtins import object
import grpc
from . import beam_expansion_api_pb2 as beam__expansion__api__pb2
class ExpansionServiceStub(object):
    """Job Service for constructing pipelines
    """

    def __init__(self, channel):
        """Bind each ExpansionService RPC to *channel* (a grpc.Channel)."""
        pb2 = beam__expansion__api__pb2
        self.Expand = channel.unary_unary(
            '/org.apache.beam.model.expansion.v1.ExpansionService/Expand',
            request_serializer=pb2.ExpansionRequest.SerializeToString,
            response_deserializer=pb2.ExpansionResponse.FromString,
        )
class ExpansionServiceServicer(object):
    """Job Service for constructing pipelines
    """

    def Expand(self, request, context):
        """Missing associated documentation comment in .proto file."""
        # Generated default handler: concrete servicers must override.
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details('Method not implemented!')
        raise NotImplementedError('Method not implemented!')
def add_ExpansionServiceServicer_to_server(servicer, server):
    """Register *servicer*'s RPC handlers with *server*."""
    handlers = {}
    handlers['Expand'] = grpc.unary_unary_rpc_method_handler(
        servicer.Expand,
        request_deserializer=beam__expansion__api__pb2.ExpansionRequest.FromString,
        response_serializer=beam__expansion__api__pb2.ExpansionResponse.SerializeToString,
    )
    server.add_generic_rpc_handlers((
        grpc.method_handlers_generic_handler(
            'org.apache.beam.model.expansion.v1.ExpansionService', handlers),
    ))
# This class is part of an EXPERIMENTAL API.
class ExpansionService(object):
    """Job Service for constructing pipelines
    """

    @staticmethod
    def Expand(request,
            target,
            options=(),
            channel_credentials=None,
            call_credentials=None,
            compression=None,
            wait_for_ready=None,
            timeout=None,
            metadata=None):
        """One-shot unary call to ExpansionService/Expand at *target*."""
        pb2 = beam__expansion__api__pb2
        return grpc.experimental.unary_unary(
            request, target,
            '/org.apache.beam.model.expansion.v1.ExpansionService/Expand',
            pb2.ExpansionRequest.SerializeToString,
            pb2.ExpansionResponse.FromString,
            options, channel_credentials, call_credentials,
            compression, wait_for_ready, timeout, metadata)
"""Client and server classes corresponding to protobuf-defined services."""
from __future__ import absolute_import
from builtins import object
import grpc
from . import beam_fn_api_pb2 as beam__fn__api__pb2
class BeamFnControlStub(object):
    """
    Control Plane API
    Progress reporting and splitting still need further vetting. Also, this may
    change with the addition of new types of instructions/responses related to
    metrics.
    An API that describes the work that a SDK harness is meant to do.
    Stable
    """

    def __init__(self, channel):
        """Bind each BeamFnControl RPC to *channel* (a grpc.Channel)."""
        pb2 = beam__fn__api__pb2
        self.Control = channel.stream_stream(
            '/org.apache.beam.model.fn_execution.v1.BeamFnControl/Control',
            request_serializer=pb2.InstructionResponse.SerializeToString,
            response_deserializer=pb2.InstructionRequest.FromString,
        )
        self.GetProcessBundleDescriptor = channel.unary_unary(
            '/org.apache.beam.model.fn_execution.v1.BeamFnControl/GetProcessBundleDescriptor',
            request_serializer=pb2.GetProcessBundleDescriptorRequest.SerializeToString,
            response_deserializer=pb2.ProcessBundleDescriptor.FromString,
        )
class BeamFnControlServicer(object):
    """
    Control Plane API
    Progress reporting and splitting still need further vetting. Also, this may
    change with the addition of new types of instructions/responses related to
    metrics.
    An API that describes the work that a SDK harness is meant to do.
    Stable
    """

    def Control(self, request_iterator, context):
        """Instructions sent by the runner to the SDK requesting different types
        of work.
        """
        # Generated default handler: concrete servicers must override.
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details('Method not implemented!')
        raise NotImplementedError('Method not implemented!')

    def GetProcessBundleDescriptor(self, request, context):
        """Used to get the full process bundle descriptors for bundles one
        is asked to process.
        """
        # Generated default handler: concrete servicers must override.
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details('Method not implemented!')
        raise NotImplementedError('Method not implemented!')
def add_BeamFnControlServicer_to_server(servicer, server):
    """Register *servicer*'s RPC handlers with *server*."""
    pb2 = beam__fn__api__pb2
    handlers = {}
    handlers['Control'] = grpc.stream_stream_rpc_method_handler(
        servicer.Control,
        request_deserializer=pb2.InstructionResponse.FromString,
        response_serializer=pb2.InstructionRequest.SerializeToString,
    )
    handlers['GetProcessBundleDescriptor'] = grpc.unary_unary_rpc_method_handler(
        servicer.GetProcessBundleDescriptor,
        request_deserializer=pb2.GetProcessBundleDescriptorRequest.FromString,
        response_serializer=pb2.ProcessBundleDescriptor.SerializeToString,
    )
    server.add_generic_rpc_handlers((
        grpc.method_handlers_generic_handler(
            'org.apache.beam.model.fn_execution.v1.BeamFnControl', handlers),
    ))
# This class is part of an EXPERIMENTAL API.
class BeamFnControl(object):
    """
    Control Plane API
    Progress reporting and splitting still need further vetting. Also, this may
    change with the addition of new types of instructions/responses related to
    metrics.
    An API that describes the work that a SDK harness is meant to do.
    Stable
    """

    @staticmethod
    def Control(request_iterator,
            target,
            options=(),
            channel_credentials=None,
            call_credentials=None,
            compression=None,
            wait_for_ready=None,
            timeout=None,
            metadata=None):
        """One-shot bidirectional-streaming call to BeamFnControl/Control."""
        pb2 = beam__fn__api__pb2
        return grpc.experimental.stream_stream(
            request_iterator, target,
            '/org.apache.beam.model.fn_execution.v1.BeamFnControl/Control',
            pb2.InstructionResponse.SerializeToString,
            pb2.InstructionRequest.FromString,
            options, channel_credentials, call_credentials,
            compression, wait_for_ready, timeout, metadata)

    @staticmethod
    def GetProcessBundleDescriptor(request,
            target,
            options=(),
            channel_credentials=None,
            call_credentials=None,
            compression=None,
            wait_for_ready=None,
            timeout=None,
            metadata=None):
        """One-shot unary call to BeamFnControl/GetProcessBundleDescriptor."""
        pb2 = beam__fn__api__pb2
        return grpc.experimental.unary_unary(
            request, target,
            '/org.apache.beam.model.fn_execution.v1.BeamFnControl/GetProcessBundleDescriptor',
            pb2.GetProcessBundleDescriptorRequest.SerializeToString,
            pb2.ProcessBundleDescriptor.FromString,
            options, channel_credentials, call_credentials,
            compression, wait_for_ready, timeout, metadata)
class BeamFnDataStub(object):
    """Stable
    """

    def __init__(self, channel):
        """Bind the BeamFnData RPC to *channel* (a grpc.Channel)."""
        pb2 = beam__fn__api__pb2
        self.Data = channel.stream_stream(
            '/org.apache.beam.model.fn_execution.v1.BeamFnData/Data',
            request_serializer=pb2.Elements.SerializeToString,
            response_deserializer=pb2.Elements.FromString,
        )
class BeamFnDataServicer(object):
    """Stable
    """

    def Data(self, request_iterator, context):
        """Used to send data between harnesses.
        """
        # Generated default handler: concrete servicers must override.
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details('Method not implemented!')
        raise NotImplementedError('Method not implemented!')
def add_BeamFnDataServicer_to_server(servicer, server):
    """Register *servicer*'s RPC handlers with *server*."""
    handlers = {}
    handlers['Data'] = grpc.stream_stream_rpc_method_handler(
        servicer.Data,
        request_deserializer=beam__fn__api__pb2.Elements.FromString,
        response_serializer=beam__fn__api__pb2.Elements.SerializeToString,
    )
    server.add_generic_rpc_handlers((
        grpc.method_handlers_generic_handler(
            'org.apache.beam.model.fn_execution.v1.BeamFnData', handlers),
    ))
# This class is part of an EXPERIMENTAL API.
class BeamFnData(object):
    """Stable
    """

    @staticmethod
    def Data(request_iterator,
            target,
            options=(),
            channel_credentials=None,
            call_credentials=None,
            compression=None,
            wait_for_ready=None,
            timeout=None,
            metadata=None):
        """One-shot bidirectional-streaming call to BeamFnData/Data."""
        pb2 = beam__fn__api__pb2
        return grpc.experimental.stream_stream(
            request_iterator, target,
            '/org.apache.beam.model.fn_execution.v1.BeamFnData/Data',
            pb2.Elements.SerializeToString,
            pb2.Elements.FromString,
            options, channel_credentials, call_credentials,
            compression, wait_for_ready, timeout, metadata)
class BeamFnStateStub(object):
    """Missing associated documentation comment in .proto file."""

    def __init__(self, channel):
        """Bind the BeamFnState RPC to *channel* (a grpc.Channel)."""
        pb2 = beam__fn__api__pb2
        self.State = channel.stream_stream(
            '/org.apache.beam.model.fn_execution.v1.BeamFnState/State',
            request_serializer=pb2.StateRequest.SerializeToString,
            response_deserializer=pb2.StateResponse.FromString,
        )
class BeamFnStateServicer(object):
    """Missing associated documentation comment in .proto file."""

    def State(self, request_iterator, context):
        """Used to get/append/clear state stored by the runner on behalf of the SDK.
        """
        # Generated default handler: concrete servicers must override.
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details('Method not implemented!')
        raise NotImplementedError('Method not implemented!')
def add_BeamFnStateServicer_to_server(servicer, server):
    """Register *servicer*'s RPC handlers with *server*."""
    handlers = {}
    handlers['State'] = grpc.stream_stream_rpc_method_handler(
        servicer.State,
        request_deserializer=beam__fn__api__pb2.StateRequest.FromString,
        response_serializer=beam__fn__api__pb2.StateResponse.SerializeToString,
    )
    server.add_generic_rpc_handlers((
        grpc.method_handlers_generic_handler(
            'org.apache.beam.model.fn_execution.v1.BeamFnState', handlers),
    ))
# This class is part of an EXPERIMENTAL API.
class BeamFnState(object):
    """Missing associated documentation comment in .proto file."""

    @staticmethod
    def State(request_iterator,
            target,
            options=(),
            channel_credentials=None,
            call_credentials=None,
            compression=None,
            wait_for_ready=None,
            timeout=None,
            metadata=None):
        """One-shot bidirectional-streaming call to BeamFnState/State."""
        pb2 = beam__fn__api__pb2
        return grpc.experimental.stream_stream(
            request_iterator, target,
            '/org.apache.beam.model.fn_execution.v1.BeamFnState/State',
            pb2.StateRequest.SerializeToString,
            pb2.StateResponse.FromString,
            options, channel_credentials, call_credentials,
            compression, wait_for_ready, timeout, metadata)
class BeamFnLoggingStub(object):
    """Stable
    """

    def __init__(self, channel):
        """Bind the BeamFnLogging RPC to *channel* (a grpc.Channel)."""
        pb2 = beam__fn__api__pb2
        self.Logging = channel.stream_stream(
            '/org.apache.beam.model.fn_execution.v1.BeamFnLogging/Logging',
            request_serializer=pb2.LogEntry.List.SerializeToString,
            response_deserializer=pb2.LogControl.FromString,
        )
class BeamFnLoggingServicer(object):
    """Stable
    """

    def Logging(self, request_iterator, context):
        """Allows for the SDK to emit log entries which the runner can
        associate with the active job.
        """
        # Generated default handler: concrete servicers must override.
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details('Method not implemented!')
        raise NotImplementedError('Method not implemented!')
def add_BeamFnLoggingServicer_to_server(servicer, server):
    """Register *servicer*'s RPC handlers with *server*."""
    handlers = {}
    handlers['Logging'] = grpc.stream_stream_rpc_method_handler(
        servicer.Logging,
        request_deserializer=beam__fn__api__pb2.LogEntry.List.FromString,
        response_serializer=beam__fn__api__pb2.LogControl.SerializeToString,
    )
    server.add_generic_rpc_handlers((
        grpc.method_handlers_generic_handler(
            'org.apache.beam.model.fn_execution.v1.BeamFnLogging', handlers),
    ))
# This class is part of an EXPERIMENTAL API.
class BeamFnLogging(object):
    """Stable
    """

    @staticmethod
    def Logging(request_iterator,
            target,
            options=(),
            channel_credentials=None,
            call_credentials=None,
            compression=None,
            wait_for_ready=None,
            timeout=None,
            metadata=None):
        """One-shot bidirectional-streaming call to BeamFnLogging/Logging."""
        pb2 = beam__fn__api__pb2
        return grpc.experimental.stream_stream(
            request_iterator, target,
            '/org.apache.beam.model.fn_execution.v1.BeamFnLogging/Logging',
            pb2.LogEntry.List.SerializeToString,
            pb2.LogControl.FromString,
            options, channel_credentials, call_credentials,
            compression, wait_for_ready, timeout, metadata)
class BeamFnExternalWorkerPoolStub(object):
    """Missing associated documentation comment in .proto file."""

    def __init__(self, channel):
        """Bind each BeamFnExternalWorkerPool RPC to *channel* (a grpc.Channel)."""
        pb2 = beam__fn__api__pb2
        self.StartWorker = channel.unary_unary(
            '/org.apache.beam.model.fn_execution.v1.BeamFnExternalWorkerPool/StartWorker',
            request_serializer=pb2.StartWorkerRequest.SerializeToString,
            response_deserializer=pb2.StartWorkerResponse.FromString,
        )
        self.StopWorker = channel.unary_unary(
            '/org.apache.beam.model.fn_execution.v1.BeamFnExternalWorkerPool/StopWorker',
            request_serializer=pb2.StopWorkerRequest.SerializeToString,
            response_deserializer=pb2.StopWorkerResponse.FromString,
        )
class BeamFnExternalWorkerPoolServicer(object):
    """Missing associated documentation comment in .proto file."""

    def StartWorker(self, request, context):
        """Start the SDK worker with the given ID.
        """
        # Generated default handler: concrete servicers must override.
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details('Method not implemented!')
        raise NotImplementedError('Method not implemented!')

    def StopWorker(self, request, context):
        """Stop the SDK worker.
        """
        # Generated default handler: concrete servicers must override.
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details('Method not implemented!')
        raise NotImplementedError('Method not implemented!')
def add_BeamFnExternalWorkerPoolServicer_to_server(servicer, server):
    """Register *servicer*'s RPC handlers with *server*."""
    pb2 = beam__fn__api__pb2
    handlers = {}
    handlers['StartWorker'] = grpc.unary_unary_rpc_method_handler(
        servicer.StartWorker,
        request_deserializer=pb2.StartWorkerRequest.FromString,
        response_serializer=pb2.StartWorkerResponse.SerializeToString,
    )
    handlers['StopWorker'] = grpc.unary_unary_rpc_method_handler(
        servicer.StopWorker,
        request_deserializer=pb2.StopWorkerRequest.FromString,
        response_serializer=pb2.StopWorkerResponse.SerializeToString,
    )
    server.add_generic_rpc_handlers((
        grpc.method_handlers_generic_handler(
            'org.apache.beam.model.fn_execution.v1.BeamFnExternalWorkerPool', handlers),
    ))
# This class is part of an EXPERIMENTAL API.
class BeamFnExternalWorkerPool(object):
    """Missing associated documentation comment in .proto file."""

    @staticmethod
    def StartWorker(request,
            target,
            options=(),
            channel_credentials=None,
            call_credentials=None,
            compression=None,
            wait_for_ready=None,
            timeout=None,
            metadata=None):
        """One-shot unary call to BeamFnExternalWorkerPool/StartWorker."""
        pb2 = beam__fn__api__pb2
        return grpc.experimental.unary_unary(
            request, target,
            '/org.apache.beam.model.fn_execution.v1.BeamFnExternalWorkerPool/StartWorker',
            pb2.StartWorkerRequest.SerializeToString,
            pb2.StartWorkerResponse.FromString,
            options, channel_credentials, call_credentials,
            compression, wait_for_ready, timeout, metadata)

    @staticmethod
    def StopWorker(request,
            target,
            options=(),
            channel_credentials=None,
            call_credentials=None,
            compression=None,
            wait_for_ready=None,
            timeout=None,
            metadata=None):
        """One-shot unary call to BeamFnExternalWorkerPool/StopWorker."""
        pb2 = beam__fn__api__pb2
        return grpc.experimental.unary_unary(
            request, target,
            '/org.apache.beam.model.fn_execution.v1.BeamFnExternalWorkerPool/StopWorker',
            pb2.StopWorkerRequest.SerializeToString,
            pb2.StopWorkerResponse.FromString,
            options, channel_credentials, call_credentials,
            compression, wait_for_ready, timeout, metadata)
class BeamFnWorkerStatusStub(object):
    """API for SDKs to report debug-related statuses to runner during pipeline execution.
    """

    def __init__(self, channel):
        """Bind the BeamFnWorkerStatus RPC to *channel* (a grpc.Channel)."""
        pb2 = beam__fn__api__pb2
        self.WorkerStatus = channel.stream_stream(
            '/org.apache.beam.model.fn_execution.v1.BeamFnWorkerStatus/WorkerStatus',
            request_serializer=pb2.WorkerStatusResponse.SerializeToString,
            response_deserializer=pb2.WorkerStatusRequest.FromString,
        )
class BeamFnWorkerStatusServicer(object):
    """API for SDKs to report debug-related statuses to runner during pipeline execution.
    """

    def WorkerStatus(self, request_iterator, context):
        """Missing associated documentation comment in .proto file."""
        # Generated default handler: concrete servicers must override.
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details('Method not implemented!')
        raise NotImplementedError('Method not implemented!')
def add_BeamFnWorkerStatusServicer_to_server(servicer, server):
    """Register *servicer*'s RPC handlers with *server*."""
    handlers = {}
    handlers['WorkerStatus'] = grpc.stream_stream_rpc_method_handler(
        servicer.WorkerStatus,
        request_deserializer=beam__fn__api__pb2.WorkerStatusResponse.FromString,
        response_serializer=beam__fn__api__pb2.WorkerStatusRequest.SerializeToString,
    )
    server.add_generic_rpc_handlers((
        grpc.method_handlers_generic_handler(
            'org.apache.beam.model.fn_execution.v1.BeamFnWorkerStatus', handlers),
    ))
# This class is part of an EXPERIMENTAL API.
class BeamFnWorkerStatus(object):
    """API for SDKs to report debug-related statuses to runner during pipeline execution.
    """

    @staticmethod
    def WorkerStatus(request_iterator,
            target,
            options=(),
            channel_credentials=None,
            call_credentials=None,
            compression=None,
            wait_for_ready=None,
            timeout=None,
            metadata=None):
        """One-shot bidirectional-streaming call to BeamFnWorkerStatus/WorkerStatus."""
        pb2 = beam__fn__api__pb2
        return grpc.experimental.stream_stream(
            request_iterator, target,
            '/org.apache.beam.model.fn_execution.v1.BeamFnWorkerStatus/WorkerStatus',
            pb2.WorkerStatusResponse.SerializeToString,
            pb2.WorkerStatusRequest.FromString,
            options, channel_credentials, call_credentials,
            compression, wait_for_ready, timeout, metadata)
from datetime import timedelta
from uuid import uuid4
from django.conf import settings
from django.utils.module_loading import import_string
from django.utils.translation import gettext_lazy as _
from .exceptions import TokenBackendError, TokenError
from .settings import api_settings
from .token_blacklist.models import BlacklistedToken, OutstandingToken
from .utils import aware_utcnow, datetime_from_epoch, datetime_to_epoch, format_lazy
class Token:
    """
    A class which validates and wraps an existing JWT or can be used to build a
    new JWT.
    """

    # Subclasses must define both of these; __init__ refuses to build a token
    # otherwise.
    token_type = None
    lifetime = None

    def __init__(self, token=None, verify=True):
        """
        !!!! IMPORTANT !!!! MUST raise a TokenError with a user-facing error
        message if the given token is invalid, expired, or otherwise not safe
        to use.
        """
        if self.token_type is None or self.lifetime is None:
            raise TokenError(_("Cannot create token with no type or lifetime"))

        self.token = token
        self.current_time = aware_utcnow()

        # Set up token
        if token is not None:
            # An encoded token was provided
            token_backend = self.get_token_backend()

            # Decode token
            try:
                self.payload = token_backend.decode(token, verify=verify)
            except TokenBackendError:
                # Re-raise as the user-facing TokenError required by the
                # contract in the docstring above.
                raise TokenError(_("Token is invalid or expired"))

            if verify:
                self.verify()
        else:
            # New token. Skip all the verification steps.
            self.payload = {api_settings.TOKEN_TYPE_CLAIM: self.token_type}

            # Set "exp" and "iat" claims with default value
            self.set_exp(from_time=self.current_time, lifetime=self.lifetime)
            self.set_iat(at_time=self.current_time)

            # Set "jti" claim
            self.set_jti()

    def __repr__(self):
        return repr(self.payload)

    # The mapping-style dunders below delegate straight to the payload dict,
    # so token["exp"], "jti" in token, del token[...] etc. all work.
    def __getitem__(self, key):
        return self.payload[key]

    def __setitem__(self, key, value):
        self.payload[key] = value

    def __delitem__(self, key):
        del self.payload[key]

    def __contains__(self, key):
        return key in self.payload

    def get(self, key, default=None):
        """Dict-style ``get`` on the token payload."""
        return self.payload.get(key, default)

    def __str__(self):
        """
        Signs and returns a token as a base64 encoded string.
        """
        return self.get_token_backend().encode(self.payload)

    def verify(self):
        """
        Performs additional validation steps which were not performed when this
        token was decoded. This method is part of the "public" API to indicate
        the intention that it may be overridden in subclasses.
        """
        # According to RFC 7519, the "exp" claim is OPTIONAL
        # (https://tools.ietf.org/html/rfc7519#section-4.1.4). As a more
        # correct behavior for authorization tokens, we require an "exp"
        # claim. We don't want any zombie tokens walking around.
        self.check_exp()

        # Ensure token id is present
        if api_settings.JTI_CLAIM not in self.payload:
            raise TokenError(_("Token has no id"))

        self.verify_token_type()

    def verify_token_type(self):
        """
        Ensures that the token type claim is present and has the correct value.
        """
        try:
            token_type = self.payload[api_settings.TOKEN_TYPE_CLAIM]
        except KeyError:
            raise TokenError(_("Token has no type"))

        if self.token_type != token_type:
            raise TokenError(_("Token has wrong type"))

    def set_jti(self):
        """
        Populates the configured jti claim of a token with a string where there
        is a negligible probability that the same string will be chosen at a
        later time.
        See here:
        https://tools.ietf.org/html/rfc7519#section-4.1.7
        """
        self.payload[api_settings.JTI_CLAIM] = uuid4().hex

    def set_exp(self, claim="exp", from_time=None, lifetime=None):
        """
        Updates the expiration time of a token.
        See here:
        https://tools.ietf.org/html/rfc7519#section-4.1.4
        """
        if from_time is None:
            from_time = self.current_time

        if lifetime is None:
            lifetime = self.lifetime

        self.payload[claim] = datetime_to_epoch(from_time + lifetime)

    def set_iat(self, claim="iat", at_time=None):
        """
        Updates the time at which the token was issued.
        See here:
        https://tools.ietf.org/html/rfc7519#section-4.1.6
        """
        if at_time is None:
            at_time = self.current_time

        self.payload[claim] = datetime_to_epoch(at_time)

    def check_exp(self, claim="exp", current_time=None):
        """
        Checks whether a timestamp value in the given claim has passed (since
        the given datetime value in `current_time`). Raises a TokenError with
        a user-facing error message if so.
        """
        if current_time is None:
            current_time = self.current_time

        try:
            claim_value = self.payload[claim]
        except KeyError:
            raise TokenError(format_lazy(_("Token has no '{}' claim"), claim))

        claim_time = datetime_from_epoch(claim_value)
        # Tolerate clock skew up to the backend's configured leeway (seconds).
        leeway = self.get_token_backend().leeway
        if claim_time <= current_time - timedelta(seconds=leeway):
            raise TokenError(format_lazy(_("Token '{}' claim has expired"), claim))

    @classmethod
    def for_user(cls, user):
        """
        Returns an authorization token for the given user that will be provided
        after authenticating the user's credentials.
        """
        user_id = getattr(user, api_settings.USER_ID_FIELD)
        # Non-int ids (e.g. UUID primary keys) are coerced to str -- presumably
        # to keep the claim JSON-serializable.
        if not isinstance(user_id, int):
            user_id = str(user_id)

        token = cls()
        token[api_settings.USER_ID_CLAIM] = user_id

        return token

    # Lazily-resolved backend, cached on the instance after first access.
    _token_backend = None

    @property
    def token_backend(self):
        if self._token_backend is None:
            # Imported lazily via a dotted path string -- presumably to avoid
            # a circular import at module load time; TODO confirm.
            self._token_backend = import_string(
                "rest_framework_simplejwt.state.token_backend"
            )
        return self._token_backend

    def get_token_backend(self):
        # Backward compatibility.
        return self.token_backend
class BlacklistMixin:
    """
    If the `rest_framework_simplejwt.token_blacklist` app was configured to be
    used, tokens created from `BlacklistMixin` subclasses will insert
    themselves into an outstanding token list and also check for their
    membership in a token blacklist.
    """

    # The methods below are only defined when the optional blacklist app is
    # installed; otherwise this mixin contributes nothing to subclasses.
    if "rest_framework_simplejwt.token_blacklist" in settings.INSTALLED_APPS:

        def verify(self, *args, **kwargs):
            # Reject blacklisted tokens before running the normal checks.
            self.check_blacklist()

            super().verify(*args, **kwargs)

        def check_blacklist(self):
            """
            Checks if this token is present in the token blacklist. Raises
            `TokenError` if so.
            """
            jti = self.payload[api_settings.JTI_CLAIM]

            if BlacklistedToken.objects.filter(token__jti=jti).exists():
                raise TokenError(_("Token is blacklisted"))

        def blacklist(self):
            """
            Ensures this token is included in the outstanding token list and
            adds it to the blacklist.
            """
            jti = self.payload[api_settings.JTI_CLAIM]
            exp = self.payload["exp"]

            # Ensure outstanding token exists with given jti
            # NOTE: the ``_`` target locally rebinds the gettext alias; it is
            # not used after this statement.
            token, _ = OutstandingToken.objects.get_or_create(
                jti=jti,
                defaults={
                    "token": str(self),
                    "expires_at": datetime_from_epoch(exp),
                },
            )

            return BlacklistedToken.objects.get_or_create(token=token)

        @classmethod
        def for_user(cls, user):
            """
            Adds this token to the outstanding token list.
            """
            token = super().for_user(user)

            jti = token[api_settings.JTI_CLAIM]
            exp = token["exp"]

            OutstandingToken.objects.create(
                user=user,
                jti=jti,
                token=str(token),
                created_at=token.current_time,
                expires_at=datetime_from_epoch(exp),
            )

            return token
class SlidingToken(BlacklistMixin, Token):
    token_type = "sliding"
    lifetime = api_settings.SLIDING_TOKEN_LIFETIME

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)

        # Decoded (pre-existing) tokens already carry their claims; only a
        # brand-new token needs the sliding refresh expiration claim added
        # alongside the regular "exp".
        if self.token is not None:
            return

        self.set_exp(
            api_settings.SLIDING_TOKEN_REFRESH_EXP_CLAIM,
            from_time=self.current_time,
            lifetime=api_settings.SLIDING_TOKEN_REFRESH_LIFETIME,
        )
class AccessToken(Token):
    """Short-lived token presented by clients to authorize individual requests."""

    token_type = "access"
    lifetime = api_settings.ACCESS_TOKEN_LIFETIME
class RefreshToken(BlacklistMixin, Token):
    token_type = "refresh"
    lifetime = api_settings.REFRESH_TOKEN_LIFETIME
    no_copy_claims = (
        api_settings.TOKEN_TYPE_CLAIM,
        "exp",
        # Both of these claims are included even though they may be the same.
        # It seems possible that a third party token might have a custom or
        # namespaced JTI claim as well as a default "jti" claim. In that case,
        # we wouldn't want to copy either one.
        api_settings.JTI_CLAIM,
        "jti",
    )
    access_token_class = AccessToken

    @property
    def access_token(self):
        """
        Returns an access token created from this refresh token. Copies all
        claims present in this refresh token to the new access token except
        those claims listed in the `no_copy_claims` attribute.
        """
        token = self.access_token_class()

        # Anchor the access token's "exp" to this refresh token's creation
        # time so a refresh/access pair expires relative to the same instant.
        token.set_exp(from_time=self.current_time)

        skip = set(self.no_copy_claims)
        for claim, value in self.payload.items():
            if claim not in skip:
                token[claim] = value

        return token
class UntypedToken(Token):
    token_type = "untyped"
    lifetime = timedelta(seconds=0)

    def verify_token_type(self):
        """
        Untyped tokens do not verify the "token_type" claim. This is useful
        when performing general validation of a token's signature and other
        properties which do not relate to the token's intended use.
        """
        # Intentionally a no-op: any token type is acceptable here.
from django.contrib.auth import get_user_model
from django.utils.translation import gettext_lazy as _
from rest_framework import HTTP_HEADER_ENCODING, authentication
from .exceptions import AuthenticationFailed, InvalidToken, TokenError
from .settings import api_settings
# Normalize AUTH_HEADER_TYPES to a tuple so a single-string setting such as
# "Bearer" behaves the same as ("Bearer",).
AUTH_HEADER_TYPES = api_settings.AUTH_HEADER_TYPES

if not isinstance(api_settings.AUTH_HEADER_TYPES, (list, tuple)):
    AUTH_HEADER_TYPES = (AUTH_HEADER_TYPES,)

# Pre-encode the accepted header types for byte-wise comparison against the
# raw Authorization header value.
AUTH_HEADER_TYPE_BYTES = {h.encode(HTTP_HEADER_ENCODING) for h in AUTH_HEADER_TYPES}
class JWTAuthentication(authentication.BaseAuthentication):
    """
    An authentication plugin that authenticates requests through a JSON web
    token provided in a request header.
    """

    www_authenticate_realm = "api"
    media_type = "application/json"

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.user_model = get_user_model()

    def authenticate(self, request):
        """
        Return a ``(user, validated_token)`` pair for an authenticated
        request, or ``None`` when the request carries no JWT in the
        configured header (so other authenticators may run).
        """
        header = self.get_header(request)
        if header is None:
            return None

        raw_token = self.get_raw_token(header)
        if raw_token is None:
            return None

        validated_token = self.get_validated_token(raw_token)

        return self.get_user(validated_token), validated_token

    def authenticate_header(self, request):
        """Return the WWW-Authenticate header value for 401 responses."""
        return '{} realm="{}"'.format(
            AUTH_HEADER_TYPES[0],
            self.www_authenticate_realm,
        )

    def get_header(self, request):
        """
        Extracts the header containing the JSON web token from the given
        request.
        """
        header = request.META.get(api_settings.AUTH_HEADER_NAME)

        if isinstance(header, str):
            # Work around django test client oddness
            header = header.encode(HTTP_HEADER_ENCODING)

        return header

    def get_raw_token(self, header):
        """
        Extracts an unvalidated JSON web token from the given "Authorization"
        header value.
        """
        parts = header.split()

        if not parts:  # idiomatic emptiness check (was: len(parts) == 0)
            # Empty AUTHORIZATION header sent
            return None

        if parts[0] not in AUTH_HEADER_TYPE_BYTES:
            # Assume the header does not contain a JSON web token
            return None

        if len(parts) != 2:
            raise AuthenticationFailed(
                _("Authorization header must contain two space-delimited values"),
                code="bad_authorization_header",
            )

        return parts[1]

    def get_validated_token(self, raw_token):
        """
        Validates an encoded JSON web token and returns a validated token
        wrapper object.

        Tries every configured AUTH_TOKEN_CLASS in order; raises
        ``InvalidToken`` (carrying all per-class failure messages) if none
        accepts the token.
        """
        messages = []
        for AuthToken in api_settings.AUTH_TOKEN_CLASSES:
            try:
                return AuthToken(raw_token)
            except TokenError as e:
                messages.append(
                    {
                        "token_class": AuthToken.__name__,
                        "token_type": AuthToken.token_type,
                        "message": e.args[0],
                    }
                )

        raise InvalidToken(
            {
                "detail": _("Given token not valid for any token type"),
                "messages": messages,
            }
        )

    def get_user(self, validated_token):
        """
        Attempts to find and return a user using the given validated token.
        """
        try:
            user_id = validated_token[api_settings.USER_ID_CLAIM]
        except KeyError:
            raise InvalidToken(_("Token contained no recognizable user identification"))

        try:
            user = self.user_model.objects.get(**{api_settings.USER_ID_FIELD: user_id})
        except self.user_model.DoesNotExist:
            raise AuthenticationFailed(_("User not found"), code="user_not_found")

        if not user.is_active:
            raise AuthenticationFailed(_("User is inactive"), code="user_inactive")

        return user
class JWTTokenUserAuthentication(JWTAuthentication):
    def get_user(self, validated_token):
        """
        Returns a stateless user object which is backed by the given validated
        token.
        """
        # The TokenUser class assumes tokens will have a recognizable user
        # identifier claim, so refuse tokens that lack one.
        has_user_id = api_settings.USER_ID_CLAIM in validated_token
        if not has_user_id:
            raise InvalidToken(_("Token contained no recognizable user identification"))

        return api_settings.TOKEN_USER_CLASS(validated_token)
def default_user_authentication_rule(user):
    """Default rule: a user may authenticate only if present and active.

    Prior to Django 1.10, inactive users could be authenticated with the
    default ``ModelBackend``; newer backends reject them, but we enforce the
    check here explicitly for a sensible policy and backwards compatibility.
    """
    if user is None:
        return False
    return user.is_active
import jwt
from django.utils.translation import gettext_lazy as _
from jwt import InvalidAlgorithmError, InvalidTokenError, algorithms
from .exceptions import TokenBackendError
from .utils import format_lazy
# Not every installed PyJWT version provides PyJWKClient (JWKS support), so
# feature-detect it instead of failing at import time.
try:
    from jwt import PyJWKClient

    JWK_CLIENT_AVAILABLE = True
except ImportError:
    JWK_CLIENT_AVAILABLE = False

# Signature algorithms this backend accepts; anything else is rejected when a
# TokenBackend is constructed.
ALLOWED_ALGORITHMS = {
    "HS256",
    "HS384",
    "HS512",
    "RS256",
    "RS384",
    "RS512",
    "ES256",
    "ES384",
    "ES512",
}
class TokenBackend:
    """Thin wrapper around PyJWT that signs and verifies Simple JWT tokens.

    Holds the signing configuration (algorithm, keys, audience, issuer,
    optional JWKS endpoint and clock leeway) and exposes ``encode``/``decode``.
    """

    def __init__(
        self,
        algorithm,
        signing_key=None,
        verifying_key="",
        audience=None,
        issuer=None,
        jwk_url=None,  # Optional[str]; fixed annotation -- was `jwk_url: str = None`
        leeway=0,
    ):
        """
        Args:
            algorithm: One of ALLOWED_ALGORITHMS; validated immediately.
            signing_key: Key used to sign tokens (and verify, for HMAC).
            verifying_key: Public key for asymmetric algorithms.
            audience: Value for the "aud" claim, if any.
            issuer: Value for the "iss" claim, if any.
            jwk_url: Optional JWKS endpoint used to resolve verifying keys.
            leeway: Clock-skew tolerance in seconds applied during decode.
        """
        self._validate_algorithm(algorithm)

        self.algorithm = algorithm
        self.signing_key = signing_key
        self.verifying_key = verifying_key
        self.audience = audience
        self.issuer = issuer

        # JWKS support requires PyJWKClient, which older PyJWT versions lack.
        if JWK_CLIENT_AVAILABLE:
            self.jwks_client = PyJWKClient(jwk_url) if jwk_url else None
        else:
            self.jwks_client = None

        self.leeway = leeway

    def _validate_algorithm(self, algorithm):
        """
        Ensure that the nominated algorithm is recognized, and that cryptography is installed for those
        algorithms that require it
        """
        if algorithm not in ALLOWED_ALGORITHMS:
            raise TokenBackendError(
                format_lazy(_("Unrecognized algorithm type '{}'"), algorithm)
            )

        if algorithm in algorithms.requires_cryptography and not algorithms.has_crypto:
            raise TokenBackendError(
                format_lazy(
                    _("You must have cryptography installed to use {}."), algorithm
                )
            )

    def get_verifying_key(self, token):
        """Return the key to verify *token* with, resolving via JWKS if configured."""
        # HMAC algorithms verify with the same (symmetric) signing key.
        if self.algorithm.startswith("HS"):
            return self.signing_key

        if self.jwks_client:
            return self.jwks_client.get_signing_key_from_jwt(token).key

        return self.verifying_key

    def encode(self, payload):
        """
        Returns an encoded token for the given payload dictionary.
        """
        jwt_payload = payload.copy()
        if self.audience is not None:
            jwt_payload["aud"] = self.audience
        if self.issuer is not None:
            jwt_payload["iss"] = self.issuer

        token = jwt.encode(jwt_payload, self.signing_key, algorithm=self.algorithm)
        if isinstance(token, bytes):
            # For PyJWT <= 1.7.1
            return token.decode("utf-8")
        # For PyJWT >= 2.0.0a1
        return token

    def decode(self, token, verify=True):
        """
        Performs a validation of the given token and returns its payload
        dictionary.

        Raises a `TokenBackendError` if the token is malformed, if its
        signature check fails, or if its 'exp' claim indicates it has expired.
        """
        try:
            return jwt.decode(
                token,
                self.get_verifying_key(token),
                algorithms=[self.algorithm],
                audience=self.audience,
                issuer=self.issuer,
                leeway=self.leeway,
                options={
                    # Only check "aud" when an audience is configured.
                    "verify_aud": self.audience is not None,
                    "verify_signature": verify,
                },
            )
        except InvalidAlgorithmError as ex:
            raise TokenBackendError(_("Invalid algorithm specified")) from ex
        except InvalidTokenError:
            raise TokenBackendError(_("Token is invalid or expired"))
from django.contrib.auth import models as auth_models
from django.db.models.manager import EmptyManager
from django.utils.functional import cached_property
from .compat import CallableFalse, CallableTrue
from .settings import api_settings
class TokenUser:
    """
    A dummy user class modeled after django.contrib.auth.models.AnonymousUser.

    Used in conjunction with the `JWTTokenUserAuthentication` backend to
    implement single sign-on functionality across services which share the same
    secret key. `JWTTokenUserAuthentication` will return an instance of this
    class instead of a `User` model instance. Instances of this class act as
    stateless user objects which are backed by validated tokens.
    """

    # User is always active since Simple JWT will never issue a token for an
    # inactive user
    is_active = True

    _groups = EmptyManager(auth_models.Group)
    _user_permissions = EmptyManager(auth_models.Permission)

    def __init__(self, token):
        # The validated token; all user attributes are read from its claims.
        self.token = token

    def __str__(self):
        return f"TokenUser {self.id}"

    @cached_property
    def id(self):
        # The claim name is configurable (settings.USER_ID_CLAIM).
        return self.token[api_settings.USER_ID_CLAIM]

    @cached_property
    def pk(self):
        return self.id

    @cached_property
    def username(self):
        return self.token.get("username", "")

    @cached_property
    def is_staff(self):
        return self.token.get("is_staff", False)

    @cached_property
    def is_superuser(self):
        return self.token.get("is_superuser", False)

    def __eq__(self, other):
        # Bug fix: comparing against a non-TokenUser used to raise
        # AttributeError (accessing ``other.id``). Returning NotImplemented
        # lets Python fall back to the other operand's comparison (and
        # ultimately to False for unrelated types).
        if not isinstance(other, TokenUser):
            return NotImplemented
        return self.id == other.id

    def __ne__(self, other):
        result = self.__eq__(other)
        if result is NotImplemented:
            return result
        return not result

    def __hash__(self):
        return hash(self.id)

    def save(self):
        raise NotImplementedError("Token users have no DB representation")

    def delete(self):
        raise NotImplementedError("Token users have no DB representation")

    def set_password(self, raw_password):
        raise NotImplementedError("Token users have no DB representation")

    def check_password(self, raw_password):
        raise NotImplementedError("Token users have no DB representation")

    @property
    def groups(self):
        # Always empty: token users carry no DB-backed group membership.
        return self._groups

    @property
    def user_permissions(self):
        return self._user_permissions

    def get_group_permissions(self, obj=None):
        return set()

    def get_all_permissions(self, obj=None):
        return set()

    def has_perm(self, perm, obj=None):
        return False

    def has_perms(self, perm_list, obj=None):
        return False

    def has_module_perms(self, module):
        return False

    @property
    def is_anonymous(self):
        return CallableFalse

    @property
    def is_authenticated(self):
        return CallableTrue

    def get_username(self):
        return self.username

    def __getattr__(self, attr):
        """This acts as a backup attribute getter for custom claims defined in Token serializers."""
        return self.token.get(attr, None)
from time import time
from tokenize import group
from unittest import result
import grpc
from dpc.base.consumer import ConsumeBase
from dpc.protos.kafkapixy_pb2 import ConsNAckRq, AckRq
from dpc.protos.kafkapixy_pb2_grpc import KafkaPixyStub
from dpc.tools import utils
import logging
class Consumer(ConsumeBase):
    """gRPC consumer that streams Kafka messages through Kafka-Pixy.

    Uses Kafka-Pixy's consume-and-ack protocol: each request acknowledges the
    previously yielded message while fetching the next one.
    """

    def __init__(self, group: str, topic: str, address: str, port: str):
        """
        Args:
            group: Name of the consumer group.
            topic: Name of the topic in the cluster to consume from.
            address: Host of the Kafka-Pixy gRPC endpoint.
            port: Port of the Kafka-Pixy gRPC endpoint.
        """
        self.group = group
        self.topic = topic
        self.address = address
        self.port = port
        # Single shared stub, reused by consume(); previously consume()
        # wastefully created a second client and ignored this one.
        self.client = utils.create_client(self.address, self.port)
        logging.getLogger().setLevel(logging.INFO)

    def consume(self):
        """Generator that consumes messages forever, acking as it goes.

        Yields:
            dict with keys "Message", "Partition", "Offset", "key",
            "key_undefined" and "RecordHeader" for each consumed message.

        Note: the previous return annotation (``ConsNAckRq``) was incorrect --
        that is the *request* type; this method yields dictionaries.
        """
        ack_partition = None
        ack_offset = None
        request = ConsNAckRq(topic=self.topic, group=self.group)
        timeout = 1000
        while True:
            logging.info("Start searching data")
            if ack_offset is None:
                # Nothing to acknowledge yet: fetch without an ack.
                request.no_ack = True
                request.ack_partition = 0
                request.ack_offset = 0
            else:
                # Acknowledge the previously yielded message while fetching.
                request.no_ack = False
                request.ack_partition = ack_partition
                request.ack_offset = ack_offset
            try:
                response = self.client.ConsumeNAck(request, timeout=timeout)
                logging.info("connecting")
            except grpc.RpcError as err:
                if err.code() == grpc.StatusCode.NOT_FOUND:
                    # Long poll expired with no message; drop the pending ack
                    # and keep polling.
                    ack_offset = None
                logging.error(err.details())
                continue
            try:
                ack_partition = response.partition
                ack_offset = response.offset
                results = {
                    "Message": response.message,
                    "Partition": response.partition,
                    "Offset": response.offset,
                    "key": response.key_value,
                    "key_undefined": response.key_undefined,
                    "RecordHeader": response.headers,
                }
                logging.info("ββββGot data from produce ββββ")
                yield results
            except Exception:
                # Bug fix: was a bare `except:` which also swallowed
                # KeyboardInterrupt/SystemExit. Reset the pending ack and
                # continue consuming on malformed responses.
                logging.info("Reset")
                ack_offset = None
from __future__ import annotations
from abc import ABCMeta, abstractmethod
from typing import List, ByteString, Dict
from dpc.protos.kafkapixy_pb2 import ProdRs
class ProduceBase(metaclass=ABCMeta):
    """Abstract base class defining the producer interface for Kafka-Pixy."""

    def __init__(
        self,
        cluster: str,
        topic: str,
        message: bytes,
        address: str,
        port: str,
        headers: Dict,
        key_value: List,
        key_undefined: bool,
        async_mode: bool,
    ):
        """
        Initialize the produce args.

        Args:
            cluster (str): Name of the kafka cluster to operate on.
            topic (str): Name of the topic in the cluster to produce to.
            message (bytes): The message body.
            address (str): Host of the Kafka-Pixy gRPC endpoint.
            port (str): Port of the Kafka-Pixy gRPC endpoint.
            headers (Dict): Headers to include with the published message.
            key_value (List, optional): Hash of the key used to determine the
                partition to produce to.
            key_undefined (bool): If true, messages are written to a random
                partition; otherwise the hash of ``key_value`` is used to
                determine the partition.
            async_mode (bool): If true then the method returns immediately
                after Kafka-Pixy gets the produce request, and the message is
                written to Kafka asynchronously. In that case partition and
                offset returned in the response should be ignored. If false,
                then a response is returned in accordance with the
                producer.required_acks parameter, which can be one of:
                * no_response: the response is returned as soon as a produce
                  request is delivered to a partition leader Kafka broker.
                * wait_for_local: the response is returned as soon as data is
                  written to the disk by a partition leader Kafka broker.
                * wait_for_all: the response is returned after all in-sync
                  replicas have data committed to disk.
        """
        pass

    @abstractmethod
    def produce(self):
        """Produce abstract method; subclasses perform the actual publish."""
        pass
"""Experimental Resolver for getting the artifacts based on Span."""
from typing import Dict, List, Optional, Text
from tfx import types
from tfx.components.example_gen import utils
from tfx.dsl.components.common import resolver
from tfx.orchestration import data_types
from tfx.orchestration import metadata
from tfx.proto import range_config_pb2
from tfx.types import artifact_utils
class SpansResolver(resolver.ResolverStrategy):
  """Resolver that returns the artifacts based on Span.

  Artifacts are filtered to those whose 'span' custom property falls inside
  the configured static or rolling range, then sorted by span (descending).

  Note that this Resolver is experimental and is subject to change in terms of
  both interface and implementation.
  """

  def __init__(self, range_config: range_config_pb2.RangeConfig):
    self._range_config = range_config

  def _get_span(self, artifact: types.Artifact) -> int:
    """Returns the integer span of `artifact`, raising if it has none."""
    if not artifact.has_custom_property(utils.SPAN_PROPERTY_NAME):
      # Bug fix: the original message applied '%' to a string with no format
      # specifier, which raised TypeError instead of this RuntimeError.
      raise RuntimeError('Span does not exist for %s' % str(artifact))
    return int(
        artifact.get_string_custom_property(utils.SPAN_PROPERTY_NAME))

  def _resolve(self, input_dict: Dict[Text, List[types.Artifact]]):
    """Filters each artifact list to spans selected by the range config."""
    result = {}
    for k, artifact_list in input_dict.items():
      if self._range_config.HasField('static_range'):
        start_span_number = self._range_config.static_range.start_span_number
        end_span_number = self._range_config.static_range.end_span_number
      elif self._range_config.HasField('rolling_range'):
        start_span_number = self._range_config.rolling_range.start_span_number
        num_spans = self._range_config.rolling_range.num_spans
        if num_spans <= 0:
          raise ValueError('num_spans should be positive number.')
        # The rolling window ends at the most recent span present.
        most_recent_span = -1
        for artifact in artifact_list:
          most_recent_span = max(most_recent_span, self._get_span(artifact))
        start_span_number = max(start_span_number,
                                most_recent_span - num_spans + 1)
        end_span_number = most_recent_span
      else:
        raise ValueError('RangeConfig type is not supported.')

      # Get the artifacts within range.
      in_range_artifacts = [
          artifact for artifact in artifact_list
          if start_span_number <= self._get_span(artifact) <= end_span_number
      ]
      # Bug fix: sort numerically. The previous key compared span strings
      # lexicographically, so e.g. span "10" sorted below span "9".
      result[k] = sorted(in_range_artifacts, key=self._get_span, reverse=True)
    return result

  def resolve(
      self,
      pipeline_info: data_types.PipelineInfo,
      metadata_handler: metadata.Metadata,
      source_channels: Dict[Text, types.Channel],
  ) -> resolver.ResolveResult:
    """Resolves in-range artifacts for each source channel via MLMD."""
    pipeline_context = metadata_handler.get_pipeline_context(pipeline_info)
    if pipeline_context is None:
      # Bug fix: report the pipeline info, not the (known-None) context.
      raise RuntimeError('Pipeline context absent for %s' % pipeline_info)
    candidate_dict = {}
    for k, c in source_channels.items():
      candidate_artifacts = metadata_handler.get_qualified_artifacts(
          contexts=[pipeline_context],
          type_name=c.type_name,
          producer_component_id=c.producer_component_id,
          output_key=c.output_key)
      candidate_dict[k] = [
          artifact_utils.deserialize_artifact(a.type, a.artifact)
          for a in candidate_artifacts
      ]
    resolved_dict = self._resolve(candidate_dict)
    resolve_state_dict = {
        k: bool(artifact_list) for k, artifact_list in resolved_dict.items()
    }
    return resolver.ResolveResult(
        per_key_resolve_result=resolved_dict,
        per_key_resolve_state=resolve_state_dict)

  def resolve_artifacts(
      self, metadata_handler: metadata.Metadata,
      input_dict: Dict[Text, List[types.Artifact]]
  ) -> Optional[Dict[Text, List[types.Artifact]]]:
    """Resolves artifacts from channels by querying MLMD.

    Args:
      metadata_handler: A metadata handler to access MLMD store.
      input_dict: The input_dict to resolve from.

    Returns:
      If `min_count` for every input is met, returns a
      Dict[Text, List[Artifact]]. Otherwise, return None.

    Raises:
      RuntimeError: if input_dict contains artifact without span property.
    """
    resolved_dict = self._resolve(input_dict)
    all_min_count_met = all(
        bool(artifact_list) for artifact_list in resolved_dict.values())
    return resolved_dict if all_min_count_met else None
"""Utils for TFX component types. Intended for internal usage only."""
from typing import Any, Callable, Dict, Optional, Text
from tfx import types
from tfx.dsl.components.base import base_component
from tfx.dsl.components.base import executor_spec as base_executor_spec
from tfx.types import component_spec
def create_tfx_component_class(
    name: Text,
    tfx_executor_spec: base_executor_spec.ExecutorSpec,
    input_channel_parameters: Optional[Dict[
        Text, component_spec.ChannelParameter]] = None,
    output_channel_parameters: Optional[Dict[
        Text, component_spec.ChannelParameter]] = None,
    execution_parameters: Optional[Dict[
        Text, component_spec.ExecutionParameter]] = None,
    default_init_args: Optional[Dict[Text, Any]] = None
) -> Callable[..., base_component.BaseComponent]:
  """Creates a TFX component class dynamically.

  Args:
    name: Name of the component class to create.
    tfx_executor_spec: Executor spec wired into the generated class.
    input_channel_parameters: Mapping of input name to ChannelParameter.
    output_channel_parameters: Mapping of output name to ChannelParameter.
    execution_parameters: Mapping of parameter name to ExecutionParameter.
    default_init_args: Arguments merged into every instantiation of the
      generated class (e.g. default output channels).

  Returns:
    A `base_component.BaseComponent` subclass that can be instantiated like
    any hand-written TFX component.
  """
  # Normalize optional mappings up front. Bug fix: the generated __init__
  # previously crashed with TypeError (`dict.update(None)`) when
  # `default_init_args` was left at its default of None; the spec class was
  # likewise built with None instead of empty mappings.
  input_channel_parameters = input_channel_parameters or {}
  output_channel_parameters = output_channel_parameters or {}
  execution_parameters = execution_parameters or {}
  default_init_args = default_init_args or {}

  tfx_component_spec_class = type(
      str(name) + 'Spec',
      (component_spec.ComponentSpec,),
      dict(
          PARAMETERS=execution_parameters,
          INPUTS=input_channel_parameters,
          OUTPUTS=output_channel_parameters,
      ),
  )

  def tfx_component_class_init(self, **kwargs):
    """Generated __init__ for the dynamic component class."""
    instance_name = kwargs.pop('instance_name', None)
    arguments = {}
    arguments.update(kwargs)
    # NOTE(review): default_init_args takes precedence over caller-supplied
    # kwargs here; this mirrors the original update order -- confirm intended.
    arguments.update(default_init_args)
    # Provide default values for output channels.
    for output_key, output_channel_param in output_channel_parameters.items():
      if output_key not in arguments:
        arguments[output_key] = types.Channel(type=output_channel_param.type)
    base_component.BaseComponent.__init__(
        self,
        # Generate spec by wiring up the input/output channel.
        spec=self.__class__.SPEC_CLASS(**arguments),
        instance_name=instance_name,
    )

  tfx_component_class = type(
      str(name),
      (base_component.BaseComponent,),
      dict(
          SPEC_CLASS=tfx_component_spec_class,
          EXECUTOR_SPEC=tfx_executor_spec,
          __init__=tfx_component_class_init,
      ),
  )
  return tfx_component_class
"""Functions for creating container components."""
from typing import Any, Callable, Dict, List, Text
from tfx.dsl.component.experimental import component_utils
from tfx.dsl.component.experimental import executor_specs
from tfx.dsl.component.experimental import placeholders
from tfx.dsl.components.base import base_component
from tfx.types import channel_utils
from tfx.types import component_spec
def create_container_component(
    name: Text,
    image: Text,
    command: List[placeholders.CommandlineArgumentType],
    inputs: Dict[Text, Any] = None,
    outputs: Dict[Text, Any] = None,
    parameters: Dict[Text, Any] = None,
) -> Callable[..., base_component.BaseComponent]:
  """Creates a container-based component.

  Args:
    name: The name of the component.
    image: Container image name.
    command: Container entrypoint command-line. Not executed within a shell.
      The command-line can use placeholder objects that will be replaced at
      the compilation time. The placeholder objects can be imported from
      tfx.dsl.component.experimental.placeholders. Note that Jinja templates
      are not supported.
    inputs: The list of component inputs.
    outputs: The list of component outputs.
    parameters: The list of component parameters.

  Returns:
    Component that can be instantiated and used inside a pipeline.

  Example:
    component = create_container_component(
        name='TrainModel',
        inputs={
            'training_data': Dataset,
        },
        outputs={
            'model': Model,
        },
        parameters={
            'num_training_steps': int,
        },
        image='gcr.io/my-project/my-trainer',
        command=[
            'python3', 'my_trainer',
            '--training_data_uri', InputUriPlaceholder('training_data'),
            '--model_uri', OutputUriPlaceholder('model'),
            '--num_training-steps', InputValuePlaceholder('num_training_steps'),
        ]
    )
  """
  if not name:
    raise ValueError('Component name cannot be empty.')

  inputs = inputs or {}
  outputs = outputs or {}
  parameters = parameters or {}

  # TODO(b/155804245) Sanitize the names so that they're valid python names
  input_channel_parameters = {
      input_name: component_spec.ChannelParameter(type=channel_type)
      for input_name, channel_type in inputs.items()
  }

  # Each output gets both a ChannelParameter (for the spec) and a default
  # channel seeded with a fresh artifact instance (for instantiation).
  output_channel_parameters = {}
  output_channels = {}
  for output_name, channel_type in outputs.items():
    # TODO(b/155804245) Sanitize the names so that they're valid python names
    output_channel_parameters[output_name] = component_spec.ChannelParameter(
        type=channel_type)
    output_channels[output_name] = channel_utils.as_channel([channel_type()])

  # TODO(b/155804245) Sanitize the names so that they're valid python names
  execution_parameters = {
      param_name: component_spec.ExecutionParameter(type=parameter_type)
      for param_name, parameter_type in parameters.items()
  }

  return component_utils.create_tfx_component_class(
      name=name,
      tfx_executor_spec=executor_specs.TemplatedExecutorContainerSpec(
          image=image,
          command=command,
      ),
      input_channel_parameters=input_channel_parameters,
      output_channel_parameters=output_channel_parameters,
      execution_parameters=execution_parameters,
      default_init_args={**output_channels})
"""Command-line placeholders for use in container component definitions."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from typing import List, Text, Union
from tfx.utils import json_utils
class InputValuePlaceholder(json_utils.Jsonable):
  """Placeholder for the value of an input argument.

  At compilation time this placeholder is replaced with the string value of
  the named execution-property input argument.
  """

  def __init__(self, input_name: Text):
    self.input_name = input_name

  def __eq__(self, other) -> bool:
    if not isinstance(other, self.__class__):
      return False
    return self.input_name == other.input_name

  def __ne__(self, other) -> bool:
    return not self.__eq__(other)
class InputUriPlaceholder(json_utils.Jsonable):
  """Placeholder for the URI of an input artifact argument.

  At runtime this placeholder is replaced with the URI of the named input
  artifact's data.
  """

  def __init__(self, input_name: Text):
    self.input_name = input_name

  def __eq__(self, other) -> bool:
    if not isinstance(other, self.__class__):
      return False
    return self.input_name == other.input_name

  def __ne__(self, other) -> bool:
    return not self.__eq__(other)
class OutputUriPlaceholder(json_utils.Jsonable):
  """Placeholder for the URI of an output artifact argument.

  At runtime this placeholder is replaced with the URI where the named output
  artifact's data should be written.
  """

  def __init__(self, output_name: Text):
    self.output_name = output_name

  def __eq__(self, other) -> bool:
    if not isinstance(other, self.__class__):
      return False
    return self.output_name == other.output_name

  def __ne__(self, other) -> bool:
    return not self.__eq__(other)
class ConcatPlaceholder(object):
  """Placeholder for the concatenation of multiple command-line parts.

  At runtime this placeholder is replaced by a single string containing the
  concatenated parts.
  """

  def __init__(self, items: List['CommandlineArgumentType']):
    self.items = items

  def __eq__(self, other) -> bool:
    if not isinstance(other, self.__class__):
      return False
    return self.items == other.items

  def __ne__(self, other) -> bool:
    return not self.__eq__(other)
# Type of a single element of a container component's command line: either a
# literal string or one of the placeholder objects defined above.
CommandlineArgumentType = Union[
    Text,
    InputValuePlaceholder,
    InputUriPlaceholder,
    OutputUriPlaceholder,
    ConcatPlaceholder,
]
# TODO(ccy): Remove pytype "disable=attribute-error" and "disable=module-attr"
# overrides after Python 2 support is removed from TFX.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import enum
import inspect
import types
from typing import Any, Dict, Optional, Set, Text, Tuple, Type, Union
from tfx.dsl.component.experimental import annotations
from tfx.types import artifact
from tfx.types import standard_artifacts
class ArgFormats(enum.Enum):
  """Formats in which arguments are passed to a component executor function.

  Assigned per-argument by `_parse_signature` based on type hints.
  """
  INPUT_ARTIFACT = 1
  OUTPUT_ARTIFACT = 2
  ARTIFACT_VALUE = 3
  PARAMETER = 4
# Maps primitive value typehints to the standard artifact types used to carry
# those values between components.
_PRIMITIVE_TO_ARTIFACT = {
    int: standard_artifacts.Integer,
    float: standard_artifacts.Float,
    Text: standard_artifacts.String,
    bytes: standard_artifacts.Bytes,
}

# Map from `Optional[T]` to `T` for primitive types. This map is a simple way
# to extract the value of `T` from its optional typehint, since the internal
# fields of the typehint vary depending on the Python version.
_OPTIONAL_PRIMITIVE_MAP = dict((Optional[t], t) for t in _PRIMITIVE_TO_ARTIFACT)
def _validate_signature(
    func: types.FunctionType,
    argspec: inspect.FullArgSpec,  # pytype: disable=module-attr
    typehints: Dict[Text, Any],
    subject_message: Text) -> None:
  """Validates signature of a typehint-annotated component executor function.

  Args:
    func: The executor function being validated (used in error messages).
    argspec: `inspect.getfullargspec(func)` result.
    typehints: `func.__annotations__` mapping of argument name to type hint.
    subject_message: Prefix used in raised error messages.

  Raises:
    ValueError: if the signature uses *args/**kwargs, has unannotated
      arguments, or declares an invalid return type hint.
  """
  args, varargs, keywords = argspec.args, argspec.varargs, argspec.varkw
  if varargs or keywords:
    raise ValueError('%s does not support *args or **kwargs arguments.' %
                     subject_message)

  # Validate argument type hints.
  for arg in args:
    if isinstance(arg, list):
      # Note: this feature was removed in Python 3:
      # https://www.python.org/dev/peps/pep-3113/.
      raise ValueError('%s does not support nested input arguments.' %
                       subject_message)
    if arg not in typehints:
      raise ValueError('%s must have all arguments annotated with typehints.' %
                       subject_message)

  # Validate return type hints.
  if isinstance(typehints.get('return', None), annotations.OutputDict):
    for arg, arg_typehint in typehints['return'].kwargs.items():
      if (isinstance(arg_typehint, annotations.OutputArtifact) or
          (inspect.isclass(arg_typehint) and
           issubclass(arg_typehint, artifact.Artifact))):
        # Bug fix: this message previously contained corrupted text
        # ('README.ml-pipelines-sdk.md') in place of the word 'a'.
        raise ValueError(
            ('Output artifacts for the component executor function %r should '
             'be declared as function parameters annotated with type hint '
             '`tfx.types.annotations.OutputArtifact[T]` where T is a '
             'subclass of `tfx.types.Artifact`. They should not be declared '
             'as part of the return value `OutputDict` type hint.') % func)
  elif 'return' not in typehints or typehints['return'] in (None, type(None)):
    pass
  else:
    raise ValueError(
        ('%s must have either an OutputDict instance or `None` as its return '
         'value typehint.') % subject_message)
def _parse_signature(
    func: types.FunctionType,
    argspec: inspect.FullArgSpec,  # pytype: disable=module-attr
    typehints: Dict[Text, Any]
) -> Tuple[Dict[Text, Type[artifact.Artifact]], Dict[
    Text, Type[artifact.Artifact]], Dict[Text, Type[Union[
        int, float, Text, bytes]]], Dict[Text, Any], Dict[Text, ArgFormats],
    Set[Text]]:
  """Parses signature of a typehint-annotated component executor function.

  Args:
    func: A component executor function to be parsed.
    argspec: A `inspect.FullArgSpec` instance describing the component executor
      function. Usually obtained from `inspect.getfullargspec(func)`.
    typehints: A dictionary mapping function argument names to type hints.
      Usually obtained from `func.__annotations__`.

  Returns:
    inputs: A dictionary mapping each input name to its artifact type (as a
      subclass of `tfx.types.Artifact`).
    outputs: A dictionary mapping each output name to its artifact type (as a
      subclass of `tfx.types.Artifact`).
    parameters: A dictionary mapping each parameter name to its primitive type
      (one of `int`, `float`, `Text` and `bytes`).
    arg_formats: Dictionary representing the input arguments of the given
      component executor function. Each entry's key is the argument's string
      name; each entry's value is the format of the argument to be passed into
      the function (given by a value of the `ArgFormats` enum).
    arg_defaults: Dictionary mapping names of optional arguments to default
      values.
    returned_outputs: A set of output names that are declared as ValueArtifact
      returned outputs.
  """
  # Extract optional arguments as dict from name to its declared optional value.
  arg_defaults = {}
  if argspec.defaults:
    # Defaults align with the *last* len(defaults) positional arguments.
    arg_defaults = dict(
        zip(argspec.args[-len(argspec.defaults):], argspec.defaults))

  # Parse function arguments.
  inputs = {}
  outputs = {}
  parameters = {}
  arg_formats = {}
  returned_outputs = set()
  for arg in argspec.args:
    arg_typehint = typehints[arg]
    # If the typehint is `Optional[T]` for a primitive type `T`, unwrap it.
    if arg_typehint in _OPTIONAL_PRIMITIVE_MAP:
      arg_typehint = _OPTIONAL_PRIMITIVE_MAP[arg_typehint]
    if isinstance(arg_typehint, annotations.InputArtifact):
      # Optional input artifacts may only default to None.
      if arg_defaults.get(arg, None) is not None:
        raise ValueError(
            ('If an input artifact is declared as an optional argument, '
             'its default value must be `None` (got default value %r for '
             'input argument %r of %r instead).') %
            (arg_defaults[arg], arg, func))
      arg_formats[arg] = ArgFormats.INPUT_ARTIFACT
      inputs[arg] = arg_typehint.type
    elif isinstance(arg_typehint, annotations.OutputArtifact):
      # Output artifacts can never carry a default value.
      if arg in arg_defaults:
        raise ValueError(
            ('Output artifact of component function cannot be declared as '
             'optional (error for argument %r of %r).') % (arg, func))
      arg_formats[arg] = ArgFormats.OUTPUT_ARTIFACT
      outputs[arg] = arg_typehint.type
    elif isinstance(arg_typehint, annotations.Parameter):
      # Parameter defaults must be None or match the declared type.
      if arg in arg_defaults:
        if not (arg_defaults[arg] is None or
                isinstance(arg_defaults[arg], arg_typehint.type)):
          raise ValueError((
              'The default value for optional parameter %r on function %r must '
              'be an instance of its declared type %r or `None` (got %r '
              'instead)') % (arg, func, arg_typehint.type, arg_defaults[arg]))
      arg_formats[arg] = ArgFormats.PARAMETER
      parameters[arg] = arg_typehint.type
    elif arg_typehint in _PRIMITIVE_TO_ARTIFACT:
      # Bare primitive hint: treated as a value artifact input.
      if arg in arg_defaults:
        if not (arg_defaults[arg] is None or
                isinstance(arg_defaults[arg], arg_typehint)):
          raise ValueError(
              ('The default value for optional input value %r on function %r '
               'must be an instance of its declared type %r or `None` (got %r '
               'instead)') % (arg, func, arg_typehint, arg_defaults[arg]))
      arg_formats[arg] = ArgFormats.ARTIFACT_VALUE
      inputs[arg] = _PRIMITIVE_TO_ARTIFACT[arg_typehint]
    elif (inspect.isclass(arg_typehint) and
          issubclass(arg_typehint, artifact.Artifact)):
      # A bare artifact class is ambiguous: input vs. output must be stated.
      raise ValueError((
          'Invalid type hint annotation for argument %r on function %r. '
          'Argument with an artifact class typehint annotation should indicate '
          'whether it is used as an input or output artifact by using the '
          '`InputArtifact[ArtifactType]` or `OutputArtifact[ArtifactType]` '
          'typehint annotations.') % (arg, func))
    else:
      raise ValueError(
          'Unknown type hint annotation for argument %r on function %r' %
          (arg, func))

  if 'return' in typehints and typehints['return'] not in (None, type(None)):
    # Primitive-valued entries of the returned OutputDict become outputs.
    for arg, arg_typehint in typehints['return'].kwargs.items():
      if arg_typehint in _PRIMITIVE_TO_ARTIFACT:
        outputs[arg] = _PRIMITIVE_TO_ARTIFACT[arg_typehint]
        returned_outputs.add(arg)
      else:
        raise ValueError(
            ('Unknown type hint annotation %r for returned output %r on '
             'function %r') % (arg_typehint, arg, func))

  return (inputs, outputs, parameters, arg_formats, arg_defaults,
          returned_outputs)
def parse_typehint_component_function(
    func: types.FunctionType
) -> Tuple[Dict[Text, Type[artifact.Artifact]], Dict[
    Text, Type[artifact.Artifact]], Dict[Text, Type[Union[
        int, float, Text, bytes]]], Dict[Text, Any], Dict[Text, ArgFormats],
    Set[Text]]:
  """Parses the given component executor function.

  This method parses a typehint-annotated Python function that is intended to
  be used as a component and returns the information needed about the
  interface (inputs / outputs / returned output values) about that component,
  as well as a list of argument names and formats for determining the
  parameters that should be passed when calling `func(*args)`.

  Args:
    func: A component executor function to be parsed.

  Returns:
    inputs: A dictionary mapping each input name to its artifact type (as a
      subclass of `tfx.types.Artifact`).
    outputs: A dictionary mapping each output name to its artifact type (as a
      subclass of `tfx.types.Artifact`).
    parameters: A dictionary mapping each parameter name to its primitive type
      (one of `int`, `float`, `Text` and `bytes`).
    arg_formats: Dictionary representing the input arguments of the given
      component executor function. Each entry's key is the argument's string
      name; each entry's value is the format of the argument to be passed into
      the function (given by a value of the `ArgFormats` enum).
    arg_defaults: Dictionary mapping names of optional arguments to default
      values.
    returned_outputs: A set of output names that are declared as ValueArtifact
      returned outputs.
  """
  # Check input argument type.
  if not isinstance(func, types.FunctionType):
    # Bug fix: this message previously contained corrupted text
    # ('README.ml-pipelines-sdk.md') in place of the word 'a'.
    raise ValueError(
        'Expected a typehint-annotated Python function (got %r instead).' %
        (func,))

  # Inspect the component executor function.
  typehints = func.__annotations__  # pytype: disable=attribute-error
  argspec = inspect.getfullargspec(func)  # pytype: disable=module-attr
  # Bug fix: same text corruption as above, in the validation error prefix.
  subject_message = 'Component declared as a typehint-annotated function'
  _validate_signature(func, argspec, typehints, subject_message)

  # Parse the function and return its details.
  inputs, outputs, parameters, arg_formats, arg_defaults, returned_outputs = (
      _parse_signature(func, argspec, typehints))
  return (inputs, outputs, parameters, arg_formats, arg_defaults,
          returned_outputs)
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import inspect
from typing import Text, Type, Union
from six import with_metaclass
from tfx.types import artifact
class _ArtifactGenericMeta(type):
  """Metaclass for _ArtifactGeneric, to enable class indexing."""

  def __getitem__(cls: Type['_ArtifactGeneric'],
                  params: Type[artifact.Artifact]):
    """Metaclass method allowing indexing class (`_ArtifactGeneric[T]`)."""
    # Delegate to the class's own hook so it controls validation of T.
    return cls._generic_getitem(params)  # pytype: disable=attribute-error
class _ArtifactGeneric(with_metaclass(_ArtifactGenericMeta, object)):
  """A generic that takes a Type[tfx.types.Artifact] as its single argument."""

  def __init__(  # pylint: disable=invalid-name
      self,
      artifact_type: Type[artifact.Artifact],
      _init_via_getitem=False):
    # Direct construction is disallowed; the subscript syntax `Cls[T]`
    # (routed through the metaclass) is the only supported entry point.
    if not _init_via_getitem:
      class_name = self.__class__.__name__
      # Bug fix: this message previously contained corrupted text
      # ('README.ml-pipelines-sdk.md') in place of the word 'a'.
      raise ValueError(
          ('%s should be instantiated via the syntax `%s[T]`, where T is a '
           'subclass of tfx.types.Artifact.') % (class_name, class_name))
    self.type = artifact_type

  @classmethod
  def _generic_getitem(cls, params):
    """Return the result of `_ArtifactGeneric[T]` for a given type T."""
    # Check that the given parameter is a concrete (i.e. non-abstract)
    # subclass of `tfx.types.Artifact`.
    if (inspect.isclass(params) and issubclass(params, artifact.Artifact) and
        params.TYPE_NAME):
      return cls(params, _init_via_getitem=True)
    else:
      class_name = cls.__name__
      # Bug fix: same text corruption as above.
      raise ValueError(
          ('Generic type `%s[T]` expects the single parameter T to be a '
           'concrete subclass of `tfx.types.Artifact` (got %r instead).') %
          (class_name, params))

  def __repr__(self):
    return '%s[%s]' % (self.__class__.__name__, self.type)
class _PrimitiveTypeGenericMeta(type):
  """Metaclass for _PrimitiveTypeGeneric, to enable primitive type indexing."""

  def __getitem__(cls: Type[Union[int, float, Text, bytes]],
                  params: Type[artifact.Artifact]):
    """Metaclass method allowing indexing class (`_PrimitiveTypeGeneric[T]`)."""
    # Delegate to the class's own hook so it controls validation of T.
    return cls._generic_getitem(params)  # pytype: disable=attribute-error
class _PrimitiveTypeGeneric(with_metaclass(_PrimitiveTypeGenericMeta, object)):
  """A generic that takes a primitive type as its single argument.

  Instances are created only via the indexing syntax `Subclass[T]` (enabled
  by `_PrimitiveTypeGenericMeta`); direct construction raises `ValueError`.
  """

  def __init__(  # pylint: disable=invalid-name
      self,
      artifact_type: Type[Union[int, float, Text, bytes]],
      _init_via_getitem=False):
    """Wrap the given primitive type.

    Args:
      artifact_type: One of `int`, `float`, `Text` (str) or `bytes`.
      _init_via_getitem: Internal flag set only by `_generic_getitem`; guards
        against direct instantiation.

    Raises:
      ValueError: If constructed directly rather than via `Class[T]`.
    """
    if not _init_via_getitem:
      class_name = self.__class__.__name__
      raise ValueError(
          ('%s should be instantiated via the syntax `%s[T]`, where T is '
           '`int`, `float`, `str` or `bytes`.') % (class_name, class_name))
    self.type = artifact_type

  @classmethod
  def _generic_getitem(cls, params):
    """Return the result of `_PrimitiveTypeGeneric[T]` for a given type T.

    Args:
      params: The value of `T` in the expression `cls[T]`.

    Returns:
      An instance of `cls` wrapping `params`.

    Raises:
      ValueError: If `params` is not one of `int`, `float`, `Text` or `bytes`.
    """
    # Check that the given parameter is a primitive type.
    if inspect.isclass(params) and params in (int, float, Text, bytes):
      return cls(params, _init_via_getitem=True)
    else:
      class_name = cls.__name__
      raise ValueError(
          ('Generic type `%s[T]` expects the single parameter T to be '
           '`int`, `float`, `str` or `bytes` (got %r instead).') %
          (class_name, params))

  def __repr__(self):
    return '%s[%s]' % (self.__class__.__name__, self.type)
# Typehint annotations for component authoring.
class InputArtifact(_ArtifactGeneric):
  """Type annotation marking an input artifact, used as `InputArtifact[T]`."""
class OutputArtifact(_ArtifactGeneric):
  """Type annotation marking an output artifact, used as `OutputArtifact[T]`."""
class Parameter(_PrimitiveTypeGeneric):
  """Type annotation marking a component parameter, used as `Parameter[T]`."""
# TODO(ccy): potentially make this compatible `typing.TypedDict` in
# Python 3.8, to allow for component return value type checking.
class OutputDict(object):
  """Decorator declaring component executor function outputs.

  Holds the declared output name -> type pairs on the `kwargs` attribute.
  """

  def __init__(self, **kwargs):
    # Keep the declarations exactly as supplied by the caller.
    self.kwargs = kwargs
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.