repo
stringlengths
7
55
path
stringlengths
4
127
func_name
stringlengths
1
88
original_string
stringlengths
75
19.8k
language
stringclasses
1 value
code
stringlengths
75
19.8k
code_tokens
listlengths
20
707
docstring
stringlengths
3
17.3k
docstring_tokens
listlengths
3
222
sha
stringlengths
40
40
url
stringlengths
87
242
partition
stringclasses
1 value
idx
int64
0
252k
google/apitools
apitools/base/protorpclite/descriptor.py
describe_enum
def describe_enum(enum_definition): """Build descriptor for Enum class. Args: enum_definition: Enum class to provide descriptor for. Returns: Initialized EnumDescriptor instance describing the Enum class. """ enum_descriptor = EnumDescriptor() enum_descriptor.name = enum_definition.definition_name().split('.')[-1] values = [] for number in enum_definition.numbers(): value = enum_definition.lookup_by_number(number) values.append(describe_enum_value(value)) if values: enum_descriptor.values = values return enum_descriptor
python
def describe_enum(enum_definition):
    """Build descriptor for Enum class.

    Args:
      enum_definition: Enum class to provide descriptor for.

    Returns:
      Initialized EnumDescriptor instance describing the Enum class.
    """
    descriptor = EnumDescriptor()
    # Only the unqualified (leaf) part of the dotted definition name is kept.
    descriptor.name = enum_definition.definition_name().split('.')[-1]
    value_descriptors = [
        describe_enum_value(enum_definition.lookup_by_number(number))
        for number in enum_definition.numbers()]
    if value_descriptors:
        # Leave the repeated field unset (rather than empty) for empty enums.
        descriptor.values = value_descriptors
    return descriptor
[ "def", "describe_enum", "(", "enum_definition", ")", ":", "enum_descriptor", "=", "EnumDescriptor", "(", ")", "enum_descriptor", ".", "name", "=", "enum_definition", ".", "definition_name", "(", ")", ".", "split", "(", "'.'", ")", "[", "-", "1", "]", "values...
Build descriptor for Enum class. Args: enum_definition: Enum class to provide descriptor for. Returns: Initialized EnumDescriptor instance describing the Enum class.
[ "Build", "descriptor", "for", "Enum", "class", "." ]
f3745a7ea535aa0e88b0650c16479b696d6fd446
https://github.com/google/apitools/blob/f3745a7ea535aa0e88b0650c16479b696d6fd446/apitools/base/protorpclite/descriptor.py#L282-L302
train
207,600
google/apitools
apitools/base/protorpclite/descriptor.py
describe_field
def describe_field(field_definition): """Build descriptor for Field instance. Args: field_definition: Field instance to provide descriptor for. Returns: Initialized FieldDescriptor instance describing the Field instance. """ field_descriptor = FieldDescriptor() field_descriptor.name = field_definition.name field_descriptor.number = field_definition.number field_descriptor.variant = field_definition.variant if isinstance(field_definition, messages.EnumField): field_descriptor.type_name = field_definition.type.definition_name() if isinstance(field_definition, messages.MessageField): field_descriptor.type_name = ( field_definition.message_type.definition_name()) if field_definition.default is not None: field_descriptor.default_value = _DEFAULT_TO_STRING_MAP[ type(field_definition)](field_definition.default) # Set label. if field_definition.repeated: field_descriptor.label = FieldDescriptor.Label.REPEATED elif field_definition.required: field_descriptor.label = FieldDescriptor.Label.REQUIRED else: field_descriptor.label = FieldDescriptor.Label.OPTIONAL return field_descriptor
python
def describe_field(field_definition):
    """Build descriptor for Field instance.

    Args:
      field_definition: Field instance to provide descriptor for.

    Returns:
      Initialized FieldDescriptor instance describing the Field instance.
    """
    descriptor = FieldDescriptor()
    descriptor.name = field_definition.name
    descriptor.number = field_definition.number
    descriptor.variant = field_definition.variant

    # Enum and message fields additionally record the referenced type's
    # fully-qualified definition name.
    if isinstance(field_definition, messages.EnumField):
        descriptor.type_name = field_definition.type.definition_name()
    if isinstance(field_definition, messages.MessageField):
        descriptor.type_name = (
            field_definition.message_type.definition_name())

    default = field_definition.default
    if default is not None:
        # Defaults are stored in their string form, keyed by field class.
        to_string = _DEFAULT_TO_STRING_MAP[type(field_definition)]
        descriptor.default_value = to_string(default)

    # Determine the field label from its cardinality flags.
    if field_definition.repeated:
        label = FieldDescriptor.Label.REPEATED
    elif field_definition.required:
        label = FieldDescriptor.Label.REQUIRED
    else:
        label = FieldDescriptor.Label.OPTIONAL
    descriptor.label = label

    return descriptor
[ "def", "describe_field", "(", "field_definition", ")", ":", "field_descriptor", "=", "FieldDescriptor", "(", ")", "field_descriptor", ".", "name", "=", "field_definition", ".", "name", "field_descriptor", ".", "number", "=", "field_definition", ".", "number", "field...
Build descriptor for Field instance. Args: field_definition: Field instance to provide descriptor for. Returns: Initialized FieldDescriptor instance describing the Field instance.
[ "Build", "descriptor", "for", "Field", "instance", "." ]
f3745a7ea535aa0e88b0650c16479b696d6fd446
https://github.com/google/apitools/blob/f3745a7ea535aa0e88b0650c16479b696d6fd446/apitools/base/protorpclite/descriptor.py#L305-L338
train
207,601
google/apitools
apitools/base/protorpclite/descriptor.py
describe_message
def describe_message(message_definition): """Build descriptor for Message class. Args: message_definition: Message class to provide descriptor for. Returns: Initialized MessageDescriptor instance describing the Message class. """ message_descriptor = MessageDescriptor() message_descriptor.name = message_definition.definition_name().split( '.')[-1] fields = sorted(message_definition.all_fields(), key=lambda v: v.number) if fields: message_descriptor.fields = [describe_field(field) for field in fields] try: nested_messages = message_definition.__messages__ except AttributeError: pass else: message_descriptors = [] for name in nested_messages: value = getattr(message_definition, name) message_descriptors.append(describe_message(value)) message_descriptor.message_types = message_descriptors try: nested_enums = message_definition.__enums__ except AttributeError: pass else: enum_descriptors = [] for name in nested_enums: value = getattr(message_definition, name) enum_descriptors.append(describe_enum(value)) message_descriptor.enum_types = enum_descriptors return message_descriptor
python
def describe_message(message_definition):
    """Build descriptor for Message class.

    Args:
      message_definition: Message class to provide descriptor for.

    Returns:
      Initialized MessageDescriptor instance describing the Message class.
    """
    descriptor = MessageDescriptor()
    descriptor.name = message_definition.definition_name().split('.')[-1]

    # Fields are listed in ascending field-number order.
    sorted_fields = sorted(message_definition.all_fields(),
                           key=lambda field: field.number)
    if sorted_fields:
        descriptor.fields = [describe_field(field) for field in sorted_fields]

    # Sentinel distinguishes "attribute missing" from any declared value.
    _absent = object()

    nested_messages = getattr(message_definition, '__messages__', _absent)
    if nested_messages is not _absent:
        descriptor.message_types = [
            describe_message(getattr(message_definition, name))
            for name in nested_messages]

    nested_enums = getattr(message_definition, '__enums__', _absent)
    if nested_enums is not _absent:
        descriptor.enum_types = [
            describe_enum(getattr(message_definition, name))
            for name in nested_enums]

    return descriptor
[ "def", "describe_message", "(", "message_definition", ")", ":", "message_descriptor", "=", "MessageDescriptor", "(", ")", "message_descriptor", ".", "name", "=", "message_definition", ".", "definition_name", "(", ")", ".", "split", "(", "'.'", ")", "[", "-", "1"...
Build descriptor for Message class. Args: message_definition: Message class to provide descriptor for. Returns: Initialized MessageDescriptor instance describing the Message class.
[ "Build", "descriptor", "for", "Message", "class", "." ]
f3745a7ea535aa0e88b0650c16479b696d6fd446
https://github.com/google/apitools/blob/f3745a7ea535aa0e88b0650c16479b696d6fd446/apitools/base/protorpclite/descriptor.py#L341-L383
train
207,602
google/apitools
apitools/base/protorpclite/descriptor.py
describe_file
def describe_file(module): """Build a file from a specified Python module. Args: module: Python module to describe. Returns: Initialized FileDescriptor instance describing the module. """ descriptor = FileDescriptor() descriptor.package = util.get_package_for_module(module) if not descriptor.package: descriptor.package = None message_descriptors = [] enum_descriptors = [] # Need to iterate over all top level attributes of the module looking for # message and enum definitions. Each definition must be itself described. for name in sorted(dir(module)): value = getattr(module, name) if isinstance(value, type): if issubclass(value, messages.Message): message_descriptors.append(describe_message(value)) elif issubclass(value, messages.Enum): enum_descriptors.append(describe_enum(value)) if message_descriptors: descriptor.message_types = message_descriptors if enum_descriptors: descriptor.enum_types = enum_descriptors return descriptor
python
def describe_file(module):
    """Build a file from a specified Python module.

    Args:
      module: Python module to describe.

    Returns:
      Initialized FileDescriptor instance describing the module.
    """
    file_descriptor = FileDescriptor()
    # Normalize a falsy package name (e.g. empty string) to None.
    package = util.get_package_for_module(module)
    file_descriptor.package = package if package else None

    message_types = []
    enum_types = []

    # Walk all top-level attributes of the module looking for message and
    # enum definitions; each one found is itself described.
    for attr_name in sorted(dir(module)):
        attr = getattr(module, attr_name)
        if not isinstance(attr, type):
            continue
        if issubclass(attr, messages.Message):
            message_types.append(describe_message(attr))
        elif issubclass(attr, messages.Enum):
            enum_types.append(describe_enum(attr))

    if message_types:
        file_descriptor.message_types = message_types
    if enum_types:
        file_descriptor.enum_types = enum_types

    return file_descriptor
[ "def", "describe_file", "(", "module", ")", ":", "descriptor", "=", "FileDescriptor", "(", ")", "descriptor", ".", "package", "=", "util", ".", "get_package_for_module", "(", "module", ")", "if", "not", "descriptor", ".", "package", ":", "descriptor", ".", "...
Build a file from a specified Python module. Args: module: Python module to describe. Returns: Initialized FileDescriptor instance describing the module.
[ "Build", "a", "file", "from", "a", "specified", "Python", "module", "." ]
f3745a7ea535aa0e88b0650c16479b696d6fd446
https://github.com/google/apitools/blob/f3745a7ea535aa0e88b0650c16479b696d6fd446/apitools/base/protorpclite/descriptor.py#L386-L422
train
207,603
google/apitools
apitools/base/protorpclite/descriptor.py
describe_file_set
def describe_file_set(modules): """Build a file set from a specified Python modules. Args: modules: Iterable of Python module to describe. Returns: Initialized FileSet instance describing the modules. """ descriptor = FileSet() file_descriptors = [] for module in modules: file_descriptors.append(describe_file(module)) if file_descriptors: descriptor.files = file_descriptors return descriptor
python
def describe_file_set(modules):
    """Build a file set from a specified Python modules.

    Args:
      modules: Iterable of Python module to describe.

    Returns:
      Initialized FileSet instance describing the modules.
    """
    file_set = FileSet()
    described = [describe_file(module) for module in modules]
    if described:
        # Leave the repeated field unset when no modules were given.
        file_set.files = described
    return file_set
[ "def", "describe_file_set", "(", "modules", ")", ":", "descriptor", "=", "FileSet", "(", ")", "file_descriptors", "=", "[", "]", "for", "module", "in", "modules", ":", "file_descriptors", ".", "append", "(", "describe_file", "(", "module", ")", ")", "if", ...
Build a file set from a specified Python modules. Args: modules: Iterable of Python module to describe. Returns: Initialized FileSet instance describing the modules.
[ "Build", "a", "file", "set", "from", "a", "specified", "Python", "modules", "." ]
f3745a7ea535aa0e88b0650c16479b696d6fd446
https://github.com/google/apitools/blob/f3745a7ea535aa0e88b0650c16479b696d6fd446/apitools/base/protorpclite/descriptor.py#L425-L442
train
207,604
google/apitools
apitools/base/protorpclite/descriptor.py
describe
def describe(value): """Describe any value as a descriptor. Helper function for describing any object with an appropriate descriptor object. Args: value: Value to describe as a descriptor. Returns: Descriptor message class if object is describable as a descriptor, else None. """ if isinstance(value, types.ModuleType): return describe_file(value) elif isinstance(value, messages.Field): return describe_field(value) elif isinstance(value, messages.Enum): return describe_enum_value(value) elif isinstance(value, type): if issubclass(value, messages.Message): return describe_message(value) elif issubclass(value, messages.Enum): return describe_enum(value) return None
python
def describe(value):
    """Describe any value as a descriptor.

    Helper function for describing any object with an appropriate descriptor
    object.

    Args:
      value: Value to describe as a descriptor.

    Returns:
      Descriptor message class if object is describable as a descriptor,
      else None.
    """
    # Dispatch in order of specificity; the checks must stay in this order
    # because an Enum *instance* is a single value, while an Enum *class*
    # is a full enum definition.
    if isinstance(value, types.ModuleType):
        return describe_file(value)
    if isinstance(value, messages.Field):
        return describe_field(value)
    if isinstance(value, messages.Enum):
        return describe_enum_value(value)
    if isinstance(value, type) and issubclass(value, messages.Message):
        return describe_message(value)
    if isinstance(value, type) and issubclass(value, messages.Enum):
        return describe_enum(value)
    return None
[ "def", "describe", "(", "value", ")", ":", "if", "isinstance", "(", "value", ",", "types", ".", "ModuleType", ")", ":", "return", "describe_file", "(", "value", ")", "elif", "isinstance", "(", "value", ",", "messages", ".", "Field", ")", ":", "return", ...
Describe any value as a descriptor. Helper function for describing any object with an appropriate descriptor object. Args: value: Value to describe as a descriptor. Returns: Descriptor message class if object is describable as a descriptor, else None.
[ "Describe", "any", "value", "as", "a", "descriptor", "." ]
f3745a7ea535aa0e88b0650c16479b696d6fd446
https://github.com/google/apitools/blob/f3745a7ea535aa0e88b0650c16479b696d6fd446/apitools/base/protorpclite/descriptor.py#L445-L469
train
207,605
google/apitools
apitools/base/protorpclite/descriptor.py
import_descriptor_loader
def import_descriptor_loader(definition_name, importer=__import__): """Find objects by importing modules as needed. A definition loader is a function that resolves a definition name to a descriptor. The import finder resolves definitions to their names by importing modules when necessary. Args: definition_name: Name of definition to find. importer: Import function used for importing new modules. Returns: Appropriate descriptor for any describable type located by name. Raises: DefinitionNotFoundError when a name does not refer to either a definition or a module. """ # Attempt to import descriptor as a module. if definition_name.startswith('.'): definition_name = definition_name[1:] if not definition_name.startswith('.'): leaf = definition_name.split('.')[-1] if definition_name: try: module = importer(definition_name, '', '', [leaf]) except ImportError: pass else: return describe(module) try: # Attempt to use messages.find_definition to find item. return describe(messages.find_definition(definition_name, importer=__import__)) except messages.DefinitionNotFoundError as err: # There are things that find_definition will not find, but if # the parent is loaded, its children can be searched for a # match. split_name = definition_name.rsplit('.', 1) if len(split_name) > 1: parent, child = split_name try: parent_definition = import_descriptor_loader( parent, importer=importer) except messages.DefinitionNotFoundError: # Fall through to original error. pass else: # Check the parent definition for a matching descriptor. if isinstance(parent_definition, EnumDescriptor): search_list = parent_definition.values or [] elif isinstance(parent_definition, MessageDescriptor): search_list = parent_definition.fields or [] else: search_list = [] for definition in search_list: if definition.name == child: return definition # Still didn't find. Reraise original exception. raise err
python
def import_descriptor_loader(definition_name, importer=__import__): """Find objects by importing modules as needed. A definition loader is a function that resolves a definition name to a descriptor. The import finder resolves definitions to their names by importing modules when necessary. Args: definition_name: Name of definition to find. importer: Import function used for importing new modules. Returns: Appropriate descriptor for any describable type located by name. Raises: DefinitionNotFoundError when a name does not refer to either a definition or a module. """ # Attempt to import descriptor as a module. if definition_name.startswith('.'): definition_name = definition_name[1:] if not definition_name.startswith('.'): leaf = definition_name.split('.')[-1] if definition_name: try: module = importer(definition_name, '', '', [leaf]) except ImportError: pass else: return describe(module) try: # Attempt to use messages.find_definition to find item. return describe(messages.find_definition(definition_name, importer=__import__)) except messages.DefinitionNotFoundError as err: # There are things that find_definition will not find, but if # the parent is loaded, its children can be searched for a # match. split_name = definition_name.rsplit('.', 1) if len(split_name) > 1: parent, child = split_name try: parent_definition = import_descriptor_loader( parent, importer=importer) except messages.DefinitionNotFoundError: # Fall through to original error. pass else: # Check the parent definition for a matching descriptor. if isinstance(parent_definition, EnumDescriptor): search_list = parent_definition.values or [] elif isinstance(parent_definition, MessageDescriptor): search_list = parent_definition.fields or [] else: search_list = [] for definition in search_list: if definition.name == child: return definition # Still didn't find. Reraise original exception. raise err
[ "def", "import_descriptor_loader", "(", "definition_name", ",", "importer", "=", "__import__", ")", ":", "# Attempt to import descriptor as a module.", "if", "definition_name", ".", "startswith", "(", "'.'", ")", ":", "definition_name", "=", "definition_name", "[", "1",...
Find objects by importing modules as needed. A definition loader is a function that resolves a definition name to a descriptor. The import finder resolves definitions to their names by importing modules when necessary. Args: definition_name: Name of definition to find. importer: Import function used for importing new modules. Returns: Appropriate descriptor for any describable type located by name. Raises: DefinitionNotFoundError when a name does not refer to either a definition or a module.
[ "Find", "objects", "by", "importing", "modules", "as", "needed", "." ]
f3745a7ea535aa0e88b0650c16479b696d6fd446
https://github.com/google/apitools/blob/f3745a7ea535aa0e88b0650c16479b696d6fd446/apitools/base/protorpclite/descriptor.py#L473-L537
train
207,606
google/apitools
apitools/base/protorpclite/descriptor.py
DescriptorLibrary.lookup_descriptor
def lookup_descriptor(self, definition_name): """Lookup descriptor by name. Get descriptor from library by name. If descriptor is not found will attempt to find via descriptor loader if provided. Args: definition_name: Definition name to find. Returns: Descriptor that describes definition name. Raises: DefinitionNotFoundError if not descriptor exists for definition name. """ try: return self.__descriptors[definition_name] except KeyError: pass if self.__descriptor_loader: definition = self.__descriptor_loader(definition_name) self.__descriptors[definition_name] = definition return definition else: raise messages.DefinitionNotFoundError( 'Could not find definition for %s' % definition_name)
python
def lookup_descriptor(self, definition_name):
    """Lookup descriptor by name.

    Get descriptor from library by name.  If descriptor is not found will
    attempt to find via descriptor loader if provided.

    Args:
      definition_name: Definition name to find.

    Returns:
      Descriptor that describes definition name.

    Raises:
      DefinitionNotFoundError if not descriptor exists for definition name.
    """
    # Sentinel distinguishes "not cached yet" from a cached None result.
    absent = object()
    cached = self.__descriptors.get(definition_name, absent)
    if cached is not absent:
        return cached

    if not self.__descriptor_loader:
        raise messages.DefinitionNotFoundError(
            'Could not find definition for %s' % definition_name)

    # Resolve via the loader and memoize the result for future lookups.
    descriptor = self.__descriptor_loader(definition_name)
    self.__descriptors[definition_name] = descriptor
    return descriptor
[ "def", "lookup_descriptor", "(", "self", ",", "definition_name", ")", ":", "try", ":", "return", "self", ".", "__descriptors", "[", "definition_name", "]", "except", "KeyError", ":", "pass", "if", "self", ".", "__descriptor_loader", ":", "definition", "=", "se...
Lookup descriptor by name. Get descriptor from library by name. If descriptor is not found will attempt to find via descriptor loader if provided. Args: definition_name: Definition name to find. Returns: Descriptor that describes definition name. Raises: DefinitionNotFoundError if not descriptor exists for definition name.
[ "Lookup", "descriptor", "by", "name", "." ]
f3745a7ea535aa0e88b0650c16479b696d6fd446
https://github.com/google/apitools/blob/f3745a7ea535aa0e88b0650c16479b696d6fd446/apitools/base/protorpclite/descriptor.py#L568-L594
train
207,607
google/apitools
apitools/base/protorpclite/descriptor.py
DescriptorLibrary.lookup_package
def lookup_package(self, definition_name): """Determines the package name for any definition. Determine the package that any definition name belongs to. May check parent for package name and will resolve missing descriptors if provided descriptor loader. Args: definition_name: Definition name to find package for. """ while True: descriptor = self.lookup_descriptor(definition_name) if isinstance(descriptor, FileDescriptor): return descriptor.package else: index = definition_name.rfind('.') if index < 0: return None definition_name = definition_name[:index]
python
def lookup_package(self, definition_name):
    """Determines the package name for any definition.

    Determine the package that any definition name belongs to.  May check
    parent for package name and will resolve missing descriptors if
    provided descriptor loader.

    Args:
      definition_name: Definition name to find package for.
    """
    # Walk up the dotted name until a FileDescriptor (module) is reached;
    # its package is the answer.  No dot left means no enclosing module.
    name = definition_name
    while True:
        descriptor = self.lookup_descriptor(name)
        if isinstance(descriptor, FileDescriptor):
            return descriptor.package
        name, sep, _ = name.rpartition('.')
        if not sep:
            return None
[ "def", "lookup_package", "(", "self", ",", "definition_name", ")", ":", "while", "True", ":", "descriptor", "=", "self", ".", "lookup_descriptor", "(", "definition_name", ")", "if", "isinstance", "(", "descriptor", ",", "FileDescriptor", ")", ":", "return", "d...
Determines the package name for any definition. Determine the package that any definition name belongs to. May check parent for package name and will resolve missing descriptors if provided descriptor loader. Args: definition_name: Definition name to find package for.
[ "Determines", "the", "package", "name", "for", "any", "definition", "." ]
f3745a7ea535aa0e88b0650c16479b696d6fd446
https://github.com/google/apitools/blob/f3745a7ea535aa0e88b0650c16479b696d6fd446/apitools/base/protorpclite/descriptor.py#L596-L615
train
207,608
google/apitools
apitools/base/protorpclite/protojson.py
_load_json_module
def _load_json_module(): """Try to load a valid json module. There are more than one json modules that might be installed. They are mostly compatible with one another but some versions may be different. This function attempts to load various json modules in a preferred order. It does a basic check to guess if a loaded version of json is compatible. Returns: Compatible json module. Raises: ImportError if there are no json modules or the loaded json module is not compatible with ProtoRPC. """ first_import_error = None for module_name in ['json', 'simplejson']: try: module = __import__(module_name, {}, {}, 'json') if not hasattr(module, 'JSONEncoder'): message = ( 'json library "%s" is not compatible with ProtoRPC' % module_name) logging.warning(message) raise ImportError(message) else: return module except ImportError as err: if not first_import_error: first_import_error = err logging.error('Must use valid json library (json or simplejson)') raise first_import_error
python
def _load_json_module(): """Try to load a valid json module. There are more than one json modules that might be installed. They are mostly compatible with one another but some versions may be different. This function attempts to load various json modules in a preferred order. It does a basic check to guess if a loaded version of json is compatible. Returns: Compatible json module. Raises: ImportError if there are no json modules or the loaded json module is not compatible with ProtoRPC. """ first_import_error = None for module_name in ['json', 'simplejson']: try: module = __import__(module_name, {}, {}, 'json') if not hasattr(module, 'JSONEncoder'): message = ( 'json library "%s" is not compatible with ProtoRPC' % module_name) logging.warning(message) raise ImportError(message) else: return module except ImportError as err: if not first_import_error: first_import_error = err logging.error('Must use valid json library (json or simplejson)') raise first_import_error
[ "def", "_load_json_module", "(", ")", ":", "first_import_error", "=", "None", "for", "module_name", "in", "[", "'json'", ",", "'simplejson'", "]", ":", "try", ":", "module", "=", "__import__", "(", "module_name", ",", "{", "}", ",", "{", "}", ",", "'json...
Try to load a valid json module. There are more than one json modules that might be installed. They are mostly compatible with one another but some versions may be different. This function attempts to load various json modules in a preferred order. It does a basic check to guess if a loaded version of json is compatible. Returns: Compatible json module. Raises: ImportError if there are no json modules or the loaded json module is not compatible with ProtoRPC.
[ "Try", "to", "load", "a", "valid", "json", "module", "." ]
f3745a7ea535aa0e88b0650c16479b696d6fd446
https://github.com/google/apitools/blob/f3745a7ea535aa0e88b0650c16479b696d6fd446/apitools/base/protorpclite/protojson.py#L47-L80
train
207,609
google/apitools
apitools/base/protorpclite/protojson.py
MessageJSONEncoder.default
def default(self, value): """Return dictionary instance from a message object. Args: value: Value to get dictionary for. If not encodable, will call superclasses default method. """ if isinstance(value, messages.Enum): return str(value) if six.PY3 and isinstance(value, bytes): return value.decode('utf8') if isinstance(value, messages.Message): result = {} for field in value.all_fields(): item = value.get_assigned_value(field.name) if item not in (None, [], ()): result[field.name] = ( self.__protojson_protocol.encode_field(field, item)) # Handle unrecognized fields, so they're included when a message is # decoded then encoded. for unknown_key in value.all_unrecognized_fields(): unrecognized_field, _ = value.get_unrecognized_field_info( unknown_key) # Unknown fields are not encoded as they should have been # processed before we get to here. result[unknown_key] = unrecognized_field return result return super(MessageJSONEncoder, self).default(value)
python
def default(self, value):
    """Return dictionary instance from a message object.

    Args:
      value: Value to get dictionary for.  If not encodable, will
        call superclasses default method.
    """
    if isinstance(value, messages.Enum):
        return str(value)

    if six.PY3 and isinstance(value, bytes):
        return value.decode('utf8')

    if not isinstance(value, messages.Message):
        # Not a type we know; defer to the base JSON encoder.
        return super(MessageJSONEncoder, self).default(value)

    encoded = {}
    for field in value.all_fields():
        assigned = value.get_assigned_value(field.name)
        # Skip unset and empty repeated values entirely.
        if assigned not in (None, [], ()):
            encoded[field.name] = (
                self.__protojson_protocol.encode_field(field, assigned))

    # Handle unrecognized fields, so they're included when a message is
    # decoded then encoded.  They are emitted as-is because they should
    # have been processed before reaching this point.
    for unknown_key in value.all_unrecognized_fields():
        unrecognized_field, _ = value.get_unrecognized_field_info(
            unknown_key)
        encoded[unknown_key] = unrecognized_field

    return encoded
[ "def", "default", "(", "self", ",", "value", ")", ":", "if", "isinstance", "(", "value", ",", "messages", ".", "Enum", ")", ":", "return", "str", "(", "value", ")", "if", "six", ".", "PY3", "and", "isinstance", "(", "value", ",", "bytes", ")", ":",...
Return dictionary instance from a message object. Args: value: Value to get dictionary for. If not encodable, will call superclasses default method.
[ "Return", "dictionary", "instance", "from", "a", "message", "object", "." ]
f3745a7ea535aa0e88b0650c16479b696d6fd446
https://github.com/google/apitools/blob/f3745a7ea535aa0e88b0650c16479b696d6fd446/apitools/base/protorpclite/protojson.py#L103-L133
train
207,610
google/apitools
apitools/base/protorpclite/protojson.py
ProtoJson.encode_message
def encode_message(self, message): """Encode Message instance to JSON string. Args: Message instance to encode in to JSON string. Returns: String encoding of Message instance in protocol JSON format. Raises: messages.ValidationError if message is not initialized. """ message.check_initialized() return json.dumps(message, cls=MessageJSONEncoder, protojson_protocol=self)
python
def encode_message(self, message):
    """Encode Message instance to JSON string.

    Args:
      message: Message instance to encode in to JSON string.

    Returns:
      String encoding of Message instance in protocol JSON format.

    Raises:
      messages.ValidationError if message is not initialized.
    """
    # Refuse to serialize a message whose required fields are unset.
    message.check_initialized()

    encoded = json.dumps(message,
                         cls=MessageJSONEncoder,
                         protojson_protocol=self)
    return encoded
[ "def", "encode_message", "(", "self", ",", "message", ")", ":", "message", ".", "check_initialized", "(", ")", "return", "json", ".", "dumps", "(", "message", ",", "cls", "=", "MessageJSONEncoder", ",", "protojson_protocol", "=", "self", ")" ]
Encode Message instance to JSON string. Args: Message instance to encode in to JSON string. Returns: String encoding of Message instance in protocol JSON format. Raises: messages.ValidationError if message is not initialized.
[ "Encode", "Message", "instance", "to", "JSON", "string", "." ]
f3745a7ea535aa0e88b0650c16479b696d6fd446
https://github.com/google/apitools/blob/f3745a7ea535aa0e88b0650c16479b696d6fd446/apitools/base/protorpclite/protojson.py#L178-L193
train
207,611
google/apitools
apitools/base/protorpclite/protojson.py
ProtoJson.decode_message
def decode_message(self, message_type, encoded_message): """Merge JSON structure to Message instance. Args: message_type: Message to decode data to. encoded_message: JSON encoded version of message. Returns: Decoded instance of message_type. Raises: ValueError: If encoded_message is not valid JSON. messages.ValidationError if merged message is not initialized. """ encoded_message = six.ensure_str(encoded_message) if not encoded_message.strip(): return message_type() dictionary = json.loads(encoded_message) message = self.__decode_dictionary(message_type, dictionary) message.check_initialized() return message
python
def decode_message(self, message_type, encoded_message): """Merge JSON structure to Message instance. Args: message_type: Message to decode data to. encoded_message: JSON encoded version of message. Returns: Decoded instance of message_type. Raises: ValueError: If encoded_message is not valid JSON. messages.ValidationError if merged message is not initialized. """ encoded_message = six.ensure_str(encoded_message) if not encoded_message.strip(): return message_type() dictionary = json.loads(encoded_message) message = self.__decode_dictionary(message_type, dictionary) message.check_initialized() return message
[ "def", "decode_message", "(", "self", ",", "message_type", ",", "encoded_message", ")", ":", "encoded_message", "=", "six", ".", "ensure_str", "(", "encoded_message", ")", "if", "not", "encoded_message", ".", "strip", "(", ")", ":", "return", "message_type", "...
Merge JSON structure to Message instance. Args: message_type: Message to decode data to. encoded_message: JSON encoded version of message. Returns: Decoded instance of message_type. Raises: ValueError: If encoded_message is not valid JSON. messages.ValidationError if merged message is not initialized.
[ "Merge", "JSON", "structure", "to", "Message", "instance", "." ]
f3745a7ea535aa0e88b0650c16479b696d6fd446
https://github.com/google/apitools/blob/f3745a7ea535aa0e88b0650c16479b696d6fd446/apitools/base/protorpclite/protojson.py#L195-L216
train
207,612
google/apitools
apitools/base/protorpclite/protojson.py
ProtoJson.__find_variant
def __find_variant(self, value): """Find the messages.Variant type that describes this value. Args: value: The value whose variant type is being determined. Returns: The messages.Variant value that best describes value's type, or None if it's a type we don't know how to handle. """ if isinstance(value, bool): return messages.Variant.BOOL elif isinstance(value, six.integer_types): return messages.Variant.INT64 elif isinstance(value, float): return messages.Variant.DOUBLE elif isinstance(value, six.string_types): return messages.Variant.STRING elif isinstance(value, (list, tuple)): # Find the most specific variant that covers all elements. variant_priority = [None, messages.Variant.INT64, messages.Variant.DOUBLE, messages.Variant.STRING] chosen_priority = 0 for v in value: variant = self.__find_variant(v) try: priority = variant_priority.index(variant) except IndexError: priority = -1 if priority > chosen_priority: chosen_priority = priority return variant_priority[chosen_priority] # Unrecognized type. return None
python
def __find_variant(self, value): """Find the messages.Variant type that describes this value. Args: value: The value whose variant type is being determined. Returns: The messages.Variant value that best describes value's type, or None if it's a type we don't know how to handle. """ if isinstance(value, bool): return messages.Variant.BOOL elif isinstance(value, six.integer_types): return messages.Variant.INT64 elif isinstance(value, float): return messages.Variant.DOUBLE elif isinstance(value, six.string_types): return messages.Variant.STRING elif isinstance(value, (list, tuple)): # Find the most specific variant that covers all elements. variant_priority = [None, messages.Variant.INT64, messages.Variant.DOUBLE, messages.Variant.STRING] chosen_priority = 0 for v in value: variant = self.__find_variant(v) try: priority = variant_priority.index(variant) except IndexError: priority = -1 if priority > chosen_priority: chosen_priority = priority return variant_priority[chosen_priority] # Unrecognized type. return None
[ "def", "__find_variant", "(", "self", ",", "value", ")", ":", "if", "isinstance", "(", "value", ",", "bool", ")", ":", "return", "messages", ".", "Variant", ".", "BOOL", "elif", "isinstance", "(", "value", ",", "six", ".", "integer_types", ")", ":", "r...
Find the messages.Variant type that describes this value. Args: value: The value whose variant type is being determined. Returns: The messages.Variant value that best describes value's type, or None if it's a type we don't know how to handle.
[ "Find", "the", "messages", ".", "Variant", "type", "that", "describes", "this", "value", "." ]
f3745a7ea535aa0e88b0650c16479b696d6fd446
https://github.com/google/apitools/blob/f3745a7ea535aa0e88b0650c16479b696d6fd446/apitools/base/protorpclite/protojson.py#L218-L254
train
207,613
google/apitools
apitools/base/protorpclite/protojson.py
ProtoJson.__decode_dictionary
def __decode_dictionary(self, message_type, dictionary): """Merge dictionary in to message. Args: message: Message to merge dictionary in to. dictionary: Dictionary to extract information from. Dictionary is as parsed from JSON. Nested objects will also be dictionaries. """ message = message_type() for key, value in six.iteritems(dictionary): if value is None: try: message.reset(key) except AttributeError: pass # This is an unrecognized field, skip it. continue try: field = message.field_by_name(key) except KeyError: # Save unknown values. variant = self.__find_variant(value) if variant: message.set_unrecognized_field(key, value, variant) continue if field.repeated: # This should be unnecessary? Or in fact become an error. if not isinstance(value, list): value = [value] valid_value = [self.decode_field(field, item) for item in value] setattr(message, field.name, valid_value) continue # This is just for consistency with the old behavior. if value == []: continue try: setattr(message, field.name, self.decode_field(field, value)) except messages.DecodeError: # Save unknown enum values. if not isinstance(field, messages.EnumField): raise variant = self.__find_variant(value) if variant: message.set_unrecognized_field(key, value, variant) return message
python
def __decode_dictionary(self, message_type, dictionary): """Merge dictionary in to message. Args: message: Message to merge dictionary in to. dictionary: Dictionary to extract information from. Dictionary is as parsed from JSON. Nested objects will also be dictionaries. """ message = message_type() for key, value in six.iteritems(dictionary): if value is None: try: message.reset(key) except AttributeError: pass # This is an unrecognized field, skip it. continue try: field = message.field_by_name(key) except KeyError: # Save unknown values. variant = self.__find_variant(value) if variant: message.set_unrecognized_field(key, value, variant) continue if field.repeated: # This should be unnecessary? Or in fact become an error. if not isinstance(value, list): value = [value] valid_value = [self.decode_field(field, item) for item in value] setattr(message, field.name, valid_value) continue # This is just for consistency with the old behavior. if value == []: continue try: setattr(message, field.name, self.decode_field(field, value)) except messages.DecodeError: # Save unknown enum values. if not isinstance(field, messages.EnumField): raise variant = self.__find_variant(value) if variant: message.set_unrecognized_field(key, value, variant) return message
[ "def", "__decode_dictionary", "(", "self", ",", "message_type", ",", "dictionary", ")", ":", "message", "=", "message_type", "(", ")", "for", "key", ",", "value", "in", "six", ".", "iteritems", "(", "dictionary", ")", ":", "if", "value", "is", "None", ":...
Merge dictionary in to message. Args: message: Message to merge dictionary in to. dictionary: Dictionary to extract information from. Dictionary is as parsed from JSON. Nested objects will also be dictionaries.
[ "Merge", "dictionary", "in", "to", "message", "." ]
f3745a7ea535aa0e88b0650c16479b696d6fd446
https://github.com/google/apitools/blob/f3745a7ea535aa0e88b0650c16479b696d6fd446/apitools/base/protorpclite/protojson.py#L256-L303
train
207,614
google/apitools
apitools/gen/gen_client_lib.py
DescriptorGenerator.WriteSetupPy
def WriteSetupPy(self, out): """Write a setup.py for upload to PyPI.""" printer = self._GetPrinter(out) year = datetime.datetime.now().year printer('# Copyright %s Google Inc. All Rights Reserved.' % year) printer('#') printer('# Licensed under the Apache License, Version 2.0 (the' '"License");') printer('# you may not use this file except in compliance with ' 'the License.') printer('# You may obtain a copy of the License at') printer('#') printer('# http://www.apache.org/licenses/LICENSE-2.0') printer('#') printer('# Unless required by applicable law or agreed to in writing, ' 'software') printer('# distributed under the License is distributed on an "AS IS" ' 'BASIS,') printer('# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either ' 'express or implied.') printer('# See the License for the specific language governing ' 'permissions and') printer('# limitations under the License.') printer() printer('import setuptools') printer('REQUIREMENTS = [') with printer.Indent(indent=' '): parts = self.apitools_version.split('.') major = parts.pop(0) minor = parts.pop(0) printer('"google-apitools>=%s,~=%s.%s",', self.apitools_version, major, minor) printer('"httplib2>=0.9",') printer('"oauth2client>=1.4.12",') printer(']') printer('_PACKAGE = "apitools.clients.%s"' % self.__package) printer() printer('setuptools.setup(') # TODO(craigcitro): Allow customization of these options. 
with printer.Indent(indent=' '): printer('name="google-apitools-%s-%s",', self.__package, self.__version) printer('version="%s.%s",', self.apitools_version, self.__revision) printer('description="Autogenerated apitools library for %s",' % ( self.__package,)) printer('url="https://github.com/google/apitools",') printer('author="Craig Citro",') printer('author_email="craigcitro@google.com",') printer('packages=setuptools.find_packages(),') printer('install_requires=REQUIREMENTS,') printer('classifiers=[') with printer.Indent(indent=' '): printer('"Programming Language :: Python :: 2.7",') printer('"License :: OSI Approved :: Apache Software ' 'License",') printer('],') printer('license="Apache 2.0",') printer('keywords="apitools apitools-%s %s",' % ( self.__package, self.__package)) printer(')')
python
def WriteSetupPy(self, out): """Write a setup.py for upload to PyPI.""" printer = self._GetPrinter(out) year = datetime.datetime.now().year printer('# Copyright %s Google Inc. All Rights Reserved.' % year) printer('#') printer('# Licensed under the Apache License, Version 2.0 (the' '"License");') printer('# you may not use this file except in compliance with ' 'the License.') printer('# You may obtain a copy of the License at') printer('#') printer('# http://www.apache.org/licenses/LICENSE-2.0') printer('#') printer('# Unless required by applicable law or agreed to in writing, ' 'software') printer('# distributed under the License is distributed on an "AS IS" ' 'BASIS,') printer('# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either ' 'express or implied.') printer('# See the License for the specific language governing ' 'permissions and') printer('# limitations under the License.') printer() printer('import setuptools') printer('REQUIREMENTS = [') with printer.Indent(indent=' '): parts = self.apitools_version.split('.') major = parts.pop(0) minor = parts.pop(0) printer('"google-apitools>=%s,~=%s.%s",', self.apitools_version, major, minor) printer('"httplib2>=0.9",') printer('"oauth2client>=1.4.12",') printer(']') printer('_PACKAGE = "apitools.clients.%s"' % self.__package) printer() printer('setuptools.setup(') # TODO(craigcitro): Allow customization of these options. 
with printer.Indent(indent=' '): printer('name="google-apitools-%s-%s",', self.__package, self.__version) printer('version="%s.%s",', self.apitools_version, self.__revision) printer('description="Autogenerated apitools library for %s",' % ( self.__package,)) printer('url="https://github.com/google/apitools",') printer('author="Craig Citro",') printer('author_email="craigcitro@google.com",') printer('packages=setuptools.find_packages(),') printer('install_requires=REQUIREMENTS,') printer('classifiers=[') with printer.Indent(indent=' '): printer('"Programming Language :: Python :: 2.7",') printer('"License :: OSI Approved :: Apache Software ' 'License",') printer('],') printer('license="Apache 2.0",') printer('keywords="apitools apitools-%s %s",' % ( self.__package, self.__package)) printer(')')
[ "def", "WriteSetupPy", "(", "self", ",", "out", ")", ":", "printer", "=", "self", ".", "_GetPrinter", "(", "out", ")", "year", "=", "datetime", ".", "datetime", ".", "now", "(", ")", ".", "year", "printer", "(", "'# Copyright %s Google Inc. All Rights Reserv...
Write a setup.py for upload to PyPI.
[ "Write", "a", "setup", ".", "py", "for", "upload", "to", "PyPI", "." ]
f3745a7ea535aa0e88b0650c16479b696d6fd446
https://github.com/google/apitools/blob/f3745a7ea535aa0e88b0650c16479b696d6fd446/apitools/gen/gen_client_lib.py#L195-L255
train
207,615
google/apitools
apitools/base/py/transfer.py
DownloadProgressPrinter
def DownloadProgressPrinter(response, unused_download): """Print download progress based on response.""" if 'content-range' in response.info: print('Received %s' % response.info['content-range']) else: print('Received %d bytes' % response.length)
python
def DownloadProgressPrinter(response, unused_download): """Print download progress based on response.""" if 'content-range' in response.info: print('Received %s' % response.info['content-range']) else: print('Received %d bytes' % response.length)
[ "def", "DownloadProgressPrinter", "(", "response", ",", "unused_download", ")", ":", "if", "'content-range'", "in", "response", ".", "info", ":", "print", "(", "'Received %s'", "%", "response", ".", "info", "[", "'content-range'", "]", ")", "else", ":", "print...
Print download progress based on response.
[ "Print", "download", "progress", "based", "on", "response", "." ]
f3745a7ea535aa0e88b0650c16479b696d6fd446
https://github.com/google/apitools/blob/f3745a7ea535aa0e88b0650c16479b696d6fd446/apitools/base/py/transfer.py#L55-L60
train
207,616
google/apitools
apitools/base/py/transfer.py
_Transfer._Initialize
def _Initialize(self, http, url): """Initialize this download by setting self.http and self.url. We want the user to be able to override self.http by having set the value in the constructor; in that case, we ignore the provided http. Args: http: An httplib2.Http instance or None. url: The url for this transfer. Returns: None. Initializes self. """ self.EnsureUninitialized() if self.http is None: self.__http = http or http_wrapper.GetHttp() self.__url = url
python
def _Initialize(self, http, url): """Initialize this download by setting self.http and self.url. We want the user to be able to override self.http by having set the value in the constructor; in that case, we ignore the provided http. Args: http: An httplib2.Http instance or None. url: The url for this transfer. Returns: None. Initializes self. """ self.EnsureUninitialized() if self.http is None: self.__http = http or http_wrapper.GetHttp() self.__url = url
[ "def", "_Initialize", "(", "self", ",", "http", ",", "url", ")", ":", "self", ".", "EnsureUninitialized", "(", ")", "if", "self", ".", "http", "is", "None", ":", "self", ".", "__http", "=", "http", "or", "http_wrapper", ".", "GetHttp", "(", ")", "sel...
Initialize this download by setting self.http and self.url. We want the user to be able to override self.http by having set the value in the constructor; in that case, we ignore the provided http. Args: http: An httplib2.Http instance or None. url: The url for this transfer. Returns: None. Initializes self.
[ "Initialize", "this", "download", "by", "setting", "self", ".", "http", "and", "self", ".", "url", "." ]
f3745a7ea535aa0e88b0650c16479b696d6fd446
https://github.com/google/apitools/blob/f3745a7ea535aa0e88b0650c16479b696d6fd446/apitools/base/py/transfer.py#L138-L155
train
207,617
google/apitools
apitools/base/py/transfer.py
Download.FromFile
def FromFile(cls, filename, overwrite=False, auto_transfer=True, **kwds): """Create a new download object from a filename.""" path = os.path.expanduser(filename) if os.path.exists(path) and not overwrite: raise exceptions.InvalidUserInputError( 'File %s exists and overwrite not specified' % path) return cls(open(path, 'wb'), close_stream=True, auto_transfer=auto_transfer, **kwds)
python
def FromFile(cls, filename, overwrite=False, auto_transfer=True, **kwds): """Create a new download object from a filename.""" path = os.path.expanduser(filename) if os.path.exists(path) and not overwrite: raise exceptions.InvalidUserInputError( 'File %s exists and overwrite not specified' % path) return cls(open(path, 'wb'), close_stream=True, auto_transfer=auto_transfer, **kwds)
[ "def", "FromFile", "(", "cls", ",", "filename", ",", "overwrite", "=", "False", ",", "auto_transfer", "=", "True", ",", "*", "*", "kwds", ")", ":", "path", "=", "os", ".", "path", ".", "expanduser", "(", "filename", ")", "if", "os", ".", "path", "....
Create a new download object from a filename.
[ "Create", "a", "new", "download", "object", "from", "a", "filename", "." ]
f3745a7ea535aa0e88b0650c16479b696d6fd446
https://github.com/google/apitools/blob/f3745a7ea535aa0e88b0650c16479b696d6fd446/apitools/base/py/transfer.py#L222-L229
train
207,618
google/apitools
apitools/base/py/transfer.py
Download.FromStream
def FromStream(cls, stream, auto_transfer=True, total_size=None, **kwds): """Create a new Download object from a stream.""" return cls(stream, auto_transfer=auto_transfer, total_size=total_size, **kwds)
python
def FromStream(cls, stream, auto_transfer=True, total_size=None, **kwds): """Create a new Download object from a stream.""" return cls(stream, auto_transfer=auto_transfer, total_size=total_size, **kwds)
[ "def", "FromStream", "(", "cls", ",", "stream", ",", "auto_transfer", "=", "True", ",", "total_size", "=", "None", ",", "*", "*", "kwds", ")", ":", "return", "cls", "(", "stream", ",", "auto_transfer", "=", "auto_transfer", ",", "total_size", "=", "total...
Create a new Download object from a stream.
[ "Create", "a", "new", "Download", "object", "from", "a", "stream", "." ]
f3745a7ea535aa0e88b0650c16479b696d6fd446
https://github.com/google/apitools/blob/f3745a7ea535aa0e88b0650c16479b696d6fd446/apitools/base/py/transfer.py#L232-L235
train
207,619
google/apitools
apitools/base/py/transfer.py
Download.FromData
def FromData(cls, stream, json_data, http=None, auto_transfer=None, **kwds): """Create a new Download object from a stream and serialized data.""" info = json.loads(json_data) missing_keys = cls._REQUIRED_SERIALIZATION_KEYS - set(info.keys()) if missing_keys: raise exceptions.InvalidDataError( 'Invalid serialization data, missing keys: %s' % ( ', '.join(missing_keys))) download = cls.FromStream(stream, **kwds) if auto_transfer is not None: download.auto_transfer = auto_transfer else: download.auto_transfer = info['auto_transfer'] setattr(download, '_Download__progress', info['progress']) setattr(download, '_Download__total_size', info['total_size']) download._Initialize( # pylint: disable=protected-access http, info['url']) return download
python
def FromData(cls, stream, json_data, http=None, auto_transfer=None, **kwds): """Create a new Download object from a stream and serialized data.""" info = json.loads(json_data) missing_keys = cls._REQUIRED_SERIALIZATION_KEYS - set(info.keys()) if missing_keys: raise exceptions.InvalidDataError( 'Invalid serialization data, missing keys: %s' % ( ', '.join(missing_keys))) download = cls.FromStream(stream, **kwds) if auto_transfer is not None: download.auto_transfer = auto_transfer else: download.auto_transfer = info['auto_transfer'] setattr(download, '_Download__progress', info['progress']) setattr(download, '_Download__total_size', info['total_size']) download._Initialize( # pylint: disable=protected-access http, info['url']) return download
[ "def", "FromData", "(", "cls", ",", "stream", ",", "json_data", ",", "http", "=", "None", ",", "auto_transfer", "=", "None", ",", "*", "*", "kwds", ")", ":", "info", "=", "json", ".", "loads", "(", "json_data", ")", "missing_keys", "=", "cls", ".", ...
Create a new Download object from a stream and serialized data.
[ "Create", "a", "new", "Download", "object", "from", "a", "stream", "and", "serialized", "data", "." ]
f3745a7ea535aa0e88b0650c16479b696d6fd446
https://github.com/google/apitools/blob/f3745a7ea535aa0e88b0650c16479b696d6fd446/apitools/base/py/transfer.py#L238-L256
train
207,620
google/apitools
apitools/base/py/transfer.py
Download.__SetTotal
def __SetTotal(self, info): """Sets the total size based off info if possible otherwise 0.""" if 'content-range' in info: _, _, total = info['content-range'].rpartition('/') if total != '*': self.__total_size = int(total) # Note "total_size is None" means we don't know it; if no size # info was returned on our initial range request, that means we # have a 0-byte file. (That last statement has been verified # empirically, but is not clearly documented anywhere.) if self.total_size is None: self.__total_size = 0
python
def __SetTotal(self, info): """Sets the total size based off info if possible otherwise 0.""" if 'content-range' in info: _, _, total = info['content-range'].rpartition('/') if total != '*': self.__total_size = int(total) # Note "total_size is None" means we don't know it; if no size # info was returned on our initial range request, that means we # have a 0-byte file. (That last statement has been verified # empirically, but is not clearly documented anywhere.) if self.total_size is None: self.__total_size = 0
[ "def", "__SetTotal", "(", "self", ",", "info", ")", ":", "if", "'content-range'", "in", "info", ":", "_", ",", "_", ",", "total", "=", "info", "[", "'content-range'", "]", ".", "rpartition", "(", "'/'", ")", "if", "total", "!=", "'*'", ":", "self", ...
Sets the total size based off info if possible otherwise 0.
[ "Sets", "the", "total", "size", "based", "off", "info", "if", "possible", "otherwise", "0", "." ]
f3745a7ea535aa0e88b0650c16479b696d6fd446
https://github.com/google/apitools/blob/f3745a7ea535aa0e88b0650c16479b696d6fd446/apitools/base/py/transfer.py#L286-L297
train
207,621
google/apitools
apitools/base/py/transfer.py
Download.InitializeDownload
def InitializeDownload(self, http_request, http=None, client=None): """Initialize this download by making a request. Args: http_request: The HttpRequest to use to initialize this download. http: The httplib2.Http instance for this request. client: If provided, let this client process the final URL before sending any additional requests. If client is provided and http is not, client.http will be used instead. """ self.EnsureUninitialized() if http is None and client is None: raise exceptions.UserError('Must provide client or http.') http = http or client.http if client is not None: http_request.url = client.FinalizeTransferUrl(http_request.url) url = http_request.url if self.auto_transfer: end_byte = self.__ComputeEndByte(0) self.__SetRangeHeader(http_request, 0, end_byte) response = http_wrapper.MakeRequest( self.bytes_http or http, http_request) if response.status_code not in self._ACCEPTABLE_STATUSES: raise exceptions.HttpError.FromResponse(response) self.__initial_response = response self.__SetTotal(response.info) url = response.info.get('content-location', response.request_url) if client is not None: url = client.FinalizeTransferUrl(url) self._Initialize(http, url) # Unless the user has requested otherwise, we want to just # go ahead and pump the bytes now. if self.auto_transfer: self.StreamInChunks()
python
def InitializeDownload(self, http_request, http=None, client=None): """Initialize this download by making a request. Args: http_request: The HttpRequest to use to initialize this download. http: The httplib2.Http instance for this request. client: If provided, let this client process the final URL before sending any additional requests. If client is provided and http is not, client.http will be used instead. """ self.EnsureUninitialized() if http is None and client is None: raise exceptions.UserError('Must provide client or http.') http = http or client.http if client is not None: http_request.url = client.FinalizeTransferUrl(http_request.url) url = http_request.url if self.auto_transfer: end_byte = self.__ComputeEndByte(0) self.__SetRangeHeader(http_request, 0, end_byte) response = http_wrapper.MakeRequest( self.bytes_http or http, http_request) if response.status_code not in self._ACCEPTABLE_STATUSES: raise exceptions.HttpError.FromResponse(response) self.__initial_response = response self.__SetTotal(response.info) url = response.info.get('content-location', response.request_url) if client is not None: url = client.FinalizeTransferUrl(url) self._Initialize(http, url) # Unless the user has requested otherwise, we want to just # go ahead and pump the bytes now. if self.auto_transfer: self.StreamInChunks()
[ "def", "InitializeDownload", "(", "self", ",", "http_request", ",", "http", "=", "None", ",", "client", "=", "None", ")", ":", "self", ".", "EnsureUninitialized", "(", ")", "if", "http", "is", "None", "and", "client", "is", "None", ":", "raise", "excepti...
Initialize this download by making a request. Args: http_request: The HttpRequest to use to initialize this download. http: The httplib2.Http instance for this request. client: If provided, let this client process the final URL before sending any additional requests. If client is provided and http is not, client.http will be used instead.
[ "Initialize", "this", "download", "by", "making", "a", "request", "." ]
f3745a7ea535aa0e88b0650c16479b696d6fd446
https://github.com/google/apitools/blob/f3745a7ea535aa0e88b0650c16479b696d6fd446/apitools/base/py/transfer.py#L299-L332
train
207,622
google/apitools
apitools/base/py/transfer.py
Download.__NormalizeStartEnd
def __NormalizeStartEnd(self, start, end=None): """Normalizes start and end values based on total size.""" if end is not None: if start < 0: raise exceptions.TransferInvalidError( 'Cannot have end index with negative start index ' + '[start=%d, end=%d]' % (start, end)) elif start >= self.total_size: raise exceptions.TransferInvalidError( 'Cannot have start index greater than total size ' + '[start=%d, total_size=%d]' % (start, self.total_size)) end = min(end, self.total_size - 1) if end < start: raise exceptions.TransferInvalidError( 'Range requested with end[%s] < start[%s]' % (end, start)) return start, end else: if start < 0: start = max(0, start + self.total_size) return start, self.total_size - 1
python
def __NormalizeStartEnd(self, start, end=None): """Normalizes start and end values based on total size.""" if end is not None: if start < 0: raise exceptions.TransferInvalidError( 'Cannot have end index with negative start index ' + '[start=%d, end=%d]' % (start, end)) elif start >= self.total_size: raise exceptions.TransferInvalidError( 'Cannot have start index greater than total size ' + '[start=%d, total_size=%d]' % (start, self.total_size)) end = min(end, self.total_size - 1) if end < start: raise exceptions.TransferInvalidError( 'Range requested with end[%s] < start[%s]' % (end, start)) return start, end else: if start < 0: start = max(0, start + self.total_size) return start, self.total_size - 1
[ "def", "__NormalizeStartEnd", "(", "self", ",", "start", ",", "end", "=", "None", ")", ":", "if", "end", "is", "not", "None", ":", "if", "start", "<", "0", ":", "raise", "exceptions", ".", "TransferInvalidError", "(", "'Cannot have end index with negative star...
Normalizes start and end values based on total size.
[ "Normalizes", "start", "and", "end", "values", "based", "on", "total", "size", "." ]
f3745a7ea535aa0e88b0650c16479b696d6fd446
https://github.com/google/apitools/blob/f3745a7ea535aa0e88b0650c16479b696d6fd446/apitools/base/py/transfer.py#L334-L353
train
207,623
google/apitools
apitools/base/py/transfer.py
Download.__ComputeEndByte
def __ComputeEndByte(self, start, end=None, use_chunks=True): """Compute the last byte to fetch for this request. This is all based on the HTTP spec for Range and Content-Range. Note that this is potentially confusing in several ways: * the value for the last byte is 0-based, eg "fetch 10 bytes from the beginning" would return 9 here. * if we have no information about size, and don't want to use the chunksize, we'll return None. See the tests for more examples. Args: start: byte to start at. end: (int or None, default: None) Suggested last byte. use_chunks: (bool, default: True) If False, ignore self.chunksize. Returns: Last byte to use in a Range header, or None. """ end_byte = end if start < 0 and not self.total_size: return end_byte if use_chunks: alternate = start + self.chunksize - 1 if end_byte is not None: end_byte = min(end_byte, alternate) else: end_byte = alternate if self.total_size: alternate = self.total_size - 1 if end_byte is not None: end_byte = min(end_byte, alternate) else: end_byte = alternate return end_byte
python
def __ComputeEndByte(self, start, end=None, use_chunks=True): """Compute the last byte to fetch for this request. This is all based on the HTTP spec for Range and Content-Range. Note that this is potentially confusing in several ways: * the value for the last byte is 0-based, eg "fetch 10 bytes from the beginning" would return 9 here. * if we have no information about size, and don't want to use the chunksize, we'll return None. See the tests for more examples. Args: start: byte to start at. end: (int or None, default: None) Suggested last byte. use_chunks: (bool, default: True) If False, ignore self.chunksize. Returns: Last byte to use in a Range header, or None. """ end_byte = end if start < 0 and not self.total_size: return end_byte if use_chunks: alternate = start + self.chunksize - 1 if end_byte is not None: end_byte = min(end_byte, alternate) else: end_byte = alternate if self.total_size: alternate = self.total_size - 1 if end_byte is not None: end_byte = min(end_byte, alternate) else: end_byte = alternate return end_byte
[ "def", "__ComputeEndByte", "(", "self", ",", "start", ",", "end", "=", "None", ",", "use_chunks", "=", "True", ")", ":", "end_byte", "=", "end", "if", "start", "<", "0", "and", "not", "self", ".", "total_size", ":", "return", "end_byte", "if", "use_chu...
Compute the last byte to fetch for this request. This is all based on the HTTP spec for Range and Content-Range. Note that this is potentially confusing in several ways: * the value for the last byte is 0-based, eg "fetch 10 bytes from the beginning" would return 9 here. * if we have no information about size, and don't want to use the chunksize, we'll return None. See the tests for more examples. Args: start: byte to start at. end: (int or None, default: None) Suggested last byte. use_chunks: (bool, default: True) If False, ignore self.chunksize. Returns: Last byte to use in a Range header, or None.
[ "Compute", "the", "last", "byte", "to", "fetch", "for", "this", "request", "." ]
f3745a7ea535aa0e88b0650c16479b696d6fd446
https://github.com/google/apitools/blob/f3745a7ea535aa0e88b0650c16479b696d6fd446/apitools/base/py/transfer.py#L363-L404
train
207,624
google/apitools
apitools/base/py/transfer.py
Download.__GetChunk
def __GetChunk(self, start, end, additional_headers=None): """Retrieve a chunk, and return the full response.""" self.EnsureInitialized() request = http_wrapper.Request(url=self.url) self.__SetRangeHeader(request, start, end=end) if additional_headers is not None: request.headers.update(additional_headers) return http_wrapper.MakeRequest( self.bytes_http, request, retry_func=self.retry_func, retries=self.num_retries)
python
def __GetChunk(self, start, end, additional_headers=None): """Retrieve a chunk, and return the full response.""" self.EnsureInitialized() request = http_wrapper.Request(url=self.url) self.__SetRangeHeader(request, start, end=end) if additional_headers is not None: request.headers.update(additional_headers) return http_wrapper.MakeRequest( self.bytes_http, request, retry_func=self.retry_func, retries=self.num_retries)
[ "def", "__GetChunk", "(", "self", ",", "start", ",", "end", ",", "additional_headers", "=", "None", ")", ":", "self", ".", "EnsureInitialized", "(", ")", "request", "=", "http_wrapper", ".", "Request", "(", "url", "=", "self", ".", "url", ")", "self", ...
Retrieve a chunk, and return the full response.
[ "Retrieve", "a", "chunk", "and", "return", "the", "full", "response", "." ]
f3745a7ea535aa0e88b0650c16479b696d6fd446
https://github.com/google/apitools/blob/f3745a7ea535aa0e88b0650c16479b696d6fd446/apitools/base/py/transfer.py#L406-L415
train
207,625
google/apitools
apitools/base/py/transfer.py
Download.GetRange
def GetRange(self, start, end=None, additional_headers=None, use_chunks=True): """Retrieve a given byte range from this download, inclusive. Range must be of one of these three forms: * 0 <= start, end = None: Fetch from start to the end of the file. * 0 <= start <= end: Fetch the bytes from start to end. * start < 0, end = None: Fetch the last -start bytes of the file. (These variations correspond to those described in the HTTP 1.1 protocol for range headers in RFC 2616, sec. 14.35.1.) Args: start: (int) Where to start fetching bytes. (See above.) end: (int, optional) Where to stop fetching bytes. (See above.) additional_headers: (bool, optional) Any additional headers to pass with the request. use_chunks: (bool, default: True) If False, ignore self.chunksize and fetch this range in a single request. Returns: None. Streams bytes into self.stream. """ self.EnsureInitialized() progress_end_normalized = False if self.total_size is not None: progress, end_byte = self.__NormalizeStartEnd(start, end) progress_end_normalized = True else: progress = start end_byte = end while (not progress_end_normalized or end_byte is None or progress <= end_byte): end_byte = self.__ComputeEndByte(progress, end=end_byte, use_chunks=use_chunks) response = self.__GetChunk(progress, end_byte, additional_headers=additional_headers) if not progress_end_normalized: self.__SetTotal(response.info) progress, end_byte = self.__NormalizeStartEnd(start, end) progress_end_normalized = True response = self.__ProcessResponse(response) progress += response.length if response.length == 0: if response.status_code == http_client.OK: # There can legitimately be no Content-Length header sent # in some cases (e.g., when there's a Transfer-Encoding # header) and if this was a 200 response (as opposed to # 206 Partial Content) we know we're done now without # looping further on received length. return raise exceptions.TransferRetryError( 'Zero bytes unexpectedly returned in download response')
python
def GetRange(self, start, end=None, additional_headers=None, use_chunks=True): """Retrieve a given byte range from this download, inclusive. Range must be of one of these three forms: * 0 <= start, end = None: Fetch from start to the end of the file. * 0 <= start <= end: Fetch the bytes from start to end. * start < 0, end = None: Fetch the last -start bytes of the file. (These variations correspond to those described in the HTTP 1.1 protocol for range headers in RFC 2616, sec. 14.35.1.) Args: start: (int) Where to start fetching bytes. (See above.) end: (int, optional) Where to stop fetching bytes. (See above.) additional_headers: (bool, optional) Any additional headers to pass with the request. use_chunks: (bool, default: True) If False, ignore self.chunksize and fetch this range in a single request. Returns: None. Streams bytes into self.stream. """ self.EnsureInitialized() progress_end_normalized = False if self.total_size is not None: progress, end_byte = self.__NormalizeStartEnd(start, end) progress_end_normalized = True else: progress = start end_byte = end while (not progress_end_normalized or end_byte is None or progress <= end_byte): end_byte = self.__ComputeEndByte(progress, end=end_byte, use_chunks=use_chunks) response = self.__GetChunk(progress, end_byte, additional_headers=additional_headers) if not progress_end_normalized: self.__SetTotal(response.info) progress, end_byte = self.__NormalizeStartEnd(start, end) progress_end_normalized = True response = self.__ProcessResponse(response) progress += response.length if response.length == 0: if response.status_code == http_client.OK: # There can legitimately be no Content-Length header sent # in some cases (e.g., when there's a Transfer-Encoding # header) and if this was a 200 response (as opposed to # 206 Partial Content) we know we're done now without # looping further on received length. return raise exceptions.TransferRetryError( 'Zero bytes unexpectedly returned in download response')
[ "def", "GetRange", "(", "self", ",", "start", ",", "end", "=", "None", ",", "additional_headers", "=", "None", ",", "use_chunks", "=", "True", ")", ":", "self", ".", "EnsureInitialized", "(", ")", "progress_end_normalized", "=", "False", "if", "self", ".",...
Retrieve a given byte range from this download, inclusive. Range must be of one of these three forms: * 0 <= start, end = None: Fetch from start to the end of the file. * 0 <= start <= end: Fetch the bytes from start to end. * start < 0, end = None: Fetch the last -start bytes of the file. (These variations correspond to those described in the HTTP 1.1 protocol for range headers in RFC 2616, sec. 14.35.1.) Args: start: (int) Where to start fetching bytes. (See above.) end: (int, optional) Where to stop fetching bytes. (See above.) additional_headers: (bool, optional) Any additional headers to pass with the request. use_chunks: (bool, default: True) If False, ignore self.chunksize and fetch this range in a single request. Returns: None. Streams bytes into self.stream.
[ "Retrieve", "a", "given", "byte", "range", "from", "this", "download", "inclusive", "." ]
f3745a7ea535aa0e88b0650c16479b696d6fd446
https://github.com/google/apitools/blob/f3745a7ea535aa0e88b0650c16479b696d6fd446/apitools/base/py/transfer.py#L445-L497
train
207,626
google/apitools
apitools/base/py/transfer.py
Download.StreamInChunks
def StreamInChunks(self, callback=None, finish_callback=None, additional_headers=None): """Stream the entire download in chunks.""" self.StreamMedia(callback=callback, finish_callback=finish_callback, additional_headers=additional_headers, use_chunks=True)
python
def StreamInChunks(self, callback=None, finish_callback=None, additional_headers=None): """Stream the entire download in chunks.""" self.StreamMedia(callback=callback, finish_callback=finish_callback, additional_headers=additional_headers, use_chunks=True)
[ "def", "StreamInChunks", "(", "self", ",", "callback", "=", "None", ",", "finish_callback", "=", "None", ",", "additional_headers", "=", "None", ")", ":", "self", ".", "StreamMedia", "(", "callback", "=", "callback", ",", "finish_callback", "=", "finish_callba...
Stream the entire download in chunks.
[ "Stream", "the", "entire", "download", "in", "chunks", "." ]
f3745a7ea535aa0e88b0650c16479b696d6fd446
https://github.com/google/apitools/blob/f3745a7ea535aa0e88b0650c16479b696d6fd446/apitools/base/py/transfer.py#L499-L504
train
207,627
google/apitools
apitools/base/py/transfer.py
Download.StreamMedia
def StreamMedia(self, callback=None, finish_callback=None, additional_headers=None, use_chunks=True): """Stream the entire download. Args: callback: (default: None) Callback to call as each chunk is completed. finish_callback: (default: None) Callback to call when the download is complete. additional_headers: (default: None) Additional headers to include in fetching bytes. use_chunks: (bool, default: True) If False, ignore self.chunksize and stream this download in a single request. Returns: None. Streams bytes into self.stream. """ callback = callback or self.progress_callback finish_callback = finish_callback or self.finish_callback self.EnsureInitialized() while True: if self.__initial_response is not None: response = self.__initial_response self.__initial_response = None else: end_byte = self.__ComputeEndByte(self.progress, use_chunks=use_chunks) response = self.__GetChunk( self.progress, end_byte, additional_headers=additional_headers) if self.total_size is None: self.__SetTotal(response.info) response = self.__ProcessResponse(response) self._ExecuteCallback(callback, response) if (response.status_code == http_client.OK or self.progress >= self.total_size): break self._ExecuteCallback(finish_callback, response)
python
def StreamMedia(self, callback=None, finish_callback=None, additional_headers=None, use_chunks=True): """Stream the entire download. Args: callback: (default: None) Callback to call as each chunk is completed. finish_callback: (default: None) Callback to call when the download is complete. additional_headers: (default: None) Additional headers to include in fetching bytes. use_chunks: (bool, default: True) If False, ignore self.chunksize and stream this download in a single request. Returns: None. Streams bytes into self.stream. """ callback = callback or self.progress_callback finish_callback = finish_callback or self.finish_callback self.EnsureInitialized() while True: if self.__initial_response is not None: response = self.__initial_response self.__initial_response = None else: end_byte = self.__ComputeEndByte(self.progress, use_chunks=use_chunks) response = self.__GetChunk( self.progress, end_byte, additional_headers=additional_headers) if self.total_size is None: self.__SetTotal(response.info) response = self.__ProcessResponse(response) self._ExecuteCallback(callback, response) if (response.status_code == http_client.OK or self.progress >= self.total_size): break self._ExecuteCallback(finish_callback, response)
[ "def", "StreamMedia", "(", "self", ",", "callback", "=", "None", ",", "finish_callback", "=", "None", ",", "additional_headers", "=", "None", ",", "use_chunks", "=", "True", ")", ":", "callback", "=", "callback", "or", "self", ".", "progress_callback", "fini...
Stream the entire download. Args: callback: (default: None) Callback to call as each chunk is completed. finish_callback: (default: None) Callback to call when the download is complete. additional_headers: (default: None) Additional headers to include in fetching bytes. use_chunks: (bool, default: True) If False, ignore self.chunksize and stream this download in a single request. Returns: None. Streams bytes into self.stream.
[ "Stream", "the", "entire", "download", "." ]
f3745a7ea535aa0e88b0650c16479b696d6fd446
https://github.com/google/apitools/blob/f3745a7ea535aa0e88b0650c16479b696d6fd446/apitools/base/py/transfer.py#L506-L544
train
207,628
google/apitools
apitools/base/py/transfer.py
Upload.FromFile
def FromFile(cls, filename, mime_type=None, auto_transfer=True, gzip_encoded=False, **kwds): """Create a new Upload object from a filename.""" path = os.path.expanduser(filename) if not os.path.exists(path): raise exceptions.NotFoundError('Could not find file %s' % path) if not mime_type: mime_type, _ = mimetypes.guess_type(path) if mime_type is None: raise exceptions.InvalidUserInputError( 'Could not guess mime type for %s' % path) size = os.stat(path).st_size return cls(open(path, 'rb'), mime_type, total_size=size, close_stream=True, auto_transfer=auto_transfer, gzip_encoded=gzip_encoded, **kwds)
python
def FromFile(cls, filename, mime_type=None, auto_transfer=True, gzip_encoded=False, **kwds): """Create a new Upload object from a filename.""" path = os.path.expanduser(filename) if not os.path.exists(path): raise exceptions.NotFoundError('Could not find file %s' % path) if not mime_type: mime_type, _ = mimetypes.guess_type(path) if mime_type is None: raise exceptions.InvalidUserInputError( 'Could not guess mime type for %s' % path) size = os.stat(path).st_size return cls(open(path, 'rb'), mime_type, total_size=size, close_stream=True, auto_transfer=auto_transfer, gzip_encoded=gzip_encoded, **kwds)
[ "def", "FromFile", "(", "cls", ",", "filename", ",", "mime_type", "=", "None", ",", "auto_transfer", "=", "True", ",", "gzip_encoded", "=", "False", ",", "*", "*", "kwds", ")", ":", "path", "=", "os", ".", "path", ".", "expanduser", "(", "filename", ...
Create a new Upload object from a filename.
[ "Create", "a", "new", "Upload", "object", "from", "a", "filename", "." ]
f3745a7ea535aa0e88b0650c16479b696d6fd446
https://github.com/google/apitools/blob/f3745a7ea535aa0e88b0650c16479b696d6fd446/apitools/base/py/transfer.py#L608-L622
train
207,629
google/apitools
apitools/base/py/transfer.py
Upload.FromStream
def FromStream(cls, stream, mime_type, total_size=None, auto_transfer=True, gzip_encoded=False, **kwds): """Create a new Upload object from a stream.""" if mime_type is None: raise exceptions.InvalidUserInputError( 'No mime_type specified for stream') return cls(stream, mime_type, total_size=total_size, close_stream=False, auto_transfer=auto_transfer, gzip_encoded=gzip_encoded, **kwds)
python
def FromStream(cls, stream, mime_type, total_size=None, auto_transfer=True, gzip_encoded=False, **kwds): """Create a new Upload object from a stream.""" if mime_type is None: raise exceptions.InvalidUserInputError( 'No mime_type specified for stream') return cls(stream, mime_type, total_size=total_size, close_stream=False, auto_transfer=auto_transfer, gzip_encoded=gzip_encoded, **kwds)
[ "def", "FromStream", "(", "cls", ",", "stream", ",", "mime_type", ",", "total_size", "=", "None", ",", "auto_transfer", "=", "True", ",", "gzip_encoded", "=", "False", ",", "*", "*", "kwds", ")", ":", "if", "mime_type", "is", "None", ":", "raise", "exc...
Create a new Upload object from a stream.
[ "Create", "a", "new", "Upload", "object", "from", "a", "stream", "." ]
f3745a7ea535aa0e88b0650c16479b696d6fd446
https://github.com/google/apitools/blob/f3745a7ea535aa0e88b0650c16479b696d6fd446/apitools/base/py/transfer.py#L625-L633
train
207,630
google/apitools
apitools/base/py/transfer.py
Upload.FromData
def FromData(cls, stream, json_data, http, auto_transfer=None, gzip_encoded=False, **kwds): """Create a new Upload of stream from serialized json_data and http.""" info = json.loads(json_data) missing_keys = cls._REQUIRED_SERIALIZATION_KEYS - set(info.keys()) if missing_keys: raise exceptions.InvalidDataError( 'Invalid serialization data, missing keys: %s' % ( ', '.join(missing_keys))) if 'total_size' in kwds: raise exceptions.InvalidUserInputError( 'Cannot override total_size on serialized Upload') upload = cls.FromStream(stream, info['mime_type'], total_size=info.get('total_size'), gzip_encoded=gzip_encoded, **kwds) if isinstance(stream, io.IOBase) and not stream.seekable(): raise exceptions.InvalidUserInputError( 'Cannot restart resumable upload on non-seekable stream') if auto_transfer is not None: upload.auto_transfer = auto_transfer else: upload.auto_transfer = info['auto_transfer'] upload.strategy = RESUMABLE_UPLOAD upload._Initialize( # pylint: disable=protected-access http, info['url']) upload.RefreshResumableUploadState() upload.EnsureInitialized() if upload.auto_transfer: upload.StreamInChunks() return upload
python
def FromData(cls, stream, json_data, http, auto_transfer=None, gzip_encoded=False, **kwds): """Create a new Upload of stream from serialized json_data and http.""" info = json.loads(json_data) missing_keys = cls._REQUIRED_SERIALIZATION_KEYS - set(info.keys()) if missing_keys: raise exceptions.InvalidDataError( 'Invalid serialization data, missing keys: %s' % ( ', '.join(missing_keys))) if 'total_size' in kwds: raise exceptions.InvalidUserInputError( 'Cannot override total_size on serialized Upload') upload = cls.FromStream(stream, info['mime_type'], total_size=info.get('total_size'), gzip_encoded=gzip_encoded, **kwds) if isinstance(stream, io.IOBase) and not stream.seekable(): raise exceptions.InvalidUserInputError( 'Cannot restart resumable upload on non-seekable stream') if auto_transfer is not None: upload.auto_transfer = auto_transfer else: upload.auto_transfer = info['auto_transfer'] upload.strategy = RESUMABLE_UPLOAD upload._Initialize( # pylint: disable=protected-access http, info['url']) upload.RefreshResumableUploadState() upload.EnsureInitialized() if upload.auto_transfer: upload.StreamInChunks() return upload
[ "def", "FromData", "(", "cls", ",", "stream", ",", "json_data", ",", "http", ",", "auto_transfer", "=", "None", ",", "gzip_encoded", "=", "False", ",", "*", "*", "kwds", ")", ":", "info", "=", "json", ".", "loads", "(", "json_data", ")", "missing_keys"...
Create a new Upload of stream from serialized json_data and http.
[ "Create", "a", "new", "Upload", "of", "stream", "from", "serialized", "json_data", "and", "http", "." ]
f3745a7ea535aa0e88b0650c16479b696d6fd446
https://github.com/google/apitools/blob/f3745a7ea535aa0e88b0650c16479b696d6fd446/apitools/base/py/transfer.py#L636-L665
train
207,631
google/apitools
apitools/base/py/transfer.py
Upload.__SetDefaultUploadStrategy
def __SetDefaultUploadStrategy(self, upload_config, http_request): """Determine and set the default upload strategy for this upload. We generally prefer simple or multipart, unless we're forced to use resumable. This happens when any of (1) the upload is too large, (2) the simple endpoint doesn't support multipart requests and we have metadata, or (3) there is no simple upload endpoint. Args: upload_config: Configuration for the upload endpoint. http_request: The associated http request. Returns: None. """ if upload_config.resumable_path is None: self.strategy = SIMPLE_UPLOAD if self.strategy is not None: return strategy = SIMPLE_UPLOAD if (self.total_size is not None and self.total_size > _RESUMABLE_UPLOAD_THRESHOLD): strategy = RESUMABLE_UPLOAD if http_request.body and not upload_config.simple_multipart: strategy = RESUMABLE_UPLOAD if not upload_config.simple_path: strategy = RESUMABLE_UPLOAD self.strategy = strategy
python
def __SetDefaultUploadStrategy(self, upload_config, http_request): """Determine and set the default upload strategy for this upload. We generally prefer simple or multipart, unless we're forced to use resumable. This happens when any of (1) the upload is too large, (2) the simple endpoint doesn't support multipart requests and we have metadata, or (3) there is no simple upload endpoint. Args: upload_config: Configuration for the upload endpoint. http_request: The associated http request. Returns: None. """ if upload_config.resumable_path is None: self.strategy = SIMPLE_UPLOAD if self.strategy is not None: return strategy = SIMPLE_UPLOAD if (self.total_size is not None and self.total_size > _RESUMABLE_UPLOAD_THRESHOLD): strategy = RESUMABLE_UPLOAD if http_request.body and not upload_config.simple_multipart: strategy = RESUMABLE_UPLOAD if not upload_config.simple_path: strategy = RESUMABLE_UPLOAD self.strategy = strategy
[ "def", "__SetDefaultUploadStrategy", "(", "self", ",", "upload_config", ",", "http_request", ")", ":", "if", "upload_config", ".", "resumable_path", "is", "None", ":", "self", ".", "strategy", "=", "SIMPLE_UPLOAD", "if", "self", ".", "strategy", "is", "not", "...
Determine and set the default upload strategy for this upload. We generally prefer simple or multipart, unless we're forced to use resumable. This happens when any of (1) the upload is too large, (2) the simple endpoint doesn't support multipart requests and we have metadata, or (3) there is no simple upload endpoint. Args: upload_config: Configuration for the upload endpoint. http_request: The associated http request. Returns: None.
[ "Determine", "and", "set", "the", "default", "upload", "strategy", "for", "this", "upload", "." ]
f3745a7ea535aa0e88b0650c16479b696d6fd446
https://github.com/google/apitools/blob/f3745a7ea535aa0e88b0650c16479b696d6fd446/apitools/base/py/transfer.py#L715-L742
train
207,632
google/apitools
apitools/base/py/transfer.py
Upload.ConfigureRequest
def ConfigureRequest(self, upload_config, http_request, url_builder): """Configure the request and url for this upload.""" # Validate total_size vs. max_size if (self.total_size and upload_config.max_size and self.total_size > upload_config.max_size): raise exceptions.InvalidUserInputError( 'Upload too big: %s larger than max size %s' % ( self.total_size, upload_config.max_size)) # Validate mime type if not util.AcceptableMimeType(upload_config.accept, self.mime_type): raise exceptions.InvalidUserInputError( 'MIME type %s does not match any accepted MIME ranges %s' % ( self.mime_type, upload_config.accept)) self.__SetDefaultUploadStrategy(upload_config, http_request) if self.strategy == SIMPLE_UPLOAD: url_builder.relative_path = upload_config.simple_path if http_request.body: url_builder.query_params['uploadType'] = 'multipart' self.__ConfigureMultipartRequest(http_request) else: url_builder.query_params['uploadType'] = 'media' self.__ConfigureMediaRequest(http_request) # Once the entire body is written, compress the body if configured # to. Both multipart and media request uploads will read the # entire stream into memory, which means full compression is also # safe to perform. Because the strategy is set to SIMPLE_UPLOAD, # StreamInChunks throws an exception, meaning double compression # cannot happen. if self.__gzip_encoded: http_request.headers['Content-Encoding'] = 'gzip' # Turn the body into a stream so that we can compress it, then # read the compressed bytes. In the event of a retry (e.g. if # our access token has expired), we need to be able to re-read # the body, which we can't do with a stream. So, we consume the # bytes from the stream now and store them in a re-readable # bytes container. http_request.body = ( compression.CompressStream( six.BytesIO(http_request.body))[0].read()) else: url_builder.relative_path = upload_config.resumable_path url_builder.query_params['uploadType'] = 'resumable' self.__ConfigureResumableRequest(http_request)
python
def ConfigureRequest(self, upload_config, http_request, url_builder): """Configure the request and url for this upload.""" # Validate total_size vs. max_size if (self.total_size and upload_config.max_size and self.total_size > upload_config.max_size): raise exceptions.InvalidUserInputError( 'Upload too big: %s larger than max size %s' % ( self.total_size, upload_config.max_size)) # Validate mime type if not util.AcceptableMimeType(upload_config.accept, self.mime_type): raise exceptions.InvalidUserInputError( 'MIME type %s does not match any accepted MIME ranges %s' % ( self.mime_type, upload_config.accept)) self.__SetDefaultUploadStrategy(upload_config, http_request) if self.strategy == SIMPLE_UPLOAD: url_builder.relative_path = upload_config.simple_path if http_request.body: url_builder.query_params['uploadType'] = 'multipart' self.__ConfigureMultipartRequest(http_request) else: url_builder.query_params['uploadType'] = 'media' self.__ConfigureMediaRequest(http_request) # Once the entire body is written, compress the body if configured # to. Both multipart and media request uploads will read the # entire stream into memory, which means full compression is also # safe to perform. Because the strategy is set to SIMPLE_UPLOAD, # StreamInChunks throws an exception, meaning double compression # cannot happen. if self.__gzip_encoded: http_request.headers['Content-Encoding'] = 'gzip' # Turn the body into a stream so that we can compress it, then # read the compressed bytes. In the event of a retry (e.g. if # our access token has expired), we need to be able to re-read # the body, which we can't do with a stream. So, we consume the # bytes from the stream now and store them in a re-readable # bytes container. http_request.body = ( compression.CompressStream( six.BytesIO(http_request.body))[0].read()) else: url_builder.relative_path = upload_config.resumable_path url_builder.query_params['uploadType'] = 'resumable' self.__ConfigureResumableRequest(http_request)
[ "def", "ConfigureRequest", "(", "self", ",", "upload_config", ",", "http_request", ",", "url_builder", ")", ":", "# Validate total_size vs. max_size", "if", "(", "self", ".", "total_size", "and", "upload_config", ".", "max_size", "and", "self", ".", "total_size", ...
Configure the request and url for this upload.
[ "Configure", "the", "request", "and", "url", "for", "this", "upload", "." ]
f3745a7ea535aa0e88b0650c16479b696d6fd446
https://github.com/google/apitools/blob/f3745a7ea535aa0e88b0650c16479b696d6fd446/apitools/base/py/transfer.py#L744-L787
train
207,633
google/apitools
apitools/base/py/transfer.py
Upload.__ConfigureMediaRequest
def __ConfigureMediaRequest(self, http_request): """Configure http_request as a simple request for this upload.""" http_request.headers['content-type'] = self.mime_type http_request.body = self.stream.read() http_request.loggable_body = '<media body>'
python
def __ConfigureMediaRequest(self, http_request): """Configure http_request as a simple request for this upload.""" http_request.headers['content-type'] = self.mime_type http_request.body = self.stream.read() http_request.loggable_body = '<media body>'
[ "def", "__ConfigureMediaRequest", "(", "self", ",", "http_request", ")", ":", "http_request", ".", "headers", "[", "'content-type'", "]", "=", "self", ".", "mime_type", "http_request", ".", "body", "=", "self", ".", "stream", ".", "read", "(", ")", "http_req...
Configure http_request as a simple request for this upload.
[ "Configure", "http_request", "as", "a", "simple", "request", "for", "this", "upload", "." ]
f3745a7ea535aa0e88b0650c16479b696d6fd446
https://github.com/google/apitools/blob/f3745a7ea535aa0e88b0650c16479b696d6fd446/apitools/base/py/transfer.py#L789-L793
train
207,634
google/apitools
apitools/base/py/transfer.py
Upload.__ConfigureMultipartRequest
def __ConfigureMultipartRequest(self, http_request): """Configure http_request as a multipart request for this upload.""" # This is a multipart/related upload. msg_root = mime_multipart.MIMEMultipart('related') # msg_root should not write out its own headers setattr(msg_root, '_write_headers', lambda self: None) # attach the body as one part msg = mime_nonmultipart.MIMENonMultipart( *http_request.headers['content-type'].split('/')) msg.set_payload(http_request.body) msg_root.attach(msg) # attach the media as the second part msg = mime_nonmultipart.MIMENonMultipart(*self.mime_type.split('/')) msg['Content-Transfer-Encoding'] = 'binary' msg.set_payload(self.stream.read()) msg_root.attach(msg) # NOTE: We encode the body, but can't use # `email.message.Message.as_string` because it prepends # `> ` to `From ` lines. fp = six.BytesIO() if six.PY3: generator_class = MultipartBytesGenerator else: generator_class = email_generator.Generator g = generator_class(fp, mangle_from_=False) g.flatten(msg_root, unixfrom=False) http_request.body = fp.getvalue() multipart_boundary = msg_root.get_boundary() http_request.headers['content-type'] = ( 'multipart/related; boundary=%r' % multipart_boundary) if isinstance(multipart_boundary, six.text_type): multipart_boundary = multipart_boundary.encode('ascii') body_components = http_request.body.split(multipart_boundary) headers, _, _ = body_components[-2].partition(b'\n\n') body_components[-2] = b'\n\n'.join([headers, b'<media body>\n\n--']) http_request.loggable_body = multipart_boundary.join(body_components)
python
def __ConfigureMultipartRequest(self, http_request): """Configure http_request as a multipart request for this upload.""" # This is a multipart/related upload. msg_root = mime_multipart.MIMEMultipart('related') # msg_root should not write out its own headers setattr(msg_root, '_write_headers', lambda self: None) # attach the body as one part msg = mime_nonmultipart.MIMENonMultipart( *http_request.headers['content-type'].split('/')) msg.set_payload(http_request.body) msg_root.attach(msg) # attach the media as the second part msg = mime_nonmultipart.MIMENonMultipart(*self.mime_type.split('/')) msg['Content-Transfer-Encoding'] = 'binary' msg.set_payload(self.stream.read()) msg_root.attach(msg) # NOTE: We encode the body, but can't use # `email.message.Message.as_string` because it prepends # `> ` to `From ` lines. fp = six.BytesIO() if six.PY3: generator_class = MultipartBytesGenerator else: generator_class = email_generator.Generator g = generator_class(fp, mangle_from_=False) g.flatten(msg_root, unixfrom=False) http_request.body = fp.getvalue() multipart_boundary = msg_root.get_boundary() http_request.headers['content-type'] = ( 'multipart/related; boundary=%r' % multipart_boundary) if isinstance(multipart_boundary, six.text_type): multipart_boundary = multipart_boundary.encode('ascii') body_components = http_request.body.split(multipart_boundary) headers, _, _ = body_components[-2].partition(b'\n\n') body_components[-2] = b'\n\n'.join([headers, b'<media body>\n\n--']) http_request.loggable_body = multipart_boundary.join(body_components)
[ "def", "__ConfigureMultipartRequest", "(", "self", ",", "http_request", ")", ":", "# This is a multipart/related upload.", "msg_root", "=", "mime_multipart", ".", "MIMEMultipart", "(", "'related'", ")", "# msg_root should not write out its own headers", "setattr", "(", "msg_r...
Configure http_request as a multipart request for this upload.
[ "Configure", "http_request", "as", "a", "multipart", "request", "for", "this", "upload", "." ]
f3745a7ea535aa0e88b0650c16479b696d6fd446
https://github.com/google/apitools/blob/f3745a7ea535aa0e88b0650c16479b696d6fd446/apitools/base/py/transfer.py#L795-L835
train
207,635
google/apitools
apitools/base/py/transfer.py
Upload.RefreshResumableUploadState
def RefreshResumableUploadState(self): """Talk to the server and refresh the state of this resumable upload. Returns: Response if the upload is complete. """ if self.strategy != RESUMABLE_UPLOAD: return self.EnsureInitialized() refresh_request = http_wrapper.Request( url=self.url, http_method='PUT', headers={'Content-Range': 'bytes */*'}) refresh_response = http_wrapper.MakeRequest( self.http, refresh_request, redirections=0, retries=self.num_retries) range_header = self._GetRangeHeaderFromResponse(refresh_response) if refresh_response.status_code in (http_client.OK, http_client.CREATED): self.__complete = True self.__progress = self.total_size self.stream.seek(self.progress) # If we're finished, the refresh response will contain the metadata # originally requested. Cache it so it can be returned in # StreamInChunks. self.__final_response = refresh_response elif refresh_response.status_code == http_wrapper.RESUME_INCOMPLETE: if range_header is None: self.__progress = 0 else: self.__progress = self.__GetLastByte(range_header) + 1 self.stream.seek(self.progress) else: raise exceptions.HttpError.FromResponse(refresh_response)
python
def RefreshResumableUploadState(self): """Talk to the server and refresh the state of this resumable upload. Returns: Response if the upload is complete. """ if self.strategy != RESUMABLE_UPLOAD: return self.EnsureInitialized() refresh_request = http_wrapper.Request( url=self.url, http_method='PUT', headers={'Content-Range': 'bytes */*'}) refresh_response = http_wrapper.MakeRequest( self.http, refresh_request, redirections=0, retries=self.num_retries) range_header = self._GetRangeHeaderFromResponse(refresh_response) if refresh_response.status_code in (http_client.OK, http_client.CREATED): self.__complete = True self.__progress = self.total_size self.stream.seek(self.progress) # If we're finished, the refresh response will contain the metadata # originally requested. Cache it so it can be returned in # StreamInChunks. self.__final_response = refresh_response elif refresh_response.status_code == http_wrapper.RESUME_INCOMPLETE: if range_header is None: self.__progress = 0 else: self.__progress = self.__GetLastByte(range_header) + 1 self.stream.seek(self.progress) else: raise exceptions.HttpError.FromResponse(refresh_response)
[ "def", "RefreshResumableUploadState", "(", "self", ")", ":", "if", "self", ".", "strategy", "!=", "RESUMABLE_UPLOAD", ":", "return", "self", ".", "EnsureInitialized", "(", ")", "refresh_request", "=", "http_wrapper", ".", "Request", "(", "url", "=", "self", "....
Talk to the server and refresh the state of this resumable upload. Returns: Response if the upload is complete.
[ "Talk", "to", "the", "server", "and", "refresh", "the", "state", "of", "this", "resumable", "upload", "." ]
f3745a7ea535aa0e88b0650c16479b696d6fd446
https://github.com/google/apitools/blob/f3745a7ea535aa0e88b0650c16479b696d6fd446/apitools/base/py/transfer.py#L843-L875
train
207,636
google/apitools
apitools/base/py/transfer.py
Upload.InitializeUpload
def InitializeUpload(self, http_request, http=None, client=None): """Initialize this upload from the given http_request.""" if self.strategy is None: raise exceptions.UserError( 'No upload strategy set; did you call ConfigureRequest?') if http is None and client is None: raise exceptions.UserError('Must provide client or http.') if self.strategy != RESUMABLE_UPLOAD: return http = http or client.http if client is not None: http_request.url = client.FinalizeTransferUrl(http_request.url) self.EnsureUninitialized() http_response = http_wrapper.MakeRequest(http, http_request, retries=self.num_retries) if http_response.status_code != http_client.OK: raise exceptions.HttpError.FromResponse(http_response) self.__server_chunk_granularity = http_response.info.get( 'X-Goog-Upload-Chunk-Granularity') url = http_response.info['location'] if client is not None: url = client.FinalizeTransferUrl(url) self._Initialize(http, url) # Unless the user has requested otherwise, we want to just # go ahead and pump the bytes now. if self.auto_transfer: return self.StreamInChunks() return http_response
python
def InitializeUpload(self, http_request, http=None, client=None): """Initialize this upload from the given http_request.""" if self.strategy is None: raise exceptions.UserError( 'No upload strategy set; did you call ConfigureRequest?') if http is None and client is None: raise exceptions.UserError('Must provide client or http.') if self.strategy != RESUMABLE_UPLOAD: return http = http or client.http if client is not None: http_request.url = client.FinalizeTransferUrl(http_request.url) self.EnsureUninitialized() http_response = http_wrapper.MakeRequest(http, http_request, retries=self.num_retries) if http_response.status_code != http_client.OK: raise exceptions.HttpError.FromResponse(http_response) self.__server_chunk_granularity = http_response.info.get( 'X-Goog-Upload-Chunk-Granularity') url = http_response.info['location'] if client is not None: url = client.FinalizeTransferUrl(url) self._Initialize(http, url) # Unless the user has requested otherwise, we want to just # go ahead and pump the bytes now. if self.auto_transfer: return self.StreamInChunks() return http_response
[ "def", "InitializeUpload", "(", "self", ",", "http_request", ",", "http", "=", "None", ",", "client", "=", "None", ")", ":", "if", "self", ".", "strategy", "is", "None", ":", "raise", "exceptions", ".", "UserError", "(", "'No upload strategy set; did you call ...
Initialize this upload from the given http_request.
[ "Initialize", "this", "upload", "from", "the", "given", "http_request", "." ]
f3745a7ea535aa0e88b0650c16479b696d6fd446
https://github.com/google/apitools/blob/f3745a7ea535aa0e88b0650c16479b696d6fd446/apitools/base/py/transfer.py#L880-L909
train
207,637
google/apitools
apitools/base/py/transfer.py
Upload.StreamMedia
def StreamMedia(self, callback=None, finish_callback=None, additional_headers=None): """Send this resumable upload in a single request. Args: callback: Progress callback function with inputs (http_wrapper.Response, transfer.Upload) finish_callback: Final callback function with inputs (http_wrapper.Response, transfer.Upload) additional_headers: Dict of headers to include with the upload http_wrapper.Request. Returns: http_wrapper.Response of final response. """ return self.__StreamMedia( callback=callback, finish_callback=finish_callback, additional_headers=additional_headers, use_chunks=False)
python
def StreamMedia(self, callback=None, finish_callback=None, additional_headers=None): """Send this resumable upload in a single request. Args: callback: Progress callback function with inputs (http_wrapper.Response, transfer.Upload) finish_callback: Final callback function with inputs (http_wrapper.Response, transfer.Upload) additional_headers: Dict of headers to include with the upload http_wrapper.Request. Returns: http_wrapper.Response of final response. """ return self.__StreamMedia( callback=callback, finish_callback=finish_callback, additional_headers=additional_headers, use_chunks=False)
[ "def", "StreamMedia", "(", "self", ",", "callback", "=", "None", ",", "finish_callback", "=", "None", ",", "additional_headers", "=", "None", ")", ":", "return", "self", ".", "__StreamMedia", "(", "callback", "=", "callback", ",", "finish_callback", "=", "fi...
Send this resumable upload in a single request. Args: callback: Progress callback function with inputs (http_wrapper.Response, transfer.Upload) finish_callback: Final callback function with inputs (http_wrapper.Response, transfer.Upload) additional_headers: Dict of headers to include with the upload http_wrapper.Request. Returns: http_wrapper.Response of final response.
[ "Send", "this", "resumable", "upload", "in", "a", "single", "request", "." ]
f3745a7ea535aa0e88b0650c16479b696d6fd446
https://github.com/google/apitools/blob/f3745a7ea535aa0e88b0650c16479b696d6fd446/apitools/base/py/transfer.py#L996-L1013
train
207,638
google/apitools
apitools/base/py/transfer.py
Upload.__SendMediaRequest
def __SendMediaRequest(self, request, end): """Request helper function for SendMediaBody & SendChunk.""" def CheckResponse(response): if response is None: # Caller shouldn't call us if the response is None, # but handle anyway. raise exceptions.RequestError( 'Request to url %s did not return a response.' % response.request_url) response = http_wrapper.MakeRequest( self.bytes_http, request, retry_func=self.retry_func, retries=self.num_retries, check_response_func=CheckResponse) if response.status_code == http_wrapper.RESUME_INCOMPLETE: last_byte = self.__GetLastByte( self._GetRangeHeaderFromResponse(response)) if last_byte + 1 != end: self.stream.seek(last_byte + 1) return response
python
def __SendMediaRequest(self, request, end): """Request helper function for SendMediaBody & SendChunk.""" def CheckResponse(response): if response is None: # Caller shouldn't call us if the response is None, # but handle anyway. raise exceptions.RequestError( 'Request to url %s did not return a response.' % response.request_url) response = http_wrapper.MakeRequest( self.bytes_http, request, retry_func=self.retry_func, retries=self.num_retries, check_response_func=CheckResponse) if response.status_code == http_wrapper.RESUME_INCOMPLETE: last_byte = self.__GetLastByte( self._GetRangeHeaderFromResponse(response)) if last_byte + 1 != end: self.stream.seek(last_byte + 1) return response
[ "def", "__SendMediaRequest", "(", "self", ",", "request", ",", "end", ")", ":", "def", "CheckResponse", "(", "response", ")", ":", "if", "response", "is", "None", ":", "# Caller shouldn't call us if the response is None,", "# but handle anyway.", "raise", "exceptions"...
Request helper function for SendMediaBody & SendChunk.
[ "Request", "helper", "function", "for", "SendMediaBody", "&", "SendChunk", "." ]
f3745a7ea535aa0e88b0650c16479b696d6fd446
https://github.com/google/apitools/blob/f3745a7ea535aa0e88b0650c16479b696d6fd446/apitools/base/py/transfer.py#L1022-L1039
train
207,639
google/apitools
apitools/base/py/transfer.py
Upload.__SendMediaBody
def __SendMediaBody(self, start, additional_headers=None): """Send the entire media stream in a single request.""" self.EnsureInitialized() if self.total_size is None: raise exceptions.TransferInvalidError( 'Total size must be known for SendMediaBody') body_stream = stream_slice.StreamSlice( self.stream, self.total_size - start) request = http_wrapper.Request(url=self.url, http_method='PUT', body=body_stream) request.headers['Content-Type'] = self.mime_type if start == self.total_size: # End of an upload with 0 bytes left to send; just finalize. range_string = 'bytes */%s' % self.total_size else: range_string = 'bytes %s-%s/%s' % (start, self.total_size - 1, self.total_size) request.headers['Content-Range'] = range_string if additional_headers: request.headers.update(additional_headers) return self.__SendMediaRequest(request, self.total_size)
python
def __SendMediaBody(self, start, additional_headers=None): """Send the entire media stream in a single request.""" self.EnsureInitialized() if self.total_size is None: raise exceptions.TransferInvalidError( 'Total size must be known for SendMediaBody') body_stream = stream_slice.StreamSlice( self.stream, self.total_size - start) request = http_wrapper.Request(url=self.url, http_method='PUT', body=body_stream) request.headers['Content-Type'] = self.mime_type if start == self.total_size: # End of an upload with 0 bytes left to send; just finalize. range_string = 'bytes */%s' % self.total_size else: range_string = 'bytes %s-%s/%s' % (start, self.total_size - 1, self.total_size) request.headers['Content-Range'] = range_string if additional_headers: request.headers.update(additional_headers) return self.__SendMediaRequest(request, self.total_size)
[ "def", "__SendMediaBody", "(", "self", ",", "start", ",", "additional_headers", "=", "None", ")", ":", "self", ".", "EnsureInitialized", "(", ")", "if", "self", ".", "total_size", "is", "None", ":", "raise", "exceptions", ".", "TransferInvalidError", "(", "'...
Send the entire media stream in a single request.
[ "Send", "the", "entire", "media", "stream", "in", "a", "single", "request", "." ]
f3745a7ea535aa0e88b0650c16479b696d6fd446
https://github.com/google/apitools/blob/f3745a7ea535aa0e88b0650c16479b696d6fd446/apitools/base/py/transfer.py#L1041-L1064
train
207,640
google/apitools
apitools/base/py/transfer.py
Upload.__SendChunk
def __SendChunk(self, start, additional_headers=None): """Send the specified chunk.""" self.EnsureInitialized() no_log_body = self.total_size is None request = http_wrapper.Request(url=self.url, http_method='PUT') if self.__gzip_encoded: request.headers['Content-Encoding'] = 'gzip' body_stream, read_length, exhausted = compression.CompressStream( self.stream, self.chunksize) end = start + read_length # If the stream length was previously unknown and the input stream # is exhausted, then we're at the end of the stream. if self.total_size is None and exhausted: self.__total_size = end elif self.total_size is None: # For the streaming resumable case, we need to detect when # we're at the end of the stream. body_stream = buffered_stream.BufferedStream( self.stream, start, self.chunksize) end = body_stream.stream_end_position if body_stream.stream_exhausted: self.__total_size = end # TODO: Here, change body_stream from a stream to a string object, # which means reading a chunk into memory. This works around # https://code.google.com/p/httplib2/issues/detail?id=176 which can # cause httplib2 to skip bytes on 401's for file objects. # Rework this solution to be more general. body_stream = body_stream.read(self.chunksize) else: end = min(start + self.chunksize, self.total_size) body_stream = stream_slice.StreamSlice(self.stream, end - start) # TODO(craigcitro): Think about clearer errors on "no data in # stream". request.body = body_stream request.headers['Content-Type'] = self.mime_type if no_log_body: # Disable logging of streaming body. # TODO: Remove no_log_body and rework as part of a larger logs # refactor. request.loggable_body = '<media body>' if self.total_size is None: # Streaming resumable upload case, unknown total size. range_string = 'bytes %s-%s/*' % (start, end - 1) elif end == start: # End of an upload with 0 bytes left to send; just finalize. range_string = 'bytes */%s' % self.total_size else: # Normal resumable upload case with known sizes. 
range_string = 'bytes %s-%s/%s' % (start, end - 1, self.total_size) request.headers['Content-Range'] = range_string if additional_headers: request.headers.update(additional_headers) return self.__SendMediaRequest(request, end)
python
def __SendChunk(self, start, additional_headers=None): """Send the specified chunk.""" self.EnsureInitialized() no_log_body = self.total_size is None request = http_wrapper.Request(url=self.url, http_method='PUT') if self.__gzip_encoded: request.headers['Content-Encoding'] = 'gzip' body_stream, read_length, exhausted = compression.CompressStream( self.stream, self.chunksize) end = start + read_length # If the stream length was previously unknown and the input stream # is exhausted, then we're at the end of the stream. if self.total_size is None and exhausted: self.__total_size = end elif self.total_size is None: # For the streaming resumable case, we need to detect when # we're at the end of the stream. body_stream = buffered_stream.BufferedStream( self.stream, start, self.chunksize) end = body_stream.stream_end_position if body_stream.stream_exhausted: self.__total_size = end # TODO: Here, change body_stream from a stream to a string object, # which means reading a chunk into memory. This works around # https://code.google.com/p/httplib2/issues/detail?id=176 which can # cause httplib2 to skip bytes on 401's for file objects. # Rework this solution to be more general. body_stream = body_stream.read(self.chunksize) else: end = min(start + self.chunksize, self.total_size) body_stream = stream_slice.StreamSlice(self.stream, end - start) # TODO(craigcitro): Think about clearer errors on "no data in # stream". request.body = body_stream request.headers['Content-Type'] = self.mime_type if no_log_body: # Disable logging of streaming body. # TODO: Remove no_log_body and rework as part of a larger logs # refactor. request.loggable_body = '<media body>' if self.total_size is None: # Streaming resumable upload case, unknown total size. range_string = 'bytes %s-%s/*' % (start, end - 1) elif end == start: # End of an upload with 0 bytes left to send; just finalize. range_string = 'bytes */%s' % self.total_size else: # Normal resumable upload case with known sizes. 
range_string = 'bytes %s-%s/%s' % (start, end - 1, self.total_size) request.headers['Content-Range'] = range_string if additional_headers: request.headers.update(additional_headers) return self.__SendMediaRequest(request, end)
[ "def", "__SendChunk", "(", "self", ",", "start", ",", "additional_headers", "=", "None", ")", ":", "self", ".", "EnsureInitialized", "(", ")", "no_log_body", "=", "self", ".", "total_size", "is", "None", "request", "=", "http_wrapper", ".", "Request", "(", ...
Send the specified chunk.
[ "Send", "the", "specified", "chunk", "." ]
f3745a7ea535aa0e88b0650c16479b696d6fd446
https://github.com/google/apitools/blob/f3745a7ea535aa0e88b0650c16479b696d6fd446/apitools/base/py/transfer.py#L1066-L1120
train
207,641
google/apitools
apitools/base/py/compression.py
CompressStream
def CompressStream(in_stream, length=None, compresslevel=2, chunksize=16777216): """Compresses an input stream into a file-like buffer. This reads from the input stream until either we've stored at least length compressed bytes, or the input stream has been exhausted. This supports streams of unknown size. Args: in_stream: The input stream to read from. length: The target number of compressed bytes to buffer in the output stream. If length is none, the input stream will be compressed until it's exhausted. The actual length of the output buffer can vary from the target. If the input stream is exhaused, the output buffer may be smaller than expected. If the data is incompressible, the maximum length can be exceeded by can be calculated to be: chunksize + 5 * (floor((chunksize - 1) / 16383) + 1) + 17 This accounts for additional header data gzip adds. For the default 16MiB chunksize, this results in the max size of the output buffer being: length + 16Mib + 5142 bytes compresslevel: Optional, defaults to 2. The desired compression level. chunksize: Optional, defaults to 16MiB. The chunk size used when reading data from the input stream to write into the output buffer. Returns: A file-like output buffer of compressed bytes, the number of bytes read from the input stream, and a flag denoting if the input stream was exhausted. """ in_read = 0 in_exhausted = False out_stream = StreamingBuffer() with gzip.GzipFile(mode='wb', fileobj=out_stream, compresslevel=compresslevel) as compress_stream: # Read until we've written at least length bytes to the output stream. while not length or out_stream.length < length: data = in_stream.read(chunksize) data_length = len(data) compress_stream.write(data) in_read += data_length # If we read less than requested, the stream is exhausted. if data_length < chunksize: in_exhausted = True break return out_stream, in_read, in_exhausted
python
def CompressStream(in_stream, length=None, compresslevel=2, chunksize=16777216): """Compresses an input stream into a file-like buffer. This reads from the input stream until either we've stored at least length compressed bytes, or the input stream has been exhausted. This supports streams of unknown size. Args: in_stream: The input stream to read from. length: The target number of compressed bytes to buffer in the output stream. If length is none, the input stream will be compressed until it's exhausted. The actual length of the output buffer can vary from the target. If the input stream is exhaused, the output buffer may be smaller than expected. If the data is incompressible, the maximum length can be exceeded by can be calculated to be: chunksize + 5 * (floor((chunksize - 1) / 16383) + 1) + 17 This accounts for additional header data gzip adds. For the default 16MiB chunksize, this results in the max size of the output buffer being: length + 16Mib + 5142 bytes compresslevel: Optional, defaults to 2. The desired compression level. chunksize: Optional, defaults to 16MiB. The chunk size used when reading data from the input stream to write into the output buffer. Returns: A file-like output buffer of compressed bytes, the number of bytes read from the input stream, and a flag denoting if the input stream was exhausted. """ in_read = 0 in_exhausted = False out_stream = StreamingBuffer() with gzip.GzipFile(mode='wb', fileobj=out_stream, compresslevel=compresslevel) as compress_stream: # Read until we've written at least length bytes to the output stream. while not length or out_stream.length < length: data = in_stream.read(chunksize) data_length = len(data) compress_stream.write(data) in_read += data_length # If we read less than requested, the stream is exhausted. if data_length < chunksize: in_exhausted = True break return out_stream, in_read, in_exhausted
[ "def", "CompressStream", "(", "in_stream", ",", "length", "=", "None", ",", "compresslevel", "=", "2", ",", "chunksize", "=", "16777216", ")", ":", "in_read", "=", "0", "in_exhausted", "=", "False", "out_stream", "=", "StreamingBuffer", "(", ")", "with", "...
Compresses an input stream into a file-like buffer. This reads from the input stream until either we've stored at least length compressed bytes, or the input stream has been exhausted. This supports streams of unknown size. Args: in_stream: The input stream to read from. length: The target number of compressed bytes to buffer in the output stream. If length is none, the input stream will be compressed until it's exhausted. The actual length of the output buffer can vary from the target. If the input stream is exhaused, the output buffer may be smaller than expected. If the data is incompressible, the maximum length can be exceeded by can be calculated to be: chunksize + 5 * (floor((chunksize - 1) / 16383) + 1) + 17 This accounts for additional header data gzip adds. For the default 16MiB chunksize, this results in the max size of the output buffer being: length + 16Mib + 5142 bytes compresslevel: Optional, defaults to 2. The desired compression level. chunksize: Optional, defaults to 16MiB. The chunk size used when reading data from the input stream to write into the output buffer. Returns: A file-like output buffer of compressed bytes, the number of bytes read from the input stream, and a flag denoting if the input stream was exhausted.
[ "Compresses", "an", "input", "stream", "into", "a", "file", "-", "like", "buffer", "." ]
f3745a7ea535aa0e88b0650c16479b696d6fd446
https://github.com/google/apitools/blob/f3745a7ea535aa0e88b0650c16479b696d6fd446/apitools/base/py/compression.py#L30-L85
train
207,642
google/apitools
apitools/base/py/compression.py
StreamingBuffer.read
def read(self, size=None): """Read at most size bytes from this buffer. Bytes read from this buffer are consumed and are permanently removed. Args: size: If provided, read no more than size bytes from the buffer. Otherwise, this reads the entire buffer. Returns: The bytes read from this buffer. """ if size is None: size = self.__size ret_list = [] while size > 0 and self.__buf: data = self.__buf.popleft() size -= len(data) ret_list.append(data) if size < 0: ret_list[-1], remainder = ret_list[-1][:size], ret_list[-1][size:] self.__buf.appendleft(remainder) ret = b''.join(ret_list) self.__size -= len(ret) return ret
python
def read(self, size=None): """Read at most size bytes from this buffer. Bytes read from this buffer are consumed and are permanently removed. Args: size: If provided, read no more than size bytes from the buffer. Otherwise, this reads the entire buffer. Returns: The bytes read from this buffer. """ if size is None: size = self.__size ret_list = [] while size > 0 and self.__buf: data = self.__buf.popleft() size -= len(data) ret_list.append(data) if size < 0: ret_list[-1], remainder = ret_list[-1][:size], ret_list[-1][size:] self.__buf.appendleft(remainder) ret = b''.join(ret_list) self.__size -= len(ret) return ret
[ "def", "read", "(", "self", ",", "size", "=", "None", ")", ":", "if", "size", "is", "None", ":", "size", "=", "self", ".", "__size", "ret_list", "=", "[", "]", "while", "size", ">", "0", "and", "self", ".", "__buf", ":", "data", "=", "self", "....
Read at most size bytes from this buffer. Bytes read from this buffer are consumed and are permanently removed. Args: size: If provided, read no more than size bytes from the buffer. Otherwise, this reads the entire buffer. Returns: The bytes read from this buffer.
[ "Read", "at", "most", "size", "bytes", "from", "this", "buffer", "." ]
f3745a7ea535aa0e88b0650c16479b696d6fd446
https://github.com/google/apitools/blob/f3745a7ea535aa0e88b0650c16479b696d6fd446/apitools/base/py/compression.py#L123-L147
train
207,643
google/apitools
apitools/gen/extended_descriptor.py
_WriteFile
def _WriteFile(file_descriptor, package, version, proto_printer): """Write the given extended file descriptor to the printer.""" proto_printer.PrintPreamble(package, version, file_descriptor) _PrintEnums(proto_printer, file_descriptor.enum_types) _PrintMessages(proto_printer, file_descriptor.message_types) custom_json_mappings = _FetchCustomMappings(file_descriptor.enum_types) custom_json_mappings.extend( _FetchCustomMappings(file_descriptor.message_types)) for mapping in custom_json_mappings: proto_printer.PrintCustomJsonMapping(mapping)
python
def _WriteFile(file_descriptor, package, version, proto_printer): """Write the given extended file descriptor to the printer.""" proto_printer.PrintPreamble(package, version, file_descriptor) _PrintEnums(proto_printer, file_descriptor.enum_types) _PrintMessages(proto_printer, file_descriptor.message_types) custom_json_mappings = _FetchCustomMappings(file_descriptor.enum_types) custom_json_mappings.extend( _FetchCustomMappings(file_descriptor.message_types)) for mapping in custom_json_mappings: proto_printer.PrintCustomJsonMapping(mapping)
[ "def", "_WriteFile", "(", "file_descriptor", ",", "package", ",", "version", ",", "proto_printer", ")", ":", "proto_printer", ".", "PrintPreamble", "(", "package", ",", "version", ",", "file_descriptor", ")", "_PrintEnums", "(", "proto_printer", ",", "file_descrip...
Write the given extended file descriptor to the printer.
[ "Write", "the", "given", "extended", "file", "descriptor", "to", "the", "printer", "." ]
f3745a7ea535aa0e88b0650c16479b696d6fd446
https://github.com/google/apitools/blob/f3745a7ea535aa0e88b0650c16479b696d6fd446/apitools/gen/extended_descriptor.py#L162-L171
train
207,644
google/apitools
apitools/gen/extended_descriptor.py
WriteMessagesFile
def WriteMessagesFile(file_descriptor, package, version, printer): """Write the given extended file descriptor to out as a message file.""" _WriteFile(file_descriptor, package, version, _Proto2Printer(printer))
python
def WriteMessagesFile(file_descriptor, package, version, printer): """Write the given extended file descriptor to out as a message file.""" _WriteFile(file_descriptor, package, version, _Proto2Printer(printer))
[ "def", "WriteMessagesFile", "(", "file_descriptor", ",", "package", ",", "version", ",", "printer", ")", ":", "_WriteFile", "(", "file_descriptor", ",", "package", ",", "version", ",", "_Proto2Printer", "(", "printer", ")", ")" ]
Write the given extended file descriptor to out as a message file.
[ "Write", "the", "given", "extended", "file", "descriptor", "to", "out", "as", "a", "message", "file", "." ]
f3745a7ea535aa0e88b0650c16479b696d6fd446
https://github.com/google/apitools/blob/f3745a7ea535aa0e88b0650c16479b696d6fd446/apitools/gen/extended_descriptor.py#L174-L177
train
207,645
google/apitools
apitools/gen/extended_descriptor.py
WritePythonFile
def WritePythonFile(file_descriptor, package, version, printer): """Write the given extended file descriptor to out.""" _WriteFile(file_descriptor, package, version, _ProtoRpcPrinter(printer))
python
def WritePythonFile(file_descriptor, package, version, printer): """Write the given extended file descriptor to out.""" _WriteFile(file_descriptor, package, version, _ProtoRpcPrinter(printer))
[ "def", "WritePythonFile", "(", "file_descriptor", ",", "package", ",", "version", ",", "printer", ")", ":", "_WriteFile", "(", "file_descriptor", ",", "package", ",", "version", ",", "_ProtoRpcPrinter", "(", "printer", ")", ")" ]
Write the given extended file descriptor to out.
[ "Write", "the", "given", "extended", "file", "descriptor", "to", "out", "." ]
f3745a7ea535aa0e88b0650c16479b696d6fd446
https://github.com/google/apitools/blob/f3745a7ea535aa0e88b0650c16479b696d6fd446/apitools/gen/extended_descriptor.py#L180-L183
train
207,646
google/apitools
apitools/gen/extended_descriptor.py
_FetchCustomMappings
def _FetchCustomMappings(descriptor_ls): """Find and return all custom mappings for descriptors in descriptor_ls.""" custom_mappings = [] for descriptor in descriptor_ls: if isinstance(descriptor, ExtendedEnumDescriptor): custom_mappings.extend( _FormatCustomJsonMapping('Enum', m, descriptor) for m in descriptor.enum_mappings) elif isinstance(descriptor, ExtendedMessageDescriptor): custom_mappings.extend( _FormatCustomJsonMapping('Field', m, descriptor) for m in descriptor.field_mappings) custom_mappings.extend( _FetchCustomMappings(descriptor.enum_types)) custom_mappings.extend( _FetchCustomMappings(descriptor.message_types)) return custom_mappings
python
def _FetchCustomMappings(descriptor_ls): """Find and return all custom mappings for descriptors in descriptor_ls.""" custom_mappings = [] for descriptor in descriptor_ls: if isinstance(descriptor, ExtendedEnumDescriptor): custom_mappings.extend( _FormatCustomJsonMapping('Enum', m, descriptor) for m in descriptor.enum_mappings) elif isinstance(descriptor, ExtendedMessageDescriptor): custom_mappings.extend( _FormatCustomJsonMapping('Field', m, descriptor) for m in descriptor.field_mappings) custom_mappings.extend( _FetchCustomMappings(descriptor.enum_types)) custom_mappings.extend( _FetchCustomMappings(descriptor.message_types)) return custom_mappings
[ "def", "_FetchCustomMappings", "(", "descriptor_ls", ")", ":", "custom_mappings", "=", "[", "]", "for", "descriptor", "in", "descriptor_ls", ":", "if", "isinstance", "(", "descriptor", ",", "ExtendedEnumDescriptor", ")", ":", "custom_mappings", ".", "extend", "(",...
Find and return all custom mappings for descriptors in descriptor_ls.
[ "Find", "and", "return", "all", "custom", "mappings", "for", "descriptors", "in", "descriptor_ls", "." ]
f3745a7ea535aa0e88b0650c16479b696d6fd446
https://github.com/google/apitools/blob/f3745a7ea535aa0e88b0650c16479b696d6fd446/apitools/gen/extended_descriptor.py#L201-L217
train
207,647
google/apitools
apitools/gen/extended_descriptor.py
_PrintEnums
def _PrintEnums(proto_printer, enum_types): """Print all enums to the given proto_printer.""" enum_types = sorted(enum_types, key=operator.attrgetter('name')) for enum_type in enum_types: proto_printer.PrintEnum(enum_type)
python
def _PrintEnums(proto_printer, enum_types): """Print all enums to the given proto_printer.""" enum_types = sorted(enum_types, key=operator.attrgetter('name')) for enum_type in enum_types: proto_printer.PrintEnum(enum_type)
[ "def", "_PrintEnums", "(", "proto_printer", ",", "enum_types", ")", ":", "enum_types", "=", "sorted", "(", "enum_types", ",", "key", "=", "operator", ".", "attrgetter", "(", "'name'", ")", ")", "for", "enum_type", "in", "enum_types", ":", "proto_printer", "....
Print all enums to the given proto_printer.
[ "Print", "all", "enums", "to", "the", "given", "proto_printer", "." ]
f3745a7ea535aa0e88b0650c16479b696d6fd446
https://github.com/google/apitools/blob/f3745a7ea535aa0e88b0650c16479b696d6fd446/apitools/gen/extended_descriptor.py#L471-L475
train
207,648
google/apitools
apitools/gen/extended_descriptor.py
_Proto2Printer.__PrintMessageCommentLines
def __PrintMessageCommentLines(self, message_type): """Print the description of this message.""" description = message_type.description or '%s message type.' % ( message_type.name) width = self.__printer.CalculateWidth() - 3 for line in textwrap.wrap(description, width): self.__printer('// %s', line) PrintIndentedDescriptions(self.__printer, message_type.enum_types, 'Enums', prefix='// ') PrintIndentedDescriptions(self.__printer, message_type.message_types, 'Messages', prefix='// ') PrintIndentedDescriptions(self.__printer, message_type.fields, 'Fields', prefix='// ')
python
def __PrintMessageCommentLines(self, message_type): """Print the description of this message.""" description = message_type.description or '%s message type.' % ( message_type.name) width = self.__printer.CalculateWidth() - 3 for line in textwrap.wrap(description, width): self.__printer('// %s', line) PrintIndentedDescriptions(self.__printer, message_type.enum_types, 'Enums', prefix='// ') PrintIndentedDescriptions(self.__printer, message_type.message_types, 'Messages', prefix='// ') PrintIndentedDescriptions(self.__printer, message_type.fields, 'Fields', prefix='// ')
[ "def", "__PrintMessageCommentLines", "(", "self", ",", "message_type", ")", ":", "description", "=", "message_type", ".", "description", "or", "'%s message type.'", "%", "(", "message_type", ".", "name", ")", "width", "=", "self", ".", "__printer", ".", "Calcula...
Print the description of this message.
[ "Print", "the", "description", "of", "this", "message", "." ]
f3745a7ea535aa0e88b0650c16479b696d6fd446
https://github.com/google/apitools/blob/f3745a7ea535aa0e88b0650c16479b696d6fd446/apitools/gen/extended_descriptor.py#L299-L311
train
207,649
google/apitools
apitools/gen/extended_descriptor.py
_ProtoRpcPrinter.__PrintAdditionalImports
def __PrintAdditionalImports(self, imports): """Print additional imports needed for protorpc.""" google_imports = [x for x in imports if 'google' in x] other_imports = [x for x in imports if 'google' not in x] if other_imports: for import_ in sorted(other_imports): self.__printer(import_) self.__printer() # Note: If we ever were going to add imports from this package, we'd # need to sort those out and put them at the end. if google_imports: for import_ in sorted(google_imports): self.__printer(import_) self.__printer()
python
def __PrintAdditionalImports(self, imports): """Print additional imports needed for protorpc.""" google_imports = [x for x in imports if 'google' in x] other_imports = [x for x in imports if 'google' not in x] if other_imports: for import_ in sorted(other_imports): self.__printer(import_) self.__printer() # Note: If we ever were going to add imports from this package, we'd # need to sort those out and put them at the end. if google_imports: for import_ in sorted(google_imports): self.__printer(import_) self.__printer()
[ "def", "__PrintAdditionalImports", "(", "self", ",", "imports", ")", ":", "google_imports", "=", "[", "x", "for", "x", "in", "imports", "if", "'google'", "in", "x", "]", "other_imports", "=", "[", "x", "for", "x", "in", "imports", "if", "'google'", "not"...
Print additional imports needed for protorpc.
[ "Print", "additional", "imports", "needed", "for", "protorpc", "." ]
f3745a7ea535aa0e88b0650c16479b696d6fd446
https://github.com/google/apitools/blob/f3745a7ea535aa0e88b0650c16479b696d6fd446/apitools/gen/extended_descriptor.py#L394-L407
train
207,650
google/apitools
apitools/base/protorpclite/util.py
positional
def positional(max_positional_args): """A decorator that declares only the first N arguments may be positional. This decorator makes it easy to support Python 3 style keyword-only parameters. For example, in Python 3 it is possible to write: def fn(pos1, *, kwonly1=None, kwonly1=None): ... All named parameters after * must be a keyword: fn(10, 'kw1', 'kw2') # Raises exception. fn(10, kwonly1='kw1') # Ok. Example: To define a function like above, do: @positional(1) def fn(pos1, kwonly1=None, kwonly2=None): ... If no default value is provided to a keyword argument, it becomes a required keyword argument: @positional(0) def fn(required_kw): ... This must be called with the keyword parameter: fn() # Raises exception. fn(10) # Raises exception. fn(required_kw=10) # Ok. When defining instance or class methods always remember to account for 'self' and 'cls': class MyClass(object): @positional(2) def my_method(self, pos1, kwonly1=None): ... @classmethod @positional(2) def my_method(cls, pos1, kwonly1=None): ... One can omit the argument to 'positional' altogether, and then no arguments with default values may be passed positionally. This would be equivalent to placing a '*' before the first argument with a default value in Python 3. If there are no arguments with default values, and no argument is given to 'positional', an error is raised. @positional def fn(arg1, arg2, required_kw1=None, required_kw2=0): ... fn(1, 3, 5) # Raises exception. fn(1, 3) # Ok. fn(1, 3, required_kw1=5) # Ok. Args: max_positional_arguments: Maximum number of positional arguments. All parameters after the this index must be keyword only. Returns: A decorator that prevents using arguments after max_positional_args from being used as positional parameters. Raises: TypeError if a keyword-only argument is provided as a positional parameter. ValueError if no maximum number of arguments is provided and the function has no arguments with default values. 
""" def positional_decorator(wrapped): """Creates a function wraper to enforce number of arguments.""" @functools.wraps(wrapped) def positional_wrapper(*args, **kwargs): if len(args) > max_positional_args: plural_s = '' if max_positional_args != 1: plural_s = 's' raise TypeError('%s() takes at most %d positional argument%s ' '(%d given)' % (wrapped.__name__, max_positional_args, plural_s, len(args))) return wrapped(*args, **kwargs) return positional_wrapper if isinstance(max_positional_args, six.integer_types): return positional_decorator else: args, _, _, defaults = inspect.getargspec(max_positional_args) if defaults is None: raise ValueError( 'Functions with no keyword arguments must specify ' 'max_positional_args') return positional(len(args) - len(defaults))(max_positional_args)
python
def positional(max_positional_args): """A decorator that declares only the first N arguments may be positional. This decorator makes it easy to support Python 3 style keyword-only parameters. For example, in Python 3 it is possible to write: def fn(pos1, *, kwonly1=None, kwonly1=None): ... All named parameters after * must be a keyword: fn(10, 'kw1', 'kw2') # Raises exception. fn(10, kwonly1='kw1') # Ok. Example: To define a function like above, do: @positional(1) def fn(pos1, kwonly1=None, kwonly2=None): ... If no default value is provided to a keyword argument, it becomes a required keyword argument: @positional(0) def fn(required_kw): ... This must be called with the keyword parameter: fn() # Raises exception. fn(10) # Raises exception. fn(required_kw=10) # Ok. When defining instance or class methods always remember to account for 'self' and 'cls': class MyClass(object): @positional(2) def my_method(self, pos1, kwonly1=None): ... @classmethod @positional(2) def my_method(cls, pos1, kwonly1=None): ... One can omit the argument to 'positional' altogether, and then no arguments with default values may be passed positionally. This would be equivalent to placing a '*' before the first argument with a default value in Python 3. If there are no arguments with default values, and no argument is given to 'positional', an error is raised. @positional def fn(arg1, arg2, required_kw1=None, required_kw2=0): ... fn(1, 3, 5) # Raises exception. fn(1, 3) # Ok. fn(1, 3, required_kw1=5) # Ok. Args: max_positional_arguments: Maximum number of positional arguments. All parameters after the this index must be keyword only. Returns: A decorator that prevents using arguments after max_positional_args from being used as positional parameters. Raises: TypeError if a keyword-only argument is provided as a positional parameter. ValueError if no maximum number of arguments is provided and the function has no arguments with default values. 
""" def positional_decorator(wrapped): """Creates a function wraper to enforce number of arguments.""" @functools.wraps(wrapped) def positional_wrapper(*args, **kwargs): if len(args) > max_positional_args: plural_s = '' if max_positional_args != 1: plural_s = 's' raise TypeError('%s() takes at most %d positional argument%s ' '(%d given)' % (wrapped.__name__, max_positional_args, plural_s, len(args))) return wrapped(*args, **kwargs) return positional_wrapper if isinstance(max_positional_args, six.integer_types): return positional_decorator else: args, _, _, defaults = inspect.getargspec(max_positional_args) if defaults is None: raise ValueError( 'Functions with no keyword arguments must specify ' 'max_positional_args') return positional(len(args) - len(defaults))(max_positional_args)
[ "def", "positional", "(", "max_positional_args", ")", ":", "def", "positional_decorator", "(", "wrapped", ")", ":", "\"\"\"Creates a function wraper to enforce number of arguments.\"\"\"", "@", "functools", ".", "wraps", "(", "wrapped", ")", "def", "positional_wrapper", "...
A decorator that declares only the first N arguments may be positional. This decorator makes it easy to support Python 3 style keyword-only parameters. For example, in Python 3 it is possible to write: def fn(pos1, *, kwonly1=None, kwonly1=None): ... All named parameters after * must be a keyword: fn(10, 'kw1', 'kw2') # Raises exception. fn(10, kwonly1='kw1') # Ok. Example: To define a function like above, do: @positional(1) def fn(pos1, kwonly1=None, kwonly2=None): ... If no default value is provided to a keyword argument, it becomes a required keyword argument: @positional(0) def fn(required_kw): ... This must be called with the keyword parameter: fn() # Raises exception. fn(10) # Raises exception. fn(required_kw=10) # Ok. When defining instance or class methods always remember to account for 'self' and 'cls': class MyClass(object): @positional(2) def my_method(self, pos1, kwonly1=None): ... @classmethod @positional(2) def my_method(cls, pos1, kwonly1=None): ... One can omit the argument to 'positional' altogether, and then no arguments with default values may be passed positionally. This would be equivalent to placing a '*' before the first argument with a default value in Python 3. If there are no arguments with default values, and no argument is given to 'positional', an error is raised. @positional def fn(arg1, arg2, required_kw1=None, required_kw2=0): ... fn(1, 3, 5) # Raises exception. fn(1, 3) # Ok. fn(1, 3, required_kw1=5) # Ok. Args: max_positional_arguments: Maximum number of positional arguments. All parameters after the this index must be keyword only. Returns: A decorator that prevents using arguments after max_positional_args from being used as positional parameters. Raises: TypeError if a keyword-only argument is provided as a positional parameter. ValueError if no maximum number of arguments is provided and the function has no arguments with default values.
[ "A", "decorator", "that", "declares", "only", "the", "first", "N", "arguments", "may", "be", "positional", "." ]
f3745a7ea535aa0e88b0650c16479b696d6fd446
https://github.com/google/apitools/blob/f3745a7ea535aa0e88b0650c16479b696d6fd446/apitools/base/protorpclite/util.py#L55-L155
train
207,651
google/apitools
apitools/base/protorpclite/util.py
get_package_for_module
def get_package_for_module(module): """Get package name for a module. Helper calculates the package name of a module. Args: module: Module to get name for. If module is a string, try to find module in sys.modules. Returns: If module contains 'package' attribute, uses that as package name. Else, if module is not the '__main__' module, the module __name__. Else, the base name of the module file name. Else None. """ if isinstance(module, six.string_types): try: module = sys.modules[module] except KeyError: return None try: return six.text_type(module.package) except AttributeError: if module.__name__ == '__main__': try: file_name = module.__file__ except AttributeError: pass else: base_name = os.path.basename(file_name) split_name = os.path.splitext(base_name) if len(split_name) == 1: return six.text_type(base_name) return u'.'.join(split_name[:-1]) return six.text_type(module.__name__)
python
def get_package_for_module(module): """Get package name for a module. Helper calculates the package name of a module. Args: module: Module to get name for. If module is a string, try to find module in sys.modules. Returns: If module contains 'package' attribute, uses that as package name. Else, if module is not the '__main__' module, the module __name__. Else, the base name of the module file name. Else None. """ if isinstance(module, six.string_types): try: module = sys.modules[module] except KeyError: return None try: return six.text_type(module.package) except AttributeError: if module.__name__ == '__main__': try: file_name = module.__file__ except AttributeError: pass else: base_name = os.path.basename(file_name) split_name = os.path.splitext(base_name) if len(split_name) == 1: return six.text_type(base_name) return u'.'.join(split_name[:-1]) return six.text_type(module.__name__)
[ "def", "get_package_for_module", "(", "module", ")", ":", "if", "isinstance", "(", "module", ",", "six", ".", "string_types", ")", ":", "try", ":", "module", "=", "sys", ".", "modules", "[", "module", "]", "except", "KeyError", ":", "return", "None", "tr...
Get package name for a module. Helper calculates the package name of a module. Args: module: Module to get name for. If module is a string, try to find module in sys.modules. Returns: If module contains 'package' attribute, uses that as package name. Else, if module is not the '__main__' module, the module __name__. Else, the base name of the module file name. Else None.
[ "Get", "package", "name", "for", "a", "module", "." ]
f3745a7ea535aa0e88b0650c16479b696d6fd446
https://github.com/google/apitools/blob/f3745a7ea535aa0e88b0650c16479b696d6fd446/apitools/base/protorpclite/util.py#L159-L194
train
207,652
google/apitools
apitools/base/protorpclite/util.py
decode_datetime
def decode_datetime(encoded_datetime): """Decode a DateTimeField parameter from a string to a python datetime. Args: encoded_datetime: A string in RFC 3339 format. Returns: A datetime object with the date and time specified in encoded_datetime. Raises: ValueError: If the string is not in a recognized format. """ # Check if the string includes a time zone offset. Break out the # part that doesn't include time zone info. Convert to uppercase # because all our comparisons should be case-insensitive. time_zone_match = _TIME_ZONE_RE.search(encoded_datetime) if time_zone_match: time_string = encoded_datetime[:time_zone_match.start(1)].upper() else: time_string = encoded_datetime.upper() if '.' in time_string: format_string = '%Y-%m-%dT%H:%M:%S.%f' else: format_string = '%Y-%m-%dT%H:%M:%S' decoded_datetime = datetime.datetime.strptime(time_string, format_string) if not time_zone_match: return decoded_datetime # Time zone info was included in the parameter. Add a tzinfo # object to the datetime. Datetimes can't be changed after they're # created, so we'll need to create a new one. if time_zone_match.group('z'): offset_minutes = 0 else: sign = time_zone_match.group('sign') hours, minutes = [int(value) for value in time_zone_match.group('hours', 'minutes')] offset_minutes = hours * 60 + minutes if sign == '-': offset_minutes *= -1 return datetime.datetime(decoded_datetime.year, decoded_datetime.month, decoded_datetime.day, decoded_datetime.hour, decoded_datetime.minute, decoded_datetime.second, decoded_datetime.microsecond, TimeZoneOffset(offset_minutes))
python
def decode_datetime(encoded_datetime): """Decode a DateTimeField parameter from a string to a python datetime. Args: encoded_datetime: A string in RFC 3339 format. Returns: A datetime object with the date and time specified in encoded_datetime. Raises: ValueError: If the string is not in a recognized format. """ # Check if the string includes a time zone offset. Break out the # part that doesn't include time zone info. Convert to uppercase # because all our comparisons should be case-insensitive. time_zone_match = _TIME_ZONE_RE.search(encoded_datetime) if time_zone_match: time_string = encoded_datetime[:time_zone_match.start(1)].upper() else: time_string = encoded_datetime.upper() if '.' in time_string: format_string = '%Y-%m-%dT%H:%M:%S.%f' else: format_string = '%Y-%m-%dT%H:%M:%S' decoded_datetime = datetime.datetime.strptime(time_string, format_string) if not time_zone_match: return decoded_datetime # Time zone info was included in the parameter. Add a tzinfo # object to the datetime. Datetimes can't be changed after they're # created, so we'll need to create a new one. if time_zone_match.group('z'): offset_minutes = 0 else: sign = time_zone_match.group('sign') hours, minutes = [int(value) for value in time_zone_match.group('hours', 'minutes')] offset_minutes = hours * 60 + minutes if sign == '-': offset_minutes *= -1 return datetime.datetime(decoded_datetime.year, decoded_datetime.month, decoded_datetime.day, decoded_datetime.hour, decoded_datetime.minute, decoded_datetime.second, decoded_datetime.microsecond, TimeZoneOffset(offset_minutes))
[ "def", "decode_datetime", "(", "encoded_datetime", ")", ":", "# Check if the string includes a time zone offset. Break out the", "# part that doesn't include time zone info. Convert to uppercase", "# because all our comparisons should be case-insensitive.", "time_zone_match", "=", "_TIME_ZON...
Decode a DateTimeField parameter from a string to a python datetime. Args: encoded_datetime: A string in RFC 3339 format. Returns: A datetime object with the date and time specified in encoded_datetime. Raises: ValueError: If the string is not in a recognized format.
[ "Decode", "a", "DateTimeField", "parameter", "from", "a", "string", "to", "a", "python", "datetime", "." ]
f3745a7ea535aa0e88b0650c16479b696d6fd446
https://github.com/google/apitools/blob/f3745a7ea535aa0e88b0650c16479b696d6fd446/apitools/base/protorpclite/util.py#L241-L292
train
207,653
google/apitools
apitools/base/protorpclite/message_types.py
DateTimeField.value_from_message
def value_from_message(self, message): """Convert DateTimeMessage to a datetime. Args: A DateTimeMessage instance. Returns: A datetime instance. """ message = super(DateTimeField, self).value_from_message(message) if message.time_zone_offset is None: return datetime.datetime.utcfromtimestamp( message.milliseconds / 1000.0) # Need to subtract the time zone offset, because when we call # datetime.fromtimestamp, it will add the time zone offset to the # value we pass. milliseconds = (message.milliseconds - 60000 * message.time_zone_offset) timezone = util.TimeZoneOffset(message.time_zone_offset) return datetime.datetime.fromtimestamp(milliseconds / 1000.0, tz=timezone)
python
def value_from_message(self, message): """Convert DateTimeMessage to a datetime. Args: A DateTimeMessage instance. Returns: A datetime instance. """ message = super(DateTimeField, self).value_from_message(message) if message.time_zone_offset is None: return datetime.datetime.utcfromtimestamp( message.milliseconds / 1000.0) # Need to subtract the time zone offset, because when we call # datetime.fromtimestamp, it will add the time zone offset to the # value we pass. milliseconds = (message.milliseconds - 60000 * message.time_zone_offset) timezone = util.TimeZoneOffset(message.time_zone_offset) return datetime.datetime.fromtimestamp(milliseconds / 1000.0, tz=timezone)
[ "def", "value_from_message", "(", "self", ",", "message", ")", ":", "message", "=", "super", "(", "DateTimeField", ",", "self", ")", ".", "value_from_message", "(", "message", ")", "if", "message", ".", "time_zone_offset", "is", "None", ":", "return", "datet...
Convert DateTimeMessage to a datetime. Args: A DateTimeMessage instance. Returns: A datetime instance.
[ "Convert", "DateTimeMessage", "to", "a", "datetime", "." ]
f3745a7ea535aa0e88b0650c16479b696d6fd446
https://github.com/google/apitools/blob/f3745a7ea535aa0e88b0650c16479b696d6fd446/apitools/base/protorpclite/message_types.py#L70-L92
train
207,654
google/apitools
apitools/base/py/util.py
DetectGce
def DetectGce(): """Determine whether or not we're running on GCE. This is based on: https://cloud.google.com/compute/docs/metadata#runninggce Returns: True iff we're running on a GCE instance. """ metadata_url = 'http://{}'.format( os.environ.get('GCE_METADATA_ROOT', 'metadata.google.internal')) try: o = urllib_request.build_opener(urllib_request.ProxyHandler({})).open( urllib_request.Request( metadata_url, headers={'Metadata-Flavor': 'Google'})) except urllib_error.URLError: return False return (o.getcode() == http_client.OK and o.headers.get('metadata-flavor') == 'Google')
python
def DetectGce(): """Determine whether or not we're running on GCE. This is based on: https://cloud.google.com/compute/docs/metadata#runninggce Returns: True iff we're running on a GCE instance. """ metadata_url = 'http://{}'.format( os.environ.get('GCE_METADATA_ROOT', 'metadata.google.internal')) try: o = urllib_request.build_opener(urllib_request.ProxyHandler({})).open( urllib_request.Request( metadata_url, headers={'Metadata-Flavor': 'Google'})) except urllib_error.URLError: return False return (o.getcode() == http_client.OK and o.headers.get('metadata-flavor') == 'Google')
[ "def", "DetectGce", "(", ")", ":", "metadata_url", "=", "'http://{}'", ".", "format", "(", "os", ".", "environ", ".", "get", "(", "'GCE_METADATA_ROOT'", ",", "'metadata.google.internal'", ")", ")", "try", ":", "o", "=", "urllib_request", ".", "build_opener", ...
Determine whether or not we're running on GCE. This is based on: https://cloud.google.com/compute/docs/metadata#runninggce Returns: True iff we're running on a GCE instance.
[ "Determine", "whether", "or", "not", "we", "re", "running", "on", "GCE", "." ]
f3745a7ea535aa0e88b0650c16479b696d6fd446
https://github.com/google/apitools/blob/f3745a7ea535aa0e88b0650c16479b696d6fd446/apitools/base/py/util.py#L55-L73
train
207,655
google/apitools
apitools/base/py/util.py
NormalizeScopes
def NormalizeScopes(scope_spec): """Normalize scope_spec to a set of strings.""" if isinstance(scope_spec, six.string_types): return set(scope_spec.split(' ')) elif isinstance(scope_spec, collections.Iterable): return set(scope_spec) raise exceptions.TypecheckError( 'NormalizeScopes expected string or iterable, found %s' % ( type(scope_spec),))
python
def NormalizeScopes(scope_spec): """Normalize scope_spec to a set of strings.""" if isinstance(scope_spec, six.string_types): return set(scope_spec.split(' ')) elif isinstance(scope_spec, collections.Iterable): return set(scope_spec) raise exceptions.TypecheckError( 'NormalizeScopes expected string or iterable, found %s' % ( type(scope_spec),))
[ "def", "NormalizeScopes", "(", "scope_spec", ")", ":", "if", "isinstance", "(", "scope_spec", ",", "six", ".", "string_types", ")", ":", "return", "set", "(", "scope_spec", ".", "split", "(", "' '", ")", ")", "elif", "isinstance", "(", "scope_spec", ",", ...
Normalize scope_spec to a set of strings.
[ "Normalize", "scope_spec", "to", "a", "set", "of", "strings", "." ]
f3745a7ea535aa0e88b0650c16479b696d6fd446
https://github.com/google/apitools/blob/f3745a7ea535aa0e88b0650c16479b696d6fd446/apitools/base/py/util.py#L76-L84
train
207,656
google/apitools
apitools/base/py/util.py
CalculateWaitForRetry
def CalculateWaitForRetry(retry_attempt, max_wait=60): """Calculates amount of time to wait before a retry attempt. Wait time grows exponentially with the number of attempts. A random amount of jitter is added to spread out retry attempts from different clients. Args: retry_attempt: Retry attempt counter. max_wait: Upper bound for wait time [seconds]. Returns: Number of seconds to wait before retrying request. """ wait_time = 2 ** retry_attempt max_jitter = wait_time / 4.0 wait_time += random.uniform(-max_jitter, max_jitter) return max(1, min(wait_time, max_wait))
python
def CalculateWaitForRetry(retry_attempt, max_wait=60): """Calculates amount of time to wait before a retry attempt. Wait time grows exponentially with the number of attempts. A random amount of jitter is added to spread out retry attempts from different clients. Args: retry_attempt: Retry attempt counter. max_wait: Upper bound for wait time [seconds]. Returns: Number of seconds to wait before retrying request. """ wait_time = 2 ** retry_attempt max_jitter = wait_time / 4.0 wait_time += random.uniform(-max_jitter, max_jitter) return max(1, min(wait_time, max_wait))
[ "def", "CalculateWaitForRetry", "(", "retry_attempt", ",", "max_wait", "=", "60", ")", ":", "wait_time", "=", "2", "**", "retry_attempt", "max_jitter", "=", "wait_time", "/", "4.0", "wait_time", "+=", "random", ".", "uniform", "(", "-", "max_jitter", ",", "m...
Calculates amount of time to wait before a retry attempt. Wait time grows exponentially with the number of attempts. A random amount of jitter is added to spread out retry attempts from different clients. Args: retry_attempt: Retry attempt counter. max_wait: Upper bound for wait time [seconds]. Returns: Number of seconds to wait before retrying request.
[ "Calculates", "amount", "of", "time", "to", "wait", "before", "a", "retry", "attempt", "." ]
f3745a7ea535aa0e88b0650c16479b696d6fd446
https://github.com/google/apitools/blob/f3745a7ea535aa0e88b0650c16479b696d6fd446/apitools/base/py/util.py#L138-L157
train
207,657
google/apitools
apitools/base/py/util.py
AcceptableMimeType
def AcceptableMimeType(accept_patterns, mime_type): """Return True iff mime_type is acceptable for one of accept_patterns. Note that this function assumes that all patterns in accept_patterns will be simple types of the form "type/subtype", where one or both of these can be "*". We do not support parameters (i.e. "; q=") in patterns. Args: accept_patterns: list of acceptable MIME types. mime_type: the mime type we would like to match. Returns: Whether or not mime_type matches (at least) one of these patterns. """ if '/' not in mime_type: raise exceptions.InvalidUserInputError( 'Invalid MIME type: "%s"' % mime_type) unsupported_patterns = [p for p in accept_patterns if ';' in p] if unsupported_patterns: raise exceptions.GeneratedClientError( 'MIME patterns with parameter unsupported: "%s"' % ', '.join( unsupported_patterns)) def MimeTypeMatches(pattern, mime_type): """Return True iff mime_type is acceptable for pattern.""" # Some systems use a single '*' instead of '*/*'. if pattern == '*': pattern = '*/*' return all(accept in ('*', provided) for accept, provided in zip(pattern.split('/'), mime_type.split('/'))) return any(MimeTypeMatches(pattern, mime_type) for pattern in accept_patterns)
python
def AcceptableMimeType(accept_patterns, mime_type): """Return True iff mime_type is acceptable for one of accept_patterns. Note that this function assumes that all patterns in accept_patterns will be simple types of the form "type/subtype", where one or both of these can be "*". We do not support parameters (i.e. "; q=") in patterns. Args: accept_patterns: list of acceptable MIME types. mime_type: the mime type we would like to match. Returns: Whether or not mime_type matches (at least) one of these patterns. """ if '/' not in mime_type: raise exceptions.InvalidUserInputError( 'Invalid MIME type: "%s"' % mime_type) unsupported_patterns = [p for p in accept_patterns if ';' in p] if unsupported_patterns: raise exceptions.GeneratedClientError( 'MIME patterns with parameter unsupported: "%s"' % ', '.join( unsupported_patterns)) def MimeTypeMatches(pattern, mime_type): """Return True iff mime_type is acceptable for pattern.""" # Some systems use a single '*' instead of '*/*'. if pattern == '*': pattern = '*/*' return all(accept in ('*', provided) for accept, provided in zip(pattern.split('/'), mime_type.split('/'))) return any(MimeTypeMatches(pattern, mime_type) for pattern in accept_patterns)
[ "def", "AcceptableMimeType", "(", "accept_patterns", ",", "mime_type", ")", ":", "if", "'/'", "not", "in", "mime_type", ":", "raise", "exceptions", ".", "InvalidUserInputError", "(", "'Invalid MIME type: \"%s\"'", "%", "mime_type", ")", "unsupported_patterns", "=", ...
Return True iff mime_type is acceptable for one of accept_patterns. Note that this function assumes that all patterns in accept_patterns will be simple types of the form "type/subtype", where one or both of these can be "*". We do not support parameters (i.e. "; q=") in patterns. Args: accept_patterns: list of acceptable MIME types. mime_type: the mime type we would like to match. Returns: Whether or not mime_type matches (at least) one of these patterns.
[ "Return", "True", "iff", "mime_type", "is", "acceptable", "for", "one", "of", "accept_patterns", "." ]
f3745a7ea535aa0e88b0650c16479b696d6fd446
https://github.com/google/apitools/blob/f3745a7ea535aa0e88b0650c16479b696d6fd446/apitools/base/py/util.py#L160-L193
train
207,658
google/apitools
apitools/base/py/util.py
MapParamNames
def MapParamNames(params, request_type): """Reverse parameter remappings for URL construction.""" return [encoding.GetCustomJsonFieldMapping(request_type, json_name=p) or p for p in params]
python
def MapParamNames(params, request_type): """Reverse parameter remappings for URL construction.""" return [encoding.GetCustomJsonFieldMapping(request_type, json_name=p) or p for p in params]
[ "def", "MapParamNames", "(", "params", ",", "request_type", ")", ":", "return", "[", "encoding", ".", "GetCustomJsonFieldMapping", "(", "request_type", ",", "json_name", "=", "p", ")", "or", "p", "for", "p", "in", "params", "]" ]
Reverse parameter remappings for URL construction.
[ "Reverse", "parameter", "remappings", "for", "URL", "construction", "." ]
f3745a7ea535aa0e88b0650c16479b696d6fd446
https://github.com/google/apitools/blob/f3745a7ea535aa0e88b0650c16479b696d6fd446/apitools/base/py/util.py#L196-L199
train
207,659
google/apitools
apitools/base/py/extra_types.py
_JsonValueToPythonValue
def _JsonValueToPythonValue(json_value): """Convert the given JsonValue to a json string.""" util.Typecheck(json_value, JsonValue) _ValidateJsonValue(json_value) if json_value.is_null: return None entries = [(f, json_value.get_assigned_value(f.name)) for f in json_value.all_fields()] assigned_entries = [(f, value) for f, value in entries if value is not None] field, value = assigned_entries[0] if not isinstance(field, messages.MessageField): return value elif field.message_type is JsonObject: return _JsonObjectToPythonValue(value) elif field.message_type is JsonArray: return _JsonArrayToPythonValue(value)
python
def _JsonValueToPythonValue(json_value): """Convert the given JsonValue to a json string.""" util.Typecheck(json_value, JsonValue) _ValidateJsonValue(json_value) if json_value.is_null: return None entries = [(f, json_value.get_assigned_value(f.name)) for f in json_value.all_fields()] assigned_entries = [(f, value) for f, value in entries if value is not None] field, value = assigned_entries[0] if not isinstance(field, messages.MessageField): return value elif field.message_type is JsonObject: return _JsonObjectToPythonValue(value) elif field.message_type is JsonArray: return _JsonArrayToPythonValue(value)
[ "def", "_JsonValueToPythonValue", "(", "json_value", ")", ":", "util", ".", "Typecheck", "(", "json_value", ",", "JsonValue", ")", "_ValidateJsonValue", "(", "json_value", ")", "if", "json_value", ".", "is_null", ":", "return", "None", "entries", "=", "[", "("...
Convert the given JsonValue to a json string.
[ "Convert", "the", "given", "JsonValue", "to", "a", "json", "string", "." ]
f3745a7ea535aa0e88b0650c16479b696d6fd446
https://github.com/google/apitools/blob/f3745a7ea535aa0e88b0650c16479b696d6fd446/apitools/base/py/extra_types.py#L83-L99
train
207,660
google/apitools
apitools/base/py/extra_types.py
_PythonValueToJsonValue
def _PythonValueToJsonValue(py_value): """Convert the given python value to a JsonValue.""" if py_value is None: return JsonValue(is_null=True) if isinstance(py_value, bool): return JsonValue(boolean_value=py_value) if isinstance(py_value, six.string_types): return JsonValue(string_value=py_value) if isinstance(py_value, numbers.Number): if isinstance(py_value, six.integer_types): if _MININT64 < py_value < _MAXINT64: return JsonValue(integer_value=py_value) return JsonValue(double_value=float(py_value)) if isinstance(py_value, dict): return JsonValue(object_value=_PythonValueToJsonObject(py_value)) if isinstance(py_value, collections.Iterable): return JsonValue(array_value=_PythonValueToJsonArray(py_value)) raise exceptions.InvalidDataError( 'Cannot convert "%s" to JsonValue' % py_value)
python
def _PythonValueToJsonValue(py_value): """Convert the given python value to a JsonValue.""" if py_value is None: return JsonValue(is_null=True) if isinstance(py_value, bool): return JsonValue(boolean_value=py_value) if isinstance(py_value, six.string_types): return JsonValue(string_value=py_value) if isinstance(py_value, numbers.Number): if isinstance(py_value, six.integer_types): if _MININT64 < py_value < _MAXINT64: return JsonValue(integer_value=py_value) return JsonValue(double_value=float(py_value)) if isinstance(py_value, dict): return JsonValue(object_value=_PythonValueToJsonObject(py_value)) if isinstance(py_value, collections.Iterable): return JsonValue(array_value=_PythonValueToJsonArray(py_value)) raise exceptions.InvalidDataError( 'Cannot convert "%s" to JsonValue' % py_value)
[ "def", "_PythonValueToJsonValue", "(", "py_value", ")", ":", "if", "py_value", "is", "None", ":", "return", "JsonValue", "(", "is_null", "=", "True", ")", "if", "isinstance", "(", "py_value", ",", "bool", ")", ":", "return", "JsonValue", "(", "boolean_value"...
Convert the given python value to a JsonValue.
[ "Convert", "the", "given", "python", "value", "to", "a", "JsonValue", "." ]
f3745a7ea535aa0e88b0650c16479b696d6fd446
https://github.com/google/apitools/blob/f3745a7ea535aa0e88b0650c16479b696d6fd446/apitools/base/py/extra_types.py#L117-L135
train
207,661
google/apitools
apitools/base/py/extra_types.py
_EncodeInt64Field
def _EncodeInt64Field(field, value): """Handle the special case of int64 as a string.""" capabilities = [ messages.Variant.INT64, messages.Variant.UINT64, ] if field.variant not in capabilities: return encoding.CodecResult(value=value, complete=False) if field.repeated: result = [str(x) for x in value] else: result = str(value) return encoding.CodecResult(value=result, complete=True)
python
def _EncodeInt64Field(field, value): """Handle the special case of int64 as a string.""" capabilities = [ messages.Variant.INT64, messages.Variant.UINT64, ] if field.variant not in capabilities: return encoding.CodecResult(value=value, complete=False) if field.repeated: result = [str(x) for x in value] else: result = str(value) return encoding.CodecResult(value=result, complete=True)
[ "def", "_EncodeInt64Field", "(", "field", ",", "value", ")", ":", "capabilities", "=", "[", "messages", ".", "Variant", ".", "INT64", ",", "messages", ".", "Variant", ".", "UINT64", ",", "]", "if", "field", ".", "variant", "not", "in", "capabilities", ":...
Handle the special case of int64 as a string.
[ "Handle", "the", "special", "case", "of", "int64", "as", "a", "string", "." ]
f3745a7ea535aa0e88b0650c16479b696d6fd446
https://github.com/google/apitools/blob/f3745a7ea535aa0e88b0650c16479b696d6fd446/apitools/base/py/extra_types.py#L269-L282
train
207,662
google/apitools
apitools/base/py/extra_types.py
_EncodeDateField
def _EncodeDateField(field, value): """Encoder for datetime.date objects.""" if field.repeated: result = [d.isoformat() for d in value] else: result = value.isoformat() return encoding.CodecResult(value=result, complete=True)
python
def _EncodeDateField(field, value): """Encoder for datetime.date objects.""" if field.repeated: result = [d.isoformat() for d in value] else: result = value.isoformat() return encoding.CodecResult(value=result, complete=True)
[ "def", "_EncodeDateField", "(", "field", ",", "value", ")", ":", "if", "field", ".", "repeated", ":", "result", "=", "[", "d", ".", "isoformat", "(", ")", "for", "d", "in", "value", "]", "else", ":", "result", "=", "value", ".", "isoformat", "(", "...
Encoder for datetime.date objects.
[ "Encoder", "for", "datetime", ".", "date", "objects", "." ]
f3745a7ea535aa0e88b0650c16479b696d6fd446
https://github.com/google/apitools/blob/f3745a7ea535aa0e88b0650c16479b696d6fd446/apitools/base/py/extra_types.py#L294-L300
train
207,663
google/apitools
apitools/gen/util.py
ReplaceHomoglyphs
def ReplaceHomoglyphs(s): """Returns s with unicode homoglyphs replaced by ascii equivalents.""" homoglyphs = { '\xa0': ' ', # &nbsp; ? '\u00e3': '', # TODO(gsfowler) drop after .proto spurious char elided '\u00a0': ' ', # &nbsp; ? '\u00a9': '(C)', # COPYRIGHT SIGN (would you believe "asciiglyph"?) '\u00ae': '(R)', # REGISTERED SIGN (would you believe "asciiglyph"?) '\u2014': '-', # EM DASH '\u2018': "'", # LEFT SINGLE QUOTATION MARK '\u2019': "'", # RIGHT SINGLE QUOTATION MARK '\u201c': '"', # LEFT DOUBLE QUOTATION MARK '\u201d': '"', # RIGHT DOUBLE QUOTATION MARK '\u2026': '...', # HORIZONTAL ELLIPSIS '\u2e3a': '-', # TWO-EM DASH } def _ReplaceOne(c): """Returns the homoglyph or escaped replacement for c.""" equiv = homoglyphs.get(c) if equiv is not None: return equiv try: c.encode('ascii') return c except UnicodeError: pass try: return c.encode('unicode-escape').decode('ascii') except UnicodeError: return '?' return ''.join([_ReplaceOne(c) for c in s])
python
def ReplaceHomoglyphs(s): """Returns s with unicode homoglyphs replaced by ascii equivalents.""" homoglyphs = { '\xa0': ' ', # &nbsp; ? '\u00e3': '', # TODO(gsfowler) drop after .proto spurious char elided '\u00a0': ' ', # &nbsp; ? '\u00a9': '(C)', # COPYRIGHT SIGN (would you believe "asciiglyph"?) '\u00ae': '(R)', # REGISTERED SIGN (would you believe "asciiglyph"?) '\u2014': '-', # EM DASH '\u2018': "'", # LEFT SINGLE QUOTATION MARK '\u2019': "'", # RIGHT SINGLE QUOTATION MARK '\u201c': '"', # LEFT DOUBLE QUOTATION MARK '\u201d': '"', # RIGHT DOUBLE QUOTATION MARK '\u2026': '...', # HORIZONTAL ELLIPSIS '\u2e3a': '-', # TWO-EM DASH } def _ReplaceOne(c): """Returns the homoglyph or escaped replacement for c.""" equiv = homoglyphs.get(c) if equiv is not None: return equiv try: c.encode('ascii') return c except UnicodeError: pass try: return c.encode('unicode-escape').decode('ascii') except UnicodeError: return '?' return ''.join([_ReplaceOne(c) for c in s])
[ "def", "ReplaceHomoglyphs", "(", "s", ")", ":", "homoglyphs", "=", "{", "'\\xa0'", ":", "' '", ",", "# &nbsp; ?", "'\\u00e3'", ":", "''", ",", "# TODO(gsfowler) drop after .proto spurious char elided", "'\\u00a0'", ":", "' '", ",", "# &nbsp; ?", "'\\u00a9'", ":", ...
Returns s with unicode homoglyphs replaced by ascii equivalents.
[ "Returns", "s", "with", "unicode", "homoglyphs", "replaced", "by", "ascii", "equivalents", "." ]
f3745a7ea535aa0e88b0650c16479b696d6fd446
https://github.com/google/apitools/blob/f3745a7ea535aa0e88b0650c16479b696d6fd446/apitools/gen/util.py#L257-L289
train
207,664
google/apitools
apitools/gen/util.py
_NormalizeDiscoveryUrls
def _NormalizeDiscoveryUrls(discovery_url): """Expands a few abbreviations into full discovery urls.""" if discovery_url.startswith('http'): return [discovery_url] elif '.' not in discovery_url: raise ValueError('Unrecognized value "%s" for discovery url') api_name, _, api_version = discovery_url.partition('.') return [ 'https://www.googleapis.com/discovery/v1/apis/%s/%s/rest' % ( api_name, api_version), 'https://%s.googleapis.com/$discovery/rest?version=%s' % ( api_name, api_version), ]
python
def _NormalizeDiscoveryUrls(discovery_url): """Expands a few abbreviations into full discovery urls.""" if discovery_url.startswith('http'): return [discovery_url] elif '.' not in discovery_url: raise ValueError('Unrecognized value "%s" for discovery url') api_name, _, api_version = discovery_url.partition('.') return [ 'https://www.googleapis.com/discovery/v1/apis/%s/%s/rest' % ( api_name, api_version), 'https://%s.googleapis.com/$discovery/rest?version=%s' % ( api_name, api_version), ]
[ "def", "_NormalizeDiscoveryUrls", "(", "discovery_url", ")", ":", "if", "discovery_url", ".", "startswith", "(", "'http'", ")", ":", "return", "[", "discovery_url", "]", "elif", "'.'", "not", "in", "discovery_url", ":", "raise", "ValueError", "(", "'Unrecognized...
Expands a few abbreviations into full discovery urls.
[ "Expands", "a", "few", "abbreviations", "into", "full", "discovery", "urls", "." ]
f3745a7ea535aa0e88b0650c16479b696d6fd446
https://github.com/google/apitools/blob/f3745a7ea535aa0e88b0650c16479b696d6fd446/apitools/gen/util.py#L355-L367
train
207,665
google/apitools
apitools/gen/util.py
_Gunzip
def _Gunzip(gzipped_content): """Returns gunzipped content from gzipped contents.""" f = tempfile.NamedTemporaryFile(suffix='gz', mode='w+b', delete=False) try: f.write(gzipped_content) f.close() # force file synchronization with gzip.open(f.name, 'rb') as h: decompressed_content = h.read() return decompressed_content finally: os.unlink(f.name)
python
def _Gunzip(gzipped_content): """Returns gunzipped content from gzipped contents.""" f = tempfile.NamedTemporaryFile(suffix='gz', mode='w+b', delete=False) try: f.write(gzipped_content) f.close() # force file synchronization with gzip.open(f.name, 'rb') as h: decompressed_content = h.read() return decompressed_content finally: os.unlink(f.name)
[ "def", "_Gunzip", "(", "gzipped_content", ")", ":", "f", "=", "tempfile", ".", "NamedTemporaryFile", "(", "suffix", "=", "'gz'", ",", "mode", "=", "'w+b'", ",", "delete", "=", "False", ")", "try", ":", "f", ".", "write", "(", "gzipped_content", ")", "f...
Returns gunzipped content from gzipped contents.
[ "Returns", "gunzipped", "content", "from", "gzipped", "contents", "." ]
f3745a7ea535aa0e88b0650c16479b696d6fd446
https://github.com/google/apitools/blob/f3745a7ea535aa0e88b0650c16479b696d6fd446/apitools/gen/util.py#L370-L380
train
207,666
google/apitools
apitools/gen/util.py
_GetURLContent
def _GetURLContent(url): """Download and return the content of URL.""" response = urllib_request.urlopen(url) encoding = response.info().get('Content-Encoding') if encoding == 'gzip': content = _Gunzip(response.read()) else: content = response.read() return content
python
def _GetURLContent(url): """Download and return the content of URL.""" response = urllib_request.urlopen(url) encoding = response.info().get('Content-Encoding') if encoding == 'gzip': content = _Gunzip(response.read()) else: content = response.read() return content
[ "def", "_GetURLContent", "(", "url", ")", ":", "response", "=", "urllib_request", ".", "urlopen", "(", "url", ")", "encoding", "=", "response", ".", "info", "(", ")", ".", "get", "(", "'Content-Encoding'", ")", "if", "encoding", "==", "'gzip'", ":", "con...
Download and return the content of URL.
[ "Download", "and", "return", "the", "content", "of", "URL", "." ]
f3745a7ea535aa0e88b0650c16479b696d6fd446
https://github.com/google/apitools/blob/f3745a7ea535aa0e88b0650c16479b696d6fd446/apitools/gen/util.py#L383-L391
train
207,667
google/apitools
apitools/gen/util.py
FetchDiscoveryDoc
def FetchDiscoveryDoc(discovery_url, retries=5): """Fetch the discovery document at the given url.""" discovery_urls = _NormalizeDiscoveryUrls(discovery_url) discovery_doc = None last_exception = None for url in discovery_urls: for _ in range(retries): try: content = _GetURLContent(url) if isinstance(content, bytes): content = content.decode('utf8') discovery_doc = json.loads(content) break except (urllib_error.HTTPError, urllib_error.URLError) as e: logging.info( 'Attempting to fetch discovery doc again after "%s"', e) last_exception = e if discovery_doc is None: raise CommunicationError( 'Could not find discovery doc at any of %s: %s' % ( discovery_urls, last_exception)) return discovery_doc
python
def FetchDiscoveryDoc(discovery_url, retries=5): """Fetch the discovery document at the given url.""" discovery_urls = _NormalizeDiscoveryUrls(discovery_url) discovery_doc = None last_exception = None for url in discovery_urls: for _ in range(retries): try: content = _GetURLContent(url) if isinstance(content, bytes): content = content.decode('utf8') discovery_doc = json.loads(content) break except (urllib_error.HTTPError, urllib_error.URLError) as e: logging.info( 'Attempting to fetch discovery doc again after "%s"', e) last_exception = e if discovery_doc is None: raise CommunicationError( 'Could not find discovery doc at any of %s: %s' % ( discovery_urls, last_exception)) return discovery_doc
[ "def", "FetchDiscoveryDoc", "(", "discovery_url", ",", "retries", "=", "5", ")", ":", "discovery_urls", "=", "_NormalizeDiscoveryUrls", "(", "discovery_url", ")", "discovery_doc", "=", "None", "last_exception", "=", "None", "for", "url", "in", "discovery_urls", ":...
Fetch the discovery document at the given url.
[ "Fetch", "the", "discovery", "document", "at", "the", "given", "url", "." ]
f3745a7ea535aa0e88b0650c16479b696d6fd446
https://github.com/google/apitools/blob/f3745a7ea535aa0e88b0650c16479b696d6fd446/apitools/gen/util.py#L394-L415
train
207,668
google/apitools
apitools/gen/util.py
Names.__StripName
def __StripName(self, name): """Strip strip_prefix entries from name.""" if not name: return name for prefix in self.__strip_prefixes: if name.startswith(prefix): return name[len(prefix):] return name
python
def __StripName(self, name): """Strip strip_prefix entries from name.""" if not name: return name for prefix in self.__strip_prefixes: if name.startswith(prefix): return name[len(prefix):] return name
[ "def", "__StripName", "(", "self", ",", "name", ")", ":", "if", "not", "name", ":", "return", "name", "for", "prefix", "in", "self", ".", "__strip_prefixes", ":", "if", "name", ".", "startswith", "(", "prefix", ")", ":", "return", "name", "[", "len", ...
Strip strip_prefix entries from name.
[ "Strip", "strip_prefix", "entries", "from", "name", "." ]
f3745a7ea535aa0e88b0650c16479b696d6fd446
https://github.com/google/apitools/blob/f3745a7ea535aa0e88b0650c16479b696d6fd446/apitools/gen/util.py#L81-L88
train
207,669
google/apitools
apitools/gen/util.py
Names.CleanName
def CleanName(name): """Perform generic name cleaning.""" name = re.sub('[^_A-Za-z0-9]', '_', name) if name[0].isdigit(): name = '_%s' % name while keyword.iskeyword(name): name = '%s_' % name # If we end up with __ as a prefix, we'll run afoul of python # field renaming, so we manually correct for it. if name.startswith('__'): name = 'f%s' % name return name
python
def CleanName(name): """Perform generic name cleaning.""" name = re.sub('[^_A-Za-z0-9]', '_', name) if name[0].isdigit(): name = '_%s' % name while keyword.iskeyword(name): name = '%s_' % name # If we end up with __ as a prefix, we'll run afoul of python # field renaming, so we manually correct for it. if name.startswith('__'): name = 'f%s' % name return name
[ "def", "CleanName", "(", "name", ")", ":", "name", "=", "re", ".", "sub", "(", "'[^_A-Za-z0-9]'", ",", "'_'", ",", "name", ")", "if", "name", "[", "0", "]", ".", "isdigit", "(", ")", ":", "name", "=", "'_%s'", "%", "name", "while", "keyword", "."...
Perform generic name cleaning.
[ "Perform", "generic", "name", "cleaning", "." ]
f3745a7ea535aa0e88b0650c16479b696d6fd446
https://github.com/google/apitools/blob/f3745a7ea535aa0e88b0650c16479b696d6fd446/apitools/gen/util.py#L91-L102
train
207,670
google/apitools
apitools/gen/util.py
Names.NormalizeRelativePath
def NormalizeRelativePath(path): """Normalize camelCase entries in path.""" path_components = path.split('/') normalized_components = [] for component in path_components: if re.match(r'{[A-Za-z0-9_]+}$', component): normalized_components.append( '{%s}' % Names.CleanName(component[1:-1])) else: normalized_components.append(component) return '/'.join(normalized_components)
python
def NormalizeRelativePath(path): """Normalize camelCase entries in path.""" path_components = path.split('/') normalized_components = [] for component in path_components: if re.match(r'{[A-Za-z0-9_]+}$', component): normalized_components.append( '{%s}' % Names.CleanName(component[1:-1])) else: normalized_components.append(component) return '/'.join(normalized_components)
[ "def", "NormalizeRelativePath", "(", "path", ")", ":", "path_components", "=", "path", ".", "split", "(", "'/'", ")", "normalized_components", "=", "[", "]", "for", "component", "in", "path_components", ":", "if", "re", ".", "match", "(", "r'{[A-Za-z0-9_]+}$'"...
Normalize camelCase entries in path.
[ "Normalize", "camelCase", "entries", "in", "path", "." ]
f3745a7ea535aa0e88b0650c16479b696d6fd446
https://github.com/google/apitools/blob/f3745a7ea535aa0e88b0650c16479b696d6fd446/apitools/gen/util.py#L105-L115
train
207,671
google/apitools
apitools/gen/util.py
Names.ClassName
def ClassName(self, name, separator='_'): """Generate a valid class name from name.""" # TODO(craigcitro): Get rid of this case here and in MethodName. if name is None: return name # TODO(craigcitro): This is a hack to handle the case of specific # protorpc class names; clean this up. if name.startswith(('protorpc.', 'message_types.', 'apitools.base.protorpclite.', 'apitools.base.protorpclite.message_types.')): return name name = self.__StripName(name) name = self.__ToCamel(name, separator=separator) return self.CleanName(name)
python
def ClassName(self, name, separator='_'): """Generate a valid class name from name.""" # TODO(craigcitro): Get rid of this case here and in MethodName. if name is None: return name # TODO(craigcitro): This is a hack to handle the case of specific # protorpc class names; clean this up. if name.startswith(('protorpc.', 'message_types.', 'apitools.base.protorpclite.', 'apitools.base.protorpclite.message_types.')): return name name = self.__StripName(name) name = self.__ToCamel(name, separator=separator) return self.CleanName(name)
[ "def", "ClassName", "(", "self", ",", "name", ",", "separator", "=", "'_'", ")", ":", "# TODO(craigcitro): Get rid of this case here and in MethodName.", "if", "name", "is", "None", ":", "return", "name", "# TODO(craigcitro): This is a hack to handle the case of specific", ...
Generate a valid class name from name.
[ "Generate", "a", "valid", "class", "name", "from", "name", "." ]
f3745a7ea535aa0e88b0650c16479b696d6fd446
https://github.com/google/apitools/blob/f3745a7ea535aa0e88b0650c16479b696d6fd446/apitools/gen/util.py#L122-L135
train
207,672
google/apitools
apitools/gen/util.py
Names.MethodName
def MethodName(self, name, separator='_'): """Generate a valid method name from name.""" if name is None: return None name = Names.__ToCamel(name, separator=separator) return Names.CleanName(name)
python
def MethodName(self, name, separator='_'): """Generate a valid method name from name.""" if name is None: return None name = Names.__ToCamel(name, separator=separator) return Names.CleanName(name)
[ "def", "MethodName", "(", "self", ",", "name", ",", "separator", "=", "'_'", ")", ":", "if", "name", "is", "None", ":", "return", "None", "name", "=", "Names", ".", "__ToCamel", "(", "name", ",", "separator", "=", "separator", ")", "return", "Names", ...
Generate a valid method name from name.
[ "Generate", "a", "valid", "method", "name", "from", "name", "." ]
f3745a7ea535aa0e88b0650c16479b696d6fd446
https://github.com/google/apitools/blob/f3745a7ea535aa0e88b0650c16479b696d6fd446/apitools/gen/util.py#L137-L142
train
207,673
google/apitools
apitools/gen/util.py
Names.FieldName
def FieldName(self, name): """Generate a valid field name from name.""" # TODO(craigcitro): We shouldn't need to strip this name, but some # of the service names here are excessive. Fix the API and then # remove this. name = self.__StripName(name) if self.__name_convention == 'LOWER_CAMEL': name = Names.__ToLowerCamel(name) elif self.__name_convention == 'LOWER_WITH_UNDER': name = Names.__FromCamel(name) return Names.CleanName(name)
python
def FieldName(self, name): """Generate a valid field name from name.""" # TODO(craigcitro): We shouldn't need to strip this name, but some # of the service names here are excessive. Fix the API and then # remove this. name = self.__StripName(name) if self.__name_convention == 'LOWER_CAMEL': name = Names.__ToLowerCamel(name) elif self.__name_convention == 'LOWER_WITH_UNDER': name = Names.__FromCamel(name) return Names.CleanName(name)
[ "def", "FieldName", "(", "self", ",", "name", ")", ":", "# TODO(craigcitro): We shouldn't need to strip this name, but some", "# of the service names here are excessive. Fix the API and then", "# remove this.", "name", "=", "self", ".", "__StripName", "(", "name", ")", "if", ...
Generate a valid field name from name.
[ "Generate", "a", "valid", "field", "name", "from", "name", "." ]
f3745a7ea535aa0e88b0650c16479b696d6fd446
https://github.com/google/apitools/blob/f3745a7ea535aa0e88b0650c16479b696d6fd446/apitools/gen/util.py#L144-L154
train
207,674
google/apitools
apitools/gen/util.py
ClientInfo.Create
def Create(cls, discovery_doc, scope_ls, client_id, client_secret, user_agent, names, api_key): """Create a new ClientInfo object from a discovery document.""" scopes = set( discovery_doc.get('auth', {}).get('oauth2', {}).get('scopes', {})) scopes.update(scope_ls) package = discovery_doc['name'] url_version = discovery_doc['version'] base_url, base_path = _ComputePaths(package, url_version, discovery_doc) client_info = { 'package': package, 'version': NormalizeVersion(discovery_doc['version']), 'url_version': url_version, 'scopes': sorted(list(scopes)), 'client_id': client_id, 'client_secret': client_secret, 'user_agent': user_agent, 'api_key': api_key, 'base_url': base_url, 'base_path': base_path, } client_class_name = '%s%s' % ( names.ClassName(client_info['package']), names.ClassName(client_info['version'])) client_info['client_class_name'] = client_class_name return cls(**client_info)
python
def Create(cls, discovery_doc, scope_ls, client_id, client_secret, user_agent, names, api_key): """Create a new ClientInfo object from a discovery document.""" scopes = set( discovery_doc.get('auth', {}).get('oauth2', {}).get('scopes', {})) scopes.update(scope_ls) package = discovery_doc['name'] url_version = discovery_doc['version'] base_url, base_path = _ComputePaths(package, url_version, discovery_doc) client_info = { 'package': package, 'version': NormalizeVersion(discovery_doc['version']), 'url_version': url_version, 'scopes': sorted(list(scopes)), 'client_id': client_id, 'client_secret': client_secret, 'user_agent': user_agent, 'api_key': api_key, 'base_url': base_url, 'base_path': base_path, } client_class_name = '%s%s' % ( names.ClassName(client_info['package']), names.ClassName(client_info['version'])) client_info['client_class_name'] = client_class_name return cls(**client_info)
[ "def", "Create", "(", "cls", ",", "discovery_doc", ",", "scope_ls", ",", "client_id", ",", "client_secret", ",", "user_agent", ",", "names", ",", "api_key", ")", ":", "scopes", "=", "set", "(", "discovery_doc", ".", "get", "(", "'auth'", ",", "{", "}", ...
Create a new ClientInfo object from a discovery document.
[ "Create", "a", "new", "ClientInfo", "object", "from", "a", "discovery", "document", "." ]
f3745a7ea535aa0e88b0650c16479b696d6fd446
https://github.com/google/apitools/blob/f3745a7ea535aa0e88b0650c16479b696d6fd446/apitools/gen/util.py#L195-L222
train
207,675
google/apitools
apitools/gen/util.py
SimplePrettyPrinter.CommentContext
def CommentContext(self): """Print without any argument formatting.""" old_context = self.__comment_context self.__comment_context = True yield self.__comment_context = old_context
python
def CommentContext(self): """Print without any argument formatting.""" old_context = self.__comment_context self.__comment_context = True yield self.__comment_context = old_context
[ "def", "CommentContext", "(", "self", ")", ":", "old_context", "=", "self", ".", "__comment_context", "self", ".", "__comment_context", "=", "True", "yield", "self", ".", "__comment_context", "=", "old_context" ]
Print without any argument formatting.
[ "Print", "without", "any", "argument", "formatting", "." ]
f3745a7ea535aa0e88b0650c16479b696d6fd446
https://github.com/google/apitools/blob/f3745a7ea535aa0e88b0650c16479b696d6fd446/apitools/gen/util.py#L330-L335
train
207,676
google/apitools
apitools/base/py/credentials_lib.py
_RegisterCredentialsMethod
def _RegisterCredentialsMethod(method, position=None): """Register a new method for fetching credentials. This new method should be a function with signature: client_info, **kwds -> Credentials or None This method can be used as a decorator, unless position needs to be supplied. Note that method must *always* accept arbitrary keyword arguments. Args: method: New credential-fetching method. position: (default: None) Where in the list of methods to add this; if None, we append. In all but rare cases, this should be either 0 or None. Returns: method, for use as a decorator. """ if position is None: position = len(_CREDENTIALS_METHODS) else: position = min(position, len(_CREDENTIALS_METHODS)) _CREDENTIALS_METHODS.insert(position, method) return method
python
def _RegisterCredentialsMethod(method, position=None): """Register a new method for fetching credentials. This new method should be a function with signature: client_info, **kwds -> Credentials or None This method can be used as a decorator, unless position needs to be supplied. Note that method must *always* accept arbitrary keyword arguments. Args: method: New credential-fetching method. position: (default: None) Where in the list of methods to add this; if None, we append. In all but rare cases, this should be either 0 or None. Returns: method, for use as a decorator. """ if position is None: position = len(_CREDENTIALS_METHODS) else: position = min(position, len(_CREDENTIALS_METHODS)) _CREDENTIALS_METHODS.insert(position, method) return method
[ "def", "_RegisterCredentialsMethod", "(", "method", ",", "position", "=", "None", ")", ":", "if", "position", "is", "None", ":", "position", "=", "len", "(", "_CREDENTIALS_METHODS", ")", "else", ":", "position", "=", "min", "(", "position", ",", "len", "("...
Register a new method for fetching credentials. This new method should be a function with signature: client_info, **kwds -> Credentials or None This method can be used as a decorator, unless position needs to be supplied. Note that method must *always* accept arbitrary keyword arguments. Args: method: New credential-fetching method. position: (default: None) Where in the list of methods to add this; if None, we append. In all but rare cases, this should be either 0 or None. Returns: method, for use as a decorator.
[ "Register", "a", "new", "method", "for", "fetching", "credentials", "." ]
f3745a7ea535aa0e88b0650c16479b696d6fd446
https://github.com/google/apitools/blob/f3745a7ea535aa0e88b0650c16479b696d6fd446/apitools/base/py/credentials_lib.py#L105-L129
train
207,677
google/apitools
apitools/base/py/credentials_lib.py
GetCredentials
def GetCredentials(package_name, scopes, client_id, client_secret, user_agent, credentials_filename=None, api_key=None, # pylint: disable=unused-argument client=None, # pylint: disable=unused-argument oauth2client_args=None, **kwds): """Attempt to get credentials, using an oauth dance as the last resort.""" scopes = util.NormalizeScopes(scopes) client_info = { 'client_id': client_id, 'client_secret': client_secret, 'scope': ' '.join(sorted(scopes)), 'user_agent': user_agent or '%s-generated/0.1' % package_name, } for method in _CREDENTIALS_METHODS: credentials = method(client_info, **kwds) if credentials is not None: return credentials credentials_filename = credentials_filename or os.path.expanduser( '~/.apitools.token') credentials = CredentialsFromFile(credentials_filename, client_info, oauth2client_args=oauth2client_args) if credentials is not None: return credentials raise exceptions.CredentialsError('Could not create valid credentials')
python
def GetCredentials(package_name, scopes, client_id, client_secret, user_agent, credentials_filename=None, api_key=None, # pylint: disable=unused-argument client=None, # pylint: disable=unused-argument oauth2client_args=None, **kwds): """Attempt to get credentials, using an oauth dance as the last resort.""" scopes = util.NormalizeScopes(scopes) client_info = { 'client_id': client_id, 'client_secret': client_secret, 'scope': ' '.join(sorted(scopes)), 'user_agent': user_agent or '%s-generated/0.1' % package_name, } for method in _CREDENTIALS_METHODS: credentials = method(client_info, **kwds) if credentials is not None: return credentials credentials_filename = credentials_filename or os.path.expanduser( '~/.apitools.token') credentials = CredentialsFromFile(credentials_filename, client_info, oauth2client_args=oauth2client_args) if credentials is not None: return credentials raise exceptions.CredentialsError('Could not create valid credentials')
[ "def", "GetCredentials", "(", "package_name", ",", "scopes", ",", "client_id", ",", "client_secret", ",", "user_agent", ",", "credentials_filename", "=", "None", ",", "api_key", "=", "None", ",", "# pylint: disable=unused-argument", "client", "=", "None", ",", "# ...
Attempt to get credentials, using an oauth dance as the last resort.
[ "Attempt", "to", "get", "credentials", "using", "an", "oauth", "dance", "as", "the", "last", "resort", "." ]
f3745a7ea535aa0e88b0650c16479b696d6fd446
https://github.com/google/apitools/blob/f3745a7ea535aa0e88b0650c16479b696d6fd446/apitools/base/py/credentials_lib.py#L132-L156
train
207,678
google/apitools
apitools/base/py/credentials_lib.py
ServiceAccountCredentialsFromFile
def ServiceAccountCredentialsFromFile(filename, scopes, user_agent=None): """Use the credentials in filename to create a token for scopes.""" filename = os.path.expanduser(filename) # We have two options, based on our version of oauth2client. if oauth2client.__version__ > '1.5.2': # oauth2client >= 2.0.0 credentials = ( service_account.ServiceAccountCredentials.from_json_keyfile_name( filename, scopes=scopes)) if credentials is not None: if user_agent is not None: credentials.user_agent = user_agent return credentials else: # oauth2client < 2.0.0 with open(filename) as keyfile: service_account_info = json.load(keyfile) account_type = service_account_info.get('type') if account_type != oauth2client.client.SERVICE_ACCOUNT: raise exceptions.CredentialsError( 'Invalid service account credentials: %s' % (filename,)) # pylint: disable=protected-access credentials = service_account._ServiceAccountCredentials( service_account_id=service_account_info['client_id'], service_account_email=service_account_info['client_email'], private_key_id=service_account_info['private_key_id'], private_key_pkcs8_text=service_account_info['private_key'], scopes=scopes, user_agent=user_agent) # pylint: enable=protected-access return credentials
python
def ServiceAccountCredentialsFromFile(filename, scopes, user_agent=None): """Use the credentials in filename to create a token for scopes.""" filename = os.path.expanduser(filename) # We have two options, based on our version of oauth2client. if oauth2client.__version__ > '1.5.2': # oauth2client >= 2.0.0 credentials = ( service_account.ServiceAccountCredentials.from_json_keyfile_name( filename, scopes=scopes)) if credentials is not None: if user_agent is not None: credentials.user_agent = user_agent return credentials else: # oauth2client < 2.0.0 with open(filename) as keyfile: service_account_info = json.load(keyfile) account_type = service_account_info.get('type') if account_type != oauth2client.client.SERVICE_ACCOUNT: raise exceptions.CredentialsError( 'Invalid service account credentials: %s' % (filename,)) # pylint: disable=protected-access credentials = service_account._ServiceAccountCredentials( service_account_id=service_account_info['client_id'], service_account_email=service_account_info['client_email'], private_key_id=service_account_info['private_key_id'], private_key_pkcs8_text=service_account_info['private_key'], scopes=scopes, user_agent=user_agent) # pylint: enable=protected-access return credentials
[ "def", "ServiceAccountCredentialsFromFile", "(", "filename", ",", "scopes", ",", "user_agent", "=", "None", ")", ":", "filename", "=", "os", ".", "path", ".", "expanduser", "(", "filename", ")", "# We have two options, based on our version of oauth2client.", "if", "oa...
Use the credentials in filename to create a token for scopes.
[ "Use", "the", "credentials", "in", "filename", "to", "create", "a", "token", "for", "scopes", "." ]
f3745a7ea535aa0e88b0650c16479b696d6fd446
https://github.com/google/apitools/blob/f3745a7ea535aa0e88b0650c16479b696d6fd446/apitools/base/py/credentials_lib.py#L159-L188
train
207,679
google/apitools
apitools/base/py/credentials_lib.py
ServiceAccountCredentialsFromP12File
def ServiceAccountCredentialsFromP12File( service_account_name, private_key_filename, scopes, user_agent): """Create a new credential from the named .p12 keyfile.""" private_key_filename = os.path.expanduser(private_key_filename) scopes = util.NormalizeScopes(scopes) if oauth2client.__version__ > '1.5.2': # oauth2client >= 2.0.0 credentials = ( service_account.ServiceAccountCredentials.from_p12_keyfile( service_account_name, private_key_filename, scopes=scopes)) if credentials is not None: credentials.user_agent = user_agent return credentials else: # oauth2client < 2.0.0 with open(private_key_filename, 'rb') as key_file: return oauth2client.client.SignedJwtAssertionCredentials( service_account_name, key_file.read(), scopes, user_agent=user_agent)
python
def ServiceAccountCredentialsFromP12File( service_account_name, private_key_filename, scopes, user_agent): """Create a new credential from the named .p12 keyfile.""" private_key_filename = os.path.expanduser(private_key_filename) scopes = util.NormalizeScopes(scopes) if oauth2client.__version__ > '1.5.2': # oauth2client >= 2.0.0 credentials = ( service_account.ServiceAccountCredentials.from_p12_keyfile( service_account_name, private_key_filename, scopes=scopes)) if credentials is not None: credentials.user_agent = user_agent return credentials else: # oauth2client < 2.0.0 with open(private_key_filename, 'rb') as key_file: return oauth2client.client.SignedJwtAssertionCredentials( service_account_name, key_file.read(), scopes, user_agent=user_agent)
[ "def", "ServiceAccountCredentialsFromP12File", "(", "service_account_name", ",", "private_key_filename", ",", "scopes", ",", "user_agent", ")", ":", "private_key_filename", "=", "os", ".", "path", ".", "expanduser", "(", "private_key_filename", ")", "scopes", "=", "ut...
Create a new credential from the named .p12 keyfile.
[ "Create", "a", "new", "credential", "from", "the", "named", ".", "p12", "keyfile", "." ]
f3745a7ea535aa0e88b0650c16479b696d6fd446
https://github.com/google/apitools/blob/f3745a7ea535aa0e88b0650c16479b696d6fd446/apitools/base/py/credentials_lib.py#L191-L209
train
207,680
google/apitools
apitools/base/py/credentials_lib.py
_GceMetadataRequest
def _GceMetadataRequest(relative_url, use_metadata_ip=False): """Request the given url from the GCE metadata service.""" if use_metadata_ip: base_url = os.environ.get('GCE_METADATA_IP', '169.254.169.254') else: base_url = os.environ.get( 'GCE_METADATA_ROOT', 'metadata.google.internal') url = 'http://' + base_url + '/computeMetadata/v1/' + relative_url # Extra header requirement can be found here: # https://developers.google.com/compute/docs/metadata headers = {'Metadata-Flavor': 'Google'} request = urllib.request.Request(url, headers=headers) opener = urllib.request.build_opener(urllib.request.ProxyHandler({})) try: response = opener.open(request) except urllib.error.URLError as e: raise exceptions.CommunicationError( 'Could not reach metadata service: %s' % e.reason) return response
python
def _GceMetadataRequest(relative_url, use_metadata_ip=False): """Request the given url from the GCE metadata service.""" if use_metadata_ip: base_url = os.environ.get('GCE_METADATA_IP', '169.254.169.254') else: base_url = os.environ.get( 'GCE_METADATA_ROOT', 'metadata.google.internal') url = 'http://' + base_url + '/computeMetadata/v1/' + relative_url # Extra header requirement can be found here: # https://developers.google.com/compute/docs/metadata headers = {'Metadata-Flavor': 'Google'} request = urllib.request.Request(url, headers=headers) opener = urllib.request.build_opener(urllib.request.ProxyHandler({})) try: response = opener.open(request) except urllib.error.URLError as e: raise exceptions.CommunicationError( 'Could not reach metadata service: %s' % e.reason) return response
[ "def", "_GceMetadataRequest", "(", "relative_url", ",", "use_metadata_ip", "=", "False", ")", ":", "if", "use_metadata_ip", ":", "base_url", "=", "os", ".", "environ", ".", "get", "(", "'GCE_METADATA_IP'", ",", "'169.254.169.254'", ")", "else", ":", "base_url", ...
Request the given url from the GCE metadata service.
[ "Request", "the", "given", "url", "from", "the", "GCE", "metadata", "service", "." ]
f3745a7ea535aa0e88b0650c16479b696d6fd446
https://github.com/google/apitools/blob/f3745a7ea535aa0e88b0650c16479b696d6fd446/apitools/base/py/credentials_lib.py#L212-L230
train
207,681
google/apitools
apitools/base/py/credentials_lib.py
_GetRunFlowFlags
def _GetRunFlowFlags(args=None): """Retrieves command line flags based on gflags module.""" # There's one rare situation where gsutil will not have argparse # available, but doesn't need anything depending on argparse anyway, # since they're bringing their own credentials. So we just allow this # to fail with an ImportError in those cases. # # TODO(craigcitro): Move this import back to the top when we drop # python 2.6 support (eg when gsutil does). import argparse parser = argparse.ArgumentParser(parents=[tools.argparser]) # Get command line argparse flags. flags, _ = parser.parse_known_args(args=args) # Allow `gflags` and `argparse` to be used side-by-side. if hasattr(FLAGS, 'auth_host_name'): flags.auth_host_name = FLAGS.auth_host_name if hasattr(FLAGS, 'auth_host_port'): flags.auth_host_port = FLAGS.auth_host_port if hasattr(FLAGS, 'auth_local_webserver'): flags.noauth_local_webserver = (not FLAGS.auth_local_webserver) return flags
python
def _GetRunFlowFlags(args=None): """Retrieves command line flags based on gflags module.""" # There's one rare situation where gsutil will not have argparse # available, but doesn't need anything depending on argparse anyway, # since they're bringing their own credentials. So we just allow this # to fail with an ImportError in those cases. # # TODO(craigcitro): Move this import back to the top when we drop # python 2.6 support (eg when gsutil does). import argparse parser = argparse.ArgumentParser(parents=[tools.argparser]) # Get command line argparse flags. flags, _ = parser.parse_known_args(args=args) # Allow `gflags` and `argparse` to be used side-by-side. if hasattr(FLAGS, 'auth_host_name'): flags.auth_host_name = FLAGS.auth_host_name if hasattr(FLAGS, 'auth_host_port'): flags.auth_host_port = FLAGS.auth_host_port if hasattr(FLAGS, 'auth_local_webserver'): flags.noauth_local_webserver = (not FLAGS.auth_local_webserver) return flags
[ "def", "_GetRunFlowFlags", "(", "args", "=", "None", ")", ":", "# There's one rare situation where gsutil will not have argparse", "# available, but doesn't need anything depending on argparse anyway,", "# since they're bringing their own credentials. So we just allow this", "# to fail with an...
Retrieves command line flags based on gflags module.
[ "Retrieves", "command", "line", "flags", "based", "on", "gflags", "module", "." ]
f3745a7ea535aa0e88b0650c16479b696d6fd446
https://github.com/google/apitools/blob/f3745a7ea535aa0e88b0650c16479b696d6fd446/apitools/base/py/credentials_lib.py#L508-L530
train
207,682
google/apitools
apitools/base/py/credentials_lib.py
CredentialsFromFile
def CredentialsFromFile(path, client_info, oauth2client_args=None): """Read credentials from a file.""" user_agent = client_info['user_agent'] scope_key = client_info['scope'] if not isinstance(scope_key, six.string_types): scope_key = ':'.join(scope_key) storage_key = client_info['client_id'] + user_agent + scope_key if _NEW_FILESTORE: credential_store = multiprocess_file_storage.MultiprocessFileStorage( path, storage_key) else: credential_store = multistore_file.get_credential_storage_custom_string_key( # noqa path, storage_key) if hasattr(FLAGS, 'auth_local_webserver'): FLAGS.auth_local_webserver = False credentials = credential_store.get() if credentials is None or credentials.invalid: print('Generating new OAuth credentials ...') for _ in range(20): # If authorization fails, we want to retry, rather than let this # cascade up and get caught elsewhere. If users want out of the # retry loop, they can ^C. try: flow = oauth2client.client.OAuth2WebServerFlow(**client_info) flags = _GetRunFlowFlags(args=oauth2client_args) credentials = tools.run_flow(flow, credential_store, flags) break except (oauth2client.client.FlowExchangeError, SystemExit) as e: # Here SystemExit is "no credential at all", and the # FlowExchangeError is "invalid" -- usually because # you reused a token. print('Invalid authorization: %s' % (e,)) except httplib2.HttpLib2Error as e: print('Communication error: %s' % (e,)) raise exceptions.CredentialsError( 'Communication error creating credentials: %s' % e) return credentials
python
def CredentialsFromFile(path, client_info, oauth2client_args=None): """Read credentials from a file.""" user_agent = client_info['user_agent'] scope_key = client_info['scope'] if not isinstance(scope_key, six.string_types): scope_key = ':'.join(scope_key) storage_key = client_info['client_id'] + user_agent + scope_key if _NEW_FILESTORE: credential_store = multiprocess_file_storage.MultiprocessFileStorage( path, storage_key) else: credential_store = multistore_file.get_credential_storage_custom_string_key( # noqa path, storage_key) if hasattr(FLAGS, 'auth_local_webserver'): FLAGS.auth_local_webserver = False credentials = credential_store.get() if credentials is None or credentials.invalid: print('Generating new OAuth credentials ...') for _ in range(20): # If authorization fails, we want to retry, rather than let this # cascade up and get caught elsewhere. If users want out of the # retry loop, they can ^C. try: flow = oauth2client.client.OAuth2WebServerFlow(**client_info) flags = _GetRunFlowFlags(args=oauth2client_args) credentials = tools.run_flow(flow, credential_store, flags) break except (oauth2client.client.FlowExchangeError, SystemExit) as e: # Here SystemExit is "no credential at all", and the # FlowExchangeError is "invalid" -- usually because # you reused a token. print('Invalid authorization: %s' % (e,)) except httplib2.HttpLib2Error as e: print('Communication error: %s' % (e,)) raise exceptions.CredentialsError( 'Communication error creating credentials: %s' % e) return credentials
[ "def", "CredentialsFromFile", "(", "path", ",", "client_info", ",", "oauth2client_args", "=", "None", ")", ":", "user_agent", "=", "client_info", "[", "'user_agent'", "]", "scope_key", "=", "client_info", "[", "'scope'", "]", "if", "not", "isinstance", "(", "s...
Read credentials from a file.
[ "Read", "credentials", "from", "a", "file", "." ]
f3745a7ea535aa0e88b0650c16479b696d6fd446
https://github.com/google/apitools/blob/f3745a7ea535aa0e88b0650c16479b696d6fd446/apitools/base/py/credentials_lib.py#L534-L571
train
207,683
google/apitools
apitools/base/py/credentials_lib.py
GetUserinfo
def GetUserinfo(credentials, http=None): # pylint: disable=invalid-name """Get the userinfo associated with the given credentials. This is dependent on the token having either the userinfo.email or userinfo.profile scope for the given token. Args: credentials: (oauth2client.client.Credentials) incoming credentials http: (httplib2.Http, optional) http instance to use Returns: The email address for this token, or None if the required scopes aren't available. """ http = http or httplib2.Http() url = _GetUserinfoUrl(credentials) # We ignore communication woes here (i.e. SSL errors, socket # timeout), as handling these should be done in a common location. response, content = http.request(url) if response.status == http_client.BAD_REQUEST: credentials.refresh(http) url = _GetUserinfoUrl(credentials) response, content = http.request(url) return json.loads(content or '{}')
python
def GetUserinfo(credentials, http=None): # pylint: disable=invalid-name """Get the userinfo associated with the given credentials. This is dependent on the token having either the userinfo.email or userinfo.profile scope for the given token. Args: credentials: (oauth2client.client.Credentials) incoming credentials http: (httplib2.Http, optional) http instance to use Returns: The email address for this token, or None if the required scopes aren't available. """ http = http or httplib2.Http() url = _GetUserinfoUrl(credentials) # We ignore communication woes here (i.e. SSL errors, socket # timeout), as handling these should be done in a common location. response, content = http.request(url) if response.status == http_client.BAD_REQUEST: credentials.refresh(http) url = _GetUserinfoUrl(credentials) response, content = http.request(url) return json.loads(content or '{}')
[ "def", "GetUserinfo", "(", "credentials", ",", "http", "=", "None", ")", ":", "# pylint: disable=invalid-name", "http", "=", "http", "or", "httplib2", ".", "Http", "(", ")", "url", "=", "_GetUserinfoUrl", "(", "credentials", ")", "# We ignore communication woes he...
Get the userinfo associated with the given credentials. This is dependent on the token having either the userinfo.email or userinfo.profile scope for the given token. Args: credentials: (oauth2client.client.Credentials) incoming credentials http: (httplib2.Http, optional) http instance to use Returns: The email address for this token, or None if the required scopes aren't available.
[ "Get", "the", "userinfo", "associated", "with", "the", "given", "credentials", "." ]
f3745a7ea535aa0e88b0650c16479b696d6fd446
https://github.com/google/apitools/blob/f3745a7ea535aa0e88b0650c16479b696d6fd446/apitools/base/py/credentials_lib.py#L684-L707
train
207,684
google/apitools
apitools/base/py/credentials_lib.py
_GetServiceAccountCredentials
def _GetServiceAccountCredentials( client_info, service_account_name=None, service_account_keyfile=None, service_account_json_keyfile=None, **unused_kwds): """Returns ServiceAccountCredentials from give file.""" if ((service_account_name and not service_account_keyfile) or (service_account_keyfile and not service_account_name)): raise exceptions.CredentialsError( 'Service account name or keyfile provided without the other') scopes = client_info['scope'].split() user_agent = client_info['user_agent'] # Use the .json credentials, if provided. if service_account_json_keyfile: return ServiceAccountCredentialsFromFile( service_account_json_keyfile, scopes, user_agent=user_agent) # Fall back to .p12 if there's no .json credentials. if service_account_name is not None: return ServiceAccountCredentialsFromP12File( service_account_name, service_account_keyfile, scopes, user_agent)
python
def _GetServiceAccountCredentials( client_info, service_account_name=None, service_account_keyfile=None, service_account_json_keyfile=None, **unused_kwds): """Returns ServiceAccountCredentials from give file.""" if ((service_account_name and not service_account_keyfile) or (service_account_keyfile and not service_account_name)): raise exceptions.CredentialsError( 'Service account name or keyfile provided without the other') scopes = client_info['scope'].split() user_agent = client_info['user_agent'] # Use the .json credentials, if provided. if service_account_json_keyfile: return ServiceAccountCredentialsFromFile( service_account_json_keyfile, scopes, user_agent=user_agent) # Fall back to .p12 if there's no .json credentials. if service_account_name is not None: return ServiceAccountCredentialsFromP12File( service_account_name, service_account_keyfile, scopes, user_agent)
[ "def", "_GetServiceAccountCredentials", "(", "client_info", ",", "service_account_name", "=", "None", ",", "service_account_keyfile", "=", "None", ",", "service_account_json_keyfile", "=", "None", ",", "*", "*", "unused_kwds", ")", ":", "if", "(", "(", "service_acco...
Returns ServiceAccountCredentials from give file.
[ "Returns", "ServiceAccountCredentials", "from", "give", "file", "." ]
f3745a7ea535aa0e88b0650c16479b696d6fd446
https://github.com/google/apitools/blob/f3745a7ea535aa0e88b0650c16479b696d6fd446/apitools/base/py/credentials_lib.py#L717-L734
train
207,685
google/apitools
apitools/base/py/credentials_lib.py
_GetApplicationDefaultCredentials
def _GetApplicationDefaultCredentials( client_info, skip_application_default_credentials=False, **unused_kwds): """Returns ADC with right scopes.""" scopes = client_info['scope'].split() if skip_application_default_credentials: return None gc = oauth2client.client.GoogleCredentials with cache_file_lock: try: # pylint: disable=protected-access # We've already done our own check for GAE/GCE # credentials, we don't want to pay for checking again. credentials = gc._implicit_credentials_from_files() except oauth2client.client.ApplicationDefaultCredentialsError: return None # If we got back a non-service account credential, we need to use # a heuristic to decide whether or not the application default # credential will work for us. We assume that if we're requesting # cloud-platform, our scopes are a subset of cloud scopes, and the # ADC will work. cp = 'https://www.googleapis.com/auth/cloud-platform' if credentials is None: return None if not isinstance(credentials, gc) or cp in scopes: return credentials.create_scoped(scopes) return None
python
def _GetApplicationDefaultCredentials( client_info, skip_application_default_credentials=False, **unused_kwds): """Returns ADC with right scopes.""" scopes = client_info['scope'].split() if skip_application_default_credentials: return None gc = oauth2client.client.GoogleCredentials with cache_file_lock: try: # pylint: disable=protected-access # We've already done our own check for GAE/GCE # credentials, we don't want to pay for checking again. credentials = gc._implicit_credentials_from_files() except oauth2client.client.ApplicationDefaultCredentialsError: return None # If we got back a non-service account credential, we need to use # a heuristic to decide whether or not the application default # credential will work for us. We assume that if we're requesting # cloud-platform, our scopes are a subset of cloud scopes, and the # ADC will work. cp = 'https://www.googleapis.com/auth/cloud-platform' if credentials is None: return None if not isinstance(credentials, gc) or cp in scopes: return credentials.create_scoped(scopes) return None
[ "def", "_GetApplicationDefaultCredentials", "(", "client_info", ",", "skip_application_default_credentials", "=", "False", ",", "*", "*", "unused_kwds", ")", ":", "scopes", "=", "client_info", "[", "'scope'", "]", ".", "split", "(", ")", "if", "skip_application_defa...
Returns ADC with right scopes.
[ "Returns", "ADC", "with", "right", "scopes", "." ]
f3745a7ea535aa0e88b0650c16479b696d6fd446
https://github.com/google/apitools/blob/f3745a7ea535aa0e88b0650c16479b696d6fd446/apitools/base/py/credentials_lib.py#L750-L776
train
207,686
google/apitools
apitools/base/py/credentials_lib.py
GceAssertionCredentials._CheckCacheFileForMatch
def _CheckCacheFileForMatch(self, cache_filename, scopes): """Checks the cache file to see if it matches the given credentials. Args: cache_filename: Cache filename to check. scopes: Scopes for the desired credentials. Returns: List of scopes (if cache matches) or None. """ creds = { # Credentials metadata dict. 'scopes': sorted(list(scopes)) if scopes else None, 'svc_acct_name': self.__service_account_name, } cache_file = _MultiProcessCacheFile(cache_filename) try: cached_creds_str = cache_file.LockedRead() if not cached_creds_str: return None cached_creds = json.loads(cached_creds_str) if creds['svc_acct_name'] == cached_creds['svc_acct_name']: if creds['scopes'] in (None, cached_creds['scopes']): return cached_creds['scopes'] except KeyboardInterrupt: raise except: # pylint: disable=bare-except # Treat exceptions as a cache miss. pass
python
def _CheckCacheFileForMatch(self, cache_filename, scopes): """Checks the cache file to see if it matches the given credentials. Args: cache_filename: Cache filename to check. scopes: Scopes for the desired credentials. Returns: List of scopes (if cache matches) or None. """ creds = { # Credentials metadata dict. 'scopes': sorted(list(scopes)) if scopes else None, 'svc_acct_name': self.__service_account_name, } cache_file = _MultiProcessCacheFile(cache_filename) try: cached_creds_str = cache_file.LockedRead() if not cached_creds_str: return None cached_creds = json.loads(cached_creds_str) if creds['svc_acct_name'] == cached_creds['svc_acct_name']: if creds['scopes'] in (None, cached_creds['scopes']): return cached_creds['scopes'] except KeyboardInterrupt: raise except: # pylint: disable=bare-except # Treat exceptions as a cache miss. pass
[ "def", "_CheckCacheFileForMatch", "(", "self", ",", "cache_filename", ",", "scopes", ")", ":", "creds", "=", "{", "# Credentials metadata dict.", "'scopes'", ":", "sorted", "(", "list", "(", "scopes", ")", ")", "if", "scopes", "else", "None", ",", "'svc_acct_n...
Checks the cache file to see if it matches the given credentials. Args: cache_filename: Cache filename to check. scopes: Scopes for the desired credentials. Returns: List of scopes (if cache matches) or None.
[ "Checks", "the", "cache", "file", "to", "see", "if", "it", "matches", "the", "given", "credentials", "." ]
f3745a7ea535aa0e88b0650c16479b696d6fd446
https://github.com/google/apitools/blob/f3745a7ea535aa0e88b0650c16479b696d6fd446/apitools/base/py/credentials_lib.py#L280-L307
train
207,687
google/apitools
apitools/base/py/credentials_lib.py
GceAssertionCredentials._WriteCacheFile
def _WriteCacheFile(self, cache_filename, scopes): """Writes the credential metadata to the cache file. This does not save the credentials themselves (CredentialStore class optionally handles that after this class is initialized). Args: cache_filename: Cache filename to check. scopes: Scopes for the desired credentials. """ # Credentials metadata dict. creds = {'scopes': sorted(list(scopes)), 'svc_acct_name': self.__service_account_name} creds_str = json.dumps(creds) cache_file = _MultiProcessCacheFile(cache_filename) try: cache_file.LockedWrite(creds_str) except KeyboardInterrupt: raise except: # pylint: disable=bare-except # Treat exceptions as a cache miss. pass
python
def _WriteCacheFile(self, cache_filename, scopes): """Writes the credential metadata to the cache file. This does not save the credentials themselves (CredentialStore class optionally handles that after this class is initialized). Args: cache_filename: Cache filename to check. scopes: Scopes for the desired credentials. """ # Credentials metadata dict. creds = {'scopes': sorted(list(scopes)), 'svc_acct_name': self.__service_account_name} creds_str = json.dumps(creds) cache_file = _MultiProcessCacheFile(cache_filename) try: cache_file.LockedWrite(creds_str) except KeyboardInterrupt: raise except: # pylint: disable=bare-except # Treat exceptions as a cache miss. pass
[ "def", "_WriteCacheFile", "(", "self", ",", "cache_filename", ",", "scopes", ")", ":", "# Credentials metadata dict.", "creds", "=", "{", "'scopes'", ":", "sorted", "(", "list", "(", "scopes", ")", ")", ",", "'svc_acct_name'", ":", "self", ".", "__service_acco...
Writes the credential metadata to the cache file. This does not save the credentials themselves (CredentialStore class optionally handles that after this class is initialized). Args: cache_filename: Cache filename to check. scopes: Scopes for the desired credentials.
[ "Writes", "the", "credential", "metadata", "to", "the", "cache", "file", "." ]
f3745a7ea535aa0e88b0650c16479b696d6fd446
https://github.com/google/apitools/blob/f3745a7ea535aa0e88b0650c16479b696d6fd446/apitools/base/py/credentials_lib.py#L309-L330
train
207,688
google/apitools
apitools/base/py/credentials_lib.py
GceAssertionCredentials._ScopesFromMetadataServer
def _ScopesFromMetadataServer(self, scopes): """Returns instance scopes based on GCE metadata server.""" if not util.DetectGce(): raise exceptions.ResourceUnavailableError( 'GCE credentials requested outside a GCE instance') if not self.GetServiceAccount(self.__service_account_name): raise exceptions.ResourceUnavailableError( 'GCE credentials requested but service account ' '%s does not exist.' % self.__service_account_name) if scopes: scope_ls = util.NormalizeScopes(scopes) instance_scopes = self.GetInstanceScopes() if scope_ls > instance_scopes: raise exceptions.CredentialsError( 'Instance did not have access to scopes %s' % ( sorted(list(scope_ls - instance_scopes)),)) else: scopes = self.GetInstanceScopes() return scopes
python
def _ScopesFromMetadataServer(self, scopes): """Returns instance scopes based on GCE metadata server.""" if not util.DetectGce(): raise exceptions.ResourceUnavailableError( 'GCE credentials requested outside a GCE instance') if not self.GetServiceAccount(self.__service_account_name): raise exceptions.ResourceUnavailableError( 'GCE credentials requested but service account ' '%s does not exist.' % self.__service_account_name) if scopes: scope_ls = util.NormalizeScopes(scopes) instance_scopes = self.GetInstanceScopes() if scope_ls > instance_scopes: raise exceptions.CredentialsError( 'Instance did not have access to scopes %s' % ( sorted(list(scope_ls - instance_scopes)),)) else: scopes = self.GetInstanceScopes() return scopes
[ "def", "_ScopesFromMetadataServer", "(", "self", ",", "scopes", ")", ":", "if", "not", "util", ".", "DetectGce", "(", ")", ":", "raise", "exceptions", ".", "ResourceUnavailableError", "(", "'GCE credentials requested outside a GCE instance'", ")", "if", "not", "self...
Returns instance scopes based on GCE metadata server.
[ "Returns", "instance", "scopes", "based", "on", "GCE", "metadata", "server", "." ]
f3745a7ea535aa0e88b0650c16479b696d6fd446
https://github.com/google/apitools/blob/f3745a7ea535aa0e88b0650c16479b696d6fd446/apitools/base/py/credentials_lib.py#L332-L350
train
207,689
google/apitools
apitools/base/py/credentials_lib.py
GceAssertionCredentials._do_refresh_request
def _do_refresh_request(self, unused_http_request): """Refresh self.access_token by querying the metadata server. If self.store is initialized, store acquired credentials there. """ relative_url = 'instance/service-accounts/{0}/token'.format( self.__service_account_name) try: response = _GceMetadataRequest(relative_url) except exceptions.CommunicationError: self.invalid = True if self.store: self.store.locked_put(self) raise content = response.read() try: credential_info = json.loads(content) except ValueError: raise exceptions.CredentialsError( 'Could not parse response as JSON: %s' % content) self.access_token = credential_info['access_token'] if 'expires_in' in credential_info: expires_in = int(credential_info['expires_in']) self.token_expiry = ( datetime.timedelta(seconds=expires_in) + datetime.datetime.utcnow()) else: self.token_expiry = None self.invalid = False if self.store: self.store.locked_put(self)
python
def _do_refresh_request(self, unused_http_request): """Refresh self.access_token by querying the metadata server. If self.store is initialized, store acquired credentials there. """ relative_url = 'instance/service-accounts/{0}/token'.format( self.__service_account_name) try: response = _GceMetadataRequest(relative_url) except exceptions.CommunicationError: self.invalid = True if self.store: self.store.locked_put(self) raise content = response.read() try: credential_info = json.loads(content) except ValueError: raise exceptions.CredentialsError( 'Could not parse response as JSON: %s' % content) self.access_token = credential_info['access_token'] if 'expires_in' in credential_info: expires_in = int(credential_info['expires_in']) self.token_expiry = ( datetime.timedelta(seconds=expires_in) + datetime.datetime.utcnow()) else: self.token_expiry = None self.invalid = False if self.store: self.store.locked_put(self)
[ "def", "_do_refresh_request", "(", "self", ",", "unused_http_request", ")", ":", "relative_url", "=", "'instance/service-accounts/{0}/token'", ".", "format", "(", "self", ".", "__service_account_name", ")", "try", ":", "response", "=", "_GceMetadataRequest", "(", "rel...
Refresh self.access_token by querying the metadata server. If self.store is initialized, store acquired credentials there.
[ "Refresh", "self", ".", "access_token", "by", "querying", "the", "metadata", "server", "." ]
f3745a7ea535aa0e88b0650c16479b696d6fd446
https://github.com/google/apitools/blob/f3745a7ea535aa0e88b0650c16479b696d6fd446/apitools/base/py/credentials_lib.py#L382-L413
train
207,690
google/apitools
apitools/base/py/credentials_lib.py
GaeAssertionCredentials._refresh
def _refresh(self, _): """Refresh self.access_token. Args: _: (ignored) A function matching httplib2.Http.request's signature. """ # pylint: disable=import-error from google.appengine.api import app_identity try: token, _ = app_identity.get_access_token(self._scopes) except app_identity.Error as e: raise exceptions.CredentialsError(str(e)) self.access_token = token
python
def _refresh(self, _): """Refresh self.access_token. Args: _: (ignored) A function matching httplib2.Http.request's signature. """ # pylint: disable=import-error from google.appengine.api import app_identity try: token, _ = app_identity.get_access_token(self._scopes) except app_identity.Error as e: raise exceptions.CredentialsError(str(e)) self.access_token = token
[ "def", "_refresh", "(", "self", ",", "_", ")", ":", "# pylint: disable=import-error", "from", "google", ".", "appengine", ".", "api", "import", "app_identity", "try", ":", "token", ",", "_", "=", "app_identity", ".", "get_access_token", "(", "self", ".", "_s...
Refresh self.access_token. Args: _: (ignored) A function matching httplib2.Http.request's signature.
[ "Refresh", "self", ".", "access_token", "." ]
f3745a7ea535aa0e88b0650c16479b696d6fd446
https://github.com/google/apitools/blob/f3745a7ea535aa0e88b0650c16479b696d6fd446/apitools/base/py/credentials_lib.py#L477-L489
train
207,691
google/apitools
apitools/base/py/credentials_lib.py
_MultiProcessCacheFile._ProcessLockAcquired
def _ProcessLockAcquired(self): """Context manager for process locks with timeout.""" try: is_locked = self._process_lock.acquire(timeout=self._lock_timeout) yield is_locked finally: if is_locked: self._process_lock.release()
python
def _ProcessLockAcquired(self): """Context manager for process locks with timeout.""" try: is_locked = self._process_lock.acquire(timeout=self._lock_timeout) yield is_locked finally: if is_locked: self._process_lock.release()
[ "def", "_ProcessLockAcquired", "(", "self", ")", ":", "try", ":", "is_locked", "=", "self", ".", "_process_lock", ".", "acquire", "(", "timeout", "=", "self", ".", "_lock_timeout", ")", "yield", "is_locked", "finally", ":", "if", "is_locked", ":", "self", ...
Context manager for process locks with timeout.
[ "Context", "manager", "for", "process", "locks", "with", "timeout", "." ]
f3745a7ea535aa0e88b0650c16479b696d6fd446
https://github.com/google/apitools/blob/f3745a7ea535aa0e88b0650c16479b696d6fd446/apitools/base/py/credentials_lib.py#L607-L614
train
207,692
google/apitools
apitools/base/py/credentials_lib.py
_MultiProcessCacheFile.LockedRead
def LockedRead(self): """Acquire an interprocess lock and dump cache contents. This method safely acquires the locks then reads a string from the cache file. If the file does not exist and cannot be created, it will return None. If the locks cannot be acquired, this will also return None. Returns: cache data - string if present, None on failure. """ file_contents = None with self._thread_lock: if not self._EnsureFileExists(): return None with self._process_lock_getter() as acquired_plock: if not acquired_plock: return None with open(self._filename, 'rb') as f: file_contents = f.read().decode(encoding=self._encoding) return file_contents
python
def LockedRead(self): """Acquire an interprocess lock and dump cache contents. This method safely acquires the locks then reads a string from the cache file. If the file does not exist and cannot be created, it will return None. If the locks cannot be acquired, this will also return None. Returns: cache data - string if present, None on failure. """ file_contents = None with self._thread_lock: if not self._EnsureFileExists(): return None with self._process_lock_getter() as acquired_plock: if not acquired_plock: return None with open(self._filename, 'rb') as f: file_contents = f.read().decode(encoding=self._encoding) return file_contents
[ "def", "LockedRead", "(", "self", ")", ":", "file_contents", "=", "None", "with", "self", ".", "_thread_lock", ":", "if", "not", "self", ".", "_EnsureFileExists", "(", ")", ":", "return", "None", "with", "self", ".", "_process_lock_getter", "(", ")", "as",...
Acquire an interprocess lock and dump cache contents. This method safely acquires the locks then reads a string from the cache file. If the file does not exist and cannot be created, it will return None. If the locks cannot be acquired, this will also return None. Returns: cache data - string if present, None on failure.
[ "Acquire", "an", "interprocess", "lock", "and", "dump", "cache", "contents", "." ]
f3745a7ea535aa0e88b0650c16479b696d6fd446
https://github.com/google/apitools/blob/f3745a7ea535aa0e88b0650c16479b696d6fd446/apitools/base/py/credentials_lib.py#L621-L641
train
207,693
google/apitools
apitools/base/py/credentials_lib.py
_MultiProcessCacheFile.LockedWrite
def LockedWrite(self, cache_data): """Acquire an interprocess lock and write a string. This method safely acquires the locks then writes a string to the cache file. If the string is written successfully the function will return True, if the write fails for any reason it will return False. Args: cache_data: string or bytes to write. Returns: bool: success """ if isinstance(cache_data, six.text_type): cache_data = cache_data.encode(encoding=self._encoding) with self._thread_lock: if not self._EnsureFileExists(): return False with self._process_lock_getter() as acquired_plock: if not acquired_plock: return False with open(self._filename, 'wb') as f: f.write(cache_data) return True
python
def LockedWrite(self, cache_data): """Acquire an interprocess lock and write a string. This method safely acquires the locks then writes a string to the cache file. If the string is written successfully the function will return True, if the write fails for any reason it will return False. Args: cache_data: string or bytes to write. Returns: bool: success """ if isinstance(cache_data, six.text_type): cache_data = cache_data.encode(encoding=self._encoding) with self._thread_lock: if not self._EnsureFileExists(): return False with self._process_lock_getter() as acquired_plock: if not acquired_plock: return False with open(self._filename, 'wb') as f: f.write(cache_data) return True
[ "def", "LockedWrite", "(", "self", ",", "cache_data", ")", ":", "if", "isinstance", "(", "cache_data", ",", "six", ".", "text_type", ")", ":", "cache_data", "=", "cache_data", ".", "encode", "(", "encoding", "=", "self", ".", "_encoding", ")", "with", "s...
Acquire an interprocess lock and write a string. This method safely acquires the locks then writes a string to the cache file. If the string is written successfully the function will return True, if the write fails for any reason it will return False. Args: cache_data: string or bytes to write. Returns: bool: success
[ "Acquire", "an", "interprocess", "lock", "and", "write", "a", "string", "." ]
f3745a7ea535aa0e88b0650c16479b696d6fd446
https://github.com/google/apitools/blob/f3745a7ea535aa0e88b0650c16479b696d6fd446/apitools/base/py/credentials_lib.py#L643-L668
train
207,694
google/apitools
apitools/base/py/credentials_lib.py
_MultiProcessCacheFile._EnsureFileExists
def _EnsureFileExists(self): """Touches a file; returns False on error, True on success.""" if not os.path.exists(self._filename): old_umask = os.umask(0o177) try: open(self._filename, 'a+b').close() except OSError: return False finally: os.umask(old_umask) return True
python
def _EnsureFileExists(self): """Touches a file; returns False on error, True on success.""" if not os.path.exists(self._filename): old_umask = os.umask(0o177) try: open(self._filename, 'a+b').close() except OSError: return False finally: os.umask(old_umask) return True
[ "def", "_EnsureFileExists", "(", "self", ")", ":", "if", "not", "os", ".", "path", ".", "exists", "(", "self", ".", "_filename", ")", ":", "old_umask", "=", "os", ".", "umask", "(", "0o177", ")", "try", ":", "open", "(", "self", ".", "_filename", "...
Touches a file; returns False on error, True on success.
[ "Touches", "a", "file", ";", "returns", "False", "on", "error", "True", "on", "success", "." ]
f3745a7ea535aa0e88b0650c16479b696d6fd446
https://github.com/google/apitools/blob/f3745a7ea535aa0e88b0650c16479b696d6fd446/apitools/base/py/credentials_lib.py#L670-L680
train
207,695
google/apitools
apitools/base/py/list_pager.py
YieldFromList
def YieldFromList( service, request, global_params=None, limit=None, batch_size=100, method='List', field='items', predicate=None, current_token_attribute='pageToken', next_token_attribute='nextPageToken', batch_size_attribute='maxResults'): """Make a series of List requests, keeping track of page tokens. Args: service: apitools_base.BaseApiService, A service with a .List() method. request: protorpc.messages.Message, The request message corresponding to the service's .List() method, with all the attributes populated except the .maxResults and .pageToken attributes. global_params: protorpc.messages.Message, The global query parameters to provide when calling the given method. limit: int, The maximum number of records to yield. None if all available records should be yielded. batch_size: int, The number of items to retrieve per request. method: str, The name of the method used to fetch resources. field: str, The field in the response that will be a list of items. predicate: lambda, A function that returns true for items to be yielded. current_token_attribute: str, The name of the attribute in a request message holding the page token for the page being requested. next_token_attribute: str, The name of the attribute in a response message holding the page token for the next page. batch_size_attribute: str, The name of the attribute in a response message holding the maximum number of results to be returned. None if caller-specified batch size is unsupported. Yields: protorpc.message.Message, The resources listed by the service. """ request = encoding.CopyProtoMessage(request) setattr(request, current_token_attribute, None) while limit is None or limit: if batch_size_attribute: # On Py3, None is not comparable so min() below will fail. # On Py2, None is always less than any number so if batch_size # is None, the request_batch_size will always be None regardless # of the value of limit. 
This doesn't generally strike me as the # correct behavior, but this change preserves the existing Py2 # behavior on Py3. if batch_size is None: request_batch_size = None else: request_batch_size = min(batch_size, limit or batch_size) setattr(request, batch_size_attribute, request_batch_size) response = getattr(service, method)(request, global_params=global_params) items = getattr(response, field) if predicate: items = list(filter(predicate, items)) for item in items: yield item if limit is None: continue limit -= 1 if not limit: return token = getattr(response, next_token_attribute) if not token: return setattr(request, current_token_attribute, token)
python
def YieldFromList( service, request, global_params=None, limit=None, batch_size=100, method='List', field='items', predicate=None, current_token_attribute='pageToken', next_token_attribute='nextPageToken', batch_size_attribute='maxResults'): """Make a series of List requests, keeping track of page tokens. Args: service: apitools_base.BaseApiService, A service with a .List() method. request: protorpc.messages.Message, The request message corresponding to the service's .List() method, with all the attributes populated except the .maxResults and .pageToken attributes. global_params: protorpc.messages.Message, The global query parameters to provide when calling the given method. limit: int, The maximum number of records to yield. None if all available records should be yielded. batch_size: int, The number of items to retrieve per request. method: str, The name of the method used to fetch resources. field: str, The field in the response that will be a list of items. predicate: lambda, A function that returns true for items to be yielded. current_token_attribute: str, The name of the attribute in a request message holding the page token for the page being requested. next_token_attribute: str, The name of the attribute in a response message holding the page token for the next page. batch_size_attribute: str, The name of the attribute in a response message holding the maximum number of results to be returned. None if caller-specified batch size is unsupported. Yields: protorpc.message.Message, The resources listed by the service. """ request = encoding.CopyProtoMessage(request) setattr(request, current_token_attribute, None) while limit is None or limit: if batch_size_attribute: # On Py3, None is not comparable so min() below will fail. # On Py2, None is always less than any number so if batch_size # is None, the request_batch_size will always be None regardless # of the value of limit. 
This doesn't generally strike me as the # correct behavior, but this change preserves the existing Py2 # behavior on Py3. if batch_size is None: request_batch_size = None else: request_batch_size = min(batch_size, limit or batch_size) setattr(request, batch_size_attribute, request_batch_size) response = getattr(service, method)(request, global_params=global_params) items = getattr(response, field) if predicate: items = list(filter(predicate, items)) for item in items: yield item if limit is None: continue limit -= 1 if not limit: return token = getattr(response, next_token_attribute) if not token: return setattr(request, current_token_attribute, token)
[ "def", "YieldFromList", "(", "service", ",", "request", ",", "global_params", "=", "None", ",", "limit", "=", "None", ",", "batch_size", "=", "100", ",", "method", "=", "'List'", ",", "field", "=", "'items'", ",", "predicate", "=", "None", ",", "current_...
Make a series of List requests, keeping track of page tokens. Args: service: apitools_base.BaseApiService, A service with a .List() method. request: protorpc.messages.Message, The request message corresponding to the service's .List() method, with all the attributes populated except the .maxResults and .pageToken attributes. global_params: protorpc.messages.Message, The global query parameters to provide when calling the given method. limit: int, The maximum number of records to yield. None if all available records should be yielded. batch_size: int, The number of items to retrieve per request. method: str, The name of the method used to fetch resources. field: str, The field in the response that will be a list of items. predicate: lambda, A function that returns true for items to be yielded. current_token_attribute: str, The name of the attribute in a request message holding the page token for the page being requested. next_token_attribute: str, The name of the attribute in a response message holding the page token for the next page. batch_size_attribute: str, The name of the attribute in a response message holding the maximum number of results to be returned. None if caller-specified batch size is unsupported. Yields: protorpc.message.Message, The resources listed by the service.
[ "Make", "a", "series", "of", "List", "requests", "keeping", "track", "of", "page", "tokens", "." ]
f3745a7ea535aa0e88b0650c16479b696d6fd446
https://github.com/google/apitools/blob/f3745a7ea535aa0e88b0650c16479b696d6fd446/apitools/base/py/list_pager.py#L26-L91
train
207,696
google/apitools
apitools/gen/service_registry.py
ServiceRegistry.__PrintDocstring
def __PrintDocstring(self, printer, method_info, method_name, name): """Print a docstring for a service method.""" if method_info.description: description = util.CleanDescription(method_info.description) first_line, newline, remaining = method_info.description.partition( '\n') if not first_line.endswith('.'): first_line = '%s.' % first_line description = '%s%s%s' % (first_line, newline, remaining) else: description = '%s method for the %s service.' % (method_name, name) with printer.CommentContext(): printer('r"""%s' % description) printer() printer('Args:') printer(' request: (%s) input message', method_info.request_type_name) printer(' global_params: (StandardQueryParameters, default: None) ' 'global arguments') if method_info.upload_config: printer(' upload: (Upload, default: None) If present, upload') printer(' this stream with the request.') if method_info.supports_download: printer( ' download: (Download, default: None) If present, download') printer(' data from the request via this stream.') printer('Returns:') printer(' (%s) The response message.', method_info.response_type_name) printer('"""')
python
def __PrintDocstring(self, printer, method_info, method_name, name): """Print a docstring for a service method.""" if method_info.description: description = util.CleanDescription(method_info.description) first_line, newline, remaining = method_info.description.partition( '\n') if not first_line.endswith('.'): first_line = '%s.' % first_line description = '%s%s%s' % (first_line, newline, remaining) else: description = '%s method for the %s service.' % (method_name, name) with printer.CommentContext(): printer('r"""%s' % description) printer() printer('Args:') printer(' request: (%s) input message', method_info.request_type_name) printer(' global_params: (StandardQueryParameters, default: None) ' 'global arguments') if method_info.upload_config: printer(' upload: (Upload, default: None) If present, upload') printer(' this stream with the request.') if method_info.supports_download: printer( ' download: (Download, default: None) If present, download') printer(' data from the request via this stream.') printer('Returns:') printer(' (%s) The response message.', method_info.response_type_name) printer('"""')
[ "def", "__PrintDocstring", "(", "self", ",", "printer", ",", "method_info", ",", "method_name", ",", "name", ")", ":", "if", "method_info", ".", "description", ":", "description", "=", "util", ".", "CleanDescription", "(", "method_info", ".", "description", ")...
Print a docstring for a service method.
[ "Print", "a", "docstring", "for", "a", "service", "method", "." ]
f3745a7ea535aa0e88b0650c16479b696d6fd446
https://github.com/google/apitools/blob/f3745a7ea535aa0e88b0650c16479b696d6fd446/apitools/gen/service_registry.py#L61-L88
train
207,697
google/apitools
apitools/gen/service_registry.py
ServiceRegistry.__WriteProtoServiceDeclaration
def __WriteProtoServiceDeclaration(self, printer, name, method_info_map): """Write a single service declaration to a proto file.""" printer() printer('service %s {', self.__GetServiceClassName(name)) with printer.Indent(): for method_name, method_info in method_info_map.items(): for line in textwrap.wrap(method_info.description, printer.CalculateWidth() - 3): printer('// %s', line) printer('rpc %s (%s) returns (%s);', method_name, method_info.request_type_name, method_info.response_type_name) printer('}')
python
def __WriteProtoServiceDeclaration(self, printer, name, method_info_map): """Write a single service declaration to a proto file.""" printer() printer('service %s {', self.__GetServiceClassName(name)) with printer.Indent(): for method_name, method_info in method_info_map.items(): for line in textwrap.wrap(method_info.description, printer.CalculateWidth() - 3): printer('// %s', line) printer('rpc %s (%s) returns (%s);', method_name, method_info.request_type_name, method_info.response_type_name) printer('}')
[ "def", "__WriteProtoServiceDeclaration", "(", "self", ",", "printer", ",", "name", ",", "method_info_map", ")", ":", "printer", "(", ")", "printer", "(", "'service %s {'", ",", "self", ".", "__GetServiceClassName", "(", "name", ")", ")", "with", "printer", "."...
Write a single service declaration to a proto file.
[ "Write", "a", "single", "service", "declaration", "to", "a", "proto", "file", "." ]
f3745a7ea535aa0e88b0650c16479b696d6fd446
https://github.com/google/apitools/blob/f3745a7ea535aa0e88b0650c16479b696d6fd446/apitools/gen/service_registry.py#L166-L179
train
207,698
google/apitools
apitools/gen/service_registry.py
ServiceRegistry.WriteProtoFile
def WriteProtoFile(self, printer): """Write the services in this registry to out as proto.""" self.Validate() client_info = self.__client_info printer('// Generated services for %s version %s.', client_info.package, client_info.version) printer() printer('syntax = "proto2";') printer('package %s;', self.__package) printer('import "%s";', client_info.messages_proto_file_name) printer() for name, method_info_map in self.__service_method_info_map.items(): self.__WriteProtoServiceDeclaration(printer, name, method_info_map)
python
def WriteProtoFile(self, printer): """Write the services in this registry to out as proto.""" self.Validate() client_info = self.__client_info printer('// Generated services for %s version %s.', client_info.package, client_info.version) printer() printer('syntax = "proto2";') printer('package %s;', self.__package) printer('import "%s";', client_info.messages_proto_file_name) printer() for name, method_info_map in self.__service_method_info_map.items(): self.__WriteProtoServiceDeclaration(printer, name, method_info_map)
[ "def", "WriteProtoFile", "(", "self", ",", "printer", ")", ":", "self", ".", "Validate", "(", ")", "client_info", "=", "self", ".", "__client_info", "printer", "(", "'// Generated services for %s version %s.'", ",", "client_info", ".", "package", ",", "client_info...
Write the services in this registry to out as proto.
[ "Write", "the", "services", "in", "this", "registry", "to", "out", "as", "proto", "." ]
f3745a7ea535aa0e88b0650c16479b696d6fd446
https://github.com/google/apitools/blob/f3745a7ea535aa0e88b0650c16479b696d6fd446/apitools/gen/service_registry.py#L181-L193
train
207,699